Changeset - 4eb3ab63f9dd
[Not reviewed]
default
0 2 0
Laman - 10 months ago 2024-06-30 23:16:37

refactored expand_alphabet iteration
2 files changed with 14 insertions and 16 deletions:
0 comments (0 inline, 0 general)
regexp.py
Show inline comments
 
@@ -374,105 +374,104 @@ class RegexpDFA:
 
		ctrl = True
 
		while ctrl:
 
			ctrl = False
 
			for (s1, s2) in equivalents.copy():
 
				for ci in range(n):
 
					t1 = self.rules[s1*n + ci]
 
					t2 = self.rules[s2*n + ci]
 
					key = (min(t1, t2), max(t1, t2))
 
					if t1 != t2 and key not in equivalents:
 
						equivalents.remove((s1, s2))
 
						ctrl = True
 
						break
 
		
 
		return equivalents
 
	
 
	def _collapse_states(self, equivalents):
		"""Merge equivalent states and renumber the survivors densely.

		:param equivalents: pairs (s1, s2) of mutually equivalent states;
			NOTE(review): looks like s1 < s2 is expected (keys are built as
			(min, max) upstream) -- confirm
		:return: tuple (rules, end_states) of the collapsed automaton
		"""
		n = len(self.alphabet_index)
		rules = []

		# For every state that has an equivalent, remember the smallest
		# state it collapses into (math.inf is just the "no entry yet" sentinel).
		eq_mapping = dict()
		for (s1, s2) in equivalents:
			eq_mapping[s2] = min(s1, eq_mapping.get(s2, math.inf))

		# Surviving states are renumbered to stay dense after the merged
		# states are dropped; discard_count tracks how many were dropped so far.
		discard_mapping = dict()
		discard_count = 0

		for i in range(0, len(self.rules), n):
			si = i//n
			if si in eq_mapping:
				# This state collapses into a smaller one; drop its row.
				discard_count += 1
				continue
			discard_mapping[si] = si - discard_count
			# Keep the row, but redirect transitions that point at merged
			# states to their representative.
			rules.extend(map(lambda st: eq_mapping.get(st, st), self.rules[i:i+n]))

		# Second pass: apply the dense renumbering to all transition targets.
		rules = [discard_mapping[st] for st in rules]
		end_states = {discard_mapping[eq_mapping.get(st, st)] for st in self.end_states}

		return (rules, end_states)
 

	
 
	def _expand_alphabet(self, alphabet_index):
		"""Return an equivalent DFA over the union of this automaton's
		alphabet and `alphabet_index`.

		Symbols this automaton does not know lead to a fresh sink
		(non-accepting) state appended as state `m`.

		:param alphabet_index: {character: index} mapping of the other alphabet
		:return: self if the alphabets already match, otherwise a reduced,
			normalized RegexpDFA over the combined alphabet
		"""
		if alphabet_index == self.alphabet_index:
			return self

		n1 = len(self.alphabet_index)
		m = len(self.rules) // n1

		# Merge the two alphabets and index the symbols in sorted order.
		combined_alphabet = set(self.alphabet_index.keys()) | set(alphabet_index.keys())
		combined_index = {c: i for (i, c) in enumerate(sorted(combined_alphabet))}
		# combined symbol index -> original symbol index (known symbols only).
		# (The hunk also showed a dead, immediately-overwritten assignment of
		# the inverse mapping and a superseded row-building loop; both removed.)
		conversion_index = {combined_index[k]: v for (k, v) in self.alphabet_index.items()}
		n2 = len(combined_alphabet)

		# Rebuild the transition table row by row over the combined alphabet;
		# unknown symbols fall through to the sink state m.
		rules = [
			self.rules[i*n1 + conversion_index[j]]
			if j in conversion_index else m
			for i in range(m) for j in range(n2)
		]
		# The sink state loops to itself on every symbol.
		rules.extend([m]*n2)

		return RegexpDFA(rules, self.end_states, combined_index).reduce().normalize()
 

	
 
	def _build_product_automaton(self, r):
		"""Build the product automaton of `self` and `r` whose accepting
		states are the symmetric difference of the two end-state sets
		(a pair accepts iff exactly one of the two automatons accepts).

		NOTE(review): assumes both automatons share the same alphabet_index
		-- presumably guaranteed by a prior _expand_alphabet call; confirm.

		:param r: the other automaton
		:return: a reduced, normalized RegexpDFA over the paired states
		"""
		n = len(self.alphabet_index)
		m = len(r.rules) // n
		k = len(self.rules) // n

		rules = []
		end_states = set()

		# Pair state s1 of self with state s2 of r; the pair is encoded
		# as the single index s1*m + s2.
		for s1 in range(k):
			row1 = self.rules[s1*n:(s1+1)*n]
			for s2 in range(m):
				row2 = r.rules[s2*n:(s2+1)*n]
				# Component-wise transitions, re-encoded into pair indices.
				rules.extend([x*m + y for (x, y) in zip(row1, row2)])
				# != on the two memberships is a boolean XOR.
				if (s1 in self.end_states) != (s2 in r.end_states):
					end_states.add(s1*m + s2)

		return RegexpDFA(rules, end_states, self.alphabet_index).reduce().normalize()
 

	
 

	
 
def test():
	"""Smoke test: match a fixed set of strings against several patterns
	and print the results for manual inspection (no asserts)."""
	tests = ["", "a", "ab", "aabb", "abab", "abcd", "abcbcdbcd"]
	for pattern in ["a(b|c)", "a*b*", "(ab)*", "a((bc)*d)*", "(a|b)*a(a|b)(a|b)(a|b)", "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"]:
		print("#", pattern)
		try:
			r = RegexpDFA.create(pattern).reduce().normalize()
		except ParsingError as e:
			# A broken pattern is reported but does not abort the run.
			print("Failed to parse the regexp:")
			print(e)
			continue
		for t in tests:
			print(t, r.match(t))
		print()
 

	
 

	
 
if __name__ == "__main__":
 
	parser = argparse.ArgumentParser()
 
	subparsers = parser.add_subparsers()
 
	
 
	test_parser = subparsers.add_parser("test")
 
	test_parser.set_defaults(name="test")
 

	
 
	match_parser = subparsers.add_parser("match")
 
	match_parser.add_argument("pattern")
 
	match_parser.add_argument("string")
src/regexp.rs
Show inline comments
 
@@ -253,86 +253,85 @@ impl RegexpDFA {
 
					return t1 != t2 && !equivalents.contains(&key);
 
				})
 
			}).copied().collect();
 
		}
 

	
 
		return Vec::from_iter(equivalents.into_iter());
 
	}
 

	
 
	/// Merge equivalent states and renumber the survivors densely.
	///
	/// `equivalents` holds pairs of mutually equivalent states;
	/// NOTE(review): looks like `s1 < s2` is expected (pairs are built as
	/// `(min, max)` upstream) -- confirm.
	fn collapse_states(&self, equivalents: Vec<(usize, usize)>) -> RegexpDFA {
		let n = self.alphabet_index.len();
		let m = self.rules.len()/n;
		let mut rules = Vec::new();

		// eq_mapping[s] = smallest state s collapses into (identity if none).
		let mut eq_mapping: Vec<usize> = ((0..m)).collect();
		for (s1, s2) in equivalents.into_iter() {
			eq_mapping[s2] = eq_mapping[s2].min(s1);
		}

		// Surviving states get renumbered to stay dense after the merged
		// ones are dropped; discard_count tracks how many were dropped so far.
		let mut discard_mapping: Vec<usize> = ((0..m)).collect();
		let mut discard_count = 0;

		for si in 0..m {
			if eq_mapping[si] != si {
				// Collapses into a smaller state; drop its row.
				discard_count += 1;
				continue;
			}
			discard_mapping[si] = si-discard_count;
			// Keep the row, redirecting transitions into merged states
			// to their representative.
			rules.extend(self.rules[si*n..(si+1)*n].iter().map(|&st| eq_mapping[st]));
		}

		// Second pass: apply the dense renumbering to all transition targets.
		rules = rules.into_iter().map(|st| discard_mapping[st]).collect();
		let end_states = self.end_states.iter().map(|st| discard_mapping[eq_mapping[*st]]).collect();

		return RegexpDFA{rules, end_states, alphabet_index: self.alphabet_index.clone()};
	}
 

	
 
	fn expand_alphabet(&self, alphabet_index: &HashMap<char, usize>) -> RegexpDFA {
 
		if *alphabet_index == self.alphabet_index {
 
			return self.clone();
 
		}
 

	
 
		let n1 = self.alphabet_index.len();
 
		let m = self.rules.len() / n1;
 

	
 
		let combined_alphabet: HashSet<char> = HashSet::from_iter(self.alphabet_index.keys().chain(alphabet_index.keys()).copied());
 
		let mut combined_vec = Vec::from_iter(combined_alphabet.into_iter());
 
		combined_vec.sort();
 
		let combined_index = HashMap::from_iter(combined_vec.iter().enumerate().map(|(i, c)| (*c, i)));
 
		let conversion_index: HashMap<usize, usize> = HashMap::from_iter(self.alphabet_index.iter().map(|(k, v)| (*v, combined_index[k])));
 
		let conversion_index: HashMap<usize, usize> = HashMap::from_iter(self.alphabet_index.iter().map(|(k, v)| (combined_index[k], *v)));
 
		let n2 = combined_vec.len();
 

	
 
		let mut rules = vec![];
 
		for i in 0..m {
 
			let mut row = vec![m;n2];
 
			for (j, st) in self.rules[i*n1..(i+1)*n1].iter().enumerate() {
 
				row[conversion_index[&j]] = *st;
 
		let rules: Vec<usize> = (0..m*n2).map(
 
			|i| {
 
				let (j, k) = (i/n2, i%n2);
 
				return if conversion_index.contains_key(&k) {
 
					self.rules[j*n1 + conversion_index[&k]]
 
				} else {m};
 
			}
 
			rules.append(&mut row);
 
		}
 
		rules.append(&mut vec![m;n2]);
 
		).chain(std::iter::repeat(m).take(n2)).collect();
 

	
 
		return RegexpDFA{rules, end_states: self.end_states.clone(), alphabet_index: combined_index}.reduce().normalize();
 
	}
 

	
 
	/// Build the product automaton of `self` and `other` whose accepting
	/// states are the symmetric difference of the two end-state sets
	/// (a pair accepts iff exactly one of the two automatons accepts —
	/// note the `^` below).
	///
	/// NOTE(review): assumes both automatons share the same alphabet_index
	/// -- presumably guaranteed by a prior expand_alphabet call; confirm.
	fn build_product_automaton(&self, other: &RegexpDFA) -> RegexpDFA {
		let n = self.alphabet_index.len();
		let m = other.rules.len() / n;
		let k = self.rules.len() / n;

		let mut rules = vec![];
		let mut end_states = HashSet::new();

		// Pair state s1 of self with state s2 of other; the pair is
		// encoded as the single index s1*m + s2.
		for s1 in 0..k {
			let row1 = &self.rules[s1*n..(s1+1)*n];
			for s2 in 0..m {
				let row2 = &other.rules[s2*n..(s2+1)*n];
				// Component-wise transitions, re-encoded into pair indices.
				rules.extend(row1.iter().zip(row2.iter()).map(|(x, y)| x*m + y));
				if (self.end_states.contains(&s1)) ^ (other.end_states.contains(&s2)) {
					end_states.insert(s1*m + s2);
				}
			}
		}

		return RegexpDFA{rules, end_states, alphabet_index: self.alphabet_index.clone()}.reduce().normalize();
	}
 
}
0 comments (0 inline, 0 general)