Changeset - 4f7b6352013d
[Not reviewed]
default
0 4 0
Laman - 10 months ago 2024-06-23 22:48:03

added the automaton normalization
4 files changed with 55 insertions and 14 deletions:
0 comments (0 inline, 0 general)
regexp.py
Show inline comments
 
from abc import abstractmethod
 
from collections import deque
 

	
 

	
 
class ParsingError(Exception):
	"""Raised when a regexp pattern is syntactically invalid."""
	pass
 

	
 

	
 
class Token:
	"""Abstract node of the regexp syntax tree.

	The positions yielded by the ``list_*`` methods are indices into the
	original pattern string; they double as NFA state labels.
	"""
	# True when the token can match the empty string.
	is_skippable = False

	@abstractmethod
	def list_first(self):
		"""Yield the positions that can start a match of this token."""
		pass

	@abstractmethod
	def list_last(self):
		"""Yield the positions that can end a match of this token."""
		pass

	@abstractmethod
	def list_neighbours(self):
		"""Yield (i, j) pairs of positions that may be adjacent in a match."""
		pass
 

	
 

	
 
class Lambda(Token):
	"""Token matching the empty string (written as "_" in patterns)."""
	is_skippable = True

	def list_first(self):
		# the empty string owns no positions
		return iter(())

	def list_last(self):
		return iter(())

	def list_neighbours(self):
		return iter(())
 

	
 

	
 
class Symbol(Token):
	"""A literal character sitting at a fixed position in the pattern."""

	def __init__(self, position, value):
		# position: index in the pattern; also the NFA state for this symbol
		self.position = position
		# value: the character itself
		self.value = value

	def list_first(self):
		yield self.position

	def list_last(self):
		yield self.position

	def list_neighbours(self):
		# a single character has no internal adjacencies
		return iter(())

	def __str__(self):
		return self.value
 

	
 

	
 
class Asterisk(Token):
	"""Kleene star: zero or more repetitions of its content token."""
	is_skippable = True

	def __init__(self, content: Token):
		self.content = content

	def list_first(self):
		yield from self.content.list_first()

	def list_last(self):
		yield from self.content.list_last()

	def list_neighbours(self):
		yield from self.content.list_neighbours()
		# loop-back edges: any final position may be followed by any first one
		yield from ((x, y) for x in self.list_last() for y in self.list_first())

	def __str__(self):
		return str(self.content) + "*"
 

	
 

	
 
class Alternative(Token):
	"""Alternation: one of several variant Chains, split on separator tokens.

	Raises ParsingError when a variant between separators is empty.
	"""

	def __init__(self, content: list):
		self.variants = []
		subsequence = []

		for token in content:
			if not isinstance(token, AlternativeSeparator):
				subsequence.append(token)
				continue
			if not subsequence:
				raise ParsingError("Found an empty Alternative variant.")
			self.variants.append(Chain(subsequence))
			subsequence = []

		# the trailing variant after the last separator
		if not subsequence:
			raise ParsingError("Found an empty Alternative variant.")
		self.variants.append(Chain(subsequence))

	def list_first(self):
		for variant in self.variants:
			yield from variant.list_first()

	def list_last(self):
		for variant in self.variants:
			yield from variant.list_last()

	def list_neighbours(self):
		for variant in self.variants:
			yield from variant.list_neighbours()

	@property
	def is_skippable(self):
		# the alternative matches "" iff at least one variant does
		return any(variant.is_skippable for variant in self.variants)
 

	
 
class AlternativeSeparator:
	"""Marker emitted by the parser for "|" / "+"; consumed by Alternative."""
	pass
 

	
 
class Chain(Token):
	"""Concatenation of a sequence of tokens."""

	def __init__(self, content: list):
		self.content = content

	def list_first(self):
		# first positions of the leading tokens, up to and including
		# the first token that cannot match the empty string
		for token in self.content:
			yield from token.list_first()
			if not token.is_skippable:
				break

	def list_last(self):
		# mirror image of list_first, scanning from the end
		for token in reversed(self.content):
			yield from token.list_last()
			if not token.is_skippable:
				break

	def list_neighbours(self):
		# `pending` holds the tokens whose last positions may still
		# immediately precede the current token: a run of skippable tokens
		# plus the closest non-skippable one before them
		pending = []
		for token in self.content:
			for prev in pending:
				for x in prev.list_last():
					for y in token.list_first():
						yield (x, y)
			yield from token.list_neighbours()

			if token.is_skippable:
				pending.append(token)
			else:
				pending = [token]

	@property
	def is_skippable(self):
		# the chain matches "" iff every element does
		return all(token.is_skippable for token in self.content)

	def __str__(self):
		return "(" + "".join(str(token) for token in self.content) + ")"
 

	
 

	
 
def find_closing_parenthesis(pattern, k):
	"""Return the index of the ")" matching the "(" at index `k`.

	Raises ParsingError when the parenthesis is never closed.
	"""
	depth = 0

	for (i, c) in enumerate(pattern[k:]):
		if c == "(":
			depth += 1
		elif c == ")":
			depth -= 1
		if depth == 0:
			return k + i

	raise ParsingError(f'A closing parenthesis not found. Pattern: "{pattern}", position: {k}')
 

	
 

	
 
def parse(pattern, offset=0):
	"""Parse a regexp string into a token tree.

	pattern: the (sub)pattern to parse.
	offset: index of pattern[0] within the full top-level pattern, so that
		Symbol positions stay global when recursing into groups.
	Returns an Alternative if a top-level separator ("|" or "+") was seen,
	otherwise a Chain. Raises ParsingError on malformed input.
	"""
	res = []
	is_alternative = False

	i = 0
	while i < len(pattern):
		c = pattern[i]
		if c == "(":
			# recurse into the parenthesised group
			j = find_closing_parenthesis(pattern, i)
			inner_content = parse(pattern[i+1:j], offset+i+1)
			res.append(inner_content)
			i = j+1
		elif c == "*":
			# the star binds to the immediately preceding token
			try:
				token = res.pop()
			except IndexError as e:
				# chain the original exception for a useful traceback
				raise ParsingError(f'The asterisk operator is missing an argument. Pattern: "{pattern}", position {i}') from e
			res.append(Asterisk(token))
			i += 1
		elif c == ")":
			raise ParsingError(f'An opening parenthesis not found. Pattern: "{pattern}", position: {i}')
		elif c == "|" or c == "+":
			is_alternative = True
			res.append(AlternativeSeparator())
			i += 1
		elif c == "_":
			res.append(Lambda())
			i += 1
		else:
			res.append(Symbol(i+offset, c))
			i += 1

	if is_alternative:
		return Alternative(res)
	else:
		return Chain(res)
 

	
 

	
 
class Regexp:
	"""Nondeterministic finite automaton compiled from a regexp pattern.

	States are character positions in the pattern; -1 is the virtual
	start state.
	"""

	def __init__(self, pattern):
		(self.rules, self.end_states) = self._parse(pattern)

	def _parse(self, s):
		"""Build the transition table and accepting states for pattern `s`.

		Returns (rules, end_states) where rules maps (state, char) to a set
		of target states.
		"""
		r = parse(s)
		rules = dict()

		# edges from the start state to every position that can begin a match
		for i in r.list_first():
			rules.setdefault((-1, s[i]), set()).add(i)

		# edges between positions that may be adjacent in a match
		for (i, j) in r.list_neighbours():
			rules.setdefault((i, s[j]), set()).add(j)

		# accepting states; if the pattern matches "", the start state accepts
		end_states = set(r.list_last())
		if r.is_skippable:
			end_states.add(-1)

		return rules, end_states

	def match(self, s):
		"""Simulate the NFA on `s`; True iff an accepting state is active
		after the whole input is consumed."""
		current = {-1}

		for c in s:
			successors = set()
			for st in current:
				targets = self.rules.get((st, c))
				if targets:
					successors.update(targets)
			current = successors

		return any(st in self.end_states for st in current)

	def determinize(self):
		"""Subset construction: return (rules, end_states) of an equivalent
		DFA whose states are sorted tuples of NFA states."""
		rules = dict()
		end_states = {(-1,)} if -1 in self.end_states else set()

		stack = [(-1,)]
		processed_states = set()
		while stack:
			multistate = stack.pop()

			# gather all NFA transitions leaving any state of this subset
			new_rules = dict()
			for ((st, c), target) in self.rules.items():
				if st in multistate:
					new_rules.setdefault(c, set()).update(target)

			# record one DFA edge per character; queue unseen target subsets
			for (c, target_set) in new_rules.items():
				new_target = tuple(sorted(target_set))
				rules[(multistate, c)] = new_target
				if any(st in self.end_states for st in new_target):
					end_states.add(new_target)
				if new_target not in processed_states:
					stack.append(new_target)
					processed_states.add(new_target)

		return (rules, end_states)
 

	
 

	
 
class RegexpDFA:
	"""Deterministic finite automaton.

	Fresh output of `Regexp.determinize` labels states with sorted tuples of
	NFA states; `normalize` relabels them with consecutive integers (start
	state 0). `match` expects a normalized automaton.
	"""

	def __init__(self, rules, end_states):
		# rules: dict (state, char) -> state; end_states: set of states
		self.rules = rules
		self.end_states = end_states

	@classmethod
	def create(cls, pattern):
		"""Build a DFA directly from a regexp pattern string."""
		r = Regexp(pattern)
		(rules, end_states) = r.determinize()

		return cls(rules, end_states)

	def match(self, s):
		"""Return True iff the automaton accepts the whole string `s`.

		The automaton must be normalized (start state 0). A missing
		transition means the implicit fail state: reject immediately.
		"""
		st = 0

		for c in s:
			key = (st, c)
			if key in self.rules:
				st = self.rules[key]
			else:
				return False

		return st in self.end_states

	def reduce(self):
		"""Return an equivalent DFA with all equivalent states merged."""
		equivalents = self._find_equivalent_states()
		(rules, end_states) = self._collapse_states(equivalents)

		return RegexpDFA(rules, end_states)

	def normalize(self):
		"""Relabel states as 0, 1, 2, ... in BFS order from the start state.

		Edges of each state are visited in sorted order, so isomorphic
		automata normalize to identical rule tables.
		"""
		index = {(-1,): 0}
		queue = deque([(-1,)])

		while queue:
			state = queue.popleft()
			edges = [((st, c), t) for ((st, c), t) in self.rules.items() if st == state]
			edges.sort()
			for ((st, c), t) in edges:
				if t not in index:
					index[t] = len(index)
					queue.append(t)

		rules = dict(((index[st], c), index[t]) for ((st, c), t) in self.rules.items())
		end_states = {index[st] for st in self.end_states}

		return RegexpDFA(rules, end_states)

	def _find_equivalent_states(self):
		"""Return the set of equivalent state pairs (s1, s2) with s1 < s2.

		Refines the candidate pairs until a fixed point; (-2,) stands for
		the implicit fail state of undefined transitions.
		"""
		state_set = [(-2,), (-1,)] + sorted(set(self.rules.values()))
		alphabet = {c for (st, c) in self.rules.keys()}
		equivalents = {(s1, s2) for (i, s1) in enumerate(state_set) for s2 in state_set[i+1:]}

		# states differing in acceptance can never be equivalent
		for (s1, s2) in equivalents.copy():
			if (s1 in self.end_states) != (s2 in self.end_states):
				equivalents.remove((s1, s2))

		ctrl = True
		while ctrl:
			ctrl = False
			for (s1, s2) in equivalents.copy():
				for c in alphabet:
					t1 = self.rules.get((s1, c), (-2,))
					t2 = self.rules.get((s2, c), (-2,))
					key = (min(t1, t2), max(t1, t2))
					if t1 != t2 and key not in equivalents:
						equivalents.remove((s1, s2))
						ctrl = True
						break

		return equivalents

	def _collapse_states(self, equivalents):
		"""Merge each pair (s1, s2): drop s2's outgoing edges and redirect
		edges targeting s2 onto s1.

		Uses an eager list comprehension per pair. The previous lazily
		chained map/filter closures all captured the loop variables s1/s2,
		so when the chain was finally evaluated every layer saw only the
		LAST pair and all other merges were silently skipped.
		"""
		rules = self.rules.items()
		end_states = self.end_states.copy()

		for (s1, s2) in sorted(equivalents):
			rules = [
				(key, s1 if target == s2 else target)
				for (key, target) in rules
				if key[0] != s2
			]
			end_states.discard(s2)

		return (dict(rules), end_states)
 

	
 

	
 
if __name__ == "__main__":
	# Smoke test: build, minimize and normalize a DFA for each pattern and
	# print the match result for every test string.
	tests = ["", "a", "ab", "aabb", "abab", "abcd", "abcbcdbcd"]
	for pattern in ["a(b|c)", "a*b*", "(ab)*", "a((bc)*d)*", "(a|b)*a(a|b)(a|b)(a|b)"]:
		print("#", pattern)
		try:
			r = RegexpDFA.create(pattern).reduce().normalize()
		except ParsingError as e:
			print("Failed to parse the regexp:")
			print(e)
			continue
		for t in tests:
			print(t, r.match(t))
		print()
src/main.rs
Show inline comments
 
use regexp::Regexp;
 

	
 
fn main() {
 
	let tests = ["", "a", "ab", "aabb", "abab", "abcd", "abcbcdbcd"];
 
	for pattern in ["a(b|c)", "a*b*", "(ab)*", "a((bc)*d)*"] {
 
	for pattern in ["a(b|c)", "a*b*", "(ab)*", "a((bc)*d)*", "(a|b)*a(a|b)(a|b)(a|b)"] {
 
		println!("# {pattern}");
 
		let r = match Regexp::new(&pattern.to_string()) {
 
			Ok(r1) => r1.determinize().reduce(),
 
			Ok(r1) => r1.determinize().reduce().normalize(),
 
			Err(e) => {
 
				println!("{e}");
 
				continue;
 
			}
 
		};
 
		for &t in tests.iter() {
 
			println!("{t} {}", r.eval(t.to_string()));
 
		}
 
		println!();
 
	}
 
}
src/regexp.rs
Show inline comments
 
use std::collections::{HashMap, HashSet, VecDeque};
 

	
 
mod token;
 
pub use token::ParsingError;
 
use token::parse;
 

	
 
/// Virtual start state shared by all automata in this module.
const START: usize = usize::MAX;
/// Implicit sink state standing in for undefined transitions (used during reduction).
const FAIL: usize = START-1;
 

	
 
/// Encode a set of NFA state indices as a bitmask: state `x` sets bit `x`.
/// The encoding assumes every state index fits into 64 bits.
fn encode_set(set: &HashSet<usize>) -> u64 {
	set.iter().fold(0u64, |acc, &x| acc | (1u64 << x))
}
 

	
 
/// Decode a bitmask produced by `encode_set` back into a set of states.
///
/// The value `START as u64` is special-cased: it denotes the singleton
/// set containing the virtual start state.
fn decode_set(x: u64) -> HashSet<usize> {
	if x == START as u64 {
		return HashSet::from([START]);
	}

	let mut remaining = x;
	let mut res: HashSet<usize> = HashSet::new();

	// peel off the lowest set bit until none remain
	while remaining > 0 {
		let bit = remaining.trailing_zeros();
		res.insert(bit as usize);
		remaining ^= 1 << bit;
	}

	res
}
 

	
 
/// Nondeterministic finite automaton compiled from a regexp pattern.
#[derive(Debug)]
pub struct Regexp {
	// transition table: (source state, character) -> set of target states;
	// states are character positions in the pattern, START is the virtual start
	rules: HashMap<(usize, char), HashSet<usize>>,
	// states in which a fully consumed input is accepted
	end_states: HashSet<usize>
}
 

	
 
impl Regexp {
	/// Compile `pattern` into an NFA. States are the positions of the
	/// pattern's characters; `START` is the virtual initial state.
	pub fn new(pattern: &String) -> Result<Regexp, ParsingError> {
		let r = parse(pattern, 0)?;
		let pattern_chars = Vec::from_iter(pattern.chars());
		let mut rules: HashMap<(usize, char), HashSet<usize>> = HashMap::new();

		// edges from the start state to every position that can begin a match
		for i in r.list_first() {
			let c = pattern_chars[i];
			let key = (START, c);
			match rules.get_mut(&key) {
				Some(set) => {set.insert(i);},
				None => {rules.insert(key, HashSet::from([i]));}
			};
		}

		// edges between positions that may be adjacent in a match
		for (i, j) in r.list_neighbours() {
			let c = pattern_chars[j];
			let key = (i, c);
			match rules.get_mut(&key) {
				Some(set) => {set.insert(j);},
				None => {rules.insert(key, HashSet::from([j]));}
			};
		}

		// accepting states; if the pattern matches "", the start state accepts too
		let mut end_states = HashSet::from_iter(r.list_last().into_iter());
		if r.is_skippable() {
			end_states.insert(START);
		}

		return Ok(Regexp{rules, end_states});
	}

	/// Simulate the NFA on `s`; true iff an accepting state is active
	/// after the whole input is consumed.
	pub fn eval(&self, s: String) -> bool {
		let mut multistate = HashSet::from([START]);

		for c in s.chars() {
			let mut new_multistate = HashSet::new();

			for state in multistate {
				if let Some(x) = self.rules.get(&(state, c)) {
					new_multistate = new_multistate.union(&x).map(|&y| y).collect();
				} else if let Some(x) = self.rules.get(&(state, '.')) {
					// fall back to rules recorded under the '.' character.
					// NOTE(review): looks like a wildcard hook, but no '.'
					// handling is visible in this file's parser — confirm
					// against token.rs
					new_multistate = new_multistate.union(&x).map(|&y| y).collect();
				}
			}
			multistate = new_multistate;
		}

		return multistate.iter().any(|x| self.end_states.contains(x));
	}

	/// Convert the NFA into a DFA by the subset construction.
	/// Each subset of states is encoded into a single u64 bitmask via
	/// `encode_set`, which bounds the automaton to 64 distinct NFA states.
	pub fn determinize(&self) -> RegexpDFA {
		let mut rules: HashMap<(u64, char), u64> = HashMap::new();
		let mut end_states: HashSet<u64> = HashSet::new();
		if self.end_states.contains(&START) {end_states.insert(START as u64);}

		let mut stack = Vec::from([START as u64]);
		let mut processed_states = HashSet::new();
		while !stack.is_empty() {
			let state = stack.pop().unwrap();
			let multistate = decode_set(state);
			let mut new_rules: HashMap<char, HashSet<usize>> = HashMap::new();

			// gather all NFA transitions leaving any state of this subset
			for key in self.rules.keys().filter(|key| multistate.contains(&key.0)) {
				let (_st, c) = key;
				if !new_rules.contains_key(c) {
					new_rules.insert(*c, HashSet::new());
				}
				for target in &self.rules[key] {
					new_rules.get_mut(c).unwrap().insert(*target);
				}
			}

			// record one DFA edge per character; queue unseen target subsets
			for (c, target_set) in new_rules.into_iter() {
				let encoded_target = encode_set(&target_set);
				rules.insert((state, c), encoded_target);
				if target_set.iter().any(|st| self.end_states.contains(st)) {
					end_states.insert(encoded_target);
				}
				if !processed_states.contains(&encoded_target) {
					stack.push(encoded_target);
					processed_states.insert(encoded_target);
				}
			}
		}

		return RegexpDFA{rules, end_states};
	}
}
 

	
 
/// Deterministic finite automaton produced by `Regexp::determinize`.
pub struct RegexpDFA {
	// transition table: (state, character) -> state; states are u64-encoded
	// subsets of NFA states (or renumbered labels after `normalize`)
	rules: HashMap<(u64, char), u64>,
	// accepting states
	end_states: HashSet<u64>
}
 

	
 
impl RegexpDFA {
	/// Run the DFA on `s`; true iff the walk ends in an accepting state.
	/// A missing transition means the implicit fail state: reject immediately.
	pub fn eval(&self, s: String) -> bool {
		let mut state = START as u64;

		for c in s.chars() {
			if let Some(x) = self.rules.get(&(state, c)) {
				state = *x;
			} else {
				return false;
			}
		}

		return self.end_states.contains(&state);
	}

	/// Return an equivalent DFA with all equivalent states merged.
	pub fn reduce(&self) -> RegexpDFA {
		let equivalents = self.find_equivalent_states();
		return self.collapse_states(equivalents);
	}

	/// Relabel states in BFS order from the start state, visiting each
	/// state's edges in sorted order, so isomorphic automata end up with
	/// identical rule tables. The start state keeps the label `START as u64`;
	/// discovered states get consecutive labels starting at 1.
	pub fn normalize(&self) -> RegexpDFA {
		let mut index = HashMap::from([(START as u64, START as u64)]);
		let mut queue = VecDeque::from([START as u64]);

		while !queue.is_empty() {
			let state = queue.pop_front().unwrap();
			// outgoing edges of `state`, sorted for a deterministic order
			let mut edges: Vec<((u64, char), u64)> = self.rules.iter()
				.filter(|((st, _c), _t)| *st == state)
				.map(|(&key, &t)| (key, t))
				.collect();
			edges.sort();
			for ((_st, _c), t) in edges {
				if !index.contains_key(&t) {
					index.insert(t, index.len() as u64);
					queue.push_back(t);
				}
			}
		}

		let rules = self.rules.iter().map(|((st, c), t)| ((index[st], *c), index[t])).collect();
		let end_states = self.end_states.iter().map(|st| index[st]).collect();

		return RegexpDFA{rules, end_states};
	}

	/// Find all pairs of states that accept the same language, by pruning
	/// the set of candidate pairs until a fixed point. `FAIL` stands in for
	/// the implicit sink state of undefined transitions.
	fn find_equivalent_states(&self) -> Vec<(u64, u64)> {
		let state_set: HashSet<u64> = HashSet::from_iter(self.rules.values().copied());
		let mut state_vec: Vec<u64> = Vec::from_iter(state_set.into_iter());
		state_vec.push(START as u64);
		state_vec.push(FAIL as u64);
		state_vec.sort();
		state_vec.reverse();
		let alphabet: HashSet<char> = self.rules.keys().map(|(_st, c)| c).copied().collect();

		// candidate pairs, stored as (larger, smaller) since state_vec is
		// descending; only states agreeing on acceptance can be equivalent
		let mut equivalents = HashSet::new();
		state_vec.iter().enumerate().for_each(|(i, s1)| {
			equivalents.extend(
				state_vec[i+1..].iter()
				.filter(|s2| !(self.end_states.contains(s1)^self.end_states.contains(s2)))
				.map(|s2| (*s1, *s2))
			);
		});

		// prune pairs with a character whose successors are not (yet) known
		// to be equivalent; repeat until no pair gets dropped
		let mut n = usize::MAX;
		while equivalents.len() < n {
			n = equivalents.len();
			equivalents = equivalents.iter().filter(|(s1, s2)| {
				!alphabet.iter().any(|c| {
					let t1 = self.rules.get(&(*s1, *c)).unwrap_or(&(FAIL as u64));
					let t2 = self.rules.get(&(*s2, *c)).unwrap_or(&(FAIL as u64));
					// build the lookup key as (larger, smaller) to match the
					// storage order above; the previous (min, max) key could
					// never match a stored pair, so established equivalences
					// were ignored and the refinement over-pruned
					let key = (*t1.max(t2), *t1.min(t2));
					return t1 != t2 && !equivalents.contains(&key);
				})
			}).copied().collect();
		}

		return Vec::from_iter(equivalents.into_iter());
	}

	/// Merge each pair (s1, s2): drop s2's outgoing edges and redirect
	/// edges targeting s2 onto s1.
	fn collapse_states(&self, mut equivalents: Vec<(u64, u64)>) -> RegexpDFA {
		let mut rules = self.rules.clone();
		let mut end_states = self.end_states.clone();
		equivalents.sort();

		for (s1, s2) in equivalents.into_iter() {
			rules = rules.into_iter()
				.filter(|((st, _c), _t)| *st != s2)
				.map(|(key, t)| (key, if t==s2 {s1} else {t})).collect();
			end_states.remove(&s2);
		}

		return RegexpDFA{rules, end_states};
	}
}
tests/test_regexp.rs
Show inline comments
 
use regexp::Regexp;
 
use regexp::ParsingError;
 

	
 
// The NFA accepts exactly the literal string "abc".
#[test]
fn test_eval_basic_nfa() {
	let r = Regexp::new(&String::from("abc")).unwrap();
	assert!(r.eval(String::from("abc")));
	assert!(!r.eval(String::from("ab")));
	assert!(!r.eval(String::from("abcd")));
}
 

	
 
// The full DFA pipeline (determinize + reduce + normalize) accepts exactly "abc".
#[test]
fn test_eval_basic_dfa() {
	let r = Regexp::new(&String::from("abc")).unwrap().determinize().reduce().normalize();
	assert!(r.eval(String::from("abc")));
	assert!(!r.eval(String::from("ab")));
	assert!(!r.eval(String::from("abcd")));
}
 

	
 
// Empty-string handling in the NFA: "a*" and "" accept "", "a" does not.
#[test]
fn test_eval_empty_nfa() {
	assert!(Regexp::new(&String::from("a*")).unwrap().eval(String::from("")));
	assert!(Regexp::new(&String::from("")).unwrap().eval(String::from("")));
	assert!(!Regexp::new(&String::from("")).unwrap().eval(String::from("a")));
	assert!(!Regexp::new(&String::from("a")).unwrap().eval(String::from("")));
}
 

	
 
// Empty-string handling survives determinization, reduction and normalization.
#[test]
fn test_eval_empty_dfa() {
	assert!(Regexp::new(&String::from("a*")).unwrap().determinize().reduce().normalize().eval(String::from("")));
	assert!(Regexp::new(&String::from("")).unwrap().determinize().reduce().normalize().eval(String::from("")));
	assert!(!Regexp::new(&String::from("")).unwrap().determinize().reduce().normalize().eval(String::from("a")));
}
 

	
 
// Kleene star in the NFA: "a*b*" accepts any a-run followed by a b-run.
#[test]
fn test_eval_asterisk_nfa() {
	let r = Regexp::new(&String::from("a*b*")).unwrap();
	assert!(r.eval(String::from("a")));
	assert!(r.eval(String::from("ab")));
	assert!(r.eval(String::from("aabb")));
	assert!(!r.eval(String::from("abab")));
}
 

	
 
// Kleene star after the full DFA pipeline; pipeline order kept consistent
// with the other DFA tests (reduce before normalize).
#[test]
fn test_eval_asterisk_dfa() {
	let r = Regexp::new(&String::from("a*b*")).unwrap().determinize().reduce().normalize();
	assert!(r.eval(String::from("a")));
	assert!(r.eval(String::from("ab")));
	assert!(r.eval(String::from("aabb")));
	assert!(!r.eval(String::from("abab")));
}
 

	
 
// Alternation in the NFA: "a|b|c" accepts exactly one of the three letters.
#[test]
fn test_eval_alternative_nfa() {
	let r = Regexp::new(&String::from("a|b|c")).unwrap();
	assert!(r.eval(String::from("a")));
	assert!(r.eval(String::from("b")));
	assert!(r.eval(String::from("c")));
	assert!(!r.eval(String::from("")));
	assert!(!r.eval(String::from("ab")));
}
 

	
 
// Alternation after the full DFA pipeline.
#[test]
fn test_eval_alternative_dfa() {
	let r = Regexp::new(&String::from("a|b|c")).unwrap().determinize().reduce().normalize();
	assert!(r.eval(String::from("a")));
	assert!(r.eval(String::from("b")));
	assert!(r.eval(String::from("c")));
	assert!(!r.eval(String::from("")));
	assert!(!r.eval(String::from("ab")));
}
 

	
 
// The lambda token "_" matches the empty string, both concatenated and
// as an alternative variant.
#[test]
fn test_eval_lambda_nfa() {
	let r = Regexp::new(&String::from("a_")).unwrap();
	assert!(r.eval(String::from("a")));
	assert!(!r.eval(String::from("")));
	assert!(!r.eval(String::from("ab")));

	let r = Regexp::new(&String::from("a|_")).unwrap();
	assert!(r.eval(String::from("a")));
	assert!(r.eval(String::from("")));
	assert!(!r.eval(String::from("b")));
}
 

	
 
// Lambda handling after the full DFA pipeline.
#[test]
fn test_eval_lambda_dfa() {
	let r = Regexp::new(&String::from("a_")).unwrap().determinize().reduce().normalize();
	assert!(r.eval(String::from("a")));
	assert!(!r.eval(String::from("")));
	assert!(!r.eval(String::from("ab")));

	let r = Regexp::new(&String::from("a|_")).unwrap().determinize().reduce().normalize();
	assert!(r.eval(String::from("a")));
	assert!(r.eval(String::from("")));
	assert!(!r.eval(String::from("b")));
}
 

	
 
// A leading "*" has no operand and must be rejected.
#[test]
fn test_invalid_asterisk() {
	let x = Regexp::new(&String::from("*"));
	assert!(matches!(x, Err(ParsingError::Asterisk{s: _, pos: 0})));
}
 

	
 
// An unclosed "(" must be rejected, reporting the opening position.
#[test]
fn test_invalid_closing_parenthesis() {
	let x = Regexp::new(&String::from("(a"));
	assert!(matches!(x, Err(ParsingError::ClosingParenthesis{s: _, pos: 0})));
}
 

	
 
// A stray ")" must be rejected, reporting its position.
#[test]
fn test_invalid_opening_parenthesis() {
	let x = Regexp::new(&String::from("a)"));
	assert!(matches!(x, Err(ParsingError::OpeningParenthesis{s: _, pos: 1})));
}
 

	
 
// An alternative variant that is empty at the start of a group is rejected.
#[test]
fn test_invalid_empty_variant_start() {
	let x = Regexp::new(&String::from("a(|b)"));
	assert!(matches!(x, Err(ParsingError::EmptyAlternativeVariant)));
}
 

	
 
// An alternative variant that is empty at the end of the pattern is rejected.
#[test]
fn test_invalid_empty_variant_end() {
	let x = Regexp::new(&String::from("a|"));
	assert!(matches!(x, Err(ParsingError::EmptyAlternativeVariant)));
}
 

	
 
// An alternative variant that is empty between two separators is rejected.
#[test]
fn test_invalid_empty_variant_middle() {
	let x = Regexp::new(&String::from("a||b"));
	assert!(matches!(x, Err(ParsingError::EmptyAlternativeVariant)));
}
0 comments (0 inline, 0 general)