Changeset - 08f1519c859e
[Not reviewed]
branch: default
Laman - 9 months ago 2024-07-09 13:05:02

added Rust documentation
6 files changed with 93 insertions and 45 deletions:
0 comments (0 inline, 0 general)
regexp.py
 
import math
 
import itertools
 
import argparse
 
from abc import abstractmethod
 
from collections import deque
 
from typing import Iterator, Optional
 

	
 

	
 
class ParsingError(Exception):
 
	pass
 

	
 

	
 
class Token:
 
	"""Abstract base class representing a single item of a regular expressions."""
 
	"""Abstract base class representing a single item of a regular expression."""
 
	# is the Token mandatory, or can it be omitted
 
	is_skippable = False
 

	
 
	@abstractmethod
 
	def list_first(self) -> Iterator[int]:
 
		"""List all possible string positions the token can start with."""
 
		pass
 

	
 
	@abstractmethod
 
	def list_last(self) -> Iterator[int]:
 
		"""List all possible string positions the token can end with."""
 
		pass
 

	
 
	@abstractmethod
 
	def list_neighbours(self) -> Iterator[tuple[int, int]]:
 
		"""List positions of all possibly neighbouring subtokens."""
 
		pass
 

	
 

	
 
class Lambda(Token):
 
	"""An empty string, useful as an `Alternative`."""
 
	is_skippable = True
 

	
 
	def list_first(self):
 
		yield from []
 

	
 
	def list_last(self):
 
		yield from []
 

	
 
	def list_neighbours(self):
 
		yield from []
 

	
 

	
 
class Symbol(Token):
 
	"""A regular letter or any other symbol without a special function."""
 
	def __init__(self, position: int, value: str):
 
		self.position = position
 
		self.value = value
 

	
 
	def list_first(self):
 
		yield self.position
 

	
 
	def list_last(self):
 
		yield self.position
 

	
 
	def list_neighbours(self):
 
		yield from []
 

	
 
	def __str__(self):
 
		return self.value
 

	
 

	
 
class Asterisk(Token):
 
	"""A unary operator specifying its content to occur zero or more times."""
 
	is_skippable = True
 

	
 
	def __init__(self, content: Token):
 
		self.content = content
 

	
 
	def list_first(self):
 
		yield from self.content.list_first()
 

	
 
	def list_last(self):
 
		yield from self.content.list_last()
 

	
 
	def list_neighbours(self):
 
		yield from self.content.list_neighbours()
 
		for x in self.list_last():
 
			for y in self.list_first():
 
				yield (x, y)
 

	
 
	def __str__(self):
 
		return str(self.content) + "*"
 

	
 

	
 
class Alternative(Token):
 
	"""An operator with a variable number of arguments, specifying exchangeable alternatives."""
 
	def __init__(self, content: list[Token]):
 
		""":raises ParsingError: raised on an empty variant. That is an AlternativeSeparator surrounded by no other Tokens."""
 
		self.variants = []
 
		subsequence = []
 

	
 
		for token in content:
 
			if isinstance(token, AlternativeSeparator):
 
				if not subsequence:
 
					raise ParsingError("Found an empty Alternative variant.")
 
				self.variants.append(Chain(subsequence))
 
				subsequence = []
 
			else:
 
				subsequence.append(token)
 
		
 
		if not subsequence:
 
				raise ParsingError("Found an empty Alternative variant.")
 
		self.variants.append(Chain(subsequence))
 
		
 

	
 
	def list_first(self):
 
		for x in self.variants:
 
			yield from x.list_first()
 

	
 
	def list_last(self):
 
		for x in self.variants:
 
			yield from x.list_last()
 
	
 
	def list_neighbours(self):
 
		for x in self.variants:
 
			yield from x.list_neighbours()
 

	
 
	@property
 
	def is_skippable(self):
 
		return any(x.is_skippable for x in self.variants)
 

	
 
class AlternativeSeparator:
 
	"""A special token to temporarily separate Alternative variants. Removed in the Alternative constructor."""
 
	pass
 

	
 
class Chain(Token):
 
	"""An operator expressing a concatenation of its content Tokens."""
 
	def __init__(self, content: list[Token]):
 
		self.content = content
 

	
 
	def list_first(self):
 
		for token in self.content:
 
			yield from token.list_first()
 
			if not token.is_skippable:
 
				break
 

	
 
	def list_last(self):
 
		for token in reversed(self.content):
 
			yield from token.list_last()
 
			if not token.is_skippable:
 
				break
 

	
 
	def list_neighbours(self):
 
		previous = []
 
		for token in self.content:
 
			for t in previous:
 
				for x in t.list_last():
 
					for y in token.list_first():
 
						yield (x, y)
 
			yield from token.list_neighbours()
 

	
 
			if token.is_skippable:
 
				previous.append(token)
 
			else:
 
				previous = [token]
 

	
 
	@property
 
	def is_skippable(self):
 
		return all(x.is_skippable for x in self.content)
 

	
 
	def __str__(self):
 
		return "(" + "".join(str(x) for x in self.content) + ")"
 

	
 

	
 
def find_closing_parenthesis(pattern: str, pos: int) -> int:
 
	"""Given an opening parenthesis, find the closing one.
 
	
 
	:param pattern: the regexp pattern
 
	:param pos: the opening parenthesis position
 
	:return: a position of the matching closing parenthesis
 
	:raises AssertionError: on an incorrect call, if there is no opening parenthesis at `pos`
 
	:raises ParsingError: on a malformed pattern with no matching parenthesis"""
 
	assert pattern[pos] == "("
 
	counter = 0
 

	
 
	for (i, c) in enumerate(pattern[pos:]):
 
		if c == "(":
 
			counter += 1
 
		elif c == ")":
 
			counter -= 1
 
		if counter == 0:
 
			return pos+i
 

	
 
	raise ParsingError(f'A closing parenthesis not found. Pattern: "{pattern}", position: {pos}')
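# Illustrative: find_closing_parenthesis("a(b(c))", 1) returns 6, the index of
# the ')' matching the '(' at position 1.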
 

	
 

	
 
def parse(pattern: str, offset: int=0) -> Token:
 
	"""Recursively parse the pattern into a Token tree.
 

	
 
	:param pattern: the regexp string
 
	:param offset: where the `pattern` starts in the original pattern, to record correct global token positions in the subcalls
 
	:return: a token sequence, wrapped in a Chain or an Alternative"""
 
	res = []
 
	is_alternative = False
 

	
 
	i = 0
 
	while i < len(pattern):
 
		c = pattern[i]
 
		if c == "(":
 
			j = find_closing_parenthesis(pattern, i)
 
			inner_content = parse(pattern[i+1:j], offset+i+1)
 
			res.append(inner_content)
 
			i = j+1
 
		elif c == "*":
 
			try:
 
				token = res.pop()
 
			except IndexError as e:
 
			raise ParsingError(f'The asterisk operator is missing an argument. Pattern: "{pattern}", position: {i}') from e
 
			res.append(Asterisk(token))
 
			i += 1
 
		elif c == ")":
 
			raise ParsingError(f'An opening parenthesis not found. Pattern: "{pattern}", position: {i}')
 
		elif c == "|" or c == "+":
 
			is_alternative = True
 
			res.append(AlternativeSeparator())
 
			i += 1
 
		elif c == "_":
 
			res.append(Lambda())
 
			i += 1
 
		else:
 
			res.append(Symbol(i+offset, c))
 
			i += 1
 

	
 
	if is_alternative:
 
		return Alternative(res)
 
	else:
 
		return Chain(res)
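# A worked example (illustrative): parsing "a(b|c)*" yields
# Chain([Symbol(0), Asterisk(Alternative(...))]); its position sets feed the
# NFA construction in `Regexp.__init__`:
#
#   t = parse("a(b|c)*")
#   sorted(t.list_first())       # [0] - every match starts with 'a'
#   sorted(t.list_last())        # [0, 2, 4] - 'a', 'b' or 'c' can end a match
#   sorted(t.list_neighbours())  # [(0, 2), (0, 4), (2, 2), (2, 4), (4, 2), (4, 4)]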
 

	
 

	
 
def print_dfa(dfa: "RegexpDFA", label: str=""):
 
	"""Utility function for printing automatons in a readable format."""
 
	n = len(dfa.alphabet_index)
 
	print(label)
 
	for i in range(0, len(dfa.rules), n):
 
		print(i//n, dfa.rules[i:i+n])
 
	print(dfa.end_states)
 

	
 

	
 
class Regexp:
 
	def __init__(self, pattern: str):
 
		"""Parse a pattern string into a sequence of Tokens and build a non-deterministic finite automaton from them."""
 
		r = parse(pattern)
 
		# (state, symbol): {state1, ...}
 
		rules = dict()
 
		alphabet = set()
 

	
 
		# record possible starting symbols
 
		for i in r.list_first():
 
			c = pattern[i]
 
			alphabet.add(c)
 
			key = (-1, c)
 
			if key not in rules:
 
				rules[key] = set()
 
			rules[key].add(i)
 

	
 
		# record all pairs of symbols that can immediately follow each other
 
		for (i, j) in r.list_neighbours():
 
			c = pattern[j]
 
			alphabet.add(c)
 
			key = (i, c)
 
			if key not in rules:
 
				rules[key] = set()
 
			rules[key].add(j)
 

	
 
		# record possible last symbols and mark them as accepting states
 
		end_states = set(r.list_last())
 
		if r.is_skippable:
 
			end_states.add(-1)
 

	
 
		self.rules = rules
 
		self.end_states = end_states
 
		self.alphabet = alphabet
 

	
 
	def match(self, s: str) -> bool:
 
		"""Decide if a string matches the regexp.
 
		
 
		:param s: an input string"""
 
		current = {-1}
 

	
 
		for c in s:
 
			new_state = set()
 
			for st in current:
 
				key = (st, c)
 
				if key in self.rules:
 
					new_state.update(self.rules[key])
 
			current = new_state
 

	
 
		return any(st in self.end_states for st in current)
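	# Minimal usage sketch (illustrative):
	#
	#   r = Regexp("a(b|c)*")
	#   r.match("abccb")  # True
	#   r.match("bc")     # False - a match must start with 'a'
	#   r.match("ad")     # False - 'd' has no transition rule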
 

	
 
	def determinize(self) -> "RegexpDFA":
 
		"""Convert the non-deterministic finite automaton into a deterministic one."""
 
		alphabet_index = {c: i for (i, c) in enumerate(sorted(self.alphabet))}
 
		n = len(alphabet_index)
 
		compact_rules = [-1] * n
 
		end_states = {0} if -1 in self.end_states else set()
 

	
 
		index = {(-1,): 0}
 
		stack = [(-1,)]
 
		# explore all possible state sets the NFA can visit
 
		while stack:
 
			multistate = stack.pop()
 
			new_rules = dict()
 
			
 
			# collect possible target states
 
			for ((st, c), target) in filter(lambda item: item[0][0] in multistate, self.rules.items()):
 
				if c not in new_rules:
 
					new_rules[c] = set()
 
				new_rules[c].update(target)
 
			
 
			# map the state sets to integer labels and record them in a single dimensional rules list
 
			for (c, target_set) in new_rules.items():
 
				target_tup = tuple(sorted(target_set))
 
				if target_tup not in index:
 
					new_target = len(index)
 
					index[target_tup] = new_target
 
					compact_rules.extend([-1] * n)
 
					stack.append(target_tup)
 
				compact_rules[index[multistate]*n + alphabet_index[c]] = index[target_tup]
 
				if any(st in self.end_states for st in target_set):
 
					end_states.add(index[target_tup])
 

	
 
		# create an additional fail state
 
		# and redirect all undefined transition rules to it
 
		fail = len(index)
 
		compact_rules = [(st if st >= 0 else fail) for st in compact_rules]
 
		compact_rules.extend([fail] * n)
 
		
 
		return RegexpDFA(compact_rules, end_states, alphabet_index)
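	# Subset-construction sketch (illustrative):
	#
	#   dfa = Regexp("a(b|c)*").determinize()
	#   dfa.alphabet_index  # {'a': 0, 'b': 1, 'c': 2}
	#   dfa.match("abc")    # True - same language, one state per input symbol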
 

	
 

	
 
class RegexpDFA:
 
	def __init__(self, rules: list[int], end_states: set[int], alphabet_index: dict[str, int]):
 
		"""A deterministic finite automaton constructor.
 

	
 
		If the `rules` or the `alphabet_index` are empty, then instead of an empty automaton we create a trivial automaton that rejects every non-empty input (whether it accepts the empty string still depends on `end_states`).
 

	
 
		:param rules: transition rules, where (i*n+j)th item defines the transition for i-th state on j-th alphabet symbol
 
		:param end_states: accepting states
 
		:param alphabet_index: mapping from alphabet symbols to rule columns"""
 
		self.rules = rules or [1, 1]
 
		self.end_states = end_states
 
		self.alphabet_index = alphabet_index or {"": 0}
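	# The flat `rules` layout, sketched for a hypothetical 3-state automaton
	# over n = 2 symbols ({"a": 0, "b": 1}):
	#
	#   rules = [1, 2,    # state 0: --a--> 1, --b--> 2
	#            1, 2,    # state 1: --a--> 1, --b--> 2
	#            2, 2]    # state 2: the fail state loops back to itself
	#
	# A transition is looked up as rules[state*n + alphabet_index[symbol]].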
 

	
 
	@staticmethod
 
	def create(pattern: str) -> "RegexpDFA":
 
		"""Create a `Regexp` and determinize it."""
 
		r = Regexp(pattern)
 
		return r.determinize()
 

	
 
	def match(self, s: str):
 
		"""Decide if a string matches the regexp.
 
		
 
		:param s: an input string"""
 
		st = 0
 
		n = len(self.alphabet_index)
 

	
 
		for c in s:
 
			if c not in self.alphabet_index:
 
				return False
 
			key = (st*n + self.alphabet_index[c])
 
			st = self.rules[key]
 

	
 
		return st in self.end_states
 

	
 
	def reduce(self) -> "RegexpDFA":
 
		"""Minimize the automaton by collapsing equivalent states."""
 
		partition = self._find_equivalent_states()
 
		(rules, end_states) = self._collapse_states(partition)
 

	
 
		return RegexpDFA(rules, end_states, self.alphabet_index)
 

	
 
	def normalize(self) -> "RegexpDFA":
 
		"""Normalize the automaton by relabeling the states in a breadth first search walkthrough order and remove unreachable states."""
 
		n = len(self.alphabet_index)
 
		index = {0: 0}
 
		queue = deque([0])
 

	
 
		rules = []
 

	
 
		while queue:
 
			si = queue.popleft()
 
			row = self.rules[si*n:(si+1)*n]
 
			for sj in row:
 
				if sj not in index:
 
					index[sj] = len(index)
 
					queue.append(sj)
 
			rules.extend(index[sj] for sj in row)
 
		
 
		end_states = {index[si] for si in self.end_states if si in index}
 

	
 
		return RegexpDFA(rules, end_states, self.alphabet_index)
 

	
 
	def find_distinguishing_string(self, r: "RegexpDFA") -> Optional[str]:
 
		"""Find the shortest string that is accepted by self or `r`, but not both.
 
		
 
		:param r: the other automaton
 
		:return: the distinguishing string, or None if the automatons are equivalent"""
 
		r1 = self.reduce().normalize()
 
		r2 = r.reduce().normalize()
 

	
 
		if r1.rules == r2.rules and r1.end_states == r2.end_states:
 
			return None
 

	
 
		r1 = r1._expand_alphabet(r2.alphabet_index)
 
		r2 = r2._expand_alphabet(r1.alphabet_index)
 
		product = r1._build_product_automaton(r2)
 

	
 
		n = len(product.alphabet_index)
 
		# alphabet index: symbol
 
		inverse_alphabet_index = {v: k for (k, v) in product.alphabet_index.items()}
 
		queue = deque([(0, "")])
 
		visited = {0}
 
		while queue:
 
			(state, acc) = queue.popleft()
 
			if state in product.end_states:
 
				return acc
 
			for (i, target) in enumerate(product.rules[state*n:(state+1)*n]):
 
				if target not in visited:
 
					queue.append((target, acc+inverse_alphabet_index[i]))
 
					visited.add(target)
 
		
 
		assert False, "Failed to detect the Regexps as equivalent and failed to find a distinguishing string."
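	# Comparison sketch (illustrative):
	#
	#   r1 = RegexpDFA.create("(a|b)*")
	#   r2 = RegexpDFA.create("(a*b*)*")
	#   r1.find_distinguishing_string(r2)  # None - the patterns are equivalent
	#
	#   r3 = RegexpDFA.create("a(a|b)*")
	#   r1.find_distinguishing_string(r3)  # "" - r1 accepts the empty string, r3 does not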
 

	
 
	def _find_equivalent_states(self) -> list[set[int]]:
 
		"""Partition states into their equivalence classes with Hopcroft's algorithm.
 
		
 
		:return: sets of equivalent states. Unique states get single item sets."""
 
		n = len(self.alphabet_index)
 
		m = len(self.rules) // n
 
		inverse_rules = [set() for i in range(m*n)]
 

	
 
		# a transition rules inverse. target state + alphabet symbol -> source states
 
		for i in range(m):
 
			for j in range(n):
 
				target = self.rules[i*n + j]
 
				inverse_rules[target*n + j].add(i)
 

	
 
		set_bag = [self.end_states, set(range(m))-self.end_states]
 
		res = {0, 1}
 
		work = {0, 1}
 

	
 
		while work:
 
			key = work.pop()
 
			target_set = set_bag[key]
 
			for j in range(n):
 
				source_set = set(itertools.chain.from_iterable(inverse_rules[t*n + j] for t in target_set))
 
				for k in res.copy():
 
					part = set_bag[k]
 
					intersection = part & source_set
 
					diff = part - source_set
 
					if not intersection or not diff:
 
						continue
 
					res.remove(k)
 
					ki = len(set_bag)
 
					set_bag.append(intersection)
 
					res.add(ki)
 
					kd = len(set_bag)
 
					set_bag.append(diff)
 
					res.add(kd)
 
					if k in work:
 
						work.remove(k)
 
						work.add(ki)
 
						work.add(kd)
 
					elif len(intersection) < len(diff):
 
						work.add(ki)
 
					else:
 
						work.add(kd)
 
		
 
		return [set_bag[k] for k in res]
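	# Refinement sketch (illustrative): in the DFA for "a|b" the two accepting
	# states behave identically on every symbol, so they stay in one class:
	#
	#   RegexpDFA.create("a|b")._find_equivalent_states()
	#   # e.g. [{0}, {1, 2}, {3}] - the classes are returned in arbitrary order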
 
	
 
	def _collapse_states(self, partition: list[set[int]]) -> tuple[list[int], set[int]]:
 
		"""Collapse equivalent states from each class into a single one.
 
		
 
		:param partition: list of equivalence classes
 
		:return: rules list, accepting states"""
 
		n = len(self.alphabet_index)
 
		rules = []
 

	
 
		# states mapping due to the equivalents collapse
 
		eq_mapping = dict()
 
		for eq_set in partition:
 
			states = sorted(eq_set)
 
			for st in states:
 
				eq_mapping[st] = states[0]
 

	
 
		# states mapping to keep the rules list compact after the equivalents collapse
 
		discard_mapping = dict()
 
		discard_count = 0
 

	
 
		for i in range(0, len(self.rules), n):
 
			si = i//n
 
			if eq_mapping[si] != si:
 
				discard_count += 1
 
				continue
 
			discard_mapping[si] = si - discard_count
 
			rules.extend(map(lambda st: eq_mapping[st], self.rules[i:i+n]))
 
		
 
		rules = [discard_mapping[st] for st in rules]
 
		end_states = {discard_mapping[eq_mapping[st]] for st in self.end_states}
 
		
 
		return (rules, end_states)
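	# Continuing the "a|b" sketch above: collapsing {1, 2} leaves three states,
	#   rules = [1, 1, 2, 2, 2, 2], end_states = {1}
	# i.e. state 0 goes to the accepting state 1 on both symbols, and every
	# further input falls into the fail state 2.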
 

	
 
	def _expand_alphabet(self, alphabet_index: dict[str, int]) -> "RegexpDFA":
 
		"""Expand the automaton to accommodate union of `self.alphabet_index` and the provided `alphabet_index`.
 
		
 
		:param alphabet_index: a mapping from letters to rules columns
 
		:return: a RegexpDFA over the unified alphabet"""
 
		if alphabet_index == self.alphabet_index:
 
			return self
 

	
 
		n1 = len(self.alphabet_index)
 
		m = len(self.rules) // n1
 

	
 
		combined_alphabet = set(self.alphabet_index.keys()) | set(alphabet_index.keys())
 
		# a new alphabet_index
 
		combined_index = {c: i for (i, c) in enumerate(sorted(combined_alphabet))}
 
		# a new letter key: the old letter key
 
		conversion_index = {combined_index[k]: v for (k, v) in self.alphabet_index.items()}
 
		n2 = len(combined_alphabet)
 

	
 
		# rewrite the rules into a new table, filling blanks with a new fail state
 
		rules = [
 
			self.rules[i*n1 + conversion_index[j]]
 
			if j in conversion_index else m
 
			for i in range(m) for j in range(n2)
 
		]
 
		rules.extend([m]*n2)
 

	
 
		return RegexpDFA(rules, self.end_states, combined_index).reduce().normalize()
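	# e.g. (illustrative) expanding the "a*" automaton with the alphabet
	# {"b": 0} adds a 'b' column whose every entry points at the fresh fail
	# state m, before the result is reduced and normalized.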
 

	
 
	def _build_product_automaton(self, r: "RegexpDFA") -> "RegexpDFA":
 
		"""Create a new automaton, with a carthesian product of the arguments' states and corresponding transition rules.
 
		
 
		:param r: the other finite automaton. It must have the same `alphabet_index` as `self`"""
 
		assert self.alphabet_index == r.alphabet_index
 
		n = len(self.alphabet_index)
 
		m = len(r.rules) // n
 
		k = len(self.rules) // n
 

	
 
		rules = []
 
		end_states = set()
 

	
 
		# expand each self state into m new states, one for each of the r states
 
		for s1 in range(k):
 
			row1 = self.rules[s1*n:(s1+1)*n]
 
			for s2 in range(m):
 
				row2 = r.rules[s2*n:(s2+1)*n]
 
				rules.extend([x*m + y for (x, y) in zip(row1, row2)])
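				# exactly one of the two accepts -> the product recognizes the symmetric difference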
 
				if (s1 in self.end_states) != (s2 in r.end_states):
 
					end_states.add(s1*m + s2)
 

	
 
		return RegexpDFA(rules, end_states, self.alphabet_index).reduce().normalize()
 

	
 

	
 
def test():
 
	tests = ["", "a", "ab", "aabb", "abab", "abcd", "abcbcdbcd"]
 
	for pattern in ["", "a(b|c)", "a*b*", "(ab)*", "a((bc)*d)*", "(a|b)*a(a|b)(a|b)(a|b)", "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"]:
 
		print("#", pattern)
 
		try:
 
			r = RegexpDFA.create(pattern).reduce().normalize()
 
		except ParsingError as e:
 
			print("Failed to parse the regexp:")
 
			print(e)
 
			continue
 
		for t in tests:
 
			print(t, r.match(t))
 
		print()
 

	
 

	
 
if __name__ == "__main__":
 
	parser = argparse.ArgumentParser()
	parser.set_defaults(name="")
 
	subparsers = parser.add_subparsers()
 
	
 
	test_parser = subparsers.add_parser("test")
 
	test_parser.set_defaults(name="test")
 

	
 
	match_parser = subparsers.add_parser("match")
 
	match_parser.add_argument("pattern")
 
	match_parser.add_argument("string")
 
	match_parser.set_defaults(name="match")
 

	
 
	compare_parser = subparsers.add_parser("compare")
 
	compare_parser.add_argument("pattern1")
 
	compare_parser.add_argument("pattern2")
 
	compare_parser.set_defaults(name="compare")
 

	
 
	args = parser.parse_args()
 

	
 
	if args.name == "test":
 
		test()
 
	elif args.name == "match":
 
		try:
 
			r = RegexpDFA.create(args.pattern).reduce().normalize()
 
		except ParsingError as e:
 
			print("Failed to parse the regexp:")
 
			print(e)
			raise SystemExit(1)
 
		print(r.match(args.string))
 
	elif args.name == "compare":
 
		r1 = RegexpDFA.create(args.pattern1).reduce().normalize()
 
		r2 = RegexpDFA.create(args.pattern2).reduce().normalize()
 
		print(r1.find_distinguishing_string(r2))
 
	else:
 
		parser.print_help()
src/lib.rs
 
mod regexp;
 
pub use regexp::{Regexp, ParsingError};
 
pub use regexp::{RegexpNFA, ParsingError};
src/main.rs
 
use std::env;
 
use regexp::Regexp;
 
use regexp::RegexpNFA;
 

	
 
fn test() {
 
	let tests = ["", "a", "ab", "aabb", "abab", "abcd", "abcbcdbcd"];
 
	for pattern in ["a(b|c)", "a*b*", "(ab)*", "a((bc)*d)*", "(a|b)*a(a|b)(a|b)(a|b)", "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"] {
 
		println!("# {pattern}");
 
		let r = match Regexp::new(&pattern.to_string()) {
 
		let r = match RegexpNFA::new(&pattern.to_string()) {
 
			Ok(r1) => r1.determinize().reduce().normalize(),
 
			Err(e) => {
 
				println!("{e}");
 
				continue;
 
			}
 
		};
 
		for &t in tests.iter() {
 
			println!("{t} {}", r.eval(t.to_string()));
 
		}
 
		println!();
 
	}
 
}
 

	
 
fn main() {
 
	let args: Vec<String> = env::args().collect();
 
	match args[1].as_str() {
 
		"test" => test(),
 
		"match" => {
 
			let r = match Regexp::new(&args[2].to_string()) {
 
			let r = match RegexpNFA::new(&args[2].to_string()) {
 
				Ok(r1) => r1.determinize().reduce().normalize(),
 
				Err(e) => {
 
					panic!("ERROR: {e}");
 
				}
 
			};
 
			println!("{}", r.eval(args[3].to_string()));
 
		},
 
		"compare" => {
 
			let r1 = match Regexp::new(&args[2].to_string()) {
 
			let r1 = match RegexpNFA::new(&args[2].to_string()) {
 
				Ok(r) => r.determinize().reduce().normalize(),
 
				Err(e) => {
 
					panic!("ERROR: {e}");
 
				}
 
			};
 
			let r2 = match Regexp::new(&args[3].to_string()) {
 
			let r2 = match RegexpNFA::new(&args[3].to_string()) {
 
				Ok(r) => r.determinize().reduce().normalize(),
 
				Err(e) => {
 
					panic!("ERROR: {e}");
 
				}
 
			};
 
			println!("{}", r1.find_distinguishing_string(&r2).unwrap_or("None".to_string()));
 
		},
 
		s => {
 
			println!("An unknown command: \"{s}\". Use \"match\" or \"test\".")
 
			println!("An unknown command: \"{s}\". Use \"match\", \"compare\" or \"test\".")
 
		}
 
	}
 
}
src/regexp.rs
 
use std::{collections::{HashMap, HashSet, VecDeque}, iter};
 

	
 
mod token;
 
pub use token::ParsingError;
 
use token::parse;
 

	
 
const START_NFA: usize = usize::MAX;
 
const START_DFA: usize = 0;
 

	
 
/// A regular expression implemented as a non-deterministic finite automaton.
 
#[derive(Debug)]
 
pub struct Regexp {
 
pub struct RegexpNFA {
 
	rules: HashMap<(usize, char), HashSet<usize>>,
 
	end_states: HashSet<usize>,
 
	alphabet: Vec<char>
 
}
 

	
 
impl Regexp {
 
	pub fn new(pattern: &String) -> Result<Regexp, ParsingError> {
 
impl RegexpNFA {
 
	pub fn new(pattern: &String) -> Result<RegexpNFA, ParsingError> {
 
		let r = parse(pattern, 0)?;
 
		let pattern_chars = Vec::from_iter(pattern.chars());
 
		let mut rules: HashMap<(usize, char), HashSet<usize>> = HashMap::new();
 
		let mut alphabet: HashSet<char> = HashSet::new();
 
		
 
		for i in r.list_first() {
 
			let c = pattern_chars[i];
 
			alphabet.insert(c);
 
			let key = (START_NFA, c);
 
			match rules.get_mut(&key) {
 
				Some(set) => {set.insert(i);},
 
				None => {rules.insert(key, HashSet::from([i]));}
 
			};
 
		}
 

	
 
		for (i, j) in r.list_neighbours() {
 
			let c = pattern_chars[j];
 
			alphabet.insert(c);
 
			let key = (i, c);
 
			match rules.get_mut(&key) {
 
				Some(set) => {set.insert(j);},
 
				None => {rules.insert(key, HashSet::from([j]));}
 
			};
 
		}
 

	
 
		let mut end_states = HashSet::from_iter(r.list_last().into_iter());
 
		if r.is_skippable() {
 
			end_states.insert(START_NFA);
 
		}
 

	
 
		let mut alphabet_vec = Vec::from_iter(alphabet.into_iter());
 
		alphabet_vec.sort();
 

	
 
		return Ok(Regexp{rules, end_states, alphabet: alphabet_vec});
 
		return Ok(RegexpNFA{rules, end_states, alphabet: alphabet_vec});
 
	}
 

	
 
	/// Decide if a string matches the regexp.
 
	pub fn eval(&self, s: String) -> bool {
 
		let mut multistate = HashSet::from([START_NFA]);
 

	
 
		for c in s.chars() {
 
			let mut new_multistate = HashSet::new();
 

	
 
			for state in multistate {
 
				if let Some(x) = self.rules.get(&(state, c)) {
 
					new_multistate = new_multistate.union(&x).map(|&y| y).collect();
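				// a '.' in the pattern acts as a per-state wildcard: its transitions
				// apply when the current symbol has no explicit rule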
 
				} else if let Some(x) = self.rules.get(&(state, '.')) {
 
					new_multistate = new_multistate.union(&x).map(|&y| y).collect();
 
				}
 
			}
 
			multistate = new_multistate;
 
		}
 

	
 
		return multistate.iter().any(|x| self.end_states.contains(x));
 
	}
 

	
 
	/// Convert the non-deterministic finite automaton into a deterministic one.
 
	pub fn determinize(&self) -> RegexpDFA {
 
		const FAIL: usize = usize::MAX;
 
		let alphabet_index: HashMap<char, usize> = self.alphabet.iter().enumerate().map(|(i, c)| (*c, i)).collect();
 
		let n = alphabet_index.len();
 
		let mut compact_rules = vec![FAIL; n];
 
		let mut end_states: HashSet<usize> = HashSet::new();
 
		if self.end_states.contains(&START_NFA) {end_states.insert(START_DFA);}
 

	
 
		// mapping the NFA state subsets -> DFA states
 
		let mut index = HashMap::from([(vec![START_NFA], START_DFA)]);
 
		let mut stack = vec![vec![START_NFA]];
 

	
 
		while !stack.is_empty() {
 
			let multistate = stack.pop().unwrap();
 
			let mut new_rules: HashMap<char, HashSet<usize>> = HashMap::new();
 

	
 
			// collect all possible destination states from the multistate
 
			for key in self.rules.keys().filter(|(st, _c)| multistate.binary_search(st).is_ok()) {
 
				let (_st, c) = key;
 
				if !new_rules.contains_key(c) {
 
					new_rules.insert(*c, HashSet::new());
 
				}
 
				for target in &self.rules[key] {
 
					new_rules.get_mut(c).unwrap().insert(*target);
 
				}
 
			}
 

	
 
			// build a row for the DFA transition function table
 
			for (c, target_set) in new_rules.into_iter() {
 
				let mut target_vec = Vec::from_iter(target_set.into_iter());
 
				target_vec.sort();
 
				let is_end = target_vec.iter().any(|st| self.end_states.contains(st));
 
				if !index.contains_key(&target_vec) {
 
					let target_new = index.len();
 
					index.insert(target_vec.clone(), target_new);
 
					compact_rules.extend(iter::repeat(FAIL).take(n));
 
					stack.push(target_vec.clone());
 
				}
 
				compact_rules[index[&multistate]*n + alphabet_index[&c]] = index[&target_vec];
 
				if is_end {
 
					end_states.insert(index[&target_vec]);
 
				}
 
			}
 
		}
 

	
 
		// add a fail state, so the transition function is complete
 
		let fail = index.len();
 
		compact_rules = compact_rules.into_iter().map(|st| if st != FAIL {st} else {fail}).collect();
 
		compact_rules.extend(iter::repeat(fail).take(n));
 
		
 
		return RegexpDFA::new(compact_rules, end_states, alphabet_index);
 
	}
 
}
 

	
 
/// A regular expression implemented as a deterministic finite automaton.
 
/// This simplifies support for more features compared to the NFA-based `RegexpNFA`.
 
#[derive(Clone)]
 
pub struct RegexpDFA {
 
	rules: Vec<usize>,
 
	end_states: HashSet<usize>,
 
	alphabet_index: HashMap<char, usize>
 
}
 

	
 
impl RegexpDFA {
 
	/// Construct a DFA with the provided parameters, or a minimal DFA if the parameters are empty.
 
	pub fn new(rules: Vec<usize>, end_states: HashSet<usize>, alphabet_index: HashMap<char, usize>) -> RegexpDFA {
 
		if rules.len() > 0 {
 
			return RegexpDFA{rules, end_states, alphabet_index};
 
		} else {
 
			// this saves us checking for an empty `alphabet_index` in other methods.
 
			return RegexpDFA{
 
				rules: vec![1, 1],
 
				end_states,
 
				alphabet_index: HashMap::from([('\0', 0)])
 
			};
 
		}
 
	}
 

	
 
	/// Decide if a string matches the regexp.
 
	pub fn eval(&self, s: String) -> bool {
 
		let n = self.alphabet_index.len();
 
		let mut state = START_DFA;
 

	
 
		for c in s.chars() {
 
			if let Some(ci) = self.alphabet_index.get(&c) {
 
				state = self.rules[state*n + ci];
 
			} else {
 
				return false;
 
			}
 
		}
 

	
 
		return self.end_states.contains(&state);
 
	}
 

	
 
	/// Minimize the automaton by collapsing equivalent states.
 
	pub fn reduce(&self) -> RegexpDFA {
 
		let partition = self.find_equivalent_states();
 
		return self.collapse_states(partition);
 
	}
 

	
 
	/// Normalize the automaton by relabeling the states in breadth-first search order and removing unreachable states.
 
	pub fn normalize(&self) -> RegexpDFA {
 
		let n = self.alphabet_index.len();
 
		let m = self.rules.len()/n;
 
		let fail = m;
 
		let mut index: Vec<usize> = vec![fail;m];
 
		index[0] = 0;
 
		let mut queue = VecDeque::from([START_DFA]);
 

	
 
		let mut rules = vec![];
 
		let mut k = 1;
 

	
 
		while !queue.is_empty() {
 
			let si = queue.pop_front().unwrap();
 
			let row = &self.rules[si*n..(si+1)*n];
 
			for &sj in row {
 
				if sj != fail && index[sj] == fail {
 
					index[sj] = k;
 
					k += 1;
 
					queue.push_back(sj);
 
				}
 
			}
 
			rules.extend(row.iter().map(|&st| index[st]));
 
		}
 

	
 
		let end_states = self.end_states.iter().map(|st| index[*st]).collect();
 
		
 
		return RegexpDFA{rules, end_states, alphabet_index: self.alphabet_index.clone()};
 
	}
 

	
 
	/// Find the shortest string that is accepted by self or `r`, but not both.
 
	/// It is expected that the automatons are already reduced and normalized.
 
	pub fn find_distinguishing_string(&self, other: &RegexpDFA) -> Option<String> {
 
		if self.rules == other.rules && self.end_states == other.end_states {
 
			return None;
 
		}
 

	
 
		let r1 = self.expand_alphabet(&other.alphabet_index);
 
		let r2 = other.expand_alphabet(&self.alphabet_index);
 
		let product = r1.build_product_automaton(&r2);
 
		let n = product.alphabet_index.len();
 
		let reverse_alphabet_index: HashMap<usize, char> = HashMap::from_iter(product.alphabet_index.iter().map(|(&k, &v)| (v, k)));
 
		// mapping letter keys -> letters
 
		let inverse_alphabet_index: HashMap<usize, char> = HashMap::from_iter(product.alphabet_index.iter().map(|(&k, &v)| (v, k)));
 

	
 
		// look for an accepting state with a BFS
 
		let mut queue = VecDeque::from([(0, "".to_string())]);
 
		let mut visited = HashSet::new();
 
		while !queue.is_empty() {
 
			let (state, acc) = queue.pop_front().unwrap();
 
			if product.end_states.contains(&state) {
 
				return Some(acc);
 
			}
 
			for (i, target) in product.rules[state*n..(state+1)*n].iter().enumerate() {
 
				if !visited.contains(target) {
 
					queue.push_back((*target, acc.clone()+&String::from(reverse_alphabet_index[&i])));
 
					queue.push_back((*target, acc.clone()+&String::from(inverse_alphabet_index[&i])));
 
					visited.insert(target);
 
				}
 
			}
 
		}
 

	
 
		panic!();
 
		panic!("Failed to detect the Regexps as equivalent and failed to find a distinguishing string.");
 
	}
 

	
 
	/// Partition states into their equivalence classes with Hopcroft's algorithm.
 
	fn find_equivalent_states(&self) -> Vec<HashSet<usize>> {
 
		let n = self.alphabet_index.len();
 
		let m = self.rules.len() / n;
 
		let mut inverse_rules = vec![HashSet::<usize>::new();m*n];
 

	
 
		for i in 0..m {
 
			for j in 0..n {
 
				let target = self.rules[i*n + j];
 
				inverse_rules[target*n + j].insert(i);
 
			}
 
		}
 

	
 
		// store state subsets here so we can just pass around their indices
 
		let mut set_bag = vec![
 
			self.end_states.clone(),
 
			HashSet::from_iter(0..m).difference(&self.end_states).copied().collect()
 
		];
 
		let mut res = HashSet::<usize>::from([0, 1]);
 
		let mut work = HashSet::<usize>::from([0, 1]);
 

	
 
		while !work.is_empty() {
 
			let key = *work.iter().next().unwrap();
 
			work.remove(&key);
 
			let target_set = set_bag[key].clone();
 
			for j in 0..n {
 
				let source_set = HashSet::<usize>::from_iter(target_set.iter().flat_map(|&t| inverse_rules[t*n+j].iter()).copied());
 
				for k in res.clone().into_iter() {
 
					let part = &set_bag[k];
 
					let intersection = part.intersection(&source_set).copied().collect::<HashSet<usize>>();
 
					let diff = part.difference(&source_set).copied().collect::<HashSet<usize>>();
 
					if intersection.is_empty() || diff.is_empty() {
 
						continue;
 
					}
 
					res.remove(&k);
 
					let ki = set_bag.len();
 
					set_bag.push(intersection);
 
					res.insert(ki);
 
					let kd = set_bag.len();
 
					set_bag.push(diff);
 
					res.insert(kd);
 
					if work.contains(&k) {
 
						work.remove(&k);
 
						work.insert(ki);
 
						work.insert(kd);
 
					} else if set_bag[ki].len() < set_bag[kd].len() {
 
						work.insert(ki);
 
					} else {
 
						work.insert(kd);
 
					}
 
				}
 
			}
 
		}
 

	
 
		return Vec::from_iter(res.into_iter().map(|k| std::mem::take(&mut set_bag[k])));
 
	}
 

	
 
	/// Collapse equivalent states from each partition class into a single state.
 
	fn collapse_states(&self, partition: Vec<HashSet<usize>>) -> RegexpDFA {
 
		let n = self.alphabet_index.len();
 
		let m = self.rules.len()/n;
 
		let mut rules = vec![];
 

	
 
		// states mapping due to the equivalents collapse
 
		let mut eq_mapping: Vec<usize> = ((0..m)).collect();
 
		for eq_set in partition.into_iter() {
 
			let mut eq_vec = Vec::from_iter(eq_set.into_iter());
 
			eq_vec.sort();
 
			let label = eq_vec[0];
 
			for st in eq_vec.into_iter() {
 
				eq_mapping[st] = label;
 
			}
 
		}
 

	
 
		// states mapping to keep the rules list compact after the equivalents collapse
 
		let mut discard_mapping: Vec<usize> = ((0..m)).collect();
 
		let mut discard_count = 0;
 

	
 
		for si in 0..m {
 
			if eq_mapping[si] != si {
 
				discard_count += 1;
 
				continue;
 
			}
 
			discard_mapping[si] = si-discard_count;
 
			rules.extend(self.rules[si*n..(si+1)*n].iter().map(|&st| eq_mapping[st]));
 
		}
 

	
 
		rules = rules.into_iter().map(|st| discard_mapping[st]).collect();
 
		let end_states = self.end_states.iter().map(|st| discard_mapping[eq_mapping[*st]]).collect();
 

	
 
		return RegexpDFA{rules, end_states, alphabet_index: self.alphabet_index.clone()};
 
	}
 

	
 
	/// Expand the automaton to accommodate union of `self.alphabet_index` and the provided `alphabet_index`.
 
	fn expand_alphabet(&self, alphabet_index: &HashMap<char, usize>) -> RegexpDFA {
 
		if *alphabet_index == self.alphabet_index {
 
			return self.clone();
 
		}
 

	
 
		let n1 = self.alphabet_index.len();
 
		let m = self.rules.len() / n1;
 

	
 
		let combined_alphabet: HashSet<char> = HashSet::from_iter(self.alphabet_index.keys().chain(alphabet_index.keys()).copied());
 
		let mut combined_vec = Vec::from_iter(combined_alphabet.into_iter());
 
		combined_vec.sort();
 
		// a new alphabet_index
 
		let combined_index = HashMap::from_iter(combined_vec.iter().enumerate().map(|(i, c)| (*c, i)));
 
		// a new letter key -> the old letter key
 
		let conversion_index: HashMap<usize, usize> = HashMap::from_iter(self.alphabet_index.iter().map(|(k, v)| (combined_index[k], *v)));
 
		let n2 = combined_vec.len();
 

	
 
		// rewrite the rules into a new table, filling blanks with a new fail state
 
		let rules: Vec<usize> = (0..m*n2).map(
 
			|i| {
 
				let (j, k) = (i/n2, i%n2);
 
				return if conversion_index.contains_key(&k) {
 
					self.rules[j*n1 + conversion_index[&k]]
 
				} else {m};
 
			}
 
		).chain(std::iter::repeat(m).take(n2)).collect();
 

	
 
		return RegexpDFA{rules, end_states: self.end_states.clone(), alphabet_index: combined_index}.reduce().normalize();
 
	}
 

	
 
	/// Create a new automaton, with a Cartesian product of the arguments' states and corresponding transition rules.
 
	fn build_product_automaton(&self, other: &RegexpDFA) -> RegexpDFA {
 
		let n = self.alphabet_index.len();
 
		let m = other.rules.len() / n;
 
		let k = self.rules.len() / n;
 

	
 
		let mut rules = vec![];
 
		let mut end_states = HashSet::new();
 

	
 
		// expand each self state into m new states, one for each of the `other` states
 
		for s1 in 0..k {
 
			let row1 = &self.rules[s1*n..(s1+1)*n];
 
			for s2 in 0..m {
 
				let row2 = &other.rules[s2*n..(s2+1)*n];
 
				rules.extend(row1.iter().zip(row2.iter()).map(|(x, y)| x*m + y));
 
				if (self.end_states.contains(&s1)) ^ (other.end_states.contains(&s2)) {
 
					end_states.insert(s1*m + s2);
 
				}
 
			}
 
		}
 

	
 
		return RegexpDFA{rules, end_states, alphabet_index: self.alphabet_index.clone()}.reduce().normalize();
 
	}
 
}
src/regexp/token.rs
 
use std::{borrow::Borrow, fmt};
 

	
 
#[derive(Debug, Clone)]
 
pub enum ParsingError {
 
	Asterisk {s: String, pos: usize},
 
	OpeningParenthesis {s: String, pos: usize},
 
	ClosingParenthesis {s: String, pos: usize},
 
	EmptyAlternativeVariant
 
}
 

	
 
impl fmt::Display for ParsingError {
 
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 
		match self {
 
			ParsingError::Asterisk {s, pos} => {
 
				write!(f, "The asterisk operator is missing an argument. Pattern \"{s}\", position {pos}")
 
			},
 
			ParsingError::OpeningParenthesis {s, pos} => {
 
				write!(f, "An opening parenthesis not found. Pattern \"{s}\", position {pos}")
 
			},
 
			ParsingError::ClosingParenthesis {s, pos} => {
 
				write!(f, "An closing parenthesis not found. Pattern \"{s}\", position {pos}")
 
				write!(f, "A closing parenthesis not found. Pattern \"{s}\", position {pos}")
 
			},
 
			ParsingError::EmptyAlternativeVariant => {
 
				write!(f, "Found an empty Alternative variant.")
 
			}
 
		}
 
	}
 
}
 

	
 
/// A single letter or other alphabet symbol.
 
pub struct Symbol {
 
	/// Symbol position in the regular expression.
 
	position: usize
 
}
 

	
 
/// A unary operator specifying its content to occur zero or more times.
 
pub struct Asterisk {
 
	content: Box<Token>
 
}
 

	
 
/// An operator with a variable number of arguments, specifying exchangeable alternatives.
 
pub struct Alternative {
 
	content: Vec<Box<Token>>
 
}
 

	
 
/// An operator expressing a concatenation of its content Tokens.
 
pub struct Chain {
 
	content: Vec<Box<Token>>
 
}
 

	
 
/// Enum encapsulating possible items of a regular expression.
 
pub enum Token {
 
	Lambda,
 
	Lambda, // An empty string, usable as an `Alternative` variant.
 
	Symbol(Symbol),
 
	Asterisk(Asterisk),
 
	Alternative(Alternative),
 
	// A special token to temporarily separate Alternative variants. Removed in the Alternative constructor.
 
	AlternativeSeparator,
 
	Chain(Chain)
 
}
 

	
 
impl Symbol {
 
	fn list_first(&self) -> Vec<usize> {
 
		return vec![self.position];
 
	}
 

	
 
	fn list_last(&self) -> Vec<usize> {
 
		return vec![self.position];
 
	}
 

	
 
	fn list_neighbours(&self) -> Vec<(usize, usize)> {
 
		return vec![];
 
	}
 
}
 

	
 
impl Asterisk {
 
	fn list_first(&self) -> Vec<usize> {
 
		return self.content.list_first();
 
	}
 

	
 
	fn list_last(&self) -> Vec<usize> {
 
		return self.content.list_last();
 
	}
 

	
 
	fn list_neighbours(&self) -> Vec<(usize, usize)> {
 
		let mut res = self.content.list_neighbours();
 

	
 
		for x in self.list_last() {
 
			for y in self.list_first() {
 
				res.push((x, y));
 
			}
 
		}
 

	
 
		return res;
 
	}
 
}
 

	
 
impl Alternative {
 
	/// Split a sequence of `Tokens` by `AlternativeSeparator` into alternative variants and return the result.
 
	/// If any variant is empty, return an `Err`.
 
	fn new(content: Vec<Box<Token>>) -> Result<Alternative, ParsingError> {
 
		let mut variants: Vec<Vec<Box<Token>>> = vec![Vec::new()];
 

	
 
		content.into_iter().for_each(|x| {
 
			if matches!(x.borrow(), Token::AlternativeSeparator) {
 
				variants.push(Vec::new());
 
			} else {
 
				variants.last_mut().unwrap().push(x);
 
			}
 
		});
 

	
 
		if variants.iter().any(|x| x.is_empty()) {
 
			return Err(ParsingError::EmptyAlternativeVariant);
 
		}
 

	
 
		return Ok(Alternative{
 
			content: variants.into_iter().map(
 
				|x| Box::new(Token::Chain(Chain{content: x}))
 
			).collect()
 
		});
 
	}
 

	
 
	fn is_skippable(&self) -> bool {
 
		return self.content.iter().any(|x| x.is_skippable());
 
	}
 

	
 
	fn list_first(&self) -> Vec<usize> {
 
		let mut res = Vec::new();
 
		for token in self.content.iter() {
 
			res.append(&mut token.list_first());
 
		}
 

	
 
		return res;
 
	}
 

	
 
	fn list_last(&self) -> Vec<usize> {
 
		let mut res = Vec::new();
 
		for token in self.content.iter() {
 
			res.append(&mut token.list_last());
 
		}
 

	
 
		return res;
 
	}
 

	
 
	fn list_neighbours(&self) -> Vec<(usize, usize)> {
 
		let mut res = Vec::new();
 
		for token in self.content.iter() {
 
			res.append(&mut token.list_neighbours());
 
		}
 

	
 
		return res;
 
	}
 
}
 

	
 
impl Chain {
 
	fn is_skippable(&self) -> bool {
 
		return self.content.iter().all(|x| x.is_skippable());
 
	}
 

	
 
	fn list_first(&self) -> Vec<usize> {
 
		let mut res = Vec::new();
 
		for token in self.content.iter() {
 
			res.append(&mut token.list_first());
 
			if !token.is_skippable() {break;}
 
		}
 

	
 
		return res;
 
	}
 

	
 
	fn list_last(&self) -> Vec<usize> {
 
		let mut res = Vec::new();
 
		for token in self.content.iter().rev() {
 
			res.append(&mut token.list_last());
 
			if !token.is_skippable() {break;}
 
		}
 

	
 
		return res;
 
	}
 

	
 
	fn list_neighbours(&self) -> Vec<(usize, usize)> {
 
		let mut res = Vec::new();
 
		let mut previous: Vec<&Box<Token>> = Vec::new();
 
		for token in self.content.iter() {
 
			for t in previous.iter() {
 
				for x in t.list_last() {
 
					for y in token.list_first() {
 
						res.push((x, y));
 
					}
 
				}
 
			}
 
			res.append(&mut token.list_neighbours());
 

	
 
			if token.is_skippable() {
 
				previous.push(token);
 
			} else {
 
				previous = vec![token];
 
			}
 
		}
 

	
 
		return res;
 
	}
 
}
 

	
 
impl Token {
 
	/// Decide whether the `Token` has to contain some `Symbols`,
 
	/// or whether it can be stepped over when looking for first, last and neighbouring items.
 
	pub fn is_skippable(&self) -> bool {
 
		match self {
 
			Token::Lambda => true,
 
			Token::Symbol(_) => false,
 
			Token::Asterisk(_) => true,
 
			Token::Alternative(t) => t.is_skippable(),
 
			Token::AlternativeSeparator => panic!(),
 
			Token::AlternativeSeparator => panic!("Separators must be already removed at this stage"),
 
			Token::Chain(t) => t.is_skippable()
 
		}
 
	}
 

	
 
	/// List all possible string positions the token can start with.
 
	pub fn list_first(&self) -> Vec<usize> {
 
		match self {
 
			Token::Lambda => vec![],
 
			Token::Symbol(t) => t.list_first(),
 
			Token::Asterisk(t) => t.list_first(),
 
			Token::Alternative(t) => t.list_first(),
 
			Token::AlternativeSeparator => panic!(),
 
			Token::AlternativeSeparator => panic!("Separators must be already removed at this stage"),
 
			Token::Chain(t) => t.list_first()
 
		}
 
	}
 

	
 
	/// List all possible string positions the token can end with.
 
	pub fn list_last(&self) -> Vec<usize> {
 
		match self {
 
			Token::Lambda => vec![],
 
			Token::Symbol(t) => t.list_last(),
 
			Token::Asterisk(t) => t.list_last(),
 
			Token::Alternative(t) => t.list_last(),
 
			Token::AlternativeSeparator => panic!(),
 
			Token::AlternativeSeparator => panic!("Separators must be already removed at this stage"),
 
			Token::Chain(t) => t.list_last()
 
		}
 
	}
 

	
 
	/// List positions of all possibly neighbouring subtokens.
 
	pub fn list_neighbours(&self) -> Vec<(usize, usize)> {
 
		match self {
 
			Token::Lambda => vec![],
 
			Token::Symbol(t) => t.list_neighbours(),
 
			Token::Asterisk(t) => t.list_neighbours(),
 
			Token::Alternative(t) => t.list_neighbours(),
 
			Token::AlternativeSeparator => panic!(),
 
			Token::AlternativeSeparator => panic!("Separators must be already removed at this stage"),
 
			Token::Chain(t) => t.list_neighbours()
 
		}
 
	}
 
}
 

	
 
/// For a string starting with a parenthesis, find its matching closing parenthesis, or return None.
 
fn find_closing_parenthesis(s: &String) -> Option<usize> {
 
	let chars: Vec<char> = s.chars().collect();
 
	let mut counter = 0;
 

	
 
	for (i, c) in chars.iter().enumerate() {
 
		if *c == '(' {counter += 1;}
 
		else if *c == ')' {counter -= 1;}
 
		if counter == 0 {return Some(i);}
 
	}
 

	
 
	return None;
 
}
 

	
 
/// Recursively parse the pattern into a `Token` tree.
 
/// 
 
/// The `offset` defines where the `pattern` starts relative to the original pattern,
 
/// to record correct global token positions in the subcalls
 
pub fn parse(pattern: &String, offset: usize) -> Result<Token, ParsingError> {
 
	let chars: Vec<char> = pattern.chars().collect();
 
	let mut res: Vec<Box<Token>> = Vec::new();
 
	let mut is_alternative = false;
 
	let mut i = 0;
 
	while i < pattern.len() {
 
		let c = chars[i];
 
		match c {
 
			'(' => {
 
				let j = find_closing_parenthesis(&pattern[i..].to_string()).ok_or(ParsingError::ClosingParenthesis {s: pattern.clone(), pos: i})? + i;
 
				let inner_content = parse(&pattern[i+1..j].to_string(), offset+i+1)?;
 
				res.push(Box::new(inner_content));
 
				i = j+1;
 
			}
 
			'*' => {
 
				let token = res.pop().ok_or(ParsingError::Asterisk{s: pattern.clone(), pos: i})?;
 
				res.push(Box::new(Token::Asterisk(Asterisk{content: token})));
 
				i += 1;
 
			}
 
			')' => {
 
				return Err(ParsingError::OpeningParenthesis {s: pattern.clone(), pos: i});
 
			}
 
			'|' | '+' => {
 
				is_alternative = true;
 
				res.push(Box::new(Token::AlternativeSeparator));
 
				i += 1;
 
			}
 
			'_' => {
 
				res.push(Box::new(Token::Lambda));
 
				i += 1;
 
			}
 
			_c => {
 
				res.push(Box::new(Token::Symbol(Symbol{position: i+offset})));
 
				i += 1;
 
			}
 
		}
 
	}
 

	
 
	if is_alternative {
 
		return Ok(Token::Alternative(Alternative::new(res)?));
 
	} else {
 
		return Ok(Token::Chain(Chain{content: res}));
 
	}
 
}
 

	
 
#[cfg(test)]
mod test {
 
	use super::*;
 

	
 
	#[test]
 
	fn test_closing_parenthesis() {
 
		let s = "()";
 
		assert_eq!(find_closing_parenthesis(&s.to_string()), Some(1));
 
	}
 
}
tests/test_regexp.rs
Show inline comments
 
use regexp::Regexp;
 
use regexp::RegexpNFA;
 
use regexp::ParsingError;
 

	
 
#[test]
 
fn test_eval_basic_nfa() {
 
	let r = Regexp::new(&String::from("abc")).unwrap();
 
	let r = RegexpNFA::new(&String::from("abc")).unwrap();
 
	assert!(r.eval(String::from("abc")));
 
	assert!(!r.eval(String::from("ab")));
 
	assert!(!r.eval(String::from("abcd")));
 
}
 

	
 
#[test]
 
fn test_eval_basic_dfa() {
 
	let r = Regexp::new(&String::from("abc")).unwrap().determinize().reduce().normalize();
 
	let r = RegexpNFA::new(&String::from("abc")).unwrap().determinize().reduce().normalize();
 
	assert!(r.eval(String::from("abc")));
 
	assert!(!r.eval(String::from("ab")));
 
	assert!(!r.eval(String::from("abcd")));
 
}
 

	
 
#[test]
 
fn test_eval_empty_nfa() {
 
	assert!(Regexp::new(&String::from("a*")).unwrap().eval(String::from("")));
 
	assert!(Regexp::new(&String::from("")).unwrap().eval(String::from("")));
 
	assert!(!Regexp::new(&String::from("")).unwrap().eval(String::from("a")));
 
	assert!(!Regexp::new(&String::from("a")).unwrap().eval(String::from("")));
 
	assert!(RegexpNFA::new(&String::from("a*")).unwrap().eval(String::from("")));
 
	assert!(RegexpNFA::new(&String::from("")).unwrap().eval(String::from("")));
 
	assert!(!RegexpNFA::new(&String::from("")).unwrap().eval(String::from("a")));
 
	assert!(!RegexpNFA::new(&String::from("a")).unwrap().eval(String::from("")));
 
}
 

	
 
#[test]
 
fn test_eval_empty_dfa() {
 
	assert!(Regexp::new(&String::from("a*")).unwrap().determinize().reduce().normalize().eval(String::from("")));
 
	assert!(Regexp::new(&String::from("")).unwrap().determinize().reduce().normalize().eval(String::from("")));
 
	assert!(!Regexp::new(&String::from("")).unwrap().determinize().reduce().normalize().eval(String::from("a")));
 
	assert!(RegexpNFA::new(&String::from("a*")).unwrap().determinize().reduce().normalize().eval(String::from("")));
 
	assert!(RegexpNFA::new(&String::from("")).unwrap().determinize().reduce().normalize().eval(String::from("")));
 
	assert!(!RegexpNFA::new(&String::from("")).unwrap().determinize().reduce().normalize().eval(String::from("a")));
 
}
 

	
 
#[test]
 
fn test_eval_asterisk_nfa() {
 
	let r = Regexp::new(&String::from("a*b*")).unwrap();
 
	let r = RegexpNFA::new(&String::from("a*b*")).unwrap();
 
	assert!(r.eval(String::from("a")));
 
	assert!(r.eval(String::from("ab")));
 
	assert!(r.eval(String::from("aabb")));
 
	assert!(!r.eval(String::from("abab")));
 
}
 

	
 
#[test]
 
fn test_eval_asterisk_dfa() {
 
	let r = Regexp::new(&String::from("a*b*")).unwrap().determinize().normalize().reduce();
 
	let r = RegexpNFA::new(&String::from("a*b*")).unwrap().determinize().normalize().reduce();
 
	assert!(r.eval(String::from("a")));
 
	assert!(r.eval(String::from("ab")));
 
	assert!(r.eval(String::from("aabb")));
 
	assert!(!r.eval(String::from("abab")));
 
}
 

	
 
#[test]
 
fn test_eval_alternative_nfa() {
 
	let r = Regexp::new(&String::from("a|b|c")).unwrap();
 
	let r = RegexpNFA::new(&String::from("a|b|c")).unwrap();
 
	assert!(r.eval(String::from("a")));
 
	assert!(r.eval(String::from("b")));
 
	assert!(r.eval(String::from("c")));
 
	assert!(!r.eval(String::from("")));
 
	assert!(!r.eval(String::from("ab")));
 
}
 

	
 
#[test]
 
fn test_eval_alternative_dfa() {
 
	let r = Regexp::new(&String::from("a|b|c")).unwrap().determinize().reduce().normalize();
 
	let r = RegexpNFA::new(&String::from("a|b|c")).unwrap().determinize().reduce().normalize();
 
	assert!(r.eval(String::from("a")));
 
	assert!(r.eval(String::from("b")));
 
	assert!(r.eval(String::from("c")));
 
	assert!(!r.eval(String::from("")));
 
	assert!(!r.eval(String::from("ab")));
 
}
 

	
 
#[test]
 
fn test_eval_lambda_nfa() {
 
	let r = Regexp::new(&String::from("a_")).unwrap();
 
	let r = RegexpNFA::new(&String::from("a_")).unwrap();
 
	assert!(r.eval(String::from("a")));
 
	assert!(!r.eval(String::from("")));
 
	assert!(!r.eval(String::from("ab")));
 

	
 
	let r = Regexp::new(&String::from("a|_")).unwrap();
 
	let r = RegexpNFA::new(&String::from("a|_")).unwrap();
 
	assert!(r.eval(String::from("a")));
 
	assert!(r.eval(String::from("")));
 
	assert!(!r.eval(String::from("b")));
 
}
 

	
 
#[test]
 
fn test_eval_lambda_dfa() {
 
	let r = Regexp::new(&String::from("a_")).unwrap().determinize().reduce().normalize();
 
	let r = RegexpNFA::new(&String::from("a_")).unwrap().determinize().reduce().normalize();
 
	assert!(r.eval(String::from("a")));
 
	assert!(!r.eval(String::from("")));
 
	assert!(!r.eval(String::from("ab")));
 

	
 
	let r = Regexp::new(&String::from("a|_")).unwrap().determinize().reduce().normalize();
 
	let r = RegexpNFA::new(&String::from("a|_")).unwrap().determinize().reduce().normalize();
 
	assert!(r.eval(String::from("a")));
 
	assert!(r.eval(String::from("")));
 
	assert!(!r.eval(String::from("b")));
 
}
 

	
 
#[test]
 
fn test_invalid_asterisk() {
 
	let x = Regexp::new(&String::from("*"));
 
	let x = RegexpNFA::new(&String::from("*"));
 
	assert!(matches!(x, Err(ParsingError::Asterisk{s: _, pos: 0})));
 
}
 

	
 
#[test]
 
fn test_invalid_closing_parenthesis() {
 
	let x = Regexp::new(&String::from("(a"));
 
	let x = RegexpNFA::new(&String::from("(a"));
 
	assert!(matches!(x, Err(ParsingError::ClosingParenthesis{s: _, pos: 0})));
 
}
 

	
 
#[test]
 
fn test_invalid_opening_parenthesis() {
 
	let x = Regexp::new(&String::from("a)"));
 
	let x = RegexpNFA::new(&String::from("a)"));
 
	assert!(matches!(x, Err(ParsingError::OpeningParenthesis{s: _, pos: 1})));
 
}
 

	
 
#[test]
 
fn test_invalid_empty_variant_start() {
 
	let x = Regexp::new(&String::from("a(|b)"));
 
	let x = RegexpNFA::new(&String::from("a(|b)"));
 
	assert!(matches!(x, Err(ParsingError::EmptyAlternativeVariant)));
 
}
 

	
 
#[test]
 
fn test_invalid_empty_variant_end() {
 
	let x = Regexp::new(&String::from("a|"));
 
	let x = RegexpNFA::new(&String::from("a|"));
 
	assert!(matches!(x, Err(ParsingError::EmptyAlternativeVariant)));
 
}
 

	
 
#[test]
 
fn test_invalid_empty_variant_middle() {
 
	let x = Regexp::new(&String::from("a||b"));
 
	let x = RegexpNFA::new(&String::from("a||b"));
 
	assert!(matches!(x, Err(ParsingError::EmptyAlternativeVariant)));
 
}