from collections import deque

from sympy.combinatorics.rewritingsystem_fsm import StateMachine


class RewritingSystem:
    '''
    A class implementing rewriting systems for `FpGroup`s.

    References
    ==========

    .. [1] Epstein, D., Holt, D. and Rees, S. (1991).
           The use of Knuth-Bendix methods to solve the word problem in automatic groups.
           Journal of Symbolic Computation, 12(4-5), pp. 397-414.

    .. [2] GAP's Manual on its KBMAG package,
           https://www.gap-system.org/Manuals/pkg/kbmag-1.5.3/doc/manual.pdf
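
    Examples
    ========

    A minimal usage sketch (illustrative; it asserts only the idempotence
    of reduction, since the exact normal forms depend on the word ordering
    of `FreeGroupElement`):

    >>> from sympy.combinatorics.rewritingsystem import RewritingSystem
    >>> from sympy.combinatorics.free_groups import free_group
    >>> from sympy.combinatorics.fp_groups import FpGroup
    >>> F, a, b = free_group("a, b")
    >>> G = FpGroup(F, [a*b*a**-1*b**-1])   # free abelian group on a, b
    >>> rws = RewritingSystem(G)
    >>> _ = rws.make_confluent()
    >>> w = rws.reduce(b*a*b**-1)
    >>> rws.reduce(w) == w                  # reducing again changes nothing
    True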
    '''
    def __init__(self, group):
        self.group = group
        self.alphabet = group.generators
        self._is_confluent = None

        # these values are taken from [2]
        self.maxeqns = 32767 # max rules
        self.tidyint = 100 # rules before tidying

        # _max_exceeded is True if maxeqns is exceeded
        # at any point
        self._max_exceeded = False

        # Reduction automaton
        self.reduction_automaton = None
        self._new_rules = {}

        # dictionary of reductions
        self.rules = {}
        self.rules_cache = deque([], 50)
        self._init_rules()

        # All the transition symbols in the automaton
        generators = list(self.alphabet)
        generators += [gen**-1 for gen in generators]
        # Create a finite state machine as an instance of the StateMachine object
        self.reduction_automaton = StateMachine('Reduction automaton for ' + repr(self.group), generators)
        self.construct_automaton()
    def set_max(self, n):
        '''
        Set the maximum number of rules that can be defined
        '''
        if n > self.maxeqns:
            # raising the limit clears the flag set when it was exceeded
            self._max_exceeded = False
        self.maxeqns = n
        return
    @property
    def is_confluent(self):
        '''
        Return `True` if the system is confluent
        '''
        if self._is_confluent is None:
            self._is_confluent = self._check_confluence()
        return self._is_confluent
    def _init_rules(self):
        identity = self.group.free_group.identity
        for r in self.group.relators:
            self.add_rule(r, identity)
        self._remove_redundancies()
        return
    def _add_rule(self, r1, r2):
        '''
        Add the rule r1 -> r2 with no checking or further
        deductions
        '''
        if len(self.rules) + 1 > self.maxeqns:
            self._is_confluent = self._check_confluence()
            self._max_exceeded = True
            raise RuntimeError("Too many rules were defined.")
        self.rules[r1] = r2
        # Add the newly added rule to the `new_rules` dictionary.
        if self.reduction_automaton:
            self._new_rules[r1] = r2
    def add_rule(self, w1, w2, check=False):
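        '''
        Add the rule w1 -> w2 (the pair is oriented so that w1 > w2)
        along with the rules arising from its overlaps with the free
        reductions g*g**-1 -> <identity> and g**-1*g -> <identity>,
        and return the set of left-hand sides of the newly added rules.
        If `check` is `True`, report the new keys without actually
        installing the rules. See [1], Section 3.
        '''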
        new_keys = set()

        if w1 == w2:
            return new_keys

        if w1 < w2:
            w1, w2 = w2, w1

        if (w1, w2) in self.rules_cache:
            return new_keys
        self.rules_cache.append((w1, w2))

        s1, s2 = w1, w2

        # The following is the equivalent of checking
        # s1 for overlaps with the implicit reductions
        # {g*g**-1 -> <identity>} and {g**-1*g -> <identity>}
        # for any generator g without installing the
        # redundant rules that would result from processing
        # the overlaps. See [1], Section 3 for details.

        if len(s1) - len(s2) < 3:
            if s1 not in self.rules:
                new_keys.add(s1)
                if not check:
                    self._add_rule(s1, s2)
            if s2**-1 > s1**-1 and s2**-1 not in self.rules:
                new_keys.add(s2**-1)
                if not check:
                    self._add_rule(s2**-1, s1**-1)

        # overlaps on the right
        while len(s1) - len(s2) > -1:
            g = s1[len(s1)-1]
            s1 = s1.subword(0, len(s1)-1)
            s2 = s2*g**-1
            if len(s1) - len(s2) < 0:
                if s2 not in self.rules:
                    if not check:
                        self._add_rule(s2, s1)
                    new_keys.add(s2)
            elif len(s1) - len(s2) < 3:
                new = self.add_rule(s1, s2, check)
                new_keys.update(new)

        # overlaps on the left
        while len(w1) - len(w2) > -1:
            g = w1[0]
            w1 = w1.subword(1, len(w1))
            w2 = g**-1*w2
            if len(w1) - len(w2) < 0:
                if w2 not in self.rules:
                    if not check:
                        self._add_rule(w2, w1)
                    new_keys.add(w2)
            elif len(w1) - len(w2) < 3:
                new = self.add_rule(w1, w2, check)
                new_keys.update(new)

        return new_keys
    def _remove_redundancies(self, changes=False):
        '''
        Reduce left- and right-hand sides of reduction rules
        and remove redundant equations (i.e. those for which
        lhs == rhs). If `changes` is `True`, return a set
        containing the removed keys and a set containing the
        added keys
        '''
        removed = set()
        added = set()
        rules = self.rules.copy()
        for r in rules:
            v = self.reduce(r, exclude=r)
            w = self.reduce(rules[r])
            if v != r:
                del self.rules[r]
                removed.add(r)
                if v > w:
                    added.add(v)
                    self.rules[v] = w
                elif v < w:
                    added.add(w)
                    self.rules[w] = v
            else:
                self.rules[v] = w
        if changes:
            return removed, added
        return
    def make_confluent(self, check=False):
        '''
        Try to make the system confluent using the Knuth-Bendix
        completion algorithm
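
        A sketch of typical use (illustrative; the free abelian
        presentation below is small enough to complete within the
        default rule limit):

        >>> from sympy.combinatorics.rewritingsystem import RewritingSystem
        >>> from sympy.combinatorics.free_groups import free_group
        >>> from sympy.combinatorics.fp_groups import FpGroup
        >>> F, a, b = free_group("a, b")
        >>> rws = RewritingSystem(FpGroup(F, [a*b*a**-1*b**-1]))
        >>> rws.make_confluent()
        True
        >>> rws.is_confluent
        True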
        '''
        if self._max_exceeded:
            return self._is_confluent

        lhs = list(self.rules.keys())

        def _overlaps(r1, r2):
            # return all words in which the left-hand sides r1 and r2
            # overlap, so that the word can be rewritten with either rule
            len1 = len(r1)
            len2 = len(r2)
            result = []
            for j in range(1, len1 + len2):
                if (r1.subword(len1 - j, len1 + len2 - j, strict=False)
                        == r2.subword(j - len1, j, strict=False)):
                    a = r1.subword(0, len1-j, strict=False)
                    a = a*r2.subword(0, j-len1, strict=False)
                    b = r2.subword(j-len1, j, strict=False)
                    c = r2.subword(j, len2, strict=False)
                    c = c*r1.subword(len1 + len2 - j, len1, strict=False)
                    result.append(a*b*c)
            return result

        def _process_overlap(w, r1, r2, check):
            # rewrite the overlap word w using r1 and using r2; if the two
            # results differ, add a new rule to resolve the critical pair
            s = w.eliminate_word(r1, self.rules[r1])
            s = self.reduce(s)
            t = w.eliminate_word(r2, self.rules[r2])
            t = self.reduce(t)
            if s != t:
                if check:
                    # system not confluent
                    return [0]
                try:
                    new_keys = self.add_rule(t, s, check)
                    return new_keys
                except RuntimeError:
                    return False
            return

        added = 0
        i = 0
        while i < len(lhs):
            r1 = lhs[i]
            i += 1
            # j could start from i+1 to avoid checking each pair twice,
            # but lhs is extended in the loop and the new elements have
            # to be checked against the preceding ones, so the whole list
            # is traversed. There is probably a better way to handle this.
            j = 0
            while j < len(lhs):
                r2 = lhs[j]
                j += 1
                if r1 == r2:
                    continue
                overlaps = _overlaps(r1, r2)
                overlaps.extend(_overlaps(r1**-1, r2))
                if not overlaps:
                    continue
                for w in overlaps:
                    new_keys = _process_overlap(w, r1, r2, check)
                    if new_keys:
                        if check:
                            # system not confluent
                            return False
                        lhs.extend(new_keys)
                        added += len(new_keys)
                    elif new_keys == False:
                        # too many rules were added so the process
                        # couldn't complete
                        return self._is_confluent

                    if added > self.tidyint and not check:
                        # tidy up
                        r, a = self._remove_redundancies(changes=True)
                        added = 0
                        if r:
                            # reset i since some elements were removed
                            i = min([lhs.index(s) for s in r])
                            lhs = [l for l in lhs if l not in r]
                            lhs.extend(a)
                            if r1 in r:
                                # r1 was removed as redundant
                                break

        self._is_confluent = True
        if not check:
            self._remove_redundancies()
        return True
    def _check_confluence(self):
        return self.make_confluent(check=True)
    def reduce(self, word, exclude=None):
        '''
        Apply reduction rules to `word` excluding the reduction rule
        for the lhs equal to `exclude`
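
        For example (an illustrative sketch; no particular normal form is
        asserted since it depends on the word ordering, only that equal
        group elements share one once the system is confluent):

        >>> from sympy.combinatorics.rewritingsystem import RewritingSystem
        >>> from sympy.combinatorics.free_groups import free_group
        >>> from sympy.combinatorics.fp_groups import FpGroup
        >>> F, a, b = free_group("a, b")
        >>> rws = RewritingSystem(FpGroup(F, [a*b*a**-1*b**-1]))
        >>> _ = rws.make_confluent()
        >>> rws.reduce(b*a) == rws.reduce(a*b)
        True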
        '''
        rules = {r: self.rules[r] for r in self.rules if r != exclude}
        # the following is essentially `eliminate_words()` code from the
        # `FreeGroupElement` class, the only difference being the first
        # "if" statement
        again = True
        new = word
        while again:
            again = False
            for r in rules:
                prev = new
                if rules[r]**-1 > r**-1:
                    new = new.eliminate_word(r, rules[r], _all=True, inverse=False)
                else:
                    new = new.eliminate_word(r, rules[r], _all=True)
                if new != prev:
                    again = True
        return new
    def _compute_inverse_rules(self, rules):
        '''
        Compute the inverse rules for a given set of rules.
        The inverse rules are used in the automaton for word reduction.

        Arguments:
            rules (dictionary): Rules for which the inverse rules are to
                be computed.

        Returns:
            Dictionary of inverse rules.
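
        For example (illustrative; each rule lhs -> rhs yields a rule
        between rhs**-1 and lhs**-1, oriented so that the greater word
        is rewritten to the smaller one):

        >>> from sympy.combinatorics.rewritingsystem import RewritingSystem
        >>> from sympy.combinatorics.free_groups import free_group
        >>> from sympy.combinatorics.fp_groups import FpGroup
        >>> F, a, b = free_group("a, b")
        >>> rws = RewritingSystem(FpGroup(F, [a*b*a**-1*b**-1]))
        >>> rws._compute_inverse_rules({a*b: a}) == {b**-1*a**-1: a**-1}
        True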
        '''
        inverse_rules = {}
        for r in rules:
            rule_key_inverse = r**-1
            rule_value_inverse = (rules[r])**-1
            if (rule_value_inverse < rule_key_inverse):
                inverse_rules[rule_key_inverse] = rule_value_inverse
            else:
                inverse_rules[rule_value_inverse] = rule_key_inverse
        return inverse_rules
    def construct_automaton(self):
        '''
        Construct the automaton based on the set of reduction rules of the system.

        Automata Design:
        The accept states of the automaton are the proper prefixes of the
        left-hand sides of the rules. The complete left-hand sides of the
        rules are the dead states of the automaton.
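
        For instance (a hypothetical rule, for illustration only): a rule
        with left-hand side a*b*a would contribute the accept states a and
        a*b, and the dead state a*b*a holding the rule's right-hand side.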
        '''
        self._add_to_automaton(self.rules)
    def _add_to_automaton(self, rules):
        '''
        Add new states and transitions to the automaton.

        Summary:
        States corresponding to the new rules added to the system are
        computed and added to the automaton. Transitions in the previously
        added states are also modified if necessary.

        Arguments:
            rules (dictionary) -- Dictionary of the newly added rules.
        '''
        # Automaton variables
        automaton_alphabet = []
        proper_prefixes = {}

        # compute the inverses of all the new rules added
        all_rules = rules
        inverse_rules = self._compute_inverse_rules(all_rules)
        all_rules.update(inverse_rules)

        # Keep track of the accept_states.
        accept_states = []

        for rule in all_rules:
            # The symbols present in the new rules are the symbols to be
            # verified at each state. They make up the automaton_alphabet,
            # as the transitions solely depend upon the new states.
            automaton_alphabet += rule.letter_form_elm
            # Compute the proper prefixes for every rule.
            proper_prefixes[rule] = []
            letter_word_array = [s for s in rule.letter_form_elm]
            len_letter_word_array = len(letter_word_array)
            for i in range(1, len_letter_word_array):
                letter_word_array[i] = letter_word_array[i-1]*letter_word_array[i]
                # Add accept states.
                elem = letter_word_array[i-1]
                if elem not in self.reduction_automaton.states:
                    self.reduction_automaton.add_state(elem, state_type='a')
                    accept_states.append(elem)
            proper_prefixes[rule] = letter_word_array
            # Check for overlaps between dead and accept states.
            if rule in accept_states:
                self.reduction_automaton.states[rule].state_type = 'd'
                self.reduction_automaton.states[rule].rh_rule = all_rules[rule]
                accept_states.remove(rule)
            # Add dead states
            if rule not in self.reduction_automaton.states:
                self.reduction_automaton.add_state(rule, state_type='d', rh_rule=all_rules[rule])
        automaton_alphabet = set(automaton_alphabet)

        # Add new transitions for every state.
        for state in self.reduction_automaton.states:
            current_state_name = state
            current_state_type = self.reduction_automaton.states[state].state_type
            # Transitions will be modified only when suffixes of the
            # current_state belong to the proper_prefixes of the new rules.
            # The rest are ignored if they cannot lead to a dead state
            # after a finite number of transitions.
            if current_state_type == 's':
                for letter in automaton_alphabet:
                    if letter in self.reduction_automaton.states:
                        self.reduction_automaton.states[state].add_transition(letter, letter)
                    else:
                        self.reduction_automaton.states[state].add_transition(letter, current_state_name)
            elif current_state_type == 'a':
                # Check if a transition to any new state is possible.
                for letter in automaton_alphabet:
                    _next = current_state_name*letter
                    # fall back to the longest proper suffix that is a state
                    while len(_next) and _next not in self.reduction_automaton.states:
                        _next = _next.subword(1, len(_next))
                    if not len(_next):
                        _next = 'start'
                    self.reduction_automaton.states[state].add_transition(letter, _next)

        # Add transitions for new states. All symbols used in the automaton are considered here.
        # Ignore this if `reduction_automaton.automaton_alphabet` = `automaton_alphabet`.
        if len(self.reduction_automaton.automaton_alphabet) != len(automaton_alphabet):
            for state in accept_states:
                current_state_name = state
                for letter in self.reduction_automaton.automaton_alphabet:
                    _next = current_state_name*letter
                    while len(_next) and _next not in self.reduction_automaton.states:
                        _next = _next.subword(1, len(_next))
                    if not len(_next):
                        _next = 'start'
                    self.reduction_automaton.states[state].add_transition(letter, _next)
    def reduce_using_automaton(self, word):
        '''
        Reduce a word using an automaton.

        Summary:
        All the symbols of the word are stored in an array and are given
        as the input to the automaton. If the automaton reaches a dead
        state, the corresponding subword is replaced and the automaton is
        run again from the beginning of the word. This process is repeated
        until the whole word is read without the automaton reaching a dead
        state.

        Arguments:
            word (instance of FreeGroupElement) -- Word that needs to be reduced.
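
        For example (illustrative; once the system is confluent, the
        automaton-based reduction computes the same normal form as
        ``reduce``):

        >>> from sympy.combinatorics.rewritingsystem import RewritingSystem
        >>> from sympy.combinatorics.free_groups import free_group
        >>> from sympy.combinatorics.fp_groups import FpGroup
        >>> F, a, b = free_group("a, b")
        >>> rws = RewritingSystem(FpGroup(F, [a*b*a**-1*b**-1]))
        >>> _ = rws.make_confluent()
        >>> rws.reduce_using_automaton(b*a) == rws.reduce(b*a)
        True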
        '''
        # Modify the automaton if new rules are found.
        if self._new_rules:
            self._add_to_automaton(self._new_rules)
            self._new_rules = {}

        flag = 1
        while flag:
            flag = 0
            current_state = self.reduction_automaton.states['start']
            word_array = [s for s in word.letter_form_elm]
            for i in range(len(word_array)):
                next_state_name = current_state.transitions[word_array[i]]
                next_state = self.reduction_automaton.states[next_state_name]
                if next_state.state_type == 'd':
                    # the dead state's name is the matched left-hand side;
                    # replace it with the rule's right-hand side
                    subst = next_state.rh_rule
                    word = word.substituted_word(i - len(next_state_name) + 1, i+1, subst)
                    flag = 1
                    break
                current_state = next_state
        return word