diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/generators.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/generators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea3a64b36f0d23102f626d95e1cc97868b691054 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/generators.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/partitions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/partitions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5cf37be25c6d1e5485f8bbb5abd3409fc7d0a267 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/partitions.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/galois.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/galois.py new file mode 100644 index 0000000000000000000000000000000000000000..0666884676b367dd3fa4b49535cfbaee72b2eac9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/galois.py @@ -0,0 +1,611 @@ +r""" +Construct transitive subgroups of symmetric groups, useful in Galois theory. + +Besides constructing instances of the :py:class:`~.PermutationGroup` class to +represent the transitive subgroups of $S_n$ for small $n$, this module provides +*names* for these groups. + +In some applications, it may be preferable to know the name of a group, +rather than receive an instance of the :py:class:`~.PermutationGroup` +class, and then have to do extra work to determine which group it is, by +checking various properties. + +Names are instances of ``Enum`` classes defined in this module. With a name in +hand, the name's ``get_perm_group`` method can then be used to retrieve a +:py:class:`~.PermutationGroup`. + +The names used for groups in this module are taken from [1]. + +References +========== + +.. [1] Cohen, H. *A Course in Computational Algebraic Number Theory*. + +""" + +from collections import defaultdict +from enum import Enum +import itertools + +from sympy.combinatorics.named_groups import ( + SymmetricGroup, AlternatingGroup, CyclicGroup, DihedralGroup, + set_symmetric_group_properties, set_alternating_group_properties, +) +from sympy.combinatorics.perm_groups import PermutationGroup +from sympy.combinatorics.permutations import Permutation + + +class S1TransitiveSubgroups(Enum): + """ + Names for the transitive subgroups of S1. + """ + S1 = "S1" + + def get_perm_group(self): + return SymmetricGroup(1) + + +class S2TransitiveSubgroups(Enum): + """ + Names for the transitive subgroups of S2. + """ + S2 = "S2" + + def get_perm_group(self): + return SymmetricGroup(2) + + +class S3TransitiveSubgroups(Enum): + """ + Names for the transitive subgroups of S3. + """ + A3 = "A3" + S3 = "S3" + + def get_perm_group(self): + if self == S3TransitiveSubgroups.A3: + return AlternatingGroup(3) + elif self == S3TransitiveSubgroups.S3: + return SymmetricGroup(3) + + +class S4TransitiveSubgroups(Enum): + """ + Names for the transitive subgroups of S4. 
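+
+    As an illustrative sketch of how these names are used, ``V`` denotes the
+    Klein four-group, realized transitively in S4 by :py:func:`~.four_group`:
+
+    >>> from sympy.combinatorics.galois import S4TransitiveSubgroups
+    >>> S4TransitiveSubgroups.V.get_perm_group().order()
+    4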
+ """ + C4 = "C4" + V = "V" + D4 = "D4" + A4 = "A4" + S4 = "S4" + + def get_perm_group(self): + if self == S4TransitiveSubgroups.C4: + return CyclicGroup(4) + elif self == S4TransitiveSubgroups.V: + return four_group() + elif self == S4TransitiveSubgroups.D4: + return DihedralGroup(4) + elif self == S4TransitiveSubgroups.A4: + return AlternatingGroup(4) + elif self == S4TransitiveSubgroups.S4: + return SymmetricGroup(4) + + +class S5TransitiveSubgroups(Enum): + """ + Names for the transitive subgroups of S5. + """ + C5 = "C5" + D5 = "D5" + M20 = "M20" + A5 = "A5" + S5 = "S5" + + def get_perm_group(self): + if self == S5TransitiveSubgroups.C5: + return CyclicGroup(5) + elif self == S5TransitiveSubgroups.D5: + return DihedralGroup(5) + elif self == S5TransitiveSubgroups.M20: + return M20() + elif self == S5TransitiveSubgroups.A5: + return AlternatingGroup(5) + elif self == S5TransitiveSubgroups.S5: + return SymmetricGroup(5) + + +class S6TransitiveSubgroups(Enum): + """ + Names for the transitive subgroups of S6. + """ + C6 = "C6" + S3 = "S3" + D6 = "D6" + A4 = "A4" + G18 = "G18" + A4xC2 = "A4 x C2" + S4m = "S4-" + S4p = "S4+" + G36m = "G36-" + G36p = "G36+" + S4xC2 = "S4 x C2" + PSL2F5 = "PSL2(F5)" + G72 = "G72" + PGL2F5 = "PGL2(F5)" + A6 = "A6" + S6 = "S6" + + def get_perm_group(self): + if self == S6TransitiveSubgroups.C6: + return CyclicGroup(6) + elif self == S6TransitiveSubgroups.S3: + return S3_in_S6() + elif self == S6TransitiveSubgroups.D6: + return DihedralGroup(6) + elif self == S6TransitiveSubgroups.A4: + return A4_in_S6() + elif self == S6TransitiveSubgroups.G18: + return G18() + elif self == S6TransitiveSubgroups.A4xC2: + return A4xC2() + elif self == S6TransitiveSubgroups.S4m: + return S4m() + elif self == S6TransitiveSubgroups.S4p: + return S4p() + elif self == S6TransitiveSubgroups.G36m: + return G36m() + elif self == S6TransitiveSubgroups.G36p: + return G36p() + elif self == S6TransitiveSubgroups.S4xC2: + return S4xC2() + elif self == S6TransitiveSubgroups.PSL2F5: + return PSL2F5() + elif self == S6TransitiveSubgroups.G72: + return G72() + elif self == S6TransitiveSubgroups.PGL2F5: + return PGL2F5() + elif self == S6TransitiveSubgroups.A6: + return AlternatingGroup(6) + elif self == S6TransitiveSubgroups.S6: + return SymmetricGroup(6) + + +def four_group(): + """ + Return a representation of the Klein four-group as a transitive subgroup + of S4. + """ + return PermutationGroup( + Permutation(0, 1)(2, 3), + Permutation(0, 2)(1, 3) + ) + + +def M20(): + """ + Return a representation of the metacyclic group M20, a transitive subgroup + of S5 that is one of the possible Galois groups for polys of degree 5. + + Notes + ===== + + See [1], Page 323. + + """ + G = PermutationGroup(Permutation(0, 1, 2, 3, 4), Permutation(1, 2, 4, 3)) + G._degree = 5 + G._order = 20 + G._is_transitive = True + G._is_sym = False + G._is_alt = False + G._is_cyclic = False + G._is_dihedral = False + return G + + +def S3_in_S6(): + """ + Return a representation of S3 as a transitive subgroup of S6. + + Notes + ===== + + The representation is found by viewing the group as the symmetries of a + triangular prism. + + """ + G = PermutationGroup(Permutation(0, 1, 2)(3, 4, 5), Permutation(0, 3)(2, 4)(1, 5)) + set_symmetric_group_properties(G, 3, 6) + return G + + +def A4_in_S6(): + """ + Return a representation of A4 as a transitive subgroup of S6. + + Notes + ===== + + This was computed using :py:func:`~.find_transitive_subgroups_of_S6`. 
+ + """ + G = PermutationGroup(Permutation(0, 4, 5)(1, 3, 2), Permutation(0, 1, 2)(3, 5, 4)) + set_alternating_group_properties(G, 4, 6) + return G + + +def S4m(): + """ + Return a representation of the S4- transitive subgroup of S6. + + Notes + ===== + + This was computed using :py:func:`~.find_transitive_subgroups_of_S6`. + + """ + G = PermutationGroup(Permutation(1, 4, 5, 3), Permutation(0, 4)(1, 5)(2, 3)) + set_symmetric_group_properties(G, 4, 6) + return G + + +def S4p(): + """ + Return a representation of the S4+ transitive subgroup of S6. + + Notes + ===== + + This was computed using :py:func:`~.find_transitive_subgroups_of_S6`. + + """ + G = PermutationGroup(Permutation(0, 2, 4, 1)(3, 5), Permutation(0, 3)(4, 5)) + set_symmetric_group_properties(G, 4, 6) + return G + + +def A4xC2(): + """ + Return a representation of the (A4 x C2) transitive subgroup of S6. + + Notes + ===== + + This was computed using :py:func:`~.find_transitive_subgroups_of_S6`. + + """ + return PermutationGroup( + Permutation(0, 4, 5)(1, 3, 2), Permutation(0, 1, 2)(3, 5, 4), + Permutation(5)(2, 4)) + + +def S4xC2(): + """ + Return a representation of the (S4 x C2) transitive subgroup of S6. + + Notes + ===== + + This was computed using :py:func:`~.find_transitive_subgroups_of_S6`. + + """ + return PermutationGroup( + Permutation(1, 4, 5, 3), Permutation(0, 4)(1, 5)(2, 3), + Permutation(1, 4)(3, 5)) + + +def G18(): + """ + Return a representation of the group G18, a transitive subgroup of S6 + isomorphic to the semidirect product of C3^2 with C2. + + Notes + ===== + + This was computed using :py:func:`~.find_transitive_subgroups_of_S6`. + + """ + return PermutationGroup( + Permutation(5)(0, 1, 2), Permutation(3, 4, 5), + Permutation(0, 4)(1, 5)(2, 3)) + + +def G36m(): + """ + Return a representation of the group G36-, a transitive subgroup of S6 + isomorphic to the semidirect product of C3^2 with C2^2. + + Notes + ===== + + This was computed using :py:func:`~.find_transitive_subgroups_of_S6`. + + """ + return PermutationGroup( + Permutation(5)(0, 1, 2), Permutation(3, 4, 5), + Permutation(1, 2)(3, 5), Permutation(0, 4)(1, 5)(2, 3)) + + +def G36p(): + """ + Return a representation of the group G36+, a transitive subgroup of S6 + isomorphic to the semidirect product of C3^2 with C4. + + Notes + ===== + + This was computed using :py:func:`~.find_transitive_subgroups_of_S6`. + + """ + return PermutationGroup( + Permutation(5)(0, 1, 2), Permutation(3, 4, 5), + Permutation(0, 5, 2, 3)(1, 4)) + + +def G72(): + """ + Return a representation of the group G72, a transitive subgroup of S6 + isomorphic to the semidirect product of C3^2 with D4. + + Notes + ===== + + See [1], Page 325. + + """ + return PermutationGroup( + Permutation(5)(0, 1, 2), + Permutation(0, 4, 1, 3)(2, 5), Permutation(0, 3)(1, 4)(2, 5)) + + +def PSL2F5(): + r""" + Return a representation of the group $PSL_2(\mathbb{F}_5)$, as a transitive + subgroup of S6, isomorphic to $A_5$. + + Notes + ===== + + This was computed using :py:func:`~.find_transitive_subgroups_of_S6`. + + """ + G = PermutationGroup( + Permutation(0, 4, 5)(1, 3, 2), Permutation(0, 4, 3, 1, 5)) + set_alternating_group_properties(G, 5, 6) + return G + + +def PGL2F5(): + r""" + Return a representation of the group $PGL_2(\mathbb{F}_5)$, as a transitive + subgroup of S6, isomorphic to $S_5$. + + Notes + ===== + + See [1], Page 325. 
+ + """ + G = PermutationGroup( + Permutation(0, 1, 2, 3, 4), Permutation(0, 5)(1, 2)(3, 4)) + set_symmetric_group_properties(G, 5, 6) + return G + + +def find_transitive_subgroups_of_S6(*targets, print_report=False): + r""" + Search for certain transitive subgroups of $S_6$. + + The symmetric group $S_6$ has 16 different transitive subgroups, up to + conjugacy. Some are more easily constructed than others. For example, the + dihedral group $D_6$ is immediately found, but it is not at all obvious how + to realize $S_4$ or $S_5$ *transitively* within $S_6$. + + In some cases there are well-known constructions that can be used. For + example, $S_5$ is isomorphic to $PGL_2(\mathbb{F}_5)$, which acts in a + natural way on the projective line $P^1(\mathbb{F}_5)$, a set of order 6. + + In absence of such special constructions however, we can simply search for + generators. For example, transitive instances of $A_4$ and $S_4$ can be + found within $S_6$ in this way. + + Once we are engaged in such searches, it may then be easier (if less + elegant) to find even those groups like $S_5$ that do have special + constructions, by mere search. + + This function locates generators for transitive instances in $S_6$ of the + following subgroups: + + * $A_4$ + * $S_4^-$ ($S_4$ not contained within $A_6$) + * $S_4^+$ ($S_4$ contained within $A_6$) + * $A_4 \times C_2$ + * $S_4 \times C_2$ + * $G_{18} = C_3^2 \rtimes C_2$ + * $G_{36}^- = C_3^2 \rtimes C_2^2$ + * $G_{36}^+ = C_3^2 \rtimes C_4$ + * $G_{72} = C_3^2 \rtimes D_4$ + * $A_5$ + * $S_5$ + + Note: Each of these groups also has a dedicated function in this module + that returns the group immediately, using generators that were found by + this search procedure. + + The search procedure serves as a record of how these generators were + found. Also, due to randomness in the generation of the elements of + permutation groups, it can be called again, in order to (probably) get + different generators for the same groups. + + Parameters + ========== + + targets : list of :py:class:`~.S6TransitiveSubgroups` values + The groups you want to find. + + print_report : bool (default False) + If True, print to stdout the generators found for each group. + + Returns + ======= + + dict + mapping each name in *targets* to the :py:class:`~.PermutationGroup` + that was found + + References + ========== + + .. [2] https://en.wikipedia.org/wiki/Projective_linear_group#Exceptional_isomorphisms + .. [3] https://en.wikipedia.org/wiki/Automorphisms_of_the_symmetric_and_alternating_groups#PGL(2,5) + + """ + def elts_by_order(G): + """Sort the elements of a group by their order. """ + elts = defaultdict(list) + for g in G.elements: + elts[g.order()].append(g) + return elts + + def order_profile(G, name=None): + """Determine how many elements a group has, of each order. """ + elts = elts_by_order(G) + profile = {o:len(e) for o, e in elts.items()} + if name: + print(f'{name}: ' + ' '.join(f'{len(profile[r])}@{r}' for r in sorted(profile.keys()))) + return profile + + S6 = SymmetricGroup(6) + A6 = AlternatingGroup(6) + S6_by_order = elts_by_order(S6) + + def search(existing_gens, needed_gen_orders, order, alt=None, profile=None, anti_profile=None): + """ + Find a transitive subgroup of S6. + + Parameters + ========== + + existing_gens : list of Permutation + Optionally empty list of generators that must be in the group. + + needed_gen_orders : list of positive int + Nonempty list of the orders of the additional generators that are + to be found. 
+ + order: int + The order of the group being sought. + + alt: bool, None + If True, require the group to be contained in A6. + If False, require the group not to be contained in A6. + + profile : dict + If given, the group's order profile must equal this. + + anti_profile : dict + If given, the group's order profile must *not* equal this. + + """ + for gens in itertools.product(*[S6_by_order[n] for n in needed_gen_orders]): + if len(set(gens)) < len(gens): + continue + G = PermutationGroup(existing_gens + list(gens)) + if G.order() == order and G.is_transitive(): + if alt is not None and G.is_subgroup(A6) != alt: + continue + if profile and order_profile(G) != profile: + continue + if anti_profile and order_profile(G) == anti_profile: + continue + return G + + def match_known_group(G, alt=None): + needed = [g.order() for g in G.generators] + return search([], needed, G.order(), alt=alt, profile=order_profile(G)) + + found = {} + + def finish_up(name, G): + found[name] = G + if print_report: + print("=" * 40) + print(f"{name}:") + print(G.generators) + + if S6TransitiveSubgroups.A4 in targets or S6TransitiveSubgroups.A4xC2 in targets: + A4_in_S6 = match_known_group(AlternatingGroup(4)) + finish_up(S6TransitiveSubgroups.A4, A4_in_S6) + + if S6TransitiveSubgroups.S4m in targets or S6TransitiveSubgroups.S4xC2 in targets: + S4m_in_S6 = match_known_group(SymmetricGroup(4), alt=False) + finish_up(S6TransitiveSubgroups.S4m, S4m_in_S6) + + if S6TransitiveSubgroups.S4p in targets: + S4p_in_S6 = match_known_group(SymmetricGroup(4), alt=True) + finish_up(S6TransitiveSubgroups.S4p, S4p_in_S6) + + if S6TransitiveSubgroups.A4xC2 in targets: + A4xC2_in_S6 = search(A4_in_S6.generators, [2], 24, anti_profile=order_profile(SymmetricGroup(4))) + finish_up(S6TransitiveSubgroups.A4xC2, A4xC2_in_S6) + + if S6TransitiveSubgroups.S4xC2 in targets: + S4xC2_in_S6 = search(S4m_in_S6.generators, [2], 48) + finish_up(S6TransitiveSubgroups.S4xC2, S4xC2_in_S6) + + # For the normal factor N = C3^2 in any of the G_n subgroups, we take one + # obvious instance of C3^2 in S6: + N_gens = [Permutation(5)(0, 1, 2), Permutation(5)(3, 4, 5)] + + if S6TransitiveSubgroups.G18 in targets: + G18_in_S6 = search(N_gens, [2], 18) + finish_up(S6TransitiveSubgroups.G18, G18_in_S6) + + if S6TransitiveSubgroups.G36m in targets: + G36m_in_S6 = search(N_gens, [2, 2], 36, alt=False) + finish_up(S6TransitiveSubgroups.G36m, G36m_in_S6) + + if S6TransitiveSubgroups.G36p in targets: + G36p_in_S6 = search(N_gens, [4], 36, alt=True) + finish_up(S6TransitiveSubgroups.G36p, G36p_in_S6) + + if S6TransitiveSubgroups.G72 in targets: + G72_in_S6 = search(N_gens, [4, 2], 72) + finish_up(S6TransitiveSubgroups.G72, G72_in_S6) + + # The PSL2(F5) and PGL2(F5) subgroups are isomorphic to A5 and S5, resp. + + if S6TransitiveSubgroups.PSL2F5 in targets: + PSL2F5_in_S6 = match_known_group(AlternatingGroup(5)) + finish_up(S6TransitiveSubgroups.PSL2F5, PSL2F5_in_S6) + + if S6TransitiveSubgroups.PGL2F5 in targets: + PGL2F5_in_S6 = match_known_group(SymmetricGroup(5)) + finish_up(S6TransitiveSubgroups.PGL2F5, PGL2F5_in_S6) + + # There is little need to "search" for any of the groups C6, S3, D6, A6, + # or S6, since they all have obvious realizations within S6. However, we + # support them here just in case a random representation is desired. 
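+    # For example (illustrative only; the generators found will generally
+    # differ between runs, since the search draws random elements of S6), a
+    # random transitive realization of D6 could be requested via:
+    #
+    #     find_transitive_subgroups_of_S6(S6TransitiveSubgroups.D6,
+    #                                     print_report=True)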
+ + if S6TransitiveSubgroups.C6 in targets: + C6 = match_known_group(CyclicGroup(6)) + finish_up(S6TransitiveSubgroups.C6, C6) + + if S6TransitiveSubgroups.S3 in targets: + S3 = match_known_group(SymmetricGroup(3)) + finish_up(S6TransitiveSubgroups.S3, S3) + + if S6TransitiveSubgroups.D6 in targets: + D6 = match_known_group(DihedralGroup(6)) + finish_up(S6TransitiveSubgroups.D6, D6) + + if S6TransitiveSubgroups.A6 in targets: + A6 = match_known_group(A6) + finish_up(S6TransitiveSubgroups.A6, A6) + + if S6TransitiveSubgroups.S6 in targets: + S6 = match_known_group(S6) + finish_up(S6TransitiveSubgroups.S6, S6) + + return found diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/group_constructs.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/group_constructs.py new file mode 100644 index 0000000000000000000000000000000000000000..a5c16ec254191646b26eee869763e2926e187da5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/group_constructs.py @@ -0,0 +1,61 @@ +from sympy.combinatorics.perm_groups import PermutationGroup +from sympy.combinatorics.permutations import Permutation +from sympy.utilities.iterables import uniq + +_af_new = Permutation._af_new + + +def DirectProduct(*groups): + """ + Returns the direct product of several groups as a permutation group. + + Explanation + =========== + + This is implemented much like the __mul__ procedure for taking the direct + product of two permutation groups, but the idea of shifting the + generators is realized in the case of an arbitrary number of groups. + A call to DirectProduct(G1, G2, ..., Gn) is generally expected to be faster + than a call to G1*G2*...*Gn (and thus the need for this algorithm). + + Examples + ======== + + >>> from sympy.combinatorics.group_constructs import DirectProduct + >>> from sympy.combinatorics.named_groups import CyclicGroup + >>> C = CyclicGroup(4) + >>> G = DirectProduct(C, C, C) + >>> G.order() + 64 + + See Also + ======== + + sympy.combinatorics.perm_groups.PermutationGroup.__mul__ + + """ + degrees = [] + gens_count = [] + total_degree = 0 + total_gens = 0 + for group in groups: + current_deg = group.degree + current_num_gens = len(group.generators) + degrees.append(current_deg) + total_degree += current_deg + gens_count.append(current_num_gens) + total_gens += current_num_gens + array_gens = [] + for i in range(total_gens): + array_gens.append(list(range(total_degree))) + current_gen = 0 + current_deg = 0 + for i in range(len(gens_count)): + for j in range(current_gen, current_gen + gens_count[i]): + gen = ((groups[i].generators)[j - current_gen]).array_form + array_gens[j][current_deg:current_deg + degrees[i]] = \ + [x + current_deg for x in gen] + current_gen += gens_count[i] + current_deg += degrees[i] + perm_gens = list(uniq([_af_new(list(a)) for a in array_gens])) + return PermutationGroup(perm_gens, dups=False) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/group_numbers.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/group_numbers.py new file mode 100644 index 0000000000000000000000000000000000000000..4433d76d29636a974159ed67a53423d5702972d1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/group_numbers.py @@ -0,0 +1,118 @@ +from sympy.core import Integer, Pow, Mod +from sympy import factorint + + +def is_nilpotent_number(n): + """ + Check whether `n` is a nilpotent number. 
A number `n` is said to be + nilpotent if and only if every finite group of order `n` is nilpotent. + For more information see [1]_. + + Examples + ======== + + >>> from sympy.combinatorics.group_numbers import is_nilpotent_number + >>> from sympy import randprime + >>> is_nilpotent_number(21) + False + >>> is_nilpotent_number(randprime(1, 30)**12) + True + + References + ========== + + .. [1] Pakianathan, J., Shankar, K., *Nilpotent Numbers*, + The American Mathematical Monthly, 107(7), 631-634. + + + """ + if n <= 0 or int(n) != n: + raise ValueError("n must be a positive integer, not %i" % n) + + n = Integer(n) + prime_factors = list(factorint(n).items()) + is_nilpotent = True + for p_j, a_j in prime_factors: + for p_i, a_i in prime_factors: + if any([Mod(Pow(p_i, k), p_j) == 1 for k in range(1, a_i + 1)]): + is_nilpotent = False + break + if not is_nilpotent: + break + + return is_nilpotent + + +def is_abelian_number(n): + """ + Check whether `n` is an abelian number. A number `n` is said to be abelian + if and only if every finite group of order `n` is abelian. For more + information see [1]_. + + Examples + ======== + + >>> from sympy.combinatorics.group_numbers import is_abelian_number + >>> from sympy import randprime + >>> is_abelian_number(4) + True + >>> is_abelian_number(randprime(1, 2000)**2) + True + >>> is_abelian_number(60) + False + + References + ========== + + .. [1] Pakianathan, J., Shankar, K., *Nilpotent Numbers*, + The American Mathematical Monthly, 107(7), 631-634. + + + """ + if n <= 0 or int(n) != n: + raise ValueError("n must be a positive integer, not %i" % n) + + n = Integer(n) + if not is_nilpotent_number(n): + return False + + prime_factors = list(factorint(n).items()) + is_abelian = all(a_i < 3 for p_i, a_i in prime_factors) + return is_abelian + + +def is_cyclic_number(n): + """ + Check whether `n` is a cyclic number. A number `n` is said to be cyclic + if and only if every finite group of order `n` is cyclic. For more + information see [1]_. + + Examples + ======== + + >>> from sympy.combinatorics.group_numbers import is_cyclic_number + >>> from sympy import randprime + >>> is_cyclic_number(15) + True + >>> is_cyclic_number(randprime(1, 2000)**2) + False + >>> is_cyclic_number(4) + False + + References + ========== + + .. [1] Pakianathan, J., Shankar, K., *Nilpotent Numbers*, + The American Mathematical Monthly, 107(7), 631-634. + + """ + if n <= 0 or int(n) != n: + raise ValueError("n must be a positive integer, not %i" % n) + + n = Integer(n) + if not is_nilpotent_number(n): + return False + + prime_factors = list(factorint(n).items()) + is_cyclic = all(a_i < 2 for p_i, a_i in prime_factors) + return is_cyclic diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/homomorphisms.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/homomorphisms.py new file mode 100644 index 0000000000000000000000000000000000000000..95b43c501300617e44a251388f46f2a2f1e9fca1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/homomorphisms.py @@ -0,0 +1,549 @@ +import itertools +from sympy.combinatorics.fp_groups import FpGroup, FpSubgroup, simplify_presentation +from sympy.combinatorics.free_groups import FreeGroup +from sympy.combinatorics.perm_groups import PermutationGroup +from sympy.core.numbers import igcd +from sympy.ntheory.factor_ import totient +from sympy.core.singleton import S + +class GroupHomomorphism: + ''' + A class representing group homomorphisms. Instantiate using `homomorphism()`. 
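+
+    Examples
+    ========
+
+    A minimal sketch of typical usage (the particular groups chosen here are
+    arbitrary):
+
+    >>> from sympy.combinatorics.named_groups import CyclicGroup
+    >>> from sympy.combinatorics.homomorphisms import homomorphism
+    >>> G, H = CyclicGroup(4), CyclicGroup(2)
+    >>> phi = homomorphism(G, H, G.generators, H.generators)
+    >>> phi(G.generators[0]**2).is_identity
+    True
+    >>> phi.kernel().order()
+    2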
+ + References + ========== + + .. [1] Holt, D., Eick, B. and O'Brien, E. (2005). Handbook of computational group theory. + + ''' + + def __init__(self, domain, codomain, images): + self.domain = domain + self.codomain = codomain + self.images = images + self._inverses = None + self._kernel = None + self._image = None + + def _invs(self): + ''' + Return a dictionary with `{gen: inverse}` where `gen` is a rewriting + generator of `codomain` (e.g. strong generator for permutation groups) + and `inverse` is an element of its preimage + + ''' + image = self.image() + inverses = {} + for k in list(self.images.keys()): + v = self.images[k] + if not (v in inverses + or v.is_identity): + inverses[v] = k + if isinstance(self.codomain, PermutationGroup): + gens = image.strong_gens + else: + gens = image.generators + for g in gens: + if g in inverses or g.is_identity: + continue + w = self.domain.identity + if isinstance(self.codomain, PermutationGroup): + parts = image._strong_gens_slp[g][::-1] + else: + parts = g + for s in parts: + if s in inverses: + w = w*inverses[s] + else: + w = w*inverses[s**-1]**-1 + inverses[g] = w + + return inverses + + def invert(self, g): + ''' + Return an element of the preimage of ``g`` or of each element + of ``g`` if ``g`` is a list. + + Explanation + =========== + + If the codomain is an FpGroup, the inverse for equal + elements might not always be the same unless the FpGroup's + rewriting system is confluent. However, making a system + confluent can be time-consuming. If it's important, try + `self.codomain.make_confluent()` first. + + ''' + from sympy.combinatorics import Permutation + from sympy.combinatorics.free_groups import FreeGroupElement + if isinstance(g, (Permutation, FreeGroupElement)): + if isinstance(self.codomain, FpGroup): + g = self.codomain.reduce(g) + if self._inverses is None: + self._inverses = self._invs() + image = self.image() + w = self.domain.identity + if isinstance(self.codomain, PermutationGroup): + gens = image.generator_product(g)[::-1] + else: + gens = g + # the following can't be "for s in gens:" + # because that would be equivalent to + # "for s in gens.array_form:" when g is + # a FreeGroupElement. On the other hand, + # when you call gens by index, the generator + # (or inverse) at position i is returned. + for i in range(len(gens)): + s = gens[i] + if s.is_identity: + continue + if s in self._inverses: + w = w*self._inverses[s] + else: + w = w*self._inverses[s**-1]**-1 + return w + elif isinstance(g, list): + return [self.invert(e) for e in g] + + def kernel(self): + ''' + Compute the kernel of `self`. + + ''' + if self._kernel is None: + self._kernel = self._compute_kernel() + return self._kernel + + def _compute_kernel(self): + G = self.domain + G_order = G.order() + if G_order is S.Infinity: + raise NotImplementedError( + "Kernel computation is not implemented for infinite groups") + gens = [] + if isinstance(G, PermutationGroup): + K = PermutationGroup(G.identity) + else: + K = FpSubgroup(G, gens, normal=True) + i = self.image().order() + while K.order()*i != G_order: + r = G.random() + k = r*self.invert(self(r))**-1 + if k not in K: + gens.append(k) + if isinstance(G, PermutationGroup): + K = PermutationGroup(gens) + else: + K = FpSubgroup(G, gens, normal=True) + return K + + def image(self): + ''' + Compute the image of `self`. 
+ + ''' + if self._image is None: + values = list(set(self.images.values())) + if isinstance(self.codomain, PermutationGroup): + self._image = self.codomain.subgroup(values) + else: + self._image = FpSubgroup(self.codomain, values) + return self._image + + def _apply(self, elem): + ''' + Apply `self` to `elem`. + + ''' + if elem not in self.domain: + if isinstance(elem, (list, tuple)): + return [self._apply(e) for e in elem] + raise ValueError("The supplied element does not belong to the domain") + if elem.is_identity: + return self.codomain.identity + else: + images = self.images + value = self.codomain.identity + if isinstance(self.domain, PermutationGroup): + gens = self.domain.generator_product(elem, original=True) + for g in gens: + if g in self.images: + value = images[g]*value + else: + value = images[g**-1]**-1*value + else: + i = 0 + for _, p in elem.array_form: + if p < 0: + g = elem[i]**-1 + else: + g = elem[i] + value = value*images[g]**p + i += abs(p) + return value + + def __call__(self, elem): + return self._apply(elem) + + def is_injective(self): + ''' + Check if the homomorphism is injective + + ''' + return self.kernel().order() == 1 + + def is_surjective(self): + ''' + Check if the homomorphism is surjective + + ''' + im = self.image().order() + oth = self.codomain.order() + if im is S.Infinity and oth is S.Infinity: + return None + else: + return im == oth + + def is_isomorphism(self): + ''' + Check if `self` is an isomorphism. + + ''' + return self.is_injective() and self.is_surjective() + + def is_trivial(self): + ''' + Check is `self` is a trivial homomorphism, i.e. all elements + are mapped to the identity. + + ''' + return self.image().order() == 1 + + def compose(self, other): + ''' + Return the composition of `self` and `other`, i.e. + the homomorphism phi such that for all g in the domain + of `other`, phi(g) = self(other(g)) + + ''' + if not other.image().is_subgroup(self.domain): + raise ValueError("The image of `other` must be a subgroup of " + "the domain of `self`") + images = {g: self(other(g)) for g in other.images} + return GroupHomomorphism(other.domain, self.codomain, images) + + def restrict_to(self, H): + ''' + Return the restriction of the homomorphism to the subgroup `H` + of the domain. + + ''' + if not isinstance(H, PermutationGroup) or not H.is_subgroup(self.domain): + raise ValueError("Given H is not a subgroup of the domain") + domain = H + images = {g: self(g) for g in H.generators} + return GroupHomomorphism(domain, self.codomain, images) + + def invert_subgroup(self, H): + ''' + Return the subgroup of the domain that is the inverse image + of the subgroup ``H`` of the homomorphism image + + ''' + if not H.is_subgroup(self.image()): + raise ValueError("Given H is not a subgroup of the image") + gens = [] + P = PermutationGroup(self.image().identity) + for h in H.generators: + h_i = self.invert(h) + if h_i not in P: + gens.append(h_i) + P = PermutationGroup(gens) + for k in self.kernel().generators: + if k*h_i not in P: + gens.append(k*h_i) + P = PermutationGroup(gens) + return P + +def homomorphism(domain, codomain, gens, images=(), check=True): + ''' + Create (if possible) a group homomorphism from the group ``domain`` + to the group ``codomain`` defined by the images of the domain's + generators ``gens``. ``gens`` and ``images`` can be either lists or tuples + of equal sizes. If ``gens`` is a proper subset of the group's generators, + the unspecified generators will be mapped to the identity. 
If the + images are not specified, a trivial homomorphism will be created. + + If the given images of the generators do not define a homomorphism, + an exception is raised. + + If ``check`` is ``False``, do not check whether the given images actually + define a homomorphism. + + ''' + if not isinstance(domain, (PermutationGroup, FpGroup, FreeGroup)): + raise TypeError("The domain must be a group") + if not isinstance(codomain, (PermutationGroup, FpGroup, FreeGroup)): + raise TypeError("The codomain must be a group") + + generators = domain.generators + if not all(g in generators for g in gens): + raise ValueError("The supplied generators must be a subset of the domain's generators") + if not all(g in codomain for g in images): + raise ValueError("The images must be elements of the codomain") + + if images and len(images) != len(gens): + raise ValueError("The number of images must be equal to the number of generators") + + gens = list(gens) + images = list(images) + + images.extend([codomain.identity]*(len(generators)-len(images))) + gens.extend([g for g in generators if g not in gens]) + images = dict(zip(gens,images)) + + if check and not _check_homomorphism(domain, codomain, images): + raise ValueError("The given images do not define a homomorphism") + return GroupHomomorphism(domain, codomain, images) + +def _check_homomorphism(domain, codomain, images): + """ + Check that a given mapping of generators to images defines a homomorphism. + + Parameters + ========== + domain : PermutationGroup, FpGroup, FreeGroup + codomain : PermutationGroup, FpGroup, FreeGroup + images : dict + The set of keys must be equal to domain.generators. + The values must be elements of the codomain. + + """ + pres = domain if hasattr(domain, 'relators') else domain.presentation() + rels = pres.relators + gens = pres.generators + symbols = [g.ext_rep[0] for g in gens] + symbols_to_domain_generators = dict(zip(symbols, domain.generators)) + identity = codomain.identity + + def _image(r): + w = identity + for symbol, power in r.array_form: + g = symbols_to_domain_generators[symbol] + w *= images[g]**power + return w + + for r in rels: + if isinstance(codomain, FpGroup): + s = codomain.equals(_image(r), identity) + if s is None: + # only try to make the rewriting system + # confluent when it can't determine the + # truth of equality otherwise + success = codomain.make_confluent() + s = codomain.equals(_image(r), identity) + if s is None and not success: + raise RuntimeError("Can't determine if the images " + "define a homomorphism. Try increasing " + "the maximum number of rewriting rules " + "(group._rewriting_system.set_max(new_value); " + "the current value is stored in group._rewriting" + "_system.maxeqns)") + else: + s = _image(r).is_identity + if not s: + return False + return True + +def orbit_homomorphism(group, omega): + ''' + Return the homomorphism induced by the action of the permutation + group ``group`` on the set ``omega`` that is closed under the action. 
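+
+    For instance (an illustrative sketch; any set closed under the action of
+    the group would do), the group below acts on the invariant set ``[0, 1]``,
+    inducing a homomorphism into the symmetric group on two points:
+
+    >>> from sympy.combinatorics import Permutation, PermutationGroup
+    >>> from sympy.combinatorics.homomorphisms import orbit_homomorphism
+    >>> G = PermutationGroup(Permutation(0, 1)(2, 3, 4))
+    >>> T = orbit_homomorphism(G, [0, 1])
+    >>> T(G.generators[0]) == Permutation(0, 1)
+    True
+    >>> T.image().order()
+    2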
+ + ''' + from sympy.combinatorics import Permutation + from sympy.combinatorics.named_groups import SymmetricGroup + codomain = SymmetricGroup(len(omega)) + identity = codomain.identity + omega = list(omega) + images = {g: identity*Permutation([omega.index(o^g) for o in omega]) for g in group.generators} + group._schreier_sims(base=omega) + H = GroupHomomorphism(group, codomain, images) + if len(group.basic_stabilizers) > len(omega): + H._kernel = group.basic_stabilizers[len(omega)] + else: + H._kernel = PermutationGroup([group.identity]) + return H + +def block_homomorphism(group, blocks): + ''' + Return the homomorphism induced by the action of the permutation + group ``group`` on the block system ``blocks``. The latter should be + of the same form as returned by the ``minimal_block`` method for + permutation groups, namely a list of length ``group.degree`` where + the i-th entry is a representative of the block i belongs to. + + ''' + from sympy.combinatorics import Permutation + from sympy.combinatorics.named_groups import SymmetricGroup + + n = len(blocks) + + # number the blocks; m is the total number, + # b is such that b[i] is the number of the block i belongs to, + # p is the list of length m such that p[i] is the representative + # of the i-th block + m = 0 + p = [] + b = [None]*n + for i in range(n): + if blocks[i] == i: + p.append(i) + b[i] = m + m += 1 + for i in range(n): + b[i] = b[blocks[i]] + + codomain = SymmetricGroup(m) + # the list corresponding to the identity permutation in codomain + identity = range(m) + images = {g: Permutation([b[p[i]^g] for i in identity]) for g in group.generators} + H = GroupHomomorphism(group, codomain, images) + return H + +def group_isomorphism(G, H, isomorphism=True): + ''' + Compute an isomorphism between 2 given groups. + + Parameters + ========== + + G : A finite ``FpGroup`` or a ``PermutationGroup``. + First group. + + H : A finite ``FpGroup`` or a ``PermutationGroup`` + Second group. + + isomorphism : bool + This is used to avoid the computation of homomorphism + when the user only wants to check if there exists + an isomorphism between the groups. + + Returns + ======= + + If isomorphism = False -- Returns a boolean. + If isomorphism = True -- Returns a boolean and an isomorphism between `G` and `H`. + + Examples + ======== + + >>> from sympy.combinatorics import free_group, Permutation + >>> from sympy.combinatorics.perm_groups import PermutationGroup + >>> from sympy.combinatorics.fp_groups import FpGroup + >>> from sympy.combinatorics.homomorphisms import group_isomorphism + >>> from sympy.combinatorics.named_groups import DihedralGroup, AlternatingGroup + + >>> D = DihedralGroup(8) + >>> p = Permutation(0, 1, 2, 3, 4, 5, 6, 7) + >>> P = PermutationGroup(p) + >>> group_isomorphism(D, P) + (False, None) + + >>> F, a, b = free_group("a, b") + >>> G = FpGroup(F, [a**3, b**3, (a*b)**2]) + >>> H = AlternatingGroup(4) + >>> (check, T) = group_isomorphism(G, H) + >>> check + True + >>> T(b*a*b**-1*a**-1*b**-1) + (0 2 3) + + Notes + ===== + + Uses the approach suggested by Robert Tarjan to compute the isomorphism between two groups. + First, the generators of ``G`` are mapped to the elements of ``H`` and + we check if the mapping induces an isomorphism. 
+ + ''' + if not isinstance(G, (PermutationGroup, FpGroup)): + raise TypeError("The group must be a PermutationGroup or an FpGroup") + if not isinstance(H, (PermutationGroup, FpGroup)): + raise TypeError("The group must be a PermutationGroup or an FpGroup") + + if isinstance(G, FpGroup) and isinstance(H, FpGroup): + G = simplify_presentation(G) + H = simplify_presentation(H) + # Two infinite FpGroups with the same generators are isomorphic + # when the relators are same but are ordered differently. + if G.generators == H.generators and (G.relators).sort() == (H.relators).sort(): + if not isomorphism: + return True + return (True, homomorphism(G, H, G.generators, H.generators)) + + # `_H` is the permutation group isomorphic to `H`. + _H = H + g_order = G.order() + h_order = H.order() + + if g_order is S.Infinity: + raise NotImplementedError("Isomorphism methods are not implemented for infinite groups.") + + if isinstance(H, FpGroup): + if h_order is S.Infinity: + raise NotImplementedError("Isomorphism methods are not implemented for infinite groups.") + _H, h_isomorphism = H._to_perm_group() + + if (g_order != h_order) or (G.is_abelian != H.is_abelian): + if not isomorphism: + return False + return (False, None) + + if not isomorphism: + # Two groups of the same cyclic numbered order + # are isomorphic to each other. + n = g_order + if (igcd(n, totient(n))) == 1: + return True + + # Match the generators of `G` with subsets of `_H` + gens = list(G.generators) + for subset in itertools.permutations(_H, len(gens)): + images = list(subset) + images.extend([_H.identity]*(len(G.generators)-len(images))) + _images = dict(zip(gens,images)) + if _check_homomorphism(G, _H, _images): + if isinstance(H, FpGroup): + images = h_isomorphism.invert(images) + T = homomorphism(G, H, G.generators, images, check=False) + if T.is_isomorphism(): + # It is a valid isomorphism + if not isomorphism: + return True + return (True, T) + + if not isomorphism: + return False + return (False, None) + +def is_isomorphic(G, H): + ''' + Check if the groups are isomorphic to each other + + Parameters + ========== + + G : A finite ``FpGroup`` or a ``PermutationGroup`` + First group. + + H : A finite ``FpGroup`` or a ``PermutationGroup`` + Second group. 
+ + Returns + ======= + + boolean + ''' + return group_isomorphism(G, H, isomorphism=False) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/perm_groups.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/perm_groups.py new file mode 100644 index 0000000000000000000000000000000000000000..25cf5f5edbc84c268461f4079ff123c9483e36c1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/perm_groups.py @@ -0,0 +1,5472 @@ +from math import factorial as _factorial, log, prod +from itertools import chain, islice, product + + +from sympy.combinatorics import Permutation +from sympy.combinatorics.permutations import (_af_commutes_with, _af_invert, + _af_rmul, _af_rmuln, _af_pow, Cycle) +from sympy.combinatorics.util import (_check_cycles_alt_sym, + _distribute_gens_by_base, _orbits_transversals_from_bsgs, + _handle_precomputed_bsgs, _base_ordering, _strong_gens_from_distr, + _strip, _strip_af) +from sympy.core import Basic +from sympy.core.random import _randrange, randrange, choice +from sympy.core.symbol import Symbol +from sympy.core.sympify import _sympify +from sympy.functions.combinatorial.factorials import factorial +from sympy.ntheory import primefactors, sieve +from sympy.ntheory.factor_ import (factorint, multiplicity) +from sympy.ntheory.primetest import isprime +from sympy.utilities.iterables import has_variety, is_sequence, uniq + +rmul = Permutation.rmul_with_af +_af_new = Permutation._af_new + + +class PermutationGroup(Basic): + r"""The class defining a Permutation group. + + Explanation + =========== + + ``PermutationGroup([p1, p2, ..., pn])`` returns the permutation group + generated by the list of permutations. This group can be supplied + to Polyhedron if one desires to decorate the elements to which the + indices of the permutation refer. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> from sympy.combinatorics import Polyhedron + + The permutations corresponding to motion of the front, right and + bottom face of a $2 \times 2$ Rubik's cube are defined: + + >>> F = Permutation(2, 19, 21, 8)(3, 17, 20, 10)(4, 6, 7, 5) + >>> R = Permutation(1, 5, 21, 14)(3, 7, 23, 12)(8, 10, 11, 9) + >>> D = Permutation(6, 18, 14, 10)(7, 19, 15, 11)(20, 22, 23, 21) + + These are passed as permutations to PermutationGroup: + + >>> G = PermutationGroup(F, R, D) + >>> G.order() + 3674160 + + The group can be supplied to a Polyhedron in order to track the + objects being moved. An example involving the $2 \times 2$ Rubik's cube is + given there, but here is a simple demonstration: + + >>> a = Permutation(2, 1) + >>> b = Permutation(1, 0) + >>> G = PermutationGroup(a, b) + >>> P = Polyhedron(list('ABC'), pgroup=G) + >>> P.corners + (A, B, C) + >>> P.rotate(0) # apply permutation 0 + >>> P.corners + (A, C, B) + >>> P.reset() + >>> P.corners + (A, B, C) + + Or one can make a permutation as a product of selected permutations + and apply them to an iterable directly: + + >>> P10 = G.make_perm([0, 1]) + >>> P10('ABC') + ['C', 'A', 'B'] + + See Also + ======== + + sympy.combinatorics.polyhedron.Polyhedron, + sympy.combinatorics.permutations.Permutation + + References + ========== + + .. [1] Holt, D., Eick, B., O'Brien, E. + "Handbook of Computational Group Theory" + + .. [2] Seress, A. + "Permutation Group Algorithms" + + .. [3] https://en.wikipedia.org/wiki/Schreier_vector + + .. [4] https://en.wikipedia.org/wiki/Nielsen_transformation#Product_replacement_algorithm + + .. 
[5] Frank Celler, Charles R.Leedham-Green, Scott H.Murray, + Alice C.Niemeyer, and E.A.O'Brien. "Generating Random + Elements of a Finite Group" + + .. [6] https://en.wikipedia.org/wiki/Block_%28permutation_group_theory%29 + + .. [7] https://algorithmist.com/wiki/Union_find + + .. [8] https://en.wikipedia.org/wiki/Multiply_transitive_group#Multiply_transitive_groups + + .. [9] https://en.wikipedia.org/wiki/Center_%28group_theory%29 + + .. [10] https://en.wikipedia.org/wiki/Centralizer_and_normalizer + + .. [11] https://groupprops.subwiki.org/wiki/Derived_subgroup + + .. [12] https://en.wikipedia.org/wiki/Nilpotent_group + + .. [13] https://www.math.colostate.edu/~hulpke/CGT/cgtnotes.pdf + + .. [14] https://docs.gap-system.org/doc/ref/manual.pdf + + """ + is_group = True + + def __new__(cls, *args, dups=True, **kwargs): + """The default constructor. Accepts Cycle and Permutation forms. + Removes duplicates unless ``dups`` keyword is ``False``. + """ + if not args: + args = [Permutation()] + else: + args = list(args[0] if is_sequence(args[0]) else args) + if not args: + args = [Permutation()] + if any(isinstance(a, Cycle) for a in args): + args = [Permutation(a) for a in args] + if has_variety(a.size for a in args): + degree = kwargs.pop('degree', None) + if degree is None: + degree = max(a.size for a in args) + for i in range(len(args)): + if args[i].size != degree: + args[i] = Permutation(args[i], size=degree) + if dups: + args = list(uniq([_af_new(list(a)) for a in args])) + if len(args) > 1: + args = [g for g in args if not g.is_identity] + return Basic.__new__(cls, *args, **kwargs) + + def __init__(self, *args, **kwargs): + self._generators = list(self.args) + self._order = None + self._center = [] + self._is_abelian = None + self._is_transitive = None + self._is_sym = None + self._is_alt = None + self._is_primitive = None + self._is_nilpotent = None + self._is_solvable = None + self._is_trivial = None + self._transitivity_degree = None + self._max_div = None + self._is_perfect = None + self._is_cyclic = None + self._is_dihedral = None + self._r = len(self._generators) + self._degree = self._generators[0].size + + # these attributes are assigned after running schreier_sims + self._base = [] + self._strong_gens = [] + self._strong_gens_slp = [] + self._basic_orbits = [] + self._transversals = [] + self._transversal_slp = [] + + # these attributes are assigned after running _random_pr_init + self._random_gens = [] + + # finite presentation of the group as an instance of `FpGroup` + self._fp_presentation = None + + def __getitem__(self, i): + return self._generators[i] + + def __contains__(self, i): + """Return ``True`` if *i* is contained in PermutationGroup. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> p = Permutation(1, 2, 3) + >>> Permutation(3) in PermutationGroup(p) + True + + """ + if not isinstance(i, Permutation): + raise TypeError("A PermutationGroup contains only Permutations as " + "elements, not elements of type %s" % type(i)) + return self.contains(i) + + def __len__(self): + return len(self._generators) + + def equals(self, other): + """Return ``True`` if PermutationGroup generated by elements in the + group are same i.e they represent the same PermutationGroup. 
+ + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> p = Permutation(0, 1, 2, 3, 4, 5) + >>> G = PermutationGroup([p, p**2]) + >>> H = PermutationGroup([p**2, p]) + >>> G.generators == H.generators + False + >>> G.equals(H) + True + + """ + if not isinstance(other, PermutationGroup): + return False + + set_self_gens = set(self.generators) + set_other_gens = set(other.generators) + + # before reaching the general case there are also certain + # optimisation and obvious cases requiring less or no actual + # computation. + if set_self_gens == set_other_gens: + return True + + # in the most general case it will check that each generator of + # one group belongs to the other PermutationGroup and vice-versa + for gen1 in set_self_gens: + if not other.contains(gen1): + return False + for gen2 in set_other_gens: + if not self.contains(gen2): + return False + return True + + def __mul__(self, other): + """ + Return the direct product of two permutation groups as a permutation + group. + + Explanation + =========== + + This implementation realizes the direct product by shifting the index + set for the generators of the second group: so if we have ``G`` acting + on ``n1`` points and ``H`` acting on ``n2`` points, ``G*H`` acts on + ``n1 + n2`` points. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import CyclicGroup + >>> G = CyclicGroup(5) + >>> H = G*G + >>> H + PermutationGroup([ + (9)(0 1 2 3 4), + (5 6 7 8 9)]) + >>> H.order() + 25 + + """ + if isinstance(other, Permutation): + return Coset(other, self, dir='+') + gens1 = [perm._array_form for perm in self.generators] + gens2 = [perm._array_form for perm in other.generators] + n1 = self._degree + n2 = other._degree + start = list(range(n1)) + end = list(range(n1, n1 + n2)) + for i in range(len(gens2)): + gens2[i] = [x + n1 for x in gens2[i]] + gens2 = [start + gen for gen in gens2] + gens1 = [gen + end for gen in gens1] + together = gens1 + gens2 + gens = [_af_new(x) for x in together] + return PermutationGroup(gens) + + def _random_pr_init(self, r, n, _random_prec_n=None): + r"""Initialize random generators for the product replacement algorithm. + + Explanation + =========== + + The implementation uses a modification of the original product + replacement algorithm due to Leedham-Green, as described in [1], + pp. 69-71; also, see [2], pp. 27-29 for a detailed theoretical + analysis of the original product replacement algorithm, and [4]. + + The product replacement algorithm is used for producing random, + uniformly distributed elements of a group `G` with a set of generators + `S`. For the initialization ``_random_pr_init``, a list ``R`` of + `\max\{r, |S|\}` group generators is created as the attribute + ``G._random_gens``, repeating elements of `S` if necessary, and the + identity element of `G` is appended to ``R`` - we shall refer to this + last element as the accumulator. Then the function ``random_pr()`` + is called ``n`` times, randomizing the list ``R`` while preserving + the generation of `G` by ``R``. The function ``random_pr()`` itself + takes two random elements ``g, h`` among all elements of ``R`` but + the accumulator and replaces ``g`` with a randomly chosen element + from `\{gh, g(~h), hg, (~h)g\}`. Then the accumulator is multiplied + by whatever ``g`` was replaced by. The new value of the accumulator is + then returned by ``random_pr()``. + + The elements returned will eventually (for ``n`` large enough) become + uniformly distributed across `G` ([5]). 
For practical purposes however, + the values ``n = 50, r = 11`` are suggested in [1]. + + Notes + ===== + + THIS FUNCTION HAS SIDE EFFECTS: it changes the attribute + self._random_gens + + See Also + ======== + + random_pr + + """ + deg = self.degree + random_gens = [x._array_form for x in self.generators] + k = len(random_gens) + if k < r: + for i in range(k, r): + random_gens.append(random_gens[i - k]) + acc = list(range(deg)) + random_gens.append(acc) + self._random_gens = random_gens + + # handle randomized input for testing purposes + if _random_prec_n is None: + for i in range(n): + self.random_pr() + else: + for i in range(n): + self.random_pr(_random_prec=_random_prec_n[i]) + + def _union_find_merge(self, first, second, ranks, parents, not_rep): + """Merges two classes in a union-find data structure. + + Explanation + =========== + + Used in the implementation of Atkinson's algorithm as suggested in [1], + pp. 83-87. The class merging process uses union by rank as an + optimization. ([7]) + + Notes + ===== + + THIS FUNCTION HAS SIDE EFFECTS: the list of class representatives, + ``parents``, the list of class sizes, ``ranks``, and the list of + elements that are not representatives, ``not_rep``, are changed due to + class merging. + + See Also + ======== + + minimal_block, _union_find_rep + + References + ========== + + .. [1] Holt, D., Eick, B., O'Brien, E. + "Handbook of computational group theory" + + .. [7] https://algorithmist.com/wiki/Union_find + + """ + rep_first = self._union_find_rep(first, parents) + rep_second = self._union_find_rep(second, parents) + if rep_first != rep_second: + # union by rank + if ranks[rep_first] >= ranks[rep_second]: + new_1, new_2 = rep_first, rep_second + else: + new_1, new_2 = rep_second, rep_first + total_rank = ranks[new_1] + ranks[new_2] + if total_rank > self.max_div: + return -1 + parents[new_2] = new_1 + ranks[new_1] = total_rank + not_rep.append(new_2) + return 1 + return 0 + + def _union_find_rep(self, num, parents): + """Find representative of a class in a union-find data structure. + + Explanation + =========== + + Used in the implementation of Atkinson's algorithm as suggested in [1], + pp. 83-87. After the representative of the class to which ``num`` + belongs is found, path compression is performed as an optimization + ([7]). + + Notes + ===== + + THIS FUNCTION HAS SIDE EFFECTS: the list of class representatives, + ``parents``, is altered due to path compression. + + See Also + ======== + + minimal_block, _union_find_merge + + References + ========== + + .. [1] Holt, D., Eick, B., O'Brien, E. + "Handbook of computational group theory" + + .. [7] https://algorithmist.com/wiki/Union_find + + """ + rep, parent = num, parents[num] + while parent != rep: + rep = parent + parent = parents[rep] + # path compression + temp, parent = num, parents[num] + while parent != rep: + parents[temp] = rep + temp = parent + parent = parents[temp] + return rep + + @property + def base(self): + r"""Return a base from the Schreier-Sims algorithm. + + Explanation + =========== + + For a permutation group `G`, a base is a sequence of points + `B = (b_1, b_2, \dots, b_k)` such that no element of `G` apart + from the identity fixes all the points in `B`. The concepts of + a base and strong generating set and their applications are + discussed in depth in [1], pp. 87-89 and [2], pp. 55-57. + + An alternative way to think of `B` is that it gives the + indices of the stabilizer cosets that contain more than the + identity permutation. 
+ + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> G = PermutationGroup([Permutation(0, 1, 3)(2, 4)]) + >>> G.base + [0, 2] + + See Also + ======== + + strong_gens, basic_transversals, basic_orbits, basic_stabilizers + + """ + if self._base == []: + self.schreier_sims() + return self._base + + def baseswap(self, base, strong_gens, pos, randomized=False, + transversals=None, basic_orbits=None, strong_gens_distr=None): + r"""Swap two consecutive base points in base and strong generating set. + + Explanation + =========== + + If a base for a group `G` is given by `(b_1, b_2, \dots, b_k)`, this + function returns a base `(b_1, b_2, \dots, b_{i+1}, b_i, \dots, b_k)`, + where `i` is given by ``pos``, and a strong generating set relative + to that base. The original base and strong generating set are not + modified. + + The randomized version (default) is of Las Vegas type. + + Parameters + ========== + + base, strong_gens + The base and strong generating set. + pos + The position at which swapping is performed. + randomized + A switch between randomized and deterministic version. + transversals + The transversals for the basic orbits, if known. + basic_orbits + The basic orbits, if known. + strong_gens_distr + The strong generators distributed by basic stabilizers, if known. + + Returns + ======= + + (base, strong_gens) + ``base`` is the new base, and ``strong_gens`` is a generating set + relative to it. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import SymmetricGroup + >>> from sympy.combinatorics.testutil import _verify_bsgs + >>> from sympy.combinatorics.perm_groups import PermutationGroup + >>> S = SymmetricGroup(4) + >>> S.schreier_sims() + >>> S.base + [0, 1, 2] + >>> base, gens = S.baseswap(S.base, S.strong_gens, 1, randomized=False) + >>> base, gens + ([0, 2, 1], + [(0 1 2 3), (3)(0 1), (1 3 2), + (2 3), (1 3)]) + + check that base, gens is a BSGS + + >>> S1 = PermutationGroup(gens) + >>> _verify_bsgs(S1, base, gens) + True + + See Also + ======== + + schreier_sims + + Notes + ===== + + The deterministic version of the algorithm is discussed in + [1], pp. 102-103; the randomized version is discussed in [1], p.103, and + [2], p.98. It is of Las Vegas type. + Notice that [1] contains a mistake in the pseudocode and + discussion of BASESWAP: on line 3 of the pseudocode, + `|\beta_{i+1}^{\left\langle T\right\rangle}|` should be replaced by + `|\beta_{i}^{\left\langle T\right\rangle}|`, and the same for the + discussion of the algorithm. 
+ + """ + # construct the basic orbits, generators for the stabilizer chain + # and transversal elements from whatever was provided + transversals, basic_orbits, strong_gens_distr = \ + _handle_precomputed_bsgs(base, strong_gens, transversals, + basic_orbits, strong_gens_distr) + base_len = len(base) + degree = self.degree + # size of orbit of base[pos] under the stabilizer we seek to insert + # in the stabilizer chain at position pos + 1 + size = len(basic_orbits[pos])*len(basic_orbits[pos + 1]) \ + //len(_orbit(degree, strong_gens_distr[pos], base[pos + 1])) + # initialize the wanted stabilizer by a subgroup + if pos + 2 > base_len - 1: + T = [] + else: + T = strong_gens_distr[pos + 2][:] + # randomized version + if randomized is True: + stab_pos = PermutationGroup(strong_gens_distr[pos]) + schreier_vector = stab_pos.schreier_vector(base[pos + 1]) + # add random elements of the stabilizer until they generate it + while len(_orbit(degree, T, base[pos])) != size: + new = stab_pos.random_stab(base[pos + 1], + schreier_vector=schreier_vector) + T.append(new) + # deterministic version + else: + Gamma = set(basic_orbits[pos]) + Gamma.remove(base[pos]) + if base[pos + 1] in Gamma: + Gamma.remove(base[pos + 1]) + # add elements of the stabilizer until they generate it by + # ruling out member of the basic orbit of base[pos] along the way + while len(_orbit(degree, T, base[pos])) != size: + gamma = next(iter(Gamma)) + x = transversals[pos][gamma] + temp = x._array_form.index(base[pos + 1]) # (~x)(base[pos + 1]) + if temp not in basic_orbits[pos + 1]: + Gamma = Gamma - _orbit(degree, T, gamma) + else: + y = transversals[pos + 1][temp] + el = rmul(x, y) + if el(base[pos]) not in _orbit(degree, T, base[pos]): + T.append(el) + Gamma = Gamma - _orbit(degree, T, base[pos]) + # build the new base and strong generating set + strong_gens_new_distr = strong_gens_distr[:] + strong_gens_new_distr[pos + 1] = T + base_new = base[:] + base_new[pos], base_new[pos + 1] = base_new[pos + 1], base_new[pos] + strong_gens_new = _strong_gens_from_distr(strong_gens_new_distr) + for gen in T: + if gen not in strong_gens_new: + strong_gens_new.append(gen) + return base_new, strong_gens_new + + @property + def basic_orbits(self): + r""" + Return the basic orbits relative to a base and strong generating set. + + Explanation + =========== + + If `(b_1, b_2, \dots, b_k)` is a base for a group `G`, and + `G^{(i)} = G_{b_1, b_2, \dots, b_{i-1}}` is the ``i``-th basic stabilizer + (so that `G^{(1)} = G`), the ``i``-th basic orbit relative to this base + is the orbit of `b_i` under `G^{(i)}`. See [1], pp. 87-89 for more + information. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import SymmetricGroup + >>> S = SymmetricGroup(4) + >>> S.basic_orbits + [[0, 1, 2, 3], [1, 2, 3], [2, 3]] + + See Also + ======== + + base, strong_gens, basic_transversals, basic_stabilizers + + """ + if self._basic_orbits == []: + self.schreier_sims() + return self._basic_orbits + + @property + def basic_stabilizers(self): + r""" + Return a chain of stabilizers relative to a base and strong generating + set. + + Explanation + =========== + + The ``i``-th basic stabilizer `G^{(i)}` relative to a base + `(b_1, b_2, \dots, b_k)` is `G_{b_1, b_2, \dots, b_{i-1}}`. For more + information, see [1], pp. 87-89. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import AlternatingGroup + >>> A = AlternatingGroup(4) + >>> A.schreier_sims() + >>> A.base + [0, 1] + >>> for g in A.basic_stabilizers: + ... print(g) + ... 
+ PermutationGroup([ + (3)(0 1 2), + (1 2 3)]) + PermutationGroup([ + (1 2 3)]) + + See Also + ======== + + base, strong_gens, basic_orbits, basic_transversals + + """ + + if self._transversals == []: + self.schreier_sims() + strong_gens = self._strong_gens + base = self._base + if not base: # e.g. if self is trivial + return [] + strong_gens_distr = _distribute_gens_by_base(base, strong_gens) + basic_stabilizers = [] + for gens in strong_gens_distr: + basic_stabilizers.append(PermutationGroup(gens)) + return basic_stabilizers + + @property + def basic_transversals(self): + """ + Return basic transversals relative to a base and strong generating set. + + Explanation + =========== + + The basic transversals are transversals of the basic orbits. They + are provided as a list of dictionaries, each dictionary having + keys - the elements of one of the basic orbits, and values - the + corresponding transversal elements. See [1], pp. 87-89 for more + information. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import AlternatingGroup + >>> A = AlternatingGroup(4) + >>> A.basic_transversals + [{0: (3), 1: (3)(0 1 2), 2: (3)(0 2 1), 3: (0 3 1)}, {1: (3), 2: (1 2 3), 3: (1 3 2)}] + + See Also + ======== + + strong_gens, base, basic_orbits, basic_stabilizers + + """ + + if self._transversals == []: + self.schreier_sims() + return self._transversals + + def composition_series(self): + r""" + Return the composition series for a group as a list + of permutation groups. + + Explanation + =========== + + The composition series for a group `G` is defined as a + subnormal series `G = H_0 > H_1 > H_2 \ldots` A composition + series is a subnormal series such that each factor group + `H(i+1) / H(i)` is simple. + A subnormal series is a composition series only if it is of + maximum length. + + The algorithm works as follows: + Starting with the derived series the idea is to fill + the gap between `G = der[i]` and `H = der[i+1]` for each + `i` independently. Since, all subgroups of the abelian group + `G/H` are normal so, first step is to take the generators + `g` of `G` and add them to generators of `H` one by one. + + The factor groups formed are not simple in general. Each + group is obtained from the previous one by adding one + generator `g`, if the previous group is denoted by `H` + then the next group `K` is generated by `g` and `H`. + The factor group `K/H` is cyclic and it's order is + `K.order()//G.order()`. The series is then extended between + `K` and `H` by groups generated by powers of `g` and `H`. + The series formed is then prepended to the already existing + series. 
+ + Examples + ======== + >>> from sympy.combinatorics.named_groups import SymmetricGroup + >>> from sympy.combinatorics.named_groups import CyclicGroup + >>> S = SymmetricGroup(12) + >>> G = S.sylow_subgroup(2) + >>> C = G.composition_series() + >>> [H.order() for H in C] + [1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1] + >>> G = S.sylow_subgroup(3) + >>> C = G.composition_series() + >>> [H.order() for H in C] + [243, 81, 27, 9, 3, 1] + >>> G = CyclicGroup(12) + >>> C = G.composition_series() + >>> [H.order() for H in C] + [12, 6, 3, 1] + + """ + der = self.derived_series() + if not all(g.is_identity for g in der[-1].generators): + raise NotImplementedError('Group should be solvable') + series = [] + + for i in range(len(der)-1): + H = der[i+1] + up_seg = [] + for g in der[i].generators: + K = PermutationGroup([g] + H.generators) + order = K.order() // H.order() + down_seg = [] + for p, e in factorint(order).items(): + for _ in range(e): + down_seg.append(PermutationGroup([g] + H.generators)) + g = g**p + up_seg = down_seg + up_seg + H = K + up_seg[0] = der[i] + series.extend(up_seg) + series.append(der[-1]) + return series + + def coset_transversal(self, H): + """Return a transversal of the right cosets of self by its subgroup H + using the second method described in [1], Subsection 4.6.7 + + """ + + if not H.is_subgroup(self): + raise ValueError("The argument must be a subgroup") + + if H.order() == 1: + return self._elements + + self._schreier_sims(base=H.base) # make G.base an extension of H.base + + base = self.base + base_ordering = _base_ordering(base, self.degree) + identity = Permutation(self.degree - 1) + + transversals = self.basic_transversals[:] + # transversals is a list of dictionaries. Get rid of the keys + # so that it is a list of lists and sort each list in + # the increasing order of base[l]^x + for l, t in enumerate(transversals): + transversals[l] = sorted(t.values(), + key = lambda x: base_ordering[base[l]^x]) + + orbits = H.basic_orbits + h_stabs = H.basic_stabilizers + g_stabs = self.basic_stabilizers + + indices = [x.order()//y.order() for x, y in zip(g_stabs, h_stabs)] + + # T^(l) should be a right transversal of H^(l) in G^(l) for + # 1<=l<=len(base). While H^(l) is the trivial group, T^(l) + # contains all the elements of G^(l) so we might just as well + # start with l = len(h_stabs)-1 + if len(g_stabs) > len(h_stabs): + T = g_stabs[len(h_stabs)]._elements + else: + T = [identity] + l = len(h_stabs)-1 + t_len = len(T) + while l > -1: + T_next = [] + for u in transversals[l]: + if u == identity: + continue + b = base_ordering[base[l]^u] + for t in T: + p = t*u + if all(base_ordering[h^p] >= b for h in orbits[l]): + T_next.append(p) + if t_len + len(T_next) == indices[l]: + break + if t_len + len(T_next) == indices[l]: + break + T += T_next + t_len += len(T_next) + l -= 1 + T.remove(identity) + T = [identity] + T + return T + + def _coset_representative(self, g, H): + """Return the representative of Hg from the transversal that + would be computed by ``self.coset_transversal(H)``. + + """ + if H.order() == 1: + return g + # The base of self must be an extension of H.base. 
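+        # If it is not, a BSGS with such a base is recomputed below. The
+        # recursive ``step`` helper then adjusts the element by H-transversal
+        # and self-transversal factors, level by level, picking at each level
+        # the smallest image (in the base ordering) of H's basic orbit; this
+        # is the same choice that ``coset_transversal`` makes.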
+ if not(self.base[:len(H.base)] == H.base): + self._schreier_sims(base=H.base) + orbits = H.basic_orbits[:] + h_transversals = [list(_.values()) for _ in H.basic_transversals] + transversals = [list(_.values()) for _ in self.basic_transversals] + base = self.base + base_ordering = _base_ordering(base, self.degree) + def step(l, x): + gamma = sorted(orbits[l], key = lambda y: base_ordering[y^x])[0] + i = [base[l]^h for h in h_transversals[l]].index(gamma) + x = h_transversals[l][i]*x + if l < len(orbits)-1: + for u in transversals[l]: + if base[l]^u == base[l]^x: + break + x = step(l+1, x*u**-1)*u + return x + return step(0, g) + + def coset_table(self, H): + """Return the standardised (right) coset table of self in H as + a list of lists. + """ + # Maybe this should be made to return an instance of CosetTable + # from fp_groups.py but the class would need to be changed first + # to be compatible with PermutationGroups + + if not H.is_subgroup(self): + raise ValueError("The argument must be a subgroup") + T = self.coset_transversal(H) + n = len(T) + + A = list(chain.from_iterable((gen, gen**-1) + for gen in self.generators)) + + table = [] + for i in range(n): + row = [self._coset_representative(T[i]*x, H) for x in A] + row = [T.index(r) for r in row] + table.append(row) + + # standardize (this is the same as the algorithm used in coset_table) + # If CosetTable is made compatible with PermutationGroups, this + # should be replaced by table.standardize() + A = range(len(A)) + gamma = 1 + for alpha, a in product(range(n), A): + beta = table[alpha][a] + if beta >= gamma: + if beta > gamma: + for x in A: + z = table[gamma][x] + table[gamma][x] = table[beta][x] + table[beta][x] = z + for i in range(n): + if table[i][x] == beta: + table[i][x] = gamma + elif table[i][x] == gamma: + table[i][x] = beta + gamma += 1 + if gamma >= n-1: + return table + + def center(self): + r""" + Return the center of a permutation group. + + Explanation + =========== + + The center for a group `G` is defined as + `Z(G) = \{z\in G | \forall g\in G, zg = gz \}`, + the set of elements of `G` that commute with all elements of `G`. + It is equal to the centralizer of `G` inside `G`, and is naturally a + subgroup of `G` ([9]). + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import DihedralGroup + >>> D = DihedralGroup(4) + >>> G = D.center() + >>> G.order() + 2 + + See Also + ======== + + centralizer + + Notes + ===== + + This is a naive implementation that is a straightforward application + of ``.centralizer()`` + + """ + return self.centralizer(self) + + def centralizer(self, other): + r""" + Return the centralizer of a group/set/element. + + Explanation + =========== + + The centralizer of a set of permutations ``S`` inside + a group ``G`` is the set of elements of ``G`` that commute with all + elements of ``S``:: + + `C_G(S) = \{ g \in G | gs = sg \forall s \in S\}` ([10]) + + Usually, ``S`` is a subset of ``G``, but if ``G`` is a proper subgroup of + the full symmetric group, we allow for ``S`` to have elements outside + ``G``. + + It is naturally a subgroup of ``G``; the centralizer of a permutation + group is equal to the centralizer of any set of generators for that + group, since any element commuting with the generators commutes with + any product of the generators. + + Parameters + ========== + + other + a permutation group/list of permutations/single permutation + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import (SymmetricGroup, + ... 
CyclicGroup) + >>> S = SymmetricGroup(6) + >>> C = CyclicGroup(6) + >>> H = S.centralizer(C) + >>> H.is_subgroup(C) + True + + See Also + ======== + + subgroup_search + + Notes + ===== + + The implementation is an application of ``.subgroup_search()`` with + tests using a specific base for the group ``G``. + + """ + if hasattr(other, 'generators'): + if other.is_trivial or self.is_trivial: + return self + degree = self.degree + identity = _af_new(list(range(degree))) + orbits = other.orbits() + num_orbits = len(orbits) + orbits.sort(key=lambda x: -len(x)) + long_base = [] + orbit_reps = [None]*num_orbits + orbit_reps_indices = [None]*num_orbits + orbit_descr = [None]*degree + for i in range(num_orbits): + orbit = list(orbits[i]) + orbit_reps[i] = orbit[0] + orbit_reps_indices[i] = len(long_base) + for point in orbit: + orbit_descr[point] = i + long_base = long_base + orbit + base, strong_gens = self.schreier_sims_incremental(base=long_base) + strong_gens_distr = _distribute_gens_by_base(base, strong_gens) + i = 0 + for i in range(len(base)): + if strong_gens_distr[i] == [identity]: + break + base = base[:i] + base_len = i + for j in range(num_orbits): + if base[base_len - 1] in orbits[j]: + break + rel_orbits = orbits[: j + 1] + num_rel_orbits = len(rel_orbits) + transversals = [None]*num_rel_orbits + for j in range(num_rel_orbits): + rep = orbit_reps[j] + transversals[j] = dict( + other.orbit_transversal(rep, pairs=True)) + trivial_test = lambda x: True + tests = [None]*base_len + for l in range(base_len): + if base[l] in orbit_reps: + tests[l] = trivial_test + else: + def test(computed_words, l=l): + g = computed_words[l] + rep_orb_index = orbit_descr[base[l]] + rep = orbit_reps[rep_orb_index] + im = g._array_form[base[l]] + im_rep = g._array_form[rep] + tr_el = transversals[rep_orb_index][base[l]] + # using the definition of transversal, + # base[l]^g = rep^(tr_el*g); + # if g belongs to the centralizer, then + # base[l]^g = (rep^g)^tr_el + return im == tr_el._array_form[im_rep] + tests[l] = test + + def prop(g): + return [rmul(g, gen) for gen in other.generators] == \ + [rmul(gen, g) for gen in other.generators] + return self.subgroup_search(prop, base=base, + strong_gens=strong_gens, tests=tests) + elif hasattr(other, '__getitem__'): + gens = list(other) + return self.centralizer(PermutationGroup(gens)) + elif hasattr(other, 'array_form'): + return self.centralizer(PermutationGroup([other])) + + def commutator(self, G, H): + """ + Return the commutator of two subgroups. + + Explanation + =========== + + For a permutation group ``K`` and subgroups ``G``, ``H``, the + commutator of ``G`` and ``H`` is defined as the group generated + by all the commutators `[g, h] = hgh^{-1}g^{-1}` for ``g`` in ``G`` and + ``h`` in ``H``. It is naturally a subgroup of ``K`` ([1], p.27). + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import (SymmetricGroup, + ... AlternatingGroup) + >>> S = SymmetricGroup(5) + >>> A = AlternatingGroup(5) + >>> G = S.commutator(S, A) + >>> G.is_subgroup(A) + True + + See Also + ======== + + derived_subgroup + + Notes + ===== + + The commutator of two subgroups `H, G` is equal to the normal closure + of the commutators of all the generators, i.e. 
`hgh^{-1}g^{-1}` for `h` + a generator of `H` and `g` a generator of `G` ([1], p.28) + + """ + ggens = G.generators + hgens = H.generators + commutators = [] + for ggen in ggens: + for hgen in hgens: + commutator = rmul(hgen, ggen, ~hgen, ~ggen) + if commutator not in commutators: + commutators.append(commutator) + res = self.normal_closure(commutators) + return res + + def coset_factor(self, g, factor_index=False): + """Return ``G``'s (self's) coset factorization of ``g`` + + Explanation + =========== + + If ``g`` is an element of ``G`` then it can be written as the product + of permutations drawn from the Schreier-Sims coset decomposition, + + The permutations returned in ``f`` are those for which + the product gives ``g``: ``g = f[n]*...f[1]*f[0]`` where ``n = len(B)`` + and ``B = G.base``. f[i] is one of the permutations in + ``self._basic_orbits[i]``. + + If factor_index==True, + returns a tuple ``[b[0],..,b[n]]``, where ``b[i]`` + belongs to ``self._basic_orbits[i]`` + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> a = Permutation(0, 1, 3, 7, 6, 4)(2, 5) + >>> b = Permutation(0, 1, 3, 2)(4, 5, 7, 6) + >>> G = PermutationGroup([a, b]) + + Define g: + + >>> g = Permutation(7)(1, 2, 4)(3, 6, 5) + + Confirm that it is an element of G: + + >>> G.contains(g) + True + + Thus, it can be written as a product of factors (up to + 3) drawn from u. See below that a factor from u1 and u2 + and the Identity permutation have been used: + + >>> f = G.coset_factor(g) + >>> f[2]*f[1]*f[0] == g + True + >>> f1 = G.coset_factor(g, True); f1 + [0, 4, 4] + >>> tr = G.basic_transversals + >>> f[0] == tr[0][f1[0]] + True + + If g is not an element of G then [] is returned: + + >>> c = Permutation(5, 6, 7) + >>> G.coset_factor(c) + [] + + See Also + ======== + + sympy.combinatorics.util._strip + + """ + if isinstance(g, (Cycle, Permutation)): + g = g.list() + if len(g) != self._degree: + # this could either adjust the size or return [] immediately + # but we don't choose between the two and just signal a possible + # error + raise ValueError('g should be the same size as permutations of G') + I = list(range(self._degree)) + basic_orbits = self.basic_orbits + transversals = self._transversals + factors = [] + base = self.base + h = g + for i in range(len(base)): + beta = h[base[i]] + if beta == base[i]: + factors.append(beta) + continue + if beta not in basic_orbits[i]: + return [] + u = transversals[i][beta]._array_form + h = _af_rmul(_af_invert(u), h) + factors.append(beta) + if h != I: + return [] + if factor_index: + return factors + tr = self.basic_transversals + factors = [tr[i][factors[i]] for i in range(len(base))] + return factors + + def generator_product(self, g, original=False): + r''' + Return a list of strong generators `[s1, \dots, sn]` + s.t `g = sn \times \dots \times s1`. 
If ``original=True``, make the + list contain only the original group generators + + ''' + product = [] + if g.is_identity: + return [] + if g in self.strong_gens: + if not original or g in self.generators: + return [g] + else: + slp = self._strong_gens_slp[g] + for s in slp: + product.extend(self.generator_product(s, original=True)) + return product + elif g**-1 in self.strong_gens: + g = g**-1 + if not original or g in self.generators: + return [g**-1] + else: + slp = self._strong_gens_slp[g] + for s in slp: + product.extend(self.generator_product(s, original=True)) + l = len(product) + product = [product[l-i-1]**-1 for i in range(l)] + return product + + f = self.coset_factor(g, True) + for i, j in enumerate(f): + slp = self._transversal_slp[i][j] + for s in slp: + if not original: + product.append(self.strong_gens[s]) + else: + s = self.strong_gens[s] + product.extend(self.generator_product(s, original=True)) + return product + + def coset_rank(self, g): + """rank using Schreier-Sims representation. + + Explanation + =========== + + The coset rank of ``g`` is the ordering number in which + it appears in the lexicographic listing according to the + coset decomposition + + The ordering is the same as in G.generate(method='coset'). + If ``g`` does not belong to the group it returns None. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> a = Permutation(0, 1, 3, 7, 6, 4)(2, 5) + >>> b = Permutation(0, 1, 3, 2)(4, 5, 7, 6) + >>> G = PermutationGroup([a, b]) + >>> c = Permutation(7)(2, 4)(3, 5) + >>> G.coset_rank(c) + 16 + >>> G.coset_unrank(16) + (7)(2 4)(3 5) + + See Also + ======== + + coset_factor + + """ + factors = self.coset_factor(g, True) + if not factors: + return None + rank = 0 + b = 1 + transversals = self._transversals + base = self._base + basic_orbits = self._basic_orbits + for i in range(len(base)): + k = factors[i] + j = basic_orbits[i].index(k) + rank += b*j + b = b*len(transversals[i]) + return rank + + def coset_unrank(self, rank, af=False): + """unrank using Schreier-Sims representation + + coset_unrank is the inverse operation of coset_rank + if 0 <= rank < order; otherwise it returns None. + + """ + if rank < 0 or rank >= self.order(): + return None + base = self.base + transversals = self.basic_transversals + basic_orbits = self.basic_orbits + m = len(base) + v = [0]*m + for i in range(m): + rank, c = divmod(rank, len(transversals[i])) + v[i] = basic_orbits[i][c] + a = [transversals[i][v[i]]._array_form for i in range(m)] + h = _af_rmuln(*a) + if af: + return h + else: + return _af_new(h) + + @property + def degree(self): + """Returns the size of the permutations in the group. + + Explanation + =========== + + The number of permutations comprising the group is given by + ``len(group)``; the number of permutations that can be generated + by the group is given by ``group.order()``. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> a = Permutation([1, 0, 2]) + >>> G = PermutationGroup([a]) + >>> G.degree + 3 + >>> len(G) + 1 + >>> G.order() + 2 + >>> list(G.generate()) + [(2), (2)(0 1)] + + See Also + ======== + + order + """ + return self._degree + + @property + def identity(self): + ''' + Return the identity element of the permutation group. 
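+
+        Examples
+        ========
+
+        A small illustration (any generating permutation of the same degree
+        would do):
+
+        >>> from sympy.combinatorics import Permutation, PermutationGroup
+        >>> G = PermutationGroup(Permutation(0, 1, 2))
+        >>> G.identity
+        (2)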
+ + ''' + return _af_new(list(range(self.degree))) + + @property + def elements(self): + """Returns all the elements of the permutation group as a set + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> p = PermutationGroup(Permutation(1, 3), Permutation(1, 2)) + >>> p.elements + {(1 2 3), (1 3 2), (1 3), (2 3), (3), (3)(1 2)} + + """ + return set(self._elements) + + @property + def _elements(self): + """Returns all the elements of the permutation group as a list + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> p = PermutationGroup(Permutation(1, 3), Permutation(1, 2)) + >>> p._elements + [(3), (3)(1 2), (1 3), (2 3), (1 2 3), (1 3 2)] + + """ + return list(islice(self.generate(), None)) + + def derived_series(self): + r"""Return the derived series for the group. + + Explanation + =========== + + The derived series for a group `G` is defined as + `G = G_0 > G_1 > G_2 > \ldots` where `G_i = [G_{i-1}, G_{i-1}]`, + i.e. `G_i` is the derived subgroup of `G_{i-1}`, for + `i\in\mathbb{N}`. When we have `G_k = G_{k-1}` for some + `k\in\mathbb{N}`, the series terminates. + + Returns + ======= + + A list of permutation groups containing the members of the derived + series in the order `G = G_0, G_1, G_2, \ldots`. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import (SymmetricGroup, + ... AlternatingGroup, DihedralGroup) + >>> A = AlternatingGroup(5) + >>> len(A.derived_series()) + 1 + >>> S = SymmetricGroup(4) + >>> len(S.derived_series()) + 4 + >>> S.derived_series()[1].is_subgroup(AlternatingGroup(4)) + True + >>> S.derived_series()[2].is_subgroup(DihedralGroup(2)) + True + + See Also + ======== + + derived_subgroup + + """ + res = [self] + current = self + nxt = self.derived_subgroup() + while not current.is_subgroup(nxt): + res.append(nxt) + current = nxt + nxt = nxt.derived_subgroup() + return res + + def derived_subgroup(self): + r"""Compute the derived subgroup. + + Explanation + =========== + + The derived subgroup, or commutator subgroup is the subgroup generated + by all commutators `[g, h] = hgh^{-1}g^{-1}` for `g, h\in G` ; it is + equal to the normal closure of the set of commutators of the generators + ([1], p.28, [11]). + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> a = Permutation([1, 0, 2, 4, 3]) + >>> b = Permutation([0, 1, 3, 2, 4]) + >>> G = PermutationGroup([a, b]) + >>> C = G.derived_subgroup() + >>> list(C.generate(af=True)) + [[0, 1, 2, 3, 4], [0, 1, 3, 4, 2], [0, 1, 4, 2, 3]] + + See Also + ======== + + derived_series + + """ + r = self._r + gens = [p._array_form for p in self.generators] + set_commutators = set() + degree = self._degree + rng = list(range(degree)) + for i in range(r): + for j in range(r): + p1 = gens[i] + p2 = gens[j] + c = list(range(degree)) + for k in rng: + c[p2[p1[k]]] = p1[p2[k]] + ct = tuple(c) + if ct not in set_commutators: + set_commutators.add(ct) + cms = [_af_new(p) for p in set_commutators] + G2 = self.normal_closure(cms) + return G2 + + def generate(self, method="coset", af=False): + """Return iterator to generate the elements of the group. 
+ + Explanation + =========== + + Iteration is done with one of these methods:: + + method='coset' using the Schreier-Sims coset representation + method='dimino' using the Dimino method + + If ``af = True`` it yields the array form of the permutations + + Examples + ======== + + >>> from sympy.combinatorics import PermutationGroup + >>> from sympy.combinatorics.polyhedron import tetrahedron + + The permutation group given in the tetrahedron object is also + true groups: + + >>> G = tetrahedron.pgroup + >>> G.is_group + True + + Also the group generated by the permutations in the tetrahedron + pgroup -- even the first two -- is a proper group: + + >>> H = PermutationGroup(G[0], G[1]) + >>> J = PermutationGroup(list(H.generate())); J + PermutationGroup([ + (0 1)(2 3), + (1 2 3), + (1 3 2), + (0 3 1), + (0 2 3), + (0 3)(1 2), + (0 1 3), + (3)(0 2 1), + (0 3 2), + (3)(0 1 2), + (0 2)(1 3)]) + >>> _.is_group + True + """ + if method == "coset": + return self.generate_schreier_sims(af) + elif method == "dimino": + return self.generate_dimino(af) + else: + raise NotImplementedError('No generation defined for %s' % method) + + def generate_dimino(self, af=False): + """Yield group elements using Dimino's algorithm. + + If ``af == True`` it yields the array form of the permutations. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> a = Permutation([0, 2, 1, 3]) + >>> b = Permutation([0, 2, 3, 1]) + >>> g = PermutationGroup([a, b]) + >>> list(g.generate_dimino(af=True)) + [[0, 1, 2, 3], [0, 2, 1, 3], [0, 2, 3, 1], + [0, 1, 3, 2], [0, 3, 2, 1], [0, 3, 1, 2]] + + References + ========== + + .. [1] The Implementation of Various Algorithms for Permutation Groups in + the Computer Algebra System: AXIOM, N.J. Doye, M.Sc. 
Thesis + + """ + idn = list(range(self.degree)) + order = 0 + element_list = [idn] + set_element_list = {tuple(idn)} + if af: + yield idn + else: + yield _af_new(idn) + gens = [p._array_form for p in self.generators] + + for i in range(len(gens)): + # D elements of the subgroup G_i generated by gens[:i] + D = element_list[:] + N = [idn] + while N: + A = N + N = [] + for a in A: + for g in gens[:i + 1]: + ag = _af_rmul(a, g) + if tuple(ag) not in set_element_list: + # produce G_i*g + for d in D: + order += 1 + ap = _af_rmul(d, ag) + if af: + yield ap + else: + p = _af_new(ap) + yield p + element_list.append(ap) + set_element_list.add(tuple(ap)) + N.append(ap) + self._order = len(element_list) + + def generate_schreier_sims(self, af=False): + """Yield group elements using the Schreier-Sims representation + in coset_rank order + + If ``af = True`` it yields the array form of the permutations + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> a = Permutation([0, 2, 1, 3]) + >>> b = Permutation([0, 2, 3, 1]) + >>> g = PermutationGroup([a, b]) + >>> list(g.generate_schreier_sims(af=True)) + [[0, 1, 2, 3], [0, 2, 1, 3], [0, 3, 2, 1], + [0, 1, 3, 2], [0, 2, 3, 1], [0, 3, 1, 2]] + """ + + n = self._degree + u = self.basic_transversals + basic_orbits = self._basic_orbits + if len(u) == 0: + for x in self.generators: + if af: + yield x._array_form + else: + yield x + return + if len(u) == 1: + for i in basic_orbits[0]: + if af: + yield u[0][i]._array_form + else: + yield u[0][i] + return + + u = list(reversed(u)) + basic_orbits = basic_orbits[::-1] + # stg stack of group elements + stg = [list(range(n))] + posmax = [len(x) for x in u] + n1 = len(posmax) - 1 + pos = [0]*n1 + h = 0 + while 1: + # backtrack when finished iterating over coset + if pos[h] >= posmax[h]: + if h == 0: + return + pos[h] = 0 + h -= 1 + stg.pop() + continue + p = _af_rmul(u[h][basic_orbits[h][pos[h]]]._array_form, stg[-1]) + pos[h] += 1 + stg.append(p) + h += 1 + if h == n1: + if af: + for i in basic_orbits[-1]: + p = _af_rmul(u[-1][i]._array_form, stg[-1]) + yield p + else: + for i in basic_orbits[-1]: + p = _af_rmul(u[-1][i]._array_form, stg[-1]) + p1 = _af_new(p) + yield p1 + stg.pop() + h -= 1 + + @property + def generators(self): + """Returns the generators of the group. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> a = Permutation([0, 2, 1]) + >>> b = Permutation([1, 0, 2]) + >>> G = PermutationGroup([a, b]) + >>> G.generators + [(1 2), (2)(0 1)] + + """ + return self._generators + + def contains(self, g, strict=True): + """Test if permutation ``g`` belong to self, ``G``. + + Explanation + =========== + + If ``g`` is an element of ``G`` it can be written as a product + of factors drawn from the cosets of ``G``'s stabilizers. To see + if ``g`` is one of the actual generators defining the group use + ``G.has(g)``. + + If ``strict`` is not ``True``, ``g`` will be resized, if necessary, + to match the size of permutations in ``self``. 
+ + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + + >>> a = Permutation(1, 2) + >>> b = Permutation(2, 3, 1) + >>> G = PermutationGroup(a, b, degree=5) + >>> G.contains(G[0]) # trivial check + True + >>> elem = Permutation([[2, 3]], size=5) + >>> G.contains(elem) + True + >>> G.contains(Permutation(4)(0, 1, 2, 3)) + False + + If strict is False, a permutation will be resized, if + necessary: + + >>> H = PermutationGroup(Permutation(5)) + >>> H.contains(Permutation(3)) + False + >>> H.contains(Permutation(3), strict=False) + True + + To test if a given permutation is present in the group: + + >>> elem in G.generators + False + >>> G.has(elem) + False + + See Also + ======== + + coset_factor, sympy.core.basic.Basic.has, __contains__ + + """ + if not isinstance(g, Permutation): + return False + if g.size != self.degree: + if strict: + return False + g = Permutation(g, size=self.degree) + if g in self.generators: + return True + return bool(self.coset_factor(g.array_form, True)) + + @property + def is_perfect(self): + """Return ``True`` if the group is perfect. + A group is perfect if it equals to its derived subgroup. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> a = Permutation(1,2,3)(4,5) + >>> b = Permutation(1,2,3,4,5) + >>> G = PermutationGroup([a, b]) + >>> G.is_perfect + False + + """ + if self._is_perfect is None: + self._is_perfect = self.equals(self.derived_subgroup()) + return self._is_perfect + + @property + def is_abelian(self): + """Test if the group is Abelian. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> a = Permutation([0, 2, 1]) + >>> b = Permutation([1, 0, 2]) + >>> G = PermutationGroup([a, b]) + >>> G.is_abelian + False + >>> a = Permutation([0, 2, 1]) + >>> G = PermutationGroup([a]) + >>> G.is_abelian + True + + """ + if self._is_abelian is not None: + return self._is_abelian + + self._is_abelian = True + gens = [p._array_form for p in self.generators] + for x in gens: + for y in gens: + if y <= x: + continue + if not _af_commutes_with(x, y): + self._is_abelian = False + return False + return True + + def abelian_invariants(self): + """ + Returns the abelian invariants for the given group. + Let ``G`` be a nontrivial finite abelian group. Then G is isomorphic to + the direct product of finitely many nontrivial cyclic groups of + prime-power order. + + Explanation + =========== + + The prime-powers that occur as the orders of the factors are uniquely + determined by G. More precisely, the primes that occur in the orders of the + factors in any such decomposition of ``G`` are exactly the primes that divide + ``|G|`` and for any such prime ``p``, if the orders of the factors that are + p-groups in one such decomposition of ``G`` are ``p^{t_1} >= p^{t_2} >= ... p^{t_r}``, + then the orders of the factors that are p-groups in any such decomposition of ``G`` + are ``p^{t_1} >= p^{t_2} >= ... p^{t_r}``. + + The uniquely determined integers ``p^{t_1} >= p^{t_2} >= ... p^{t_r}``, taken + for all primes that divide ``|G|`` are called the invariants of the nontrivial + group ``G`` as suggested in ([14], p. 542). + + Notes + ===== + + We adopt the convention that the invariants of a trivial group are []. 
+ + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> a = Permutation([0, 2, 1]) + >>> b = Permutation([1, 0, 2]) + >>> G = PermutationGroup([a, b]) + >>> G.abelian_invariants() + [2] + >>> from sympy.combinatorics import CyclicGroup + >>> G = CyclicGroup(7) + >>> G.abelian_invariants() + [7] + + """ + if self.is_trivial: + return [] + gns = self.generators + inv = [] + G = self + H = G.derived_subgroup() + Hgens = H.generators + for p in primefactors(G.order()): + ranks = [] + while True: + pows = [] + for g in gns: + elm = g**p + if not H.contains(elm): + pows.append(elm) + K = PermutationGroup(Hgens + pows) if pows else H + r = G.order()//K.order() + G = K + gns = pows + if r == 1: + break + ranks.append(multiplicity(p, r)) + + if ranks: + pows = [1]*ranks[0] + for i in ranks: + for j in range(i): + pows[j] = pows[j]*p + inv.extend(pows) + inv.sort() + return inv + + def is_elementary(self, p): + """Return ``True`` if the group is elementary abelian. An elementary + abelian group is a finite abelian group, where every nontrivial + element has order `p`, where `p` is a prime. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> a = Permutation([0, 2, 1]) + >>> G = PermutationGroup([a]) + >>> G.is_elementary(2) + True + >>> a = Permutation([0, 2, 1, 3]) + >>> b = Permutation([3, 1, 2, 0]) + >>> G = PermutationGroup([a, b]) + >>> G.is_elementary(2) + True + >>> G.is_elementary(3) + False + + """ + return self.is_abelian and all(g.order() == p for g in self.generators) + + def _eval_is_alt_sym_naive(self, only_sym=False, only_alt=False): + """A naive test using the group order.""" + if only_sym and only_alt: + raise ValueError( + "Both {} and {} cannot be set to True" + .format(only_sym, only_alt)) + + n = self.degree + sym_order = _factorial(n) + order = self.order() + + if order == sym_order: + self._is_sym = True + self._is_alt = False + if only_alt: + return False + return True + + elif 2*order == sym_order: + self._is_sym = False + self._is_alt = True + if only_sym: + return False + return True + + return False + + def _eval_is_alt_sym_monte_carlo(self, eps=0.05, perms=None): + """A test using monte-carlo algorithm. + + Parameters + ========== + + eps : float, optional + The criterion for the incorrect ``False`` return. + + perms : list[Permutation], optional + If explicitly given, it tests over the given candidates + for testing. + + If ``None``, it randomly computes ``N_eps`` and chooses + ``N_eps`` sample of the permutation from the group. + + See Also + ======== + + _check_cycles_alt_sym + """ + if perms is None: + n = self.degree + if n < 17: + c_n = 0.34 + else: + c_n = 0.57 + d_n = (c_n*log(2))/log(n) + N_eps = int(-log(eps)/d_n) + + perms = (self.random_pr() for i in range(N_eps)) + return self._eval_is_alt_sym_monte_carlo(perms=perms) + + for perm in perms: + if _check_cycles_alt_sym(perm): + return True + return False + + def is_alt_sym(self, eps=0.05, _random_prec=None): + r"""Monte Carlo test for the symmetric/alternating group for degrees + >= 8. + + Explanation + =========== + + More specifically, it is one-sided Monte Carlo with the + answer True (i.e., G is symmetric/alternating) guaranteed to be + correct, and the answer False being incorrect with probability eps. + + For degree < 8, the order of the group is checked so the test + is deterministic. 
+ + Notes + ===== + + The algorithm itself uses some nontrivial results from group theory and + number theory: + 1) If a transitive group ``G`` of degree ``n`` contains an element + with a cycle of length ``n/2 < p < n-2`` for ``p`` a prime, ``G`` is the + symmetric or alternating group ([1], pp. 81-82) + 2) The proportion of elements in the symmetric/alternating group having + the property described in 1) is approximately `\log(2)/\log(n)` + ([1], p.82; [2], pp. 226-227). + The helper function ``_check_cycles_alt_sym`` is used to + go over the cycles in a permutation and look for ones satisfying 1). + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import DihedralGroup + >>> D = DihedralGroup(10) + >>> D.is_alt_sym() + False + + See Also + ======== + + _check_cycles_alt_sym + + """ + if _random_prec is not None: + N_eps = _random_prec['N_eps'] + perms= (_random_prec[i] for i in range(N_eps)) + return self._eval_is_alt_sym_monte_carlo(perms=perms) + + if self._is_sym or self._is_alt: + return True + if self._is_sym is False and self._is_alt is False: + return False + + n = self.degree + if n < 8: + return self._eval_is_alt_sym_naive() + elif self.is_transitive(): + return self._eval_is_alt_sym_monte_carlo(eps=eps) + + self._is_sym, self._is_alt = False, False + return False + + @property + def is_nilpotent(self): + """Test if the group is nilpotent. + + Explanation + =========== + + A group `G` is nilpotent if it has a central series of finite length. + Alternatively, `G` is nilpotent if its lower central series terminates + with the trivial group. Every nilpotent group is also solvable + ([1], p.29, [12]). + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import (SymmetricGroup, + ... CyclicGroup) + >>> C = CyclicGroup(6) + >>> C.is_nilpotent + True + >>> S = SymmetricGroup(5) + >>> S.is_nilpotent + False + + See Also + ======== + + lower_central_series, is_solvable + + """ + if self._is_nilpotent is None: + lcs = self.lower_central_series() + terminator = lcs[len(lcs) - 1] + gens = terminator.generators + degree = self.degree + identity = _af_new(list(range(degree))) + if all(g == identity for g in gens): + self._is_solvable = True + self._is_nilpotent = True + return True + else: + self._is_nilpotent = False + return False + else: + return self._is_nilpotent + + def is_normal(self, gr, strict=True): + """Test if ``G=self`` is a normal subgroup of ``gr``. + + Explanation + =========== + + G is normal in gr if + for each g2 in G, g1 in gr, ``g = g1*g2*g1**-1`` belongs to G + It is sufficient to check this for each g1 in gr.generators and + g2 in G.generators. 
+ + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> a = Permutation([1, 2, 0]) + >>> b = Permutation([1, 0, 2]) + >>> G = PermutationGroup([a, b]) + >>> G1 = PermutationGroup([a, Permutation([2, 0, 1])]) + >>> G1.is_normal(G) + True + + """ + if not self.is_subgroup(gr, strict=strict): + return False + d_self = self.degree + d_gr = gr.degree + if self.is_trivial and (d_self == d_gr or not strict): + return True + if self._is_abelian: + return True + new_self = self.copy() + if not strict and d_self != d_gr: + if d_self < d_gr: + new_self = PermGroup(new_self.generators + [Permutation(d_gr - 1)]) + else: + gr = PermGroup(gr.generators + [Permutation(d_self - 1)]) + gens2 = [p._array_form for p in new_self.generators] + gens1 = [p._array_form for p in gr.generators] + for g1 in gens1: + for g2 in gens2: + p = _af_rmuln(g1, g2, _af_invert(g1)) + if not new_self.coset_factor(p, True): + return False + return True + + def is_primitive(self, randomized=True): + r"""Test if a group is primitive. + + Explanation + =========== + + A permutation group ``G`` acting on a set ``S`` is called primitive if + ``S`` contains no nontrivial block under the action of ``G`` + (a block is nontrivial if its cardinality is more than ``1``). + + Notes + ===== + + The algorithm is described in [1], p.83, and uses the function + minimal_block to search for blocks of the form `\{0, k\}` for ``k`` + ranging over representatives for the orbits of `G_0`, the stabilizer of + ``0``. This algorithm has complexity `O(n^2)` where ``n`` is the degree + of the group, and will perform badly if `G_0` is small. + + There are two implementations offered: one finds `G_0` + deterministically using the function ``stabilizer``, and the other + (default) produces random elements of `G_0` using ``random_stab``, + hoping that they generate a subgroup of `G_0` with not too many more + orbits than `G_0` (this is suggested in [1], p.83). Behavior is changed + by the ``randomized`` flag. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import DihedralGroup + >>> D = DihedralGroup(10) + >>> D.is_primitive() + False + + See Also + ======== + + minimal_block, random_stab + + """ + if self._is_primitive is not None: + return self._is_primitive + + if self.is_transitive() is False: + return False + + if randomized: + random_stab_gens = [] + v = self.schreier_vector(0) + for _ in range(len(self)): + random_stab_gens.append(self.random_stab(0, v)) + stab = PermutationGroup(random_stab_gens) + else: + stab = self.stabilizer(0) + orbits = stab.orbits() + for orb in orbits: + x = orb.pop() + if x != 0 and any(e != 0 for e in self.minimal_block([0, x])): + self._is_primitive = False + return False + self._is_primitive = True + return True + + def minimal_blocks(self, randomized=True): + ''' + For a transitive group, return the list of all minimal + block systems. If a group is intransitive, return `False`. 
+ + Examples + ======== + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> from sympy.combinatorics.named_groups import DihedralGroup + >>> DihedralGroup(6).minimal_blocks() + [[0, 1, 0, 1, 0, 1], [0, 1, 2, 0, 1, 2]] + >>> G = PermutationGroup(Permutation(1,2,5)) + >>> G.minimal_blocks() + False + + See Also + ======== + + minimal_block, is_transitive, is_primitive + + ''' + def _number_blocks(blocks): + # number the blocks of a block system + # in order and return the number of + # blocks and the tuple with the + # reordering + n = len(blocks) + appeared = {} + m = 0 + b = [None]*n + for i in range(n): + if blocks[i] not in appeared: + appeared[blocks[i]] = m + b[i] = m + m += 1 + else: + b[i] = appeared[blocks[i]] + return tuple(b), m + + if not self.is_transitive(): + return False + blocks = [] + num_blocks = [] + rep_blocks = [] + if randomized: + random_stab_gens = [] + v = self.schreier_vector(0) + for i in range(len(self)): + random_stab_gens.append(self.random_stab(0, v)) + stab = PermutationGroup(random_stab_gens) + else: + stab = self.stabilizer(0) + orbits = stab.orbits() + for orb in orbits: + x = orb.pop() + if x != 0: + block = self.minimal_block([0, x]) + num_block, _ = _number_blocks(block) + # a representative block (containing 0) + rep = {j for j in range(self.degree) if num_block[j] == 0} + # check if the system is minimal with + # respect to the already discovere ones + minimal = True + blocks_remove_mask = [False] * len(blocks) + for i, r in enumerate(rep_blocks): + if len(r) > len(rep) and rep.issubset(r): + # i-th block system is not minimal + blocks_remove_mask[i] = True + elif len(r) < len(rep) and r.issubset(rep): + # the system being checked is not minimal + minimal = False + break + # remove non-minimal representative blocks + blocks = [b for i, b in enumerate(blocks) if not blocks_remove_mask[i]] + num_blocks = [n for i, n in enumerate(num_blocks) if not blocks_remove_mask[i]] + rep_blocks = [r for i, r in enumerate(rep_blocks) if not blocks_remove_mask[i]] + + if minimal and num_block not in num_blocks: + blocks.append(block) + num_blocks.append(num_block) + rep_blocks.append(rep) + return blocks + + @property + def is_solvable(self): + """Test if the group is solvable. + + ``G`` is solvable if its derived series terminates with the trivial + group ([1], p.29). + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import SymmetricGroup + >>> S = SymmetricGroup(3) + >>> S.is_solvable + True + + See Also + ======== + + is_nilpotent, derived_series + + """ + if self._is_solvable is None: + if self.order() % 2 != 0: + return True + ds = self.derived_series() + terminator = ds[len(ds) - 1] + gens = terminator.generators + degree = self.degree + identity = _af_new(list(range(degree))) + if all(g == identity for g in gens): + self._is_solvable = True + return True + else: + self._is_solvable = False + return False + else: + return self._is_solvable + + def is_subgroup(self, G, strict=True): + """Return ``True`` if all elements of ``self`` belong to ``G``. + + If ``strict`` is ``False`` then if ``self``'s degree is smaller + than ``G``'s, the elements will be resized to have the same degree. 
+ + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> from sympy.combinatorics import SymmetricGroup, CyclicGroup + + Testing is strict by default: the degree of each group must be the + same: + + >>> p = Permutation(0, 1, 2, 3, 4, 5) + >>> G1 = PermutationGroup([Permutation(0, 1, 2), Permutation(0, 1)]) + >>> G2 = PermutationGroup([Permutation(0, 2), Permutation(0, 1, 2)]) + >>> G3 = PermutationGroup([p, p**2]) + >>> assert G1.order() == G2.order() == G3.order() == 6 + >>> G1.is_subgroup(G2) + True + >>> G1.is_subgroup(G3) + False + >>> G3.is_subgroup(PermutationGroup(G3[1])) + False + >>> G3.is_subgroup(PermutationGroup(G3[0])) + True + + To ignore the size, set ``strict`` to ``False``: + + >>> S3 = SymmetricGroup(3) + >>> S5 = SymmetricGroup(5) + >>> S3.is_subgroup(S5, strict=False) + True + >>> C7 = CyclicGroup(7) + >>> G = S5*C7 + >>> S5.is_subgroup(G, False) + True + >>> C7.is_subgroup(G, 0) + False + + """ + if isinstance(G, SymmetricPermutationGroup): + if self.degree != G.degree: + return False + return True + if not isinstance(G, PermutationGroup): + return False + if self == G or self.generators[0]==Permutation(): + return True + if G.order() % self.order() != 0: + return False + if self.degree == G.degree or \ + (self.degree < G.degree and not strict): + gens = self.generators + else: + return False + return all(G.contains(g, strict=strict) for g in gens) + + @property + def is_polycyclic(self): + """Return ``True`` if a group is polycyclic. A group is polycyclic if + it has a subnormal series with cyclic factors. For finite groups, + this is the same as if the group is solvable. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> a = Permutation([0, 2, 1, 3]) + >>> b = Permutation([2, 0, 1, 3]) + >>> G = PermutationGroup([a, b]) + >>> G.is_polycyclic + True + + """ + return self.is_solvable + + def is_transitive(self, strict=True): + """Test if the group is transitive. + + Explanation + =========== + + A group is transitive if it has a single orbit. + + If ``strict`` is ``False`` the group is transitive if it has + a single orbit of length different from 1. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> a = Permutation([0, 2, 1, 3]) + >>> b = Permutation([2, 0, 1, 3]) + >>> G1 = PermutationGroup([a, b]) + >>> G1.is_transitive() + False + >>> G1.is_transitive(strict=False) + True + >>> c = Permutation([2, 3, 0, 1]) + >>> G2 = PermutationGroup([a, c]) + >>> G2.is_transitive() + True + >>> d = Permutation([1, 0, 2, 3]) + >>> e = Permutation([0, 1, 3, 2]) + >>> G3 = PermutationGroup([d, e]) + >>> G3.is_transitive() or G3.is_transitive(strict=False) + False + + """ + if self._is_transitive: # strict or not, if True then True + return self._is_transitive + if strict: + if self._is_transitive is not None: # we only store strict=True + return self._is_transitive + + ans = len(self.orbit(0)) == self.degree + self._is_transitive = ans + return ans + + got_orb = False + for x in self.orbits(): + if len(x) > 1: + if got_orb: + return False + got_orb = True + return got_orb + + @property + def is_trivial(self): + """Test if the group is the trivial group. + + This is true if the group contains only the identity permutation. 
+ + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> G = PermutationGroup([Permutation([0, 1, 2])]) + >>> G.is_trivial + True + + """ + if self._is_trivial is None: + self._is_trivial = len(self) == 1 and self[0].is_Identity + return self._is_trivial + + def lower_central_series(self): + r"""Return the lower central series for the group. + + The lower central series for a group `G` is the series + `G = G_0 > G_1 > G_2 > \ldots` where + `G_k = [G, G_{k-1}]`, i.e. every term after the first is equal to the + commutator of `G` and the previous term in `G1` ([1], p.29). + + Returns + ======= + + A list of permutation groups in the order `G = G_0, G_1, G_2, \ldots` + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import (AlternatingGroup, + ... DihedralGroup) + >>> A = AlternatingGroup(4) + >>> len(A.lower_central_series()) + 2 + >>> A.lower_central_series()[1].is_subgroup(DihedralGroup(2)) + True + + See Also + ======== + + commutator, derived_series + + """ + res = [self] + current = self + nxt = self.commutator(self, current) + while not current.is_subgroup(nxt): + res.append(nxt) + current = nxt + nxt = self.commutator(self, current) + return res + + @property + def max_div(self): + """Maximum proper divisor of the degree of a permutation group. + + Explanation + =========== + + Obviously, this is the degree divided by its minimal proper divisor + (larger than ``1``, if one exists). As it is guaranteed to be prime, + the ``sieve`` from ``sympy.ntheory`` is used. + This function is also used as an optimization tool for the functions + ``minimal_block`` and ``_union_find_merge``. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> G = PermutationGroup([Permutation([0, 2, 1, 3])]) + >>> G.max_div + 2 + + See Also + ======== + + minimal_block, _union_find_merge + + """ + if self._max_div is not None: + return self._max_div + n = self.degree + if n == 1: + return 1 + for x in sieve: + if n % x == 0: + d = n//x + self._max_div = d + return d + + def minimal_block(self, points): + r"""For a transitive group, finds the block system generated by + ``points``. + + Explanation + =========== + + If a group ``G`` acts on a set ``S``, a nonempty subset ``B`` of ``S`` + is called a block under the action of ``G`` if for all ``g`` in ``G`` + we have ``gB = B`` (``g`` fixes ``B``) or ``gB`` and ``B`` have no + common points (``g`` moves ``B`` entirely). ([1], p.23; [6]). + + The distinct translates ``gB`` of a block ``B`` for ``g`` in ``G`` + partition the set ``S`` and this set of translates is known as a block + system. Moreover, we obviously have that all blocks in the partition + have the same size, hence the block size divides ``|S|`` ([1], p.23). + A ``G``-congruence is an equivalence relation ``~`` on the set ``S`` + such that ``a ~ b`` implies ``g(a) ~ g(b)`` for all ``g`` in ``G``. + For a transitive group, the equivalence classes of a ``G``-congruence + and the blocks of a block system are the same thing ([1], p.23). + + The algorithm below checks the group for transitivity, and then finds + the ``G``-congruence generated by the pairs ``(p_0, p_1), (p_0, p_2), + ..., (p_0,p_{k-1})`` which is the same as finding the maximal block + system (i.e., the one with minimum block size) such that + ``p_0, ..., p_{k-1}`` are in the same block ([1], p.83). 
+ + It is an implementation of Atkinson's algorithm, as suggested in [1], + and manipulates an equivalence relation on the set ``S`` using a + union-find data structure. The running time is just above + `O(|points||S|)`. ([1], pp. 83-87; [7]). + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import DihedralGroup + >>> D = DihedralGroup(10) + >>> D.minimal_block([0, 5]) + [0, 1, 2, 3, 4, 0, 1, 2, 3, 4] + >>> D.minimal_block([0, 1]) + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + See Also + ======== + + _union_find_rep, _union_find_merge, is_transitive, is_primitive + + """ + if not self.is_transitive(): + return False + n = self.degree + gens = self.generators + # initialize the list of equivalence class representatives + parents = list(range(n)) + ranks = [1]*n + not_rep = [] + k = len(points) + # the block size must divide the degree of the group + if k > self.max_div: + return [0]*n + for i in range(k - 1): + parents[points[i + 1]] = points[0] + not_rep.append(points[i + 1]) + ranks[points[0]] = k + i = 0 + len_not_rep = k - 1 + while i < len_not_rep: + gamma = not_rep[i] + i += 1 + for gen in gens: + # find has side effects: performs path compression on the list + # of representatives + delta = self._union_find_rep(gamma, parents) + # union has side effects: performs union by rank on the list + # of representatives + temp = self._union_find_merge(gen(gamma), gen(delta), ranks, + parents, not_rep) + if temp == -1: + return [0]*n + len_not_rep += temp + for i in range(n): + # force path compression to get the final state of the equivalence + # relation + self._union_find_rep(i, parents) + + # rewrite result so that block representatives are minimal + new_reps = {} + return [new_reps.setdefault(r, i) for i, r in enumerate(parents)] + + def conjugacy_class(self, x): + r"""Return the conjugacy class of an element in the group. + + Explanation + =========== + + The conjugacy class of an element ``g`` in a group ``G`` is the set of + elements ``x`` in ``G`` that are conjugate with ``g``, i.e. for which + + ``g = xax^{-1}`` + + for some ``a`` in ``G``. + + Note that conjugacy is an equivalence relation, and therefore that + conjugacy classes are partitions of ``G``. For a list of all the + conjugacy classes of the group, use the conjugacy_classes() method. + + In a permutation group, each conjugacy class corresponds to a particular + `cycle structure': for example, in ``S_3``, the conjugacy classes are: + + * the identity class, ``{()}`` + * all transpositions, ``{(1 2), (1 3), (2 3)}`` + * all 3-cycles, ``{(1 2 3), (1 3 2)}`` + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, SymmetricGroup + >>> S3 = SymmetricGroup(3) + >>> S3.conjugacy_class(Permutation(0, 1, 2)) + {(0 1 2), (0 2 1)} + + Notes + ===== + + This procedure computes the conjugacy class directly by finding the + orbit of the element under conjugation in G. This algorithm is only + feasible for permutation groups of relatively small order, but is like + the orbit() function itself in that respect. + """ + # Ref: "Computing the conjugacy classes of finite groups"; Butler, G. + # Groups '93 Galway/St Andrews; edited by Campbell, C. M. 
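+        # The class is computed as the orbit of ``x`` under conjugation by the
+        # group generators: in each round, the elements found in the previous
+        # round are conjugated by every generator and any conjugates not seen
+        # before are kept, until no new elements appear.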
+ new_class = {x} + last_iteration = new_class + + while len(last_iteration) > 0: + this_iteration = set() + + for y in last_iteration: + for s in self.generators: + conjugated = s * y * (~s) + if conjugated not in new_class: + this_iteration.add(conjugated) + + new_class.update(last_iteration) + last_iteration = this_iteration + + return new_class + + + def conjugacy_classes(self): + r"""Return the conjugacy classes of the group. + + Explanation + =========== + + As described in the documentation for the .conjugacy_class() function, + conjugacy is an equivalence relation on a group G which partitions the + set of elements. This method returns a list of all these conjugacy + classes of G. + + Examples + ======== + + >>> from sympy.combinatorics import SymmetricGroup + >>> SymmetricGroup(3).conjugacy_classes() + [{(2)}, {(0 1 2), (0 2 1)}, {(0 2), (1 2), (2)(0 1)}] + + """ + identity = _af_new(list(range(self.degree))) + known_elements = {identity} + classes = [known_elements.copy()] + + for x in self.generate(): + if x not in known_elements: + new_class = self.conjugacy_class(x) + classes.append(new_class) + known_elements.update(new_class) + + return classes + + def normal_closure(self, other, k=10): + r"""Return the normal closure of a subgroup/set of permutations. + + Explanation + =========== + + If ``S`` is a subset of a group ``G``, the normal closure of ``A`` in ``G`` + is defined as the intersection of all normal subgroups of ``G`` that + contain ``A`` ([1], p.14). Alternatively, it is the group generated by + the conjugates ``x^{-1}yx`` for ``x`` a generator of ``G`` and ``y`` a + generator of the subgroup ``\left\langle S\right\rangle`` generated by + ``S`` (for some chosen generating set for ``\left\langle S\right\rangle``) + ([1], p.73). + + Parameters + ========== + + other + a subgroup/list of permutations/single permutation + k + an implementation-specific parameter that determines the number + of conjugates that are adjoined to ``other`` at once + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import (SymmetricGroup, + ... CyclicGroup, AlternatingGroup) + >>> S = SymmetricGroup(5) + >>> C = CyclicGroup(5) + >>> G = S.normal_closure(C) + >>> G.order() + 60 + >>> G.is_subgroup(AlternatingGroup(5)) + True + + See Also + ======== + + commutator, derived_subgroup, random_pr + + Notes + ===== + + The algorithm is described in [1], pp. 73-74; it makes use of the + generation of random elements for permutation groups by the product + replacement algorithm. 
+ + """ + if hasattr(other, 'generators'): + degree = self.degree + identity = _af_new(list(range(degree))) + + if all(g == identity for g in other.generators): + return other + Z = PermutationGroup(other.generators[:]) + base, strong_gens = Z.schreier_sims_incremental() + strong_gens_distr = _distribute_gens_by_base(base, strong_gens) + basic_orbits, basic_transversals = \ + _orbits_transversals_from_bsgs(base, strong_gens_distr) + + self._random_pr_init(r=10, n=20) + + _loop = True + while _loop: + Z._random_pr_init(r=10, n=10) + for _ in range(k): + g = self.random_pr() + h = Z.random_pr() + conj = h^g + res = _strip(conj, base, basic_orbits, basic_transversals) + if res[0] != identity or res[1] != len(base) + 1: + gens = Z.generators + gens.append(conj) + Z = PermutationGroup(gens) + strong_gens.append(conj) + temp_base, temp_strong_gens = \ + Z.schreier_sims_incremental(base, strong_gens) + base, strong_gens = temp_base, temp_strong_gens + strong_gens_distr = \ + _distribute_gens_by_base(base, strong_gens) + basic_orbits, basic_transversals = \ + _orbits_transversals_from_bsgs(base, + strong_gens_distr) + _loop = False + for g in self.generators: + for h in Z.generators: + conj = h^g + res = _strip(conj, base, basic_orbits, + basic_transversals) + if res[0] != identity or res[1] != len(base) + 1: + _loop = True + break + if _loop: + break + return Z + elif hasattr(other, '__getitem__'): + return self.normal_closure(PermutationGroup(other)) + elif hasattr(other, 'array_form'): + return self.normal_closure(PermutationGroup([other])) + + def orbit(self, alpha, action='tuples'): + r"""Compute the orbit of alpha `\{g(\alpha) | g \in G\}` as a set. + + Explanation + =========== + + The time complexity of the algorithm used here is `O(|Orb|*r)` where + `|Orb|` is the size of the orbit and ``r`` is the number of generators of + the group. For a more detailed analysis, see [1], p.78, [2], pp. 19-21. + Here alpha can be a single point, or a list of points. + + If alpha is a single point, the ordinary orbit is computed. + if alpha is a list of points, there are three available options: + + 'union' - computes the union of the orbits of the points in the list + 'tuples' - computes the orbit of the list interpreted as an ordered + tuple under the group action ( i.e., g((1,2,3)) = (g(1), g(2), g(3)) ) + 'sets' - computes the orbit of the list interpreted as a sets + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> a = Permutation([1, 2, 0, 4, 5, 6, 3]) + >>> G = PermutationGroup([a]) + >>> G.orbit(0) + {0, 1, 2} + >>> G.orbit([0, 4], 'union') + {0, 1, 2, 3, 4, 5, 6} + + See Also + ======== + + orbit_transversal + + """ + return _orbit(self.degree, self.generators, alpha, action) + + def orbit_rep(self, alpha, beta, schreier_vector=None): + """Return a group element which sends ``alpha`` to ``beta``. + + Explanation + =========== + + If ``beta`` is not in the orbit of ``alpha``, the function returns + ``False``. This implementation makes use of the schreier vector. 
+ For a proof of correctness, see [1], p.80 + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import AlternatingGroup + >>> G = AlternatingGroup(5) + >>> G.orbit_rep(0, 4) + (0 4 1 2 3) + + See Also + ======== + + schreier_vector + + """ + if schreier_vector is None: + schreier_vector = self.schreier_vector(alpha) + if schreier_vector[beta] is None: + return False + k = schreier_vector[beta] + gens = [x._array_form for x in self.generators] + a = [] + while k != -1: + a.append(gens[k]) + beta = gens[k].index(beta) # beta = (~gens[k])(beta) + k = schreier_vector[beta] + if a: + return _af_new(_af_rmuln(*a)) + else: + return _af_new(list(range(self._degree))) + + def orbit_transversal(self, alpha, pairs=False): + r"""Computes a transversal for the orbit of ``alpha`` as a set. + + Explanation + =========== + + For a permutation group `G`, a transversal for the orbit + `Orb = \{g(\alpha) | g \in G\}` is a set + `\{g_\beta | g_\beta(\alpha) = \beta\}` for `\beta \in Orb`. + Note that there may be more than one possible transversal. + If ``pairs`` is set to ``True``, it returns the list of pairs + `(\beta, g_\beta)`. For a proof of correctness, see [1], p.79 + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import DihedralGroup + >>> G = DihedralGroup(6) + >>> G.orbit_transversal(0) + [(5), (0 1 2 3 4 5), (0 5)(1 4)(2 3), (0 2 4)(1 3 5), (5)(0 4)(1 3), (0 3)(1 4)(2 5)] + + See Also + ======== + + orbit + + """ + return _orbit_transversal(self._degree, self.generators, alpha, pairs) + + def orbits(self, rep=False): + """Return the orbits of ``self``, ordered according to lowest element + in each orbit. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> a = Permutation(1, 5)(2, 3)(4, 0, 6) + >>> b = Permutation(1, 5)(3, 4)(2, 6, 0) + >>> G = PermutationGroup([a, b]) + >>> G.orbits() + [{0, 2, 3, 4, 6}, {1, 5}] + """ + return _orbits(self._degree, self._generators) + + def order(self): + """Return the order of the group: the number of permutations that + can be generated from elements of the group. + + The number of permutations comprising the group is given by + ``len(group)``; the length of each permutation in the group is + given by ``group.size``. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + + >>> a = Permutation([1, 0, 2]) + >>> G = PermutationGroup([a]) + >>> G.degree + 3 + >>> len(G) + 1 + >>> G.order() + 2 + >>> list(G.generate()) + [(2), (2)(0 1)] + + >>> a = Permutation([0, 2, 1]) + >>> b = Permutation([1, 0, 2]) + >>> G = PermutationGroup([a, b]) + >>> G.order() + 6 + + See Also + ======== + + degree + + """ + if self._order is not None: + return self._order + if self._is_sym: + n = self._degree + self._order = factorial(n) + return self._order + if self._is_alt: + n = self._degree + self._order = factorial(n)/2 + return self._order + + m = prod([len(x) for x in self.basic_transversals]) + self._order = m + return m + + def index(self, H): + """ + Returns the index of a permutation group. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> a = Permutation(1,2,3) + >>> b =Permutation(3) + >>> G = PermutationGroup([a]) + >>> H = PermutationGroup([b]) + >>> G.index(H) + 3 + + """ + if H.is_subgroup(self): + return self.order()//H.order() + + @property + def is_symmetric(self): + """Return ``True`` if the group is symmetric. 
+ + Examples + ======== + + >>> from sympy.combinatorics import SymmetricGroup + >>> g = SymmetricGroup(5) + >>> g.is_symmetric + True + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> g = PermutationGroup( + ... Permutation(0, 1, 2, 3, 4), + ... Permutation(2, 3)) + >>> g.is_symmetric + True + + Notes + ===== + + This uses a naive test involving the computation of the full + group order. + If you need more quicker taxonomy for large groups, you can use + :meth:`PermutationGroup.is_alt_sym`. + However, :meth:`PermutationGroup.is_alt_sym` may not be accurate + and is not able to distinguish between an alternating group and + a symmetric group. + + See Also + ======== + + is_alt_sym + """ + _is_sym = self._is_sym + if _is_sym is not None: + return _is_sym + + n = self.degree + if n >= 8: + if self.is_transitive(): + _is_alt_sym = self._eval_is_alt_sym_monte_carlo() + if _is_alt_sym: + if any(g.is_odd for g in self.generators): + self._is_sym, self._is_alt = True, False + return True + + self._is_sym, self._is_alt = False, True + return False + + return self._eval_is_alt_sym_naive(only_sym=True) + + self._is_sym, self._is_alt = False, False + return False + + return self._eval_is_alt_sym_naive(only_sym=True) + + + @property + def is_alternating(self): + """Return ``True`` if the group is alternating. + + Examples + ======== + + >>> from sympy.combinatorics import AlternatingGroup + >>> g = AlternatingGroup(5) + >>> g.is_alternating + True + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> g = PermutationGroup( + ... Permutation(0, 1, 2, 3, 4), + ... Permutation(2, 3, 4)) + >>> g.is_alternating + True + + Notes + ===== + + This uses a naive test involving the computation of the full + group order. + If you need more quicker taxonomy for large groups, you can use + :meth:`PermutationGroup.is_alt_sym`. + However, :meth:`PermutationGroup.is_alt_sym` may not be accurate + and is not able to distinguish between an alternating group and + a symmetric group. + + See Also + ======== + + is_alt_sym + """ + _is_alt = self._is_alt + if _is_alt is not None: + return _is_alt + + n = self.degree + if n >= 8: + if self.is_transitive(): + _is_alt_sym = self._eval_is_alt_sym_monte_carlo() + if _is_alt_sym: + if all(g.is_even for g in self.generators): + self._is_sym, self._is_alt = False, True + return True + + self._is_sym, self._is_alt = True, False + return False + + return self._eval_is_alt_sym_naive(only_alt=True) + + self._is_sym, self._is_alt = False, False + return False + + return self._eval_is_alt_sym_naive(only_alt=True) + + @classmethod + def _distinct_primes_lemma(cls, primes): + """Subroutine to test if there is only one cyclic group for the + order.""" + primes = sorted(primes) + l = len(primes) + for i in range(l): + for j in range(i+1, l): + if primes[j] % primes[i] == 1: + return None + return True + + @property + def is_cyclic(self): + r""" + Return ``True`` if the group is Cyclic. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import AbelianGroup + >>> G = AbelianGroup(3, 4) + >>> G.is_cyclic + True + >>> G = AbelianGroup(4, 4) + >>> G.is_cyclic + False + + Notes + ===== + + If the order of a group $n$ can be factored into the distinct + primes $p_1, p_2, \dots , p_s$ and if + + .. math:: + \forall i, j \in \{1, 2, \dots, s \}: + p_i \not \equiv 1 \pmod {p_j} + + holds true, there is only one group of the order $n$ which + is a cyclic group [1]_. 
This is a generalization of the lemma + that the group of order $15, 35, \dots$ are cyclic. + + And also, these additional lemmas can be used to test if a + group is cyclic if the order of the group is already found. + + - If the group is abelian and the order of the group is + square-free, the group is cyclic. + - If the order of the group is less than $6$ and is not $4$, the + group is cyclic. + - If the order of the group is prime, the group is cyclic. + + References + ========== + + .. [1] 1978: John S. Rose: A Course on Group Theory, + Introduction to Finite Group Theory: 1.4 + """ + if self._is_cyclic is not None: + return self._is_cyclic + + if len(self.generators) == 1: + self._is_cyclic = True + self._is_abelian = True + return True + + if self._is_abelian is False: + self._is_cyclic = False + return False + + order = self.order() + + if order < 6: + self._is_abelian = True + if order != 4: + self._is_cyclic = True + return True + + factors = factorint(order) + if all(v == 1 for v in factors.values()): + if self._is_abelian: + self._is_cyclic = True + return True + + primes = list(factors.keys()) + if PermutationGroup._distinct_primes_lemma(primes) is True: + self._is_cyclic = True + self._is_abelian = True + return True + + if not self.is_abelian: + self._is_cyclic = False + return False + + self._is_cyclic = all( + any(g**(order//p) != self.identity for g in self.generators) + for p, e in factors.items() if e > 1 + ) + return self._is_cyclic + + @property + def is_dihedral(self): + r""" + Return ``True`` if the group is dihedral. + + Examples + ======== + + >>> from sympy.combinatorics.perm_groups import PermutationGroup + >>> from sympy.combinatorics.permutations import Permutation + >>> from sympy.combinatorics.named_groups import SymmetricGroup, CyclicGroup + >>> G = PermutationGroup(Permutation(1, 6)(2, 5)(3, 4), Permutation(0, 1, 2, 3, 4, 5, 6)) + >>> G.is_dihedral + True + >>> G = SymmetricGroup(3) + >>> G.is_dihedral + True + >>> G = CyclicGroup(6) + >>> G.is_dihedral + False + + References + ========== + + .. [Di1] https://math.stackexchange.com/a/827273 + .. [Di2] https://kconrad.math.uconn.edu/blurbs/grouptheory/dihedral.pdf + .. [Di3] https://kconrad.math.uconn.edu/blurbs/grouptheory/dihedral2.pdf + .. [Di4] https://en.wikipedia.org/wiki/Dihedral_group + """ + if self._is_dihedral is not None: + return self._is_dihedral + + order = self.order() + + if order % 2 == 1: + self._is_dihedral = False + return False + if order == 2: + self._is_dihedral = True + return True + if order == 4: + # The dihedral group of order 4 is the Klein 4-group. + self._is_dihedral = not self.is_cyclic + return self._is_dihedral + if self.is_abelian: + # The only abelian dihedral groups are the ones of orders 2 and 4. + self._is_dihedral = False + return False + + # Now we know the group is of even order >= 6, and nonabelian. + n = order // 2 + + # Handle special cases where there are exactly two generators. 
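+        # A dihedral group of order 2*n is generated either by two
+        # reflections (two elements of order 2), or by a rotation x of
+        # order n together with a reflection y satisfying y*x*y == ~x.
+        # The two checks below detect exactly these presentations, using
+        # the theorems of [Di3] cited in the comments.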
+ gens = self.generators + if len(gens) == 2: + x, y = gens + a, b = x.order(), y.order() + # Make a >= b + if a < b: + x, y, a, b = y, x, b, a + # Using Theorem 2.1 of [Di3]: + if a == 2 == b: + self._is_dihedral = True + return True + # Using Theorem 1.1 of [Di3]: + if a == n and b == 2 and y*x*y == ~x: + self._is_dihedral = True + return True + + # Proceed with algorithm of [Di1] + # Find elements of orders 2 and n + order_2, order_n = [], [] + for p in self.elements: + k = p.order() + if k == 2: + order_2.append(p) + elif k == n: + order_n.append(p) + + if len(order_2) != n + 1 - (n % 2): + self._is_dihedral = False + return False + + if not order_n: + self._is_dihedral = False + return False + + x = order_n[0] + # Want an element y of order 2 that is not a power of x + # (i.e. that is not the 180-deg rotation, when n is even). + y = order_2[0] + if n % 2 == 0 and y == x**(n//2): + y = order_2[1] + + self._is_dihedral = (y*x*y == ~x) + return self._is_dihedral + + def pointwise_stabilizer(self, points, incremental=True): + r"""Return the pointwise stabilizer for a set of points. + + Explanation + =========== + + For a permutation group `G` and a set of points + `\{p_1, p_2,\ldots, p_k\}`, the pointwise stabilizer of + `p_1, p_2, \ldots, p_k` is defined as + `G_{p_1,\ldots, p_k} = + \{g\in G | g(p_i) = p_i \forall i\in\{1, 2,\ldots,k\}\}` ([1],p20). + It is a subgroup of `G`. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import SymmetricGroup + >>> S = SymmetricGroup(7) + >>> Stab = S.pointwise_stabilizer([2, 3, 5]) + >>> Stab.is_subgroup(S.stabilizer(2).stabilizer(3).stabilizer(5)) + True + + See Also + ======== + + stabilizer, schreier_sims_incremental + + Notes + ===== + + When incremental == True, + rather than the obvious implementation using successive calls to + ``.stabilizer()``, this uses the incremental Schreier-Sims algorithm + to obtain a base with starting segment - the given points. + + """ + if incremental: + base, strong_gens = self.schreier_sims_incremental(base=points) + stab_gens = [] + degree = self.degree + for gen in strong_gens: + if [gen(point) for point in points] == points: + stab_gens.append(gen) + if not stab_gens: + stab_gens = _af_new(list(range(degree))) + return PermutationGroup(stab_gens) + else: + gens = self._generators + degree = self.degree + for x in points: + gens = _stabilizer(degree, gens, x) + return PermutationGroup(gens) + + def make_perm(self, n, seed=None): + """ + Multiply ``n`` randomly selected permutations from + pgroup together, starting with the identity + permutation. If ``n`` is a list of integers, those + integers will be used to select the permutations and they + will be applied in L to R order: make_perm((A, B, C)) will + give CBA(I) where I is the identity permutation. + + ``seed`` is used to set the seed for the random selection + of permutations from pgroup. If this is a list of integers, + the corresponding permutations from pgroup will be selected + in the order give. This is mainly used for testing purposes. 
+ + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> a, b = [Permutation([1, 0, 3, 2]), Permutation([1, 3, 0, 2])] + >>> G = PermutationGroup([a, b]) + >>> G.make_perm(1, [0]) + (0 1)(2 3) + >>> G.make_perm(3, [0, 1, 0]) + (0 2 3 1) + >>> G.make_perm([0, 1, 0]) + (0 2 3 1) + + See Also + ======== + + random + """ + if is_sequence(n): + if seed is not None: + raise ValueError('If n is a sequence, seed should be None') + n, seed = len(n), n + else: + try: + n = int(n) + except TypeError: + raise ValueError('n must be an integer or a sequence.') + randomrange = _randrange(seed) + + # start with the identity permutation + result = Permutation(list(range(self.degree))) + m = len(self) + for _ in range(n): + p = self[randomrange(m)] + result = rmul(result, p) + return result + + def random(self, af=False): + """Return a random group element + """ + rank = randrange(self.order()) + return self.coset_unrank(rank, af) + + def random_pr(self, gen_count=11, iterations=50, _random_prec=None): + """Return a random group element using product replacement. + + Explanation + =========== + + For the details of the product replacement algorithm, see + ``_random_pr_init`` In ``random_pr`` the actual 'product replacement' + is performed. Notice that if the attribute ``_random_gens`` + is empty, it needs to be initialized by ``_random_pr_init``. + + See Also + ======== + + _random_pr_init + + """ + if self._random_gens == []: + self._random_pr_init(gen_count, iterations) + random_gens = self._random_gens + r = len(random_gens) - 1 + + # handle randomized input for testing purposes + if _random_prec is None: + s = randrange(r) + t = randrange(r - 1) + if t == s: + t = r - 1 + x = choice([1, 2]) + e = choice([-1, 1]) + else: + s = _random_prec['s'] + t = _random_prec['t'] + if t == s: + t = r - 1 + x = _random_prec['x'] + e = _random_prec['e'] + + if x == 1: + random_gens[s] = _af_rmul(random_gens[s], _af_pow(random_gens[t], e)) + random_gens[r] = _af_rmul(random_gens[r], random_gens[s]) + else: + random_gens[s] = _af_rmul(_af_pow(random_gens[t], e), random_gens[s]) + random_gens[r] = _af_rmul(random_gens[s], random_gens[r]) + return _af_new(random_gens[r]) + + def random_stab(self, alpha, schreier_vector=None, _random_prec=None): + """Random element from the stabilizer of ``alpha``. + + The schreier vector for ``alpha`` is an optional argument used + for speeding up repeated calls. The algorithm is described in [1], p.81 + + See Also + ======== + + random_pr, orbit_rep + + """ + if schreier_vector is None: + schreier_vector = self.schreier_vector(alpha) + if _random_prec is None: + rand = self.random_pr() + else: + rand = _random_prec['rand'] + beta = rand(alpha) + h = self.orbit_rep(alpha, beta, schreier_vector) + return rmul(~h, rand) + + def schreier_sims(self): + """Schreier-Sims algorithm. + + Explanation + =========== + + It computes the generators of the chain of stabilizers + `G > G_{b_1} > .. > G_{b1,..,b_r} > 1` + in which `G_{b_1,..,b_i}` stabilizes `b_1,..,b_i`, + and the corresponding ``s`` cosets. + An element of the group can be written as the product + `h_1*..*h_s`. + + We use the incremental Schreier-Sims algorithm. 
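+
+        Once this method has run, the basic orbits and transversals are
+        cached on the group.  As a quick consistency check (an illustration
+        of the orbit-stabilizer relation, not extra behaviour of this
+        method), the product of the transversal sizes equals the group
+        order:
+
+        >>> from sympy.combinatorics.named_groups import SymmetricGroup
+        >>> from math import prod
+        >>> S = SymmetricGroup(4)
+        >>> S.schreier_sims()
+        >>> prod(len(t) for t in S.basic_transversals) == S.order()
+        True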
+ + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> a = Permutation([0, 2, 1]) + >>> b = Permutation([1, 0, 2]) + >>> G = PermutationGroup([a, b]) + >>> G.schreier_sims() + >>> G.basic_transversals + [{0: (2)(0 1), 1: (2), 2: (1 2)}, + {0: (2), 2: (0 2)}] + """ + if self._transversals: + return + self._schreier_sims() + return + + def _schreier_sims(self, base=None): + schreier = self.schreier_sims_incremental(base=base, slp_dict=True) + base, strong_gens = schreier[:2] + self._base = base + self._strong_gens = strong_gens + self._strong_gens_slp = schreier[2] + if not base: + self._transversals = [] + self._basic_orbits = [] + return + + strong_gens_distr = _distribute_gens_by_base(base, strong_gens) + basic_orbits, transversals, slps = _orbits_transversals_from_bsgs(base,\ + strong_gens_distr, slp=True) + + # rewrite the indices stored in slps in terms of strong_gens + for i, slp in enumerate(slps): + gens = strong_gens_distr[i] + for k in slp: + slp[k] = [strong_gens.index(gens[s]) for s in slp[k]] + + self._transversals = transversals + self._basic_orbits = [sorted(x) for x in basic_orbits] + self._transversal_slp = slps + + def schreier_sims_incremental(self, base=None, gens=None, slp_dict=False): + """Extend a sequence of points and generating set to a base and strong + generating set. + + Parameters + ========== + + base + The sequence of points to be extended to a base. Optional + parameter with default value ``[]``. + gens + The generating set to be extended to a strong generating set + relative to the base obtained. Optional parameter with default + value ``self.generators``. + + slp_dict + If `True`, return a dictionary `{g: gens}` for each strong + generator `g` where `gens` is a list of strong generators + coming before `g` in `strong_gens`, such that the product + of the elements of `gens` is equal to `g`. + + Returns + ======= + + (base, strong_gens) + ``base`` is the base obtained, and ``strong_gens`` is the strong + generating set relative to it. The original parameters ``base``, + ``gens`` remain unchanged. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import AlternatingGroup + >>> from sympy.combinatorics.testutil import _verify_bsgs + >>> A = AlternatingGroup(7) + >>> base = [2, 3] + >>> seq = [2, 3] + >>> base, strong_gens = A.schreier_sims_incremental(base=seq) + >>> _verify_bsgs(A, base, strong_gens) + True + >>> base[:2] + [2, 3] + + Notes + ===== + + This version of the Schreier-Sims algorithm runs in polynomial time. + There are certain assumptions in the implementation - if the trivial + group is provided, ``base`` and ``gens`` are returned immediately, + as any sequence of points is a base for the trivial group. If the + identity is present in the generators ``gens``, it is removed as + it is a redundant generator. + The implementation is described in [1], pp. 90-93. 
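+
+        As a small consistency check of what "strong" means relative to the
+        base computed above (this merely restates the definition, reusing
+        ``A``, ``base`` and ``strong_gens`` from the example): the strong
+        generators fixing the first base point generate the stabilizer of
+        that point.
+
+        >>> from sympy.combinatorics import PermutationGroup
+        >>> stab_gens = [g for g in strong_gens if g(base[0]) == base[0]]
+        >>> PermutationGroup(stab_gens) == A.stabilizer(base[0])
+        True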
+ + See Also + ======== + + schreier_sims, schreier_sims_random + + """ + if base is None: + base = [] + if gens is None: + gens = self.generators[:] + degree = self.degree + id_af = list(range(degree)) + # handle the trivial group + if len(gens) == 1 and gens[0].is_Identity: + if slp_dict: + return base, gens, {gens[0]: [gens[0]]} + return base, gens + # prevent side effects + _base, _gens = base[:], gens[:] + # remove the identity as a generator + _gens = [x for x in _gens if not x.is_Identity] + # make sure no generator fixes all base points + for gen in _gens: + if all(x == gen._array_form[x] for x in _base): + for new in id_af: + if gen._array_form[new] != new: + break + else: + assert None # can this ever happen? + _base.append(new) + # distribute generators according to basic stabilizers + strong_gens_distr = _distribute_gens_by_base(_base, _gens) + strong_gens_slp = [] + # initialize the basic stabilizers, basic orbits and basic transversals + orbs = {} + transversals = {} + slps = {} + base_len = len(_base) + for i in range(base_len): + transversals[i], slps[i] = _orbit_transversal(degree, strong_gens_distr[i], + _base[i], pairs=True, af=True, slp=True) + transversals[i] = dict(transversals[i]) + orbs[i] = list(transversals[i].keys()) + # main loop: amend the stabilizer chain until we have generators + # for all stabilizers + i = base_len - 1 + while i >= 0: + # this flag is used to continue with the main loop from inside + # a nested loop + continue_i = False + # test the generators for being a strong generating set + db = {} + for beta, u_beta in list(transversals[i].items()): + for j, gen in enumerate(strong_gens_distr[i]): + gb = gen._array_form[beta] + u1 = transversals[i][gb] + g1 = _af_rmul(gen._array_form, u_beta) + slp = [(i, g) for g in slps[i][beta]] + slp = [(i, j)] + slp + if g1 != u1: + # test if the schreier generator is in the i+1-th + # would-be basic stabilizer + y = True + try: + u1_inv = db[gb] + except KeyError: + u1_inv = db[gb] = _af_invert(u1) + schreier_gen = _af_rmul(u1_inv, g1) + u1_inv_slp = slps[i][gb][:] + u1_inv_slp.reverse() + u1_inv_slp = [(i, (g,)) for g in u1_inv_slp] + slp = u1_inv_slp + slp + h, j, slp = _strip_af(schreier_gen, _base, orbs, transversals, i, slp=slp, slps=slps) + if j <= base_len: + # new strong generator h at level j + y = False + elif h: + # h fixes all base points + y = False + moved = 0 + while h[moved] == moved: + moved += 1 + _base.append(moved) + base_len += 1 + strong_gens_distr.append([]) + if y is False: + # if a new strong generator is found, update the + # data structures and start over + h = _af_new(h) + strong_gens_slp.append((h, slp)) + for l in range(i + 1, j): + strong_gens_distr[l].append(h) + transversals[l], slps[l] =\ + _orbit_transversal(degree, strong_gens_distr[l], + _base[l], pairs=True, af=True, slp=True) + transversals[l] = dict(transversals[l]) + orbs[l] = list(transversals[l].keys()) + i = j - 1 + # continue main loop using the flag + continue_i = True + if continue_i is True: + break + if continue_i is True: + break + if continue_i is True: + continue + i -= 1 + + strong_gens = _gens[:] + + if slp_dict: + # create the list of the strong generators strong_gens and + # rewrite the indices of strong_gens_slp in terms of the + # elements of strong_gens + for k, slp in strong_gens_slp: + strong_gens.append(k) + for i in range(len(slp)): + s = slp[i] + if isinstance(s[1], tuple): + slp[i] = strong_gens_distr[s[0]][s[1][0]]**-1 + else: + slp[i] = strong_gens_distr[s[0]][s[1]] + strong_gens_slp = 
dict(strong_gens_slp) + # add the original generators + for g in _gens: + strong_gens_slp[g] = [g] + return (_base, strong_gens, strong_gens_slp) + + strong_gens.extend([k for k, _ in strong_gens_slp]) + return _base, strong_gens + + def schreier_sims_random(self, base=None, gens=None, consec_succ=10, + _random_prec=None): + r"""Randomized Schreier-Sims algorithm. + + Explanation + =========== + + The randomized Schreier-Sims algorithm takes the sequence ``base`` + and the generating set ``gens``, and extends ``base`` to a base, and + ``gens`` to a strong generating set relative to that base with + probability of a wrong answer at most `2^{-consec\_succ}`, + provided the random generators are sufficiently random. + + Parameters + ========== + + base + The sequence to be extended to a base. + gens + The generating set to be extended to a strong generating set. + consec_succ + The parameter defining the probability of a wrong answer. + _random_prec + An internal parameter used for testing purposes. + + Returns + ======= + + (base, strong_gens) + ``base`` is the base and ``strong_gens`` is the strong generating + set relative to it. + + Examples + ======== + + >>> from sympy.combinatorics.testutil import _verify_bsgs + >>> from sympy.combinatorics.named_groups import SymmetricGroup + >>> S = SymmetricGroup(5) + >>> base, strong_gens = S.schreier_sims_random(consec_succ=5) + >>> _verify_bsgs(S, base, strong_gens) #doctest: +SKIP + True + + Notes + ===== + + The algorithm is described in detail in [1], pp. 97-98. It extends + the orbits ``orbs`` and the permutation groups ``stabs`` to + basic orbits and basic stabilizers for the base and strong generating + set produced in the end. + The idea of the extension process + is to "sift" random group elements through the stabilizer chain + and amend the stabilizers/orbits along the way when a sift + is not successful. + The helper function ``_strip`` is used to attempt + to decompose a random group element according to the current + state of the stabilizer chain and report whether the element was + fully decomposed (successful sift) or not (unsuccessful sift). In + the latter case, the level at which the sift failed is reported and + used to amend ``stabs``, ``base``, ``gens`` and ``orbs`` accordingly. + The halting condition is for ``consec_succ`` consecutive successful + sifts to pass. This makes sure that the current ``base`` and ``gens`` + form a BSGS with probability at least `1 - 1/\text{consec\_succ}`. 
+ + See Also + ======== + + schreier_sims + + """ + if base is None: + base = [] + if gens is None: + gens = self.generators + base_len = len(base) + n = self.degree + # make sure no generator fixes all base points + for gen in gens: + if all(gen(x) == x for x in base): + new = 0 + while gen._array_form[new] == new: + new += 1 + base.append(new) + base_len += 1 + # distribute generators according to basic stabilizers + strong_gens_distr = _distribute_gens_by_base(base, gens) + # initialize the basic stabilizers, basic transversals and basic orbits + transversals = {} + orbs = {} + for i in range(base_len): + transversals[i] = dict(_orbit_transversal(n, strong_gens_distr[i], + base[i], pairs=True)) + orbs[i] = list(transversals[i].keys()) + # initialize the number of consecutive elements sifted + c = 0 + # start sifting random elements while the number of consecutive sifts + # is less than consec_succ + while c < consec_succ: + if _random_prec is None: + g = self.random_pr() + else: + g = _random_prec['g'].pop() + h, j = _strip(g, base, orbs, transversals) + y = True + # determine whether a new base point is needed + if j <= base_len: + y = False + elif not h.is_Identity: + y = False + moved = 0 + while h(moved) == moved: + moved += 1 + base.append(moved) + base_len += 1 + strong_gens_distr.append([]) + # if the element doesn't sift, amend the strong generators and + # associated stabilizers and orbits + if y is False: + for l in range(1, j): + strong_gens_distr[l].append(h) + transversals[l] = dict(_orbit_transversal(n, + strong_gens_distr[l], base[l], pairs=True)) + orbs[l] = list(transversals[l].keys()) + c = 0 + else: + c += 1 + # build the strong generating set + strong_gens = strong_gens_distr[0][:] + for gen in strong_gens_distr[1]: + if gen not in strong_gens: + strong_gens.append(gen) + return base, strong_gens + + def schreier_vector(self, alpha): + """Computes the schreier vector for ``alpha``. + + Explanation + =========== + + The Schreier vector efficiently stores information + about the orbit of ``alpha``. It can later be used to quickly obtain + elements of the group that send ``alpha`` to a particular element + in the orbit. Notice that the Schreier vector depends on the order + in which the group generators are listed. For a definition, see [3]. + Since list indices start from zero, we adopt the convention to use + "None" instead of 0 to signify that an element does not belong + to the orbit. + For the algorithm and its correctness, see [2], pp.78-80. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> a = Permutation([2, 4, 6, 3, 1, 5, 0]) + >>> b = Permutation([0, 1, 3, 5, 4, 6, 2]) + >>> G = PermutationGroup([a, b]) + >>> G.schreier_vector(0) + [-1, None, 0, 1, None, 1, 0] + + See Also + ======== + + orbit + + """ + n = self.degree + v = [None]*n + v[alpha] = -1 + orb = [alpha] + used = [False]*n + used[alpha] = True + gens = self.generators + r = len(gens) + for b in orb: + for i in range(r): + temp = gens[i]._array_form[b] + if used[temp] is False: + orb.append(temp) + used[temp] = True + v[temp] = i + return v + + def stabilizer(self, alpha): + r"""Return the stabilizer subgroup of ``alpha``. + + Explanation + =========== + + The stabilizer of `\alpha` is the group `G_\alpha = + \{g \in G | g(\alpha) = \alpha\}`. + For a proof of correctness, see [1], p.79. 
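+
+        The orbit-stabilizer theorem,
+        `|G| = |G_\alpha| \cdot |\{g(\alpha) | g \in G\}|`, gives a quick
+        consistency check (an illustration only, not extra behaviour of
+        this method):
+
+        >>> from sympy.combinatorics.named_groups import SymmetricGroup
+        >>> S = SymmetricGroup(5)
+        >>> S.stabilizer(0).order() * len(S.orbit(0)) == S.order()
+        True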
+ + Examples + ======== + + >>> from sympy.combinatorics.named_groups import DihedralGroup + >>> G = DihedralGroup(6) + >>> G.stabilizer(5) + PermutationGroup([ + (5)(0 4)(1 3)]) + + See Also + ======== + + orbit + + """ + return PermGroup(_stabilizer(self._degree, self._generators, alpha)) + + @property + def strong_gens(self): + r"""Return a strong generating set from the Schreier-Sims algorithm. + + Explanation + =========== + + A generating set `S = \{g_1, g_2, \dots, g_t\}` for a permutation group + `G` is a strong generating set relative to the sequence of points + (referred to as a "base") `(b_1, b_2, \dots, b_k)` if, for + `1 \leq i \leq k` we have that the intersection of the pointwise + stabilizer `G^{(i+1)} := G_{b_1, b_2, \dots, b_i}` with `S` generates + the pointwise stabilizer `G^{(i+1)}`. The concepts of a base and + strong generating set and their applications are discussed in depth + in [1], pp. 87-89 and [2], pp. 55-57. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import DihedralGroup + >>> D = DihedralGroup(4) + >>> D.strong_gens + [(0 1 2 3), (0 3)(1 2), (1 3)] + >>> D.base + [0, 1] + + See Also + ======== + + base, basic_transversals, basic_orbits, basic_stabilizers + + """ + if self._strong_gens == []: + self.schreier_sims() + return self._strong_gens + + def subgroup(self, gens): + """ + Return the subgroup generated by `gens` which is a list of + elements of the group + """ + + if not all(g in self for g in gens): + raise ValueError("The group does not contain the supplied generators") + + G = PermutationGroup(gens) + return G + + def subgroup_search(self, prop, base=None, strong_gens=None, tests=None, + init_subgroup=None): + """Find the subgroup of all elements satisfying the property ``prop``. + + Explanation + =========== + + This is done by a depth-first search with respect to base images that + uses several tests to prune the search tree. + + Parameters + ========== + + prop + The property to be used. Has to be callable on group elements + and always return ``True`` or ``False``. It is assumed that + all group elements satisfying ``prop`` indeed form a subgroup. + base + A base for the supergroup. + strong_gens + A strong generating set for the supergroup. + tests + A list of callables of length equal to the length of ``base``. + These are used to rule out group elements by partial base images, + so that ``tests[l](g)`` returns False if the element ``g`` is known + not to satisfy prop base on where g sends the first ``l + 1`` base + points. + init_subgroup + if a subgroup of the sought group is + known in advance, it can be passed to the function as this + parameter. + + Returns + ======= + + res + The subgroup of all elements satisfying ``prop``. The generating + set for this group is guaranteed to be a strong generating set + relative to the base ``base``. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import (SymmetricGroup, + ... AlternatingGroup) + >>> from sympy.combinatorics.testutil import _verify_bsgs + >>> S = SymmetricGroup(7) + >>> prop_even = lambda x: x.is_even + >>> base, strong_gens = S.schreier_sims_incremental() + >>> G = S.subgroup_search(prop_even, base=base, strong_gens=strong_gens) + >>> G.is_subgroup(AlternatingGroup(7)) + True + >>> _verify_bsgs(G, base, G.generators) + True + + Notes + ===== + + This function is extremely lengthy and complicated and will require + some careful attention. The implementation is described in + [1], pp. 
114-117, and the comments for the code here follow the lines + of the pseudocode in the book for clarity. + + The complexity is exponential in general, since the search process by + itself visits all members of the supergroup. However, there are a lot + of tests which are used to prune the search tree, and users can define + their own tests via the ``tests`` parameter, so in practice, and for + some computations, it's not terrible. + + A crucial part in the procedure is the frequent base change performed + (this is line 11 in the pseudocode) in order to obtain a new basic + stabilizer. The book mentiones that this can be done by using + ``.baseswap(...)``, however the current implementation uses a more + straightforward way to find the next basic stabilizer - calling the + function ``.stabilizer(...)`` on the previous basic stabilizer. + + """ + # initialize BSGS and basic group properties + def get_reps(orbits): + # get the minimal element in the base ordering + return [min(orbit, key = lambda x: base_ordering[x]) \ + for orbit in orbits] + + def update_nu(l): + temp_index = len(basic_orbits[l]) + 1 -\ + len(res_basic_orbits_init_base[l]) + # this corresponds to the element larger than all points + if temp_index >= len(sorted_orbits[l]): + nu[l] = base_ordering[degree] + else: + nu[l] = sorted_orbits[l][temp_index] + + if base is None: + base, strong_gens = self.schreier_sims_incremental() + base_len = len(base) + degree = self.degree + identity = _af_new(list(range(degree))) + base_ordering = _base_ordering(base, degree) + # add an element larger than all points + base_ordering.append(degree) + # add an element smaller than all points + base_ordering.append(-1) + # compute BSGS-related structures + strong_gens_distr = _distribute_gens_by_base(base, strong_gens) + basic_orbits, transversals = _orbits_transversals_from_bsgs(base, + strong_gens_distr) + # handle subgroup initialization and tests + if init_subgroup is None: + init_subgroup = PermutationGroup([identity]) + if tests is None: + trivial_test = lambda x: True + tests = [] + for i in range(base_len): + tests.append(trivial_test) + # line 1: more initializations. 
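+        # ``res`` is the subgroup found so far (``K`` in the pseudocode of
+        # [1]); every element satisfying ``prop`` that the search encounters
+        # is appended to ``res_generators`` below, and the returned group is
+        # built from those generators.  ``l`` is the current depth in the
+        # search tree of partial base images, while ``f`` is the auxiliary
+        # level marker from the pseudocode that controls when the orbit
+        # representatives of the subgroup are recomputed.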
+ res = init_subgroup + f = base_len - 1 + l = base_len - 1 + # line 2: set the base for K to the base for G + res_base = base[:] + # line 3: compute BSGS and related structures for K + res_base, res_strong_gens = res.schreier_sims_incremental( + base=res_base) + res_strong_gens_distr = _distribute_gens_by_base(res_base, + res_strong_gens) + res_generators = res.generators + res_basic_orbits_init_base = \ + [_orbit(degree, res_strong_gens_distr[i], res_base[i])\ + for i in range(base_len)] + # initialize orbit representatives + orbit_reps = [None]*base_len + # line 4: orbit representatives for f-th basic stabilizer of K + orbits = _orbits(degree, res_strong_gens_distr[f]) + orbit_reps[f] = get_reps(orbits) + # line 5: remove the base point from the representatives to avoid + # getting the identity element as a generator for K + orbit_reps[f].remove(base[f]) + # line 6: more initializations + c = [0]*base_len + u = [identity]*base_len + sorted_orbits = [None]*base_len + for i in range(base_len): + sorted_orbits[i] = basic_orbits[i][:] + sorted_orbits[i].sort(key=lambda point: base_ordering[point]) + # line 7: initializations + mu = [None]*base_len + nu = [None]*base_len + # this corresponds to the element smaller than all points + mu[l] = degree + 1 + update_nu(l) + # initialize computed words + computed_words = [identity]*base_len + # line 8: main loop + while True: + # apply all the tests + while l < base_len - 1 and \ + computed_words[l](base[l]) in orbit_reps[l] and \ + base_ordering[mu[l]] < \ + base_ordering[computed_words[l](base[l])] < \ + base_ordering[nu[l]] and \ + tests[l](computed_words): + # line 11: change the (partial) base of K + new_point = computed_words[l](base[l]) + res_base[l] = new_point + new_stab_gens = _stabilizer(degree, res_strong_gens_distr[l], + new_point) + res_strong_gens_distr[l + 1] = new_stab_gens + # line 12: calculate minimal orbit representatives for the + # l+1-th basic stabilizer + orbits = _orbits(degree, new_stab_gens) + orbit_reps[l + 1] = get_reps(orbits) + # line 13: amend sorted orbits + l += 1 + temp_orbit = [computed_words[l - 1](point) for point + in basic_orbits[l]] + temp_orbit.sort(key=lambda point: base_ordering[point]) + sorted_orbits[l] = temp_orbit + # lines 14 and 15: update variables used minimality tests + new_mu = degree + 1 + for i in range(l): + if base[l] in res_basic_orbits_init_base[i]: + candidate = computed_words[i](base[i]) + if base_ordering[candidate] > base_ordering[new_mu]: + new_mu = candidate + mu[l] = new_mu + update_nu(l) + # line 16: determine the new transversal element + c[l] = 0 + temp_point = sorted_orbits[l][c[l]] + gamma = computed_words[l - 1]._array_form.index(temp_point) + u[l] = transversals[l][gamma] + # update computed words + computed_words[l] = rmul(computed_words[l - 1], u[l]) + # lines 17 & 18: apply the tests to the group element found + g = computed_words[l] + temp_point = g(base[l]) + if l == base_len - 1 and \ + base_ordering[mu[l]] < \ + base_ordering[temp_point] < base_ordering[nu[l]] and \ + temp_point in orbit_reps[l] and \ + tests[l](computed_words) and \ + prop(g): + # line 19: reset the base of K + res_generators.append(g) + res_base = base[:] + # line 20: recalculate basic orbits (and transversals) + res_strong_gens.append(g) + res_strong_gens_distr = _distribute_gens_by_base(res_base, + res_strong_gens) + res_basic_orbits_init_base = \ + [_orbit(degree, res_strong_gens_distr[i], res_base[i]) \ + for i in range(base_len)] + # line 21: recalculate orbit representatives + # line 22: reset 
the search depth + orbit_reps[f] = get_reps(orbits) + l = f + # line 23: go up the tree until in the first branch not fully + # searched + while l >= 0 and c[l] == len(basic_orbits[l]) - 1: + l = l - 1 + # line 24: if the entire tree is traversed, return K + if l == -1: + return PermutationGroup(res_generators) + # lines 25-27: update orbit representatives + if l < f: + # line 26 + f = l + c[l] = 0 + # line 27 + temp_orbits = _orbits(degree, res_strong_gens_distr[f]) + orbit_reps[f] = get_reps(temp_orbits) + # line 28: update variables used for minimality testing + mu[l] = degree + 1 + temp_index = len(basic_orbits[l]) + 1 - \ + len(res_basic_orbits_init_base[l]) + if temp_index >= len(sorted_orbits[l]): + nu[l] = base_ordering[degree] + else: + nu[l] = sorted_orbits[l][temp_index] + # line 29: set the next element from the current branch and update + # accordingly + c[l] += 1 + if l == 0: + gamma = sorted_orbits[l][c[l]] + else: + gamma = computed_words[l - 1]._array_form.index(sorted_orbits[l][c[l]]) + + u[l] = transversals[l][gamma] + if l == 0: + computed_words[l] = u[l] + else: + computed_words[l] = rmul(computed_words[l - 1], u[l]) + + @property + def transitivity_degree(self): + r"""Compute the degree of transitivity of the group. + + Explanation + =========== + + A permutation group `G` acting on `\Omega = \{0, 1, \dots, n-1\}` is + ``k``-fold transitive, if, for any `k` points + `(a_1, a_2, \dots, a_k) \in \Omega` and any `k` points + `(b_1, b_2, \dots, b_k) \in \Omega` there exists `g \in G` such that + `g(a_1) = b_1, g(a_2) = b_2, \dots, g(a_k) = b_k` + The degree of transitivity of `G` is the maximum ``k`` such that + `G` is ``k``-fold transitive. ([8]) + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> a = Permutation([1, 2, 0]) + >>> b = Permutation([1, 0, 2]) + >>> G = PermutationGroup([a, b]) + >>> G.transitivity_degree + 3 + + See Also + ======== + + is_transitive, orbit + + """ + if self._transitivity_degree is None: + n = self.degree + G = self + # if G is k-transitive, a tuple (a_0,..,a_k) + # can be brought to (b_0,...,b_(k-1), b_k) + # where b_0,...,b_(k-1) are fixed points; + # consider the group G_k which stabilizes b_0,...,b_(k-1) + # if G_k is transitive on the subset excluding b_0,...,b_(k-1) + # then G is (k+1)-transitive + for i in range(n): + orb = G.orbit(i) + if len(orb) != n - i: + self._transitivity_degree = i + return i + G = G.stabilizer(i) + self._transitivity_degree = n + return n + else: + return self._transitivity_degree + + def _p_elements_group(self, p): + ''' + For an abelian p-group, return the subgroup consisting of + all elements of order p (and the identity) + + ''' + gens = self.generators[:] + gens = sorted(gens, key=lambda x: x.order(), reverse=True) + gens_p = [g**(g.order()/p) for g in gens] + gens_r = [] + for i in range(len(gens)): + x = gens[i] + x_order = x.order() + # x_p has order p + x_p = x**(x_order/p) + if i > 0: + P = PermutationGroup(gens_p[:i]) + else: + P = PermutationGroup(self.identity) + if x**(x_order/p) not in P: + gens_r.append(x**(x_order/p)) + else: + # replace x by an element of order (x.order()/p) + # so that gens still generates G + g = P.generator_product(x_p, original=True) + for s in g: + x = x*s**-1 + x_order = x_order/p + # insert x to gens so that the sorting is preserved + del gens[i] + del gens_p[i] + j = i - 1 + while j < len(gens) and gens[j].order() >= x_order: + j += 1 + gens = gens[:j] + [x] + gens[j:] + gens_p = gens_p[:j] + [x] + gens_p[j:] + return 
PermutationGroup(gens_r) + + def _sylow_alt_sym(self, p): + ''' + Return a p-Sylow subgroup of a symmetric or an + alternating group. + + Explanation + =========== + + The algorithm for this is hinted at in [1], Chapter 4, + Exercise 4. + + For Sym(n) with n = p^i, the idea is as follows. Partition + the interval [0..n-1] into p equal parts, each of length p^(i-1): + [0..p^(i-1)-1], [p^(i-1)..2*p^(i-1)-1]...[(p-1)*p^(i-1)..p^i-1]. + Find a p-Sylow subgroup of Sym(p^(i-1)) (treated as a subgroup + of ``self``) acting on each of the parts. Call the subgroups + P_1, P_2...P_p. The generators for the subgroups P_2...P_p + can be obtained from those of P_1 by applying a "shifting" + permutation to them, that is, a permutation mapping [0..p^(i-1)-1] + to the second part (the other parts are obtained by using the shift + multiple times). The union of this permutation and the generators + of P_1 is a p-Sylow subgroup of ``self``. + + For n not equal to a power of p, partition + [0..n-1] in accordance with how n would be written in base p. + E.g. for p=2 and n=11, 11 = 2^3 + 2^2 + 1 so the partition + is [[0..7], [8..9], {10}]. To generate a p-Sylow subgroup, + take the union of the generators for each of the parts. + For the above example, {(0 1), (0 2)(1 3), (0 4), (1 5)(2 7)} + from the first part, {(8 9)} from the second part and + nothing from the third. This gives 4 generators in total, and + the subgroup they generate is p-Sylow. + + Alternating groups are treated the same except when p=2. In this + case, (0 1)(s s+1) should be added for an appropriate s (the start + of a part) for each part in the partitions. + + See Also + ======== + + sylow_subgroup, is_alt_sym + + ''' + n = self.degree + gens = [] + identity = Permutation(n-1) + # the case of 2-sylow subgroups of alternating groups + # needs special treatment + alt = p == 2 and all(g.is_even for g in self.generators) + + # find the presentation of n in base p + coeffs = [] + m = n + while m > 0: + coeffs.append(m % p) + m = m // p + + power = len(coeffs)-1 + # for a symmetric group, gens[:i] is the generating + # set for a p-Sylow subgroup on [0..p**(i-1)-1]. For + # alternating groups, the same is given by gens[:2*(i-1)] + for i in range(1, power+1): + if i == 1 and alt: + # (0 1) shouldn't be added for alternating groups + continue + gen = Permutation([(j + p**(i-1)) % p**i for j in range(p**i)]) + gens.append(identity*gen) + if alt: + gen = Permutation(0, 1)*gen*Permutation(0, 1)*gen + gens.append(gen) + + # the first point in the current part (see the algorithm + # description in the docstring) + start = 0 + + while power > 0: + a = coeffs[power] + + # make the permutation shifting the start of the first + # part ([0..p^i-1] for some i) to the current one + for _ in range(a): + shift = Permutation() + if start > 0: + for i in range(p**power): + shift = shift(i, start + i) + + if alt: + gen = Permutation(0, 1)*shift*Permutation(0, 1)*shift + gens.append(gen) + j = 2*(power - 1) + else: + j = power + + for i, gen in enumerate(gens[:j]): + if alt and i % 2 == 1: + continue + # shift the generator to the start of the + # partition part + gen = shift*gen*shift + gens.append(gen) + + start += p**power + power = power-1 + + return gens + + def sylow_subgroup(self, p): + ''' + Return a p-Sylow subgroup of the group. 
+ + The algorithm is described in [1], Chapter 4, Section 7 + + Examples + ======== + >>> from sympy.combinatorics.named_groups import DihedralGroup + >>> from sympy.combinatorics.named_groups import SymmetricGroup + >>> from sympy.combinatorics.named_groups import AlternatingGroup + + >>> D = DihedralGroup(6) + >>> S = D.sylow_subgroup(2) + >>> S.order() + 4 + >>> G = SymmetricGroup(6) + >>> S = G.sylow_subgroup(5) + >>> S.order() + 5 + + >>> G1 = AlternatingGroup(3) + >>> G2 = AlternatingGroup(5) + >>> G3 = AlternatingGroup(9) + + >>> S1 = G1.sylow_subgroup(3) + >>> S2 = G2.sylow_subgroup(3) + >>> S3 = G3.sylow_subgroup(3) + + >>> len1 = len(S1.lower_central_series()) + >>> len2 = len(S2.lower_central_series()) + >>> len3 = len(S3.lower_central_series()) + + >>> len1 == len2 + True + >>> len1 < len3 + True + + ''' + from sympy.combinatorics.homomorphisms import ( + orbit_homomorphism, block_homomorphism) + + if not isprime(p): + raise ValueError("p must be a prime") + + def is_p_group(G): + # check if the order of G is a power of p + # and return the power + m = G.order() + n = 0 + while m % p == 0: + m = m/p + n += 1 + if m == 1: + return True, n + return False, n + + def _sylow_reduce(mu, nu): + # reduction based on two homomorphisms + # mu and nu with trivially intersecting + # kernels + Q = mu.image().sylow_subgroup(p) + Q = mu.invert_subgroup(Q) + nu = nu.restrict_to(Q) + R = nu.image().sylow_subgroup(p) + return nu.invert_subgroup(R) + + order = self.order() + if order % p != 0: + return PermutationGroup([self.identity]) + p_group, n = is_p_group(self) + if p_group: + return self + + if self.is_alt_sym(): + return PermutationGroup(self._sylow_alt_sym(p)) + + # if there is a non-trivial orbit with size not divisible + # by p, the sylow subgroup is contained in its stabilizer + # (by orbit-stabilizer theorem) + orbits = self.orbits() + non_p_orbits = [o for o in orbits if len(o) % p != 0 and len(o) != 1] + if non_p_orbits: + G = self.stabilizer(list(non_p_orbits[0]).pop()) + return G.sylow_subgroup(p) + + if not self.is_transitive(): + # apply _sylow_reduce to orbit actions + orbits = sorted(orbits, key=len) + omega1 = orbits.pop() + omega2 = orbits[0].union(*orbits) + mu = orbit_homomorphism(self, omega1) + nu = orbit_homomorphism(self, omega2) + return _sylow_reduce(mu, nu) + + blocks = self.minimal_blocks() + if len(blocks) > 1: + # apply _sylow_reduce to block system actions + mu = block_homomorphism(self, blocks[0]) + nu = block_homomorphism(self, blocks[1]) + return _sylow_reduce(mu, nu) + elif len(blocks) == 1: + block = list(blocks)[0] + if any(e != 0 for e in block): + # self is imprimitive + mu = block_homomorphism(self, block) + if not is_p_group(mu.image())[0]: + S = mu.image().sylow_subgroup(p) + return mu.invert_subgroup(S).sylow_subgroup(p) + + # find an element of order p + g = self.random() + g_order = g.order() + while g_order % p != 0 or g_order == 0: + g = self.random() + g_order = g.order() + g = g**(g_order // p) + if order % p**2 != 0: + return PermutationGroup(g) + + C = self.centralizer(g) + while C.order() % p**n != 0: + S = C.sylow_subgroup(p) + s_order = S.order() + Z = S.center() + P = Z._p_elements_group(p) + h = P.random() + C_h = self.centralizer(h) + while C_h.order() % p*s_order != 0: + h = P.random() + C_h = self.centralizer(h) + C = C_h + + return C.sylow_subgroup(p) + + def _block_verify(self, L, alpha): + delta = sorted(self.orbit(alpha)) + # p[i] will be the number of the block + # delta[i] belongs to + p = [-1]*len(delta) + blocks = 
[-1]*len(delta) + + B = [[]] # future list of blocks + u = [0]*len(delta) # u[i] in L s.t. alpha^u[i] = B[0][i] + + t = L.orbit_transversal(alpha, pairs=True) + for a, beta in t: + B[0].append(a) + i_a = delta.index(a) + p[i_a] = 0 + blocks[i_a] = alpha + u[i_a] = beta + + rho = 0 + m = 0 # number of blocks - 1 + + while rho <= m: + beta = B[rho][0] + for g in self.generators: + d = beta^g + i_d = delta.index(d) + sigma = p[i_d] + if sigma < 0: + # define a new block + m += 1 + sigma = m + u[i_d] = u[delta.index(beta)]*g + p[i_d] = sigma + rep = d + blocks[i_d] = rep + newb = [rep] + for gamma in B[rho][1:]: + i_gamma = delta.index(gamma) + d = gamma^g + i_d = delta.index(d) + if p[i_d] < 0: + u[i_d] = u[i_gamma]*g + p[i_d] = sigma + blocks[i_d] = rep + newb.append(d) + else: + # B[rho] is not a block + s = u[i_gamma]*g*u[i_d]**(-1) + return False, s + + B.append(newb) + else: + for h in B[rho][1:]: + if h^g not in B[sigma]: + # B[rho] is not a block + s = u[delta.index(beta)]*g*u[i_d]**(-1) + return False, s + rho += 1 + + return True, blocks + + def _verify(H, K, phi, z, alpha): + ''' + Return a list of relators ``rels`` in generators ``gens`_h` that + are mapped to ``H.generators`` by ``phi`` so that given a finite + presentation of ``K`` on a subset of ``gens_h`` + is a finite presentation of ``H``. + + Explanation + =========== + + ``H`` should be generated by the union of ``K.generators`` and ``z`` + (a single generator), and ``H.stabilizer(alpha) == K``; ``phi`` is a + canonical injection from a free group into a permutation group + containing ``H``. + + The algorithm is described in [1], Chapter 6. + + Examples + ======== + + >>> from sympy.combinatorics import free_group, Permutation, PermutationGroup + >>> from sympy.combinatorics.homomorphisms import homomorphism + >>> from sympy.combinatorics.fp_groups import FpGroup + + >>> H = PermutationGroup(Permutation(0, 2), Permutation (1, 5)) + >>> K = PermutationGroup(Permutation(5)(0, 2)) + >>> F = free_group("x_0 x_1")[0] + >>> gens = F.generators + >>> phi = homomorphism(F, H, F.generators, H.generators) + >>> rels_k = [gens[0]**2] # relators for presentation of K + >>> z= Permutation(1, 5) + >>> check, rels_h = H._verify(K, phi, z, 1) + >>> check + True + >>> rels = rels_k + rels_h + >>> G = FpGroup(F, rels) # presentation of H + >>> G.order() == H.order() + True + + See also + ======== + + strong_presentation, presentation, stabilizer + + ''' + + orbit = H.orbit(alpha) + beta = alpha^(z**-1) + + K_beta = K.stabilizer(beta) + + # orbit representatives of K_beta + gammas = [alpha, beta] + orbits = list({tuple(K_beta.orbit(o)) for o in orbit}) + orbit_reps = [orb[0] for orb in orbits] + for rep in orbit_reps: + if rep not in gammas: + gammas.append(rep) + + # orbit transversal of K + betas = [alpha, beta] + transversal = {alpha: phi.invert(H.identity), beta: phi.invert(z**-1)} + + for s, g in K.orbit_transversal(beta, pairs=True): + if s not in transversal: + transversal[s] = transversal[beta]*phi.invert(g) + + + union = K.orbit(alpha).union(K.orbit(beta)) + while (len(union) < len(orbit)): + for gamma in gammas: + if gamma in union: + r = gamma^z + if r not in union: + betas.append(r) + transversal[r] = transversal[gamma]*phi.invert(z) + for s, g in K.orbit_transversal(r, pairs=True): + if s not in transversal: + transversal[s] = transversal[r]*phi.invert(g) + union = union.union(K.orbit(r)) + break + + # compute relators + rels = [] + + for b in betas: + k_gens = K.stabilizer(b).generators + for y in k_gens: + new_rel = 
transversal[b] + gens = K.generator_product(y, original=True) + for g in gens[::-1]: + new_rel = new_rel*phi.invert(g) + new_rel = new_rel*transversal[b]**-1 + + perm = phi(new_rel) + try: + gens = K.generator_product(perm, original=True) + except ValueError: + return False, perm + for g in gens: + new_rel = new_rel*phi.invert(g)**-1 + if new_rel not in rels: + rels.append(new_rel) + + for gamma in gammas: + new_rel = transversal[gamma]*phi.invert(z)*transversal[gamma^z]**-1 + perm = phi(new_rel) + try: + gens = K.generator_product(perm, original=True) + except ValueError: + return False, perm + for g in gens: + new_rel = new_rel*phi.invert(g)**-1 + if new_rel not in rels: + rels.append(new_rel) + + return True, rels + + def strong_presentation(self): + ''' + Return a strong finite presentation of group. The generators + of the returned group are in the same order as the strong + generators of group. + + The algorithm is based on Sims' Verify algorithm described + in [1], Chapter 6. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import DihedralGroup + >>> P = DihedralGroup(4) + >>> G = P.strong_presentation() + >>> P.order() == G.order() + True + + See Also + ======== + + presentation, _verify + + ''' + from sympy.combinatorics.fp_groups import (FpGroup, + simplify_presentation) + from sympy.combinatorics.free_groups import free_group + from sympy.combinatorics.homomorphisms import (block_homomorphism, + homomorphism, GroupHomomorphism) + + strong_gens = self.strong_gens[:] + stabs = self.basic_stabilizers[:] + base = self.base[:] + + # injection from a free group on len(strong_gens) + # generators into G + gen_syms = [('x_%d'%i) for i in range(len(strong_gens))] + F = free_group(', '.join(gen_syms))[0] + phi = homomorphism(F, self, F.generators, strong_gens) + + H = PermutationGroup(self.identity) + while stabs: + alpha = base.pop() + K = H + H = stabs.pop() + new_gens = [g for g in H.generators if g not in K] + + if K.order() == 1: + z = new_gens.pop() + rels = [F.generators[-1]**z.order()] + intermediate_gens = [z] + K = PermutationGroup(intermediate_gens) + + # add generators one at a time building up from K to H + while new_gens: + z = new_gens.pop() + intermediate_gens = [z] + intermediate_gens + K_s = PermutationGroup(intermediate_gens) + orbit = K_s.orbit(alpha) + orbit_k = K.orbit(alpha) + + # split into cases based on the orbit of K_s + if orbit_k == orbit: + if z in K: + rel = phi.invert(z) + perm = z + else: + t = K.orbit_rep(alpha, alpha^z) + rel = phi.invert(z)*phi.invert(t)**-1 + perm = z*t**-1 + for g in K.generator_product(perm, original=True): + rel = rel*phi.invert(g)**-1 + new_rels = [rel] + elif len(orbit_k) == 1: + # `success` is always true because `strong_gens` + # and `base` are already a verified BSGS. Later + # this could be changed to start with a randomly + # generated (potential) BSGS, and then new elements + # would have to be appended to it when `success` + # is false. + success, new_rels = K_s._verify(K, phi, z, alpha) + else: + # K.orbit(alpha) should be a block + # under the action of K_s on K_s.orbit(alpha) + check, block = K_s._block_verify(K, alpha) + if check: + # apply _verify to the action of K_s + # on the block system; for convenience, + # add the blocks as additional points + # that K_s should act on + t = block_homomorphism(K_s, block) + m = t.codomain.degree # number of blocks + d = K_s.degree + + # conjugating with p will shift + # permutations in t.image() to + # higher numbers, e.g. 
+ # p*(0 1)*p = (m m+1) + p = Permutation() + for i in range(m): + p *= Permutation(i, i+d) + + t_img = t.images + # combine generators of K_s with their + # action on the block system + images = {g: g*p*t_img[g]*p for g in t_img} + for g in self.strong_gens[:-len(K_s.generators)]: + images[g] = g + K_s_act = PermutationGroup(list(images.values())) + f = GroupHomomorphism(self, K_s_act, images) + + K_act = PermutationGroup([f(g) for g in K.generators]) + success, new_rels = K_s_act._verify(K_act, f.compose(phi), f(z), d) + + for n in new_rels: + if n not in rels: + rels.append(n) + K = K_s + + group = FpGroup(F, rels) + return simplify_presentation(group) + + def presentation(self, eliminate_gens=True): + ''' + Return an `FpGroup` presentation of the group. + + The algorithm is described in [1], Chapter 6.1. + + ''' + from sympy.combinatorics.fp_groups import (FpGroup, + simplify_presentation) + from sympy.combinatorics.coset_table import CosetTable + from sympy.combinatorics.free_groups import free_group + from sympy.combinatorics.homomorphisms import homomorphism + + if self._fp_presentation: + return self._fp_presentation + + def _factor_group_by_rels(G, rels): + if isinstance(G, FpGroup): + rels.extend(G.relators) + return FpGroup(G.free_group, list(set(rels))) + return FpGroup(G, rels) + + gens = self.generators + len_g = len(gens) + + if len_g == 1: + order = gens[0].order() + # handle the trivial group + if order == 1: + return free_group([])[0] + F, x = free_group('x') + return FpGroup(F, [x**order]) + + if self.order() > 20: + half_gens = self.generators[0:(len_g+1)//2] + else: + half_gens = [] + H = PermutationGroup(half_gens) + H_p = H.presentation() + + len_h = len(H_p.generators) + + C = self.coset_table(H) + n = len(C) # subgroup index + + gen_syms = [('x_%d'%i) for i in range(len(gens))] + F = free_group(', '.join(gen_syms))[0] + + # mapping generators of H_p to those of F + images = [F.generators[i] for i in range(len_h)] + R = homomorphism(H_p, F, H_p.generators, images, check=False) + + # rewrite relators + rels = R(H_p.relators) + G_p = FpGroup(F, rels) + + # injective homomorphism from G_p into self + T = homomorphism(G_p, self, G_p.generators, gens) + + C_p = CosetTable(G_p, []) + + C_p.table = [[None]*(2*len_g) for i in range(n)] + + # initiate the coset transversal + transversal = [None]*n + transversal[0] = G_p.identity + + # fill in the coset table as much as possible + for i in range(2*len_h): + C_p.table[0][i] = 0 + + gamma = 1 + for alpha, x in product(range(n), range(2*len_g)): + beta = C[alpha][x] + if beta == gamma: + gen = G_p.generators[x//2]**((-1)**(x % 2)) + transversal[beta] = transversal[alpha]*gen + C_p.table[alpha][x] = beta + C_p.table[beta][x + (-1)**(x % 2)] = alpha + gamma += 1 + if gamma == n: + break + + C_p.p = list(range(n)) + beta = x = 0 + + while not C_p.is_complete(): + # find the first undefined entry + while C_p.table[beta][x] == C[beta][x]: + x = (x + 1) % (2*len_g) + if x == 0: + beta = (beta + 1) % n + + # define a new relator + gen = G_p.generators[x//2]**((-1)**(x % 2)) + new_rel = transversal[beta]*gen*transversal[C[beta][x]]**-1 + perm = T(new_rel) + nxt = G_p.identity + for s in H.generator_product(perm, original=True): + nxt = nxt*T.invert(s)**-1 + new_rel = new_rel*nxt + + # continue coset enumeration + G_p = _factor_group_by_rels(G_p, [new_rel]) + C_p.scan_and_fill(0, new_rel) + C_p = G_p.coset_enumeration([], strategy="coset_table", + draft=C_p, max_cosets=n, incomplete=True) + + self._fp_presentation = 
simplify_presentation(G_p) + return self._fp_presentation + + def polycyclic_group(self): + """ + Return the PolycyclicGroup instance with below parameters: + + Explanation + =========== + + * pc_sequence : Polycyclic sequence is formed by collecting all + the missing generators between the adjacent groups in the + derived series of given permutation group. + + * pc_series : Polycyclic series is formed by adding all the missing + generators of ``der[i+1]`` in ``der[i]``, where ``der`` represents + the derived series. + + * relative_order : A list, computed by the ratio of adjacent groups in + pc_series. + + """ + from sympy.combinatorics.pc_groups import PolycyclicGroup + if not self.is_polycyclic: + raise ValueError("The group must be solvable") + + der = self.derived_series() + pc_series = [] + pc_sequence = [] + relative_order = [] + pc_series.append(der[-1]) + der.reverse() + + for i in range(len(der)-1): + H = der[i] + for g in der[i+1].generators: + if g not in H: + H = PermutationGroup([g] + H.generators) + pc_series.insert(0, H) + pc_sequence.insert(0, g) + + G1 = pc_series[0].order() + G2 = pc_series[1].order() + relative_order.insert(0, G1 // G2) + + return PolycyclicGroup(pc_sequence, pc_series, relative_order, collector=None) + + +def _orbit(degree, generators, alpha, action='tuples'): + r"""Compute the orbit of alpha `\{g(\alpha) | g \in G\}` as a set. + + Explanation + =========== + + The time complexity of the algorithm used here is `O(|Orb|*r)` where + `|Orb|` is the size of the orbit and ``r`` is the number of generators of + the group. For a more detailed analysis, see [1], p.78, [2], pp. 19-21. + Here alpha can be a single point, or a list of points. + + If alpha is a single point, the ordinary orbit is computed. + if alpha is a list of points, there are three available options: + + 'union' - computes the union of the orbits of the points in the list + 'tuples' - computes the orbit of the list interpreted as an ordered + tuple under the group action ( i.e., g((1, 2, 3)) = (g(1), g(2), g(3)) ) + 'sets' - computes the orbit of the list interpreted as a sets + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> from sympy.combinatorics.perm_groups import _orbit + >>> a = Permutation([1, 2, 0, 4, 5, 6, 3]) + >>> G = PermutationGroup([a]) + >>> _orbit(G.degree, G.generators, 0) + {0, 1, 2} + >>> _orbit(G.degree, G.generators, [0, 4], 'union') + {0, 1, 2, 3, 4, 5, 6} + + See Also + ======== + + orbit, orbit_transversal + + """ + if not hasattr(alpha, '__getitem__'): + alpha = [alpha] + + gens = [x._array_form for x in generators] + if len(alpha) == 1 or action == 'union': + orb = alpha + used = [False]*degree + for el in alpha: + used[el] = True + for b in orb: + for gen in gens: + temp = gen[b] + if used[temp] == False: + orb.append(temp) + used[temp] = True + return set(orb) + elif action == 'tuples': + alpha = tuple(alpha) + orb = [alpha] + used = {alpha} + for b in orb: + for gen in gens: + temp = tuple([gen[x] for x in b]) + if temp not in used: + orb.append(temp) + used.add(temp) + return set(orb) + elif action == 'sets': + alpha = frozenset(alpha) + orb = [alpha] + used = {alpha} + for b in orb: + for gen in gens: + temp = frozenset([gen[x] for x in b]) + if temp not in used: + orb.append(temp) + used.add(temp) + return {tuple(x) for x in orb} + + +def _orbits(degree, generators): + """Compute the orbits of G. 
+ + If ``rep=False`` it returns a list of sets else it returns a list of + representatives of the orbits + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> from sympy.combinatorics.perm_groups import _orbits + >>> a = Permutation([0, 2, 1]) + >>> b = Permutation([1, 0, 2]) + >>> _orbits(a.size, [a, b]) + [{0, 1, 2}] + """ + + orbs = [] + sorted_I = list(range(degree)) + I = set(sorted_I) + while I: + i = sorted_I[0] + orb = _orbit(degree, generators, i) + orbs.append(orb) + # remove all indices that are in this orbit + I -= orb + sorted_I = [i for i in sorted_I if i not in orb] + return orbs + + +def _orbit_transversal(degree, generators, alpha, pairs, af=False, slp=False): + r"""Computes a transversal for the orbit of ``alpha`` as a set. + + Explanation + =========== + + generators generators of the group ``G`` + + For a permutation group ``G``, a transversal for the orbit + `Orb = \{g(\alpha) | g \in G\}` is a set + `\{g_\beta | g_\beta(\alpha) = \beta\}` for `\beta \in Orb`. + Note that there may be more than one possible transversal. + If ``pairs`` is set to ``True``, it returns the list of pairs + `(\beta, g_\beta)`. For a proof of correctness, see [1], p.79 + + if ``af`` is ``True``, the transversal elements are given in + array form. + + If `slp` is `True`, a dictionary `{beta: slp_beta}` is returned + for `\beta \in Orb` where `slp_beta` is a list of indices of the + generators in `generators` s.t. if `slp_beta = [i_1 \dots i_n]` + `g_\beta = generators[i_n] \times \dots \times generators[i_1]`. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import DihedralGroup + >>> from sympy.combinatorics.perm_groups import _orbit_transversal + >>> G = DihedralGroup(6) + >>> _orbit_transversal(G.degree, G.generators, 0, False) + [(5), (0 1 2 3 4 5), (0 5)(1 4)(2 3), (0 2 4)(1 3 5), (5)(0 4)(1 3), (0 3)(1 4)(2 5)] + """ + + tr = [(alpha, list(range(degree)))] + slp_dict = {alpha: []} + used = [False]*degree + used[alpha] = True + gens = [x._array_form for x in generators] + for x, px in tr: + px_slp = slp_dict[x] + for gen in gens: + temp = gen[x] + if used[temp] == False: + slp_dict[temp] = [gens.index(gen)] + px_slp + tr.append((temp, _af_rmul(gen, px))) + used[temp] = True + if pairs: + if not af: + tr = [(x, _af_new(y)) for x, y in tr] + if not slp: + return tr + return tr, slp_dict + + if af: + tr = [y for _, y in tr] + if not slp: + return tr + return tr, slp_dict + + tr = [_af_new(y) for _, y in tr] + if not slp: + return tr + return tr, slp_dict + + +def _stabilizer(degree, generators, alpha): + r"""Return the stabilizer subgroup of ``alpha``. + + Explanation + =========== + + The stabilizer of `\alpha` is the group `G_\alpha = + \{g \in G | g(\alpha) = \alpha\}`. + For a proof of correctness, see [1], p.79. 
+ + degree : degree of G + generators : generators of G + + Examples + ======== + + >>> from sympy.combinatorics.perm_groups import _stabilizer + >>> from sympy.combinatorics.named_groups import DihedralGroup + >>> G = DihedralGroup(6) + >>> _stabilizer(G.degree, G.generators, 5) + [(5)(0 4)(1 3), (5)] + + See Also + ======== + + orbit + + """ + orb = [alpha] + table = {alpha: list(range(degree))} + table_inv = {alpha: list(range(degree))} + used = [False]*degree + used[alpha] = True + gens = [x._array_form for x in generators] + stab_gens = [] + for b in orb: + for gen in gens: + temp = gen[b] + if used[temp] is False: + gen_temp = _af_rmul(gen, table[b]) + orb.append(temp) + table[temp] = gen_temp + table_inv[temp] = _af_invert(gen_temp) + used[temp] = True + else: + schreier_gen = _af_rmuln(table_inv[temp], gen, table[b]) + if schreier_gen not in stab_gens: + stab_gens.append(schreier_gen) + return [_af_new(x) for x in stab_gens] + + +PermGroup = PermutationGroup + + +class SymmetricPermutationGroup(Basic): + """ + The class defining the lazy form of SymmetricGroup. + + deg : int + + """ + def __new__(cls, deg): + deg = _sympify(deg) + obj = Basic.__new__(cls, deg) + return obj + + def __init__(self, *args, **kwargs): + self._deg = self.args[0] + self._order = None + + def __contains__(self, i): + """Return ``True`` if *i* is contained in SymmetricPermutationGroup. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, SymmetricPermutationGroup + >>> G = SymmetricPermutationGroup(4) + >>> Permutation(1, 2, 3) in G + True + + """ + if not isinstance(i, Permutation): + raise TypeError("A SymmetricPermutationGroup contains only Permutations as " + "elements, not elements of type %s" % type(i)) + return i.size == self.degree + + def order(self): + """ + Return the order of the SymmetricPermutationGroup. + + Examples + ======== + + >>> from sympy.combinatorics import SymmetricPermutationGroup + >>> G = SymmetricPermutationGroup(4) + >>> G.order() + 24 + """ + if self._order is not None: + return self._order + n = self._deg + self._order = factorial(n) + return self._order + + @property + def degree(self): + """ + Return the degree of the SymmetricPermutationGroup. + + Examples + ======== + + >>> from sympy.combinatorics import SymmetricPermutationGroup + >>> G = SymmetricPermutationGroup(4) + >>> G.degree + 4 + + """ + return self._deg + + @property + def identity(self): + ''' + Return the identity element of the SymmetricPermutationGroup. + + Examples + ======== + + >>> from sympy.combinatorics import SymmetricPermutationGroup + >>> G = SymmetricPermutationGroup(4) + >>> G.identity() + (3) + + ''' + return _af_new(list(range(self._deg))) + + +class Coset(Basic): + """A left coset of a permutation group with respect to an element. + + Parameters + ========== + + g : Permutation + + H : PermutationGroup + + dir : "+" or "-", If not specified by default it will be "+" + here ``dir`` specified the type of coset "+" represent the + right coset and "-" represent the left coset. + + G : PermutationGroup, optional + The group which contains *H* as its subgroup and *g* as its + element. + + If not specified, it would automatically become a symmetric + group ``SymmetricPermutationGroup(g.size)`` and + ``SymmetricPermutationGroup(H.degree)`` if ``g.size`` and ``H.degree`` + are matching.``SymmetricPermutationGroup`` is a lazy form of SymmetricGroup + used for representation purpose. 
+ + """ + + def __new__(cls, g, H, G=None, dir="+"): + g = _sympify(g) + if not isinstance(g, Permutation): + raise NotImplementedError + + H = _sympify(H) + if not isinstance(H, PermutationGroup): + raise NotImplementedError + + if G is not None: + G = _sympify(G) + if not isinstance(G, (PermutationGroup, SymmetricPermutationGroup)): + raise NotImplementedError + if not H.is_subgroup(G): + raise ValueError("{} must be a subgroup of {}.".format(H, G)) + if g not in G: + raise ValueError("{} must be an element of {}.".format(g, G)) + else: + g_size = g.size + h_degree = H.degree + if g_size != h_degree: + raise ValueError( + "The size of the permutation {} and the degree of " + "the permutation group {} should be matching " + .format(g, H)) + G = SymmetricPermutationGroup(g.size) + + if isinstance(dir, str): + dir = Symbol(dir) + elif not isinstance(dir, Symbol): + raise TypeError("dir must be of type basestring or " + "Symbol, not %s" % type(dir)) + if str(dir) not in ('+', '-'): + raise ValueError("dir must be one of '+' or '-' not %s" % dir) + obj = Basic.__new__(cls, g, H, G, dir) + return obj + + def __init__(self, *args, **kwargs): + self._dir = self.args[3] + + @property + def is_left_coset(self): + """ + Check if the coset is left coset that is ``gH``. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup, Coset + >>> a = Permutation(1, 2) + >>> b = Permutation(0, 1) + >>> G = PermutationGroup([a, b]) + >>> cst = Coset(a, G, dir="-") + >>> cst.is_left_coset + True + + """ + return str(self._dir) == '-' + + @property + def is_right_coset(self): + """ + Check if the coset is right coset that is ``Hg``. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, PermutationGroup, Coset + >>> a = Permutation(1, 2) + >>> b = Permutation(0, 1) + >>> G = PermutationGroup([a, b]) + >>> cst = Coset(a, G, dir="+") + >>> cst.is_right_coset + True + + """ + return str(self._dir) == '+' + + def as_list(self): + """ + Return all the elements of coset in the form of list. + """ + g = self.args[0] + H = self.args[1] + cst = [] + if str(self._dir) == '+': + for h in H.elements: + cst.append(h*g) + else: + for h in H.elements: + cst.append(g*h) + return cst diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/prufer.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/prufer.py new file mode 100644 index 0000000000000000000000000000000000000000..c1241e055eb1fdf697ab06dae07ee0b322bf4d6d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/prufer.py @@ -0,0 +1,436 @@ +from sympy.core import Basic +from sympy.core.containers import Tuple +from sympy.tensor.array import Array +from sympy.core.sympify import _sympify +from sympy.utilities.iterables import flatten, iterable +from sympy.utilities.misc import as_int + +from collections import defaultdict + + +class Prufer(Basic): + """ + The Prufer correspondence is an algorithm that describes the + bijection between labeled trees and the Prufer code. A Prufer + code of a labeled tree is unique up to isomorphism and has + a length of n - 2. + + Prufer sequences were first used by Heinz Prufer to give a + proof of Cayley's formula. + + References + ========== + + .. [1] https://mathworld.wolfram.com/LabeledTree.html + + """ + _prufer_repr = None + _tree_repr = None + _nodes = None + _rank = None + + @property + def prufer_repr(self): + """Returns Prufer sequence for the Prufer object. 
+ + This sequence is found by removing the highest numbered vertex, + recording the node it was attached to, and continuing until only + two vertices remain. The Prufer sequence is the list of recorded nodes. + + Examples + ======== + + >>> from sympy.combinatorics.prufer import Prufer + >>> Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]).prufer_repr + [3, 3, 3, 4] + >>> Prufer([1, 0, 0]).prufer_repr + [1, 0, 0] + + See Also + ======== + + to_prufer + + """ + if self._prufer_repr is None: + self._prufer_repr = self.to_prufer(self._tree_repr[:], self.nodes) + return self._prufer_repr + + @property + def tree_repr(self): + """Returns the tree representation of the Prufer object. + + Examples + ======== + + >>> from sympy.combinatorics.prufer import Prufer + >>> Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]).tree_repr + [[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]] + >>> Prufer([1, 0, 0]).tree_repr + [[1, 2], [0, 1], [0, 3], [0, 4]] + + See Also + ======== + + to_tree + + """ + if self._tree_repr is None: + self._tree_repr = self.to_tree(self._prufer_repr[:]) + return self._tree_repr + + @property + def nodes(self): + """Returns the number of nodes in the tree. + + Examples + ======== + + >>> from sympy.combinatorics.prufer import Prufer + >>> Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]).nodes + 6 + >>> Prufer([1, 0, 0]).nodes + 5 + + """ + return self._nodes + + @property + def rank(self): + """Returns the rank of the Prufer sequence. + + Examples + ======== + + >>> from sympy.combinatorics.prufer import Prufer + >>> p = Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]) + >>> p.rank + 778 + >>> p.next(1).rank + 779 + >>> p.prev().rank + 777 + + See Also + ======== + + prufer_rank, next, prev, size + + """ + if self._rank is None: + self._rank = self.prufer_rank() + return self._rank + + @property + def size(self): + """Return the number of possible trees of this Prufer object. + + Examples + ======== + + >>> from sympy.combinatorics.prufer import Prufer + >>> Prufer([0]*4).size == Prufer([6]*4).size == 1296 + True + + See Also + ======== + + prufer_rank, rank, next, prev + + """ + return self.prev(self.rank).prev().rank + 1 + + @staticmethod + def to_prufer(tree, n): + """Return the Prufer sequence for a tree given as a list of edges where + ``n`` is the number of nodes in the tree. + + Examples + ======== + + >>> from sympy.combinatorics.prufer import Prufer + >>> a = Prufer([[0, 1], [0, 2], [0, 3]]) + >>> a.prufer_repr + [0, 0] + >>> Prufer.to_prufer([[0, 1], [0, 2], [0, 3]], 4) + [0, 0] + + See Also + ======== + prufer_repr: returns Prufer sequence of a Prufer object. + + """ + d = defaultdict(int) + L = [] + for edge in tree: + # Increment the value of the corresponding + # node in the degree list as we encounter an + # edge involving it. + d[edge[0]] += 1 + d[edge[1]] += 1 + for i in range(n - 2): + # find the smallest leaf + for x in range(n): + if d[x] == 1: + break + # find the node it was connected to + y = None + for edge in tree: + if x == edge[0]: + y = edge[1] + elif x == edge[1]: + y = edge[0] + if y is not None: + break + # record and update + L.append(y) + for j in (x, y): + d[j] -= 1 + if not d[j]: + d.pop(j) + tree.remove(edge) + return L + + @staticmethod + def to_tree(prufer): + """Return the tree (as a list of edges) of the given Prufer sequence. 
+ + Examples + ======== + + >>> from sympy.combinatorics.prufer import Prufer + >>> a = Prufer([0, 2], 4) + >>> a.tree_repr + [[0, 1], [0, 2], [2, 3]] + >>> Prufer.to_tree([0, 2]) + [[0, 1], [0, 2], [2, 3]] + + References + ========== + + .. [1] https://hamberg.no/erlend/posts/2010-11-06-prufer-sequence-compact-tree-representation.html + + See Also + ======== + tree_repr: returns tree representation of a Prufer object. + + """ + tree = [] + last = [] + n = len(prufer) + 2 + d = defaultdict(lambda: 1) + for p in prufer: + d[p] += 1 + for i in prufer: + for j in range(n): + # find the smallest leaf (degree = 1) + if d[j] == 1: + break + # (i, j) is the new edge that we append to the tree + # and remove from the degree dictionary + d[i] -= 1 + d[j] -= 1 + tree.append(sorted([i, j])) + last = [i for i in range(n) if d[i] == 1] or [0, 1] + tree.append(last) + + return tree + + @staticmethod + def edges(*runs): + """Return a list of edges and the number of nodes from the given runs + that connect nodes in an integer-labelled tree. + + All node numbers will be shifted so that the minimum node is 0. It is + not a problem if edges are repeated in the runs; only unique edges are + returned. There is no assumption made about what the range of the node + labels should be, but all nodes from the smallest through the largest + must be present. + + Examples + ======== + + >>> from sympy.combinatorics.prufer import Prufer + >>> Prufer.edges([1, 2, 3], [2, 4, 5]) # a T + ([[0, 1], [1, 2], [1, 3], [3, 4]], 5) + + Duplicate edges are removed: + + >>> Prufer.edges([0, 1, 2, 3], [1, 4, 5], [1, 4, 6]) # a K + ([[0, 1], [1, 2], [1, 4], [2, 3], [4, 5], [4, 6]], 7) + + """ + e = set() + nmin = runs[0][0] + for r in runs: + for i in range(len(r) - 1): + a, b = r[i: i + 2] + if b < a: + a, b = b, a + e.add((a, b)) + rv = [] + got = set() + nmin = nmax = None + for ei in e: + for i in ei: + got.add(i) + nmin = min(ei[0], nmin) if nmin is not None else ei[0] + nmax = max(ei[1], nmax) if nmax is not None else ei[1] + rv.append(list(ei)) + missing = set(range(nmin, nmax + 1)) - got + if missing: + missing = [i + nmin for i in missing] + if len(missing) == 1: + msg = 'Node %s is missing.' % missing.pop() + else: + msg = 'Nodes %s are missing.' % sorted(missing) + raise ValueError(msg) + if nmin != 0: + for i, ei in enumerate(rv): + rv[i] = [n - nmin for n in ei] + nmax -= nmin + return sorted(rv), nmax + 1 + + def prufer_rank(self): + """Computes the rank of a Prufer sequence. + + Examples + ======== + + >>> from sympy.combinatorics.prufer import Prufer + >>> a = Prufer([[0, 1], [0, 2], [0, 3]]) + >>> a.prufer_rank() + 0 + + See Also + ======== + + rank, next, prev, size + + """ + r = 0 + p = 1 + for i in range(self.nodes - 3, -1, -1): + r += p*self.prufer_repr[i] + p *= self.nodes + return r + + @classmethod + def unrank(self, rank, n): + """Finds the unranked Prufer sequence. + + Examples + ======== + + >>> from sympy.combinatorics.prufer import Prufer + >>> Prufer.unrank(0, 4) + Prufer([0, 0]) + + """ + n, rank = as_int(n), as_int(rank) + L = defaultdict(int) + for i in range(n - 3, -1, -1): + L[i] = rank % n + rank = (rank - L[i])//n + return Prufer([L[i] for i in range(len(L))]) + + def __new__(cls, *args, **kw_args): + """The constructor for the Prufer object. 
+ + Examples + ======== + + >>> from sympy.combinatorics.prufer import Prufer + + A Prufer object can be constructed from a list of edges: + + >>> a = Prufer([[0, 1], [0, 2], [0, 3]]) + >>> a.prufer_repr + [0, 0] + + If the number of nodes is given, no checking of the nodes will + be performed; it will be assumed that nodes 0 through n - 1 are + present: + + >>> Prufer([[0, 1], [0, 2], [0, 3]], 4) + Prufer([[0, 1], [0, 2], [0, 3]], 4) + + A Prufer object can be constructed from a Prufer sequence: + + >>> b = Prufer([1, 3]) + >>> b.tree_repr + [[0, 1], [1, 3], [2, 3]] + + """ + arg0 = Array(args[0]) if args[0] else Tuple() + args = (arg0,) + tuple(_sympify(arg) for arg in args[1:]) + ret_obj = Basic.__new__(cls, *args, **kw_args) + args = [list(args[0])] + if args[0] and iterable(args[0][0]): + if not args[0][0]: + raise ValueError( + 'Prufer expects at least one edge in the tree.') + if len(args) > 1: + nnodes = args[1] + else: + nodes = set(flatten(args[0])) + nnodes = max(nodes) + 1 + if nnodes != len(nodes): + missing = set(range(nnodes)) - nodes + if len(missing) == 1: + msg = 'Node %s is missing.' % missing.pop() + else: + msg = 'Nodes %s are missing.' % sorted(missing) + raise ValueError(msg) + ret_obj._tree_repr = [list(i) for i in args[0]] + ret_obj._nodes = nnodes + else: + ret_obj._prufer_repr = args[0] + ret_obj._nodes = len(ret_obj._prufer_repr) + 2 + return ret_obj + + def next(self, delta=1): + """Generates the Prufer sequence that is delta beyond the current one. + + Examples + ======== + + >>> from sympy.combinatorics.prufer import Prufer + >>> a = Prufer([[0, 1], [0, 2], [0, 3]]) + >>> b = a.next(1) # == a.next() + >>> b.tree_repr + [[0, 2], [0, 1], [1, 3]] + >>> b.rank + 1 + + See Also + ======== + + prufer_rank, rank, prev, size + + """ + return Prufer.unrank(self.rank + delta, self.nodes) + + def prev(self, delta=1): + """Generates the Prufer sequence that is -delta before the current one. + + Examples + ======== + + >>> from sympy.combinatorics.prufer import Prufer + >>> a = Prufer([[0, 1], [1, 2], [2, 3], [1, 4]]) + >>> a.rank + 36 + >>> b = a.prev() + >>> b + Prufer([1, 2, 0]) + >>> b.rank + 35 + + See Also + ======== + + prufer_rank, rank, next, size + + """ + return Prufer.unrank(self.rank -delta, self.nodes) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/subsets.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/subsets.py new file mode 100644 index 0000000000000000000000000000000000000000..e540cb2395cb27e04c9d513831cb834a05ec2abd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/subsets.py @@ -0,0 +1,619 @@ +from itertools import combinations + +from sympy.combinatorics.graycode import GrayCode + + +class Subset(): + """ + Represents a basic subset object. + + Explanation + =========== + + We generate subsets using essentially two techniques, + binary enumeration and lexicographic enumeration. + The Subset class takes two arguments, the first one + describes the initial subset to consider and the second + describes the superset. + + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) + >>> a.next_binary().subset + ['b'] + >>> a.prev_binary().subset + ['c'] + """ + + _rank_binary = None + _rank_lex = None + _rank_graycode = None + _subset = None + _superset = None + + def __new__(cls, subset, superset): + """ + Default constructor. + + It takes the ``subset`` and its ``superset`` as its parameters. 
+ + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) + >>> a.subset + ['c', 'd'] + >>> a.superset + ['a', 'b', 'c', 'd'] + >>> a.size + 2 + """ + if len(subset) > len(superset): + raise ValueError('Invalid arguments have been provided. The ' + 'superset must be larger than the subset.') + for elem in subset: + if elem not in superset: + raise ValueError('The superset provided is invalid as it does ' + 'not contain the element {}'.format(elem)) + obj = object.__new__(cls) + obj._subset = subset + obj._superset = superset + return obj + + def __eq__(self, other): + """Return a boolean indicating whether a == b on the basis of + whether both objects are of the class Subset and if the values + of the subset and superset attributes are the same. + """ + if not isinstance(other, Subset): + return NotImplemented + return self.subset == other.subset and self.superset == other.superset + + def iterate_binary(self, k): + """ + This is a helper function. It iterates over the + binary subsets by ``k`` steps. This variable can be + both positive or negative. + + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) + >>> a.iterate_binary(-2).subset + ['d'] + >>> a = Subset(['a', 'b', 'c'], ['a', 'b', 'c', 'd']) + >>> a.iterate_binary(2).subset + [] + + See Also + ======== + + next_binary, prev_binary + """ + bin_list = Subset.bitlist_from_subset(self.subset, self.superset) + n = (int(''.join(bin_list), 2) + k) % 2**self.superset_size + bits = bin(n)[2:].rjust(self.superset_size, '0') + return Subset.subset_from_bitlist(self.superset, bits) + + def next_binary(self): + """ + Generates the next binary ordered subset. + + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) + >>> a.next_binary().subset + ['b'] + >>> a = Subset(['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd']) + >>> a.next_binary().subset + [] + + See Also + ======== + + prev_binary, iterate_binary + """ + return self.iterate_binary(1) + + def prev_binary(self): + """ + Generates the previous binary ordered subset. + + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> a = Subset([], ['a', 'b', 'c', 'd']) + >>> a.prev_binary().subset + ['a', 'b', 'c', 'd'] + >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) + >>> a.prev_binary().subset + ['c'] + + See Also + ======== + + next_binary, iterate_binary + """ + return self.iterate_binary(-1) + + def next_lexicographic(self): + """ + Generates the next lexicographically ordered subset. 
+ + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) + >>> a.next_lexicographic().subset + ['d'] + >>> a = Subset(['d'], ['a', 'b', 'c', 'd']) + >>> a.next_lexicographic().subset + [] + + See Also + ======== + + prev_lexicographic + """ + i = self.superset_size - 1 + indices = Subset.subset_indices(self.subset, self.superset) + + if i in indices: + if i - 1 in indices: + indices.remove(i - 1) + else: + indices.remove(i) + i = i - 1 + while i >= 0 and i not in indices: + i = i - 1 + if i >= 0: + indices.remove(i) + indices.append(i+1) + else: + while i not in indices and i >= 0: + i = i - 1 + indices.append(i + 1) + + ret_set = [] + super_set = self.superset + for i in indices: + ret_set.append(super_set[i]) + return Subset(ret_set, super_set) + + def prev_lexicographic(self): + """ + Generates the previous lexicographically ordered subset. + + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> a = Subset([], ['a', 'b', 'c', 'd']) + >>> a.prev_lexicographic().subset + ['d'] + >>> a = Subset(['c','d'], ['a', 'b', 'c', 'd']) + >>> a.prev_lexicographic().subset + ['c'] + + See Also + ======== + + next_lexicographic + """ + i = self.superset_size - 1 + indices = Subset.subset_indices(self.subset, self.superset) + + while i >= 0 and i not in indices: + i = i - 1 + + if i == 0 or i - 1 in indices: + indices.remove(i) + else: + if i >= 0: + indices.remove(i) + indices.append(i - 1) + indices.append(self.superset_size - 1) + + ret_set = [] + super_set = self.superset + for i in indices: + ret_set.append(super_set[i]) + return Subset(ret_set, super_set) + + def iterate_graycode(self, k): + """ + Helper function used for prev_gray and next_gray. + It performs ``k`` step overs to get the respective Gray codes. + + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> a = Subset([1, 2, 3], [1, 2, 3, 4]) + >>> a.iterate_graycode(3).subset + [1, 4] + >>> a.iterate_graycode(-2).subset + [1, 2, 4] + + See Also + ======== + + next_gray, prev_gray + """ + unranked_code = GrayCode.unrank(self.superset_size, + (self.rank_gray + k) % self.cardinality) + return Subset.subset_from_bitlist(self.superset, + unranked_code) + + def next_gray(self): + """ + Generates the next Gray code ordered subset. + + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> a = Subset([1, 2, 3], [1, 2, 3, 4]) + >>> a.next_gray().subset + [1, 3] + + See Also + ======== + + iterate_graycode, prev_gray + """ + return self.iterate_graycode(1) + + def prev_gray(self): + """ + Generates the previous Gray code ordered subset. + + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> a = Subset([2, 3, 4], [1, 2, 3, 4, 5]) + >>> a.prev_gray().subset + [2, 3, 4, 5] + + See Also + ======== + + iterate_graycode, next_gray + """ + return self.iterate_graycode(-1) + + @property + def rank_binary(self): + """ + Computes the binary ordered rank. + + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> a = Subset([], ['a','b','c','d']) + >>> a.rank_binary + 0 + >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) + >>> a.rank_binary + 3 + + See Also + ======== + + iterate_binary, unrank_binary + """ + if self._rank_binary is None: + self._rank_binary = int("".join( + Subset.bitlist_from_subset(self.subset, + self.superset)), 2) + return self._rank_binary + + @property + def rank_lexicographic(self): + """ + Computes the lexicographic ranking of the subset. 
+ + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) + >>> a.rank_lexicographic + 14 + >>> a = Subset([2, 4, 5], [1, 2, 3, 4, 5, 6]) + >>> a.rank_lexicographic + 43 + """ + if self._rank_lex is None: + def _ranklex(self, subset_index, i, n): + if subset_index == [] or i > n: + return 0 + if i in subset_index: + subset_index.remove(i) + return 1 + _ranklex(self, subset_index, i + 1, n) + return 2**(n - i - 1) + _ranklex(self, subset_index, i + 1, n) + indices = Subset.subset_indices(self.subset, self.superset) + self._rank_lex = _ranklex(self, indices, 0, self.superset_size) + return self._rank_lex + + @property + def rank_gray(self): + """ + Computes the Gray code ranking of the subset. + + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> a = Subset(['c','d'], ['a','b','c','d']) + >>> a.rank_gray + 2 + >>> a = Subset([2, 4, 5], [1, 2, 3, 4, 5, 6]) + >>> a.rank_gray + 27 + + See Also + ======== + + iterate_graycode, unrank_gray + """ + if self._rank_graycode is None: + bits = Subset.bitlist_from_subset(self.subset, self.superset) + self._rank_graycode = GrayCode(len(bits), start=bits).rank + return self._rank_graycode + + @property + def subset(self): + """ + Gets the subset represented by the current instance. + + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) + >>> a.subset + ['c', 'd'] + + See Also + ======== + + superset, size, superset_size, cardinality + """ + return self._subset + + @property + def size(self): + """ + Gets the size of the subset. + + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) + >>> a.size + 2 + + See Also + ======== + + subset, superset, superset_size, cardinality + """ + return len(self.subset) + + @property + def superset(self): + """ + Gets the superset of the subset. + + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) + >>> a.superset + ['a', 'b', 'c', 'd'] + + See Also + ======== + + subset, size, superset_size, cardinality + """ + return self._superset + + @property + def superset_size(self): + """ + Returns the size of the superset. + + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) + >>> a.superset_size + 4 + + See Also + ======== + + subset, superset, size, cardinality + """ + return len(self.superset) + + @property + def cardinality(self): + """ + Returns the number of all possible subsets. + + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) + >>> a.cardinality + 16 + + See Also + ======== + + subset, superset, size, superset_size + """ + return 2**(self.superset_size) + + @classmethod + def subset_from_bitlist(self, super_set, bitlist): + """ + Gets the subset defined by the bitlist. 
+ + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> Subset.subset_from_bitlist(['a', 'b', 'c', 'd'], '0011').subset + ['c', 'd'] + + See Also + ======== + + bitlist_from_subset + """ + if len(super_set) != len(bitlist): + raise ValueError("The sizes of the lists are not equal") + ret_set = [] + for i in range(len(bitlist)): + if bitlist[i] == '1': + ret_set.append(super_set[i]) + return Subset(ret_set, super_set) + + @classmethod + def bitlist_from_subset(self, subset, superset): + """ + Gets the bitlist corresponding to a subset. + + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> Subset.bitlist_from_subset(['c', 'd'], ['a', 'b', 'c', 'd']) + '0011' + + See Also + ======== + + subset_from_bitlist + """ + bitlist = ['0'] * len(superset) + if isinstance(subset, Subset): + subset = subset.subset + for i in Subset.subset_indices(subset, superset): + bitlist[i] = '1' + return ''.join(bitlist) + + @classmethod + def unrank_binary(self, rank, superset): + """ + Gets the binary ordered subset of the specified rank. + + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> Subset.unrank_binary(4, ['a', 'b', 'c', 'd']).subset + ['b'] + + See Also + ======== + + iterate_binary, rank_binary + """ + bits = bin(rank)[2:].rjust(len(superset), '0') + return Subset.subset_from_bitlist(superset, bits) + + @classmethod + def unrank_gray(self, rank, superset): + """ + Gets the Gray code ordered subset of the specified rank. + + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> Subset.unrank_gray(4, ['a', 'b', 'c']).subset + ['a', 'b'] + >>> Subset.unrank_gray(0, ['a', 'b', 'c']).subset + [] + + See Also + ======== + + iterate_graycode, rank_gray + """ + graycode_bitlist = GrayCode.unrank(len(superset), rank) + return Subset.subset_from_bitlist(superset, graycode_bitlist) + + @classmethod + def subset_indices(self, subset, superset): + """Return indices of subset in superset in a list; the list is empty + if all elements of ``subset`` are not in ``superset``. + + Examples + ======== + + >>> from sympy.combinatorics import Subset + >>> superset = [1, 3, 2, 5, 4] + >>> Subset.subset_indices([3, 2, 1], superset) + [1, 2, 0] + >>> Subset.subset_indices([1, 6], superset) + [] + >>> Subset.subset_indices([], superset) + [] + + """ + a, b = superset, subset + sb = set(b) + d = {} + for i, ai in enumerate(a): + if ai in sb: + d[ai] = i + sb.remove(ai) + if not sb: + break + else: + return [] + return [d[bi] for bi in b] + + +def ksubsets(superset, k): + """ + Finds the subsets of size ``k`` in lexicographic order. + + This uses the itertools generator. 
+ + Examples + ======== + + >>> from sympy.combinatorics.subsets import ksubsets + >>> list(ksubsets([1, 2, 3], 2)) + [(1, 2), (1, 3), (2, 3)] + >>> list(ksubsets([1, 2, 3, 4, 5], 2)) + [(1, 2), (1, 3), (1, 4), (1, 5), (2, 3), (2, 4), \ + (2, 5), (3, 4), (3, 5), (4, 5)] + + See Also + ======== + + Subset + """ + return combinations(superset, k) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tensor_can.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tensor_can.py new file mode 100644 index 0000000000000000000000000000000000000000..f132a3faffb0d0066a5960efa412e4d8f96db792 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tensor_can.py @@ -0,0 +1,1190 @@ +from sympy.combinatorics.permutations import Permutation, _af_rmul, \ + _af_invert, _af_new +from sympy.combinatorics.perm_groups import PermutationGroup, _orbit, \ + _orbit_transversal +from sympy.combinatorics.util import _distribute_gens_by_base, \ + _orbits_transversals_from_bsgs + +""" + References for tensor canonicalization: + + [1] R. Portugal "Algorithmic simplification of tensor expressions", + J. Phys. A 32 (1999) 7779-7789 + + [2] R. Portugal, B.F. Svaiter "Group-theoretic Approach for Symbolic + Tensor Manipulation: I. Free Indices" + arXiv:math-ph/0107031v1 + + [3] L.R.U. Manssur, R. Portugal "Group-theoretic Approach for Symbolic + Tensor Manipulation: II. Dummy Indices" + arXiv:math-ph/0107032v1 + + [4] xperm.c part of XPerm written by J. M. Martin-Garcia + http://www.xact.es/index.html +""" + + +def dummy_sgs(dummies, sym, n): + """ + Return the strong generators for dummy indices. + + Parameters + ========== + + dummies : List of dummy indices. + `dummies[2k], dummies[2k+1]` are paired indices. + In base form, the dummy indices are always in + consecutive positions. + sym : symmetry under interchange of contracted dummies:: + * None no symmetry + * 0 commuting + * 1 anticommuting + + n : number of indices + + Examples + ======== + + >>> from sympy.combinatorics.tensor_can import dummy_sgs + >>> dummy_sgs(list(range(2, 8)), 0, 8) + [[0, 1, 3, 2, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 5, 4, 6, 7, 8, 9], + [0, 1, 2, 3, 4, 5, 7, 6, 8, 9], [0, 1, 4, 5, 2, 3, 6, 7, 8, 9], + [0, 1, 2, 3, 6, 7, 4, 5, 8, 9]] + """ + if len(dummies) > n: + raise ValueError("List too large") + res = [] + # exchange of contravariant and covariant indices + if sym is not None: + for j in dummies[::2]: + a = list(range(n + 2)) + if sym == 1: + a[n] = n + 1 + a[n + 1] = n + a[j], a[j + 1] = a[j + 1], a[j] + res.append(a) + # rename dummy indices + for j in dummies[:-3:2]: + a = list(range(n + 2)) + a[j:j + 4] = a[j + 2], a[j + 3], a[j], a[j + 1] + res.append(a) + return res + + +def _min_dummies(dummies, sym, indices): + """ + Return list of minima of the orbits of indices in group of dummies. + See ``double_coset_can_rep`` for the description of ``dummies`` and ``sym``. + ``indices`` is the initial list of dummy indices. 
+ + Examples + ======== + + >>> from sympy.combinatorics.tensor_can import _min_dummies + >>> _min_dummies([list(range(2, 8))], [0], list(range(10))) + [0, 1, 2, 2, 2, 2, 2, 2, 8, 9] + """ + num_types = len(sym) + m = [min(dx) if dx else None for dx in dummies] + res = indices[:] + for i in range(num_types): + for c, i in enumerate(indices): + for j in range(num_types): + if i in dummies[j]: + res[c] = m[j] + break + return res + + +def _trace_S(s, j, b, S_cosets): + """ + Return the representative h satisfying s[h[b]] == j + + If there is not such a representative return None + """ + for h in S_cosets[b]: + if s[h[b]] == j: + return h + return None + + +def _trace_D(gj, p_i, Dxtrav): + """ + Return the representative h satisfying h[gj] == p_i + + If there is not such a representative return None + """ + for h in Dxtrav: + if h[gj] == p_i: + return h + return None + + +def _dumx_remove(dumx, dumx_flat, p0): + """ + remove p0 from dumx + """ + res = [] + for dx in dumx: + if p0 not in dx: + res.append(dx) + continue + k = dx.index(p0) + if k % 2 == 0: + p0_paired = dx[k + 1] + else: + p0_paired = dx[k - 1] + dx.remove(p0) + dx.remove(p0_paired) + dumx_flat.remove(p0) + dumx_flat.remove(p0_paired) + res.append(dx) + + +def transversal2coset(size, base, transversal): + a = [] + j = 0 + for i in range(size): + if i in base: + a.append(sorted(transversal[j].values())) + j += 1 + else: + a.append([list(range(size))]) + j = len(a) - 1 + while a[j] == [list(range(size))]: + j -= 1 + return a[:j + 1] + + +def double_coset_can_rep(dummies, sym, b_S, sgens, S_transversals, g): + r""" + Butler-Portugal algorithm for tensor canonicalization with dummy indices. + + Parameters + ========== + + dummies + list of lists of dummy indices, + one list for each type of index; + the dummy indices are put in order contravariant, covariant + [d0, -d0, d1, -d1, ...]. + + sym + list of the symmetries of the index metric for each type. + + possible symmetries of the metrics + * 0 symmetric + * 1 antisymmetric + * None no symmetry + + b_S + base of a minimal slot symmetry BSGS. + + sgens + generators of the slot symmetry BSGS. + + S_transversals + transversals for the slot BSGS. + + g + permutation representing the tensor. + + Returns + ======= + + Return 0 if the tensor is zero, else return the array form of + the permutation representing the canonical form of the tensor. + + Notes + ===== + + A tensor with dummy indices can be represented in a number + of equivalent ways which typically grows exponentially with + the number of indices. To be able to establish if two tensors + with many indices are equal becomes computationally very slow + in absence of an efficient algorithm. + + The Butler-Portugal algorithm [3] is an efficient algorithm to + put tensors in canonical form, solving the above problem. + + Portugal observed that a tensor can be represented by a permutation, + and that the class of tensors equivalent to it under slot and dummy + symmetries is equivalent to the double coset `D*g*S` + (Note: in this documentation we use the conventions for multiplication + of permutations p, q with (p*q)(i) = p[q[i]] which is opposite + to the one used in the Permutation class) + + Using the algorithm by Butler to find a representative of the + double coset one can find a canonical form for the tensor. 
+ + To see this correspondence, + let `g` be a permutation in array form; a tensor with indices `ind` + (the indices including both the contravariant and the covariant ones) + can be written as + + `t = T(ind[g[0]], \dots, ind[g[n-1]])`, + + where `n = len(ind)`; + `g` has size `n + 2`, the last two indices for the sign of the tensor + (trick introduced in [4]). + + A slot symmetry transformation `s` is a permutation acting on the slots + `t \rightarrow T(ind[(g*s)[0]], \dots, ind[(g*s)[n-1]])` + + A dummy symmetry transformation acts on `ind` + `t \rightarrow T(ind[(d*g)[0]], \dots, ind[(d*g)[n-1]])` + + Being interested only in the transformations of the tensor under + these symmetries, one can represent the tensor by `g`, which transforms + as + + `g -> d*g*s`, so it belongs to the coset `D*g*S`, or in other words + to the set of all permutations allowed by the slot and dummy symmetries. + + Let us explain the conventions by an example. + + Given a tensor `T^{d3 d2 d1}{}_{d1 d2 d3}` with the slot symmetries + `T^{a0 a1 a2 a3 a4 a5} = -T^{a2 a1 a0 a3 a4 a5}` + + `T^{a0 a1 a2 a3 a4 a5} = -T^{a4 a1 a2 a3 a0 a5}` + + and symmetric metric, find the tensor equivalent to it which + is the lowest under the ordering of indices: + lexicographic ordering `d1, d2, d3` and then contravariant + before covariant index; that is the canonical form of the tensor. + + The canonical form is `-T^{d1 d2 d3}{}_{d1 d2 d3}` + obtained using `T^{a0 a1 a2 a3 a4 a5} = -T^{a2 a1 a0 a3 a4 a5}`. + + To convert this problem in the input for this function, + use the following ordering of the index names + (- for covariant for short) `d1, -d1, d2, -d2, d3, -d3` + + `T^{d3 d2 d1}{}_{d1 d2 d3}` corresponds to `g = [4, 2, 0, 1, 3, 5, 6, 7]` + where the last two indices are for the sign + + `sgens = [Permutation(0, 2)(6, 7), Permutation(0, 4)(6, 7)]` + + sgens[0] is the slot symmetry `-(0, 2)` + `T^{a0 a1 a2 a3 a4 a5} = -T^{a2 a1 a0 a3 a4 a5}` + + sgens[1] is the slot symmetry `-(0, 4)` + `T^{a0 a1 a2 a3 a4 a5} = -T^{a4 a1 a2 a3 a0 a5}` + + The dummy symmetry group D is generated by the strong base generators + `[(0, 1), (2, 3), (4, 5), (0, 2)(1, 3), (0, 4)(1, 5)]` + where the first three interchange covariant and contravariant + positions of the same index (d1 <-> -d1) and the last two interchange + the dummy indices themselves (d1 <-> d2). + + The dummy symmetry acts from the left + `d = [1, 0, 2, 3, 4, 5, 6, 7]` exchange `d1 \leftrightarrow -d1` + `T^{d3 d2 d1}{}_{d1 d2 d3} == T^{d3 d2}{}_{d1}{}^{d1}{}_{d2 d3}` + + `g=[4, 2, 0, 1, 3, 5, 6, 7] -> [4, 2, 1, 0, 3, 5, 6, 7] = _af_rmul(d, g)` + which differs from `_af_rmul(g, d)`. + + The slot symmetry acts from the right + `s = [2, 1, 0, 3, 4, 5, 7, 6]` exchanges slots 0 and 2 and changes sign + `T^{d3 d2 d1}{}_{d1 d2 d3} == -T^{d1 d2 d3}{}_{d1 d2 d3}` + + `g=[4,2,0,1,3,5,6,7] -> [0, 2, 4, 1, 3, 5, 7, 6] = _af_rmul(g, s)` + + Example in which the tensor is zero, same slot symmetries as above: + `T^{d2}{}_{d1 d3}{}^{d1 d3}{}_{d2}` + + `= -T^{d3}{}_{d1 d3}{}^{d1 d2}{}_{d2}` under slot symmetry `-(0,4)`; + + `= T_{d3 d1}{}^{d3}{}^{d1 d2}{}_{d2}` under slot symmetry `-(0,2)`; + + `= T^{d3}{}_{d1 d3}{}^{d1 d2}{}_{d2}` symmetric metric; + + `= 0` since two of these lines have tensors differ only for the sign. 
+ + The double coset D*g*S consists of permutations `h = d*g*s` corresponding + to equivalent tensors; if there are two `h` which are the same apart + from the sign, return zero; otherwise + choose as representative the tensor with indices + ordered lexicographically according to `[d1, -d1, d2, -d2, d3, -d3]` + that is ``rep = min(D*g*S) = min([d*g*s for d in D for s in S])`` + + The indices are fixed one by one; first choose the lowest index + for slot 0, then the lowest remaining index for slot 1, etc. + Doing this one obtains a chain of stabilizers + + `S \rightarrow S_{b0} \rightarrow S_{b0,b1} \rightarrow \dots` and + `D \rightarrow D_{p0} \rightarrow D_{p0,p1} \rightarrow \dots` + + where ``[b0, b1, ...] = range(b)`` is a base of the symmetric group; + the strong base `b_S` of S is an ordered sublist of it; + therefore it is sufficient to compute once the + strong base generators of S using the Schreier-Sims algorithm; + the stabilizers of the strong base generators are the + strong base generators of the stabilizer subgroup. + + ``dbase = [p0, p1, ...]`` is not in general in lexicographic order, + so that one must recompute the strong base generators each time; + however this is trivial, there is no need to use the Schreier-Sims + algorithm for D. + + The algorithm keeps a TAB of elements `(s_i, d_i, h_i)` + where `h_i = d_i \times g \times s_i` satisfying `h_i[j] = p_j` for `0 \le j < i` + starting from `s_0 = id, d_0 = id, h_0 = g`. + + The equations `h_0[0] = p_0, h_1[1] = p_1, \dots` are solved in this order, + choosing each time the lowest possible value of p_i + + For `j < i` + `d_i*g*s_i*S_{b_0, \dots, b_{i-1}}*b_j = D_{p_0, \dots, p_{i-1}}*p_j` + so that for dx in `D_{p_0,\dots,p_{i-1}}` and sx in + `S_{base[0], \dots, base[i-1]}` one has `dx*d_i*g*s_i*sx*b_j = p_j` + + Search for dx, sx such that this equation holds for `j = i`; + it can be written as `s_i*sx*b_j = J, dx*d_i*g*J = p_j` + `sx*b_j = s_i**-1*J; sx = trace(s_i**-1, S_{b_0,...,b_{i-1}})` + `dx**-1*p_j = d_i*g*J; dx = trace(d_i*g*J, D_{p_0,...,p_{i-1}})` + + `s_{i+1} = s_i*trace(s_i**-1*J, S_{b_0,...,b_{i-1}})` + `d_{i+1} = trace(d_i*g*J, D_{p_0,...,p_{i-1}})**-1*d_i` + `h_{i+1}*b_i = d_{i+1}*g*s_{i+1}*b_i = p_i` + + `h_n*b_j = p_j` for all j, so that `h_n` is the solution. + + Add the found `(s, d, h)` to TAB1. + + At the end of the iteration sort TAB1 with respect to the `h`; + if there are two consecutive `h` in TAB1 which differ only for the + sign, the tensor is zero, so return 0; + if there are two consecutive `h` which are equal, keep only one. + + Then stabilize the slot generators under `i` and the dummy generators + under `p_i`. + + Assign `TAB = TAB1` at the end of the iteration step. + + At the end `TAB` contains a unique `(s, d, h)`, since all the slots + of the tensor `h` have been fixed to have the minimum value according + to the symmetries. The algorithm returns `h`. + + It is important that the slot BSGS has lexicographic minimal base, + otherwise there is an `i` which does not belong to the slot base + for which `p_i` is fixed by the dummy symmetry only, while `i` + is not invariant from the slot stabilizer, so `p_i` is not in + general the minimal value. + + This algorithm differs slightly from the original algorithm [3]: + the canonical form is minimal lexicographically, and + the BSGS has minimal base under lexicographic order. + Equal tensors `h` are eliminated from TAB. 
+ + + Examples + ======== + + >>> from sympy.combinatorics.permutations import Permutation + >>> from sympy.combinatorics.tensor_can import double_coset_can_rep, get_transversals + >>> gens = [Permutation(x) for x in [[2, 1, 0, 3, 4, 5, 7, 6], [4, 1, 2, 3, 0, 5, 7, 6]]] + >>> base = [0, 2] + >>> g = Permutation([4, 2, 0, 1, 3, 5, 6, 7]) + >>> transversals = get_transversals(base, gens) + >>> double_coset_can_rep([list(range(6))], [0], base, gens, transversals, g) + [0, 1, 2, 3, 4, 5, 7, 6] + + >>> g = Permutation([4, 1, 3, 0, 5, 2, 6, 7]) + >>> double_coset_can_rep([list(range(6))], [0], base, gens, transversals, g) + 0 + """ + size = g.size + g = g.array_form + num_dummies = size - 2 + indices = list(range(num_dummies)) + all_metrics_with_sym = not any(_ is None for _ in sym) + num_types = len(sym) + dumx = dummies[:] + dumx_flat = [] + for dx in dumx: + dumx_flat.extend(dx) + b_S = b_S[:] + sgensx = [h._array_form for h in sgens] + if b_S: + S_transversals = transversal2coset(size, b_S, S_transversals) + # strong generating set for D + dsgsx = [] + for i in range(num_types): + dsgsx.extend(dummy_sgs(dumx[i], sym[i], num_dummies)) + idn = list(range(size)) + # TAB = list of entries (s, d, h) where h = _af_rmuln(d,g,s) + # for short, in the following d*g*s means _af_rmuln(d,g,s) + TAB = [(idn, idn, g)] + for i in range(size - 2): + b = i + testb = b in b_S and sgensx + if testb: + sgensx1 = [_af_new(_) for _ in sgensx] + deltab = _orbit(size, sgensx1, b) + else: + deltab = {b} + # p1 = min(IMAGES) = min(Union D_p*h*deltab for h in TAB) + if all_metrics_with_sym: + md = _min_dummies(dumx, sym, indices) + else: + md = [min(_orbit(size, [_af_new( + ddx) for ddx in dsgsx], ii)) for ii in range(size - 2)] + + p_i = min([min([md[h[x]] for x in deltab]) for s, d, h in TAB]) + dsgsx1 = [_af_new(_) for _ in dsgsx] + Dxtrav = _orbit_transversal(size, dsgsx1, p_i, False, af=True) \ + if dsgsx else None + if Dxtrav: + Dxtrav = [_af_invert(x) for x in Dxtrav] + # compute the orbit of p_i + for ii in range(num_types): + if p_i in dumx[ii]: + # the orbit is made by all the indices in dum[ii] + if sym[ii] is not None: + deltap = dumx[ii] + else: + # the orbit is made by all the even indices if p_i + # is even, by all the odd indices if p_i is odd + p_i_index = dumx[ii].index(p_i) % 2 + deltap = dumx[ii][p_i_index::2] + break + else: + deltap = [p_i] + TAB1 = [] + while TAB: + s, d, h = TAB.pop() + if min([md[h[x]] for x in deltab]) != p_i: + continue + deltab1 = [x for x in deltab if md[h[x]] == p_i] + # NEXT = s*deltab1 intersection (d*g)**-1*deltap + dg = _af_rmul(d, g) + dginv = _af_invert(dg) + sdeltab = [s[x] for x in deltab1] + gdeltap = [dginv[x] for x in deltap] + NEXT = [x for x in sdeltab if x in gdeltap] + # d, s satisfy + # d*g*s*base[i-1] = p_{i-1}; using the stabilizers + # d*g*s*S_{base[0],...,base[i-1]}*base[i-1] = + # D_{p_0,...,p_{i-1}}*p_{i-1} + # so that to find d1, s1 satisfying d1*g*s1*b = p_i + # one can look for dx in D_{p_0,...,p_{i-1}} and + # sx in S_{base[0],...,base[i-1]} + # d1 = dx*d; s1 = s*sx + # d1*g*s1*b = dx*d*g*s*sx*b = p_i + for j in NEXT: + if testb: + # solve s1*b = j with s1 = s*sx for some element sx + # of the stabilizer of ..., base[i-1] + # sx*b = s**-1*j; sx = _trace_S(s, j,...) + # s1 = s*trace_S(s**-1*j,...) 
+ s1 = _trace_S(s, j, b, S_transversals) + if not s1: + continue + else: + s1 = [s[ix] for ix in s1] + else: + s1 = s + # assert s1[b] == j # invariant + # solve d1*g*j = p_i with d1 = dx*d for some element dg + # of the stabilizer of ..., p_{i-1} + # dx**-1*p_i = d*g*j; dx**-1 = trace_D(d*g*j,...) + # d1 = trace_D(d*g*j,...)**-1*d + # to save an inversion in the inner loop; notice we did + # Dxtrav = [perm_af_invert(x) for x in Dxtrav] out of the loop + if Dxtrav: + d1 = _trace_D(dg[j], p_i, Dxtrav) + if not d1: + continue + else: + if p_i != dg[j]: + continue + d1 = idn + assert d1[dg[j]] == p_i # invariant + d1 = [d1[ix] for ix in d] + h1 = [d1[g[ix]] for ix in s1] + # assert h1[b] == p_i # invariant + TAB1.append((s1, d1, h1)) + + # if TAB contains equal permutations, keep only one of them; + # if TAB contains equal permutations up to the sign, return 0 + TAB1.sort(key=lambda x: x[-1]) + prev = [0] * size + while TAB1: + s, d, h = TAB1.pop() + if h[:-2] == prev[:-2]: + if h[-1] != prev[-1]: + return 0 + else: + TAB.append((s, d, h)) + prev = h + + # stabilize the SGS + sgensx = [h for h in sgensx if h[b] == b] + if b in b_S: + b_S.remove(b) + _dumx_remove(dumx, dumx_flat, p_i) + dsgsx = [] + for i in range(num_types): + dsgsx.extend(dummy_sgs(dumx[i], sym[i], num_dummies)) + return TAB[0][-1] + + +def canonical_free(base, gens, g, num_free): + """ + Canonicalization of a tensor with respect to free indices + choosing the minimum with respect to lexicographical ordering + in the free indices. + + Explanation + =========== + + ``base``, ``gens`` BSGS for slot permutation group + ``g`` permutation representing the tensor + ``num_free`` number of free indices + The indices must be ordered with first the free indices + + See explanation in double_coset_can_rep + The algorithm is a variation of the one given in [2]. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> from sympy.combinatorics.tensor_can import canonical_free + >>> gens = [[1, 0, 2, 3, 5, 4], [2, 3, 0, 1, 4, 5],[0, 1, 3, 2, 5, 4]] + >>> gens = [Permutation(h) for h in gens] + >>> base = [0, 2] + >>> g = Permutation([2, 1, 0, 3, 4, 5]) + >>> canonical_free(base, gens, g, 4) + [0, 3, 1, 2, 5, 4] + + Consider the product of Riemann tensors + ``T = R^{a}_{d0}^{d1,d2}*R_{d2,d1}^{d0,b}`` + The order of the indices is ``[a, b, d0, -d0, d1, -d1, d2, -d2]`` + The permutation corresponding to the tensor is + ``g = [0, 3, 4, 6, 7, 5, 2, 1, 8, 9]`` + + In particular ``a`` is position ``0``, ``b`` is in position ``9``. + Use the slot symmetries to get `T` is a form which is the minimal + in lexicographic order in the free indices ``a`` and ``b``, e.g. 
+ ``-R^{a}_{d0}^{d1,d2}*R^{b,d0}_{d2,d1}`` corresponding to + ``[0, 3, 4, 6, 1, 2, 7, 5, 9, 8]`` + + >>> from sympy.combinatorics.tensor_can import riemann_bsgs, tensor_gens + >>> base, gens = riemann_bsgs + >>> size, sbase, sgens = tensor_gens(base, gens, [[], []], 0) + >>> g = Permutation([0, 3, 4, 6, 7, 5, 2, 1, 8, 9]) + >>> canonical_free(sbase, [Permutation(h) for h in sgens], g, 2) + [0, 3, 4, 6, 1, 2, 7, 5, 9, 8] + """ + g = g.array_form + size = len(g) + if not base: + return g[:] + + transversals = get_transversals(base, gens) + for x in sorted(g[:-2]): + if x not in base: + base.append(x) + h = g + for i, transv in enumerate(transversals): + h_i = [size]*num_free + # find the element s in transversals[i] such that + # _af_rmul(h, s) has its free elements with the lowest position in h + s = None + for sk in transv.values(): + h1 = _af_rmul(h, sk) + hi = [h1.index(ix) for ix in range(num_free)] + if hi < h_i: + h_i = hi + s = sk + if s: + h = _af_rmul(h, s) + return h + + +def _get_map_slots(size, fixed_slots): + res = list(range(size)) + pos = 0 + for i in range(size): + if i in fixed_slots: + continue + res[i] = pos + pos += 1 + return res + + +def _lift_sgens(size, fixed_slots, free, s): + a = [] + j = k = 0 + fd = list(zip(fixed_slots, free)) + fd = [y for x, y in sorted(fd)] + num_free = len(free) + for i in range(size): + if i in fixed_slots: + a.append(fd[k]) + k += 1 + else: + a.append(s[j] + num_free) + j += 1 + return a + + +def canonicalize(g, dummies, msym, *v): + """ + canonicalize tensor formed by tensors + + Parameters + ========== + + g : permutation representing the tensor + + dummies : list representing the dummy indices + it can be a list of dummy indices of the same type + or a list of lists of dummy indices, one list for each + type of index; + the dummy indices must come after the free indices, + and put in order contravariant, covariant + [d0, -d0, d1,-d1,...] + + msym : symmetry of the metric(s) + it can be an integer or a list; + in the first case it is the symmetry of the dummy index metric; + in the second case it is the list of the symmetries of the + index metric for each type + + v : list, (base_i, gens_i, n_i, sym_i) for tensors of type `i` + + base_i, gens_i : BSGS for tensors of this type. + The BSGS should have minimal base under lexicographic ordering; + if not, an attempt is made do get the minimal BSGS; + in case of failure, + canonicalize_naive is used, which is much slower. + + n_i : number of tensors of type `i`. + + sym_i : symmetry under exchange of component tensors of type `i`. + + Both for msym and sym_i the cases are + * None no symmetry + * 0 commuting + * 1 anticommuting + + Returns + ======= + + 0 if the tensor is zero, else return the array form of + the permutation representing the canonical form of the tensor. + + Algorithm + ========= + + First one uses canonical_free to get the minimum tensor under + lexicographic order, using only the slot symmetries. + If the component tensors have not minimal BSGS, it is attempted + to find it; if the attempt fails canonicalize_naive + is used instead. + + Compute the residual slot symmetry keeping fixed the free indices + using tensor_gens(base, gens, list_free_indices, sym). + + Reduce the problem eliminating the free indices. + + Then use double_coset_can_rep and lift back the result reintroducing + the free indices. 
+ + Examples + ======== + + one type of index with commuting metric; + + `A_{a b}` and `B_{a b}` antisymmetric and commuting + + `T = A_{d0 d1} * B^{d0}{}_{d2} * B^{d2 d1}` + + `ord = [d0,-d0,d1,-d1,d2,-d2]` order of the indices + + g = [1, 3, 0, 5, 4, 2, 6, 7] + + `T_c = 0` + + >>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize, bsgs_direct_product + >>> from sympy.combinatorics import Permutation + >>> base2a, gens2a = get_symmetric_group_sgs(2, 1) + >>> t0 = (base2a, gens2a, 1, 0) + >>> t1 = (base2a, gens2a, 2, 0) + >>> g = Permutation([1, 3, 0, 5, 4, 2, 6, 7]) + >>> canonicalize(g, range(6), 0, t0, t1) + 0 + + same as above, but with `B_{a b}` anticommuting + + `T_c = -A^{d0 d1} * B_{d0}{}^{d2} * B_{d1 d2}` + + can = [0,2,1,4,3,5,7,6] + + >>> t1 = (base2a, gens2a, 2, 1) + >>> canonicalize(g, range(6), 0, t0, t1) + [0, 2, 1, 4, 3, 5, 7, 6] + + two types of indices `[a,b,c,d,e,f]` and `[m,n]`, in this order, + both with commuting metric + + `f^{a b c}` antisymmetric, commuting + + `A_{m a}` no symmetry, commuting + + `T = f^c{}_{d a} * f^f{}_{e b} * A_m{}^d * A^{m b} * A_n{}^a * A^{n e}` + + ord = [c,f,a,-a,b,-b,d,-d,e,-e,m,-m,n,-n] + + g = [0,7,3, 1,9,5, 11,6, 10,4, 13,2, 12,8, 14,15] + + The canonical tensor is + `T_c = -f^{c a b} * f^{f d e} * A^m{}_a * A_{m d} * A^n{}_b * A_{n e}` + + can = [0,2,4, 1,6,8, 10,3, 11,7, 12,5, 13,9, 15,14] + + >>> base_f, gens_f = get_symmetric_group_sgs(3, 1) + >>> base1, gens1 = get_symmetric_group_sgs(1) + >>> base_A, gens_A = bsgs_direct_product(base1, gens1, base1, gens1) + >>> t0 = (base_f, gens_f, 2, 0) + >>> t1 = (base_A, gens_A, 4, 0) + >>> dummies = [range(2, 10), range(10, 14)] + >>> g = Permutation([0, 7, 3, 1, 9, 5, 11, 6, 10, 4, 13, 2, 12, 8, 14, 15]) + >>> canonicalize(g, dummies, [0, 0], t0, t1) + [0, 2, 4, 1, 6, 8, 10, 3, 11, 7, 12, 5, 13, 9, 15, 14] + """ + from sympy.combinatorics.testutil import canonicalize_naive + if not isinstance(msym, list): + if msym not in (0, 1, None): + raise ValueError('msym must be 0, 1 or None') + num_types = 1 + else: + num_types = len(msym) + if not all(msymx in (0, 1, None) for msymx in msym): + raise ValueError('msym entries must be 0, 1 or None') + if len(dummies) != num_types: + raise ValueError( + 'dummies and msym must have the same number of elements') + size = g.size + num_tensors = 0 + v1 = [] + for base_i, gens_i, n_i, sym_i in v: + # check that the BSGS is minimal; + # this property is used in double_coset_can_rep; + # if it is not minimal use canonicalize_naive + if not _is_minimal_bsgs(base_i, gens_i): + mbsgs = get_minimal_bsgs(base_i, gens_i) + if not mbsgs: + can = canonicalize_naive(g, dummies, msym, *v) + return can + base_i, gens_i = mbsgs + v1.append((base_i, gens_i, [[]] * n_i, sym_i)) + num_tensors += n_i + + if num_types == 1 and not isinstance(msym, list): + dummies = [dummies] + msym = [msym] + flat_dummies = [] + for dumx in dummies: + flat_dummies.extend(dumx) + + if flat_dummies and flat_dummies != list(range(flat_dummies[0], flat_dummies[-1] + 1)): + raise ValueError('dummies is not valid') + + # slot symmetry of the tensor + size1, sbase, sgens = gens_products(*v1) + if size != size1: + raise ValueError( + 'g has size %d, generators have size %d' % (size, size1)) + free = [i for i in range(size - 2) if i not in flat_dummies] + num_free = len(free) + + # g1 minimal tensor under slot symmetry + g1 = canonical_free(sbase, sgens, g, num_free) + if not flat_dummies: + return g1 + # save the sign of g1 + sign = 0 if g1[-1] == size - 1 else 1 + + # 
the free indices are kept fixed. + # Determine free_i, the list of slots of tensors which are fixed + # since they are occupied by free indices, which are fixed. + start = 0 + for i, (base_i, gens_i, n_i, sym_i) in enumerate(v): + free_i = [] + len_tens = gens_i[0].size - 2 + # for each component tensor get a list od fixed islots + for j in range(n_i): + # get the elements corresponding to the component tensor + h = g1[start:(start + len_tens)] + fr = [] + # get the positions of the fixed elements in h + for k in free: + if k in h: + fr.append(h.index(k)) + free_i.append(fr) + start += len_tens + v1[i] = (base_i, gens_i, free_i, sym_i) + # BSGS of the tensor with fixed free indices + # if tensor_gens fails in gens_product, use canonicalize_naive + size, sbase, sgens = gens_products(*v1) + + # reduce the permutations getting rid of the free indices + pos_free = [g1.index(x) for x in range(num_free)] + size_red = size - num_free + g1_red = [x - num_free for x in g1 if x in flat_dummies] + if sign: + g1_red.extend([size_red - 1, size_red - 2]) + else: + g1_red.extend([size_red - 2, size_red - 1]) + map_slots = _get_map_slots(size, pos_free) + sbase_red = [map_slots[i] for i in sbase if i not in pos_free] + sgens_red = [_af_new([map_slots[i] for i in y._array_form if i not in pos_free]) for y in sgens] + dummies_red = [[x - num_free for x in y] for y in dummies] + transv_red = get_transversals(sbase_red, sgens_red) + g1_red = _af_new(g1_red) + g2 = double_coset_can_rep( + dummies_red, msym, sbase_red, sgens_red, transv_red, g1_red) + if g2 == 0: + return 0 + # lift to the case with the free indices + g3 = _lift_sgens(size, pos_free, free, g2) + return g3 + + +def perm_af_direct_product(gens1, gens2, signed=True): + """ + Direct products of the generators gens1 and gens2. + + Examples + ======== + + >>> from sympy.combinatorics.tensor_can import perm_af_direct_product + >>> gens1 = [[1, 0, 2, 3], [0, 1, 3, 2]] + >>> gens2 = [[1, 0]] + >>> perm_af_direct_product(gens1, gens2, False) + [[1, 0, 2, 3, 4, 5], [0, 1, 3, 2, 4, 5], [0, 1, 2, 3, 5, 4]] + >>> gens1 = [[1, 0, 2, 3, 5, 4], [0, 1, 3, 2, 4, 5]] + >>> gens2 = [[1, 0, 2, 3]] + >>> perm_af_direct_product(gens1, gens2, True) + [[1, 0, 2, 3, 4, 5, 7, 6], [0, 1, 3, 2, 4, 5, 6, 7], [0, 1, 2, 3, 5, 4, 6, 7]] + """ + gens1 = [list(x) for x in gens1] + gens2 = [list(x) for x in gens2] + s = 2 if signed else 0 + n1 = len(gens1[0]) - s + n2 = len(gens2[0]) - s + start = list(range(n1)) + end = list(range(n1, n1 + n2)) + if signed: + gens1 = [gen[:-2] + end + [gen[-2] + n2, gen[-1] + n2] + for gen in gens1] + gens2 = [start + [x + n1 for x in gen] for gen in gens2] + else: + gens1 = [gen + end for gen in gens1] + gens2 = [start + [x + n1 for x in gen] for gen in gens2] + + res = gens1 + gens2 + + return res + + +def bsgs_direct_product(base1, gens1, base2, gens2, signed=True): + """ + Direct product of two BSGS. + + Parameters + ========== + + base1 : base of the first BSGS. + + gens1 : strong generating sequence of the first BSGS. + + base2, gens2 : similarly for the second BSGS. + + signed : flag for signed permutations. 
+ + Examples + ======== + + >>> from sympy.combinatorics.tensor_can import (get_symmetric_group_sgs, bsgs_direct_product) + >>> base1, gens1 = get_symmetric_group_sgs(1) + >>> base2, gens2 = get_symmetric_group_sgs(2) + >>> bsgs_direct_product(base1, gens1, base2, gens2) + ([1], [(4)(1 2)]) + """ + s = 2 if signed else 0 + n1 = gens1[0].size - s + base = list(base1) + base += [x + n1 for x in base2] + gens1 = [h._array_form for h in gens1] + gens2 = [h._array_form for h in gens2] + gens = perm_af_direct_product(gens1, gens2, signed) + size = len(gens[0]) + id_af = list(range(size)) + gens = [h for h in gens if h != id_af] + if not gens: + gens = [id_af] + return base, [_af_new(h) for h in gens] + + +def get_symmetric_group_sgs(n, antisym=False): + """ + Return base, gens of the minimal BSGS for (anti)symmetric tensor + + Parameters + ========== + + n : rank of the tensor + antisym : bool + ``antisym = False`` symmetric tensor + ``antisym = True`` antisymmetric tensor + + Examples + ======== + + >>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs + >>> get_symmetric_group_sgs(3) + ([0, 1], [(4)(0 1), (4)(1 2)]) + """ + if n == 1: + return [], [_af_new(list(range(3)))] + gens = [Permutation(n - 1)(i, i + 1)._array_form for i in range(n - 1)] + if antisym == 0: + gens = [x + [n, n + 1] for x in gens] + else: + gens = [x + [n + 1, n] for x in gens] + base = list(range(n - 1)) + return base, [_af_new(h) for h in gens] + +riemann_bsgs = [0, 2], [Permutation(0, 1)(4, 5), Permutation(2, 3)(4, 5), + Permutation(5)(0, 2)(1, 3)] + + +def get_transversals(base, gens): + """ + Return transversals for the group with BSGS base, gens + """ + if not base: + return [] + stabs = _distribute_gens_by_base(base, gens) + orbits, transversals = _orbits_transversals_from_bsgs(base, stabs) + transversals = [{x: h._array_form for x, h in y.items()} for y in + transversals] + return transversals + + +def _is_minimal_bsgs(base, gens): + """ + Check if the BSGS has minimal base under lexigographic order. + + base, gens BSGS + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> from sympy.combinatorics.tensor_can import riemann_bsgs, _is_minimal_bsgs + >>> _is_minimal_bsgs(*riemann_bsgs) + True + >>> riemann_bsgs1 = ([2, 0], ([Permutation(5)(0, 1)(4, 5), Permutation(5)(0, 2)(1, 3)])) + >>> _is_minimal_bsgs(*riemann_bsgs1) + False + """ + base1 = [] + sgs1 = gens[:] + size = gens[0].size + for i in range(size): + if not all(h._array_form[i] == i for h in sgs1): + base1.append(i) + sgs1 = [h for h in sgs1 if h._array_form[i] == i] + return base1 == base + + +def get_minimal_bsgs(base, gens): + """ + Compute a minimal GSGS + + base, gens BSGS + + If base, gens is a minimal BSGS return it; else return a minimal BSGS + if it fails in finding one, it returns None + + TODO: use baseswap in the case in which if it fails in finding a + minimal BSGS + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> from sympy.combinatorics.tensor_can import get_minimal_bsgs + >>> riemann_bsgs1 = ([2, 0], ([Permutation(5)(0, 1)(4, 5), Permutation(5)(0, 2)(1, 3)])) + >>> get_minimal_bsgs(*riemann_bsgs1) + ([0, 2], [(0 1)(4 5), (5)(0 2)(1 3), (2 3)(4 5)]) + """ + G = PermutationGroup(gens) + base, gens = G.schreier_sims_incremental() + if not _is_minimal_bsgs(base, gens): + return None + return base, gens + + +def tensor_gens(base, gens, list_free_indices, sym=0): + """ + Returns size, res_base, res_gens BSGS for n tensors of the + same type. 
+ + Explanation + =========== + + base, gens BSGS for tensors of this type + list_free_indices list of the slots occupied by fixed indices + for each of the tensors + + sym symmetry under commutation of two tensors + sym None no symmetry + sym 0 commuting + sym 1 anticommuting + + Examples + ======== + + >>> from sympy.combinatorics.tensor_can import tensor_gens, get_symmetric_group_sgs + + two symmetric tensors with 3 indices without free indices + + >>> base, gens = get_symmetric_group_sgs(3) + >>> tensor_gens(base, gens, [[], []]) + (8, [0, 1, 3, 4], [(7)(0 1), (7)(1 2), (7)(3 4), (7)(4 5), (7)(0 3)(1 4)(2 5)]) + + two symmetric tensors with 3 indices with free indices in slot 1 and 0 + + >>> tensor_gens(base, gens, [[1], [0]]) + (8, [0, 4], [(7)(0 2), (7)(4 5)]) + + four symmetric tensors with 3 indices, two of which with free indices + + """ + def _get_bsgs(G, base, gens, free_indices): + """ + return the BSGS for G.pointwise_stabilizer(free_indices) + """ + if not free_indices: + return base[:], gens[:] + else: + H = G.pointwise_stabilizer(free_indices) + base, sgs = H.schreier_sims_incremental() + return base, sgs + + # if not base there is no slot symmetry for the component tensors + # if list_free_indices.count([]) < 2 there is no commutation symmetry + # so there is no resulting slot symmetry + if not base and list_free_indices.count([]) < 2: + n = len(list_free_indices) + size = gens[0].size + size = n * (size - 2) + 2 + return size, [], [_af_new(list(range(size)))] + + # if any(list_free_indices) one needs to compute the pointwise + # stabilizer, so G is needed + if any(list_free_indices): + G = PermutationGroup(gens) + else: + G = None + + # no_free list of lists of indices for component tensors without fixed + # indices + no_free = [] + size = gens[0].size + id_af = list(range(size)) + num_indices = size - 2 + if not list_free_indices[0]: + no_free.append(list(range(num_indices))) + res_base, res_gens = _get_bsgs(G, base, gens, list_free_indices[0]) + for i in range(1, len(list_free_indices)): + base1, gens1 = _get_bsgs(G, base, gens, list_free_indices[i]) + res_base, res_gens = bsgs_direct_product(res_base, res_gens, + base1, gens1, 1) + if not list_free_indices[i]: + no_free.append(list(range(size - 2, size - 2 + num_indices))) + size += num_indices + nr = size - 2 + res_gens = [h for h in res_gens if h._array_form != id_af] + # if sym there are no commuting tensors stop here + if sym is None or not no_free: + if not res_gens: + res_gens = [_af_new(id_af)] + return size, res_base, res_gens + + # if the component tensors have moinimal BSGS, so is their direct + # product P; the slot symmetry group is S = P*C, where C is the group + # to (anti)commute the component tensors with no free indices + # a stabilizer has the property S_i = P_i*C_i; + # the BSGS of P*C has SGS_P + SGS_C and the base is + # the ordered union of the bases of P and C. + # If P has minimal BSGS, so has S with this base. 
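+    # The loop below realizes C explicitly: for each pair of consecutive
+    # entries of no_free it appends the array-form permutation that exchanges
+    # the two blocks of slots, records the first slot of the pair as a new
+    # base point in base_comm, and adds the two sign slots unchanged for
+    # sym == 0 (commuting) or swapped for sym == 1 (anticommuting).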
+ base_comm = [] + for i in range(len(no_free) - 1): + ind1 = no_free[i] + ind2 = no_free[i + 1] + a = list(range(ind1[0])) + a.extend(ind2) + a.extend(ind1) + base_comm.append(ind1[0]) + a.extend(list(range(ind2[-1] + 1, nr))) + if sym == 0: + a.extend([nr, nr + 1]) + else: + a.extend([nr + 1, nr]) + res_gens.append(_af_new(a)) + res_base = list(res_base) + # each base is ordered; order the union of the two bases + for i in base_comm: + if i not in res_base: + res_base.append(i) + res_base.sort() + if not res_gens: + res_gens = [_af_new(id_af)] + + return size, res_base, res_gens + + +def gens_products(*v): + """ + Returns size, res_base, res_gens BSGS for n tensors of different types. + + Explanation + =========== + + v is a sequence of (base_i, gens_i, free_i, sym_i) + where + base_i, gens_i BSGS of tensor of type `i` + free_i list of the fixed slots for each of the tensors + of type `i`; if there are `n_i` tensors of type `i` + and none of them have fixed slots, `free = [[]]*n_i` + sym 0 (1) if the tensors of type `i` (anti)commute among themselves + + Examples + ======== + + >>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, gens_products + >>> base, gens = get_symmetric_group_sgs(2) + >>> gens_products((base, gens, [[], []], 0)) + (6, [0, 2], [(5)(0 1), (5)(2 3), (5)(0 2)(1 3)]) + >>> gens_products((base, gens, [[1], []], 0)) + (6, [2], [(5)(2 3)]) + """ + res_size, res_base, res_gens = tensor_gens(*v[0]) + for i in range(1, len(v)): + size, base, gens = tensor_gens(*v[i]) + res_base, res_gens = bsgs_direct_product(res_base, res_gens, base, + gens, 1) + res_size = res_gens[0].size + id_af = list(range(res_size)) + res_gens = [h for h in res_gens if h != id_af] + if not res_gens: + res_gens = [id_af] + return res_size, res_base, res_gens diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be0dd9b1fee23e13add63fafd309d29e1c5b4532 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_coset_table.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_coset_table.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..103832a69f3551f56b603ec690aed0714e79ff01 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_coset_table.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_fp_groups.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_fp_groups.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ae220cd7849df1eb7a097dae47717d5ef6bb2f1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_fp_groups.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_free_groups.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_free_groups.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61aec5fc73ce52282150bfbd3f81dcdb28a2b9be Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_free_groups.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_galois.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_galois.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfbe5340559d87ddf387eef457de48685f2d75f7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_galois.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_generators.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_generators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb817688ae35255141077e3bca859db10c4dbf3d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_generators.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_group_numbers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_group_numbers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a889e0beba014ec4953f67ede2bb60fcb30e7a24 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_group_numbers.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_named_groups.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_named_groups.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..137ccc761e21f7d4f30996c9cd3a2cbcfddd6aed Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_named_groups.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_partitions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_partitions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc1cd3acc3b6944fae41a894b6be8504ebd9cdb7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_partitions.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_pc_groups.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_pc_groups.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..176b7dc6b9fe41bc163bc6dd091a1a3262ce606e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_pc_groups.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_permutations.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_permutations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..873e2504e0a5dea947019a7d9a293389aa87dffc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_permutations.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_polyhedron.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_polyhedron.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..600fc84d87cb5338e872406199bd5aba7e227e54 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_polyhedron.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_rewriting.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_rewriting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b4df08975d7fa0a41b60b9f5706c69044b61203 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_rewriting.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_schur_number.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_schur_number.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc74985673a54d7727c16059c7c1cd61dd03cbab Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_schur_number.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_subsets.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_subsets.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8aecdcc129ae44181be186f3d6f8563217b01e80 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_subsets.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_tensor_can.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_tensor_can.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9531db6a45ba470f7183335315f38314d7a7c92 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_tensor_can.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_testutil.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_testutil.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce8f42fe117db7a492bbc3309224e64c6452a5c3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_testutil.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_util.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31711d193b0ef26801be6a62954bdec5dbcb6429 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/__pycache__/test_util.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_coset_table.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_coset_table.py new file mode 100644 index 0000000000000000000000000000000000000000..ab3f62880445c5deb526797ee0623fe3510bcbc3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_coset_table.py @@ -0,0 +1,825 @@ +from sympy.combinatorics.fp_groups import FpGroup +from sympy.combinatorics.coset_table import (CosetTable, + coset_enumeration_r, coset_enumeration_c) +from sympy.combinatorics.coset_table import modified_coset_enumeration_r +from sympy.combinatorics.free_groups import free_group + +from sympy.testing.pytest import slow + +""" +References +========== + +[1] Holt, D., Eick, B., O'Brien, E. +"Handbook of Computational Group Theory" + +[2] John J. Cannon; Lucien A. Dimino; George Havas; Jane M. Watson +Mathematics of Computation, Vol. 27, No. 123. (Jul., 1973), pp. 463-490. +"Implementation and Analysis of the Todd-Coxeter Algorithm" + +""" + +def test_scan_1(): + # Example 5.1 from [1] + F, x, y = free_group("x, y") + f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y]) + c = CosetTable(f, [x]) + + c.scan_and_fill(0, x) + assert c.table == [[0, 0, None, None]] + assert c.p == [0] + assert c.n == 1 + assert c.omega == [0] + + c.scan_and_fill(0, x**3) + assert c.table == [[0, 0, None, None]] + assert c.p == [0] + assert c.n == 1 + assert c.omega == [0] + + c.scan_and_fill(0, y**3) + assert c.table == [[0, 0, 1, 2], [None, None, 2, 0], [None, None, 0, 1]] + assert c.p == [0, 1, 2] + assert c.n == 3 + assert c.omega == [0, 1, 2] + + c.scan_and_fill(0, x**-1*y**-1*x*y) + assert c.table == [[0, 0, 1, 2], [None, None, 2, 0], [2, 2, 0, 1]] + assert c.p == [0, 1, 2] + assert c.n == 3 + assert c.omega == [0, 1, 2] + + c.scan_and_fill(1, x**3) + assert c.table == [[0, 0, 1, 2], [3, 4, 2, 0], [2, 2, 0, 1], \ + [4, 1, None, None], [1, 3, None, None]] + assert c.p == [0, 1, 2, 3, 4] + assert c.n == 5 + assert c.omega == [0, 1, 2, 3, 4] + + c.scan_and_fill(1, y**3) + assert c.table == [[0, 0, 1, 2], [3, 4, 2, 0], [2, 2, 0, 1], \ + [4, 1, None, None], [1, 3, None, None]] + assert c.p == [0, 1, 2, 3, 4] + assert c.n == 5 + assert c.omega == [0, 1, 2, 3, 4] + + c.scan_and_fill(1, x**-1*y**-1*x*y) + assert c.table == [[0, 0, 1, 2], [1, 1, 2, 0], [2, 2, 0, 1], \ + [None, 1, None, None], [1, 3, None, None]] + assert c.p == [0, 1, 2, 1, 1] + assert c.n == 3 + assert c.omega == [0, 1, 2] + + # Example 5.2 from [1] + f = FpGroup(F, [x**2, y**3, (x*y)**3]) + c = CosetTable(f, [x*y]) + + c.scan_and_fill(0, x*y) + assert c.table == [[1, None, None, 1], [None, 0, 0, None]] + assert c.p == [0, 1] + assert c.n == 2 + assert c.omega == [0, 1] + + c.scan_and_fill(0, x**2) + assert c.table == [[1, 1, None, 1], [0, 0, 0, None]] + assert c.p == [0, 1] + assert c.n == 2 + assert c.omega == [0, 1] + + c.scan_and_fill(0, y**3) + assert c.table == [[1, 1, 2, 1], [0, 0, 0, 2], [None, None, 1, 0]] + assert c.p == [0, 1, 2] + assert c.n == 3 + assert c.omega == [0, 
1, 2] + + c.scan_and_fill(0, (x*y)**3) + assert c.table == [[1, 1, 2, 1], [0, 0, 0, 2], [None, None, 1, 0]] + assert c.p == [0, 1, 2] + assert c.n == 3 + assert c.omega == [0, 1, 2] + + c.scan_and_fill(1, x**2) + assert c.table == [[1, 1, 2, 1], [0, 0, 0, 2], [None, None, 1, 0]] + assert c.p == [0, 1, 2] + assert c.n == 3 + assert c.omega == [0, 1, 2] + + c.scan_and_fill(1, y**3) + assert c.table == [[1, 1, 2, 1], [0, 0, 0, 2], [None, None, 1, 0]] + assert c.p == [0, 1, 2] + assert c.n == 3 + assert c.omega == [0, 1, 2] + + c.scan_and_fill(1, (x*y)**3) + assert c.table == [[1, 1, 2, 1], [0, 0, 0, 2], [3, 4, 1, 0], [None, 2, 4, None], [2, None, None, 3]] + assert c.p == [0, 1, 2, 3, 4] + assert c.n == 5 + assert c.omega == [0, 1, 2, 3, 4] + + c.scan_and_fill(2, x**2) + assert c.table == [[1, 1, 2, 1], [0, 0, 0, 2], [3, 3, 1, 0], [2, 2, 3, 3], [2, None, None, 3]] + assert c.p == [0, 1, 2, 3, 3] + assert c.n == 4 + assert c.omega == [0, 1, 2, 3] + + +@slow +def test_coset_enumeration(): + # this test function contains the combined tests for the two strategies + # i.e. HLT and Felsch strategies. + + # Example 5.1 from [1] + F, x, y = free_group("x, y") + f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y]) + C_r = coset_enumeration_r(f, [x]) + C_r.compress(); C_r.standardize() + C_c = coset_enumeration_c(f, [x]) + C_c.compress(); C_c.standardize() + table1 = [[0, 0, 1, 2], [1, 1, 2, 0], [2, 2, 0, 1]] + assert C_r.table == table1 + assert C_c.table == table1 + + # E1 from [2] Pg. 474 + F, r, s, t = free_group("r, s, t") + E1 = FpGroup(F, [t**-1*r*t*r**-2, r**-1*s*r*s**-2, s**-1*t*s*t**-2]) + C_r = coset_enumeration_r(E1, []) + C_r.compress() + C_c = coset_enumeration_c(E1, []) + C_c.compress() + table2 = [[0, 0, 0, 0, 0, 0]] + assert C_r.table == table2 + # test for issue #11449 + assert C_c.table == table2 + + # Cox group from [2] Pg. 
474 + F, a, b = free_group("a, b") + Cox = FpGroup(F, [a**6, b**6, (a*b)**2, (a**2*b**2)**2, (a**3*b**3)**5]) + C_r = coset_enumeration_r(Cox, [a]) + C_r.compress(); C_r.standardize() + C_c = coset_enumeration_c(Cox, [a]) + C_c.compress(); C_c.standardize() + table3 = [[0, 0, 1, 2], + [2, 3, 4, 0], + [5, 1, 0, 6], + [1, 7, 8, 9], + [9, 10, 11, 1], + [12, 2, 9, 13], + [14, 9, 2, 11], + [3, 12, 15, 16], + [16, 17, 18, 3], + [6, 4, 3, 5], + [4, 19, 20, 21], + [21, 22, 6, 4], + [7, 5, 23, 24], + [25, 23, 5, 18], + [19, 6, 22, 26], + [24, 27, 28, 7], + [29, 8, 7, 30], + [8, 31, 32, 33], + [33, 34, 13, 8], + [10, 14, 35, 35], + [35, 36, 37, 10], + [30, 11, 10, 29], + [11, 38, 39, 14], + [13, 39, 38, 12], + [40, 15, 12, 41], + [42, 13, 34, 43], + [44, 35, 14, 45], + [15, 46, 47, 34], + [34, 48, 49, 15], + [50, 16, 21, 51], + [52, 21, 16, 49], + [17, 50, 53, 54], + [54, 55, 56, 17], + [41, 18, 17, 40], + [18, 28, 27, 25], + [26, 20, 19, 19], + [20, 57, 58, 59], + [59, 60, 51, 20], + [22, 52, 61, 23], + [23, 62, 63, 22], + [64, 24, 33, 65], + [48, 33, 24, 61], + [62, 25, 54, 66], + [67, 54, 25, 68], + [57, 26, 59, 69], + [70, 59, 26, 63], + [27, 64, 71, 72], + [72, 73, 68, 27], + [28, 41, 74, 75], + [75, 76, 30, 28], + [31, 29, 77, 78], + [79, 77, 29, 37], + [38, 30, 76, 80], + [78, 81, 82, 31], + [43, 32, 31, 42], + [32, 83, 84, 85], + [85, 86, 65, 32], + [36, 44, 87, 88], + [88, 89, 90, 36], + [45, 37, 36, 44], + [37, 82, 81, 79], + [80, 74, 41, 38], + [39, 42, 91, 92], + [92, 93, 45, 39], + [46, 40, 94, 95], + [96, 94, 40, 56], + [97, 91, 42, 82], + [83, 43, 98, 99], + [100, 98, 43, 47], + [101, 87, 44, 90], + [82, 45, 93, 97], + [95, 102, 103, 46], + [104, 47, 46, 105], + [47, 106, 107, 100], + [61, 108, 109, 48], + [105, 49, 48, 104], + [49, 110, 111, 52], + [51, 111, 110, 50], + [112, 53, 50, 113], + [114, 51, 60, 115], + [116, 61, 52, 117], + [53, 118, 119, 60], + [60, 70, 66, 53], + [55, 67, 120, 121], + [121, 122, 123, 55], + [113, 56, 55, 112], + [56, 103, 102, 96], + [69, 124, 125, 57], + [115, 58, 57, 114], + [58, 126, 127, 128], + [128, 128, 69, 58], + [66, 129, 130, 62], + [117, 63, 62, 116], + [63, 125, 124, 70], + [65, 109, 108, 64], + [131, 71, 64, 132], + [133, 65, 86, 134], + [135, 66, 70, 136], + [68, 130, 129, 67], + [137, 120, 67, 138], + [132, 68, 73, 131], + [139, 69, 128, 140], + [71, 141, 142, 86], + [86, 143, 144, 71], + [145, 72, 75, 146], + [147, 75, 72, 144], + [73, 145, 148, 120], + [120, 149, 150, 73], + [74, 151, 152, 94], + [94, 153, 146, 74], + [76, 147, 154, 77], + [77, 155, 156, 76], + [157, 78, 85, 158], + [143, 85, 78, 154], + [155, 79, 88, 159], + [160, 88, 79, 161], + [151, 80, 92, 162], + [163, 92, 80, 156], + [81, 157, 164, 165], + [165, 166, 161, 81], + [99, 107, 106, 83], + [134, 84, 83, 133], + [84, 167, 168, 169], + [169, 170, 158, 84], + [87, 171, 172, 93], + [93, 163, 159, 87], + [89, 160, 173, 174], + [174, 175, 176, 89], + [90, 90, 89, 101], + [91, 177, 178, 98], + [98, 179, 162, 91], + [180, 95, 100, 181], + [179, 100, 95, 152], + [153, 96, 121, 148], + [182, 121, 96, 183], + [177, 97, 165, 184], + [185, 165, 97, 172], + [186, 99, 169, 187], + [188, 169, 99, 178], + [171, 101, 174, 189], + [190, 174, 101, 176], + [102, 180, 191, 192], + [192, 193, 183, 102], + [103, 113, 194, 195], + [195, 196, 105, 103], + [106, 104, 197, 198], + [199, 197, 104, 109], + [110, 105, 196, 200], + [198, 201, 133, 106], + [107, 186, 202, 203], + [203, 204, 181, 107], + [108, 116, 205, 206], + [206, 207, 132, 108], + [109, 133, 201, 199], + [200, 194, 113, 110], + 
[111, 114, 208, 209], + [209, 210, 117, 111], + [118, 112, 211, 212], + [213, 211, 112, 123], + [214, 208, 114, 125], + [126, 115, 215, 216], + [217, 215, 115, 119], + [218, 205, 116, 130], + [125, 117, 210, 214], + [212, 219, 220, 118], + [136, 119, 118, 135], + [119, 221, 222, 217], + [122, 182, 223, 224], + [224, 225, 226, 122], + [138, 123, 122, 137], + [123, 220, 219, 213], + [124, 139, 227, 228], + [228, 229, 136, 124], + [216, 222, 221, 126], + [140, 127, 126, 139], + [127, 230, 231, 232], + [232, 233, 140, 127], + [129, 135, 234, 235], + [235, 236, 138, 129], + [130, 132, 207, 218], + [141, 131, 237, 238], + [239, 237, 131, 150], + [167, 134, 240, 241], + [242, 240, 134, 142], + [243, 234, 135, 220], + [221, 136, 229, 244], + [149, 137, 245, 246], + [247, 245, 137, 226], + [220, 138, 236, 243], + [244, 227, 139, 221], + [230, 140, 233, 248], + [238, 249, 250, 141], + [251, 142, 141, 252], + [142, 253, 254, 242], + [154, 255, 256, 143], + [252, 144, 143, 251], + [144, 257, 258, 147], + [146, 258, 257, 145], + [259, 148, 145, 260], + [261, 146, 153, 262], + [263, 154, 147, 264], + [148, 265, 266, 153], + [246, 267, 268, 149], + [260, 150, 149, 259], + [150, 250, 249, 239], + [162, 269, 270, 151], + [262, 152, 151, 261], + [152, 271, 272, 179], + [159, 273, 274, 155], + [264, 156, 155, 263], + [156, 270, 269, 163], + [158, 256, 255, 157], + [275, 164, 157, 276], + [277, 158, 170, 278], + [279, 159, 163, 280], + [161, 274, 273, 160], + [281, 173, 160, 282], + [276, 161, 166, 275], + [283, 162, 179, 284], + [164, 285, 286, 170], + [170, 188, 184, 164], + [166, 185, 189, 173], + [173, 287, 288, 166], + [241, 254, 253, 167], + [278, 168, 167, 277], + [168, 289, 290, 291], + [291, 292, 187, 168], + [189, 293, 294, 171], + [280, 172, 171, 279], + [172, 295, 296, 185], + [175, 190, 297, 297], + [297, 298, 299, 175], + [282, 176, 175, 281], + [176, 294, 293, 190], + [184, 296, 295, 177], + [284, 178, 177, 283], + [178, 300, 301, 188], + [181, 272, 271, 180], + [302, 191, 180, 303], + [304, 181, 204, 305], + [183, 266, 265, 182], + [306, 223, 182, 307], + [303, 183, 193, 302], + [308, 184, 188, 309], + [310, 189, 185, 311], + [187, 301, 300, 186], + [305, 202, 186, 304], + [312, 187, 292, 313], + [314, 297, 190, 315], + [191, 316, 317, 204], + [204, 318, 319, 191], + [320, 192, 195, 321], + [322, 195, 192, 319], + [193, 320, 323, 223], + [223, 324, 325, 193], + [194, 326, 327, 211], + [211, 328, 321, 194], + [196, 322, 329, 197], + [197, 330, 331, 196], + [332, 198, 203, 333], + [318, 203, 198, 329], + [330, 199, 206, 334], + [335, 206, 199, 336], + [326, 200, 209, 337], + [338, 209, 200, 331], + [201, 332, 339, 240], + [240, 340, 336, 201], + [202, 341, 342, 292], + [292, 343, 333, 202], + [205, 344, 345, 210], + [210, 338, 334, 205], + [207, 335, 346, 237], + [237, 347, 348, 207], + [208, 349, 350, 215], + [215, 351, 337, 208], + [352, 212, 217, 353], + [351, 217, 212, 327], + [328, 213, 224, 323], + [354, 224, 213, 355], + [349, 214, 228, 356], + [357, 228, 214, 345], + [358, 216, 232, 359], + [360, 232, 216, 350], + [344, 218, 235, 361], + [362, 235, 218, 348], + [219, 352, 363, 364], + [364, 365, 355, 219], + [222, 358, 366, 367], + [367, 368, 353, 222], + [225, 354, 369, 370], + [370, 371, 372, 225], + [307, 226, 225, 306], + [226, 268, 267, 247], + [227, 373, 374, 233], + [233, 360, 356, 227], + [229, 357, 361, 234], + [234, 375, 376, 229], + [248, 231, 230, 230], + [231, 377, 378, 379], + [379, 380, 359, 231], + [236, 362, 381, 245], + [245, 382, 383, 236], + [384, 238, 242, 385], + 
[340, 242, 238, 346], + [347, 239, 246, 381], + [386, 246, 239, 387], + [388, 241, 291, 389], + [343, 291, 241, 339], + [375, 243, 364, 390], + [391, 364, 243, 383], + [373, 244, 367, 392], + [393, 367, 244, 376], + [382, 247, 370, 394], + [395, 370, 247, 396], + [377, 248, 379, 397], + [398, 379, 248, 374], + [249, 384, 399, 400], + [400, 401, 387, 249], + [250, 260, 402, 403], + [403, 404, 252, 250], + [253, 251, 405, 406], + [407, 405, 251, 256], + [257, 252, 404, 408], + [406, 409, 277, 253], + [254, 388, 410, 411], + [411, 412, 385, 254], + [255, 263, 413, 414], + [414, 415, 276, 255], + [256, 277, 409, 407], + [408, 402, 260, 257], + [258, 261, 416, 417], + [417, 418, 264, 258], + [265, 259, 419, 420], + [421, 419, 259, 268], + [422, 416, 261, 270], + [271, 262, 423, 424], + [425, 423, 262, 266], + [426, 413, 263, 274], + [270, 264, 418, 422], + [420, 427, 307, 265], + [266, 303, 428, 425], + [267, 386, 429, 430], + [430, 431, 396, 267], + [268, 307, 427, 421], + [269, 283, 432, 433], + [433, 434, 280, 269], + [424, 428, 303, 271], + [272, 304, 435, 436], + [436, 437, 284, 272], + [273, 279, 438, 439], + [439, 440, 282, 273], + [274, 276, 415, 426], + [285, 275, 441, 442], + [443, 441, 275, 288], + [289, 278, 444, 445], + [446, 444, 278, 286], + [447, 438, 279, 294], + [295, 280, 434, 448], + [287, 281, 449, 450], + [451, 449, 281, 299], + [294, 282, 440, 447], + [448, 432, 283, 295], + [300, 284, 437, 452], + [442, 453, 454, 285], + [309, 286, 285, 308], + [286, 455, 456, 446], + [450, 457, 458, 287], + [311, 288, 287, 310], + [288, 454, 453, 443], + [445, 456, 455, 289], + [313, 290, 289, 312], + [290, 459, 460, 461], + [461, 462, 389, 290], + [293, 310, 463, 464], + [464, 465, 315, 293], + [296, 308, 466, 467], + [467, 468, 311, 296], + [298, 314, 469, 470], + [470, 471, 472, 298], + [315, 299, 298, 314], + [299, 458, 457, 451], + [452, 435, 304, 300], + [301, 312, 473, 474], + [474, 475, 309, 301], + [316, 302, 476, 477], + [478, 476, 302, 325], + [341, 305, 479, 480], + [481, 479, 305, 317], + [324, 306, 482, 483], + [484, 482, 306, 372], + [485, 466, 308, 454], + [455, 309, 475, 486], + [487, 463, 310, 458], + [454, 311, 468, 485], + [486, 473, 312, 455], + [459, 313, 488, 489], + [490, 488, 313, 342], + [491, 469, 314, 472], + [458, 315, 465, 487], + [477, 492, 485, 316], + [463, 317, 316, 468], + [317, 487, 493, 481], + [329, 447, 464, 318], + [468, 319, 318, 463], + [319, 467, 448, 322], + [321, 448, 467, 320], + [475, 323, 320, 466], + [432, 321, 328, 437], + [438, 329, 322, 434], + [323, 474, 452, 328], + [483, 494, 486, 324], + [466, 325, 324, 475], + [325, 485, 492, 478], + [337, 422, 433, 326], + [437, 327, 326, 432], + [327, 436, 424, 351], + [334, 426, 439, 330], + [434, 331, 330, 438], + [331, 433, 422, 338], + [333, 464, 447, 332], + [449, 339, 332, 440], + [465, 333, 343, 469], + [413, 334, 338, 418], + [336, 439, 426, 335], + [441, 346, 335, 415], + [440, 336, 340, 449], + [416, 337, 351, 423], + [339, 451, 470, 343], + [346, 443, 450, 340], + [480, 493, 487, 341], + [469, 342, 341, 465], + [342, 491, 495, 490], + [361, 407, 414, 344], + [418, 345, 344, 413], + [345, 417, 408, 357], + [381, 446, 442, 347], + [415, 348, 347, 441], + [348, 414, 407, 362], + [356, 408, 417, 349], + [423, 350, 349, 416], + [350, 425, 420, 360], + [353, 424, 436, 352], + [479, 363, 352, 435], + [428, 353, 368, 476], + [355, 452, 474, 354], + [488, 369, 354, 473], + [435, 355, 365, 479], + [402, 356, 360, 419], + [405, 361, 357, 404], + [359, 420, 425, 358], + [476, 366, 358, 428], + 
[427, 359, 380, 482], + [444, 381, 362, 409], + [363, 481, 477, 368], + [368, 393, 390, 363], + [365, 391, 394, 369], + [369, 490, 480, 365], + [366, 478, 483, 380], + [380, 398, 392, 366], + [371, 395, 496, 497], + [497, 498, 489, 371], + [473, 372, 371, 488], + [372, 486, 494, 484], + [392, 400, 403, 373], + [419, 374, 373, 402], + [374, 421, 430, 398], + [390, 411, 406, 375], + [404, 376, 375, 405], + [376, 403, 400, 393], + [397, 430, 421, 377], + [482, 378, 377, 427], + [378, 484, 497, 499], + [499, 499, 397, 378], + [394, 461, 445, 382], + [409, 383, 382, 444], + [383, 406, 411, 391], + [385, 450, 443, 384], + [492, 399, 384, 453], + [457, 385, 412, 493], + [387, 442, 446, 386], + [494, 429, 386, 456], + [453, 387, 401, 492], + [389, 470, 451, 388], + [493, 410, 388, 457], + [471, 389, 462, 495], + [412, 390, 393, 399], + [462, 394, 391, 410], + [401, 392, 398, 429], + [396, 445, 461, 395], + [498, 496, 395, 460], + [456, 396, 431, 494], + [431, 397, 499, 496], + [399, 477, 481, 412], + [429, 483, 478, 401], + [410, 480, 490, 462], + [496, 497, 484, 431], + [489, 495, 491, 459], + [495, 460, 459, 471], + [460, 489, 498, 498], + [472, 472, 471, 491]] + + assert C_r.table == table3 + assert C_c.table == table3 + + # Group denoted by B2,4 from [2] Pg. 474 + F, a, b = free_group("a, b") + B_2_4 = FpGroup(F, [a**4, b**4, (a*b)**4, (a**-1*b)**4, (a**2*b)**4, \ + (a*b**2)**4, (a**2*b**2)**4, (a**-1*b*a*b)**4, (a*b**-1*a*b)**4]) + C_r = coset_enumeration_r(B_2_4, [a]) + C_c = coset_enumeration_c(B_2_4, [a]) + index_r = 0 + for i in range(len(C_r.p)): + if C_r.p[i] == i: + index_r += 1 + assert index_r == 1024 + + index_c = 0 + for i in range(len(C_c.p)): + if C_c.p[i] == i: + index_c += 1 + assert index_c == 1024 + + # trivial Macdonald group G(2,2) from [2] Pg. 480 + M = FpGroup(F, [b**-1*a**-1*b*a*b**-1*a*b*a**-2, a**-1*b**-1*a*b*a**-1*b*a*b**-2]) + C_r = coset_enumeration_r(M, [a]) + C_r.compress(); C_r.standardize() + C_c = coset_enumeration_c(M, [a]) + C_c.compress(); C_c.standardize() + table4 = [[0, 0, 0, 0]] + assert C_r.table == table4 + assert C_c.table == table4 + + +def test_look_ahead(): + # Section 3.2 [Test Example] Example (d) from [2] + F, a, b, c = free_group("a, b, c") + f = FpGroup(F, [a**11, b**5, c**4, (a*c)**3, b**2*c**-1*b**-1*c, a**4*b**-1*a**-1*b]) + H = [c, b, c**2] + table0 = [[1, 2, 0, 0, 0, 0], + [3, 0, 4, 5, 6, 7], + [0, 8, 9, 10, 11, 12], + [5, 1, 10, 13, 14, 15], + [16, 5, 16, 1, 17, 18], + [4, 3, 1, 8, 19, 20], + [12, 21, 22, 23, 24, 1], + [25, 26, 27, 28, 1, 24], + [2, 10, 5, 16, 22, 28], + [10, 13, 13, 2, 29, 30]] + CosetTable.max_stack_size = 10 + C_c = coset_enumeration_c(f, H) + C_c.compress(); C_c.standardize() + assert C_c.table[: 10] == table0 + +def test_modified_methods(): + ''' + Tests for modified coset table methods. + Example 5.7 from [1] Holt, D., Eick, B., O'Brien + "Handbook of Computational Group Theory". 
+ + ''' + F, x, y = free_group("x, y") + f = FpGroup(F, [x**3, y**5, (x*y)**2]) + H = [x*y, x**-1*y**-1*x*y*x] + C = CosetTable(f, H) + C.modified_define(0, x) + identity = C._grp.identity + a_0 = C._grp.generators[0] + a_1 = C._grp.generators[1] + + assert C.P == [[identity, None, None, None], + [None, identity, None, None]] + assert C.table == [[1, None, None, None], + [None, 0, None, None]] + + C.modified_define(1, x) + assert C.table == [[1, None, None, None], + [2, 0, None, None], + [None, 1, None, None]] + assert C.P == [[identity, None, None, None], + [identity, identity, None, None], + [None, identity, None, None]] + + C.modified_scan(0, x**3, C._grp.identity, fill=False) + assert C.P == [[identity, identity, None, None], + [identity, identity, None, None], + [identity, identity, None, None]] + assert C.table == [[1, 2, None, None], + [2, 0, None, None], + [0, 1, None, None]] + + C.modified_scan(0, x*y, C._grp.generators[0], fill=False) + assert C.P == [[identity, identity, None, a_0**-1], + [identity, identity, a_0, None], + [identity, identity, None, None]] + assert C.table == [[1, 2, None, 1], + [2, 0, 0, None], + [0, 1, None, None]] + + C.modified_define(2, y**-1) + assert C.table == [[1, 2, None, 1], + [2, 0, 0, None], + [0, 1, None, 3], + [None, None, 2, None]] + assert C.P == [[identity, identity, None, a_0**-1], + [identity, identity, a_0, None], + [identity, identity, None, identity], + [None, None, identity, None]] + + C.modified_scan(0, x**-1*y**-1*x*y*x, C._grp.generators[1]) + assert C.table == [[1, 2, None, 1], + [2, 0, 0, None], + [0, 1, None, 3], + [3, 3, 2, None]] + assert C.P == [[identity, identity, None, a_0**-1], + [identity, identity, a_0, None], + [identity, identity, None, identity], + [a_1, a_1**-1, identity, None]] + + C.modified_scan(2, (x*y)**2, C._grp.identity) + assert C.table == [[1, 2, 3, 1], + [2, 0, 0, None], + [0, 1, None, 3], + [3, 3, 2, 0]] + assert C.P == [[identity, identity, a_1**-1, a_0**-1], + [identity, identity, a_0, None], + [identity, identity, None, identity], + [a_1, a_1**-1, identity, a_1]] + + C.modified_define(2, y) + assert C.table == [[1, 2, 3, 1], + [2, 0, 0, None], + [0, 1, 4, 3], + [3, 3, 2, 0], + [None, None, None, 2]] + assert C.P == [[identity, identity, a_1**-1, a_0**-1], + [identity, identity, a_0, None], + [identity, identity, identity, identity], + [a_1, a_1**-1, identity, a_1], + [None, None, None, identity]] + + C.modified_scan(0, y**5, C._grp.identity) + assert C.table == [[1, 2, 3, 1], [2, 0, 0, 4], [0, 1, 4, 3], [3, 3, 2, 0], [None, None, 1, 2]] + assert C.P == [[identity, identity, a_1**-1, a_0**-1], + [identity, identity, a_0, a_0*a_1**-1], + [identity, identity, identity, identity], + [a_1, a_1**-1, identity, a_1], + [None, None, a_1*a_0**-1, identity]] + + C.modified_scan(1, (x*y)**2, C._grp.identity) + assert C.table == [[1, 2, 3, 1], + [2, 0, 0, 4], + [0, 1, 4, 3], + [3, 3, 2, 0], + [4, 4, 1, 2]] + assert C.P == [[identity, identity, a_1**-1, a_0**-1], + [identity, identity, a_0, a_0*a_1**-1], + [identity, identity, identity, identity], + [a_1, a_1**-1, identity, a_1], + [a_0*a_1**-1, a_1*a_0**-1, a_1*a_0**-1, identity]] + + # Modified coset enumeration test + f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y]) + C = coset_enumeration_r(f, [x]) + C_m = modified_coset_enumeration_r(f, [x]) + assert C_m.table == C.table diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_fp_groups.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_fp_groups.py new file mode 
100644 index 0000000000000000000000000000000000000000..2f042fc937ac747108e10bcbaba375a8b443e3a5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_fp_groups.py @@ -0,0 +1,252 @@ +from sympy.core.singleton import S +from sympy.combinatorics.fp_groups import (FpGroup, low_index_subgroups, + reidemeister_presentation, FpSubgroup, + simplify_presentation) +from sympy.combinatorics.free_groups import (free_group, FreeGroup) + +from sympy.testing.pytest import slow + +""" +References +========== + +[1] Holt, D., Eick, B., O'Brien, E. +"Handbook of Computational Group Theory" + +[2] John J. Cannon; Lucien A. Dimino; George Havas; Jane M. Watson +Mathematics of Computation, Vol. 27, No. 123. (Jul., 1973), pp. 463-490. +"Implementation and Analysis of the Todd-Coxeter Algorithm" + +[3] PROC. SECOND INTERNAT. CONF. THEORY OF GROUPS, CANBERRA 1973, +pp. 347-356. "A Reidemeister-Schreier program" by George Havas. +http://staff.itee.uq.edu.au/havas/1973cdhw.pdf + +""" + +def test_low_index_subgroups(): + F, x, y = free_group("x, y") + + # Example 5.10 from [1] Pg. 194 + f = FpGroup(F, [x**2, y**3, (x*y)**4]) + L = low_index_subgroups(f, 4) + t1 = [[[0, 0, 0, 0]], + [[0, 0, 1, 2], [1, 1, 2, 0], [3, 3, 0, 1], [2, 2, 3, 3]], + [[0, 0, 1, 2], [2, 2, 2, 0], [1, 1, 0, 1]], + [[1, 1, 0, 0], [0, 0, 1, 1]]] + for i in range(len(t1)): + assert L[i].table == t1[i] + + f = FpGroup(F, [x**2, y**3, (x*y)**7]) + L = low_index_subgroups(f, 15) + t2 = [[[0, 0, 0, 0]], + [[0, 0, 1, 2], [1, 1, 2, 0], [3, 3, 0, 1], [2, 2, 4, 5], + [4, 4, 5, 3], [6, 6, 3, 4], [5, 5, 6, 6]], + [[0, 0, 1, 2], [1, 1, 2, 0], [3, 3, 0, 1], [2, 2, 4, 5], + [6, 6, 5, 3], [5, 5, 3, 4], [4, 4, 6, 6]], + [[0, 0, 1, 2], [1, 1, 2, 0], [3, 3, 0, 1], [2, 2, 4, 5], + [6, 6, 5, 3], [7, 7, 3, 4], [4, 4, 8, 9], [5, 5, 10, 11], + [11, 11, 9, 6], [9, 9, 6, 8], [12, 12, 11, 7], [8, 8, 7, 10], + [10, 10, 13, 14], [14, 14, 14, 12], [13, 13, 12, 13]], + [[0, 0, 1, 2], [1, 1, 2, 0], [3, 3, 0, 1], [2, 2, 4, 5], + [6, 6, 5, 3], [7, 7, 3, 4], [4, 4, 8, 9], [5, 5, 10, 11], + [11, 11, 9, 6], [12, 12, 6, 8], [10, 10, 11, 7], [8, 8, 7, 10], + [9, 9, 13, 14], [14, 14, 14, 12], [13, 13, 12, 13]], + [[0, 0, 1, 2], [1, 1, 2, 0], [3, 3, 0, 1], [2, 2, 4, 5], + [6, 6, 5, 3], [7, 7, 3, 4], [4, 4, 8, 9], [5, 5, 10, 11], + [11, 11, 9, 6], [12, 12, 6, 8], [13, 13, 11, 7], [8, 8, 7, 10], + [9, 9, 12, 12], [10, 10, 13, 13]], + [[0, 0, 1, 2], [3, 3, 2, 0], [4, 4, 0, 1], [1, 1, 3, 3], [2, 2, 5, 6] + , [7, 7, 6, 4], [8, 8, 4, 5], [5, 5, 8, 9], [6, 6, 9, 7], + [10, 10, 7, 8], [9, 9, 11, 12], [11, 11, 12, 10], [13, 13, 10, 11], + [12, 12, 13, 13]], + [[0, 0, 1, 2], [3, 3, 2, 0], [4, 4, 0, 1], [1, 1, 3, 3], [2, 2, 5, 6] + , [7, 7, 6, 4], [8, 8, 4, 5], [5, 5, 8, 9], [6, 6, 9, 7], + [10, 10, 7, 8], [9, 9, 11, 12], [13, 13, 12, 10], [12, 12, 10, 11], + [11, 11, 13, 13]], + [[0, 0, 1, 2], [3, 3, 2, 0], [4, 4, 0, 1], [1, 1, 5, 6], [2, 2, 4, 4] + , [7, 7, 6, 3], [8, 8, 3, 5], [5, 5, 8, 9], [6, 6, 9, 7], + [10, 10, 7, 8], [9, 9, 11, 12], [13, 13, 12, 10], [12, 12, 10, 11], + [11, 11, 13, 13]], + [[0, 0, 1, 2], [3, 3, 2, 0], [4, 4, 0, 1], [1, 1, 5, 6], [2, 2, 7, 8] + , [5, 5, 6, 3], [9, 9, 3, 5], [10, 10, 8, 4], [8, 8, 4, 7], + [6, 6, 10, 11], [7, 7, 11, 9], [12, 12, 9, 10], [11, 11, 13, 14], + [14, 14, 14, 12], [13, 13, 12, 13]], + [[0, 0, 1, 2], [3, 3, 2, 0], [4, 4, 0, 1], [1, 1, 5, 6], [2, 2, 7, 8] + , [6, 6, 6, 3], [5, 5, 3, 5], [8, 8, 8, 4], [7, 7, 4, 7]], + [[0, 0, 1, 2], [3, 3, 2, 0], [4, 4, 0, 1], [1, 1, 5, 6], [2, 2, 7, 8] + , [9, 9, 6, 3], [6, 6, 
3, 5], [10, 10, 8, 4], [11, 11, 4, 7], + [5, 5, 10, 12], [7, 7, 12, 9], [8, 8, 11, 11], [13, 13, 9, 10], + [12, 12, 13, 13]], + [[0, 0, 1, 2], [3, 3, 2, 0], [4, 4, 0, 1], [1, 1, 5, 6], [2, 2, 7, 8] + , [9, 9, 6, 3], [6, 6, 3, 5], [10, 10, 8, 4], [11, 11, 4, 7], + [5, 5, 12, 11], [7, 7, 10, 10], [8, 8, 9, 12], [13, 13, 11, 9], + [12, 12, 13, 13]], + [[0, 0, 1, 2], [3, 3, 2, 0], [4, 4, 0, 1], [1, 1, 5, 6], [2, 2, 7, 8] + , [9, 9, 6, 3], [10, 10, 3, 5], [7, 7, 8, 4], [11, 11, 4, 7], + [5, 5, 9, 9], [6, 6, 11, 12], [8, 8, 12, 10], [13, 13, 10, 11], + [12, 12, 13, 13]], + [[0, 0, 1, 2], [3, 3, 2, 0], [4, 4, 0, 1], [1, 1, 5, 6], [2, 2, 7, 8] + , [9, 9, 6, 3], [10, 10, 3, 5], [7, 7, 8, 4], [11, 11, 4, 7], + [5, 5, 12, 11], [6, 6, 10, 10], [8, 8, 9, 12], [13, 13, 11, 9], + [12, 12, 13, 13]], + [[0, 0, 1, 2], [3, 3, 2, 0], [4, 4, 0, 1], [1, 1, 5, 6], [2, 2, 7, 8] + , [9, 9, 6, 3], [10, 10, 3, 5], [11, 11, 8, 4], [12, 12, 4, 7], + [5, 5, 9, 9], [6, 6, 12, 13], [7, 7, 11, 11], [8, 8, 13, 10], + [13, 13, 10, 12]], + [[1, 1, 0, 0], [0, 0, 2, 3], [4, 4, 3, 1], [5, 5, 1, 2], [2, 2, 4, 4] + , [3, 3, 6, 7], [7, 7, 7, 5], [6, 6, 5, 6]]] + for i in range(len(t2)): + assert L[i].table == t2[i] + + f = FpGroup(F, [x**2, y**3, (x*y)**7]) + L = low_index_subgroups(f, 10, [x]) + t3 = [[[0, 0, 0, 0]], + [[0, 0, 1, 2], [1, 1, 2, 0], [3, 3, 0, 1], [2, 2, 4, 5], [4, 4, 5, 3], + [6, 6, 3, 4], [5, 5, 6, 6]], + [[0, 0, 1, 2], [1, 1, 2, 0], [3, 3, 0, 1], [2, 2, 4, 5], [6, 6, 5, 3], + [5, 5, 3, 4], [4, 4, 6, 6]], + [[0, 0, 1, 2], [3, 3, 2, 0], [4, 4, 0, 1], [1, 1, 5, 6], [2, 2, 7, 8], + [6, 6, 6, 3], [5, 5, 3, 5], [8, 8, 8, 4], [7, 7, 4, 7]]] + for i in range(len(t3)): + assert L[i].table == t3[i] + + +def test_subgroup_presentations(): + F, x, y = free_group("x, y") + f = FpGroup(F, [x**3, y**5, (x*y)**2]) + H = [x*y, x**-1*y**-1*x*y*x] + p1 = reidemeister_presentation(f, H) + assert str(p1) == "((y_1, y_2), (y_1**2, y_2**3, y_2*y_1*y_2*y_1*y_2*y_1))" + + H = f.subgroup(H) + assert (H.generators, H.relators) == p1 + + f = FpGroup(F, [x**3, y**3, (x*y)**3]) + H = [x*y, x*y**-1] + p2 = reidemeister_presentation(f, H) + assert str(p2) == "((x_0, y_0), (x_0**3, y_0**3, x_0*y_0*x_0*y_0*x_0*y_0))" + + f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3]) + H = [x] + p3 = reidemeister_presentation(f, H) + assert str(p3) == "((x_0,), (x_0**4,))" + + f = FpGroup(F, [x**3*y**-3, (x*y)**3, (x*y**-1)**2]) + H = [x] + p4 = reidemeister_presentation(f, H) + assert str(p4) == "((x_0,), (x_0**6,))" + + # this presentation can be improved, the most simplified form + # of presentation is + # See [2] Pg 474 group PSL_2(11) + # This is the group PSL_2(11) + F, a, b, c = free_group("a, b, c") + f = FpGroup(F, [a**11, b**5, c**4, (b*c**2)**2, (a*b*c)**3, (a**4*c**2)**3, b**2*c**-1*b**-1*c, a**4*b**-1*a**-1*b]) + H = [a, b, c**2] + gens, rels = reidemeister_presentation(f, H) + assert str(gens) == "(b_1, c_3)" + assert len(rels) == 18 + + +@slow +def test_order(): + F, x, y = free_group("x, y") + f = FpGroup(F, [x**4, y**2, x*y*x**-1*y]) + assert f.order() == 8 + + f = FpGroup(F, [x*y*x**-1*y**-1, y**2]) + assert f.order() is S.Infinity + + F, a, b, c = free_group("a, b, c") + f = FpGroup(F, [a**250, b**2, c*b*c**-1*b, c**4, c**-1*a**-1*c*a, a**-1*b**-1*a*b]) + assert f.order() == 2000 + + F, x = free_group("x") + f = FpGroup(F, []) + assert f.order() is S.Infinity + + f = FpGroup(free_group('')[0], []) + assert f.order() == 1 + +def test_fp_subgroup(): + def _test_subgroup(K, T, S): + _gens = T(K.generators) + assert all(elem in S for elem in _gens) + 
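+        # the subgroup homomorphism T must be injective and its image must
+        # have the same order as the FpSubgroup S, i.e. K is carried onto S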
assert T.is_injective() + assert T.image().order() == S.order() + F, x, y = free_group("x, y") + f = FpGroup(F, [x**4, y**2, x*y*x**-1*y]) + S = FpSubgroup(f, [x*y]) + assert (x*y)**-3 in S + K, T = f.subgroup([x*y], homomorphism=True) + assert T(K.generators) == [y*x**-1] + _test_subgroup(K, T, S) + + S = FpSubgroup(f, [x**-1*y*x]) + assert x**-1*y**4*x in S + assert x**-1*y**4*x**2 not in S + K, T = f.subgroup([x**-1*y*x], homomorphism=True) + assert T(K.generators[0]**3) == y**3 + _test_subgroup(K, T, S) + + f = FpGroup(F, [x**3, y**5, (x*y)**2]) + H = [x*y, x**-1*y**-1*x*y*x] + K, T = f.subgroup(H, homomorphism=True) + S = FpSubgroup(f, H) + _test_subgroup(K, T, S) + +def test_permutation_methods(): + F, x, y = free_group("x, y") + # DihedralGroup(8) + G = FpGroup(F, [x**2, y**8, x*y*x**-1*y]) + T = G._to_perm_group()[1] + assert T.is_isomorphism() + assert G.center() == [y**4] + + # DiheadralGroup(4) + G = FpGroup(F, [x**2, y**4, x*y*x**-1*y]) + S = FpSubgroup(G, G.normal_closure([x])) + assert x in S + assert y**-1*x*y in S + + # Z_5xZ_4 + G = FpGroup(F, [x*y*x**-1*y**-1, y**5, x**4]) + assert G.is_abelian + assert G.is_solvable + + # AlternatingGroup(5) + G = FpGroup(F, [x**3, y**2, (x*y)**5]) + assert not G.is_solvable + + # AlternatingGroup(4) + G = FpGroup(F, [x**3, y**2, (x*y)**3]) + assert len(G.derived_series()) == 3 + S = FpSubgroup(G, G.derived_subgroup()) + assert S.order() == 4 + + +def test_simplify_presentation(): + # ref #16083 + G = simplify_presentation(FpGroup(FreeGroup([]), [])) + assert not G.generators + assert not G.relators + + +def test_cyclic(): + F, x, y = free_group("x, y") + f = FpGroup(F, [x*y, x**-1*y**-1*x*y*x]) + assert f.is_cyclic + f = FpGroup(F, [x*y, x*y**-1]) + assert f.is_cyclic + f = FpGroup(F, [x**4, y**2, x*y*x**-1*y]) + assert not f.is_cyclic + + +def test_abelian_invariants(): + F, x, y = free_group("x, y") + f = FpGroup(F, [x*y, x**-1*y**-1*x*y*x]) + assert f.abelian_invariants() == [] + f = FpGroup(F, [x*y, x*y**-1]) + assert f.abelian_invariants() == [2] + f = FpGroup(F, [x**4, y**2, x*y*x**-1*y]) + assert f.abelian_invariants() == [2, 4] diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_galois.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_galois.py new file mode 100644 index 0000000000000000000000000000000000000000..0d2ac29a846db88444d275b72a85ce3debaeaf05 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_galois.py @@ -0,0 +1,82 @@ +"""Test groups defined by the galois module. 
""" + +from sympy.combinatorics.galois import ( + S4TransitiveSubgroups, S5TransitiveSubgroups, S6TransitiveSubgroups, + find_transitive_subgroups_of_S6, +) +from sympy.combinatorics.homomorphisms import is_isomorphic +from sympy.combinatorics.named_groups import ( + SymmetricGroup, AlternatingGroup, CyclicGroup, +) + + +def test_four_group(): + G = S4TransitiveSubgroups.V.get_perm_group() + A4 = AlternatingGroup(4) + assert G.is_subgroup(A4) + assert G.degree == 4 + assert G.is_transitive() + assert G.order() == 4 + assert not G.is_cyclic + + +def test_M20(): + G = S5TransitiveSubgroups.M20.get_perm_group() + S5 = SymmetricGroup(5) + A5 = AlternatingGroup(5) + assert G.is_subgroup(S5) + assert not G.is_subgroup(A5) + assert G.degree == 5 + assert G.is_transitive() + assert G.order() == 20 + + +# Setting this True means that for each of the transitive subgroups of S6, +# we run a test not only on the fixed representation, but also on one freshly +# generated by the search procedure. +INCLUDE_SEARCH_REPS = False +S6_randomized = {} +if INCLUDE_SEARCH_REPS: + S6_randomized = find_transitive_subgroups_of_S6(*list(S6TransitiveSubgroups)) + + +def get_versions_of_S6_subgroup(name): + vers = [name.get_perm_group()] + if INCLUDE_SEARCH_REPS: + vers.append(S6_randomized[name]) + return vers + + +def test_S6_transitive_subgroups(): + """ + Test enough characteristics to distinguish all 16 transitive subgroups. + """ + ts = S6TransitiveSubgroups + A6 = AlternatingGroup(6) + for name, alt, order, is_isom, not_isom in [ + (ts.C6, False, 6, CyclicGroup(6), None), + (ts.S3, False, 6, SymmetricGroup(3), None), + (ts.D6, False, 12, None, None), + (ts.A4, True, 12, None, None), + (ts.G18, False, 18, None, None), + (ts.A4xC2, False, 24, None, SymmetricGroup(4)), + (ts.S4m, False, 24, SymmetricGroup(4), None), + (ts.S4p, True, 24, None, None), + (ts.G36m, False, 36, None, None), + (ts.G36p, True, 36, None, None), + (ts.S4xC2, False, 48, None, None), + (ts.PSL2F5, True, 60, None, None), + (ts.G72, False, 72, None, None), + (ts.PGL2F5, False, 120, None, None), + (ts.A6, True, 360, None, None), + (ts.S6, False, 720, None, None), + ]: + for G in get_versions_of_S6_subgroup(name): + assert G.is_transitive() + assert G.degree == 6 + assert G.is_subgroup(A6) is alt + assert G.order() == order + if is_isom: + assert is_isomorphic(G, is_isom) + if not_isom: + assert not is_isomorphic(G, not_isom) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_graycode.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_graycode.py new file mode 100644 index 0000000000000000000000000000000000000000..a754a3c401b07c9c12cb9bdeeefdfc94f6cb8b5c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_graycode.py @@ -0,0 +1,72 @@ +from sympy.combinatorics.graycode import (GrayCode, bin_to_gray, + random_bitstring, get_subset_from_bitstring, graycode_subsets, + gray_to_bin) +from sympy.testing.pytest import raises + +def test_graycode(): + g = GrayCode(2) + got = [] + for i in g.generate_gray(): + if i.startswith('0'): + g.skip() + got.append(i) + assert got == '00 11 10'.split() + a = GrayCode(6) + assert a.current == '0'*6 + assert a.rank == 0 + assert len(list(a.generate_gray())) == 64 + codes = ['011001', '011011', '011010', + '011110', '011111', '011101', '011100', '010100', '010101', '010111', + '010110', '010010', '010011', '010001', '010000', '110000', '110001', + '110011', '110010', '110110', '110111', '110101', '110100', '111100', + 
'111101', '111111', '111110', '111010', '111011', '111001', '111000', + '101000', '101001', '101011', '101010', '101110', '101111', '101101', + '101100', '100100', '100101', '100111', '100110', '100010', '100011', + '100001', '100000'] + assert list(a.generate_gray(start='011001')) == codes + assert list( + a.generate_gray(rank=GrayCode(6, start='011001').rank)) == codes + assert a.next().current == '000001' + assert a.next(2).current == '000011' + assert a.next(-1).current == '100000' + + a = GrayCode(5, start='10010') + assert a.rank == 28 + a = GrayCode(6, start='101000') + assert a.rank == 48 + + assert GrayCode(6, rank=4).current == '000110' + assert GrayCode(6, rank=4).rank == 4 + assert [GrayCode(4, start=s).rank for s in + GrayCode(4).generate_gray()] == [0, 1, 2, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15] + a = GrayCode(15, rank=15) + assert a.current == '000000000001000' + + assert bin_to_gray('111') == '100' + + a = random_bitstring(5) + assert type(a) is str + assert len(a) == 5 + assert all(i in ['0', '1'] for i in a) + + assert get_subset_from_bitstring( + ['a', 'b', 'c', 'd'], '0011') == ['c', 'd'] + assert get_subset_from_bitstring('abcd', '1001') == ['a', 'd'] + assert list(graycode_subsets(['a', 'b', 'c'])) == \ + [[], ['c'], ['b', 'c'], ['b'], ['a', 'b'], ['a', 'b', 'c'], + ['a', 'c'], ['a']] + + raises(ValueError, lambda: GrayCode(0)) + raises(ValueError, lambda: GrayCode(2.2)) + raises(ValueError, lambda: GrayCode(2, start=[1, 1, 0])) + raises(ValueError, lambda: GrayCode(2, rank=2.5)) + raises(ValueError, lambda: get_subset_from_bitstring(['c', 'a', 'c'], '1100')) + raises(ValueError, lambda: list(GrayCode(3).generate_gray(start="1111"))) + + +def test_live_issue_117(): + assert bin_to_gray('0100') == '0110' + assert bin_to_gray('0101') == '0111' + for bits in ('0100', '0101'): + assert gray_to_bin(bin_to_gray(bits)) == bits diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_group_numbers.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_group_numbers.py new file mode 100644 index 0000000000000000000000000000000000000000..62de63cbcaadb0fd536ceb87932b8d7495334b15 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_group_numbers.py @@ -0,0 +1,27 @@ +from sympy.combinatorics.group_numbers import (is_nilpotent_number, + is_abelian_number, is_cyclic_number) +from sympy.testing.pytest import raises +from sympy import randprime + + +def test_is_nilpotent_number(): + assert is_nilpotent_number(21) == False + assert is_nilpotent_number(randprime(1, 30)**12) == True + raises(ValueError, lambda: is_nilpotent_number(-5)) + + +def test_is_abelian_number(): + assert is_abelian_number(4) == True + assert is_abelian_number(randprime(1, 2000)**2) == True + assert is_abelian_number(randprime(1000, 100000)) == True + assert is_abelian_number(60) == False + assert is_abelian_number(24) == False + raises(ValueError, lambda: is_abelian_number(-5)) + + +def test_is_cyclic_number(): + assert is_cyclic_number(15) == True + assert is_cyclic_number(randprime(1, 2000)**2) == False + assert is_cyclic_number(randprime(1000, 100000)) == True + assert is_cyclic_number(4) == False + raises(ValueError, lambda: is_cyclic_number(-5)) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_homomorphisms.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_homomorphisms.py new file mode 100644 index 
0000000000000000000000000000000000000000..0936bbddf46a16dccdfbaebda8d1c675c131f05a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_homomorphisms.py @@ -0,0 +1,114 @@ +from sympy.combinatorics import Permutation +from sympy.combinatorics.perm_groups import PermutationGroup +from sympy.combinatorics.homomorphisms import homomorphism, group_isomorphism, is_isomorphic +from sympy.combinatorics.free_groups import free_group +from sympy.combinatorics.fp_groups import FpGroup +from sympy.combinatorics.named_groups import AlternatingGroup, DihedralGroup, CyclicGroup +from sympy.testing.pytest import raises + +def test_homomorphism(): + # FpGroup -> PermutationGroup + F, a, b = free_group("a, b") + G = FpGroup(F, [a**3, b**3, (a*b)**2]) + + c = Permutation(3)(0, 1, 2) + d = Permutation(3)(1, 2, 3) + A = AlternatingGroup(4) + T = homomorphism(G, A, [a, b], [c, d]) + assert T(a*b**2*a**-1) == c*d**2*c**-1 + assert T.is_isomorphism() + assert T(T.invert(Permutation(3)(0, 2, 3))) == Permutation(3)(0, 2, 3) + + T = homomorphism(G, AlternatingGroup(4), G.generators) + assert T.is_trivial() + assert T.kernel().order() == G.order() + + E, e = free_group("e") + G = FpGroup(E, [e**8]) + P = PermutationGroup([Permutation(0, 1, 2, 3), Permutation(0, 2)]) + T = homomorphism(G, P, [e], [Permutation(0, 1, 2, 3)]) + assert T.image().order() == 4 + assert T(T.invert(Permutation(0, 2)(1, 3))) == Permutation(0, 2)(1, 3) + + T = homomorphism(E, AlternatingGroup(4), E.generators, [c]) + assert T.invert(c**2) == e**-1 #order(c) == 3 so c**2 == c**-1 + + # FreeGroup -> FreeGroup + T = homomorphism(F, E, [a], [e]) + assert T(a**-2*b**4*a**2).is_identity + + # FreeGroup -> FpGroup + G = FpGroup(F, [a*b*a**-1*b**-1]) + T = homomorphism(F, G, F.generators, G.generators) + assert T.invert(a**-1*b**-1*a**2) == a*b**-1 + + # PermutationGroup -> PermutationGroup + D = DihedralGroup(8) + p = Permutation(0, 1, 2, 3, 4, 5, 6, 7) + P = PermutationGroup(p) + T = homomorphism(P, D, [p], [p]) + assert T.is_injective() + assert not T.is_isomorphism() + assert T.invert(p**3) == p**3 + + T2 = homomorphism(F, P, [F.generators[0]], P.generators) + T = T.compose(T2) + assert T.domain == F + assert T.codomain == D + assert T(a*b) == p + + D3 = DihedralGroup(3) + T = homomorphism(D3, D3, D3.generators, D3.generators) + assert T.is_isomorphism() + + +def test_isomorphisms(): + + F, a, b = free_group("a, b") + E, c, d = free_group("c, d") + # Infinite groups with differently ordered relators. + G = FpGroup(F, [a**2, b**3]) + H = FpGroup(F, [b**3, a**2]) + assert is_isomorphic(G, H) + + # Trivial Case + # FpGroup -> FpGroup + H = FpGroup(F, [a**3, b**3, (a*b)**2]) + F, c, d = free_group("c, d") + G = FpGroup(F, [c**3, d**3, (c*d)**2]) + check, T = group_isomorphism(G, H) + assert check + assert T(c**3*d**2) == a**3*b**2 + + # FpGroup -> PermutationGroup + # FpGroup is converted to the equivalent isomorphic group. + F, a, b = free_group("a, b") + G = FpGroup(F, [a**3, b**3, (a*b)**2]) + H = AlternatingGroup(4) + check, T = group_isomorphism(G, H) + assert check + assert T(b*a*b**-1*a**-1*b**-1) == Permutation(0, 2, 3) + assert T(b*a*b*a**-1*b**-1) == Permutation(0, 3, 2) + + # PermutationGroup -> PermutationGroup + D = DihedralGroup(8) + p = Permutation(0, 1, 2, 3, 4, 5, 6, 7) + P = PermutationGroup(p) + assert not is_isomorphic(D, P) + + A = CyclicGroup(5) + B = CyclicGroup(7) + assert not is_isomorphic(A, B) + + # Two groups of the same prime order are isomorphic to each other. 
+ G = FpGroup(F, [a, b**5]) + H = CyclicGroup(5) + assert G.order() == H.order() + assert is_isomorphic(G, H) + + +def test_check_homomorphism(): + a = Permutation(1,2,3,4) + b = Permutation(1,3) + G = PermutationGroup([a, b]) + raises(ValueError, lambda: homomorphism(G, G, [a], [a])) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_named_groups.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_named_groups.py new file mode 100644 index 0000000000000000000000000000000000000000..59bcb6ef3f020335de76d7a72152a0b58cbc6976 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_named_groups.py @@ -0,0 +1,70 @@ +from sympy.combinatorics.named_groups import (SymmetricGroup, CyclicGroup, + DihedralGroup, AlternatingGroup, + AbelianGroup, RubikGroup) +from sympy.testing.pytest import raises + + +def test_SymmetricGroup(): + G = SymmetricGroup(5) + elements = list(G.generate()) + assert (G.generators[0]).size == 5 + assert len(elements) == 120 + assert G.is_solvable is False + assert G.is_abelian is False + assert G.is_nilpotent is False + assert G.is_transitive() is True + H = SymmetricGroup(1) + assert H.order() == 1 + L = SymmetricGroup(2) + assert L.order() == 2 + + +def test_CyclicGroup(): + G = CyclicGroup(10) + elements = list(G.generate()) + assert len(elements) == 10 + assert (G.derived_subgroup()).order() == 1 + assert G.is_abelian is True + assert G.is_solvable is True + assert G.is_nilpotent is True + H = CyclicGroup(1) + assert H.order() == 1 + L = CyclicGroup(2) + assert L.order() == 2 + + +def test_DihedralGroup(): + G = DihedralGroup(6) + elements = list(G.generate()) + assert len(elements) == 12 + assert G.is_transitive() is True + assert G.is_abelian is False + assert G.is_solvable is True + assert G.is_nilpotent is False + H = DihedralGroup(1) + assert H.order() == 2 + L = DihedralGroup(2) + assert L.order() == 4 + assert L.is_abelian is True + assert L.is_nilpotent is True + + +def test_AlternatingGroup(): + G = AlternatingGroup(5) + elements = list(G.generate()) + assert len(elements) == 60 + assert [perm.is_even for perm in elements] == [True]*60 + H = AlternatingGroup(1) + assert H.order() == 1 + L = AlternatingGroup(2) + assert L.order() == 1 + + +def test_AbelianGroup(): + A = AbelianGroup(3, 3, 3) + assert A.order() == 27 + assert A.is_abelian is True + + +def test_RubikGroup(): + raises(ValueError, lambda: RubikGroup(1)) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_partitions.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_partitions.py new file mode 100644 index 0000000000000000000000000000000000000000..32e70e53a53aadbb17c8292bbef8f52d1144d6e0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_partitions.py @@ -0,0 +1,118 @@ +from sympy.core.sorting import ordered, default_sort_key +from sympy.combinatorics.partitions import (Partition, IntegerPartition, + RGS_enum, RGS_unrank, RGS_rank, + random_integer_partition) +from sympy.testing.pytest import raises +from sympy.utilities.iterables import partitions +from sympy.sets.sets import Set, FiniteSet + + +def test_partition_constructor(): + raises(ValueError, lambda: Partition([1, 1, 2])) + raises(ValueError, lambda: Partition([1, 2, 3], [2, 3, 4])) + raises(ValueError, lambda: Partition(1, 2, 3)) + raises(ValueError, lambda: Partition(*list(range(3)))) + + assert Partition([1, 2, 3], [4, 5]) == Partition([4, 5], [1, 2, 
3]) + assert Partition({1, 2, 3}, {4, 5}) == Partition([1, 2, 3], [4, 5]) + + a = FiniteSet(1, 2, 3) + b = FiniteSet(4, 5) + assert Partition(a, b) == Partition([1, 2, 3], [4, 5]) + assert Partition({a, b}) == Partition(FiniteSet(a, b)) + assert Partition({a, b}) != Partition(a, b) + +def test_partition(): + from sympy.abc import x + + a = Partition([1, 2, 3], [4]) + b = Partition([1, 2], [3, 4]) + c = Partition([x]) + l = [a, b, c] + l.sort(key=default_sort_key) + assert l == [c, a, b] + l.sort(key=lambda w: default_sort_key(w, order='rev-lex')) + assert l == [c, a, b] + + assert (a == b) is False + assert a <= b + assert (a > b) is False + assert a != b + assert a < b + + assert (a + 2).partition == [[1, 2], [3, 4]] + assert (b - 1).partition == [[1, 2, 4], [3]] + + assert (a - 1).partition == [[1, 2, 3, 4]] + assert (a + 1).partition == [[1, 2, 4], [3]] + assert (b + 1).partition == [[1, 2], [3], [4]] + + assert a.rank == 1 + assert b.rank == 3 + + assert a.RGS == (0, 0, 0, 1) + assert b.RGS == (0, 0, 1, 1) + + +def test_integer_partition(): + # no zeros in partition + raises(ValueError, lambda: IntegerPartition(list(range(3)))) + # check fails since 1 + 2 != 100 + raises(ValueError, lambda: IntegerPartition(100, list(range(1, 3)))) + a = IntegerPartition(8, [1, 3, 4]) + b = a.next_lex() + c = IntegerPartition([1, 3, 4]) + d = IntegerPartition(8, {1: 3, 3: 1, 2: 1}) + assert a == c + assert a.integer == d.integer + assert a.conjugate == [3, 2, 2, 1] + assert (a == b) is False + assert a <= b + assert (a > b) is False + assert a != b + + for i in range(1, 11): + next = set() + prev = set() + a = IntegerPartition([i]) + ans = {IntegerPartition(p) for p in partitions(i)} + n = len(ans) + for j in range(n): + next.add(a) + a = a.next_lex() + IntegerPartition(i, a.partition) # check it by giving i + for j in range(n): + prev.add(a) + a = a.prev_lex() + IntegerPartition(i, a.partition) # check it by giving i + assert next == ans + assert prev == ans + + assert IntegerPartition([1, 2, 3]).as_ferrers() == '###\n##\n#' + assert IntegerPartition([1, 1, 3]).as_ferrers('o') == 'ooo\no\no' + assert str(IntegerPartition([1, 1, 3])) == '[3, 1, 1]' + assert IntegerPartition([1, 1, 3]).partition == [3, 1, 1] + + raises(ValueError, lambda: random_integer_partition(-1)) + assert random_integer_partition(1) == [1] + assert random_integer_partition(10, seed=[1, 3, 2, 1, 5, 1] + ) == [5, 2, 1, 1, 1] + + +def test_rgs(): + raises(ValueError, lambda: RGS_unrank(-1, 3)) + raises(ValueError, lambda: RGS_unrank(3, 0)) + raises(ValueError, lambda: RGS_unrank(10, 1)) + + raises(ValueError, lambda: Partition.from_rgs(list(range(3)), list(range(2)))) + raises(ValueError, lambda: Partition.from_rgs(list(range(1, 3)), list(range(2)))) + assert RGS_enum(-1) == 0 + assert RGS_enum(1) == 1 + assert RGS_unrank(7, 5) == [0, 0, 1, 0, 2] + assert RGS_unrank(23, 14) == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 2] + assert RGS_rank(RGS_unrank(40, 100)) == 40 + +def test_ordered_partition_9608(): + a = Partition([1, 2, 3], [4]) + b = Partition([1, 2], [3, 4]) + assert list(ordered([a,b], Set._infimum_key)) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_perm_groups.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_perm_groups.py new file mode 100644 index 0000000000000000000000000000000000000000..41be445fa806167687e47bdae8a1883ea30e9f0f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_perm_groups.py @@ -0,0 +1,1243 @@ +from 
sympy.core.containers import Tuple +from sympy.combinatorics.generators import rubik_cube_generators +from sympy.combinatorics.homomorphisms import is_isomorphic +from sympy.combinatorics.named_groups import SymmetricGroup, CyclicGroup,\ + DihedralGroup, AlternatingGroup, AbelianGroup, RubikGroup +from sympy.combinatorics.perm_groups import (PermutationGroup, + _orbit_transversal, Coset, SymmetricPermutationGroup) +from sympy.combinatorics.permutations import Permutation +from sympy.combinatorics.polyhedron import tetrahedron as Tetra, cube +from sympy.combinatorics.testutil import _verify_bsgs, _verify_centralizer,\ + _verify_normal_closure +from sympy.testing.pytest import skip, XFAIL, slow + +rmul = Permutation.rmul + + +def test_has(): + a = Permutation([1, 0]) + G = PermutationGroup([a]) + assert G.is_abelian + a = Permutation([2, 0, 1]) + b = Permutation([2, 1, 0]) + G = PermutationGroup([a, b]) + assert not G.is_abelian + + G = PermutationGroup([a]) + assert G.has(a) + assert not G.has(b) + + a = Permutation([2, 0, 1, 3, 4, 5]) + b = Permutation([0, 2, 1, 3, 4]) + assert PermutationGroup(a, b).degree == \ + PermutationGroup(a, b).degree == 6 + + g = PermutationGroup(Permutation(0, 2, 1)) + assert Tuple(1, g).has(g) + + +def test_generate(): + a = Permutation([1, 0]) + g = list(PermutationGroup([a]).generate()) + assert g == [Permutation([0, 1]), Permutation([1, 0])] + assert len(list(PermutationGroup(Permutation((0, 1))).generate())) == 1 + g = PermutationGroup([a]).generate(method='dimino') + assert list(g) == [Permutation([0, 1]), Permutation([1, 0])] + a = Permutation([2, 0, 1]) + b = Permutation([2, 1, 0]) + G = PermutationGroup([a, b]) + g = G.generate() + v1 = [p.array_form for p in list(g)] + v1.sort() + assert v1 == [[0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 0, + 1], [2, 1, 0]] + v2 = list(G.generate(method='dimino', af=True)) + assert v1 == sorted(v2) + a = Permutation([2, 0, 1, 3, 4, 5]) + b = Permutation([2, 1, 3, 4, 5, 0]) + g = PermutationGroup([a, b]).generate(af=True) + assert len(list(g)) == 360 + + +def test_order(): + a = Permutation([2, 0, 1, 3, 4, 5, 6, 7, 8, 9]) + b = Permutation([2, 1, 3, 4, 5, 6, 7, 8, 9, 0]) + g = PermutationGroup([a, b]) + assert g.order() == 1814400 + assert PermutationGroup().order() == 1 + + +def test_equality(): + p_1 = Permutation(0, 1, 3) + p_2 = Permutation(0, 2, 3) + p_3 = Permutation(0, 1, 2) + p_4 = Permutation(0, 1, 3) + g_1 = PermutationGroup(p_1, p_2) + g_2 = PermutationGroup(p_3, p_4) + g_3 = PermutationGroup(p_2, p_1) + g_4 = PermutationGroup(p_1, p_2) + + assert g_1 != g_2 + assert g_1.generators != g_2.generators + assert g_1.equals(g_2) + assert g_1 != g_3 + assert g_1.equals(g_3) + assert g_1 == g_4 + + +def test_stabilizer(): + S = SymmetricGroup(2) + H = S.stabilizer(0) + assert H.generators == [Permutation(1)] + a = Permutation([2, 0, 1, 3, 4, 5]) + b = Permutation([2, 1, 3, 4, 5, 0]) + G = PermutationGroup([a, b]) + G0 = G.stabilizer(0) + assert G0.order() == 60 + + gens_cube = [[1, 3, 5, 7, 0, 2, 4, 6], [1, 3, 0, 2, 5, 7, 4, 6]] + gens = [Permutation(p) for p in gens_cube] + G = PermutationGroup(gens) + G2 = G.stabilizer(2) + assert G2.order() == 6 + G2_1 = G2.stabilizer(1) + v = list(G2_1.generate(af=True)) + assert v == [[0, 1, 2, 3, 4, 5, 6, 7], [3, 1, 2, 0, 7, 5, 6, 4]] + + gens = ( + (1, 2, 0, 4, 5, 3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19), + (0, 1, 2, 3, 4, 5, 19, 6, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 7, 17, 18), + (0, 1, 2, 3, 4, 5, 6, 7, 9, 18, 16, 11, 12, 13, 14, 15, 8, 17, 10, 19)) + 
gens = [Permutation(p) for p in gens] + G = PermutationGroup(gens) + G2 = G.stabilizer(2) + assert G2.order() == 181440 + S = SymmetricGroup(3) + assert [G.order() for G in S.basic_stabilizers] == [6, 2] + + +def test_center(): + # the center of the dihedral group D_n is of order 2 for even n + for i in (4, 6, 10): + D = DihedralGroup(i) + assert (D.center()).order() == 2 + # the center of the dihedral group D_n is of order 1 for odd n>2 + for i in (3, 5, 7): + D = DihedralGroup(i) + assert (D.center()).order() == 1 + # the center of an abelian group is the group itself + for i in (2, 3, 5): + for j in (1, 5, 7): + for k in (1, 1, 11): + G = AbelianGroup(i, j, k) + assert G.center().is_subgroup(G) + # the center of a nonabelian simple group is trivial + for i in(1, 5, 9): + A = AlternatingGroup(i) + assert (A.center()).order() == 1 + # brute-force verifications + D = DihedralGroup(5) + A = AlternatingGroup(3) + C = CyclicGroup(4) + G.is_subgroup(D*A*C) + assert _verify_centralizer(G, G) + + +def test_centralizer(): + # the centralizer of the trivial group is the entire group + S = SymmetricGroup(2) + assert S.centralizer(Permutation(list(range(2)))).is_subgroup(S) + A = AlternatingGroup(5) + assert A.centralizer(Permutation(list(range(5)))).is_subgroup(A) + # a centralizer in the trivial group is the trivial group itself + triv = PermutationGroup([Permutation([0, 1, 2, 3])]) + D = DihedralGroup(4) + assert triv.centralizer(D).is_subgroup(triv) + # brute-force verifications for centralizers of groups + for i in (4, 5, 6): + S = SymmetricGroup(i) + A = AlternatingGroup(i) + C = CyclicGroup(i) + D = DihedralGroup(i) + for gp in (S, A, C, D): + for gp2 in (S, A, C, D): + if not gp2.is_subgroup(gp): + assert _verify_centralizer(gp, gp2) + # verify the centralizer for all elements of several groups + S = SymmetricGroup(5) + elements = list(S.generate_dimino()) + for element in elements: + assert _verify_centralizer(S, element) + A = AlternatingGroup(5) + elements = list(A.generate_dimino()) + for element in elements: + assert _verify_centralizer(A, element) + D = DihedralGroup(7) + elements = list(D.generate_dimino()) + for element in elements: + assert _verify_centralizer(D, element) + # verify centralizers of small groups within small groups + small = [] + for i in (1, 2, 3): + small.append(SymmetricGroup(i)) + small.append(AlternatingGroup(i)) + small.append(DihedralGroup(i)) + small.append(CyclicGroup(i)) + for gp in small: + for gp2 in small: + if gp.degree == gp2.degree: + assert _verify_centralizer(gp, gp2) + + +def test_coset_rank(): + gens_cube = [[1, 3, 5, 7, 0, 2, 4, 6], [1, 3, 0, 2, 5, 7, 4, 6]] + gens = [Permutation(p) for p in gens_cube] + G = PermutationGroup(gens) + i = 0 + for h in G.generate(af=True): + rk = G.coset_rank(h) + assert rk == i + h1 = G.coset_unrank(rk, af=True) + assert h == h1 + i += 1 + assert G.coset_unrank(48) is None + assert G.coset_unrank(G.coset_rank(gens[0])) == gens[0] + + +def test_coset_factor(): + a = Permutation([0, 2, 1]) + G = PermutationGroup([a]) + c = Permutation([2, 1, 0]) + assert not G.coset_factor(c) + assert G.coset_rank(c) is None + + a = Permutation([2, 0, 1, 3, 4, 5]) + b = Permutation([2, 1, 3, 4, 5, 0]) + g = PermutationGroup([a, b]) + assert g.order() == 360 + d = Permutation([1, 0, 2, 3, 4, 5]) + assert not g.coset_factor(d.array_form) + assert not g.contains(d) + assert Permutation(2) in G + c = Permutation([1, 0, 2, 3, 5, 4]) + v = g.coset_factor(c, True) + tr = g.basic_transversals + p = Permutation.rmul(*[tr[i][v[i]] for i in 
range(len(g.base))]) + assert p == c + v = g.coset_factor(c) + p = Permutation.rmul(*v) + assert p == c + assert g.contains(c) + G = PermutationGroup([Permutation([2, 1, 0])]) + p = Permutation([1, 0, 2]) + assert G.coset_factor(p) == [] + + +def test_orbits(): + a = Permutation([2, 0, 1]) + b = Permutation([2, 1, 0]) + g = PermutationGroup([a, b]) + assert g.orbit(0) == {0, 1, 2} + assert g.orbits() == [{0, 1, 2}] + assert g.is_transitive() and g.is_transitive(strict=False) + assert g.orbit_transversal(0) == \ + [Permutation( + [0, 1, 2]), Permutation([2, 0, 1]), Permutation([1, 2, 0])] + assert g.orbit_transversal(0, True) == \ + [(0, Permutation([0, 1, 2])), (2, Permutation([2, 0, 1])), + (1, Permutation([1, 2, 0]))] + + G = DihedralGroup(6) + transversal, slps = _orbit_transversal(G.degree, G.generators, 0, True, slp=True) + for i, t in transversal: + slp = slps[i] + w = G.identity + for s in slp: + w = G.generators[s]*w + assert w == t + + a = Permutation(list(range(1, 100)) + [0]) + G = PermutationGroup([a]) + assert [min(o) for o in G.orbits()] == [0] + G = PermutationGroup(rubik_cube_generators()) + assert [min(o) for o in G.orbits()] == [0, 1] + assert not G.is_transitive() and not G.is_transitive(strict=False) + G = PermutationGroup([Permutation(0, 1, 3), Permutation(3)(0, 1)]) + assert not G.is_transitive() and G.is_transitive(strict=False) + assert PermutationGroup( + Permutation(3)).is_transitive(strict=False) is False + + +def test_is_normal(): + gens_s5 = [Permutation(p) for p in [[1, 2, 3, 4, 0], [2, 1, 4, 0, 3]]] + G1 = PermutationGroup(gens_s5) + assert G1.order() == 120 + gens_a5 = [Permutation(p) for p in [[1, 0, 3, 2, 4], [2, 1, 4, 3, 0]]] + G2 = PermutationGroup(gens_a5) + assert G2.order() == 60 + assert G2.is_normal(G1) + gens3 = [Permutation(p) for p in [[2, 1, 3, 0, 4], [1, 2, 0, 3, 4]]] + G3 = PermutationGroup(gens3) + assert not G3.is_normal(G1) + assert G3.order() == 12 + G4 = G1.normal_closure(G3.generators) + assert G4.order() == 60 + gens5 = [Permutation(p) for p in [[1, 2, 3, 0, 4], [1, 2, 0, 3, 4]]] + G5 = PermutationGroup(gens5) + assert G5.order() == 24 + G6 = G1.normal_closure(G5.generators) + assert G6.order() == 120 + assert G1.is_subgroup(G6) + assert not G1.is_subgroup(G4) + assert G2.is_subgroup(G4) + I5 = PermutationGroup(Permutation(4)) + assert I5.is_normal(G5) + assert I5.is_normal(G6, strict=False) + p1 = Permutation([1, 0, 2, 3, 4]) + p2 = Permutation([0, 1, 2, 4, 3]) + p3 = Permutation([3, 4, 2, 1, 0]) + id_ = Permutation([0, 1, 2, 3, 4]) + H = PermutationGroup([p1, p3]) + H_n1 = PermutationGroup([p1, p2]) + H_n2_1 = PermutationGroup(p1) + H_n2_2 = PermutationGroup(p2) + H_id = PermutationGroup(id_) + assert H_n1.is_normal(H) + assert H_n2_1.is_normal(H_n1) + assert H_n2_2.is_normal(H_n1) + assert H_id.is_normal(H_n2_1) + assert H_id.is_normal(H_n1) + assert H_id.is_normal(H) + assert not H_n2_1.is_normal(H) + assert not H_n2_2.is_normal(H) + + +def test_eq(): + a = [[1, 2, 0, 3, 4, 5], [1, 0, 2, 3, 4, 5], [2, 1, 0, 3, 4, 5], [ + 1, 2, 0, 3, 4, 5]] + a = [Permutation(p) for p in a + [[1, 2, 3, 4, 5, 0]]] + g = Permutation([1, 2, 3, 4, 5, 0]) + G1, G2, G3 = [PermutationGroup(x) for x in [a[:2], a[2:4], [g, g**2]]] + assert G1.order() == G2.order() == G3.order() == 6 + assert G1.is_subgroup(G2) + assert not G1.is_subgroup(G3) + G4 = PermutationGroup([Permutation([0, 1])]) + assert not G1.is_subgroup(G4) + assert G4.is_subgroup(G1, 0) + assert PermutationGroup(g, g).is_subgroup(PermutationGroup(g)) + assert 
SymmetricGroup(3).is_subgroup(SymmetricGroup(4), 0) + assert SymmetricGroup(3).is_subgroup(SymmetricGroup(3)*CyclicGroup(5), 0) + assert not CyclicGroup(5).is_subgroup(SymmetricGroup(3)*CyclicGroup(5), 0) + assert CyclicGroup(3).is_subgroup(SymmetricGroup(3)*CyclicGroup(5), 0) + + +def test_derived_subgroup(): + a = Permutation([1, 0, 2, 4, 3]) + b = Permutation([0, 1, 3, 2, 4]) + G = PermutationGroup([a, b]) + C = G.derived_subgroup() + assert C.order() == 3 + assert C.is_normal(G) + assert C.is_subgroup(G, 0) + assert not G.is_subgroup(C, 0) + gens_cube = [[1, 3, 5, 7, 0, 2, 4, 6], [1, 3, 0, 2, 5, 7, 4, 6]] + gens = [Permutation(p) for p in gens_cube] + G = PermutationGroup(gens) + C = G.derived_subgroup() + assert C.order() == 12 + + +def test_is_solvable(): + a = Permutation([1, 2, 0]) + b = Permutation([1, 0, 2]) + G = PermutationGroup([a, b]) + assert G.is_solvable + G = PermutationGroup([a]) + assert G.is_solvable + a = Permutation([1, 2, 3, 4, 0]) + b = Permutation([1, 0, 2, 3, 4]) + G = PermutationGroup([a, b]) + assert not G.is_solvable + P = SymmetricGroup(10) + S = P.sylow_subgroup(3) + assert S.is_solvable + +def test_rubik1(): + gens = rubik_cube_generators() + gens1 = [gens[-1]] + [p**2 for p in gens[1:]] + G1 = PermutationGroup(gens1) + assert G1.order() == 19508428800 + gens2 = [p**2 for p in gens] + G2 = PermutationGroup(gens2) + assert G2.order() == 663552 + assert G2.is_subgroup(G1, 0) + C1 = G1.derived_subgroup() + assert C1.order() == 4877107200 + assert C1.is_subgroup(G1, 0) + assert not G2.is_subgroup(C1, 0) + + G = RubikGroup(2) + assert G.order() == 3674160 + + +@XFAIL +def test_rubik(): + skip('takes too much time') + G = PermutationGroup(rubik_cube_generators()) + assert G.order() == 43252003274489856000 + G1 = PermutationGroup(G[:3]) + assert G1.order() == 170659735142400 + assert not G1.is_normal(G) + G2 = G.normal_closure(G1.generators) + assert G2.is_subgroup(G) + + +def test_direct_product(): + C = CyclicGroup(4) + D = DihedralGroup(4) + G = C*C*C + assert G.order() == 64 + assert G.degree == 12 + assert len(G.orbits()) == 3 + assert G.is_abelian is True + H = D*C + assert H.order() == 32 + assert H.is_abelian is False + + +def test_orbit_rep(): + G = DihedralGroup(6) + assert G.orbit_rep(1, 3) in [Permutation([2, 3, 4, 5, 0, 1]), + Permutation([4, 3, 2, 1, 0, 5])] + H = CyclicGroup(4)*G + assert H.orbit_rep(1, 5) is False + + +def test_schreier_vector(): + G = CyclicGroup(50) + v = [0]*50 + v[23] = -1 + assert G.schreier_vector(23) == v + H = DihedralGroup(8) + assert H.schreier_vector(2) == [0, 1, -1, 0, 0, 1, 0, 0] + L = SymmetricGroup(4) + assert L.schreier_vector(1) == [1, -1, 0, 0] + + +def test_random_pr(): + D = DihedralGroup(6) + r = 11 + n = 3 + _random_prec_n = {} + _random_prec_n[0] = {'s': 7, 't': 3, 'x': 2, 'e': -1} + _random_prec_n[1] = {'s': 5, 't': 5, 'x': 1, 'e': -1} + _random_prec_n[2] = {'s': 3, 't': 4, 'x': 2, 'e': 1} + D._random_pr_init(r, n, _random_prec_n=_random_prec_n) + assert D._random_gens[11] == [0, 1, 2, 3, 4, 5] + _random_prec = {'s': 2, 't': 9, 'x': 1, 'e': -1} + assert D.random_pr(_random_prec=_random_prec) == \ + Permutation([0, 5, 4, 3, 2, 1]) + + +def test_is_alt_sym(): + G = DihedralGroup(10) + assert G.is_alt_sym() is False + assert G._eval_is_alt_sym_naive() is False + assert G._eval_is_alt_sym_naive(only_alt=True) is False + assert G._eval_is_alt_sym_naive(only_sym=True) is False + + S = SymmetricGroup(10) + assert S._eval_is_alt_sym_naive() is True + assert S._eval_is_alt_sym_naive(only_alt=True) is False + assert 
S._eval_is_alt_sym_naive(only_sym=True) is True + + N_eps = 10 + _random_prec = {'N_eps': N_eps, + 0: Permutation([[2], [1, 4], [0, 6, 7, 8, 9, 3, 5]]), + 1: Permutation([[1, 8, 7, 6, 3, 5, 2, 9], [0, 4]]), + 2: Permutation([[5, 8], [4, 7], [0, 1, 2, 3, 6, 9]]), + 3: Permutation([[3], [0, 8, 2, 7, 4, 1, 6, 9, 5]]), + 4: Permutation([[8], [4, 7, 9], [3, 6], [0, 5, 1, 2]]), + 5: Permutation([[6], [0, 2, 4, 5, 1, 8, 3, 9, 7]]), + 6: Permutation([[6, 9, 8], [4, 5], [1, 3, 7], [0, 2]]), + 7: Permutation([[4], [0, 2, 9, 1, 3, 8, 6, 5, 7]]), + 8: Permutation([[1, 5, 6, 3], [0, 2, 7, 8, 4, 9]]), + 9: Permutation([[8], [6, 7], [2, 3, 4, 5], [0, 1, 9]])} + assert S.is_alt_sym(_random_prec=_random_prec) is True + + A = AlternatingGroup(10) + assert A._eval_is_alt_sym_naive() is True + assert A._eval_is_alt_sym_naive(only_alt=True) is True + assert A._eval_is_alt_sym_naive(only_sym=True) is False + + _random_prec = {'N_eps': N_eps, + 0: Permutation([[1, 6, 4, 2, 7, 8, 5, 9, 3], [0]]), + 1: Permutation([[1], [0, 5, 8, 4, 9, 2, 3, 6, 7]]), + 2: Permutation([[1, 9, 8, 3, 2, 5], [0, 6, 7, 4]]), + 3: Permutation([[6, 8, 9], [4, 5], [1, 3, 7, 2], [0]]), + 4: Permutation([[8], [5], [4], [2, 6, 9, 3], [1], [0, 7]]), + 5: Permutation([[3, 6], [0, 8, 1, 7, 5, 9, 4, 2]]), + 6: Permutation([[5], [2, 9], [1, 8, 3], [0, 4, 7, 6]]), + 7: Permutation([[1, 8, 4, 7, 2, 3], [0, 6, 9, 5]]), + 8: Permutation([[5, 8, 7], [3], [1, 4, 2, 6], [0, 9]]), + 9: Permutation([[4, 9, 6], [3, 8], [1, 2], [0, 5, 7]])} + assert A.is_alt_sym(_random_prec=_random_prec) is False + + G = PermutationGroup( + Permutation(1, 3, size=8)(0, 2, 4, 6), + Permutation(5, 7, size=8)(0, 2, 4, 6)) + assert G.is_alt_sym() is False + + # Tests for monte-carlo c_n parameter setting, and which guarantees + # to give False. + G = DihedralGroup(10) + assert G._eval_is_alt_sym_monte_carlo() is False + G = DihedralGroup(20) + assert G._eval_is_alt_sym_monte_carlo() is False + + # A dry-running test to check if it looks up for the updated cache. 
+ G = DihedralGroup(6) + G.is_alt_sym() + assert G.is_alt_sym() is False + + +def test_minimal_block(): + D = DihedralGroup(6) + block_system = D.minimal_block([0, 3]) + for i in range(3): + assert block_system[i] == block_system[i + 3] + S = SymmetricGroup(6) + assert S.minimal_block([0, 1]) == [0, 0, 0, 0, 0, 0] + + assert Tetra.pgroup.minimal_block([0, 1]) == [0, 0, 0, 0] + + P1 = PermutationGroup(Permutation(1, 5)(2, 4), Permutation(0, 1, 2, 3, 4, 5)) + P2 = PermutationGroup(Permutation(0, 1, 2, 3, 4, 5), Permutation(1, 5)(2, 4)) + assert P1.minimal_block([0, 2]) == [0, 1, 0, 1, 0, 1] + assert P2.minimal_block([0, 2]) == [0, 1, 0, 1, 0, 1] + + +def test_minimal_blocks(): + P = PermutationGroup(Permutation(1, 5)(2, 4), Permutation(0, 1, 2, 3, 4, 5)) + assert P.minimal_blocks() == [[0, 1, 0, 1, 0, 1], [0, 1, 2, 0, 1, 2]] + + P = SymmetricGroup(5) + assert P.minimal_blocks() == [[0]*5] + + P = PermutationGroup(Permutation(0, 3)) + assert P.minimal_blocks() is False + + +def test_max_div(): + S = SymmetricGroup(10) + assert S.max_div == 5 + + +def test_is_primitive(): + S = SymmetricGroup(5) + assert S.is_primitive() is True + C = CyclicGroup(7) + assert C.is_primitive() is True + + a = Permutation(0, 1, 2, size=6) + b = Permutation(3, 4, 5, size=6) + G = PermutationGroup(a, b) + assert G.is_primitive() is False + + +def test_random_stab(): + S = SymmetricGroup(5) + _random_el = Permutation([1, 3, 2, 0, 4]) + _random_prec = {'rand': _random_el} + g = S.random_stab(2, _random_prec=_random_prec) + assert g == Permutation([1, 3, 2, 0, 4]) + h = S.random_stab(1) + assert h(1) == 1 + + +def test_transitivity_degree(): + perm = Permutation([1, 2, 0]) + C = PermutationGroup([perm]) + assert C.transitivity_degree == 1 + gen1 = Permutation([1, 2, 0, 3, 4]) + gen2 = Permutation([1, 2, 3, 4, 0]) + # alternating group of degree 5 + Alt = PermutationGroup([gen1, gen2]) + assert Alt.transitivity_degree == 3 + + +def test_schreier_sims_random(): + assert sorted(Tetra.pgroup.base) == [0, 1] + + S = SymmetricGroup(3) + base = [0, 1] + strong_gens = [Permutation([1, 2, 0]), Permutation([1, 0, 2]), + Permutation([0, 2, 1])] + assert S.schreier_sims_random(base, strong_gens, 5) == (base, strong_gens) + D = DihedralGroup(3) + _random_prec = {'g': [Permutation([2, 0, 1]), Permutation([1, 2, 0]), + Permutation([1, 0, 2])]} + base = [0, 1] + strong_gens = [Permutation([1, 2, 0]), Permutation([2, 1, 0]), + Permutation([0, 2, 1])] + assert D.schreier_sims_random([], D.generators, 2, + _random_prec=_random_prec) == (base, strong_gens) + + +def test_baseswap(): + S = SymmetricGroup(4) + S.schreier_sims() + base = S.base + strong_gens = S.strong_gens + assert base == [0, 1, 2] + deterministic = S.baseswap(base, strong_gens, 1, randomized=False) + randomized = S.baseswap(base, strong_gens, 1) + assert deterministic[0] == [0, 2, 1] + assert _verify_bsgs(S, deterministic[0], deterministic[1]) is True + assert randomized[0] == [0, 2, 1] + assert _verify_bsgs(S, randomized[0], randomized[1]) is True + + +def test_schreier_sims_incremental(): + identity = Permutation([0, 1, 2, 3, 4]) + TrivialGroup = PermutationGroup([identity]) + base, strong_gens = TrivialGroup.schreier_sims_incremental(base=[0, 1, 2]) + assert _verify_bsgs(TrivialGroup, base, strong_gens) is True + S = SymmetricGroup(5) + base, strong_gens = S.schreier_sims_incremental(base=[0, 1, 2]) + assert _verify_bsgs(S, base, strong_gens) is True + D = DihedralGroup(2) + base, strong_gens = D.schreier_sims_incremental(base=[1]) + assert _verify_bsgs(D, base, 
strong_gens) is True + A = AlternatingGroup(7) + gens = A.generators[:] + gen0 = gens[0] + gen1 = gens[1] + gen1 = rmul(gen1, ~gen0) + gen0 = rmul(gen0, gen1) + gen1 = rmul(gen0, gen1) + base, strong_gens = A.schreier_sims_incremental(base=[0, 1], gens=gens) + assert _verify_bsgs(A, base, strong_gens) is True + C = CyclicGroup(11) + gen = C.generators[0] + base, strong_gens = C.schreier_sims_incremental(gens=[gen**3]) + assert _verify_bsgs(C, base, strong_gens) is True + + +def _subgroup_search(i, j, k): + prop_true = lambda x: True + prop_fix_points = lambda x: [x(point) for point in points] == points + prop_comm_g = lambda x: rmul(x, g) == rmul(g, x) + prop_even = lambda x: x.is_even + for i in range(i, j, k): + S = SymmetricGroup(i) + A = AlternatingGroup(i) + C = CyclicGroup(i) + Sym = S.subgroup_search(prop_true) + assert Sym.is_subgroup(S) + Alt = S.subgroup_search(prop_even) + assert Alt.is_subgroup(A) + Sym = S.subgroup_search(prop_true, init_subgroup=C) + assert Sym.is_subgroup(S) + points = [7] + assert S.stabilizer(7).is_subgroup(S.subgroup_search(prop_fix_points)) + points = [3, 4] + assert S.stabilizer(3).stabilizer(4).is_subgroup( + S.subgroup_search(prop_fix_points)) + points = [3, 5] + fix35 = A.subgroup_search(prop_fix_points) + points = [5] + fix5 = A.subgroup_search(prop_fix_points) + assert A.subgroup_search(prop_fix_points, init_subgroup=fix35 + ).is_subgroup(fix5) + base, strong_gens = A.schreier_sims_incremental() + g = A.generators[0] + comm_g = \ + A.subgroup_search(prop_comm_g, base=base, strong_gens=strong_gens) + assert _verify_bsgs(comm_g, base, comm_g.generators) is True + assert [prop_comm_g(gen) is True for gen in comm_g.generators] + + +def test_subgroup_search(): + _subgroup_search(10, 15, 2) + + +@XFAIL +def test_subgroup_search2(): + skip('takes too much time') + _subgroup_search(16, 17, 1) + + +def test_normal_closure(): + # the normal closure of the trivial group is trivial + S = SymmetricGroup(3) + identity = Permutation([0, 1, 2]) + closure = S.normal_closure(identity) + assert closure.is_trivial + # the normal closure of the entire group is the entire group + A = AlternatingGroup(4) + assert A.normal_closure(A).is_subgroup(A) + # brute-force verifications for subgroups + for i in (3, 4, 5): + S = SymmetricGroup(i) + A = AlternatingGroup(i) + D = DihedralGroup(i) + C = CyclicGroup(i) + for gp in (A, D, C): + assert _verify_normal_closure(S, gp) + # brute-force verifications for all elements of a group + S = SymmetricGroup(5) + elements = list(S.generate_dimino()) + for element in elements: + assert _verify_normal_closure(S, element) + # small groups + small = [] + for i in (1, 2, 3): + small.append(SymmetricGroup(i)) + small.append(AlternatingGroup(i)) + small.append(DihedralGroup(i)) + small.append(CyclicGroup(i)) + for gp in small: + for gp2 in small: + if gp2.is_subgroup(gp, 0) and gp2.degree == gp.degree: + assert _verify_normal_closure(gp, gp2) + + +def test_derived_series(): + # the derived series of the trivial group consists only of the trivial group + triv = PermutationGroup([Permutation([0, 1, 2])]) + assert triv.derived_series()[0].is_subgroup(triv) + # the derived series for a simple group consists only of the group itself + for i in (5, 6, 7): + A = AlternatingGroup(i) + assert A.derived_series()[0].is_subgroup(A) + # the derived series for S_4 is S_4 > A_4 > K_4 > triv + S = SymmetricGroup(4) + series = S.derived_series() + assert series[1].is_subgroup(AlternatingGroup(4)) + assert series[2].is_subgroup(DihedralGroup(2)) + assert 
series[3].is_trivial + + +def test_lower_central_series(): + # the lower central series of the trivial group consists of the trivial + # group + triv = PermutationGroup([Permutation([0, 1, 2])]) + assert triv.lower_central_series()[0].is_subgroup(triv) + # the lower central series of a simple group consists of the group itself + for i in (5, 6, 7): + A = AlternatingGroup(i) + assert A.lower_central_series()[0].is_subgroup(A) + # GAP-verified example + S = SymmetricGroup(6) + series = S.lower_central_series() + assert len(series) == 2 + assert series[1].is_subgroup(AlternatingGroup(6)) + + +def test_commutator(): + # the commutator of the trivial group and the trivial group is trivial + S = SymmetricGroup(3) + triv = PermutationGroup([Permutation([0, 1, 2])]) + assert S.commutator(triv, triv).is_subgroup(triv) + # the commutator of the trivial group and any other group is again trivial + A = AlternatingGroup(3) + assert S.commutator(triv, A).is_subgroup(triv) + # the commutator is commutative + for i in (3, 4, 5): + S = SymmetricGroup(i) + A = AlternatingGroup(i) + D = DihedralGroup(i) + assert S.commutator(A, D).is_subgroup(S.commutator(D, A)) + # the commutator of an abelian group is trivial + S = SymmetricGroup(7) + A1 = AbelianGroup(2, 5) + A2 = AbelianGroup(3, 4) + triv = PermutationGroup([Permutation([0, 1, 2, 3, 4, 5, 6])]) + assert S.commutator(A1, A1).is_subgroup(triv) + assert S.commutator(A2, A2).is_subgroup(triv) + # examples calculated by hand + S = SymmetricGroup(3) + A = AlternatingGroup(3) + assert S.commutator(A, S).is_subgroup(A) + + +def test_is_nilpotent(): + # every abelian group is nilpotent + for i in (1, 2, 3): + C = CyclicGroup(i) + Ab = AbelianGroup(i, i + 2) + assert C.is_nilpotent + assert Ab.is_nilpotent + Ab = AbelianGroup(5, 7, 10) + assert Ab.is_nilpotent + # A_5 is not solvable and thus not nilpotent + assert AlternatingGroup(5).is_nilpotent is False + + +def test_is_trivial(): + for i in range(5): + triv = PermutationGroup([Permutation(list(range(i)))]) + assert triv.is_trivial + + +def test_pointwise_stabilizer(): + S = SymmetricGroup(2) + stab = S.pointwise_stabilizer([0]) + assert stab.generators == [Permutation(1)] + S = SymmetricGroup(5) + points = [] + stab = S + for point in (2, 0, 3, 4, 1): + stab = stab.stabilizer(point) + points.append(point) + assert S.pointwise_stabilizer(points).is_subgroup(stab) + + +def test_make_perm(): + assert cube.pgroup.make_perm(5, seed=list(range(5))) == \ + Permutation([4, 7, 6, 5, 0, 3, 2, 1]) + assert cube.pgroup.make_perm(7, seed=list(range(7))) == \ + Permutation([6, 7, 3, 2, 5, 4, 0, 1]) + + +def test_elements(): + from sympy.sets.sets import FiniteSet + + p = Permutation(2, 3) + assert PermutationGroup(p).elements == {Permutation(3), Permutation(2, 3)} + assert FiniteSet(*PermutationGroup(p).elements) \ + == FiniteSet(Permutation(2, 3), Permutation(3)) + + +def test_is_group(): + assert PermutationGroup(Permutation(1,2), Permutation(2,4)).is_group is True + assert SymmetricGroup(4).is_group is True + + +def test_PermutationGroup(): + assert PermutationGroup() == PermutationGroup(Permutation()) + assert (PermutationGroup() == 0) is False + + +def test_coset_transvesal(): + G = AlternatingGroup(5) + H = PermutationGroup(Permutation(0,1,2),Permutation(1,2)(3,4)) + assert G.coset_transversal(H) == \ + [Permutation(4), Permutation(2, 3, 4), Permutation(2, 4, 3), + Permutation(1, 2, 4), Permutation(4)(1, 2, 3), Permutation(1, 3)(2, 4), + Permutation(0, 1, 2, 3, 4), Permutation(0, 1, 2, 4, 3), + Permutation(0, 1, 3, 2, 
4), Permutation(0, 2, 4, 1, 3)] + + +def test_coset_table(): + G = PermutationGroup(Permutation(0,1,2,3), Permutation(0,1,2), + Permutation(0,4,2,7), Permutation(5,6), Permutation(0,7)); + H = PermutationGroup(Permutation(0,1,2,3), Permutation(0,7)) + assert G.coset_table(H) == \ + [[0, 0, 0, 0, 1, 2, 3, 3, 0, 0], [4, 5, 2, 5, 6, 0, 7, 7, 1, 1], + [5, 4, 5, 1, 0, 6, 8, 8, 6, 6], [3, 3, 3, 3, 7, 8, 0, 0, 3, 3], + [2, 1, 4, 4, 4, 4, 9, 9, 4, 4], [1, 2, 1, 2, 5, 5, 10, 10, 5, 5], + [6, 6, 6, 6, 2, 1, 11, 11, 2, 2], [9, 10, 8, 10, 11, 3, 1, 1, 7, 7], + [10, 9, 10, 7, 3, 11, 2, 2, 11, 11], [8, 7, 9, 9, 9, 9, 4, 4, 9, 9], + [7, 8, 7, 8, 10, 10, 5, 5, 10, 10], [11, 11, 11, 11, 8, 7, 6, 6, 8, 8]] + + +def test_subgroup(): + G = PermutationGroup(Permutation(0,1,2), Permutation(0,2,3)) + H = G.subgroup([Permutation(0,1,3)]) + assert H.is_subgroup(G) + + +def test_generator_product(): + G = SymmetricGroup(5) + p = Permutation(0, 2, 3)(1, 4) + gens = G.generator_product(p) + assert all(g in G.strong_gens for g in gens) + w = G.identity + for g in gens: + w = g*w + assert w == p + + +def test_sylow_subgroup(): + P = PermutationGroup(Permutation(1, 5)(2, 4), Permutation(0, 1, 2, 3, 4, 5)) + S = P.sylow_subgroup(2) + assert S.order() == 4 + + P = DihedralGroup(12) + S = P.sylow_subgroup(3) + assert S.order() == 3 + + P = PermutationGroup( + Permutation(1, 5)(2, 4), Permutation(0, 1, 2, 3, 4, 5), Permutation(0, 2)) + S = P.sylow_subgroup(3) + assert S.order() == 9 + S = P.sylow_subgroup(2) + assert S.order() == 8 + + P = SymmetricGroup(10) + S = P.sylow_subgroup(2) + assert S.order() == 256 + S = P.sylow_subgroup(3) + assert S.order() == 81 + S = P.sylow_subgroup(5) + assert S.order() == 25 + + # the length of the lower central series + # of a p-Sylow subgroup of Sym(n) grows with + # the highest exponent exp of p such + # that n >= p**exp + exp = 1 + length = 0 + for i in range(2, 9): + P = SymmetricGroup(i) + S = P.sylow_subgroup(2) + ls = S.lower_central_series() + if i // 2**exp > 0: + # length increases with exponent + assert len(ls) > length + length = len(ls) + exp += 1 + else: + assert len(ls) == length + + G = SymmetricGroup(100) + S = G.sylow_subgroup(3) + assert G.order() % S.order() == 0 + assert G.order()/S.order() % 3 > 0 + + G = AlternatingGroup(100) + S = G.sylow_subgroup(2) + assert G.order() % S.order() == 0 + assert G.order()/S.order() % 2 > 0 + + G = DihedralGroup(18) + S = G.sylow_subgroup(p=2) + assert S.order() == 4 + + G = DihedralGroup(50) + S = G.sylow_subgroup(p=2) + assert S.order() == 4 + + +@slow +def test_presentation(): + def _test(P): + G = P.presentation() + return G.order() == P.order() + + def _strong_test(P): + G = P.strong_presentation() + chk = len(G.generators) == len(P.strong_gens) + return chk and G.order() == P.order() + + P = PermutationGroup(Permutation(0,1,5,2)(3,7,4,6), Permutation(0,3,5,4)(1,6,2,7)) + assert _test(P) + + P = AlternatingGroup(5) + assert _test(P) + + P = SymmetricGroup(5) + assert _test(P) + + P = PermutationGroup( + [Permutation(0,3,1,2), Permutation(3)(0,1), Permutation(0,1)(2,3)]) + assert _strong_test(P) + + P = DihedralGroup(6) + assert _strong_test(P) + + a = Permutation(0,1)(2,3) + b = Permutation(0,2)(3,1) + c = Permutation(4,5) + P = PermutationGroup(c, a, b) + assert _strong_test(P) + + +def test_polycyclic(): + a = Permutation([0, 1, 2]) + b = Permutation([2, 1, 0]) + G = PermutationGroup([a, b]) + assert G.is_polycyclic is True + + a = Permutation([1, 2, 3, 4, 0]) + b = Permutation([1, 0, 2, 3, 4]) + G = PermutationGroup([a, b]) + 
assert G.is_polycyclic is False + + +def test_elementary(): + a = Permutation([1, 5, 2, 0, 3, 6, 4]) + G = PermutationGroup([a]) + assert G.is_elementary(7) is False + + a = Permutation(0, 1)(2, 3) + b = Permutation(0, 2)(3, 1) + G = PermutationGroup([a, b]) + assert G.is_elementary(2) is True + c = Permutation(4, 5, 6) + G = PermutationGroup([a, b, c]) + assert G.is_elementary(2) is False + + G = SymmetricGroup(4).sylow_subgroup(2) + assert G.is_elementary(2) is False + H = AlternatingGroup(4).sylow_subgroup(2) + assert H.is_elementary(2) is True + + +def test_perfect(): + G = AlternatingGroup(3) + assert G.is_perfect is False + G = AlternatingGroup(5) + assert G.is_perfect is True + + +def test_index(): + G = PermutationGroup(Permutation(0,1,2), Permutation(0,2,3)) + H = G.subgroup([Permutation(0,1,3)]) + assert G.index(H) == 4 + + +def test_cyclic(): + G = SymmetricGroup(2) + assert G.is_cyclic + G = AbelianGroup(3, 7) + assert G.is_cyclic + G = AbelianGroup(7, 7) + assert not G.is_cyclic + G = AlternatingGroup(3) + assert G.is_cyclic + G = AlternatingGroup(4) + assert not G.is_cyclic + + # Order less than 6 + G = PermutationGroup(Permutation(0, 1, 2), Permutation(0, 2, 1)) + assert G.is_cyclic + G = PermutationGroup( + Permutation(0, 1, 2, 3), + Permutation(0, 2)(1, 3) + ) + assert G.is_cyclic + G = PermutationGroup( + Permutation(3), + Permutation(0, 1)(2, 3), + Permutation(0, 2)(1, 3), + Permutation(0, 3)(1, 2) + ) + assert G.is_cyclic is False + + # Order 15 + G = PermutationGroup( + Permutation(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14), + Permutation(0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13) + ) + assert G.is_cyclic + + # Distinct prime orders + assert PermutationGroup._distinct_primes_lemma([3, 5]) is True + assert PermutationGroup._distinct_primes_lemma([5, 7]) is True + assert PermutationGroup._distinct_primes_lemma([2, 3]) is None + assert PermutationGroup._distinct_primes_lemma([3, 5, 7]) is None + assert PermutationGroup._distinct_primes_lemma([5, 7, 13]) is True + + G = PermutationGroup( + Permutation(0, 1, 2, 3), + Permutation(0, 2)(1, 3)) + assert G.is_cyclic + assert G._is_abelian + + # Non-abelian and therefore not cyclic + G = PermutationGroup(*SymmetricGroup(3).generators) + assert G.is_cyclic is False + + # Abelian and cyclic + G = PermutationGroup( + Permutation(0, 1, 2, 3), + Permutation(4, 5, 6) + ) + assert G.is_cyclic + + # Abelian but not cyclic + G = PermutationGroup( + Permutation(0, 1), + Permutation(2, 3), + Permutation(4, 5, 6) + ) + assert G.is_cyclic is False + + +def test_dihedral(): + G = SymmetricGroup(2) + assert G.is_dihedral + G = SymmetricGroup(3) + assert G.is_dihedral + + G = AbelianGroup(2, 2) + assert G.is_dihedral + G = CyclicGroup(4) + assert not G.is_dihedral + + G = AbelianGroup(3, 5) + assert not G.is_dihedral + G = AbelianGroup(2) + assert G.is_dihedral + G = AbelianGroup(6) + assert not G.is_dihedral + + # D6, generated by two adjacent flips + G = PermutationGroup( + Permutation(1, 5)(2, 4), + Permutation(0, 1)(3, 4)(2, 5)) + assert G.is_dihedral + + # D7, generated by a flip and a rotation + G = PermutationGroup( + Permutation(1, 6)(2, 5)(3, 4), + Permutation(0, 1, 2, 3, 4, 5, 6)) + assert G.is_dihedral + + # S4, presented by three generators, fails due to having exactly 9 + # elements of order 2: + G = PermutationGroup( + Permutation(0, 1), Permutation(0, 2), + Permutation(0, 3)) + assert not G.is_dihedral + + # D7, given by three generators + G = PermutationGroup( + Permutation(1, 6)(2, 5)(3, 4), + Permutation(2, 0)(3, 
6)(4, 5), + Permutation(0, 1, 2, 3, 4, 5, 6)) + assert G.is_dihedral + + +def test_abelian_invariants(): + G = AbelianGroup(2, 3, 4) + assert G.abelian_invariants() == [2, 3, 4] + G=PermutationGroup([Permutation(1, 2, 3, 4), Permutation(1, 2), Permutation(5, 6)]) + assert G.abelian_invariants() == [2, 2] + G = AlternatingGroup(7) + assert G.abelian_invariants() == [] + G = AlternatingGroup(4) + assert G.abelian_invariants() == [3] + G = DihedralGroup(4) + assert G.abelian_invariants() == [2, 2] + + G = PermutationGroup([Permutation(1, 2, 3, 4, 5, 6, 7)]) + assert G.abelian_invariants() == [7] + G = DihedralGroup(12) + S = G.sylow_subgroup(3) + assert S.abelian_invariants() == [3] + G = PermutationGroup(Permutation(0, 1, 2), Permutation(0, 2, 3)) + assert G.abelian_invariants() == [3] + G = PermutationGroup([Permutation(0, 1), Permutation(0, 2, 4, 6)(1, 3, 5, 7)]) + assert G.abelian_invariants() == [2, 4] + G = SymmetricGroup(30) + S = G.sylow_subgroup(2) + assert S.abelian_invariants() == [2, 2, 2, 2, 2, 2, 2, 2, 2, 2] + S = G.sylow_subgroup(3) + assert S.abelian_invariants() == [3, 3, 3, 3] + S = G.sylow_subgroup(5) + assert S.abelian_invariants() == [5, 5, 5] + + +def test_composition_series(): + a = Permutation(1, 2, 3) + b = Permutation(1, 2) + G = PermutationGroup([a, b]) + comp_series = G.composition_series() + assert comp_series == G.derived_series() + # The first group in the composition series is always the group itself and + # the last group in the series is the trivial group. + S = SymmetricGroup(4) + assert S.composition_series()[0] == S + assert len(S.composition_series()) == 5 + A = AlternatingGroup(4) + assert A.composition_series()[0] == A + assert len(A.composition_series()) == 4 + + # the composition series for C_8 is C_8 > C_4 > C_2 > triv + G = CyclicGroup(8) + series = G.composition_series() + assert is_isomorphic(series[1], CyclicGroup(4)) + assert is_isomorphic(series[2], CyclicGroup(2)) + assert series[3].is_trivial + + +def test_is_symmetric(): + a = Permutation(0, 1, 2) + b = Permutation(0, 1, size=3) + assert PermutationGroup(a, b).is_symmetric is True + + a = Permutation(0, 2, 1) + b = Permutation(1, 2, size=3) + assert PermutationGroup(a, b).is_symmetric is True + + a = Permutation(0, 1, 2, 3) + b = Permutation(0, 3)(1, 2) + assert PermutationGroup(a, b).is_symmetric is False + +def test_conjugacy_class(): + S = SymmetricGroup(4) + x = Permutation(1, 2, 3) + C = {Permutation(0, 1, 2, size = 4), Permutation(0, 1, 3), + Permutation(0, 2, 1, size = 4), Permutation(0, 2, 3), + Permutation(0, 3, 1), Permutation(0, 3, 2), + Permutation(1, 2, 3), Permutation(1, 3, 2)} + assert S.conjugacy_class(x) == C + +def test_conjugacy_classes(): + S = SymmetricGroup(3) + expected = [{Permutation(size = 3)}, + {Permutation(0, 1, size = 3), Permutation(0, 2), Permutation(1, 2)}, + {Permutation(0, 1, 2), Permutation(0, 2, 1)}] + computed = S.conjugacy_classes() + + assert len(expected) == len(computed) + assert all(e in computed for e in expected) + +def test_coset_class(): + a = Permutation(1, 2) + b = Permutation(0, 1) + G = PermutationGroup([a, b]) + #Creating right coset + rht_coset = G*a + #Checking whether it is left coset or right coset + assert rht_coset.is_right_coset + assert not rht_coset.is_left_coset + #Creating list representation of coset + list_repr = rht_coset.as_list() + expected = [Permutation(0, 2), Permutation(0, 2, 1), Permutation(1, 2), + Permutation(2), Permutation(2)(0, 1), Permutation(0, 1, 2)] + for ele in list_repr: + assert ele in expected + #Creating 
left coset + left_coset = a*G + #Checking whether it is left coset or right coset + assert not left_coset.is_right_coset + assert left_coset.is_left_coset + #Creating list representation of Coset + list_repr = left_coset.as_list() + expected = [Permutation(2)(0, 1), Permutation(0, 1, 2), Permutation(1, 2), + Permutation(2), Permutation(0, 2), Permutation(0, 2, 1)] + for ele in list_repr: + assert ele in expected + + G = PermutationGroup(Permutation(1, 2, 3, 4), Permutation(2, 3, 4)) + H = PermutationGroup(Permutation(1, 2, 3, 4)) + g = Permutation(1, 3)(2, 4) + rht_coset = Coset(g, H, G, dir='+') + assert rht_coset.is_right_coset + list_repr = rht_coset.as_list() + expected = [Permutation(1, 2, 3, 4), Permutation(4), Permutation(1, 3)(2, 4), + Permutation(1, 4, 3, 2)] + for ele in list_repr: + assert ele in expected + +def test_symmetricpermutationgroup(): + a = SymmetricPermutationGroup(5) + assert a.degree == 5 + assert a.order() == 120 + assert a.identity() == Permutation(4) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_polyhedron.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_polyhedron.py new file mode 100644 index 0000000000000000000000000000000000000000..abf469bb560eef1f378eff4740a84b80b696035f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_polyhedron.py @@ -0,0 +1,105 @@ +from sympy.core.symbol import symbols +from sympy.sets.sets import FiniteSet +from sympy.combinatorics.polyhedron import (Polyhedron, + tetrahedron, cube as square, octahedron, dodecahedron, icosahedron, + cube_faces) +from sympy.combinatorics.permutations import Permutation +from sympy.combinatorics.perm_groups import PermutationGroup +from sympy.testing.pytest import raises + +rmul = Permutation.rmul + + +def test_polyhedron(): + raises(ValueError, lambda: Polyhedron(list('ab'), + pgroup=[Permutation([0])])) + pgroup = [Permutation([[0, 7, 2, 5], [6, 1, 4, 3]]), + Permutation([[0, 7, 1, 6], [5, 2, 4, 3]]), + Permutation([[3, 6, 0, 5], [4, 1, 7, 2]]), + Permutation([[7, 4, 5], [1, 3, 0], [2], [6]]), + Permutation([[1, 3, 2], [7, 6, 5], [4], [0]]), + Permutation([[4, 7, 6], [2, 0, 3], [1], [5]]), + Permutation([[1, 2, 0], [4, 5, 6], [3], [7]]), + Permutation([[4, 2], [0, 6], [3, 7], [1, 5]]), + Permutation([[3, 5], [7, 1], [2, 6], [0, 4]]), + Permutation([[2, 5], [1, 6], [0, 4], [3, 7]]), + Permutation([[4, 3], [7, 0], [5, 1], [6, 2]]), + Permutation([[4, 1], [0, 5], [6, 2], [7, 3]]), + Permutation([[7, 2], [3, 6], [0, 4], [1, 5]]), + Permutation([0, 1, 2, 3, 4, 5, 6, 7])] + corners = tuple(symbols('A:H')) + faces = cube_faces + cube = Polyhedron(corners, faces, pgroup) + + assert cube.edges == FiniteSet(*( + (0, 1), (6, 7), (1, 2), (5, 6), (0, 3), (2, 3), + (4, 7), (4, 5), (3, 7), (1, 5), (0, 4), (2, 6))) + + for i in range(3): # add 180 degree face rotations + cube.rotate(cube.pgroup[i]**2) + + assert cube.corners == corners + + for i in range(3, 7): # add 240 degree axial corner rotations + cube.rotate(cube.pgroup[i]**2) + + assert cube.corners == corners + cube.rotate(1) + raises(ValueError, lambda: cube.rotate(Permutation([0, 1]))) + assert cube.corners != corners + assert cube.array_form == [7, 6, 4, 5, 3, 2, 0, 1] + assert cube.cyclic_form == [[0, 7, 1, 6], [2, 4, 3, 5]] + cube.reset() + assert cube.corners == corners + + def check(h, size, rpt, target): + + assert len(h.faces) + len(h.vertices) - len(h.edges) == 2 + assert h.size == size + + got = set() + for p in h.pgroup: + # make sure it 
restores original + P = h.copy() + hit = P.corners + for i in range(rpt): + P.rotate(p) + if P.corners == hit: + break + else: + print('error in permutation', p.array_form) + for i in range(rpt): + P.rotate(p) + got.add(tuple(P.corners)) + c = P.corners + f = [[c[i] for i in f] for f in P.faces] + assert h.faces == Polyhedron(c, f).faces + assert len(got) == target + assert PermutationGroup([Permutation(g) for g in got]).is_group + + for h, size, rpt, target in zip( + (tetrahedron, square, octahedron, dodecahedron, icosahedron), + (4, 8, 6, 20, 12), + (3, 4, 4, 5, 5), + (12, 24, 24, 60, 60)): + check(h, size, rpt, target) + + +def test_pgroups(): + from sympy.combinatorics.polyhedron import (cube, tetrahedron_faces, + octahedron_faces, dodecahedron_faces, icosahedron_faces) + from sympy.combinatorics.polyhedron import _pgroup_calcs + (tetrahedron2, cube2, octahedron2, dodecahedron2, icosahedron2, + tetrahedron_faces2, cube_faces2, octahedron_faces2, + dodecahedron_faces2, icosahedron_faces2) = _pgroup_calcs() + + assert tetrahedron == tetrahedron2 + assert cube == cube2 + assert octahedron == octahedron2 + assert dodecahedron == dodecahedron2 + assert icosahedron == icosahedron2 + assert sorted(map(sorted, tetrahedron_faces)) == sorted(map(sorted, tetrahedron_faces2)) + assert sorted(cube_faces) == sorted(cube_faces2) + assert sorted(octahedron_faces) == sorted(octahedron_faces2) + assert sorted(dodecahedron_faces) == sorted(dodecahedron_faces2) + assert sorted(icosahedron_faces) == sorted(icosahedron_faces2) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_prufer.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_prufer.py new file mode 100644 index 0000000000000000000000000000000000000000..b077c7cf3f023a4c36d7039505e6165ab29f275a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_prufer.py @@ -0,0 +1,74 @@ +from sympy.combinatorics.prufer import Prufer +from sympy.testing.pytest import raises + + +def test_prufer(): + # number of nodes is optional + assert Prufer([[0, 1], [0, 2], [0, 3], [0, 4]], 5).nodes == 5 + assert Prufer([[0, 1], [0, 2], [0, 3], [0, 4]]).nodes == 5 + + a = Prufer([[0, 1], [0, 2], [0, 3], [0, 4]]) + assert a.rank == 0 + assert a.nodes == 5 + assert a.prufer_repr == [0, 0, 0] + + a = Prufer([[2, 4], [1, 4], [1, 3], [0, 5], [0, 4]]) + assert a.rank == 924 + assert a.nodes == 6 + assert a.tree_repr == [[2, 4], [1, 4], [1, 3], [0, 5], [0, 4]] + assert a.prufer_repr == [4, 1, 4, 0] + + assert Prufer.edges([0, 1, 2, 3], [1, 4, 5], [1, 4, 6]) == \ + ([[0, 1], [1, 2], [1, 4], [2, 3], [4, 5], [4, 6]], 7) + assert Prufer([0]*4).size == Prufer([6]*4).size == 1296 + + # accept iterables but convert to list of lists + tree = [(0, 1), (1, 5), (0, 3), (0, 2), (2, 6), (4, 7), (2, 4)] + tree_lists = [list(t) for t in tree] + assert Prufer(tree).tree_repr == tree_lists + assert sorted(Prufer(set(tree)).tree_repr) == sorted(tree_lists) + + raises(ValueError, lambda: Prufer([[1, 2], [3, 4]])) # 0 is missing + raises(ValueError, lambda: Prufer([[2, 3], [3, 4]])) # 0, 1 are missing + assert Prufer(*Prufer.edges([1, 2], [3, 4])).prufer_repr == [1, 3] + raises(ValueError, lambda: Prufer.edges( + [1, 3], [3, 4])) # a broken tree but edges doesn't care + raises(ValueError, lambda: Prufer.edges([1, 2], [5, 6])) + raises(ValueError, lambda: Prufer([[]])) + + a = Prufer([[0, 1], [0, 2], [0, 3]]) + b = a.next() + assert b.tree_repr == [[0, 2], [0, 1], [1, 3]] + assert b.rank == 1 + + +def 
test_round_trip(): + def doit(t, b): + e, n = Prufer.edges(*t) + t = Prufer(e, n) + a = sorted(t.tree_repr) + b = [i - 1 for i in b] + assert t.prufer_repr == b + assert sorted(Prufer(b).tree_repr) == a + assert Prufer.unrank(t.rank, n).prufer_repr == b + + doit([[1, 2]], []) + doit([[2, 1, 3]], [1]) + doit([[1, 3, 2]], [3]) + doit([[1, 2, 3]], [2]) + doit([[2, 1, 4], [1, 3]], [1, 1]) + doit([[3, 2, 1, 4]], [2, 1]) + doit([[3, 2, 1], [2, 4]], [2, 2]) + doit([[1, 3, 2, 4]], [3, 2]) + doit([[1, 4, 2, 3]], [4, 2]) + doit([[3, 1, 4, 2]], [4, 1]) + doit([[4, 2, 1, 3]], [1, 2]) + doit([[1, 2, 4, 3]], [2, 4]) + doit([[1, 3, 4, 2]], [3, 4]) + doit([[2, 4, 1], [4, 3]], [4, 4]) + doit([[1, 2, 3, 4]], [2, 3]) + doit([[2, 3, 1], [3, 4]], [3, 3]) + doit([[1, 4, 3, 2]], [4, 3]) + doit([[2, 1, 4, 3]], [1, 4]) + doit([[2, 1, 3, 4]], [1, 3]) + doit([[6, 2, 1, 4], [1, 3, 5, 8], [3, 7]], [1, 2, 1, 3, 3, 5]) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_rewriting.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_rewriting.py new file mode 100644 index 0000000000000000000000000000000000000000..97c562bd57a2cd6318fa1dcb13c6f6278c861cca --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_rewriting.py @@ -0,0 +1,49 @@ +from sympy.combinatorics.fp_groups import FpGroup +from sympy.combinatorics.free_groups import free_group +from sympy.testing.pytest import raises + + +def test_rewriting(): + F, a, b = free_group("a, b") + G = FpGroup(F, [a*b*a**-1*b**-1]) + a, b = G.generators + R = G._rewriting_system + assert R.is_confluent + + assert G.reduce(b**-1*a) == a*b**-1 + assert G.reduce(b**3*a**4*b**-2*a) == a**5*b + assert G.equals(b**2*a**-1*b, b**4*a**-1*b**-1) + + assert R.reduce_using_automaton(b*a*a**2*b**-1) == a**3 + assert R.reduce_using_automaton(b**3*a**4*b**-2*a) == a**5*b + assert R.reduce_using_automaton(b**-1*a) == a*b**-1 + + G = FpGroup(F, [a**3, b**3, (a*b)**2]) + R = G._rewriting_system + R.make_confluent() + # R._is_confluent should be set to True after + # a successful run of make_confluent + assert R.is_confluent + # but also the system should actually be confluent + assert R._check_confluence() + assert G.reduce(b*a**-1*b**-1*a**3*b**4*a**-1*b**-15) == a**-1*b**-1 + # check for automaton reduction + assert R.reduce_using_automaton(b*a**-1*b**-1*a**3*b**4*a**-1*b**-15) == a**-1*b**-1 + + G = FpGroup(F, [a**2, b**3, (a*b)**4]) + R = G._rewriting_system + assert G.reduce(a**2*b**-2*a**2*b) == b**-1 + assert R.reduce_using_automaton(a**2*b**-2*a**2*b) == b**-1 + assert G.reduce(a**3*b**-2*a**2*b) == a**-1*b**-1 + assert R.reduce_using_automaton(a**3*b**-2*a**2*b) == a**-1*b**-1 + # Check after adding a rule + R.add_rule(a**2, b) + assert R.reduce_using_automaton(a**2*b**-2*a**2*b) == b**-1 + assert R.reduce_using_automaton(a**4*b**-2*a**2*b**3) == b + + R.set_max(15) + raises(RuntimeError, lambda: R.add_rule(a**-3, b)) + R.set_max(20) + R.add_rule(a**-3, b) + + assert R.add_rule(a, a) == set() diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_schur_number.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_schur_number.py new file mode 100644 index 0000000000000000000000000000000000000000..e6beb9b11fa993a99b71d89b8485050fc3575b8e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_schur_number.py @@ -0,0 +1,55 @@ +from sympy.core import S, Rational +from sympy.combinatorics.schur_number import 
schur_partition, SchurNumber +from sympy.core.random import _randint +from sympy.testing.pytest import raises +from sympy.core.symbol import symbols + + +def _sum_free_test(subset): + """ + Checks if subset is sum-free(There are no x,y,z in the subset such that + x + y = z) + """ + for i in subset: + for j in subset: + assert (i + j in subset) is False + + +def test_schur_partition(): + raises(ValueError, lambda: schur_partition(S.Infinity)) + raises(ValueError, lambda: schur_partition(-1)) + raises(ValueError, lambda: schur_partition(0)) + assert schur_partition(2) == [[1, 2]] + + random_number_generator = _randint(1000) + for _ in range(5): + n = random_number_generator(1, 1000) + result = schur_partition(n) + t = 0 + numbers = [] + for item in result: + _sum_free_test(item) + """ + Checks if the occurrence of all numbers is exactly one + """ + t += len(item) + for l in item: + assert (l in numbers) is False + numbers.append(l) + assert n == t + + x = symbols("x") + raises(ValueError, lambda: schur_partition(x)) + +def test_schur_number(): + first_known_schur_numbers = {1: 1, 2: 4, 3: 13, 4: 44, 5: 160} + for k in first_known_schur_numbers: + assert SchurNumber(k) == first_known_schur_numbers[k] + + assert SchurNumber(S.Infinity) == S.Infinity + assert SchurNumber(0) == 0 + raises(ValueError, lambda: SchurNumber(0.5)) + + n = symbols("n") + assert SchurNumber(n).lower_bound() == 3**n/2 - Rational(1, 2) + assert SchurNumber(8).lower_bound() == 5039 diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_subsets.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_subsets.py new file mode 100644 index 0000000000000000000000000000000000000000..1d50076da1c685294c2d2561dcc2a6af629eaf83 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_subsets.py @@ -0,0 +1,63 @@ +from sympy.combinatorics.subsets import Subset, ksubsets +from sympy.testing.pytest import raises + + +def test_subset(): + a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) + assert a.next_binary() == Subset(['b'], ['a', 'b', 'c', 'd']) + assert a.prev_binary() == Subset(['c'], ['a', 'b', 'c', 'd']) + assert a.next_lexicographic() == Subset(['d'], ['a', 'b', 'c', 'd']) + assert a.prev_lexicographic() == Subset(['c'], ['a', 'b', 'c', 'd']) + assert a.next_gray() == Subset(['c'], ['a', 'b', 'c', 'd']) + assert a.prev_gray() == Subset(['d'], ['a', 'b', 'c', 'd']) + assert a.rank_binary == 3 + assert a.rank_lexicographic == 14 + assert a.rank_gray == 2 + assert a.cardinality == 16 + assert a.size == 2 + assert Subset.bitlist_from_subset(a, ['a', 'b', 'c', 'd']) == '0011' + + a = Subset([2, 5, 7], [1, 2, 3, 4, 5, 6, 7]) + assert a.next_binary() == Subset([2, 5, 6], [1, 2, 3, 4, 5, 6, 7]) + assert a.prev_binary() == Subset([2, 5], [1, 2, 3, 4, 5, 6, 7]) + assert a.next_lexicographic() == Subset([2, 6], [1, 2, 3, 4, 5, 6, 7]) + assert a.prev_lexicographic() == Subset([2, 5, 6, 7], [1, 2, 3, 4, 5, 6, 7]) + assert a.next_gray() == Subset([2, 5, 6, 7], [1, 2, 3, 4, 5, 6, 7]) + assert a.prev_gray() == Subset([2, 5], [1, 2, 3, 4, 5, 6, 7]) + assert a.rank_binary == 37 + assert a.rank_lexicographic == 93 + assert a.rank_gray == 57 + assert a.cardinality == 128 + + superset = ['a', 'b', 'c', 'd'] + assert Subset.unrank_binary(4, superset).rank_binary == 4 + assert Subset.unrank_gray(10, superset).rank_gray == 10 + + superset = [1, 2, 3, 4, 5, 6, 7, 8, 9] + assert Subset.unrank_binary(33, superset).rank_binary == 33 + assert Subset.unrank_gray(25, 
superset).rank_gray == 25 + + a = Subset([], ['a', 'b', 'c', 'd']) + i = 1 + while a.subset != Subset(['d'], ['a', 'b', 'c', 'd']).subset: + a = a.next_lexicographic() + i = i + 1 + assert i == 16 + + i = 1 + while a.subset != Subset([], ['a', 'b', 'c', 'd']).subset: + a = a.prev_lexicographic() + i = i + 1 + assert i == 16 + + raises(ValueError, lambda: Subset(['a', 'b'], ['a'])) + raises(ValueError, lambda: Subset(['a'], ['b', 'c'])) + raises(ValueError, lambda: Subset.subset_from_bitlist(['a', 'b'], '010')) + + assert Subset(['a'], ['a', 'b']) != Subset(['b'], ['a', 'b']) + assert Subset(['a'], ['a', 'b']) != Subset(['a'], ['a', 'c']) + +def test_ksubsets(): + assert list(ksubsets([1, 2, 3], 2)) == [(1, 2), (1, 3), (2, 3)] + assert list(ksubsets([1, 2, 3, 4, 5], 2)) == [(1, 2), (1, 3), (1, 4), + (1, 5), (2, 3), (2, 4), (2, 5), (3, 4), (3, 5), (4, 5)] diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_testutil.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_testutil.py new file mode 100644 index 0000000000000000000000000000000000000000..e13f4d5b9913213bac7798c4df77f6665785c9ca --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_testutil.py @@ -0,0 +1,55 @@ +from sympy.combinatorics.named_groups import SymmetricGroup, AlternatingGroup,\ + CyclicGroup +from sympy.combinatorics.testutil import _verify_bsgs, _cmp_perm_lists,\ + _naive_list_centralizer, _verify_centralizer,\ + _verify_normal_closure +from sympy.combinatorics.permutations import Permutation +from sympy.combinatorics.perm_groups import PermutationGroup +from sympy.core.random import shuffle + + +def test_cmp_perm_lists(): + S = SymmetricGroup(4) + els = list(S.generate_dimino()) + other = els[:] + shuffle(other) + assert _cmp_perm_lists(els, other) is True + + +def test_naive_list_centralizer(): + # verified by GAP + S = SymmetricGroup(3) + A = AlternatingGroup(3) + assert _naive_list_centralizer(S, S) == [Permutation([0, 1, 2])] + assert PermutationGroup(_naive_list_centralizer(S, A)).is_subgroup(A) + + +def test_verify_bsgs(): + S = SymmetricGroup(5) + S.schreier_sims() + base = S.base + strong_gens = S.strong_gens + assert _verify_bsgs(S, base, strong_gens) is True + assert _verify_bsgs(S, base[:-1], strong_gens) is False + assert _verify_bsgs(S, base, S.generators) is False + + +def test_verify_centralizer(): + # verified by GAP + S = SymmetricGroup(3) + A = AlternatingGroup(3) + triv = PermutationGroup([Permutation([0, 1, 2])]) + assert _verify_centralizer(S, S, centr=triv) + assert _verify_centralizer(S, A, centr=A) + + +def test_verify_normal_closure(): + # verified by GAP + S = SymmetricGroup(3) + A = AlternatingGroup(3) + assert _verify_normal_closure(S, A, closure=A) + S = SymmetricGroup(5) + A = AlternatingGroup(5) + C = CyclicGroup(5) + assert _verify_normal_closure(S, A, closure=A) + assert _verify_normal_closure(S, C, closure=A) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/logic/__init__.py b/llmeval-env/lib/python3.10/site-packages/sympy/logic/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cb26903a384e9df3a0f02a92c488c5442cee1486 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/logic/__init__.py @@ -0,0 +1,12 @@ +from .boolalg import (to_cnf, to_dnf, to_nnf, And, Or, Not, Xor, Nand, Nor, Implies, + Equivalent, ITE, POSform, SOPform, simplify_logic, bool_map, true, false, + gateinputcount) +from .inference import satisfiable + +__all__ = [ + 
'to_cnf', 'to_dnf', 'to_nnf', 'And', 'Or', 'Not', 'Xor', 'Nand', 'Nor', + 'Implies', 'Equivalent', 'ITE', 'POSform', 'SOPform', 'simplify_logic', + 'bool_map', 'true', 'false', 'gateinputcount', + + 'satisfiable', +] diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/logic/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/logic/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..365667548d50250f3e42f015232d700129c8ce80 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/logic/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/logic/__pycache__/boolalg.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/logic/__pycache__/boolalg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd50025269110552838e55a1aec4762880153eae Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/logic/__pycache__/boolalg.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/logic/__pycache__/inference.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/logic/__pycache__/inference.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72406a7163c3f0975d3d6edca8a5fabbe77486dd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/logic/__pycache__/inference.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/logic/algorithms/__init__.py b/llmeval-env/lib/python3.10/site-packages/sympy/logic/algorithms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/logic/algorithms/dpll.py b/llmeval-env/lib/python3.10/site-packages/sympy/logic/algorithms/dpll.py new file mode 100644 index 0000000000000000000000000000000000000000..40e6802f7626c982a9a6cd7146baea3ac6b8b6e0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/logic/algorithms/dpll.py @@ -0,0 +1,308 @@ +"""Implementation of DPLL algorithm + +Further improvements: eliminate calls to pl_true, implement branching rules, +efficient unit propagation. + +References: + - https://en.wikipedia.org/wiki/DPLL_algorithm + - https://www.researchgate.net/publication/242384772_Implementations_of_the_DPLL_Algorithm +""" + +from sympy.core.sorting import default_sort_key +from sympy.logic.boolalg import Or, Not, conjuncts, disjuncts, to_cnf, \ + to_int_repr, _find_predicates +from sympy.assumptions.cnf import CNF +from sympy.logic.inference import pl_true, literal_symbol + + +def dpll_satisfiable(expr): + """ + Check satisfiability of a propositional sentence. 
+ It returns a model rather than True when it succeeds + + >>> from sympy.abc import A, B + >>> from sympy.logic.algorithms.dpll import dpll_satisfiable + >>> dpll_satisfiable(A & ~B) + {A: True, B: False} + >>> dpll_satisfiable(A & ~A) + False + + """ + if not isinstance(expr, CNF): + clauses = conjuncts(to_cnf(expr)) + else: + clauses = expr.clauses + if False in clauses: + return False + symbols = sorted(_find_predicates(expr), key=default_sort_key) + symbols_int_repr = set(range(1, len(symbols) + 1)) + clauses_int_repr = to_int_repr(clauses, symbols) + result = dpll_int_repr(clauses_int_repr, symbols_int_repr, {}) + if not result: + return result + output = {} + for key in result: + output.update({symbols[key - 1]: result[key]}) + return output + + +def dpll(clauses, symbols, model): + """ + Compute satisfiability in a partial model. + Clauses is an array of conjuncts. + + >>> from sympy.abc import A, B, D + >>> from sympy.logic.algorithms.dpll import dpll + >>> dpll([A, B, D], [A, B], {D: False}) + False + + """ + # compute DP kernel + P, value = find_unit_clause(clauses, model) + while P: + model.update({P: value}) + symbols.remove(P) + if not value: + P = ~P + clauses = unit_propagate(clauses, P) + P, value = find_unit_clause(clauses, model) + P, value = find_pure_symbol(symbols, clauses) + while P: + model.update({P: value}) + symbols.remove(P) + if not value: + P = ~P + clauses = unit_propagate(clauses, P) + P, value = find_pure_symbol(symbols, clauses) + # end DP kernel + unknown_clauses = [] + for c in clauses: + val = pl_true(c, model) + if val is False: + return False + if val is not True: + unknown_clauses.append(c) + if not unknown_clauses: + return model + if not clauses: + return model + P = symbols.pop() + model_copy = model.copy() + model.update({P: True}) + model_copy.update({P: False}) + symbols_copy = symbols[:] + return (dpll(unit_propagate(unknown_clauses, P), symbols, model) or + dpll(unit_propagate(unknown_clauses, Not(P)), symbols_copy, model_copy)) + + +def dpll_int_repr(clauses, symbols, model): + """ + Compute satisfiability in a partial model. + Arguments are expected to be in integer representation + + >>> from sympy.logic.algorithms.dpll import dpll_int_repr + >>> dpll_int_repr([{1}, {2}, {3}], {1, 2}, {3: False}) + False + + """ + # compute DP kernel + P, value = find_unit_clause_int_repr(clauses, model) + while P: + model.update({P: value}) + symbols.remove(P) + if not value: + P = -P + clauses = unit_propagate_int_repr(clauses, P) + P, value = find_unit_clause_int_repr(clauses, model) + P, value = find_pure_symbol_int_repr(symbols, clauses) + while P: + model.update({P: value}) + symbols.remove(P) + if not value: + P = -P + clauses = unit_propagate_int_repr(clauses, P) + P, value = find_pure_symbol_int_repr(symbols, clauses) + # end DP kernel + unknown_clauses = [] + for c in clauses: + val = pl_true_int_repr(c, model) + if val is False: + return False + if val is not True: + unknown_clauses.append(c) + if not unknown_clauses: + return model + P = symbols.pop() + model_copy = model.copy() + model.update({P: True}) + model_copy.update({P: False}) + symbols_copy = symbols.copy() + return (dpll_int_repr(unit_propagate_int_repr(unknown_clauses, P), symbols, model) or + dpll_int_repr(unit_propagate_int_repr(unknown_clauses, -P), symbols_copy, model_copy)) + +### helper methods for DPLL + + +def pl_true_int_repr(clause, model={}): + """ + Lightweight version of pl_true. + Argument clause represents the set of args of an Or clause. 
This is used + inside dpll_int_repr, it is not meant to be used directly. + + >>> from sympy.logic.algorithms.dpll import pl_true_int_repr + >>> pl_true_int_repr({1, 2}, {1: False}) + >>> pl_true_int_repr({1, 2}, {1: False, 2: False}) + False + + """ + result = False + for lit in clause: + if lit < 0: + p = model.get(-lit) + if p is not None: + p = not p + else: + p = model.get(lit) + if p is True: + return True + elif p is None: + result = None + return result + + +def unit_propagate(clauses, symbol): + """ + Returns an equivalent set of clauses + If a set of clauses contains the unit clause l, the other clauses are + simplified by the application of the two following rules: + + 1. every clause containing l is removed + 2. in every clause that contains ~l this literal is deleted + + Arguments are expected to be in CNF. + + >>> from sympy.abc import A, B, D + >>> from sympy.logic.algorithms.dpll import unit_propagate + >>> unit_propagate([A | B, D | ~B, B], B) + [D, B] + + """ + output = [] + for c in clauses: + if c.func != Or: + output.append(c) + continue + for arg in c.args: + if arg == ~symbol: + output.append(Or(*[x for x in c.args if x != ~symbol])) + break + if arg == symbol: + break + else: + output.append(c) + return output + + +def unit_propagate_int_repr(clauses, s): + """ + Same as unit_propagate, but arguments are expected to be in integer + representation + + >>> from sympy.logic.algorithms.dpll import unit_propagate_int_repr + >>> unit_propagate_int_repr([{1, 2}, {3, -2}, {2}], 2) + [{3}] + + """ + negated = {-s} + return [clause - negated for clause in clauses if s not in clause] + + +def find_pure_symbol(symbols, unknown_clauses): + """ + Find a symbol and its value if it appears only as a positive literal + (or only as a negative) in clauses. + + >>> from sympy.abc import A, B, D + >>> from sympy.logic.algorithms.dpll import find_pure_symbol + >>> find_pure_symbol([A, B, D], [A|~B,~B|~D,D|A]) + (A, True) + + """ + for sym in symbols: + found_pos, found_neg = False, False + for c in unknown_clauses: + if not found_pos and sym in disjuncts(c): + found_pos = True + if not found_neg and Not(sym) in disjuncts(c): + found_neg = True + if found_pos != found_neg: + return sym, found_pos + return None, None + + +def find_pure_symbol_int_repr(symbols, unknown_clauses): + """ + Same as find_pure_symbol, but arguments are expected + to be in integer representation + + >>> from sympy.logic.algorithms.dpll import find_pure_symbol_int_repr + >>> find_pure_symbol_int_repr({1,2,3}, + ... [{1, -2}, {-2, -3}, {3, 1}]) + (1, True) + + """ + all_symbols = set().union(*unknown_clauses) + found_pos = all_symbols.intersection(symbols) + found_neg = all_symbols.intersection([-s for s in symbols]) + for p in found_pos: + if -p not in found_neg: + return p, True + for p in found_neg: + if -p not in found_pos: + return -p, False + return None, None + + +def find_unit_clause(clauses, model): + """ + A unit clause has only 1 variable that is not bound in the model. 
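# Illustrative only (not part of dpll.py): chaining the integer-representation
# helpers defined in this module to run unit propagation to a fixed point. The
# tiny clause set below is made up for the example; {-1} forces 1 -> False,
# which reduces {1, 2} to the new unit {2}, forcing 2 -> True.
from sympy.logic.algorithms.dpll import (
    find_unit_clause_int_repr, unit_propagate_int_repr)

clauses = [{1, 2}, {-1}, {2, 3}]
model = {}
while True:
    sym, value = find_unit_clause_int_repr(clauses, model)
    if sym is None:
        break                      # no unit clause left
    model[sym] = value
    clauses = unit_propagate_int_repr(clauses, sym if value else -sym)
# afterwards model == {1: False, 2: True} and clauses == []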
+ + >>> from sympy.abc import A, B, D + >>> from sympy.logic.algorithms.dpll import find_unit_clause + >>> find_unit_clause([A | B | D, B | ~D, A | ~B], {A:True}) + (B, False) + + """ + for clause in clauses: + num_not_in_model = 0 + for literal in disjuncts(clause): + sym = literal_symbol(literal) + if sym not in model: + num_not_in_model += 1 + P, value = sym, not isinstance(literal, Not) + if num_not_in_model == 1: + return P, value + return None, None + + +def find_unit_clause_int_repr(clauses, model): + """ + Same as find_unit_clause, but arguments are expected to be in + integer representation. + + >>> from sympy.logic.algorithms.dpll import find_unit_clause_int_repr + >>> find_unit_clause_int_repr([{1, 2, 3}, + ... {2, -3}, {1, -2}], {1: True}) + (2, False) + + """ + bound = set(model) | {-sym for sym in model} + for clause in clauses: + unbound = clause - bound + if len(unbound) == 1: + p = unbound.pop() + if p < 0: + return -p, False + else: + return p, True + return None, None diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/logic/algorithms/dpll2.py b/llmeval-env/lib/python3.10/site-packages/sympy/logic/algorithms/dpll2.py new file mode 100644 index 0000000000000000000000000000000000000000..0f518bce8f87a53e6e10cbbdf5ec85eaeefacdf7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/logic/algorithms/dpll2.py @@ -0,0 +1,659 @@ +"""Implementation of DPLL algorithm + +Features: + - Clause learning + - Watch literal scheme + - VSIDS heuristic + +References: + - https://en.wikipedia.org/wiki/DPLL_algorithm +""" + +from collections import defaultdict +from heapq import heappush, heappop + +from sympy.core.sorting import ordered +from sympy.assumptions.cnf import EncodedCNF + + +def dpll_satisfiable(expr, all_models=False): + """ + Check satisfiability of a propositional sentence. + It returns a model rather than True when it succeeds. + Returns a generator of all models if all_models is True. + + Examples + ======== + + >>> from sympy.abc import A, B + >>> from sympy.logic.algorithms.dpll2 import dpll_satisfiable + >>> dpll_satisfiable(A & ~B) + {A: True, B: False} + >>> dpll_satisfiable(A & ~A) + False + + """ + if not isinstance(expr, EncodedCNF): + exprs = EncodedCNF() + exprs.add_prop(expr) + expr = exprs + + # Return UNSAT when False (encoded as 0) is present in the CNF + if {0} in expr.data: + if all_models: + return (f for f in [False]) + return False + + solver = SATSolver(expr.data, expr.variables, set(), expr.symbols) + models = solver._find_model() + + if all_models: + return _all_models(models) + + try: + return next(models) + except StopIteration: + return False + + # Uncomment to confirm the solution is valid (hitting set for the clauses) + #else: + #for cls in clauses_int_repr: + #assert solver.var_settings.intersection(cls) + + +def _all_models(models): + satisfiable = False + try: + while True: + yield next(models) + satisfiable = True + except StopIteration: + if not satisfiable: + yield False + + +class SATSolver: + """ + Class for representing a SAT solver capable of + finding a model to a boolean theory in conjunctive + normal form. 
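# Illustrative only: the dpll_satisfiable entry point defined above, with
# all_models=True, yields every model, or a single False when the theory is
# unsatisfiable. The expected results follow from its docstring and the truth
# tables of the small formulas used here.
from sympy.abc import A, B
from sympy.logic.algorithms.dpll2 import dpll_satisfiable

models = list(dpll_satisfiable(A | B, all_models=True))
assert len(models) == 3            # the three satisfying assignments of A | B

assert list(dpll_satisfiable(A & ~A, all_models=True)) == [False]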
+ """ + + def __init__(self, clauses, variables, var_settings, symbols=None, + heuristic='vsids', clause_learning='none', INTERVAL=500): + + self.var_settings = var_settings + self.heuristic = heuristic + self.is_unsatisfied = False + self._unit_prop_queue = [] + self.update_functions = [] + self.INTERVAL = INTERVAL + + if symbols is None: + self.symbols = list(ordered(variables)) + else: + self.symbols = symbols + + self._initialize_variables(variables) + self._initialize_clauses(clauses) + + if 'vsids' == heuristic: + self._vsids_init() + self.heur_calculate = self._vsids_calculate + self.heur_lit_assigned = self._vsids_lit_assigned + self.heur_lit_unset = self._vsids_lit_unset + self.heur_clause_added = self._vsids_clause_added + + # Note: Uncomment this if/when clause learning is enabled + #self.update_functions.append(self._vsids_decay) + + else: + raise NotImplementedError + + if 'simple' == clause_learning: + self.add_learned_clause = self._simple_add_learned_clause + self.compute_conflict = self.simple_compute_conflict + self.update_functions.append(self.simple_clean_clauses) + elif 'none' == clause_learning: + self.add_learned_clause = lambda x: None + self.compute_conflict = lambda: None + else: + raise NotImplementedError + + # Create the base level + self.levels = [Level(0)] + self._current_level.varsettings = var_settings + + # Keep stats + self.num_decisions = 0 + self.num_learned_clauses = 0 + self.original_num_clauses = len(self.clauses) + + def _initialize_variables(self, variables): + """Set up the variable data structures needed.""" + self.sentinels = defaultdict(set) + self.occurrence_count = defaultdict(int) + self.variable_set = [False] * (len(variables) + 1) + + def _initialize_clauses(self, clauses): + """Set up the clause data structures needed. + + For each clause, the following changes are made: + - Unit clauses are queued for propagation right away. + - Non-unit clauses have their first and last literals set as sentinels. + - The number of clauses a literal appears in is computed. + """ + self.clauses = [list(clause) for clause in clauses] + + for i, clause in enumerate(self.clauses): + + # Handle the unit clauses + if 1 == len(clause): + self._unit_prop_queue.append(clause[0]) + continue + + self.sentinels[clause[0]].add(i) + self.sentinels[clause[-1]].add(i) + + for lit in clause: + self.occurrence_count[lit] += 1 + + def _find_model(self): + """ + Main DPLL loop. Returns a generator of models. + + Variables are chosen successively, and assigned to be either + True or False. If a solution is not found with this setting, + the opposite is chosen and the search continues. The solver + halts when every variable has a setting. + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... {3, -2}], {1, 2, 3}, set()) + >>> list(l._find_model()) + [{1: True, 2: False, 3: False}, {1: True, 2: True, 3: True}] + + >>> from sympy.abc import A, B, C + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... 
{3, -2}], {1, 2, 3}, set(), [A, B, C]) + >>> list(l._find_model()) + [{A: True, B: False, C: False}, {A: True, B: True, C: True}] + + """ + + # We use this variable to keep track of if we should flip a + # variable setting in successive rounds + flip_var = False + + # Check if unit prop says the theory is unsat right off the bat + self._simplify() + if self.is_unsatisfied: + return + + # While the theory still has clauses remaining + while True: + # Perform cleanup / fixup at regular intervals + if self.num_decisions % self.INTERVAL == 0: + for func in self.update_functions: + func() + + if flip_var: + # We have just backtracked and we are trying to opposite literal + flip_var = False + lit = self._current_level.decision + + else: + # Pick a literal to set + lit = self.heur_calculate() + self.num_decisions += 1 + + # Stopping condition for a satisfying theory + if 0 == lit: + yield {self.symbols[abs(lit) - 1]: + lit > 0 for lit in self.var_settings} + while self._current_level.flipped: + self._undo() + if len(self.levels) == 1: + return + flip_lit = -self._current_level.decision + self._undo() + self.levels.append(Level(flip_lit, flipped=True)) + flip_var = True + continue + + # Start the new decision level + self.levels.append(Level(lit)) + + # Assign the literal, updating the clauses it satisfies + self._assign_literal(lit) + + # _simplify the theory + self._simplify() + + # Check if we've made the theory unsat + if self.is_unsatisfied: + + self.is_unsatisfied = False + + # We unroll all of the decisions until we can flip a literal + while self._current_level.flipped: + self._undo() + + # If we've unrolled all the way, the theory is unsat + if 1 == len(self.levels): + return + + # Detect and add a learned clause + self.add_learned_clause(self.compute_conflict()) + + # Try the opposite setting of the most recent decision + flip_lit = -self._current_level.decision + self._undo() + self.levels.append(Level(flip_lit, flipped=True)) + flip_var = True + + ######################## + # Helper Methods # + ######################## + @property + def _current_level(self): + """The current decision level data structure + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{1}, {2}], {1, 2}, set()) + >>> next(l._find_model()) + {1: True, 2: True} + >>> l._current_level.decision + 0 + >>> l._current_level.flipped + False + >>> l._current_level.var_settings + {1, 2} + + """ + return self.levels[-1] + + def _clause_sat(self, cls): + """Check if a clause is satisfied by the current variable setting. + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{1}, {-1}], {1}, set()) + >>> try: + ... next(l._find_model()) + ... except StopIteration: + ... pass + >>> l._clause_sat(0) + False + >>> l._clause_sat(1) + True + + """ + for lit in self.clauses[cls]: + if lit in self.var_settings: + return True + return False + + def _is_sentinel(self, lit, cls): + """Check if a literal is a sentinel of a given clause. + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... {3, -2}], {1, 2, 3}, set()) + >>> next(l._find_model()) + {1: True, 2: False, 3: False} + >>> l._is_sentinel(2, 3) + True + >>> l._is_sentinel(-3, 1) + False + + """ + return cls in self.sentinels[lit] + + def _assign_literal(self, lit): + """Make a literal assignment. + + The literal assignment must be recorded as part of the current + decision level. 
Additionally, if the literal is marked as a + sentinel of any clause, then a new sentinel must be chosen. If + this is not possible, then unit propagation is triggered and + another literal is added to the queue to be set in the future. + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... {3, -2}], {1, 2, 3}, set()) + >>> next(l._find_model()) + {1: True, 2: False, 3: False} + >>> l.var_settings + {-3, -2, 1} + + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... {3, -2}], {1, 2, 3}, set()) + >>> l._assign_literal(-1) + >>> try: + ... next(l._find_model()) + ... except StopIteration: + ... pass + >>> l.var_settings + {-1} + + """ + self.var_settings.add(lit) + self._current_level.var_settings.add(lit) + self.variable_set[abs(lit)] = True + self.heur_lit_assigned(lit) + + sentinel_list = list(self.sentinels[-lit]) + + for cls in sentinel_list: + if not self._clause_sat(cls): + other_sentinel = None + for newlit in self.clauses[cls]: + if newlit != -lit: + if self._is_sentinel(newlit, cls): + other_sentinel = newlit + elif not self.variable_set[abs(newlit)]: + self.sentinels[-lit].remove(cls) + self.sentinels[newlit].add(cls) + other_sentinel = None + break + + # Check if no sentinel update exists + if other_sentinel: + self._unit_prop_queue.append(other_sentinel) + + def _undo(self): + """ + _undo the changes of the most recent decision level. + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... {3, -2}], {1, 2, 3}, set()) + >>> next(l._find_model()) + {1: True, 2: False, 3: False} + >>> level = l._current_level + >>> level.decision, level.var_settings, level.flipped + (-3, {-3, -2}, False) + >>> l._undo() + >>> level = l._current_level + >>> level.decision, level.var_settings, level.flipped + (0, {1}, False) + + """ + # Undo the variable settings + for lit in self._current_level.var_settings: + self.var_settings.remove(lit) + self.heur_lit_unset(lit) + self.variable_set[abs(lit)] = False + + # Pop the level off the stack + self.levels.pop() + + ######################### + # Propagation # + ######################### + """ + Propagation methods should attempt to soundly simplify the boolean + theory, and return True if any simplification occurred and False + otherwise. + """ + def _simplify(self): + """Iterate over the various forms of propagation to simplify the theory. + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... 
{3, -2}], {1, 2, 3}, set()) + >>> l.variable_set + [False, False, False, False] + >>> l.sentinels + {-3: {0, 2}, -2: {3, 4}, 2: {0, 3}, 3: {2, 4}} + + >>> l._simplify() + + >>> l.variable_set + [False, True, False, False] + >>> l.sentinels + {-3: {0, 2}, -2: {3, 4}, -1: set(), 2: {0, 3}, + ...3: {2, 4}} + + """ + changed = True + while changed: + changed = False + changed |= self._unit_prop() + changed |= self._pure_literal() + + def _unit_prop(self): + """Perform unit propagation on the current theory.""" + result = len(self._unit_prop_queue) > 0 + while self._unit_prop_queue: + next_lit = self._unit_prop_queue.pop() + if -next_lit in self.var_settings: + self.is_unsatisfied = True + self._unit_prop_queue = [] + return False + else: + self._assign_literal(next_lit) + + return result + + def _pure_literal(self): + """Look for pure literals and assign them when found.""" + return False + + ######################### + # Heuristics # + ######################### + def _vsids_init(self): + """Initialize the data structures needed for the VSIDS heuristic.""" + self.lit_heap = [] + self.lit_scores = {} + + for var in range(1, len(self.variable_set)): + self.lit_scores[var] = float(-self.occurrence_count[var]) + self.lit_scores[-var] = float(-self.occurrence_count[-var]) + heappush(self.lit_heap, (self.lit_scores[var], var)) + heappush(self.lit_heap, (self.lit_scores[-var], -var)) + + def _vsids_decay(self): + """Decay the VSIDS scores for every literal. + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... {3, -2}], {1, 2, 3}, set()) + + >>> l.lit_scores + {-3: -2.0, -2: -2.0, -1: 0.0, 1: 0.0, 2: -2.0, 3: -2.0} + + >>> l._vsids_decay() + + >>> l.lit_scores + {-3: -1.0, -2: -1.0, -1: 0.0, 1: 0.0, 2: -1.0, 3: -1.0} + + """ + # We divide every literal score by 2 for a decay factor + # Note: This doesn't change the heap property + for lit in self.lit_scores.keys(): + self.lit_scores[lit] /= 2.0 + + def _vsids_calculate(self): + """ + VSIDS Heuristic Calculation + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... {3, -2}], {1, 2, 3}, set()) + + >>> l.lit_heap + [(-2.0, -3), (-2.0, 2), (-2.0, -2), (0.0, 1), (-2.0, 3), (0.0, -1)] + + >>> l._vsids_calculate() + -3 + + >>> l.lit_heap + [(-2.0, -2), (-2.0, 2), (0.0, -1), (0.0, 1), (-2.0, 3)] + + """ + if len(self.lit_heap) == 0: + return 0 + + # Clean out the front of the heap as long the variables are set + while self.variable_set[abs(self.lit_heap[0][1])]: + heappop(self.lit_heap) + if len(self.lit_heap) == 0: + return 0 + + return heappop(self.lit_heap)[1] + + def _vsids_lit_assigned(self, lit): + """Handle the assignment of a literal for the VSIDS heuristic.""" + pass + + def _vsids_lit_unset(self, lit): + """Handle the unsetting of a literal for the VSIDS heuristic. + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... 
{3, -2}], {1, 2, 3}, set()) + >>> l.lit_heap + [(-2.0, -3), (-2.0, 2), (-2.0, -2), (0.0, 1), (-2.0, 3), (0.0, -1)] + + >>> l._vsids_lit_unset(2) + + >>> l.lit_heap + [(-2.0, -3), (-2.0, -2), (-2.0, -2), (-2.0, 2), (-2.0, 3), (0.0, -1), + ...(-2.0, 2), (0.0, 1)] + + """ + var = abs(lit) + heappush(self.lit_heap, (self.lit_scores[var], var)) + heappush(self.lit_heap, (self.lit_scores[-var], -var)) + + def _vsids_clause_added(self, cls): + """Handle the addition of a new clause for the VSIDS heuristic. + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... {3, -2}], {1, 2, 3}, set()) + + >>> l.num_learned_clauses + 0 + >>> l.lit_scores + {-3: -2.0, -2: -2.0, -1: 0.0, 1: 0.0, 2: -2.0, 3: -2.0} + + >>> l._vsids_clause_added({2, -3}) + + >>> l.num_learned_clauses + 1 + >>> l.lit_scores + {-3: -1.0, -2: -2.0, -1: 0.0, 1: 0.0, 2: -1.0, 3: -2.0} + + """ + self.num_learned_clauses += 1 + for lit in cls: + self.lit_scores[lit] += 1 + + ######################## + # Clause Learning # + ######################## + def _simple_add_learned_clause(self, cls): + """Add a new clause to the theory. + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... {3, -2}], {1, 2, 3}, set()) + + >>> l.num_learned_clauses + 0 + >>> l.clauses + [[2, -3], [1], [3, -3], [2, -2], [3, -2]] + >>> l.sentinels + {-3: {0, 2}, -2: {3, 4}, 2: {0, 3}, 3: {2, 4}} + + >>> l._simple_add_learned_clause([3]) + + >>> l.clauses + [[2, -3], [1], [3, -3], [2, -2], [3, -2], [3]] + >>> l.sentinels + {-3: {0, 2}, -2: {3, 4}, 2: {0, 3}, 3: {2, 4, 5}} + + """ + cls_num = len(self.clauses) + self.clauses.append(cls) + + for lit in cls: + self.occurrence_count[lit] += 1 + + self.sentinels[cls[0]].add(cls_num) + self.sentinels[cls[-1]].add(cls_num) + + self.heur_clause_added(cls) + + def _simple_compute_conflict(self): + """ Build a clause representing the fact that at least one decision made + so far is wrong. + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... {3, -2}], {1, 2, 3}, set()) + >>> next(l._find_model()) + {1: True, 2: False, 3: False} + >>> l._simple_compute_conflict() + [3] + + """ + return [-(level.decision) for level in self.levels[1:]] + + def _simple_clean_clauses(self): + """Clean up learned clauses.""" + pass + + +class Level: + """ + Represents a single level in the DPLL algorithm, and contains + enough information for a sound backtracking procedure. 
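# Illustrative only: inspecting the Level stack left behind after a model is
# found, using the same small theory as the doctests above. The base level
# always carries decision 0; the single real decision here is -3, and it was
# never flipped because no conflict forced the opposite value.
from sympy.logic.algorithms.dpll2 import SATSolver

l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, {3, -2}], {1, 2, 3}, set())
next(l._find_model())                        # {1: True, 2: False, 3: False}
assert [(lv.decision, lv.flipped) for lv in l.levels] == [(0, False), (-3, False)]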
+ """ + + def __init__(self, decision, flipped=False): + self.decision = decision + self.var_settings = set() + self.flipped = flipped diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/logic/algorithms/minisat22_wrapper.py b/llmeval-env/lib/python3.10/site-packages/sympy/logic/algorithms/minisat22_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..1d5c1f8f14f04309f7cb8197cc05d01a3c108545 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/logic/algorithms/minisat22_wrapper.py @@ -0,0 +1,46 @@ +from sympy.assumptions.cnf import EncodedCNF + +def minisat22_satisfiable(expr, all_models=False, minimal=False): + + if not isinstance(expr, EncodedCNF): + exprs = EncodedCNF() + exprs.add_prop(expr) + expr = exprs + + from pysat.solvers import Minisat22 + + # Return UNSAT when False (encoded as 0) is present in the CNF + if {0} in expr.data: + if all_models: + return (f for f in [False]) + return False + + r = Minisat22(expr.data) + + if minimal: + r.set_phases([-(i+1) for i in range(r.nof_vars())]) + + if not r.solve(): + return False + + if not all_models: + return {expr.symbols[abs(lit) - 1]: lit > 0 for lit in r.get_model()} + + else: + # Make solutions SymPy compatible by creating a generator + def _gen(results): + satisfiable = False + while results.solve(): + sol = results.get_model() + yield {expr.symbols[abs(lit) - 1]: lit > 0 for lit in sol} + if minimal: + results.add_clause([-i for i in sol if i>0]) + else: + results.add_clause([-i for i in sol]) + satisfiable = True + if not satisfiable: + yield False + raise StopIteration + + + return _gen(r) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/logic/boolalg.py b/llmeval-env/lib/python3.10/site-packages/sympy/logic/boolalg.py new file mode 100644 index 0000000000000000000000000000000000000000..049a02de5d9bfd3dc0096290d19b3e344bcfb9e8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/logic/boolalg.py @@ -0,0 +1,3565 @@ +""" +Boolean algebra module for SymPy +""" + +from collections import defaultdict +from itertools import chain, combinations, product, permutations +from sympy.core.add import Add +from sympy.core.basic import Basic +from sympy.core.cache import cacheit +from sympy.core.containers import Tuple +from sympy.core.decorators import sympify_method_args, sympify_return +from sympy.core.function import Application, Derivative +from sympy.core.kind import BooleanKind, NumberKind +from sympy.core.numbers import Number +from sympy.core.operations import LatticeOp +from sympy.core.singleton import Singleton, S +from sympy.core.sorting import ordered +from sympy.core.sympify import _sympy_converter, _sympify, sympify +from sympy.utilities.iterables import sift, ibin +from sympy.utilities.misc import filldedent + + +def as_Boolean(e): + """Like ``bool``, return the Boolean value of an expression, e, + which can be any instance of :py:class:`~.Boolean` or ``bool``. + + Examples + ======== + + >>> from sympy import true, false, nan + >>> from sympy.logic.boolalg import as_Boolean + >>> from sympy.abc import x + >>> as_Boolean(0) is false + True + >>> as_Boolean(1) is true + True + >>> as_Boolean(x) + x + >>> as_Boolean(2) + Traceback (most recent call last): + ... + TypeError: expecting bool or Boolean, not `2`. + >>> as_Boolean(nan) + Traceback (most recent call last): + ... + TypeError: expecting bool or Boolean, not `nan`. 
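# Illustrative only: the operator overloads defined on the Boolean base class
# below map straight onto the logic constructors. A and B are plain Symbols
# (which subclass Boolean), so these identities hold structurally.
from sympy.abc import A, B
from sympy.logic.boolalg import And, Or, Not, Xor, Implies

assert (A & B) == And(A, B)
assert (A | B) == Or(A, B)
assert ~A == Not(A)
assert (A ^ B) == Xor(A, B)
assert (A >> B) == Implies(A, B)   # and (B << A) builds the same Implies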
+ + """ + from sympy.core.symbol import Symbol + if e == True: + return true + if e == False: + return false + if isinstance(e, Symbol): + z = e.is_zero + if z is None: + return e + return false if z else true + if isinstance(e, Boolean): + return e + raise TypeError('expecting bool or Boolean, not `%s`.' % e) + + +@sympify_method_args +class Boolean(Basic): + """A Boolean object is an object for which logic operations make sense.""" + + __slots__ = () + + kind = BooleanKind + + @sympify_return([('other', 'Boolean')], NotImplemented) + def __and__(self, other): + return And(self, other) + + __rand__ = __and__ + + @sympify_return([('other', 'Boolean')], NotImplemented) + def __or__(self, other): + return Or(self, other) + + __ror__ = __or__ + + def __invert__(self): + """Overloading for ~""" + return Not(self) + + @sympify_return([('other', 'Boolean')], NotImplemented) + def __rshift__(self, other): + return Implies(self, other) + + @sympify_return([('other', 'Boolean')], NotImplemented) + def __lshift__(self, other): + return Implies(other, self) + + __rrshift__ = __lshift__ + __rlshift__ = __rshift__ + + @sympify_return([('other', 'Boolean')], NotImplemented) + def __xor__(self, other): + return Xor(self, other) + + __rxor__ = __xor__ + + def equals(self, other): + """ + Returns ``True`` if the given formulas have the same truth table. + For two formulas to be equal they must have the same literals. + + Examples + ======== + + >>> from sympy.abc import A, B, C + >>> from sympy import And, Or, Not + >>> (A >> B).equals(~B >> ~A) + True + >>> Not(And(A, B, C)).equals(And(Not(A), Not(B), Not(C))) + False + >>> Not(And(A, Not(A))).equals(Or(B, Not(B))) + False + + """ + from sympy.logic.inference import satisfiable + from sympy.core.relational import Relational + + if self.has(Relational) or other.has(Relational): + raise NotImplementedError('handling of relationals') + return self.atoms() == other.atoms() and \ + not satisfiable(Not(Equivalent(self, other))) + + def to_nnf(self, simplify=True): + # override where necessary + return self + + def as_set(self): + """ + Rewrites Boolean expression in terms of real sets. 
+ + Examples + ======== + + >>> from sympy import Symbol, Eq, Or, And + >>> x = Symbol('x', real=True) + >>> Eq(x, 0).as_set() + {0} + >>> (x > 0).as_set() + Interval.open(0, oo) + >>> And(-2 < x, x < 2).as_set() + Interval.open(-2, 2) + >>> Or(x < -2, 2 < x).as_set() + Union(Interval.open(-oo, -2), Interval.open(2, oo)) + + """ + from sympy.calculus.util import periodicity + from sympy.core.relational import Relational + + free = self.free_symbols + if len(free) == 1: + x = free.pop() + if x.kind is NumberKind: + reps = {} + for r in self.atoms(Relational): + if periodicity(r, x) not in (0, None): + s = r._eval_as_set() + if s in (S.EmptySet, S.UniversalSet, S.Reals): + reps[r] = s.as_relational(x) + continue + raise NotImplementedError(filldedent(''' + as_set is not implemented for relationals + with periodic solutions + ''')) + new = self.subs(reps) + if new.func != self.func: + return new.as_set() # restart with new obj + else: + return new._eval_as_set() + + return self._eval_as_set() + else: + raise NotImplementedError("Sorry, as_set has not yet been" + " implemented for multivariate" + " expressions") + + @property + def binary_symbols(self): + from sympy.core.relational import Eq, Ne + return set().union(*[i.binary_symbols for i in self.args + if i.is_Boolean or i.is_Symbol + or isinstance(i, (Eq, Ne))]) + + def _eval_refine(self, assumptions): + from sympy.assumptions import ask + ret = ask(self, assumptions) + if ret is True: + return true + elif ret is False: + return false + return None + + +class BooleanAtom(Boolean): + """ + Base class of :py:class:`~.BooleanTrue` and :py:class:`~.BooleanFalse`. + """ + is_Boolean = True + is_Atom = True + _op_priority = 11 # higher than Expr + + def simplify(self, *a, **kw): + return self + + def expand(self, *a, **kw): + return self + + @property + def canonical(self): + return self + + def _noop(self, other=None): + raise TypeError('BooleanAtom not allowed in this context.') + + __add__ = _noop + __radd__ = _noop + __sub__ = _noop + __rsub__ = _noop + __mul__ = _noop + __rmul__ = _noop + __pow__ = _noop + __rpow__ = _noop + __truediv__ = _noop + __rtruediv__ = _noop + __mod__ = _noop + __rmod__ = _noop + _eval_power = _noop + + # /// drop when Py2 is no longer supported + def __lt__(self, other): + raise TypeError(filldedent(''' + A Boolean argument can only be used in + Eq and Ne; all other relationals expect + real expressions. + ''')) + + __le__ = __lt__ + __gt__ = __lt__ + __ge__ = __lt__ + # \\\ + + def _eval_simplify(self, **kwargs): + return self + + +class BooleanTrue(BooleanAtom, metaclass=Singleton): + """ + SymPy version of ``True``, a singleton that can be accessed via ``S.true``. + + This is the SymPy version of ``True``, for use in the logic module. The + primary advantage of using ``true`` instead of ``True`` is that shorthand Boolean + operations like ``~`` and ``>>`` will work as expected on this class, whereas with + True they act bitwise on 1. Functions in the logic module will return this + class when they evaluate to true. + + Notes + ===== + + There is liable to be some confusion as to when ``True`` should + be used and when ``S.true`` should be used in various contexts + throughout SymPy. An important thing to remember is that + ``sympify(True)`` returns ``S.true``. This means that for the most + part, you can just use ``True`` and it will automatically be converted + to ``S.true`` when necessary, similar to how you can generally use 1 + instead of ``S.One``. 
+ + The rule of thumb is: + + "If the boolean in question can be replaced by an arbitrary symbolic + ``Boolean``, like ``Or(x, y)`` or ``x > 1``, use ``S.true``. + Otherwise, use ``True``" + + In other words, use ``S.true`` only on those contexts where the + boolean is being used as a symbolic representation of truth. + For example, if the object ends up in the ``.args`` of any expression, + then it must necessarily be ``S.true`` instead of ``True``, as + elements of ``.args`` must be ``Basic``. On the other hand, + ``==`` is not a symbolic operation in SymPy, since it always returns + ``True`` or ``False``, and does so in terms of structural equality + rather than mathematical, so it should return ``True``. The assumptions + system should use ``True`` and ``False``. Aside from not satisfying + the above rule of thumb, the assumptions system uses a three-valued logic + (``True``, ``False``, ``None``), whereas ``S.true`` and ``S.false`` + represent a two-valued logic. When in doubt, use ``True``. + + "``S.true == True is True``." + + While "``S.true is True``" is ``False``, "``S.true == True``" + is ``True``, so if there is any doubt over whether a function or + expression will return ``S.true`` or ``True``, just use ``==`` + instead of ``is`` to do the comparison, and it will work in either + case. Finally, for boolean flags, it's better to just use ``if x`` + instead of ``if x is True``. To quote PEP 8: + + Do not compare boolean values to ``True`` or ``False`` + using ``==``. + + * Yes: ``if greeting:`` + * No: ``if greeting == True:`` + * Worse: ``if greeting is True:`` + + Examples + ======== + + >>> from sympy import sympify, true, false, Or + >>> sympify(True) + True + >>> _ is True, _ is true + (False, True) + + >>> Or(true, false) + True + >>> _ is true + True + + Python operators give a boolean result for true but a + bitwise result for True + + >>> ~true, ~True + (False, -2) + >>> true >> true, True >> True + (True, 0) + + Python operators give a boolean result for true but a + bitwise result for True + + >>> ~true, ~True + (False, -2) + >>> true >> true, True >> True + (True, 0) + + See Also + ======== + + sympy.logic.boolalg.BooleanFalse + + """ + def __bool__(self): + return True + + def __hash__(self): + return hash(True) + + def __eq__(self, other): + if other is True: + return True + if other is False: + return False + return super().__eq__(other) + + @property + def negated(self): + return false + + def as_set(self): + """ + Rewrite logic operators and relationals in terms of real sets. + + Examples + ======== + + >>> from sympy import true + >>> true.as_set() + UniversalSet + + """ + return S.UniversalSet + + +class BooleanFalse(BooleanAtom, metaclass=Singleton): + """ + SymPy version of ``False``, a singleton that can be accessed via ``S.false``. + + This is the SymPy version of ``False``, for use in the logic module. The + primary advantage of using ``false`` instead of ``False`` is that shorthand + Boolean operations like ``~`` and ``>>`` will work as expected on this class, + whereas with ``False`` they act bitwise on 0. Functions in the logic module + will return this class when they evaluate to false. 
+ + Notes + ====== + + See the notes section in :py:class:`sympy.logic.boolalg.BooleanTrue` + + Examples + ======== + + >>> from sympy import sympify, true, false, Or + >>> sympify(False) + False + >>> _ is False, _ is false + (False, True) + + >>> Or(true, false) + True + >>> _ is true + True + + Python operators give a boolean result for false but a + bitwise result for False + + >>> ~false, ~False + (True, -1) + >>> false >> false, False >> False + (True, 0) + + See Also + ======== + + sympy.logic.boolalg.BooleanTrue + + """ + def __bool__(self): + return False + + def __hash__(self): + return hash(False) + + def __eq__(self, other): + if other is True: + return False + if other is False: + return True + return super().__eq__(other) + + @property + def negated(self): + return true + + def as_set(self): + """ + Rewrite logic operators and relationals in terms of real sets. + + Examples + ======== + + >>> from sympy import false + >>> false.as_set() + EmptySet + """ + return S.EmptySet + + +true = BooleanTrue() +false = BooleanFalse() +# We want S.true and S.false to work, rather than S.BooleanTrue and +# S.BooleanFalse, but making the class and instance names the same causes some +# major issues (like the inability to import the class directly from this +# file). +S.true = true +S.false = false + +_sympy_converter[bool] = lambda x: true if x else false + + +class BooleanFunction(Application, Boolean): + """Boolean function is a function that lives in a boolean space + It is used as base class for :py:class:`~.And`, :py:class:`~.Or`, + :py:class:`~.Not`, etc. + """ + is_Boolean = True + + def _eval_simplify(self, **kwargs): + rv = simplify_univariate(self) + if not isinstance(rv, BooleanFunction): + return rv.simplify(**kwargs) + rv = rv.func(*[a.simplify(**kwargs) for a in rv.args]) + return simplify_logic(rv) + + def simplify(self, **kwargs): + from sympy.simplify.simplify import simplify + return simplify(self, **kwargs) + + def __lt__(self, other): + raise TypeError(filldedent(''' + A Boolean argument can only be used in + Eq and Ne; all other relationals expect + real expressions. 
+ ''')) + __le__ = __lt__ + __ge__ = __lt__ + __gt__ = __lt__ + + @classmethod + def binary_check_and_simplify(self, *args): + from sympy.core.relational import Relational, Eq, Ne + args = [as_Boolean(i) for i in args] + bin_syms = set().union(*[i.binary_symbols for i in args]) + rel = set().union(*[i.atoms(Relational) for i in args]) + reps = {} + for x in bin_syms: + for r in rel: + if x in bin_syms and x in r.free_symbols: + if isinstance(r, (Eq, Ne)): + if not ( + true in r.args or + false in r.args): + reps[r] = false + else: + raise TypeError(filldedent(''' + Incompatible use of binary symbol `%s` as a + real variable in `%s` + ''' % (x, r))) + return [i.subs(reps) for i in args] + + def to_nnf(self, simplify=True): + return self._to_nnf(*self.args, simplify=simplify) + + def to_anf(self, deep=True): + return self._to_anf(*self.args, deep=deep) + + @classmethod + def _to_nnf(cls, *args, **kwargs): + simplify = kwargs.get('simplify', True) + argset = set() + for arg in args: + if not is_literal(arg): + arg = arg.to_nnf(simplify) + if simplify: + if isinstance(arg, cls): + arg = arg.args + else: + arg = (arg,) + for a in arg: + if Not(a) in argset: + return cls.zero + argset.add(a) + else: + argset.add(arg) + return cls(*argset) + + @classmethod + def _to_anf(cls, *args, **kwargs): + deep = kwargs.get('deep', True) + argset = set() + for arg in args: + if deep: + if not is_literal(arg) or isinstance(arg, Not): + arg = arg.to_anf(deep=deep) + argset.add(arg) + else: + argset.add(arg) + return cls(*argset, remove_true=False) + + # the diff method below is copied from Expr class + def diff(self, *symbols, **assumptions): + assumptions.setdefault("evaluate", True) + return Derivative(self, *symbols, **assumptions) + + def _eval_derivative(self, x): + if x in self.binary_symbols: + from sympy.core.relational import Eq + from sympy.functions.elementary.piecewise import Piecewise + return Piecewise( + (0, Eq(self.subs(x, 0), self.subs(x, 1))), + (1, True)) + elif x in self.free_symbols: + # not implemented, see https://www.encyclopediaofmath.org/ + # index.php/Boolean_differential_calculus + pass + else: + return S.Zero + + +class And(LatticeOp, BooleanFunction): + """ + Logical AND function. + + It evaluates its arguments in order, returning false immediately + when an argument is false and true if they are all true. + + Examples + ======== + + >>> from sympy.abc import x, y + >>> from sympy import And + >>> x & y + x & y + + Notes + ===== + + The ``&`` operator is provided as a convenience, but note that its use + here is different from its normal use in Python, which is bitwise + and. Hence, ``And(a, b)`` and ``a & b`` will produce different results if + ``a`` and ``b`` are integers. 
+ + >>> And(x, y).subs(x, 1) + y + + """ + zero = false + identity = true + + nargs = None + + @classmethod + def _new_args_filter(cls, args): + args = BooleanFunction.binary_check_and_simplify(*args) + args = LatticeOp._new_args_filter(args, And) + newargs = [] + rel = set() + for x in ordered(args): + if x.is_Relational: + c = x.canonical + if c in rel: + continue + elif c.negated.canonical in rel: + return [false] + else: + rel.add(c) + newargs.append(x) + return newargs + + def _eval_subs(self, old, new): + args = [] + bad = None + for i in self.args: + try: + i = i.subs(old, new) + except TypeError: + # store TypeError + if bad is None: + bad = i + continue + if i == False: + return false + elif i != True: + args.append(i) + if bad is not None: + # let it raise + bad.subs(old, new) + # If old is And, replace the parts of the arguments with new if all + # are there + if isinstance(old, And): + old_set = set(old.args) + if old_set.issubset(args): + args = set(args) - old_set + args.add(new) + + return self.func(*args) + + def _eval_simplify(self, **kwargs): + from sympy.core.relational import Equality, Relational + from sympy.solvers.solveset import linear_coeffs + # standard simplify + rv = super()._eval_simplify(**kwargs) + if not isinstance(rv, And): + return rv + + # simplify args that are equalities involving + # symbols so x == 0 & x == y -> x==0 & y == 0 + Rel, nonRel = sift(rv.args, lambda i: isinstance(i, Relational), + binary=True) + if not Rel: + return rv + eqs, other = sift(Rel, lambda i: isinstance(i, Equality), binary=True) + + measure = kwargs['measure'] + if eqs: + ratio = kwargs['ratio'] + reps = {} + sifted = {} + # group by length of free symbols + sifted = sift(ordered([ + (i.free_symbols, i) for i in eqs]), + lambda x: len(x[0])) + eqs = [] + nonlineqs = [] + while 1 in sifted: + for free, e in sifted.pop(1): + x = free.pop() + if (e.lhs != x or x in e.rhs.free_symbols) and x not in reps: + try: + m, b = linear_coeffs( + e.rewrite(Add, evaluate=False), x) + enew = e.func(x, -b/m) + if measure(enew) <= ratio*measure(e): + e = enew + else: + eqs.append(e) + continue + except ValueError: + pass + if x in reps: + eqs.append(e.subs(x, reps[x])) + elif e.lhs == x and x not in e.rhs.free_symbols: + reps[x] = e.rhs + eqs.append(e) + else: + # x is not yet identified, but may be later + nonlineqs.append(e) + resifted = defaultdict(list) + for k in sifted: + for f, e in sifted[k]: + e = e.xreplace(reps) + f = e.free_symbols + resifted[len(f)].append((f, e)) + sifted = resifted + for k in sifted: + eqs.extend([e for f, e in sifted[k]]) + nonlineqs = [ei.subs(reps) for ei in nonlineqs] + other = [ei.subs(reps) for ei in other] + rv = rv.func(*([i.canonical for i in (eqs + nonlineqs + other)] + nonRel)) + patterns = _simplify_patterns_and() + threeterm_patterns = _simplify_patterns_and3() + return _apply_patternbased_simplification(rv, patterns, + measure, false, + threeterm_patterns=threeterm_patterns) + + def _eval_as_set(self): + from sympy.sets.sets import Intersection + return Intersection(*[arg.as_set() for arg in self.args]) + + def _eval_rewrite_as_Nor(self, *args, **kwargs): + return Nor(*[Not(arg) for arg in self.args]) + + def to_anf(self, deep=True): + if deep: + result = And._to_anf(*self.args, deep=deep) + return distribute_xor_over_and(result) + return self + + +class Or(LatticeOp, BooleanFunction): + """ + Logical OR function + + It evaluates its arguments in order, returning true immediately + when an argument is true, and false if they are all false. 
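# Illustrative only: the to_anf method defined at the end of this class builds
# the algebraic normal form of an Or from the powerset of its arguments --
# every nonempty conjunction of arguments, combined with Xor.
from sympy.abc import A, B
from sympy.logic.boolalg import Xor

assert (A | B).to_anf() == Xor(A, B, A & B)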
+ + Examples + ======== + + >>> from sympy.abc import x, y + >>> from sympy import Or + >>> x | y + x | y + + Notes + ===== + + The ``|`` operator is provided as a convenience, but note that its use + here is different from its normal use in Python, which is bitwise + or. Hence, ``Or(a, b)`` and ``a | b`` will return different things if + ``a`` and ``b`` are integers. + + >>> Or(x, y).subs(x, 0) + y + + """ + zero = true + identity = false + + @classmethod + def _new_args_filter(cls, args): + newargs = [] + rel = [] + args = BooleanFunction.binary_check_and_simplify(*args) + for x in args: + if x.is_Relational: + c = x.canonical + if c in rel: + continue + nc = c.negated.canonical + if any(r == nc for r in rel): + return [true] + rel.append(c) + newargs.append(x) + return LatticeOp._new_args_filter(newargs, Or) + + def _eval_subs(self, old, new): + args = [] + bad = None + for i in self.args: + try: + i = i.subs(old, new) + except TypeError: + # store TypeError + if bad is None: + bad = i + continue + if i == True: + return true + elif i != False: + args.append(i) + if bad is not None: + # let it raise + bad.subs(old, new) + # If old is Or, replace the parts of the arguments with new if all + # are there + if isinstance(old, Or): + old_set = set(old.args) + if old_set.issubset(args): + args = set(args) - old_set + args.add(new) + + return self.func(*args) + + def _eval_as_set(self): + from sympy.sets.sets import Union + return Union(*[arg.as_set() for arg in self.args]) + + def _eval_rewrite_as_Nand(self, *args, **kwargs): + return Nand(*[Not(arg) for arg in self.args]) + + def _eval_simplify(self, **kwargs): + from sympy.core.relational import Le, Ge, Eq + lege = self.atoms(Le, Ge) + if lege: + reps = {i: self.func( + Eq(i.lhs, i.rhs), i.strict) for i in lege} + return self.xreplace(reps)._eval_simplify(**kwargs) + # standard simplify + rv = super()._eval_simplify(**kwargs) + if not isinstance(rv, Or): + return rv + patterns = _simplify_patterns_or() + return _apply_patternbased_simplification(rv, patterns, + kwargs['measure'], true) + + def to_anf(self, deep=True): + args = range(1, len(self.args) + 1) + args = (combinations(self.args, j) for j in args) + args = chain.from_iterable(args) # powerset + args = (And(*arg) for arg in args) + args = (to_anf(x, deep=deep) if deep else x for x in args) + return Xor(*list(args), remove_true=False) + + +class Not(BooleanFunction): + """ + Logical Not function (negation) + + + Returns ``true`` if the statement is ``false`` or ``False``. + Returns ``false`` if the statement is ``true`` or ``True``. + + Examples + ======== + + >>> from sympy import Not, And, Or + >>> from sympy.abc import x, A, B + >>> Not(True) + False + >>> Not(False) + True + >>> Not(And(True, False)) + True + >>> Not(Or(True, False)) + False + >>> Not(And(And(True, x), Or(x, False))) + ~x + >>> ~x + ~x + >>> Not(And(Or(A, B), Or(~A, ~B))) + ~((A | B) & (~A | ~B)) + + Notes + ===== + + - The ``~`` operator is provided as a convenience, but note that its use + here is different from its normal use in Python, which is bitwise + not. In particular, ``~a`` and ``Not(a)`` will be different if ``a`` is + an integer. Furthermore, since bools in Python subclass from ``int``, + ``~True`` is the same as ``~1`` which is ``-2``, which has a boolean + value of True. To avoid this issue, use the SymPy boolean types + ``true`` and ``false``. 
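+
+ Relatedly (a short illustration), ``Not`` interprets SymPy numbers by
+ their truth value rather than negating them bitwise:
+
+ >>> Not(0)
+ True
+ >>> Not(5)
+ False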
+ + >>> from sympy import true + >>> ~True + -2 + >>> ~true + False + + """ + + is_Not = True + + @classmethod + def eval(cls, arg): + if isinstance(arg, Number) or arg in (True, False): + return false if arg else true + if arg.is_Not: + return arg.args[0] + # Simplify Relational objects. + if arg.is_Relational: + return arg.negated + + def _eval_as_set(self): + """ + Rewrite logic operators and relationals in terms of real sets. + + Examples + ======== + + >>> from sympy import Not, Symbol + >>> x = Symbol('x') + >>> Not(x > 0).as_set() + Interval(-oo, 0) + """ + return self.args[0].as_set().complement(S.Reals) + + def to_nnf(self, simplify=True): + if is_literal(self): + return self + + expr = self.args[0] + + func, args = expr.func, expr.args + + if func == And: + return Or._to_nnf(*[Not(arg) for arg in args], simplify=simplify) + + if func == Or: + return And._to_nnf(*[Not(arg) for arg in args], simplify=simplify) + + if func == Implies: + a, b = args + return And._to_nnf(a, Not(b), simplify=simplify) + + if func == Equivalent: + return And._to_nnf(Or(*args), Or(*[Not(arg) for arg in args]), + simplify=simplify) + + if func == Xor: + result = [] + for i in range(1, len(args)+1, 2): + for neg in combinations(args, i): + clause = [Not(s) if s in neg else s for s in args] + result.append(Or(*clause)) + return And._to_nnf(*result, simplify=simplify) + + if func == ITE: + a, b, c = args + return And._to_nnf(Or(a, Not(c)), Or(Not(a), Not(b)), simplify=simplify) + + raise ValueError("Illegal operator %s in expression" % func) + + def to_anf(self, deep=True): + return Xor._to_anf(true, self.args[0], deep=deep) + + +class Xor(BooleanFunction): + """ + Logical XOR (exclusive OR) function. + + + Returns True if an odd number of the arguments are True and the rest are + False. + + Returns False if an even number of the arguments are True and the rest are + False. + + Examples + ======== + + >>> from sympy.logic.boolalg import Xor + >>> from sympy import symbols + >>> x, y = symbols('x y') + >>> Xor(True, False) + True + >>> Xor(True, True) + False + >>> Xor(True, False, True, True, False) + True + >>> Xor(True, False, True, False) + False + >>> x ^ y + x ^ y + + Notes + ===== + + The ``^`` operator is provided as a convenience, but note that its use + here is different from its normal use in Python, which is bitwise xor. In + particular, ``a ^ b`` and ``Xor(a, b)`` will be different if ``a`` and + ``b`` are integers. + + >>> Xor(x, y).subs(y, 0) + x + + """ + def __new__(cls, *args, remove_true=True, **kwargs): + argset = set() + obj = super().__new__(cls, *args, **kwargs) + for arg in obj._args: + if isinstance(arg, Number) or arg in (True, False): + if arg: + arg = true + else: + continue + if isinstance(arg, Xor): + for a in arg.args: + argset.remove(a) if a in argset else argset.add(a) + elif arg in argset: + argset.remove(arg) + else: + argset.add(arg) + rel = [(r, r.canonical, r.negated.canonical) + for r in argset if r.is_Relational] + odd = False # is number of complimentary pairs odd? 
start 0 -> False + remove = [] + for i, (r, c, nc) in enumerate(rel): + for j in range(i + 1, len(rel)): + rj, cj = rel[j][:2] + if cj == nc: + odd = ~odd + break + elif cj == c: + break + else: + continue + remove.append((r, rj)) + if odd: + argset.remove(true) if true in argset else argset.add(true) + for a, b in remove: + argset.remove(a) + argset.remove(b) + if len(argset) == 0: + return false + elif len(argset) == 1: + return argset.pop() + elif True in argset and remove_true: + argset.remove(True) + return Not(Xor(*argset)) + else: + obj._args = tuple(ordered(argset)) + obj._argset = frozenset(argset) + return obj + + # XXX: This should be cached on the object rather than using cacheit + # Maybe it can be computed in __new__? + @property # type: ignore + @cacheit + def args(self): + return tuple(ordered(self._argset)) + + def to_nnf(self, simplify=True): + args = [] + for i in range(0, len(self.args)+1, 2): + for neg in combinations(self.args, i): + clause = [Not(s) if s in neg else s for s in self.args] + args.append(Or(*clause)) + return And._to_nnf(*args, simplify=simplify) + + def _eval_rewrite_as_Or(self, *args, **kwargs): + a = self.args + return Or(*[_convert_to_varsSOP(x, self.args) + for x in _get_odd_parity_terms(len(a))]) + + def _eval_rewrite_as_And(self, *args, **kwargs): + a = self.args + return And(*[_convert_to_varsPOS(x, self.args) + for x in _get_even_parity_terms(len(a))]) + + def _eval_simplify(self, **kwargs): + # as standard simplify uses simplify_logic which writes things as + # And and Or, we only simplify the partial expressions before using + # patterns + rv = self.func(*[a.simplify(**kwargs) for a in self.args]) + if not isinstance(rv, Xor): # This shouldn't really happen here + return rv + patterns = _simplify_patterns_xor() + return _apply_patternbased_simplification(rv, patterns, + kwargs['measure'], None) + + def _eval_subs(self, old, new): + # If old is Xor, replace the parts of the arguments with new if all + # are there + if isinstance(old, Xor): + old_set = set(old.args) + if old_set.issubset(self.args): + args = set(self.args) - old_set + args.add(new) + return self.func(*args) + + +class Nand(BooleanFunction): + """ + Logical NAND function. + + It evaluates its arguments in order, giving True immediately if any + of them are False, and False if they are all True. + + Returns True if any of the arguments are False + Returns False if all arguments are True + + Examples + ======== + + >>> from sympy.logic.boolalg import Nand + >>> from sympy import symbols + >>> x, y = symbols('x y') + >>> Nand(False, True) + True + >>> Nand(True, True) + False + >>> Nand(x, y) + ~(x & y) + + """ + @classmethod + def eval(cls, *args): + return Not(And(*args)) + + +class Nor(BooleanFunction): + """ + Logical NOR function. + + It evaluates its arguments in order, giving False immediately if any + of them are True, and True if they are all False. + + Returns False if any argument is True + Returns True if all arguments are False + + Examples + ======== + + >>> from sympy.logic.boolalg import Nor + >>> from sympy import symbols + >>> x, y = symbols('x y') + + >>> Nor(True, False) + False + >>> Nor(True, True) + False + >>> Nor(False, True) + False + >>> Nor(False, False) + True + >>> Nor(x, y) + ~(x | y) + + """ + @classmethod + def eval(cls, *args): + return Not(Or(*args)) + + +class Xnor(BooleanFunction): + """ + Logical XNOR function. + + Returns False if an odd number of the arguments are True and the rest are + False. 
+ + Returns True if an even number of the arguments are True and the rest are + False. + + Examples + ======== + + >>> from sympy.logic.boolalg import Xnor + >>> from sympy import symbols + >>> x, y = symbols('x y') + >>> Xnor(True, False) + False + >>> Xnor(True, True) + True + >>> Xnor(True, False, True, True, False) + False + >>> Xnor(True, False, True, False) + True + + """ + @classmethod + def eval(cls, *args): + return Not(Xor(*args)) + + +class Implies(BooleanFunction): + r""" + Logical implication. + + A implies B is equivalent to if A then B. Mathematically, it is written + as `A \Rightarrow B` and is equivalent to `\neg A \vee B` or ``~A | B``. + + Accepts two Boolean arguments; A and B. + Returns False if A is True and B is False + Returns True otherwise. + + Examples + ======== + + >>> from sympy.logic.boolalg import Implies + >>> from sympy import symbols + >>> x, y = symbols('x y') + + >>> Implies(True, False) + False + >>> Implies(False, False) + True + >>> Implies(True, True) + True + >>> Implies(False, True) + True + >>> x >> y + Implies(x, y) + >>> y << x + Implies(x, y) + + Notes + ===== + + The ``>>`` and ``<<`` operators are provided as a convenience, but note + that their use here is different from their normal use in Python, which is + bit shifts. Hence, ``Implies(a, b)`` and ``a >> b`` will return different + things if ``a`` and ``b`` are integers. In particular, since Python + considers ``True`` and ``False`` to be integers, ``True >> True`` will be + the same as ``1 >> 1``, i.e., 0, which has a truth value of False. To + avoid this issue, use the SymPy objects ``true`` and ``false``. + + >>> from sympy import true, false + >>> True >> False + 1 + >>> true >> false + False + + """ + @classmethod + def eval(cls, *args): + try: + newargs = [] + for x in args: + if isinstance(x, Number) or x in (0, 1): + newargs.append(bool(x)) + else: + newargs.append(x) + A, B = newargs + except ValueError: + raise ValueError( + "%d operand(s) used for an Implies " + "(pairs are required): %s" % (len(args), str(args))) + if A in (True, False) or B in (True, False): + return Or(Not(A), B) + elif A == B: + return true + elif A.is_Relational and B.is_Relational: + if A.canonical == B.canonical: + return true + if A.negated.canonical == B.canonical: + return B + else: + return Basic.__new__(cls, *args) + + def to_nnf(self, simplify=True): + a, b = self.args + return Or._to_nnf(Not(a), b, simplify=simplify) + + def to_anf(self, deep=True): + a, b = self.args + return Xor._to_anf(true, a, And(a, b), deep=deep) + + +class Equivalent(BooleanFunction): + """ + Equivalence relation. + + ``Equivalent(A, B)`` is True iff A and B are both True or both False. + + Returns True if all of the arguments are logically equivalent. + Returns False otherwise. + + For two arguments, this is equivalent to :py:class:`~.Xnor`. 
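+
+ As a quick consistency check (two arbitrary symbols), both reduce to the
+ same simplified form:
+
+ >>> from sympy.logic import simplify_logic
+ >>> from sympy.logic.boolalg import Equivalent, Xnor
+ >>> from sympy.abc import p, q
+ >>> simplify_logic(Equivalent(p, q)) == simplify_logic(Xnor(p, q))
+ True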
+ + Examples + ======== + + >>> from sympy.logic.boolalg import Equivalent, And + >>> from sympy.abc import x + >>> Equivalent(False, False, False) + True + >>> Equivalent(True, False, False) + False + >>> Equivalent(x, And(x, True)) + True + + """ + def __new__(cls, *args, **options): + from sympy.core.relational import Relational + args = [_sympify(arg) for arg in args] + + argset = set(args) + for x in args: + if isinstance(x, Number) or x in [True, False]: # Includes 0, 1 + argset.discard(x) + argset.add(bool(x)) + rel = [] + for r in argset: + if isinstance(r, Relational): + rel.append((r, r.canonical, r.negated.canonical)) + remove = [] + for i, (r, c, nc) in enumerate(rel): + for j in range(i + 1, len(rel)): + rj, cj = rel[j][:2] + if cj == nc: + return false + elif cj == c: + remove.append((r, rj)) + break + for a, b in remove: + argset.remove(a) + argset.remove(b) + argset.add(True) + if len(argset) <= 1: + return true + if True in argset: + argset.discard(True) + return And(*argset) + if False in argset: + argset.discard(False) + return And(*[Not(arg) for arg in argset]) + _args = frozenset(argset) + obj = super().__new__(cls, _args) + obj._argset = _args + return obj + + # XXX: This should be cached on the object rather than using cacheit + # Maybe it can be computed in __new__? + @property # type: ignore + @cacheit + def args(self): + return tuple(ordered(self._argset)) + + def to_nnf(self, simplify=True): + args = [] + for a, b in zip(self.args, self.args[1:]): + args.append(Or(Not(a), b)) + args.append(Or(Not(self.args[-1]), self.args[0])) + return And._to_nnf(*args, simplify=simplify) + + def to_anf(self, deep=True): + a = And(*self.args) + b = And(*[to_anf(Not(arg), deep=False) for arg in self.args]) + b = distribute_xor_over_and(b) + return Xor._to_anf(a, b, deep=deep) + + +class ITE(BooleanFunction): + """ + If-then-else clause. + + ``ITE(A, B, C)`` evaluates and returns the result of B if A is true + else it returns the result of C. All args must be Booleans. + + From a logic gate perspective, ITE corresponds to a 2-to-1 multiplexer, + where A is the select signal. + + Examples + ======== + + >>> from sympy.logic.boolalg import ITE, And, Xor, Or + >>> from sympy.abc import x, y, z + >>> ITE(True, False, True) + False + >>> ITE(Or(True, False), And(True, True), Xor(True, True)) + True + >>> ITE(x, y, z) + ITE(x, y, z) + >>> ITE(True, x, y) + x + >>> ITE(False, x, y) + y + >>> ITE(x, y, y) + y + + Trying to use non-Boolean args will generate a TypeError: + + >>> ITE(True, [], ()) + Traceback (most recent call last): + ... 
+ TypeError: expecting bool, Boolean or ITE, not `[]` + + """ + def __new__(cls, *args, **kwargs): + from sympy.core.relational import Eq, Ne + if len(args) != 3: + raise ValueError('expecting exactly 3 args') + a, b, c = args + # check use of binary symbols + if isinstance(a, (Eq, Ne)): + # in this context, we can evaluate the Eq/Ne + # if one arg is a binary symbol and the other + # is true/false + b, c = map(as_Boolean, (b, c)) + bin_syms = set().union(*[i.binary_symbols for i in (b, c)]) + if len(set(a.args) - bin_syms) == 1: + # one arg is a binary_symbols + _a = a + if a.lhs is true: + a = a.rhs + elif a.rhs is true: + a = a.lhs + elif a.lhs is false: + a = Not(a.rhs) + elif a.rhs is false: + a = Not(a.lhs) + else: + # binary can only equal True or False + a = false + if isinstance(_a, Ne): + a = Not(a) + else: + a, b, c = BooleanFunction.binary_check_and_simplify( + a, b, c) + rv = None + if kwargs.get('evaluate', True): + rv = cls.eval(a, b, c) + if rv is None: + rv = BooleanFunction.__new__(cls, a, b, c, evaluate=False) + return rv + + @classmethod + def eval(cls, *args): + from sympy.core.relational import Eq, Ne + # do the args give a singular result? + a, b, c = args + if isinstance(a, (Ne, Eq)): + _a = a + if true in a.args: + a = a.lhs if a.rhs is true else a.rhs + elif false in a.args: + a = Not(a.lhs) if a.rhs is false else Not(a.rhs) + else: + _a = None + if _a is not None and isinstance(_a, Ne): + a = Not(a) + if a is true: + return b + if a is false: + return c + if b == c: + return b + else: + # or maybe the results allow the answer to be expressed + # in terms of the condition + if b is true and c is false: + return a + if b is false and c is true: + return Not(a) + if [a, b, c] != args: + return cls(a, b, c, evaluate=False) + + def to_nnf(self, simplify=True): + a, b, c = self.args + return And._to_nnf(Or(Not(a), b), Or(a, c), simplify=simplify) + + def _eval_as_set(self): + return self.to_nnf().as_set() + + def _eval_rewrite_as_Piecewise(self, *args, **kwargs): + from sympy.functions.elementary.piecewise import Piecewise + return Piecewise((args[1], args[0]), (args[2], True)) + + +class Exclusive(BooleanFunction): + """ + True if only one or no argument is true. + + ``Exclusive(A, B, C)`` is equivalent to ``~(A & B) & ~(A & C) & ~(B & C)``. + + For two arguments, this is equivalent to :py:class:`~.Xor`. + + Examples + ======== + + >>> from sympy.logic.boolalg import Exclusive + >>> Exclusive(False, False, False) + True + >>> Exclusive(False, True, False) + True + >>> Exclusive(False, True, True) + False + + """ + @classmethod + def eval(cls, *args): + and_args = [] + for a, b in combinations(args, 2): + and_args.append(Not(And(a, b))) + return And(*and_args) + + +# end class definitions. Some useful methods + + +def conjuncts(expr): + """Return a list of the conjuncts in ``expr``. + + Examples + ======== + + >>> from sympy.logic.boolalg import conjuncts + >>> from sympy.abc import A, B + >>> conjuncts(A & B) + frozenset({A, B}) + >>> conjuncts(A | B) + frozenset({A | B}) + + """ + return And.make_args(expr) + + +def disjuncts(expr): + """Return a list of the disjuncts in ``expr``. 
+ + Examples + ======== + + >>> from sympy.logic.boolalg import disjuncts + >>> from sympy.abc import A, B + >>> disjuncts(A | B) + frozenset({A, B}) + >>> disjuncts(A & B) + frozenset({A & B}) + + """ + return Or.make_args(expr) + + +def distribute_and_over_or(expr): + """ + Given a sentence ``expr`` consisting of conjunctions and disjunctions + of literals, return an equivalent sentence in CNF. + + Examples + ======== + + >>> from sympy.logic.boolalg import distribute_and_over_or, And, Or, Not + >>> from sympy.abc import A, B, C + >>> distribute_and_over_or(Or(A, And(Not(B), Not(C)))) + (A | ~B) & (A | ~C) + + """ + return _distribute((expr, And, Or)) + + +def distribute_or_over_and(expr): + """ + Given a sentence ``expr`` consisting of conjunctions and disjunctions + of literals, return an equivalent sentence in DNF. + + Note that the output is NOT simplified. + + Examples + ======== + + >>> from sympy.logic.boolalg import distribute_or_over_and, And, Or, Not + >>> from sympy.abc import A, B, C + >>> distribute_or_over_and(And(Or(Not(A), B), C)) + (B & C) | (C & ~A) + + """ + return _distribute((expr, Or, And)) + + +def distribute_xor_over_and(expr): + """ + Given a sentence ``expr`` consisting of conjunction and + exclusive disjunctions of literals, return an + equivalent exclusive disjunction. + + Note that the output is NOT simplified. + + Examples + ======== + + >>> from sympy.logic.boolalg import distribute_xor_over_and, And, Xor, Not + >>> from sympy.abc import A, B, C + >>> distribute_xor_over_and(And(Xor(Not(A), B), C)) + (B & C) ^ (C & ~A) + """ + return _distribute((expr, Xor, And)) + + +def _distribute(info): + """ + Distributes ``info[1]`` over ``info[2]`` with respect to ``info[0]``. + """ + if isinstance(info[0], info[2]): + for arg in info[0].args: + if isinstance(arg, info[1]): + conj = arg + break + else: + return info[0] + rest = info[2](*[a for a in info[0].args if a is not conj]) + return info[1](*list(map(_distribute, + [(info[2](c, rest), info[1], info[2]) + for c in conj.args])), remove_true=False) + elif isinstance(info[0], info[1]): + return info[1](*list(map(_distribute, + [(x, info[1], info[2]) + for x in info[0].args])), + remove_true=False) + else: + return info[0] + + +def to_anf(expr, deep=True): + r""" + Converts expr to Algebraic Normal Form (ANF). + + ANF is a canonical normal form, which means that two + equivalent formulas will convert to the same ANF. + + A logical expression is in ANF if it has the form + + .. math:: 1 \oplus a \oplus b \oplus ab \oplus abc + + i.e. it can be: + - purely true, + - purely false, + - conjunction of variables, + - exclusive disjunction. + + The exclusive disjunction can only contain true, variables + or conjunction of variables. No negations are permitted. + + If ``deep`` is ``False``, arguments of the boolean + expression are considered variables, i.e. only the + top-level expression is converted to ANF. + + Examples + ======== + >>> from sympy.logic.boolalg import And, Or, Not, Implies, Equivalent + >>> from sympy.logic.boolalg import to_anf + >>> from sympy.abc import A, B, C + >>> to_anf(Not(A)) + A ^ True + >>> to_anf(And(Or(A, B), Not(C))) + A ^ B ^ (A & B) ^ (A & C) ^ (B & C) ^ (A & B & C) + >>> to_anf(Implies(Not(A), Equivalent(B, C)), deep=False) + True ^ ~A ^ (~A & (Equivalent(B, C))) + + """ + expr = sympify(expr) + + if is_anf(expr): + return expr + return expr.to_anf(deep=deep) + + +def to_nnf(expr, simplify=True): + """ + Converts ``expr`` to Negation Normal Form (NNF). 
+ + A logical expression is in NNF if it + contains only :py:class:`~.And`, :py:class:`~.Or` and :py:class:`~.Not`, + and :py:class:`~.Not` is applied only to literals. + If ``simplify`` is ``True``, the result contains no redundant clauses. + + Examples + ======== + + >>> from sympy.abc import A, B, C, D + >>> from sympy.logic.boolalg import Not, Equivalent, to_nnf + >>> to_nnf(Not((~A & ~B) | (C & D))) + (A | B) & (~C | ~D) + >>> to_nnf(Equivalent(A >> B, B >> A)) + (A | ~B | (A & ~B)) & (B | ~A | (B & ~A)) + + """ + if is_nnf(expr, simplify): + return expr + return expr.to_nnf(simplify) + + +def to_cnf(expr, simplify=False, force=False): + """ + Convert a propositional logical sentence ``expr`` to conjunctive normal + form: ``((A | ~B | ...) & (B | C | ...) & ...)``. + If ``simplify`` is ``True``, ``expr`` is evaluated to its simplest CNF + form using the Quine-McCluskey algorithm; this may take a long + time. If there are more than 8 variables the ``force`` flag must be set + to ``True`` to simplify (default is ``False``). + + Examples + ======== + + >>> from sympy.logic.boolalg import to_cnf + >>> from sympy.abc import A, B, D + >>> to_cnf(~(A | B) | D) + (D | ~A) & (D | ~B) + >>> to_cnf((A | B) & (A | ~A), True) + A | B + + """ + expr = sympify(expr) + if not isinstance(expr, BooleanFunction): + return expr + + if simplify: + if not force and len(_find_predicates(expr)) > 8: + raise ValueError(filldedent(''' + To simplify a logical expression with more + than 8 variables may take a long time and requires + the use of `force=True`.''')) + return simplify_logic(expr, 'cnf', True, force=force) + + # Don't convert unless we have to + if is_cnf(expr): + return expr + + expr = eliminate_implications(expr) + res = distribute_and_over_or(expr) + + return res + + +def to_dnf(expr, simplify=False, force=False): + """ + Convert a propositional logical sentence ``expr`` to disjunctive normal + form: ``((A & ~B & ...) | (B & C & ...) | ...)``. + If ``simplify`` is ``True``, ``expr`` is evaluated to its simplest DNF form using + the Quine-McCluskey algorithm; this may take a long + time. If there are more than 8 variables, the ``force`` flag must be set to + ``True`` to simplify (default is ``False``). + + Examples + ======== + + >>> from sympy.logic.boolalg import to_dnf + >>> from sympy.abc import A, B, C + >>> to_dnf(B & (A | C)) + (A & B) | (B & C) + >>> to_dnf((A & B) | (A & ~B) | (B & C) | (~B & C), True) + A | C + + """ + expr = sympify(expr) + if not isinstance(expr, BooleanFunction): + return expr + + if simplify: + if not force and len(_find_predicates(expr)) > 8: + raise ValueError(filldedent(''' + To simplify a logical expression with more + than 8 variables may take a long time and requires + the use of `force=True`.''')) + return simplify_logic(expr, 'dnf', True, force=force) + + # Don't convert unless we have to + if is_dnf(expr): + return expr + + expr = eliminate_implications(expr) + return distribute_or_over_and(expr) + + +def is_anf(expr): + r""" + Checks if ``expr`` is in Algebraic Normal Form (ANF). + + A logical expression is in ANF if it has the form + + .. math:: 1 \oplus a \oplus b \oplus ab \oplus abc + + i.e. it is purely true, purely false, conjunction of + variables or exclusive disjunction. The exclusive + disjunction can only contain true, variables or + conjunction of variables. No negations are permitted. 
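+
+ In particular, the output of :py:func:`~.to_anf` with the default
+ ``deep=True`` should satisfy this predicate; a brief check:
+
+ >>> from sympy.logic.boolalg import to_anf, is_anf
+ >>> from sympy.abc import A, B
+ >>> is_anf(to_anf(~A | B))
+ True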
+ + Examples + ======== + + >>> from sympy.logic.boolalg import And, Not, Xor, true, is_anf + >>> from sympy.abc import A, B, C + >>> is_anf(true) + True + >>> is_anf(A) + True + >>> is_anf(And(A, B, C)) + True + >>> is_anf(Xor(A, Not(B))) + False + + """ + expr = sympify(expr) + + if is_literal(expr) and not isinstance(expr, Not): + return True + + if isinstance(expr, And): + for arg in expr.args: + if not arg.is_Symbol: + return False + return True + + elif isinstance(expr, Xor): + for arg in expr.args: + if isinstance(arg, And): + for a in arg.args: + if not a.is_Symbol: + return False + elif is_literal(arg): + if isinstance(arg, Not): + return False + else: + return False + return True + + else: + return False + + +def is_nnf(expr, simplified=True): + """ + Checks if ``expr`` is in Negation Normal Form (NNF). + + A logical expression is in NNF if it + contains only :py:class:`~.And`, :py:class:`~.Or` and :py:class:`~.Not`, + and :py:class:`~.Not` is applied only to literals. + If ``simplified`` is ``True``, checks if result contains no redundant clauses. + + Examples + ======== + + >>> from sympy.abc import A, B, C + >>> from sympy.logic.boolalg import Not, is_nnf + >>> is_nnf(A & B | ~C) + True + >>> is_nnf((A | ~A) & (B | C)) + False + >>> is_nnf((A | ~A) & (B | C), False) + True + >>> is_nnf(Not(A & B) | C) + False + >>> is_nnf((A >> B) & (B >> A)) + False + + """ + + expr = sympify(expr) + if is_literal(expr): + return True + + stack = [expr] + + while stack: + expr = stack.pop() + if expr.func in (And, Or): + if simplified: + args = expr.args + for arg in args: + if Not(arg) in args: + return False + stack.extend(expr.args) + + elif not is_literal(expr): + return False + + return True + + +def is_cnf(expr): + """ + Test whether or not an expression is in conjunctive normal form. + + Examples + ======== + + >>> from sympy.logic.boolalg import is_cnf + >>> from sympy.abc import A, B, C + >>> is_cnf(A | B | C) + True + >>> is_cnf(A & B & C) + True + >>> is_cnf((A & B) | C) + False + + """ + return _is_form(expr, And, Or) + + +def is_dnf(expr): + """ + Test whether or not an expression is in disjunctive normal form. + + Examples + ======== + + >>> from sympy.logic.boolalg import is_dnf + >>> from sympy.abc import A, B, C + >>> is_dnf(A | B | C) + True + >>> is_dnf(A & B & C) + True + >>> is_dnf((A & B) | C) + True + >>> is_dnf(A & (B | C)) + False + + """ + return _is_form(expr, Or, And) + + +def _is_form(expr, function1, function2): + """ + Test whether or not an expression is of the required form. + + """ + expr = sympify(expr) + + vals = function1.make_args(expr) if isinstance(expr, function1) else [expr] + for lit in vals: + if isinstance(lit, function2): + vals2 = function2.make_args(lit) if isinstance(lit, function2) else [lit] + for l in vals2: + if is_literal(l) is False: + return False + elif is_literal(lit) is False: + return False + + return True + + +def eliminate_implications(expr): + """ + Change :py:class:`~.Implies` and :py:class:`~.Equivalent` into + :py:class:`~.And`, :py:class:`~.Or`, and :py:class:`~.Not`. + That is, return an expression that is equivalent to ``expr``, but has only + ``&``, ``|``, and ``~`` as logical + operators. 
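+
+ Nested implications flatten into a single disjunction; a quick check
+ (an equality comparison is used to avoid depending on printing order):
+
+ >>> from sympy.logic.boolalg import eliminate_implications
+ >>> from sympy.abc import A, B, C
+ >>> eliminate_implications(A >> (B >> C)) == (C | ~A | ~B)
+ True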
+ + Examples + ======== + + >>> from sympy.logic.boolalg import Implies, Equivalent, \ + eliminate_implications + >>> from sympy.abc import A, B, C + >>> eliminate_implications(Implies(A, B)) + B | ~A + >>> eliminate_implications(Equivalent(A, B)) + (A | ~B) & (B | ~A) + >>> eliminate_implications(Equivalent(A, B, C)) + (A | ~C) & (B | ~A) & (C | ~B) + + """ + return to_nnf(expr, simplify=False) + + +def is_literal(expr): + """ + Returns True if expr is a literal, else False. + + Examples + ======== + + >>> from sympy import Or, Q + >>> from sympy.abc import A, B + >>> from sympy.logic.boolalg import is_literal + >>> is_literal(A) + True + >>> is_literal(~A) + True + >>> is_literal(Q.zero(A)) + True + >>> is_literal(A + B) + True + >>> is_literal(Or(A, B)) + False + + """ + from sympy.assumptions import AppliedPredicate + + if isinstance(expr, Not): + return is_literal(expr.args[0]) + elif expr in (True, False) or isinstance(expr, AppliedPredicate) or expr.is_Atom: + return True + elif not isinstance(expr, BooleanFunction) and all( + (isinstance(expr, AppliedPredicate) or a.is_Atom) for a in expr.args): + return True + return False + + +def to_int_repr(clauses, symbols): + """ + Takes clauses in CNF format and puts them into an integer representation. + + Examples + ======== + + >>> from sympy.logic.boolalg import to_int_repr + >>> from sympy.abc import x, y + >>> to_int_repr([x | y, y], [x, y]) == [{1, 2}, {2}] + True + + """ + + # Convert the symbol list into a dict + symbols = dict(zip(symbols, range(1, len(symbols) + 1))) + + def append_symbol(arg, symbols): + if isinstance(arg, Not): + return -symbols[arg.args[0]] + else: + return symbols[arg] + + return [{append_symbol(arg, symbols) for arg in Or.make_args(c)} + for c in clauses] + + +def term_to_integer(term): + """ + Return an integer corresponding to the base-2 digits given by *term*. + + Parameters + ========== + + term : a string or list of ones and zeros + + Examples + ======== + + >>> from sympy.logic.boolalg import term_to_integer + >>> term_to_integer([1, 0, 0]) + 4 + >>> term_to_integer('100') + 4 + + """ + + return int(''.join(list(map(str, list(term)))), 2) + + +integer_to_term = ibin # XXX could delete? + + +def truth_table(expr, variables, input=True): + """ + Return a generator of all possible configurations of the input variables, + and the result of the boolean expression for those values. + + Parameters + ========== + + expr : Boolean expression + + variables : list of variables + + input : bool (default ``True``) + Indicates whether to return the input combinations. + + Examples + ======== + + >>> from sympy.logic.boolalg import truth_table + >>> from sympy.abc import x,y + >>> table = truth_table(x >> y, [x, y]) + >>> for t in table: + ... print('{0} -> {1}'.format(*t)) + [0, 0] -> True + [0, 1] -> True + [1, 0] -> False + [1, 1] -> True + + >>> table = truth_table(x | y, [x, y]) + >>> list(table) + [([0, 0], False), ([0, 1], True), ([1, 0], True), ([1, 1], True)] + + If ``input`` is ``False``, ``truth_table`` returns only a list of truth values. + In this case, the corresponding input values of variables can be + deduced from the index of a given output. + + >>> from sympy.utilities.iterables import ibin + >>> vars = [y, x] + >>> values = truth_table(x >> y, vars, input=False) + >>> values = list(values) + >>> values + [True, False, True, True] + + >>> for i, value in enumerate(values): + ... print('{0} -> {1}'.format(list(zip( + ... 
vars, ibin(i, len(vars)))), value)) + [(y, 0), (x, 0)] -> True + [(y, 0), (x, 1)] -> False + [(y, 1), (x, 0)] -> True + [(y, 1), (x, 1)] -> True + + """ + variables = [sympify(v) for v in variables] + + expr = sympify(expr) + if not isinstance(expr, BooleanFunction) and not is_literal(expr): + return + + table = product((0, 1), repeat=len(variables)) + for term in table: + value = expr.xreplace(dict(zip(variables, term))) + + if input: + yield list(term), value + else: + yield value + + +def _check_pair(minterm1, minterm2): + """ + Checks if a pair of minterms differs by only one bit. If yes, returns + index, else returns `-1`. + """ + # Early termination seems to be faster than list comprehension, + # at least for large examples. + index = -1 + for x, i in enumerate(minterm1): # zip(minterm1, minterm2) is slower + if i != minterm2[x]: + if index == -1: + index = x + else: + return -1 + return index + + +def _convert_to_varsSOP(minterm, variables): + """ + Converts a term in the expansion of a function from binary to its + variable form (for SOP). + """ + temp = [variables[n] if val == 1 else Not(variables[n]) + for n, val in enumerate(minterm) if val != 3] + return And(*temp) + + +def _convert_to_varsPOS(maxterm, variables): + """ + Converts a term in the expansion of a function from binary to its + variable form (for POS). + """ + temp = [variables[n] if val == 0 else Not(variables[n]) + for n, val in enumerate(maxterm) if val != 3] + return Or(*temp) + + +def _convert_to_varsANF(term, variables): + """ + Converts a term in the expansion of a function from binary to its + variable form (for ANF). + + Parameters + ========== + + term : list of 1's and 0's (complementation pattern) + variables : list of variables + + """ + temp = [variables[n] for n, t in enumerate(term) if t == 1] + + if not temp: + return true + + return And(*temp) + + +def _get_odd_parity_terms(n): + """ + Returns a list of lists, with all possible combinations of n zeros and ones + with an odd number of ones. + """ + return [e for e in [ibin(i, n) for i in range(2**n)] if sum(e) % 2 == 1] + + +def _get_even_parity_terms(n): + """ + Returns a list of lists, with all possible combinations of n zeros and ones + with an even number of ones. + """ + return [e for e in [ibin(i, n) for i in range(2**n)] if sum(e) % 2 == 0] + + +def _simplified_pairs(terms): + """ + Reduces a set of minterms, if possible, to a simplified set of minterms + with one less variable in the terms using QM method. 
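+
+ For instance (an illustrative call on a tiny input), two minterms that
+ differ in a single position collapse into one term, with ``3`` marking
+ the don't-care position:
+
+ >>> from sympy.logic.boolalg import _simplified_pairs
+ >>> _simplified_pairs([[0, 0], [0, 1]])
+ [[0, 3]]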
+ """ + if not terms: + return [] + + simplified_terms = [] + todo = list(range(len(terms))) + + # Count number of ones as _check_pair can only potentially match if there + # is at most a difference of a single one + termdict = defaultdict(list) + for n, term in enumerate(terms): + ones = sum([1 for t in term if t == 1]) + termdict[ones].append(n) + + variables = len(terms[0]) + for k in range(variables): + for i in termdict[k]: + for j in termdict[k+1]: + index = _check_pair(terms[i], terms[j]) + if index != -1: + # Mark terms handled + todo[i] = todo[j] = None + # Copy old term + newterm = terms[i][:] + # Set differing position to don't care + newterm[index] = 3 + # Add if not already there + if newterm not in simplified_terms: + simplified_terms.append(newterm) + + if simplified_terms: + # Further simplifications only among the new terms + simplified_terms = _simplified_pairs(simplified_terms) + + # Add remaining, non-simplified, terms + simplified_terms.extend([terms[i] for i in todo if i is not None]) + return simplified_terms + + +def _rem_redundancy(l1, terms): + """ + After the truth table has been sufficiently simplified, use the prime + implicant table method to recognize and eliminate redundant pairs, + and return the essential arguments. + """ + + if not terms: + return [] + + nterms = len(terms) + nl1 = len(l1) + + # Create dominating matrix + dommatrix = [[0]*nl1 for n in range(nterms)] + colcount = [0]*nl1 + rowcount = [0]*nterms + for primei, prime in enumerate(l1): + for termi, term in enumerate(terms): + # Check prime implicant covering term + if all(t == 3 or t == mt for t, mt in zip(prime, term)): + dommatrix[termi][primei] = 1 + colcount[primei] += 1 + rowcount[termi] += 1 + + # Keep track if anything changed + anythingchanged = True + # Then, go again + while anythingchanged: + anythingchanged = False + + for rowi in range(nterms): + # Still non-dominated? + if rowcount[rowi]: + row = dommatrix[rowi] + for row2i in range(nterms): + # Still non-dominated? + if rowi != row2i and rowcount[rowi] and (rowcount[rowi] <= rowcount[row2i]): + row2 = dommatrix[row2i] + if all(row2[n] >= row[n] for n in range(nl1)): + # row2 dominating row, remove row2 + rowcount[row2i] = 0 + anythingchanged = True + for primei, prime in enumerate(row2): + if prime: + # Make corresponding entry 0 + dommatrix[row2i][primei] = 0 + colcount[primei] -= 1 + + colcache = {} + + for coli in range(nl1): + # Still non-dominated? + if colcount[coli]: + if coli in colcache: + col = colcache[coli] + else: + col = [dommatrix[i][coli] for i in range(nterms)] + colcache[coli] = col + for col2i in range(nl1): + # Still non-dominated? 
+ if coli != col2i and colcount[col2i] and (colcount[coli] >= colcount[col2i]): + if col2i in colcache: + col2 = colcache[col2i] + else: + col2 = [dommatrix[i][col2i] for i in range(nterms)] + colcache[col2i] = col2 + if all(col[n] >= col2[n] for n in range(nterms)): + # col dominating col2, remove col2 + colcount[col2i] = 0 + anythingchanged = True + for termi, term in enumerate(col2): + if term and dommatrix[termi][col2i]: + # Make corresponding entry 0 + dommatrix[termi][col2i] = 0 + rowcount[termi] -= 1 + + if not anythingchanged: + # Heuristically select the prime implicant covering most terms + maxterms = 0 + bestcolidx = -1 + for coli in range(nl1): + s = colcount[coli] + if s > maxterms: + bestcolidx = coli + maxterms = s + + # In case we found a prime implicant covering at least two terms + if bestcolidx != -1 and maxterms > 1: + for primei, prime in enumerate(l1): + if primei != bestcolidx: + for termi, term in enumerate(colcache[bestcolidx]): + if term and dommatrix[termi][primei]: + # Make corresponding entry 0 + dommatrix[termi][primei] = 0 + anythingchanged = True + rowcount[termi] -= 1 + colcount[primei] -= 1 + + return [l1[i] for i in range(nl1) if colcount[i]] + + +def _input_to_binlist(inputlist, variables): + binlist = [] + bits = len(variables) + for val in inputlist: + if isinstance(val, int): + binlist.append(ibin(val, bits)) + elif isinstance(val, dict): + nonspecvars = list(variables) + for key in val.keys(): + nonspecvars.remove(key) + for t in product((0, 1), repeat=len(nonspecvars)): + d = dict(zip(nonspecvars, t)) + d.update(val) + binlist.append([d[v] for v in variables]) + elif isinstance(val, (list, tuple)): + if len(val) != bits: + raise ValueError("Each term must contain {bits} bits as there are" + "\n{bits} variables (or be an integer)." + "".format(bits=bits)) + binlist.append(list(val)) + else: + raise TypeError("A term list can only contain lists," + " ints or dicts.") + return binlist + + +def SOPform(variables, minterms, dontcares=None): + """ + The SOPform function uses simplified_pairs and a redundant group- + eliminating algorithm to convert the list of all input combos that + generate '1' (the minterms) into the smallest sum-of-products form. + + The variables must be given as the first argument. + + Return a logical :py:class:`~.Or` function (i.e., the "sum of products" or + "SOP" form) that gives the desired outcome. If there are inputs that can + be ignored, pass them as a list, too. + + The result will be one of the (perhaps many) functions that satisfy + the conditions. + + Examples + ======== + + >>> from sympy.logic import SOPform + >>> from sympy import symbols + >>> w, x, y, z = symbols('w x y z') + >>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1], + ... 
[0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1]] + >>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]] + >>> SOPform([w, x, y, z], minterms, dontcares) + (y & z) | (~w & ~x) + + The terms can also be represented as integers: + + >>> minterms = [1, 3, 7, 11, 15] + >>> dontcares = [0, 2, 5] + >>> SOPform([w, x, y, z], minterms, dontcares) + (y & z) | (~w & ~x) + + They can also be specified using dicts, which does not have to be fully + specified: + + >>> minterms = [{w: 0, x: 1}, {y: 1, z: 1, x: 0}] + >>> SOPform([w, x, y, z], minterms) + (x & ~w) | (y & z & ~x) + + Or a combination: + + >>> minterms = [4, 7, 11, [1, 1, 1, 1]] + >>> dontcares = [{w : 0, x : 0, y: 0}, 5] + >>> SOPform([w, x, y, z], minterms, dontcares) + (w & y & z) | (~w & ~y) | (x & z & ~w) + + See also + ======== + + POSform + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Quine-McCluskey_algorithm + .. [2] https://en.wikipedia.org/wiki/Don%27t-care_term + + """ + if not minterms: + return false + + variables = tuple(map(sympify, variables)) + + + minterms = _input_to_binlist(minterms, variables) + dontcares = _input_to_binlist((dontcares or []), variables) + for d in dontcares: + if d in minterms: + raise ValueError('%s in minterms is also in dontcares' % d) + + return _sop_form(variables, minterms, dontcares) + + +def _sop_form(variables, minterms, dontcares): + new = _simplified_pairs(minterms + dontcares) + essential = _rem_redundancy(new, minterms) + return Or(*[_convert_to_varsSOP(x, variables) for x in essential]) + + +def POSform(variables, minterms, dontcares=None): + """ + The POSform function uses simplified_pairs and a redundant-group + eliminating algorithm to convert the list of all input combinations + that generate '1' (the minterms) into the smallest product-of-sums form. + + The variables must be given as the first argument. + + Return a logical :py:class:`~.And` function (i.e., the "product of sums" + or "POS" form) that gives the desired outcome. If there are inputs that can + be ignored, pass them as a list, too. + + The result will be one of the (perhaps many) functions that satisfy + the conditions. + + Examples + ======== + + >>> from sympy.logic import POSform + >>> from sympy import symbols + >>> w, x, y, z = symbols('w x y z') + >>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1], + ... [1, 0, 1, 1], [1, 1, 1, 1]] + >>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]] + >>> POSform([w, x, y, z], minterms, dontcares) + z & (y | ~w) + + The terms can also be represented as integers: + + >>> minterms = [1, 3, 7, 11, 15] + >>> dontcares = [0, 2, 5] + >>> POSform([w, x, y, z], minterms, dontcares) + z & (y | ~w) + + They can also be specified using dicts, which does not have to be fully + specified: + + >>> minterms = [{w: 0, x: 1}, {y: 1, z: 1, x: 0}] + >>> POSform([w, x, y, z], minterms) + (x | y) & (x | z) & (~w | ~x) + + Or a combination: + + >>> minterms = [4, 7, 11, [1, 1, 1, 1]] + >>> dontcares = [{w : 0, x : 0, y: 0}, 5] + >>> POSform([w, x, y, z], minterms, dontcares) + (w | x) & (y | ~w) & (z | ~y) + + See also + ======== + + SOPform + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Quine-McCluskey_algorithm + .. 
[2] https://en.wikipedia.org/wiki/Don%27t-care_term + + """ + if not minterms: + return false + + variables = tuple(map(sympify, variables)) + minterms = _input_to_binlist(minterms, variables) + dontcares = _input_to_binlist((dontcares or []), variables) + for d in dontcares: + if d in minterms: + raise ValueError('%s in minterms is also in dontcares' % d) + + maxterms = [] + for t in product((0, 1), repeat=len(variables)): + t = list(t) + if (t not in minterms) and (t not in dontcares): + maxterms.append(t) + + new = _simplified_pairs(maxterms + dontcares) + essential = _rem_redundancy(new, maxterms) + return And(*[_convert_to_varsPOS(x, variables) for x in essential]) + + +def ANFform(variables, truthvalues): + """ + The ANFform function converts the list of truth values to + Algebraic Normal Form (ANF). + + The variables must be given as the first argument. + + Return True, False, logical :py:class:`~.And` function (i.e., the + "Zhegalkin monomial") or logical :py:class:`~.Xor` function (i.e., + the "Zhegalkin polynomial"). When True and False + are represented by 1 and 0, respectively, then + :py:class:`~.And` is multiplication and :py:class:`~.Xor` is addition. + + Formally a "Zhegalkin monomial" is the product (logical + And) of a finite set of distinct variables, including + the empty set whose product is denoted 1 (True). + A "Zhegalkin polynomial" is the sum (logical Xor) of a + set of Zhegalkin monomials, with the empty set denoted + by 0 (False). + + Parameters + ========== + + variables : list of variables + truthvalues : list of 1's and 0's (result column of truth table) + + Examples + ======== + >>> from sympy.logic.boolalg import ANFform + >>> from sympy.abc import x, y + >>> ANFform([x], [1, 0]) + x ^ True + >>> ANFform([x, y], [0, 1, 1, 1]) + x ^ y ^ (x & y) + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Zhegalkin_polynomial + + """ + + n_vars = len(variables) + n_values = len(truthvalues) + + if n_values != 2 ** n_vars: + raise ValueError("The number of truth values must be equal to 2^%d, " + "got %d" % (n_vars, n_values)) + + variables = tuple(map(sympify, variables)) + + coeffs = anf_coeffs(truthvalues) + terms = [] + + for i, t in enumerate(product((0, 1), repeat=n_vars)): + if coeffs[i] == 1: + terms.append(t) + + return Xor(*[_convert_to_varsANF(x, variables) for x in terms], + remove_true=False) + + +def anf_coeffs(truthvalues): + """ + Convert a list of truth values of some boolean expression + to the list of coefficients of the polynomial mod 2 (exclusive + disjunction) representing the boolean expression in ANF + (i.e., the "Zhegalkin polynomial"). + + There are `2^n` possible Zhegalkin monomials in `n` variables, since + each monomial is fully specified by the presence or absence of + each variable. + + We can enumerate all the monomials. For example, boolean + function with four variables ``(a, b, c, d)`` can contain + up to `2^4 = 16` monomials. The 13-th monomial is the + product ``a & b & d``, because 13 in binary is 1, 1, 0, 1. + + A given monomial's presence or absence in a polynomial corresponds + to that monomial's coefficient being 1 or 0 respectively. + + Examples + ======== + >>> from sympy.logic.boolalg import anf_coeffs, bool_monomial, Xor + >>> from sympy.abc import a, b, c + >>> truthvalues = [0, 1, 1, 0, 0, 1, 0, 1] + >>> coeffs = anf_coeffs(truthvalues) + >>> coeffs + [0, 1, 1, 0, 0, 0, 1, 0] + >>> polynomial = Xor(*[ + ... bool_monomial(k, [a, b, c]) + ... for k, coeff in enumerate(coeffs) if coeff == 1 + ... 
]) + >>> polynomial + b ^ c ^ (a & b) + + """ + + s = '{:b}'.format(len(truthvalues)) + n = len(s) - 1 + + if len(truthvalues) != 2**n: + raise ValueError("The number of truth values must be a power of two, " + "got %d" % len(truthvalues)) + + coeffs = [[v] for v in truthvalues] + + for i in range(n): + tmp = [] + for j in range(2 ** (n-i-1)): + tmp.append(coeffs[2*j] + + list(map(lambda x, y: x^y, coeffs[2*j], coeffs[2*j+1]))) + coeffs = tmp + + return coeffs[0] + + +def bool_minterm(k, variables): + """ + Return the k-th minterm. + + Minterms are numbered by a binary encoding of the complementation + pattern of the variables. This convention assigns the value 1 to + the direct form and 0 to the complemented form. + + Parameters + ========== + + k : int or list of 1's and 0's (complementation pattern) + variables : list of variables + + Examples + ======== + + >>> from sympy.logic.boolalg import bool_minterm + >>> from sympy.abc import x, y, z + >>> bool_minterm([1, 0, 1], [x, y, z]) + x & z & ~y + >>> bool_minterm(6, [x, y, z]) + x & y & ~z + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Canonical_normal_form#Indexing_minterms + + """ + if isinstance(k, int): + k = ibin(k, len(variables)) + variables = tuple(map(sympify, variables)) + return _convert_to_varsSOP(k, variables) + + +def bool_maxterm(k, variables): + """ + Return the k-th maxterm. + + Each maxterm is assigned an index based on the opposite + conventional binary encoding used for minterms. The maxterm + convention assigns the value 0 to the direct form and 1 to + the complemented form. + + Parameters + ========== + + k : int or list of 1's and 0's (complementation pattern) + variables : list of variables + + Examples + ======== + >>> from sympy.logic.boolalg import bool_maxterm + >>> from sympy.abc import x, y, z + >>> bool_maxterm([1, 0, 1], [x, y, z]) + y | ~x | ~z + >>> bool_maxterm(6, [x, y, z]) + z | ~x | ~y + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Canonical_normal_form#Indexing_maxterms + + """ + if isinstance(k, int): + k = ibin(k, len(variables)) + variables = tuple(map(sympify, variables)) + return _convert_to_varsPOS(k, variables) + + +def bool_monomial(k, variables): + """ + Return the k-th monomial. + + Monomials are numbered by a binary encoding of the presence and + absences of the variables. This convention assigns the value + 1 to the presence of variable and 0 to the absence of variable. + + Each boolean function can be uniquely represented by a + Zhegalkin Polynomial (Algebraic Normal Form). The Zhegalkin + Polynomial of the boolean function with `n` variables can contain + up to `2^n` monomials. We can enumerate all the monomials. + Each monomial is fully specified by the presence or absence + of each variable. + + For example, boolean function with four variables ``(a, b, c, d)`` + can contain up to `2^4 = 16` monomials. The 13-th monomial is the + product ``a & b & d``, because 13 in binary is 1, 1, 0, 1. + + Parameters + ========== + + k : int or list of 1's and 0's + variables : list of variables + + Examples + ======== + >>> from sympy.logic.boolalg import bool_monomial + >>> from sympy.abc import x, y, z + >>> bool_monomial([1, 0, 1], [x, y, z]) + x & z + >>> bool_monomial(6, [x, y, z]) + x & y + + """ + if isinstance(k, int): + k = ibin(k, len(variables)) + variables = tuple(map(sympify, variables)) + return _convert_to_varsANF(k, variables) + + +def _find_predicates(expr): + """Helper to find logical predicates in BooleanFunctions. 
+ + A logical predicate is defined here as anything within a BooleanFunction + that is not a BooleanFunction itself. + + """ + if not isinstance(expr, BooleanFunction): + return {expr} + return set().union(*(map(_find_predicates, expr.args))) + + +def simplify_logic(expr, form=None, deep=True, force=False, dontcare=None): + """ + This function simplifies a boolean function to its simplified version + in SOP or POS form. The return type is an :py:class:`~.Or` or + :py:class:`~.And` object in SymPy. + + Parameters + ========== + + expr : Boolean + + form : string (``'cnf'`` or ``'dnf'``) or ``None`` (default). + If ``'cnf'`` or ``'dnf'``, the simplest expression in the corresponding + normal form is returned; if ``None``, the answer is returned + according to the form with fewest args (in CNF by default). + + deep : bool (default ``True``) + Indicates whether to recursively simplify any + non-boolean functions contained within the input. + + force : bool (default ``False``) + As the simplifications require exponential time in the number + of variables, there is by default a limit on expressions with + 8 variables. When the expression has more than 8 variables + only symbolical simplification (controlled by ``deep``) is + made. By setting ``force`` to ``True``, this limit is removed. Be + aware that this can lead to very long simplification times. + + dontcare : Boolean + Optimize expression under the assumption that inputs where this + expression is true are don't care. This is useful in e.g. Piecewise + conditions, where later conditions do not need to consider inputs that + are converted by previous conditions. For example, if a previous + condition is ``And(A, B)``, the simplification of expr can be made + with don't cares for ``And(A, B)``. + + Examples + ======== + + >>> from sympy.logic import simplify_logic + >>> from sympy.abc import x, y, z + >>> b = (~x & ~y & ~z) | ( ~x & ~y & z) + >>> simplify_logic(b) + ~x & ~y + >>> simplify_logic(x | y, dontcare=y) + x + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Don%27t-care_term + + """ + + if form not in (None, 'cnf', 'dnf'): + raise ValueError("form can be cnf or dnf only") + expr = sympify(expr) + # check for quick exit if form is given: right form and all args are + # literal and do not involve Not + if form: + form_ok = False + if form == 'cnf': + form_ok = is_cnf(expr) + elif form == 'dnf': + form_ok = is_dnf(expr) + + if form_ok and all(is_literal(a) + for a in expr.args): + return expr + from sympy.core.relational import Relational + if deep: + variables = expr.atoms(Relational) + from sympy.simplify.simplify import simplify + s = tuple(map(simplify, variables)) + expr = expr.xreplace(dict(zip(variables, s))) + if not isinstance(expr, BooleanFunction): + return expr + # Replace Relationals with Dummys to possibly + # reduce the number of variables + repl = {} + undo = {} + from sympy.core.symbol import Dummy + variables = expr.atoms(Relational) + if dontcare is not None: + dontcare = sympify(dontcare) + variables.update(dontcare.atoms(Relational)) + while variables: + var = variables.pop() + if var.is_Relational: + d = Dummy() + undo[d] = var + repl[var] = d + nvar = var.negated + if nvar in variables: + repl[nvar] = Not(d) + variables.remove(nvar) + + expr = expr.xreplace(repl) + + if dontcare is not None: + dontcare = dontcare.xreplace(repl) + + # Get new variables after replacing + variables = _find_predicates(expr) + if not force and len(variables) > 8: + return expr.xreplace(undo) + if dontcare is not None: + # Add variables from dontcare + dcvariables = _find_predicates(dontcare) + variables.update(dcvariables) + # if too many restore to variables only + if not force and len(variables) > 8: + variables = _find_predicates(expr) + dontcare = None + # group into constants and variable values + c, v = sift(ordered(variables), lambda x: x in (True, False), binary=True) + variables = c + v + # standardize constants to be 1 or 0 in keeping with truthtable + c = [1 if i == True else 0 for i in c] + truthtable = _get_truthtable(v, expr, c) + if dontcare is not None: + dctruthtable = _get_truthtable(v, dontcare, c) + truthtable = [t for t in truthtable if t not in dctruthtable] + else: + dctruthtable = [] + big = len(truthtable) >= (2 ** (len(variables) - 1)) + if form == 'dnf' or form is None and big: + return _sop_form(variables, truthtable, dctruthtable).xreplace(undo) + return POSform(variables, truthtable, dctruthtable).xreplace(undo) + + +def _get_truthtable(variables, expr, const): + """ Return a list of all combinations leading to a True result for ``expr``. + """ + _variables = variables.copy() + def _get_tt(inputs): + if _variables: + v = _variables.pop() + tab = [[i[0].xreplace({v: false}), [0] + i[1]] for i in inputs if i[0] is not false] + tab.extend([[i[0].xreplace({v: true}), [1] + i[1]] for i in inputs if i[0] is not false]) + return _get_tt(tab) + return inputs + res = [const + k[1] for k in _get_tt([[expr, []]]) if k[0]] + if res == [[]]: + return [] + else: + return res + + +def _finger(eq): + """ + Assign a 5-item fingerprint to each symbol in the equation: + [ + # of times it appeared as a Symbol; + # of times it appeared as a Not(symbol); + # of times it appeared as a Symbol in an And or Or; + # of times it appeared as a Not(Symbol) in an And or Or; + a sorted tuple of tuples, (i, j, k), where i is the number of arguments + in an And or Or with which it appeared as a Symbol, and j is + the number of arguments that were Not(Symbol); k is the number + of times that (i, j) was seen. 
+ ] + + Examples + ======== + + >>> from sympy.logic.boolalg import _finger as finger + >>> from sympy import And, Or, Not, Xor, to_cnf, symbols + >>> from sympy.abc import a, b, x, y + >>> eq = Or(And(Not(y), a), And(Not(y), b), And(x, y)) + >>> dict(finger(eq)) + {(0, 0, 1, 0, ((2, 0, 1),)): [x], + (0, 0, 1, 0, ((2, 1, 1),)): [a, b], + (0, 0, 1, 2, ((2, 0, 1),)): [y]} + >>> dict(finger(x & ~y)) + {(0, 1, 0, 0, ()): [y], (1, 0, 0, 0, ()): [x]} + + In the following, the (5, 2, 6) means that there were 6 Or + functions in which a symbol appeared as itself amongst 5 arguments in + which there were also 2 negated symbols, e.g. ``(a0 | a1 | a2 | ~a3 | ~a4)`` + is counted once for a0, a1 and a2. + + >>> dict(finger(to_cnf(Xor(*symbols('a:5'))))) + {(0, 0, 8, 8, ((5, 0, 1), (5, 2, 6), (5, 4, 1))): [a0, a1, a2, a3, a4]} + + The equation must not have more than one level of nesting: + + >>> dict(finger(And(Or(x, y), y))) + {(0, 0, 1, 0, ((2, 0, 1),)): [x], (1, 0, 1, 0, ((2, 0, 1),)): [y]} + >>> dict(finger(And(Or(x, And(a, x)), y))) + Traceback (most recent call last): + ... + NotImplementedError: unexpected level of nesting + + So y and x have unique fingerprints, but a and b do not. + """ + f = eq.free_symbols + d = dict(list(zip(f, [[0]*4 + [defaultdict(int)] for fi in f]))) + for a in eq.args: + if a.is_Symbol: + d[a][0] += 1 + elif a.is_Not: + d[a.args[0]][1] += 1 + else: + o = len(a.args), sum(isinstance(ai, Not) for ai in a.args) + for ai in a.args: + if ai.is_Symbol: + d[ai][2] += 1 + d[ai][-1][o] += 1 + elif ai.is_Not: + d[ai.args[0]][3] += 1 + else: + raise NotImplementedError('unexpected level of nesting') + inv = defaultdict(list) + for k, v in ordered(iter(d.items())): + v[-1] = tuple(sorted([i + (j,) for i, j in v[-1].items()])) + inv[tuple(v)].append(k) + return inv + + +def bool_map(bool1, bool2): + """ + Return the simplified version of *bool1*, and the mapping of variables + that makes the two expressions *bool1* and *bool2* represent the same + logical behaviour for some correspondence between the variables + of each. + If more than one mappings of this sort exist, one of them + is returned. + + For example, ``And(x, y)`` is logically equivalent to ``And(a, b)`` for + the mapping ``{x: a, y: b}`` or ``{x: b, y: a}``. + If no such mapping exists, return ``False``. + + Examples + ======== + + >>> from sympy import SOPform, bool_map, Or, And, Not, Xor + >>> from sympy.abc import w, x, y, z, a, b, c, d + >>> function1 = SOPform([x, z, y],[[1, 0, 1], [0, 0, 1]]) + >>> function2 = SOPform([a, b, c],[[1, 0, 1], [1, 0, 0]]) + >>> bool_map(function1, function2) + (y & ~z, {y: a, z: b}) + + The results are not necessarily unique, but they are canonical. Here, + ``(w, z)`` could be ``(a, d)`` or ``(d, a)``: + + >>> eq = Or(And(Not(y), w), And(Not(y), z), And(x, y)) + >>> eq2 = Or(And(Not(c), a), And(Not(c), d), And(b, c)) + >>> bool_map(eq, eq2) + ((x & y) | (w & ~y) | (z & ~y), {w: a, x: b, y: c, z: d}) + >>> eq = And(Xor(a, b), c, And(c,d)) + >>> bool_map(eq, eq.subs(c, x)) + (c & d & (a | b) & (~a | ~b), {a: a, b: b, c: d, d: x}) + + """ + + def match(function1, function2): + """Return the mapping that equates variables between two + simplified boolean expressions if possible. + + By "simplified" we mean that a function has been denested + and is either an And (or an Or) whose arguments are either + symbols (x), negated symbols (Not(x)), or Or (or an And) whose + arguments are only symbols or negated symbols. For example, + ``And(x, Not(y), Or(w, Not(z)))``. 
+ + Basic.match is not robust enough (see issue 4835) so this is + a workaround that is valid for simplified boolean expressions + """ + + # do some quick checks + if function1.__class__ != function2.__class__: + return None # maybe simplification makes them the same? + if len(function1.args) != len(function2.args): + return None # maybe simplification makes them the same? + if function1.is_Symbol: + return {function1: function2} + + # get the fingerprint dictionaries + f1 = _finger(function1) + f2 = _finger(function2) + + # more quick checks + if len(f1) != len(f2): + return False + + # assemble the match dictionary if possible + matchdict = {} + for k in f1.keys(): + if k not in f2: + return False + if len(f1[k]) != len(f2[k]): + return False + for i, x in enumerate(f1[k]): + matchdict[x] = f2[k][i] + return matchdict + + a = simplify_logic(bool1) + b = simplify_logic(bool2) + m = match(a, b) + if m: + return a, m + return m + + +def _apply_patternbased_simplification(rv, patterns, measure, + dominatingvalue, + replacementvalue=None, + threeterm_patterns=None): + """ + Replace patterns of Relational + + Parameters + ========== + + rv : Expr + Boolean expression + + patterns : tuple + Tuple of tuples, with (pattern to simplify, simplified pattern) with + two terms. + + measure : function + Simplification measure. + + dominatingvalue : Boolean or ``None`` + The dominating value for the function of consideration. + For example, for :py:class:`~.And` ``S.false`` is dominating. + As soon as one expression is ``S.false`` in :py:class:`~.And`, + the whole expression is ``S.false``. + + replacementvalue : Boolean or ``None``, optional + The resulting value for the whole expression if one argument + evaluates to ``dominatingvalue``. + For example, for :py:class:`~.Nand` ``S.false`` is dominating, but + in this case the resulting value is ``S.true``. Default is ``None``. + If ``replacementvalue`` is ``None`` and ``dominatingvalue`` is not + ``None``, ``replacementvalue = dominatingvalue``. + + threeterm_patterns : tuple, optional + Tuple of tuples, with (pattern to simplify, simplified pattern) with + three terms. 
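+
+    Notes
+    =====
+
+    For illustration, the entries of ``patterns`` are pairs such as
+    ``(Tuple(Le(b, a), Le(a, b)), Eq(a, b))`` (compare the tuples returned
+    by ``_simplify_patterns_and``): if two relational arguments of the
+    function under consideration match ``b <= a`` and ``a <= b`` for some
+    ``Wild`` symbols ``a`` and ``b``, they may be replaced by ``Eq(a, b)``
+    provided ``measure`` reports a saving.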
+ + """ + from sympy.core.relational import Relational, _canonical + + if replacementvalue is None and dominatingvalue is not None: + replacementvalue = dominatingvalue + # Use replacement patterns for Relationals + Rel, nonRel = sift(rv.args, lambda i: isinstance(i, Relational), + binary=True) + if len(Rel) <= 1: + return rv + Rel, nonRealRel = sift(Rel, lambda i: not any(s.is_real is False + for s in i.free_symbols), + binary=True) + Rel = [i.canonical for i in Rel] + + if threeterm_patterns and len(Rel) >= 3: + Rel = _apply_patternbased_threeterm_simplification(Rel, + threeterm_patterns, rv.func, dominatingvalue, + replacementvalue, measure) + + Rel = _apply_patternbased_twoterm_simplification(Rel, patterns, + rv.func, dominatingvalue, replacementvalue, measure) + + rv = rv.func(*([_canonical(i) for i in ordered(Rel)] + + nonRel + nonRealRel)) + return rv + + +def _apply_patternbased_twoterm_simplification(Rel, patterns, func, + dominatingvalue, + replacementvalue, + measure): + """ Apply pattern-based two-term simplification.""" + from sympy.functions.elementary.miscellaneous import Min, Max + from sympy.core.relational import Ge, Gt, _Inequality + changed = True + while changed and len(Rel) >= 2: + changed = False + # Use only < or <= + Rel = [r.reversed if isinstance(r, (Ge, Gt)) else r for r in Rel] + # Sort based on ordered + Rel = list(ordered(Rel)) + # Eq and Ne must be tested reversed as well + rtmp = [(r, ) if isinstance(r, _Inequality) else (r, r.reversed) for r in Rel] + # Create a list of possible replacements + results = [] + # Try all combinations of possibly reversed relational + for ((i, pi), (j, pj)) in combinations(enumerate(rtmp), 2): + for pattern, simp in patterns: + res = [] + for p1, p2 in product(pi, pj): + # use SymPy matching + oldexpr = Tuple(p1, p2) + tmpres = oldexpr.match(pattern) + if tmpres: + res.append((tmpres, oldexpr)) + + if res: + for tmpres, oldexpr in res: + # we have a matching, compute replacement + np = simp.xreplace(tmpres) + if np == dominatingvalue: + # if dominatingvalue, the whole expression + # will be replacementvalue + return [replacementvalue] + # add replacement + if not isinstance(np, ITE) and not np.has(Min, Max): + # We only want to use ITE and Min/Max replacements if + # they simplify to a relational + costsaving = measure(func(*oldexpr.args)) - measure(np) + if costsaving > 0: + results.append((costsaving, ([i, j], np))) + if results: + # Sort results based on complexity + results = sorted(results, + key=lambda pair: pair[0], reverse=True) + # Replace the one providing most simplification + replacement = results[0][1] + idx, newrel = replacement + idx.sort() + # Remove the old relationals + for index in reversed(idx): + del Rel[index] + if dominatingvalue is None or newrel != Not(dominatingvalue): + # Insert the new one (no need to insert a value that will + # not affect the result) + if newrel.func == func: + for a in newrel.args: + Rel.append(a) + else: + Rel.append(newrel) + # We did change something so try again + changed = True + return Rel + + +def _apply_patternbased_threeterm_simplification(Rel, patterns, func, + dominatingvalue, + replacementvalue, + measure): + """ Apply pattern-based three-term simplification.""" + from sympy.functions.elementary.miscellaneous import Min, Max + from sympy.core.relational import Le, Lt, _Inequality + changed = True + while changed and len(Rel) >= 3: + changed = False + # Use only > or >= + Rel = [r.reversed if isinstance(r, (Le, Lt)) else r for r in Rel] + # Sort based on ordered + Rel 
= list(ordered(Rel)) + # Create a list of possible replacements + results = [] + # Eq and Ne must be tested reversed as well + rtmp = [(r, ) if isinstance(r, _Inequality) else (r, r.reversed) for r in Rel] + # Try all combinations of possibly reversed relational + for ((i, pi), (j, pj), (k, pk)) in permutations(enumerate(rtmp), 3): + for pattern, simp in patterns: + res = [] + for p1, p2, p3 in product(pi, pj, pk): + # use SymPy matching + oldexpr = Tuple(p1, p2, p3) + tmpres = oldexpr.match(pattern) + if tmpres: + res.append((tmpres, oldexpr)) + + if res: + for tmpres, oldexpr in res: + # we have a matching, compute replacement + np = simp.xreplace(tmpres) + if np == dominatingvalue: + # if dominatingvalue, the whole expression + # will be replacementvalue + return [replacementvalue] + # add replacement + if not isinstance(np, ITE) and not np.has(Min, Max): + # We only want to use ITE and Min/Max replacements if + # they simplify to a relational + costsaving = measure(func(*oldexpr.args)) - measure(np) + if costsaving > 0: + results.append((costsaving, ([i, j, k], np))) + if results: + # Sort results based on complexity + results = sorted(results, + key=lambda pair: pair[0], reverse=True) + # Replace the one providing most simplification + replacement = results[0][1] + idx, newrel = replacement + idx.sort() + # Remove the old relationals + for index in reversed(idx): + del Rel[index] + if dominatingvalue is None or newrel != Not(dominatingvalue): + # Insert the new one (no need to insert a value that will + # not affect the result) + if newrel.func == func: + for a in newrel.args: + Rel.append(a) + else: + Rel.append(newrel) + # We did change something so try again + changed = True + return Rel + + +@cacheit +def _simplify_patterns_and(): + """ Two-term patterns for And.""" + + from sympy.core import Wild + from sympy.core.relational import Eq, Ne, Ge, Gt, Le, Lt + from sympy.functions.elementary.complexes import Abs + from sympy.functions.elementary.miscellaneous import Min, Max + a = Wild('a') + b = Wild('b') + c = Wild('c') + # Relationals patterns should be in alphabetical order + # (pattern1, pattern2, simplified) + # Do not use Ge, Gt + _matchers_and = ((Tuple(Eq(a, b), Lt(a, b)), false), + #(Tuple(Eq(a, b), Lt(b, a)), S.false), + #(Tuple(Le(b, a), Lt(a, b)), S.false), + #(Tuple(Lt(b, a), Le(a, b)), S.false), + (Tuple(Lt(b, a), Lt(a, b)), false), + (Tuple(Eq(a, b), Le(b, a)), Eq(a, b)), + #(Tuple(Eq(a, b), Le(a, b)), Eq(a, b)), + #(Tuple(Le(b, a), Lt(b, a)), Gt(a, b)), + (Tuple(Le(b, a), Le(a, b)), Eq(a, b)), + #(Tuple(Le(b, a), Ne(a, b)), Gt(a, b)), + #(Tuple(Lt(b, a), Ne(a, b)), Gt(a, b)), + (Tuple(Le(a, b), Lt(a, b)), Lt(a, b)), + (Tuple(Le(a, b), Ne(a, b)), Lt(a, b)), + (Tuple(Lt(a, b), Ne(a, b)), Lt(a, b)), + # Sign + (Tuple(Eq(a, b), Eq(a, -b)), And(Eq(a, S.Zero), Eq(b, S.Zero))), + # Min/Max/ITE + (Tuple(Le(b, a), Le(c, a)), Ge(a, Max(b, c))), + (Tuple(Le(b, a), Lt(c, a)), ITE(b > c, Ge(a, b), Gt(a, c))), + (Tuple(Lt(b, a), Lt(c, a)), Gt(a, Max(b, c))), + (Tuple(Le(a, b), Le(a, c)), Le(a, Min(b, c))), + (Tuple(Le(a, b), Lt(a, c)), ITE(b < c, Le(a, b), Lt(a, c))), + (Tuple(Lt(a, b), Lt(a, c)), Lt(a, Min(b, c))), + (Tuple(Le(a, b), Le(c, a)), ITE(Eq(b, c), Eq(a, b), ITE(b < c, false, And(Le(a, b), Ge(a, c))))), + (Tuple(Le(c, a), Le(a, b)), ITE(Eq(b, c), Eq(a, b), ITE(b < c, false, And(Le(a, b), Ge(a, c))))), + (Tuple(Lt(a, b), Lt(c, a)), ITE(b < c, false, And(Lt(a, b), Gt(a, c)))), + (Tuple(Lt(c, a), Lt(a, b)), ITE(b < c, false, And(Lt(a, b), Gt(a, c)))), + (Tuple(Le(a, b), 
Lt(c, a)), ITE(b <= c, false, And(Le(a, b), Gt(a, c)))), + (Tuple(Le(c, a), Lt(a, b)), ITE(b <= c, false, And(Lt(a, b), Ge(a, c)))), + (Tuple(Eq(a, b), Eq(a, c)), ITE(Eq(b, c), Eq(a, b), false)), + (Tuple(Lt(a, b), Lt(-b, a)), ITE(b > 0, Lt(Abs(a), b), false)), + (Tuple(Le(a, b), Le(-b, a)), ITE(b >= 0, Le(Abs(a), b), false)), + ) + return _matchers_and + + +@cacheit +def _simplify_patterns_and3(): + """ Three-term patterns for And.""" + + from sympy.core import Wild + from sympy.core.relational import Eq, Ge, Gt + + a = Wild('a') + b = Wild('b') + c = Wild('c') + # Relationals patterns should be in alphabetical order + # (pattern1, pattern2, pattern3, simplified) + # Do not use Le, Lt + _matchers_and = ((Tuple(Ge(a, b), Ge(b, c), Gt(c, a)), false), + (Tuple(Ge(a, b), Gt(b, c), Gt(c, a)), false), + (Tuple(Gt(a, b), Gt(b, c), Gt(c, a)), false), + # (Tuple(Ge(c, a), Gt(a, b), Gt(b, c)), S.false), + # Lower bound relations + # Commented out combinations that does not simplify + (Tuple(Ge(a, b), Ge(a, c), Ge(b, c)), And(Ge(a, b), Ge(b, c))), + (Tuple(Ge(a, b), Ge(a, c), Gt(b, c)), And(Ge(a, b), Gt(b, c))), + # (Tuple(Ge(a, b), Gt(a, c), Ge(b, c)), And(Ge(a, b), Ge(b, c))), + (Tuple(Ge(a, b), Gt(a, c), Gt(b, c)), And(Ge(a, b), Gt(b, c))), + # (Tuple(Gt(a, b), Ge(a, c), Ge(b, c)), And(Gt(a, b), Ge(b, c))), + (Tuple(Ge(a, c), Gt(a, b), Gt(b, c)), And(Gt(a, b), Gt(b, c))), + (Tuple(Ge(b, c), Gt(a, b), Gt(a, c)), And(Gt(a, b), Ge(b, c))), + (Tuple(Gt(a, b), Gt(a, c), Gt(b, c)), And(Gt(a, b), Gt(b, c))), + # Upper bound relations + # Commented out combinations that does not simplify + (Tuple(Ge(b, a), Ge(c, a), Ge(b, c)), And(Ge(c, a), Ge(b, c))), + (Tuple(Ge(b, a), Ge(c, a), Gt(b, c)), And(Ge(c, a), Gt(b, c))), + # (Tuple(Ge(b, a), Gt(c, a), Ge(b, c)), And(Gt(c, a), Ge(b, c))), + (Tuple(Ge(b, a), Gt(c, a), Gt(b, c)), And(Gt(c, a), Gt(b, c))), + # (Tuple(Gt(b, a), Ge(c, a), Ge(b, c)), And(Ge(c, a), Ge(b, c))), + (Tuple(Ge(c, a), Gt(b, a), Gt(b, c)), And(Ge(c, a), Gt(b, c))), + (Tuple(Ge(b, c), Gt(b, a), Gt(c, a)), And(Gt(c, a), Ge(b, c))), + (Tuple(Gt(b, a), Gt(c, a), Gt(b, c)), And(Gt(c, a), Gt(b, c))), + # Circular relation + (Tuple(Ge(a, b), Ge(b, c), Ge(c, a)), And(Eq(a, b), Eq(b, c))), + ) + return _matchers_and + + +@cacheit +def _simplify_patterns_or(): + """ Two-term patterns for Or.""" + + from sympy.core import Wild + from sympy.core.relational import Eq, Ne, Ge, Gt, Le, Lt + from sympy.functions.elementary.complexes import Abs + from sympy.functions.elementary.miscellaneous import Min, Max + a = Wild('a') + b = Wild('b') + c = Wild('c') + # Relationals patterns should be in alphabetical order + # (pattern1, pattern2, simplified) + # Do not use Ge, Gt + _matchers_or = ((Tuple(Le(b, a), Le(a, b)), true), + #(Tuple(Le(b, a), Lt(a, b)), true), + (Tuple(Le(b, a), Ne(a, b)), true), + #(Tuple(Le(a, b), Lt(b, a)), true), + #(Tuple(Le(a, b), Ne(a, b)), true), + #(Tuple(Eq(a, b), Le(b, a)), Ge(a, b)), + #(Tuple(Eq(a, b), Lt(b, a)), Ge(a, b)), + (Tuple(Eq(a, b), Le(a, b)), Le(a, b)), + (Tuple(Eq(a, b), Lt(a, b)), Le(a, b)), + #(Tuple(Le(b, a), Lt(b, a)), Ge(a, b)), + (Tuple(Lt(b, a), Lt(a, b)), Ne(a, b)), + (Tuple(Lt(b, a), Ne(a, b)), Ne(a, b)), + (Tuple(Le(a, b), Lt(a, b)), Le(a, b)), + #(Tuple(Lt(a, b), Ne(a, b)), Ne(a, b)), + (Tuple(Eq(a, b), Ne(a, c)), ITE(Eq(b, c), true, Ne(a, c))), + (Tuple(Ne(a, b), Ne(a, c)), ITE(Eq(b, c), Ne(a, b), true)), + # Min/Max/ITE + (Tuple(Le(b, a), Le(c, a)), Ge(a, Min(b, c))), + #(Tuple(Ge(b, a), Ge(c, a)), Ge(Min(b, c), a)), + (Tuple(Le(b, a), Lt(c, a)), ITE(b > 
c, Lt(c, a), Le(b, a))), + (Tuple(Lt(b, a), Lt(c, a)), Gt(a, Min(b, c))), + #(Tuple(Gt(b, a), Gt(c, a)), Gt(Min(b, c), a)), + (Tuple(Le(a, b), Le(a, c)), Le(a, Max(b, c))), + #(Tuple(Le(b, a), Le(c, a)), Le(Max(b, c), a)), + (Tuple(Le(a, b), Lt(a, c)), ITE(b >= c, Le(a, b), Lt(a, c))), + (Tuple(Lt(a, b), Lt(a, c)), Lt(a, Max(b, c))), + #(Tuple(Lt(b, a), Lt(c, a)), Lt(Max(b, c), a)), + (Tuple(Le(a, b), Le(c, a)), ITE(b >= c, true, Or(Le(a, b), Ge(a, c)))), + (Tuple(Le(c, a), Le(a, b)), ITE(b >= c, true, Or(Le(a, b), Ge(a, c)))), + (Tuple(Lt(a, b), Lt(c, a)), ITE(b > c, true, Or(Lt(a, b), Gt(a, c)))), + (Tuple(Lt(c, a), Lt(a, b)), ITE(b > c, true, Or(Lt(a, b), Gt(a, c)))), + (Tuple(Le(a, b), Lt(c, a)), ITE(b >= c, true, Or(Le(a, b), Gt(a, c)))), + (Tuple(Le(c, a), Lt(a, b)), ITE(b >= c, true, Or(Lt(a, b), Ge(a, c)))), + (Tuple(Lt(b, a), Lt(a, -b)), ITE(b >= 0, Gt(Abs(a), b), true)), + (Tuple(Le(b, a), Le(a, -b)), ITE(b > 0, Ge(Abs(a), b), true)), + ) + return _matchers_or + + +@cacheit +def _simplify_patterns_xor(): + """ Two-term patterns for Xor.""" + + from sympy.functions.elementary.miscellaneous import Min, Max + from sympy.core import Wild + from sympy.core.relational import Eq, Ne, Ge, Gt, Le, Lt + a = Wild('a') + b = Wild('b') + c = Wild('c') + # Relationals patterns should be in alphabetical order + # (pattern1, pattern2, simplified) + # Do not use Ge, Gt + _matchers_xor = (#(Tuple(Le(b, a), Lt(a, b)), true), + #(Tuple(Lt(b, a), Le(a, b)), true), + #(Tuple(Eq(a, b), Le(b, a)), Gt(a, b)), + #(Tuple(Eq(a, b), Lt(b, a)), Ge(a, b)), + (Tuple(Eq(a, b), Le(a, b)), Lt(a, b)), + (Tuple(Eq(a, b), Lt(a, b)), Le(a, b)), + (Tuple(Le(a, b), Lt(a, b)), Eq(a, b)), + (Tuple(Le(a, b), Le(b, a)), Ne(a, b)), + (Tuple(Le(b, a), Ne(a, b)), Le(a, b)), + # (Tuple(Lt(b, a), Lt(a, b)), Ne(a, b)), + (Tuple(Lt(b, a), Ne(a, b)), Lt(a, b)), + # (Tuple(Le(a, b), Lt(a, b)), Eq(a, b)), + # (Tuple(Le(a, b), Ne(a, b)), Ge(a, b)), + # (Tuple(Lt(a, b), Ne(a, b)), Gt(a, b)), + # Min/Max/ITE + (Tuple(Le(b, a), Le(c, a)), + And(Ge(a, Min(b, c)), Lt(a, Max(b, c)))), + (Tuple(Le(b, a), Lt(c, a)), + ITE(b > c, And(Gt(a, c), Lt(a, b)), + And(Ge(a, b), Le(a, c)))), + (Tuple(Lt(b, a), Lt(c, a)), + And(Gt(a, Min(b, c)), Le(a, Max(b, c)))), + (Tuple(Le(a, b), Le(a, c)), + And(Le(a, Max(b, c)), Gt(a, Min(b, c)))), + (Tuple(Le(a, b), Lt(a, c)), + ITE(b < c, And(Lt(a, c), Gt(a, b)), + And(Le(a, b), Ge(a, c)))), + (Tuple(Lt(a, b), Lt(a, c)), + And(Lt(a, Max(b, c)), Ge(a, Min(b, c)))), + ) + return _matchers_xor + + +def simplify_univariate(expr): + """return a simplified version of univariate boolean expression, else ``expr``""" + from sympy.functions.elementary.piecewise import Piecewise + from sympy.core.relational import Eq, Ne + if not isinstance(expr, BooleanFunction): + return expr + if expr.atoms(Eq, Ne): + return expr + c = expr + free = c.free_symbols + if len(free) != 1: + return c + x = free.pop() + ok, i = Piecewise((0, c), evaluate=False + )._intervals(x, err_on_Eq=True) + if not ok: + return c + if not i: + return false + args = [] + for a, b, _, _ in i: + if a is S.NegativeInfinity: + if b is S.Infinity: + c = true + else: + if c.subs(x, b) == True: + c = (x <= b) + else: + c = (x < b) + else: + incl_a = (c.subs(x, a) == True) + incl_b = (c.subs(x, b) == True) + if incl_a and incl_b: + if b.is_infinite: + c = (x >= a) + else: + c = And(a <= x, x <= b) + elif incl_a: + c = And(a <= x, x < b) + elif incl_b: + if b.is_infinite: + c = (x > a) + else: + c = And(a < x, x <= b) + else: + c = And(a < x, x < b) + 
args.append(c) + return Or(*args) + + +# Classes corresponding to logic gates +# Used in gateinputcount method +BooleanGates = (And, Or, Xor, Nand, Nor, Not, Xnor, ITE) + +def gateinputcount(expr): + """ + Return the total number of inputs for the logic gates realizing the + Boolean expression. + + Returns + ======= + + int + Number of gate inputs + + Note + ==== + + Not all Boolean functions count as gate here, only those that are + considered to be standard gates. These are: :py:class:`~.And`, + :py:class:`~.Or`, :py:class:`~.Xor`, :py:class:`~.Not`, and + :py:class:`~.ITE` (multiplexer). :py:class:`~.Nand`, :py:class:`~.Nor`, + and :py:class:`~.Xnor` will be evaluated to ``Not(And())`` etc. + + Examples + ======== + + >>> from sympy.logic import And, Or, Nand, Not, gateinputcount + >>> from sympy.abc import x, y, z + >>> expr = And(x, y) + >>> gateinputcount(expr) + 2 + >>> gateinputcount(Or(expr, z)) + 4 + + Note that ``Nand`` is automatically evaluated to ``Not(And())`` so + + >>> gateinputcount(Nand(x, y, z)) + 4 + >>> gateinputcount(Not(And(x, y, z))) + 4 + + Although this can be avoided by using ``evaluate=False`` + + >>> gateinputcount(Nand(x, y, z, evaluate=False)) + 3 + + Also note that a comparison will count as a Boolean variable: + + >>> gateinputcount(And(x > z, y >= 2)) + 2 + + As will a symbol: + >>> gateinputcount(x) + 0 + + """ + if not isinstance(expr, Boolean): + raise TypeError("Expression must be Boolean") + if isinstance(expr, BooleanGates): + return len(expr.args) + sum(gateinputcount(x) for x in expr.args) + return 0 diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/logic/inference.py b/llmeval-env/lib/python3.10/site-packages/sympy/logic/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..f0226eebea0363641a2f6cef65df874ca3e561d4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/logic/inference.py @@ -0,0 +1,329 @@ +"""Inference in propositional logic""" + +from sympy.logic.boolalg import And, Not, conjuncts, to_cnf, BooleanFunction +from sympy.core.sorting import ordered +from sympy.core.sympify import sympify +from sympy.external.importtools import import_module + + +def literal_symbol(literal): + """ + The symbol in this literal (without the negation). + + Examples + ======== + + >>> from sympy.abc import A + >>> from sympy.logic.inference import literal_symbol + >>> literal_symbol(A) + A + >>> literal_symbol(~A) + A + + """ + + if literal is True or literal is False: + return literal + try: + if literal.is_Symbol: + return literal + if literal.is_Not: + return literal_symbol(literal.args[0]) + else: + raise ValueError + except (AttributeError, ValueError): + raise ValueError("Argument must be a boolean literal.") + + +def satisfiable(expr, algorithm=None, all_models=False, minimal=False): + """ + Check satisfiability of a propositional sentence. + Returns a model when it succeeds. + Returns {true: true} for trivially true expressions. + + On setting all_models to True, if given expr is satisfiable then + returns a generator of models. However, if expr is unsatisfiable + then returns a generator containing the single element False. 
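+
+    The ``algorithm`` keyword selects the SAT backend: ``"dpll"``,
+    ``"dpll2"``, ``"pycosat"`` or ``"minisat22"``. If it is ``None``,
+    ``pycosat`` is used when that module is installed and ``dpll2``
+    otherwise; ``minimal`` is only passed on to the ``minisat22`` backend.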
+ + Examples + ======== + + >>> from sympy.abc import A, B + >>> from sympy.logic.inference import satisfiable + >>> satisfiable(A & ~B) + {A: True, B: False} + >>> satisfiable(A & ~A) + False + >>> satisfiable(True) + {True: True} + >>> next(satisfiable(A & ~A, all_models=True)) + False + >>> models = satisfiable((A >> B) & B, all_models=True) + >>> next(models) + {A: False, B: True} + >>> next(models) + {A: True, B: True} + >>> def use_models(models): + ... for model in models: + ... if model: + ... # Do something with the model. + ... print(model) + ... else: + ... # Given expr is unsatisfiable. + ... print("UNSAT") + >>> use_models(satisfiable(A >> ~A, all_models=True)) + {A: False} + >>> use_models(satisfiable(A ^ A, all_models=True)) + UNSAT + + """ + if algorithm is None or algorithm == "pycosat": + pycosat = import_module('pycosat') + if pycosat is not None: + algorithm = "pycosat" + else: + if algorithm == "pycosat": + raise ImportError("pycosat module is not present") + # Silently fall back to dpll2 if pycosat + # is not installed + algorithm = "dpll2" + + if algorithm=="minisat22": + pysat = import_module('pysat') + if pysat is None: + algorithm = "dpll2" + + if algorithm == "dpll": + from sympy.logic.algorithms.dpll import dpll_satisfiable + return dpll_satisfiable(expr) + elif algorithm == "dpll2": + from sympy.logic.algorithms.dpll2 import dpll_satisfiable + return dpll_satisfiable(expr, all_models) + elif algorithm == "pycosat": + from sympy.logic.algorithms.pycosat_wrapper import pycosat_satisfiable + return pycosat_satisfiable(expr, all_models) + elif algorithm == "minisat22": + from sympy.logic.algorithms.minisat22_wrapper import minisat22_satisfiable + return minisat22_satisfiable(expr, all_models, minimal) + raise NotImplementedError + + +def valid(expr): + """ + Check validity of a propositional sentence. + A valid propositional sentence is True under every assignment. + + Examples + ======== + + >>> from sympy.abc import A, B + >>> from sympy.logic.inference import valid + >>> valid(A | ~A) + True + >>> valid(A | B) + False + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Validity + + """ + return not satisfiable(Not(expr)) + + +def pl_true(expr, model=None, deep=False): + """ + Returns whether the given assignment is a model or not. + + If the assignment does not specify the value for every proposition, + this may return None to indicate 'not obvious'. + + Parameters + ========== + + model : dict, optional, default: {} + Mapping of symbols to boolean values to indicate assignment. + deep: boolean, optional, default: False + Gives the value of the expression under partial assignments + correctly. May still return None to indicate 'not obvious'. 
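+
+    With ``deep=True`` the partially substituted expression is additionally
+    checked with ``valid`` and ``satisfiable``, so tautologies and
+    contradictions can be recognized even when the model is incomplete.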
+ + + Examples + ======== + + >>> from sympy.abc import A, B + >>> from sympy.logic.inference import pl_true + >>> pl_true( A & B, {A: True, B: True}) + True + >>> pl_true(A & B, {A: False}) + False + >>> pl_true(A & B, {A: True}) + >>> pl_true(A & B, {A: True}, deep=True) + >>> pl_true(A >> (B >> A)) + >>> pl_true(A >> (B >> A), deep=True) + True + >>> pl_true(A & ~A) + >>> pl_true(A & ~A, deep=True) + False + >>> pl_true(A & B & (~A | ~B), {A: True}) + >>> pl_true(A & B & (~A | ~B), {A: True}, deep=True) + False + + """ + + from sympy.core.symbol import Symbol + + boolean = (True, False) + + def _validate(expr): + if isinstance(expr, Symbol) or expr in boolean: + return True + if not isinstance(expr, BooleanFunction): + return False + return all(_validate(arg) for arg in expr.args) + + if expr in boolean: + return expr + expr = sympify(expr) + if not _validate(expr): + raise ValueError("%s is not a valid boolean expression" % expr) + if not model: + model = {} + model = {k: v for k, v in model.items() if v in boolean} + result = expr.subs(model) + if result in boolean: + return bool(result) + if deep: + model = {k: True for k in result.atoms()} + if pl_true(result, model): + if valid(result): + return True + else: + if not satisfiable(result): + return False + return None + + +def entails(expr, formula_set=None): + """ + Check whether the given expr_set entail an expr. + If formula_set is empty then it returns the validity of expr. + + Examples + ======== + + >>> from sympy.abc import A, B, C + >>> from sympy.logic.inference import entails + >>> entails(A, [A >> B, B >> C]) + False + >>> entails(C, [A >> B, B >> C, A]) + True + >>> entails(A >> B) + False + >>> entails(A >> (B >> A)) + True + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Logical_consequence + + """ + if formula_set: + formula_set = list(formula_set) + else: + formula_set = [] + formula_set.append(Not(expr)) + return not satisfiable(And(*formula_set)) + + +class KB: + """Base class for all knowledge bases""" + def __init__(self, sentence=None): + self.clauses_ = set() + if sentence: + self.tell(sentence) + + def tell(self, sentence): + raise NotImplementedError + + def ask(self, query): + raise NotImplementedError + + def retract(self, sentence): + raise NotImplementedError + + @property + def clauses(self): + return list(ordered(self.clauses_)) + + +class PropKB(KB): + """A KB for Propositional Logic. Inefficient, with no indexing.""" + + def tell(self, sentence): + """Add the sentence's clauses to the KB + + Examples + ======== + + >>> from sympy.logic.inference import PropKB + >>> from sympy.abc import x, y + >>> l = PropKB() + >>> l.clauses + [] + + >>> l.tell(x | y) + >>> l.clauses + [x | y] + + >>> l.tell(y) + >>> l.clauses + [y, x | y] + + """ + for c in conjuncts(to_cnf(sentence)): + self.clauses_.add(c) + + def ask(self, query): + """Checks if the query is true given the set of clauses. 
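+
+        Entailment is checked with ``entails`` against the clauses
+        currently stored in the knowledge base.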
+ + Examples + ======== + + >>> from sympy.logic.inference import PropKB + >>> from sympy.abc import x, y + >>> l = PropKB() + >>> l.tell(x & ~y) + >>> l.ask(x) + True + >>> l.ask(y) + False + + """ + return entails(query, self.clauses_) + + def retract(self, sentence): + """Remove the sentence's clauses from the KB + + Examples + ======== + + >>> from sympy.logic.inference import PropKB + >>> from sympy.abc import x, y + >>> l = PropKB() + >>> l.clauses + [] + + >>> l.tell(x | y) + >>> l.clauses + [x | y] + + >>> l.retract(x | y) + >>> l.clauses + [] + + """ + for c in conjuncts(to_cnf(sentence)): + self.clauses_.discard(c) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3aab9cb45cbfb795841b1aeff27fccc5ab74a9d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/test_boolalg.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/test_boolalg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..038cb1d69f175a03410c063f23b393cfdab687e2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/test_boolalg.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/test_dimacs.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/test_dimacs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..034a09184e4854a6edef23b3dbba2c7f1a4b8c3b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/test_dimacs.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/test_inference.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/test_inference.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b082eb4b9bfa873c5d7ac94f61d2d004dd52f5de Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/test_inference.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/test_boolalg.py b/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/test_boolalg.py new file mode 100644 index 0000000000000000000000000000000000000000..b51e0eef553acdf95bbc16c2a614968153361c73 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/test_boolalg.py @@ -0,0 +1,1340 @@ +from sympy.assumptions.ask import Q +from sympy.assumptions.refine import refine +from sympy.core.numbers import oo +from sympy.core.relational import Equality, Eq, Ne +from sympy.core.singleton import S +from sympy.core.symbol import (Dummy, symbols) +from sympy.functions import Piecewise +from sympy.functions.elementary.trigonometric import cos, sin +from sympy.sets.sets import (Interval, Union) +from 
sympy.simplify.simplify import simplify +from sympy.logic.boolalg import ( + And, Boolean, Equivalent, ITE, Implies, Nand, Nor, Not, Or, + POSform, SOPform, Xor, Xnor, conjuncts, disjuncts, + distribute_or_over_and, distribute_and_over_or, + eliminate_implications, is_nnf, is_cnf, is_dnf, simplify_logic, + to_nnf, to_cnf, to_dnf, to_int_repr, bool_map, true, false, + BooleanAtom, is_literal, term_to_integer, + truth_table, as_Boolean, to_anf, is_anf, distribute_xor_over_and, + anf_coeffs, ANFform, bool_minterm, bool_maxterm, bool_monomial, + _check_pair, _convert_to_varsSOP, _convert_to_varsPOS, Exclusive, + gateinputcount) +from sympy.assumptions.cnf import CNF + +from sympy.testing.pytest import raises, XFAIL, slow + +from itertools import combinations, permutations, product + +A, B, C, D = symbols('A:D') +a, b, c, d, e, w, x, y, z = symbols('a:e w:z') + + +def test_overloading(): + """Test that |, & are overloaded as expected""" + + assert A & B == And(A, B) + assert A | B == Or(A, B) + assert (A & B) | C == Or(And(A, B), C) + assert A >> B == Implies(A, B) + assert A << B == Implies(B, A) + assert ~A == Not(A) + assert A ^ B == Xor(A, B) + + +def test_And(): + assert And() is true + assert And(A) == A + assert And(True) is true + assert And(False) is false + assert And(True, True) is true + assert And(True, False) is false + assert And(False, False) is false + assert And(True, A) == A + assert And(False, A) is false + assert And(True, True, True) is true + assert And(True, True, A) == A + assert And(True, False, A) is false + assert And(1, A) == A + raises(TypeError, lambda: And(2, A)) + raises(TypeError, lambda: And(A < 2, A)) + assert And(A < 1, A >= 1) is false + e = A > 1 + assert And(e, e.canonical) == e.canonical + g, l, ge, le = A > B, B < A, A >= B, B <= A + assert And(g, l, ge, le) == And(ge, g) + assert {And(*i) for i in permutations((l,g,le,ge))} == {And(ge, g)} + assert And(And(Eq(a, 0), Eq(b, 0)), And(Ne(a, 0), Eq(c, 0))) is false + + +def test_Or(): + assert Or() is false + assert Or(A) == A + assert Or(True) is true + assert Or(False) is false + assert Or(True, True) is true + assert Or(True, False) is true + assert Or(False, False) is false + assert Or(True, A) is true + assert Or(False, A) == A + assert Or(True, False, False) is true + assert Or(True, False, A) is true + assert Or(False, False, A) == A + assert Or(1, A) is true + raises(TypeError, lambda: Or(2, A)) + raises(TypeError, lambda: Or(A < 2, A)) + assert Or(A < 1, A >= 1) is true + e = A > 1 + assert Or(e, e.canonical) == e + g, l, ge, le = A > B, B < A, A >= B, B <= A + assert Or(g, l, ge, le) == Or(g, ge) + + +def test_Xor(): + assert Xor() is false + assert Xor(A) == A + assert Xor(A, A) is false + assert Xor(True, A, A) is true + assert Xor(A, A, A, A, A) == A + assert Xor(True, False, False, A, B) == ~Xor(A, B) + assert Xor(True) is true + assert Xor(False) is false + assert Xor(True, True) is false + assert Xor(True, False) is true + assert Xor(False, False) is false + assert Xor(True, A) == ~A + assert Xor(False, A) == A + assert Xor(True, False, False) is true + assert Xor(True, False, A) == ~A + assert Xor(False, False, A) == A + assert isinstance(Xor(A, B), Xor) + assert Xor(A, B, Xor(C, D)) == Xor(A, B, C, D) + assert Xor(A, B, Xor(B, C)) == Xor(A, C) + assert Xor(A < 1, A >= 1, B) == Xor(0, 1, B) == Xor(1, 0, B) + e = A > 1 + assert Xor(e, e.canonical) == Xor(0, 0) == Xor(1, 1) + + +def test_rewrite_as_And(): + expr = x ^ y + assert expr.rewrite(And) == (x | y) & (~x | ~y) + + +def 
test_rewrite_as_Or(): + expr = x ^ y + assert expr.rewrite(Or) == (x & ~y) | (y & ~x) + + +def test_rewrite_as_Nand(): + expr = (y & z) | (z & ~w) + assert expr.rewrite(Nand) == ~(~(y & z) & ~(z & ~w)) + + +def test_rewrite_as_Nor(): + expr = z & (y | ~w) + assert expr.rewrite(Nor) == ~(~z | ~(y | ~w)) + + +def test_Not(): + raises(TypeError, lambda: Not(True, False)) + assert Not(True) is false + assert Not(False) is true + assert Not(0) is true + assert Not(1) is false + assert Not(2) is false + + +def test_Nand(): + assert Nand() is false + assert Nand(A) == ~A + assert Nand(True) is false + assert Nand(False) is true + assert Nand(True, True) is false + assert Nand(True, False) is true + assert Nand(False, False) is true + assert Nand(True, A) == ~A + assert Nand(False, A) is true + assert Nand(True, True, True) is false + assert Nand(True, True, A) == ~A + assert Nand(True, False, A) is true + + +def test_Nor(): + assert Nor() is true + assert Nor(A) == ~A + assert Nor(True) is false + assert Nor(False) is true + assert Nor(True, True) is false + assert Nor(True, False) is false + assert Nor(False, False) is true + assert Nor(True, A) is false + assert Nor(False, A) == ~A + assert Nor(True, True, True) is false + assert Nor(True, True, A) is false + assert Nor(True, False, A) is false + + +def test_Xnor(): + assert Xnor() is true + assert Xnor(A) == ~A + assert Xnor(A, A) is true + assert Xnor(True, A, A) is false + assert Xnor(A, A, A, A, A) == ~A + assert Xnor(True) is false + assert Xnor(False) is true + assert Xnor(True, True) is true + assert Xnor(True, False) is false + assert Xnor(False, False) is true + assert Xnor(True, A) == A + assert Xnor(False, A) == ~A + assert Xnor(True, False, False) is false + assert Xnor(True, False, A) == A + assert Xnor(False, False, A) == ~A + + +def test_Implies(): + raises(ValueError, lambda: Implies(A, B, C)) + assert Implies(True, True) is true + assert Implies(True, False) is false + assert Implies(False, True) is true + assert Implies(False, False) is true + assert Implies(0, A) is true + assert Implies(1, 1) is true + assert Implies(1, 0) is false + assert A >> B == B << A + assert (A < 1) >> (A >= 1) == (A >= 1) + assert (A < 1) >> (S.One > A) is true + assert A >> A is true + + +def test_Equivalent(): + assert Equivalent(A, B) == Equivalent(B, A) == Equivalent(A, B, A) + assert Equivalent() is true + assert Equivalent(A, A) == Equivalent(A) is true + assert Equivalent(True, True) == Equivalent(False, False) is true + assert Equivalent(True, False) == Equivalent(False, True) is false + assert Equivalent(A, True) == A + assert Equivalent(A, False) == Not(A) + assert Equivalent(A, B, True) == A & B + assert Equivalent(A, B, False) == ~A & ~B + assert Equivalent(1, A) == A + assert Equivalent(0, A) == Not(A) + assert Equivalent(A, Equivalent(B, C)) != Equivalent(Equivalent(A, B), C) + assert Equivalent(A < 1, A >= 1) is false + assert Equivalent(A < 1, A >= 1, 0) is false + assert Equivalent(A < 1, A >= 1, 1) is false + assert Equivalent(A < 1, S.One > A) == Equivalent(1, 1) == Equivalent(0, 0) + assert Equivalent(Equality(A, B), Equality(B, A)) is true + + +def test_Exclusive(): + assert Exclusive(False, False, False) is true + assert Exclusive(True, False, False) is true + assert Exclusive(True, True, False) is false + assert Exclusive(True, True, True) is false + + +def test_equals(): + assert Not(Or(A, B)).equals(And(Not(A), Not(B))) is True + assert Equivalent(A, B).equals((A >> B) & (B >> A)) is True + assert ((A | ~B) & (~A | 
B)).equals((~A & ~B) | (A & B)) is True + assert (A >> B).equals(~A >> ~B) is False + assert (A >> (B >> A)).equals(A >> (C >> A)) is False + raises(NotImplementedError, lambda: (A & B).equals(A > B)) + + +def test_simplification_boolalg(): + """ + Test working of simplification methods. + """ + set1 = [[0, 0, 1], [0, 1, 1], [1, 0, 0], [1, 1, 0]] + set2 = [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1]] + assert SOPform([x, y, z], set1) == Or(And(Not(x), z), And(Not(z), x)) + assert Not(SOPform([x, y, z], set2)) == \ + Not(Or(And(Not(x), Not(z)), And(x, z))) + assert POSform([x, y, z], set1 + set2) is true + assert SOPform([x, y, z], set1 + set2) is true + assert SOPform([Dummy(), Dummy(), Dummy()], set1 + set2) is true + + minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1], [1, 0, 1, 1], + [1, 1, 1, 1]] + dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]] + assert ( + SOPform([w, x, y, z], minterms, dontcares) == + Or(And(y, z), And(Not(w), Not(x)))) + assert POSform([w, x, y, z], minterms, dontcares) == And(Or(Not(w), y), z) + + minterms = [1, 3, 7, 11, 15] + dontcares = [0, 2, 5] + assert ( + SOPform([w, x, y, z], minterms, dontcares) == + Or(And(y, z), And(Not(w), Not(x)))) + assert POSform([w, x, y, z], minterms, dontcares) == And(Or(Not(w), y), z) + + minterms = [1, [0, 0, 1, 1], 7, [1, 0, 1, 1], + [1, 1, 1, 1]] + dontcares = [0, [0, 0, 1, 0], 5] + assert ( + SOPform([w, x, y, z], minterms, dontcares) == + Or(And(y, z), And(Not(w), Not(x)))) + assert POSform([w, x, y, z], minterms, dontcares) == And(Or(Not(w), y), z) + + minterms = [1, {y: 1, z: 1}] + dontcares = [0, [0, 0, 1, 0], 5] + assert ( + SOPform([w, x, y, z], minterms, dontcares) == + Or(And(y, z), And(Not(w), Not(x)))) + assert POSform([w, x, y, z], minterms, dontcares) == And(Or(Not(w), y), z) + + + minterms = [{y: 1, z: 1}, 1] + dontcares = [[0, 0, 0, 0]] + + minterms = [[0, 0, 0]] + raises(ValueError, lambda: SOPform([w, x, y, z], minterms)) + raises(ValueError, lambda: POSform([w, x, y, z], minterms)) + + raises(TypeError, lambda: POSform([w, x, y, z], ["abcdefg"])) + + # test simplification + ans = And(A, Or(B, C)) + assert simplify_logic(A & (B | C)) == ans + assert simplify_logic((A & B) | (A & C)) == ans + assert simplify_logic(Implies(A, B)) == Or(Not(A), B) + assert simplify_logic(Equivalent(A, B)) == \ + Or(And(A, B), And(Not(A), Not(B))) + assert simplify_logic(And(Equality(A, 2), C)) == And(Equality(A, 2), C) + assert simplify_logic(And(Equality(A, 2), A)) is S.false + assert simplify_logic(And(Equality(A, 2), A)) == And(Equality(A, 2), A) + assert simplify_logic(And(Equality(A, B), C)) == And(Equality(A, B), C) + assert simplify_logic(Or(And(Equality(A, 3), B), And(Equality(A, 3), C))) \ + == And(Equality(A, 3), Or(B, C)) + b = (~x & ~y & ~z) | (~x & ~y & z) + e = And(A, b) + assert simplify_logic(e) == A & ~x & ~y + raises(ValueError, lambda: simplify_logic(A & (B | C), form='blabla')) + assert simplify(Or(x <= y, And(x < y, z))) == (x <= y) + assert simplify(Or(x <= y, And(y > x, z))) == (x <= y) + assert simplify(Or(x >= y, And(y < x, z))) == (x >= y) + + # Check that expressions with nine variables or more are not simplified + # (without the force-flag) + a, b, c, d, e, f, g, h, j = symbols('a b c d e f g h j') + expr = a & b & c & d & e & f & g & h & j | \ + a & b & c & d & e & f & g & h & ~j + # This expression can be simplified to get rid of the j variables + assert simplify_logic(expr) == expr + + # Test dontcare + assert simplify_logic((a & b) | c | d, dontcare=(a & b)) == c | d + + # check input + 
ans = SOPform([x, y], [[1, 0]]) + assert SOPform([x, y], [[1, 0]]) == ans + assert POSform([x, y], [[1, 0]]) == ans + + raises(ValueError, lambda: SOPform([x], [[1]], [[1]])) + assert SOPform([x], [[1]], [[0]]) is true + assert SOPform([x], [[0]], [[1]]) is true + assert SOPform([x], [], []) is false + + raises(ValueError, lambda: POSform([x], [[1]], [[1]])) + assert POSform([x], [[1]], [[0]]) is true + assert POSform([x], [[0]], [[1]]) is true + assert POSform([x], [], []) is false + + # check working of simplify + assert simplify((A & B) | (A & C)) == And(A, Or(B, C)) + assert simplify(And(x, Not(x))) == False + assert simplify(Or(x, Not(x))) == True + assert simplify(And(Eq(x, 0), Eq(x, y))) == And(Eq(x, 0), Eq(y, 0)) + assert And(Eq(x - 1, 0), Eq(x, y)).simplify() == And(Eq(x, 1), Eq(y, 1)) + assert And(Ne(x - 1, 0), Ne(x, y)).simplify() == And(Ne(x, 1), Ne(x, y)) + assert And(Eq(x - 1, 0), Ne(x, y)).simplify() == And(Eq(x, 1), Ne(y, 1)) + assert And(Eq(x - 1, 0), Eq(x, z + y), Eq(y + x, 0)).simplify( + ) == And(Eq(x, 1), Eq(y, -1), Eq(z, 2)) + assert And(Eq(x - 1, 0), Eq(x + 2, 3)).simplify() == Eq(x, 1) + assert And(Ne(x - 1, 0), Ne(x + 2, 3)).simplify() == Ne(x, 1) + assert And(Eq(x - 1, 0), Eq(x + 2, 2)).simplify() == False + assert And(Ne(x - 1, 0), Ne(x + 2, 2)).simplify( + ) == And(Ne(x, 1), Ne(x, 0)) + + +def test_bool_map(): + """ + Test working of bool_map function. + """ + + minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1], [1, 0, 1, 1], + [1, 1, 1, 1]] + assert bool_map(Not(Not(a)), a) == (a, {a: a}) + assert bool_map(SOPform([w, x, y, z], minterms), + POSform([w, x, y, z], minterms)) == \ + (And(Or(Not(w), y), Or(Not(x), y), z), {x: x, w: w, z: z, y: y}) + assert bool_map(SOPform([x, z, y], [[1, 0, 1]]), + SOPform([a, b, c], [[1, 0, 1]])) != False + function1 = SOPform([x, z, y], [[1, 0, 1], [0, 0, 1]]) + function2 = SOPform([a, b, c], [[1, 0, 1], [1, 0, 0]]) + assert bool_map(function1, function2) == \ + (function1, {y: a, z: b}) + assert bool_map(Xor(x, y), ~Xor(x, y)) == False + assert bool_map(And(x, y), Or(x, y)) is None + assert bool_map(And(x, y), And(x, y, z)) is None + # issue 16179 + assert bool_map(Xor(x, y, z), ~Xor(x, y, z)) == False + assert bool_map(Xor(a, x, y, z), ~Xor(a, x, y, z)) == False + + +def test_bool_symbol(): + """Test that mixing symbols with boolean values + works as expected""" + + assert And(A, True) == A + assert And(A, True, True) == A + assert And(A, False) is false + assert And(A, True, False) is false + assert Or(A, True) is true + assert Or(A, False) == A + + +def test_is_boolean(): + assert isinstance(True, Boolean) is False + assert isinstance(true, Boolean) is True + assert 1 == True + assert 1 != true + assert (1 == true) is False + assert 0 == False + assert 0 != false + assert (0 == false) is False + assert true.is_Boolean is True + assert (A & B).is_Boolean + assert (A | B).is_Boolean + assert (~A).is_Boolean + assert (A ^ B).is_Boolean + assert A.is_Boolean != isinstance(A, Boolean) + assert isinstance(A, Boolean) + + +def test_subs(): + assert (A & B).subs(A, True) == B + assert (A & B).subs(A, False) is false + assert (A & B).subs(B, True) == A + assert (A & B).subs(B, False) is false + assert (A & B).subs({A: True, B: True}) is true + assert (A | B).subs(A, True) is true + assert (A | B).subs(A, False) == B + assert (A | B).subs(B, True) is true + assert (A | B).subs(B, False) == A + assert (A | B).subs({A: True, B: True}) is true + + +""" +we test for axioms of boolean algebra +see 
https://en.wikipedia.org/wiki/Boolean_algebra_(structure) +""" + + +def test_commutative(): + """Test for commutativity of And and Or""" + A, B = map(Boolean, symbols('A,B')) + + assert A & B == B & A + assert A | B == B | A + + +def test_and_associativity(): + """Test for associativity of And""" + + assert (A & B) & C == A & (B & C) + + +def test_or_assicativity(): + assert ((A | B) | C) == (A | (B | C)) + + +def test_double_negation(): + a = Boolean() + assert ~(~a) == a + + +# test methods + +def test_eliminate_implications(): + assert eliminate_implications(Implies(A, B, evaluate=False)) == (~A) | B + assert eliminate_implications( + A >> (C >> Not(B))) == Or(Or(Not(B), Not(C)), Not(A)) + assert eliminate_implications(Equivalent(A, B, C, D)) == \ + (~A | B) & (~B | C) & (~C | D) & (~D | A) + + +def test_conjuncts(): + assert conjuncts(A & B & C) == {A, B, C} + assert conjuncts((A | B) & C) == {A | B, C} + assert conjuncts(A) == {A} + assert conjuncts(True) == {True} + assert conjuncts(False) == {False} + + +def test_disjuncts(): + assert disjuncts(A | B | C) == {A, B, C} + assert disjuncts((A | B) & C) == {(A | B) & C} + assert disjuncts(A) == {A} + assert disjuncts(True) == {True} + assert disjuncts(False) == {False} + + +def test_distribute(): + assert distribute_and_over_or(Or(And(A, B), C)) == And(Or(A, C), Or(B, C)) + assert distribute_or_over_and(And(A, Or(B, C))) == Or(And(A, B), And(A, C)) + assert distribute_xor_over_and(And(A, Xor(B, C))) == Xor(And(A, B), And(A, C)) + + +def test_to_anf(): + x, y, z = symbols('x,y,z') + assert to_anf(And(x, y)) == And(x, y) + assert to_anf(Or(x, y)) == Xor(x, y, And(x, y)) + assert to_anf(Or(Implies(x, y), And(x, y), y)) == \ + Xor(x, True, x & y, remove_true=False) + assert to_anf(Or(Nand(x, y), Nor(x, y), Xnor(x, y), Implies(x, y))) == True + assert to_anf(Or(x, Not(y), Nor(x,z), And(x, y), Nand(y, z))) == \ + Xor(True, And(y, z), And(x, y, z), remove_true=False) + assert to_anf(Xor(x, y)) == Xor(x, y) + assert to_anf(Not(x)) == Xor(x, True, remove_true=False) + assert to_anf(Nand(x, y)) == Xor(True, And(x, y), remove_true=False) + assert to_anf(Nor(x, y)) == Xor(x, y, True, And(x, y), remove_true=False) + assert to_anf(Implies(x, y)) == Xor(x, True, And(x, y), remove_true=False) + assert to_anf(Equivalent(x, y)) == Xor(x, y, True, remove_true=False) + assert to_anf(Nand(x | y, x >> y), deep=False) == \ + Xor(True, And(Or(x, y), Implies(x, y)), remove_true=False) + assert to_anf(Nor(x ^ y, x & y), deep=False) == \ + Xor(True, Or(Xor(x, y), And(x, y)), remove_true=False) + + +def test_to_nnf(): + assert to_nnf(true) is true + assert to_nnf(false) is false + assert to_nnf(A) == A + assert to_nnf(A | ~A | B) is true + assert to_nnf(A & ~A & B) is false + assert to_nnf(A >> B) == ~A | B + assert to_nnf(Equivalent(A, B, C)) == (~A | B) & (~B | C) & (~C | A) + assert to_nnf(A ^ B ^ C) == \ + (A | B | C) & (~A | ~B | C) & (A | ~B | ~C) & (~A | B | ~C) + assert to_nnf(ITE(A, B, C)) == (~A | B) & (A | C) + assert to_nnf(Not(A | B | C)) == ~A & ~B & ~C + assert to_nnf(Not(A & B & C)) == ~A | ~B | ~C + assert to_nnf(Not(A >> B)) == A & ~B + assert to_nnf(Not(Equivalent(A, B, C))) == And(Or(A, B, C), Or(~A, ~B, ~C)) + assert to_nnf(Not(A ^ B ^ C)) == \ + (~A | B | C) & (A | ~B | C) & (A | B | ~C) & (~A | ~B | ~C) + assert to_nnf(Not(ITE(A, B, C))) == (~A | ~B) & (A | ~C) + assert to_nnf((A >> B) ^ (B >> A)) == (A & ~B) | (~A & B) + assert to_nnf((A >> B) ^ (B >> A), False) == \ + (~A | ~B | A | B) & ((A & ~B) | (~A & B)) + assert ITE(A, 1, 0).to_nnf() 
== A + assert ITE(A, 0, 1).to_nnf() == ~A + # although ITE can hold non-Boolean, it will complain if + # an attempt is made to convert the ITE to Boolean nnf + raises(TypeError, lambda: ITE(A < 1, [1], B).to_nnf()) + + +def test_to_cnf(): + assert to_cnf(~(B | C)) == And(Not(B), Not(C)) + assert to_cnf((A & B) | C) == And(Or(A, C), Or(B, C)) + assert to_cnf(A >> B) == (~A) | B + assert to_cnf(A >> (B & C)) == (~A | B) & (~A | C) + assert to_cnf(A & (B | C) | ~A & (B | C), True) == B | C + assert to_cnf(A & B) == And(A, B) + + assert to_cnf(Equivalent(A, B)) == And(Or(A, Not(B)), Or(B, Not(A))) + assert to_cnf(Equivalent(A, B & C)) == \ + (~A | B) & (~A | C) & (~B | ~C | A) + assert to_cnf(Equivalent(A, B | C), True) == \ + And(Or(Not(B), A), Or(Not(C), A), Or(B, C, Not(A))) + assert to_cnf(A + 1) == A + 1 + + +def test_issue_18904(): + x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15 = symbols('x1:16') + eq = (( x1 & x2 & x3 & x4 & x5 & x6 & x7 & x8 & x9 ) | + ( x1 & x2 & x3 & x4 & x5 & x6 & x7 & x10 & x9 ) | + ( x1 & x11 & x3 & x12 & x5 & x13 & x14 & x15 & x9 )) + assert is_cnf(to_cnf(eq)) + raises(ValueError, lambda: to_cnf(eq, simplify=True)) + for f, t in zip((And, Or), (to_cnf, to_dnf)): + eq = f(x1, x2, x3, x4, x5, x6, x7, x8, x9) + raises(ValueError, lambda: to_cnf(eq, simplify=True)) + assert t(eq, simplify=True, force=True) == eq + + +def test_issue_9949(): + assert is_cnf(to_cnf((b > -5) | (a > 2) & (a < 4))) + + +def test_to_CNF(): + assert CNF.CNF_to_cnf(CNF.to_CNF(~(B | C))) == to_cnf(~(B | C)) + assert CNF.CNF_to_cnf(CNF.to_CNF((A & B) | C)) == to_cnf((A & B) | C) + assert CNF.CNF_to_cnf(CNF.to_CNF(A >> B)) == to_cnf(A >> B) + assert CNF.CNF_to_cnf(CNF.to_CNF(A >> (B & C))) == to_cnf(A >> (B & C)) + assert CNF.CNF_to_cnf(CNF.to_CNF(A & (B | C) | ~A & (B | C))) == to_cnf(A & (B | C) | ~A & (B | C)) + assert CNF.CNF_to_cnf(CNF.to_CNF(A & B)) == to_cnf(A & B) + + +def test_to_dnf(): + assert to_dnf(~(B | C)) == And(Not(B), Not(C)) + assert to_dnf(A & (B | C)) == Or(And(A, B), And(A, C)) + assert to_dnf(A >> B) == (~A) | B + assert to_dnf(A >> (B & C)) == (~A) | (B & C) + assert to_dnf(A | B) == A | B + + assert to_dnf(Equivalent(A, B), True) == \ + Or(And(A, B), And(Not(A), Not(B))) + assert to_dnf(Equivalent(A, B & C), True) == \ + Or(And(A, B, C), And(Not(A), Not(B)), And(Not(A), Not(C))) + assert to_dnf(A + 1) == A + 1 + + +def test_to_int_repr(): + x, y, z = map(Boolean, symbols('x,y,z')) + + def sorted_recursive(arg): + try: + return sorted(sorted_recursive(x) for x in arg) + except TypeError: # arg is not a sequence + return arg + + assert sorted_recursive(to_int_repr([x | y, z | x], [x, y, z])) == \ + sorted_recursive([[1, 2], [1, 3]]) + assert sorted_recursive(to_int_repr([x | y, z | ~x], [x, y, z])) == \ + sorted_recursive([[1, 2], [3, -1]]) + + +def test_is_anf(): + x, y = symbols('x,y') + assert is_anf(true) is True + assert is_anf(false) is True + assert is_anf(x) is True + assert is_anf(And(x, y)) is True + assert is_anf(Xor(x, y, And(x, y))) is True + assert is_anf(Xor(x, y, Or(x, y))) is False + assert is_anf(Xor(Not(x), y)) is False + + +def test_is_nnf(): + assert is_nnf(true) is True + assert is_nnf(A) is True + assert is_nnf(~A) is True + assert is_nnf(A & B) is True + assert is_nnf((A & B) | (~A & A) | (~B & B) | (~A & ~B), False) is True + assert is_nnf((A | B) & (~A | ~B)) is True + assert is_nnf(Not(Or(A, B))) is False + assert is_nnf(A ^ B) is False + assert is_nnf((A & B) | (~A & A) | (~B & B) | (~A & ~B), True) is False + + +def 
test_is_cnf(): + assert is_cnf(x) is True + assert is_cnf(x | y | z) is True + assert is_cnf(x & y & z) is True + assert is_cnf((x | y) & z) is True + assert is_cnf((x & y) | z) is False + assert is_cnf(~(x & y) | z) is False + + +def test_is_dnf(): + assert is_dnf(x) is True + assert is_dnf(x | y | z) is True + assert is_dnf(x & y & z) is True + assert is_dnf((x & y) | z) is True + assert is_dnf((x | y) & z) is False + assert is_dnf(~(x | y) & z) is False + + +def test_ITE(): + A, B, C = symbols('A:C') + assert ITE(True, False, True) is false + assert ITE(True, True, False) is true + assert ITE(False, True, False) is false + assert ITE(False, False, True) is true + assert isinstance(ITE(A, B, C), ITE) + + A = True + assert ITE(A, B, C) == B + A = False + assert ITE(A, B, C) == C + B = True + assert ITE(And(A, B), B, C) == C + assert ITE(Or(A, False), And(B, True), False) is false + assert ITE(x, A, B) == Not(x) + assert ITE(x, B, A) == x + assert ITE(1, x, y) == x + assert ITE(0, x, y) == y + raises(TypeError, lambda: ITE(2, x, y)) + raises(TypeError, lambda: ITE(1, [], y)) + raises(TypeError, lambda: ITE(1, (), y)) + raises(TypeError, lambda: ITE(1, y, [])) + assert ITE(1, 1, 1) is S.true + assert isinstance(ITE(1, 1, 1, evaluate=False), ITE) + + raises(TypeError, lambda: ITE(x > 1, y, x)) + assert ITE(Eq(x, True), y, x) == ITE(x, y, x) + assert ITE(Eq(x, False), y, x) == ITE(~x, y, x) + assert ITE(Ne(x, True), y, x) == ITE(~x, y, x) + assert ITE(Ne(x, False), y, x) == ITE(x, y, x) + assert ITE(Eq(S. true, x), y, x) == ITE(x, y, x) + assert ITE(Eq(S.false, x), y, x) == ITE(~x, y, x) + assert ITE(Ne(S.true, x), y, x) == ITE(~x, y, x) + assert ITE(Ne(S.false, x), y, x) == ITE(x, y, x) + # 0 and 1 in the context are not treated as True/False + # so the equality must always be False since dissimilar + # objects cannot be equal + assert ITE(Eq(x, 0), y, x) == x + assert ITE(Eq(x, 1), y, x) == x + assert ITE(Ne(x, 0), y, x) == y + assert ITE(Ne(x, 1), y, x) == y + assert ITE(Eq(x, 0), y, z).subs(x, 0) == y + assert ITE(Eq(x, 0), y, z).subs(x, 1) == z + raises(ValueError, lambda: ITE(x > 1, y, x, z)) + + +def test_is_literal(): + assert is_literal(True) is True + assert is_literal(False) is True + assert is_literal(A) is True + assert is_literal(~A) is True + assert is_literal(Or(A, B)) is False + assert is_literal(Q.zero(A)) is True + assert is_literal(Not(Q.zero(A))) is True + assert is_literal(Or(A, B)) is False + assert is_literal(And(Q.zero(A), Q.zero(B))) is False + assert is_literal(x < 3) + assert not is_literal(x + y < 3) + + +def test_operators(): + # Mostly test __and__, __rand__, and so on + assert True & A == A & True == A + assert False & A == A & False == False + assert A & B == And(A, B) + assert True | A == A | True == True + assert False | A == A | False == A + assert A | B == Or(A, B) + assert ~A == Not(A) + assert True >> A == A << True == A + assert False >> A == A << False == True + assert A >> True == True << A == True + assert A >> False == False << A == ~A + assert A >> B == B << A == Implies(A, B) + assert True ^ A == A ^ True == ~A + assert False ^ A == A ^ False == A + assert A ^ B == Xor(A, B) + + +def test_true_false(): + assert true is S.true + assert false is S.false + assert true is not True + assert false is not False + assert true + assert not false + assert true == True + assert false == False + assert not (true == False) + assert not (false == True) + assert not (true == false) + + assert hash(true) == hash(True) + assert hash(false) == hash(False) + assert 
len({true, True}) == len({false, False}) == 1 + + assert isinstance(true, BooleanAtom) + assert isinstance(false, BooleanAtom) + # We don't want to subclass from bool, because bool subclasses from + # int. But operators like &, |, ^, <<, >>, and ~ act differently on 0 and + # 1 then we want them to on true and false. See the docstrings of the + # various And, Or, etc. functions for examples. + assert not isinstance(true, bool) + assert not isinstance(false, bool) + + # Note: using 'is' comparison is important here. We want these to return + # true and false, not True and False + + assert Not(true) is false + assert Not(True) is false + assert Not(false) is true + assert Not(False) is true + assert ~true is false + assert ~false is true + + for T, F in product((True, true), (False, false)): + assert And(T, F) is false + assert And(F, T) is false + assert And(F, F) is false + assert And(T, T) is true + assert And(T, x) == x + assert And(F, x) is false + if not (T is True and F is False): + assert T & F is false + assert F & T is false + if F is not False: + assert F & F is false + if T is not True: + assert T & T is true + + assert Or(T, F) is true + assert Or(F, T) is true + assert Or(F, F) is false + assert Or(T, T) is true + assert Or(T, x) is true + assert Or(F, x) == x + if not (T is True and F is False): + assert T | F is true + assert F | T is true + if F is not False: + assert F | F is false + if T is not True: + assert T | T is true + + assert Xor(T, F) is true + assert Xor(F, T) is true + assert Xor(F, F) is false + assert Xor(T, T) is false + assert Xor(T, x) == ~x + assert Xor(F, x) == x + if not (T is True and F is False): + assert T ^ F is true + assert F ^ T is true + if F is not False: + assert F ^ F is false + if T is not True: + assert T ^ T is false + + assert Nand(T, F) is true + assert Nand(F, T) is true + assert Nand(F, F) is true + assert Nand(T, T) is false + assert Nand(T, x) == ~x + assert Nand(F, x) is true + + assert Nor(T, F) is false + assert Nor(F, T) is false + assert Nor(F, F) is true + assert Nor(T, T) is false + assert Nor(T, x) is false + assert Nor(F, x) == ~x + + assert Implies(T, F) is false + assert Implies(F, T) is true + assert Implies(F, F) is true + assert Implies(T, T) is true + assert Implies(T, x) == x + assert Implies(F, x) is true + assert Implies(x, T) is true + assert Implies(x, F) == ~x + if not (T is True and F is False): + assert T >> F is false + assert F << T is false + assert F >> T is true + assert T << F is true + if F is not False: + assert F >> F is true + assert F << F is true + if T is not True: + assert T >> T is true + assert T << T is true + + assert Equivalent(T, F) is false + assert Equivalent(F, T) is false + assert Equivalent(F, F) is true + assert Equivalent(T, T) is true + assert Equivalent(T, x) == x + assert Equivalent(F, x) == ~x + assert Equivalent(x, T) == x + assert Equivalent(x, F) == ~x + + assert ITE(T, T, T) is true + assert ITE(T, T, F) is true + assert ITE(T, F, T) is false + assert ITE(T, F, F) is false + assert ITE(F, T, T) is true + assert ITE(F, T, F) is false + assert ITE(F, F, T) is true + assert ITE(F, F, F) is false + + assert all(i.simplify(1, 2) is i for i in (S.true, S.false)) + + +def test_bool_as_set(): + assert ITE(y <= 0, False, y >= 1).as_set() == Interval(1, oo) + assert And(x <= 2, x >= -2).as_set() == Interval(-2, 2) + assert Or(x >= 2, x <= -2).as_set() == Interval(-oo, -2) + Interval(2, oo) + assert Not(x > 2).as_set() == Interval(-oo, 2) + # issue 10240 + assert Not(And(x > 2, x < 
3)).as_set() == \ + Union(Interval(-oo, 2), Interval(3, oo)) + assert true.as_set() == S.UniversalSet + assert false.as_set() is S.EmptySet + assert x.as_set() == S.UniversalSet + assert And(Or(x < 1, x > 3), x < 2).as_set() == Interval.open(-oo, 1) + assert And(x < 1, sin(x) < 3).as_set() == (x < 1).as_set() + raises(NotImplementedError, lambda: (sin(x) < 1).as_set()) + # watch for object morph in as_set + assert Eq(-1, cos(2*x)**2/sin(2*x)**2).as_set() is S.EmptySet + + +@XFAIL +def test_multivariate_bool_as_set(): + x, y = symbols('x,y') + + assert And(x >= 0, y >= 0).as_set() == Interval(0, oo)*Interval(0, oo) + assert Or(x >= 0, y >= 0).as_set() == S.Reals*S.Reals - \ + Interval(-oo, 0, True, True)*Interval(-oo, 0, True, True) + + +def test_all_or_nothing(): + x = symbols('x', extended_real=True) + args = x >= -oo, x <= oo + v = And(*args) + if v.func is And: + assert len(v.args) == len(args) - args.count(S.true) + else: + assert v == True + v = Or(*args) + if v.func is Or: + assert len(v.args) == 2 + else: + assert v == True + + +def test_canonical_atoms(): + assert true.canonical == true + assert false.canonical == false + + +def test_negated_atoms(): + assert true.negated == false + assert false.negated == true + + +def test_issue_8777(): + assert And(x > 2, x < oo).as_set() == Interval(2, oo, left_open=True) + assert And(x >= 1, x < oo).as_set() == Interval(1, oo) + assert (x < oo).as_set() == Interval(-oo, oo) + assert (x > -oo).as_set() == Interval(-oo, oo) + + +def test_issue_8975(): + assert Or(And(-oo < x, x <= -2), And(2 <= x, x < oo)).as_set() == \ + Interval(-oo, -2) + Interval(2, oo) + + +def test_term_to_integer(): + assert term_to_integer([1, 0, 1, 0, 0, 1, 0]) == 82 + assert term_to_integer('0010101000111001') == 10809 + + +def test_issue_21971(): + a, b, c, d = symbols('a b c d') + f = a & b & c | a & c + assert f.subs(a & c, d) == b & d | d + assert f.subs(a & b & c, d) == a & c | d + + f = (a | b | c) & (a | c) + assert f.subs(a | c, d) == (b | d) & d + assert f.subs(a | b | c, d) == (a | c) & d + + f = (a ^ b ^ c) & (a ^ c) + assert f.subs(a ^ c, d) == (b ^ d) & d + assert f.subs(a ^ b ^ c, d) == (a ^ c) & d + + +def test_truth_table(): + assert list(truth_table(And(x, y), [x, y], input=False)) == \ + [False, False, False, True] + assert list(truth_table(x | y, [x, y], input=False)) == \ + [False, True, True, True] + assert list(truth_table(x >> y, [x, y], input=False)) == \ + [True, True, False, True] + assert list(truth_table(And(x, y), [x, y])) == \ + [([0, 0], False), ([0, 1], False), ([1, 0], False), ([1, 1], True)] + + +def test_issue_8571(): + for t in (S.true, S.false): + raises(TypeError, lambda: +t) + raises(TypeError, lambda: -t) + raises(TypeError, lambda: abs(t)) + # use int(bool(t)) to get 0 or 1 + raises(TypeError, lambda: int(t)) + + for o in [S.Zero, S.One, x]: + for _ in range(2): + raises(TypeError, lambda: o + t) + raises(TypeError, lambda: o - t) + raises(TypeError, lambda: o % t) + raises(TypeError, lambda: o*t) + raises(TypeError, lambda: o/t) + raises(TypeError, lambda: o**t) + o, t = t, o # do again in reversed order + + +def test_expand_relational(): + n = symbols('n', negative=True) + p, q = symbols('p q', positive=True) + r = ((n + q*(-n/q + 1))/(q*(-n/q + 1)) < 0) + assert r is not S.false + assert r.expand() is S.false + assert (q > 0).expand() is S.true + + +def test_issue_12717(): + assert S.true.is_Atom == True + assert S.false.is_Atom == True + + +def test_as_Boolean(): + nz = symbols('nz', nonzero=True) + assert all(as_Boolean(i) 
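A sketch (not part of the test file) tying truth_table to SOPform: the input rows on which an expression is True can be fed straight back in as minterms.

from sympy import symbols
from sympy.logic.boolalg import SOPform, truth_table

x, y = symbols('x y')
expr = x ^ y
minterms = [inputs for inputs, value in truth_table(expr, [x, y]) if value]
assert minterms == [[0, 1], [1, 0]]
assert SOPform([x, y], minterms) == (x & ~y) | (y & ~x)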
is S.true for i in (True, S.true, 1, nz)) + z = symbols('z', zero=True) + assert all(as_Boolean(i) is S.false for i in (False, S.false, 0, z)) + assert all(as_Boolean(i) == i for i in (x, x < 0)) + for i in (2, S(2), x + 1, []): + raises(TypeError, lambda: as_Boolean(i)) + + +def test_binary_symbols(): + assert ITE(x < 1, y, z).binary_symbols == {y, z} + for f in (Eq, Ne): + assert f(x, 1).binary_symbols == set() + assert f(x, True).binary_symbols == {x} + assert f(x, False).binary_symbols == {x} + assert S.true.binary_symbols == set() + assert S.false.binary_symbols == set() + assert x.binary_symbols == {x} + assert And(x, Eq(y, False), Eq(z, 1)).binary_symbols == {x, y} + assert Q.prime(x).binary_symbols == set() + assert Q.lt(x, 1).binary_symbols == set() + assert Q.is_true(x).binary_symbols == {x} + assert Q.eq(x, True).binary_symbols == {x} + assert Q.prime(x).binary_symbols == set() + + +def test_BooleanFunction_diff(): + assert And(x, y).diff(x) == Piecewise((0, Eq(y, False)), (1, True)) + + +def test_issue_14700(): + A, B, C, D, E, F, G, H = symbols('A B C D E F G H') + q = ((B & D & H & ~F) | (B & H & ~C & ~D) | (B & H & ~C & ~F) | + (B & H & ~D & ~G) | (B & H & ~F & ~G) | (C & G & ~B & ~D) | + (C & G & ~D & ~H) | (C & G & ~F & ~H) | (D & F & H & ~B) | + (D & F & ~G & ~H) | (B & D & F & ~C & ~H) | (D & E & F & ~B & ~C) | + (D & F & ~A & ~B & ~C) | (D & F & ~A & ~C & ~H) | + (A & B & D & F & ~E & ~H)) + soldnf = ((B & D & H & ~F) | (D & F & H & ~B) | (B & H & ~C & ~D) | + (B & H & ~D & ~G) | (C & G & ~B & ~D) | (C & G & ~D & ~H) | + (C & G & ~F & ~H) | (D & F & ~G & ~H) | (D & E & F & ~C & ~H) | + (D & F & ~A & ~C & ~H) | (A & B & D & F & ~E & ~H)) + solcnf = ((B | C | D) & (B | D | G) & (C | D | H) & (C | F | H) & + (D | G | H) & (F | G | H) & (B | F | ~D | ~H) & + (~B | ~D | ~F | ~H) & (D | ~B | ~C | ~G | ~H) & + (A | H | ~C | ~D | ~F | ~G) & (H | ~C | ~D | ~E | ~F | ~G) & + (B | E | H | ~A | ~D | ~F | ~G)) + assert simplify_logic(q, "dnf") == soldnf + assert simplify_logic(q, "cnf") == solcnf + + minterms = [[0, 1, 0, 0], [0, 1, 0, 1], [0, 1, 1, 0], [0, 1, 1, 1], + [0, 0, 1, 1], [1, 0, 1, 1]] + dontcares = [[1, 0, 0, 0], [1, 0, 0, 1], [1, 1, 0, 0], [1, 1, 0, 1]] + assert SOPform([w, x, y, z], minterms) == (x & ~w) | (y & z & ~x) + # Should not be more complicated with don't cares + assert SOPform([w, x, y, z], minterms, dontcares) == \ + (x & ~w) | (y & z & ~x) + + +def test_relational_simplification(): + w, x, y, z = symbols('w x y z', real=True) + d, e = symbols('d e', real=False) + # Test all combinations or sign and order + assert Or(x >= y, x < y).simplify() == S.true + assert Or(x >= y, y > x).simplify() == S.true + assert Or(x >= y, -x > -y).simplify() == S.true + assert Or(x >= y, -y < -x).simplify() == S.true + assert Or(-x <= -y, x < y).simplify() == S.true + assert Or(-x <= -y, -x > -y).simplify() == S.true + assert Or(-x <= -y, y > x).simplify() == S.true + assert Or(-x <= -y, -y < -x).simplify() == S.true + assert Or(y <= x, x < y).simplify() == S.true + assert Or(y <= x, y > x).simplify() == S.true + assert Or(y <= x, -x > -y).simplify() == S.true + assert Or(y <= x, -y < -x).simplify() == S.true + assert Or(-y >= -x, x < y).simplify() == S.true + assert Or(-y >= -x, y > x).simplify() == S.true + assert Or(-y >= -x, -x > -y).simplify() == S.true + assert Or(-y >= -x, -y < -x).simplify() == S.true + + assert Or(x < y, x >= y).simplify() == S.true + assert Or(y > x, x >= y).simplify() == S.true + assert Or(-x > -y, x >= y).simplify() == S.true + assert Or(-y < -x, x 
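For context on the simplify_logic assertions above, a minimal sketch (not part of the test file) of the two target forms on a small expression:

from sympy import symbols
from sympy.logic.boolalg import simplify_logic

a, b, c = symbols('a b c')
e = (a & b) | (a & ~b) | (b & c)
assert simplify_logic(e, form='dnf') == a | (b & c)
assert simplify_logic(e, form='cnf') == (a | b) & (a | c)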
>= y).simplify() == S.true + assert Or(x < y, -x <= -y).simplify() == S.true + assert Or(-x > -y, -x <= -y).simplify() == S.true + assert Or(y > x, -x <= -y).simplify() == S.true + assert Or(-y < -x, -x <= -y).simplify() == S.true + assert Or(x < y, y <= x).simplify() == S.true + assert Or(y > x, y <= x).simplify() == S.true + assert Or(-x > -y, y <= x).simplify() == S.true + assert Or(-y < -x, y <= x).simplify() == S.true + assert Or(x < y, -y >= -x).simplify() == S.true + assert Or(y > x, -y >= -x).simplify() == S.true + assert Or(-x > -y, -y >= -x).simplify() == S.true + assert Or(-y < -x, -y >= -x).simplify() == S.true + + # Some other tests + assert Or(x >= y, w < z, x <= y).simplify() == S.true + assert And(x >= y, x < y).simplify() == S.false + assert Or(x >= y, Eq(y, x)).simplify() == (x >= y) + assert And(x >= y, Eq(y, x)).simplify() == Eq(x, y) + assert And(Eq(x, y), x >= 1, 2 < y, y >= 5, z < y).simplify() == \ + (Eq(x, y) & (x >= 1) & (y >= 5) & (y > z)) + assert Or(Eq(x, y), x >= y, w < y, z < y).simplify() == \ + (x >= y) | (y > z) | (w < y) + assert And(Eq(x, y), x >= y, w < y, y >= z, z < y).simplify() == \ + Eq(x, y) & (y > z) & (w < y) + # assert And(Eq(x, y), x >= y, w < y, y >= z, z < y).simplify(relational_minmax=True) == \ + # And(Eq(x, y), y > Max(w, z)) + # assert Or(Eq(x, y), x >= 1, 2 < y, y >= 5, z < y).simplify(relational_minmax=True) == \ + # (Eq(x, y) | (x >= 1) | (y > Min(2, z))) + assert And(Eq(x, y), x >= 1, 2 < y, y >= 5, z < y).simplify() == \ + (Eq(x, y) & (x >= 1) & (y >= 5) & (y > z)) + assert (Eq(x, y) & Eq(d, e) & (x >= y) & (d >= e)).simplify() == \ + (Eq(x, y) & Eq(d, e) & (d >= e)) + assert And(Eq(x, y), Eq(x, -y)).simplify() == And(Eq(x, 0), Eq(y, 0)) + assert Xor(x >= y, x <= y).simplify() == Ne(x, y) + assert And(x > 1, x < -1, Eq(x, y)).simplify() == S.false + # From #16690 + assert And(x >= y, Eq(y, 0)).simplify() == And(x >= 0, Eq(y, 0)) + assert Or(Ne(x, 1), Ne(x, 2)).simplify() == S.true + assert And(Eq(x, 1), Ne(2, x)).simplify() == Eq(x, 1) + assert Or(Eq(x, 1), Ne(2, x)).simplify() == Ne(x, 2) + +def test_issue_8373(): + x = symbols('x', real=True) + assert Or(x < 1, x > -1).simplify() == S.true + assert Or(x < 1, x >= 1).simplify() == S.true + assert And(x < 1, x >= 1).simplify() == S.false + assert Or(x <= 1, x >= 1).simplify() == S.true + + +def test_issue_7950(): + x = symbols('x', real=True) + assert And(Eq(x, 1), Eq(x, 2)).simplify() == S.false + + +@slow +def test_relational_simplification_numerically(): + def test_simplification_numerically_function(original, simplified): + symb = original.free_symbols + n = len(symb) + valuelist = list(set(combinations(list(range(-(n-1), n))*n, n))) + for values in valuelist: + sublist = dict(zip(symb, values)) + originalvalue = original.subs(sublist) + simplifiedvalue = simplified.subs(sublist) + assert originalvalue == simplifiedvalue, "Original: {}\nand"\ + " simplified: {}\ndo not evaluate to the same value for {}"\ + "".format(original, simplified, sublist) + + w, x, y, z = symbols('w x y z', real=True) + d, e = symbols('d e', real=False) + + expressions = (And(Eq(x, y), x >= y, w < y, y >= z, z < y), + And(Eq(x, y), x >= 1, 2 < y, y >= 5, z < y), + Or(Eq(x, y), x >= 1, 2 < y, y >= 5, z < y), + And(x >= y, Eq(y, x)), + Or(And(Eq(x, y), x >= y, w < y, Or(y >= z, z < y)), + And(Eq(x, y), x >= 1, 2 < y, y >= -1, z < y)), + (Eq(x, y) & Eq(d, e) & (x >= y) & (d >= e)), + ) + + for expression in expressions: + test_simplification_numerically_function(expression, + expression.simplify()) + + +def 
test_relational_simplification_patterns_numerically(): + from sympy.core import Wild + from sympy.logic.boolalg import _simplify_patterns_and, \ + _simplify_patterns_or, _simplify_patterns_xor + a = Wild('a') + b = Wild('b') + c = Wild('c') + symb = [a, b, c] + patternlists = [[And, _simplify_patterns_and()], + [Or, _simplify_patterns_or()], + [Xor, _simplify_patterns_xor()]] + valuelist = list(set(combinations(list(range(-2, 3))*3, 3))) + # Skip combinations of +/-2 and 0, except for all 0 + valuelist = [v for v in valuelist if any([w % 2 for w in v]) or not any(v)] + for func, patternlist in patternlists: + for pattern in patternlist: + original = func(*pattern[0].args) + simplified = pattern[1] + for values in valuelist: + sublist = dict(zip(symb, values)) + originalvalue = original.xreplace(sublist) + simplifiedvalue = simplified.xreplace(sublist) + assert originalvalue == simplifiedvalue, "Original: {}\nand"\ + " simplified: {}\ndo not evaluate to the same value for"\ + "{}".format(pattern[0], simplified, sublist) + + +def test_issue_16803(): + n = symbols('n') + # No simplification done, but should not raise an exception + assert ((n > 3) | (n < 0) | ((n > 0) & (n < 3))).simplify() == \ + (n > 3) | (n < 0) | ((n > 0) & (n < 3)) + + +def test_issue_17530(): + r = {x: oo, y: oo} + assert Or(x + y > 0, x - y < 0).subs(r) + assert not And(x + y < 0, x - y < 0).subs(r) + raises(TypeError, lambda: Or(x + y < 0, x - y < 0).subs(r)) + raises(TypeError, lambda: And(x + y > 0, x - y < 0).subs(r)) + raises(TypeError, lambda: And(x + y > 0, x - y < 0).subs(r)) + + +def test_anf_coeffs(): + assert anf_coeffs([1, 0]) == [1, 1] + assert anf_coeffs([0, 0, 0, 1]) == [0, 0, 0, 1] + assert anf_coeffs([0, 1, 1, 1]) == [0, 1, 1, 1] + assert anf_coeffs([1, 1, 1, 0]) == [1, 0, 0, 1] + assert anf_coeffs([1, 0, 0, 0]) == [1, 1, 1, 1] + assert anf_coeffs([1, 0, 0, 1]) == [1, 1, 1, 0] + assert anf_coeffs([1, 1, 0, 1]) == [1, 0, 1, 1] + + +def test_ANFform(): + x, y = symbols('x,y') + assert ANFform([x], [1, 1]) == True + assert ANFform([x], [0, 0]) == False + assert ANFform([x], [1, 0]) == Xor(x, True, remove_true=False) + assert ANFform([x, y], [1, 1, 1, 0]) == \ + Xor(True, And(x, y), remove_true=False) + + +def test_bool_minterm(): + x, y = symbols('x,y') + assert bool_minterm(3, [x, y]) == And(x, y) + assert bool_minterm([1, 0], [x, y]) == And(Not(y), x) + + +def test_bool_maxterm(): + x, y = symbols('x,y') + assert bool_maxterm(2, [x, y]) == Or(Not(x), y) + assert bool_maxterm([0, 1], [x, y]) == Or(Not(y), x) + + +def test_bool_monomial(): + x, y = symbols('x,y') + assert bool_monomial(1, [x, y]) == y + assert bool_monomial([1, 1], [x, y]) == And(x, y) + + +def test_check_pair(): + assert _check_pair([0, 1, 0], [0, 1, 1]) == 2 + assert _check_pair([0, 1, 0], [1, 1, 1]) == -1 + + +def test_issue_19114(): + expr = (B & C) | (A & ~C) | (~A & ~B) + # Expression is minimal, but there are multiple minimal forms possible + res1 = (A & B) | (C & ~A) | (~B & ~C) + result = to_dnf(expr, simplify=True) + assert result in (expr, res1) + + +def test_issue_20870(): + result = SOPform([a, b, c, d], [1, 2, 3, 4, 5, 6, 8, 9, 11, 12, 14, 15]) + expected = ((d & ~b) | (a & b & c) | (a & ~c & ~d) | + (b & ~a & ~c) | (c & ~a & ~d)) + assert result == expected + + +def test_convert_to_varsSOP(): + assert _convert_to_varsSOP([0, 1, 0], [x, y, z]) == And(Not(x), y, Not(z)) + assert _convert_to_varsSOP([3, 1, 0], [x, y, z]) == And(y, Not(z)) + + +def test_convert_to_varsPOS(): + assert _convert_to_varsPOS([0, 1, 0], [x, y, z]) == 
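A sketch (not part of the test file) of how the three ANF helpers above fit together, assuming the coefficient at index i pairs with bool_monomial(i, variables):

from sympy import symbols
from sympy.logic.boolalg import ANFform, Xor, anf_coeffs, bool_monomial

x, y = symbols('x y')
truthvalues = [1, 1, 1, 0]                  # truth vector of Nand(x, y)
coeffs = anf_coeffs(truthvalues)            # [1, 0, 0, 1]
terms = [bool_monomial(i, [x, y]) for i, c in enumerate(coeffs) if c == 1]
assert Xor(*terms, remove_true=False) == ANFform([x, y], truthvalues)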
Or(x, Not(y), z) + assert _convert_to_varsPOS([3, 1, 0], [x, y, z]) == Or(Not(y), z) + + +def test_gateinputcount(): + a, b, c, d, e = symbols('a:e') + assert gateinputcount(And(a, b)) == 2 + assert gateinputcount(a | b & c & d ^ (e | a)) == 9 + assert gateinputcount(And(a, True)) == 0 + raises(TypeError, lambda: gateinputcount(a*b)) + + +def test_refine(): + # relational + assert not refine(x < 0, ~(x < 0)) + assert refine(x < 0, (x < 0)) + assert refine(x < 0, (0 > x)) is S.true + assert refine(x < 0, (y < 0)) == (x < 0) + assert not refine(x <= 0, ~(x <= 0)) + assert refine(x <= 0, (x <= 0)) + assert refine(x <= 0, (0 >= x)) is S.true + assert refine(x <= 0, (y <= 0)) == (x <= 0) + assert not refine(x > 0, ~(x > 0)) + assert refine(x > 0, (x > 0)) + assert refine(x > 0, (0 < x)) is S.true + assert refine(x > 0, (y > 0)) == (x > 0) + assert not refine(x >= 0, ~(x >= 0)) + assert refine(x >= 0, (x >= 0)) + assert refine(x >= 0, (0 <= x)) is S.true + assert refine(x >= 0, (y >= 0)) == (x >= 0) + assert not refine(Eq(x, 0), ~(Eq(x, 0))) + assert refine(Eq(x, 0), (Eq(x, 0))) + assert refine(Eq(x, 0), (Eq(0, x))) is S.true + assert refine(Eq(x, 0), (Eq(y, 0))) == Eq(x, 0) + assert not refine(Ne(x, 0), ~(Ne(x, 0))) + assert refine(Ne(x, 0), (Ne(0, x))) is S.true + assert refine(Ne(x, 0), (Ne(x, 0))) + assert refine(Ne(x, 0), (Ne(y, 0))) == (Ne(x, 0)) + + # boolean functions + assert refine(And(x > 0, y > 0), (x > 0)) == (y > 0) + assert refine(And(x > 0, y > 0), (x > 0) & (y > 0)) is S.true + + # predicates + assert refine(Q.positive(x), Q.positive(x)) is S.true + assert refine(Q.positive(x), Q.negative(x)) is S.false + assert refine(Q.positive(x), Q.real(x)) == Q.positive(x) + + +def test_relational_threeterm_simplification_patterns_numerically(): + from sympy.core import Wild + from sympy.logic.boolalg import _simplify_patterns_and3 + a = Wild('a') + b = Wild('b') + c = Wild('c') + symb = [a, b, c] + patternlists = [[And, _simplify_patterns_and3()]] + valuelist = list(set(combinations(list(range(-2, 3))*3, 3))) + # Skip combinations of +/-2 and 0, except for all 0 + valuelist = [v for v in valuelist if any([w % 2 for w in v]) or not any(v)] + for func, patternlist in patternlists: + for pattern in patternlist: + original = func(*pattern[0].args) + simplified = pattern[1] + for values in valuelist: + sublist = dict(zip(symb, values)) + originalvalue = original.xreplace(sublist) + simplifiedvalue = simplified.xreplace(sublist) + assert originalvalue == simplifiedvalue, "Original: {}\nand"\ + " simplified: {}\ndo not evaluate to the same value for"\ + "{}".format(pattern[0], simplified, sublist) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/test_dimacs.py b/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/test_dimacs.py new file mode 100644 index 0000000000000000000000000000000000000000..3a9a51a39d33fb807688614cb5809b621ce21a2c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/test_dimacs.py @@ -0,0 +1,234 @@ +"""Various tests on satisfiability using dimacs cnf file syntax +You can find lots of cnf files in +ftp://dimacs.rutgers.edu/pub/challenge/satisfiability/benchmarks/cnf/ +""" + +from sympy.logic.utilities.dimacs import load +from sympy.logic.algorithms.dpll import dpll_satisfiable + + +def test_f1(): + assert bool(dpll_satisfiable(load(f1))) + + +def test_f2(): + assert bool(dpll_satisfiable(load(f2))) + + +def test_f3(): + assert bool(dpll_satisfiable(load(f3))) + + +def test_f4(): + assert not 
bool(dpll_satisfiable(load(f4))) + + +def test_f5(): + assert bool(dpll_satisfiable(load(f5))) + +f1 = """c simple example +c Resolution: SATISFIABLE +c +p cnf 3 2 +1 -3 0 +2 3 -1 0 +""" + + +f2 = """c an example from Quinn's text, 16 variables and 18 clauses. +c Resolution: SATISFIABLE +c +p cnf 16 18 + 1 2 0 + -2 -4 0 + 3 4 0 + -4 -5 0 + 5 -6 0 + 6 -7 0 + 6 7 0 + 7 -16 0 + 8 -9 0 + -8 -14 0 + 9 10 0 + 9 -10 0 +-10 -11 0 + 10 12 0 + 11 12 0 + 13 14 0 + 14 -15 0 + 15 16 0 +""" + +f3 = """c +p cnf 6 9 +-1 0 +-3 0 +2 -1 0 +2 -4 0 +5 -4 0 +-1 -3 0 +-4 -6 0 +1 3 -2 0 +4 6 -2 -5 0 +""" + +f4 = """c +c file: hole6.cnf [http://people.sc.fsu.edu/~jburkardt/data/cnf/hole6.cnf] +c +c SOURCE: John Hooker (jh38+@andrew.cmu.edu) +c +c DESCRIPTION: Pigeon hole problem of placing n (for file 'holen.cnf') pigeons +c in n+1 holes without placing 2 pigeons in the same hole +c +c NOTE: Part of the collection at the Forschungsinstitut fuer +c anwendungsorientierte Wissensverarbeitung in Ulm Germany. +c +c NOTE: Not satisfiable +c +p cnf 42 133 +-1 -7 0 +-1 -13 0 +-1 -19 0 +-1 -25 0 +-1 -31 0 +-1 -37 0 +-7 -13 0 +-7 -19 0 +-7 -25 0 +-7 -31 0 +-7 -37 0 +-13 -19 0 +-13 -25 0 +-13 -31 0 +-13 -37 0 +-19 -25 0 +-19 -31 0 +-19 -37 0 +-25 -31 0 +-25 -37 0 +-31 -37 0 +-2 -8 0 +-2 -14 0 +-2 -20 0 +-2 -26 0 +-2 -32 0 +-2 -38 0 +-8 -14 0 +-8 -20 0 +-8 -26 0 +-8 -32 0 +-8 -38 0 +-14 -20 0 +-14 -26 0 +-14 -32 0 +-14 -38 0 +-20 -26 0 +-20 -32 0 +-20 -38 0 +-26 -32 0 +-26 -38 0 +-32 -38 0 +-3 -9 0 +-3 -15 0 +-3 -21 0 +-3 -27 0 +-3 -33 0 +-3 -39 0 +-9 -15 0 +-9 -21 0 +-9 -27 0 +-9 -33 0 +-9 -39 0 +-15 -21 0 +-15 -27 0 +-15 -33 0 +-15 -39 0 +-21 -27 0 +-21 -33 0 +-21 -39 0 +-27 -33 0 +-27 -39 0 +-33 -39 0 +-4 -10 0 +-4 -16 0 +-4 -22 0 +-4 -28 0 +-4 -34 0 +-4 -40 0 +-10 -16 0 +-10 -22 0 +-10 -28 0 +-10 -34 0 +-10 -40 0 +-16 -22 0 +-16 -28 0 +-16 -34 0 +-16 -40 0 +-22 -28 0 +-22 -34 0 +-22 -40 0 +-28 -34 0 +-28 -40 0 +-34 -40 0 +-5 -11 0 +-5 -17 0 +-5 -23 0 +-5 -29 0 +-5 -35 0 +-5 -41 0 +-11 -17 0 +-11 -23 0 +-11 -29 0 +-11 -35 0 +-11 -41 0 +-17 -23 0 +-17 -29 0 +-17 -35 0 +-17 -41 0 +-23 -29 0 +-23 -35 0 +-23 -41 0 +-29 -35 0 +-29 -41 0 +-35 -41 0 +-6 -12 0 +-6 -18 0 +-6 -24 0 +-6 -30 0 +-6 -36 0 +-6 -42 0 +-12 -18 0 +-12 -24 0 +-12 -30 0 +-12 -36 0 +-12 -42 0 +-18 -24 0 +-18 -30 0 +-18 -36 0 +-18 -42 0 +-24 -30 0 +-24 -36 0 +-24 -42 0 +-30 -36 0 +-30 -42 0 +-36 -42 0 + 6 5 4 3 2 1 0 + 12 11 10 9 8 7 0 + 18 17 16 15 14 13 0 + 24 23 22 21 20 19 0 + 30 29 28 27 26 25 0 + 36 35 34 33 32 31 0 + 42 41 40 39 38 37 0 +""" + +f5 = """c simple example requiring variable selection +c +c NOTE: Satisfiable +c +p cnf 5 5 +1 2 3 0 +1 -2 3 0 +4 5 -3 0 +1 -4 -3 0 +-1 -5 0 +""" diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/test_inference.py b/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/test_inference.py new file mode 100644 index 0000000000000000000000000000000000000000..ee6a0a12a89e74ebf43e3952b45e3b310a30072c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/logic/tests/test_inference.py @@ -0,0 +1,315 @@ +"""For more tests on satisfiability, see test_dimacs""" + +from sympy.assumptions.ask import Q +from sympy.core.symbol import symbols +from sympy.logic.boolalg import And, Implies, Equivalent, true, false +from sympy.logic.inference import literal_symbol, \ + pl_true, satisfiable, valid, entails, PropKB +from sympy.logic.algorithms.dpll import dpll, dpll_satisfiable, \ + find_pure_symbol, find_unit_clause, unit_propagate, \ + find_pure_symbol_int_repr, find_unit_clause_int_repr, \ + 
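The CNF strings above follow the DIMACS conventions: lines starting with 'c' are comments, 'p cnf <nvars> <nclauses>' declares the problem size, and each clause is a list of signed 1-based variable indices terminated by 0. A sketch (not part of the tests) of how the two clauses of f1 map to a SymPy expression through the loader used here:

from sympy.logic.algorithms.dpll import dpll_satisfiable
from sympy.logic.utilities.dimacs import load

expr = load("p cnf 3 2\n1 -3 0\n2 3 -1 0\n")
# expr == (cnf_1 | ~cnf_3) & (cnf_2 | cnf_3 | ~cnf_1)
print(dpll_satisfiable(expr))   # prints one satisfying assignment as a dict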
unit_propagate_int_repr +from sympy.logic.algorithms.dpll2 import dpll_satisfiable as dpll2_satisfiable +from sympy.testing.pytest import raises + + +def test_literal(): + A, B = symbols('A,B') + assert literal_symbol(True) is True + assert literal_symbol(False) is False + assert literal_symbol(A) is A + assert literal_symbol(~A) is A + + +def test_find_pure_symbol(): + A, B, C = symbols('A,B,C') + assert find_pure_symbol([A], [A]) == (A, True) + assert find_pure_symbol([A, B], [~A | B, ~B | A]) == (None, None) + assert find_pure_symbol([A, B, C], [ A | ~B, ~B | ~C, C | A]) == (A, True) + assert find_pure_symbol([A, B, C], [~A | B, B | ~C, C | A]) == (B, True) + assert find_pure_symbol([A, B, C], [~A | ~B, ~B | ~C, C | A]) == (B, False) + assert find_pure_symbol( + [A, B, C], [~A | B, ~B | ~C, C | A]) == (None, None) + + +def test_find_pure_symbol_int_repr(): + assert find_pure_symbol_int_repr([1], [{1}]) == (1, True) + assert find_pure_symbol_int_repr([1, 2], + [{-1, 2}, {-2, 1}]) == (None, None) + assert find_pure_symbol_int_repr([1, 2, 3], + [{1, -2}, {-2, -3}, {3, 1}]) == (1, True) + assert find_pure_symbol_int_repr([1, 2, 3], + [{-1, 2}, {2, -3}, {3, 1}]) == (2, True) + assert find_pure_symbol_int_repr([1, 2, 3], + [{-1, -2}, {-2, -3}, {3, 1}]) == (2, False) + assert find_pure_symbol_int_repr([1, 2, 3], + [{-1, 2}, {-2, -3}, {3, 1}]) == (None, None) + + +def test_unit_clause(): + A, B, C = symbols('A,B,C') + assert find_unit_clause([A], {}) == (A, True) + assert find_unit_clause([A, ~A], {}) == (A, True) # Wrong ?? + assert find_unit_clause([A | B], {A: True}) == (B, True) + assert find_unit_clause([A | B], {B: True}) == (A, True) + assert find_unit_clause( + [A | B | C, B | ~C, A | ~B], {A: True}) == (B, False) + assert find_unit_clause([A | B | C, B | ~C, A | B], {A: True}) == (B, True) + assert find_unit_clause([A | B | C, B | ~C, A ], {}) == (A, True) + + +def test_unit_clause_int_repr(): + assert find_unit_clause_int_repr(map(set, [[1]]), {}) == (1, True) + assert find_unit_clause_int_repr(map(set, [[1], [-1]]), {}) == (1, True) + assert find_unit_clause_int_repr([{1, 2}], {1: True}) == (2, True) + assert find_unit_clause_int_repr([{1, 2}], {2: True}) == (1, True) + assert find_unit_clause_int_repr(map(set, + [[1, 2, 3], [2, -3], [1, -2]]), {1: True}) == (2, False) + assert find_unit_clause_int_repr(map(set, + [[1, 2, 3], [3, -3], [1, 2]]), {1: True}) == (2, True) + + A, B, C = symbols('A,B,C') + assert find_unit_clause([A | B | C, B | ~C, A ], {}) == (A, True) + + +def test_unit_propagate(): + A, B, C = symbols('A,B,C') + assert unit_propagate([A | B], A) == [] + assert unit_propagate([A | B, ~A | C, ~C | B, A], A) == [C, ~C | B, A] + + +def test_unit_propagate_int_repr(): + assert unit_propagate_int_repr([{1, 2}], 1) == [] + assert unit_propagate_int_repr(map(set, + [[1, 2], [-1, 3], [-3, 2], [1]]), 1) == [{3}, {-3, 2}] + + +def test_dpll(): + """This is also tested in test_dimacs""" + A, B, C = symbols('A,B,C') + assert dpll([A | B], [A, B], {A: True, B: True}) == {A: True, B: True} + + +def test_dpll_satisfiable(): + A, B, C = symbols('A,B,C') + assert dpll_satisfiable( A & ~A ) is False + assert dpll_satisfiable( A & ~B ) == {A: True, B: False} + assert dpll_satisfiable( + A | B ) in ({A: True}, {B: True}, {A: True, B: True}) + assert dpll_satisfiable( + (~A | B) & (~B | A) ) in ({A: True, B: True}, {A: False, B: False}) + assert dpll_satisfiable( (A | B) & (~B | C) ) in ({A: True, B: False}, + {A: True, C: True}, {B: True, C: True}) + assert dpll_satisfiable( A & B & C ) == 
{A: True, B: True, C: True} + assert dpll_satisfiable( (A | B) & (A >> B) ) == {B: True} + assert dpll_satisfiable( Equivalent(A, B) & A ) == {A: True, B: True} + assert dpll_satisfiable( Equivalent(A, B) & ~A ) == {A: False, B: False} + + +def test_dpll2_satisfiable(): + A, B, C = symbols('A,B,C') + assert dpll2_satisfiable( A & ~A ) is False + assert dpll2_satisfiable( A & ~B ) == {A: True, B: False} + assert dpll2_satisfiable( + A | B ) in ({A: True}, {B: True}, {A: True, B: True}) + assert dpll2_satisfiable( + (~A | B) & (~B | A) ) in ({A: True, B: True}, {A: False, B: False}) + assert dpll2_satisfiable( (A | B) & (~B | C) ) in ({A: True, B: False, C: True}, + {A: True, B: True, C: True}) + assert dpll2_satisfiable( A & B & C ) == {A: True, B: True, C: True} + assert dpll2_satisfiable( (A | B) & (A >> B) ) in ({B: True, A: False}, + {B: True, A: True}) + assert dpll2_satisfiable( Equivalent(A, B) & A ) == {A: True, B: True} + assert dpll2_satisfiable( Equivalent(A, B) & ~A ) == {A: False, B: False} + + +def test_minisat22_satisfiable(): + A, B, C = symbols('A,B,C') + minisat22_satisfiable = lambda expr: satisfiable(expr, algorithm="minisat22") + assert minisat22_satisfiable( A & ~A ) is False + assert minisat22_satisfiable( A & ~B ) == {A: True, B: False} + assert minisat22_satisfiable( + A | B ) in ({A: True}, {B: False}, {A: False, B: True}, {A: True, B: True}, {A: True, B: False}) + assert minisat22_satisfiable( + (~A | B) & (~B | A) ) in ({A: True, B: True}, {A: False, B: False}) + assert minisat22_satisfiable( (A | B) & (~B | C) ) in ({A: True, B: False, C: True}, + {A: True, B: True, C: True}, {A: False, B: True, C: True}, {A: True, B: False, C: False}) + assert minisat22_satisfiable( A & B & C ) == {A: True, B: True, C: True} + assert minisat22_satisfiable( (A | B) & (A >> B) ) in ({B: True, A: False}, + {B: True, A: True}) + assert minisat22_satisfiable( Equivalent(A, B) & A ) == {A: True, B: True} + assert minisat22_satisfiable( Equivalent(A, B) & ~A ) == {A: False, B: False} + +def test_minisat22_minimal_satisfiable(): + A, B, C = symbols('A,B,C') + minisat22_satisfiable = lambda expr, minimal=True: satisfiable(expr, algorithm="minisat22", minimal=True) + assert minisat22_satisfiable( A & ~A ) is False + assert minisat22_satisfiable( A & ~B ) == {A: True, B: False} + assert minisat22_satisfiable( + A | B ) in ({A: True}, {B: False}, {A: False, B: True}, {A: True, B: True}, {A: True, B: False}) + assert minisat22_satisfiable( + (~A | B) & (~B | A) ) in ({A: True, B: True}, {A: False, B: False}) + assert minisat22_satisfiable( (A | B) & (~B | C) ) in ({A: True, B: False, C: True}, + {A: True, B: True, C: True}, {A: False, B: True, C: True}, {A: True, B: False, C: False}) + assert minisat22_satisfiable( A & B & C ) == {A: True, B: True, C: True} + assert minisat22_satisfiable( (A | B) & (A >> B) ) in ({B: True, A: False}, + {B: True, A: True}) + assert minisat22_satisfiable( Equivalent(A, B) & A ) == {A: True, B: True} + assert minisat22_satisfiable( Equivalent(A, B) & ~A ) == {A: False, B: False} + g = satisfiable((A | B | C),algorithm="minisat22",minimal=True,all_models=True) + sol = next(g) + first_solution = {key for key, value in sol.items() if value} + sol=next(g) + second_solution = {key for key, value in sol.items() if value} + sol=next(g) + third_solution = {key for key, value in sol.items() if value} + assert not first_solution <= second_solution + assert not second_solution <= third_solution + assert not first_solution <= third_solution + +def test_satisfiable(): + A, 
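The backend-specific helpers exercised above are normally reached through sympy.logic.inference.satisfiable, which dispatches on the algorithm keyword. A brief sketch (not part of the tests):

from sympy import symbols
from sympy.logic.inference import satisfiable

A, B = symbols('A B')
expr = (A | B) & (~A | B)
model = satisfiable(expr)                    # default backend
assert model and model[B]                    # B must be True in every model
print(satisfiable(expr, algorithm='dpll'))   # same query via the plain DPLL backend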
B, C = symbols('A,B,C') + assert satisfiable(A & (A >> B) & ~B) is False + + +def test_valid(): + A, B, C = symbols('A,B,C') + assert valid(A >> (B >> A)) is True + assert valid((A >> (B >> C)) >> ((A >> B) >> (A >> C))) is True + assert valid((~B >> ~A) >> (A >> B)) is True + assert valid(A | B | C) is False + assert valid(A >> B) is False + + +def test_pl_true(): + A, B, C = symbols('A,B,C') + assert pl_true(True) is True + assert pl_true( A & B, {A: True, B: True}) is True + assert pl_true( A | B, {A: True}) is True + assert pl_true( A | B, {B: True}) is True + assert pl_true( A | B, {A: None, B: True}) is True + assert pl_true( A >> B, {A: False}) is True + assert pl_true( A | B | ~C, {A: False, B: True, C: True}) is True + assert pl_true(Equivalent(A, B), {A: False, B: False}) is True + + # test for false + assert pl_true(False) is False + assert pl_true( A & B, {A: False, B: False}) is False + assert pl_true( A & B, {A: False}) is False + assert pl_true( A & B, {B: False}) is False + assert pl_true( A | B, {A: False, B: False}) is False + + #test for None + assert pl_true(B, {B: None}) is None + assert pl_true( A & B, {A: True, B: None}) is None + assert pl_true( A >> B, {A: True, B: None}) is None + assert pl_true(Equivalent(A, B), {A: None}) is None + assert pl_true(Equivalent(A, B), {A: True, B: None}) is None + + # Test for deep + assert pl_true(A | B, {A: False}, deep=True) is None + assert pl_true(~A & ~B, {A: False}, deep=True) is None + assert pl_true(A | B, {A: False, B: False}, deep=True) is False + assert pl_true(A & B & (~A | ~B), {A: True}, deep=True) is False + assert pl_true((C >> A) >> (B >> A), {C: True}, deep=True) is True + + +def test_pl_true_wrong_input(): + from sympy.core.numbers import pi + raises(ValueError, lambda: pl_true('John Cleese')) + raises(ValueError, lambda: pl_true(42 + pi + pi ** 2)) + raises(ValueError, lambda: pl_true(42)) + + +def test_entails(): + A, B, C = symbols('A, B, C') + assert entails(A, [A >> B, ~B]) is False + assert entails(B, [Equivalent(A, B), A]) is True + assert entails((A >> B) >> (~A >> ~B)) is False + assert entails((A >> B) >> (~B >> ~A)) is True + + +def test_PropKB(): + A, B, C = symbols('A,B,C') + kb = PropKB() + assert kb.ask(A >> B) is False + assert kb.ask(A >> (B >> A)) is True + kb.tell(A >> B) + kb.tell(B >> C) + assert kb.ask(A) is False + assert kb.ask(B) is False + assert kb.ask(C) is False + assert kb.ask(~A) is False + assert kb.ask(~B) is False + assert kb.ask(~C) is False + assert kb.ask(A >> C) is True + kb.tell(A) + assert kb.ask(A) is True + assert kb.ask(B) is True + assert kb.ask(C) is True + assert kb.ask(~C) is False + kb.retract(A) + assert kb.ask(C) is False + + +def test_propKB_tolerant(): + """"tolerant to bad input""" + kb = PropKB() + A, B, C = symbols('A,B,C') + assert kb.ask(B) is False + +def test_satisfiable_non_symbols(): + x, y = symbols('x y') + assumptions = Q.zero(x*y) + facts = Implies(Q.zero(x*y), Q.zero(x) | Q.zero(y)) + query = ~Q.zero(x) & ~Q.zero(y) + refutations = [ + {Q.zero(x): True, Q.zero(x*y): True}, + {Q.zero(y): True, Q.zero(x*y): True}, + {Q.zero(x): True, Q.zero(y): True, Q.zero(x*y): True}, + {Q.zero(x): True, Q.zero(y): False, Q.zero(x*y): True}, + {Q.zero(x): False, Q.zero(y): True, Q.zero(x*y): True}] + assert not satisfiable(And(assumptions, facts, query), algorithm='dpll') + assert satisfiable(And(assumptions, facts, ~query), algorithm='dpll') in refutations + assert not satisfiable(And(assumptions, facts, query), algorithm='dpll2') + assert 
satisfiable(And(assumptions, facts, ~query), algorithm='dpll2') in refutations + +def test_satisfiable_bool(): + from sympy.core.singleton import S + assert satisfiable(true) == {true: true} + assert satisfiable(S.true) == {true: true} + assert satisfiable(false) is False + assert satisfiable(S.false) is False + + +def test_satisfiable_all_models(): + from sympy.abc import A, B + assert next(satisfiable(False, all_models=True)) is False + assert list(satisfiable((A >> ~A) & A, all_models=True)) == [False] + assert list(satisfiable(True, all_models=True)) == [{true: true}] + + models = [{A: True, B: False}, {A: False, B: True}] + result = satisfiable(A ^ B, all_models=True) + models.remove(next(result)) + models.remove(next(result)) + raises(StopIteration, lambda: next(result)) + assert not models + + assert list(satisfiable(Equivalent(A, B), all_models=True)) == \ + [{A: False, B: False}, {A: True, B: True}] + + models = [{A: False, B: False}, {A: False, B: True}, {A: True, B: True}] + for model in satisfiable(A >> B, all_models=True): + models.remove(model) + assert not models + + # This is a santiy test to check that only the required number + # of solutions are generated. The expr below has 2**100 - 1 models + # which would time out the test if all are generated at once. + from sympy.utilities.iterables import numbered_symbols + from sympy.logic.boolalg import Or + sym = numbered_symbols() + X = [next(sym) for i in range(100)] + result = satisfiable(Or(*X), all_models=True) + for i in range(10): + assert next(result) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/logic/utilities/__init__.py b/llmeval-env/lib/python3.10/site-packages/sympy/logic/utilities/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3526c3e53d624bc70afe2df05f123c835781364c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/logic/utilities/__init__.py @@ -0,0 +1,3 @@ +from .dimacs import load_file + +__all__ = ['load_file'] diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/logic/utilities/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/logic/utilities/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdfb4a4c0943b6be88317224c45a92ad9744cfe0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/logic/utilities/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/logic/utilities/__pycache__/dimacs.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/logic/utilities/__pycache__/dimacs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f538e40848143ff9bbb956a4853bf03087f98fb9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/logic/utilities/__pycache__/dimacs.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/logic/utilities/dimacs.py b/llmeval-env/lib/python3.10/site-packages/sympy/logic/utilities/dimacs.py new file mode 100644 index 0000000000000000000000000000000000000000..56fd9a20eeaae28b678b498ec4a7401481010bc2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/logic/utilities/dimacs.py @@ -0,0 +1,70 @@ +"""For reading in DIMACS file format + +www.cs.ubc.ca/~hoos/SATLIB/Benchmarks/SAT/satformat.ps + +""" + +from sympy.core import Symbol +from sympy.logic.boolalg import And, Or +import re + + +def load(s): + """Loads a boolean expression from a string. 
+ + Examples + ======== + + >>> from sympy.logic.utilities.dimacs import load + >>> load('1') + cnf_1 + >>> load('1 2') + cnf_1 | cnf_2 + >>> load('1 \\n 2') + cnf_1 & cnf_2 + >>> load('1 2 \\n 3') + cnf_3 & (cnf_1 | cnf_2) + """ + clauses = [] + + lines = s.split('\n') + + pComment = re.compile(r'c.*') + pStats = re.compile(r'p\s*cnf\s*(\d*)\s*(\d*)') + + while len(lines) > 0: + line = lines.pop(0) + + # Only deal with lines that aren't comments + if not pComment.match(line): + m = pStats.match(line) + + if not m: + nums = line.rstrip('\n').split(' ') + list = [] + for lit in nums: + if lit != '': + if int(lit) == 0: + continue + num = abs(int(lit)) + sign = True + if int(lit) < 0: + sign = False + + if sign: + list.append(Symbol("cnf_%s" % num)) + else: + list.append(~Symbol("cnf_%s" % num)) + + if len(list) > 0: + clauses.append(Or(*list)) + + return And(*clauses) + + +def load_file(location): + """Loads a boolean expression from a file.""" + with open(location) as f: + s = f.read() + + return load(s) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/series/acceleration.py b/llmeval-env/lib/python3.10/site-packages/sympy/series/acceleration.py new file mode 100644 index 0000000000000000000000000000000000000000..467640442eabf53762f722a6d39de452256f24ca --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/series/acceleration.py @@ -0,0 +1,101 @@ +""" +Convergence acceleration / extrapolation methods for series and +sequences. + +References: +Carl M. Bender & Steven A. Orszag, "Advanced Mathematical Methods for +Scientists and Engineers: Asymptotic Methods and Perturbation Theory", +Springer 1999. (Shanks transformation: pp. 368-375, Richardson +extrapolation: pp. 375-377.) +""" + +from sympy.core.numbers import Integer +from sympy.core.singleton import S +from sympy.functions.combinatorial.factorials import factorial + + +def richardson(A, k, n, N): + """ + Calculate an approximation for lim k->oo A(k) using Richardson + extrapolation with the terms A(n), A(n+1), ..., A(n+N+1). + Choosing N ~= 2*n often gives good results. + + Examples + ======== + + A simple example is to calculate exp(1) using the limit definition. + This limit converges slowly; n = 100 only produces two accurate + digits: + + >>> from sympy.abc import n + >>> e = (1 + 1/n)**n + >>> print(round(e.subs(n, 100).evalf(), 10)) + 2.7048138294 + + Richardson extrapolation with 11 appropriately chosen terms gives + a value that is accurate to the indicated precision: + + >>> from sympy import E + >>> from sympy.series.acceleration import richardson + >>> print(round(richardson(e, n, 10, 20).evalf(), 10)) + 2.7182818285 + >>> print(round(E.evalf(), 10)) + 2.7182818285 + + Another useful application is to speed up convergence of series. + Computing 100 terms of the zeta(2) series 1/k**2 yields only + two accurate digits: + + >>> from sympy.abc import k, n + >>> from sympy import Sum + >>> A = Sum(k**-2, (k, 1, n)) + >>> print(round(A.subs(n, 100).evalf(), 10)) + 1.6349839002 + + Richardson extrapolation performs much better: + + >>> from sympy import pi + >>> print(round(richardson(A, n, 10, 20).evalf(), 10)) + 1.6449340668 + >>> print(round(((pi**2)/6).evalf(), 10)) # Exact value + 1.6449340668 + + """ + s = S.Zero + for j in range(0, N + 1): + s += (A.subs(k, Integer(n + j)).doit() * (n + j)**N * + S.NegativeOne**(j + N) / (factorial(j) * factorial(N - j))) + return s + + +def shanks(A, k, n, m=1): + """ + Calculate an approximation for lim k->oo A(k) using the n-term Shanks + transformation S(A)(n). 
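The summation performed by richardson() above can be spelled out in exact rational arithmetic outside SymPy; a sketch (richardson_frac and its arguments are illustrative names, not part of the library):

from fractions import Fraction
from math import factorial

def richardson_frac(A, n, N):
    # sum_{j=0}^{N} A(n + j) * (n + j)**N * (-1)**(j + N) / (j! * (N - j)!)
    return sum(A(n + j) * Fraction((n + j)**N * (-1)**(j + N),
                                   factorial(j) * factorial(N - j))
               for j in range(N + 1))

# the docstring example, redone with Fractions instead of SymPy rationals:
approx_e = richardson_frac(lambda k: (1 + Fraction(1, k))**k, 10, 20)
print(float(approx_e))   # ~2.718281828..., matching the symbolic result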
With m > 1, calculate the m-fold recursive + Shanks transformation S(S(...S(A)...))(n). + + The Shanks transformation is useful for summing Taylor series that + converge slowly near a pole or singularity, e.g. for log(2): + + >>> from sympy.abc import k, n + >>> from sympy import Sum, Integer + >>> from sympy.series.acceleration import shanks + >>> A = Sum(Integer(-1)**(k+1) / k, (k, 1, n)) + >>> print(round(A.subs(n, 100).doit().evalf(), 10)) + 0.6881721793 + >>> print(round(shanks(A, n, 25).evalf(), 10)) + 0.6931396564 + >>> print(round(shanks(A, n, 25, 5).evalf(), 10)) + 0.6931471806 + + The correct value is 0.6931471805599453094172321215. + """ + table = [A.subs(k, Integer(j)).doit() for j in range(n + m + 2)] + table2 = table[:] + + for i in range(1, m + 1): + for j in range(i, n + m + 1): + x, y, z = table[j - 1], table[j], table[j + 1] + table2[j] = (z*x - y**2) / (z + x - 2*y) + table = table2[:] + return table[n] diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/series/formal.py b/llmeval-env/lib/python3.10/site-packages/sympy/series/formal.py new file mode 100644 index 0000000000000000000000000000000000000000..5ad724b21455d3e233c22192e10d7d52d161554c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/series/formal.py @@ -0,0 +1,1869 @@ +"""Formal Power Series""" + +from collections import defaultdict + +from sympy.core.numbers import (nan, oo, zoo) +from sympy.core.add import Add +from sympy.core.expr import Expr +from sympy.core.function import Derivative, Function, expand +from sympy.core.mul import Mul +from sympy.core.numbers import Rational +from sympy.core.relational import Eq +from sympy.sets.sets import Interval +from sympy.core.singleton import S +from sympy.core.symbol import Wild, Dummy, symbols, Symbol +from sympy.core.sympify import sympify +from sympy.discrete.convolutions import convolution +from sympy.functions.combinatorial.factorials import binomial, factorial, rf +from sympy.functions.combinatorial.numbers import bell +from sympy.functions.elementary.integers import floor, frac, ceiling +from sympy.functions.elementary.miscellaneous import Min, Max +from sympy.functions.elementary.piecewise import Piecewise +from sympy.series.limits import Limit +from sympy.series.order import Order +from sympy.series.sequences import sequence +from sympy.series.series_class import SeriesBase +from sympy.utilities.iterables import iterable + + + +def rational_algorithm(f, x, k, order=4, full=False): + """ + Rational algorithm for computing + formula of coefficients of Formal Power Series + of a function. + + Explanation + =========== + + Applicable when f(x) or some derivative of f(x) + is a rational function in x. + + :func:`rational_algorithm` uses :func:`~.apart` function for partial fraction + decomposition. :func:`~.apart` by default uses 'undetermined coefficients + method'. By setting ``full=True``, 'Bronstein's algorithm' can be used + instead. + + Looks for derivative of a function up to 4'th order (by default). + This can be overridden using order option. + + Parameters + ========== + + x : Symbol + order : int, optional + Order of the derivative of ``f``, Default is 4. + full : bool + + Returns + ======= + + formula : Expr + ind : Expr + Independent terms. 
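The same table recurrence as shanks() above, written with plain Fractions (shanks_frac and A are illustrative names, not part of the library); it makes the underlying formula S(A)(j) = (A(j+1)*A(j-1) - A(j)**2) / (A(j+1) + A(j-1) - 2*A(j)) explicit:

from fractions import Fraction

def shanks_frac(A, n, m=1):
    table = [A(j) for j in range(n + m + 2)]
    for _ in range(m):
        # apply the Shanks transformation to the interior of the table
        table = [table[0]] + [
            (table[j + 1]*table[j - 1] - table[j]**2)
            / (table[j + 1] + table[j - 1] - 2*table[j])
            for j in range(1, len(table) - 1)
        ]
    return table[n]

# partial sums of the alternating harmonic series, which converge to log(2):
A = lambda j: sum(Fraction((-1)**(k + 1), k) for k in range(1, j + 1))
print(float(shanks_frac(A, 25)))      # ~0.693140, vs log(2) = 0.693147180...
print(float(shanks_frac(A, 25, 5)))   # ~0.6931471806 after five folds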
+ order : int + full : bool + + Examples + ======== + + >>> from sympy import log, atan + >>> from sympy.series.formal import rational_algorithm as ra + >>> from sympy.abc import x, k + + >>> ra(1 / (1 - x), x, k) + (1, 0, 0) + >>> ra(log(1 + x), x, k) + (-1/((-1)**k*k), 0, 1) + + >>> ra(atan(x), x, k, full=True) + ((-I/(2*(-I)**k) + I/(2*I**k))/k, 0, 1) + + Notes + ===== + + By setting ``full=True``, range of admissible functions to be solved using + ``rational_algorithm`` can be increased. This option should be used + carefully as it can significantly slow down the computation as ``doit`` is + performed on the :class:`~.RootSum` object returned by the :func:`~.apart` + function. Use ``full=False`` whenever possible. + + See Also + ======== + + sympy.polys.partfrac.apart + + References + ========== + + .. [1] Formal Power Series - Dominik Gruntz, Wolfram Koepf + .. [2] Power Series in Computer Algebra - Wolfram Koepf + + """ + from sympy.polys import RootSum, apart + from sympy.integrals import integrate + + diff = f + ds = [] # list of diff + + for i in range(order + 1): + if i: + diff = diff.diff(x) + + if diff.is_rational_function(x): + coeff, sep = S.Zero, S.Zero + + terms = apart(diff, x, full=full) + if terms.has(RootSum): + terms = terms.doit() + + for t in Add.make_args(terms): + num, den = t.as_numer_denom() + if not den.has(x): + sep += t + else: + if isinstance(den, Mul): + # m*(n*x - a)**j -> (n*x - a)**j + ind = den.as_independent(x) + den = ind[1] + num /= ind[0] + + # (n*x - a)**j -> (x - b) + den, j = den.as_base_exp() + a, xterm = den.as_coeff_add(x) + + # term -> m/x**n + if not a: + sep += t + continue + + xc = xterm[0].coeff(x) + a /= -xc + num /= xc**j + + ak = ((-1)**j * num * + binomial(j + k - 1, k).rewrite(factorial) / + a**(j + k)) + coeff += ak + + # Hacky, better way? + if coeff.is_zero: + return None + if (coeff.has(x) or coeff.has(zoo) or coeff.has(oo) or + coeff.has(nan)): + return None + + for j in range(i): + coeff = (coeff / (k + j + 1)) + sep = integrate(sep, x) + sep += (ds.pop() - sep).limit(x, 0) # constant of integration + return (coeff.subs(k, k - i), sep, i) + + else: + ds.append(diff) + + return None + + +def rational_independent(terms, x): + """ + Returns a list of all the rationally independent terms. + + Examples + ======== + + >>> from sympy import sin, cos + >>> from sympy.series.formal import rational_independent + >>> from sympy.abc import x + + >>> rational_independent([cos(x), sin(x)], x) + [cos(x), sin(x)] + >>> rational_independent([x**2, sin(x), x*sin(x), x**3], x) + [x**3 + x**2, x*sin(x) + sin(x)] + """ + if not terms: + return [] + + ind = terms[0:1] + + for t in terms[1:]: + n = t.as_independent(x)[1] + for i, term in enumerate(ind): + d = term.as_independent(x)[1] + q = (n / d).cancel() + if q.is_rational_function(x): + ind[i] += t + break + else: + ind.append(t) + return ind + + +def simpleDE(f, x, g, order=4): + r""" + Generates simple DE. + + Explanation + =========== + + DE is of the form + + .. math:: + f^k(x) + \sum\limits_{j=0}^{k-1} A_j f^j(x) = 0 + + where :math:`A_j` should be rational function in x. + + Generates DE's upto order 4 (default). DE's can also have free parameters. + + By increasing order, higher order DE's can be found. + + Yields a tuple of (DE, order). 
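Since simpleDE above is a generator, callers usually take the first (lowest order) equation it yields; a brief consumption sketch (not part of the module, output shape paraphrased in the comment):

from sympy import Function, exp, symbols
from sympy.series.formal import simpleDE

x = symbols('x')
g = Function('g')

DE, order = next(simpleDE(exp(x), x, g))
print(order, DE)   # order 1: essentially Derivative(g(x), x) - g(x)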
+ """ + from sympy.solvers.solveset import linsolve + + a = symbols('a:%d' % (order)) + + def _makeDE(k): + eq = f.diff(x, k) + Add(*[a[i]*f.diff(x, i) for i in range(0, k)]) + DE = g(x).diff(x, k) + Add(*[a[i]*g(x).diff(x, i) for i in range(0, k)]) + return eq, DE + + found = False + for k in range(1, order + 1): + eq, DE = _makeDE(k) + eq = eq.expand() + terms = eq.as_ordered_terms() + ind = rational_independent(terms, x) + if found or len(ind) == k: + sol = dict(zip(a, (i for s in linsolve(ind, a[:k]) for i in s))) + if sol: + found = True + DE = DE.subs(sol) + DE = DE.as_numer_denom()[0] + DE = DE.factor().as_coeff_mul(Derivative)[1][0] + yield DE.collect(Derivative(g(x))), k + + +def exp_re(DE, r, k): + """Converts a DE with constant coefficients (explike) into a RE. + + Explanation + =========== + + Performs the substitution: + + .. math:: + f^j(x) \\to r(k + j) + + Normalises the terms so that lowest order of a term is always r(k). + + Examples + ======== + + >>> from sympy import Function, Derivative + >>> from sympy.series.formal import exp_re + >>> from sympy.abc import x, k + >>> f, r = Function('f'), Function('r') + + >>> exp_re(-f(x) + Derivative(f(x)), r, k) + -r(k) + r(k + 1) + >>> exp_re(Derivative(f(x), x) + Derivative(f(x), (x, 2)), r, k) + r(k) + r(k + 1) + + See Also + ======== + + sympy.series.formal.hyper_re + """ + RE = S.Zero + + g = DE.atoms(Function).pop() + + mini = None + for t in Add.make_args(DE): + coeff, d = t.as_independent(g) + if isinstance(d, Derivative): + j = d.derivative_count + else: + j = 0 + if mini is None or j < mini: + mini = j + RE += coeff * r(k + j) + if mini: + RE = RE.subs(k, k - mini) + return RE + + +def hyper_re(DE, r, k): + """ + Converts a DE into a RE. + + Explanation + =========== + + Performs the substitution: + + .. math:: + x^l f^j(x) \\to (k + 1 - l)_j . a_{k + j - l} + + Normalises the terms so that lowest order of a term is always r(k). 
+ + Examples + ======== + + >>> from sympy import Function, Derivative + >>> from sympy.series.formal import hyper_re + >>> from sympy.abc import x, k + >>> f, r = Function('f'), Function('r') + + >>> hyper_re(-f(x) + Derivative(f(x)), r, k) + (k + 1)*r(k + 1) - r(k) + >>> hyper_re(-x*f(x) + Derivative(f(x), (x, 2)), r, k) + (k + 2)*(k + 3)*r(k + 3) - r(k) + + See Also + ======== + + sympy.series.formal.exp_re + """ + RE = S.Zero + + g = DE.atoms(Function).pop() + x = g.atoms(Symbol).pop() + + mini = None + for t in Add.make_args(DE.expand()): + coeff, d = t.as_independent(g) + c, v = coeff.as_independent(x) + l = v.as_coeff_exponent(x)[1] + if isinstance(d, Derivative): + j = d.derivative_count + else: + j = 0 + RE += c * rf(k + 1 - l, j) * r(k + j - l) + if mini is None or j - l < mini: + mini = j - l + + RE = RE.subs(k, k - mini) + + m = Wild('m') + return RE.collect(r(k + m)) + + +def _transformation_a(f, x, P, Q, k, m, shift): + f *= x**(-shift) + P = P.subs(k, k + shift) + Q = Q.subs(k, k + shift) + return f, P, Q, m + + +def _transformation_c(f, x, P, Q, k, m, scale): + f = f.subs(x, x**scale) + P = P.subs(k, k / scale) + Q = Q.subs(k, k / scale) + m *= scale + return f, P, Q, m + + +def _transformation_e(f, x, P, Q, k, m): + f = f.diff(x) + P = P.subs(k, k + 1) * (k + m + 1) + Q = Q.subs(k, k + 1) * (k + 1) + return f, P, Q, m + + +def _apply_shift(sol, shift): + return [(res, cond + shift) for res, cond in sol] + + +def _apply_scale(sol, scale): + return [(res, cond / scale) for res, cond in sol] + + +def _apply_integrate(sol, x, k): + return [(res / ((cond + 1)*(cond.as_coeff_Add()[1].coeff(k))), cond + 1) + for res, cond in sol] + + +def _compute_formula(f, x, P, Q, k, m, k_max): + """Computes the formula for f.""" + from sympy.polys import roots + + sol = [] + for i in range(k_max + 1, k_max + m + 1): + if (i < 0) == True: + continue + r = f.diff(x, i).limit(x, 0) / factorial(i) + if r.is_zero: + continue + + kterm = m*k + i + res = r + + p = P.subs(k, kterm) + q = Q.subs(k, kterm) + c1 = p.subs(k, 1/k).leadterm(k)[0] + c2 = q.subs(k, 1/k).leadterm(k)[0] + res *= (-c1 / c2)**k + + res *= Mul(*[rf(-r, k)**mul for r, mul in roots(p, k).items()]) + res /= Mul(*[rf(-r, k)**mul for r, mul in roots(q, k).items()]) + + sol.append((res, kterm)) + + return sol + + +def _rsolve_hypergeometric(f, x, P, Q, k, m): + """ + Recursive wrapper to rsolve_hypergeometric. + + Explanation + =========== + + Returns a Tuple of (formula, series independent terms, + maximum power of x in independent terms) if successful + otherwise ``None``. + + See :func:`rsolve_hypergeometric` for details. 
+ """ + from sympy.polys import lcm, roots + from sympy.integrals import integrate + + # transformation - c + proots, qroots = roots(P, k), roots(Q, k) + all_roots = dict(proots) + all_roots.update(qroots) + scale = lcm([r.as_numer_denom()[1] for r, t in all_roots.items() + if r.is_rational]) + f, P, Q, m = _transformation_c(f, x, P, Q, k, m, scale) + + # transformation - a + qroots = roots(Q, k) + if qroots: + k_min = Min(*qroots.keys()) + else: + k_min = S.Zero + shift = k_min + m + f, P, Q, m = _transformation_a(f, x, P, Q, k, m, shift) + + l = (x*f).limit(x, 0) + if not isinstance(l, Limit) and l != 0: # Ideally should only be l != 0 + return None + + qroots = roots(Q, k) + if qroots: + k_max = Max(*qroots.keys()) + else: + k_max = S.Zero + + ind, mp = S.Zero, -oo + for i in range(k_max + m + 1): + r = f.diff(x, i).limit(x, 0) / factorial(i) + if r.is_finite is False: + old_f = f + f, P, Q, m = _transformation_a(f, x, P, Q, k, m, i) + f, P, Q, m = _transformation_e(f, x, P, Q, k, m) + sol, ind, mp = _rsolve_hypergeometric(f, x, P, Q, k, m) + sol = _apply_integrate(sol, x, k) + sol = _apply_shift(sol, i) + ind = integrate(ind, x) + ind += (old_f - ind).limit(x, 0) # constant of integration + mp += 1 + return sol, ind, mp + elif r: + ind += r*x**(i + shift) + pow_x = Rational((i + shift), scale) + if pow_x > mp: + mp = pow_x # maximum power of x + ind = ind.subs(x, x**(1/scale)) + + sol = _compute_formula(f, x, P, Q, k, m, k_max) + sol = _apply_shift(sol, shift) + sol = _apply_scale(sol, scale) + + return sol, ind, mp + + +def rsolve_hypergeometric(f, x, P, Q, k, m): + """ + Solves RE of hypergeometric type. + + Explanation + =========== + + Attempts to solve RE of the form + + Q(k)*a(k + m) - P(k)*a(k) + + Transformations that preserve Hypergeometric type: + + a. x**n*f(x): b(k + m) = R(k - n)*b(k) + b. f(A*x): b(k + m) = A**m*R(k)*b(k) + c. f(x**n): b(k + n*m) = R(k/n)*b(k) + d. f(x**(1/m)): b(k + 1) = R(k*m)*b(k) + e. f'(x): b(k + m) = ((k + m + 1)/(k + 1))*R(k + 1)*b(k) + + Some of these transformations have been used to solve the RE. + + Returns + ======= + + formula : Expr + ind : Expr + Independent terms. + order : int + + Examples + ======== + + >>> from sympy import exp, ln, S + >>> from sympy.series.formal import rsolve_hypergeometric as rh + >>> from sympy.abc import x, k + + >>> rh(exp(x), x, -S.One, (k + 1), k, 1) + (Piecewise((1/factorial(k), Eq(Mod(k, 1), 0)), (0, True)), 1, 1) + + >>> rh(ln(1 + x), x, k**2, k*(k + 1), k, 1) + (Piecewise(((-1)**(k - 1)*factorial(k - 1)/RisingFactorial(2, k - 1), + Eq(Mod(k, 1), 0)), (0, True)), x, 2) + + References + ========== + + .. [1] Formal Power Series - Dominik Gruntz, Wolfram Koepf + .. 
[2] Power Series in Computer Algebra - Wolfram Koepf + """ + result = _rsolve_hypergeometric(f, x, P, Q, k, m) + + if result is None: + return None + + sol_list, ind, mp = result + + sol_dict = defaultdict(lambda: S.Zero) + for res, cond in sol_list: + j, mk = cond.as_coeff_Add() + c = mk.coeff(k) + + if j.is_integer is False: + res *= x**frac(j) + j = floor(j) + + res = res.subs(k, (k - j) / c) + cond = Eq(k % c, j % c) + sol_dict[cond] += res # Group together formula for same conditions + + sol = [] + for cond, res in sol_dict.items(): + sol.append((res, cond)) + sol.append((S.Zero, True)) + sol = Piecewise(*sol) + + if mp is -oo: + s = S.Zero + elif mp.is_integer is False: + s = ceiling(mp) + else: + s = mp + 1 + + # save all the terms of + # form 1/x**k in ind + if s < 0: + ind += sum(sequence(sol * x**k, (k, s, -1))) + s = S.Zero + + return (sol, ind, s) + + +def _solve_hyper_RE(f, x, RE, g, k): + """See docstring of :func:`rsolve_hypergeometric` for details.""" + terms = Add.make_args(RE) + + if len(terms) == 2: + gs = list(RE.atoms(Function)) + P, Q = map(RE.coeff, gs) + m = gs[1].args[0] - gs[0].args[0] + if m < 0: + P, Q = Q, P + m = abs(m) + return rsolve_hypergeometric(f, x, P, Q, k, m) + + +def _solve_explike_DE(f, x, DE, g, k): + """Solves DE with constant coefficients.""" + from sympy.solvers import rsolve + + for t in Add.make_args(DE): + coeff, d = t.as_independent(g) + if coeff.free_symbols: + return + + RE = exp_re(DE, g, k) + + init = {} + for i in range(len(Add.make_args(RE))): + if i: + f = f.diff(x) + init[g(k).subs(k, i)] = f.limit(x, 0) + + sol = rsolve(RE, g(k), init) + + if sol: + return (sol / factorial(k), S.Zero, S.Zero) + + +def _solve_simple(f, x, DE, g, k): + """Converts DE into RE and solves using :func:`rsolve`.""" + from sympy.solvers import rsolve + + RE = hyper_re(DE, g, k) + + init = {} + for i in range(len(Add.make_args(RE))): + if i: + f = f.diff(x) + init[g(k).subs(k, i)] = f.limit(x, 0) / factorial(i) + + sol = rsolve(RE, g(k), init) + + if sol: + return (sol, S.Zero, S.Zero) + + +def _transform_explike_DE(DE, g, x, order, syms): + """Converts DE with free parameters into DE with constant coefficients.""" + from sympy.solvers.solveset import linsolve + + eq = [] + highest_coeff = DE.coeff(Derivative(g(x), x, order)) + for i in range(order): + coeff = DE.coeff(Derivative(g(x), x, i)) + coeff = (coeff / highest_coeff).expand().collect(x) + for t in Add.make_args(coeff): + eq.append(t) + temp = [] + for e in eq: + if e.has(x): + break + elif e.has(Symbol): + temp.append(e) + else: + eq = temp + if eq: + sol = dict(zip(syms, (i for s in linsolve(eq, list(syms)) for i in s))) + if sol: + DE = DE.subs(sol) + DE = DE.factor().as_coeff_mul(Derivative)[1][0] + DE = DE.collect(Derivative(g(x))) + return DE + + +def _transform_DE_RE(DE, g, k, order, syms): + """Converts DE with free parameters into RE of hypergeometric type.""" + from sympy.solvers.solveset import linsolve + + RE = hyper_re(DE, g, k) + + eq = [] + for i in range(1, order): + coeff = RE.coeff(g(k + i)) + eq.append(coeff) + sol = dict(zip(syms, (i for s in linsolve(eq, list(syms)) for i in s))) + if sol: + m = Wild('m') + RE = RE.subs(sol) + RE = RE.factor().as_numer_denom()[0].collect(g(k + m)) + RE = RE.as_coeff_mul(g)[1][0] + for i in range(order): # smallest order should be g(k) + if RE.coeff(g(k + i)) and i: + RE = RE.subs(k, k - i) + break + return RE + + +def solve_de(f, x, DE, order, g, k): + """ + Solves the DE. 
+ + Explanation + =========== + + Tries to solve DE by either converting into a RE containing two terms or + converting into a DE having constant coefficients. + + Returns + ======= + + formula : Expr + ind : Expr + Independent terms. + order : int + + Examples + ======== + + >>> from sympy import Derivative as D, Function + >>> from sympy import exp, ln + >>> from sympy.series.formal import solve_de + >>> from sympy.abc import x, k + >>> f = Function('f') + + >>> solve_de(exp(x), x, D(f(x), x) - f(x), 1, f, k) + (Piecewise((1/factorial(k), Eq(Mod(k, 1), 0)), (0, True)), 1, 1) + + >>> solve_de(ln(1 + x), x, (x + 1)*D(f(x), x, 2) + D(f(x)), 2, f, k) + (Piecewise(((-1)**(k - 1)*factorial(k - 1)/RisingFactorial(2, k - 1), + Eq(Mod(k, 1), 0)), (0, True)), x, 2) + """ + sol = None + syms = DE.free_symbols.difference({g, x}) + + if syms: + RE = _transform_DE_RE(DE, g, k, order, syms) + else: + RE = hyper_re(DE, g, k) + if not RE.free_symbols.difference({k}): + sol = _solve_hyper_RE(f, x, RE, g, k) + + if sol: + return sol + + if syms: + DE = _transform_explike_DE(DE, g, x, order, syms) + if not DE.free_symbols.difference({x}): + sol = _solve_explike_DE(f, x, DE, g, k) + + if sol: + return sol + + +def hyper_algorithm(f, x, k, order=4): + """ + Hypergeometric algorithm for computing Formal Power Series. + + Explanation + =========== + + Steps: + * Generates DE + * Convert the DE into RE + * Solves the RE + + Examples + ======== + + >>> from sympy import exp, ln + >>> from sympy.series.formal import hyper_algorithm + + >>> from sympy.abc import x, k + + >>> hyper_algorithm(exp(x), x, k) + (Piecewise((1/factorial(k), Eq(Mod(k, 1), 0)), (0, True)), 1, 1) + + >>> hyper_algorithm(ln(1 + x), x, k) + (Piecewise(((-1)**(k - 1)*factorial(k - 1)/RisingFactorial(2, k - 1), + Eq(Mod(k, 1), 0)), (0, True)), x, 2) + + See Also + ======== + + sympy.series.formal.simpleDE + sympy.series.formal.solve_de + """ + g = Function('g') + + des = [] # list of DE's + sol = None + for DE, i in simpleDE(f, x, g, order): + if DE is not None: + sol = solve_de(f, x, DE, i, g, k) + if sol: + return sol + if not DE.free_symbols.difference({x}): + des.append(DE) + + # If nothing works + # Try plain rsolve + for DE in des: + sol = _solve_simple(f, x, DE, g, k) + if sol: + return sol + + +def _compute_fps(f, x, x0, dir, hyper, order, rational, full): + """Recursive wrapper to compute fps. + + See :func:`compute_fps` for details. + """ + if x0 in [S.Infinity, S.NegativeInfinity]: + dir = S.One if x0 is S.Infinity else -S.One + temp = f.subs(x, 1/x) + result = _compute_fps(temp, x, 0, dir, hyper, order, rational, full) + if result is None: + return None + return (result[0], result[1].subs(x, 1/x), result[2].subs(x, 1/x)) + elif x0 or dir == -S.One: + if dir == -S.One: + rep = -x + x0 + rep2 = -x + rep2b = x0 + else: + rep = x + x0 + rep2 = x + rep2b = -x0 + temp = f.subs(x, rep) + result = _compute_fps(temp, x, 0, S.One, hyper, order, rational, full) + if result is None: + return None + return (result[0], result[1].subs(x, rep2 + rep2b), + result[2].subs(x, rep2 + rep2b)) + + if f.is_polynomial(x): + k = Dummy('k') + ak = sequence(Coeff(f, x, k), (k, 1, oo)) + xk = sequence(x**k, (k, 0, oo)) + ind = f.coeff(x, 0) + return ak, xk, ind + + # Break instances of Add + # this allows application of different + # algorithms on different terms increasing the + # range of admissible functions. 
+ if isinstance(f, Add): + result = False + ak = sequence(S.Zero, (0, oo)) + ind, xk = S.Zero, None + for t in Add.make_args(f): + res = _compute_fps(t, x, 0, S.One, hyper, order, rational, full) + if res: + if not result: + result = True + xk = res[1] + if res[0].start > ak.start: + seq = ak + s, f = ak.start, res[0].start + else: + seq = res[0] + s, f = res[0].start, ak.start + save = Add(*[z[0]*z[1] for z in zip(seq[0:(f - s)], xk[s:f])]) + ak += res[0] + ind += res[2] + save + else: + ind += t + if result: + return ak, xk, ind + return None + + # The symbolic term - symb, if present, is being separated from the function + # Otherwise symb is being set to S.One + syms = f.free_symbols.difference({x}) + (f, symb) = expand(f).as_independent(*syms) + + result = None + + # from here on it's x0=0 and dir=1 handling + k = Dummy('k') + if rational: + result = rational_algorithm(f, x, k, order, full) + + if result is None and hyper: + result = hyper_algorithm(f, x, k, order) + + if result is None: + return None + + from sympy.simplify.powsimp import powsimp + if symb.is_zero: + symb = S.One + else: + symb = powsimp(symb) + ak = sequence(result[0], (k, result[2], oo)) + xk_formula = powsimp(x**k * symb) + xk = sequence(xk_formula, (k, 0, oo)) + ind = powsimp(result[1] * symb) + + return ak, xk, ind + + +def compute_fps(f, x, x0=0, dir=1, hyper=True, order=4, rational=True, + full=False): + """ + Computes the formula for Formal Power Series of a function. + + Explanation + =========== + + Tries to compute the formula by applying the following techniques + (in order): + + * rational_algorithm + * Hypergeometric algorithm + + Parameters + ========== + + x : Symbol + x0 : number, optional + Point to perform series expansion about. Default is 0. + dir : {1, -1, '+', '-'}, optional + If dir is 1 or '+' the series is calculated from the right and + for -1 or '-' the series is calculated from the left. For smooth + functions this flag will not alter the results. Default is 1. + hyper : {True, False}, optional + Set hyper to False to skip the hypergeometric algorithm. + By default it is set to False. + order : int, optional + Order of the derivative of ``f``, Default is 4. + rational : {True, False}, optional + Set rational to False to skip rational algorithm. By default it is set + to True. + full : {True, False}, optional + Set full to True to increase the range of rational algorithm. + See :func:`rational_algorithm` for details. By default it is set to + False. + + Returns + ======= + + ak : sequence + Sequence of coefficients. + xk : sequence + Sequence of powers of x. + ind : Expr + Independent terms. + mul : Pow + Common terms. + + See Also + ======== + + sympy.series.formal.rational_algorithm + sympy.series.formal.hyper_algorithm + """ + f = sympify(f) + x = sympify(x) + + if not f.has(x): + return None + + x0 = sympify(x0) + + if dir == '+': + dir = S.One + elif dir == '-': + dir = -S.One + elif dir not in [S.One, -S.One]: + raise ValueError("Dir must be '+' or '-'") + else: + dir = sympify(dir) + + return _compute_fps(f, x, x0, dir, hyper, order, rational, full) + + +class Coeff(Function): + """ + Coeff(p, x, n) represents the nth coefficient of the polynomial p in x + """ + @classmethod + def eval(cls, p, x, n): + if p.is_polynomial(x) and n.is_integer: + return p.coeff(x, n) + + +class FormalPowerSeries(SeriesBase): + """ + Represents Formal Power Series of a function. + + Explanation + =========== + + No computation is performed. This class should only to be used to represent + a series. 
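A minimal usage sketch of the ``Coeff`` helper defined above (assuming standard SymPy evaluation; the value shown is the expected coefficient of ``x``, not output verified against this exact revision):

    >>> from sympy.series.formal import Coeff
    >>> from sympy.abc import x
    >>> Coeff(x**2 + 3*x + 5, x, 1)
    3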
No checks are performed. + + For computing a series use :func:`fps`. + + See Also + ======== + + sympy.series.formal.fps + """ + def __new__(cls, *args): + args = map(sympify, args) + return Expr.__new__(cls, *args) + + def __init__(self, *args): + ak = args[4][0] + k = ak.variables[0] + self.ak_seq = sequence(ak.formula, (k, 1, oo)) + self.fact_seq = sequence(factorial(k), (k, 1, oo)) + self.bell_coeff_seq = self.ak_seq * self.fact_seq + self.sign_seq = sequence((-1, 1), (k, 1, oo)) + + @property + def function(self): + return self.args[0] + + @property + def x(self): + return self.args[1] + + @property + def x0(self): + return self.args[2] + + @property + def dir(self): + return self.args[3] + + @property + def ak(self): + return self.args[4][0] + + @property + def xk(self): + return self.args[4][1] + + @property + def ind(self): + return self.args[4][2] + + @property + def interval(self): + return Interval(0, oo) + + @property + def start(self): + return self.interval.inf + + @property + def stop(self): + return self.interval.sup + + @property + def length(self): + return oo + + @property + def infinite(self): + """Returns an infinite representation of the series""" + from sympy.concrete import Sum + ak, xk = self.ak, self.xk + k = ak.variables[0] + inf_sum = Sum(ak.formula * xk.formula, (k, ak.start, ak.stop)) + + return self.ind + inf_sum + + def _get_pow_x(self, term): + """Returns the power of x in a term.""" + xterm, pow_x = term.as_independent(self.x)[1].as_base_exp() + if not xterm.has(self.x): + return S.Zero + return pow_x + + def polynomial(self, n=6): + """ + Truncated series as polynomial. + + Explanation + =========== + + Returns series expansion of ``f`` upto order ``O(x**n)`` + as a polynomial(without ``O`` term). + """ + terms = [] + sym = self.free_symbols + for i, t in enumerate(self): + xp = self._get_pow_x(t) + if xp.has(*sym): + xp = xp.as_coeff_add(*sym)[0] + if xp >= n: + break + elif xp.is_integer is True and i == n + 1: + break + elif t is not S.Zero: + terms.append(t) + + return Add(*terms) + + def truncate(self, n=6): + """ + Truncated series. + + Explanation + =========== + + Returns truncated series expansion of f upto + order ``O(x**n)``. + + If n is ``None``, returns an infinite iterator. 
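A small sketch of the iterator behaviour described above, using ``fps`` from this module (the listed terms are the expected leading series terms of ``exp(x)``, assuming the usual SymPy output):

    >>> from sympy import fps, exp
    >>> from sympy.abc import x
    >>> terms = fps(exp(x)).truncate(n=None)
    >>> [next(terms) for _ in range(4)]
    [1, x, x**2/2, x**3/6]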
+ """ + if n is None: + return iter(self) + + x, x0 = self.x, self.x0 + pt_xk = self.xk.coeff(n) + if x0 is S.NegativeInfinity: + x0 = S.Infinity + + return self.polynomial(n) + Order(pt_xk, (x, x0)) + + def zero_coeff(self): + return self._eval_term(0) + + def _eval_term(self, pt): + try: + pt_xk = self.xk.coeff(pt) + pt_ak = self.ak.coeff(pt).simplify() # Simplify the coefficients + except IndexError: + term = S.Zero + else: + term = (pt_ak * pt_xk) + + if self.ind: + ind = S.Zero + sym = self.free_symbols + for t in Add.make_args(self.ind): + pow_x = self._get_pow_x(t) + if pow_x.has(*sym): + pow_x = pow_x.as_coeff_add(*sym)[0] + if pt == 0 and pow_x < 1: + ind += t + elif pow_x >= pt and pow_x < pt + 1: + ind += t + term += ind + + return term.collect(self.x) + + def _eval_subs(self, old, new): + x = self.x + if old.has(x): + return self + + def _eval_as_leading_term(self, x, logx=None, cdir=0): + for t in self: + if t is not S.Zero: + return t + + def _eval_derivative(self, x): + f = self.function.diff(x) + ind = self.ind.diff(x) + + pow_xk = self._get_pow_x(self.xk.formula) + ak = self.ak + k = ak.variables[0] + if ak.formula.has(x): + form = [] + for e, c in ak.formula.args: + temp = S.Zero + for t in Add.make_args(e): + pow_x = self._get_pow_x(t) + temp += t * (pow_xk + pow_x) + form.append((temp, c)) + form = Piecewise(*form) + ak = sequence(form.subs(k, k + 1), (k, ak.start - 1, ak.stop)) + else: + ak = sequence((ak.formula * pow_xk).subs(k, k + 1), + (k, ak.start - 1, ak.stop)) + + return self.func(f, self.x, self.x0, self.dir, (ak, self.xk, ind)) + + def integrate(self, x=None, **kwargs): + """ + Integrate Formal Power Series. + + Examples + ======== + + >>> from sympy import fps, sin, integrate + >>> from sympy.abc import x + >>> f = fps(sin(x)) + >>> f.integrate(x).truncate() + -1 + x**2/2 - x**4/24 + O(x**6) + >>> integrate(f, (x, 0, 1)) + 1 - cos(1) + """ + from sympy.integrals import integrate + + if x is None: + x = self.x + elif iterable(x): + return integrate(self.function, x) + + f = integrate(self.function, x) + ind = integrate(self.ind, x) + ind += (f - ind).limit(x, 0) # constant of integration + + pow_xk = self._get_pow_x(self.xk.formula) + ak = self.ak + k = ak.variables[0] + if ak.formula.has(x): + form = [] + for e, c in ak.formula.args: + temp = S.Zero + for t in Add.make_args(e): + pow_x = self._get_pow_x(t) + temp += t / (pow_xk + pow_x + 1) + form.append((temp, c)) + form = Piecewise(*form) + ak = sequence(form.subs(k, k - 1), (k, ak.start + 1, ak.stop)) + else: + ak = sequence((ak.formula / (pow_xk + 1)).subs(k, k - 1), + (k, ak.start + 1, ak.stop)) + + return self.func(f, self.x, self.x0, self.dir, (ak, self.xk, ind)) + + def product(self, other, x=None, n=6): + """ + Multiplies two Formal Power Series, using discrete convolution and + return the truncated terms upto specified order. + + Parameters + ========== + + n : Number, optional + Specifies the order of the term up to which the polynomial should + be truncated. 
+ + Examples + ======== + + >>> from sympy import fps, sin, exp + >>> from sympy.abc import x + >>> f1 = fps(sin(x)) + >>> f2 = fps(exp(x)) + + >>> f1.product(f2, x).truncate(4) + x + x**2 + x**3/3 + O(x**4) + + See Also + ======== + + sympy.discrete.convolutions + sympy.series.formal.FormalPowerSeriesProduct + + """ + + if n is None: + return iter(self) + + other = sympify(other) + + if not isinstance(other, FormalPowerSeries): + raise ValueError("Both series should be an instance of FormalPowerSeries" + " class.") + + if self.dir != other.dir: + raise ValueError("Both series should be calculated from the" + " same direction.") + elif self.x0 != other.x0: + raise ValueError("Both series should be calculated about the" + " same point.") + + elif self.x != other.x: + raise ValueError("Both series should have the same symbol.") + + return FormalPowerSeriesProduct(self, other) + + def coeff_bell(self, n): + r""" + self.coeff_bell(n) returns a sequence of Bell polynomials of the second kind. + Note that ``n`` should be a integer. + + The second kind of Bell polynomials (are sometimes called "partial" Bell + polynomials or incomplete Bell polynomials) are defined as + + .. math:: + B_{n,k}(x_1, x_2,\dotsc x_{n-k+1}) = + \sum_{j_1+j_2+j_2+\dotsb=k \atop j_1+2j_2+3j_2+\dotsb=n} + \frac{n!}{j_1!j_2!\dotsb j_{n-k+1}!} + \left(\frac{x_1}{1!} \right)^{j_1} + \left(\frac{x_2}{2!} \right)^{j_2} \dotsb + \left(\frac{x_{n-k+1}}{(n-k+1)!} \right) ^{j_{n-k+1}}. + + * ``bell(n, k, (x1, x2, ...))`` gives Bell polynomials of the second kind, + `B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1})`. + + See Also + ======== + + sympy.functions.combinatorial.numbers.bell + + """ + + inner_coeffs = [bell(n, j, tuple(self.bell_coeff_seq[:n-j+1])) for j in range(1, n+1)] + + k = Dummy('k') + return sequence(tuple(inner_coeffs), (k, 1, oo)) + + def compose(self, other, x=None, n=6): + r""" + Returns the truncated terms of the formal power series of the composed function, + up to specified ``n``. + + Explanation + =========== + + If ``f`` and ``g`` are two formal power series of two different functions, + then the coefficient sequence ``ak`` of the composed formal power series `fp` + will be as follows. + + .. math:: + \sum\limits_{k=0}^{n} b_k B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1}) + + Parameters + ========== + + n : Number, optional + Specifies the order of the term up to which the polynomial should + be truncated. + + Examples + ======== + + >>> from sympy import fps, sin, exp + >>> from sympy.abc import x + >>> f1 = fps(exp(x)) + >>> f2 = fps(sin(x)) + + >>> f1.compose(f2, x).truncate() + 1 + x + x**2/2 - x**4/8 - x**5/15 + O(x**6) + + >>> f1.compose(f2, x).truncate(8) + 1 + x + x**2/2 - x**4/8 - x**5/15 - x**6/240 + x**7/90 + O(x**8) + + See Also + ======== + + sympy.functions.combinatorial.numbers.bell + sympy.series.formal.FormalPowerSeriesCompose + + References + ========== + + .. [1] Comtet, Louis: Advanced combinatorics; the art of finite and infinite expansions. Reidel, 1974. 
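A brief sketch of the guard implemented in ``compose`` below: composing with an inner series whose constant term is nonzero is rejected (the traceback text assumes the error message raised in the code that follows):

    >>> from sympy import fps, exp, cos
    >>> from sympy.abc import x
    >>> fps(exp(x)).compose(fps(cos(x)), x)
    Traceback (most recent call last):
    ...
    ValueError: The formal power series of the inner function should not have any constant coefficient term.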
+ + """ + + if n is None: + return iter(self) + + other = sympify(other) + + if not isinstance(other, FormalPowerSeries): + raise ValueError("Both series should be an instance of FormalPowerSeries" + " class.") + + if self.dir != other.dir: + raise ValueError("Both series should be calculated from the" + " same direction.") + elif self.x0 != other.x0: + raise ValueError("Both series should be calculated about the" + " same point.") + + elif self.x != other.x: + raise ValueError("Both series should have the same symbol.") + + if other._eval_term(0).as_coeff_mul(other.x)[0] is not S.Zero: + raise ValueError("The formal power series of the inner function should not have any " + "constant coefficient term.") + + return FormalPowerSeriesCompose(self, other) + + def inverse(self, x=None, n=6): + r""" + Returns the truncated terms of the inverse of the formal power series, + up to specified ``n``. + + Explanation + =========== + + If ``f`` and ``g`` are two formal power series of two different functions, + then the coefficient sequence ``ak`` of the composed formal power series ``fp`` + will be as follows. + + .. math:: + \sum\limits_{k=0}^{n} (-1)^{k} x_0^{-k-1} B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1}) + + Parameters + ========== + + n : Number, optional + Specifies the order of the term up to which the polynomial should + be truncated. + + Examples + ======== + + >>> from sympy import fps, exp, cos + >>> from sympy.abc import x + >>> f1 = fps(exp(x)) + >>> f2 = fps(cos(x)) + + >>> f1.inverse(x).truncate() + 1 - x + x**2/2 - x**3/6 + x**4/24 - x**5/120 + O(x**6) + + >>> f2.inverse(x).truncate(8) + 1 + x**2/2 + 5*x**4/24 + 61*x**6/720 + O(x**8) + + See Also + ======== + + sympy.functions.combinatorial.numbers.bell + sympy.series.formal.FormalPowerSeriesInverse + + References + ========== + + .. [1] Comtet, Louis: Advanced combinatorics; the art of finite and infinite expansions. Reidel, 1974. 
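Similarly, a sketch of the guard in ``inverse`` below: a series with zero constant term has no formal power series inverse (the expected error, matching the message raised in the code that follows):

    >>> from sympy import fps, sin
    >>> from sympy.abc import x
    >>> fps(sin(x)).inverse(x)
    Traceback (most recent call last):
    ...
    ValueError: Constant coefficient should exist for an inverse of a formal power series to exist.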
+ + """ + + if n is None: + return iter(self) + + if self._eval_term(0).is_zero: + raise ValueError("Constant coefficient should exist for an inverse of a formal" + " power series to exist.") + + return FormalPowerSeriesInverse(self) + + def __add__(self, other): + other = sympify(other) + + if isinstance(other, FormalPowerSeries): + if self.dir != other.dir: + raise ValueError("Both series should be calculated from the" + " same direction.") + elif self.x0 != other.x0: + raise ValueError("Both series should be calculated about the" + " same point.") + + x, y = self.x, other.x + f = self.function + other.function.subs(y, x) + + if self.x not in f.free_symbols: + return f + + ak = self.ak + other.ak + if self.ak.start > other.ak.start: + seq = other.ak + s, e = other.ak.start, self.ak.start + else: + seq = self.ak + s, e = self.ak.start, other.ak.start + save = Add(*[z[0]*z[1] for z in zip(seq[0:(e - s)], self.xk[s:e])]) + ind = self.ind + other.ind + save + + return self.func(f, x, self.x0, self.dir, (ak, self.xk, ind)) + + elif not other.has(self.x): + f = self.function + other + ind = self.ind + other + + return self.func(f, self.x, self.x0, self.dir, + (self.ak, self.xk, ind)) + + return Add(self, other) + + def __radd__(self, other): + return self.__add__(other) + + def __neg__(self): + return self.func(-self.function, self.x, self.x0, self.dir, + (-self.ak, self.xk, -self.ind)) + + def __sub__(self, other): + return self.__add__(-other) + + def __rsub__(self, other): + return (-self).__add__(other) + + def __mul__(self, other): + other = sympify(other) + + if other.has(self.x): + return Mul(self, other) + + f = self.function * other + ak = self.ak.coeff_mul(other) + ind = self.ind * other + + return self.func(f, self.x, self.x0, self.dir, (ak, self.xk, ind)) + + def __rmul__(self, other): + return self.__mul__(other) + + +class FiniteFormalPowerSeries(FormalPowerSeries): + """Base Class for Product, Compose and Inverse classes""" + + def __init__(self, *args): + pass + + @property + def ffps(self): + return self.args[0] + + @property + def gfps(self): + return self.args[1] + + @property + def f(self): + return self.ffps.function + + @property + def g(self): + return self.gfps.function + + @property + def infinite(self): + raise NotImplementedError("No infinite version for an object of" + " FiniteFormalPowerSeries class.") + + def _eval_terms(self, n): + raise NotImplementedError("(%s)._eval_terms()" % self) + + def _eval_term(self, pt): + raise NotImplementedError("By the current logic, one can get terms" + "upto a certain order, instead of getting term by term.") + + def polynomial(self, n): + return self._eval_terms(n) + + def truncate(self, n=6): + ffps = self.ffps + pt_xk = ffps.xk.coeff(n) + x, x0 = ffps.x, ffps.x0 + + return self.polynomial(n) + Order(pt_xk, (x, x0)) + + def _eval_derivative(self, x): + raise NotImplementedError + + def integrate(self, x): + raise NotImplementedError + + +class FormalPowerSeriesProduct(FiniteFormalPowerSeries): + """Represents the product of two formal power series of two functions. + + Explanation + =========== + + No computation is performed. Terms are calculated using a term by term logic, + instead of a point by point logic. + + There are two differences between a :obj:`FormalPowerSeries` object and a + :obj:`FormalPowerSeriesProduct` object. The first argument contains the two + functions involved in the product. Also, the coefficient sequence contains + both the coefficient sequence of the formal power series of the involved functions. 
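A short sketch of the ``__add__`` operator defined above, which adds two series coefficient-wise instead of recomputing a new expansion (the truncation shown is the expected sum of the sine and cosine series):

    >>> from sympy import fps, sin, cos
    >>> from sympy.abc import x
    >>> (fps(sin(x)) + fps(cos(x))).truncate()
    1 + x - x**2/2 - x**3/6 + x**4/24 + x**5/120 + O(x**6)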
+ + See Also + ======== + + sympy.series.formal.FormalPowerSeries + sympy.series.formal.FiniteFormalPowerSeries + + """ + + def __init__(self, *args): + ffps, gfps = self.ffps, self.gfps + + k = ffps.ak.variables[0] + self.coeff1 = sequence(ffps.ak.formula, (k, 0, oo)) + + k = gfps.ak.variables[0] + self.coeff2 = sequence(gfps.ak.formula, (k, 0, oo)) + + @property + def function(self): + """Function of the product of two formal power series.""" + return self.f * self.g + + def _eval_terms(self, n): + """ + Returns the first ``n`` terms of the product formal power series. + Term by term logic is implemented here. + + Examples + ======== + + >>> from sympy import fps, sin, exp + >>> from sympy.abc import x + >>> f1 = fps(sin(x)) + >>> f2 = fps(exp(x)) + >>> fprod = f1.product(f2, x) + + >>> fprod._eval_terms(4) + x**3/3 + x**2 + x + + See Also + ======== + + sympy.series.formal.FormalPowerSeries.product + + """ + coeff1, coeff2 = self.coeff1, self.coeff2 + + aks = convolution(coeff1[:n], coeff2[:n]) + + terms = [] + for i in range(0, n): + terms.append(aks[i] * self.ffps.xk.coeff(i)) + + return Add(*terms) + + +class FormalPowerSeriesCompose(FiniteFormalPowerSeries): + """ + Represents the composed formal power series of two functions. + + Explanation + =========== + + No computation is performed. Terms are calculated using a term by term logic, + instead of a point by point logic. + + There are two differences between a :obj:`FormalPowerSeries` object and a + :obj:`FormalPowerSeriesCompose` object. The first argument contains the outer + function and the inner function involved in the omposition. Also, the + coefficient sequence contains the generic sequence which is to be multiplied + by a custom ``bell_seq`` finite sequence. The finite terms will then be added up to + get the final terms. + + See Also + ======== + + sympy.series.formal.FormalPowerSeries + sympy.series.formal.FiniteFormalPowerSeries + + """ + + @property + def function(self): + """Function for the composed formal power series.""" + f, g, x = self.f, self.g, self.ffps.x + return f.subs(x, g) + + def _eval_terms(self, n): + """ + Returns the first `n` terms of the composed formal power series. + Term by term logic is implemented here. + + Explanation + =========== + + The coefficient sequence of the :obj:`FormalPowerSeriesCompose` object is the generic sequence. + It is multiplied by ``bell_seq`` to get a sequence, whose terms are added up to get + the final terms for the polynomial. + + Examples + ======== + + >>> from sympy import fps, sin, exp + >>> from sympy.abc import x + >>> f1 = fps(exp(x)) + >>> f2 = fps(sin(x)) + >>> fcomp = f1.compose(f2, x) + + >>> fcomp._eval_terms(6) + -x**5/15 - x**4/8 + x**2/2 + x + 1 + + >>> fcomp._eval_terms(8) + x**7/90 - x**6/240 - x**5/15 - x**4/8 + x**2/2 + x + 1 + + See Also + ======== + + sympy.series.formal.FormalPowerSeries.compose + sympy.series.formal.FormalPowerSeries.coeff_bell + + """ + + ffps, gfps = self.ffps, self.gfps + terms = [ffps.zero_coeff()] + + for i in range(1, n): + bell_seq = gfps.coeff_bell(i) + seq = (ffps.bell_coeff_seq * bell_seq) + terms.append(Add(*(seq[:i])) / ffps.fact_seq[i-1] * ffps.xk.coeff(i)) + + return Add(*terms) + + +class FormalPowerSeriesInverse(FiniteFormalPowerSeries): + """ + Represents the Inverse of a formal power series. + + Explanation + =========== + + No computation is performed. Terms are calculated using a term by term logic, + instead of a point by point logic. 
+ + There is a single difference between a :obj:`FormalPowerSeries` object and a + :obj:`FormalPowerSeriesInverse` object. The coefficient sequence contains the + generic sequence which is to be multiplied by a custom ``bell_seq`` finite sequence. + The finite terms will then be added up to get the final terms. + + See Also + ======== + + sympy.series.formal.FormalPowerSeries + sympy.series.formal.FiniteFormalPowerSeries + + """ + def __init__(self, *args): + ffps = self.ffps + k = ffps.xk.variables[0] + + inv = ffps.zero_coeff() + inv_seq = sequence(inv ** (-(k + 1)), (k, 1, oo)) + self.aux_seq = ffps.sign_seq * ffps.fact_seq * inv_seq + + @property + def function(self): + """Function for the inverse of a formal power series.""" + f = self.f + return 1 / f + + @property + def g(self): + raise ValueError("Only one function is considered while performing" + "inverse of a formal power series.") + + @property + def gfps(self): + raise ValueError("Only one function is considered while performing" + "inverse of a formal power series.") + + def _eval_terms(self, n): + """ + Returns the first ``n`` terms of the composed formal power series. + Term by term logic is implemented here. + + Explanation + =========== + + The coefficient sequence of the `FormalPowerSeriesInverse` object is the generic sequence. + It is multiplied by ``bell_seq`` to get a sequence, whose terms are added up to get + the final terms for the polynomial. + + Examples + ======== + + >>> from sympy import fps, exp, cos + >>> from sympy.abc import x + >>> f1 = fps(exp(x)) + >>> f2 = fps(cos(x)) + >>> finv1, finv2 = f1.inverse(), f2.inverse() + + >>> finv1._eval_terms(6) + -x**5/120 + x**4/24 - x**3/6 + x**2/2 - x + 1 + + >>> finv2._eval_terms(8) + 61*x**6/720 + 5*x**4/24 + x**2/2 + 1 + + See Also + ======== + + sympy.series.formal.FormalPowerSeries.inverse + sympy.series.formal.FormalPowerSeries.coeff_bell + + """ + ffps = self.ffps + terms = [ffps.zero_coeff()] + + for i in range(1, n): + bell_seq = ffps.coeff_bell(i) + seq = (self.aux_seq * bell_seq) + terms.append(Add(*(seq[:i])) / ffps.fact_seq[i-1] * ffps.xk.coeff(i)) + + return Add(*terms) + + +def fps(f, x=None, x0=0, dir=1, hyper=True, order=4, rational=True, full=False): + """ + Generates Formal Power Series of ``f``. + + Explanation + =========== + + Returns the formal series expansion of ``f`` around ``x = x0`` + with respect to ``x`` in the form of a ``FormalPowerSeries`` object. + + Formal Power Series is represented using an explicit formula + computed using different algorithms. + + See :func:`compute_fps` for the more details regarding the computation + of formula. + + Parameters + ========== + + x : Symbol, optional + If x is None and ``f`` is univariate, the univariate symbols will be + supplied, otherwise an error will be raised. + x0 : number, optional + Point to perform series expansion about. Default is 0. + dir : {1, -1, '+', '-'}, optional + If dir is 1 or '+' the series is calculated from the right and + for -1 or '-' the series is calculated from the left. For smooth + functions this flag will not alter the results. Default is 1. + hyper : {True, False}, optional + Set hyper to False to skip the hypergeometric algorithm. + By default it is set to False. + order : int, optional + Order of the derivative of ``f``, Default is 4. + rational : {True, False}, optional + Set rational to False to skip rational algorithm. By default it is set + to True. + full : {True, False}, optional + Set full to True to increase the range of rational algorithm. 
+ See :func:`rational_algorithm` for details. By default it is set to + False. + + Examples + ======== + + >>> from sympy import fps, ln, atan, sin + >>> from sympy.abc import x, n + + Rational Functions + + >>> fps(ln(1 + x)).truncate() + x - x**2/2 + x**3/3 - x**4/4 + x**5/5 + O(x**6) + + >>> fps(atan(x), full=True).truncate() + x - x**3/3 + x**5/5 + O(x**6) + + Symbolic Functions + + >>> fps(x**n*sin(x**2), x).truncate(8) + -x**(n + 6)/6 + x**(n + 2) + O(x**(n + 8)) + + See Also + ======== + + sympy.series.formal.FormalPowerSeries + sympy.series.formal.compute_fps + """ + f = sympify(f) + + if x is None: + free = f.free_symbols + if len(free) == 1: + x = free.pop() + elif not free: + return f + else: + raise NotImplementedError("multivariate formal power series") + + result = compute_fps(f, x, x0, dir, hyper, order, rational, full) + + if result is None: + return f + + return FormalPowerSeries(f, x, x0, dir, result) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/series/fourier.py b/llmeval-env/lib/python3.10/site-packages/sympy/series/fourier.py new file mode 100644 index 0000000000000000000000000000000000000000..298a83634da8392eded68236994b239a6a389328 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/series/fourier.py @@ -0,0 +1,808 @@ +"""Fourier Series""" + +from sympy.core.numbers import (oo, pi) +from sympy.core.symbol import Wild +from sympy.core.expr import Expr +from sympy.core.add import Add +from sympy.core.containers import Tuple +from sympy.core.singleton import S +from sympy.core.symbol import Dummy, Symbol +from sympy.core.sympify import sympify +from sympy.functions.elementary.trigonometric import sin, cos, sinc +from sympy.series.series_class import SeriesBase +from sympy.series.sequences import SeqFormula +from sympy.sets.sets import Interval +from sympy.utilities.iterables import is_sequence + + +def fourier_cos_seq(func, limits, n): + """Returns the cos sequence in a Fourier series""" + from sympy.integrals import integrate + x, L = limits[0], limits[2] - limits[1] + cos_term = cos(2*n*pi*x / L) + formula = 2 * cos_term * integrate(func * cos_term, limits) / L + a0 = formula.subs(n, S.Zero) / 2 + return a0, SeqFormula(2 * cos_term * integrate(func * cos_term, limits) + / L, (n, 1, oo)) + + +def fourier_sin_seq(func, limits, n): + """Returns the sin sequence in a Fourier series""" + from sympy.integrals import integrate + x, L = limits[0], limits[2] - limits[1] + sin_term = sin(2*n*pi*x / L) + return SeqFormula(2 * sin_term * integrate(func * sin_term, limits) + / L, (n, 1, oo)) + + +def _process_limits(func, limits): + """ + Limits should be of the form (x, start, stop). + x should be a symbol. Both start and stop should be bounded. + + Explanation + =========== + + * If x is not given, x is determined from func. + * If limits is None. Limit of the form (x, -pi, pi) is returned. + + Examples + ======== + + >>> from sympy.series.fourier import _process_limits as pari + >>> from sympy.abc import x + >>> pari(x**2, (x, -2, 2)) + (x, -2, 2) + >>> pari(x**2, (-2, 2)) + (x, -2, 2) + >>> pari(x**2, None) + (x, -pi, pi) + """ + def _find_x(func): + free = func.free_symbols + if len(free) == 1: + return free.pop() + elif not free: + return Dummy('k') + else: + raise ValueError( + " specify dummy variables for %s. If the function contains" + " more than one free symbol, a dummy variable should be" + " supplied explicitly e.g. 
FourierSeries(m*n**2, (n, -pi, pi))" + % func) + + x, start, stop = None, None, None + if limits is None: + x, start, stop = _find_x(func), -pi, pi + if is_sequence(limits, Tuple): + if len(limits) == 3: + x, start, stop = limits + elif len(limits) == 2: + x = _find_x(func) + start, stop = limits + + if not isinstance(x, Symbol) or start is None or stop is None: + raise ValueError('Invalid limits given: %s' % str(limits)) + + unbounded = [S.NegativeInfinity, S.Infinity] + if start in unbounded or stop in unbounded: + raise ValueError("Both the start and end value should be bounded") + + return sympify((x, start, stop)) + + +def finite_check(f, x, L): + + def check_fx(exprs, x): + return x not in exprs.free_symbols + + def check_sincos(_expr, x, L): + if isinstance(_expr, (sin, cos)): + sincos_args = _expr.args[0] + + if sincos_args.match(a*(pi/L)*x + b) is not None: + return True + else: + return False + + from sympy.simplify.fu import TR2, TR1, sincos_to_sum + _expr = sincos_to_sum(TR2(TR1(f))) + add_coeff = _expr.as_coeff_add() + + a = Wild('a', properties=[lambda k: k.is_Integer, lambda k: k != S.Zero, ]) + b = Wild('b', properties=[lambda k: x not in k.free_symbols, ]) + + for s in add_coeff[1]: + mul_coeffs = s.as_coeff_mul()[1] + for t in mul_coeffs: + if not (check_fx(t, x) or check_sincos(t, x, L)): + return False, f + + return True, _expr + + +class FourierSeries(SeriesBase): + r"""Represents Fourier sine/cosine series. + + Explanation + =========== + + This class only represents a fourier series. + No computation is performed. + + For how to compute Fourier series, see the :func:`fourier_series` + docstring. + + See Also + ======== + + sympy.series.fourier.fourier_series + """ + def __new__(cls, *args): + args = map(sympify, args) + return Expr.__new__(cls, *args) + + @property + def function(self): + return self.args[0] + + @property + def x(self): + return self.args[1][0] + + @property + def period(self): + return (self.args[1][1], self.args[1][2]) + + @property + def a0(self): + return self.args[2][0] + + @property + def an(self): + return self.args[2][1] + + @property + def bn(self): + return self.args[2][2] + + @property + def interval(self): + return Interval(0, oo) + + @property + def start(self): + return self.interval.inf + + @property + def stop(self): + return self.interval.sup + + @property + def length(self): + return oo + + @property + def L(self): + return abs(self.period[1] - self.period[0]) / 2 + + def _eval_subs(self, old, new): + x = self.x + if old.has(x): + return self + + def truncate(self, n=3): + """ + Return the first n nonzero terms of the series. + + If ``n`` is None return an iterator. + + Parameters + ========== + + n : int or None + Amount of non-zero terms in approximation or None. + + Returns + ======= + + Expr or iterator : + Approximation of function expanded into Fourier series. + + Examples + ======== + + >>> from sympy import fourier_series, pi + >>> from sympy.abc import x + >>> s = fourier_series(x, (x, -pi, pi)) + >>> s.truncate(4) + 2*sin(x) - sin(2*x) + 2*sin(3*x)/3 - sin(4*x)/2 + + See Also + ======== + + sympy.series.fourier.FourierSeries.sigma_approximation + """ + if n is None: + return iter(self) + + terms = [] + for t in self: + if len(terms) == n: + break + if t is not S.Zero: + terms.append(t) + + return Add(*terms) + + def sigma_approximation(self, n=3): + r""" + Return :math:`\sigma`-approximation of Fourier series with respect + to order n. 
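A quick sketch of the ``finite_check`` helper defined earlier in this file, which decides whether an expression is already a finite trigonometric sum over the given half-period ``L`` (the outputs are the expected ``(bool, expr)`` pairs):

    >>> from sympy import sin, pi
    >>> from sympy.series.fourier import finite_check
    >>> from sympy.abc import x
    >>> finite_check(sin(x), x, pi)
    (True, sin(x))
    >>> finite_check(x**2, x, pi)
    (False, x**2)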
+ + Explanation + =========== + + Sigma approximation adjusts a Fourier summation to eliminate the Gibbs + phenomenon which would otherwise occur at discontinuities. + A sigma-approximated summation for a Fourier series of a T-periodical + function can be written as + + .. math:: + s(\theta) = \frac{1}{2} a_0 + \sum _{k=1}^{m-1} + \operatorname{sinc} \Bigl( \frac{k}{m} \Bigr) \cdot + \left[ a_k \cos \Bigl( \frac{2\pi k}{T} \theta \Bigr) + + b_k \sin \Bigl( \frac{2\pi k}{T} \theta \Bigr) \right], + + where :math:`a_0, a_k, b_k, k=1,\ldots,{m-1}` are standard Fourier + series coefficients and + :math:`\operatorname{sinc} \Bigl( \frac{k}{m} \Bigr)` is a Lanczos + :math:`\sigma` factor (expressed in terms of normalized + :math:`\operatorname{sinc}` function). + + Parameters + ========== + + n : int + Highest order of the terms taken into account in approximation. + + Returns + ======= + + Expr : + Sigma approximation of function expanded into Fourier series. + + Examples + ======== + + >>> from sympy import fourier_series, pi + >>> from sympy.abc import x + >>> s = fourier_series(x, (x, -pi, pi)) + >>> s.sigma_approximation(4) + 2*sin(x)*sinc(pi/4) - 2*sin(2*x)/pi + 2*sin(3*x)*sinc(3*pi/4)/3 + + See Also + ======== + + sympy.series.fourier.FourierSeries.truncate + + Notes + ===== + + The behaviour of + :meth:`~sympy.series.fourier.FourierSeries.sigma_approximation` + is different from :meth:`~sympy.series.fourier.FourierSeries.truncate` + - it takes all nonzero terms of degree smaller than n, rather than + first n nonzero ones. + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Gibbs_phenomenon + .. [2] https://en.wikipedia.org/wiki/Sigma_approximation + """ + terms = [sinc(pi * i / n) * t for i, t in enumerate(self[:n]) + if t is not S.Zero] + return Add(*terms) + + def shift(self, s): + """ + Shift the function by a term independent of x. + + Explanation + =========== + + f(x) -> f(x) + s + + This is fast, if Fourier series of f(x) is already + computed. + + Examples + ======== + + >>> from sympy import fourier_series, pi + >>> from sympy.abc import x + >>> s = fourier_series(x**2, (x, -pi, pi)) + >>> s.shift(1).truncate() + -4*cos(x) + cos(2*x) + 1 + pi**2/3 + """ + s, x = sympify(s), self.x + + if x in s.free_symbols: + raise ValueError("'%s' should be independent of %s" % (s, x)) + + a0 = self.a0 + s + sfunc = self.function + s + + return self.func(sfunc, self.args[1], (a0, self.an, self.bn)) + + def shiftx(self, s): + """ + Shift x by a term independent of x. + + Explanation + =========== + + f(x) -> f(x + s) + + This is fast, if Fourier series of f(x) is already + computed. + + Examples + ======== + + >>> from sympy import fourier_series, pi + >>> from sympy.abc import x + >>> s = fourier_series(x**2, (x, -pi, pi)) + >>> s.shiftx(1).truncate() + -4*cos(x + 1) + cos(2*x + 2) + pi**2/3 + """ + s, x = sympify(s), self.x + + if x in s.free_symbols: + raise ValueError("'%s' should be independent of %s" % (s, x)) + + an = self.an.subs(x, x + s) + bn = self.bn.subs(x, x + s) + sfunc = self.function.subs(x, x + s) + + return self.func(sfunc, self.args[1], (self.a0, an, bn)) + + def scale(self, s): + """ + Scale the function by a term independent of x. + + Explanation + =========== + + f(x) -> s * f(x) + + This is fast, if Fourier series of f(x) is already + computed. 
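In the same spirit, two already-computed series can be combined with the ``__add__`` operator defined later in this class without re-evaluating any integrals; a small sketch (the printed form is expected to match the ``scale(2)`` example nearby):

    >>> from sympy import fourier_series, pi
    >>> from sympy.abc import x
    >>> s = fourier_series(x**2, (x, -pi, pi))
    >>> (s + s).truncate()
    -8*cos(x) + 2*cos(2*x) + 2*pi**2/3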
+ + Examples + ======== + + >>> from sympy import fourier_series, pi + >>> from sympy.abc import x + >>> s = fourier_series(x**2, (x, -pi, pi)) + >>> s.scale(2).truncate() + -8*cos(x) + 2*cos(2*x) + 2*pi**2/3 + """ + s, x = sympify(s), self.x + + if x in s.free_symbols: + raise ValueError("'%s' should be independent of %s" % (s, x)) + + an = self.an.coeff_mul(s) + bn = self.bn.coeff_mul(s) + a0 = self.a0 * s + sfunc = self.args[0] * s + + return self.func(sfunc, self.args[1], (a0, an, bn)) + + def scalex(self, s): + """ + Scale x by a term independent of x. + + Explanation + =========== + + f(x) -> f(s*x) + + This is fast, if Fourier series of f(x) is already + computed. + + Examples + ======== + + >>> from sympy import fourier_series, pi + >>> from sympy.abc import x + >>> s = fourier_series(x**2, (x, -pi, pi)) + >>> s.scalex(2).truncate() + -4*cos(2*x) + cos(4*x) + pi**2/3 + """ + s, x = sympify(s), self.x + + if x in s.free_symbols: + raise ValueError("'%s' should be independent of %s" % (s, x)) + + an = self.an.subs(x, x * s) + bn = self.bn.subs(x, x * s) + sfunc = self.function.subs(x, x * s) + + return self.func(sfunc, self.args[1], (self.a0, an, bn)) + + def _eval_as_leading_term(self, x, logx=None, cdir=0): + for t in self: + if t is not S.Zero: + return t + + def _eval_term(self, pt): + if pt == 0: + return self.a0 + return self.an.coeff(pt) + self.bn.coeff(pt) + + def __neg__(self): + return self.scale(-1) + + def __add__(self, other): + if isinstance(other, FourierSeries): + if self.period != other.period: + raise ValueError("Both the series should have same periods") + + x, y = self.x, other.x + function = self.function + other.function.subs(y, x) + + if self.x not in function.free_symbols: + return function + + an = self.an + other.an + bn = self.bn + other.bn + a0 = self.a0 + other.a0 + + return self.func(function, self.args[1], (a0, an, bn)) + + return Add(self, other) + + def __sub__(self, other): + return self.__add__(-other) + + +class FiniteFourierSeries(FourierSeries): + r"""Represents Finite Fourier sine/cosine series. + + For how to compute Fourier series, see the :func:`fourier_series` + docstring. + + Parameters + ========== + + f : Expr + Expression for finding fourier_series + + limits : ( x, start, stop) + x is the independent variable for the expression f + (start, stop) is the period of the fourier series + + exprs: (a0, an, bn) or Expr + a0 is the constant term a0 of the fourier series + an is a dictionary of coefficients of cos terms + an[k] = coefficient of cos(pi*(k/L)*x) + bn is a dictionary of coefficients of sin terms + bn[k] = coefficient of sin(pi*(k/L)*x) + + or exprs can be an expression to be converted to fourier form + + Methods + ======= + + This class is an extension of FourierSeries class. + Please refer to sympy.series.fourier.FourierSeries for + further information. 
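A minimal sketch of when ``fourier_series`` dispatches to this class: an input that is already a finite trigonometric sum over the interval is returned as a ``FiniteFourierSeries`` and truncates to itself (equality checks are used here to avoid depending on printing order):

    >>> from sympy import fourier_series, pi, sin, cos
    >>> from sympy.series.fourier import FiniteFourierSeries
    >>> from sympy.abc import x
    >>> s = fourier_series(sin(x) + cos(2*x), (x, -pi, pi))
    >>> isinstance(s, FiniteFourierSeries)
    True
    >>> s.truncate() == sin(x) + cos(2*x)
    True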
+ + See Also + ======== + + sympy.series.fourier.FourierSeries + sympy.series.fourier.fourier_series + """ + + def __new__(cls, f, limits, exprs): + f = sympify(f) + limits = sympify(limits) + exprs = sympify(exprs) + + if not (isinstance(exprs, Tuple) and len(exprs) == 3): # exprs is not of form (a0, an, bn) + # Converts the expression to fourier form + c, e = exprs.as_coeff_add() + from sympy.simplify.fu import TR10 + rexpr = c + Add(*[TR10(i) for i in e]) + a0, exp_ls = rexpr.expand(trig=False, power_base=False, power_exp=False, log=False).as_coeff_add() + + x = limits[0] + L = abs(limits[2] - limits[1]) / 2 + + a = Wild('a', properties=[lambda k: k.is_Integer, lambda k: k is not S.Zero, ]) + b = Wild('b', properties=[lambda k: x not in k.free_symbols, ]) + + an = {} + bn = {} + + # separates the coefficients of sin and cos terms in dictionaries an, and bn + for p in exp_ls: + t = p.match(b * cos(a * (pi / L) * x)) + q = p.match(b * sin(a * (pi / L) * x)) + if t: + an[t[a]] = t[b] + an.get(t[a], S.Zero) + elif q: + bn[q[a]] = q[b] + bn.get(q[a], S.Zero) + else: + a0 += p + + exprs = Tuple(a0, an, bn) + + return Expr.__new__(cls, f, limits, exprs) + + @property + def interval(self): + _length = 1 if self.a0 else 0 + _length += max(set(self.an.keys()).union(set(self.bn.keys()))) + 1 + return Interval(0, _length) + + @property + def length(self): + return self.stop - self.start + + def shiftx(self, s): + s, x = sympify(s), self.x + + if x in s.free_symbols: + raise ValueError("'%s' should be independent of %s" % (s, x)) + + _expr = self.truncate().subs(x, x + s) + sfunc = self.function.subs(x, x + s) + + return self.func(sfunc, self.args[1], _expr) + + def scale(self, s): + s, x = sympify(s), self.x + + if x in s.free_symbols: + raise ValueError("'%s' should be independent of %s" % (s, x)) + + _expr = self.truncate() * s + sfunc = self.function * s + + return self.func(sfunc, self.args[1], _expr) + + def scalex(self, s): + s, x = sympify(s), self.x + + if x in s.free_symbols: + raise ValueError("'%s' should be independent of %s" % (s, x)) + + _expr = self.truncate().subs(x, x * s) + sfunc = self.function.subs(x, x * s) + + return self.func(sfunc, self.args[1], _expr) + + def _eval_term(self, pt): + if pt == 0: + return self.a0 + + _term = self.an.get(pt, S.Zero) * cos(pt * (pi / self.L) * self.x) \ + + self.bn.get(pt, S.Zero) * sin(pt * (pi / self.L) * self.x) + return _term + + def __add__(self, other): + if isinstance(other, FourierSeries): + return other.__add__(fourier_series(self.function, self.args[1],\ + finite=False)) + elif isinstance(other, FiniteFourierSeries): + if self.period != other.period: + raise ValueError("Both the series should have same periods") + + x, y = self.x, other.x + function = self.function + other.function.subs(y, x) + + if self.x not in function.free_symbols: + return function + + return fourier_series(function, limits=self.args[1]) + + +def fourier_series(f, limits=None, finite=True): + r"""Computes the Fourier trigonometric series expansion. + + Explanation + =========== + + Fourier trigonometric series of $f(x)$ over the interval $(a, b)$ + is defined as: + + .. math:: + \frac{a_0}{2} + \sum_{n=1}^{\infty} + (a_n \cos(\frac{2n \pi x}{L}) + b_n \sin(\frac{2n \pi x}{L})) + + where the coefficients are: + + .. math:: + L = b - a + + .. math:: + a_0 = \frac{2}{L} \int_{a}^{b}{f(x) dx} + + .. math:: + a_n = \frac{2}{L} \int_{a}^{b}{f(x) \cos(\frac{2n \pi x}{L}) dx} + + .. 
math:: + b_n = \frac{2}{L} \int_{a}^{b}{f(x) \sin(\frac{2n \pi x}{L}) dx} + + The condition whether the function $f(x)$ given should be periodic + or not is more than necessary, because it is sufficient to consider + the series to be converging to $f(x)$ only in the given interval, + not throughout the whole real line. + + This also brings a lot of ease for the computation because + you do not have to make $f(x)$ artificially periodic by + wrapping it with piecewise, modulo operations, + but you can shape the function to look like the desired periodic + function only in the interval $(a, b)$, and the computed series will + automatically become the series of the periodic version of $f(x)$. + + This property is illustrated in the examples section below. + + Parameters + ========== + + limits : (sym, start, end), optional + *sym* denotes the symbol the series is computed with respect to. + + *start* and *end* denotes the start and the end of the interval + where the fourier series converges to the given function. + + Default range is specified as $-\pi$ and $\pi$. + + Returns + ======= + + FourierSeries + A symbolic object representing the Fourier trigonometric series. + + Examples + ======== + + Computing the Fourier series of $f(x) = x^2$: + + >>> from sympy import fourier_series, pi + >>> from sympy.abc import x + >>> f = x**2 + >>> s = fourier_series(f, (x, -pi, pi)) + >>> s1 = s.truncate(n=3) + >>> s1 + -4*cos(x) + cos(2*x) + pi**2/3 + + Shifting of the Fourier series: + + >>> s.shift(1).truncate() + -4*cos(x) + cos(2*x) + 1 + pi**2/3 + >>> s.shiftx(1).truncate() + -4*cos(x + 1) + cos(2*x + 2) + pi**2/3 + + Scaling of the Fourier series: + + >>> s.scale(2).truncate() + -8*cos(x) + 2*cos(2*x) + 2*pi**2/3 + >>> s.scalex(2).truncate() + -4*cos(2*x) + cos(4*x) + pi**2/3 + + Computing the Fourier series of $f(x) = x$: + + This illustrates how truncating to the higher order gives better + convergence. + + .. plot:: + :context: reset + :format: doctest + :include-source: True + + >>> from sympy import fourier_series, pi, plot + >>> from sympy.abc import x + >>> f = x + >>> s = fourier_series(f, (x, -pi, pi)) + >>> s1 = s.truncate(n = 3) + >>> s2 = s.truncate(n = 5) + >>> s3 = s.truncate(n = 7) + >>> p = plot(f, s1, s2, s3, (x, -pi, pi), show=False, legend=True) + + >>> p[0].line_color = (0, 0, 0) + >>> p[0].label = 'x' + >>> p[1].line_color = (0.7, 0.7, 0.7) + >>> p[1].label = 'n=3' + >>> p[2].line_color = (0.5, 0.5, 0.5) + >>> p[2].label = 'n=5' + >>> p[3].line_color = (0.3, 0.3, 0.3) + >>> p[3].label = 'n=7' + + >>> p.show() + + This illustrates how the series converges to different sawtooth + waves if the different ranges are specified. + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> s1 = fourier_series(x, (x, -1, 1)).truncate(10) + >>> s2 = fourier_series(x, (x, -pi, pi)).truncate(10) + >>> s3 = fourier_series(x, (x, 0, 1)).truncate(10) + >>> p = plot(x, s1, s2, s3, (x, -5, 5), show=False, legend=True) + + >>> p[0].line_color = (0, 0, 0) + >>> p[0].label = 'x' + >>> p[1].line_color = (0.7, 0.7, 0.7) + >>> p[1].label = '[-1, 1]' + >>> p[2].line_color = (0.5, 0.5, 0.5) + >>> p[2].label = '[-pi, pi]' + >>> p[3].line_color = (0.3, 0.3, 0.3) + >>> p[3].label = '[0, 1]' + + >>> p.show() + + Notes + ===== + + Computing Fourier series can be slow + due to the integration required in computing + an, bn. + + It is faster to compute Fourier series of a function + by using shifting and scaling on an already + computed Fourier series rather than computing + again. 
+ + e.g. If the Fourier series of ``x**2`` is known + the Fourier series of ``x**2 - 1`` can be found by shifting by ``-1``. + + See Also + ======== + + sympy.series.fourier.FourierSeries + + References + ========== + + .. [1] https://mathworld.wolfram.com/FourierSeries.html + """ + f = sympify(f) + + limits = _process_limits(f, limits) + x = limits[0] + + if x not in f.free_symbols: + return f + + if finite: + L = abs(limits[2] - limits[1]) / 2 + is_finite, res_f = finite_check(f, x, L) + if is_finite: + return FiniteFourierSeries(f, limits, res_f) + + n = Dummy('n') + center = (limits[1] + limits[2]) / 2 + if center.is_zero: + neg_f = f.subs(x, -x) + if f == neg_f: + a0, an = fourier_cos_seq(f, limits, n) + bn = SeqFormula(0, (1, oo)) + return FourierSeries(f, limits, (a0, an, bn)) + elif f == -neg_f: + a0 = S.Zero + an = SeqFormula(0, (1, oo)) + bn = fourier_sin_seq(f, limits, n) + return FourierSeries(f, limits, (a0, an, bn)) + a0, an = fourier_cos_seq(f, limits, n) + bn = fourier_sin_seq(f, limits, n) + return FourierSeries(f, limits, (a0, an, bn)) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/series/gruntz.py b/llmeval-env/lib/python3.10/site-packages/sympy/series/gruntz.py new file mode 100644 index 0000000000000000000000000000000000000000..a10405289761bcad0236bbcb130c00e4023cf9b0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/series/gruntz.py @@ -0,0 +1,738 @@ +""" +Limits +====== + +Implemented according to the PhD thesis +https://www.cybertester.com/data/gruntz.pdf, which contains very thorough +descriptions of the algorithm including many examples. We summarize here +the gist of it. + +All functions are sorted according to how rapidly varying they are at +infinity using the following rules. Any two functions f and g can be +compared using the properties of L: + +L=lim log|f(x)| / log|g(x)| (for x -> oo) + +We define >, < ~ according to:: + + 1. f > g .... L=+-oo + + we say that: + - f is greater than any power of g + - f is more rapidly varying than g + - f goes to infinity/zero faster than g + + 2. f < g .... L=0 + + we say that: + - f is lower than any power of g + + 3. f ~ g .... L!=0, +-oo + + we say that: + - both f and g are bounded from above and below by suitable integral + powers of the other + +Examples +======== +:: + 2 < x < exp(x) < exp(x**2) < exp(exp(x)) + 2 ~ 3 ~ -5 + x ~ x**2 ~ x**3 ~ 1/x ~ x**m ~ -x + exp(x) ~ exp(-x) ~ exp(2x) ~ exp(x)**2 ~ exp(x+exp(-x)) + f ~ 1/f + +So we can divide all the functions into comparability classes (x and x^2 +belong to one class, exp(x) and exp(-x) belong to some other class). In +principle, we could compare any two functions, but in our algorithm, we +do not compare anything below the class 2~3~-5 (for example log(x) is +below this), so we set 2~3~-5 as the lowest comparability class. + +Given the function f, we find the list of most rapidly varying (mrv set) +subexpressions of it. This list belongs to the same comparability class. +Let's say it is {exp(x), exp(2x)}. Using the rule f ~ 1/f we find an +element "w" (either from the list or a new one) from the same +comparability class which goes to zero at infinity. In our example we +set w=exp(-x) (but we could also set w=exp(-2x) or w=exp(-3x) ...). We +rewrite the mrv set using w, in our case {1/w, 1/w^2}, and substitute it +into f. Then we expand f into a series in w:: + + f = c0*w^e0 + c1*w^e1 + ... + O(w^en), where e0oo, lim f = lim c0*w^e0, because all the other terms go to zero, +because w goes to zero faster than the ci and ei. 
So:: + + for e0>0, lim f = 0 + for e0<0, lim f = +-oo (the sign depends on the sign of c0) + for e0=0, lim f = lim c0 + +We need to recursively compute limits at several places of the algorithm, but +as is shown in the PhD thesis, it always finishes. + +Important functions from the implementation: + +compare(a, b, x) compares "a" and "b" by computing the limit L. +mrv(e, x) returns list of most rapidly varying (mrv) subexpressions of "e" +rewrite(e, Omega, x, wsym) rewrites "e" in terms of w +leadterm(f, x) returns the lowest power term in the series of f +mrv_leadterm(e, x) returns the lead term (c0, e0) for e +limitinf(e, x) computes lim e (for x->oo) +limit(e, z, z0) computes any limit by converting it to the case x->oo + +All the functions are really simple and straightforward except +rewrite(), which is the most difficult/complex part of the algorithm. +When the algorithm fails, the bugs are usually in the series expansion +(i.e. in SymPy) or in rewrite. + +This code is almost exact rewrite of the Maple code inside the Gruntz +thesis. + +Debugging +--------- + +Because the gruntz algorithm is highly recursive, it's difficult to +figure out what went wrong inside a debugger. Instead, turn on nice +debug prints by defining the environment variable SYMPY_DEBUG. For +example: + +[user@localhost]: SYMPY_DEBUG=True ./bin/isympy + +In [1]: limit(sin(x)/x, x, 0) +limitinf(_x*sin(1/_x), _x) = 1 ++-mrv_leadterm(_x*sin(1/_x), _x) = (1, 0) +| +-mrv(_x*sin(1/_x), _x) = set([_x]) +| | +-mrv(_x, _x) = set([_x]) +| | +-mrv(sin(1/_x), _x) = set([_x]) +| | +-mrv(1/_x, _x) = set([_x]) +| | +-mrv(_x, _x) = set([_x]) +| +-mrv_leadterm(exp(_x)*sin(exp(-_x)), _x, set([exp(_x)])) = (1, 0) +| +-rewrite(exp(_x)*sin(exp(-_x)), set([exp(_x)]), _x, _w) = (1/_w*sin(_w), -_x) +| +-sign(_x, _x) = 1 +| +-mrv_leadterm(1, _x) = (1, 0) ++-sign(0, _x) = 0 ++-limitinf(1, _x) = 1 + +And check manually which line is wrong. Then go to the source code and +debug this function to figure out the exact problem. + +""" +from functools import reduce + +from sympy.core import Basic, S, Mul, PoleError, expand_mul +from sympy.core.cache import cacheit +from sympy.core.numbers import ilcm, I, oo +from sympy.core.symbol import Dummy, Wild +from sympy.core.traversal import bottom_up + +from sympy.functions import log, exp, sign as _sign +from sympy.series.order import Order +from sympy.utilities.exceptions import SymPyDeprecationWarning +from sympy.utilities.misc import debug_decorator as debug +from sympy.utilities.timeutils import timethis + +timeit = timethis('gruntz') + + +def compare(a, b, x): + """Returns "<" if a" for a>b""" + # log(exp(...)) must always be simplified here for termination + la, lb = log(a), log(b) + if isinstance(a, Basic) and (isinstance(a, exp) or (a.is_Pow and a.base == S.Exp1)): + la = a.exp + if isinstance(b, Basic) and (isinstance(b, exp) or (b.is_Pow and b.base == S.Exp1)): + lb = b.exp + + c = limitinf(la/lb, x) + if c == 0: + return "<" + elif c.is_infinite: + return ">" + else: + return "=" + + +class SubsSet(dict): + """ + Stores (expr, dummy) pairs, and how to rewrite expr-s. + + Explanation + =========== + + The gruntz algorithm needs to rewrite certain expressions in term of a new + variable w. We cannot use subs, because it is just too smart for us. 
For + example:: + + > Omega=[exp(exp(_p - exp(-_p))/(1 - 1/_p)), exp(exp(_p))] + > O2=[exp(-exp(_p) + exp(-exp(-_p))*exp(_p)/(1 - 1/_p))/_w, 1/_w] + > e = exp(exp(_p - exp(-_p))/(1 - 1/_p)) - exp(exp(_p)) + > e.subs(Omega[0],O2[0]).subs(Omega[1],O2[1]) + -1/w + exp(exp(p)*exp(-exp(-p))/(1 - 1/p)) + + is really not what we want! + + So we do it the hard way and keep track of all the things we potentially + want to substitute by dummy variables. Consider the expression:: + + exp(x - exp(-x)) + exp(x) + x. + + The mrv set is {exp(x), exp(-x), exp(x - exp(-x))}. + We introduce corresponding dummy variables d1, d2, d3 and rewrite:: + + d3 + d1 + x. + + This class first of all keeps track of the mapping expr->variable, i.e. + will at this stage be a dictionary:: + + {exp(x): d1, exp(-x): d2, exp(x - exp(-x)): d3}. + + [It turns out to be more convenient this way round.] + But sometimes expressions in the mrv set have other expressions from the + mrv set as subexpressions, and we need to keep track of that as well. In + this case, d3 is really exp(x - d2), so rewrites at this stage is:: + + {d3: exp(x-d2)}. + + The function rewrite uses all this information to correctly rewrite our + expression in terms of w. In this case w can be chosen to be exp(-x), + i.e. d2. The correct rewriting then is:: + + exp(-w)/w + 1/w + x. + """ + def __init__(self): + self.rewrites = {} + + def __repr__(self): + return super().__repr__() + ', ' + self.rewrites.__repr__() + + def __getitem__(self, key): + if key not in self: + self[key] = Dummy() + return dict.__getitem__(self, key) + + def do_subs(self, e): + """Substitute the variables with expressions""" + for expr, var in self.items(): + e = e.xreplace({var: expr}) + return e + + def meets(self, s2): + """Tell whether or not self and s2 have non-empty intersection""" + return set(self.keys()).intersection(list(s2.keys())) != set() + + def union(self, s2, exps=None): + """Compute the union of self and s2, adjusting exps""" + res = self.copy() + tr = {} + for expr, var in s2.items(): + if expr in self: + if exps: + exps = exps.xreplace({var: res[expr]}) + tr[var] = res[expr] + else: + res[expr] = var + for var, rewr in s2.rewrites.items(): + res.rewrites[var] = rewr.xreplace(tr) + return res, exps + + def copy(self): + """Create a shallow copy of SubsSet""" + r = SubsSet() + r.rewrites = self.rewrites.copy() + for expr, var in self.items(): + r[expr] = var + return r + + +@debug +def mrv(e, x): + """Returns a SubsSet of most rapidly varying (mrv) subexpressions of 'e', + and e rewritten in terms of these""" + from sympy.simplify.powsimp import powsimp + e = powsimp(e, deep=True, combine='exp') + if not isinstance(e, Basic): + raise TypeError("e should be an instance of Basic") + if not e.has(x): + return SubsSet(), e + elif e == x: + s = SubsSet() + return s, s[x] + elif e.is_Mul or e.is_Add: + i, d = e.as_independent(x) # throw away x-independent terms + if d.func != e.func: + s, expr = mrv(d, x) + return s, e.func(i, expr) + a, b = d.as_two_terms() + s1, e1 = mrv(a, x) + s2, e2 = mrv(b, x) + return mrv_max1(s1, s2, e.func(i, e1, e2), x) + elif e.is_Pow and e.base != S.Exp1: + e1 = S.One + while e.is_Pow: + b1 = e.base + e1 *= e.exp + e = b1 + if b1 == 1: + return SubsSet(), b1 + if e1.has(x): + base_lim = limitinf(b1, x) + if base_lim is S.One: + return mrv(exp(e1 * (b1 - 1)), x) + return mrv(exp(e1 * log(b1)), x) + else: + s, expr = mrv(b1, x) + return s, expr**e1 + elif isinstance(e, log): + s, expr = mrv(e.args[0], x) + return s, log(expr) + elif isinstance(e, 
exp) or (e.is_Pow and e.base == S.Exp1): + # We know from the theory of this algorithm that exp(log(...)) may always + # be simplified here, and doing so is vital for termination. + if isinstance(e.exp, log): + return mrv(e.exp.args[0], x) + # if a product has an infinite factor the result will be + # infinite if there is no zero, otherwise NaN; here, we + # consider the result infinite if any factor is infinite + li = limitinf(e.exp, x) + if any(_.is_infinite for _ in Mul.make_args(li)): + s1 = SubsSet() + e1 = s1[e] + s2, e2 = mrv(e.exp, x) + su = s1.union(s2)[0] + su.rewrites[e1] = exp(e2) + return mrv_max3(s1, e1, s2, exp(e2), su, e1, x) + else: + s, expr = mrv(e.exp, x) + return s, exp(expr) + elif e.is_Function: + l = [mrv(a, x) for a in e.args] + l2 = [s for (s, _) in l if s != SubsSet()] + if len(l2) != 1: + # e.g. something like BesselJ(x, x) + raise NotImplementedError("MRV set computation for functions in" + " several variables not implemented.") + s, ss = l2[0], SubsSet() + args = [ss.do_subs(x[1]) for x in l] + return s, e.func(*args) + elif e.is_Derivative: + raise NotImplementedError("MRV set computation for derivatives" + " not implemented yet.") + raise NotImplementedError( + "Don't know how to calculate the mrv of '%s'" % e) + + +def mrv_max3(f, expsf, g, expsg, union, expsboth, x): + """ + Computes the maximum of two sets of expressions f and g, which + are in the same comparability class, i.e. max() compares (two elements of) + f and g and returns either (f, expsf) [if f is larger], (g, expsg) + [if g is larger] or (union, expsboth) [if f, g are of the same class]. + """ + if not isinstance(f, SubsSet): + raise TypeError("f should be an instance of SubsSet") + if not isinstance(g, SubsSet): + raise TypeError("g should be an instance of SubsSet") + if f == SubsSet(): + return g, expsg + elif g == SubsSet(): + return f, expsf + elif f.meets(g): + return union, expsboth + + c = compare(list(f.keys())[0], list(g.keys())[0], x) + if c == ">": + return f, expsf + elif c == "<": + return g, expsg + else: + if c != "=": + raise ValueError("c should be =") + return union, expsboth + + +def mrv_max1(f, g, exps, x): + """Computes the maximum of two sets of expressions f and g, which + are in the same comparability class, i.e. mrv_max1() compares (two elements of) + f and g and returns the set, which is in the higher comparability class + of the union of both, if they have the same order of variation. + Also returns exps, with the appropriate substitutions made. + """ + u, b = f.union(g, exps) + return mrv_max3(f, g.do_subs(exps), g, f.do_subs(exps), + u, b, x) + + +@debug +@cacheit +@timeit +def sign(e, x): + """ + Returns a sign of an expression e(x) for x->oo. + + :: + + e > 0 for x sufficiently large ... 1 + e == 0 for x sufficiently large ... 0 + e < 0 for x sufficiently large ... -1 + + The result of this function is currently undefined if e changes sign + arbitrarily often for arbitrarily large x (e.g. sin(x)). + + Note that this returns zero only if e is *constantly* zero + for x sufficiently large. [If e is constant, of course, this is just + the same thing as the sign of e.] 
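A couple of small sketches of the comparison helpers used throughout this module, ``compare`` (defined above) and ``sign`` (whose body follows); the results shown are the expected ones for x -> oo:

    >>> from sympy import exp
    >>> from sympy.series.gruntz import compare, sign
    >>> from sympy.abc import x
    >>> compare(x**100, exp(x), x)
    '<'
    >>> sign(-3*x**2, x)
    -1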
+ """ + if not isinstance(e, Basic): + raise TypeError("e should be an instance of Basic") + + if e.is_positive: + return 1 + elif e.is_negative: + return -1 + elif e.is_zero: + return 0 + + elif not e.has(x): + from sympy.simplify import logcombine + e = logcombine(e) + return _sign(e) + elif e == x: + return 1 + elif e.is_Mul: + a, b = e.as_two_terms() + sa = sign(a, x) + if not sa: + return 0 + return sa * sign(b, x) + elif isinstance(e, exp): + return 1 + elif e.is_Pow: + if e.base == S.Exp1: + return 1 + s = sign(e.base, x) + if s == 1: + return 1 + if e.exp.is_Integer: + return s**e.exp + elif isinstance(e, log): + return sign(e.args[0] - 1, x) + + # if all else fails, do it the hard way + c0, e0 = mrv_leadterm(e, x) + return sign(c0, x) + + +@debug +@timeit +@cacheit +def limitinf(e, x): + """Limit e(x) for x-> oo.""" + # rewrite e in terms of tractable functions only + + old = e + if not e.has(x): + return e # e is a constant + from sympy.simplify.powsimp import powdenest + from sympy.calculus.util import AccumBounds + if e.has(Order): + e = e.expand().removeO() + if not x.is_positive or x.is_integer: + # We make sure that x.is_positive is True and x.is_integer is None + # so we get all the correct mathematical behavior from the expression. + # We need a fresh variable. + p = Dummy('p', positive=True) + e = e.subs(x, p) + x = p + e = e.rewrite('tractable', deep=True, limitvar=x) + e = powdenest(e) + if isinstance(e, AccumBounds): + if mrv_leadterm(e.min, x) != mrv_leadterm(e.max, x): + raise NotImplementedError + c0, e0 = mrv_leadterm(e.min, x) + else: + c0, e0 = mrv_leadterm(e, x) + sig = sign(e0, x) + if sig == 1: + return S.Zero # e0>0: lim f = 0 + elif sig == -1: # e0<0: lim f = +-oo (the sign depends on the sign of c0) + if c0.match(I*Wild("a", exclude=[I])): + return c0*oo + s = sign(c0, x) + # the leading term shouldn't be 0: + if s == 0: + raise ValueError("Leading term should not be 0") + return s*oo + elif sig == 0: + if c0 == old: + c0 = c0.cancel() + return limitinf(c0, x) # e0=0: lim f = lim c0 + else: + raise ValueError("{} could not be evaluated".format(sig)) + + +def moveup2(s, x): + r = SubsSet() + for expr, var in s.items(): + r[expr.xreplace({x: exp(x)})] = var + for var, expr in s.rewrites.items(): + r.rewrites[var] = s.rewrites[var].xreplace({x: exp(x)}) + return r + + +def moveup(l, x): + return [e.xreplace({x: exp(x)}) for e in l] + + +@debug +@timeit +def calculate_series(e, x, logx=None): + """ Calculates at least one term of the series of ``e`` in ``x``. + + This is a place that fails most often, so it is in its own function. + """ + + SymPyDeprecationWarning( + feature="calculate_series", + useinstead="series() with suitable n, or as_leading_term", + issue=21838, + deprecated_since_version="1.12" + ).warn() + + from sympy.simplify.powsimp import powdenest + + for t in e.lseries(x, logx=logx): + # bottom_up function is required for a specific case - when e is + # -exp(p/(p + 1)) + exp(-p**2/(p + 1) + p) + t = bottom_up(t, lambda w: + getattr(w, 'normal', lambda: w)()) + # And the expression + # `(-sin(1/x) + sin((x + exp(x))*exp(-x)/x))*exp(x)` + # from the first test of test_gruntz_eval_special needs to + # be expanded. But other forms need to be have at least + # factor_terms applied. `factor` accomplishes both and is + # faster than using `factor_terms` for the gruntz suite. It + # does not appear that use of `cancel` is necessary. 
+ # t = cancel(t, expand=False) + t = t.factor() + + if t.has(exp) and t.has(log): + t = powdenest(t) + + if not t.is_zero: + break + + return t + + +@debug +@timeit +@cacheit +def mrv_leadterm(e, x): + """Returns (c0, e0) for e.""" + Omega = SubsSet() + if not e.has(x): + return (e, S.Zero) + if Omega == SubsSet(): + Omega, exps = mrv(e, x) + if not Omega: + # e really does not depend on x after simplification + return exps, S.Zero + if x in Omega: + # move the whole omega up (exponentiate each term): + Omega_up = moveup2(Omega, x) + exps_up = moveup([exps], x)[0] + # NOTE: there is no need to move this down! + Omega = Omega_up + exps = exps_up + # + # The positive dummy, w, is used here so log(w*2) etc. will expand; + # a unique dummy is needed in this algorithm + # + # For limits of complex functions, the algorithm would have to be + # improved, or just find limits of Re and Im components separately. + # + w = Dummy("w", positive=True) + f, logw = rewrite(exps, Omega, x, w) + try: + lt = f.leadterm(w, logx=logw) + except (NotImplementedError, PoleError, ValueError): + n0 = 1 + _series = Order(1) + incr = S.One + while _series.is_Order: + _series = f._eval_nseries(w, n=n0+incr, logx=logw) + incr *= 2 + series = _series.expand().removeO() + try: + lt = series.leadterm(w, logx=logw) + except (NotImplementedError, PoleError, ValueError): + lt = f.as_coeff_exponent(w) + if lt[0].has(w): + base = f.as_base_exp()[0].as_coeff_exponent(w) + ex = f.as_base_exp()[1] + lt = (base[0]**ex, base[1]*ex) + return (lt[0].subs(log(w), logw), lt[1]) + + +def build_expression_tree(Omega, rewrites): + r""" Helper function for rewrite. + + We need to sort Omega (mrv set) so that we replace an expression before + we replace any expression in terms of which it has to be rewritten:: + + e1 ---> e2 ---> e3 + \ + -> e4 + + Here we can do e1, e2, e3, e4 or e1, e2, e4, e3. + To do this we assemble the nodes into a tree, and sort them by height. + + This function builds the tree, rewrites then sorts the nodes. + """ + class Node: + def __init__(self): + self.before = [] + self.expr = None + self.var = None + def ht(self): + return reduce(lambda x, y: x + y, + [x.ht() for x in self.before], 1) + nodes = {} + for expr, v in Omega: + n = Node() + n.var = v + n.expr = expr + nodes[v] = n + for _, v in Omega: + if v in rewrites: + n = nodes[v] + r = rewrites[v] + for _, v2 in Omega: + if r.has(v2): + n.before.append(nodes[v2]) + + return nodes + + +@debug +@timeit +def rewrite(e, Omega, x, wsym): + """e(x) ... the function + Omega ... the mrv set + wsym ... the symbol which is going to be used for w + + Returns the rewritten e in terms of w and log(w). See test_rewrite1() + for examples and correct results. 
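+
+    Continuing the worked example from the SubsSet docstring above: for
+    e = exp(x - exp(-x)) + exp(x) + x with mrv set
+    {exp(x), exp(-x), exp(x - exp(-x))}, w plays the role of exp(-x), and
+    the rewritten expression is (up to term ordering)::
+
+        exp(-w)/w + 1/w + x,    with logw = -x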
+ """ + + from sympy import AccumBounds + if not isinstance(Omega, SubsSet): + raise TypeError("Omega should be an instance of SubsSet") + if len(Omega) == 0: + raise ValueError("Length cannot be 0") + # all items in Omega must be exponentials + for t in Omega.keys(): + if not isinstance(t, exp): + raise ValueError("Value should be exp") + rewrites = Omega.rewrites + Omega = list(Omega.items()) + + nodes = build_expression_tree(Omega, rewrites) + Omega.sort(key=lambda x: nodes[x[1]].ht(), reverse=True) + + # make sure we know the sign of each exp() term; after the loop, + # g is going to be the "w" - the simplest one in the mrv set + for g, _ in Omega: + sig = sign(g.exp, x) + if sig != 1 and sig != -1 and not sig.has(AccumBounds): + raise NotImplementedError('Result depends on the sign of %s' % sig) + if sig == 1: + wsym = 1/wsym # if g goes to oo, substitute 1/w + # O2 is a list, which results by rewriting each item in Omega using "w" + O2 = [] + denominators = [] + for f, var in Omega: + c = limitinf(f.exp/g.exp, x) + if c.is_Rational: + denominators.append(c.q) + arg = f.exp + if var in rewrites: + if not isinstance(rewrites[var], exp): + raise ValueError("Value should be exp") + arg = rewrites[var].args[0] + O2.append((var, exp((arg - c*g.exp).expand())*wsym**c)) + + # Remember that Omega contains subexpressions of "e". So now we find + # them in "e" and substitute them for our rewriting, stored in O2 + + # the following powsimp is necessary to automatically combine exponentials, + # so that the .xreplace() below succeeds: + # TODO this should not be necessary + from sympy.simplify.powsimp import powsimp + f = powsimp(e, deep=True, combine='exp') + for a, b in O2: + f = f.xreplace({a: b}) + + for _, var in Omega: + assert not f.has(var) + + # finally compute the logarithm of w (logw). + logw = g.exp + if sig == 1: + logw = -logw # log(w)->log(1/w)=-log(w) + + # Some parts of SymPy have difficulty computing series expansions with + # non-integral exponents. The following heuristic improves the situation: + exponent = reduce(ilcm, denominators, 1) + f = f.subs({wsym: wsym**exponent}) + logw /= exponent + + # bottom_up function is required for a specific case - when f is + # -exp(p/(p + 1)) + exp(-p**2/(p + 1) + p). No current simplification + # methods reduce this to 0 while not expanding polynomials. + f = bottom_up(f, lambda w: getattr(w, 'normal', lambda: w)()) + f = expand_mul(f) + + return f, logw + + +def gruntz(e, z, z0, dir="+"): + """ + Compute the limit of e(z) at the point z0 using the Gruntz algorithm. + + Explanation + =========== + + ``z0`` can be any expression, including oo and -oo. + + For ``dir="+"`` (default) it calculates the limit from the right + (z->z0+) and for ``dir="-"`` the limit from the left (z->z0-). For infinite z0 + (oo or -oo), the dir argument does not matter. + + This algorithm is fully described in the module docstring in the gruntz.py + file. It relies heavily on the series expansion. Most frequently, gruntz() + is only used if the faster limit() function (which uses heuristics) fails. 
+ """ + if not z.is_symbol: + raise NotImplementedError("Second argument must be a Symbol") + + # convert all limits to the limit z->oo; sign of z is handled in limitinf + r = None + if z0 in (oo, I*oo): + e0 = e + elif z0 in (-oo, -I*oo): + e0 = e.subs(z, -z) + else: + if str(dir) == "-": + e0 = e.subs(z, z0 - 1/z) + elif str(dir) == "+": + e0 = e.subs(z, z0 + 1/z) + else: + raise NotImplementedError("dir must be '+' or '-'") + + r = limitinf(e0, z) + + # This is a bit of a heuristic for nice results... we always rewrite + # tractable functions in terms of familiar intractable ones. + # It might be nicer to rewrite the exactly to what they were initially, + # but that would take some work to implement. + return r.rewrite('intractable', deep=True) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/series/limits.py b/llmeval-env/lib/python3.10/site-packages/sympy/series/limits.py new file mode 100644 index 0000000000000000000000000000000000000000..174ce389676fa553e759f43cce5f94f7ae31817b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/series/limits.py @@ -0,0 +1,385 @@ +from sympy.calculus.accumulationbounds import AccumBounds +from sympy.core import S, Symbol, Add, sympify, Expr, PoleError, Mul +from sympy.core.exprtools import factor_terms +from sympy.core.numbers import Float, _illegal +from sympy.functions.combinatorial.factorials import factorial +from sympy.functions.elementary.complexes import (Abs, sign, arg, re) +from sympy.functions.elementary.exponential import (exp, log) +from sympy.functions.special.gamma_functions import gamma +from sympy.polys import PolynomialError, factor +from sympy.series.order import Order +from .gruntz import gruntz + +def limit(e, z, z0, dir="+"): + """Computes the limit of ``e(z)`` at the point ``z0``. + + Parameters + ========== + + e : expression, the limit of which is to be taken + + z : symbol representing the variable in the limit. + Other symbols are treated as constants. Multivariate limits + are not supported. + + z0 : the value toward which ``z`` tends. Can be any expression, + including ``oo`` and ``-oo``. + + dir : string, optional (default: "+") + The limit is bi-directional if ``dir="+-"``, from the right + (z->z0+) if ``dir="+"``, and from the left (z->z0-) if + ``dir="-"``. For infinite ``z0`` (``oo`` or ``-oo``), the ``dir`` + argument is determined from the direction of the infinity + (i.e., ``dir="-"`` for ``oo``). + + Examples + ======== + + >>> from sympy import limit, sin, oo + >>> from sympy.abc import x + >>> limit(sin(x)/x, x, 0) + 1 + >>> limit(1/x, x, 0) # default dir='+' + oo + >>> limit(1/x, x, 0, dir="-") + -oo + >>> limit(1/x, x, 0, dir='+-') + zoo + >>> limit(1/x, x, oo) + 0 + + Notes + ===== + + First we try some heuristics for easy and frequent cases like "x", "1/x", + "x**2" and similar, so that it's fast. For all other cases, we use the + Gruntz algorithm (see the gruntz() function). + + See Also + ======== + + limit_seq : returns the limit of a sequence. + """ + + return Limit(e, z, z0, dir).doit(deep=False) + + +def heuristics(e, z, z0, dir): + """Computes the limit of an expression term-wise. + Parameters are the same as for the ``limit`` function. + Works with the arguments of expression ``e`` one by one, computing + the limit of each and then combining the results. This approach + works only for simple limits, but it is fast. 
+ """ + + rv = None + if z0 is S.Infinity: + rv = limit(e.subs(z, 1/z), z, S.Zero, "+") + if isinstance(rv, Limit): + return + elif e.is_Mul or e.is_Add or e.is_Pow or e.is_Function: + r = [] + from sympy.simplify.simplify import together + for a in e.args: + l = limit(a, z, z0, dir) + if l.has(S.Infinity) and l.is_finite is None: + if isinstance(e, Add): + m = factor_terms(e) + if not isinstance(m, Mul): # try together + m = together(m) + if not isinstance(m, Mul): # try factor if the previous methods failed + m = factor(e) + if isinstance(m, Mul): + return heuristics(m, z, z0, dir) + return + return + elif isinstance(l, Limit): + return + elif l is S.NaN: + return + else: + r.append(l) + if r: + rv = e.func(*r) + if rv is S.NaN and e.is_Mul and any(isinstance(rr, AccumBounds) for rr in r): + r2 = [] + e2 = [] + for ii, rval in enumerate(r): + if isinstance(rval, AccumBounds): + r2.append(rval) + else: + e2.append(e.args[ii]) + + if len(e2) > 0: + e3 = Mul(*e2).simplify() + l = limit(e3, z, z0, dir) + rv = l * Mul(*r2) + + if rv is S.NaN: + try: + from sympy.simplify.ratsimp import ratsimp + rat_e = ratsimp(e) + except PolynomialError: + return + if rat_e is S.NaN or rat_e == e: + return + return limit(rat_e, z, z0, dir) + return rv + + +class Limit(Expr): + """Represents an unevaluated limit. + + Examples + ======== + + >>> from sympy import Limit, sin + >>> from sympy.abc import x + >>> Limit(sin(x)/x, x, 0) + Limit(sin(x)/x, x, 0, dir='+') + >>> Limit(1/x, x, 0, dir="-") + Limit(1/x, x, 0, dir='-') + + """ + + def __new__(cls, e, z, z0, dir="+"): + e = sympify(e) + z = sympify(z) + z0 = sympify(z0) + + if z0 in (S.Infinity, S.ImaginaryUnit*S.Infinity): + dir = "-" + elif z0 in (S.NegativeInfinity, S.ImaginaryUnit*S.NegativeInfinity): + dir = "+" + + if(z0.has(z)): + raise NotImplementedError("Limits approaching a variable point are" + " not supported (%s -> %s)" % (z, z0)) + if isinstance(dir, str): + dir = Symbol(dir) + elif not isinstance(dir, Symbol): + raise TypeError("direction must be of type basestring or " + "Symbol, not %s" % type(dir)) + if str(dir) not in ('+', '-', '+-'): + raise ValueError("direction must be one of '+', '-' " + "or '+-', not %s" % dir) + + obj = Expr.__new__(cls) + obj._args = (e, z, z0, dir) + return obj + + + @property + def free_symbols(self): + e = self.args[0] + isyms = e.free_symbols + isyms.difference_update(self.args[1].free_symbols) + isyms.update(self.args[2].free_symbols) + return isyms + + + def pow_heuristics(self, e): + _, z, z0, _ = self.args + b1, e1 = e.base, e.exp + if not b1.has(z): + res = limit(e1*log(b1), z, z0) + return exp(res) + + ex_lim = limit(e1, z, z0) + base_lim = limit(b1, z, z0) + + if base_lim is S.One: + if ex_lim in (S.Infinity, S.NegativeInfinity): + res = limit(e1*(b1 - 1), z, z0) + return exp(res) + if base_lim is S.NegativeInfinity and ex_lim is S.Infinity: + return S.ComplexInfinity + + + def doit(self, **hints): + """Evaluates the limit. + + Parameters + ========== + + deep : bool, optional (default: True) + Invoke the ``doit`` method of the expressions involved before + taking the limit. + + hints : optional keyword arguments + To be passed to ``doit`` methods; only used if deep is True. 
+ """ + + e, z, z0, dir = self.args + + if str(dir) == '+-': + r = limit(e, z, z0, dir='+') + l = limit(e, z, z0, dir='-') + if isinstance(r, Limit) and isinstance(l, Limit): + if r.args[0] == l.args[0]: + return self + if r == l: + return l + if r.is_infinite and l.is_infinite: + return S.ComplexInfinity + raise ValueError("The limit does not exist since " + "left hand limit = %s and right hand limit = %s" + % (l, r)) + + if z0 is S.ComplexInfinity: + raise NotImplementedError("Limits at complex " + "infinity are not implemented") + + if z0.is_infinite: + cdir = sign(z0) + cdir = cdir/abs(cdir) + e = e.subs(z, cdir*z) + dir = "-" + z0 = S.Infinity + + if hints.get('deep', True): + e = e.doit(**hints) + z = z.doit(**hints) + z0 = z0.doit(**hints) + + if e == z: + return z0 + + if not e.has(z): + return e + + if z0 is S.NaN: + return S.NaN + + if e.has(*_illegal): + return self + + if e.is_Order: + return Order(limit(e.expr, z, z0), *e.args[1:]) + + cdir = 0 + if str(dir) == "+": + cdir = 1 + elif str(dir) == "-": + cdir = -1 + + def set_signs(expr): + if not expr.args: + return expr + newargs = tuple(set_signs(arg) for arg in expr.args) + if newargs != expr.args: + expr = expr.func(*newargs) + abs_flag = isinstance(expr, Abs) + arg_flag = isinstance(expr, arg) + sign_flag = isinstance(expr, sign) + if abs_flag or sign_flag or arg_flag: + sig = limit(expr.args[0], z, z0, dir) + if sig.is_zero: + sig = limit(1/expr.args[0], z, z0, dir) + if sig.is_extended_real: + if (sig < 0) == True: + return (-expr.args[0] if abs_flag else + S.NegativeOne if sign_flag else S.Pi) + elif (sig > 0) == True: + return (expr.args[0] if abs_flag else + S.One if sign_flag else S.Zero) + return expr + + if e.has(Float): + # Convert floats like 0.5 to exact SymPy numbers like S.Half, to + # prevent rounding errors which can lead to unexpected execution + # of conditional blocks that work on comparisons + # Also see comments in https://github.com/sympy/sympy/issues/19453 + from sympy.simplify.simplify import nsimplify + e = nsimplify(e) + e = set_signs(e) + + + if e.is_meromorphic(z, z0): + if z0 is S.Infinity: + newe = e.subs(z, 1/z) + # cdir changes sign as oo- should become 0+ + cdir = -cdir + else: + newe = e.subs(z, z + z0) + try: + coeff, ex = newe.leadterm(z, cdir=cdir) + except ValueError: + pass + else: + if ex > 0: + return S.Zero + elif ex == 0: + return coeff + if cdir == 1 or not(int(ex) & 1): + return S.Infinity*sign(coeff) + elif cdir == -1: + return S.NegativeInfinity*sign(coeff) + else: + return S.ComplexInfinity + + if z0 is S.Infinity: + if e.is_Mul: + e = factor_terms(e) + newe = e.subs(z, 1/z) + # cdir changes sign as oo- should become 0+ + cdir = -cdir + else: + newe = e.subs(z, z + z0) + try: + coeff, ex = newe.leadterm(z, cdir=cdir) + except (ValueError, NotImplementedError, PoleError): + # The NotImplementedError catching is for custom functions + from sympy.simplify.powsimp import powsimp + e = powsimp(e) + if e.is_Pow: + r = self.pow_heuristics(e) + if r is not None: + return r + try: + coeff = newe.as_leading_term(z, cdir=cdir) + if coeff != newe and coeff.has(exp): + return gruntz(coeff, z, 0, "-" if re(cdir).is_negative else "+") + except (ValueError, NotImplementedError, PoleError): + pass + else: + if isinstance(coeff, AccumBounds) and ex == S.Zero: + return coeff + if coeff.has(S.Infinity, S.NegativeInfinity, S.ComplexInfinity, S.NaN): + return self + if not coeff.has(z): + if ex.is_positive: + return S.Zero + elif ex == 0: + return coeff + elif ex.is_negative: + if cdir == 1: + 
return S.Infinity*sign(coeff) + elif cdir == -1: + return S.NegativeInfinity*sign(coeff)*S.NegativeOne**(S.One + ex) + else: + return S.ComplexInfinity + else: + raise NotImplementedError("Not sure of sign of %s" % ex) + + # gruntz fails on factorials but works with the gamma function + # If no factorial term is present, e should remain unchanged. + # factorial is defined to be zero for negative inputs (which + # differs from gamma) so only rewrite for positive z0. + if z0.is_extended_positive: + e = e.rewrite(factorial, gamma) + + l = None + + try: + r = gruntz(e, z, z0, dir) + if r is S.NaN or l is S.NaN: + raise PoleError() + except (PoleError, ValueError): + if l is not None: + raise + r = heuristics(e, z, z0, dir) + if r is None: + return self + + return r diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/series/limitseq.py b/llmeval-env/lib/python3.10/site-packages/sympy/series/limitseq.py new file mode 100644 index 0000000000000000000000000000000000000000..ceac4e7b63bfc09d9dfc26c12c7d2acc8b8d44da --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/series/limitseq.py @@ -0,0 +1,257 @@ +"""Limits of sequences""" + +from sympy.calculus.accumulationbounds import AccumulationBounds +from sympy.core.add import Add +from sympy.core.function import PoleError +from sympy.core.power import Pow +from sympy.core.singleton import S +from sympy.core.symbol import Dummy +from sympy.core.sympify import sympify +from sympy.functions.combinatorial.numbers import fibonacci +from sympy.functions.combinatorial.factorials import factorial, subfactorial +from sympy.functions.special.gamma_functions import gamma +from sympy.functions.elementary.complexes import Abs +from sympy.functions.elementary.miscellaneous import Max, Min +from sympy.functions.elementary.trigonometric import cos, sin +from sympy.series.limits import Limit + + +def difference_delta(expr, n=None, step=1): + """Difference Operator. + + Explanation + =========== + + Discrete analog of differential operator. Given a sequence x[n], + returns the sequence x[n + step] - x[n]. + + Examples + ======== + + >>> from sympy import difference_delta as dd + >>> from sympy.abc import n + >>> dd(n*(n + 1), n) + 2*n + 2 + >>> dd(n*(n + 1), n, 2) + 4*n + 6 + + References + ========== + + .. [1] https://reference.wolfram.com/language/ref/DifferenceDelta.html + """ + expr = sympify(expr) + + if n is None: + f = expr.free_symbols + if len(f) == 1: + n = f.pop() + elif len(f) == 0: + return S.Zero + else: + raise ValueError("Since there is more than one variable in the" + " expression, a variable must be supplied to" + " take the difference of %s" % expr) + step = sympify(step) + if step.is_number is False or step.is_finite is False: + raise ValueError("Step should be a finite number.") + + if hasattr(expr, '_eval_difference_delta'): + result = expr._eval_difference_delta(n, step) + if result: + return result + + return expr.subs(n, n + step) - expr + + +def dominant(expr, n): + """Finds the dominant term in a sum, that is a term that dominates + every other term. + + Explanation + =========== + + If limit(a/b, n, oo) is oo then a dominates b. + If limit(a/b, n, oo) is 0 then b dominates a. + Otherwise, a and b are comparable. + + If there is no unique dominant term, then returns ``None``. 
+ + Examples + ======== + + >>> from sympy import Sum + >>> from sympy.series.limitseq import dominant + >>> from sympy.abc import n, k + >>> dominant(5*n**3 + 4*n**2 + n + 1, n) + 5*n**3 + >>> dominant(2**n + Sum(k, (k, 0, n)), n) + 2**n + + See Also + ======== + + sympy.series.limitseq.dominant + """ + terms = Add.make_args(expr.expand(func=True)) + term0 = terms[-1] + comp = [term0] # comparable terms + for t in terms[:-1]: + r = term0/t + e = r.gammasimp() + if e == r: + e = r.factor() + l = limit_seq(e, n) + if l is None: + return None + elif l.is_zero: + term0 = t + comp = [term0] + elif l not in [S.Infinity, S.NegativeInfinity]: + comp.append(t) + if len(comp) > 1: + return None + return term0 + + +def _limit_inf(expr, n): + try: + return Limit(expr, n, S.Infinity).doit(deep=False) + except (NotImplementedError, PoleError): + return None + + +def _limit_seq(expr, n, trials): + from sympy.concrete.summations import Sum + + for i in range(trials): + if not expr.has(Sum): + result = _limit_inf(expr, n) + if result is not None: + return result + + num, den = expr.as_numer_denom() + if not den.has(n) or not num.has(n): + result = _limit_inf(expr.doit(), n) + if result is not None: + return result + return None + + num, den = (difference_delta(t.expand(), n) for t in [num, den]) + expr = (num / den).gammasimp() + + if not expr.has(Sum): + result = _limit_inf(expr, n) + if result is not None: + return result + + num, den = expr.as_numer_denom() + + num = dominant(num, n) + if num is None: + return None + + den = dominant(den, n) + if den is None: + return None + + expr = (num / den).gammasimp() + + +def limit_seq(expr, n=None, trials=5): + """Finds the limit of a sequence as index ``n`` tends to infinity. + + Parameters + ========== + + expr : Expr + SymPy expression for the ``n-th`` term of the sequence + n : Symbol, optional + The index of the sequence, an integer that tends to positive + infinity. If None, inferred from the expression unless it has + multiple symbols. + trials: int, optional + The algorithm is highly recursive. ``trials`` is a safeguard from + infinite recursion in case the limit is not easily computed by the + algorithm. Try increasing ``trials`` if the algorithm returns ``None``. + + Admissible Terms + ================ + + The algorithm is designed for sequences built from rational functions, + indefinite sums, and indefinite products over an indeterminate n. Terms of + alternating sign are also allowed, but more complex oscillatory behavior is + not supported. + + Examples + ======== + + >>> from sympy import limit_seq, Sum, binomial + >>> from sympy.abc import n, k, m + >>> limit_seq((5*n**3 + 3*n**2 + 4) / (3*n**3 + 4*n - 5), n) + 5/3 + >>> limit_seq(binomial(2*n, n) / Sum(binomial(2*k, k), (k, 1, n)), n) + 3/4 + >>> limit_seq(Sum(k**2 * Sum(2**m/m, (m, 1, k)), (k, 1, n)) / (2**n*n), n) + 4 + + See Also + ======== + + sympy.series.limitseq.dominant + + References + ========== + + .. [1] Computing Limits of Sequences - Manuel Kauers + """ + + from sympy.concrete.summations import Sum + if n is None: + free = expr.free_symbols + if len(free) == 1: + n = free.pop() + elif not free: + return expr + else: + raise ValueError("Expression has more than one variable. 
" + "Please specify a variable.") + elif n not in expr.free_symbols: + return expr + + expr = expr.rewrite(fibonacci, S.GoldenRatio) + expr = expr.rewrite(factorial, subfactorial, gamma) + n_ = Dummy("n", integer=True, positive=True) + n1 = Dummy("n", odd=True, positive=True) + n2 = Dummy("n", even=True, positive=True) + + # If there is a negative term raised to a power involving n, or a + # trigonometric function, then consider even and odd n separately. + powers = (p.as_base_exp() for p in expr.atoms(Pow)) + if (any(b.is_negative and e.has(n) for b, e in powers) or + expr.has(cos, sin)): + L1 = _limit_seq(expr.xreplace({n: n1}), n1, trials) + if L1 is not None: + L2 = _limit_seq(expr.xreplace({n: n2}), n2, trials) + if L1 != L2: + if L1.is_comparable and L2.is_comparable: + return AccumulationBounds(Min(L1, L2), Max(L1, L2)) + else: + return None + else: + L1 = _limit_seq(expr.xreplace({n: n_}), n_, trials) + if L1 is not None: + return L1 + else: + if expr.is_Add: + limits = [limit_seq(term, n, trials) for term in expr.args] + if any(result is None for result in limits): + return None + else: + return Add(*limits) + # Maybe the absolute value is easier to deal with (though not if + # it has a Sum). If it tends to 0, the limit is 0. + elif not expr.has(Sum): + lim = _limit_seq(Abs(expr.xreplace({n: n_})), n_, trials) + if lim is not None and lim.is_zero: + return S.Zero diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/series/sequences.py b/llmeval-env/lib/python3.10/site-packages/sympy/series/sequences.py new file mode 100644 index 0000000000000000000000000000000000000000..1bf08fc1ba942dd8fa9e53a2fca2d8b2665512bc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/series/sequences.py @@ -0,0 +1,1240 @@ +from sympy.core.basic import Basic +from sympy.core.cache import cacheit +from sympy.core.containers import Tuple +from sympy.core.decorators import call_highest_priority +from sympy.core.parameters import global_parameters +from sympy.core.function import AppliedUndef, expand +from sympy.core.mul import Mul +from sympy.core.numbers import Integer +from sympy.core.relational import Eq +from sympy.core.singleton import S, Singleton +from sympy.core.sorting import ordered +from sympy.core.symbol import Dummy, Symbol, Wild +from sympy.core.sympify import sympify +from sympy.matrices import Matrix +from sympy.polys import lcm, factor +from sympy.sets.sets import Interval, Intersection +from sympy.tensor.indexed import Idx +from sympy.utilities.iterables import flatten, is_sequence, iterable + + +############################################################################### +# SEQUENCES # +############################################################################### + + +class SeqBase(Basic): + """Base class for sequences""" + + is_commutative = True + _op_priority = 15 + + @staticmethod + def _start_key(expr): + """Return start (if possible) else S.Infinity. + + adapted from Set._infimum_key + """ + try: + start = expr.start + except (NotImplementedError, + AttributeError, ValueError): + start = S.Infinity + return start + + def _intersect_interval(self, other): + """Returns start and stop. + + Takes intersection over the two intervals. 
+ """ + interval = Intersection(self.interval, other.interval) + return interval.inf, interval.sup + + @property + def gen(self): + """Returns the generator for the sequence""" + raise NotImplementedError("(%s).gen" % self) + + @property + def interval(self): + """The interval on which the sequence is defined""" + raise NotImplementedError("(%s).interval" % self) + + @property + def start(self): + """The starting point of the sequence. This point is included""" + raise NotImplementedError("(%s).start" % self) + + @property + def stop(self): + """The ending point of the sequence. This point is included""" + raise NotImplementedError("(%s).stop" % self) + + @property + def length(self): + """Length of the sequence""" + raise NotImplementedError("(%s).length" % self) + + @property + def variables(self): + """Returns a tuple of variables that are bounded""" + return () + + @property + def free_symbols(self): + """ + This method returns the symbols in the object, excluding those + that take on a specific value (i.e. the dummy symbols). + + Examples + ======== + + >>> from sympy import SeqFormula + >>> from sympy.abc import n, m + >>> SeqFormula(m*n**2, (n, 0, 5)).free_symbols + {m} + """ + return ({j for i in self.args for j in i.free_symbols + .difference(self.variables)}) + + @cacheit + def coeff(self, pt): + """Returns the coefficient at point pt""" + if pt < self.start or pt > self.stop: + raise IndexError("Index %s out of bounds %s" % (pt, self.interval)) + return self._eval_coeff(pt) + + def _eval_coeff(self, pt): + raise NotImplementedError("The _eval_coeff method should be added to" + "%s to return coefficient so it is available" + "when coeff calls it." + % self.func) + + def _ith_point(self, i): + """Returns the i'th point of a sequence. + + Explanation + =========== + + If start point is negative infinity, point is returned from the end. + Assumes the first point to be indexed zero. + + Examples + ========= + + >>> from sympy import oo + >>> from sympy.series.sequences import SeqPer + + bounded + + >>> SeqPer((1, 2, 3), (-10, 10))._ith_point(0) + -10 + >>> SeqPer((1, 2, 3), (-10, 10))._ith_point(5) + -5 + + End is at infinity + + >>> SeqPer((1, 2, 3), (0, oo))._ith_point(5) + 5 + + Starts at negative infinity + + >>> SeqPer((1, 2, 3), (-oo, 0))._ith_point(5) + -5 + """ + if self.start is S.NegativeInfinity: + initial = self.stop + else: + initial = self.start + + if self.start is S.NegativeInfinity: + step = -1 + else: + step = 1 + + return initial + i*step + + def _add(self, other): + """ + Should only be used internally. + + Explanation + =========== + + self._add(other) returns a new, term-wise added sequence if self + knows how to add with other, otherwise it returns ``None``. + + ``other`` should only be a sequence object. + + Used within :class:`SeqAdd` class. + """ + return None + + def _mul(self, other): + """ + Should only be used internally. + + Explanation + =========== + + self._mul(other) returns a new, term-wise multiplied sequence if self + knows how to multiply with other, otherwise it returns ``None``. + + ``other`` should only be a sequence object. + + Used within :class:`SeqMul` class. + """ + return None + + def coeff_mul(self, other): + """ + Should be used when ``other`` is not a sequence. Should be + defined to define custom behaviour. 
+ + Examples + ======== + + >>> from sympy import SeqFormula + >>> from sympy.abc import n + >>> SeqFormula(n**2).coeff_mul(2) + SeqFormula(2*n**2, (n, 0, oo)) + + Notes + ===== + + '*' defines multiplication of sequences with sequences only. + """ + return Mul(self, other) + + def __add__(self, other): + """Returns the term-wise addition of 'self' and 'other'. + + ``other`` should be a sequence. + + Examples + ======== + + >>> from sympy import SeqFormula + >>> from sympy.abc import n + >>> SeqFormula(n**2) + SeqFormula(n**3) + SeqFormula(n**3 + n**2, (n, 0, oo)) + """ + if not isinstance(other, SeqBase): + raise TypeError('cannot add sequence and %s' % type(other)) + return SeqAdd(self, other) + + @call_highest_priority('__add__') + def __radd__(self, other): + return self + other + + def __sub__(self, other): + """Returns the term-wise subtraction of ``self`` and ``other``. + + ``other`` should be a sequence. + + Examples + ======== + + >>> from sympy import SeqFormula + >>> from sympy.abc import n + >>> SeqFormula(n**2) - (SeqFormula(n)) + SeqFormula(n**2 - n, (n, 0, oo)) + """ + if not isinstance(other, SeqBase): + raise TypeError('cannot subtract sequence and %s' % type(other)) + return SeqAdd(self, -other) + + @call_highest_priority('__sub__') + def __rsub__(self, other): + return (-self) + other + + def __neg__(self): + """Negates the sequence. + + Examples + ======== + + >>> from sympy import SeqFormula + >>> from sympy.abc import n + >>> -SeqFormula(n**2) + SeqFormula(-n**2, (n, 0, oo)) + """ + return self.coeff_mul(-1) + + def __mul__(self, other): + """Returns the term-wise multiplication of 'self' and 'other'. + + ``other`` should be a sequence. For ``other`` not being a + sequence see :func:`coeff_mul` method. + + Examples + ======== + + >>> from sympy import SeqFormula + >>> from sympy.abc import n + >>> SeqFormula(n**2) * (SeqFormula(n)) + SeqFormula(n**3, (n, 0, oo)) + """ + if not isinstance(other, SeqBase): + raise TypeError('cannot multiply sequence and %s' % type(other)) + return SeqMul(self, other) + + @call_highest_priority('__mul__') + def __rmul__(self, other): + return self * other + + def __iter__(self): + for i in range(self.length): + pt = self._ith_point(i) + yield self.coeff(pt) + + def __getitem__(self, index): + if isinstance(index, int): + index = self._ith_point(index) + return self.coeff(index) + elif isinstance(index, slice): + start, stop = index.start, index.stop + if start is None: + start = 0 + if stop is None: + stop = self.length + return [self.coeff(self._ith_point(i)) for i in + range(start, stop, index.step or 1)] + + def find_linear_recurrence(self,n,d=None,gfvar=None): + r""" + Finds the shortest linear recurrence that satisfies the first n + terms of sequence of order `\leq` ``n/2`` if possible. + If ``d`` is specified, find shortest linear recurrence of order + `\leq` min(d, n/2) if possible. + Returns list of coefficients ``[b(1), b(2), ...]`` corresponding to the + recurrence relation ``x(n) = b(1)*x(n-1) + b(2)*x(n-2) + ...`` + Returns ``[]`` if no recurrence is found. + If gfvar is specified, also returns ordinary generating function as a + function of gfvar. 
+ + Examples + ======== + + >>> from sympy import sequence, sqrt, oo, lucas + >>> from sympy.abc import n, x, y + >>> sequence(n**2).find_linear_recurrence(10, 2) + [] + >>> sequence(n**2).find_linear_recurrence(10) + [3, -3, 1] + >>> sequence(2**n).find_linear_recurrence(10) + [2] + >>> sequence(23*n**4+91*n**2).find_linear_recurrence(10) + [5, -10, 10, -5, 1] + >>> sequence(sqrt(5)*(((1 + sqrt(5))/2)**n - (-(1 + sqrt(5))/2)**(-n))/5).find_linear_recurrence(10) + [1, 1] + >>> sequence(x+y*(-2)**(-n), (n, 0, oo)).find_linear_recurrence(30) + [1/2, 1/2] + >>> sequence(3*5**n + 12).find_linear_recurrence(20,gfvar=x) + ([6, -5], 3*(5 - 21*x)/((x - 1)*(5*x - 1))) + >>> sequence(lucas(n)).find_linear_recurrence(15,gfvar=x) + ([1, 1], (x - 2)/(x**2 + x - 1)) + """ + from sympy.simplify import simplify + x = [simplify(expand(t)) for t in self[:n]] + lx = len(x) + if d is None: + r = lx//2 + else: + r = min(d,lx//2) + coeffs = [] + for l in range(1, r+1): + l2 = 2*l + mlist = [] + for k in range(l): + mlist.append(x[k:k+l]) + m = Matrix(mlist) + if m.det() != 0: + y = simplify(m.LUsolve(Matrix(x[l:l2]))) + if lx == l2: + coeffs = flatten(y[::-1]) + break + mlist = [] + for k in range(l,lx-l): + mlist.append(x[k:k+l]) + m = Matrix(mlist) + if m*y == Matrix(x[l2:]): + coeffs = flatten(y[::-1]) + break + if gfvar is None: + return coeffs + else: + l = len(coeffs) + if l == 0: + return [], None + else: + n, d = x[l-1]*gfvar**(l-1), 1 - coeffs[l-1]*gfvar**l + for i in range(l-1): + n += x[i]*gfvar**i + for j in range(l-i-1): + n -= coeffs[i]*x[j]*gfvar**(i+j+1) + d -= coeffs[i]*gfvar**(i+1) + return coeffs, simplify(factor(n)/factor(d)) + +class EmptySequence(SeqBase, metaclass=Singleton): + """Represents an empty sequence. + + The empty sequence is also available as a singleton as + ``S.EmptySequence``. + + Examples + ======== + + >>> from sympy import EmptySequence, SeqPer + >>> from sympy.abc import x + >>> EmptySequence + EmptySequence + >>> SeqPer((1, 2), (x, 0, 10)) + EmptySequence + SeqPer((1, 2), (x, 0, 10)) + >>> SeqPer((1, 2)) * EmptySequence + EmptySequence + >>> EmptySequence.coeff_mul(-1) + EmptySequence + """ + + @property + def interval(self): + return S.EmptySet + + @property + def length(self): + return S.Zero + + def coeff_mul(self, coeff): + """See docstring of SeqBase.coeff_mul""" + return self + + def __iter__(self): + return iter([]) + + +class SeqExpr(SeqBase): + """Sequence expression class. + + Various sequences should inherit from this class. + + Examples + ======== + + >>> from sympy.series.sequences import SeqExpr + >>> from sympy.abc import x + >>> from sympy import Tuple + >>> s = SeqExpr(Tuple(1, 2, 3), Tuple(x, 0, 10)) + >>> s.gen + (1, 2, 3) + >>> s.interval + Interval(0, 10) + >>> s.length + 11 + + See Also + ======== + + sympy.series.sequences.SeqPer + sympy.series.sequences.SeqFormula + """ + + @property + def gen(self): + return self.args[0] + + @property + def interval(self): + return Interval(self.args[1][1], self.args[1][2]) + + @property + def start(self): + return self.interval.inf + + @property + def stop(self): + return self.interval.sup + + @property + def length(self): + return self.stop - self.start + 1 + + @property + def variables(self): + return (self.args[1][0],) + + +class SeqPer(SeqExpr): + """ + Represents a periodic sequence. + + The elements are repeated after a given period. 
+ + Examples + ======== + + >>> from sympy import SeqPer, oo + >>> from sympy.abc import k + + >>> s = SeqPer((1, 2, 3), (0, 5)) + >>> s.periodical + (1, 2, 3) + >>> s.period + 3 + + For value at a particular point + + >>> s.coeff(3) + 1 + + supports slicing + + >>> s[:] + [1, 2, 3, 1, 2, 3] + + iterable + + >>> list(s) + [1, 2, 3, 1, 2, 3] + + sequence starts from negative infinity + + >>> SeqPer((1, 2, 3), (-oo, 0))[0:6] + [1, 2, 3, 1, 2, 3] + + Periodic formulas + + >>> SeqPer((k, k**2, k**3), (k, 0, oo))[0:6] + [0, 1, 8, 3, 16, 125] + + See Also + ======== + + sympy.series.sequences.SeqFormula + """ + + def __new__(cls, periodical, limits=None): + periodical = sympify(periodical) + + def _find_x(periodical): + free = periodical.free_symbols + if len(periodical.free_symbols) == 1: + return free.pop() + else: + return Dummy('k') + + x, start, stop = None, None, None + if limits is None: + x, start, stop = _find_x(periodical), 0, S.Infinity + if is_sequence(limits, Tuple): + if len(limits) == 3: + x, start, stop = limits + elif len(limits) == 2: + x = _find_x(periodical) + start, stop = limits + + if not isinstance(x, (Symbol, Idx)) or start is None or stop is None: + raise ValueError('Invalid limits given: %s' % str(limits)) + + if start is S.NegativeInfinity and stop is S.Infinity: + raise ValueError("Both the start and end value" + "cannot be unbounded") + + limits = sympify((x, start, stop)) + + if is_sequence(periodical, Tuple): + periodical = sympify(tuple(flatten(periodical))) + else: + raise ValueError("invalid period %s should be something " + "like e.g (1, 2) " % periodical) + + if Interval(limits[1], limits[2]) is S.EmptySet: + return S.EmptySequence + + return Basic.__new__(cls, periodical, limits) + + @property + def period(self): + return len(self.gen) + + @property + def periodical(self): + return self.gen + + def _eval_coeff(self, pt): + if self.start is S.NegativeInfinity: + idx = (self.stop - pt) % self.period + else: + idx = (pt - self.start) % self.period + return self.periodical[idx].subs(self.variables[0], pt) + + def _add(self, other): + """See docstring of SeqBase._add""" + if isinstance(other, SeqPer): + per1, lper1 = self.periodical, self.period + per2, lper2 = other.periodical, other.period + + per_length = lcm(lper1, lper2) + + new_per = [] + for x in range(per_length): + ele1 = per1[x % lper1] + ele2 = per2[x % lper2] + new_per.append(ele1 + ele2) + + start, stop = self._intersect_interval(other) + return SeqPer(new_per, (self.variables[0], start, stop)) + + def _mul(self, other): + """See docstring of SeqBase._mul""" + if isinstance(other, SeqPer): + per1, lper1 = self.periodical, self.period + per2, lper2 = other.periodical, other.period + + per_length = lcm(lper1, lper2) + + new_per = [] + for x in range(per_length): + ele1 = per1[x % lper1] + ele2 = per2[x % lper2] + new_per.append(ele1 * ele2) + + start, stop = self._intersect_interval(other) + return SeqPer(new_per, (self.variables[0], start, stop)) + + def coeff_mul(self, coeff): + """See docstring of SeqBase.coeff_mul""" + coeff = sympify(coeff) + per = [x * coeff for x in self.periodical] + return SeqPer(per, self.args[1]) + + +class SeqFormula(SeqExpr): + """ + Represents sequence based on a formula. + + Elements are generated using a formula. 
+ + Examples + ======== + + >>> from sympy import SeqFormula, oo, Symbol + >>> n = Symbol('n') + >>> s = SeqFormula(n**2, (n, 0, 5)) + >>> s.formula + n**2 + + For value at a particular point + + >>> s.coeff(3) + 9 + + supports slicing + + >>> s[:] + [0, 1, 4, 9, 16, 25] + + iterable + + >>> list(s) + [0, 1, 4, 9, 16, 25] + + sequence starts from negative infinity + + >>> SeqFormula(n**2, (-oo, 0))[0:6] + [0, 1, 4, 9, 16, 25] + + See Also + ======== + + sympy.series.sequences.SeqPer + """ + + def __new__(cls, formula, limits=None): + formula = sympify(formula) + + def _find_x(formula): + free = formula.free_symbols + if len(free) == 1: + return free.pop() + elif not free: + return Dummy('k') + else: + raise ValueError( + " specify dummy variables for %s. If the formula contains" + " more than one free symbol, a dummy variable should be" + " supplied explicitly e.g., SeqFormula(m*n**2, (n, 0, 5))" + % formula) + + x, start, stop = None, None, None + if limits is None: + x, start, stop = _find_x(formula), 0, S.Infinity + if is_sequence(limits, Tuple): + if len(limits) == 3: + x, start, stop = limits + elif len(limits) == 2: + x = _find_x(formula) + start, stop = limits + + if not isinstance(x, (Symbol, Idx)) or start is None or stop is None: + raise ValueError('Invalid limits given: %s' % str(limits)) + + if start is S.NegativeInfinity and stop is S.Infinity: + raise ValueError("Both the start and end value " + "cannot be unbounded") + limits = sympify((x, start, stop)) + + if Interval(limits[1], limits[2]) is S.EmptySet: + return S.EmptySequence + + return Basic.__new__(cls, formula, limits) + + @property + def formula(self): + return self.gen + + def _eval_coeff(self, pt): + d = self.variables[0] + return self.formula.subs(d, pt) + + def _add(self, other): + """See docstring of SeqBase._add""" + if isinstance(other, SeqFormula): + form1, v1 = self.formula, self.variables[0] + form2, v2 = other.formula, other.variables[0] + formula = form1 + form2.subs(v2, v1) + start, stop = self._intersect_interval(other) + return SeqFormula(formula, (v1, start, stop)) + + def _mul(self, other): + """See docstring of SeqBase._mul""" + if isinstance(other, SeqFormula): + form1, v1 = self.formula, self.variables[0] + form2, v2 = other.formula, other.variables[0] + formula = form1 * form2.subs(v2, v1) + start, stop = self._intersect_interval(other) + return SeqFormula(formula, (v1, start, stop)) + + def coeff_mul(self, coeff): + """See docstring of SeqBase.coeff_mul""" + coeff = sympify(coeff) + formula = self.formula * coeff + return SeqFormula(formula, self.args[1]) + + def expand(self, *args, **kwargs): + return SeqFormula(expand(self.formula, *args, **kwargs), self.args[1]) + +class RecursiveSeq(SeqBase): + """ + A finite degree recursive sequence. + + Explanation + =========== + + That is, a sequence a(n) that depends on a fixed, finite number of its + previous values. The general form is + + a(n) = f(a(n - 1), a(n - 2), ..., a(n - d)) + + for some fixed, positive integer d, where f is some function defined by a + SymPy expression. + + Parameters + ========== + + recurrence : SymPy expression defining recurrence + This is *not* an equality, only the expression that the nth term is + equal to. For example, if :code:`a(n) = f(a(n - 1), ..., a(n - d))`, + then the expression should be :code:`f(a(n - 1), ..., a(n - d))`. + + yn : applied undefined function + Represents the nth term of the sequence as e.g. :code:`y(n)` where + :code:`y` is an undefined function and `n` is the sequence index. 
+ + n : symbolic argument + The name of the variable that the recurrence is in, e.g., :code:`n` if + the recurrence function is :code:`y(n)`. + + initial : iterable with length equal to the degree of the recurrence + The initial values of the recurrence. + + start : start value of sequence (inclusive) + + Examples + ======== + + >>> from sympy import Function, symbols + >>> from sympy.series.sequences import RecursiveSeq + >>> y = Function("y") + >>> n = symbols("n") + >>> fib = RecursiveSeq(y(n - 1) + y(n - 2), y(n), n, [0, 1]) + + >>> fib.coeff(3) # Value at a particular point + 2 + + >>> fib[:6] # supports slicing + [0, 1, 1, 2, 3, 5] + + >>> fib.recurrence # inspect recurrence + Eq(y(n), y(n - 2) + y(n - 1)) + + >>> fib.degree # automatically determine degree + 2 + + >>> for x in zip(range(10), fib): # supports iteration + ... print(x) + (0, 0) + (1, 1) + (2, 1) + (3, 2) + (4, 3) + (5, 5) + (6, 8) + (7, 13) + (8, 21) + (9, 34) + + See Also + ======== + + sympy.series.sequences.SeqFormula + + """ + + def __new__(cls, recurrence, yn, n, initial=None, start=0): + if not isinstance(yn, AppliedUndef): + raise TypeError("recurrence sequence must be an applied undefined function" + ", found `{}`".format(yn)) + + if not isinstance(n, Basic) or not n.is_symbol: + raise TypeError("recurrence variable must be a symbol" + ", found `{}`".format(n)) + + if yn.args != (n,): + raise TypeError("recurrence sequence does not match symbol") + + y = yn.func + + k = Wild("k", exclude=(n,)) + degree = 0 + + # Find all applications of y in the recurrence and check that: + # 1. The function y is only being used with a single argument; and + # 2. All arguments are n + k for constant negative integers k. + + prev_ys = recurrence.find(y) + for prev_y in prev_ys: + if len(prev_y.args) != 1: + raise TypeError("Recurrence should be in a single variable") + + shift = prev_y.args[0].match(n + k)[k] + if not (shift.is_constant() and shift.is_integer and shift < 0): + raise TypeError("Recurrence should have constant," + " negative, integer shifts" + " (found {})".format(prev_y)) + + if -shift > degree: + degree = -shift + + if not initial: + initial = [Dummy("c_{}".format(k)) for k in range(degree)] + + if len(initial) != degree: + raise ValueError("Number of initial terms must equal degree") + + degree = Integer(degree) + start = sympify(start) + + initial = Tuple(*(sympify(x) for x in initial)) + + seq = Basic.__new__(cls, recurrence, yn, n, initial, start) + + seq.cache = {y(start + k): init for k, init in enumerate(initial)} + seq.degree = degree + + return seq + + @property + def _recurrence(self): + """Equation defining recurrence.""" + return self.args[0] + + @property + def recurrence(self): + """Equation defining recurrence.""" + return Eq(self.yn, self.args[0]) + + @property + def yn(self): + """Applied function representing the nth term""" + return self.args[1] + + @property + def y(self): + """Undefined function for the nth term of the sequence""" + return self.yn.func + + @property + def n(self): + """Sequence index symbol""" + return self.args[2] + + @property + def initial(self): + """The initial values of the sequence""" + return self.args[3] + + @property + def start(self): + """The starting point of the sequence. This point is included""" + return self.args[4] + + @property + def stop(self): + """The ending point of the sequence. 
(oo)""" + return S.Infinity + + @property + def interval(self): + """Interval on which sequence is defined.""" + return (self.start, S.Infinity) + + def _eval_coeff(self, index): + if index - self.start < len(self.cache): + return self.cache[self.y(index)] + + for current in range(len(self.cache), index + 1): + # Use xreplace over subs for performance. + # See issue #10697. + seq_index = self.start + current + current_recurrence = self._recurrence.xreplace({self.n: seq_index}) + new_term = current_recurrence.xreplace(self.cache) + + self.cache[self.y(seq_index)] = new_term + + return self.cache[self.y(self.start + current)] + + def __iter__(self): + index = self.start + while True: + yield self._eval_coeff(index) + index += 1 + + +def sequence(seq, limits=None): + """ + Returns appropriate sequence object. + + Explanation + =========== + + If ``seq`` is a SymPy sequence, returns :class:`SeqPer` object + otherwise returns :class:`SeqFormula` object. + + Examples + ======== + + >>> from sympy import sequence + >>> from sympy.abc import n + >>> sequence(n**2, (n, 0, 5)) + SeqFormula(n**2, (n, 0, 5)) + >>> sequence((1, 2, 3), (n, 0, 5)) + SeqPer((1, 2, 3), (n, 0, 5)) + + See Also + ======== + + sympy.series.sequences.SeqPer + sympy.series.sequences.SeqFormula + """ + seq = sympify(seq) + + if is_sequence(seq, Tuple): + return SeqPer(seq, limits) + else: + return SeqFormula(seq, limits) + + +############################################################################### +# OPERATIONS # +############################################################################### + + +class SeqExprOp(SeqBase): + """ + Base class for operations on sequences. + + Examples + ======== + + >>> from sympy.series.sequences import SeqExprOp, sequence + >>> from sympy.abc import n + >>> s1 = sequence(n**2, (n, 0, 10)) + >>> s2 = sequence((1, 2, 3), (n, 5, 10)) + >>> s = SeqExprOp(s1, s2) + >>> s.gen + (n**2, (1, 2, 3)) + >>> s.interval + Interval(5, 10) + >>> s.length + 6 + + See Also + ======== + + sympy.series.sequences.SeqAdd + sympy.series.sequences.SeqMul + """ + @property + def gen(self): + """Generator for the sequence. + + returns a tuple of generators of all the argument sequences. + """ + return tuple(a.gen for a in self.args) + + @property + def interval(self): + """Sequence is defined on the intersection + of all the intervals of respective sequences + """ + return Intersection(*(a.interval for a in self.args)) + + @property + def start(self): + return self.interval.inf + + @property + def stop(self): + return self.interval.sup + + @property + def variables(self): + """Cumulative of all the bound variables""" + return tuple(flatten([a.variables for a in self.args])) + + @property + def length(self): + return self.stop - self.start + 1 + + +class SeqAdd(SeqExprOp): + """Represents term-wise addition of sequences. + + Rules: + * The interval on which sequence is defined is the intersection + of respective intervals of sequences. + * Anything + :class:`EmptySequence` remains unchanged. + * Other rules are defined in ``_add`` methods of sequence classes. 
+ + Examples + ======== + + >>> from sympy import EmptySequence, oo, SeqAdd, SeqPer, SeqFormula + >>> from sympy.abc import n + >>> SeqAdd(SeqPer((1, 2), (n, 0, oo)), EmptySequence) + SeqPer((1, 2), (n, 0, oo)) + >>> SeqAdd(SeqPer((1, 2), (n, 0, 5)), SeqPer((1, 2), (n, 6, 10))) + EmptySequence + >>> SeqAdd(SeqPer((1, 2), (n, 0, oo)), SeqFormula(n**2, (n, 0, oo))) + SeqAdd(SeqFormula(n**2, (n, 0, oo)), SeqPer((1, 2), (n, 0, oo))) + >>> SeqAdd(SeqFormula(n**3), SeqFormula(n**2)) + SeqFormula(n**3 + n**2, (n, 0, oo)) + + See Also + ======== + + sympy.series.sequences.SeqMul + """ + + def __new__(cls, *args, **kwargs): + evaluate = kwargs.get('evaluate', global_parameters.evaluate) + + # flatten inputs + args = list(args) + + # adapted from sympy.sets.sets.Union + def _flatten(arg): + if isinstance(arg, SeqBase): + if isinstance(arg, SeqAdd): + return sum(map(_flatten, arg.args), []) + else: + return [arg] + if iterable(arg): + return sum(map(_flatten, arg), []) + raise TypeError("Input must be Sequences or " + " iterables of Sequences") + args = _flatten(args) + + args = [a for a in args if a is not S.EmptySequence] + + # Addition of no sequences is EmptySequence + if not args: + return S.EmptySequence + + if Intersection(*(a.interval for a in args)) is S.EmptySet: + return S.EmptySequence + + # reduce using known rules + if evaluate: + return SeqAdd.reduce(args) + + args = list(ordered(args, SeqBase._start_key)) + + return Basic.__new__(cls, *args) + + @staticmethod + def reduce(args): + """Simplify :class:`SeqAdd` using known rules. + + Iterates through all pairs and ask the constituent + sequences if they can simplify themselves with any other constituent. + + Notes + ===== + + adapted from ``Union.reduce`` + + """ + new_args = True + while new_args: + for id1, s in enumerate(args): + new_args = False + for id2, t in enumerate(args): + if id1 == id2: + continue + new_seq = s._add(t) + # This returns None if s does not know how to add + # with t. Returns the newly added sequence otherwise + if new_seq is not None: + new_args = [a for a in args if a not in (s, t)] + new_args.append(new_seq) + break + if new_args: + args = new_args + break + + if len(args) == 1: + return args.pop() + else: + return SeqAdd(args, evaluate=False) + + def _eval_coeff(self, pt): + """adds up the coefficients of all the sequences at point pt""" + return sum(a.coeff(pt) for a in self.args) + + +class SeqMul(SeqExprOp): + r"""Represents term-wise multiplication of sequences. + + Explanation + =========== + + Handles multiplication of sequences only. For multiplication + with other objects see :func:`SeqBase.coeff_mul`. + + Rules: + * The interval on which sequence is defined is the intersection + of respective intervals of sequences. + * Anything \* :class:`EmptySequence` returns :class:`EmptySequence`. + * Other rules are defined in ``_mul`` methods of sequence classes. 
+ + Examples + ======== + + >>> from sympy import EmptySequence, oo, SeqMul, SeqPer, SeqFormula + >>> from sympy.abc import n + >>> SeqMul(SeqPer((1, 2), (n, 0, oo)), EmptySequence) + EmptySequence + >>> SeqMul(SeqPer((1, 2), (n, 0, 5)), SeqPer((1, 2), (n, 6, 10))) + EmptySequence + >>> SeqMul(SeqPer((1, 2), (n, 0, oo)), SeqFormula(n**2)) + SeqMul(SeqFormula(n**2, (n, 0, oo)), SeqPer((1, 2), (n, 0, oo))) + >>> SeqMul(SeqFormula(n**3), SeqFormula(n**2)) + SeqFormula(n**5, (n, 0, oo)) + + See Also + ======== + + sympy.series.sequences.SeqAdd + """ + + def __new__(cls, *args, **kwargs): + evaluate = kwargs.get('evaluate', global_parameters.evaluate) + + # flatten inputs + args = list(args) + + # adapted from sympy.sets.sets.Union + def _flatten(arg): + if isinstance(arg, SeqBase): + if isinstance(arg, SeqMul): + return sum(map(_flatten, arg.args), []) + else: + return [arg] + elif iterable(arg): + return sum(map(_flatten, arg), []) + raise TypeError("Input must be Sequences or " + " iterables of Sequences") + args = _flatten(args) + + # Multiplication of no sequences is EmptySequence + if not args: + return S.EmptySequence + + if Intersection(*(a.interval for a in args)) is S.EmptySet: + return S.EmptySequence + + # reduce using known rules + if evaluate: + return SeqMul.reduce(args) + + args = list(ordered(args, SeqBase._start_key)) + + return Basic.__new__(cls, *args) + + @staticmethod + def reduce(args): + """Simplify a :class:`SeqMul` using known rules. + + Explanation + =========== + + Iterates through all pairs and ask the constituent + sequences if they can simplify themselves with any other constituent. + + Notes + ===== + + adapted from ``Union.reduce`` + + """ + new_args = True + while new_args: + for id1, s in enumerate(args): + new_args = False + for id2, t in enumerate(args): + if id1 == id2: + continue + new_seq = s._mul(t) + # This returns None if s does not know how to multiply + # with t. Returns the newly multiplied sequence otherwise + if new_seq is not None: + new_args = [a for a in args if a not in (s, t)] + new_args.append(new_seq) + break + if new_args: + args = new_args + break + + if len(args) == 1: + return args.pop() + else: + return SeqMul(args, evaluate=False) + + def _eval_coeff(self, pt): + """multiplies the coefficients of all the sequences at point pt""" + val = 1 + for a in self.args: + val *= a.coeff(pt) + return val diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/series/series.py b/llmeval-env/lib/python3.10/site-packages/sympy/series/series.py new file mode 100644 index 0000000000000000000000000000000000000000..b355bf430b4bc074748021fa9dca4fc70e2da075 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/series/series.py @@ -0,0 +1,63 @@ +from sympy.core.sympify import sympify + + +def series(expr, x=None, x0=0, n=6, dir="+"): + """Series expansion of expr around point `x = x0`. + + Parameters + ========== + + expr : Expression + The expression whose series is to be expanded. + + x : Symbol + It is the variable of the expression to be calculated. + + x0 : Value + The value around which ``x`` is calculated. Can be any value + from ``-oo`` to ``oo``. + + n : Value + The number of terms upto which the series is to be expanded. + + dir : String, optional + The series-expansion can be bi-directional. If ``dir="+"``, + then (x->x0+). If ``dir="-", then (x->x0-). For infinite + ``x0`` (``oo`` or ``-oo``), the ``dir`` argument is determined + from the direction of the infinity (i.e., ``dir="-"`` for + ``oo``). 
+
+    Examples
+    ========
+
+    >>> from sympy import series, tan, oo
+    >>> from sympy.abc import x
+    >>> f = tan(x)
+    >>> series(f, x, 2, 6, "+")
+    tan(2) + (1 + tan(2)**2)*(x - 2) + (x - 2)**2*(tan(2)**3 + tan(2)) +
+    (x - 2)**3*(1/3 + 4*tan(2)**2/3 + tan(2)**4) + (x - 2)**4*(tan(2)**5 +
+    5*tan(2)**3/3 + 2*tan(2)/3) + (x - 2)**5*(2/15 + 17*tan(2)**2/15 +
+    2*tan(2)**4 + tan(2)**6) + O((x - 2)**6, (x, 2))
+
+    >>> series(f, x, 2, 3, "-")
+    tan(2) + (2 - x)*(-tan(2)**2 - 1) + (2 - x)**2*(tan(2)**3 + tan(2)) +
+    O((x - 2)**3, (x, 2))
+
+    >>> series(f, x, 2, oo, "+")
+    Traceback (most recent call last):
+    ...
+    TypeError: 'Infinity' object cannot be interpreted as an integer
+
+    Returns
+    =======
+
+    Expr
+        Series expansion of the expression about ``x0``.
+
+    See Also
+    ========
+
+    sympy.core.expr.Expr.series: See the docstring of Expr.series() for complete details of this wrapper.
+    """
+    expr = sympify(expr)
+    return expr.series(x, x0, n, dir)
diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/series/series_class.py b/llmeval-env/lib/python3.10/site-packages/sympy/series/series_class.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff04993b266a3cbd3f767042d4325fb11edb2168
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/sympy/series/series_class.py
@@ -0,0 +1,99 @@
+"""
+Contains the base class for series.
+Designed with sequences in mind.
+"""
+
+from sympy.core.expr import Expr
+from sympy.core.singleton import S
+from sympy.core.cache import cacheit
+
+
+class SeriesBase(Expr):
+    """Base class for series."""
+
+    @property
+    def interval(self):
+        """The interval on which the series is defined."""
+        raise NotImplementedError("(%s).interval" % self)
+
+    @property
+    def start(self):
+        """The starting point of the series. This point is included."""
+        raise NotImplementedError("(%s).start" % self)
+
+    @property
+    def stop(self):
+        """The ending point of the series. This point is included."""
+        raise NotImplementedError("(%s).stop" % self)
+
+    @property
+    def length(self):
+        """Length of the series expansion."""
+        raise NotImplementedError("(%s).length" % self)
+
+    @property
+    def variables(self):
+        """Returns a tuple of variables that are bounded."""
+        return ()
+
+    @property
+    def free_symbols(self):
+        """
+        This method returns the symbols in the object, excluding those
+        that take on a specific value (i.e. the dummy symbols).
+        """
+        return ({j for i in self.args for j in i.free_symbols}
+                .difference(self.variables))
+
+    @cacheit
+    def term(self, pt):
+        """Term at point ``pt`` of a series."""
+        if pt < self.start or pt > self.stop:
+            raise IndexError("Index %s out of bounds %s" % (pt, self.interval))
+        return self._eval_term(pt)
+
+    def _eval_term(self, pt):
+        raise NotImplementedError("The _eval_term method should be added to "
+                                  "%s to return series term so it is available "
+                                  "when 'term' calls it."
+                                  % self.func)
+
+    def _ith_point(self, i):
+        """
+        Returns the i-th point of a series.
+        If the start point is negative infinity, the point is returned from the end.
+        Assumes the first point to be indexed zero.
+
+        Examples
+        ========
+
+        TODO
+        """
+        if self.start is S.NegativeInfinity:
+            initial = self.stop
+            step = -1
+        else:
+            initial = self.start
+            step = 1
+
+        return initial + i*step
+
+    def __iter__(self):
+        i = 0
+        while i < self.length:
+            pt = self._ith_point(i)
+            yield self.term(pt)
+            i += 1
+
+    def __getitem__(self, index):
+        if isinstance(index, int):
+            index = self._ith_point(index)
+            return self.term(index)
+        elif isinstance(index, slice):
+            start, stop = index.start, index.stop
+            if start is None:
+                start = 0
+            if stop is None:
+                stop = self.length
+            return [self.term(self._ith_point(i)) for i in
+                    range(start, stop, index.step or 1)]
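
Usage sketch (editorial note, not part of the patch above): the snippet below exercises the pieces added by these files, assuming a standard SymPy installation where SeqAdd, SeqMul, SeqFormula, SeqPer and series are importable from the top level, as the docstrings above show. It additionally assumes fps, SymPy's formal power series constructor, which is not added by this diff but returns a SeriesBase subclass and so goes through SeriesBase.__getitem__. The values in the comments follow from the rules stated in the docstrings above.

# Usage sketch -- illustrative only, not part of the diff above.
from sympy import SeqAdd, SeqMul, SeqFormula, SeqPer, fps, series, sin, oo
from sympy.abc import n, x

# Term-wise sequence arithmetic; the intervals of the operands are intersected.
s = SeqAdd(SeqFormula(n**2, (n, 0, oo)), SeqPer((1, 2), (n, 0, oo)))
p = SeqMul(SeqFormula(n, (n, 0, oo)), SeqFormula(n + 1, (n, 0, oo)))
print(s.coeff(3))  # 3**2 + 2 == 11 (SeqPer cycles 1, 2, 1, 2, ...)
print(p.coeff(3))  # the formulas combine to n*(n + 1), so 3*4 == 12

# series() is a thin wrapper that delegates to Expr.series().
print(series(sin(x), x, 0, 4))  # x - x**3/6 + O(x**4)

# fps returns a SeriesBase subclass, so slicing is handled by
# SeriesBase.__getitem__ via _ith_point and term.
print(fps(sin(x), x)[:4])  # the first four terms of the formal power series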