diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/algebras/__init__.py b/llmeval-env/lib/python3.10/site-packages/sympy/algebras/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..58013d2e0377a016d1a21fbf21c344ee76765189 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/algebras/__init__.py @@ -0,0 +1,3 @@ +from .quaternion import Quaternion + +__all__ = ["Quaternion",] diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/algebras/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/algebras/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d93c48eb8dcdd1099357f3332f52c5e8cd443a5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/algebras/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/algebras/__pycache__/quaternion.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/algebras/__pycache__/quaternion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8004a4d8ed62a7aa4f9710327f6f2dd36ad736b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/algebras/__pycache__/quaternion.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/algebras/quaternion.py b/llmeval-env/lib/python3.10/site-packages/sympy/algebras/quaternion.py new file mode 100644 index 0000000000000000000000000000000000000000..3e9170333dc857f373cf25f8e6c4811293939575 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/algebras/quaternion.py @@ -0,0 +1,1673 @@ +from sympy.core.numbers import Rational +from sympy.core.singleton import S +from sympy.core.relational import is_eq +from sympy.functions.elementary.complexes import (conjugate, im, re, sign) +from sympy.functions.elementary.exponential import (exp, log as ln) +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.functions.elementary.trigonometric import (acos, asin, atan2) +from sympy.functions.elementary.trigonometric import (cos, sin) +from sympy.simplify.trigsimp import trigsimp +from sympy.integrals.integrals import integrate +from sympy.matrices.dense import MutableDenseMatrix as Matrix +from sympy.core.sympify import sympify, _sympify +from sympy.core.expr import Expr +from sympy.core.logic import fuzzy_not, fuzzy_or + +from mpmath.libmp.libmpf import prec_to_dps + + +def _check_norm(elements, norm): + """validate if input norm is consistent""" + if norm is not None and norm.is_number: + if norm.is_positive is False: + raise ValueError("Input norm must be positive.") + + numerical = all(i.is_number and i.is_real is True for i in elements) + if numerical and is_eq(norm**2, sum(i**2 for i in elements)) is False: + raise ValueError("Incompatible value for norm.") + + +def _is_extrinsic(seq): + """validate seq and return True if seq is lowercase and False if uppercase""" + if type(seq) != str: + raise ValueError('Expected seq to be a string.') + if len(seq) != 3: + raise ValueError("Expected 3 axes, got `{}`.".format(seq)) + + intrinsic = seq.isupper() + extrinsic = seq.islower() + if not (intrinsic or extrinsic): + raise ValueError("seq must either be fully uppercase (for extrinsic " + "rotations), or fully lowercase, for intrinsic " + "rotations).") + + i, j, k = seq.lower() + if (i == j) or (j == k): + raise ValueError("Consecutive axes must be different") + + bad = set(seq) - set('xyzXYZ') + if bad: + raise 
ValueError("Expected axes from `seq` to be from " + "['x', 'y', 'z'] or ['X', 'Y', 'Z'], " + "got {}".format(''.join(bad))) + + return extrinsic + + +class Quaternion(Expr): + """Provides basic quaternion operations. + Quaternion objects can be instantiated as Quaternion(a, b, c, d) + as in (a + b*i + c*j + d*k). + + Parameters + ========== + + norm : None or number + Pre-defined quaternion norm. If a value is given, Quaternion.norm + returns this pre-defined value instead of calculating the norm + + Examples + ======== + + >>> from sympy import Quaternion + >>> q = Quaternion(1, 2, 3, 4) + >>> q + 1 + 2*i + 3*j + 4*k + + Quaternions over complex fields can be defined as : + + >>> from sympy import Quaternion + >>> from sympy import symbols, I + >>> x = symbols('x') + >>> q1 = Quaternion(x, x**3, x, x**2, real_field = False) + >>> q2 = Quaternion(3 + 4*I, 2 + 5*I, 0, 7 + 8*I, real_field = False) + >>> q1 + x + x**3*i + x*j + x**2*k + >>> q2 + (3 + 4*I) + (2 + 5*I)*i + 0*j + (7 + 8*I)*k + + Defining symbolic unit quaternions: + >>> from sympy import Quaternion + >>> from sympy.abc import w, x, y, z + >>> q = Quaternion(w, x, y, z, norm=1) + >>> q + w + x*i + y*j + z*k + >>> q.norm() + 1 + + References + ========== + + .. [1] https://www.euclideanspace.com/maths/algebra/realNormedAlgebra/quaternions/ + .. [2] https://en.wikipedia.org/wiki/Quaternion + + """ + _op_priority = 11.0 + + is_commutative = False + + def __new__(cls, a=0, b=0, c=0, d=0, real_field=True, norm=None): + a, b, c, d = map(sympify, (a, b, c, d)) + + if any(i.is_commutative is False for i in [a, b, c, d]): + raise ValueError("arguments have to be commutative") + else: + obj = Expr.__new__(cls, a, b, c, d) + obj._a = a + obj._b = b + obj._c = c + obj._d = d + obj._real_field = real_field + obj.set_norm(norm) + return obj + + def set_norm(self, norm): + """Sets norm of an already instantiated quaternion. + + Parameters + ========== + + norm : None or number + Pre-defined quaternion norm. If a value is given, Quaternion.norm + returns this pre-defined value instead of calculating the norm + + Examples + ======== + + >>> from sympy import Quaternion + >>> from sympy.abc import a, b, c, d + >>> q = Quaternion(a, b, c, d) + >>> q.norm() + sqrt(a**2 + b**2 + c**2 + d**2) + + Setting the norm: + + >>> q.set_norm(1) + >>> q.norm() + 1 + + Removing set norm: + + >>> q.set_norm(None) + >>> q.norm() + sqrt(a**2 + b**2 + c**2 + d**2) + + """ + norm = sympify(norm) + _check_norm(self.args, norm) + self._norm = norm + + @property + def a(self): + return self._a + + @property + def b(self): + return self._b + + @property + def c(self): + return self._c + + @property + def d(self): + return self._d + + @property + def real_field(self): + return self._real_field + + @property + def product_matrix_left(self): + r"""Returns 4 x 4 Matrix equivalent to a Hamilton product from the + left. This can be useful when treating quaternion elements as column + vectors. Given a quaternion $q = a + bi + cj + dk$ where a, b, c and d + are real numbers, the product matrix from the left is: + + .. 
math:: + + M = \begin{bmatrix} a &-b &-c &-d \\ + b & a &-d & c \\ + c & d & a &-b \\ + d &-c & b & a \end{bmatrix} + + Examples + ======== + + >>> from sympy import Quaternion + >>> from sympy.abc import a, b, c, d + >>> q1 = Quaternion(1, 0, 0, 1) + >>> q2 = Quaternion(a, b, c, d) + >>> q1.product_matrix_left + Matrix([ + [1, 0, 0, -1], + [0, 1, -1, 0], + [0, 1, 1, 0], + [1, 0, 0, 1]]) + + >>> q1.product_matrix_left * q2.to_Matrix() + Matrix([ + [a - d], + [b - c], + [b + c], + [a + d]]) + + This is equivalent to: + + >>> (q1 * q2).to_Matrix() + Matrix([ + [a - d], + [b - c], + [b + c], + [a + d]]) + """ + return Matrix([ + [self.a, -self.b, -self.c, -self.d], + [self.b, self.a, -self.d, self.c], + [self.c, self.d, self.a, -self.b], + [self.d, -self.c, self.b, self.a]]) + + @property + def product_matrix_right(self): + r"""Returns 4 x 4 Matrix equivalent to a Hamilton product from the + right. This can be useful when treating quaternion elements as column + vectors. Given a quaternion $q = a + bi + cj + dk$ where a, b, c and d + are real numbers, the product matrix from the left is: + + .. math:: + + M = \begin{bmatrix} a &-b &-c &-d \\ + b & a & d &-c \\ + c &-d & a & b \\ + d & c &-b & a \end{bmatrix} + + + Examples + ======== + + >>> from sympy import Quaternion + >>> from sympy.abc import a, b, c, d + >>> q1 = Quaternion(a, b, c, d) + >>> q2 = Quaternion(1, 0, 0, 1) + >>> q2.product_matrix_right + Matrix([ + [1, 0, 0, -1], + [0, 1, 1, 0], + [0, -1, 1, 0], + [1, 0, 0, 1]]) + + Note the switched arguments: the matrix represents the quaternion on + the right, but is still considered as a matrix multiplication from the + left. + + >>> q2.product_matrix_right * q1.to_Matrix() + Matrix([ + [ a - d], + [ b + c], + [-b + c], + [ a + d]]) + + This is equivalent to: + + >>> (q1 * q2).to_Matrix() + Matrix([ + [ a - d], + [ b + c], + [-b + c], + [ a + d]]) + """ + return Matrix([ + [self.a, -self.b, -self.c, -self.d], + [self.b, self.a, self.d, -self.c], + [self.c, -self.d, self.a, self.b], + [self.d, self.c, -self.b, self.a]]) + + def to_Matrix(self, vector_only=False): + """Returns elements of quaternion as a column vector. + By default, a Matrix of length 4 is returned, with the real part as the + first element. + If vector_only is True, returns only imaginary part as a Matrix of + length 3. + + Parameters + ========== + + vector_only : bool + If True, only imaginary part is returned. + Default value: False + + Returns + ======= + + Matrix + A column vector constructed by the elements of the quaternion. + + Examples + ======== + + >>> from sympy import Quaternion + >>> from sympy.abc import a, b, c, d + >>> q = Quaternion(a, b, c, d) + >>> q + a + b*i + c*j + d*k + + >>> q.to_Matrix() + Matrix([ + [a], + [b], + [c], + [d]]) + + + >>> q.to_Matrix(vector_only=True) + Matrix([ + [b], + [c], + [d]]) + + """ + if vector_only: + return Matrix(self.args[1:]) + else: + return Matrix(self.args) + + @classmethod + def from_Matrix(cls, elements): + """Returns quaternion from elements of a column vector`. + If vector_only is True, returns only imaginary part as a Matrix of + length 3. + + Parameters + ========== + + elements : Matrix, list or tuple of length 3 or 4. If length is 3, + assume real part is zero. + Default value: False + + Returns + ======= + + Quaternion + A quaternion created from the input elements. 
+ + Examples + ======== + + >>> from sympy import Quaternion + >>> from sympy.abc import a, b, c, d + >>> q = Quaternion.from_Matrix([a, b, c, d]) + >>> q + a + b*i + c*j + d*k + + >>> q = Quaternion.from_Matrix([b, c, d]) + >>> q + 0 + b*i + c*j + d*k + + """ + length = len(elements) + if length != 3 and length != 4: + raise ValueError("Input elements must have length 3 or 4, got {} " + "elements".format(length)) + + if length == 3: + return Quaternion(0, *elements) + else: + return Quaternion(*elements) + + @classmethod + def from_euler(cls, angles, seq): + """Returns quaternion equivalent to rotation represented by the Euler + angles, in the sequence defined by ``seq``. + + Parameters + ========== + + angles : list, tuple or Matrix of 3 numbers + The Euler angles (in radians). + seq : string of length 3 + Represents the sequence of rotations. + For extrinsic rotations, seq must be all lowercase and its elements + must be from the set ``{'x', 'y', 'z'}`` + For intrinsic rotations, seq must be all uppercase and its elements + must be from the set ``{'X', 'Y', 'Z'}`` + + Returns + ======= + + Quaternion + The normalized rotation quaternion calculated from the Euler angles + in the given sequence. + + Examples + ======== + + >>> from sympy import Quaternion + >>> from sympy import pi + >>> q = Quaternion.from_euler([pi/2, 0, 0], 'xyz') + >>> q + sqrt(2)/2 + sqrt(2)/2*i + 0*j + 0*k + + >>> q = Quaternion.from_euler([0, pi/2, pi] , 'zyz') + >>> q + 0 + (-sqrt(2)/2)*i + 0*j + sqrt(2)/2*k + + >>> q = Quaternion.from_euler([0, pi/2, pi] , 'ZYZ') + >>> q + 0 + sqrt(2)/2*i + 0*j + sqrt(2)/2*k + + """ + + if len(angles) != 3: + raise ValueError("3 angles must be given.") + + extrinsic = _is_extrinsic(seq) + i, j, k = seq.lower() + + # get elementary basis vectors + ei = [1 if n == i else 0 for n in 'xyz'] + ej = [1 if n == j else 0 for n in 'xyz'] + ek = [1 if n == k else 0 for n in 'xyz'] + + # calculate distinct quaternions + qi = cls.from_axis_angle(ei, angles[0]) + qj = cls.from_axis_angle(ej, angles[1]) + qk = cls.from_axis_angle(ek, angles[2]) + + if extrinsic: + return trigsimp(qk * qj * qi) + else: + return trigsimp(qi * qj * qk) + + def to_euler(self, seq, angle_addition=True, avoid_square_root=False): + r"""Returns Euler angles representing the same rotation as the quaternion, + in the sequence given by ``seq``. This implements the method described + in [1]_. + + For degenerate cases (gimbal lock cases), the third angle is + set to zero. + + Parameters + ========== + + seq : string of length 3 + Represents the sequence of rotations. + For extrinsic rotations, seq must be all lowercase and its elements + must be from the set ``{'x', 'y', 'z'}`` + For intrinsic rotations, seq must be all uppercase and its elements + must be from the set ``{'X', 'Y', 'Z'}`` + + angle_addition : bool + When True, first and third angles are given as an addition and + subtraction of two simpler ``atan2`` expressions. When False, the + first and third angles are each given by a single more complicated + ``atan2`` expression. This equivalent expression is given by: + + .. math:: + + \operatorname{atan_2} (b,a) \pm \operatorname{atan_2} (d,c) = + \operatorname{atan_2} (bc\pm ad, ac\mp bd) + + Default value: True + + avoid_square_root : bool + When True, the second angle is calculated with an expression based + on ``acos``, which is slightly more complicated but avoids a square + root.
When False, second angle is calculated with ``atan2``, which + is simpler and can be better for numerical reasons (some + numerical implementations of ``acos`` have problems near zero). + Default value: False + + + Returns + ======= + + Tuple + The Euler angles calculated from the quaternion + + Examples + ======== + + >>> from sympy import Quaternion + >>> from sympy.abc import a, b, c, d + >>> euler = Quaternion(a, b, c, d).to_euler('zyz') + >>> euler + (-atan2(-b, c) + atan2(d, a), + 2*atan2(sqrt(b**2 + c**2), sqrt(a**2 + d**2)), + atan2(-b, c) + atan2(d, a)) + + + References + ========== + + .. [1] https://doi.org/10.1371/journal.pone.0276302 + + """ + if self.is_zero_quaternion(): + raise ValueError('Cannot convert a quaternion with norm 0.') + + angles = [0, 0, 0] + + extrinsic = _is_extrinsic(seq) + i, j, k = seq.lower() + + # get index corresponding to elementary basis vectors + i = 'xyz'.index(i) + 1 + j = 'xyz'.index(j) + 1 + k = 'xyz'.index(k) + 1 + + if not extrinsic: + i, k = k, i + + # check if sequence is symmetric + symmetric = i == k + if symmetric: + k = 6 - i - j + + # parity of the permutation + sign = (i - j) * (j - k) * (k - i) // 2 + + # permutate elements + elements = [self.a, self.b, self.c, self.d] + a = elements[0] + b = elements[i] + c = elements[j] + d = elements[k] * sign + + if not symmetric: + a, b, c, d = a - c, b + d, c + a, d - b + + if avoid_square_root: + if symmetric: + n2 = self.norm()**2 + angles[1] = acos((a * a + b * b - c * c - d * d) / n2) + else: + n2 = 2 * self.norm()**2 + angles[1] = asin((c * c + d * d - a * a - b * b) / n2) + else: + angles[1] = 2 * atan2(sqrt(c * c + d * d), sqrt(a * a + b * b)) + if not symmetric: + angles[1] -= S.Pi / 2 + + # Check for singularities in numerical cases + case = 0 + if is_eq(c, S.Zero) and is_eq(d, S.Zero): + case = 1 + if is_eq(a, S.Zero) and is_eq(b, S.Zero): + case = 2 + + if case == 0: + if angle_addition: + angles[0] = atan2(b, a) + atan2(d, c) + angles[2] = atan2(b, a) - atan2(d, c) + else: + angles[0] = atan2(b*c + a*d, a*c - b*d) + angles[2] = atan2(b*c - a*d, a*c + b*d) + + else: # any degenerate case + angles[2 * (not extrinsic)] = S.Zero + if case == 1: + angles[2 * extrinsic] = 2 * atan2(b, a) + else: + angles[2 * extrinsic] = 2 * atan2(d, c) + angles[2 * extrinsic] *= (-1 if extrinsic else 1) + + # for Tait-Bryan angles + if not symmetric: + angles[0] *= sign + + if extrinsic: + return tuple(angles[::-1]) + else: + return tuple(angles) + + @classmethod + def from_axis_angle(cls, vector, angle): + """Returns a rotation quaternion given the axis and the angle of rotation. + + Parameters + ========== + + vector : tuple of three numbers + The vector representation of the given axis. + angle : number + The angle by which axis is rotated (in radians). + + Returns + ======= + + Quaternion + The normalized rotation quaternion calculated from the given axis and the angle of rotation. 
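+ + For a normalized axis ``(x, y, z)`` and angle ``angle``, the returned quaternion is the half-angle form ``cos(angle/2) + sin(angle/2)*(x*i + y*j + z*k)``; this is the same construction carried out in the code below after the input vector is normalized.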
+ + Examples + ======== + + >>> from sympy import Quaternion + >>> from sympy import pi, sqrt + >>> q = Quaternion.from_axis_angle((sqrt(3)/3, sqrt(3)/3, sqrt(3)/3), 2*pi/3) + >>> q + 1/2 + 1/2*i + 1/2*j + 1/2*k + + """ + (x, y, z) = vector + norm = sqrt(x**2 + y**2 + z**2) + (x, y, z) = (x / norm, y / norm, z / norm) + s = sin(angle * S.Half) + a = cos(angle * S.Half) + b = x * s + c = y * s + d = z * s + + # note that this quaternion is already normalized by construction: + # c^2 + (s*x)^2 + (s*y)^2 + (s*z)^2 = c^2 + s^2*(x^2 + y^2 + z^2) = c^2 + s^2 * 1 = c^2 + s^2 = 1 + # so, what we return is a normalized quaternion + + return cls(a, b, c, d) + + @classmethod + def from_rotation_matrix(cls, M): + """Returns the equivalent quaternion of a matrix. The quaternion will be normalized + only if the matrix is special orthogonal (orthogonal and det(M) = 1). + + Parameters + ========== + + M : Matrix + Input matrix to be converted to equivalent quaternion. M must be special + orthogonal (orthogonal and det(M) = 1) for the quaternion to be normalized. + + Returns + ======= + + Quaternion + The quaternion equivalent to given matrix. + + Examples + ======== + + >>> from sympy import Quaternion + >>> from sympy import Matrix, symbols, cos, sin, trigsimp + >>> x = symbols('x') + >>> M = Matrix([[cos(x), -sin(x), 0], [sin(x), cos(x), 0], [0, 0, 1]]) + >>> q = trigsimp(Quaternion.from_rotation_matrix(M)) + >>> q + sqrt(2)*sqrt(cos(x) + 1)/2 + 0*i + 0*j + sqrt(2 - 2*cos(x))*sign(sin(x))/2*k + + """ + + absQ = M.det()**Rational(1, 3) + + a = sqrt(absQ + M[0, 0] + M[1, 1] + M[2, 2]) / 2 + b = sqrt(absQ + M[0, 0] - M[1, 1] - M[2, 2]) / 2 + c = sqrt(absQ - M[0, 0] + M[1, 1] - M[2, 2]) / 2 + d = sqrt(absQ - M[0, 0] - M[1, 1] + M[2, 2]) / 2 + + b = b * sign(M[2, 1] - M[1, 2]) + c = c * sign(M[0, 2] - M[2, 0]) + d = d * sign(M[1, 0] - M[0, 1]) + + return Quaternion(a, b, c, d) + + def __add__(self, other): + return self.add(other) + + def __radd__(self, other): + return self.add(other) + + def __sub__(self, other): + return self.add(other*-1) + + def __mul__(self, other): + return self._generic_mul(self, _sympify(other)) + + def __rmul__(self, other): + return self._generic_mul(_sympify(other), self) + + def __pow__(self, p): + return self.pow(p) + + def __neg__(self): + return Quaternion(-self._a, -self._b, -self._c, -self.d) + + def __truediv__(self, other): + return self * sympify(other)**-1 + + def __rtruediv__(self, other): + return sympify(other) * self**-1 + + def _eval_Integral(self, *args): + return self.integrate(*args) + + def diff(self, *symbols, **kwargs): + kwargs.setdefault('evaluate', True) + return self.func(*[a.diff(*symbols, **kwargs) for a in self.args]) + + def add(self, other): + """Adds quaternions. + + Parameters + ========== + + other : Quaternion + The quaternion to add to current (self) quaternion. 
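+ When ``other`` is also a Quaternion, addition is component-wise: writing ``self`` as ``a1 + b1*i + c1*j + d1*k`` and ``other`` as ``a2 + b2*i + c2*j + d2*k``, the sum returned below is ``(a1 + a2) + (b1 + b2)*i + (c1 + c2)*j + (d1 + d2)*k``.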
+ + Returns + ======= + + Quaternion + The resultant quaternion after adding self to other + + Examples + ======== + + >>> from sympy import Quaternion + >>> from sympy import symbols + >>> q1 = Quaternion(1, 2, 3, 4) + >>> q2 = Quaternion(5, 6, 7, 8) + >>> q1.add(q2) + 6 + 8*i + 10*j + 12*k + >>> q1 + 5 + 6 + 2*i + 3*j + 4*k + >>> x = symbols('x', real = True) + >>> q1.add(x) + (x + 1) + 2*i + 3*j + 4*k + + Quaternions over complex fields : + + >>> from sympy import Quaternion + >>> from sympy import I + >>> q3 = Quaternion(3 + 4*I, 2 + 5*I, 0, 7 + 8*I, real_field = False) + >>> q3.add(2 + 3*I) + (5 + 7*I) + (2 + 5*I)*i + 0*j + (7 + 8*I)*k + + """ + q1 = self + q2 = sympify(other) + + # If q2 is a number or a SymPy expression instead of a quaternion + if not isinstance(q2, Quaternion): + if q1.real_field and q2.is_complex: + return Quaternion(re(q2) + q1.a, im(q2) + q1.b, q1.c, q1.d) + elif q2.is_commutative: + return Quaternion(q1.a + q2, q1.b, q1.c, q1.d) + else: + raise ValueError("Only commutative expressions can be added with a Quaternion.") + + return Quaternion(q1.a + q2.a, q1.b + q2.b, q1.c + q2.c, q1.d + + q2.d) + + def mul(self, other): + """Multiplies quaternions. + + Parameters + ========== + + other : Quaternion or symbol + The quaternion to multiply to current (self) quaternion. + + Returns + ======= + + Quaternion + The resultant quaternion after multiplying self with other + + Examples + ======== + + >>> from sympy import Quaternion + >>> from sympy import symbols + >>> q1 = Quaternion(1, 2, 3, 4) + >>> q2 = Quaternion(5, 6, 7, 8) + >>> q1.mul(q2) + (-60) + 12*i + 30*j + 24*k + >>> q1.mul(2) + 2 + 4*i + 6*j + 8*k + >>> x = symbols('x', real = True) + >>> q1.mul(x) + x + 2*x*i + 3*x*j + 4*x*k + + Quaternions over complex fields : + + >>> from sympy import Quaternion + >>> from sympy import I + >>> q3 = Quaternion(3 + 4*I, 2 + 5*I, 0, 7 + 8*I, real_field = False) + >>> q3.mul(2 + 3*I) + (2 + 3*I)*(3 + 4*I) + (2 + 3*I)*(2 + 5*I)*i + 0*j + (2 + 3*I)*(7 + 8*I)*k + + """ + return self._generic_mul(self, _sympify(other)) + + @staticmethod + def _generic_mul(q1, q2): + """Generic multiplication. + + Parameters + ========== + + q1 : Quaternion or symbol + q2 : Quaternion or symbol + + It is important to note that if neither q1 nor q2 is a Quaternion, + this function simply returns q1 * q2. 
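+ + When both arguments are quaternions, the final return statement below assembles the Hamilton product:: + + q1*q2 = (a1*a2 - b1*b2 - c1*c2 - d1*d2) + (a1*b2 + b1*a2 + c1*d2 - d1*c2)*i + (a1*c2 - b1*d2 + c1*a2 + d1*b2)*j + (a1*d2 + b1*c2 - c1*b2 + d1*a2)*k + + with ``q1 = a1 + b1*i + c1*j + d1*k`` and ``q2 = a2 + b2*i + c2*j + d2*k``.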
+ + Returns + ======= + + Quaternion + The resultant quaternion after multiplying q1 and q2 + + Examples + ======== + + >>> from sympy import Quaternion + >>> from sympy import Symbol, S + >>> q1 = Quaternion(1, 2, 3, 4) + >>> q2 = Quaternion(5, 6, 7, 8) + >>> Quaternion._generic_mul(q1, q2) + (-60) + 12*i + 30*j + 24*k + >>> Quaternion._generic_mul(q1, S(2)) + 2 + 4*i + 6*j + 8*k + >>> x = Symbol('x', real = True) + >>> Quaternion._generic_mul(q1, x) + x + 2*x*i + 3*x*j + 4*x*k + + Quaternions over complex fields : + + >>> from sympy import I + >>> q3 = Quaternion(3 + 4*I, 2 + 5*I, 0, 7 + 8*I, real_field = False) + >>> Quaternion._generic_mul(q3, 2 + 3*I) + (2 + 3*I)*(3 + 4*I) + (2 + 3*I)*(2 + 5*I)*i + 0*j + (2 + 3*I)*(7 + 8*I)*k + + """ + # None is a Quaternion: + if not isinstance(q1, Quaternion) and not isinstance(q2, Quaternion): + return q1 * q2 + + # If q1 is a number or a SymPy expression instead of a quaternion + if not isinstance(q1, Quaternion): + if q2.real_field and q1.is_complex: + return Quaternion(re(q1), im(q1), 0, 0) * q2 + elif q1.is_commutative: + return Quaternion(q1 * q2.a, q1 * q2.b, q1 * q2.c, q1 * q2.d) + else: + raise ValueError("Only commutative expressions can be multiplied with a Quaternion.") + + # If q2 is a number or a SymPy expression instead of a quaternion + if not isinstance(q2, Quaternion): + if q1.real_field and q2.is_complex: + return q1 * Quaternion(re(q2), im(q2), 0, 0) + elif q2.is_commutative: + return Quaternion(q2 * q1.a, q2 * q1.b, q2 * q1.c, q2 * q1.d) + else: + raise ValueError("Only commutative expressions can be multiplied with a Quaternion.") + + # If any of the quaternions has a fixed norm, pre-compute norm + if q1._norm is None and q2._norm is None: + norm = None + else: + norm = q1.norm() * q2.norm() + + return Quaternion(-q1.b*q2.b - q1.c*q2.c - q1.d*q2.d + q1.a*q2.a, + q1.b*q2.a + q1.c*q2.d - q1.d*q2.c + q1.a*q2.b, + -q1.b*q2.d + q1.c*q2.a + q1.d*q2.b + q1.a*q2.c, + q1.b*q2.c - q1.c*q2.b + q1.d*q2.a + q1.a * q2.d, + norm=norm) + + def _eval_conjugate(self): + """Returns the conjugate of the quaternion.""" + q = self + return Quaternion(q.a, -q.b, -q.c, -q.d, norm=q._norm) + + def norm(self): + """Returns the norm of the quaternion.""" + if self._norm is None: # check if norm is pre-defined + q = self + # trigsimp is used to simplify sin(x)^2 + cos(x)^2 (these terms + # arise when from_axis_angle is used). + self._norm = sqrt(trigsimp(q.a**2 + q.b**2 + q.c**2 + q.d**2)) + + return self._norm + + def normalize(self): + """Returns the normalized form of the quaternion.""" + q = self + return q * (1/q.norm()) + + def inverse(self): + """Returns the inverse of the quaternion.""" + q = self + if not q.norm(): + raise ValueError("Cannot compute inverse for a quaternion with zero norm") + return conjugate(q) * (1/q.norm()**2) + + def pow(self, p): + """Finds the pth power of the quaternion. + + Parameters + ========== + + p : int + Power to be applied on quaternion. + + Returns + ======= + + Quaternion + Returns the p-th power of the current quaternion. + Returns the inverse if p = -1. 
+ + Examples + ======== + + >>> from sympy import Quaternion + >>> q = Quaternion(1, 2, 3, 4) + >>> q.pow(4) + 668 + (-224)*i + (-336)*j + (-448)*k + + """ + p = sympify(p) + q = self + if p == -1: + return q.inverse() + res = 1 + + if not p.is_Integer: + return NotImplemented + + if p < 0: + q, p = q.inverse(), -p + + while p > 0: + if p % 2 == 1: + res = q * res + + p = p//2 + q = q * q + + return res + + def exp(self): + """Returns the exponential of q (e^q). + + Returns + ======= + + Quaternion + Exponential of q (e^q). + + Examples + ======== + + >>> from sympy import Quaternion + >>> q = Quaternion(1, 2, 3, 4) + >>> q.exp() + E*cos(sqrt(29)) + + 2*sqrt(29)*E*sin(sqrt(29))/29*i + + 3*sqrt(29)*E*sin(sqrt(29))/29*j + + 4*sqrt(29)*E*sin(sqrt(29))/29*k + + """ + # exp(q) = e^a(cos||v|| + v/||v||*sin||v||) + q = self + vector_norm = sqrt(q.b**2 + q.c**2 + q.d**2) + a = exp(q.a) * cos(vector_norm) + b = exp(q.a) * sin(vector_norm) * q.b / vector_norm + c = exp(q.a) * sin(vector_norm) * q.c / vector_norm + d = exp(q.a) * sin(vector_norm) * q.d / vector_norm + + return Quaternion(a, b, c, d) + + def _ln(self): + """Returns the natural logarithm of the quaternion (_ln(q)). + + Examples + ======== + + >>> from sympy import Quaternion + >>> q = Quaternion(1, 2, 3, 4) + >>> q._ln() + log(sqrt(30)) + + 2*sqrt(29)*acos(sqrt(30)/30)/29*i + + 3*sqrt(29)*acos(sqrt(30)/30)/29*j + + 4*sqrt(29)*acos(sqrt(30)/30)/29*k + + """ + # _ln(q) = _ln||q|| + v/||v||*arccos(a/||q||) + q = self + vector_norm = sqrt(q.b**2 + q.c**2 + q.d**2) + q_norm = q.norm() + a = ln(q_norm) + b = q.b * acos(q.a / q_norm) / vector_norm + c = q.c * acos(q.a / q_norm) / vector_norm + d = q.d * acos(q.a / q_norm) / vector_norm + + return Quaternion(a, b, c, d) + + def _eval_subs(self, *args): + elements = [i.subs(*args) for i in self.args] + norm = self._norm + try: + norm = norm.subs(*args) + except AttributeError: + pass + _check_norm(elements, norm) + return Quaternion(*elements, norm=norm) + + def _eval_evalf(self, prec): + """Returns the floating point approximations (decimal numbers) of the quaternion. + + Returns + ======= + + Quaternion + Floating point approximations of quaternion(self) + + Examples + ======== + + >>> from sympy import Quaternion + >>> from sympy import sqrt + >>> q = Quaternion(1/sqrt(1), 1/sqrt(2), 1/sqrt(3), 1/sqrt(4)) + >>> q.evalf() + 1.00000000000000 + + 0.707106781186547*i + + 0.577350269189626*j + + 0.500000000000000*k + + """ + nprec = prec_to_dps(prec) + return Quaternion(*[arg.evalf(n=nprec) for arg in self.args]) + + def pow_cos_sin(self, p): + """Computes the pth power in the cos-sin form. + + Parameters + ========== + + p : int + Power to be applied on quaternion. + + Returns + ======= + + Quaternion + The p-th power in the cos-sin form. + + Examples + ======== + + >>> from sympy import Quaternion + >>> q = Quaternion(1, 2, 3, 4) + >>> q.pow_cos_sin(4) + 900*cos(4*acos(sqrt(30)/30)) + + 1800*sqrt(29)*sin(4*acos(sqrt(30)/30))/29*i + + 2700*sqrt(29)*sin(4*acos(sqrt(30)/30))/29*j + + 3600*sqrt(29)*sin(4*acos(sqrt(30)/30))/29*k + + """ + # q = ||q||*(cos(a) + u*sin(a)) + # q^p = ||q||^p * (cos(p*a) + u*sin(p*a)) + + q = self + (v, angle) = q.to_axis_angle() + q2 = Quaternion.from_axis_angle(v, p * angle) + return q2 * (q.norm()**p) + + def integrate(self, *args): + """Computes integration of quaternion. + + Returns + ======= + + Quaternion + Integration of the quaternion(self) with the given variable. 
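+ + The quaternion is integrated component by component, i.e. for ``self = a + b*i + c*j + d*k`` the result below is ``integrate(a, *args) + integrate(b, *args)*i + integrate(c, *args)*j + integrate(d, *args)*k``.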
+ + Examples + ======== + + Indefinite Integral of quaternion : + + >>> from sympy import Quaternion + >>> from sympy.abc import x + >>> q = Quaternion(1, 2, 3, 4) + >>> q.integrate(x) + x + 2*x*i + 3*x*j + 4*x*k + + Definite integral of quaternion : + + >>> from sympy import Quaternion + >>> from sympy.abc import x + >>> q = Quaternion(1, 2, 3, 4) + >>> q.integrate((x, 1, 5)) + 4 + 8*i + 12*j + 16*k + + """ + # TODO: is this expression correct? + return Quaternion(integrate(self.a, *args), integrate(self.b, *args), + integrate(self.c, *args), integrate(self.d, *args)) + + @staticmethod + def rotate_point(pin, r): + """Returns the coordinates of the point pin(a 3 tuple) after rotation. + + Parameters + ========== + + pin : tuple + A 3-element tuple of coordinates of a point which needs to be + rotated. + r : Quaternion or tuple + Axis and angle of rotation. + + It's important to note that when r is a tuple, it must be of the form + (axis, angle) + + Returns + ======= + + tuple + The coordinates of the point after rotation. + + Examples + ======== + + >>> from sympy import Quaternion + >>> from sympy import symbols, trigsimp, cos, sin + >>> x = symbols('x') + >>> q = Quaternion(cos(x/2), 0, 0, sin(x/2)) + >>> trigsimp(Quaternion.rotate_point((1, 1, 1), q)) + (sqrt(2)*cos(x + pi/4), sqrt(2)*sin(x + pi/4), 1) + >>> (axis, angle) = q.to_axis_angle() + >>> trigsimp(Quaternion.rotate_point((1, 1, 1), (axis, angle))) + (sqrt(2)*cos(x + pi/4), sqrt(2)*sin(x + pi/4), 1) + + """ + if isinstance(r, tuple): + # if r is of the form (vector, angle) + q = Quaternion.from_axis_angle(r[0], r[1]) + else: + # if r is a quaternion + q = r.normalize() + pout = q * Quaternion(0, pin[0], pin[1], pin[2]) * conjugate(q) + return (pout.b, pout.c, pout.d) + + def to_axis_angle(self): + """Returns the axis and angle of rotation of a quaternion. + + Returns + ======= + + tuple + Tuple of (axis, angle) + + Examples + ======== + + >>> from sympy import Quaternion + >>> q = Quaternion(1, 1, 1, 1) + >>> (axis, angle) = q.to_axis_angle() + >>> axis + (sqrt(3)/3, sqrt(3)/3, sqrt(3)/3) + >>> angle + 2*pi/3 + + """ + q = self + if q.a.is_negative: + q = q * -1 + + q = q.normalize() + angle = trigsimp(2 * acos(q.a)) + + # Since quaternion is normalised, q.a is less than 1. + s = sqrt(1 - q.a*q.a) + + x = trigsimp(q.b / s) + y = trigsimp(q.c / s) + z = trigsimp(q.d / s) + + v = (x, y, z) + t = (v, angle) + + return t + + def to_rotation_matrix(self, v=None, homogeneous=True): + """Returns the equivalent rotation transformation matrix of the quaternion + which represents rotation about the origin if v is not passed. + + Parameters + ========== + + v : tuple or None + Default value: None + homogeneous : bool + When True, gives an expression that may be more efficient for + symbolic calculations but less so for direct evaluation. Both + formulas are mathematically equivalent. + Default value: True + + Returns + ======= + + tuple + Returns the equivalent rotation transformation matrix of the quaternion + which represents rotation about the origin if v is not passed. + + Examples + ======== + + >>> from sympy import Quaternion + >>> from sympy import symbols, trigsimp, cos, sin + >>> x = symbols('x') + >>> q = Quaternion(cos(x/2), 0, 0, sin(x/2)) + >>> trigsimp(q.to_rotation_matrix()) + Matrix([ + [cos(x), -sin(x), 0], + [sin(x), cos(x), 0], + [ 0, 0, 1]]) + + Generates a 4x4 transformation matrix (used for rotation about a point + other than the origin) if the point(v) is passed as an argument. 
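+ + The ``homogeneous=True`` and ``homogeneous=False`` branches below give the same matrix because ``s*(a**2 + b**2 + c**2 + d**2) == 1`` when ``s = q.norm()**-2``; for instance ``1 - 2*s*(c**2 + d**2) == s*(a**2 + b**2 - c**2 - d**2)``, which is the first diagonal entry.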
+ """ + + q = self + s = q.norm()**-2 + + # diagonal elements are different according to parameter normal + if homogeneous: + m00 = s*(q.a**2 + q.b**2 - q.c**2 - q.d**2) + m11 = s*(q.a**2 - q.b**2 + q.c**2 - q.d**2) + m22 = s*(q.a**2 - q.b**2 - q.c**2 + q.d**2) + else: + m00 = 1 - 2*s*(q.c**2 + q.d**2) + m11 = 1 - 2*s*(q.b**2 + q.d**2) + m22 = 1 - 2*s*(q.b**2 + q.c**2) + + m01 = 2*s*(q.b*q.c - q.d*q.a) + m02 = 2*s*(q.b*q.d + q.c*q.a) + + m10 = 2*s*(q.b*q.c + q.d*q.a) + m12 = 2*s*(q.c*q.d - q.b*q.a) + + m20 = 2*s*(q.b*q.d - q.c*q.a) + m21 = 2*s*(q.c*q.d + q.b*q.a) + + if not v: + return Matrix([[m00, m01, m02], [m10, m11, m12], [m20, m21, m22]]) + + else: + (x, y, z) = v + + m03 = x - x*m00 - y*m01 - z*m02 + m13 = y - x*m10 - y*m11 - z*m12 + m23 = z - x*m20 - y*m21 - z*m22 + m30 = m31 = m32 = 0 + m33 = 1 + + return Matrix([[m00, m01, m02, m03], [m10, m11, m12, m13], + [m20, m21, m22, m23], [m30, m31, m32, m33]]) + + def scalar_part(self): + r"""Returns scalar part($\mathbf{S}(q)$) of the quaternion q. + + Explanation + =========== + + Given a quaternion $q = a + bi + cj + dk$, returns $\mathbf{S}(q) = a$. + + Examples + ======== + + >>> from sympy.algebras.quaternion import Quaternion + >>> q = Quaternion(4, 8, 13, 12) + >>> q.scalar_part() + 4 + + """ + + return self.a + + def vector_part(self): + r""" + Returns vector part($\mathbf{V}(q)$) of the quaternion q. + + Explanation + =========== + + Given a quaternion $q = a + bi + cj + dk$, returns $\mathbf{V}(q) = bi + cj + dk$. + + Examples + ======== + + >>> from sympy.algebras.quaternion import Quaternion + >>> q = Quaternion(1, 1, 1, 1) + >>> q.vector_part() + 0 + 1*i + 1*j + 1*k + + >>> q = Quaternion(4, 8, 13, 12) + >>> q.vector_part() + 0 + 8*i + 13*j + 12*k + + """ + + return Quaternion(0, self.b, self.c, self.d) + + def axis(self): + r""" + Returns the axis($\mathbf{Ax}(q)$) of the quaternion. + + Explanation + =========== + + Given a quaternion $q = a + bi + cj + dk$, returns $\mathbf{Ax}(q)$ i.e., the versor of the vector part of that quaternion + equal to $\mathbf{U}[\mathbf{V}(q)]$. + The axis is always an imaginary unit with square equal to $-1 + 0i + 0j + 0k$. + + Examples + ======== + + >>> from sympy.algebras.quaternion import Quaternion + >>> q = Quaternion(1, 1, 1, 1) + >>> q.axis() + 0 + sqrt(3)/3*i + sqrt(3)/3*j + sqrt(3)/3*k + + See Also + ======== + + vector_part + + """ + axis = self.vector_part().normalize() + + return Quaternion(0, axis.b, axis.c, axis.d) + + def is_pure(self): + """ + Returns true if the quaternion is pure, false if the quaternion is not pure + or returns none if it is unknown. + + Explanation + =========== + + A pure quaternion (also a vector quaternion) is a quaternion with scalar + part equal to 0. + + Examples + ======== + + >>> from sympy.algebras.quaternion import Quaternion + >>> q = Quaternion(0, 8, 13, 12) + >>> q.is_pure() + True + + See Also + ======== + scalar_part + + """ + + return self.a.is_zero + + def is_zero_quaternion(self): + """ + Returns true if the quaternion is a zero quaternion or false if it is not a zero quaternion + and None if the value is unknown. + + Explanation + =========== + + A zero quaternion is a quaternion with both scalar part and + vector part equal to 0. 
+ + Examples + ======== + + >>> from sympy.algebras.quaternion import Quaternion + >>> q = Quaternion(1, 0, 0, 0) + >>> q.is_zero_quaternion() + False + + >>> q = Quaternion(0, 0, 0, 0) + >>> q.is_zero_quaternion() + True + + See Also + ======== + scalar_part + vector_part + + """ + + return self.norm().is_zero + + def angle(self): + r""" + Returns the angle of the quaternion measured in the real-axis plane. + + Explanation + =========== + + Given a quaternion $q = a + bi + cj + dk$ where a, b, c and d + are real numbers, returns the angle of the quaternion given by + + .. math:: + angle := atan2(\sqrt{b^2 + c^2 + d^2}, {a}) + + Examples + ======== + + >>> from sympy.algebras.quaternion import Quaternion + >>> q = Quaternion(1, 4, 4, 4) + >>> q.angle() + atan(4*sqrt(3)) + + """ + + return atan2(self.vector_part().norm(), self.scalar_part()) + + + def arc_coplanar(self, other): + """ + Returns True if the transformation arcs represented by the input quaternions happen in the same plane. + + Explanation + =========== + + Two quaternions are said to be coplanar (in this arc sense) when their axes are parallel. + The plane of a quaternion is the one normal to its axis. + + Parameters + ========== + + other : a Quaternion + + Returns + ======= + + True : if the planes of the two quaternions are the same, apart from its orientation/sign. + False : if the planes of the two quaternions are not the same, apart from its orientation/sign. + None : if plane of either of the quaternion is unknown. + + Examples + ======== + + >>> from sympy.algebras.quaternion import Quaternion + >>> q1 = Quaternion(1, 4, 4, 4) + >>> q2 = Quaternion(3, 8, 8, 8) + >>> Quaternion.arc_coplanar(q1, q2) + True + + >>> q1 = Quaternion(2, 8, 13, 12) + >>> Quaternion.arc_coplanar(q1, q2) + False + + See Also + ======== + + vector_coplanar + is_pure + + """ + if (self.is_zero_quaternion()) or (other.is_zero_quaternion()): + raise ValueError('Neither of the given quaternions can be 0') + + return fuzzy_or([(self.axis() - other.axis()).is_zero_quaternion(), (self.axis() + other.axis()).is_zero_quaternion()]) + + @classmethod + def vector_coplanar(cls, q1, q2, q3): + r""" + Returns True if the axis of the pure quaternions seen as 3D vectors + q1, q2, and q3 are coplanar. + + Explanation + =========== + + Three pure quaternions are vector coplanar if the quaternions seen as 3D vectors are coplanar. + + Parameters + ========== + + q1 + A pure Quaternion. + q2 + A pure Quaternion. + q3 + A pure Quaternion. + + Returns + ======= + + True : if the axis of the pure quaternions seen as 3D vectors + q1, q2, and q3 are coplanar. + False : if the axis of the pure quaternions seen as 3D vectors + q1, q2, and q3 are not coplanar. + None : if the axis of the pure quaternions seen as 3D vectors + q1, q2, and q3 are coplanar is unknown. 
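+ + The check below computes the determinant of the matrix of vector parts, so the three quaternions are vector coplanar exactly when + + .. math:: + + \begin{vmatrix} b_1 & c_1 & d_1 \\ b_2 & c_2 & d_2 \\ b_3 & c_3 & d_3 \end{vmatrix} = 0 + + where $(b_n, c_n, d_n)$ are the ``i``, ``j``, ``k`` components of ``qn``.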
+ + Examples + ======== + + >>> from sympy.algebras.quaternion import Quaternion + >>> q1 = Quaternion(0, 4, 4, 4) + >>> q2 = Quaternion(0, 8, 8, 8) + >>> q3 = Quaternion(0, 24, 24, 24) + >>> Quaternion.vector_coplanar(q1, q2, q3) + True + + >>> q1 = Quaternion(0, 8, 16, 8) + >>> q2 = Quaternion(0, 8, 3, 12) + >>> Quaternion.vector_coplanar(q1, q2, q3) + False + + See Also + ======== + + axis + is_pure + + """ + + if fuzzy_not(q1.is_pure()) or fuzzy_not(q2.is_pure()) or fuzzy_not(q3.is_pure()): + raise ValueError('The given quaternions must be pure') + + M = Matrix([[q1.b, q1.c, q1.d], [q2.b, q2.c, q2.d], [q3.b, q3.c, q3.d]]).det() + return M.is_zero + + def parallel(self, other): + """ + Returns True if the two pure quaternions seen as 3D vectors are parallel. + + Explanation + =========== + + Two pure quaternions are called parallel when their vector product is commutative which + implies that the quaternions seen as 3D vectors have same direction. + + Parameters + ========== + + other : a Quaternion + + Returns + ======= + + True : if the two pure quaternions seen as 3D vectors are parallel. + False : if the two pure quaternions seen as 3D vectors are not parallel. + None : if the two pure quaternions seen as 3D vectors are parallel is unknown. + + Examples + ======== + + >>> from sympy.algebras.quaternion import Quaternion + >>> q = Quaternion(0, 4, 4, 4) + >>> q1 = Quaternion(0, 8, 8, 8) + >>> q.parallel(q1) + True + + >>> q1 = Quaternion(0, 8, 13, 12) + >>> q.parallel(q1) + False + + """ + + if fuzzy_not(self.is_pure()) or fuzzy_not(other.is_pure()): + raise ValueError('The provided quaternions must be pure') + + return (self*other - other*self).is_zero_quaternion() + + def orthogonal(self, other): + """ + Returns the orthogonality of two quaternions. + + Explanation + =========== + + Two pure quaternions are called orthogonal when their product is anti-commutative. + + Parameters + ========== + + other : a Quaternion + + Returns + ======= + + True : if the two pure quaternions seen as 3D vectors are orthogonal. + False : if the two pure quaternions seen as 3D vectors are not orthogonal. + None : if the two pure quaternions seen as 3D vectors are orthogonal is unknown. + + Examples + ======== + + >>> from sympy.algebras.quaternion import Quaternion + >>> q = Quaternion(0, 4, 4, 4) + >>> q1 = Quaternion(0, 8, 8, 8) + >>> q.orthogonal(q1) + False + + >>> q1 = Quaternion(0, 2, 2, 0) + >>> q = Quaternion(0, 2, -2, 0) + >>> q.orthogonal(q1) + True + + """ + + if fuzzy_not(self.is_pure()) or fuzzy_not(other.is_pure()): + raise ValueError('The given quaternions must be pure') + + return (self*other + other*self).is_zero_quaternion() + + def index_vector(self): + r""" + Returns the index vector of the quaternion. + + Explanation + =========== + + Index vector is given by $\mathbf{T}(q)$ multiplied by $\mathbf{Ax}(q)$ where $\mathbf{Ax}(q)$ is the axis of the quaternion q, + and mod(q) is the $\mathbf{T}(q)$ (magnitude) of the quaternion. + + Returns + ======= + + Quaternion: representing index vector of the provided quaternion. + + Examples + ======== + + >>> from sympy.algebras.quaternion import Quaternion + >>> q = Quaternion(2, 4, 2, 4) + >>> q.index_vector() + 0 + 4*sqrt(10)/3*i + 2*sqrt(10)/3*j + 4*sqrt(10)/3*k + + See Also + ======== + + axis + norm + + """ + + return self.norm() * self.axis() + + def mensor(self): + """ + Returns the natural logarithm of the norm(magnitude) of the quaternion. 
+ + Examples + ======== + + >>> from sympy.algebras.quaternion import Quaternion + >>> q = Quaternion(2, 4, 2, 4) + >>> q.mensor() + log(2*sqrt(10)) + >>> q.norm() + 2*sqrt(10) + + See Also + ======== + + norm + + """ + + return ln(self.norm()) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/algebras/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/sympy/algebras/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/algebras/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/algebras/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e5783cab21228e1b4e997dc7d380d5fe23011ca Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/algebras/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/algebras/tests/__pycache__/test_quaternion.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/algebras/tests/__pycache__/test_quaternion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf84ad90853b149fc81c979dd346fd5c75e4217f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/algebras/tests/__pycache__/test_quaternion.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/algebras/tests/test_quaternion.py b/llmeval-env/lib/python3.10/site-packages/sympy/algebras/tests/test_quaternion.py new file mode 100644 index 0000000000000000000000000000000000000000..ced474c9306047feafa6ba82cb1a7a67bf980a6b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/algebras/tests/test_quaternion.py @@ -0,0 +1,409 @@ +from sympy.core.function import diff +from sympy.core.function import expand +from sympy.core.numbers import (E, I, Rational, pi) +from sympy.core.singleton import S +from sympy.core.symbol import (Symbol, symbols) +from sympy.functions.elementary.complexes import (Abs, conjugate, im, re, sign) +from sympy.functions.elementary.exponential import log +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.functions.elementary.trigonometric import (acos, asin, cos, sin, atan2, atan) +from sympy.integrals.integrals import integrate +from sympy.matrices.dense import Matrix +from sympy.simplify import simplify +from sympy.simplify.trigsimp import trigsimp +from sympy.algebras.quaternion import Quaternion +from sympy.testing.pytest import raises +from itertools import permutations, product + +w, x, y, z = symbols('w:z') +phi = symbols('phi') + +def test_quaternion_construction(): + q = Quaternion(w, x, y, z) + assert q + q == Quaternion(2*w, 2*x, 2*y, 2*z) + + q2 = Quaternion.from_axis_angle((sqrt(3)/3, sqrt(3)/3, sqrt(3)/3), + pi*Rational(2, 3)) + assert q2 == Quaternion(S.Half, S.Half, + S.Half, S.Half) + + M = Matrix([[cos(phi), -sin(phi), 0], [sin(phi), cos(phi), 0], [0, 0, 1]]) + q3 = trigsimp(Quaternion.from_rotation_matrix(M)) + assert q3 == Quaternion( + sqrt(2)*sqrt(cos(phi) + 1)/2, 0, 0, sqrt(2 - 2*cos(phi))*sign(sin(phi))/2) + + nc = Symbol('nc', commutative=False) + raises(ValueError, lambda: Quaternion(w, x, nc, z)) + + +def test_quaternion_construction_norm(): + q1 = Quaternion(*symbols('a:d')) + + q2 = Quaternion(w, x, y, z) + assert expand((q1*q2).norm()**2 - (q1.norm()**2 * q2.norm()**2)) == 0 + + q3 = Quaternion(w, x, y, z, norm=1) + 
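+ # q3 carries a pre-set norm of 1, so _generic_mul pre-computes the product norm as q1.norm()*q3.norm(), which collapses to q1.norm():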
assert (q1 * q3).norm() == q1.norm() + + +def test_to_and_from_Matrix(): + q = Quaternion(w, x, y, z) + q_full = Quaternion.from_Matrix(q.to_Matrix()) + q_vect = Quaternion.from_Matrix(q.to_Matrix(True)) + assert (q - q_full).is_zero_quaternion() + assert (q.vector_part() - q_vect).is_zero_quaternion() + + +def test_product_matrices(): + q1 = Quaternion(w, x, y, z) + q2 = Quaternion(*(symbols("a:d"))) + assert (q1 * q2).to_Matrix() == q1.product_matrix_left * q2.to_Matrix() + assert (q1 * q2).to_Matrix() == q2.product_matrix_right * q1.to_Matrix() + + R1 = (q1.product_matrix_left * q1.product_matrix_right.T)[1:, 1:] + R2 = simplify(q1.to_rotation_matrix()*q1.norm()**2) + assert R1 == R2 + + +def test_quaternion_axis_angle(): + + test_data = [ # axis, angle, expected_quaternion + ((1, 0, 0), 0, (1, 0, 0, 0)), + ((1, 0, 0), pi/2, (sqrt(2)/2, sqrt(2)/2, 0, 0)), + ((0, 1, 0), pi/2, (sqrt(2)/2, 0, sqrt(2)/2, 0)), + ((0, 0, 1), pi/2, (sqrt(2)/2, 0, 0, sqrt(2)/2)), + ((1, 0, 0), pi, (0, 1, 0, 0)), + ((0, 1, 0), pi, (0, 0, 1, 0)), + ((0, 0, 1), pi, (0, 0, 0, 1)), + ((1, 1, 1), pi, (0, 1/sqrt(3),1/sqrt(3),1/sqrt(3))), + ((sqrt(3)/3, sqrt(3)/3, sqrt(3)/3), pi*2/3, (S.Half, S.Half, S.Half, S.Half)) + ] + + for axis, angle, expected in test_data: + assert Quaternion.from_axis_angle(axis, angle) == Quaternion(*expected) + + +def test_quaternion_axis_angle_simplification(): + result = Quaternion.from_axis_angle((1, 2, 3), asin(4)) + assert result.a == cos(asin(4)/2) + assert result.b == sqrt(14)*sin(asin(4)/2)/14 + assert result.c == sqrt(14)*sin(asin(4)/2)/7 + assert result.d == 3*sqrt(14)*sin(asin(4)/2)/14 + +def test_quaternion_complex_real_addition(): + a = symbols("a", complex=True) + b = symbols("b", real=True) + # This symbol is not complex: + c = symbols("c", commutative=False) + + q = Quaternion(w, x, y, z) + assert a + q == Quaternion(w + re(a), x + im(a), y, z) + assert 1 + q == Quaternion(1 + w, x, y, z) + assert I + q == Quaternion(w, 1 + x, y, z) + assert b + q == Quaternion(w + b, x, y, z) + raises(ValueError, lambda: c + q) + raises(ValueError, lambda: q * c) + raises(ValueError, lambda: c * q) + + assert -q == Quaternion(-w, -x, -y, -z) + + q1 = Quaternion(3 + 4*I, 2 + 5*I, 0, 7 + 8*I, real_field = False) + q2 = Quaternion(1, 4, 7, 8) + + assert q1 + (2 + 3*I) == Quaternion(5 + 7*I, 2 + 5*I, 0, 7 + 8*I) + assert q2 + (2 + 3*I) == Quaternion(3, 7, 7, 8) + assert q1 * (2 + 3*I) == \ + Quaternion((2 + 3*I)*(3 + 4*I), (2 + 3*I)*(2 + 5*I), 0, (2 + 3*I)*(7 + 8*I)) + assert q2 * (2 + 3*I) == Quaternion(-10, 11, 38, -5) + + q1 = Quaternion(1, 2, 3, 4) + q0 = Quaternion(0, 0, 0, 0) + assert q1 + q0 == q1 + assert q1 - q0 == q1 + assert q1 - q1 == q0 + + +def test_quaternion_evalf(): + assert (Quaternion(sqrt(2), 0, 0, sqrt(3)).evalf() == + Quaternion(sqrt(2).evalf(), 0, 0, sqrt(3).evalf())) + assert (Quaternion(1/sqrt(2), 0, 0, 1/sqrt(2)).evalf() == + Quaternion((1/sqrt(2)).evalf(), 0, 0, (1/sqrt(2)).evalf())) + + +def test_quaternion_functions(): + q = Quaternion(w, x, y, z) + q1 = Quaternion(1, 2, 3, 4) + q0 = Quaternion(0, 0, 0, 0) + + assert conjugate(q) == Quaternion(w, -x, -y, -z) + assert q.norm() == sqrt(w**2 + x**2 + y**2 + z**2) + assert q.normalize() == Quaternion(w, x, y, z) / sqrt(w**2 + x**2 + y**2 + z**2) + assert q.inverse() == Quaternion(w, -x, -y, -z) / (w**2 + x**2 + y**2 + z**2) + assert q.inverse() == q.pow(-1) + raises(ValueError, lambda: q0.inverse()) + assert q.pow(2) == Quaternion(w**2 - x**2 - y**2 - z**2, 2*w*x, 2*w*y, 2*w*z) + assert q**(2) == Quaternion(w**2 - x**2 - 
y**2 - z**2, 2*w*x, 2*w*y, 2*w*z) + assert q1.pow(-2) == Quaternion( + Rational(-7, 225), Rational(-1, 225), Rational(-1, 150), Rational(-2, 225)) + assert q1**(-2) == Quaternion( + Rational(-7, 225), Rational(-1, 225), Rational(-1, 150), Rational(-2, 225)) + assert q1.pow(-0.5) == NotImplemented + raises(TypeError, lambda: q1**(-0.5)) + + assert q1.exp() == \ + Quaternion(E * cos(sqrt(29)), + 2 * sqrt(29) * E * sin(sqrt(29)) / 29, + 3 * sqrt(29) * E * sin(sqrt(29)) / 29, + 4 * sqrt(29) * E * sin(sqrt(29)) / 29) + assert q1._ln() == \ + Quaternion(log(sqrt(30)), + 2 * sqrt(29) * acos(sqrt(30)/30) / 29, + 3 * sqrt(29) * acos(sqrt(30)/30) / 29, + 4 * sqrt(29) * acos(sqrt(30)/30) / 29) + + assert q1.pow_cos_sin(2) == \ + Quaternion(30 * cos(2 * acos(sqrt(30)/30)), + 60 * sqrt(29) * sin(2 * acos(sqrt(30)/30)) / 29, + 90 * sqrt(29) * sin(2 * acos(sqrt(30)/30)) / 29, + 120 * sqrt(29) * sin(2 * acos(sqrt(30)/30)) / 29) + + assert diff(Quaternion(x, x, x, x), x) == Quaternion(1, 1, 1, 1) + + assert integrate(Quaternion(x, x, x, x), x) == \ + Quaternion(x**2 / 2, x**2 / 2, x**2 / 2, x**2 / 2) + + assert Quaternion.rotate_point((1, 1, 1), q1) == (S.One / 5, 1, S(7) / 5) + n = Symbol('n') + raises(TypeError, lambda: q1**n) + n = Symbol('n', integer=True) + raises(TypeError, lambda: q1**n) + + assert Quaternion(22, 23, 55, 8).scalar_part() == 22 + assert Quaternion(w, x, y, z).scalar_part() == w + + assert Quaternion(22, 23, 55, 8).vector_part() == Quaternion(0, 23, 55, 8) + assert Quaternion(w, x, y, z).vector_part() == Quaternion(0, x, y, z) + + assert q1.axis() == Quaternion(0, 2*sqrt(29)/29, 3*sqrt(29)/29, 4*sqrt(29)/29) + assert q1.axis().pow(2) == Quaternion(-1, 0, 0, 0) + assert q0.axis().scalar_part() == 0 + assert (q.axis() == Quaternion(0, + x/sqrt(x**2 + y**2 + z**2), + y/sqrt(x**2 + y**2 + z**2), + z/sqrt(x**2 + y**2 + z**2))) + + assert q0.is_pure() is True + assert q1.is_pure() is False + assert Quaternion(0, 0, 0, 3).is_pure() is True + assert Quaternion(0, 2, 10, 3).is_pure() is True + assert Quaternion(w, 2, 10, 3).is_pure() is None + + assert q1.angle() == atan(sqrt(29)) + assert q.angle() == atan2(sqrt(x**2 + y**2 + z**2), w) + + assert Quaternion.arc_coplanar(q1, Quaternion(2, 4, 6, 8)) is True + assert Quaternion.arc_coplanar(q1, Quaternion(1, -2, -3, -4)) is True + assert Quaternion.arc_coplanar(q1, Quaternion(1, 8, 12, 16)) is True + assert Quaternion.arc_coplanar(q1, Quaternion(1, 2, 3, 4)) is True + assert Quaternion.arc_coplanar(q1, Quaternion(w, 4, 6, 8)) is True + assert Quaternion.arc_coplanar(q1, Quaternion(2, 7, 4, 1)) is False + assert Quaternion.arc_coplanar(q1, Quaternion(w, x, y, z)) is None + raises(ValueError, lambda: Quaternion.arc_coplanar(q1, q0)) + + assert Quaternion.vector_coplanar( + Quaternion(0, 8, 12, 16), + Quaternion(0, 4, 6, 8), + Quaternion(0, 2, 3, 4)) is True + assert Quaternion.vector_coplanar( + Quaternion(0, 0, 0, 0), Quaternion(0, 4, 6, 8), Quaternion(0, 2, 3, 4)) is True + assert Quaternion.vector_coplanar( + Quaternion(0, 8, 2, 6), Quaternion(0, 1, 6, 6), Quaternion(0, 0, 3, 4)) is False + assert Quaternion.vector_coplanar( + Quaternion(0, 1, 3, 4), + Quaternion(0, 4, w, 6), + Quaternion(0, 6, 8, 1)) is None + raises(ValueError, lambda: + Quaternion.vector_coplanar(q0, Quaternion(0, 4, 6, 8), q1)) + + assert Quaternion(0, 1, 2, 3).parallel(Quaternion(0, 2, 4, 6)) is True + assert Quaternion(0, 1, 2, 3).parallel(Quaternion(0, 2, 2, 6)) is False + assert Quaternion(0, 1, 2, 3).parallel(Quaternion(w, x, y, 6)) is None + raises(ValueError, lambda: 
q0.parallel(q1)) + + assert Quaternion(0, 1, 2, 3).orthogonal(Quaternion(0, -2, 1, 0)) is True + assert Quaternion(0, 2, 4, 7).orthogonal(Quaternion(0, 2, 2, 6)) is False + assert Quaternion(0, 2, 4, 7).orthogonal(Quaternion(w, x, y, 6)) is None + raises(ValueError, lambda: q0.orthogonal(q1)) + + assert q1.index_vector() == Quaternion( + 0, 2*sqrt(870)/29, + 3*sqrt(870)/29, + 4*sqrt(870)/29) + assert Quaternion(0, 3, 9, 4).index_vector() == Quaternion(0, 3, 9, 4) + + assert Quaternion(4, 3, 9, 4).mensor() == log(sqrt(122)) + assert Quaternion(3, 3, 0, 2).mensor() == log(sqrt(22)) + + assert q0.is_zero_quaternion() is True + assert q1.is_zero_quaternion() is False + assert Quaternion(w, 0, 0, 0).is_zero_quaternion() is None + +def test_quaternion_conversions(): + q1 = Quaternion(1, 2, 3, 4) + + assert q1.to_axis_angle() == ((2 * sqrt(29)/29, + 3 * sqrt(29)/29, + 4 * sqrt(29)/29), + 2 * acos(sqrt(30)/30)) + + assert (q1.to_rotation_matrix() == + Matrix([[Rational(-2, 3), Rational(2, 15), Rational(11, 15)], + [Rational(2, 3), Rational(-1, 3), Rational(2, 3)], + [Rational(1, 3), Rational(14, 15), Rational(2, 15)]])) + + assert (q1.to_rotation_matrix((1, 1, 1)) == + Matrix([ + [Rational(-2, 3), Rational(2, 15), Rational(11, 15), Rational(4, 5)], + [Rational(2, 3), Rational(-1, 3), Rational(2, 3), S.Zero], + [Rational(1, 3), Rational(14, 15), Rational(2, 15), Rational(-2, 5)], + [S.Zero, S.Zero, S.Zero, S.One]])) + + theta = symbols("theta", real=True) + q2 = Quaternion(cos(theta/2), 0, 0, sin(theta/2)) + + assert trigsimp(q2.to_rotation_matrix()) == Matrix([ + [cos(theta), -sin(theta), 0], + [sin(theta), cos(theta), 0], + [0, 0, 1]]) + + assert q2.to_axis_angle() == ((0, 0, sin(theta/2)/Abs(sin(theta/2))), + 2*acos(cos(theta/2))) + + assert trigsimp(q2.to_rotation_matrix((1, 1, 1))) == Matrix([ + [cos(theta), -sin(theta), 0, sin(theta) - cos(theta) + 1], + [sin(theta), cos(theta), 0, -sin(theta) - cos(theta) + 1], + [0, 0, 1, 0], + [0, 0, 0, 1]]) + + +def test_rotation_matrix_homogeneous(): + q = Quaternion(w, x, y, z) + R1 = q.to_rotation_matrix(homogeneous=True) * q.norm()**2 + R2 = simplify(q.to_rotation_matrix(homogeneous=False) * q.norm()**2) + assert R1 == R2 + + +def test_quaternion_rotation_iss1593(): + """ + There was a sign mistake in the definition, + of the rotation matrix. This tests that particular sign mistake. + See issue 1593 for reference. 
+ See wikipedia + https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Quaternion-derived_rotation_matrix + for the correct definition + """ + q = Quaternion(cos(phi/2), sin(phi/2), 0, 0) + assert(trigsimp(q.to_rotation_matrix()) == Matrix([ + [1, 0, 0], + [0, cos(phi), -sin(phi)], + [0, sin(phi), cos(phi)]])) + + +def test_quaternion_multiplication(): + q1 = Quaternion(3 + 4*I, 2 + 5*I, 0, 7 + 8*I, real_field = False) + q2 = Quaternion(1, 2, 3, 5) + q3 = Quaternion(1, 1, 1, y) + + assert Quaternion._generic_mul(S(4), S.One) == 4 + assert (Quaternion._generic_mul(S(4), q1) == + Quaternion(12 + 16*I, 8 + 20*I, 0, 28 + 32*I)) + assert q2.mul(2) == Quaternion(2, 4, 6, 10) + assert q2.mul(q3) == Quaternion(-5*y - 4, 3*y - 2, 9 - 2*y, y + 4) + assert q2.mul(q3) == q2*q3 + + z = symbols('z', complex=True) + z_quat = Quaternion(re(z), im(z), 0, 0) + q = Quaternion(*symbols('q:4', real=True)) + + assert z * q == z_quat * q + assert q * z == q * z_quat + + +def test_issue_16318(): + #for rtruediv + q0 = Quaternion(0, 0, 0, 0) + raises(ValueError, lambda: 1/q0) + #for rotate_point + q = Quaternion(1, 2, 3, 4) + (axis, angle) = q.to_axis_angle() + assert Quaternion.rotate_point((1, 1, 1), (axis, angle)) == (S.One / 5, 1, S(7) / 5) + #test for to_axis_angle + q = Quaternion(-1, 1, 1, 1) + axis = (-sqrt(3)/3, -sqrt(3)/3, -sqrt(3)/3) + angle = 2*pi/3 + assert (axis, angle) == q.to_axis_angle() + + +def test_to_euler(): + q = Quaternion(w, x, y, z) + q_normalized = q.normalize() + + seqs = ['zxy', 'zyx', 'zyz', 'zxz'] + seqs += [seq.upper() for seq in seqs] + + for seq in seqs: + euler_from_q = q.to_euler(seq) + q_back = simplify(Quaternion.from_euler(euler_from_q, seq)) + assert q_back == q_normalized + + +def test_to_euler_iss24504(): + """ + There was a mistake in the degenerate case testing + See issue 24504 for reference. 
+ """ + q = Quaternion.from_euler((phi, 0, 0), 'zyz') + assert trigsimp(q.to_euler('zyz'), inverse=True) == (phi, 0, 0) + + +def test_to_euler_numerical_singilarities(): + + def test_one_case(angles, seq): + q = Quaternion.from_euler(angles, seq) + assert q.to_euler(seq) == angles + + # symmetric + test_one_case((pi/2, 0, 0), 'zyz') + test_one_case((pi/2, 0, 0), 'ZYZ') + test_one_case((pi/2, pi, 0), 'zyz') + test_one_case((pi/2, pi, 0), 'ZYZ') + + # asymmetric + test_one_case((pi/2, pi/2, 0), 'zyx') + test_one_case((pi/2, -pi/2, 0), 'zyx') + test_one_case((pi/2, pi/2, 0), 'ZYX') + test_one_case((pi/2, -pi/2, 0), 'ZYX') + + +def test_to_euler_options(): + def test_one_case(q): + angles1 = Matrix(q.to_euler(seq, True, True)) + angles2 = Matrix(q.to_euler(seq, False, False)) + angle_errors = simplify(angles1-angles2).evalf() + for angle_error in angle_errors: + # forcing angles to set {-pi, pi} + angle_error = (angle_error + pi) % (2 * pi) - pi + assert angle_error < 10e-7 + + for xyz in ('xyz', 'XYZ'): + for seq_tuple in permutations(xyz): + for symmetric in (True, False): + if symmetric: + seq = ''.join([seq_tuple[0], seq_tuple[1], seq_tuple[0]]) + else: + seq = ''.join(seq_tuple) + + for elements in product([-1, 0, 1], repeat=4): + q = Quaternion(*elements) + if not q.is_zero_quaternion(): + test_one_case(q) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__init__.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..220c27a42975555f8fe7770fcef0dd18475c89e1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__init__.py @@ -0,0 +1,43 @@ +from sympy.combinatorics.permutations import Permutation, Cycle +from sympy.combinatorics.prufer import Prufer +from sympy.combinatorics.generators import cyclic, alternating, symmetric, dihedral +from sympy.combinatorics.subsets import Subset +from sympy.combinatorics.partitions import (Partition, IntegerPartition, + RGS_rank, RGS_unrank, RGS_enum) +from sympy.combinatorics.polyhedron import (Polyhedron, tetrahedron, cube, + octahedron, dodecahedron, icosahedron) +from sympy.combinatorics.perm_groups import PermutationGroup, Coset, SymmetricPermutationGroup +from sympy.combinatorics.group_constructs import DirectProduct +from sympy.combinatorics.graycode import GrayCode +from sympy.combinatorics.named_groups import (SymmetricGroup, DihedralGroup, + CyclicGroup, AlternatingGroup, AbelianGroup, RubikGroup) +from sympy.combinatorics.pc_groups import PolycyclicGroup, Collector +from sympy.combinatorics.free_groups import free_group + +__all__ = [ + 'Permutation', 'Cycle', + + 'Prufer', + + 'cyclic', 'alternating', 'symmetric', 'dihedral', + + 'Subset', + + 'Partition', 'IntegerPartition', 'RGS_rank', 'RGS_unrank', 'RGS_enum', + + 'Polyhedron', 'tetrahedron', 'cube', 'octahedron', 'dodecahedron', + 'icosahedron', + + 'PermutationGroup', 'Coset', 'SymmetricPermutationGroup', + + 'DirectProduct', + + 'GrayCode', + + 'SymmetricGroup', 'DihedralGroup', 'CyclicGroup', 'AlternatingGroup', + 'AbelianGroup', 'RubikGroup', + + 'PolycyclicGroup', 'Collector', + + 'free_group', +] diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9546f6f640a8f4471aab1db2f14c39db93cae817 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/coset_table.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/coset_table.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59c1824e9f6d46269e7a9210b3ad05f782b3fb5f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/coset_table.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/fp_groups.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/fp_groups.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eea9c861871d5868cc00c6a4c9127b9c35c5f0f1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/fp_groups.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/free_groups.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/free_groups.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d16debbbaa4fe79227b9bd7cfade3f486f74615 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/free_groups.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/galois.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/galois.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56711ebd4a46affb7e66f802720588ff53d4fe35 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/galois.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/graycode.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/graycode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75a2a465c8846ec0977ab55593201b3ec9b2a53f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/graycode.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/group_constructs.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/group_constructs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..121a6dd8961adaa08cf379b3052381d9bdba7635 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/group_constructs.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/group_numbers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/group_numbers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca8c640da145f94a7c770a73f46f3bbe61f60625 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/group_numbers.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/homomorphisms.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/homomorphisms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5cb84d0e88736c249c040c6d7275dd6047c4562 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/homomorphisms.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/named_groups.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/named_groups.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6337b77c7a6039ec9b436f48e497c02244db80d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/named_groups.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/pc_groups.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/pc_groups.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..202b8f426eb29e4dbef4f519d5a8be71faf0534e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/pc_groups.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/perm_groups.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/perm_groups.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..991a758106c0d60737cef1f5bf78ab0d0137355e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/perm_groups.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/permutations.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/permutations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c10fa29c2f6753a4ec03306050fe953c19e70cf Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/permutations.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/polyhedron.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/polyhedron.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4245a50c697a612575aa002fa28b497830cc6943 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/polyhedron.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/prufer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/prufer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9f9101f26ba8557b9c88c9d791923b67ef62ef3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/prufer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/rewritingsystem.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/rewritingsystem.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d20a054ec9127b9abe2fa79989bff3f0f194d6e Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/rewritingsystem.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/rewritingsystem_fsm.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/rewritingsystem_fsm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8841792a7a459d50920cead607091091211f1aff Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/rewritingsystem_fsm.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/schur_number.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/schur_number.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ae8d54f1f277e022dd9ed9c88869076526c3aa0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/schur_number.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/subsets.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/subsets.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79f513e330b58fdaf3f5c76594e5aa68eca8a5e3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/subsets.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/tensor_can.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/tensor_can.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cac4ba9ed83bda4c55bc56a1160c8d1bf8129243 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/tensor_can.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/testutil.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/testutil.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b9d54493d1480b49618670729c260f2e18a72e5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/testutil.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/util.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32d70f59a562bc64043c7a8ecbcc498ea3268856 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/util.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/coset_table.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/coset_table.py new file mode 100644 index 0000000000000000000000000000000000000000..06cc427c87c6860892f34735112f2f833d3f7f6d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/coset_table.py @@ -0,0 +1,1255 @@ +from sympy.combinatorics.free_groups import free_group +from sympy.printing.defaults import DefaultPrinting + +from itertools import chain, product +from bisect import bisect_left + + +############################################################################### +# COSET TABLE # 
+############################################################################### + +class CosetTable(DefaultPrinting): + # coset_table: Mathematically a coset table + # represented using a list of lists + # alpha: Mathematically a coset (precisely, a live coset) + # represented by an integer between i with 1 <= i <= n + # alpha in c + # x: Mathematically an element of "A" (set of generators and + # their inverses), represented using "FpGroupElement" + # fp_grp: Finitely Presented Group with < X|R > as presentation. + # H: subgroup of fp_grp. + # NOTE: We start with H as being only a list of words in generators + # of "fp_grp". Since `.subgroup` method has not been implemented. + + r""" + + Properties + ========== + + [1] `0 \in \Omega` and `\tau(1) = \epsilon` + [2] `\alpha^x = \beta \Leftrightarrow \beta^{x^{-1}} = \alpha` + [3] If `\alpha^x = \beta`, then `H \tau(\alpha)x = H \tau(\beta)` + [4] `\forall \alpha \in \Omega, 1^{\tau(\alpha)} = \alpha` + + References + ========== + + .. [1] Holt, D., Eick, B., O'Brien, E. + "Handbook of Computational Group Theory" + + .. [2] John J. Cannon; Lucien A. Dimino; George Havas; Jane M. Watson + Mathematics of Computation, Vol. 27, No. 123. (Jul., 1973), pp. 463-490. + "Implementation and Analysis of the Todd-Coxeter Algorithm" + + """ + # default limit for the number of cosets allowed in a + # coset enumeration. + coset_table_max_limit = 4096000 + # limit for the current instance + coset_table_limit = None + # maximum size of deduction stack above or equal to + # which it is emptied + max_stack_size = 100 + + def __init__(self, fp_grp, subgroup, max_cosets=None): + if not max_cosets: + max_cosets = CosetTable.coset_table_max_limit + self.fp_group = fp_grp + self.subgroup = subgroup + self.coset_table_limit = max_cosets + # "p" is setup independent of Omega and n + self.p = [0] + # a list of the form `[gen_1, gen_1^{-1}, ... , gen_k, gen_k^{-1}]` + self.A = list(chain.from_iterable((gen, gen**-1) \ + for gen in self.fp_group.generators)) + #P[alpha, x] Only defined when alpha^x is defined. + self.P = [[None]*len(self.A)] + # the mathematical coset table which is a list of lists + self.table = [[None]*len(self.A)] + self.A_dict = {x: self.A.index(x) for x in self.A} + self.A_dict_inv = {} + for x, index in self.A_dict.items(): + if index % 2 == 0: + self.A_dict_inv[x] = self.A_dict[x] + 1 + else: + self.A_dict_inv[x] = self.A_dict[x] - 1 + # used in the coset-table based method of coset enumeration. Each of + # the element is called a "deduction" which is the form (alpha, x) whenever + # a value is assigned to alpha^x during a definition or "deduction process" + self.deduction_stack = [] + # Attributes for modified methods. + H = self.subgroup + self._grp = free_group(', ' .join(["a_%d" % i for i in range(len(H))]))[0] + self.P = [[None]*len(self.A)] + self.p_p = {} + + @property + def omega(self): + """Set of live cosets. """ + return [coset for coset in range(len(self.p)) if self.p[coset] == coset] + + def copy(self): + """ + Return a shallow copy of Coset Table instance ``self``. 
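+
+ Examples
+ ========
+
+ # a small sketch, reusing the same group as the examples further below
+ >>> from sympy.combinatorics import free_group
+ >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r
+ >>> F, x, y = free_group("x, y")
+ >>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y])
+ >>> C = coset_enumeration_r(f, [x])
+ >>> D = C.copy()
+ >>> D.table == C.table and D.table is not C.table
+ True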
+ + """ + self_copy = self.__class__(self.fp_group, self.subgroup) + self_copy.table = [list(perm_rep) for perm_rep in self.table] + self_copy.p = list(self.p) + self_copy.deduction_stack = list(self.deduction_stack) + return self_copy + + def __str__(self): + return "Coset Table on %s with %s as subgroup generators" \ + % (self.fp_group, self.subgroup) + + __repr__ = __str__ + + @property + def n(self): + """The number `n` represents the length of the sublist containing the + live cosets. + + """ + if not self.table: + return 0 + return max(self.omega) + 1 + + # Pg. 152 [1] + def is_complete(self): + r""" + The coset table is called complete if it has no undefined entries + on the live cosets; that is, `\alpha^x` is defined for all + `\alpha \in \Omega` and `x \in A`. + + """ + return not any(None in self.table[coset] for coset in self.omega) + + # Pg. 153 [1] + def define(self, alpha, x, modified=False): + r""" + This routine is used in the relator-based strategy of Todd-Coxeter + algorithm if some `\alpha^x` is undefined. We check whether there is + space available for defining a new coset. If there is enough space + then we remedy this by adjoining a new coset `\beta` to `\Omega` + (i.e to set of live cosets) and put that equal to `\alpha^x`, then + make an assignment satisfying Property[1]. If there is not enough space + then we halt the Coset Table creation. The maximum amount of space that + can be used by Coset Table can be manipulated using the class variable + ``CosetTable.coset_table_max_limit``. + + See Also + ======== + + define_c + + """ + A = self.A + table = self.table + len_table = len(table) + if len_table >= self.coset_table_limit: + # abort the further generation of cosets + raise ValueError("the coset enumeration has defined more than " + "%s cosets. Try with a greater value max number of cosets " + % self.coset_table_limit) + table.append([None]*len(A)) + self.P.append([None]*len(self.A)) + # beta is the new coset generated + beta = len_table + self.p.append(beta) + table[alpha][self.A_dict[x]] = beta + table[beta][self.A_dict_inv[x]] = alpha + # P[alpha][x] = epsilon, P[beta][x**-1] = epsilon + if modified: + self.P[alpha][self.A_dict[x]] = self._grp.identity + self.P[beta][self.A_dict_inv[x]] = self._grp.identity + self.p_p[beta] = self._grp.identity + + def define_c(self, alpha, x): + r""" + A variation of ``define`` routine, described on Pg. 165 [1], used in + the coset table-based strategy of Todd-Coxeter algorithm. It differs + from ``define`` routine in that for each definition it also adds the + tuple `(\alpha, x)` to the deduction stack. + + See Also + ======== + + define + + """ + A = self.A + table = self.table + len_table = len(table) + if len_table >= self.coset_table_limit: + # abort the further generation of cosets + raise ValueError("the coset enumeration has defined more than " + "%s cosets. Try with a greater value max number of cosets " + % self.coset_table_limit) + table.append([None]*len(A)) + # beta is the new coset generated + beta = len_table + self.p.append(beta) + table[alpha][self.A_dict[x]] = beta + table[beta][self.A_dict_inv[x]] = alpha + # append to deduction stack + self.deduction_stack.append((alpha, x)) + + def scan_c(self, alpha, word): + """ + A variation of ``scan`` routine, described on pg. 165 of [1], which + puts at tuple, whenever a deduction occurs, to deduction stack. 
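+ That is, whenever the scan produces a deduction, the pair ``(f, word[i])`` is appended to ``deduction_stack`` so that ``process_deductions`` can examine it later.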
+ + See Also + ======== + + scan, scan_check, scan_and_fill, scan_and_fill_c + + """ + # alpha is an integer representing a "coset" + # since scanning can be in two cases + # 1. for alpha=0 and w in Y (i.e generating set of H) + # 2. alpha in Omega (set of live cosets), w in R (relators) + A_dict = self.A_dict + A_dict_inv = self.A_dict_inv + table = self.table + f = alpha + i = 0 + r = len(word) + b = alpha + j = r - 1 + # list of union of generators and their inverses + while i <= j and table[f][A_dict[word[i]]] is not None: + f = table[f][A_dict[word[i]]] + i += 1 + if i > j: + if f != b: + self.coincidence_c(f, b) + return + while j >= i and table[b][A_dict_inv[word[j]]] is not None: + b = table[b][A_dict_inv[word[j]]] + j -= 1 + if j < i: + # we have an incorrect completed scan with coincidence f ~ b + # run the "coincidence" routine + self.coincidence_c(f, b) + elif j == i: + # deduction process + table[f][A_dict[word[i]]] = b + table[b][A_dict_inv[word[i]]] = f + self.deduction_stack.append((f, word[i])) + # otherwise scan is incomplete and yields no information + + # alpha, beta coincide, i.e. alpha, beta represent the pair of cosets where + # coincidence occurs + def coincidence_c(self, alpha, beta): + """ + A variation of ``coincidence`` routine used in the coset-table based + method of coset enumeration. The only difference being on addition of + a new coset in coset table(i.e new coset introduction), then it is + appended to ``deduction_stack``. + + See Also + ======== + + coincidence + + """ + A_dict = self.A_dict + A_dict_inv = self.A_dict_inv + table = self.table + # behaves as a queue + q = [] + self.merge(alpha, beta, q) + while len(q) > 0: + gamma = q.pop(0) + for x in A_dict: + delta = table[gamma][A_dict[x]] + if delta is not None: + table[delta][A_dict_inv[x]] = None + # only line of difference from ``coincidence`` routine + self.deduction_stack.append((delta, x**-1)) + mu = self.rep(gamma) + nu = self.rep(delta) + if table[mu][A_dict[x]] is not None: + self.merge(nu, table[mu][A_dict[x]], q) + elif table[nu][A_dict_inv[x]] is not None: + self.merge(mu, table[nu][A_dict_inv[x]], q) + else: + table[mu][A_dict[x]] = nu + table[nu][A_dict_inv[x]] = mu + + def scan(self, alpha, word, y=None, fill=False, modified=False): + r""" + ``scan`` performs a scanning process on the input ``word``. + It first locates the largest prefix ``s`` of ``word`` for which + `\alpha^s` is defined (i.e is not ``None``), ``s`` may be empty. Let + ``word=sv``, let ``t`` be the longest suffix of ``v`` for which + `\alpha^{t^{-1}}` is defined, and let ``v=ut``. Then three + possibilities are there: + + 1. If ``t=v``, then we say that the scan completes, and if, in addition + `\alpha^s = \alpha^{t^{-1}}`, then we say that the scan completes + correctly. + + 2. It can also happen that scan does not complete, but `|u|=1`; that + is, the word ``u`` consists of a single generator `x \in A`. In that + case, if `\alpha^s = \beta` and `\alpha^{t^{-1}} = \gamma`, then we can + set `\beta^x = \gamma` and `\gamma^{x^{-1}} = \beta`. These assignments + are known as deductions and enable the scan to complete correctly. + + 3. See ``coicidence`` routine for explanation of third condition. + + Notes + ===== + + The code for the procedure of scanning `\alpha \in \Omega` + under `w \in A*` is defined on pg. 155 [1] + + See Also + ======== + + scan_c, scan_check, scan_and_fill, scan_and_fill_c + + Scan and Fill + ============= + + Performed when the default argument fill=True. 
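+ (This is what ``scan_and_fill`` does below: it simply calls ``scan(alpha, word, fill=True)``.)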
+ + Modified Scan + ============= + + Performed when the default argument modified=True + + """ + # alpha is an integer representing a "coset" + # since scanning can be in two cases + # 1. for alpha=0 and w in Y (i.e generating set of H) + # 2. alpha in Omega (set of live cosets), w in R (relators) + A_dict = self.A_dict + A_dict_inv = self.A_dict_inv + table = self.table + f = alpha + i = 0 + r = len(word) + b = alpha + j = r - 1 + b_p = y + if modified: + f_p = self._grp.identity + flag = 0 + while fill or flag == 0: + flag = 1 + while i <= j and table[f][A_dict[word[i]]] is not None: + if modified: + f_p = f_p*self.P[f][A_dict[word[i]]] + f = table[f][A_dict[word[i]]] + i += 1 + if i > j: + if f != b: + if modified: + self.modified_coincidence(f, b, f_p**-1*y) + else: + self.coincidence(f, b) + return + while j >= i and table[b][A_dict_inv[word[j]]] is not None: + if modified: + b_p = b_p*self.P[b][self.A_dict_inv[word[j]]] + b = table[b][A_dict_inv[word[j]]] + j -= 1 + if j < i: + # we have an incorrect completed scan with coincidence f ~ b + # run the "coincidence" routine + if modified: + self.modified_coincidence(f, b, f_p**-1*b_p) + else: + self.coincidence(f, b) + elif j == i: + # deduction process + table[f][A_dict[word[i]]] = b + table[b][A_dict_inv[word[i]]] = f + if modified: + self.P[f][self.A_dict[word[i]]] = f_p**-1*b_p + self.P[b][self.A_dict_inv[word[i]]] = b_p**-1*f_p + return + elif fill: + self.define(f, word[i], modified=modified) + # otherwise scan is incomplete and yields no information + + # used in the low-index subgroups algorithm + def scan_check(self, alpha, word): + r""" + Another version of ``scan`` routine, described on, it checks whether + `\alpha` scans correctly under `word`, it is a straightforward + modification of ``scan``. ``scan_check`` returns ``False`` (rather than + calling ``coincidence``) if the scan completes incorrectly; otherwise + it returns ``True``. + + See Also + ======== + + scan, scan_c, scan_and_fill, scan_and_fill_c + + """ + # alpha is an integer representing a "coset" + # since scanning can be in two cases + # 1. for alpha=0 and w in Y (i.e generating set of H) + # 2. alpha in Omega (set of live cosets), w in R (relators) + A_dict = self.A_dict + A_dict_inv = self.A_dict_inv + table = self.table + f = alpha + i = 0 + r = len(word) + b = alpha + j = r - 1 + while i <= j and table[f][A_dict[word[i]]] is not None: + f = table[f][A_dict[word[i]]] + i += 1 + if i > j: + return f == b + while j >= i and table[b][A_dict_inv[word[j]]] is not None: + b = table[b][A_dict_inv[word[j]]] + j -= 1 + if j < i: + # we have an incorrect completed scan with coincidence f ~ b + # return False, instead of calling coincidence routine + return False + elif j == i: + # deduction process + table[f][A_dict[word[i]]] = b + table[b][A_dict_inv[word[i]]] = f + return True + + def merge(self, k, lamda, q, w=None, modified=False): + """ + Merge two classes with representatives ``k`` and ``lamda``, described + on Pg. 157 [1] (for pseudocode), start by putting ``p[k] = lamda``. + It is more efficient to choose the new representative from the larger + of the two classes being merged, i.e larger among ``k`` and ``lamda``. + procedure ``merge`` performs the merging operation, adds the deleted + class representative to the queue ``q``. + + Parameters + ========== + + 'k', 'lamda' being the two class representatives to be merged. + + Notes + ===== + + Pg. 86-87 [1] contains a description of this method. 
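+ Concretely, the routine keeps the smaller-numbered representative (``mu = min(phi, psi)``), points the larger one at it via ``p[v] = mu`` and appends the discarded representative ``v`` to the queue ``q`` so that ``coincidence`` can process its remaining table entries; this is essentially the union step of a union-find structure on the cosets.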
+ + See Also + ======== + + coincidence, rep + + """ + p = self.p + rep = self.rep + phi = rep(k, modified=modified) + psi = rep(lamda, modified=modified) + if phi != psi: + mu = min(phi, psi) + v = max(phi, psi) + p[v] = mu + if modified: + if v == phi: + self.p_p[phi] = self.p_p[k]**-1*w*self.p_p[lamda] + else: + self.p_p[psi] = self.p_p[lamda]**-1*w**-1*self.p_p[k] + q.append(v) + + def rep(self, k, modified=False): + r""" + Parameters + ========== + + `k \in [0 \ldots n-1]`, as for ``self`` only array ``p`` is used + + Returns + ======= + + Representative of the class containing ``k``. + + Returns the representative of `\sim` class containing ``k``, it also + makes some modification to array ``p`` of ``self`` to ease further + computations, described on Pg. 157 [1]. + + The information on classes under `\sim` is stored in array `p` of + ``self`` argument, which will always satisfy the property: + + `p[\alpha] \sim \alpha` and `p[\alpha]=\alpha \iff \alpha=rep(\alpha)` + `\forall \in [0 \ldots n-1]`. + + So, for `\alpha \in [0 \ldots n-1]`, we find `rep(self, \alpha)` by + continually replacing `\alpha` by `p[\alpha]` until it becomes + constant (i.e satisfies `p[\alpha] = \alpha`):w + + To increase the efficiency of later ``rep`` calculations, whenever we + find `rep(self, \alpha)=\beta`, we set + `p[\gamma] = \beta \forall \gamma \in p-chain` from `\alpha` to `\beta` + + Notes + ===== + + ``rep`` routine is also described on Pg. 85-87 [1] in Atkinson's + algorithm, this results from the fact that ``coincidence`` routine + introduces functionality similar to that introduced by the + ``minimal_block`` routine on Pg. 85-87 [1]. + + See Also + ======== + + coincidence, merge + + """ + p = self.p + lamda = k + rho = p[lamda] + if modified: + s = p[:] + while rho != lamda: + if modified: + s[rho] = lamda + lamda = rho + rho = p[lamda] + if modified: + rho = s[lamda] + while rho != k: + mu = rho + rho = s[mu] + p[rho] = lamda + self.p_p[rho] = self.p_p[rho]*self.p_p[mu] + else: + mu = k + rho = p[mu] + while rho != lamda: + p[mu] = lamda + mu = rho + rho = p[mu] + return lamda + + # alpha, beta coincide, i.e. alpha, beta represent the pair of cosets + # where coincidence occurs + def coincidence(self, alpha, beta, w=None, modified=False): + r""" + The third situation described in ``scan`` routine is handled by this + routine, described on Pg. 156-161 [1]. + + The unfortunate situation when the scan completes but not correctly, + then ``coincidence`` routine is run. i.e when for some `i` with + `1 \le i \le r+1`, we have `w=st` with `s = x_1 x_2 \dots x_{i-1}`, + `t = x_i x_{i+1} \dots x_r`, and `\beta = \alpha^s` and + `\gamma = \alpha^{t-1}` are defined but unequal. This means that + `\beta` and `\gamma` represent the same coset of `H` in `G`. Described + on Pg. 156 [1]. 
``rep`` + + See Also + ======== + + scan + + """ + A_dict = self.A_dict + A_dict_inv = self.A_dict_inv + table = self.table + # behaves as a queue + q = [] + if modified: + self.modified_merge(alpha, beta, w, q) + else: + self.merge(alpha, beta, q) + while len(q) > 0: + gamma = q.pop(0) + for x in A_dict: + delta = table[gamma][A_dict[x]] + if delta is not None: + table[delta][A_dict_inv[x]] = None + mu = self.rep(gamma, modified=modified) + nu = self.rep(delta, modified=modified) + if table[mu][A_dict[x]] is not None: + if modified: + v = self.p_p[delta]**-1*self.P[gamma][self.A_dict[x]]**-1 + v = v*self.p_p[gamma]*self.P[mu][self.A_dict[x]] + self.modified_merge(nu, table[mu][self.A_dict[x]], v, q) + else: + self.merge(nu, table[mu][A_dict[x]], q) + elif table[nu][A_dict_inv[x]] is not None: + if modified: + v = self.p_p[gamma]**-1*self.P[gamma][self.A_dict[x]] + v = v*self.p_p[delta]*self.P[mu][self.A_dict_inv[x]] + self.modified_merge(mu, table[nu][self.A_dict_inv[x]], v, q) + else: + self.merge(mu, table[nu][A_dict_inv[x]], q) + else: + table[mu][A_dict[x]] = nu + table[nu][A_dict_inv[x]] = mu + if modified: + v = self.p_p[gamma]**-1*self.P[gamma][self.A_dict[x]]*self.p_p[delta] + self.P[mu][self.A_dict[x]] = v + self.P[nu][self.A_dict_inv[x]] = v**-1 + + # method used in the HLT strategy + def scan_and_fill(self, alpha, word): + """ + A modified version of ``scan`` routine used in the relator-based + method of coset enumeration, described on pg. 162-163 [1], which + follows the idea that whenever the procedure is called and the scan + is incomplete then it makes new definitions to enable the scan to + complete; i.e it fills in the gaps in the scan of the relator or + subgroup generator. + + """ + self.scan(alpha, word, fill=True) + + def scan_and_fill_c(self, alpha, word): + """ + A modified version of ``scan`` routine, described on Pg. 165 second + para. [1], with modification similar to that of ``scan_anf_fill`` the + only difference being it calls the coincidence procedure used in the + coset-table based method i.e. the routine ``coincidence_c`` is used. + + See Also + ======== + + scan, scan_and_fill + + """ + A_dict = self.A_dict + A_dict_inv = self.A_dict_inv + table = self.table + r = len(word) + f = alpha + i = 0 + b = alpha + j = r - 1 + # loop until it has filled the alpha row in the table. + while True: + # do the forward scanning + while i <= j and table[f][A_dict[word[i]]] is not None: + f = table[f][A_dict[word[i]]] + i += 1 + if i > j: + if f != b: + self.coincidence_c(f, b) + return + # forward scan was incomplete, scan backwards + while j >= i and table[b][A_dict_inv[word[j]]] is not None: + b = table[b][A_dict_inv[word[j]]] + j -= 1 + if j < i: + self.coincidence_c(f, b) + elif j == i: + table[f][A_dict[word[i]]] = b + table[b][A_dict_inv[word[i]]] = f + self.deduction_stack.append((f, word[i])) + else: + self.define_c(f, word[i]) + + # method used in the HLT strategy + def look_ahead(self): + """ + When combined with the HLT method this is known as HLT+Lookahead + method of coset enumeration, described on pg. 164 [1]. Whenever + ``define`` aborts due to lack of space available this procedure is + executed. This routine helps in recovering space resulting from + "coincidence" of cosets. + + """ + R = self.fp_group.relators + p = self.p + # complete scan all relators under all cosets(obviously live) + # without making new definitions + for beta in self.omega: + for w in R: + self.scan(beta, w) + if p[beta] < beta: + break + + # Pg. 
166 + def process_deductions(self, R_c_x, R_c_x_inv): + """ + Processes the deductions that have been pushed onto ``deduction_stack``, + described on Pg. 166 [1] and is used in coset-table based enumeration. + + See Also + ======== + + deduction_stack + + """ + p = self.p + table = self.table + while len(self.deduction_stack) > 0: + if len(self.deduction_stack) >= CosetTable.max_stack_size: + self.look_ahead() + del self.deduction_stack[:] + continue + else: + alpha, x = self.deduction_stack.pop() + if p[alpha] == alpha: + for w in R_c_x: + self.scan_c(alpha, w) + if p[alpha] < alpha: + break + beta = table[alpha][self.A_dict[x]] + if beta is not None and p[beta] == beta: + for w in R_c_x_inv: + self.scan_c(beta, w) + if p[beta] < beta: + break + + def process_deductions_check(self, R_c_x, R_c_x_inv): + """ + A variation of ``process_deductions``, this calls ``scan_check`` + wherever ``process_deductions`` calls ``scan``, described on Pg. [1]. + + See Also + ======== + + process_deductions + + """ + table = self.table + while len(self.deduction_stack) > 0: + alpha, x = self.deduction_stack.pop() + for w in R_c_x: + if not self.scan_check(alpha, w): + return False + beta = table[alpha][self.A_dict[x]] + if beta is not None: + for w in R_c_x_inv: + if not self.scan_check(beta, w): + return False + return True + + def switch(self, beta, gamma): + r"""Switch the elements `\beta, \gamma \in \Omega` of ``self``, used + by the ``standardize`` procedure, described on Pg. 167 [1]. + + See Also + ======== + + standardize + + """ + A = self.A + A_dict = self.A_dict + table = self.table + for x in A: + z = table[gamma][A_dict[x]] + table[gamma][A_dict[x]] = table[beta][A_dict[x]] + table[beta][A_dict[x]] = z + for alpha in range(len(self.p)): + if self.p[alpha] == alpha: + if table[alpha][A_dict[x]] == beta: + table[alpha][A_dict[x]] = gamma + elif table[alpha][A_dict[x]] == gamma: + table[alpha][A_dict[x]] = beta + + def standardize(self): + r""" + A coset table is standardized if when running through the cosets and + within each coset through the generator images (ignoring generator + inverses), the cosets appear in order of the integers + `0, 1, \dots, n`. "Standardize" reorders the elements of `\Omega` + such that, if we scan the coset table first by elements of `\Omega` + and then by elements of A, then the cosets occur in ascending order. + ``standardize()`` is used at the end of an enumeration to permute the + cosets so that they occur in some sort of standard order. + + Notes + ===== + + procedure is described on pg. 167-168 [1], it also makes use of the + ``switch`` routine to replace by smaller integer value. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r + >>> F, x, y = free_group("x, y") + + # Example 5.3 from [1] + >>> f = FpGroup(F, [x**2*y**2, x**3*y**5]) + >>> C = coset_enumeration_r(f, []) + >>> C.compress() + >>> C.table + [[1, 3, 1, 3], [2, 0, 2, 0], [3, 1, 3, 1], [0, 2, 0, 2]] + >>> C.standardize() + >>> C.table + [[1, 2, 1, 2], [3, 0, 3, 0], [0, 3, 0, 3], [2, 1, 2, 1]] + + """ + A = self.A + A_dict = self.A_dict + gamma = 1 + for alpha, x in product(range(self.n), A): + beta = self.table[alpha][A_dict[x]] + if beta >= gamma: + if beta > gamma: + self.switch(gamma, beta) + gamma += 1 + if gamma == self.n: + return + + # Compression of a Coset Table + def compress(self): + """Removes the non-live cosets from the coset table, described on + pg. 167 [1]. 
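+
+ Examples
+ ========
+
+ # a small sketch, reusing the group from the ``coset_representative``
+ # example below
+ >>> from sympy.combinatorics import free_group
+ >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r
+ >>> F, x, y = free_group("x, y")
+ >>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y])
+ >>> C = coset_enumeration_r(f, [x])
+ >>> C.p
+ [0, 1, 2, 1, 1]
+ >>> C.compress()
+ >>> C.table
+ [[0, 0, 1, 2], [1, 1, 2, 0], [2, 2, 0, 1]]
+ >>> C.p
+ [0, 1, 2]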
+ + """ + gamma = -1 + A = self.A + A_dict = self.A_dict + A_dict_inv = self.A_dict_inv + table = self.table + chi = tuple([i for i in range(len(self.p)) if self.p[i] != i]) + for alpha in self.omega: + gamma += 1 + if gamma != alpha: + # replace alpha by gamma in coset table + for x in A: + beta = table[alpha][A_dict[x]] + table[gamma][A_dict[x]] = beta + table[beta][A_dict_inv[x]] == gamma + # all the cosets in the table are live cosets + self.p = list(range(gamma + 1)) + # delete the useless columns + del table[len(self.p):] + # re-define values + for row in table: + for j in range(len(self.A)): + row[j] -= bisect_left(chi, row[j]) + + def conjugates(self, R): + R_c = list(chain.from_iterable((rel.cyclic_conjugates(), \ + (rel**-1).cyclic_conjugates()) for rel in R)) + R_set = set() + for conjugate in R_c: + R_set = R_set.union(conjugate) + R_c_list = [] + for x in self.A: + r = {word for word in R_set if word[0] == x} + R_c_list.append(r) + R_set.difference_update(r) + return R_c_list + + def coset_representative(self, coset): + ''' + Compute the coset representative of a given coset. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r + >>> F, x, y = free_group("x, y") + >>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y]) + >>> C = coset_enumeration_r(f, [x]) + >>> C.compress() + >>> C.table + [[0, 0, 1, 2], [1, 1, 2, 0], [2, 2, 0, 1]] + >>> C.coset_representative(0) + + >>> C.coset_representative(1) + y + >>> C.coset_representative(2) + y**-1 + + ''' + for x in self.A: + gamma = self.table[coset][self.A_dict[x]] + if coset == 0: + return self.fp_group.identity + if gamma < coset: + return self.coset_representative(gamma)*x**-1 + + ############################## + # Modified Methods # + ############################## + + def modified_define(self, alpha, x): + r""" + Define a function p_p from from [1..n] to A* as + an additional component of the modified coset table. + + Parameters + ========== + + \alpha \in \Omega + x \in A* + + See Also + ======== + + define + + """ + self.define(alpha, x, modified=True) + + def modified_scan(self, alpha, w, y, fill=False): + r""" + Parameters + ========== + \alpha \in \Omega + w \in A* + y \in (YUY^-1) + fill -- `modified_scan_and_fill` when set to True. + + See Also + ======== + + scan + """ + self.scan(alpha, w, y=y, fill=fill, modified=True) + + def modified_scan_and_fill(self, alpha, w, y): + self.modified_scan(alpha, w, y, fill=True) + + def modified_merge(self, k, lamda, w, q): + r""" + Parameters + ========== + + 'k', 'lamda' -- the two class representatives to be merged. + q -- queue of length l of elements to be deleted from `\Omega` *. 
+ w -- Word in (YUY^-1) + + See Also + ======== + + merge + """ + self.merge(k, lamda, q, w=w, modified=True) + + def modified_rep(self, k): + r""" + Parameters + ========== + + `k \in [0 \ldots n-1]` + + See Also + ======== + + rep + """ + self.rep(k, modified=True) + + def modified_coincidence(self, alpha, beta, w): + r""" + Parameters + ========== + + A coincident pair `\alpha, \beta \in \Omega, w \in Y \cup Y^{-1}` + + See Also + ======== + + coincidence + + """ + self.coincidence(alpha, beta, w=w, modified=True) + +############################################################################### +# COSET ENUMERATION # +############################################################################### + +# relator-based method +def coset_enumeration_r(fp_grp, Y, max_cosets=None, draft=None, + incomplete=False, modified=False): + """ + This is easier of the two implemented methods of coset enumeration. + and is often called the HLT method, after Hazelgrove, Leech, Trotter + The idea is that we make use of ``scan_and_fill`` makes new definitions + whenever the scan is incomplete to enable the scan to complete; this way + we fill in the gaps in the scan of the relator or subgroup generator, + that's why the name relator-based method. + + An instance of `CosetTable` for `fp_grp` can be passed as the keyword + argument `draft` in which case the coset enumeration will start with + that instance and attempt to complete it. + + When `incomplete` is `True` and the function is unable to complete for + some reason, the partially complete table will be returned. + + # TODO: complete the docstring + + See Also + ======== + + scan_and_fill, + + Examples + ======== + + >>> from sympy.combinatorics.free_groups import free_group + >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r + >>> F, x, y = free_group("x, y") + + # Example 5.1 from [1] + >>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y]) + >>> C = coset_enumeration_r(f, [x]) + >>> for i in range(len(C.p)): + ... if C.p[i] == i: + ... print(C.table[i]) + [0, 0, 1, 2] + [1, 1, 2, 0] + [2, 2, 0, 1] + >>> C.p + [0, 1, 2, 1, 1] + + # Example from exercises Q2 [1] + >>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3]) + >>> C = coset_enumeration_r(f, []) + >>> C.compress(); C.standardize() + >>> C.table + [[1, 2, 3, 4], + [5, 0, 6, 7], + [0, 5, 7, 6], + [7, 6, 5, 0], + [6, 7, 0, 5], + [2, 1, 4, 3], + [3, 4, 2, 1], + [4, 3, 1, 2]] + + # Example 5.2 + >>> f = FpGroup(F, [x**2, y**3, (x*y)**3]) + >>> Y = [x*y] + >>> C = coset_enumeration_r(f, Y) + >>> for i in range(len(C.p)): + ... if C.p[i] == i: + ... print(C.table[i]) + [1, 1, 2, 1] + [0, 0, 0, 2] + [3, 3, 1, 0] + [2, 2, 3, 3] + + # Example 5.3 + >>> f = FpGroup(F, [x**2*y**2, x**3*y**5]) + >>> Y = [] + >>> C = coset_enumeration_r(f, Y) + >>> for i in range(len(C.p)): + ... if C.p[i] == i: + ... print(C.table[i]) + [1, 3, 1, 3] + [2, 0, 2, 0] + [3, 1, 3, 1] + [0, 2, 0, 2] + + # Example 5.4 + >>> F, a, b, c, d, e = free_group("a, b, c, d, e") + >>> f = FpGroup(F, [a*b*c**-1, b*c*d**-1, c*d*e**-1, d*e*a**-1, e*a*b**-1]) + >>> Y = [a] + >>> C = coset_enumeration_r(f, Y) + >>> for i in range(len(C.p)): + ... if C.p[i] == i: + ... print(C.table[i]) + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + # example of "compress" method + >>> C.compress() + >>> C.table + [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] + + # Exercises Pg. 161, Q2. 
+ >>> F, x, y = free_group("x, y") + >>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3]) + >>> Y = [] + >>> C = coset_enumeration_r(f, Y) + >>> C.compress() + >>> C.standardize() + >>> C.table + [[1, 2, 3, 4], + [5, 0, 6, 7], + [0, 5, 7, 6], + [7, 6, 5, 0], + [6, 7, 0, 5], + [2, 1, 4, 3], + [3, 4, 2, 1], + [4, 3, 1, 2]] + + # John J. Cannon; Lucien A. Dimino; George Havas; Jane M. Watson + # Mathematics of Computation, Vol. 27, No. 123. (Jul., 1973), pp. 463-490 + # from 1973chwd.pdf + # Table 1. Ex. 1 + >>> F, r, s, t = free_group("r, s, t") + >>> E1 = FpGroup(F, [t**-1*r*t*r**-2, r**-1*s*r*s**-2, s**-1*t*s*t**-2]) + >>> C = coset_enumeration_r(E1, [r]) + >>> for i in range(len(C.p)): + ... if C.p[i] == i: + ... print(C.table[i]) + [0, 0, 0, 0, 0, 0] + + Ex. 2 + >>> F, a, b = free_group("a, b") + >>> Cox = FpGroup(F, [a**6, b**6, (a*b)**2, (a**2*b**2)**2, (a**3*b**3)**5]) + >>> C = coset_enumeration_r(Cox, [a]) + >>> index = 0 + >>> for i in range(len(C.p)): + ... if C.p[i] == i: + ... index += 1 + >>> index + 500 + + # Ex. 3 + >>> F, a, b = free_group("a, b") + >>> B_2_4 = FpGroup(F, [a**4, b**4, (a*b)**4, (a**-1*b)**4, (a**2*b)**4, \ + (a*b**2)**4, (a**2*b**2)**4, (a**-1*b*a*b)**4, (a*b**-1*a*b)**4]) + >>> C = coset_enumeration_r(B_2_4, [a]) + >>> index = 0 + >>> for i in range(len(C.p)): + ... if C.p[i] == i: + ... index += 1 + >>> index + 1024 + + References + ========== + + .. [1] Holt, D., Eick, B., O'Brien, E. + "Handbook of computational group theory" + + """ + # 1. Initialize a coset table C for < X|R > + C = CosetTable(fp_grp, Y, max_cosets=max_cosets) + # Define coset table methods. + if modified: + _scan_and_fill = C.modified_scan_and_fill + _define = C.modified_define + else: + _scan_and_fill = C.scan_and_fill + _define = C.define + if draft: + C.table = draft.table[:] + C.p = draft.p[:] + R = fp_grp.relators + A_dict = C.A_dict + p = C.p + for i in range(len(Y)): + if modified: + _scan_and_fill(0, Y[i], C._grp.generators[i]) + else: + _scan_and_fill(0, Y[i]) + alpha = 0 + while alpha < C.n: + if p[alpha] == alpha: + try: + for w in R: + if modified: + _scan_and_fill(alpha, w, C._grp.identity) + else: + _scan_and_fill(alpha, w) + # if alpha was eliminated during the scan then break + if p[alpha] < alpha: + break + if p[alpha] == alpha: + for x in A_dict: + if C.table[alpha][A_dict[x]] is None: + _define(alpha, x) + except ValueError as e: + if incomplete: + return C + raise e + alpha += 1 + return C + +def modified_coset_enumeration_r(fp_grp, Y, max_cosets=None, draft=None, + incomplete=False): + r""" + Introduce a new set of symbols y \in Y that correspond to the + generators of the subgroup. Store the elements of Y as a + word P[\alpha, x] and compute the coset table similar to that of + the regular coset enumeration methods. + + Examples + ======== + + >>> from sympy.combinatorics.free_groups import free_group + >>> from sympy.combinatorics.fp_groups import FpGroup + >>> from sympy.combinatorics.coset_table import modified_coset_enumeration_r + >>> F, x, y = free_group("x, y") + >>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y]) + >>> C = modified_coset_enumeration_r(f, [x]) + >>> C.table + [[0, 0, 1, 2], [1, 1, 2, 0], [2, 2, 0, 1], [None, 1, None, None], [1, 3, None, None]] + + See Also + ======== + + coset_enumertation_r + + References + ========== + + .. 
[1] Holt, D., Eick, B., O'Brien, E., + "Handbook of Computational Group Theory", + Section 5.3.2 + """ + return coset_enumeration_r(fp_grp, Y, max_cosets=max_cosets, draft=draft, + incomplete=incomplete, modified=True) + +# Pg. 166 +# coset-table based method +def coset_enumeration_c(fp_grp, Y, max_cosets=None, draft=None, + incomplete=False): + """ + >>> from sympy.combinatorics.free_groups import free_group + >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_c + >>> F, x, y = free_group("x, y") + >>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y]) + >>> C = coset_enumeration_c(f, [x]) + >>> C.table + [[0, 0, 1, 2], [1, 1, 2, 0], [2, 2, 0, 1]] + + """ + # Initialize a coset table C for < X|R > + X = fp_grp.generators + R = fp_grp.relators + C = CosetTable(fp_grp, Y, max_cosets=max_cosets) + if draft: + C.table = draft.table[:] + C.p = draft.p[:] + C.deduction_stack = draft.deduction_stack + for alpha, x in product(range(len(C.table)), X): + if C.table[alpha][C.A_dict[x]] is not None: + C.deduction_stack.append((alpha, x)) + A = C.A + # replace all the elements by cyclic reductions + R_cyc_red = [rel.identity_cyclic_reduction() for rel in R] + R_c = list(chain.from_iterable((rel.cyclic_conjugates(), (rel**-1).cyclic_conjugates()) \ + for rel in R_cyc_red)) + R_set = set() + for conjugate in R_c: + R_set = R_set.union(conjugate) + # a list of subsets of R_c whose words start with "x". + R_c_list = [] + for x in C.A: + r = {word for word in R_set if word[0] == x} + R_c_list.append(r) + R_set.difference_update(r) + for w in Y: + C.scan_and_fill_c(0, w) + for x in A: + C.process_deductions(R_c_list[C.A_dict[x]], R_c_list[C.A_dict_inv[x]]) + alpha = 0 + while alpha < len(C.table): + if C.p[alpha] == alpha: + try: + for x in C.A: + if C.p[alpha] != alpha: + break + if C.table[alpha][C.A_dict[x]] is None: + C.define_c(alpha, x) + C.process_deductions(R_c_list[C.A_dict[x]], R_c_list[C.A_dict_inv[x]]) + except ValueError as e: + if incomplete: + return C + raise e + alpha += 1 + return C diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/fp_groups.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/fp_groups.py new file mode 100644 index 0000000000000000000000000000000000000000..9ab8c47d4e6fa211522a5b3e85a8b06c0fa402e5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/fp_groups.py @@ -0,0 +1,1348 @@ +"""Finitely Presented Groups and its algorithms. """ + +from sympy.core.singleton import S +from sympy.core.symbol import symbols +from sympy.combinatorics.free_groups import (FreeGroup, FreeGroupElement, + free_group) +from sympy.combinatorics.rewritingsystem import RewritingSystem +from sympy.combinatorics.coset_table import (CosetTable, + coset_enumeration_r, + coset_enumeration_c) +from sympy.combinatorics import PermutationGroup +from sympy.matrices.normalforms import invariant_factors +from sympy.matrices import Matrix +from sympy.polys.polytools import gcd +from sympy.printing.defaults import DefaultPrinting +from sympy.utilities import public +from sympy.utilities.magic import pollute + +from itertools import product + + +@public +def fp_group(fr_grp, relators=()): + _fp_group = FpGroup(fr_grp, relators) + return (_fp_group,) + tuple(_fp_group._generators) + +@public +def xfp_group(fr_grp, relators=()): + _fp_group = FpGroup(fr_grp, relators) + return (_fp_group, _fp_group._generators) + +# Does not work. Both symbols and pollute are undefined. Never tested. 
+@public +def vfp_group(fr_grpm, relators): + _fp_group = FpGroup(symbols, relators) + pollute([sym.name for sym in _fp_group.symbols], _fp_group.generators) + return _fp_group + + +def _parse_relators(rels): + """Parse the passed relators.""" + return rels + + +############################################################################### +# FINITELY PRESENTED GROUPS # +############################################################################### + + +class FpGroup(DefaultPrinting): + """ + The FpGroup would take a FreeGroup and a list/tuple of relators, the + relators would be specified in such a way that each of them be equal to the + identity of the provided free group. + + """ + is_group = True + is_FpGroup = True + is_PermutationGroup = False + + def __init__(self, fr_grp, relators): + relators = _parse_relators(relators) + self.free_group = fr_grp + self.relators = relators + self.generators = self._generators() + self.dtype = type("FpGroupElement", (FpGroupElement,), {"group": self}) + + # CosetTable instance on identity subgroup + self._coset_table = None + # returns whether coset table on identity subgroup + # has been standardized + self._is_standardized = False + + self._order = None + self._center = None + + self._rewriting_system = RewritingSystem(self) + self._perm_isomorphism = None + return + + def _generators(self): + return self.free_group.generators + + def make_confluent(self): + ''' + Try to make the group's rewriting system confluent + + ''' + self._rewriting_system.make_confluent() + return + + def reduce(self, word): + ''' + Return the reduced form of `word` in `self` according to the group's + rewriting system. If it's confluent, the reduced form is the unique normal + form of the word in the group. + + ''' + return self._rewriting_system.reduce(word) + + def equals(self, word1, word2): + ''' + Compare `word1` and `word2` for equality in the group + using the group's rewriting system. If the system is + confluent, the returned answer is necessarily correct. + (If it is not, `False` could be returned in some cases + where in fact `word1 == word2`) + + ''' + if self.reduce(word1*word2**-1) == self.identity: + return True + elif self._rewriting_system.is_confluent: + return False + return None + + @property + def identity(self): + return self.free_group.identity + + def __contains__(self, g): + return g in self.free_group + + def subgroup(self, gens, C=None, homomorphism=False): + ''' + Return the subgroup generated by `gens` using the + Reidemeister-Schreier algorithm + homomorphism -- When set to True, return a dictionary containing the images + of the presentation generators in the original group. 
+ + Examples + ======== + + >>> from sympy.combinatorics.fp_groups import FpGroup + >>> from sympy.combinatorics import free_group + >>> F, x, y = free_group("x, y") + >>> f = FpGroup(F, [x**3, y**5, (x*y)**2]) + >>> H = [x*y, x**-1*y**-1*x*y*x] + >>> K, T = f.subgroup(H, homomorphism=True) + >>> T(K.generators) + [x*y, x**-1*y**2*x**-1] + + ''' + + if not all(isinstance(g, FreeGroupElement) for g in gens): + raise ValueError("Generators must be `FreeGroupElement`s") + if not all(g.group == self.free_group for g in gens): + raise ValueError("Given generators are not members of the group") + if homomorphism: + g, rels, _gens = reidemeister_presentation(self, gens, C=C, homomorphism=True) + else: + g, rels = reidemeister_presentation(self, gens, C=C) + if g: + g = FpGroup(g[0].group, rels) + else: + g = FpGroup(free_group('')[0], []) + if homomorphism: + from sympy.combinatorics.homomorphisms import homomorphism + return g, homomorphism(g, self, g.generators, _gens, check=False) + return g + + def coset_enumeration(self, H, strategy="relator_based", max_cosets=None, + draft=None, incomplete=False): + """ + Return an instance of ``coset table``, when Todd-Coxeter algorithm is + run over the ``self`` with ``H`` as subgroup, using ``strategy`` + argument as strategy. The returned coset table is compressed but not + standardized. + + An instance of `CosetTable` for `fp_grp` can be passed as the keyword + argument `draft` in which case the coset enumeration will start with + that instance and attempt to complete it. + + When `incomplete` is `True` and the function is unable to complete for + some reason, the partially complete table will be returned. + + """ + if not max_cosets: + max_cosets = CosetTable.coset_table_max_limit + if strategy == 'relator_based': + C = coset_enumeration_r(self, H, max_cosets=max_cosets, + draft=draft, incomplete=incomplete) + else: + C = coset_enumeration_c(self, H, max_cosets=max_cosets, + draft=draft, incomplete=incomplete) + if C.is_complete(): + C.compress() + return C + + def standardize_coset_table(self): + """ + Standardized the coset table ``self`` and makes the internal variable + ``_is_standardized`` equal to ``True``. + + """ + self._coset_table.standardize() + self._is_standardized = True + + def coset_table(self, H, strategy="relator_based", max_cosets=None, + draft=None, incomplete=False): + """ + Return the mathematical coset table of ``self`` in ``H``. + + """ + if not H: + if self._coset_table is not None: + if not self._is_standardized: + self.standardize_coset_table() + else: + C = self.coset_enumeration([], strategy, max_cosets=max_cosets, + draft=draft, incomplete=incomplete) + self._coset_table = C + self.standardize_coset_table() + return self._coset_table.table + else: + C = self.coset_enumeration(H, strategy, max_cosets=max_cosets, + draft=draft, incomplete=incomplete) + C.standardize() + return C.table + + def order(self, strategy="relator_based"): + """ + Returns the order of the finitely presented group ``self``. It uses + the coset enumeration with identity group as subgroup, i.e ``H=[]``. 
+ + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> from sympy.combinatorics.fp_groups import FpGroup + >>> F, x, y = free_group("x, y") + >>> f = FpGroup(F, [x, y**2]) + >>> f.order(strategy="coset_table_based") + 2 + + """ + if self._order is not None: + return self._order + if self._coset_table is not None: + self._order = len(self._coset_table.table) + elif len(self.relators) == 0: + self._order = self.free_group.order() + elif len(self.generators) == 1: + self._order = abs(gcd([r.array_form[0][1] for r in self.relators])) + elif self._is_infinite(): + self._order = S.Infinity + else: + gens, C = self._finite_index_subgroup() + if C: + ind = len(C.table) + self._order = ind*self.subgroup(gens, C=C).order() + else: + self._order = self.index([]) + return self._order + + def _is_infinite(self): + ''' + Test if the group is infinite. Return `True` if the test succeeds + and `None` otherwise + + ''' + used_gens = set() + for r in self.relators: + used_gens.update(r.contains_generators()) + if not set(self.generators) <= used_gens: + return True + # Abelianisation test: check is the abelianisation is infinite + abelian_rels = [] + for rel in self.relators: + abelian_rels.append([rel.exponent_sum(g) for g in self.generators]) + m = Matrix(Matrix(abelian_rels)) + if 0 in invariant_factors(m): + return True + else: + return None + + + def _finite_index_subgroup(self, s=None): + ''' + Find the elements of `self` that generate a finite index subgroup + and, if found, return the list of elements and the coset table of `self` by + the subgroup, otherwise return `(None, None)` + + ''' + gen = self.most_frequent_generator() + rels = list(self.generators) + rels.extend(self.relators) + if not s: + if len(self.generators) == 2: + s = [gen] + [g for g in self.generators if g != gen] + else: + rand = self.free_group.identity + i = 0 + while ((rand in rels or rand**-1 in rels or rand.is_identity) + and i<10): + rand = self.random() + i += 1 + s = [gen, rand] + [g for g in self.generators if g != gen] + mid = (len(s)+1)//2 + half1 = s[:mid] + half2 = s[mid:] + draft1 = None + draft2 = None + m = 200 + C = None + while not C and (m/2 < CosetTable.coset_table_max_limit): + m = min(m, CosetTable.coset_table_max_limit) + draft1 = self.coset_enumeration(half1, max_cosets=m, + draft=draft1, incomplete=True) + if draft1.is_complete(): + C = draft1 + half = half1 + else: + draft2 = self.coset_enumeration(half2, max_cosets=m, + draft=draft2, incomplete=True) + if draft2.is_complete(): + C = draft2 + half = half2 + if not C: + m *= 2 + if not C: + return None, None + C.compress() + return half, C + + def most_frequent_generator(self): + gens = self.generators + rels = self.relators + freqs = [sum([r.generator_count(g) for r in rels]) for g in gens] + return gens[freqs.index(max(freqs))] + + def random(self): + import random + r = self.free_group.identity + for i in range(random.randint(2,3)): + r = r*random.choice(self.generators)**random.choice([1,-1]) + return r + + def index(self, H, strategy="relator_based"): + """ + Return the index of subgroup ``H`` in group ``self``. 
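+ Internally this runs a coset enumeration of ``self`` over ``H`` and returns the number of live cosets, or ``self.order()`` when ``H`` is the empty list.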
+ + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> from sympy.combinatorics.fp_groups import FpGroup + >>> F, x, y = free_group("x, y") + >>> f = FpGroup(F, [x**5, y**4, y*x*y**3*x**3]) + >>> f.index([x]) + 4 + + """ + # TODO: use |G:H| = |G|/|H| (currently H can't be made into a group) + # when we know |G| and |H| + + if H == []: + return self.order() + else: + C = self.coset_enumeration(H, strategy) + return len(C.table) + + def __str__(self): + if self.free_group.rank > 30: + str_form = "" % self.free_group.rank + else: + str_form = "" % str(self.generators) + return str_form + + __repr__ = __str__ + +#============================================================================== +# PERMUTATION GROUP METHODS +#============================================================================== + + def _to_perm_group(self): + ''' + Return an isomorphic permutation group and the isomorphism. + The implementation is dependent on coset enumeration so + will only terminate for finite groups. + + ''' + from sympy.combinatorics import Permutation + from sympy.combinatorics.homomorphisms import homomorphism + if self.order() is S.Infinity: + raise NotImplementedError("Permutation presentation of infinite " + "groups is not implemented") + if self._perm_isomorphism: + T = self._perm_isomorphism + P = T.image() + else: + C = self.coset_table([]) + gens = self.generators + images = [[C[i][2*gens.index(g)] for i in range(len(C))] for g in gens] + images = [Permutation(i) for i in images] + P = PermutationGroup(images) + T = homomorphism(self, P, gens, images, check=False) + self._perm_isomorphism = T + return P, T + + def _perm_group_list(self, method_name, *args): + ''' + Given the name of a `PermutationGroup` method (returning a subgroup + or a list of subgroups) and (optionally) additional arguments it takes, + return a list or a list of lists containing the generators of this (or + these) subgroups in terms of the generators of `self`. + + ''' + P, T = self._to_perm_group() + perm_result = getattr(P, method_name)(*args) + single = False + if isinstance(perm_result, PermutationGroup): + perm_result, single = [perm_result], True + result = [] + for group in perm_result: + gens = group.generators + result.append(T.invert(gens)) + return result[0] if single else result + + def derived_series(self): + ''' + Return the list of lists containing the generators + of the subgroups in the derived series of `self`. + + ''' + return self._perm_group_list('derived_series') + + def lower_central_series(self): + ''' + Return the list of lists containing the generators + of the subgroups in the lower central series of `self`. + + ''' + return self._perm_group_list('lower_central_series') + + def center(self): + ''' + Return the list of generators of the center of `self`. + + ''' + return self._perm_group_list('center') + + + def derived_subgroup(self): + ''' + Return the list of generators of the derived subgroup of `self`. + + ''' + return self._perm_group_list('derived_subgroup') + + + def centralizer(self, other): + ''' + Return the list of generators of the centralizer of `other` + (a list of elements of `self`) in `self`. + + ''' + T = self._to_perm_group()[1] + other = T(other) + return self._perm_group_list('centralizer', other) + + def normal_closure(self, other): + ''' + Return the list of generators of the normal closure of `other` + (a list of elements of `self`) in `self`. 
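+ Like the other permutation-group backed methods above, this maps ``other`` into an isomorphic permutation group with ``_to_perm_group``, computes the normal closure there and pulls the resulting generators back through the isomorphism, so it terminates only for finite groups.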
+ + ''' + T = self._to_perm_group()[1] + other = T(other) + return self._perm_group_list('normal_closure', other) + + def _perm_property(self, attr): + ''' + Given an attribute of a `PermutationGroup`, return + its value for a permutation group isomorphic to `self`. + + ''' + P = self._to_perm_group()[0] + return getattr(P, attr) + + @property + def is_abelian(self): + ''' + Check if `self` is abelian. + + ''' + return self._perm_property("is_abelian") + + @property + def is_nilpotent(self): + ''' + Check if `self` is nilpotent. + + ''' + return self._perm_property("is_nilpotent") + + @property + def is_solvable(self): + ''' + Check if `self` is solvable. + + ''' + return self._perm_property("is_solvable") + + @property + def elements(self): + ''' + List the elements of `self`. + + ''' + P, T = self._to_perm_group() + return T.invert(P._elements) + + @property + def is_cyclic(self): + """ + Return ``True`` if group is Cyclic. + + """ + if len(self.generators) <= 1: + return True + try: + P, T = self._to_perm_group() + except NotImplementedError: + raise NotImplementedError("Check for infinite Cyclic group " + "is not implemented") + return P.is_cyclic + + def abelian_invariants(self): + """ + Return Abelian Invariants of a group. + """ + try: + P, T = self._to_perm_group() + except NotImplementedError: + raise NotImplementedError("abelian invariants is not implemented" + "for infinite group") + return P.abelian_invariants() + + def composition_series(self): + """ + Return subnormal series of maximum length for a group. + """ + try: + P, T = self._to_perm_group() + except NotImplementedError: + raise NotImplementedError("composition series is not implemented" + "for infinite group") + return P.composition_series() + + +class FpSubgroup(DefaultPrinting): + ''' + The class implementing a subgroup of an FpGroup or a FreeGroup + (only finite index subgroups are supported at this point). This + is to be used if one wishes to check if an element of the original + group belongs to the subgroup + + ''' + def __init__(self, G, gens, normal=False): + super().__init__() + self.parent = G + self.generators = list({g for g in gens if g != G.identity}) + self._min_words = None #for use in __contains__ + self.C = None + self.normal = normal + + def __contains__(self, g): + + if isinstance(self.parent, FreeGroup): + if self._min_words is None: + # make _min_words - a list of subwords such that + # g is in the subgroup if and only if it can be + # partitioned into these subwords. Infinite families of + # subwords are presented by tuples, e.g. 
(r, w) + # stands for the family of subwords r*w**n*r**-1 + + def _process(w): + # this is to be used before adding new words + # into _min_words; if the word w is not cyclically + # reduced, it will generate an infinite family of + # subwords so should be written as a tuple; + # if it is, w**-1 should be added to the list + # as well + p, r = w.cyclic_reduction(removed=True) + if not r.is_identity: + return [(r, p)] + else: + return [w, w**-1] + + # make the initial list + gens = [] + for w in self.generators: + if self.normal: + w = w.cyclic_reduction() + gens.extend(_process(w)) + + for w1 in gens: + for w2 in gens: + # if w1 and w2 are equal or are inverses, continue + if w1 == w2 or (not isinstance(w1, tuple) + and w1**-1 == w2): + continue + + # if the start of one word is the inverse of the + # end of the other, their multiple should be added + # to _min_words because of cancellation + if isinstance(w1, tuple): + # start, end + s1, s2 = w1[0][0], w1[0][0]**-1 + else: + s1, s2 = w1[0], w1[len(w1)-1] + + if isinstance(w2, tuple): + # start, end + r1, r2 = w2[0][0], w2[0][0]**-1 + else: + r1, r2 = w2[0], w2[len(w1)-1] + + # p1 and p2 are w1 and w2 or, in case when + # w1 or w2 is an infinite family, a representative + p1, p2 = w1, w2 + if isinstance(w1, tuple): + p1 = w1[0]*w1[1]*w1[0]**-1 + if isinstance(w2, tuple): + p2 = w2[0]*w2[1]*w2[0]**-1 + + # add the product of the words to the list is necessary + if r1**-1 == s2 and not (p1*p2).is_identity: + new = _process(p1*p2) + if new not in gens: + gens.extend(new) + + if r2**-1 == s1 and not (p2*p1).is_identity: + new = _process(p2*p1) + if new not in gens: + gens.extend(new) + + self._min_words = gens + + min_words = self._min_words + + def _is_subword(w): + # check if w is a word in _min_words or one of + # the infinite families in it + w, r = w.cyclic_reduction(removed=True) + if r.is_identity or self.normal: + return w in min_words + else: + t = [s[1] for s in min_words if isinstance(s, tuple) + and s[0] == r] + return [s for s in t if w.power_of(s)] != [] + + # store the solution of words for which the result of + # _word_break (below) is known + known = {} + + def _word_break(w): + # check if w can be written as a product of words + # in min_words + if len(w) == 0: + return True + i = 0 + while i < len(w): + i += 1 + prefix = w.subword(0, i) + if not _is_subword(prefix): + continue + rest = w.subword(i, len(w)) + if rest not in known: + known[rest] = _word_break(rest) + if known[rest]: + return True + return False + + if self.normal: + g = g.cyclic_reduction() + return _word_break(g) + else: + if self.C is None: + C = self.parent.coset_enumeration(self.generators) + self.C = C + i = 0 + C = self.C + for j in range(len(g)): + i = C.table[i][C.A_dict[g[j]]] + return i == 0 + + def order(self): + if not self.generators: + return S.One + if isinstance(self.parent, FreeGroup): + return S.Infinity + if self.C is None: + C = self.parent.coset_enumeration(self.generators) + self.C = C + # This is valid because `len(self.C.table)` (the index of the subgroup) + # will always be finite - otherwise coset enumeration doesn't terminate + return self.parent.order()/len(self.C.table) + + def to_FpGroup(self): + if isinstance(self.parent, FreeGroup): + gen_syms = [('x_%d'%i) for i in range(len(self.generators))] + return free_group(', '.join(gen_syms))[0] + return self.parent.subgroup(C=self.C) + + def __str__(self): + if len(self.generators) > 30: + str_form = "" % len(self.generators) + else: + str_form = "" % str(self.generators) + return 
str_form + + __repr__ = __str__ + + +############################################################################### +# LOW INDEX SUBGROUPS # +############################################################################### + +def low_index_subgroups(G, N, Y=()): + """ + Implements the Low Index Subgroups algorithm, i.e find all subgroups of + ``G`` upto a given index ``N``. This implements the method described in + [Sim94]. This procedure involves a backtrack search over incomplete Coset + Tables, rather than over forced coincidences. + + Parameters + ========== + + G: An FpGroup < X|R > + N: positive integer, representing the maximum index value for subgroups + Y: (an optional argument) specifying a list of subgroup generators, such + that each of the resulting subgroup contains the subgroup generated by Y. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> from sympy.combinatorics.fp_groups import FpGroup, low_index_subgroups + >>> F, x, y = free_group("x, y") + >>> f = FpGroup(F, [x**2, y**3, (x*y)**4]) + >>> L = low_index_subgroups(f, 4) + >>> for coset_table in L: + ... print(coset_table.table) + [[0, 0, 0, 0]] + [[0, 0, 1, 2], [1, 1, 2, 0], [3, 3, 0, 1], [2, 2, 3, 3]] + [[0, 0, 1, 2], [2, 2, 2, 0], [1, 1, 0, 1]] + [[1, 1, 0, 0], [0, 0, 1, 1]] + + References + ========== + + .. [1] Holt, D., Eick, B., O'Brien, E. + "Handbook of Computational Group Theory" + Section 5.4 + + .. [2] Marston Conder and Peter Dobcsanyi + "Applications and Adaptions of the Low Index Subgroups Procedure" + + """ + C = CosetTable(G, []) + R = G.relators + # length chosen for the length of the short relators + len_short_rel = 5 + # elements of R2 only checked at the last step for complete + # coset tables + R2 = {rel for rel in R if len(rel) > len_short_rel} + # elements of R1 are used in inner parts of the process to prune + # branches of the search tree, + R1 = {rel.identity_cyclic_reduction() for rel in set(R) - R2} + R1_c_list = C.conjugates(R1) + S = [] + descendant_subgroups(S, C, R1_c_list, C.A[0], R2, N, Y) + return S + + +def descendant_subgroups(S, C, R1_c_list, x, R2, N, Y): + A_dict = C.A_dict + A_dict_inv = C.A_dict_inv + if C.is_complete(): + # if C is complete then it only needs to test + # whether the relators in R2 are satisfied + for w, alpha in product(R2, C.omega): + if not C.scan_check(alpha, w): + return + # relators in R2 are satisfied, append the table to list + S.append(C) + else: + # find the first undefined entry in Coset Table + for alpha, x in product(range(len(C.table)), C.A): + if C.table[alpha][A_dict[x]] is None: + # this is "x" in pseudo-code (using "y" makes it clear) + undefined_coset, undefined_gen = alpha, x + break + # for filling up the undefine entry we try all possible values + # of beta in Omega or beta = n where beta^(undefined_gen^-1) is undefined + reach = C.omega + [C.n] + for beta in reach: + if beta < N: + if beta == C.n or C.table[beta][A_dict_inv[undefined_gen]] is None: + try_descendant(S, C, R1_c_list, R2, N, undefined_coset, \ + undefined_gen, beta, Y) + + +def try_descendant(S, C, R1_c_list, R2, N, alpha, x, beta, Y): + r""" + Solves the problem of trying out each individual possibility + for `\alpha^x. 
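    In outline (following the code below): the coset table ``C`` is copied,
    the entries ``alpha^x = beta`` and ``beta^(x**-1) = alpha`` are filled in,
    deductions are processed against the conjugates of the short relators,
    the subgroup generators in ``Y`` are scanned from coset 0, and, if the
    resulting table can still be the canonical representative of its
    conjugacy class (``first_in_class``), the search recurses via
    ``descendant_subgroups``.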
+ + """ + D = C.copy() + if beta == D.n and beta < N: + D.table.append([None]*len(D.A)) + D.p.append(beta) + D.table[alpha][D.A_dict[x]] = beta + D.table[beta][D.A_dict_inv[x]] = alpha + D.deduction_stack.append((alpha, x)) + if not D.process_deductions_check(R1_c_list[D.A_dict[x]], \ + R1_c_list[D.A_dict_inv[x]]): + return + for w in Y: + if not D.scan_check(0, w): + return + if first_in_class(D, Y): + descendant_subgroups(S, D, R1_c_list, x, R2, N, Y) + + +def first_in_class(C, Y=()): + """ + Checks whether the subgroup ``H=G1`` corresponding to the Coset Table + could possibly be the canonical representative of its conjugacy class. + + Parameters + ========== + + C: CosetTable + + Returns + ======= + + bool: True/False + + If this returns False, then no descendant of C can have that property, and + so we can abandon C. If it returns True, then we need to process further + the node of the search tree corresponding to C, and so we call + ``descendant_subgroups`` recursively on C. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> from sympy.combinatorics.fp_groups import FpGroup, CosetTable, first_in_class + >>> F, x, y = free_group("x, y") + >>> f = FpGroup(F, [x**2, y**3, (x*y)**4]) + >>> C = CosetTable(f, []) + >>> C.table = [[0, 0, None, None]] + >>> first_in_class(C) + True + >>> C.table = [[1, 1, 1, None], [0, 0, None, 1]]; C.p = [0, 1] + >>> first_in_class(C) + True + >>> C.table = [[1, 1, 2, 1], [0, 0, 0, None], [None, None, None, 0]] + >>> C.p = [0, 1, 2] + >>> first_in_class(C) + False + >>> C.table = [[1, 1, 1, 2], [0, 0, 2, 0], [2, None, 0, 1]] + >>> first_in_class(C) + False + + # TODO:: Sims points out in [Sim94] that performance can be improved by + # remembering some of the information computed by ``first_in_class``. If + # the ``continue alpha`` statement is executed at line 14, then the same thing + # will happen for that value of alpha in any descendant of the table C, and so + # the values the values of alpha for which this occurs could profitably be + # stored and passed through to the descendants of C. Of course this would + # make the code more complicated. + + # The code below is taken directly from the function on page 208 of [Sim94] + # nu[alpha] + + """ + n = C.n + # lamda is the largest numbered point in Omega_c_alpha which is currently defined + lamda = -1 + # for alpha in Omega_c, nu[alpha] is the point in Omega_c_alpha corresponding to alpha + nu = [None]*n + # for alpha in Omega_c_alpha, mu[alpha] is the point in Omega_c corresponding to alpha + mu = [None]*n + # mutually nu and mu are the mutually-inverse equivalence maps between + # Omega_c_alpha and Omega_c + next_alpha = False + # For each 0!=alpha in [0 .. 
nc-1], we start by constructing the equivalent + # standardized coset table C_alpha corresponding to H_alpha + for alpha in range(1, n): + # reset nu to "None" after previous value of alpha + for beta in range(lamda+1): + nu[mu[beta]] = None + # we only want to reject our current table in favour of a preceding + # table in the ordering in which 1 is replaced by alpha, if the subgroup + # G_alpha corresponding to this preceding table definitely contains the + # given subgroup + for w in Y: + # TODO: this should support input of a list of general words + # not just the words which are in "A" (i.e gen and gen^-1) + if C.table[alpha][C.A_dict[w]] != alpha: + # continue with alpha + next_alpha = True + break + if next_alpha: + next_alpha = False + continue + # try alpha as the new point 0 in Omega_C_alpha + mu[0] = alpha + nu[alpha] = 0 + # compare corresponding entries in C and C_alpha + lamda = 0 + for beta in range(n): + for x in C.A: + gamma = C.table[beta][C.A_dict[x]] + delta = C.table[mu[beta]][C.A_dict[x]] + # if either of the entries is undefined, + # we move with next alpha + if gamma is None or delta is None: + # continue with alpha + next_alpha = True + break + if nu[delta] is None: + # delta becomes the next point in Omega_C_alpha + lamda += 1 + nu[delta] = lamda + mu[lamda] = delta + if nu[delta] < gamma: + return False + if nu[delta] > gamma: + # continue with alpha + next_alpha = True + break + if next_alpha: + next_alpha = False + break + return True + +#======================================================================== +# Simplifying Presentation +#======================================================================== + +def simplify_presentation(*args, change_gens=False): + ''' + For an instance of `FpGroup`, return a simplified isomorphic copy of + the group (e.g. remove redundant generators or relators). Alternatively, + a list of generators and relators can be passed in which case the + simplified lists will be returned. + + By default, the generators of the group are unchanged. If you would + like to remove redundant generators, set the keyword argument + `change_gens = True`. + + ''' + if len(args) == 1: + if not isinstance(args[0], FpGroup): + raise TypeError("The argument must be an instance of FpGroup") + G = args[0] + gens, rels = simplify_presentation(G.generators, G.relators, + change_gens=change_gens) + if gens: + return FpGroup(gens[0].group, rels) + return FpGroup(FreeGroup([]), []) + elif len(args) == 2: + gens, rels = args[0][:], args[1][:] + if not gens: + return gens, rels + identity = gens[0].group.identity + else: + if len(args) == 0: + m = "Not enough arguments" + else: + m = "Too many arguments" + raise RuntimeError(m) + + prev_gens = [] + prev_rels = [] + while not set(prev_rels) == set(rels): + prev_rels = rels + while change_gens and not set(prev_gens) == set(gens): + prev_gens = gens + gens, rels = elimination_technique_1(gens, rels, identity) + rels = _simplify_relators(rels, identity) + + if change_gens: + syms = [g.array_form[0][0] for g in gens] + F = free_group(syms)[0] + identity = F.identity + gens = F.generators + subs = dict(zip(syms, gens)) + for j, r in enumerate(rels): + a = r.array_form + rel = identity + for sym, p in a: + rel = rel*subs[sym]**p + rels[j] = rel + return gens, rels + +def _simplify_relators(rels, identity): + """Relies upon ``_simplification_technique_1`` for its functioning. 
""" + rels = rels[:] + + rels = list(set(_simplification_technique_1(rels))) + rels.sort() + rels = [r.identity_cyclic_reduction() for r in rels] + try: + rels.remove(identity) + except ValueError: + pass + return rels + +# Pg 350, section 2.5.1 from [2] +def elimination_technique_1(gens, rels, identity): + rels = rels[:] + # the shorter relators are examined first so that generators selected for + # elimination will have shorter strings as equivalent + rels.sort() + gens = gens[:] + redundant_gens = {} + redundant_rels = [] + used_gens = set() + # examine each relator in relator list for any generator occurring exactly + # once + for rel in rels: + # don't look for a redundant generator in a relator which + # depends on previously found ones + contained_gens = rel.contains_generators() + if any(g in contained_gens for g in redundant_gens): + continue + contained_gens = list(contained_gens) + contained_gens.sort(reverse = True) + for gen in contained_gens: + if rel.generator_count(gen) == 1 and gen not in used_gens: + k = rel.exponent_sum(gen) + gen_index = rel.index(gen**k) + bk = rel.subword(gen_index + 1, len(rel)) + fw = rel.subword(0, gen_index) + chi = bk*fw + redundant_gens[gen] = chi**(-1*k) + used_gens.update(chi.contains_generators()) + redundant_rels.append(rel) + break + rels = [r for r in rels if r not in redundant_rels] + # eliminate the redundant generators from remaining relators + rels = [r.eliminate_words(redundant_gens, _all = True).identity_cyclic_reduction() for r in rels] + rels = list(set(rels)) + try: + rels.remove(identity) + except ValueError: + pass + gens = [g for g in gens if g not in redundant_gens] + return gens, rels + +def _simplification_technique_1(rels): + """ + All relators are checked to see if they are of the form `gen^n`. If any + such relators are found then all other relators are processed for strings + in the `gen` known order. 
+ + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> from sympy.combinatorics.fp_groups import _simplification_technique_1 + >>> F, x, y = free_group("x, y") + >>> w1 = [x**2*y**4, x**3] + >>> _simplification_technique_1(w1) + [x**-1*y**4, x**3] + + >>> w2 = [x**2*y**-4*x**5, x**3, x**2*y**8, y**5] + >>> _simplification_technique_1(w2) + [x**-1*y*x**-1, x**3, x**-1*y**-2, y**5] + + >>> w3 = [x**6*y**4, x**4] + >>> _simplification_technique_1(w3) + [x**2*y**4, x**4] + + """ + rels = rels[:] + # dictionary with "gen: n" where gen^n is one of the relators + exps = {} + for i in range(len(rels)): + rel = rels[i] + if rel.number_syllables() == 1: + g = rel[0] + exp = abs(rel.array_form[0][1]) + if rel.array_form[0][1] < 0: + rels[i] = rels[i]**-1 + g = g**-1 + if g in exps: + exp = gcd(exp, exps[g].array_form[0][1]) + exps[g] = g**exp + + one_syllables_words = exps.values() + # decrease some of the exponents in relators, making use of the single + # syllable relators + for i in range(len(rels)): + rel = rels[i] + if rel in one_syllables_words: + continue + rel = rel.eliminate_words(one_syllables_words, _all = True) + # if rels[i] contains g**n where abs(n) is greater than half of the power p + # of g in exps, g**n can be replaced by g**(n-p) (or g**(p-n) if n<0) + for g in rel.contains_generators(): + if g in exps: + exp = exps[g].array_form[0][1] + max_exp = (exp + 1)//2 + rel = rel.eliminate_word(g**(max_exp), g**(max_exp-exp), _all = True) + rel = rel.eliminate_word(g**(-max_exp), g**(-(max_exp-exp)), _all = True) + rels[i] = rel + rels = [r.identity_cyclic_reduction() for r in rels] + return rels + + +############################################################################### +# SUBGROUP PRESENTATIONS # +############################################################################### + +# Pg 175 [1] +def define_schreier_generators(C, homomorphism=False): + ''' + Parameters + ========== + + C -- Coset table. + homomorphism -- When set to True, return a dictionary containing the images + of the presentation generators in the original group. + ''' + y = [] + gamma = 1 + f = C.fp_group + X = f.generators + if homomorphism: + # `_gens` stores the elements of the parent group to + # to which the schreier generators correspond to. 
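        # Note: tau, built below, is a Schreier transversal: tau[beta] is a
        # word in the original generators taking coset 0 to coset beta, and
        # the Schreier generator y_alpha_x is sent to
        # tau[alpha]*x*tau[beta]**-1 in the parent group.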
+ _gens = {} + # compute the schreier Traversal + tau = {} + tau[0] = f.identity + C.P = [[None]*len(C.A) for i in range(C.n)] + for alpha, x in product(C.omega, C.A): + beta = C.table[alpha][C.A_dict[x]] + if beta == gamma: + C.P[alpha][C.A_dict[x]] = "" + C.P[beta][C.A_dict_inv[x]] = "" + gamma += 1 + if homomorphism: + tau[beta] = tau[alpha]*x + elif x in X and C.P[alpha][C.A_dict[x]] is None: + y_alpha_x = '%s_%s' % (x, alpha) + y.append(y_alpha_x) + C.P[alpha][C.A_dict[x]] = y_alpha_x + if homomorphism: + _gens[y_alpha_x] = tau[alpha]*x*tau[beta]**-1 + grp_gens = list(free_group(', '.join(y))) + C._schreier_free_group = grp_gens.pop(0) + C._schreier_generators = grp_gens + if homomorphism: + C._schreier_gen_elem = _gens + # replace all elements of P by, free group elements + for i, j in product(range(len(C.P)), range(len(C.A))): + # if equals "", replace by identity element + if C.P[i][j] == "": + C.P[i][j] = C._schreier_free_group.identity + elif isinstance(C.P[i][j], str): + r = C._schreier_generators[y.index(C.P[i][j])] + C.P[i][j] = r + beta = C.table[i][j] + C.P[beta][j + 1] = r**-1 + +def reidemeister_relators(C): + R = C.fp_group.relators + rels = [rewrite(C, coset, word) for word in R for coset in range(C.n)] + order_1_gens = {i for i in rels if len(i) == 1} + + # remove all the order 1 generators from relators + rels = list(filter(lambda rel: rel not in order_1_gens, rels)) + + # replace order 1 generators by identity element in reidemeister relators + for i in range(len(rels)): + w = rels[i] + w = w.eliminate_words(order_1_gens, _all=True) + rels[i] = w + + C._schreier_generators = [i for i in C._schreier_generators + if not (i in order_1_gens or i**-1 in order_1_gens)] + + # Tietze transformation 1 i.e TT_1 + # remove cyclic conjugate elements from relators + i = 0 + while i < len(rels): + w = rels[i] + j = i + 1 + while j < len(rels): + if w.is_cyclic_conjugate(rels[j]): + del rels[j] + else: + j += 1 + i += 1 + + C._reidemeister_relators = rels + + +def rewrite(C, alpha, w): + """ + Parameters + ========== + + C: CosetTable + alpha: A live coset + w: A word in `A*` + + Returns + ======= + + rho(tau(alpha), w) + + Examples + ======== + + >>> from sympy.combinatorics.fp_groups import FpGroup, CosetTable, define_schreier_generators, rewrite + >>> from sympy.combinatorics import free_group + >>> F, x, y = free_group("x, y") + >>> f = FpGroup(F, [x**2, y**3, (x*y)**6]) + >>> C = CosetTable(f, []) + >>> C.table = [[1, 1, 2, 3], [0, 0, 4, 5], [4, 4, 3, 0], [5, 5, 0, 2], [2, 2, 5, 1], [3, 3, 1, 4]] + >>> C.p = [0, 1, 2, 3, 4, 5] + >>> define_schreier_generators(C) + >>> rewrite(C, 0, (x*y)**6) + x_4*y_2*x_3*x_1*x_2*y_4*x_5 + + """ + v = C._schreier_free_group.identity + for i in range(len(w)): + x_i = w[i] + v = v*C.P[alpha][C.A_dict[x_i]] + alpha = C.table[alpha][C.A_dict[x_i]] + return v + +# Pg 350, section 2.5.2 from [2] +def elimination_technique_2(C): + """ + This technique eliminates one generator at a time. Heuristically this + seems superior in that we may select for elimination the generator with + shortest equivalent string at each stage. 
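    Concretely, a generator occurring exactly once in some relator (with
    exponent ``k``) is replaced, in all remaining relators, by the remainder
    of that relator raised to ``-k``, as in the example below.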
+ + >>> from sympy.combinatorics import free_group + >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r, \ + reidemeister_relators, define_schreier_generators, elimination_technique_2 + >>> F, x, y = free_group("x, y") + >>> f = FpGroup(F, [x**3, y**5, (x*y)**2]); H = [x*y, x**-1*y**-1*x*y*x] + >>> C = coset_enumeration_r(f, H) + >>> C.compress(); C.standardize() + >>> define_schreier_generators(C) + >>> reidemeister_relators(C) + >>> elimination_technique_2(C) + ([y_1, y_2], [y_2**-3, y_2*y_1*y_2*y_1*y_2*y_1, y_1**2]) + + """ + rels = C._reidemeister_relators + rels.sort(reverse=True) + gens = C._schreier_generators + for i in range(len(gens) - 1, -1, -1): + rel = rels[i] + for j in range(len(gens) - 1, -1, -1): + gen = gens[j] + if rel.generator_count(gen) == 1: + k = rel.exponent_sum(gen) + gen_index = rel.index(gen**k) + bk = rel.subword(gen_index + 1, len(rel)) + fw = rel.subword(0, gen_index) + rep_by = (bk*fw)**(-1*k) + del rels[i]; del gens[j] + for l in range(len(rels)): + rels[l] = rels[l].eliminate_word(gen, rep_by) + break + C._reidemeister_relators = rels + C._schreier_generators = gens + return C._schreier_generators, C._reidemeister_relators + +def reidemeister_presentation(fp_grp, H, C=None, homomorphism=False): + """ + Parameters + ========== + + fp_group: A finitely presented group, an instance of FpGroup + H: A subgroup whose presentation is to be found, given as a list + of words in generators of `fp_grp` + homomorphism: When set to True, return a homomorphism from the subgroup + to the parent group + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> from sympy.combinatorics.fp_groups import FpGroup, reidemeister_presentation + >>> F, x, y = free_group("x, y") + + Example 5.6 Pg. 177 from [1] + >>> f = FpGroup(F, [x**3, y**5, (x*y)**2]) + >>> H = [x*y, x**-1*y**-1*x*y*x] + >>> reidemeister_presentation(f, H) + ((y_1, y_2), (y_1**2, y_2**3, y_2*y_1*y_2*y_1*y_2*y_1)) + + Example 5.8 Pg. 183 from [1] + >>> f = FpGroup(F, [x**3, y**3, (x*y)**3]) + >>> H = [x*y, x*y**-1] + >>> reidemeister_presentation(f, H) + ((x_0, y_0), (x_0**3, y_0**3, x_0*y_0*x_0*y_0*x_0*y_0)) + + Exercises Q2. Pg 187 from [1] + >>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3]) + >>> H = [x] + >>> reidemeister_presentation(f, H) + ((x_0,), (x_0**4,)) + + Example 5.9 Pg. 
183 from [1] + >>> f = FpGroup(F, [x**3*y**-3, (x*y)**3, (x*y**-1)**2]) + >>> H = [x] + >>> reidemeister_presentation(f, H) + ((x_0,), (x_0**6,)) + + """ + if not C: + C = coset_enumeration_r(fp_grp, H) + C.compress(); C.standardize() + define_schreier_generators(C, homomorphism=homomorphism) + reidemeister_relators(C) + gens, rels = C._schreier_generators, C._reidemeister_relators + gens, rels = simplify_presentation(gens, rels, change_gens=True) + + C.schreier_generators = tuple(gens) + C.reidemeister_relators = tuple(rels) + + if homomorphism: + _gens = [] + for gen in gens: + _gens.append(C._schreier_gen_elem[str(gen)]) + return C.schreier_generators, C.reidemeister_relators, _gens + + return C.schreier_generators, C.reidemeister_relators + + +FpGroupElement = FreeGroupElement diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/free_groups.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/free_groups.py new file mode 100644 index 0000000000000000000000000000000000000000..9c24abdc480c7803331435ba7453c4e0848ddb07 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/free_groups.py @@ -0,0 +1,1354 @@ +from __future__ import annotations + +from sympy.core import S +from sympy.core.expr import Expr +from sympy.core.symbol import Symbol, symbols as _symbols +from sympy.core.sympify import CantSympify +from sympy.printing.defaults import DefaultPrinting +from sympy.utilities import public +from sympy.utilities.iterables import flatten, is_sequence +from sympy.utilities.magic import pollute +from sympy.utilities.misc import as_int + + +@public +def free_group(symbols): + """Construct a free group returning ``(FreeGroup, (f_0, f_1, ..., f_(n-1))``. + + Parameters + ========== + + symbols : str, Symbol/Expr or sequence of str, Symbol/Expr (may be empty) + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> F, x, y, z = free_group("x, y, z") + >>> F + + >>> x**2*y**-1 + x**2*y**-1 + >>> type(_) + + + """ + _free_group = FreeGroup(symbols) + return (_free_group,) + tuple(_free_group.generators) + +@public +def xfree_group(symbols): + """Construct a free group returning ``(FreeGroup, (f_0, f_1, ..., f_(n-1)))``. + + Parameters + ========== + + symbols : str, Symbol/Expr or sequence of str, Symbol/Expr (may be empty) + + Examples + ======== + + >>> from sympy.combinatorics.free_groups import xfree_group + >>> F, (x, y, z) = xfree_group("x, y, z") + >>> F + + >>> y**2*x**-2*z**-1 + y**2*x**-2*z**-1 + >>> type(_) + + + """ + _free_group = FreeGroup(symbols) + return (_free_group, _free_group.generators) + +@public +def vfree_group(symbols): + """Construct a free group and inject ``f_0, f_1, ..., f_(n-1)`` as symbols + into the global namespace. 
+ + Parameters + ========== + + symbols : str, Symbol/Expr or sequence of str, Symbol/Expr (may be empty) + + Examples + ======== + + >>> from sympy.combinatorics.free_groups import vfree_group + >>> vfree_group("x, y, z") + + >>> x**2*y**-2*z # noqa: F821 + x**2*y**-2*z + >>> type(_) + + + """ + _free_group = FreeGroup(symbols) + pollute([sym.name for sym in _free_group.symbols], _free_group.generators) + return _free_group + + +def _parse_symbols(symbols): + if not symbols: + return () + if isinstance(symbols, str): + return _symbols(symbols, seq=True) + elif isinstance(symbols, (Expr, FreeGroupElement)): + return (symbols,) + elif is_sequence(symbols): + if all(isinstance(s, str) for s in symbols): + return _symbols(symbols) + elif all(isinstance(s, Expr) for s in symbols): + return symbols + raise ValueError("The type of `symbols` must be one of the following: " + "a str, Symbol/Expr or a sequence of " + "one of these types") + + +############################################################################## +# FREE GROUP # +############################################################################## + +_free_group_cache: dict[int, FreeGroup] = {} + +class FreeGroup(DefaultPrinting): + """ + Free group with finite or infinite number of generators. Its input API + is that of a str, Symbol/Expr or a sequence of one of + these types (which may be empty) + + See Also + ======== + + sympy.polys.rings.PolyRing + + References + ========== + + .. [1] https://www.gap-system.org/Manuals/doc/ref/chap37.html + + .. [2] https://en.wikipedia.org/wiki/Free_group + + """ + is_associative = True + is_group = True + is_FreeGroup = True + is_PermutationGroup = False + relators: list[Expr] = [] + + def __new__(cls, symbols): + symbols = tuple(_parse_symbols(symbols)) + rank = len(symbols) + _hash = hash((cls.__name__, symbols, rank)) + obj = _free_group_cache.get(_hash) + + if obj is None: + obj = object.__new__(cls) + obj._hash = _hash + obj._rank = rank + # dtype method is used to create new instances of FreeGroupElement + obj.dtype = type("FreeGroupElement", (FreeGroupElement,), {"group": obj}) + obj.symbols = symbols + obj.generators = obj._generators() + obj._gens_set = set(obj.generators) + for symbol, generator in zip(obj.symbols, obj.generators): + if isinstance(symbol, Symbol): + name = symbol.name + if hasattr(obj, name): + setattr(obj, name, generator) + + _free_group_cache[_hash] = obj + + return obj + + def _generators(group): + """Returns the generators of the FreeGroup. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> F, x, y, z = free_group("x, y, z") + >>> F.generators + (x, y, z) + + """ + gens = [] + for sym in group.symbols: + elm = ((sym, 1),) + gens.append(group.dtype(elm)) + return tuple(gens) + + def clone(self, symbols=None): + return self.__class__(symbols or self.symbols) + + def __contains__(self, i): + """Return True if ``i`` is contained in FreeGroup.""" + if not isinstance(i, FreeGroupElement): + return False + group = i.group + return self == group + + def __hash__(self): + return self._hash + + def __len__(self): + return self.rank + + def __str__(self): + if self.rank > 30: + str_form = "" % self.rank + else: + str_form = "" + return str_form + + __repr__ = __str__ + + def __getitem__(self, index): + symbols = self.symbols[index] + return self.clone(symbols=symbols) + + def __eq__(self, other): + """No ``FreeGroup`` is equal to any "other" ``FreeGroup``. 
+ """ + return self is other + + def index(self, gen): + """Return the index of the generator `gen` from ``(f_0, ..., f_(n-1))``. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> F, x, y = free_group("x, y") + >>> F.index(y) + 1 + >>> F.index(x) + 0 + + """ + if isinstance(gen, self.dtype): + return self.generators.index(gen) + else: + raise ValueError("expected a generator of Free Group %s, got %s" % (self, gen)) + + def order(self): + """Return the order of the free group. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> F, x, y = free_group("x, y") + >>> F.order() + oo + + >>> free_group("")[0].order() + 1 + + """ + if self.rank == 0: + return S.One + else: + return S.Infinity + + @property + def elements(self): + """ + Return the elements of the free group. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> (z,) = free_group("") + >>> z.elements + {} + + """ + if self.rank == 0: + # A set containing Identity element of `FreeGroup` self is returned + return {self.identity} + else: + raise ValueError("Group contains infinitely many elements" + ", hence cannot be represented") + + @property + def rank(self): + r""" + In group theory, the `rank` of a group `G`, denoted `G.rank`, + can refer to the smallest cardinality of a generating set + for G, that is + + \operatorname{rank}(G)=\min\{ |X|: X\subseteq G, \left\langle X\right\rangle =G\}. + + """ + return self._rank + + @property + def is_abelian(self): + """Returns if the group is Abelian. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> f, x, y, z = free_group("x y z") + >>> f.is_abelian + False + + """ + return self.rank in (0, 1) + + @property + def identity(self): + """Returns the identity element of free group.""" + return self.dtype() + + def contains(self, g): + """Tests if Free Group element ``g`` belong to self, ``G``. + + In mathematical terms any linear combination of generators + of a Free Group is contained in it. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> f, x, y, z = free_group("x y z") + >>> f.contains(x**3*y**2) + True + + """ + if not isinstance(g, FreeGroupElement): + return False + elif self != g.group: + return False + else: + return True + + def center(self): + """Returns the center of the free group `self`.""" + return {self.identity} + + +############################################################################ +# FreeGroupElement # +############################################################################ + + +class FreeGroupElement(CantSympify, DefaultPrinting, tuple): + """Used to create elements of FreeGroup. It cannot be used directly to + create a free group element. It is called by the `dtype` method of the + `FreeGroup` class. + + """ + is_assoc_word = True + + def new(self, init): + return self.__class__(init) + + _hash = None + + def __hash__(self): + _hash = self._hash + if _hash is None: + self._hash = _hash = hash((self.group, frozenset(tuple(self)))) + return _hash + + def copy(self): + return self.new(self) + + @property + def is_identity(self): + if self.array_form == (): + return True + else: + return False + + @property + def array_form(self): + """ + SymPy provides two different internal kinds of representation + of associative words. The first one is called the `array_form` + which is a tuple containing `tuples` as its elements, where the + size of each tuple is two. 
At the first position the tuple + contains the `symbol-generator`, while at the second position + of tuple contains the exponent of that generator at the position. + Since elements (i.e. words) do not commute, the indexing of tuple + makes that property to stay. + + The structure in ``array_form`` of ``FreeGroupElement`` is of form: + + ``( ( symbol_of_gen, exponent ), ( , ), ... ( , ) )`` + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> f, x, y, z = free_group("x y z") + >>> (x*z).array_form + ((x, 1), (z, 1)) + >>> (x**2*z*y*x**2).array_form + ((x, 2), (z, 1), (y, 1), (x, 2)) + + See Also + ======== + + letter_repr + + """ + return tuple(self) + + @property + def letter_form(self): + """ + The letter representation of a ``FreeGroupElement`` is a tuple + of generator symbols, with each entry corresponding to a group + generator. Inverses of the generators are represented by + negative generator symbols. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> f, a, b, c, d = free_group("a b c d") + >>> (a**3).letter_form + (a, a, a) + >>> (a**2*d**-2*a*b**-4).letter_form + (a, a, -d, -d, a, -b, -b, -b, -b) + >>> (a**-2*b**3*d).letter_form + (-a, -a, b, b, b, d) + + See Also + ======== + + array_form + + """ + return tuple(flatten([(i,)*j if j > 0 else (-i,)*(-j) + for i, j in self.array_form])) + + def __getitem__(self, i): + group = self.group + r = self.letter_form[i] + if r.is_Symbol: + return group.dtype(((r, 1),)) + else: + return group.dtype(((-r, -1),)) + + def index(self, gen): + if len(gen) != 1: + raise ValueError() + return (self.letter_form).index(gen.letter_form[0]) + + @property + def letter_form_elm(self): + """ + """ + group = self.group + r = self.letter_form + return [group.dtype(((elm,1),)) if elm.is_Symbol \ + else group.dtype(((-elm,-1),)) for elm in r] + + @property + def ext_rep(self): + """This is called the External Representation of ``FreeGroupElement`` + """ + return tuple(flatten(self.array_form)) + + def __contains__(self, gen): + return gen.array_form[0][0] in tuple([r[0] for r in self.array_form]) + + def __str__(self): + if self.is_identity: + return "" + + str_form = "" + array_form = self.array_form + for i in range(len(array_form)): + if i == len(array_form) - 1: + if array_form[i][1] == 1: + str_form += str(array_form[i][0]) + else: + str_form += str(array_form[i][0]) + \ + "**" + str(array_form[i][1]) + else: + if array_form[i][1] == 1: + str_form += str(array_form[i][0]) + "*" + else: + str_form += str(array_form[i][0]) + \ + "**" + str(array_form[i][1]) + "*" + return str_form + + __repr__ = __str__ + + def __pow__(self, n): + n = as_int(n) + group = self.group + if n == 0: + return group.identity + + if n < 0: + n = -n + return (self.inverse())**n + + result = self + for i in range(n - 1): + result = result*self + # this method can be improved instead of just returning the + # multiplication of elements + return result + + def __mul__(self, other): + """Returns the product of elements belonging to the same ``FreeGroup``. 
+ + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> f, x, y, z = free_group("x y z") + >>> x*y**2*y**-4 + x*y**-2 + >>> z*y**-2 + z*y**-2 + >>> x**2*y*y**-1*x**-2 + + + """ + group = self.group + if not isinstance(other, group.dtype): + raise TypeError("only FreeGroup elements of same FreeGroup can " + "be multiplied") + if self.is_identity: + return other + if other.is_identity: + return self + r = list(self.array_form + other.array_form) + zero_mul_simp(r, len(self.array_form) - 1) + return group.dtype(tuple(r)) + + def __truediv__(self, other): + group = self.group + if not isinstance(other, group.dtype): + raise TypeError("only FreeGroup elements of same FreeGroup can " + "be multiplied") + return self*(other.inverse()) + + def __rtruediv__(self, other): + group = self.group + if not isinstance(other, group.dtype): + raise TypeError("only FreeGroup elements of same FreeGroup can " + "be multiplied") + return other*(self.inverse()) + + def __add__(self, other): + return NotImplemented + + def inverse(self): + """ + Returns the inverse of a ``FreeGroupElement`` element + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> f, x, y, z = free_group("x y z") + >>> x.inverse() + x**-1 + >>> (x*y).inverse() + y**-1*x**-1 + + """ + group = self.group + r = tuple([(i, -j) for i, j in self.array_form[::-1]]) + return group.dtype(r) + + def order(self): + """Find the order of a ``FreeGroupElement``. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> f, x, y = free_group("x y") + >>> (x**2*y*y**-1*x**-2).order() + 1 + + """ + if self.is_identity: + return S.One + else: + return S.Infinity + + def commutator(self, other): + """ + Return the commutator of `self` and `x`: ``~x*~self*x*self`` + + """ + group = self.group + if not isinstance(other, group.dtype): + raise ValueError("commutator of only FreeGroupElement of the same " + "FreeGroup exists") + else: + return self.inverse()*other.inverse()*self*other + + def eliminate_words(self, words, _all=False, inverse=True): + ''' + Replace each subword from the dictionary `words` by words[subword]. + If words is a list, replace the words by the identity. + + ''' + again = True + new = self + if isinstance(words, dict): + while again: + again = False + for sub in words: + prev = new + new = new.eliminate_word(sub, words[sub], _all=_all, inverse=inverse) + if new != prev: + again = True + else: + while again: + again = False + for sub in words: + prev = new + new = new.eliminate_word(sub, _all=_all, inverse=inverse) + if new != prev: + again = True + return new + + def eliminate_word(self, gen, by=None, _all=False, inverse=True): + """ + For an associative word `self`, a subword `gen`, and an associative + word `by` (identity by default), return the associative word obtained by + replacing each occurrence of `gen` in `self` by `by`. If `_all = True`, + the occurrences of `gen` that may appear after the first substitution will + also be replaced and so on until no occurrences are found. This might not + always terminate (e.g. `(x).eliminate_word(x, x**2, _all=True)`). 
+ + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> f, x, y = free_group("x y") + >>> w = x**5*y*x**2*y**-4*x + >>> w.eliminate_word( x, x**2 ) + x**10*y*x**4*y**-4*x**2 + >>> w.eliminate_word( x, y**-1 ) + y**-11 + >>> w.eliminate_word(x**5) + y*x**2*y**-4*x + >>> w.eliminate_word(x*y, y) + x**4*y*x**2*y**-4*x + + See Also + ======== + substituted_word + + """ + if by is None: + by = self.group.identity + if self.is_independent(gen) or gen == by: + return self + if gen == self: + return by + if gen**-1 == by: + _all = False + word = self + l = len(gen) + + try: + i = word.subword_index(gen) + k = 1 + except ValueError: + if not inverse: + return word + try: + i = word.subword_index(gen**-1) + k = -1 + except ValueError: + return word + + word = word.subword(0, i)*by**k*word.subword(i+l, len(word)).eliminate_word(gen, by) + + if _all: + return word.eliminate_word(gen, by, _all=True, inverse=inverse) + else: + return word + + def __len__(self): + """ + For an associative word `self`, returns the number of letters in it. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> f, a, b = free_group("a b") + >>> w = a**5*b*a**2*b**-4*a + >>> len(w) + 13 + >>> len(a**17) + 17 + >>> len(w**0) + 0 + + """ + return sum(abs(j) for (i, j) in self) + + def __eq__(self, other): + """ + Two associative words are equal if they are words over the + same alphabet and if they are sequences of the same letters. + This is equivalent to saying that the external representations + of the words are equal. + There is no "universal" empty word, every alphabet has its own + empty word. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> f, swapnil0, swapnil1 = free_group("swapnil0 swapnil1") + >>> f + + >>> g, swap0, swap1 = free_group("swap0 swap1") + >>> g + + + >>> swapnil0 == swapnil1 + False + >>> swapnil0*swapnil1 == swapnil1/swapnil1*swapnil0*swapnil1 + True + >>> swapnil0*swapnil1 == swapnil1*swapnil0 + False + >>> swapnil1**0 == swap0**0 + False + + """ + group = self.group + if not isinstance(other, group.dtype): + return False + return tuple.__eq__(self, other) + + def __lt__(self, other): + """ + The ordering of associative words is defined by length and + lexicography (this ordering is called short-lex ordering), that + is, shorter words are smaller than longer words, and words of the + same length are compared w.r.t. the lexicographical ordering induced + by the ordering of generators. Generators are sorted according + to the order in which they were created. If the generators are + invertible then each generator `g` is larger than its inverse `g^{-1}`, + and `g^{-1}` is larger than every generator that is smaller than `g`. 
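        For two generators created in the order ``a, b`` this gives, among
        the words of length one, ``a**-1 < a < b**-1 < b``.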
+ + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> f, a, b = free_group("a b") + >>> b < a + False + >>> a < a.inverse() + False + + """ + group = self.group + if not isinstance(other, group.dtype): + raise TypeError("only FreeGroup elements of same FreeGroup can " + "be compared") + l = len(self) + m = len(other) + # implement lenlex order + if l < m: + return True + elif l > m: + return False + for i in range(l): + a = self[i].array_form[0] + b = other[i].array_form[0] + p = group.symbols.index(a[0]) + q = group.symbols.index(b[0]) + if p < q: + return True + elif p > q: + return False + elif a[1] < b[1]: + return True + elif a[1] > b[1]: + return False + return False + + def __le__(self, other): + return (self == other or self < other) + + def __gt__(self, other): + """ + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> f, x, y, z = free_group("x y z") + >>> y**2 > x**2 + True + >>> y*z > z*y + False + >>> x > x.inverse() + True + + """ + group = self.group + if not isinstance(other, group.dtype): + raise TypeError("only FreeGroup elements of same FreeGroup can " + "be compared") + return not self <= other + + def __ge__(self, other): + return not self < other + + def exponent_sum(self, gen): + """ + For an associative word `self` and a generator or inverse of generator + `gen`, ``exponent_sum`` returns the number of times `gen` appears in + `self` minus the number of times its inverse appears in `self`. If + neither `gen` nor its inverse occur in `self` then 0 is returned. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> F, x, y = free_group("x, y") + >>> w = x**2*y**3 + >>> w.exponent_sum(x) + 2 + >>> w.exponent_sum(x**-1) + -2 + >>> w = x**2*y**4*x**-3 + >>> w.exponent_sum(x) + -1 + + See Also + ======== + + generator_count + + """ + if len(gen) != 1: + raise ValueError("gen must be a generator or inverse of a generator") + s = gen.array_form[0] + return s[1]*sum([i[1] for i in self.array_form if i[0] == s[0]]) + + def generator_count(self, gen): + """ + For an associative word `self` and a generator `gen`, + ``generator_count`` returns the multiplicity of generator + `gen` in `self`. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> F, x, y = free_group("x, y") + >>> w = x**2*y**3 + >>> w.generator_count(x) + 2 + >>> w = x**2*y**4*x**-3 + >>> w.generator_count(x) + 5 + + See Also + ======== + + exponent_sum + + """ + if len(gen) != 1 or gen.array_form[0][1] < 0: + raise ValueError("gen must be a generator") + s = gen.array_form[0] + return s[1]*sum([abs(i[1]) for i in self.array_form if i[0] == s[0]]) + + def subword(self, from_i, to_j, strict=True): + """ + For an associative word `self` and two positive integers `from_i` and + `to_j`, `subword` returns the subword of `self` that begins at position + `from_i` and ends at `to_j - 1`, indexing is done with origin 0. 
+ + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> f, a, b = free_group("a b") + >>> w = a**5*b*a**2*b**-4*a + >>> w.subword(2, 6) + a**3*b + + """ + group = self.group + if not strict: + from_i = max(from_i, 0) + to_j = min(len(self), to_j) + if from_i < 0 or to_j > len(self): + raise ValueError("`from_i`, `to_j` must be positive and no greater than " + "the length of associative word") + if to_j <= from_i: + return group.identity + else: + letter_form = self.letter_form[from_i: to_j] + array_form = letter_form_to_array_form(letter_form, group) + return group.dtype(array_form) + + def subword_index(self, word, start = 0): + ''' + Find the index of `word` in `self`. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> f, a, b = free_group("a b") + >>> w = a**2*b*a*b**3 + >>> w.subword_index(a*b*a*b) + 1 + + ''' + l = len(word) + self_lf = self.letter_form + word_lf = word.letter_form + index = None + for i in range(start,len(self_lf)-l+1): + if self_lf[i:i+l] == word_lf: + index = i + break + if index is not None: + return index + else: + raise ValueError("The given word is not a subword of self") + + def is_dependent(self, word): + """ + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> F, x, y = free_group("x, y") + >>> (x**4*y**-3).is_dependent(x**4*y**-2) + True + >>> (x**2*y**-1).is_dependent(x*y) + False + >>> (x*y**2*x*y**2).is_dependent(x*y**2) + True + >>> (x**12).is_dependent(x**-4) + True + + See Also + ======== + + is_independent + + """ + try: + return self.subword_index(word) is not None + except ValueError: + pass + try: + return self.subword_index(word**-1) is not None + except ValueError: + return False + + def is_independent(self, word): + """ + + See Also + ======== + + is_dependent + + """ + return not self.is_dependent(word) + + def contains_generators(self): + """ + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> F, x, y, z = free_group("x, y, z") + >>> (x**2*y**-1).contains_generators() + {x, y} + >>> (x**3*z).contains_generators() + {x, z} + + """ + group = self.group + gens = set() + for syllable in self.array_form: + gens.add(group.dtype(((syllable[0], 1),))) + return set(gens) + + def cyclic_subword(self, from_i, to_j): + group = self.group + l = len(self) + letter_form = self.letter_form + period1 = int(from_i/l) + if from_i >= l: + from_i -= l*period1 + to_j -= l*period1 + diff = to_j - from_i + word = letter_form[from_i: to_j] + period2 = int(to_j/l) - 1 + word += letter_form*period2 + letter_form[:diff-l+from_i-l*period2] + word = letter_form_to_array_form(word, group) + return group.dtype(word) + + def cyclic_conjugates(self): + """Returns a words which are cyclic to the word `self`. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> F, x, y = free_group("x, y") + >>> w = x*y*x*y*x + >>> w.cyclic_conjugates() + {x*y*x**2*y, x**2*y*x*y, y*x*y*x**2, y*x**2*y*x, x*y*x*y*x} + >>> s = x*y*x**2*y*x + >>> s.cyclic_conjugates() + {x**2*y*x**2*y, y*x**2*y*x**2, x*y*x**2*y*x} + + References + ========== + + .. [1] https://planetmath.org/cyclicpermutation + + """ + return {self.cyclic_subword(i, i+len(self)) for i in range(len(self))} + + def is_cyclic_conjugate(self, w): + """ + Checks whether words ``self``, ``w`` are cyclic conjugates. 
+ + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> F, x, y = free_group("x, y") + >>> w1 = x**2*y**5 + >>> w2 = x*y**5*x + >>> w1.is_cyclic_conjugate(w2) + True + >>> w3 = x**-1*y**5*x**-1 + >>> w3.is_cyclic_conjugate(w2) + False + + """ + l1 = len(self) + l2 = len(w) + if l1 != l2: + return False + w1 = self.identity_cyclic_reduction() + w2 = w.identity_cyclic_reduction() + letter1 = w1.letter_form + letter2 = w2.letter_form + str1 = ' '.join(map(str, letter1)) + str2 = ' '.join(map(str, letter2)) + if len(str1) != len(str2): + return False + + return str1 in str2 + ' ' + str2 + + def number_syllables(self): + """Returns the number of syllables of the associative word `self`. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> f, swapnil0, swapnil1 = free_group("swapnil0 swapnil1") + >>> (swapnil1**3*swapnil0*swapnil1**-1).number_syllables() + 3 + + """ + return len(self.array_form) + + def exponent_syllable(self, i): + """ + Returns the exponent of the `i`-th syllable of the associative word + `self`. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> f, a, b = free_group("a b") + >>> w = a**5*b*a**2*b**-4*a + >>> w.exponent_syllable( 2 ) + 2 + + """ + return self.array_form[i][1] + + def generator_syllable(self, i): + """ + Returns the symbol of the generator that is involved in the + i-th syllable of the associative word `self`. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> f, a, b = free_group("a b") + >>> w = a**5*b*a**2*b**-4*a + >>> w.generator_syllable( 3 ) + b + + """ + return self.array_form[i][0] + + def sub_syllables(self, from_i, to_j): + """ + `sub_syllables` returns the subword of the associative word `self` that + consists of syllables from positions `from_to` to `to_j`, where + `from_to` and `to_j` must be positive integers and indexing is done + with origin 0. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> f, a, b = free_group("a, b") + >>> w = a**5*b*a**2*b**-4*a + >>> w.sub_syllables(1, 2) + b + >>> w.sub_syllables(3, 3) + + + """ + if not isinstance(from_i, int) or not isinstance(to_j, int): + raise ValueError("both arguments should be integers") + group = self.group + if to_j <= from_i: + return group.identity + else: + r = tuple(self.array_form[from_i: to_j]) + return group.dtype(r) + + def substituted_word(self, from_i, to_j, by): + """ + Returns the associative word obtained by replacing the subword of + `self` that begins at position `from_i` and ends at position `to_j - 1` + by the associative word `by`. `from_i` and `to_j` must be positive + integers, indexing is done with origin 0. In other words, + `w.substituted_word(w, from_i, to_j, by)` is the product of the three + words: `w.subword(0, from_i)`, `by`, and + `w.subword(to_j len(w))`. + + See Also + ======== + + eliminate_word + + """ + lw = len(self) + if from_i >= to_j or from_i > lw or to_j > lw: + raise ValueError("values should be within bounds") + + # otherwise there are four possibilities + + # first if from=1 and to=lw then + if from_i == 0 and to_j == lw: + return by + elif from_i == 0: # second if from_i=1 (and to_j < lw) then + return by*self.subword(to_j, lw) + elif to_j == lw: # third if to_j=1 (and from_i > 1) then + return self.subword(0, from_i)*by + else: # finally + return self.subword(0, from_i)*by*self.subword(to_j, lw) + + def is_cyclically_reduced(self): + r"""Returns whether the word is cyclically reduced or not. 
+ A word is cyclically reduced if by forming the cycle of the + word, the word is not reduced, i.e a word w = `a_1 ... a_n` + is called cyclically reduced if `a_1 \ne a_n^{-1}`. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> F, x, y = free_group("x, y") + >>> (x**2*y**-1*x**-1).is_cyclically_reduced() + False + >>> (y*x**2*y**2).is_cyclically_reduced() + True + + """ + if not self: + return True + return self[0] != self[-1]**-1 + + def identity_cyclic_reduction(self): + """Return a unique cyclically reduced version of the word. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> F, x, y = free_group("x, y") + >>> (x**2*y**2*x**-1).identity_cyclic_reduction() + x*y**2 + >>> (x**-3*y**-1*x**5).identity_cyclic_reduction() + x**2*y**-1 + + References + ========== + + .. [1] https://planetmath.org/cyclicallyreduced + + """ + word = self.copy() + group = self.group + while not word.is_cyclically_reduced(): + exp1 = word.exponent_syllable(0) + exp2 = word.exponent_syllable(-1) + r = exp1 + exp2 + if r == 0: + rep = word.array_form[1: word.number_syllables() - 1] + else: + rep = ((word.generator_syllable(0), exp1 + exp2),) + \ + word.array_form[1: word.number_syllables() - 1] + word = group.dtype(rep) + return word + + def cyclic_reduction(self, removed=False): + """Return a cyclically reduced version of the word. Unlike + `identity_cyclic_reduction`, this will not cyclically permute + the reduced word - just remove the "unreduced" bits on either + side of it. Compare the examples with those of + `identity_cyclic_reduction`. + + When `removed` is `True`, return a tuple `(word, r)` where + self `r` is such that before the reduction the word was either + `r*word*r**-1`. + + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> F, x, y = free_group("x, y") + >>> (x**2*y**2*x**-1).cyclic_reduction() + x*y**2 + >>> (x**-3*y**-1*x**5).cyclic_reduction() + y**-1*x**2 + >>> (x**-3*y**-1*x**5).cyclic_reduction(removed=True) + (y**-1*x**2, x**-3) + + """ + word = self.copy() + g = self.group.identity + while not word.is_cyclically_reduced(): + exp1 = abs(word.exponent_syllable(0)) + exp2 = abs(word.exponent_syllable(-1)) + exp = min(exp1, exp2) + start = word[0]**abs(exp) + end = word[-1]**abs(exp) + word = start**-1*word*end**-1 + g = g*start + if removed: + return word, g + return word + + def power_of(self, other): + ''' + Check if `self == other**n` for some integer n. 
+ + Examples + ======== + + >>> from sympy.combinatorics import free_group + >>> F, x, y = free_group("x, y") + >>> ((x*y)**2).power_of(x*y) + True + >>> (x**-3*y**-2*x**3).power_of(x**-3*y*x**3) + True + + ''' + if self.is_identity: + return True + + l = len(other) + if l == 1: + # self has to be a power of one generator + gens = self.contains_generators() + s = other in gens or other**-1 in gens + return len(gens) == 1 and s + + # if self is not cyclically reduced and it is a power of other, + # other isn't cyclically reduced and the parts removed during + # their reduction must be equal + reduced, r1 = self.cyclic_reduction(removed=True) + if not r1.is_identity: + other, r2 = other.cyclic_reduction(removed=True) + if r1 == r2: + return reduced.power_of(other) + return False + + if len(self) < l or len(self) % l: + return False + + prefix = self.subword(0, l) + if prefix == other or prefix**-1 == other: + rest = self.subword(l, len(self)) + return rest.power_of(other) + return False + + +def letter_form_to_array_form(array_form, group): + """ + This method converts a list given with possible repetitions of elements in + it. It returns a new list such that repetitions of consecutive elements is + removed and replace with a tuple element of size two such that the first + index contains `value` and the second index contains the number of + consecutive repetitions of `value`. + + """ + a = list(array_form[:]) + new_array = [] + n = 1 + symbols = group.symbols + for i in range(len(a)): + if i == len(a) - 1: + if a[i] == a[i - 1]: + if (-a[i]) in symbols: + new_array.append((-a[i], -n)) + else: + new_array.append((a[i], n)) + else: + if (-a[i]) in symbols: + new_array.append((-a[i], -1)) + else: + new_array.append((a[i], 1)) + return new_array + elif a[i] == a[i + 1]: + n += 1 + else: + if (-a[i]) in symbols: + new_array.append((-a[i], -n)) + else: + new_array.append((a[i], n)) + n = 1 + + +def zero_mul_simp(l, index): + """Used to combine two reduced words.""" + while index >=0 and index < len(l) - 1 and l[index][0] == l[index + 1][0]: + exp = l[index][1] + l[index + 1][1] + base = l[index][0] + l[index] = (base, exp) + del l[index + 1] + if l[index][1] == 0: + del l[index] + index -= 1 diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/generators.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/generators.py new file mode 100644 index 0000000000000000000000000000000000000000..9f136502d4e082e6c2554e7fb294d0036c5b0034 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/generators.py @@ -0,0 +1,302 @@ +from sympy.combinatorics.permutations import Permutation +from sympy.core.symbol import symbols +from sympy.matrices import Matrix +from sympy.utilities.iterables import variations, rotate_left + + +def symmetric(n): + """ + Generates the symmetric group of order n, Sn. + + Examples + ======== + + >>> from sympy.combinatorics.generators import symmetric + >>> list(symmetric(3)) + [(2), (1 2), (2)(0 1), (0 1 2), (0 2 1), (0 2)] + """ + for perm in variations(range(n), n): + yield Permutation(perm) + + +def cyclic(n): + """ + Generates the cyclic group of order n, Cn. 
+ + Examples + ======== + + >>> from sympy.combinatorics.generators import cyclic + >>> list(cyclic(5)) + [(4), (0 1 2 3 4), (0 2 4 1 3), + (0 3 1 4 2), (0 4 3 2 1)] + + See Also + ======== + + dihedral + """ + gen = list(range(n)) + for i in range(n): + yield Permutation(gen) + gen = rotate_left(gen, 1) + + +def alternating(n): + """ + Generates the alternating group of order n, An. + + Examples + ======== + + >>> from sympy.combinatorics.generators import alternating + >>> list(alternating(3)) + [(2), (0 1 2), (0 2 1)] + """ + for perm in variations(range(n), n): + p = Permutation(perm) + if p.is_even: + yield p + + +def dihedral(n): + """ + Generates the dihedral group of order 2n, Dn. + + The result is given as a subgroup of Sn, except for the special cases n=1 + (the group S2) and n=2 (the Klein 4-group) where that's not possible + and embeddings in S2 and S4 respectively are given. + + Examples + ======== + + >>> from sympy.combinatorics.generators import dihedral + >>> list(dihedral(3)) + [(2), (0 2), (0 1 2), (1 2), (0 2 1), (2)(0 1)] + + See Also + ======== + + cyclic + """ + if n == 1: + yield Permutation([0, 1]) + yield Permutation([1, 0]) + elif n == 2: + yield Permutation([0, 1, 2, 3]) + yield Permutation([1, 0, 3, 2]) + yield Permutation([2, 3, 0, 1]) + yield Permutation([3, 2, 1, 0]) + else: + gen = list(range(n)) + for i in range(n): + yield Permutation(gen) + yield Permutation(gen[::-1]) + gen = rotate_left(gen, 1) + + +def rubik_cube_generators(): + """Return the permutations of the 3x3 Rubik's cube, see + https://www.gap-system.org/Doc/Examples/rubik.html + """ + a = [ + [(1, 3, 8, 6), (2, 5, 7, 4), (9, 33, 25, 17), (10, 34, 26, 18), + (11, 35, 27, 19)], + [(9, 11, 16, 14), (10, 13, 15, 12), (1, 17, 41, 40), (4, 20, 44, 37), + (6, 22, 46, 35)], + [(17, 19, 24, 22), (18, 21, 23, 20), (6, 25, 43, 16), (7, 28, 42, 13), + (8, 30, 41, 11)], + [(25, 27, 32, 30), (26, 29, 31, 28), (3, 38, 43, 19), (5, 36, 45, 21), + (8, 33, 48, 24)], + [(33, 35, 40, 38), (34, 37, 39, 36), (3, 9, 46, 32), (2, 12, 47, 29), + (1, 14, 48, 27)], + [(41, 43, 48, 46), (42, 45, 47, 44), (14, 22, 30, 38), + (15, 23, 31, 39), (16, 24, 32, 40)] + ] + return [Permutation([[i - 1 for i in xi] for xi in x], size=48) for x in a] + + +def rubik(n): + """Return permutations for an nxn Rubik's cube. + + Permutations returned are for rotation of each of the slice + from the face up to the last face for each of the 3 sides (in this order): + front, right and bottom. Hence, the first n - 1 permutations are for the + slices from the front. + """ + + if n < 2: + raise ValueError('dimension of cube must be > 1') + + # 1-based reference to rows and columns in Matrix + def getr(f, i): + return faces[f].col(n - i) + + def getl(f, i): + return faces[f].col(i - 1) + + def getu(f, i): + return faces[f].row(i - 1) + + def getd(f, i): + return faces[f].row(n - i) + + def setr(f, i, s): + faces[f][:, n - i] = Matrix(n, 1, s) + + def setl(f, i, s): + faces[f][:, i - 1] = Matrix(n, 1, s) + + def setu(f, i, s): + faces[f][i - 1, :] = Matrix(1, n, s) + + def setd(f, i, s): + faces[f][n - i, :] = Matrix(1, n, s) + + # motion of a single face + def cw(F, r=1): + for _ in range(r): + face = faces[F] + rv = [] + for c in range(n): + for r in range(n - 1, -1, -1): + rv.append(face[r, c]) + faces[F] = Matrix(n, n, rv) + + def ccw(F): + cw(F, 3) + + # motion of plane i from the F side; + # fcw(0) moves the F face, fcw(1) moves the plane + # just behind the front face, etc... 
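+    # Each quarter-turn cycles the strips of the four faces bordering F:
+    # the strip taken from U moves onto R, R's onto D, D's onto L and L's
+    # back onto U, with two of the copied strips reversed so that sticker
+    # order is preserved around the cycle.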
+ def fcw(i, r=1): + for _ in range(r): + if i == 0: + cw(F) + i += 1 + temp = getr(L, i) + setr(L, i, list(getu(D, i))) + setu(D, i, list(reversed(getl(R, i)))) + setl(R, i, list(getd(U, i))) + setd(U, i, list(reversed(temp))) + i -= 1 + + def fccw(i): + fcw(i, 3) + + # motion of the entire cube from the F side + def FCW(r=1): + for _ in range(r): + cw(F) + ccw(B) + cw(U) + t = faces[U] + cw(L) + faces[U] = faces[L] + cw(D) + faces[L] = faces[D] + cw(R) + faces[D] = faces[R] + faces[R] = t + + def FCCW(): + FCW(3) + + # motion of the entire cube from the U side + def UCW(r=1): + for _ in range(r): + cw(U) + ccw(D) + t = faces[F] + faces[F] = faces[R] + faces[R] = faces[B] + faces[B] = faces[L] + faces[L] = t + + def UCCW(): + UCW(3) + + # defining the permutations for the cube + + U, F, R, B, L, D = names = symbols('U, F, R, B, L, D') + + # the faces are represented by nxn matrices + faces = {} + count = 0 + for fi in range(6): + f = [] + for a in range(n**2): + f.append(count) + count += 1 + faces[names[fi]] = Matrix(n, n, f) + + # this will either return the value of the current permutation + # (show != 1) or else append the permutation to the group, g + def perm(show=0): + # add perm to the list of perms + p = [] + for f in names: + p.extend(faces[f]) + if show: + return p + g.append(Permutation(p)) + + g = [] # container for the group's permutations + I = list(range(6*n**2)) # the identity permutation used for checking + + # define permutations corresponding to cw rotations of the planes + # up TO the last plane from that direction; by not including the + # last plane, the orientation of the cube is maintained. + + # F slices + for i in range(n - 1): + fcw(i) + perm() + fccw(i) # restore + assert perm(1) == I + + # R slices + # bring R to front + UCW() + for i in range(n - 1): + fcw(i) + # put it back in place + UCCW() + # record + perm() + # restore + # bring face to front + UCW() + fccw(i) + # restore + UCCW() + assert perm(1) == I + + # D slices + # bring up bottom + FCW() + UCCW() + FCCW() + for i in range(n - 1): + # turn strip + fcw(i) + # put bottom back on the bottom + FCW() + UCW() + FCCW() + # record + perm() + # restore + # bring up bottom + FCW() + UCCW() + FCCW() + # turn strip + fccw(i) + # put bottom back on the bottom + FCW() + UCW() + FCCW() + assert perm(1) == I + + return g diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/graycode.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/graycode.py new file mode 100644 index 0000000000000000000000000000000000000000..930fd337862a70e920a985947d74375b27741293 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/graycode.py @@ -0,0 +1,430 @@ +from sympy.core import Basic, Integer + +import random + + +class GrayCode(Basic): + """ + A Gray code is essentially a Hamiltonian walk on + a n-dimensional cube with edge length of one. + The vertices of the cube are represented by vectors + whose values are binary. The Hamilton walk visits + each vertex exactly once. The Gray code for a 3d + cube is ['000','100','110','010','011','111','101', + '001']. + + A Gray code solves the problem of sequentially + generating all possible subsets of n objects in such + a way that each subset is obtained from the previous + one by either deleting or adding a single object. + In the above example, 1 indicates that the object is + present, and 0 indicates that its absent. 
+ + Gray codes have applications in statistics as well when + we want to compute various statistics related to subsets + in an efficient manner. + + Examples + ======== + + >>> from sympy.combinatorics import GrayCode + >>> a = GrayCode(3) + >>> list(a.generate_gray()) + ['000', '001', '011', '010', '110', '111', '101', '100'] + >>> a = GrayCode(4) + >>> list(a.generate_gray()) + ['0000', '0001', '0011', '0010', '0110', '0111', '0101', '0100', \ + '1100', '1101', '1111', '1110', '1010', '1011', '1001', '1000'] + + References + ========== + + .. [1] Nijenhuis,A. and Wilf,H.S.(1978). + Combinatorial Algorithms. Academic Press. + .. [2] Knuth, D. (2011). The Art of Computer Programming, Vol 4 + Addison Wesley + + + """ + + _skip = False + _current = 0 + _rank = None + + def __new__(cls, n, *args, **kw_args): + """ + Default constructor. + + It takes a single argument ``n`` which gives the dimension of the Gray + code. The starting Gray code string (``start``) or the starting ``rank`` + may also be given; the default is to start at rank = 0 ('0...0'). + + Examples + ======== + + >>> from sympy.combinatorics import GrayCode + >>> a = GrayCode(3) + >>> a + GrayCode(3) + >>> a.n + 3 + + >>> a = GrayCode(3, start='100') + >>> a.current + '100' + + >>> a = GrayCode(4, rank=4) + >>> a.current + '0110' + >>> a.rank + 4 + + """ + if n < 1 or int(n) != n: + raise ValueError( + 'Gray code dimension must be a positive integer, not %i' % n) + n = Integer(n) + args = (n,) + args + obj = Basic.__new__(cls, *args) + if 'start' in kw_args: + obj._current = kw_args["start"] + if len(obj._current) > n: + raise ValueError('Gray code start has length %i but ' + 'should not be greater than %i' % (len(obj._current), n)) + elif 'rank' in kw_args: + if int(kw_args["rank"]) != kw_args["rank"]: + raise ValueError('Gray code rank must be a positive integer, ' + 'not %i' % kw_args["rank"]) + obj._rank = int(kw_args["rank"]) % obj.selections + obj._current = obj.unrank(n, obj._rank) + return obj + + def next(self, delta=1): + """ + Returns the Gray code a distance ``delta`` (default = 1) from the + current value in canonical order. + + + Examples + ======== + + >>> from sympy.combinatorics import GrayCode + >>> a = GrayCode(3, start='110') + >>> a.next().current + '111' + >>> a.next(-1).current + '010' + """ + return GrayCode(self.n, rank=(self.rank + delta) % self.selections) + + @property + def selections(self): + """ + Returns the number of bit vectors in the Gray code. + + Examples + ======== + + >>> from sympy.combinatorics import GrayCode + >>> a = GrayCode(3) + >>> a.selections + 8 + """ + return 2**self.n + + @property + def n(self): + """ + Returns the dimension of the Gray code. + + Examples + ======== + + >>> from sympy.combinatorics import GrayCode + >>> a = GrayCode(5) + >>> a.n + 5 + """ + return self.args[0] + + def generate_gray(self, **hints): + """ + Generates the sequence of bit vectors of a Gray Code. + + Examples + ======== + + >>> from sympy.combinatorics import GrayCode + >>> a = GrayCode(3) + >>> list(a.generate_gray()) + ['000', '001', '011', '010', '110', '111', '101', '100'] + >>> list(a.generate_gray(start='011')) + ['011', '010', '110', '111', '101', '100'] + >>> list(a.generate_gray(rank=4)) + ['110', '111', '101', '100'] + + See Also + ======== + + skip + + References + ========== + + .. [1] Knuth, D. (2011). 
The Art of Computer Programming, + Vol 4, Addison Wesley + + """ + bits = self.n + start = None + if "start" in hints: + start = hints["start"] + elif "rank" in hints: + start = GrayCode.unrank(self.n, hints["rank"]) + if start is not None: + self._current = start + current = self.current + graycode_bin = gray_to_bin(current) + if len(graycode_bin) > self.n: + raise ValueError('Gray code start has length %i but should ' + 'not be greater than %i' % (len(graycode_bin), bits)) + self._current = int(current, 2) + graycode_int = int(''.join(graycode_bin), 2) + for i in range(graycode_int, 1 << bits): + if self._skip: + self._skip = False + else: + yield self.current + bbtc = (i ^ (i + 1)) + gbtc = (bbtc ^ (bbtc >> 1)) + self._current = (self._current ^ gbtc) + self._current = 0 + + def skip(self): + """ + Skips the bit generation. + + Examples + ======== + + >>> from sympy.combinatorics import GrayCode + >>> a = GrayCode(3) + >>> for i in a.generate_gray(): + ... if i == '010': + ... a.skip() + ... print(i) + ... + 000 + 001 + 011 + 010 + 111 + 101 + 100 + + See Also + ======== + + generate_gray + """ + self._skip = True + + @property + def rank(self): + """ + Ranks the Gray code. + + A ranking algorithm determines the position (or rank) + of a combinatorial object among all the objects w.r.t. + a given order. For example, the 4 bit binary reflected + Gray code (BRGC) '0101' has a rank of 6 as it appears in + the 6th position in the canonical ordering of the family + of 4 bit Gray codes. + + Examples + ======== + + >>> from sympy.combinatorics import GrayCode + >>> a = GrayCode(3) + >>> list(a.generate_gray()) + ['000', '001', '011', '010', '110', '111', '101', '100'] + >>> GrayCode(3, start='100').rank + 7 + >>> GrayCode(3, rank=7).current + '100' + + See Also + ======== + + unrank + + References + ========== + + .. [1] https://web.archive.org/web/20200224064753/http://statweb.stanford.edu/~susan/courses/s208/node12.html + + """ + if self._rank is None: + self._rank = int(gray_to_bin(self.current), 2) + return self._rank + + @property + def current(self): + """ + Returns the currently referenced Gray code as a bit string. + + Examples + ======== + + >>> from sympy.combinatorics import GrayCode + >>> GrayCode(3, start='100').current + '100' + """ + rv = self._current or '0' + if not isinstance(rv, str): + rv = bin(rv)[2:] + return rv.rjust(self.n, '0') + + @classmethod + def unrank(self, n, rank): + """ + Unranks an n-bit sized Gray code of rank k. This method exists + so that a derivative GrayCode class can define its own code of + a given rank. + + The string here is generated in reverse order to allow for tail-call + optimization. + + Examples + ======== + + >>> from sympy.combinatorics import GrayCode + >>> GrayCode(5, rank=3).current + '00010' + >>> GrayCode.unrank(5, 3) + '00010' + + See Also + ======== + + rank + """ + def _unrank(k, n): + if n == 1: + return str(k % 2) + m = 2**(n - 1) + if k < m: + return '0' + _unrank(k, n - 1) + return '1' + _unrank(m - (k % m) - 1, n - 1) + return _unrank(rank, n) + + +def random_bitstring(n): + """ + Generates a random bitlist of length n. + + Examples + ======== + + >>> from sympy.combinatorics.graycode import random_bitstring + >>> random_bitstring(3) # doctest: +SKIP + 100 + """ + return ''.join([random.choice('01') for i in range(n)]) + + +def gray_to_bin(bin_list): + """ + Convert from Gray coding to binary coding. + + We assume big endian encoding. 
+ + Examples + ======== + + >>> from sympy.combinatorics.graycode import gray_to_bin + >>> gray_to_bin('100') + '111' + + See Also + ======== + + bin_to_gray + """ + b = [bin_list[0]] + for i in range(1, len(bin_list)): + b += str(int(b[i - 1] != bin_list[i])) + return ''.join(b) + + +def bin_to_gray(bin_list): + """ + Convert from binary coding to gray coding. + + We assume big endian encoding. + + Examples + ======== + + >>> from sympy.combinatorics.graycode import bin_to_gray + >>> bin_to_gray('111') + '100' + + See Also + ======== + + gray_to_bin + """ + b = [bin_list[0]] + for i in range(1, len(bin_list)): + b += str(int(bin_list[i]) ^ int(bin_list[i - 1])) + return ''.join(b) + + +def get_subset_from_bitstring(super_set, bitstring): + """ + Gets the subset defined by the bitstring. + + Examples + ======== + + >>> from sympy.combinatorics.graycode import get_subset_from_bitstring + >>> get_subset_from_bitstring(['a', 'b', 'c', 'd'], '0011') + ['c', 'd'] + >>> get_subset_from_bitstring(['c', 'a', 'c', 'c'], '1100') + ['c', 'a'] + + See Also + ======== + + graycode_subsets + """ + if len(super_set) != len(bitstring): + raise ValueError("The sizes of the lists are not equal") + return [super_set[i] for i, j in enumerate(bitstring) + if bitstring[i] == '1'] + + +def graycode_subsets(gray_code_set): + """ + Generates the subsets as enumerated by a Gray code. + + Examples + ======== + + >>> from sympy.combinatorics.graycode import graycode_subsets + >>> list(graycode_subsets(['a', 'b', 'c'])) + [[], ['c'], ['b', 'c'], ['b'], ['a', 'b'], ['a', 'b', 'c'], \ + ['a', 'c'], ['a']] + >>> list(graycode_subsets(['a', 'b', 'c', 'c'])) + [[], ['c'], ['c', 'c'], ['c'], ['b', 'c'], ['b', 'c', 'c'], \ + ['b', 'c'], ['b'], ['a', 'b'], ['a', 'b', 'c'], ['a', 'b', 'c', 'c'], \ + ['a', 'b', 'c'], ['a', 'c'], ['a', 'c', 'c'], ['a', 'c'], ['a']] + + See Also + ======== + + get_subset_from_bitstring + """ + for bitstring in list(GrayCode(len(gray_code_set)).generate_gray()): + yield get_subset_from_bitstring(gray_code_set, bitstring) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/named_groups.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/named_groups.py new file mode 100644 index 0000000000000000000000000000000000000000..59f10c40ef716e3b644e00f936323e9f6936eb88 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/named_groups.py @@ -0,0 +1,332 @@ +from sympy.combinatorics.group_constructs import DirectProduct +from sympy.combinatorics.perm_groups import PermutationGroup +from sympy.combinatorics.permutations import Permutation + +_af_new = Permutation._af_new + + +def AbelianGroup(*cyclic_orders): + """ + Returns the direct product of cyclic groups with the given orders. + + Explanation + =========== + + According to the structure theorem for finite abelian groups ([1]), + every finite abelian group can be written as the direct product of + finitely many cyclic groups. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import AbelianGroup + >>> AbelianGroup(3, 4) + PermutationGroup([ + (6)(0 1 2), + (3 4 5 6)]) + >>> _.is_group + True + + See Also + ======== + + DirectProduct + + References + ========== + + .. 
[1] https://groupprops.subwiki.org/wiki/Structure_theorem_for_finitely_generated_abelian_groups + + """ + groups = [] + degree = 0 + order = 1 + for size in cyclic_orders: + degree += size + order *= size + groups.append(CyclicGroup(size)) + G = DirectProduct(*groups) + G._is_abelian = True + G._degree = degree + G._order = order + + return G + + +def AlternatingGroup(n): + """ + Generates the alternating group on ``n`` elements as a permutation group. + + Explanation + =========== + + For ``n > 2``, the generators taken are ``(0 1 2), (0 1 2 ... n-1)`` for + ``n`` odd + and ``(0 1 2), (1 2 ... n-1)`` for ``n`` even (See [1], p.31, ex.6.9.). + After the group is generated, some of its basic properties are set. + The cases ``n = 1, 2`` are handled separately. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import AlternatingGroup + >>> G = AlternatingGroup(4) + >>> G.is_group + True + >>> a = list(G.generate_dimino()) + >>> len(a) + 12 + >>> all(perm.is_even for perm in a) + True + + See Also + ======== + + SymmetricGroup, CyclicGroup, DihedralGroup + + References + ========== + + .. [1] Armstrong, M. "Groups and Symmetry" + + """ + # small cases are special + if n in (1, 2): + return PermutationGroup([Permutation([0])]) + + a = list(range(n)) + a[0], a[1], a[2] = a[1], a[2], a[0] + gen1 = a + if n % 2: + a = list(range(1, n)) + a.append(0) + gen2 = a + else: + a = list(range(2, n)) + a.append(1) + a.insert(0, 0) + gen2 = a + gens = [gen1, gen2] + if gen1 == gen2: + gens = gens[:1] + G = PermutationGroup([_af_new(a) for a in gens], dups=False) + + set_alternating_group_properties(G, n, n) + G._is_alt = True + return G + + +def set_alternating_group_properties(G, n, degree): + """Set known properties of an alternating group. """ + if n < 4: + G._is_abelian = True + G._is_nilpotent = True + else: + G._is_abelian = False + G._is_nilpotent = False + if n < 5: + G._is_solvable = True + else: + G._is_solvable = False + G._degree = degree + G._is_transitive = True + G._is_dihedral = False + + +def CyclicGroup(n): + """ + Generates the cyclic group of order ``n`` as a permutation group. + + Explanation + =========== + + The generator taken is the ``n``-cycle ``(0 1 2 ... n-1)`` + (in cycle notation). After the group is generated, some of its basic + properties are set. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import CyclicGroup + >>> G = CyclicGroup(6) + >>> G.is_group + True + >>> G.order() + 6 + >>> list(G.generate_schreier_sims(af=True)) + [[0, 1, 2, 3, 4, 5], [1, 2, 3, 4, 5, 0], [2, 3, 4, 5, 0, 1], + [3, 4, 5, 0, 1, 2], [4, 5, 0, 1, 2, 3], [5, 0, 1, 2, 3, 4]] + + See Also + ======== + + SymmetricGroup, DihedralGroup, AlternatingGroup + + """ + a = list(range(1, n)) + a.append(0) + gen = _af_new(a) + G = PermutationGroup([gen]) + + G._is_abelian = True + G._is_nilpotent = True + G._is_solvable = True + G._degree = n + G._is_transitive = True + G._order = n + G._is_dihedral = (n == 2) + return G + + +def DihedralGroup(n): + r""" + Generates the dihedral group `D_n` as a permutation group. + + Explanation + =========== + + The dihedral group `D_n` is the group of symmetries of the regular + ``n``-gon. The generators taken are the ``n``-cycle ``a = (0 1 2 ... n-1)`` + (a rotation of the ``n``-gon) and ``b = (0 n-1)(1 n-2)...`` + (a reflection of the ``n``-gon) in cycle rotation. It is easy to see that + these satisfy ``a**n = b**2 = 1`` and ``bab = ~a`` so they indeed generate + `D_n` (See [1]). 
After the group is generated, some of its basic properties + are set. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import DihedralGroup + >>> G = DihedralGroup(5) + >>> G.is_group + True + >>> a = list(G.generate_dimino()) + >>> [perm.cyclic_form for perm in a] + [[], [[0, 1, 2, 3, 4]], [[0, 2, 4, 1, 3]], + [[0, 3, 1, 4, 2]], [[0, 4, 3, 2, 1]], [[0, 4], [1, 3]], + [[1, 4], [2, 3]], [[0, 1], [2, 4]], [[0, 2], [3, 4]], + [[0, 3], [1, 2]]] + + See Also + ======== + + SymmetricGroup, CyclicGroup, AlternatingGroup + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Dihedral_group + + """ + # small cases are special + if n == 1: + return PermutationGroup([Permutation([1, 0])]) + if n == 2: + return PermutationGroup([Permutation([1, 0, 3, 2]), + Permutation([2, 3, 0, 1]), Permutation([3, 2, 1, 0])]) + + a = list(range(1, n)) + a.append(0) + gen1 = _af_new(a) + a = list(range(n)) + a.reverse() + gen2 = _af_new(a) + G = PermutationGroup([gen1, gen2]) + # if n is a power of 2, group is nilpotent + if n & (n-1) == 0: + G._is_nilpotent = True + else: + G._is_nilpotent = False + G._is_dihedral = True + G._is_abelian = False + G._is_solvable = True + G._degree = n + G._is_transitive = True + G._order = 2*n + return G + + +def SymmetricGroup(n): + """ + Generates the symmetric group on ``n`` elements as a permutation group. + + Explanation + =========== + + The generators taken are the ``n``-cycle + ``(0 1 2 ... n-1)`` and the transposition ``(0 1)`` (in cycle notation). + (See [1]). After the group is generated, some of its basic properties + are set. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import SymmetricGroup + >>> G = SymmetricGroup(4) + >>> G.is_group + True + >>> G.order() + 24 + >>> list(G.generate_schreier_sims(af=True)) + [[0, 1, 2, 3], [1, 2, 3, 0], [2, 3, 0, 1], [3, 1, 2, 0], [0, 2, 3, 1], + [1, 3, 0, 2], [2, 0, 1, 3], [3, 2, 0, 1], [0, 3, 1, 2], [1, 0, 2, 3], + [2, 1, 3, 0], [3, 0, 1, 2], [0, 1, 3, 2], [1, 2, 0, 3], [2, 3, 1, 0], + [3, 1, 0, 2], [0, 2, 1, 3], [1, 3, 2, 0], [2, 0, 3, 1], [3, 2, 1, 0], + [0, 3, 2, 1], [1, 0, 3, 2], [2, 1, 0, 3], [3, 0, 2, 1]] + + See Also + ======== + + CyclicGroup, DihedralGroup, AlternatingGroup + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Symmetric_group#Generators_and_relations + + """ + if n == 1: + G = PermutationGroup([Permutation([0])]) + elif n == 2: + G = PermutationGroup([Permutation([1, 0])]) + else: + a = list(range(1, n)) + a.append(0) + gen1 = _af_new(a) + a = list(range(n)) + a[0], a[1] = a[1], a[0] + gen2 = _af_new(a) + G = PermutationGroup([gen1, gen2]) + set_symmetric_group_properties(G, n, n) + G._is_sym = True + return G + + +def set_symmetric_group_properties(G, n, degree): + """Set known properties of a symmetric group. """ + if n < 3: + G._is_abelian = True + G._is_nilpotent = True + else: + G._is_abelian = False + G._is_nilpotent = False + if n < 5: + G._is_solvable = True + else: + G._is_solvable = False + G._degree = degree + G._is_transitive = True + G._is_dihedral = (n in [2, 3]) # cf Landau's func and Stirling's approx + + +def RubikGroup(n): + """Return a group of Rubik's cube generators + + >>> from sympy.combinatorics.named_groups import RubikGroup + >>> RubikGroup(2).is_group + True + """ + from sympy.combinatorics.generators import rubik + if n <= 1: + raise ValueError("Invalid cube. 
n has to be greater than 1") + return PermutationGroup(rubik(n)) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/partitions.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/partitions.py new file mode 100644 index 0000000000000000000000000000000000000000..dfe646baabbb5bf2350cba859a265ac32bbfaf53 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/partitions.py @@ -0,0 +1,745 @@ +from sympy.core import Basic, Dict, sympify, Tuple +from sympy.core.numbers import Integer +from sympy.core.sorting import default_sort_key +from sympy.core.sympify import _sympify +from sympy.functions.combinatorial.numbers import bell +from sympy.matrices import zeros +from sympy.sets.sets import FiniteSet, Union +from sympy.utilities.iterables import flatten, group +from sympy.utilities.misc import as_int + + +from collections import defaultdict + + +class Partition(FiniteSet): + """ + This class represents an abstract partition. + + A partition is a set of disjoint sets whose union equals a given set. + + See Also + ======== + + sympy.utilities.iterables.partitions, + sympy.utilities.iterables.multiset_partitions + """ + + _rank = None + _partition = None + + def __new__(cls, *partition): + """ + Generates a new partition object. + + This method also verifies if the arguments passed are + valid and raises a ValueError if they are not. + + Examples + ======== + + Creating Partition from Python lists: + + >>> from sympy.combinatorics import Partition + >>> a = Partition([1, 2], [3]) + >>> a + Partition({3}, {1, 2}) + >>> a.partition + [[1, 2], [3]] + >>> len(a) + 2 + >>> a.members + (1, 2, 3) + + Creating Partition from Python sets: + + >>> Partition({1, 2, 3}, {4, 5}) + Partition({4, 5}, {1, 2, 3}) + + Creating Partition from SymPy finite sets: + + >>> from sympy import FiniteSet + >>> a = FiniteSet(1, 2, 3) + >>> b = FiniteSet(4, 5) + >>> Partition(a, b) + Partition({4, 5}, {1, 2, 3}) + """ + args = [] + dups = False + for arg in partition: + if isinstance(arg, list): + as_set = set(arg) + if len(as_set) < len(arg): + dups = True + break # error below + arg = as_set + args.append(_sympify(arg)) + + if not all(isinstance(part, FiniteSet) for part in args): + raise ValueError( + "Each argument to Partition should be " \ + "a list, set, or a FiniteSet") + + # sort so we have a canonical reference for RGS + U = Union(*args) + if dups or len(U) < sum(len(arg) for arg in args): + raise ValueError("Partition contained duplicate elements.") + + obj = FiniteSet.__new__(cls, *args) + obj.members = tuple(U) + obj.size = len(U) + return obj + + def sort_key(self, order=None): + """Return a canonical key that can be used for sorting. + + Ordering is based on the size and sorted elements of the partition + and ties are broken with the rank. 
+ + Examples + ======== + + >>> from sympy import default_sort_key + >>> from sympy.combinatorics import Partition + >>> from sympy.abc import x + >>> a = Partition([1, 2]) + >>> b = Partition([3, 4]) + >>> c = Partition([1, x]) + >>> d = Partition(list(range(4))) + >>> l = [d, b, a + 1, a, c] + >>> l.sort(key=default_sort_key); l + [Partition({1, 2}), Partition({1}, {2}), Partition({1, x}), Partition({3, 4}), Partition({0, 1, 2, 3})] + """ + if order is None: + members = self.members + else: + members = tuple(sorted(self.members, + key=lambda w: default_sort_key(w, order))) + return tuple(map(default_sort_key, (self.size, members, self.rank))) + + @property + def partition(self): + """Return partition as a sorted list of lists. + + Examples + ======== + + >>> from sympy.combinatorics import Partition + >>> Partition([1], [2, 3]).partition + [[1], [2, 3]] + """ + if self._partition is None: + self._partition = sorted([sorted(p, key=default_sort_key) + for p in self.args]) + return self._partition + + def __add__(self, other): + """ + Return permutation whose rank is ``other`` greater than current rank, + (mod the maximum rank for the set). + + Examples + ======== + + >>> from sympy.combinatorics import Partition + >>> a = Partition([1, 2], [3]) + >>> a.rank + 1 + >>> (a + 1).rank + 2 + >>> (a + 100).rank + 1 + """ + other = as_int(other) + offset = self.rank + other + result = RGS_unrank((offset) % + RGS_enum(self.size), + self.size) + return Partition.from_rgs(result, self.members) + + def __sub__(self, other): + """ + Return permutation whose rank is ``other`` less than current rank, + (mod the maximum rank for the set). + + Examples + ======== + + >>> from sympy.combinatorics import Partition + >>> a = Partition([1, 2], [3]) + >>> a.rank + 1 + >>> (a - 1).rank + 0 + >>> (a - 100).rank + 1 + """ + return self.__add__(-other) + + def __le__(self, other): + """ + Checks if a partition is less than or equal to + the other based on rank. + + Examples + ======== + + >>> from sympy.combinatorics import Partition + >>> a = Partition([1, 2], [3, 4, 5]) + >>> b = Partition([1], [2, 3], [4], [5]) + >>> a.rank, b.rank + (9, 34) + >>> a <= a + True + >>> a <= b + True + """ + return self.sort_key() <= sympify(other).sort_key() + + def __lt__(self, other): + """ + Checks if a partition is less than the other. + + Examples + ======== + + >>> from sympy.combinatorics import Partition + >>> a = Partition([1, 2], [3, 4, 5]) + >>> b = Partition([1], [2, 3], [4], [5]) + >>> a.rank, b.rank + (9, 34) + >>> a < b + True + """ + return self.sort_key() < sympify(other).sort_key() + + @property + def rank(self): + """ + Gets the rank of a partition. + + Examples + ======== + + >>> from sympy.combinatorics import Partition + >>> a = Partition([1, 2], [3], [4, 5]) + >>> a.rank + 13 + """ + if self._rank is not None: + return self._rank + self._rank = RGS_rank(self.RGS) + return self._rank + + @property + def RGS(self): + """ + Returns the "restricted growth string" of the partition. + + Explanation + =========== + + The RGS is returned as a list of indices, L, where L[i] indicates + the block in which element i appears. For example, in a partition + of 3 elements (a, b, c) into 2 blocks ([c], [a, b]) the RGS is + [1, 1, 0]: "a" is in block 1, "b" is in block 1 and "c" is in block 0. 
+ + Examples + ======== + + >>> from sympy.combinatorics import Partition + >>> a = Partition([1, 2], [3], [4, 5]) + >>> a.members + (1, 2, 3, 4, 5) + >>> a.RGS + (0, 0, 1, 2, 2) + >>> a + 1 + Partition({3}, {4}, {5}, {1, 2}) + >>> _.RGS + (0, 0, 1, 2, 3) + """ + rgs = {} + partition = self.partition + for i, part in enumerate(partition): + for j in part: + rgs[j] = i + return tuple([rgs[i] for i in sorted( + [i for p in partition for i in p], key=default_sort_key)]) + + @classmethod + def from_rgs(self, rgs, elements): + """ + Creates a set partition from a restricted growth string. + + Explanation + =========== + + The indices given in rgs are assumed to be the index + of the element as given in elements *as provided* (the + elements are not sorted by this routine). Block numbering + starts from 0. If any block was not referenced in ``rgs`` + an error will be raised. + + Examples + ======== + + >>> from sympy.combinatorics import Partition + >>> Partition.from_rgs([0, 1, 2, 0, 1], list('abcde')) + Partition({c}, {a, d}, {b, e}) + >>> Partition.from_rgs([0, 1, 2, 0, 1], list('cbead')) + Partition({e}, {a, c}, {b, d}) + >>> a = Partition([1, 4], [2], [3, 5]) + >>> Partition.from_rgs(a.RGS, a.members) + Partition({2}, {1, 4}, {3, 5}) + """ + if len(rgs) != len(elements): + raise ValueError('mismatch in rgs and element lengths') + max_elem = max(rgs) + 1 + partition = [[] for i in range(max_elem)] + j = 0 + for i in rgs: + partition[i].append(elements[j]) + j += 1 + if not all(p for p in partition): + raise ValueError('some blocks of the partition were empty.') + return Partition(*partition) + + +class IntegerPartition(Basic): + """ + This class represents an integer partition. + + Explanation + =========== + + In number theory and combinatorics, a partition of a positive integer, + ``n``, also called an integer partition, is a way of writing ``n`` as a + list of positive integers that sum to n. Two partitions that differ only + in the order of summands are considered to be the same partition; if order + matters then the partitions are referred to as compositions. For example, + 4 has five partitions: [4], [3, 1], [2, 2], [2, 1, 1], and [1, 1, 1, 1]; + the compositions [1, 2, 1] and [1, 1, 2] are the same as partition + [2, 1, 1]. + + See Also + ======== + + sympy.utilities.iterables.partitions, + sympy.utilities.iterables.multiset_partitions + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Partition_%28number_theory%29 + """ + + _dict = None + _keys = None + + def __new__(cls, partition, integer=None): + """ + Generates a new IntegerPartition object from a list or dictionary. + + Explanation + =========== + + The partition can be given as a list of positive integers or a + dictionary of (integer, multiplicity) items. If the partition is + preceded by an integer an error will be raised if the partition + does not sum to that given integer. + + Examples + ======== + + >>> from sympy.combinatorics.partitions import IntegerPartition + >>> a = IntegerPartition([5, 4, 3, 1, 1]) + >>> a + IntegerPartition(14, (5, 4, 3, 1, 1)) + >>> print(a) + [5, 4, 3, 1, 1] + >>> IntegerPartition({1:3, 2:1}) + IntegerPartition(5, (2, 1, 1, 1)) + + If the value that the partition should sum to is given first, a check + will be made to see n error will be raised if there is a discrepancy: + + >>> IntegerPartition(10, [5, 4, 3, 1]) + Traceback (most recent call last): + ... 
+ ValueError: The partition is not valid + + """ + if integer is not None: + integer, partition = partition, integer + if isinstance(partition, (dict, Dict)): + _ = [] + for k, v in sorted(partition.items(), reverse=True): + if not v: + continue + k, v = as_int(k), as_int(v) + _.extend([k]*v) + partition = tuple(_) + else: + partition = tuple(sorted(map(as_int, partition), reverse=True)) + sum_ok = False + if integer is None: + integer = sum(partition) + sum_ok = True + else: + integer = as_int(integer) + + if not sum_ok and sum(partition) != integer: + raise ValueError("Partition did not add to %s" % integer) + if any(i < 1 for i in partition): + raise ValueError("All integer summands must be greater than one") + + obj = Basic.__new__(cls, Integer(integer), Tuple(*partition)) + obj.partition = list(partition) + obj.integer = integer + return obj + + def prev_lex(self): + """Return the previous partition of the integer, n, in lexical order, + wrapping around to [1, ..., 1] if the partition is [n]. + + Examples + ======== + + >>> from sympy.combinatorics.partitions import IntegerPartition + >>> p = IntegerPartition([4]) + >>> print(p.prev_lex()) + [3, 1] + >>> p.partition > p.prev_lex().partition + True + """ + d = defaultdict(int) + d.update(self.as_dict()) + keys = self._keys + if keys == [1]: + return IntegerPartition({self.integer: 1}) + if keys[-1] != 1: + d[keys[-1]] -= 1 + if keys[-1] == 2: + d[1] = 2 + else: + d[keys[-1] - 1] = d[1] = 1 + else: + d[keys[-2]] -= 1 + left = d[1] + keys[-2] + new = keys[-2] + d[1] = 0 + while left: + new -= 1 + if left - new >= 0: + d[new] += left//new + left -= d[new]*new + return IntegerPartition(self.integer, d) + + def next_lex(self): + """Return the next partition of the integer, n, in lexical order, + wrapping around to [n] if the partition is [1, ..., 1]. + + Examples + ======== + + >>> from sympy.combinatorics.partitions import IntegerPartition + >>> p = IntegerPartition([3, 1]) + >>> print(p.next_lex()) + [4] + >>> p.partition < p.next_lex().partition + True + """ + d = defaultdict(int) + d.update(self.as_dict()) + key = self._keys + a = key[-1] + if a == self.integer: + d.clear() + d[1] = self.integer + elif a == 1: + if d[a] > 1: + d[a + 1] += 1 + d[a] -= 2 + else: + b = key[-2] + d[b + 1] += 1 + d[1] = (d[b] - 1)*b + d[b] = 0 + else: + if d[a] > 1: + if len(key) == 1: + d.clear() + d[a + 1] = 1 + d[1] = self.integer - a - 1 + else: + a1 = a + 1 + d[a1] += 1 + d[1] = d[a]*a - a1 + d[a] = 0 + else: + b = key[-2] + b1 = b + 1 + d[b1] += 1 + need = d[b]*b + d[a]*a - b1 + d[a] = d[b] = 0 + d[1] = need + return IntegerPartition(self.integer, d) + + def as_dict(self): + """Return the partition as a dictionary whose keys are the + partition integers and the values are the multiplicity of that + integer. + + Examples + ======== + + >>> from sympy.combinatorics.partitions import IntegerPartition + >>> IntegerPartition([1]*3 + [2] + [3]*4).as_dict() + {1: 3, 2: 1, 3: 4} + """ + if self._dict is None: + groups = group(self.partition, multiple=False) + self._keys = [g[0] for g in groups] + self._dict = dict(groups) + return self._dict + + @property + def conjugate(self): + """ + Computes the conjugate partition of itself. 
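+
+        The conjugate is the partition obtained by transposing the Ferrers
+        diagram: its ``j``-th part is the number of parts of the original
+        partition that are at least ``j``.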
+ + Examples + ======== + + >>> from sympy.combinatorics.partitions import IntegerPartition + >>> a = IntegerPartition([6, 3, 3, 2, 1]) + >>> a.conjugate + [5, 4, 3, 1, 1, 1] + """ + j = 1 + temp_arr = list(self.partition) + [0] + k = temp_arr[0] + b = [0]*k + while k > 0: + while k > temp_arr[j]: + b[k - 1] = j + k -= 1 + j += 1 + return b + + def __lt__(self, other): + """Return True if self is less than other when the partition + is listed from smallest to biggest. + + Examples + ======== + + >>> from sympy.combinatorics.partitions import IntegerPartition + >>> a = IntegerPartition([3, 1]) + >>> a < a + False + >>> b = a.next_lex() + >>> a < b + True + >>> a == b + False + """ + return list(reversed(self.partition)) < list(reversed(other.partition)) + + def __le__(self, other): + """Return True if self is less than other when the partition + is listed from smallest to biggest. + + Examples + ======== + + >>> from sympy.combinatorics.partitions import IntegerPartition + >>> a = IntegerPartition([4]) + >>> a <= a + True + """ + return list(reversed(self.partition)) <= list(reversed(other.partition)) + + def as_ferrers(self, char='#'): + """ + Prints the ferrer diagram of a partition. + + Examples + ======== + + >>> from sympy.combinatorics.partitions import IntegerPartition + >>> print(IntegerPartition([1, 1, 5]).as_ferrers()) + ##### + # + # + """ + return "\n".join([char*i for i in self.partition]) + + def __str__(self): + return str(list(self.partition)) + + +def random_integer_partition(n, seed=None): + """ + Generates a random integer partition summing to ``n`` as a list + of reverse-sorted integers. + + Examples + ======== + + >>> from sympy.combinatorics.partitions import random_integer_partition + + For the following, a seed is given so a known value can be shown; in + practice, the seed would not be given. + + >>> random_integer_partition(100, seed=[1, 1, 12, 1, 2, 1, 85, 1]) + [85, 12, 2, 1] + >>> random_integer_partition(10, seed=[1, 2, 3, 1, 5, 1]) + [5, 3, 1, 1] + >>> random_integer_partition(1) + [1] + """ + from sympy.core.random import _randint + + n = as_int(n) + if n < 1: + raise ValueError('n must be a positive integer') + + randint = _randint(seed) + + partition = [] + while (n > 0): + k = randint(1, n) + mult = randint(1, n//k) + partition.append((k, mult)) + n -= k*mult + partition.sort(reverse=True) + partition = flatten([[k]*m for k, m in partition]) + return partition + + +def RGS_generalized(m): + """ + Computes the m + 1 generalized unrestricted growth strings + and returns them as rows in matrix. + + Examples + ======== + + >>> from sympy.combinatorics.partitions import RGS_generalized + >>> RGS_generalized(6) + Matrix([ + [ 1, 1, 1, 1, 1, 1, 1], + [ 1, 2, 3, 4, 5, 6, 0], + [ 2, 5, 10, 17, 26, 0, 0], + [ 5, 15, 37, 77, 0, 0, 0], + [ 15, 52, 151, 0, 0, 0, 0], + [ 52, 203, 0, 0, 0, 0, 0], + [203, 0, 0, 0, 0, 0, 0]]) + """ + d = zeros(m + 1) + for i in range(m + 1): + d[0, i] = 1 + + for i in range(1, m + 1): + for j in range(m): + if j <= m - i: + d[i, j] = j * d[i - 1, j] + d[i - 1, j + 1] + else: + d[i, j] = 0 + return d + + +def RGS_enum(m): + """ + RGS_enum computes the total number of restricted growth strings + possible for a superset of size m. + + Examples + ======== + + >>> from sympy.combinatorics.partitions import RGS_enum + >>> from sympy.combinatorics import Partition + >>> RGS_enum(4) + 15 + >>> RGS_enum(5) + 52 + >>> RGS_enum(6) + 203 + + We can check that the enumeration is correct by actually generating + the partitions. 
Here, the 15 partitions of 4 items are generated: + + >>> a = Partition(list(range(4))) + >>> s = set() + >>> for i in range(20): + ... s.add(a) + ... a += 1 + ... + >>> assert len(s) == 15 + + """ + if (m < 1): + return 0 + elif (m == 1): + return 1 + else: + return bell(m) + + +def RGS_unrank(rank, m): + """ + Gives the unranked restricted growth string for a given + superset size. + + Examples + ======== + + >>> from sympy.combinatorics.partitions import RGS_unrank + >>> RGS_unrank(14, 4) + [0, 1, 2, 3] + >>> RGS_unrank(0, 4) + [0, 0, 0, 0] + """ + if m < 1: + raise ValueError("The superset size must be >= 1") + if rank < 0 or RGS_enum(m) <= rank: + raise ValueError("Invalid arguments") + + L = [1] * (m + 1) + j = 1 + D = RGS_generalized(m) + for i in range(2, m + 1): + v = D[m - i, j] + cr = j*v + if cr <= rank: + L[i] = j + 1 + rank -= cr + j += 1 + else: + L[i] = int(rank / v + 1) + rank %= v + return [x - 1 for x in L[1:]] + + +def RGS_rank(rgs): + """ + Computes the rank of a restricted growth string. + + Examples + ======== + + >>> from sympy.combinatorics.partitions import RGS_rank, RGS_unrank + >>> RGS_rank([0, 1, 2, 1, 3]) + 42 + >>> RGS_rank(RGS_unrank(4, 7)) + 4 + """ + rgs_size = len(rgs) + rank = 0 + D = RGS_generalized(rgs_size) + for i in range(1, rgs_size): + n = len(rgs[(i + 1):]) + m = max(rgs[0:i]) + rank += D[n, m + 1] * rgs[i] + return rank diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/pc_groups.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/pc_groups.py new file mode 100644 index 0000000000000000000000000000000000000000..dbb4b0e442ec70d4c088fb51924fadc30cdf2fbf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/pc_groups.py @@ -0,0 +1,709 @@ +from sympy.ntheory.primetest import isprime +from sympy.combinatorics.perm_groups import PermutationGroup +from sympy.printing.defaults import DefaultPrinting +from sympy.combinatorics.free_groups import free_group + + +class PolycyclicGroup(DefaultPrinting): + + is_group = True + is_solvable = True + + def __init__(self, pc_sequence, pc_series, relative_order, collector=None): + """ + + Parameters + ========== + + pc_sequence : list + A sequence of elements whose classes generate the cyclic factor + groups of pc_series. + pc_series : list + A subnormal sequence of subgroups where each factor group is cyclic. + relative_order : list + The orders of factor groups of pc_series. + collector : Collector + By default, it is None. Collector class provides the + polycyclic presentation with various other functionalities. + + """ + self.pcgs = pc_sequence + self.pc_series = pc_series + self.relative_order = relative_order + self.collector = Collector(self.pcgs, pc_series, relative_order) if not collector else collector + + def is_prime_order(self): + return all(isprime(order) for order in self.relative_order) + + def length(self): + return len(self.pcgs) + + +class Collector(DefaultPrinting): + + """ + References + ========== + + .. [1] Holt, D., Eick, B., O'Brien, E. + "Handbook of Computational Group Theory" + Section 8.1.3 + """ + + def __init__(self, pcgs, pc_series, relative_order, free_group_=None, pc_presentation=None): + """ + + Most of the parameters for the Collector class are the same as for PolycyclicGroup. + Others are described below. + + Parameters + ========== + + free_group_ : tuple + free_group_ provides the mapping of polycyclic generating + sequence with the free group elements. 
+ pc_presentation : dict + Provides the presentation of polycyclic groups with the + help of power and conjugate relators. + + See Also + ======== + + PolycyclicGroup + + """ + self.pcgs = pcgs + self.pc_series = pc_series + self.relative_order = relative_order + self.free_group = free_group('x:{}'.format(len(pcgs)))[0] if not free_group_ else free_group_ + self.index = {s: i for i, s in enumerate(self.free_group.symbols)} + self.pc_presentation = self.pc_relators() + + def minimal_uncollected_subword(self, word): + r""" + Returns the minimal uncollected subwords. + + Explanation + =========== + + A word ``v`` defined on generators in ``X`` is a minimal + uncollected subword of the word ``w`` if ``v`` is a subword + of ``w`` and it has one of the following form + + * `v = {x_{i+1}}^{a_j}x_i` + + * `v = {x_{i+1}}^{a_j}{x_i}^{-1}` + + * `v = {x_i}^{a_j}` + + for `a_j` not in `\{1, \ldots, s-1\}`. Where, ``s`` is the power + exponent of the corresponding generator. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import SymmetricGroup + >>> from sympy.combinatorics import free_group + >>> G = SymmetricGroup(4) + >>> PcGroup = G.polycyclic_group() + >>> collector = PcGroup.collector + >>> F, x1, x2 = free_group("x1, x2") + >>> word = x2**2*x1**7 + >>> collector.minimal_uncollected_subword(word) + ((x2, 2),) + + """ + # To handle the case word = + if not word: + return None + + array = word.array_form + re = self.relative_order + index = self.index + + for i in range(len(array)): + s1, e1 = array[i] + + if re[index[s1]] and (e1 < 0 or e1 > re[index[s1]]-1): + return ((s1, e1), ) + + for i in range(len(array)-1): + s1, e1 = array[i] + s2, e2 = array[i+1] + + if index[s1] > index[s2]: + e = 1 if e2 > 0 else -1 + return ((s1, e1), (s2, e)) + + return None + + def relations(self): + """ + Separates the given relators of pc presentation in power and + conjugate relations. + + Returns + ======= + + (power_rel, conj_rel) + Separates pc presentation into power and conjugate relations. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import SymmetricGroup + >>> G = SymmetricGroup(3) + >>> PcGroup = G.polycyclic_group() + >>> collector = PcGroup.collector + >>> power_rel, conj_rel = collector.relations() + >>> power_rel + {x0**2: (), x1**3: ()} + >>> conj_rel + {x0**-1*x1*x0: x1**2} + + See Also + ======== + + pc_relators + + """ + power_relators = {} + conjugate_relators = {} + for key, value in self.pc_presentation.items(): + if len(key.array_form) == 1: + power_relators[key] = value + else: + conjugate_relators[key] = value + return power_relators, conjugate_relators + + def subword_index(self, word, w): + """ + Returns the start and ending index of a given + subword in a word. + + Parameters + ========== + + word : FreeGroupElement + word defined on free group elements for a + polycyclic group. + w : FreeGroupElement + subword of a given word, whose starting and + ending index to be computed. + + Returns + ======= + + (i, j) + A tuple containing starting and ending index of ``w`` + in the given word. 
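+            If ``w`` does not occur as a subword of ``word``, the tuple
+            ``(-1, -1)`` is returned.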
+ + Examples + ======== + + >>> from sympy.combinatorics.named_groups import SymmetricGroup + >>> from sympy.combinatorics import free_group + >>> G = SymmetricGroup(4) + >>> PcGroup = G.polycyclic_group() + >>> collector = PcGroup.collector + >>> F, x1, x2 = free_group("x1, x2") + >>> word = x2**2*x1**7 + >>> w = x2**2*x1 + >>> collector.subword_index(word, w) + (0, 3) + >>> w = x1**7 + >>> collector.subword_index(word, w) + (2, 9) + + """ + low = -1 + high = -1 + for i in range(len(word)-len(w)+1): + if word.subword(i, i+len(w)) == w: + low = i + high = i+len(w) + break + if low == high == -1: + return -1, -1 + return low, high + + def map_relation(self, w): + """ + Return a conjugate relation. + + Explanation + =========== + + Given a word formed by two free group elements, the + corresponding conjugate relation with those free + group elements is formed and mapped with the collected + word in the polycyclic presentation. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import SymmetricGroup + >>> from sympy.combinatorics import free_group + >>> G = SymmetricGroup(3) + >>> PcGroup = G.polycyclic_group() + >>> collector = PcGroup.collector + >>> F, x0, x1 = free_group("x0, x1") + >>> w = x1*x0 + >>> collector.map_relation(w) + x1**2 + + See Also + ======== + + pc_presentation + + """ + array = w.array_form + s1 = array[0][0] + s2 = array[1][0] + key = ((s2, -1), (s1, 1), (s2, 1)) + key = self.free_group.dtype(key) + return self.pc_presentation[key] + + + def collected_word(self, word): + r""" + Return the collected form of a word. + + Explanation + =========== + + A word ``w`` is called collected, if `w = {x_{i_1}}^{a_1} * \ldots * + {x_{i_r}}^{a_r}` with `i_1 < i_2< \ldots < i_r` and `a_j` is in + `\{1, \ldots, {s_j}-1\}`. + + Otherwise w is uncollected. + + Parameters + ========== + + word : FreeGroupElement + An uncollected word. + + Returns + ======= + + word + A collected word of form `w = {x_{i_1}}^{a_1}, \ldots, + {x_{i_r}}^{a_r}` with `i_1, i_2, \ldots, i_r` and `a_j \in + \{1, \ldots, {s_j}-1\}`. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import SymmetricGroup + >>> from sympy.combinatorics.perm_groups import PermutationGroup + >>> from sympy.combinatorics import free_group + >>> G = SymmetricGroup(4) + >>> PcGroup = G.polycyclic_group() + >>> collector = PcGroup.collector + >>> F, x0, x1, x2, x3 = free_group("x0, x1, x2, x3") + >>> word = x3*x2*x1*x0 + >>> collected_word = collector.collected_word(word) + >>> free_to_perm = {} + >>> free_group = collector.free_group + >>> for sym, gen in zip(free_group.symbols, collector.pcgs): + ... free_to_perm[sym] = gen + >>> G1 = PermutationGroup() + >>> for w in word: + ... sym = w[0] + ... perm = free_to_perm[sym] + ... G1 = PermutationGroup([perm] + G1.generators) + >>> G2 = PermutationGroup() + >>> for w in collected_word: + ... sym = w[0] + ... perm = free_to_perm[sym] + ... 
G2 = PermutationGroup([perm] + G2.generators) + + The two are not identical, but they are equivalent: + + >>> G1.equals(G2), G1 == G2 + (True, False) + + See Also + ======== + + minimal_uncollected_subword + + """ + free_group = self.free_group + while True: + w = self.minimal_uncollected_subword(word) + if not w: + break + + low, high = self.subword_index(word, free_group.dtype(w)) + if low == -1: + continue + + s1, e1 = w[0] + if len(w) == 1: + re = self.relative_order[self.index[s1]] + q = e1 // re + r = e1-q*re + + key = ((w[0][0], re), ) + key = free_group.dtype(key) + if self.pc_presentation[key]: + presentation = self.pc_presentation[key].array_form + sym, exp = presentation[0] + word_ = ((w[0][0], r), (sym, q*exp)) + word_ = free_group.dtype(word_) + else: + if r != 0: + word_ = ((w[0][0], r), ) + word_ = free_group.dtype(word_) + else: + word_ = None + word = word.eliminate_word(free_group.dtype(w), word_) + + if len(w) == 2 and w[1][1] > 0: + s2, e2 = w[1] + s2 = ((s2, 1), ) + s2 = free_group.dtype(s2) + word_ = self.map_relation(free_group.dtype(w)) + word_ = s2*word_**e1 + word_ = free_group.dtype(word_) + word = word.substituted_word(low, high, word_) + + elif len(w) == 2 and w[1][1] < 0: + s2, e2 = w[1] + s2 = ((s2, 1), ) + s2 = free_group.dtype(s2) + word_ = self.map_relation(free_group.dtype(w)) + word_ = s2**-1*word_**e1 + word_ = free_group.dtype(word_) + word = word.substituted_word(low, high, word_) + + return word + + + def pc_relators(self): + r""" + Return the polycyclic presentation. + + Explanation + =========== + + There are two types of relations used in polycyclic + presentation. + + * Power relations : Power relators are of the form `x_i^{re_i}`, + where `i \in \{0, \ldots, \mathrm{len(pcgs)}\}`, ``x`` represents polycyclic + generator and ``re`` is the corresponding relative order. + + * Conjugate relations : Conjugate relators are of the form `x_j^-1x_ix_j`, + where `j < i \in \{0, \ldots, \mathrm{len(pcgs)}\}`. + + Returns + ======= + + A dictionary with power and conjugate relations as key and + their collected form as corresponding values. + + Notes + ===== + + Identity Permutation is mapped with empty ``()``. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import SymmetricGroup + >>> from sympy.combinatorics.permutations import Permutation + >>> S = SymmetricGroup(49).sylow_subgroup(7) + >>> der = S.derived_series() + >>> G = der[len(der)-2] + >>> PcGroup = G.polycyclic_group() + >>> collector = PcGroup.collector + >>> pcgs = PcGroup.pcgs + >>> len(pcgs) + 6 + >>> free_group = collector.free_group + >>> pc_resentation = collector.pc_presentation + >>> free_to_perm = {} + >>> for s, g in zip(free_group.symbols, pcgs): + ... free_to_perm[s] = g + + >>> for k, v in pc_resentation.items(): + ... k_array = k.array_form + ... if v != (): + ... v_array = v.array_form + ... lhs = Permutation() + ... for gen in k_array: + ... s = gen[0] + ... e = gen[1] + ... lhs = lhs*free_to_perm[s]**e + ... if v == (): + ... assert lhs.is_identity + ... continue + ... rhs = Permutation() + ... for gen in v_array: + ... s = gen[0] + ... e = gen[1] + ... rhs = rhs*free_to_perm[s]**e + ... 
assert lhs == rhs + + """ + free_group = self.free_group + rel_order = self.relative_order + pc_relators = {} + perm_to_free = {} + pcgs = self.pcgs + + for gen, s in zip(pcgs, free_group.generators): + perm_to_free[gen**-1] = s**-1 + perm_to_free[gen] = s + + pcgs = pcgs[::-1] + series = self.pc_series[::-1] + rel_order = rel_order[::-1] + collected_gens = [] + + for i, gen in enumerate(pcgs): + re = rel_order[i] + relation = perm_to_free[gen]**re + G = series[i] + + l = G.generator_product(gen**re, original = True) + l.reverse() + + word = free_group.identity + for g in l: + word = word*perm_to_free[g] + + word = self.collected_word(word) + pc_relators[relation] = word if word else () + self.pc_presentation = pc_relators + + collected_gens.append(gen) + if len(collected_gens) > 1: + conj = collected_gens[len(collected_gens)-1] + conjugator = perm_to_free[conj] + + for j in range(len(collected_gens)-1): + conjugated = perm_to_free[collected_gens[j]] + + relation = conjugator**-1*conjugated*conjugator + gens = conj**-1*collected_gens[j]*conj + + l = G.generator_product(gens, original = True) + l.reverse() + word = free_group.identity + for g in l: + word = word*perm_to_free[g] + + word = self.collected_word(word) + pc_relators[relation] = word if word else () + self.pc_presentation = pc_relators + + return pc_relators + + def exponent_vector(self, element): + r""" + Return the exponent vector of length equal to the + length of polycyclic generating sequence. + + Explanation + =========== + + For a given generator/element ``g`` of the polycyclic group, + it can be represented as `g = {x_1}^{e_1}, \ldots, {x_n}^{e_n}`, + where `x_i` represents polycyclic generators and ``n`` is + the number of generators in the free_group equal to the length + of pcgs. + + Parameters + ========== + + element : Permutation + Generator of a polycyclic group. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import SymmetricGroup + >>> from sympy.combinatorics.permutations import Permutation + >>> G = SymmetricGroup(4) + >>> PcGroup = G.polycyclic_group() + >>> collector = PcGroup.collector + >>> pcgs = PcGroup.pcgs + >>> collector.exponent_vector(G[0]) + [1, 0, 0, 0] + >>> exp = collector.exponent_vector(G[1]) + >>> g = Permutation() + >>> for i in range(len(exp)): + ... g = g*pcgs[i]**exp[i] if exp[i] else g + >>> assert g == G[1] + + References + ========== + + .. [1] Holt, D., Eick, B., O'Brien, E. + "Handbook of Computational Group Theory" + Section 8.1.1, Definition 8.4 + + """ + free_group = self.free_group + G = PermutationGroup() + for g in self.pcgs: + G = PermutationGroup([g] + G.generators) + gens = G.generator_product(element, original = True) + gens.reverse() + + perm_to_free = {} + for sym, g in zip(free_group.generators, self.pcgs): + perm_to_free[g**-1] = sym**-1 + perm_to_free[g] = sym + w = free_group.identity + for g in gens: + w = w*perm_to_free[g] + + word = self.collected_word(w) + + index = self.index + exp_vector = [0]*len(free_group) + word = word.array_form + for t in word: + exp_vector[index[t[0]]] = t[1] + return exp_vector + + def depth(self, element): + r""" + Return the depth of a given element. + + Explanation + =========== + + The depth of a given element ``g`` is defined by + `\mathrm{dep}[g] = i` if `e_1 = e_2 = \ldots = e_{i-1} = 0` + and `e_i != 0`, where ``e`` represents the exponent-vector. 
+ + Examples + ======== + + >>> from sympy.combinatorics.named_groups import SymmetricGroup + >>> G = SymmetricGroup(3) + >>> PcGroup = G.polycyclic_group() + >>> collector = PcGroup.collector + >>> collector.depth(G[0]) + 2 + >>> collector.depth(G[1]) + 1 + + References + ========== + + .. [1] Holt, D., Eick, B., O'Brien, E. + "Handbook of Computational Group Theory" + Section 8.1.1, Definition 8.5 + + """ + exp_vector = self.exponent_vector(element) + return next((i+1 for i, x in enumerate(exp_vector) if x), len(self.pcgs)+1) + + def leading_exponent(self, element): + r""" + Return the leading non-zero exponent. + + Explanation + =========== + + The leading exponent for a given element `g` is defined + by `\mathrm{leading\_exponent}[g]` `= e_i`, if `\mathrm{depth}[g] = i`. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import SymmetricGroup + >>> G = SymmetricGroup(3) + >>> PcGroup = G.polycyclic_group() + >>> collector = PcGroup.collector + >>> collector.leading_exponent(G[1]) + 1 + + """ + exp_vector = self.exponent_vector(element) + depth = self.depth(element) + if depth != len(self.pcgs)+1: + return exp_vector[depth-1] + return None + + def _sift(self, z, g): + h = g + d = self.depth(h) + while d < len(self.pcgs) and z[d-1] != 1: + k = z[d-1] + e = self.leading_exponent(h)*(self.leading_exponent(k))**-1 + e = e % self.relative_order[d-1] + h = k**-e*h + d = self.depth(h) + return h + + def induced_pcgs(self, gens): + """ + + Parameters + ========== + + gens : list + A list of generators on which polycyclic subgroup + is to be defined. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import SymmetricGroup + >>> S = SymmetricGroup(8) + >>> G = S.sylow_subgroup(2) + >>> PcGroup = G.polycyclic_group() + >>> collector = PcGroup.collector + >>> gens = [G[0], G[1]] + >>> ipcgs = collector.induced_pcgs(gens) + >>> [gen.order() for gen in ipcgs] + [2, 2, 2] + >>> G = S.sylow_subgroup(3) + >>> PcGroup = G.polycyclic_group() + >>> collector = PcGroup.collector + >>> gens = [G[0], G[1]] + >>> ipcgs = collector.induced_pcgs(gens) + >>> [gen.order() for gen in ipcgs] + [3] + + """ + z = [1]*len(self.pcgs) + G = gens + while G: + g = G.pop(0) + h = self._sift(z, g) + d = self.depth(h) + if d < len(self.pcgs): + for gen in z: + if gen != 1: + G.append(h**-1*gen**-1*h*gen) + z[d-1] = h; + z = [gen for gen in z if gen != 1] + return z + + def constructive_membership_test(self, ipcgs, g): + """ + Return the exponent vector for induced pcgs. 
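+
+        ``False`` is returned if ``g`` cannot be expressed in terms of
+        ``ipcgs``, i.e. if it is not a member of the subgroup they
+        generate.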
+ """ + e = [0]*len(ipcgs) + h = g + d = self.depth(h) + for i, gen in enumerate(ipcgs): + while self.depth(gen) == d: + f = self.leading_exponent(h)*self.leading_exponent(gen) + f = f % self.relative_order[d-1] + h = gen**(-f)*h + e[i] = f + d = self.depth(h) + if h == 1: + return e + return False diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/permutations.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/permutations.py new file mode 100644 index 0000000000000000000000000000000000000000..6c823720d4719483adfcfdfcce52ed157d2b755c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/permutations.py @@ -0,0 +1,3112 @@ +import random +from collections import defaultdict +from collections.abc import Iterable +from functools import reduce + +from sympy.core.parameters import global_parameters +from sympy.core.basic import Atom +from sympy.core.expr import Expr +from sympy.core.numbers import Integer +from sympy.core.sympify import _sympify +from sympy.matrices import zeros +from sympy.polys.polytools import lcm +from sympy.printing.repr import srepr +from sympy.utilities.iterables import (flatten, has_variety, minlex, + has_dups, runs, is_sequence) +from sympy.utilities.misc import as_int +from mpmath.libmp.libintmath import ifac +from sympy.multipledispatch import dispatch + +def _af_rmul(a, b): + """ + Return the product b*a; input and output are array forms. The ith value + is a[b[i]]. + + Examples + ======== + + >>> from sympy.combinatorics.permutations import _af_rmul, Permutation + + >>> a, b = [1, 0, 2], [0, 2, 1] + >>> _af_rmul(a, b) + [1, 2, 0] + >>> [a[b[i]] for i in range(3)] + [1, 2, 0] + + This handles the operands in reverse order compared to the ``*`` operator: + + >>> a = Permutation(a) + >>> b = Permutation(b) + >>> list(a*b) + [2, 0, 1] + >>> [b(a(i)) for i in range(3)] + [2, 0, 1] + + See Also + ======== + + rmul, _af_rmuln + """ + return [a[i] for i in b] + + +def _af_rmuln(*abc): + """ + Given [a, b, c, ...] return the product of ...*c*b*a using array forms. + The ith value is a[b[c[i]]]. + + Examples + ======== + + >>> from sympy.combinatorics.permutations import _af_rmul, Permutation + + >>> a, b = [1, 0, 2], [0, 2, 1] + >>> _af_rmul(a, b) + [1, 2, 0] + >>> [a[b[i]] for i in range(3)] + [1, 2, 0] + + This handles the operands in reverse order compared to the ``*`` operator: + + >>> a = Permutation(a); b = Permutation(b) + >>> list(a*b) + [2, 0, 1] + >>> [b(a(i)) for i in range(3)] + [2, 0, 1] + + See Also + ======== + + rmul, _af_rmul + """ + a = abc + m = len(a) + if m == 3: + p0, p1, p2 = a + return [p0[p1[i]] for i in p2] + if m == 4: + p0, p1, p2, p3 = a + return [p0[p1[p2[i]]] for i in p3] + if m == 5: + p0, p1, p2, p3, p4 = a + return [p0[p1[p2[p3[i]]]] for i in p4] + if m == 6: + p0, p1, p2, p3, p4, p5 = a + return [p0[p1[p2[p3[p4[i]]]]] for i in p5] + if m == 7: + p0, p1, p2, p3, p4, p5, p6 = a + return [p0[p1[p2[p3[p4[p5[i]]]]]] for i in p6] + if m == 8: + p0, p1, p2, p3, p4, p5, p6, p7 = a + return [p0[p1[p2[p3[p4[p5[p6[i]]]]]]] for i in p7] + if m == 1: + return a[0][:] + if m == 2: + a, b = a + return [a[i] for i in b] + if m == 0: + raise ValueError("String must not be empty") + p0 = _af_rmuln(*a[:m//2]) + p1 = _af_rmuln(*a[m//2:]) + return [p0[i] for i in p1] + + +def _af_parity(pi): + """ + Computes the parity of a permutation in array form. 
+ + Explanation + =========== + + The parity of a permutation reflects the parity of the + number of inversions in the permutation, i.e., the + number of pairs of x and y such that x > y but p[x] < p[y]. + + Examples + ======== + + >>> from sympy.combinatorics.permutations import _af_parity + >>> _af_parity([0, 1, 2, 3]) + 0 + >>> _af_parity([3, 2, 0, 1]) + 1 + + See Also + ======== + + Permutation + """ + n = len(pi) + a = [0] * n + c = 0 + for j in range(n): + if a[j] == 0: + c += 1 + a[j] = 1 + i = j + while pi[i] != j: + i = pi[i] + a[i] = 1 + return (n - c) % 2 + + +def _af_invert(a): + """ + Finds the inverse, ~A, of a permutation, A, given in array form. + + Examples + ======== + + >>> from sympy.combinatorics.permutations import _af_invert, _af_rmul + >>> A = [1, 2, 0, 3] + >>> _af_invert(A) + [2, 0, 1, 3] + >>> _af_rmul(_, A) + [0, 1, 2, 3] + + See Also + ======== + + Permutation, __invert__ + """ + inv_form = [0] * len(a) + for i, ai in enumerate(a): + inv_form[ai] = i + return inv_form + + +def _af_pow(a, n): + """ + Routine for finding powers of a permutation. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> from sympy.combinatorics.permutations import _af_pow + >>> p = Permutation([2, 0, 3, 1]) + >>> p.order() + 4 + >>> _af_pow(p._array_form, 4) + [0, 1, 2, 3] + """ + if n == 0: + return list(range(len(a))) + if n < 0: + return _af_pow(_af_invert(a), -n) + if n == 1: + return a[:] + elif n == 2: + b = [a[i] for i in a] + elif n == 3: + b = [a[a[i]] for i in a] + elif n == 4: + b = [a[a[a[i]]] for i in a] + else: + # use binary multiplication + b = list(range(len(a))) + while 1: + if n & 1: + b = [b[i] for i in a] + n -= 1 + if not n: + break + if n % 4 == 0: + a = [a[a[a[i]]] for i in a] + n = n // 4 + elif n % 2 == 0: + a = [a[i] for i in a] + n = n // 2 + return b + + +def _af_commutes_with(a, b): + """ + Checks if the two permutations with array forms + given by ``a`` and ``b`` commute. + + Examples + ======== + + >>> from sympy.combinatorics.permutations import _af_commutes_with + >>> _af_commutes_with([1, 2, 0], [0, 2, 1]) + False + + See Also + ======== + + Permutation, commutes_with + """ + return not any(a[b[i]] != b[a[i]] for i in range(len(a) - 1)) + + +class Cycle(dict): + """ + Wrapper around dict which provides the functionality of a disjoint cycle. + + Explanation + =========== + + A cycle shows the rule to use to move subsets of elements to obtain + a permutation. 
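As a brief cross-check of the ``_af_parity`` explanation above, a sketch with a hypothetical helper that counts inversions directly:

def _parity_by_inversions(pi):
    # parity of the number of pairs i < j with pi[i] > pi[j]
    n = len(pi)
    inv = sum(1 for i in range(n) for j in range(i + 1, n) if pi[i] > pi[j])
    return inv % 2

assert _parity_by_inversions([0, 1, 2, 3]) == 0
assert _parity_by_inversions([3, 2, 0, 1]) == 1   # 5 inversions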
The Cycle class is more flexible than Permutation in + that 1) all elements need not be present in order to investigate how + multiple cycles act in sequence and 2) it can contain singletons: + + >>> from sympy.combinatorics.permutations import Perm, Cycle + + A Cycle will automatically parse a cycle given as a tuple on the rhs: + + >>> Cycle(1, 2)(2, 3) + (1 3 2) + + The identity cycle, Cycle(), can be used to start a product: + + >>> Cycle()(1, 2)(2, 3) + (1 3 2) + + The array form of a Cycle can be obtained by calling the list + method (or passing it to the list function) and all elements from + 0 will be shown: + + >>> a = Cycle(1, 2) + >>> a.list() + [0, 2, 1] + >>> list(a) + [0, 2, 1] + + If a larger (or smaller) range is desired use the list method and + provide the desired size -- but the Cycle cannot be truncated to + a size smaller than the largest element that is out of place: + + >>> b = Cycle(2, 4)(1, 2)(3, 1, 4)(1, 3) + >>> b.list() + [0, 2, 1, 3, 4] + >>> b.list(b.size + 1) + [0, 2, 1, 3, 4, 5] + >>> b.list(-1) + [0, 2, 1] + + Singletons are not shown when printing with one exception: the largest + element is always shown -- as a singleton if necessary: + + >>> Cycle(1, 4, 10)(4, 5) + (1 5 4 10) + >>> Cycle(1, 2)(4)(5)(10) + (1 2)(10) + + The array form can be used to instantiate a Permutation so other + properties of the permutation can be investigated: + + >>> Perm(Cycle(1, 2)(3, 4).list()).transpositions() + [(1, 2), (3, 4)] + + Notes + ===== + + The underlying structure of the Cycle is a dictionary and although + the __iter__ method has been redefined to give the array form of the + cycle, the underlying dictionary items are still available with the + such methods as items(): + + >>> list(Cycle(1, 2).items()) + [(1, 2), (2, 1)] + + See Also + ======== + + Permutation + """ + def __missing__(self, arg): + """Enter arg into dictionary and return arg.""" + return as_int(arg) + + def __iter__(self): + yield from self.list() + + def __call__(self, *other): + """Return product of cycles processed from R to L. + + Examples + ======== + + >>> from sympy.combinatorics import Cycle + >>> Cycle(1, 2)(2, 3) + (1 3 2) + + An instance of a Cycle will automatically parse list-like + objects and Permutations that are on the right. It is more + flexible than the Permutation in that all elements need not + be present: + + >>> a = Cycle(1, 2) + >>> a(2, 3) + (1 3 2) + >>> a(2, 3)(4, 5) + (1 3 2)(4 5) + + """ + rv = Cycle(*other) + for k, v in zip(list(self.keys()), [rv[self[k]] for k in self.keys()]): + rv[k] = v + return rv + + def list(self, size=None): + """Return the cycles as an explicit list starting from 0 up + to the greater of the largest value in the cycles and size. + + Truncation of trailing unmoved items will occur when size + is less than the maximum element in the cycle; if this is + desired, setting ``size=-1`` will guarantee such trimming. 
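A small sketch of the dict-backed behaviour described in the Notes above; only moved elements are stored, and unmoved keys fall through ``__missing__``:

from sympy.combinatorics.permutations import Cycle

c = Cycle(1, 2)
assert list(c.items()) == [(1, 2), (2, 1)]   # only moved elements are stored
assert c.list() == [0, 2, 1]
assert c[7] == 7                             # missing keys map to themselves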
+ + Examples + ======== + + >>> from sympy.combinatorics import Cycle + >>> p = Cycle(2, 3)(4, 5) + >>> p.list() + [0, 1, 3, 2, 5, 4] + >>> p.list(10) + [0, 1, 3, 2, 5, 4, 6, 7, 8, 9] + + Passing a length too small will trim trailing, unchanged elements + in the permutation: + + >>> Cycle(2, 4)(1, 2, 4).list(-1) + [0, 2, 1] + """ + if not self and size is None: + raise ValueError('must give size for empty Cycle') + if size is not None: + big = max([i for i in self.keys() if self[i] != i] + [0]) + size = max(size, big + 1) + else: + size = self.size + return [self[i] for i in range(size)] + + def __repr__(self): + """We want it to print as a Cycle, not as a dict. + + Examples + ======== + + >>> from sympy.combinatorics import Cycle + >>> Cycle(1, 2) + (1 2) + >>> print(_) + (1 2) + >>> list(Cycle(1, 2).items()) + [(1, 2), (2, 1)] + """ + if not self: + return 'Cycle()' + cycles = Permutation(self).cyclic_form + s = ''.join(str(tuple(c)) for c in cycles) + big = self.size - 1 + if not any(i == big for c in cycles for i in c): + s += '(%s)' % big + return 'Cycle%s' % s + + def __str__(self): + """We want it to be printed in a Cycle notation with no + comma in-between. + + Examples + ======== + + >>> from sympy.combinatorics import Cycle + >>> Cycle(1, 2) + (1 2) + >>> Cycle(1, 2, 4)(5, 6) + (1 2 4)(5 6) + """ + if not self: + return '()' + cycles = Permutation(self).cyclic_form + s = ''.join(str(tuple(c)) for c in cycles) + big = self.size - 1 + if not any(i == big for c in cycles for i in c): + s += '(%s)' % big + s = s.replace(',', '') + return s + + def __init__(self, *args): + """Load up a Cycle instance with the values for the cycle. + + Examples + ======== + + >>> from sympy.combinatorics import Cycle + >>> Cycle(1, 2, 6) + (1 2 6) + """ + + if not args: + return + if len(args) == 1: + if isinstance(args[0], Permutation): + for c in args[0].cyclic_form: + self.update(self(*c)) + return + elif isinstance(args[0], Cycle): + for k, v in args[0].items(): + self[k] = v + return + args = [as_int(a) for a in args] + if any(i < 0 for i in args): + raise ValueError('negative integers are not allowed in a cycle.') + if has_dups(args): + raise ValueError('All elements must be unique in a cycle.') + for i in range(-len(args), 0): + self[args[i]] = args[i + 1] + + @property + def size(self): + if not self: + return 0 + return max(self.keys()) + 1 + + def copy(self): + return Cycle(self) + + +class Permutation(Atom): + r""" + A permutation, alternatively known as an 'arrangement number' or 'ordering' + is an arrangement of the elements of an ordered list into a one-to-one + mapping with itself. The permutation of a given arrangement is given by + indicating the positions of the elements after re-arrangement [2]_. For + example, if one started with elements ``[x, y, a, b]`` (in that order) and + they were reordered as ``[x, y, b, a]`` then the permutation would be + ``[0, 1, 3, 2]``. Notice that (in SymPy) the first element is always referred + to as 0 and the permutation uses the indices of the elements in the + original ordering, not the elements ``(a, b, ...)`` themselves. + + >>> from sympy.combinatorics import Permutation + >>> from sympy import init_printing + >>> init_printing(perm_cyclic=False, pretty_print=False) + + Permutations Notation + ===================== + + Permutations are commonly represented in disjoint cycle or array forms. 
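Looking back at ``Cycle.__init__`` above, a short sketch (not part of the module) of the input validation it performs:

from sympy.combinatorics.permutations import Cycle

try:
    Cycle(1, 2, 1)
except ValueError as err:
    print(err)          # All elements must be unique in a cycle.

try:
    Cycle(-1, 2)
except ValueError as err:
    print(err)          # negative integers are not allowed in a cycle.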
+ + Array Notation and 2-line Form + ------------------------------------ + + In the 2-line form, the elements and their final positions are shown + as a matrix with 2 rows: + + [0 1 2 ... n-1] + [p(0) p(1) p(2) ... p(n-1)] + + Since the first line is always ``range(n)``, where n is the size of p, + it is sufficient to represent the permutation by the second line, + referred to as the "array form" of the permutation. This is entered + in brackets as the argument to the Permutation class: + + >>> p = Permutation([0, 2, 1]); p + Permutation([0, 2, 1]) + + Given i in range(p.size), the permutation maps i to i^p + + >>> [i^p for i in range(p.size)] + [0, 2, 1] + + The composite of two permutations p*q means first apply p, then q, so + i^(p*q) = (i^p)^q which is i^p^q according to Python precedence rules: + + >>> q = Permutation([2, 1, 0]) + >>> [i^p^q for i in range(3)] + [2, 0, 1] + >>> [i^(p*q) for i in range(3)] + [2, 0, 1] + + One can use also the notation p(i) = i^p, but then the composition + rule is (p*q)(i) = q(p(i)), not p(q(i)): + + >>> [(p*q)(i) for i in range(p.size)] + [2, 0, 1] + >>> [q(p(i)) for i in range(p.size)] + [2, 0, 1] + >>> [p(q(i)) for i in range(p.size)] + [1, 2, 0] + + Disjoint Cycle Notation + ----------------------- + + In disjoint cycle notation, only the elements that have shifted are + indicated. + + For example, [1, 3, 2, 0] can be represented as (0, 1, 3)(2). + This can be understood from the 2 line format of the given permutation. + In the 2-line form, + [0 1 2 3] + [1 3 2 0] + + The element in the 0th position is 1, so 0 -> 1. The element in the 1st + position is three, so 1 -> 3. And the element in the third position is again + 0, so 3 -> 0. Thus, 0 -> 1 -> 3 -> 0, and 2 -> 2. Thus, this can be represented + as 2 cycles: (0, 1, 3)(2). + In common notation, singular cycles are not explicitly written as they can be + inferred implicitly. + + Only the relative ordering of elements in a cycle matter: + + >>> Permutation(1,2,3) == Permutation(2,3,1) == Permutation(3,1,2) + True + + The disjoint cycle notation is convenient when representing + permutations that have several cycles in them: + + >>> Permutation(1, 2)(3, 5) == Permutation([[1, 2], [3, 5]]) + True + + It also provides some economy in entry when computing products of + permutations that are written in disjoint cycle notation: + + >>> Permutation(1, 2)(1, 3)(2, 3) + Permutation([0, 3, 2, 1]) + >>> _ == Permutation([[1, 2]])*Permutation([[1, 3]])*Permutation([[2, 3]]) + True + + Caution: when the cycles have common elements between them then the order + in which the permutations are applied matters. This module applies + the permutations from *left to right*. + + >>> Permutation(1, 2)(2, 3) == Permutation([(1, 2), (2, 3)]) + True + >>> Permutation(1, 2)(2, 3).list() + [0, 3, 1, 2] + + In the above case, (1,2) is computed before (2,3). + As 0 -> 0, 0 -> 0, element in position 0 is 0. + As 1 -> 2, 2 -> 3, element in position 1 is 3. + As 2 -> 1, 1 -> 1, element in position 2 is 1. + As 3 -> 3, 3 -> 2, element in position 3 is 2. + + If the first and second elements had been + swapped first, followed by the swapping of the second + and third, the result would have been [0, 2, 3, 1]. + If, you want to apply the cycles in the conventional + right to left order, call the function with arguments in reverse order + as demonstrated below: + + >>> Permutation([(1, 2), (2, 3)][::-1]).list() + [0, 2, 3, 1] + + Entering a singleton in a permutation is a way to indicate the size of the + permutation. 
The ``size`` keyword can also be used. + + Array-form entry: + + >>> Permutation([[1, 2], [9]]) + Permutation([0, 2, 1], size=10) + >>> Permutation([[1, 2]], size=10) + Permutation([0, 2, 1], size=10) + + Cyclic-form entry: + + >>> Permutation(1, 2, size=10) + Permutation([0, 2, 1], size=10) + >>> Permutation(9)(1, 2) + Permutation([0, 2, 1], size=10) + + Caution: no singleton containing an element larger than the largest + in any previous cycle can be entered. This is an important difference + in how Permutation and Cycle handle the ``__call__`` syntax. A singleton + argument at the start of a Permutation performs instantiation of the + Permutation and is permitted: + + >>> Permutation(5) + Permutation([], size=6) + + A singleton entered after instantiation is a call to the permutation + -- a function call -- and if the argument is out of range it will + trigger an error. For this reason, it is better to start the cycle + with the singleton: + + The following fails because there is no element 3: + + >>> Permutation(1, 2)(3) + Traceback (most recent call last): + ... + IndexError: list index out of range + + This is ok: only the call to an out of range singleton is prohibited; + otherwise the permutation autosizes: + + >>> Permutation(3)(1, 2) + Permutation([0, 2, 1, 3]) + >>> Permutation(1, 2)(3, 4) == Permutation(3, 4)(1, 2) + True + + + Equality testing + ---------------- + + The array forms must be the same in order for permutations to be equal: + + >>> Permutation([1, 0, 2, 3]) == Permutation([1, 0]) + False + + + Identity Permutation + -------------------- + + The identity permutation is a permutation in which no element is out of + place. It can be entered in a variety of ways. All the following create + an identity permutation of size 4: + + >>> I = Permutation([0, 1, 2, 3]) + >>> all(p == I for p in [ + ... Permutation(3), + ... Permutation(range(4)), + ... Permutation([], size=4), + ... Permutation(size=4)]) + True + + Watch out for entering the range *inside* a set of brackets (which is + cycle notation): + + >>> I == Permutation([range(4)]) + False + + + Permutation Printing + ==================== + + There are a few things to note about how Permutations are printed. + + .. deprecated:: 1.6 + + Configuring Permutation printing by setting + ``Permutation.print_cyclic`` is deprecated. Users should use the + ``perm_cyclic`` flag to the printers, as described below. + + 1) If you prefer one form (array or cycle) over another, you can set + ``init_printing`` with the ``perm_cyclic`` flag. 
+ + >>> from sympy import init_printing + >>> p = Permutation(1, 2)(4, 5)(3, 4) + >>> p + Permutation([0, 2, 1, 4, 5, 3]) + + >>> init_printing(perm_cyclic=True, pretty_print=False) + >>> p + (1 2)(3 4 5) + + 2) Regardless of the setting, a list of elements in the array for cyclic + form can be obtained and either of those can be copied and supplied as + the argument to Permutation: + + >>> p.array_form + [0, 2, 1, 4, 5, 3] + >>> p.cyclic_form + [[1, 2], [3, 4, 5]] + >>> Permutation(_) == p + True + + 3) Printing is economical in that as little as possible is printed while + retaining all information about the size of the permutation: + + >>> init_printing(perm_cyclic=False, pretty_print=False) + >>> Permutation([1, 0, 2, 3]) + Permutation([1, 0, 2, 3]) + >>> Permutation([1, 0, 2, 3], size=20) + Permutation([1, 0], size=20) + >>> Permutation([1, 0, 2, 4, 3, 5, 6], size=20) + Permutation([1, 0, 2, 4, 3], size=20) + + >>> p = Permutation([1, 0, 2, 3]) + >>> init_printing(perm_cyclic=True, pretty_print=False) + >>> p + (3)(0 1) + >>> init_printing(perm_cyclic=False, pretty_print=False) + + The 2 was not printed but it is still there as can be seen with the + array_form and size methods: + + >>> p.array_form + [1, 0, 2, 3] + >>> p.size + 4 + + Short introduction to other methods + =================================== + + The permutation can act as a bijective function, telling what element is + located at a given position + + >>> q = Permutation([5, 2, 3, 4, 1, 0]) + >>> q.array_form[1] # the hard way + 2 + >>> q(1) # the easy way + 2 + >>> {i: q(i) for i in range(q.size)} # showing the bijection + {0: 5, 1: 2, 2: 3, 3: 4, 4: 1, 5: 0} + + The full cyclic form (including singletons) can be obtained: + + >>> p.full_cyclic_form + [[0, 1], [2], [3]] + + Any permutation can be factored into transpositions of pairs of elements: + + >>> Permutation([[1, 2], [3, 4, 5]]).transpositions() + [(1, 2), (3, 5), (3, 4)] + >>> Permutation.rmul(*[Permutation([ti], size=6) for ti in _]).cyclic_form + [[1, 2], [3, 4, 5]] + + The number of permutations on a set of n elements is given by n! and is + called the cardinality. + + >>> p.size + 4 + >>> p.cardinality + 24 + + A given permutation has a rank among all the possible permutations of the + same elements, but what that rank is depends on how the permutations are + enumerated. (There are a number of different methods of doing so.) The + lexicographic rank is given by the rank method and this rank is used to + increment a permutation with addition/subtraction: + + >>> p.rank() + 6 + >>> p + 1 + Permutation([1, 0, 3, 2]) + >>> p.next_lex() + Permutation([1, 0, 3, 2]) + >>> _.rank() + 7 + >>> p.unrank_lex(p.size, rank=7) + Permutation([1, 0, 3, 2]) + + The product of two permutations p and q is defined as their composition as + functions, (p*q)(i) = q(p(i)) [6]_. 
+ + >>> p = Permutation([1, 0, 2, 3]) + >>> q = Permutation([2, 3, 1, 0]) + >>> list(q*p) + [2, 3, 0, 1] + >>> list(p*q) + [3, 2, 1, 0] + >>> [q(p(i)) for i in range(p.size)] + [3, 2, 1, 0] + + The permutation can be 'applied' to any list-like object, not only + Permutations: + + >>> p(['zero', 'one', 'four', 'two']) + ['one', 'zero', 'four', 'two'] + >>> p('zo42') + ['o', 'z', '4', '2'] + + If you have a list of arbitrary elements, the corresponding permutation + can be found with the from_sequence method: + + >>> Permutation.from_sequence('SymPy') + Permutation([1, 3, 2, 0, 4]) + + Checking if a Permutation is contained in a Group + ================================================= + + Generally if you have a group of permutations G on n symbols, and + you're checking if a permutation on less than n symbols is part + of that group, the check will fail. + + Here is an example for n=5 and we check if the cycle + (1,2,3) is in G: + + >>> from sympy import init_printing + >>> init_printing(perm_cyclic=True, pretty_print=False) + >>> from sympy.combinatorics import Cycle, Permutation + >>> from sympy.combinatorics.perm_groups import PermutationGroup + >>> G = PermutationGroup(Cycle(2, 3)(4, 5), Cycle(1, 2, 3, 4, 5)) + >>> p1 = Permutation(Cycle(2, 5, 3)) + >>> p2 = Permutation(Cycle(1, 2, 3)) + >>> a1 = Permutation(Cycle(1, 2, 3).list(6)) + >>> a2 = Permutation(Cycle(1, 2, 3)(5)) + >>> a3 = Permutation(Cycle(1, 2, 3),size=6) + >>> for p in [p1,p2,a1,a2,a3]: p, G.contains(p) + ((2 5 3), True) + ((1 2 3), False) + ((5)(1 2 3), True) + ((5)(1 2 3), True) + ((5)(1 2 3), True) + + The check for p2 above will fail. + + Checking if p1 is in G works because SymPy knows + G is a group on 5 symbols, and p1 is also on 5 symbols + (its largest element is 5). + + For ``a1``, the ``.list(6)`` call will extend the permutation to 5 + symbols, so the test will work as well. In the case of ``a2`` the + permutation is being extended to 5 symbols by using a singleton, + and in the case of ``a3`` it's extended through the constructor + argument ``size=6``. + + There is another way to do this, which is to tell the ``contains`` + method that the number of symbols the group is on does not need to + match perfectly the number of symbols for the permutation: + + >>> G.contains(p2,strict=False) + True + + This can be via the ``strict`` argument to the ``contains`` method, + and SymPy will try to extend the permutation on its own and then + perform the containment check. + + See Also + ======== + + Cycle + + References + ========== + + .. [1] Skiena, S. 'Permutations.' 1.1 in Implementing Discrete Mathematics + Combinatorics and Graph Theory with Mathematica. Reading, MA: + Addison-Wesley, pp. 3-16, 1990. + + .. [2] Knuth, D. E. The Art of Computer Programming, Vol. 4: Combinatorial + Algorithms, 1st ed. Reading, MA: Addison-Wesley, 2011. + + .. [3] Wendy Myrvold and Frank Ruskey. 2001. Ranking and unranking + permutations in linear time. Inf. Process. Lett. 79, 6 (September 2001), + 281-284. DOI=10.1016/S0020-0190(01)00141-7 + + .. [4] D. L. Kreher, D. R. Stinson 'Combinatorial Algorithms' + CRC Press, 1999 + + .. [5] Graham, R. L.; Knuth, D. E.; and Patashnik, O. + Concrete Mathematics: A Foundation for Computer Science, 2nd ed. + Reading, MA: Addison-Wesley, 1994. + + .. [6] https://en.wikipedia.org/w/index.php?oldid=499948155#Product_and_inverse + + .. 
[7] https://en.wikipedia.org/wiki/Lehmer_code + + """ + + is_Permutation = True + + _array_form = None + _cyclic_form = None + _cycle_structure = None + _size = None + _rank = None + + def __new__(cls, *args, size=None, **kwargs): + """ + Constructor for the Permutation object from a list or a + list of lists in which all elements of the permutation may + appear only once. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> from sympy import init_printing + >>> init_printing(perm_cyclic=False, pretty_print=False) + + Permutations entered in array-form are left unaltered: + + >>> Permutation([0, 2, 1]) + Permutation([0, 2, 1]) + + Permutations entered in cyclic form are converted to array form; + singletons need not be entered, but can be entered to indicate the + largest element: + + >>> Permutation([[4, 5, 6], [0, 1]]) + Permutation([1, 0, 2, 3, 5, 6, 4]) + >>> Permutation([[4, 5, 6], [0, 1], [19]]) + Permutation([1, 0, 2, 3, 5, 6, 4], size=20) + + All manipulation of permutations assumes that the smallest element + is 0 (in keeping with 0-based indexing in Python) so if the 0 is + missing when entering a permutation in array form, an error will be + raised: + + >>> Permutation([2, 1]) + Traceback (most recent call last): + ... + ValueError: Integers 0 through 2 must be present. + + If a permutation is entered in cyclic form, it can be entered without + singletons and the ``size`` specified so those values can be filled + in, otherwise the array form will only extend to the maximum value + in the cycles: + + >>> Permutation([[1, 4], [3, 5, 2]], size=10) + Permutation([0, 4, 3, 5, 1, 2], size=10) + >>> _.array_form + [0, 4, 3, 5, 1, 2, 6, 7, 8, 9] + """ + if size is not None: + size = int(size) + + #a) () + #b) (1) = identity + #c) (1, 2) = cycle + #d) ([1, 2, 3]) = array form + #e) ([[1, 2]]) = cyclic form + #f) (Cycle) = conversion to permutation + #g) (Permutation) = adjust size or return copy + ok = True + if not args: # a + return cls._af_new(list(range(size or 0))) + elif len(args) > 1: # c + return cls._af_new(Cycle(*args).list(size)) + if len(args) == 1: + a = args[0] + if isinstance(a, cls): # g + if size is None or size == a.size: + return a + return cls(a.array_form, size=size) + if isinstance(a, Cycle): # f + return cls._af_new(a.list(size)) + if not is_sequence(a): # b + if size is not None and a + 1 > size: + raise ValueError('size is too small when max is %s' % a) + return cls._af_new(list(range(a + 1))) + if has_variety(is_sequence(ai) for ai in a): + ok = False + else: + ok = False + if not ok: + raise ValueError("Permutation argument must be a list of ints, " + "a list of lists, Permutation or Cycle.") + + # safe to assume args are valid; this also makes a copy + # of the args + args = list(args[0]) + + is_cycle = args and is_sequence(args[0]) + if is_cycle: # e + args = [[int(i) for i in c] for c in args] + else: # d + args = [int(i) for i in args] + + # if there are n elements present, 0, 1, ..., n-1 should be present + # unless a cycle notation has been provided. A 0 will be added + # for convenience in case one wants to enter permutations where + # counting starts from 1. + + temp = flatten(args) + if has_dups(temp) and not is_cycle: + raise ValueError('there were repeated elements.') + temp = set(temp) + + if not is_cycle: + if temp != set(range(len(temp))): + raise ValueError('Integers 0 through %s must be present.' 
% + max(temp)) + if size is not None and temp and max(temp) + 1 > size: + raise ValueError('max element should not exceed %s' % (size - 1)) + + if is_cycle: + # it's not necessarily canonical so we won't store + # it -- use the array form instead + c = Cycle() + for ci in args: + c = c(*ci) + aform = c.list() + else: + aform = list(args) + if size and size > len(aform): + # don't allow for truncation of permutation which + # might split a cycle and lead to an invalid aform + # but do allow the permutation size to be increased + aform.extend(list(range(len(aform), size))) + + return cls._af_new(aform) + + @classmethod + def _af_new(cls, perm): + """A method to produce a Permutation object from a list; + the list is bound to the _array_form attribute, so it must + not be modified; this method is meant for internal use only; + the list ``a`` is supposed to be generated as a temporary value + in a method, so p = Perm._af_new(a) is the only object + to hold a reference to ``a``:: + + Examples + ======== + + >>> from sympy.combinatorics.permutations import Perm + >>> from sympy import init_printing + >>> init_printing(perm_cyclic=False, pretty_print=False) + >>> a = [2, 1, 3, 0] + >>> p = Perm._af_new(a) + >>> p + Permutation([2, 1, 3, 0]) + + """ + p = super().__new__(cls) + p._array_form = perm + p._size = len(perm) + return p + + def _hashable_content(self): + # the array_form (a list) is the Permutation arg, so we need to + # return a tuple, instead + return tuple(self.array_form) + + @property + def array_form(self): + """ + Return a copy of the attribute _array_form + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([[2, 0], [3, 1]]) + >>> p.array_form + [2, 3, 0, 1] + >>> Permutation([[2, 0, 3, 1]]).array_form + [3, 2, 0, 1] + >>> Permutation([2, 0, 3, 1]).array_form + [2, 0, 3, 1] + >>> Permutation([[1, 2], [4, 5]]).array_form + [0, 2, 1, 3, 5, 4] + """ + return self._array_form[:] + + def list(self, size=None): + """Return the permutation as an explicit list, possibly + trimming unmoved elements if size is less than the maximum + element in the permutation; if this is desired, setting + ``size=-1`` will guarantee such trimming. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation(2, 3)(4, 5) + >>> p.list() + [0, 1, 3, 2, 5, 4] + >>> p.list(10) + [0, 1, 3, 2, 5, 4, 6, 7, 8, 9] + + Passing a length too small will trim trailing, unchanged elements + in the permutation: + + >>> Permutation(2, 4)(1, 2, 4).list(-1) + [0, 2, 1] + >>> Permutation(3).list(-1) + [] + """ + if not self and size is None: + raise ValueError('must give size for empty Cycle') + rv = self.array_form + if size is not None: + if size > self.size: + rv.extend(list(range(self.size, size))) + else: + # find first value from rhs where rv[i] != i + i = self.size - 1 + while rv: + if rv[-1] != i: + break + rv.pop() + i -= 1 + return rv + + @property + def cyclic_form(self): + """ + This is used to convert to the cyclic notation + from the canonical notation. Singletons are omitted. 
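An illustrative sketch of the conversion idea with a hypothetical helper; the actual implementation below additionally starts each cycle at its smallest element and sorts the resulting list of cycles:

def _cycles_from_array(af):
    # follow each unvisited, moved index until its cycle closes
    seen, cycles = set(), []
    for i in range(len(af)):
        if i not in seen and af[i] != i:
            cycle, j = [], i
            while j not in seen:
                seen.add(j)
                cycle.append(j)
                j = af[j]
            cycles.append(cycle)
    return cycles

assert _cycles_from_array([0, 3, 1, 2]) == [[1, 3, 2]]
assert _cycles_from_array([1, 0, 2, 4, 3, 5]) == [[0, 1], [3, 4]]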
+ + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([0, 3, 1, 2]) + >>> p.cyclic_form + [[1, 3, 2]] + >>> Permutation([1, 0, 2, 4, 3, 5]).cyclic_form + [[0, 1], [3, 4]] + + See Also + ======== + + array_form, full_cyclic_form + """ + if self._cyclic_form is not None: + return list(self._cyclic_form) + array_form = self.array_form + unchecked = [True] * len(array_form) + cyclic_form = [] + for i in range(len(array_form)): + if unchecked[i]: + cycle = [] + cycle.append(i) + unchecked[i] = False + j = i + while unchecked[array_form[j]]: + j = array_form[j] + cycle.append(j) + unchecked[j] = False + if len(cycle) > 1: + cyclic_form.append(cycle) + assert cycle == list(minlex(cycle)) + cyclic_form.sort() + self._cyclic_form = cyclic_form[:] + return cyclic_form + + @property + def full_cyclic_form(self): + """Return permutation in cyclic form including singletons. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> Permutation([0, 2, 1]).full_cyclic_form + [[0], [1, 2]] + """ + need = set(range(self.size)) - set(flatten(self.cyclic_form)) + rv = self.cyclic_form + [[i] for i in need] + rv.sort() + return rv + + @property + def size(self): + """ + Returns the number of elements in the permutation. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> Permutation([[3, 2], [0, 1]]).size + 4 + + See Also + ======== + + cardinality, length, order, rank + """ + return self._size + + def support(self): + """Return the elements in permutation, P, for which P[i] != i. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([[3, 2], [0, 1], [4]]) + >>> p.array_form + [1, 0, 3, 2, 4] + >>> p.support() + [0, 1, 2, 3] + """ + a = self.array_form + return [i for i, e in enumerate(a) if a[i] != i] + + def __add__(self, other): + """Return permutation that is other higher in rank than self. + + The rank is the lexicographical rank, with the identity permutation + having rank of 0. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> I = Permutation([0, 1, 2, 3]) + >>> a = Permutation([2, 1, 3, 0]) + >>> I + a.rank() == a + True + + See Also + ======== + + __sub__, inversion_vector + + """ + rank = (self.rank() + other) % self.cardinality + rv = self.unrank_lex(self.size, rank) + rv._rank = rank + return rv + + def __sub__(self, other): + """Return the permutation that is other lower in rank than self. + + See Also + ======== + + __add__ + """ + return self.__add__(-other) + + @staticmethod + def rmul(*args): + """ + Return product of Permutations [a, b, c, ...] as the Permutation whose + ith value is a(b(c(i))). + + a, b, c, ... can be Permutation objects or tuples. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + + >>> a, b = [1, 0, 2], [0, 2, 1] + >>> a = Permutation(a); b = Permutation(b) + >>> list(Permutation.rmul(a, b)) + [1, 2, 0] + >>> [a(b(i)) for i in range(3)] + [1, 2, 0] + + This handles the operands in reverse order compared to the ``*`` operator: + + >>> a = Permutation(a); b = Permutation(b) + >>> list(a*b) + [2, 0, 1] + >>> [b(a(i)) for i in range(3)] + [2, 0, 1] + + Notes + ===== + + All items in the sequence will be parsed by Permutation as + necessary as long as the first item is a Permutation: + + >>> Permutation.rmul(a, [0, 2, 1]) == Permutation.rmul(a, b) + True + + The reverse order of arguments will raise a TypeError. 
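Looking back at ``__add__`` above, a small sketch of its rank arithmetic; ranks are taken modulo the cardinality n!:

from sympy.combinatorics import Permutation

p = Permutation([2, 1, 3, 0])
assert p + 0 == p
assert p + p.cardinality == p    # adding n! = 24 ranks wraps around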
+ + """ + rv = args[0] + for i in range(1, len(args)): + rv = args[i]*rv + return rv + + @classmethod + def rmul_with_af(cls, *args): + """ + same as rmul, but the elements of args are Permutation objects + which have _array_form + """ + a = [x._array_form for x in args] + rv = cls._af_new(_af_rmuln(*a)) + return rv + + def mul_inv(self, other): + """ + other*~self, self and other have _array_form + """ + a = _af_invert(self._array_form) + b = other._array_form + return self._af_new(_af_rmul(a, b)) + + def __rmul__(self, other): + """This is needed to coerce other to Permutation in rmul.""" + cls = type(self) + return cls(other)*self + + def __mul__(self, other): + """ + Return the product a*b as a Permutation; the ith value is b(a(i)). + + Examples + ======== + + >>> from sympy.combinatorics.permutations import _af_rmul, Permutation + + >>> a, b = [1, 0, 2], [0, 2, 1] + >>> a = Permutation(a); b = Permutation(b) + >>> list(a*b) + [2, 0, 1] + >>> [b(a(i)) for i in range(3)] + [2, 0, 1] + + This handles operands in reverse order compared to _af_rmul and rmul: + + >>> al = list(a); bl = list(b) + >>> _af_rmul(al, bl) + [1, 2, 0] + >>> [al[bl[i]] for i in range(3)] + [1, 2, 0] + + It is acceptable for the arrays to have different lengths; the shorter + one will be padded to match the longer one: + + >>> from sympy import init_printing + >>> init_printing(perm_cyclic=False, pretty_print=False) + >>> b*Permutation([1, 0]) + Permutation([1, 2, 0]) + >>> Permutation([1, 0])*b + Permutation([2, 0, 1]) + + It is also acceptable to allow coercion to handle conversion of a + single list to the left of a Permutation: + + >>> [0, 1]*a # no change: 2-element identity + Permutation([1, 0, 2]) + >>> [[0, 1]]*a # exchange first two elements + Permutation([0, 1, 2]) + + You cannot use more than 1 cycle notation in a product of cycles + since coercion can only handle one argument to the left. To handle + multiple cycles it is convenient to use Cycle instead of Permutation: + + >>> [[1, 2]]*[[2, 3]]*Permutation([]) # doctest: +SKIP + >>> from sympy.combinatorics.permutations import Cycle + >>> Cycle(1, 2)(2, 3) + (1 3 2) + + """ + from sympy.combinatorics.perm_groups import PermutationGroup, Coset + if isinstance(other, PermutationGroup): + return Coset(self, other, dir='-') + a = self.array_form + # __rmul__ makes sure the other is a Permutation + b = other.array_form + if not b: + perm = a + else: + b.extend(list(range(len(b), len(a)))) + perm = [b[i] for i in a] + b[len(a):] + return self._af_new(perm) + + def commutes_with(self, other): + """ + Checks if the elements are commuting. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> a = Permutation([1, 4, 3, 0, 2, 5]) + >>> b = Permutation([0, 1, 2, 3, 4, 5]) + >>> a.commutes_with(b) + True + >>> b = Permutation([2, 3, 5, 4, 1, 0]) + >>> a.commutes_with(b) + False + """ + a = self.array_form + b = other.array_form + return _af_commutes_with(a, b) + + def __pow__(self, n): + """ + Routine for finding powers of a permutation. 
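As a quick sketch, a power is a repeated product in the sense defined above, and negative powers go through the inverse:

from sympy.combinatorics import Permutation

p = Permutation([2, 0, 3, 1])
assert p**3 == p*p*p
assert p**-1 == ~p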
+ + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> from sympy import init_printing + >>> init_printing(perm_cyclic=False, pretty_print=False) + >>> p = Permutation([2, 0, 3, 1]) + >>> p.order() + 4 + >>> p**4 + Permutation([0, 1, 2, 3]) + """ + if isinstance(n, Permutation): + raise NotImplementedError( + 'p**p is not defined; do you mean p^p (conjugate)?') + n = int(n) + return self._af_new(_af_pow(self.array_form, n)) + + def __rxor__(self, i): + """Return self(i) when ``i`` is an int. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation(1, 2, 9) + >>> 2^p == p(2) == 9 + True + """ + if int(i) == i: + return self(i) + else: + raise NotImplementedError( + "i^p = p(i) when i is an integer, not %s." % i) + + def __xor__(self, h): + """Return the conjugate permutation ``~h*self*h` `. + + Explanation + =========== + + If ``a`` and ``b`` are conjugates, ``a = h*b*~h`` and + ``b = ~h*a*h`` and both have the same cycle structure. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation(1, 2, 9) + >>> q = Permutation(6, 9, 8) + >>> p*q != q*p + True + + Calculate and check properties of the conjugate: + + >>> c = p^q + >>> c == ~q*p*q and p == q*c*~q + True + + The expression q^p^r is equivalent to q^(p*r): + + >>> r = Permutation(9)(4, 6, 8) + >>> q^p^r == q^(p*r) + True + + If the term to the left of the conjugate operator, i, is an integer + then this is interpreted as selecting the ith element from the + permutation to the right: + + >>> all(i^p == p(i) for i in range(p.size)) + True + + Note that the * operator as higher precedence than the ^ operator: + + >>> q^r*p^r == q^(r*p)^r == Permutation(9)(1, 6, 4) + True + + Notes + ===== + + In Python the precedence rule is p^q^r = (p^q)^r which differs + in general from p^(q^r) + + >>> q^p^r + (9)(1 4 8) + >>> q^(p^r) + (9)(1 8 6) + + For a given r and p, both of the following are conjugates of p: + ~r*p*r and r*p*~r. But these are not necessarily the same: + + >>> ~r*p*r == r*p*~r + True + + >>> p = Permutation(1, 2, 9)(5, 6) + >>> ~r*p*r == r*p*~r + False + + The conjugate ~r*p*r was chosen so that ``p^q^r`` would be equivalent + to ``p^(q*r)`` rather than ``p^(r*q)``. To obtain r*p*~r, pass ~r to + this method: + + >>> p^~r == r*p*~r + True + """ + + if self.size != h.size: + raise ValueError("The permutations must be of equal size.") + a = [None]*self.size + h = h._array_form + p = self._array_form + for i in range(self.size): + a[h[i]] = h[p[i]] + return self._af_new(a) + + def transpositions(self): + """ + Return the permutation decomposed into a list of transpositions. + + Explanation + =========== + + It is always possible to express a permutation as the product of + transpositions, see [1] + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([[1, 2, 3], [0, 4, 5, 6, 7]]) + >>> t = p.transpositions() + >>> t + [(0, 7), (0, 6), (0, 5), (0, 4), (1, 3), (1, 2)] + >>> print(''.join(str(c) for c in t)) + (0, 7)(0, 6)(0, 5)(0, 4)(1, 3)(1, 2) + >>> Permutation.rmul(*[Permutation([ti], size=p.size) for ti in t]) == p + True + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Transposition_%28mathematics%29#Properties + + """ + a = self.cyclic_form + res = [] + for x in a: + nx = len(x) + if nx == 2: + res.append(tuple(x)) + elif nx > 2: + first = x[0] + for y in x[nx - 1:0:-1]: + res.append((first, y)) + return res + + @classmethod + def from_sequence(self, i, key=None): + """Return the permutation needed to obtain ``i`` from the sorted + elements of ``i``. If custom sorting is desired, a key can be given. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + + >>> Permutation.from_sequence('SymPy') + (4)(0 1 3) + >>> _(sorted("SymPy")) + ['S', 'y', 'm', 'P', 'y'] + >>> Permutation.from_sequence('SymPy', key=lambda x: x.lower()) + (4)(0 2)(1 3) + """ + ic = list(zip(i, list(range(len(i))))) + if key: + ic.sort(key=lambda x: key(x[0])) + else: + ic.sort() + return ~Permutation([i[1] for i in ic]) + + def __invert__(self): + """ + Return the inverse of the permutation. + + A permutation multiplied by its inverse is the identity permutation. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> from sympy import init_printing + >>> init_printing(perm_cyclic=False, pretty_print=False) + >>> p = Permutation([[2, 0], [3, 1]]) + >>> ~p + Permutation([2, 3, 0, 1]) + >>> _ == p**-1 + True + >>> p*~p == ~p*p == Permutation([0, 1, 2, 3]) + True + """ + return self._af_new(_af_invert(self._array_form)) + + def __iter__(self): + """Yield elements from array form. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> list(Permutation(range(3))) + [0, 1, 2] + """ + yield from self.array_form + + def __repr__(self): + return srepr(self) + + def __call__(self, *i): + """ + Allows applying a permutation instance as a bijective function. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([[2, 0], [3, 1]]) + >>> p.array_form + [2, 3, 0, 1] + >>> [p(i) for i in range(4)] + [2, 3, 0, 1] + + If an array is given then the permutation selects the items + from the array (i.e. the permutation is applied to the array): + + >>> from sympy.abc import x + >>> p([x, 1, 0, x**2]) + [0, x**2, x, 1] + """ + # list indices can be Integer or int; leave this + # as it is (don't test or convert it) because this + # gets called a lot and should be fast + if len(i) == 1: + i = i[0] + if not isinstance(i, Iterable): + i = as_int(i) + if i < 0 or i > self.size: + raise TypeError( + "{} should be an integer between 0 and {}" + .format(i, self.size-1)) + return self._array_form[i] + # P([a, b, c]) + if len(i) != self.size: + raise TypeError( + "{} should have the length {}.".format(i, self.size)) + return [i[j] for j in self._array_form] + # P(1, 2, 3) + return self*Permutation(Cycle(*i), size=self.size) + + def atoms(self): + """ + Returns all the elements of a permutation + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> Permutation([0, 1, 2, 3, 4, 5]).atoms() + {0, 1, 2, 3, 4, 5} + >>> Permutation([[0, 1], [2, 3], [4, 5]]).atoms() + {0, 1, 2, 3, 4, 5} + """ + return set(self.array_form) + + def apply(self, i): + r"""Apply the permutation to an expression. + + Parameters + ========== + + i : Expr + It should be an integer between $0$ and $n-1$ where $n$ + is the size of the permutation. + + If it is a symbol or a symbolic expression that can + have integer values, an ``AppliedPermutation`` object + will be returned which can represent an unevaluated + function. 
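A hedged sketch of the two return types described here (``AppliedPermutation`` is defined elsewhere in this module):

from sympy import Integer, Symbol
from sympy.combinatorics import Permutation

p = Permutation([1, 0, 2])
assert p.apply(2) == Integer(2)     # concrete integers evaluate immediately
i = Symbol('i', integer=True)
expr = p.apply(i)                   # stays unevaluated as AppliedPermutation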
+ + Notes + ===== + + Any permutation can be defined as a bijective function + $\sigma : \{ 0, 1, \dots, n-1 \} \rightarrow \{ 0, 1, \dots, n-1 \}$ + where $n$ denotes the size of the permutation. + + The definition may even be extended for any set with distinctive + elements, such that the permutation can even be applied for + real numbers or such, however, it is not implemented for now for + computational reasons and the integrity with the group theory + module. + + This function is similar to the ``__call__`` magic, however, + ``__call__`` magic already has some other applications like + permuting an array or attaching new cycles, which would + not always be mathematically consistent. + + This also guarantees that the return type is a SymPy integer, + which guarantees the safety to use assumptions. + """ + i = _sympify(i) + if i.is_integer is False: + raise NotImplementedError("{} should be an integer.".format(i)) + + n = self.size + if (i < 0) == True or (i >= n) == True: + raise NotImplementedError( + "{} should be an integer between 0 and {}".format(i, n-1)) + + if i.is_Integer: + return Integer(self._array_form[i]) + return AppliedPermutation(self, i) + + def next_lex(self): + """ + Returns the next permutation in lexicographical order. + If self is the last permutation in lexicographical order + it returns None. + See [4] section 2.4. + + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([2, 3, 1, 0]) + >>> p = Permutation([2, 3, 1, 0]); p.rank() + 17 + >>> p = p.next_lex(); p.rank() + 18 + + See Also + ======== + + rank, unrank_lex + """ + perm = self.array_form[:] + n = len(perm) + i = n - 2 + while perm[i + 1] < perm[i]: + i -= 1 + if i == -1: + return None + else: + j = n - 1 + while perm[j] < perm[i]: + j -= 1 + perm[j], perm[i] = perm[i], perm[j] + i += 1 + j = n - 1 + while i < j: + perm[j], perm[i] = perm[i], perm[j] + i += 1 + j -= 1 + return self._af_new(perm) + + @classmethod + def unrank_nonlex(self, n, r): + """ + This is a linear time unranking algorithm that does not + respect lexicographic order [3]. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> from sympy import init_printing + >>> init_printing(perm_cyclic=False, pretty_print=False) + >>> Permutation.unrank_nonlex(4, 5) + Permutation([2, 0, 3, 1]) + >>> Permutation.unrank_nonlex(4, -1) + Permutation([0, 1, 2, 3]) + + See Also + ======== + + next_nonlex, rank_nonlex + """ + def _unrank1(n, r, a): + if n > 0: + a[n - 1], a[r % n] = a[r % n], a[n - 1] + _unrank1(n - 1, r//n, a) + + id_perm = list(range(n)) + n = int(n) + r = r % ifac(n) + _unrank1(n, r, id_perm) + return self._af_new(id_perm) + + def rank_nonlex(self, inv_perm=None): + """ + This is a linear time ranking algorithm that does not + enforce lexicographic order [3]. + + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([0, 1, 2, 3]) + >>> p.rank_nonlex() + 23 + + See Also + ======== + + next_nonlex, unrank_nonlex + """ + def _rank1(n, perm, inv_perm): + if n == 1: + return 0 + s = perm[n - 1] + t = inv_perm[n - 1] + perm[n - 1], perm[t] = perm[t], s + inv_perm[n - 1], inv_perm[s] = inv_perm[s], t + return s + n*_rank1(n - 1, perm, inv_perm) + + if inv_perm is None: + inv_perm = (~self).array_form + if not inv_perm: + return 0 + perm = self.array_form[:] + r = _rank1(len(perm), perm, inv_perm) + return r + + def next_nonlex(self): + """ + Returns the next permutation in nonlex order [3]. 
+ If self is the last permutation in this order it returns None. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> from sympy import init_printing + >>> init_printing(perm_cyclic=False, pretty_print=False) + >>> p = Permutation([2, 0, 3, 1]); p.rank_nonlex() + 5 + >>> p = p.next_nonlex(); p + Permutation([3, 0, 1, 2]) + >>> p.rank_nonlex() + 6 + + See Also + ======== + + rank_nonlex, unrank_nonlex + """ + r = self.rank_nonlex() + if r == ifac(self.size) - 1: + return None + return self.unrank_nonlex(self.size, r + 1) + + def rank(self): + """ + Returns the lexicographic rank of the permutation. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([0, 1, 2, 3]) + >>> p.rank() + 0 + >>> p = Permutation([3, 2, 1, 0]) + >>> p.rank() + 23 + + See Also + ======== + + next_lex, unrank_lex, cardinality, length, order, size + """ + if self._rank is not None: + return self._rank + rank = 0 + rho = self.array_form[:] + n = self.size - 1 + size = n + 1 + psize = int(ifac(n)) + for j in range(size - 1): + rank += rho[j]*psize + for i in range(j + 1, size): + if rho[i] > rho[j]: + rho[i] -= 1 + psize //= n + n -= 1 + self._rank = rank + return rank + + @property + def cardinality(self): + """ + Returns the number of all possible permutations. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([0, 1, 2, 3]) + >>> p.cardinality + 24 + + See Also + ======== + + length, order, rank, size + """ + return int(ifac(self.size)) + + def parity(self): + """ + Computes the parity of a permutation. + + Explanation + =========== + + The parity of a permutation reflects the parity of the + number of inversions in the permutation, i.e., the + number of pairs of x and y such that ``x > y`` but ``p[x] < p[y]``. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([0, 1, 2, 3]) + >>> p.parity() + 0 + >>> p = Permutation([3, 2, 0, 1]) + >>> p.parity() + 1 + + See Also + ======== + + _af_parity + """ + if self._cyclic_form is not None: + return (self.size - self.cycles) % 2 + + return _af_parity(self.array_form) + + @property + def is_even(self): + """ + Checks if a permutation is even. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([0, 1, 2, 3]) + >>> p.is_even + True + >>> p = Permutation([3, 2, 1, 0]) + >>> p.is_even + True + + See Also + ======== + + is_odd + """ + return not self.is_odd + + @property + def is_odd(self): + """ + Checks if a permutation is odd. 
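As an aside on the ``parity`` implementation above, the parity can be read directly off the cycle decomposition: a permutation of ``size`` points with ``cycles`` cycles (singletons included) has parity ``(size - cycles) % 2``.

from sympy.combinatorics import Permutation

p = Permutation([3, 2, 0, 1])
assert p.cycles == 1                          # a single 4-cycle
assert p.parity() == (p.size - p.cycles) % 2 == 1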
+ + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([0, 1, 2, 3]) + >>> p.is_odd + False + >>> p = Permutation([3, 2, 0, 1]) + >>> p.is_odd + True + + See Also + ======== + + is_even + """ + return bool(self.parity() % 2) + + @property + def is_Singleton(self): + """ + Checks to see if the permutation contains only one number and is + thus the only possible permutation of this set of numbers + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> Permutation([0]).is_Singleton + True + >>> Permutation([0, 1]).is_Singleton + False + + See Also + ======== + + is_Empty + """ + return self.size == 1 + + @property + def is_Empty(self): + """ + Checks to see if the permutation is a set with zero elements + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> Permutation([]).is_Empty + True + >>> Permutation([0]).is_Empty + False + + See Also + ======== + + is_Singleton + """ + return self.size == 0 + + @property + def is_identity(self): + return self.is_Identity + + @property + def is_Identity(self): + """ + Returns True if the Permutation is an identity permutation. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([]) + >>> p.is_Identity + True + >>> p = Permutation([[0], [1], [2]]) + >>> p.is_Identity + True + >>> p = Permutation([0, 1, 2]) + >>> p.is_Identity + True + >>> p = Permutation([0, 2, 1]) + >>> p.is_Identity + False + + See Also + ======== + + order + """ + af = self.array_form + return not af or all(i == af[i] for i in range(self.size)) + + def ascents(self): + """ + Returns the positions of ascents in a permutation, ie, the location + where p[i] < p[i+1] + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([4, 0, 1, 3, 2]) + >>> p.ascents() + [1, 2] + + See Also + ======== + + descents, inversions, min, max + """ + a = self.array_form + pos = [i for i in range(len(a) - 1) if a[i] < a[i + 1]] + return pos + + def descents(self): + """ + Returns the positions of descents in a permutation, ie, the location + where p[i] > p[i+1] + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([4, 0, 1, 3, 2]) + >>> p.descents() + [0, 3] + + See Also + ======== + + ascents, inversions, min, max + """ + a = self.array_form + pos = [i for i in range(len(a) - 1) if a[i] > a[i + 1]] + return pos + + def max(self): + """ + The maximum element moved by the permutation. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([1, 0, 2, 3, 4]) + >>> p.max() + 1 + + See Also + ======== + + min, descents, ascents, inversions + """ + max = 0 + a = self.array_form + for i in range(len(a)): + if a[i] != i and a[i] > max: + max = a[i] + return max + + def min(self): + """ + The minimum element moved by the permutation. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([0, 1, 4, 3, 2]) + >>> p.min() + 2 + + See Also + ======== + + max, descents, ascents, inversions + """ + a = self.array_form + min = len(a) + for i in range(len(a)): + if a[i] != i and a[i] < min: + min = a[i] + return min + + def inversions(self): + """ + Computes the number of inversions of a permutation. + + Explanation + =========== + + An inversion is where i > j but p[i] < p[j]. + + For small length of p, it iterates over all i and j + values and calculates the number of inversions. 
+ For large length of p, it uses a variation of merge + sort to calculate the number of inversions. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([0, 1, 2, 3, 4, 5]) + >>> p.inversions() + 0 + >>> Permutation([3, 2, 1, 0]).inversions() + 6 + + See Also + ======== + + descents, ascents, min, max + + References + ========== + + .. [1] https://www.cp.eng.chula.ac.th/~prabhas//teaching/algo/algo2008/count-inv.htm + + """ + inversions = 0 + a = self.array_form + n = len(a) + if n < 130: + for i in range(n - 1): + b = a[i] + for c in a[i + 1:]: + if b > c: + inversions += 1 + else: + k = 1 + right = 0 + arr = a[:] + temp = a[:] + while k < n: + i = 0 + while i + k < n: + right = i + k * 2 - 1 + if right >= n: + right = n - 1 + inversions += _merge(arr, temp, i, i + k, right) + i = i + k * 2 + k = k * 2 + return inversions + + def commutator(self, x): + """Return the commutator of ``self`` and ``x``: ``~x*~self*x*self`` + + If f and g are part of a group, G, then the commutator of f and g + is the group identity iff f and g commute, i.e. fg == gf. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> from sympy import init_printing + >>> init_printing(perm_cyclic=False, pretty_print=False) + >>> p = Permutation([0, 2, 3, 1]) + >>> x = Permutation([2, 0, 3, 1]) + >>> c = p.commutator(x); c + Permutation([2, 1, 3, 0]) + >>> c == ~x*~p*x*p + True + + >>> I = Permutation(3) + >>> p = [I + i for i in range(6)] + >>> for i in range(len(p)): + ... for j in range(len(p)): + ... c = p[i].commutator(p[j]) + ... if p[i]*p[j] == p[j]*p[i]: + ... assert c == I + ... else: + ... assert c != I + ... + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Commutator + """ + + a = self.array_form + b = x.array_form + n = len(a) + if len(b) != n: + raise ValueError("The permutations must be of equal size.") + inva = [None]*n + for i in range(n): + inva[a[i]] = i + invb = [None]*n + for i in range(n): + invb[b[i]] = i + return self._af_new([a[b[inva[i]]] for i in invb]) + + def signature(self): + """ + Gives the signature of the permutation needed to place the + elements of the permutation in canonical order. + + The signature is calculated as (-1)^ + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([0, 1, 2]) + >>> p.inversions() + 0 + >>> p.signature() + 1 + >>> q = Permutation([0,2,1]) + >>> q.inversions() + 1 + >>> q.signature() + -1 + + See Also + ======== + + inversions + """ + if self.is_even: + return 1 + return -1 + + def order(self): + """ + Computes the order of a permutation. + + When the permutation is raised to the power of its + order it equals the identity permutation. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> from sympy import init_printing + >>> init_printing(perm_cyclic=False, pretty_print=False) + >>> p = Permutation([3, 1, 5, 2, 4, 0]) + >>> p.order() + 4 + >>> (p**(p.order())) + Permutation([], size=6) + + See Also + ======== + + identity, cardinality, length, rank, size + """ + + return reduce(lcm, [len(cycle) for cycle in self.cyclic_form], 1) + + def length(self): + """ + Returns the number of integers moved by a permutation. 
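Equivalently, as a quick sketch, the length is the size of the support:

from sympy.combinatorics import Permutation

p = Permutation([0, 3, 2, 1])
assert p.support() == [1, 3]
assert p.length() == len(p.support()) == 2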
+ + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> Permutation([0, 3, 2, 1]).length() + 2 + >>> Permutation([[0, 1], [2, 3]]).length() + 4 + + See Also + ======== + + min, max, support, cardinality, order, rank, size + """ + + return len(self.support()) + + @property + def cycle_structure(self): + """Return the cycle structure of the permutation as a dictionary + indicating the multiplicity of each cycle length. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> Permutation(3).cycle_structure + {1: 4} + >>> Permutation(0, 4, 3)(1, 2)(5, 6).cycle_structure + {2: 2, 3: 1} + """ + if self._cycle_structure: + rv = self._cycle_structure + else: + rv = defaultdict(int) + singletons = self.size + for c in self.cyclic_form: + rv[len(c)] += 1 + singletons -= len(c) + if singletons: + rv[1] = singletons + self._cycle_structure = rv + return dict(rv) # make a copy + + @property + def cycles(self): + """ + Returns the number of cycles contained in the permutation + (including singletons). + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> Permutation([0, 1, 2]).cycles + 3 + >>> Permutation([0, 1, 2]).full_cyclic_form + [[0], [1], [2]] + >>> Permutation(0, 1)(2, 3).cycles + 2 + + See Also + ======== + sympy.functions.combinatorial.numbers.stirling + """ + return len(self.full_cyclic_form) + + def index(self): + """ + Returns the index of a permutation. + + The index of a permutation is the sum of all subscripts j such + that p[j] is greater than p[j+1]. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([3, 0, 2, 1, 4]) + >>> p.index() + 2 + """ + a = self.array_form + + return sum([j for j in range(len(a) - 1) if a[j] > a[j + 1]]) + + def runs(self): + """ + Returns the runs of a permutation. + + An ascending sequence in a permutation is called a run [5]. + + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([2, 5, 7, 3, 6, 0, 1, 4, 8]) + >>> p.runs() + [[2, 5, 7], [3, 6], [0, 1, 4, 8]] + >>> q = Permutation([1,3,2,0]) + >>> q.runs() + [[1, 3], [2], [0]] + """ + return runs(self.array_form) + + def inversion_vector(self): + """Return the inversion vector of the permutation. + + The inversion vector consists of elements whose value + indicates the number of elements in the permutation + that are lesser than it and lie on its right hand side. + + The inversion vector is the same as the Lehmer encoding of a + permutation. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([4, 8, 0, 7, 1, 5, 3, 6, 2]) + >>> p.inversion_vector() + [4, 7, 0, 5, 0, 2, 1, 1] + >>> p = Permutation([3, 2, 1, 0]) + >>> p.inversion_vector() + [3, 2, 1] + + The inversion vector increases lexicographically with the rank + of the permutation, the -ith element cycling through 0..i. + + >>> p = Permutation(2) + >>> while p: + ... print('%s %s %s' % (p, p.inversion_vector(), p.rank())) + ... 
p = p.next_lex() + (2) [0, 0] 0 + (1 2) [0, 1] 1 + (2)(0 1) [1, 0] 2 + (0 1 2) [1, 1] 3 + (0 2 1) [2, 0] 4 + (0 2) [2, 1] 5 + + See Also + ======== + + from_inversion_vector + """ + self_array_form = self.array_form + n = len(self_array_form) + inversion_vector = [0] * (n - 1) + + for i in range(n - 1): + val = 0 + for j in range(i + 1, n): + if self_array_form[j] < self_array_form[i]: + val += 1 + inversion_vector[i] = val + return inversion_vector + + def rank_trotterjohnson(self): + """ + Returns the Trotter Johnson rank, which we get from the minimal + change algorithm. See [4] section 2.4. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([0, 1, 2, 3]) + >>> p.rank_trotterjohnson() + 0 + >>> p = Permutation([0, 2, 1, 3]) + >>> p.rank_trotterjohnson() + 7 + + See Also + ======== + + unrank_trotterjohnson, next_trotterjohnson + """ + if self.array_form == [] or self.is_Identity: + return 0 + if self.array_form == [1, 0]: + return 1 + perm = self.array_form + n = self.size + rank = 0 + for j in range(1, n): + k = 1 + i = 0 + while perm[i] != j: + if perm[i] < j: + k += 1 + i += 1 + j1 = j + 1 + if rank % 2 == 0: + rank = j1*rank + j1 - k + else: + rank = j1*rank + k - 1 + return rank + + @classmethod + def unrank_trotterjohnson(cls, size, rank): + """ + Trotter Johnson permutation unranking. See [4] section 2.4. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> from sympy import init_printing + >>> init_printing(perm_cyclic=False, pretty_print=False) + >>> Permutation.unrank_trotterjohnson(5, 10) + Permutation([0, 3, 1, 2, 4]) + + See Also + ======== + + rank_trotterjohnson, next_trotterjohnson + """ + perm = [0]*size + r2 = 0 + n = ifac(size) + pj = 1 + for j in range(2, size + 1): + pj *= j + r1 = (rank * pj) // n + k = r1 - j*r2 + if r2 % 2 == 0: + for i in range(j - 1, j - k - 1, -1): + perm[i] = perm[i - 1] + perm[j - k - 1] = j - 1 + else: + for i in range(j - 1, k, -1): + perm[i] = perm[i - 1] + perm[k] = j - 1 + r2 = r1 + return cls._af_new(perm) + + def next_trotterjohnson(self): + """ + Returns the next permutation in Trotter-Johnson order. + If self is the last permutation it returns None. + See [4] section 2.4. If it is desired to generate all such + permutations, they can be generated in order more quickly + with the ``generate_bell`` function. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> from sympy import init_printing + >>> init_printing(perm_cyclic=False, pretty_print=False) + >>> p = Permutation([3, 0, 2, 1]) + >>> p.rank_trotterjohnson() + 4 + >>> p = p.next_trotterjohnson(); p + Permutation([0, 3, 2, 1]) + >>> p.rank_trotterjohnson() + 5 + + See Also + ======== + + rank_trotterjohnson, unrank_trotterjohnson, sympy.utilities.iterables.generate_bell + """ + pi = self.array_form[:] + n = len(pi) + st = 0 + rho = pi[:] + done = False + m = n-1 + while m > 0 and not done: + d = rho.index(m) + for i in range(d, m): + rho[i] = rho[i + 1] + par = _af_parity(rho[:m]) + if par == 1: + if d == m: + m -= 1 + else: + pi[st + d], pi[st + d + 1] = pi[st + d + 1], pi[st + d] + done = True + else: + if d == 0: + m -= 1 + st += 1 + else: + pi[st + d], pi[st + d - 1] = pi[st + d - 1], pi[st + d] + done = True + if m == 0: + return None + return self._af_new(pi) + + def get_precedence_matrix(self): + """ + Gets the precedence matrix. This is used for computing the + distance between two permutations. 
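+
+        Entry ``m[i, j]`` of the returned matrix is 1 exactly when element
+        ``i`` appears before element ``j`` in the array form of the
+        permutation, and 0 otherwise.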
+ + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> from sympy import init_printing + >>> init_printing(perm_cyclic=False, pretty_print=False) + >>> p = Permutation.josephus(3, 6, 1) + >>> p + Permutation([2, 5, 3, 1, 4, 0]) + >>> p.get_precedence_matrix() + Matrix([ + [0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 1, 0], + [1, 1, 0, 1, 1, 1], + [1, 1, 0, 0, 1, 0], + [1, 0, 0, 0, 0, 0], + [1, 1, 0, 1, 1, 0]]) + + See Also + ======== + + get_precedence_distance, get_adjacency_matrix, get_adjacency_distance + """ + m = zeros(self.size) + perm = self.array_form + for i in range(m.rows): + for j in range(i + 1, m.cols): + m[perm[i], perm[j]] = 1 + return m + + def get_precedence_distance(self, other): + """ + Computes the precedence distance between two permutations. + + Explanation + =========== + + Suppose p and p' represent n jobs. The precedence metric + counts the number of times a job j is preceded by job i + in both p and p'. This metric is commutative. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([2, 0, 4, 3, 1]) + >>> q = Permutation([3, 1, 2, 4, 0]) + >>> p.get_precedence_distance(q) + 7 + >>> q.get_precedence_distance(p) + 7 + + See Also + ======== + + get_precedence_matrix, get_adjacency_matrix, get_adjacency_distance + """ + if self.size != other.size: + raise ValueError("The permutations must be of equal size.") + self_prec_mat = self.get_precedence_matrix() + other_prec_mat = other.get_precedence_matrix() + n_prec = 0 + for i in range(self.size): + for j in range(self.size): + if i == j: + continue + if self_prec_mat[i, j] * other_prec_mat[i, j] == 1: + n_prec += 1 + d = self.size * (self.size - 1)//2 - n_prec + return d + + def get_adjacency_matrix(self): + """ + Computes the adjacency matrix of a permutation. + + Explanation + =========== + + If job i is adjacent to job j in a permutation p + then we set m[i, j] = 1 where m is the adjacency + matrix of p. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation.josephus(3, 6, 1) + >>> p.get_adjacency_matrix() + Matrix([ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 1], + [0, 1, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0]]) + >>> q = Permutation([0, 1, 2, 3]) + >>> q.get_adjacency_matrix() + Matrix([ + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0]]) + + See Also + ======== + + get_precedence_matrix, get_precedence_distance, get_adjacency_distance + """ + m = zeros(self.size) + perm = self.array_form + for i in range(self.size - 1): + m[perm[i], perm[i + 1]] = 1 + return m + + def get_adjacency_distance(self, other): + """ + Computes the adjacency distance between two permutations. + + Explanation + =========== + + This metric counts the number of times a pair i,j of jobs is + adjacent in both p and p'. If n_adj is this quantity then + the adjacency distance is n - n_adj - 1 [1] + + [1] Reeves, Colin R. Landscapes, Operators and Heuristic search, Annals + of Operational Research, 86, pp 473-490. 
(1999) + + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([0, 3, 1, 2, 4]) + >>> q = Permutation.josephus(4, 5, 2) + >>> p.get_adjacency_distance(q) + 3 + >>> r = Permutation([0, 2, 1, 4, 3]) + >>> p.get_adjacency_distance(r) + 4 + + See Also + ======== + + get_precedence_matrix, get_precedence_distance, get_adjacency_matrix + """ + if self.size != other.size: + raise ValueError("The permutations must be of the same size.") + self_adj_mat = self.get_adjacency_matrix() + other_adj_mat = other.get_adjacency_matrix() + n_adj = 0 + for i in range(self.size): + for j in range(self.size): + if i == j: + continue + if self_adj_mat[i, j] * other_adj_mat[i, j] == 1: + n_adj += 1 + d = self.size - n_adj - 1 + return d + + def get_positional_distance(self, other): + """ + Computes the positional distance between two permutations. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> p = Permutation([0, 3, 1, 2, 4]) + >>> q = Permutation.josephus(4, 5, 2) + >>> r = Permutation([3, 1, 4, 0, 2]) + >>> p.get_positional_distance(q) + 12 + >>> p.get_positional_distance(r) + 12 + + See Also + ======== + + get_precedence_distance, get_adjacency_distance + """ + a = self.array_form + b = other.array_form + if len(a) != len(b): + raise ValueError("The permutations must be of the same size.") + return sum([abs(a[i] - b[i]) for i in range(len(a))]) + + @classmethod + def josephus(cls, m, n, s=1): + """Return as a permutation the shuffling of range(n) using the Josephus + scheme in which every m-th item is selected until all have been chosen. + The returned permutation has elements listed by the order in which they + were selected. + + The parameter ``s`` stops the selection process when there are ``s`` + items remaining and these are selected by continuing the selection, + counting by 1 rather than by ``m``. + + Consider selecting every 3rd item from 6 until only 2 remain:: + + choices chosen + ======== ====== + 012345 + 01 345 2 + 01 34 25 + 01 4 253 + 0 4 2531 + 0 25314 + 253140 + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> Permutation.josephus(3, 6, 2).array_form + [2, 5, 3, 1, 4, 0] + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Flavius_Josephus + .. [2] https://en.wikipedia.org/wiki/Josephus_problem + .. [3] https://web.archive.org/web/20171008094331/http://www.wou.edu/~burtonl/josephus.html + + """ + from collections import deque + m -= 1 + Q = deque(list(range(n))) + perm = [] + while len(Q) > max(s, 1): + for dp in range(m): + Q.append(Q.popleft()) + perm.append(Q.popleft()) + perm.extend(list(Q)) + return cls(perm) + + @classmethod + def from_inversion_vector(cls, inversion): + """ + Calculates the permutation from the inversion vector. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> from sympy import init_printing + >>> init_printing(perm_cyclic=False, pretty_print=False) + >>> Permutation.from_inversion_vector([3, 2, 1, 0, 0]) + Permutation([3, 2, 1, 0, 4, 5]) + + """ + size = len(inversion) + N = list(range(size + 1)) + perm = [] + try: + for k in range(size): + val = N[inversion[k]] + perm.append(val) + N.remove(val) + except IndexError: + raise ValueError("The inversion vector is not valid.") + perm.extend(N) + return cls._af_new(perm) + + @classmethod + def random(cls, n): + """ + Generates a random permutation of length ``n``. + + Uses the underlying Python pseudo-random number generator. 
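+        (It shuffles with ``random.shuffle``, so results can be made
+        reproducible by seeding Python's ``random`` module.)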
+ + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> Permutation.random(2) in (Permutation([1, 0]), Permutation([0, 1])) + True + + """ + perm_array = list(range(n)) + random.shuffle(perm_array) + return cls._af_new(perm_array) + + @classmethod + def unrank_lex(cls, size, rank): + """ + Lexicographic permutation unranking. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> from sympy import init_printing + >>> init_printing(perm_cyclic=False, pretty_print=False) + >>> a = Permutation.unrank_lex(5, 10) + >>> a.rank() + 10 + >>> a + Permutation([0, 2, 4, 1, 3]) + + See Also + ======== + + rank, next_lex + """ + perm_array = [0] * size + psize = 1 + for i in range(size): + new_psize = psize*(i + 1) + d = (rank % new_psize) // psize + rank -= d*psize + perm_array[size - i - 1] = d + for j in range(size - i, size): + if perm_array[j] > d - 1: + perm_array[j] += 1 + psize = new_psize + return cls._af_new(perm_array) + + def resize(self, n): + """Resize the permutation to the new size ``n``. + + Parameters + ========== + + n : int + The new size of the permutation. + + Raises + ====== + + ValueError + If the permutation cannot be resized to the given size. + This may only happen when resized to a smaller size than + the original. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation + + Increasing the size of a permutation: + + >>> p = Permutation(0, 1, 2) + >>> p = p.resize(5) + >>> p + (4)(0 1 2) + + Decreasing the size of the permutation: + + >>> p = p.resize(4) + >>> p + (3)(0 1 2) + + If resizing to the specific size breaks the cycles: + + >>> p.resize(2) + Traceback (most recent call last): + ... + ValueError: The permutation cannot be resized to 2 because the + cycle (0, 1, 2) may break. + """ + aform = self.array_form + l = len(aform) + if n > l: + aform += list(range(l, n)) + return Permutation._af_new(aform) + + elif n < l: + cyclic_form = self.full_cyclic_form + new_cyclic_form = [] + for cycle in cyclic_form: + cycle_min = min(cycle) + cycle_max = max(cycle) + if cycle_min <= n-1: + if cycle_max > n-1: + raise ValueError( + "The permutation cannot be resized to {} " + "because the cycle {} may break." + .format(n, tuple(cycle))) + + new_cyclic_form.append(cycle) + return Permutation(new_cyclic_form) + + return self + + # XXX Deprecated flag + print_cyclic = None + + +def _merge(arr, temp, left, mid, right): + """ + Merges two sorted arrays and calculates the inversion count. + + Helper function for calculating inversions. This method is + for internal use only. + """ + i = k = left + j = mid + inv_count = 0 + while i < mid and j <= right: + if arr[i] < arr[j]: + temp[k] = arr[i] + k += 1 + i += 1 + else: + temp[k] = arr[j] + k += 1 + j += 1 + inv_count += (mid -i) + while i < mid: + temp[k] = arr[i] + k += 1 + i += 1 + if j <= right: + k += right - j + 1 + j += right - j + 1 + arr[left:k + 1] = temp[left:k + 1] + else: + arr[left:right + 1] = temp[left:right + 1] + return inv_count + +Perm = Permutation +_af_new = Perm._af_new + + +class AppliedPermutation(Expr): + """A permutation applied to a symbolic variable. 
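+
+    With the default ``evaluate`` behavior, the application stays
+    unevaluated until ``x`` becomes an integer (for example through a
+    substitution), at which point the permutation is applied to it.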
+ + Parameters + ========== + + perm : Permutation + x : Expr + + Examples + ======== + + >>> from sympy import Symbol + >>> from sympy.combinatorics import Permutation + + Creating a symbolic permutation function application: + + >>> x = Symbol('x') + >>> p = Permutation(0, 1, 2) + >>> p.apply(x) + AppliedPermutation((0 1 2), x) + >>> _.subs(x, 1) + 2 + """ + def __new__(cls, perm, x, evaluate=None): + if evaluate is None: + evaluate = global_parameters.evaluate + + perm = _sympify(perm) + x = _sympify(x) + + if not isinstance(perm, Permutation): + raise ValueError("{} must be a Permutation instance." + .format(perm)) + + if evaluate: + if x.is_Integer: + return perm.apply(x) + + obj = super().__new__(cls, perm, x) + return obj + + +@dispatch(Permutation, Permutation) +def _eval_is_eq(lhs, rhs): + if lhs._size != rhs._size: + return None + return lhs._array_form == rhs._array_form diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/polyhedron.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/polyhedron.py new file mode 100644 index 0000000000000000000000000000000000000000..a0433bdeafaeef738ef65d3b799ecbc2623b4f81 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/polyhedron.py @@ -0,0 +1,1019 @@ +from sympy.combinatorics import Permutation as Perm +from sympy.combinatorics.perm_groups import PermutationGroup +from sympy.core import Basic, Tuple, default_sort_key +from sympy.sets import FiniteSet +from sympy.utilities.iterables import (minlex, unflatten, flatten) +from sympy.utilities.misc import as_int + +rmul = Perm.rmul + + +class Polyhedron(Basic): + """ + Represents the polyhedral symmetry group (PSG). + + Explanation + =========== + + The PSG is one of the symmetry groups of the Platonic solids. + There are three polyhedral groups: the tetrahedral group + of order 12, the octahedral group of order 24, and the + icosahedral group of order 60. + + All doctests have been given in the docstring of the + constructor of the object. + + References + ========== + + .. [1] https://mathworld.wolfram.com/PolyhedralGroup.html + + """ + _edges = None + + def __new__(cls, corners, faces=(), pgroup=()): + """ + The constructor of the Polyhedron group object. + + Explanation + =========== + + It takes up to three parameters: the corners, faces, and + allowed transformations. + + The corners/vertices are entered as a list of arbitrary + expressions that are used to identify each vertex. + + The faces are entered as a list of tuples of indices; a tuple + of indices identifies the vertices which define the face. They + should be entered in a cw or ccw order; they will be standardized + by reversal and rotation to be give the lowest lexical ordering. + If no faces are given then no edges will be computed. + + >>> from sympy.combinatorics.polyhedron import Polyhedron + >>> Polyhedron(list('abc'), [(1, 2, 0)]).faces + {(0, 1, 2)} + >>> Polyhedron(list('abc'), [(1, 0, 2)]).faces + {(0, 1, 2)} + + The allowed transformations are entered as allowable permutations + of the vertices for the polyhedron. Instance of Permutations + (as with faces) should refer to the supplied vertices by index. + These permutation are stored as a PermutationGroup. + + Examples + ======== + + >>> from sympy.combinatorics.permutations import Permutation + >>> from sympy import init_printing + >>> from sympy.abc import w, x, y, z + >>> init_printing(pretty_print=False, perm_cyclic=False) + + Here we construct the Polyhedron object for a tetrahedron. 
+ + >>> corners = [w, x, y, z] + >>> faces = [(0, 1, 2), (0, 2, 3), (0, 3, 1), (1, 2, 3)] + + Next, allowed transformations of the polyhedron must be given. This + is given as permutations of vertices. + + Although the vertices of a tetrahedron can be numbered in 24 (4!) + different ways, there are only 12 different orientations for a + physical tetrahedron. The following permutations, applied once or + twice, will generate all 12 of the orientations. (The identity + permutation, Permutation(range(4)), is not included since it does + not change the orientation of the vertices.) + + >>> pgroup = [Permutation([[0, 1, 2], [3]]), \ + Permutation([[0, 1, 3], [2]]), \ + Permutation([[0, 2, 3], [1]]), \ + Permutation([[1, 2, 3], [0]]), \ + Permutation([[0, 1], [2, 3]]), \ + Permutation([[0, 2], [1, 3]]), \ + Permutation([[0, 3], [1, 2]])] + + The Polyhedron is now constructed and demonstrated: + + >>> tetra = Polyhedron(corners, faces, pgroup) + >>> tetra.size + 4 + >>> tetra.edges + {(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)} + >>> tetra.corners + (w, x, y, z) + + It can be rotated with an arbitrary permutation of vertices, e.g. + the following permutation is not in the pgroup: + + >>> tetra.rotate(Permutation([0, 1, 3, 2])) + >>> tetra.corners + (w, x, z, y) + + An allowed permutation of the vertices can be constructed by + repeatedly applying permutations from the pgroup to the vertices. + Here is a demonstration that applying p and p**2 for every p in + pgroup generates all the orientations of a tetrahedron and no others: + + >>> all = ( (w, x, y, z), \ + (x, y, w, z), \ + (y, w, x, z), \ + (w, z, x, y), \ + (z, w, y, x), \ + (w, y, z, x), \ + (y, z, w, x), \ + (x, z, y, w), \ + (z, y, x, w), \ + (y, x, z, w), \ + (x, w, z, y), \ + (z, x, w, y) ) + + >>> got = [] + >>> for p in (pgroup + [p**2 for p in pgroup]): + ... h = Polyhedron(corners) + ... h.rotate(p) + ... got.append(h.corners) + ... + >>> set(got) == set(all) + True + + The make_perm method of a PermutationGroup will randomly pick + permutations, multiply them together, and return the permutation that + can be applied to the polyhedron to give the orientation produced + by those individual permutations. + + Here, 3 permutations are used: + + >>> tetra.pgroup.make_perm(3) # doctest: +SKIP + Permutation([0, 3, 1, 2]) + + To select the permutations that should be used, supply a list + of indices to the permutations in pgroup in the order they should + be applied: + + >>> use = [0, 0, 2] + >>> p002 = tetra.pgroup.make_perm(3, use) + >>> p002 + Permutation([1, 0, 3, 2]) + + + Apply them one at a time: + + >>> tetra.reset() + >>> for i in use: + ... tetra.rotate(pgroup[i]) + ... + >>> tetra.vertices + (x, w, z, y) + >>> sequentially = tetra.vertices + + Apply the composite permutation: + + >>> tetra.reset() + >>> tetra.rotate(p002) + >>> tetra.corners + (x, w, z, y) + >>> tetra.corners in all and tetra.corners == sequentially + True + + Notes + ===== + + Defining permutation groups + --------------------------- + + It is not necessary to enter any permutations, nor is necessary to + enter a complete set of transformations. In fact, for a polyhedron, + all configurations can be constructed from just two permutations. + For example, the orientations of a tetrahedron can be generated from + an axis passing through a vertex and face and another axis passing + through a different vertex or from an axis passing through the + midpoints of two edges opposite of each other. 
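+
+        As a quick illustrative check of that claim, the first two entries
+        of the ``pgroup`` defined above (rotations about two different
+        vertices) already generate all 12 orientations:
+
+        >>> from sympy.combinatorics import PermutationGroup
+        >>> PermutationGroup(pgroup[:2]).order()
+        12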
+ + For simplicity of presentation, consider a square -- + not a cube -- with vertices 1, 2, 3, and 4: + + 1-----2 We could think of axes of rotation being: + | | 1) through the face + | | 2) from midpoint 1-2 to 3-4 or 1-3 to 2-4 + 3-----4 3) lines 1-4 or 2-3 + + + To determine how to write the permutations, imagine 4 cameras, + one at each corner, labeled A-D: + + A B A B + 1-----2 1-----3 vertex index: + | | | | 1 0 + | | | | 2 1 + 3-----4 2-----4 3 2 + C D C D 4 3 + + original after rotation + along 1-4 + + A diagonal and a face axis will be chosen for the "permutation group" + from which any orientation can be constructed. + + >>> pgroup = [] + + Imagine a clockwise rotation when viewing 1-4 from camera A. The new + orientation is (in camera-order): 1, 3, 2, 4 so the permutation is + given using the *indices* of the vertices as: + + >>> pgroup.append(Permutation((0, 2, 1, 3))) + + Now imagine rotating clockwise when looking down an axis entering the + center of the square as viewed. The new camera-order would be + 3, 1, 4, 2 so the permutation is (using indices): + + >>> pgroup.append(Permutation((2, 0, 3, 1))) + + The square can now be constructed: + ** use real-world labels for the vertices, entering them in + camera order + ** for the faces we use zero-based indices of the vertices + in *edge-order* as the face is traversed; neither the + direction nor the starting point matter -- the faces are + only used to define edges (if so desired). + + >>> square = Polyhedron((1, 2, 3, 4), [(0, 1, 3, 2)], pgroup) + + To rotate the square with a single permutation we can do: + + >>> square.rotate(square.pgroup[0]) + >>> square.corners + (1, 3, 2, 4) + + To use more than one permutation (or to use one permutation more + than once) it is more convenient to use the make_perm method: + + >>> p011 = square.pgroup.make_perm([0, 1, 1]) # diag flip + 2 rotations + >>> square.reset() # return to initial orientation + >>> square.rotate(p011) + >>> square.corners + (4, 2, 3, 1) + + Thinking outside the box + ------------------------ + + Although the Polyhedron object has a direct physical meaning, it + actually has broader application. In the most general sense it is + just a decorated PermutationGroup, allowing one to connect the + permutations to something physical. For example, a Rubik's cube is + not a proper polyhedron, but the Polyhedron class can be used to + represent it in a way that helps to visualize the Rubik's cube. + + >>> from sympy import flatten, unflatten, symbols + >>> from sympy.combinatorics import RubikGroup + >>> facelets = flatten([symbols(s+'1:5') for s in 'UFRBLD']) + >>> def show(): + ... pairs = unflatten(r2.corners, 2) + ... print(pairs[::2]) + ... print(pairs[1::2]) + ... + >>> r2 = Polyhedron(facelets, pgroup=RubikGroup(2)) + >>> show() + [(U1, U2), (F1, F2), (R1, R2), (B1, B2), (L1, L2), (D1, D2)] + [(U3, U4), (F3, F4), (R3, R4), (B3, B4), (L3, L4), (D3, D4)] + >>> r2.rotate(0) # cw rotation of F + >>> show() + [(U1, U2), (F3, F1), (U3, R2), (B1, B2), (L1, D1), (R3, R1)] + [(L4, L2), (F4, F2), (U4, R4), (B3, B4), (L3, D2), (D3, D4)] + + Predefined Polyhedra + ==================== + + For convenience, the vertices and faces are defined for the following + standard solids along with a permutation group for transformations. + When the polyhedron is oriented as indicated below, the vertices in + a given horizontal plane are numbered in ccw direction, starting from + the vertex that will give the lowest indices in a given face. 
(In the + net of the vertices, indices preceded by "-" indicate replication of + the lhs index in the net.) + + tetrahedron, tetrahedron_faces + ------------------------------ + + 4 vertices (vertex up) net: + + 0 0-0 + 1 2 3-1 + + 4 faces: + + (0, 1, 2) (0, 2, 3) (0, 3, 1) (1, 2, 3) + + cube, cube_faces + ---------------- + + 8 vertices (face up) net: + + 0 1 2 3-0 + 4 5 6 7-4 + + 6 faces: + + (0, 1, 2, 3) + (0, 1, 5, 4) (1, 2, 6, 5) (2, 3, 7, 6) (0, 3, 7, 4) + (4, 5, 6, 7) + + octahedron, octahedron_faces + ---------------------------- + + 6 vertices (vertex up) net: + + 0 0 0-0 + 1 2 3 4-1 + 5 5 5-5 + + 8 faces: + + (0, 1, 2) (0, 2, 3) (0, 3, 4) (0, 1, 4) + (1, 2, 5) (2, 3, 5) (3, 4, 5) (1, 4, 5) + + dodecahedron, dodecahedron_faces + -------------------------------- + + 20 vertices (vertex up) net: + + 0 1 2 3 4 -0 + 5 6 7 8 9 -5 + 14 10 11 12 13-14 + 15 16 17 18 19-15 + + 12 faces: + + (0, 1, 2, 3, 4) (0, 1, 6, 10, 5) (1, 2, 7, 11, 6) + (2, 3, 8, 12, 7) (3, 4, 9, 13, 8) (0, 4, 9, 14, 5) + (5, 10, 16, 15, 14) (6, 10, 16, 17, 11) (7, 11, 17, 18, 12) + (8, 12, 18, 19, 13) (9, 13, 19, 15, 14)(15, 16, 17, 18, 19) + + icosahedron, icosahedron_faces + ------------------------------ + + 12 vertices (face up) net: + + 0 0 0 0 -0 + 1 2 3 4 5 -1 + 6 7 8 9 10 -6 + 11 11 11 11 -11 + + 20 faces: + + (0, 1, 2) (0, 2, 3) (0, 3, 4) + (0, 4, 5) (0, 1, 5) (1, 2, 6) + (2, 3, 7) (3, 4, 8) (4, 5, 9) + (1, 5, 10) (2, 6, 7) (3, 7, 8) + (4, 8, 9) (5, 9, 10) (1, 6, 10) + (6, 7, 11) (7, 8, 11) (8, 9, 11) + (9, 10, 11) (6, 10, 11) + + >>> from sympy.combinatorics.polyhedron import cube + >>> cube.edges + {(0, 1), (0, 3), (0, 4), (1, 2), (1, 5), (2, 3), (2, 6), (3, 7), (4, 5), (4, 7), (5, 6), (6, 7)} + + If you want to use letters or other names for the corners you + can still use the pre-calculated faces: + + >>> corners = list('abcdefgh') + >>> Polyhedron(corners, cube.faces).corners + (a, b, c, d, e, f, g, h) + + References + ========== + + .. [1] www.ocf.berkeley.edu/~wwu/articles/platonicsolids.pdf + + """ + faces = [minlex(f, directed=False, key=default_sort_key) for f in faces] + corners, faces, pgroup = args = \ + [Tuple(*a) for a in (corners, faces, pgroup)] + obj = Basic.__new__(cls, *args) + obj._corners = tuple(corners) # in order given + obj._faces = FiniteSet(*faces) + if pgroup and pgroup[0].size != len(corners): + raise ValueError("Permutation size unequal to number of corners.") + # use the identity permutation if none are given + obj._pgroup = PermutationGroup( + pgroup or [Perm(range(len(corners)))] ) + return obj + + @property + def corners(self): + """ + Get the corners of the Polyhedron. + + The method ``vertices`` is an alias for ``corners``. + + Examples + ======== + + >>> from sympy.combinatorics import Polyhedron + >>> from sympy.abc import a, b, c, d + >>> p = Polyhedron(list('abcd')) + >>> p.corners == p.vertices == (a, b, c, d) + True + + See Also + ======== + + array_form, cyclic_form + """ + return self._corners + vertices = corners + + @property + def array_form(self): + """Return the indices of the corners. + + The indices are given relative to the original position of corners. 
+ + Examples + ======== + + >>> from sympy.combinatorics.polyhedron import tetrahedron + >>> tetrahedron = tetrahedron.copy() + >>> tetrahedron.array_form + [0, 1, 2, 3] + + >>> tetrahedron.rotate(0) + >>> tetrahedron.array_form + [0, 2, 3, 1] + >>> tetrahedron.pgroup[0].array_form + [0, 2, 3, 1] + + See Also + ======== + + corners, cyclic_form + """ + corners = list(self.args[0]) + return [corners.index(c) for c in self.corners] + + @property + def cyclic_form(self): + """Return the indices of the corners in cyclic notation. + + The indices are given relative to the original position of corners. + + See Also + ======== + + corners, array_form + """ + return Perm._af_new(self.array_form).cyclic_form + + @property + def size(self): + """ + Get the number of corners of the Polyhedron. + """ + return len(self._corners) + + @property + def faces(self): + """ + Get the faces of the Polyhedron. + """ + return self._faces + + @property + def pgroup(self): + """ + Get the permutations of the Polyhedron. + """ + return self._pgroup + + @property + def edges(self): + """ + Given the faces of the polyhedra we can get the edges. + + Examples + ======== + + >>> from sympy.combinatorics import Polyhedron + >>> from sympy.abc import a, b, c + >>> corners = (a, b, c) + >>> faces = [(0, 1, 2)] + >>> Polyhedron(corners, faces).edges + {(0, 1), (0, 2), (1, 2)} + + """ + if self._edges is None: + output = set() + for face in self.faces: + for i in range(len(face)): + edge = tuple(sorted([face[i], face[i - 1]])) + output.add(edge) + self._edges = FiniteSet(*output) + return self._edges + + def rotate(self, perm): + """ + Apply a permutation to the polyhedron *in place*. The permutation + may be given as a Permutation instance or an integer indicating + which permutation from pgroup of the Polyhedron should be + applied. + + This is an operation that is analogous to rotation about + an axis by a fixed increment. + + Notes + ===== + + When a Permutation is applied, no check is done to see if that + is a valid permutation for the Polyhedron. For example, a cube + could be given a permutation which effectively swaps only 2 + vertices. A valid permutation (that rotates the object in a + physical way) will be obtained if one only uses + permutations from the ``pgroup`` of the Polyhedron. On the other + hand, allowing arbitrary rotations (applications of permutations) + gives a way to follow named elements rather than indices since + Polyhedron allows vertices to be named while Permutation works + only with indices. 
+ + Examples + ======== + + >>> from sympy.combinatorics import Polyhedron, Permutation + >>> from sympy.combinatorics.polyhedron import cube + >>> cube = cube.copy() + >>> cube.corners + (0, 1, 2, 3, 4, 5, 6, 7) + >>> cube.rotate(0) + >>> cube.corners + (1, 2, 3, 0, 5, 6, 7, 4) + + A non-physical "rotation" that is not prohibited by this method: + + >>> cube.reset() + >>> cube.rotate(Permutation([[1, 2]], size=8)) + >>> cube.corners + (0, 2, 1, 3, 4, 5, 6, 7) + + Polyhedron can be used to follow elements of set that are + identified by letters instead of integers: + + >>> shadow = h5 = Polyhedron(list('abcde')) + >>> p = Permutation([3, 0, 1, 2, 4]) + >>> h5.rotate(p) + >>> h5.corners + (d, a, b, c, e) + >>> _ == shadow.corners + True + >>> copy = h5.copy() + >>> h5.rotate(p) + >>> h5.corners == copy.corners + False + """ + if not isinstance(perm, Perm): + perm = self.pgroup[perm] + # and we know it's valid + else: + if perm.size != self.size: + raise ValueError('Polyhedron and Permutation sizes differ.') + a = perm.array_form + corners = [self.corners[a[i]] for i in range(len(self.corners))] + self._corners = tuple(corners) + + def reset(self): + """Return corners to their original positions. + + Examples + ======== + + >>> from sympy.combinatorics.polyhedron import tetrahedron as T + >>> T = T.copy() + >>> T.corners + (0, 1, 2, 3) + >>> T.rotate(0) + >>> T.corners + (0, 2, 3, 1) + >>> T.reset() + >>> T.corners + (0, 1, 2, 3) + """ + self._corners = self.args[0] + + +def _pgroup_calcs(): + """Return the permutation groups for each of the polyhedra and the face + definitions: tetrahedron, cube, octahedron, dodecahedron, icosahedron, + tetrahedron_faces, cube_faces, octahedron_faces, dodecahedron_faces, + icosahedron_faces + + Explanation + =========== + + (This author did not find and did not know of a better way to do it though + there likely is such a way.) + + Although only 2 permutations are needed for a polyhedron in order to + generate all the possible orientations, a group of permutations is + provided instead. A set of permutations is called a "group" if:: + + a*b = c (for any pair of permutations in the group, a and b, their + product, c, is in the group) + + a*(b*c) = (a*b)*c (for any 3 permutations in the group associativity holds) + + there is an identity permutation, I, such that I*a = a*I for all elements + in the group + + a*b = I (the inverse of each permutation is also in the group) + + None of the polyhedron groups defined follow these definitions of a group. + Instead, they are selected to contain those permutations whose powers + alone will construct all orientations of the polyhedron, i.e. for + permutations ``a``, ``b``, etc... in the group, ``a, a**2, ..., a**o_a``, + ``b, b**2, ..., b**o_b``, etc... (where ``o_i`` is the order of + permutation ``i``) generate all permutations of the polyhedron instead of + mixed products like ``a*b``, ``a*b**2``, etc.... + + Note that for a polyhedron with n vertices, the valid permutations of the + vertices exclude those that do not maintain its faces. e.g. the + permutation BCDE of a square's four corners, ABCD, is a valid + permutation while CBDE is not (because this would twist the square). + + Examples + ======== + + The is_group checks for: closure, the presence of the Identity permutation, + and the presence of the inverse for each of the elements in the group. This + confirms that none of the polyhedra are true groups: + + >>> from sympy.combinatorics.polyhedron import ( + ... 
tetrahedron, cube, octahedron, dodecahedron, icosahedron) + ... + >>> polyhedra = (tetrahedron, cube, octahedron, dodecahedron, icosahedron) + >>> [h.pgroup.is_group for h in polyhedra] + ... + [True, True, True, True, True] + + Although tests in polyhedron's test suite check that powers of the + permutations in the groups generate all permutations of the vertices + of the polyhedron, here we also demonstrate the powers of the given + permutations create a complete group for the tetrahedron: + + >>> from sympy.combinatorics import Permutation, PermutationGroup + >>> for h in polyhedra[:1]: + ... G = h.pgroup + ... perms = set() + ... for g in G: + ... for e in range(g.order()): + ... p = tuple((g**e).array_form) + ... perms.add(p) + ... + ... perms = [Permutation(p) for p in perms] + ... assert PermutationGroup(perms).is_group + + In addition to doing the above, the tests in the suite confirm that the + faces are all present after the application of each permutation. + + References + ========== + + .. [1] https://dogschool.tripod.com/trianglegroup.html + + """ + def _pgroup_of_double(polyh, ordered_faces, pgroup): + n = len(ordered_faces[0]) + # the vertices of the double which sits inside a give polyhedron + # can be found by tracking the faces of the outer polyhedron. + # A map between face and the vertex of the double is made so that + # after rotation the position of the vertices can be located + fmap = dict(zip(ordered_faces, + range(len(ordered_faces)))) + flat_faces = flatten(ordered_faces) + new_pgroup = [] + for i, p in enumerate(pgroup): + h = polyh.copy() + h.rotate(p) + c = h.corners + # reorder corners in the order they should appear when + # enumerating the faces + reorder = unflatten([c[j] for j in flat_faces], n) + # make them canonical + reorder = [tuple(map(as_int, + minlex(f, directed=False))) + for f in reorder] + # map face to vertex: the resulting list of vertices are the + # permutation that we seek for the double + new_pgroup.append(Perm([fmap[f] for f in reorder])) + return new_pgroup + + tetrahedron_faces = [ + (0, 1, 2), (0, 2, 3), (0, 3, 1), # upper 3 + (1, 2, 3), # bottom + ] + + # cw from top + # + _t_pgroup = [ + Perm([[1, 2, 3], [0]]), # cw from top + Perm([[0, 1, 2], [3]]), # cw from front face + Perm([[0, 3, 2], [1]]), # cw from back right face + Perm([[0, 3, 1], [2]]), # cw from back left face + Perm([[0, 1], [2, 3]]), # through front left edge + Perm([[0, 2], [1, 3]]), # through front right edge + Perm([[0, 3], [1, 2]]), # through back edge + ] + + tetrahedron = Polyhedron( + range(4), + tetrahedron_faces, + _t_pgroup) + + cube_faces = [ + (0, 1, 2, 3), # upper + (0, 1, 5, 4), (1, 2, 6, 5), (2, 3, 7, 6), (0, 3, 7, 4), # middle 4 + (4, 5, 6, 7), # lower + ] + + # U, D, F, B, L, R = up, down, front, back, left, right + _c_pgroup = [Perm(p) for p in + [ + [1, 2, 3, 0, 5, 6, 7, 4], # cw from top, U + [4, 0, 3, 7, 5, 1, 2, 6], # cw from F face + [4, 5, 1, 0, 7, 6, 2, 3], # cw from R face + + [1, 0, 4, 5, 2, 3, 7, 6], # cw through UF edge + [6, 2, 1, 5, 7, 3, 0, 4], # cw through UR edge + [6, 7, 3, 2, 5, 4, 0, 1], # cw through UB edge + [3, 7, 4, 0, 2, 6, 5, 1], # cw through UL edge + [4, 7, 6, 5, 0, 3, 2, 1], # cw through FL edge + [6, 5, 4, 7, 2, 1, 0, 3], # cw through FR edge + + [0, 3, 7, 4, 1, 2, 6, 5], # cw through UFL vertex + [5, 1, 0, 4, 6, 2, 3, 7], # cw through UFR vertex + [5, 6, 2, 1, 4, 7, 3, 0], # cw through UBR vertex + [7, 4, 0, 3, 6, 5, 1, 2], # cw through UBL + ]] + + cube = Polyhedron( + range(8), + cube_faces, + _c_pgroup) + + 
octahedron_faces = [ + (0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 1, 4), # top 4 + (1, 2, 5), (2, 3, 5), (3, 4, 5), (1, 4, 5), # bottom 4 + ] + + octahedron = Polyhedron( + range(6), + octahedron_faces, + _pgroup_of_double(cube, cube_faces, _c_pgroup)) + + dodecahedron_faces = [ + (0, 1, 2, 3, 4), # top + (0, 1, 6, 10, 5), (1, 2, 7, 11, 6), (2, 3, 8, 12, 7), # upper 5 + (3, 4, 9, 13, 8), (0, 4, 9, 14, 5), + (5, 10, 16, 15, 14), (6, 10, 16, 17, 11), (7, 11, 17, 18, + 12), # lower 5 + (8, 12, 18, 19, 13), (9, 13, 19, 15, 14), + (15, 16, 17, 18, 19) # bottom + ] + + def _string_to_perm(s): + rv = [Perm(range(20))] + p = None + for si in s: + if si not in '01': + count = int(si) - 1 + else: + count = 1 + if si == '0': + p = _f0 + elif si == '1': + p = _f1 + rv.extend([p]*count) + return Perm.rmul(*rv) + + # top face cw + _f0 = Perm([ + 1, 2, 3, 4, 0, 6, 7, 8, 9, 5, 11, + 12, 13, 14, 10, 16, 17, 18, 19, 15]) + # front face cw + _f1 = Perm([ + 5, 0, 4, 9, 14, 10, 1, 3, 13, 15, + 6, 2, 8, 19, 16, 17, 11, 7, 12, 18]) + # the strings below, like 0104 are shorthand for F0*F1*F0**4 and are + # the remaining 4 face rotations, 15 edge permutations, and the + # 10 vertex rotations. + _dodeca_pgroup = [_f0, _f1] + [_string_to_perm(s) for s in ''' + 0104 140 014 0410 + 010 1403 03104 04103 102 + 120 1304 01303 021302 03130 + 0412041 041204103 04120410 041204104 041204102 + 10 01 1402 0140 04102 0412 1204 1302 0130 03120'''.strip().split()] + + dodecahedron = Polyhedron( + range(20), + dodecahedron_faces, + _dodeca_pgroup) + + icosahedron_faces = [ + (0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 4, 5), (0, 1, 5), + (1, 6, 7), (1, 2, 7), (2, 7, 8), (2, 3, 8), (3, 8, 9), + (3, 4, 9), (4, 9, 10), (4, 5, 10), (5, 6, 10), (1, 5, 6), + (6, 7, 11), (7, 8, 11), (8, 9, 11), (9, 10, 11), (6, 10, 11)] + + icosahedron = Polyhedron( + range(12), + icosahedron_faces, + _pgroup_of_double( + dodecahedron, dodecahedron_faces, _dodeca_pgroup)) + + return (tetrahedron, cube, octahedron, dodecahedron, icosahedron, + tetrahedron_faces, cube_faces, octahedron_faces, + dodecahedron_faces, icosahedron_faces) + +# ----------------------------------------------------------------------- +# Standard Polyhedron groups +# +# These are generated using _pgroup_calcs() above. However to save +# import time we encode them explicitly here. 
+# ----------------------------------------------------------------------- + +tetrahedron = Polyhedron( + Tuple(0, 1, 2, 3), + Tuple( + Tuple(0, 1, 2), + Tuple(0, 2, 3), + Tuple(0, 1, 3), + Tuple(1, 2, 3)), + Tuple( + Perm(1, 2, 3), + Perm(3)(0, 1, 2), + Perm(0, 3, 2), + Perm(0, 3, 1), + Perm(0, 1)(2, 3), + Perm(0, 2)(1, 3), + Perm(0, 3)(1, 2) + )) + +cube = Polyhedron( + Tuple(0, 1, 2, 3, 4, 5, 6, 7), + Tuple( + Tuple(0, 1, 2, 3), + Tuple(0, 1, 5, 4), + Tuple(1, 2, 6, 5), + Tuple(2, 3, 7, 6), + Tuple(0, 3, 7, 4), + Tuple(4, 5, 6, 7)), + Tuple( + Perm(0, 1, 2, 3)(4, 5, 6, 7), + Perm(0, 4, 5, 1)(2, 3, 7, 6), + Perm(0, 4, 7, 3)(1, 5, 6, 2), + Perm(0, 1)(2, 4)(3, 5)(6, 7), + Perm(0, 6)(1, 2)(3, 5)(4, 7), + Perm(0, 6)(1, 7)(2, 3)(4, 5), + Perm(0, 3)(1, 7)(2, 4)(5, 6), + Perm(0, 4)(1, 7)(2, 6)(3, 5), + Perm(0, 6)(1, 5)(2, 4)(3, 7), + Perm(1, 3, 4)(2, 7, 5), + Perm(7)(0, 5, 2)(3, 4, 6), + Perm(0, 5, 7)(1, 6, 3), + Perm(0, 7, 2)(1, 4, 6))) + +octahedron = Polyhedron( + Tuple(0, 1, 2, 3, 4, 5), + Tuple( + Tuple(0, 1, 2), + Tuple(0, 2, 3), + Tuple(0, 3, 4), + Tuple(0, 1, 4), + Tuple(1, 2, 5), + Tuple(2, 3, 5), + Tuple(3, 4, 5), + Tuple(1, 4, 5)), + Tuple( + Perm(5)(1, 2, 3, 4), + Perm(0, 4, 5, 2), + Perm(0, 1, 5, 3), + Perm(0, 1)(2, 4)(3, 5), + Perm(0, 2)(1, 3)(4, 5), + Perm(0, 3)(1, 5)(2, 4), + Perm(0, 4)(1, 3)(2, 5), + Perm(0, 5)(1, 4)(2, 3), + Perm(0, 5)(1, 2)(3, 4), + Perm(0, 4, 1)(2, 3, 5), + Perm(0, 1, 2)(3, 4, 5), + Perm(0, 2, 3)(1, 5, 4), + Perm(0, 4, 3)(1, 5, 2))) + +dodecahedron = Polyhedron( + Tuple(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19), + Tuple( + Tuple(0, 1, 2, 3, 4), + Tuple(0, 1, 6, 10, 5), + Tuple(1, 2, 7, 11, 6), + Tuple(2, 3, 8, 12, 7), + Tuple(3, 4, 9, 13, 8), + Tuple(0, 4, 9, 14, 5), + Tuple(5, 10, 16, 15, 14), + Tuple(6, 10, 16, 17, 11), + Tuple(7, 11, 17, 18, 12), + Tuple(8, 12, 18, 19, 13), + Tuple(9, 13, 19, 15, 14), + Tuple(15, 16, 17, 18, 19)), + Tuple( + Perm(0, 1, 2, 3, 4)(5, 6, 7, 8, 9)(10, 11, 12, 13, 14)(15, 16, 17, 18, 19), + Perm(0, 5, 10, 6, 1)(2, 4, 14, 16, 11)(3, 9, 15, 17, 7)(8, 13, 19, 18, 12), + Perm(0, 10, 17, 12, 3)(1, 6, 11, 7, 2)(4, 5, 16, 18, 8)(9, 14, 15, 19, 13), + Perm(0, 6, 17, 19, 9)(1, 11, 18, 13, 4)(2, 7, 12, 8, 3)(5, 10, 16, 15, 14), + Perm(0, 2, 12, 19, 14)(1, 7, 18, 15, 5)(3, 8, 13, 9, 4)(6, 11, 17, 16, 10), + Perm(0, 4, 9, 14, 5)(1, 3, 13, 15, 10)(2, 8, 19, 16, 6)(7, 12, 18, 17, 11), + Perm(0, 1)(2, 5)(3, 10)(4, 6)(7, 14)(8, 16)(9, 11)(12, 15)(13, 17)(18, 19), + Perm(0, 7)(1, 2)(3, 6)(4, 11)(5, 12)(8, 10)(9, 17)(13, 16)(14, 18)(15, 19), + Perm(0, 12)(1, 8)(2, 3)(4, 7)(5, 18)(6, 13)(9, 11)(10, 19)(14, 17)(15, 16), + Perm(0, 8)(1, 13)(2, 9)(3, 4)(5, 12)(6, 19)(7, 14)(10, 18)(11, 15)(16, 17), + Perm(0, 4)(1, 9)(2, 14)(3, 5)(6, 13)(7, 15)(8, 10)(11, 19)(12, 16)(17, 18), + Perm(0, 5)(1, 14)(2, 15)(3, 16)(4, 10)(6, 9)(7, 19)(8, 17)(11, 13)(12, 18), + Perm(0, 11)(1, 6)(2, 10)(3, 16)(4, 17)(5, 7)(8, 15)(9, 18)(12, 14)(13, 19), + Perm(0, 18)(1, 12)(2, 7)(3, 11)(4, 17)(5, 19)(6, 8)(9, 16)(10, 13)(14, 15), + Perm(0, 18)(1, 19)(2, 13)(3, 8)(4, 12)(5, 17)(6, 15)(7, 9)(10, 16)(11, 14), + Perm(0, 13)(1, 19)(2, 15)(3, 14)(4, 9)(5, 8)(6, 18)(7, 16)(10, 12)(11, 17), + Perm(0, 16)(1, 15)(2, 19)(3, 18)(4, 17)(5, 10)(6, 14)(7, 13)(8, 12)(9, 11), + Perm(0, 18)(1, 17)(2, 16)(3, 15)(4, 19)(5, 12)(6, 11)(7, 10)(8, 14)(9, 13), + Perm(0, 15)(1, 19)(2, 18)(3, 17)(4, 16)(5, 14)(6, 13)(7, 12)(8, 11)(9, 10), + Perm(0, 17)(1, 16)(2, 15)(3, 19)(4, 18)(5, 11)(6, 10)(7, 14)(8, 13)(9, 12), + Perm(0, 19)(1, 18)(2, 17)(3, 16)(4, 15)(5, 13)(6, 12)(7, 
11)(8, 10)(9, 14), + Perm(1, 4, 5)(2, 9, 10)(3, 14, 6)(7, 13, 16)(8, 15, 11)(12, 19, 17), + Perm(19)(0, 6, 2)(3, 5, 11)(4, 10, 7)(8, 14, 17)(9, 16, 12)(13, 15, 18), + Perm(0, 11, 8)(1, 7, 3)(4, 6, 12)(5, 17, 13)(9, 10, 18)(14, 16, 19), + Perm(0, 7, 13)(1, 12, 9)(2, 8, 4)(5, 11, 19)(6, 18, 14)(10, 17, 15), + Perm(0, 3, 9)(1, 8, 14)(2, 13, 5)(6, 12, 15)(7, 19, 10)(11, 18, 16), + Perm(0, 14, 10)(1, 9, 16)(2, 13, 17)(3, 19, 11)(4, 15, 6)(7, 8, 18), + Perm(0, 16, 7)(1, 10, 11)(2, 5, 17)(3, 14, 18)(4, 15, 12)(8, 9, 19), + Perm(0, 16, 13)(1, 17, 8)(2, 11, 12)(3, 6, 18)(4, 10, 19)(5, 15, 9), + Perm(0, 11, 15)(1, 17, 14)(2, 18, 9)(3, 12, 13)(4, 7, 19)(5, 6, 16), + Perm(0, 8, 15)(1, 12, 16)(2, 18, 10)(3, 19, 5)(4, 13, 14)(6, 7, 17))) + +icosahedron = Polyhedron( + Tuple(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), + Tuple( + Tuple(0, 1, 2), + Tuple(0, 2, 3), + Tuple(0, 3, 4), + Tuple(0, 4, 5), + Tuple(0, 1, 5), + Tuple(1, 6, 7), + Tuple(1, 2, 7), + Tuple(2, 7, 8), + Tuple(2, 3, 8), + Tuple(3, 8, 9), + Tuple(3, 4, 9), + Tuple(4, 9, 10), + Tuple(4, 5, 10), + Tuple(5, 6, 10), + Tuple(1, 5, 6), + Tuple(6, 7, 11), + Tuple(7, 8, 11), + Tuple(8, 9, 11), + Tuple(9, 10, 11), + Tuple(6, 10, 11)), + Tuple( + Perm(11)(1, 2, 3, 4, 5)(6, 7, 8, 9, 10), + Perm(0, 5, 6, 7, 2)(3, 4, 10, 11, 8), + Perm(0, 1, 7, 8, 3)(4, 5, 6, 11, 9), + Perm(0, 2, 8, 9, 4)(1, 7, 11, 10, 5), + Perm(0, 3, 9, 10, 5)(1, 2, 8, 11, 6), + Perm(0, 4, 10, 6, 1)(2, 3, 9, 11, 7), + Perm(0, 1)(2, 5)(3, 6)(4, 7)(8, 10)(9, 11), + Perm(0, 2)(1, 3)(4, 7)(5, 8)(6, 9)(10, 11), + Perm(0, 3)(1, 9)(2, 4)(5, 8)(6, 11)(7, 10), + Perm(0, 4)(1, 9)(2, 10)(3, 5)(6, 8)(7, 11), + Perm(0, 5)(1, 4)(2, 10)(3, 6)(7, 9)(8, 11), + Perm(0, 6)(1, 5)(2, 10)(3, 11)(4, 7)(8, 9), + Perm(0, 7)(1, 2)(3, 6)(4, 11)(5, 8)(9, 10), + Perm(0, 8)(1, 9)(2, 3)(4, 7)(5, 11)(6, 10), + Perm(0, 9)(1, 11)(2, 10)(3, 4)(5, 8)(6, 7), + Perm(0, 10)(1, 9)(2, 11)(3, 6)(4, 5)(7, 8), + Perm(0, 11)(1, 6)(2, 10)(3, 9)(4, 8)(5, 7), + Perm(0, 11)(1, 8)(2, 7)(3, 6)(4, 10)(5, 9), + Perm(0, 11)(1, 10)(2, 9)(3, 8)(4, 7)(5, 6), + Perm(0, 11)(1, 7)(2, 6)(3, 10)(4, 9)(5, 8), + Perm(0, 11)(1, 9)(2, 8)(3, 7)(4, 6)(5, 10), + Perm(0, 5, 1)(2, 4, 6)(3, 10, 7)(8, 9, 11), + Perm(0, 1, 2)(3, 5, 7)(4, 6, 8)(9, 10, 11), + Perm(0, 2, 3)(1, 8, 4)(5, 7, 9)(6, 11, 10), + Perm(0, 3, 4)(1, 8, 10)(2, 9, 5)(6, 7, 11), + Perm(0, 4, 5)(1, 3, 10)(2, 9, 6)(7, 8, 11), + Perm(0, 10, 7)(1, 5, 6)(2, 4, 11)(3, 9, 8), + Perm(0, 6, 8)(1, 7, 2)(3, 5, 11)(4, 10, 9), + Perm(0, 7, 9)(1, 11, 4)(2, 8, 3)(5, 6, 10), + Perm(0, 8, 10)(1, 7, 6)(2, 11, 5)(3, 9, 4), + Perm(0, 9, 6)(1, 3, 11)(2, 8, 7)(4, 10, 5))) + +tetrahedron_faces = [tuple(arg) for arg in tetrahedron.faces] + +cube_faces = [tuple(arg) for arg in cube.faces] + +octahedron_faces = [tuple(arg) for arg in octahedron.faces] + +dodecahedron_faces = [tuple(arg) for arg in dodecahedron.faces] + +icosahedron_faces = [tuple(arg) for arg in icosahedron.faces] diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/rewritingsystem.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/rewritingsystem.py new file mode 100644 index 0000000000000000000000000000000000000000..4bacda085f9cb14f2cad14c915c05e5d036366bc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/rewritingsystem.py @@ -0,0 +1,453 @@ +from collections import deque +from sympy.combinatorics.rewritingsystem_fsm import StateMachine + +class RewritingSystem: + ''' + A class implementing rewriting systems for `FpGroup`s. + + References + ========== + .. [1] Epstein, D., Holt, D. 
and Rees, S. (1991). + The use of Knuth-Bendix methods to solve the word problem in automatic groups. + Journal of Symbolic Computation, 12(4-5), pp.397-414. + + .. [2] GAP's Manual on its KBMAG package + https://www.gap-system.org/Manuals/pkg/kbmag-1.5.3/doc/manual.pdf + + ''' + def __init__(self, group): + self.group = group + self.alphabet = group.generators + self._is_confluent = None + + # these values are taken from [2] + self.maxeqns = 32767 # max rules + self.tidyint = 100 # rules before tidying + + # _max_exceeded is True if maxeqns is exceeded + # at any point + self._max_exceeded = False + + # Reduction automaton + self.reduction_automaton = None + self._new_rules = {} + + # dictionary of reductions + self.rules = {} + self.rules_cache = deque([], 50) + self._init_rules() + + + # All the transition symbols in the automaton + generators = list(self.alphabet) + generators += [gen**-1 for gen in generators] + # Create a finite state machine as an instance of the StateMachine object + self.reduction_automaton = StateMachine('Reduction automaton for '+ repr(self.group), generators) + self.construct_automaton() + + def set_max(self, n): + ''' + Set the maximum number of rules that can be defined + + ''' + if n > self.maxeqns: + self._max_exceeded = False + self.maxeqns = n + return + + @property + def is_confluent(self): + ''' + Return `True` if the system is confluent + + ''' + if self._is_confluent is None: + self._is_confluent = self._check_confluence() + return self._is_confluent + + def _init_rules(self): + identity = self.group.free_group.identity + for r in self.group.relators: + self.add_rule(r, identity) + self._remove_redundancies() + return + + def _add_rule(self, r1, r2): + ''' + Add the rule r1 -> r2 with no checking or further + deductions + + ''' + if len(self.rules) + 1 > self.maxeqns: + self._is_confluent = self._check_confluence() + self._max_exceeded = True + raise RuntimeError("Too many rules were defined.") + self.rules[r1] = r2 + # Add the newly added rule to the `new_rules` dictionary. + if self.reduction_automaton: + self._new_rules[r1] = r2 + + def add_rule(self, w1, w2, check=False): + new_keys = set() + + if w1 == w2: + return new_keys + + if w1 < w2: + w1, w2 = w2, w1 + + if (w1, w2) in self.rules_cache: + return new_keys + self.rules_cache.append((w1, w2)) + + s1, s2 = w1, w2 + + # The following is the equivalent of checking + # s1 for overlaps with the implicit reductions + # {g*g**-1 -> } and {g**-1*g -> } + # for any generator g without installing the + # redundant rules that would result from processing + # the overlaps. See [1], Section 3 for details. 
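+        # The two loops below peel one generator at a time off the right
+        # end of s1 (and, in the second loop, off the left end of w1),
+        # moving its inverse to the other side of the rule, and install
+        # or recursively derive any sufficiently short rules that result.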
+ + if len(s1) - len(s2) < 3: + if s1 not in self.rules: + new_keys.add(s1) + if not check: + self._add_rule(s1, s2) + if s2**-1 > s1**-1 and s2**-1 not in self.rules: + new_keys.add(s2**-1) + if not check: + self._add_rule(s2**-1, s1**-1) + + # overlaps on the right + while len(s1) - len(s2) > -1: + g = s1[len(s1)-1] + s1 = s1.subword(0, len(s1)-1) + s2 = s2*g**-1 + if len(s1) - len(s2) < 0: + if s2 not in self.rules: + if not check: + self._add_rule(s2, s1) + new_keys.add(s2) + elif len(s1) - len(s2) < 3: + new = self.add_rule(s1, s2, check) + new_keys.update(new) + + # overlaps on the left + while len(w1) - len(w2) > -1: + g = w1[0] + w1 = w1.subword(1, len(w1)) + w2 = g**-1*w2 + if len(w1) - len(w2) < 0: + if w2 not in self.rules: + if not check: + self._add_rule(w2, w1) + new_keys.add(w2) + elif len(w1) - len(w2) < 3: + new = self.add_rule(w1, w2, check) + new_keys.update(new) + + return new_keys + + def _remove_redundancies(self, changes=False): + ''' + Reduce left- and right-hand sides of reduction rules + and remove redundant equations (i.e. those for which + lhs == rhs). If `changes` is `True`, return a set + containing the removed keys and a set containing the + added keys + + ''' + removed = set() + added = set() + rules = self.rules.copy() + for r in rules: + v = self.reduce(r, exclude=r) + w = self.reduce(rules[r]) + if v != r: + del self.rules[r] + removed.add(r) + if v > w: + added.add(v) + self.rules[v] = w + elif v < w: + added.add(w) + self.rules[w] = v + else: + self.rules[v] = w + if changes: + return removed, added + return + + def make_confluent(self, check=False): + ''' + Try to make the system confluent using the Knuth-Bendix + completion algorithm + + ''' + if self._max_exceeded: + return self._is_confluent + lhs = list(self.rules.keys()) + + def _overlaps(r1, r2): + len1 = len(r1) + len2 = len(r2) + result = [] + for j in range(1, len1 + len2): + if (r1.subword(len1 - j, len1 + len2 - j, strict=False) + == r2.subword(j - len1, j, strict=False)): + a = r1.subword(0, len1-j, strict=False) + a = a*r2.subword(0, j-len1, strict=False) + b = r2.subword(j-len1, j, strict=False) + c = r2.subword(j, len2, strict=False) + c = c*r1.subword(len1 + len2 - j, len1, strict=False) + result.append(a*b*c) + return result + + def _process_overlap(w, r1, r2, check): + s = w.eliminate_word(r1, self.rules[r1]) + s = self.reduce(s) + t = w.eliminate_word(r2, self.rules[r2]) + t = self.reduce(t) + if s != t: + if check: + # system not confluent + return [0] + try: + new_keys = self.add_rule(t, s, check) + return new_keys + except RuntimeError: + return False + return + + added = 0 + i = 0 + while i < len(lhs): + r1 = lhs[i] + i += 1 + # j could be i+1 to not + # check each pair twice but lhs + # is extended in the loop and the new + # elements have to be checked with the + # preceding ones. 
there is probably a better way + # to handle this + j = 0 + while j < len(lhs): + r2 = lhs[j] + j += 1 + if r1 == r2: + continue + overlaps = _overlaps(r1, r2) + overlaps.extend(_overlaps(r1**-1, r2)) + if not overlaps: + continue + for w in overlaps: + new_keys = _process_overlap(w, r1, r2, check) + if new_keys: + if check: + return False + lhs.extend(new_keys) + added += len(new_keys) + elif new_keys == False: + # too many rules were added so the process + # couldn't complete + return self._is_confluent + + if added > self.tidyint and not check: + # tidy up + r, a = self._remove_redundancies(changes=True) + added = 0 + if r: + # reset i since some elements were removed + i = min([lhs.index(s) for s in r]) + lhs = [l for l in lhs if l not in r] + lhs.extend(a) + if r1 in r: + # r1 was removed as redundant + break + + self._is_confluent = True + if not check: + self._remove_redundancies() + return True + + def _check_confluence(self): + return self.make_confluent(check=True) + + def reduce(self, word, exclude=None): + ''' + Apply reduction rules to `word` excluding the reduction rule + for the lhs equal to `exclude` + + ''' + rules = {r: self.rules[r] for r in self.rules if r != exclude} + # the following is essentially `eliminate_words()` code from the + # `FreeGroupElement` class, the only difference being the first + # "if" statement + again = True + new = word + while again: + again = False + for r in rules: + prev = new + if rules[r]**-1 > r**-1: + new = new.eliminate_word(r, rules[r], _all=True, inverse=False) + else: + new = new.eliminate_word(r, rules[r], _all=True) + if new != prev: + again = True + return new + + def _compute_inverse_rules(self, rules): + ''' + Compute the inverse rules for a given set of rules. + The inverse rules are used in the automaton for word reduction. + + Arguments: + rules (dictionary): Rules for which the inverse rules are to computed. + + Returns: + Dictionary of inverse_rules. + + ''' + inverse_rules = {} + for r in rules: + rule_key_inverse = r**-1 + rule_value_inverse = (rules[r])**-1 + if (rule_value_inverse < rule_key_inverse): + inverse_rules[rule_key_inverse] = rule_value_inverse + else: + inverse_rules[rule_value_inverse] = rule_key_inverse + return inverse_rules + + def construct_automaton(self): + ''' + Construct the automaton based on the set of reduction rules of the system. + + Automata Design: + The accept states of the automaton are the proper prefixes of the left hand side of the rules. + The complete left hand side of the rules are the dead states of the automaton. + + ''' + self._add_to_automaton(self.rules) + + def _add_to_automaton(self, rules): + ''' + Add new states and transitions to the automaton. + + Summary: + States corresponding to the new rules added to the system are computed and added to the automaton. + Transitions in the previously added states are also modified if necessary. + + Arguments: + rules (dictionary) -- Dictionary of the newly added rules. + + ''' + # Automaton variables + automaton_alphabet = [] + proper_prefixes = {} + + # compute the inverses of all the new rules added + all_rules = rules + inverse_rules = self._compute_inverse_rules(all_rules) + all_rules.update(inverse_rules) + + # Keep track of the accept_states. + accept_states = [] + + for rule in all_rules: + # The symbols present in the new rules are the symbols to be verified at each state. + # computes the automaton_alphabet, as the transitions solely depend upon the new states. 
+ automaton_alphabet += rule.letter_form_elm + # Compute the proper prefixes for every rule. + proper_prefixes[rule] = [] + letter_word_array = list(rule.letter_form_elm) + len_letter_word_array = len(letter_word_array) + for i in range (1, len_letter_word_array): + letter_word_array[i] = letter_word_array[i-1]*letter_word_array[i] + # Add accept states. + elem = letter_word_array[i-1] + if elem not in self.reduction_automaton.states: + self.reduction_automaton.add_state(elem, state_type='a') + accept_states.append(elem) + proper_prefixes[rule] = letter_word_array + # Check for overlaps between dead and accept states. + if rule in accept_states: + self.reduction_automaton.states[rule].state_type = 'd' + self.reduction_automaton.states[rule].rh_rule = all_rules[rule] + accept_states.remove(rule) + # Add dead states + if rule not in self.reduction_automaton.states: + self.reduction_automaton.add_state(rule, state_type='d', rh_rule=all_rules[rule]) + + automaton_alphabet = set(automaton_alphabet) + + # Add new transitions for every state. + for state in self.reduction_automaton.states: + current_state_name = state + current_state_type = self.reduction_automaton.states[state].state_type + # Transitions will be modified only when suffixes of the current_state + # belongs to the proper_prefixes of the new rules. + # The rest are ignored if they cannot lead to a dead state after a finite number of transisitons. + if current_state_type == 's': + for letter in automaton_alphabet: + if letter in self.reduction_automaton.states: + self.reduction_automaton.states[state].add_transition(letter, letter) + else: + self.reduction_automaton.states[state].add_transition(letter, current_state_name) + elif current_state_type == 'a': + # Check if the transition to any new state in possible. + for letter in automaton_alphabet: + _next = current_state_name*letter + while len(_next) and _next not in self.reduction_automaton.states: + _next = _next.subword(1, len(_next)) + if not len(_next): + _next = 'start' + self.reduction_automaton.states[state].add_transition(letter, _next) + + # Add transitions for new states. All symbols used in the automaton are considered here. + # Ignore this if `reduction_automaton.automaton_alphabet` = `automaton_alphabet`. + if len(self.reduction_automaton.automaton_alphabet) != len(automaton_alphabet): + for state in accept_states: + current_state_name = state + for letter in self.reduction_automaton.automaton_alphabet: + _next = current_state_name*letter + while len(_next) and _next not in self.reduction_automaton.states: + _next = _next.subword(1, len(_next)) + if not len(_next): + _next = 'start' + self.reduction_automaton.states[state].add_transition(letter, _next) + + def reduce_using_automaton(self, word): + ''' + Reduce a word using an automaton. + + Summary: + All the symbols of the word are stored in an array and are given as the input to the automaton. + If the automaton reaches a dead state that subword is replaced and the automaton is run from the beginning. + The complete word has to be replaced when the word is read and the automaton reaches a dead state. + So, this process is repeated until the word is read completely and the automaton reaches the accept state. + + Arguments: + word (instance of FreeGroupElement) -- Word that needs to be reduced. + + ''' + # Modify the automaton if new rules are found. 
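+        # `_new_rules` buffers the reduction rules added since the automaton
+        # was last built; fold them into the automaton once here and clear
+        # the buffer before the word is scanned.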
+ if self._new_rules: + self._add_to_automaton(self._new_rules) + self._new_rules = {} + + flag = 1 + while flag: + flag = 0 + current_state = self.reduction_automaton.states['start'] + for i, s in enumerate(word.letter_form_elm): + next_state_name = current_state.transitions[s] + next_state = self.reduction_automaton.states[next_state_name] + if next_state.state_type == 'd': + subst = next_state.rh_rule + word = word.substituted_word(i - len(next_state_name) + 1, i+1, subst) + flag = 1 + break + current_state = next_state + return word diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/rewritingsystem_fsm.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/rewritingsystem_fsm.py new file mode 100644 index 0000000000000000000000000000000000000000..21916530040ac321180692d1a0811da4ae36a056 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/rewritingsystem_fsm.py @@ -0,0 +1,60 @@ +class State: + ''' + A representation of a state managed by a ``StateMachine``. + + Attributes: + name (instance of FreeGroupElement or string) -- State name which is also assigned to the Machine. + transisitons (OrderedDict) -- Represents all the transitions of the state object. + state_type (string) -- Denotes the type (accept/start/dead) of the state. + rh_rule (instance of FreeGroupElement) -- right hand rule for dead state. + state_machine (instance of StateMachine object) -- The finite state machine that the state belongs to. + ''' + + def __init__(self, name, state_machine, state_type=None, rh_rule=None): + self.name = name + self.transitions = {} + self.state_machine = state_machine + self.state_type = state_type[0] + self.rh_rule = rh_rule + + def add_transition(self, letter, state): + ''' + Add a transition from the current state to a new state. + + Keyword Arguments: + letter -- The alphabet element the current state reads to make the state transition. + state -- This will be an instance of the State object which represents a new state after in the transition after the alphabet is read. + + ''' + self.transitions[letter] = state + +class StateMachine: + ''' + Representation of a finite state machine the manages the states and the transitions of the automaton. + + Attributes: + states (dictionary) -- Collection of all registered `State` objects. + name (str) -- Name of the state machine. + ''' + + def __init__(self, name, automaton_alphabet): + self.name = name + self.automaton_alphabet = automaton_alphabet + self.states = {} # Contains all the states in the machine. + self.add_state('start', state_type='s') + + def add_state(self, state_name, state_type=None, rh_rule=None): + ''' + Instantiate a state object and stores it in the 'states' dictionary. + + Arguments: + state_name (instance of FreeGroupElement or string) -- name of the new states. + state_type (string) -- Denotes the type (accept/start/dead) of the state added. + rh_rule (instance of FreeGroupElement) -- right hand rule for dead state. 
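+
+        Example (illustrative only; the machine name, alphabet and state name
+        below are placeholders rather than values produced by the rewriting
+        system):
+
+        >>> from sympy.combinatorics.rewritingsystem_fsm import StateMachine
+        >>> sm = StateMachine('example-machine', automaton_alphabet=[])
+        >>> sm.add_state('dead-example', state_type='d')
+        >>> sm.states['dead-example'].state_type
+        'd'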
+ + ''' + new_state = State(state_name, self, state_type, rh_rule) + self.states[state_name] = new_state + + def __repr__(self): + return "%s" % (self.name) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/schur_number.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/schur_number.py new file mode 100644 index 0000000000000000000000000000000000000000..83aac98e543d4b54d4e6af17adca6e4f4de1b9ac --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/schur_number.py @@ -0,0 +1,160 @@ +""" +The Schur number S(k) is the largest integer n for which the interval [1,n] +can be partitioned into k sum-free sets.(https://mathworld.wolfram.com/SchurNumber.html) +""" +import math +from sympy.core import S +from sympy.core.basic import Basic +from sympy.core.function import Function +from sympy.core.numbers import Integer + + +class SchurNumber(Function): + r""" + This function creates a SchurNumber object + which is evaluated for `k \le 5` otherwise only + the lower bound information can be retrieved. + + Examples + ======== + + >>> from sympy.combinatorics.schur_number import SchurNumber + + Since S(3) = 13, hence the output is a number + >>> SchurNumber(3) + 13 + + We do not know the Schur number for values greater than 5, hence + only the object is returned + >>> SchurNumber(6) + SchurNumber(6) + + Now, the lower bound information can be retrieved using lower_bound() + method + >>> SchurNumber(6).lower_bound() + 536 + + """ + + @classmethod + def eval(cls, k): + if k.is_Number: + if k is S.Infinity: + return S.Infinity + if k.is_zero: + return S.Zero + if not k.is_integer or k.is_negative: + raise ValueError("k should be a positive integer") + first_known_schur_numbers = {1: 1, 2: 4, 3: 13, 4: 44, 5: 160} + if k <= 5: + return Integer(first_known_schur_numbers[k]) + + def lower_bound(self): + f_ = self.args[0] + # Improved lower bounds known for S(6) and S(7) + if f_ == 6: + return Integer(536) + if f_ == 7: + return Integer(1680) + # For other cases, use general expression + if f_.is_Integer: + return 3*self.func(f_ - 1).lower_bound() - 1 + return (3**f_ - 1)/2 + + +def _schur_subsets_number(n): + + if n is S.Infinity: + raise ValueError("Input must be finite") + if n <= 0: + raise ValueError("n must be a non-zero positive integer.") + elif n <= 3: + min_k = 1 + else: + min_k = math.ceil(math.log(2*n + 1, 3)) + + return Integer(min_k) + + +def schur_partition(n): + """ + + This function returns the partition in the minimum number of sum-free subsets + according to the lower bound given by the Schur Number. + + Parameters + ========== + + n: a number + n is the upper limit of the range [1, n] for which we need to find and + return the minimum number of free subsets according to the lower bound + of schur number + + Returns + ======= + + List of lists + List of the minimum number of sum-free subsets + + Notes + ===== + + It is possible for some n to make the partition into less + subsets since the only known Schur numbers are: + S(1) = 1, S(2) = 4, S(3) = 13, S(4) = 44. + e.g for n = 44 the lower bound from the function above is 5 subsets but it has been proven + that can be done with 4 subsets. 
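+
+    The partition is built recursively: every sum-free subset S of a
+    partition of [1, m] is replaced by {3*x : x in S} together with
+    {3*x - 1 : x in S}, and the numbers congruent to 1 modulo 3 form one
+    additional subset, so a sum-free partition of [1, m] into k subsets is
+    extended to one of [1, 3*m + 1] into k + 1 subsets.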
+ + Examples + ======== + + For n = 1, 2, 3 the answer is the set itself + + >>> from sympy.combinatorics.schur_number import schur_partition + >>> schur_partition(2) + [[1, 2]] + + For n > 3, the answer is the minimum number of sum-free subsets: + + >>> schur_partition(5) + [[3, 2], [5], [1, 4]] + + >>> schur_partition(8) + [[3, 2], [6, 5, 8], [1, 4, 7]] + """ + + if isinstance(n, Basic) and not n.is_Number: + raise ValueError("Input value must be a number") + + number_of_subsets = _schur_subsets_number(n) + if n == 1: + sum_free_subsets = [[1]] + elif n == 2: + sum_free_subsets = [[1, 2]] + elif n == 3: + sum_free_subsets = [[1, 2, 3]] + else: + sum_free_subsets = [[1, 4], [2, 3]] + + while len(sum_free_subsets) < number_of_subsets: + sum_free_subsets = _generate_next_list(sum_free_subsets, n) + missed_elements = [3*k + 1 for k in range(len(sum_free_subsets), (n-1)//3 + 1)] + sum_free_subsets[-1] += missed_elements + + return sum_free_subsets + + +def _generate_next_list(current_list, n): + new_list = [] + + for item in current_list: + temp_1 = [number*3 for number in item if number*3 <= n] + temp_2 = [number*3 - 1 for number in item if number*3 - 1 <= n] + new_item = temp_1 + temp_2 + new_list.append(new_item) + + last_list = [3*k + 1 for k in range(len(current_list)+1) if 3*k + 1 <= n] + new_list.append(last_list) + current_list = new_list + + return current_list diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_permutations.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_permutations.py new file mode 100644 index 0000000000000000000000000000000000000000..6949d5e781e0cb4e27fc7cdd24862ae22d09cb01 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_permutations.py @@ -0,0 +1,562 @@ +from itertools import permutations + +from sympy.core.expr import unchanged +from sympy.core.numbers import Integer +from sympy.core.relational import Eq +from sympy.core.symbol import Symbol +from sympy.core.singleton import S +from sympy.combinatorics.permutations import \ + Permutation, _af_parity, _af_rmul, _af_rmuln, AppliedPermutation, Cycle +from sympy.printing import sstr, srepr, pretty, latex +from sympy.testing.pytest import raises, warns_deprecated_sympy + + +rmul = Permutation.rmul +a = Symbol('a', integer=True) + + +def test_Permutation(): + # don't auto fill 0 + raises(ValueError, lambda: Permutation([1])) + p = Permutation([0, 1, 2, 3]) + # call as bijective + assert [p(i) for i in range(p.size)] == list(p) + # call as operator + assert p(list(range(p.size))) == list(p) + # call as function + assert list(p(1, 2)) == [0, 2, 1, 3] + raises(TypeError, lambda: p(-1)) + raises(TypeError, lambda: p(5)) + # conversion to list + assert list(p) == list(range(4)) + assert Permutation(size=4) == Permutation(3) + assert Permutation(Permutation(3), size=5) == Permutation(4) + # cycle form with size + assert Permutation([[1, 2]], size=4) == Permutation([[1, 2], [0], [3]]) + # random generation + assert Permutation.random(2) in (Permutation([1, 0]), Permutation([0, 1])) + + p = Permutation([2, 5, 1, 6, 3, 0, 4]) + q = Permutation([[1], [0, 3, 5, 6, 2, 4]]) + assert len({p, p}) == 1 + r = Permutation([1, 3, 2, 0, 4, 6, 5]) + ans = Permutation(_af_rmuln(*[w.array_form for w in (p, q, r)])).array_form + assert rmul(p, q, r).array_form == ans + # make sure no other permutation of p, q, r could have given + # that answer + for a, b, c in permutations((p, q, r)): + if (a, b, c) == (p, q, r): + continue + assert 
rmul(a, b, c).array_form != ans + + assert p.support() == list(range(7)) + assert q.support() == [0, 2, 3, 4, 5, 6] + assert Permutation(p.cyclic_form).array_form == p.array_form + assert p.cardinality == 5040 + assert q.cardinality == 5040 + assert q.cycles == 2 + assert rmul(q, p) == Permutation([4, 6, 1, 2, 5, 3, 0]) + assert rmul(p, q) == Permutation([6, 5, 3, 0, 2, 4, 1]) + assert _af_rmul(p.array_form, q.array_form) == \ + [6, 5, 3, 0, 2, 4, 1] + + assert rmul(Permutation([[1, 2, 3], [0, 4]]), + Permutation([[1, 2, 4], [0], [3]])).cyclic_form == \ + [[0, 4, 2], [1, 3]] + assert q.array_form == [3, 1, 4, 5, 0, 6, 2] + assert q.cyclic_form == [[0, 3, 5, 6, 2, 4]] + assert q.full_cyclic_form == [[0, 3, 5, 6, 2, 4], [1]] + assert p.cyclic_form == [[0, 2, 1, 5], [3, 6, 4]] + t = p.transpositions() + assert t == [(0, 5), (0, 1), (0, 2), (3, 4), (3, 6)] + assert Permutation.rmul(*[Permutation(Cycle(*ti)) for ti in (t)]) + assert Permutation([1, 0]).transpositions() == [(0, 1)] + + assert p**13 == p + assert q**0 == Permutation(list(range(q.size))) + assert q**-2 == ~q**2 + assert q**2 == Permutation([5, 1, 0, 6, 3, 2, 4]) + assert q**3 == q**2*q + assert q**4 == q**2*q**2 + + a = Permutation(1, 3) + b = Permutation(2, 0, 3) + I = Permutation(3) + assert ~a == a**-1 + assert a*~a == I + assert a*b**-1 == a*~b + + ans = Permutation(0, 5, 3, 1, 6)(2, 4) + assert (p + q.rank()).rank() == ans.rank() + assert (p + q.rank())._rank == ans.rank() + assert (q + p.rank()).rank() == ans.rank() + raises(TypeError, lambda: p + Permutation(list(range(10)))) + + assert (p - q.rank()).rank() == Permutation(0, 6, 3, 1, 2, 5, 4).rank() + assert p.rank() - q.rank() < 0 # for coverage: make sure mod is used + assert (q - p.rank()).rank() == Permutation(1, 4, 6, 2)(3, 5).rank() + + assert p*q == Permutation(_af_rmuln(*[list(w) for w in (q, p)])) + assert p*Permutation([]) == p + assert Permutation([])*p == p + assert p*Permutation([[0, 1]]) == Permutation([2, 5, 0, 6, 3, 1, 4]) + assert Permutation([[0, 1]])*p == Permutation([5, 2, 1, 6, 3, 0, 4]) + + pq = p ^ q + assert pq == Permutation([5, 6, 0, 4, 1, 2, 3]) + assert pq == rmul(q, p, ~q) + qp = q ^ p + assert qp == Permutation([4, 3, 6, 2, 1, 5, 0]) + assert qp == rmul(p, q, ~p) + raises(ValueError, lambda: p ^ Permutation([])) + + assert p.commutator(q) == Permutation(0, 1, 3, 4, 6, 5, 2) + assert q.commutator(p) == Permutation(0, 2, 5, 6, 4, 3, 1) + assert p.commutator(q) == ~q.commutator(p) + raises(ValueError, lambda: p.commutator(Permutation([]))) + + assert len(p.atoms()) == 7 + assert q.atoms() == {0, 1, 2, 3, 4, 5, 6} + + assert p.inversion_vector() == [2, 4, 1, 3, 1, 0] + assert q.inversion_vector() == [3, 1, 2, 2, 0, 1] + + assert Permutation.from_inversion_vector(p.inversion_vector()) == p + assert Permutation.from_inversion_vector(q.inversion_vector()).array_form\ + == q.array_form + raises(ValueError, lambda: Permutation.from_inversion_vector([0, 2])) + assert Permutation(list(range(500, -1, -1))).inversions() == 125250 + + s = Permutation([0, 4, 1, 3, 2]) + assert s.parity() == 0 + _ = s.cyclic_form # needed to create a value for _cyclic_form + assert len(s._cyclic_form) != s.size and s.parity() == 0 + assert not s.is_odd + assert s.is_even + assert Permutation([0, 1, 4, 3, 2]).parity() == 1 + assert _af_parity([0, 4, 1, 3, 2]) == 0 + assert _af_parity([0, 1, 4, 3, 2]) == 1 + + s = Permutation([0]) + + assert s.is_Singleton + assert Permutation([]).is_Empty + + r = Permutation([3, 2, 1, 0]) + assert (r**2).is_Identity + + assert rmul(~p, 
p).is_Identity + assert (~p)**13 == Permutation([5, 2, 0, 4, 6, 1, 3]) + assert ~(r**2).is_Identity + assert p.max() == 6 + assert p.min() == 0 + + q = Permutation([[6], [5], [0, 1, 2, 3, 4]]) + + assert q.max() == 4 + assert q.min() == 0 + + p = Permutation([1, 5, 2, 0, 3, 6, 4]) + q = Permutation([[1, 2, 3, 5, 6], [0, 4]]) + + assert p.ascents() == [0, 3, 4] + assert q.ascents() == [1, 2, 4] + assert r.ascents() == [] + + assert p.descents() == [1, 2, 5] + assert q.descents() == [0, 3, 5] + assert Permutation(r.descents()).is_Identity + + assert p.inversions() == 7 + # test the merge-sort with a longer permutation + big = list(p) + list(range(p.max() + 1, p.max() + 130)) + assert Permutation(big).inversions() == 7 + assert p.signature() == -1 + assert q.inversions() == 11 + assert q.signature() == -1 + assert rmul(p, ~p).inversions() == 0 + assert rmul(p, ~p).signature() == 1 + + assert p.order() == 6 + assert q.order() == 10 + assert (p**(p.order())).is_Identity + + assert p.length() == 6 + assert q.length() == 7 + assert r.length() == 4 + + assert p.runs() == [[1, 5], [2], [0, 3, 6], [4]] + assert q.runs() == [[4], [2, 3, 5], [0, 6], [1]] + assert r.runs() == [[3], [2], [1], [0]] + + assert p.index() == 8 + assert q.index() == 8 + assert r.index() == 3 + + assert p.get_precedence_distance(q) == q.get_precedence_distance(p) + assert p.get_adjacency_distance(q) == p.get_adjacency_distance(q) + assert p.get_positional_distance(q) == p.get_positional_distance(q) + p = Permutation([0, 1, 2, 3]) + q = Permutation([3, 2, 1, 0]) + assert p.get_precedence_distance(q) == 6 + assert p.get_adjacency_distance(q) == 3 + assert p.get_positional_distance(q) == 8 + p = Permutation([0, 3, 1, 2, 4]) + q = Permutation.josephus(4, 5, 2) + assert p.get_adjacency_distance(q) == 3 + raises(ValueError, lambda: p.get_adjacency_distance(Permutation([]))) + raises(ValueError, lambda: p.get_positional_distance(Permutation([]))) + raises(ValueError, lambda: p.get_precedence_distance(Permutation([]))) + + a = [Permutation.unrank_nonlex(4, i) for i in range(5)] + iden = Permutation([0, 1, 2, 3]) + for i in range(5): + for j in range(i + 1, 5): + assert a[i].commutes_with(a[j]) == \ + (rmul(a[i], a[j]) == rmul(a[j], a[i])) + if a[i].commutes_with(a[j]): + assert a[i].commutator(a[j]) == iden + assert a[j].commutator(a[i]) == iden + + a = Permutation(3) + b = Permutation(0, 6, 3)(1, 2) + assert a.cycle_structure == {1: 4} + assert b.cycle_structure == {2: 1, 3: 1, 1: 2} + # issue 11130 + raises(ValueError, lambda: Permutation(3, size=3)) + raises(ValueError, lambda: Permutation([1, 2, 0, 3], size=3)) + + +def test_Permutation_subclassing(): + # Subclass that adds permutation application on iterables + class CustomPermutation(Permutation): + def __call__(self, *i): + try: + return super().__call__(*i) + except TypeError: + pass + + try: + perm_obj = i[0] + return [self._array_form[j] for j in perm_obj] + except TypeError: + raise TypeError('unrecognized argument') + + def __eq__(self, other): + if isinstance(other, Permutation): + return self._hashable_content() == other._hashable_content() + else: + return super().__eq__(other) + + def __hash__(self): + return super().__hash__() + + p = CustomPermutation([1, 2, 3, 0]) + q = Permutation([1, 2, 3, 0]) + + assert p == q + raises(TypeError, lambda: q([1, 2])) + assert [2, 3] == p([1, 2]) + + assert type(p * q) == CustomPermutation + assert type(q * p) == Permutation # True because q.__mul__(p) is called! 
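+    # CustomPermutation does not override __rmul__, so Python does not give
+    # the right-hand subclass priority here: q.__mul__(p) runs first,
+    # succeeds, and builds the product as a plain Permutation.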
+ + # Run all tests for the Permutation class also on the subclass + def wrapped_test_Permutation(): + # Monkeypatch the class definition in the globals + globals()['__Perm'] = globals()['Permutation'] + globals()['Permutation'] = CustomPermutation + test_Permutation() + globals()['Permutation'] = globals()['__Perm'] # Restore + del globals()['__Perm'] + + wrapped_test_Permutation() + + +def test_josephus(): + assert Permutation.josephus(4, 6, 1) == Permutation([3, 1, 0, 2, 5, 4]) + assert Permutation.josephus(1, 5, 1).is_Identity + + +def test_ranking(): + assert Permutation.unrank_lex(5, 10).rank() == 10 + p = Permutation.unrank_lex(15, 225) + assert p.rank() == 225 + p1 = p.next_lex() + assert p1.rank() == 226 + assert Permutation.unrank_lex(15, 225).rank() == 225 + assert Permutation.unrank_lex(10, 0).is_Identity + p = Permutation.unrank_lex(4, 23) + assert p.rank() == 23 + assert p.array_form == [3, 2, 1, 0] + assert p.next_lex() is None + + p = Permutation([1, 5, 2, 0, 3, 6, 4]) + q = Permutation([[1, 2, 3, 5, 6], [0, 4]]) + a = [Permutation.unrank_trotterjohnson(4, i).array_form for i in range(5)] + assert a == [[0, 1, 2, 3], [0, 1, 3, 2], [0, 3, 1, 2], [3, 0, 1, + 2], [3, 0, 2, 1] ] + assert [Permutation(pa).rank_trotterjohnson() for pa in a] == list(range(5)) + assert Permutation([0, 1, 2, 3]).next_trotterjohnson() == \ + Permutation([0, 1, 3, 2]) + + assert q.rank_trotterjohnson() == 2283 + assert p.rank_trotterjohnson() == 3389 + assert Permutation([1, 0]).rank_trotterjohnson() == 1 + a = Permutation(list(range(3))) + b = a + l = [] + tj = [] + for i in range(6): + l.append(a) + tj.append(b) + a = a.next_lex() + b = b.next_trotterjohnson() + assert a == b is None + assert {tuple(a) for a in l} == {tuple(a) for a in tj} + + p = Permutation([2, 5, 1, 6, 3, 0, 4]) + q = Permutation([[6], [5], [0, 1, 2, 3, 4]]) + assert p.rank() == 1964 + assert q.rank() == 870 + assert Permutation([]).rank_nonlex() == 0 + prank = p.rank_nonlex() + assert prank == 1600 + assert Permutation.unrank_nonlex(7, 1600) == p + qrank = q.rank_nonlex() + assert qrank == 41 + assert Permutation.unrank_nonlex(7, 41) == Permutation(q.array_form) + + a = [Permutation.unrank_nonlex(4, i).array_form for i in range(24)] + assert a == [ + [1, 2, 3, 0], [3, 2, 0, 1], [1, 3, 0, 2], [1, 2, 0, 3], [2, 3, 1, 0], + [2, 0, 3, 1], [3, 0, 1, 2], [2, 0, 1, 3], [1, 3, 2, 0], [3, 0, 2, 1], + [1, 0, 3, 2], [1, 0, 2, 3], [2, 1, 3, 0], [2, 3, 0, 1], [3, 1, 0, 2], + [2, 1, 0, 3], [3, 2, 1, 0], [0, 2, 3, 1], [0, 3, 1, 2], [0, 2, 1, 3], + [3, 1, 2, 0], [0, 3, 2, 1], [0, 1, 3, 2], [0, 1, 2, 3]] + + N = 10 + p1 = Permutation(a[0]) + for i in range(1, N+1): + p1 = p1*Permutation(a[i]) + p2 = Permutation.rmul_with_af(*[Permutation(h) for h in a[N::-1]]) + assert p1 == p2 + + ok = [] + p = Permutation([1, 0]) + for i in range(3): + ok.append(p.array_form) + p = p.next_nonlex() + if p is None: + ok.append(None) + break + assert ok == [[1, 0], [0, 1], None] + assert Permutation([3, 2, 0, 1]).next_nonlex() == Permutation([1, 3, 0, 2]) + assert [Permutation(pa).rank_nonlex() for pa in a] == list(range(24)) + + +def test_mul(): + a, b = [0, 2, 1, 3], [0, 1, 3, 2] + assert _af_rmul(a, b) == [0, 2, 3, 1] + assert _af_rmuln(a, b, list(range(4))) == [0, 2, 3, 1] + assert rmul(Permutation(a), Permutation(b)).array_form == [0, 2, 3, 1] + + a = Permutation([0, 2, 1, 3]) + b = (0, 1, 3, 2) + c = (3, 1, 2, 0) + assert Permutation.rmul(a, b, c) == Permutation([1, 2, 3, 0]) + assert Permutation.rmul(a, c) == Permutation([3, 2, 1, 0]) + raises(TypeError, 
lambda: Permutation.rmul(b, c)) + + n = 6 + m = 8 + a = [Permutation.unrank_nonlex(n, i).array_form for i in range(m)] + h = list(range(n)) + for i in range(m): + h = _af_rmul(h, a[i]) + h2 = _af_rmuln(*a[:i + 1]) + assert h == h2 + + +def test_args(): + p = Permutation([(0, 3, 1, 2), (4, 5)]) + assert p._cyclic_form is None + assert Permutation(p) == p + assert p.cyclic_form == [[0, 3, 1, 2], [4, 5]] + assert p._array_form == [3, 2, 0, 1, 5, 4] + p = Permutation((0, 3, 1, 2)) + assert p._cyclic_form is None + assert p._array_form == [0, 3, 1, 2] + assert Permutation([0]) == Permutation((0, )) + assert Permutation([[0], [1]]) == Permutation(((0, ), (1, ))) == \ + Permutation(((0, ), [1])) + assert Permutation([[1, 2]]) == Permutation([0, 2, 1]) + assert Permutation([[1], [4, 2]]) == Permutation([0, 1, 4, 3, 2]) + assert Permutation([[1], [4, 2]], size=1) == Permutation([0, 1, 4, 3, 2]) + assert Permutation( + [[1], [4, 2]], size=6) == Permutation([0, 1, 4, 3, 2, 5]) + assert Permutation([[0, 1], [0, 2]]) == Permutation(0, 1, 2) + assert Permutation([], size=3) == Permutation([0, 1, 2]) + assert Permutation(3).list(5) == [0, 1, 2, 3, 4] + assert Permutation(3).list(-1) == [] + assert Permutation(5)(1, 2).list(-1) == [0, 2, 1] + assert Permutation(5)(1, 2).list() == [0, 2, 1, 3, 4, 5] + raises(ValueError, lambda: Permutation([1, 2], [0])) + # enclosing brackets needed + raises(ValueError, lambda: Permutation([[1, 2], 0])) + # enclosing brackets needed on 0 + raises(ValueError, lambda: Permutation([1, 1, 0])) + raises(ValueError, lambda: Permutation([4, 5], size=10)) # where are 0-3? + # but this is ok because cycles imply that only those listed moved + assert Permutation(4, 5) == Permutation([0, 1, 2, 3, 5, 4]) + + +def test_Cycle(): + assert str(Cycle()) == '()' + assert Cycle(Cycle(1,2)) == Cycle(1, 2) + assert Cycle(1,2).copy() == Cycle(1,2) + assert list(Cycle(1, 3, 2)) == [0, 3, 1, 2] + assert Cycle(1, 2)(2, 3) == Cycle(1, 3, 2) + assert Cycle(1, 2)(2, 3)(4, 5) == Cycle(1, 3, 2)(4, 5) + assert Permutation(Cycle(1, 2)(2, 1, 0, 3)).cyclic_form, Cycle(0, 2, 1) + raises(ValueError, lambda: Cycle().list()) + assert Cycle(1, 2).list() == [0, 2, 1] + assert Cycle(1, 2).list(4) == [0, 2, 1, 3] + assert Cycle(3).list(2) == [0, 1] + assert Cycle(3).list(6) == [0, 1, 2, 3, 4, 5] + assert Permutation(Cycle(1, 2), size=4) == \ + Permutation([0, 2, 1, 3]) + assert str(Cycle(1, 2)(4, 5)) == '(1 2)(4 5)' + assert str(Cycle(1, 2)) == '(1 2)' + assert Cycle(Permutation(list(range(3)))) == Cycle() + assert Cycle(1, 2).list() == [0, 2, 1] + assert Cycle(1, 2).list(4) == [0, 2, 1, 3] + assert Cycle().size == 0 + raises(ValueError, lambda: Cycle((1, 2))) + raises(ValueError, lambda: Cycle(1, 2, 1)) + raises(TypeError, lambda: Cycle(1, 2)*{}) + raises(ValueError, lambda: Cycle(4)[a]) + raises(ValueError, lambda: Cycle(2, -4, 3)) + + # check round-trip + p = Permutation([[1, 2], [4, 3]], size=5) + assert Permutation(Cycle(p)) == p + + +def test_from_sequence(): + assert Permutation.from_sequence('SymPy') == Permutation(4)(0, 1, 3) + assert Permutation.from_sequence('SymPy', key=lambda x: x.lower()) == \ + Permutation(4)(0, 2)(1, 3) + + +def test_resize(): + p = Permutation(0, 1, 2) + assert p.resize(5) == Permutation(0, 1, 2, size=5) + assert p.resize(4) == Permutation(0, 1, 2, size=4) + assert p.resize(3) == p + raises(ValueError, lambda: p.resize(2)) + + p = Permutation(0, 1, 2)(3, 4)(5, 6) + assert p.resize(3) == Permutation(0, 1, 2) + raises(ValueError, lambda: p.resize(4)) + + +def test_printing_cyclic(): 
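+    # With the default cyclic printing, a trailing fixed point is kept as a
+    # bare singleton (e.g. Permutation(3)(0, 1, 2)) so that the size of the
+    # permutation can still be recovered from the repr.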
+ p1 = Permutation([0, 2, 1]) + assert repr(p1) == 'Permutation(1, 2)' + assert str(p1) == '(1 2)' + p2 = Permutation() + assert repr(p2) == 'Permutation()' + assert str(p2) == '()' + p3 = Permutation([1, 2, 0, 3]) + assert repr(p3) == 'Permutation(3)(0, 1, 2)' + + +def test_printing_non_cyclic(): + p1 = Permutation([0, 1, 2, 3, 4, 5]) + assert srepr(p1, perm_cyclic=False) == 'Permutation([], size=6)' + assert sstr(p1, perm_cyclic=False) == 'Permutation([], size=6)' + p2 = Permutation([0, 1, 2]) + assert srepr(p2, perm_cyclic=False) == 'Permutation([0, 1, 2])' + assert sstr(p2, perm_cyclic=False) == 'Permutation([0, 1, 2])' + + p3 = Permutation([0, 2, 1]) + assert srepr(p3, perm_cyclic=False) == 'Permutation([0, 2, 1])' + assert sstr(p3, perm_cyclic=False) == 'Permutation([0, 2, 1])' + p4 = Permutation([0, 1, 3, 2, 4, 5, 6, 7]) + assert srepr(p4, perm_cyclic=False) == 'Permutation([0, 1, 3, 2], size=8)' + + +def test_deprecated_print_cyclic(): + p = Permutation(0, 1, 2) + try: + Permutation.print_cyclic = True + with warns_deprecated_sympy(): + assert sstr(p) == '(0 1 2)' + with warns_deprecated_sympy(): + assert srepr(p) == 'Permutation(0, 1, 2)' + with warns_deprecated_sympy(): + assert pretty(p) == '(0 1 2)' + with warns_deprecated_sympy(): + assert latex(p) == r'\left( 0\; 1\; 2\right)' + + Permutation.print_cyclic = False + with warns_deprecated_sympy(): + assert sstr(p) == 'Permutation([1, 2, 0])' + with warns_deprecated_sympy(): + assert srepr(p) == 'Permutation([1, 2, 0])' + with warns_deprecated_sympy(): + assert pretty(p, use_unicode=False) == '/0 1 2\\\n\\1 2 0/' + with warns_deprecated_sympy(): + assert latex(p) == \ + r'\begin{pmatrix} 0 & 1 & 2 \\ 1 & 2 & 0 \end{pmatrix}' + finally: + Permutation.print_cyclic = None + + +def test_permutation_equality(): + a = Permutation(0, 1, 2) + b = Permutation(0, 1, 2) + assert Eq(a, b) is S.true + c = Permutation(0, 2, 1) + assert Eq(a, c) is S.false + + d = Permutation(0, 1, 2, size=4) + assert unchanged(Eq, a, d) + e = Permutation(0, 2, 1, size=4) + assert unchanged(Eq, a, e) + + i = Permutation() + assert unchanged(Eq, i, 0) + assert unchanged(Eq, 0, i) + + +def test_issue_17661(): + c1 = Cycle(1,2) + c2 = Cycle(1,2) + assert c1 == c2 + assert repr(c1) == 'Cycle(1, 2)' + assert c1 == c2 + + +def test_permutation_apply(): + x = Symbol('x') + p = Permutation(0, 1, 2) + assert p.apply(0) == 1 + assert isinstance(p.apply(0), Integer) + assert p.apply(x) == AppliedPermutation(p, x) + assert AppliedPermutation(p, x).subs(x, 0) == 1 + + x = Symbol('x', integer=False) + raises(NotImplementedError, lambda: p.apply(x)) + x = Symbol('x', negative=True) + raises(NotImplementedError, lambda: p.apply(x)) + + +def test_AppliedPermutation(): + x = Symbol('x') + p = Permutation(0, 1, 2) + raises(ValueError, lambda: AppliedPermutation((0, 1, 2), x)) + assert AppliedPermutation(p, 1, evaluate=True) == 2 + assert AppliedPermutation(p, 1, evaluate=False).__class__ == \ + AppliedPermutation diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_tensor_can.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_tensor_can.py new file mode 100644 index 0000000000000000000000000000000000000000..3922419f20b92536426bfaae4b7e94df5db671b5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/tests/test_tensor_can.py @@ -0,0 +1,560 @@ +from sympy.combinatorics.permutations import Permutation, Perm +from sympy.combinatorics.tensor_can import (perm_af_direct_product, dummy_sgs, + 
riemann_bsgs, get_symmetric_group_sgs, canonicalize, bsgs_direct_product) +from sympy.combinatorics.testutil import canonicalize_naive, graph_certificate +from sympy.testing.pytest import skip, XFAIL + +def test_perm_af_direct_product(): + gens1 = [[1,0,2,3], [0,1,3,2]] + gens2 = [[1,0]] + assert perm_af_direct_product(gens1, gens2, 0) == [[1, 0, 2, 3, 4, 5], [0, 1, 3, 2, 4, 5], [0, 1, 2, 3, 5, 4]] + gens1 = [[1,0,2,3,5,4], [0,1,3,2,4,5]] + gens2 = [[1,0,2,3]] + assert [[1, 0, 2, 3, 4, 5, 7, 6], [0, 1, 3, 2, 4, 5, 6, 7], [0, 1, 2, 3, 5, 4, 6, 7]] + +def test_dummy_sgs(): + a = dummy_sgs([1,2], 0, 4) + assert a == [[0,2,1,3,4,5]] + a = dummy_sgs([2,3,4,5], 0, 8) + assert a == [x._array_form for x in [Perm(9)(2,3), Perm(9)(4,5), + Perm(9)(2,4)(3,5)]] + + a = dummy_sgs([2,3,4,5], 1, 8) + assert a == [x._array_form for x in [Perm(2,3)(8,9), Perm(4,5)(8,9), + Perm(9)(2,4)(3,5)]] + +def test_get_symmetric_group_sgs(): + assert get_symmetric_group_sgs(2) == ([0], [Permutation(3)(0,1)]) + assert get_symmetric_group_sgs(2, 1) == ([0], [Permutation(0,1)(2,3)]) + assert get_symmetric_group_sgs(3) == ([0,1], [Permutation(4)(0,1), Permutation(4)(1,2)]) + assert get_symmetric_group_sgs(3, 1) == ([0,1], [Permutation(0,1)(3,4), Permutation(1,2)(3,4)]) + assert get_symmetric_group_sgs(4) == ([0,1,2], [Permutation(5)(0,1), Permutation(5)(1,2), Permutation(5)(2,3)]) + assert get_symmetric_group_sgs(4, 1) == ([0,1,2], [Permutation(0,1)(4,5), Permutation(1,2)(4,5), Permutation(2,3)(4,5)]) + + +def test_canonicalize_no_slot_sym(): + # cases in which there is no slot symmetry after fixing the + # free indices; here and in the following if the symmetry of the + # metric is not specified, it is assumed to be symmetric. + # If it is not specified, tensors are commuting. + + # A_d0 * B^d0; g = [1,0, 2,3]; T_c = A^d0*B_d0; can = [0,1,2,3] + base1, gens1 = get_symmetric_group_sgs(1) + dummies = [0, 1] + g = Permutation([1,0,2,3]) + can = canonicalize(g, dummies, 0, (base1,gens1,1,0), (base1,gens1,1,0)) + assert can == [0,1,2,3] + # equivalently + can = canonicalize(g, dummies, 0, (base1, gens1, 2, None)) + assert can == [0,1,2,3] + + # with antisymmetric metric; T_c = -A^d0*B_d0; can = [0,1,3,2] + can = canonicalize(g, dummies, 1, (base1,gens1,1,0), (base1,gens1,1,0)) + assert can == [0,1,3,2] + + # A^a * B^b; ord = [a,b]; g = [0,1,2,3]; can = g + g = Permutation([0,1,2,3]) + dummies = [] + t0 = t1 = (base1, gens1, 1, 0) + can = canonicalize(g, dummies, 0, t0, t1) + assert can == [0,1,2,3] + # B^b * A^a + g = Permutation([1,0,2,3]) + can = canonicalize(g, dummies, 0, t0, t1) + assert can == [1,0,2,3] + + # A symmetric + # A^{b}_{d0}*A^{d0, a} order a,b,d0,-d0; T_c = A^{a d0}*A{b}_{d0} + # g = [1,3,2,0,4,5]; can = [0,2,1,3,4,5] + base2, gens2 = get_symmetric_group_sgs(2) + dummies = [2,3] + g = Permutation([1,3,2,0,4,5]) + can = canonicalize(g, dummies, 0, (base2, gens2, 2, 0)) + assert can == [0, 2, 1, 3, 4, 5] + # with antisymmetric metric + can = canonicalize(g, dummies, 1, (base2, gens2, 2, 0)) + assert can == [0, 2, 1, 3, 4, 5] + # A^{a}_{d0}*A^{d0, b} + g = Permutation([0,3,2,1,4,5]) + can = canonicalize(g, dummies, 1, (base2, gens2, 2, 0)) + assert can == [0, 2, 1, 3, 5, 4] + + # A, B symmetric + # A^b_d0*B^{d0,a}; g=[1,3,2,0,4,5] + # T_c = A^{b,d0}*B_{a,d0}; can = [1,2,0,3,4,5] + dummies = [2,3] + g = Permutation([1,3,2,0,4,5]) + can = canonicalize(g, dummies, 0, (base2,gens2,1,0), (base2,gens2,1,0)) + assert can == [1,2,0,3,4,5] + # same with antisymmetric metric + can = canonicalize(g, dummies, 1, 
(base2,gens2,1,0), (base2,gens2,1,0)) + assert can == [1,2,0,3,5,4] + + # A^{d1}_{d0}*B^d0*C_d1 ord=[d0,-d0,d1,-d1]; g = [2,1,0,3,4,5] + # T_c = A^{d0 d1}*B_d0*C_d1; can = [0,2,1,3,4,5] + base1, gens1 = get_symmetric_group_sgs(1) + base2, gens2 = get_symmetric_group_sgs(2) + g = Permutation([2,1,0,3,4,5]) + dummies = [0,1,2,3] + t0 = (base2, gens2, 1, 0) + t1 = t2 = (base1, gens1, 1, 0) + can = canonicalize(g, dummies, 0, t0, t1, t2) + assert can == [0, 2, 1, 3, 4, 5] + + # A without symmetry + # A^{d1}_{d0}*B^d0*C_d1 ord=[d0,-d0,d1,-d1]; g = [2,1,0,3,4,5] + # T_c = A^{d0 d1}*B_d1*C_d0; can = [0,2,3,1,4,5] + g = Permutation([2,1,0,3,4,5]) + dummies = [0,1,2,3] + t0 = ([], [Permutation(list(range(4)))], 1, 0) + can = canonicalize(g, dummies, 0, t0, t1, t2) + assert can == [0,2,3,1,4,5] + # A, B without symmetry + # A^{d1}_{d0}*B_{d1}^{d0}; g = [2,1,3,0,4,5] + # T_c = A^{d0 d1}*B_{d0 d1}; can = [0,2,1,3,4,5] + t0 = t1 = ([], [Permutation(list(range(4)))], 1, 0) + dummies = [0,1,2,3] + g = Permutation([2,1,3,0,4,5]) + can = canonicalize(g, dummies, 0, t0, t1) + assert can == [0, 2, 1, 3, 4, 5] + # A_{d0}^{d1}*B_{d1}^{d0}; g = [1,2,3,0,4,5] + # T_c = A^{d0 d1}*B_{d1 d0}; can = [0,2,3,1,4,5] + g = Permutation([1,2,3,0,4,5]) + can = canonicalize(g, dummies, 0, t0, t1) + assert can == [0,2,3,1,4,5] + + # A, B, C without symmetry + # A^{d1 d0}*B_{a d0}*C_{d1 b} ord=[a,b,d0,-d0,d1,-d1] + # g=[4,2,0,3,5,1,6,7] + # T_c=A^{d0 d1}*B_{a d1}*C_{d0 b}; can = [2,4,0,5,3,1,6,7] + t0 = t1 = t2 = ([], [Permutation(list(range(4)))], 1, 0) + dummies = [2,3,4,5] + g = Permutation([4,2,0,3,5,1,6,7]) + can = canonicalize(g, dummies, 0, t0, t1, t2) + assert can == [2,4,0,5,3,1,6,7] + + # A symmetric, B and C without symmetry + # A^{d1 d0}*B_{a d0}*C_{d1 b} ord=[a,b,d0,-d0,d1,-d1] + # g=[4,2,0,3,5,1,6,7] + # T_c = A^{d0 d1}*B_{a d0}*C_{d1 b}; can = [2,4,0,3,5,1,6,7] + t0 = (base2,gens2,1,0) + t1 = t2 = ([], [Permutation(list(range(4)))], 1, 0) + dummies = [2,3,4,5] + g = Permutation([4,2,0,3,5,1,6,7]) + can = canonicalize(g, dummies, 0, t0, t1, t2) + assert can == [2,4,0,3,5,1,6,7] + + # A and C symmetric, B without symmetry + # A^{d1 d0}*B_{a d0}*C_{d1 b} ord=[a,b,d0,-d0,d1,-d1] + # g=[4,2,0,3,5,1,6,7] + # T_c = A^{d0 d1}*B_{a d0}*C_{b d1}; can = [2,4,0,3,1,5,6,7] + t0 = t2 = (base2,gens2,1,0) + t1 = ([], [Permutation(list(range(4)))], 1, 0) + dummies = [2,3,4,5] + g = Permutation([4,2,0,3,5,1,6,7]) + can = canonicalize(g, dummies, 0, t0, t1, t2) + assert can == [2,4,0,3,1,5,6,7] + + # A symmetric, B without symmetry, C antisymmetric + # A^{d1 d0}*B_{a d0}*C_{d1 b} ord=[a,b,d0,-d0,d1,-d1] + # g=[4,2,0,3,5,1,6,7] + # T_c = -A^{d0 d1}*B_{a d0}*C_{b d1}; can = [2,4,0,3,1,5,7,6] + t0 = (base2,gens2, 1, 0) + t1 = ([], [Permutation(list(range(4)))], 1, 0) + base2a, gens2a = get_symmetric_group_sgs(2, 1) + t2 = (base2a, gens2a, 1, 0) + dummies = [2,3,4,5] + g = Permutation([4,2,0,3,5,1,6,7]) + can = canonicalize(g, dummies, 0, t0, t1, t2) + assert can == [2,4,0,3,1,5,7,6] + + +def test_canonicalize_no_dummies(): + base1, gens1 = get_symmetric_group_sgs(1) + base2, gens2 = get_symmetric_group_sgs(2) + base2a, gens2a = get_symmetric_group_sgs(2, 1) + + # A commuting + # A^c A^b A^a; ord = [a,b,c]; g = [2,1,0,3,4] + # T_c = A^a A^b A^c; can = list(range(5)) + g = Permutation([2,1,0,3,4]) + can = canonicalize(g, [], 0, (base1, gens1, 3, 0)) + assert can == list(range(5)) + + # A anticommuting + # A^c A^b A^a; ord = [a,b,c]; g = [2,1,0,3,4] + # T_c = -A^a A^b A^c; can = [0,1,2,4,3] + g = Permutation([2,1,0,3,4]) + can = 
canonicalize(g, [], 0, (base1, gens1, 3, 1)) + assert can == [0,1,2,4,3] + + # A commuting and symmetric + # A^{b,d}*A^{c,a}; ord = [a,b,c,d]; g = [1,3,2,0,4,5] + # T_c = A^{a c}*A^{b d}; can = [0,2,1,3,4,5] + g = Permutation([1,3,2,0,4,5]) + can = canonicalize(g, [], 0, (base2, gens2, 2, 0)) + assert can == [0,2,1,3,4,5] + + # A anticommuting and symmetric + # A^{b,d}*A^{c,a}; ord = [a,b,c,d]; g = [1,3,2,0,4,5] + # T_c = -A^{a c}*A^{b d}; can = [0,2,1,3,5,4] + g = Permutation([1,3,2,0,4,5]) + can = canonicalize(g, [], 0, (base2, gens2, 2, 1)) + assert can == [0,2,1,3,5,4] + # A^{c,a}*A^{b,d} ; g = [2,0,1,3,4,5] + # T_c = A^{a c}*A^{b d}; can = [0,2,1,3,4,5] + g = Permutation([2,0,1,3,4,5]) + can = canonicalize(g, [], 0, (base2, gens2, 2, 1)) + assert can == [0,2,1,3,4,5] + +def test_no_metric_symmetry(): + # no metric symmetry + # A^d1_d0 * A^d0_d1; ord = [d0,-d0,d1,-d1]; g= [2,1,0,3,4,5] + # T_c = A^d0_d1 * A^d1_d0; can = [0,3,2,1,4,5] + g = Permutation([2,1,0,3,4,5]) + can = canonicalize(g, list(range(4)), None, [[], [Permutation(list(range(4)))], 2, 0]) + assert can == [0,3,2,1,4,5] + + # A^d1_d2 * A^d0_d3 * A^d2_d1 * A^d3_d0 + # ord = [d0,-d0,d1,-d1,d2,-d2,d3,-d3] + # 0 1 2 3 4 5 6 7 + # g = [2,5,0,7,4,3,6,1,8,9] + # T_c = A^d0_d1 * A^d1_d0 * A^d2_d3 * A^d3_d2 + # can = [0,3,2,1,4,7,6,5,8,9] + g = Permutation([2,5,0,7,4,3,6,1,8,9]) + #can = canonicalize(g, list(range(8)), 0, [[], [list(range(4))], 4, 0]) + #assert can == [0, 2, 3, 1, 4, 6, 7, 5, 8, 9] + can = canonicalize(g, list(range(8)), None, [[], [Permutation(list(range(4)))], 4, 0]) + assert can == [0, 3, 2, 1, 4, 7, 6, 5, 8, 9] + + # A^d0_d2 * A^d1_d3 * A^d3_d0 * A^d2_d1 + # g = [0,5,2,7,6,1,4,3,8,9] + # T_c = A^d0_d1 * A^d1_d2 * A^d2_d3 * A^d3_d0 + # can = [0,3,2,5,4,7,6,1,8,9] + g = Permutation([0,5,2,7,6,1,4,3,8,9]) + can = canonicalize(g, list(range(8)), None, [[], [Permutation(list(range(4)))], 4, 0]) + assert can == [0,3,2,5,4,7,6,1,8,9] + + g = Permutation([12,7,10,3,14,13,4,11,6,1,2,9,0,15,8,5,16,17]) + can = canonicalize(g, list(range(16)), None, [[], [Permutation(list(range(4)))], 8, 0]) + assert can == [0,3,2,5,4,7,6,1,8,11,10,13,12,15,14,9,16,17] + +def test_canonical_free(): + # t = A^{d0 a1}*A_d0^a0 + # ord = [a0,a1,d0,-d0]; g = [2,1,3,0,4,5]; dummies = [[2,3]] + # t_c = A_d0^a0*A^{d0 a1} + # can = [3,0, 2,1, 4,5] + g = Permutation([2,1,3,0,4,5]) + dummies = [[2,3]] + can = canonicalize(g, dummies, [None], ([], [Permutation(3)], 2, 0)) + assert can == [3,0, 2,1, 4,5] + +def test_canonicalize1(): + base1, gens1 = get_symmetric_group_sgs(1) + base1a, gens1a = get_symmetric_group_sgs(1, 1) + base2, gens2 = get_symmetric_group_sgs(2) + base3, gens3 = get_symmetric_group_sgs(3) + base2a, gens2a = get_symmetric_group_sgs(2, 1) + base3a, gens3a = get_symmetric_group_sgs(3, 1) + + # A_d0*A^d0; ord = [d0,-d0]; g = [1,0,2,3] + # T_c = A^d0*A_d0; can = [0,1,2,3] + g = Permutation([1,0,2,3]) + can = canonicalize(g, [0, 1], 0, (base1, gens1, 2, 0)) + assert can == list(range(4)) + + # A commuting + # A_d0*A_d1*A_d2*A^d2*A^d1*A^d0; ord=[d0,-d0,d1,-d1,d2,-d2] + # g = [1,3,5,4,2,0,6,7] + # T_c = A^d0*A_d0*A^d1*A_d1*A^d2*A_d2; can = list(range(8)) + g = Permutation([1,3,5,4,2,0,6,7]) + can = canonicalize(g, list(range(6)), 0, (base1, gens1, 6, 0)) + assert can == list(range(8)) + + # A anticommuting + # A_d0*A_d1*A_d2*A^d2*A^d1*A^d0; ord=[d0,-d0,d1,-d1,d2,-d2] + # g = [1,3,5,4,2,0,6,7] + # T_c 0; can = 0 + g = Permutation([1,3,5,4,2,0,6,7]) + can = canonicalize(g, list(range(6)), 0, (base1, gens1, 6, 1)) + assert can == 0 + can1 = 
canonicalize_naive(g, list(range(6)), 0, (base1, gens1, 6, 1)) + assert can1 == 0 + + # A commuting symmetric + # A^{d0 b}*A^a_d1*A^d1_d0; ord=[a,b,d0,-d0,d1,-d1] + # g = [2,1,0,5,4,3,6,7] + # T_c = A^{a d0}*A^{b d1}*A_{d0 d1}; can = [0,2,1,4,3,5,6,7] + g = Permutation([2,1,0,5,4,3,6,7]) + can = canonicalize(g, list(range(2,6)), 0, (base2, gens2, 3, 0)) + assert can == [0,2,1,4,3,5,6,7] + + # A, B commuting symmetric + # A^{d0 b}*A^d1_d0*B^a_d1; ord=[a,b,d0,-d0,d1,-d1] + # g = [2,1,4,3,0,5,6,7] + # T_c = A^{b d0}*A_d0^d1*B^a_d1; can = [1,2,3,4,0,5,6,7] + g = Permutation([2,1,4,3,0,5,6,7]) + can = canonicalize(g, list(range(2,6)), 0, (base2,gens2,2,0), (base2,gens2,1,0)) + assert can == [1,2,3,4,0,5,6,7] + + # A commuting symmetric + # A^{d1 d0 b}*A^{a}_{d1 d0}; ord=[a,b, d0,-d0,d1,-d1] + # g = [4,2,1,0,5,3,6,7] + # T_c = A^{a d0 d1}*A^{b}_{d0 d1}; can = [0,2,4,1,3,5,6,7] + g = Permutation([4,2,1,0,5,3,6,7]) + can = canonicalize(g, list(range(2,6)), 0, (base3, gens3, 2, 0)) + assert can == [0,2,4,1,3,5,6,7] + + + # A^{d3 d0 d2}*A^a0_{d1 d2}*A^d1_d3^a1*A^{a2 a3}_d0 + # ord = [a0,a1,a2,a3,d0,-d0,d1,-d1,d2,-d2,d3,-d3] + # 0 1 2 3 4 5 6 7 8 9 10 11 + # g = [10,4,8, 0,7,9, 6,11,1, 2,3,5, 12,13] + # T_c = A^{a0 d0 d1}*A^a1_d0^d2*A^{a2 a3 d3}*A_{d1 d2 d3} + # can = [0,4,6, 1,5,8, 2,3,10, 7,9,11, 12,13] + g = Permutation([10,4,8, 0,7,9, 6,11,1, 2,3,5, 12,13]) + can = canonicalize(g, list(range(4,12)), 0, (base3, gens3, 4, 0)) + assert can == [0,4,6, 1,5,8, 2,3,10, 7,9,11, 12,13] + + # A commuting symmetric, B antisymmetric + # A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3 + # ord = [d0,-d0,d1,-d1,d2,-d2,d3,-d3] + # g = [0,2,4,5,7,3,1,6,8,9] + # in this esxample and in the next three, + # renaming dummy indices and using symmetry of A, + # T = A^{d0 d1 d2} * A_{d0 d1 d3} * B_d2^d3 + # can = 0 + g = Permutation([0,2,4,5,7,3,1,6,8,9]) + can = canonicalize(g, list(range(8)), 0, (base3, gens3,2,0), (base2a,gens2a,1,0)) + assert can == 0 + # A anticommuting symmetric, B anticommuting + # A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3 + # T_c = A^{d0 d1 d2} * A_{d0 d1}^d3 * B_{d2 d3} + # can = [0,2,4, 1,3,6, 5,7, 8,9] + can = canonicalize(g, list(range(8)), 0, (base3, gens3,2,1), (base2a,gens2a,1,0)) + assert can == [0,2,4, 1,3,6, 5,7, 8,9] + # A anticommuting symmetric, B antisymmetric commuting, antisymmetric metric + # A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3 + # T_c = -A^{d0 d1 d2} * A_{d0 d1}^d3 * B_{d2 d3} + # can = [0,2,4, 1,3,6, 5,7, 9,8] + can = canonicalize(g, list(range(8)), 1, (base3, gens3,2,1), (base2a,gens2a,1,0)) + assert can == [0,2,4, 1,3,6, 5,7, 9,8] + + # A anticommuting symmetric, B anticommuting anticommuting, + # no metric symmetry + # A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3 + # T_c = A^{d0 d1 d2} * A_{d0 d1 d3} * B_d2^d3 + # can = [0,2,4, 1,3,7, 5,6, 8,9] + can = canonicalize(g, list(range(8)), None, (base3, gens3,2,1), (base2a,gens2a,1,0)) + assert can == [0,2,4,1,3,7,5,6,8,9] + + # Gamma anticommuting + # Gamma_{mu nu} * gamma^rho * Gamma^{nu mu alpha} + # ord = [alpha, rho, mu,-mu,nu,-nu] + # g = [3,5,1,4,2,0,6,7] + # T_c = -Gamma^{mu nu} * gamma^rho * Gamma_{alpha mu nu} + # can = [2,4,1,0,3,5,7,6]] + g = Permutation([3,5,1,4,2,0,6,7]) + t0 = (base2a, gens2a, 1, None) + t1 = (base1, gens1, 1, None) + t2 = (base3a, gens3a, 1, None) + can = canonicalize(g, list(range(2, 6)), 0, t0, t1, t2) + assert can == [2,4,1,0,3,5,7,6] + + # Gamma_{mu nu} * Gamma^{gamma beta} * gamma_rho * Gamma^{nu mu alpha} + # ord = [alpha, beta, gamma, -rho, mu,-mu,nu,-nu] + # 0 1 2 3 4 5 6 7 + # g = [5,7,2,1,3,6,4,0,8,9] + # 
T_c = Gamma^{mu nu} * Gamma^{beta gamma} * gamma_rho * Gamma^alpha_{mu nu} # can = [4,6,1,2,3,0,5,7,8,9] + t0 = (base2a, gens2a, 2, None) + g = Permutation([5,7,2,1,3,6,4,0,8,9]) + can = canonicalize(g, list(range(4, 8)), 0, t0, t1, t2) + assert can == [4,6,1,2,3,0,5,7,8,9] + + # f^a_{b,c} antisymmetric in b,c; A_mu^a no symmetry + # f^c_{d a} * f_{c e b} * A_mu^d * A_nu^a * A^{nu e} * A^{mu b} + # ord = [mu,-mu,nu,-nu,a,-a,b,-b,c,-c,d,-d, e, -e] + # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 + # g = [8,11,5, 9,13,7, 1,10, 3,4, 2,12, 0,6, 14,15] + # T_c = -f^{a b c} * f_a^{d e} * A^mu_b * A_{mu d} * A^nu_c * A_{nu e} + # can = [4,6,8, 5,10,12, 0,7, 1,11, 2,9, 3,13, 15,14] + g = Permutation([8,11,5, 9,13,7, 1,10, 3,4, 2,12, 0,6, 14,15]) + base_f, gens_f = bsgs_direct_product(base1, gens1, base2a, gens2a) + base_A, gens_A = bsgs_direct_product(base1, gens1, base1, gens1) + t0 = (base_f, gens_f, 2, 0) + t1 = (base_A, gens_A, 4, 0) + can = canonicalize(g, [list(range(4)), list(range(4, 14))], [0, 0], t0, t1) + assert can == [4,6,8, 5,10,12, 0,7, 1,11, 2,9, 3,13, 15,14] + + +def test_riemann_invariants(): + baser, gensr = riemann_bsgs + # R^{d0 d1}_{d1 d0}; ord = [d0,-d0,d1,-d1]; g = [0,2,3,1,4,5] + # T_c = -R^{d0 d1}_{d0 d1}; can = [0,2,1,3,5,4] + g = Permutation([0,2,3,1,4,5]) + can = canonicalize(g, list(range(2, 4)), 0, (baser, gensr, 1, 0)) + assert can == [0,2,1,3,5,4] + # use a non minimal BSGS + can = canonicalize(g, list(range(2, 4)), 0, ([2, 0], [Permutation([1,0,2,3,5,4]), Permutation([2,3,0,1,4,5])], 1, 0)) + assert can == [0,2,1,3,5,4] + + """ + The following tests in test_riemann_invariants and in + test_riemann_invariants1 have been checked using xperm.c from XPerm in + in [1] and with an older version contained in [2] + + [1] xperm.c part of xPerm written by J. M. 
Martin-Garcia + http://www.xact.es/index.html + [2] test_xperm.cc in cadabra by Kasper Peeters, http://cadabra.phi-sci.com/ + """ + # R_d11^d1_d0^d5 * R^{d6 d4 d0}_d5 * R_{d7 d2 d8 d9} * + # R_{d10 d3 d6 d4} * R^{d2 d7 d11}_d1 * R^{d8 d9 d3 d10} + # ord: contravariant d_k ->2*k, covariant d_k -> 2*k+1 + # T_c = R^{d0 d1 d2 d3} * R_{d0 d1}^{d4 d5} * R_{d2 d3}^{d6 d7} * + # R_{d4 d5}^{d8 d9} * R_{d6 d7}^{d10 d11} * R_{d8 d9 d10 d11} + g = Permutation([23,2,1,10,12,8,0,11,15,5,17,19,21,7,13,9,4,14,22,3,16,18,6,20,24,25]) + can = canonicalize(g, list(range(24)), 0, (baser, gensr, 6, 0)) + assert can == [0,2,4,6,1,3,8,10,5,7,12,14,9,11,16,18,13,15,20,22,17,19,21,23,24,25] + + # use a non minimal BSGS + can = canonicalize(g, list(range(24)), 0, ([2, 0], [Permutation([1,0,2,3,5,4]), Permutation([2,3,0,1,4,5])], 6, 0)) + assert can == [0,2,4,6,1,3,8,10,5,7,12,14,9,11,16,18,13,15,20,22,17,19,21,23,24,25] + + g = Permutation([0,2,5,7,4,6,9,11,8,10,13,15,12,14,17,19,16,18,21,23,20,22,25,27,24,26,29,31,28,30,33,35,32,34,37,39,36,38,1,3,40,41]) + can = canonicalize(g, list(range(40)), 0, (baser, gensr, 10, 0)) + assert can == [0,2,4,6,1,3,8,10,5,7,12,14,9,11,16,18,13,15,20,22,17,19,24,26,21,23,28,30,25,27,32,34,29,31,36,38,33,35,37,39,40,41] + + +@XFAIL +def test_riemann_invariants1(): + skip('takes too much time') + baser, gensr = riemann_bsgs + g = Permutation([17, 44, 11, 3, 0, 19, 23, 15, 38, 4, 25, 27, 43, 36, 22, 14, 8, 30, 41, 20, 2, 10, 12, 28, 18, 1, 29, 13, 37, 42, 33, 7, 9, 31, 24, 26, 39, 5, 34, 47, 32, 6, 21, 40, 35, 46, 45, 16, 48, 49]) + can = canonicalize(g, list(range(48)), 0, (baser, gensr, 12, 0)) + assert can == [0, 2, 4, 6, 1, 3, 8, 10, 5, 7, 12, 14, 9, 11, 16, 18, 13, 15, 20, 22, 17, 19, 24, 26, 21, 23, 28, 30, 25, 27, 32, 34, 29, 31, 36, 38, 33, 35, 40, 42, 37, 39, 44, 46, 41, 43, 45, 47, 48, 49] + + g = Permutation([0,2,4,6, 7,8,10,12, 14,16,18,20, 19,22,24,26, 5,21,28,30, 32,34,36,38, 40,42,44,46, 13,48,50,52, 15,49,54,56, 17,33,41,58, 9,23,60,62, 29,35,63,64, 3,45,66,68, 25,37,47,57, 11,31,69,70, 27,39,53,72, 1,59,73,74, 55,61,67,76, 43,65,75,78, 51,71,77,79, 80,81]) + can = canonicalize(g, list(range(80)), 0, (baser, gensr, 20, 0)) + assert can == [0,2,4,6, 1,8,10,12, 3,14,16,18, 5,20,22,24, 7,26,28,30, 9,15,32,34, 11,36,23,38, 13,40,42,44, 17,39,29,46, 19,48,43,50, 21,45,52,54, 25,56,33,58, 27,60,53,62, 31,51,64,66, 35,65,47,68, 37,70,49,72, 41,74,57,76, 55,67,59,78, 61,69,71,75, 63,79,73,77, 80,81] + + +def test_riemann_products(): + baser, gensr = riemann_bsgs + base1, gens1 = get_symmetric_group_sgs(1) + base2, gens2 = get_symmetric_group_sgs(2) + base2a, gens2a = get_symmetric_group_sgs(2, 1) + + # R^{a b d0}_d0 = 0 + g = Permutation([0,1,2,3,4,5]) + can = canonicalize(g, list(range(2,4)), 0, (baser, gensr, 1, 0)) + assert can == 0 + + # R^{d0 b a}_d0 ; ord = [a,b,d0,-d0}; g = [2,1,0,3,4,5] + # T_c = -R^{a d0 b}_d0; can = [0,2,1,3,5,4] + g = Permutation([2,1,0,3,4,5]) + can = canonicalize(g, list(range(2, 4)), 0, (baser, gensr, 1, 0)) + assert can == [0,2,1,3,5,4] + + # R^d1_d2^b_d0 * R^{d0 a}_d1^d2; ord=[a,b,d0,-d0,d1,-d1,d2,-d2] + # g = [4,7,1,3,2,0,5,6,8,9] + # T_c = -R^{a d0 d1 d2}* R^b_{d0 d1 d2} + # can = [0,2,4,6,1,3,5,7,9,8] + g = Permutation([4,7,1,3,2,0,5,6,8,9]) + can = canonicalize(g, list(range(2,8)), 0, (baser, gensr, 2, 0)) + assert can == [0,2,4,6,1,3,5,7,9,8] + can1 = canonicalize_naive(g, list(range(2,8)), 0, (baser, gensr, 2, 0)) + assert can == can1 + + # A symmetric commuting + # R^{d6 d5}_d2^d1 * R^{d4 d0 d2 d3} * A_{d6 d0} A_{d3 d1} * A_{d4 d5} + # 
g = [12,10,5,2, 8,0,4,6, 13,1, 7,3, 9,11,14,15] + # T_c = -R^{d0 d1 d2 d3} * R_d0^{d4 d5 d6} * A_{d1 d4}*A_{d2 d5}*A_{d3 d6} + + g = Permutation([12,10,5,2,8,0,4,6,13,1,7,3,9,11,14,15]) + can = canonicalize(g, list(range(14)), 0, ((baser,gensr,2,0)), (base2,gens2,3,0)) + assert can == [0, 2, 4, 6, 1, 8, 10, 12, 3, 9, 5, 11, 7, 13, 15, 14] + + # R^{d2 a0 a2 d0} * R^d1_d2^{a1 a3} * R^{a4 a5}_{d0 d1} + # ord = [a0,a1,a2,a3,a4,a5,d0,-d0,d1,-d1,d2,-d2] + # 0 1 2 3 4 5 6 7 8 9 10 11 + # can = [0, 6, 2, 8, 1, 3, 7, 10, 4, 5, 9, 11, 12, 13] + # T_c = R^{a0 d0 a2 d1}*R^{a1 a3}_d0^d2*R^{a4 a5}_{d1 d2} + g = Permutation([10,0,2,6,8,11,1,3,4,5,7,9,12,13]) + can = canonicalize(g, list(range(6,12)), 0, (baser, gensr, 3, 0)) + assert can == [0, 6, 2, 8, 1, 3, 7, 10, 4, 5, 9, 11, 12, 13] + #can1 = canonicalize_naive(g, list(range(6,12)), 0, (baser, gensr, 3, 0)) + #assert can == can1 + + # A^n_{i, j} antisymmetric in i,j + # A_m0^d0_a1 * A_m1^a0_d0; ord = [m0,m1,a0,a1,d0,-d0] + # g = [0,4,3,1,2,5,6,7] + # T_c = -A_{m a1}^d0 * A_m1^a0_d0 + # can = [0,3,4,1,2,5,7,6] + base, gens = bsgs_direct_product(base1, gens1, base2a, gens2a) + dummies = list(range(4, 6)) + g = Permutation([0,4,3,1,2,5,6,7]) + can = canonicalize(g, dummies, 0, (base, gens, 2, 0)) + assert can == [0, 3, 4, 1, 2, 5, 7, 6] + + + # A^n_{i, j} symmetric in i,j + # A^m0_a0^d2 * A^n0_d2^d1 * A^n1_d1^d0 * A_{m0 d0}^a1 + # ordering: first the free indices; then first n, then d + # ord=[n0,n1,a0,a1, m0,-m0,d0,-d0,d1,-d1,d2,-d2] + # 0 1 2 3 4 5 6 7 8 9 10 11] + # g = [4,2,10, 0,11,8, 1,9,6, 5,7,3, 12,13] + # if the dummy indices m_i and d_i were separated, + # one gets + # T_c = A^{n0 d0 d1} * A^n1_d0^d2 * A^m0^a0_d1 * A_m0^a1_d2 + # can = [0, 6, 8, 1, 7, 10, 4, 2, 9, 5, 3, 11, 12, 13] + # If they are not, so can is + # T_c = A^{n0 m0 d0} A^n1_m0^d1 A^{d2 a0}_d0 A_d2^a1_d1 + # can = [0, 4, 6, 1, 5, 8, 10, 2, 7, 11, 3, 9, 12, 13] + # case with single type of indices + + base, gens = bsgs_direct_product(base1, gens1, base2, gens2) + dummies = list(range(4, 12)) + g = Permutation([4,2,10, 0,11,8, 1,9,6, 5,7,3, 12,13]) + can = canonicalize(g, dummies, 0, (base, gens, 4, 0)) + assert can == [0, 4, 6, 1, 5, 8, 10, 2, 7, 11, 3, 9, 12, 13] + # case with separated indices + dummies = [list(range(4, 6)), list(range(6,12))] + sym = [0, 0] + can = canonicalize(g, dummies, sym, (base, gens, 4, 0)) + assert can == [0, 6, 8, 1, 7, 10, 4, 2, 9, 5, 3, 11, 12, 13] + # case with separated indices with the second type of index + # with antisymmetric metric: there is a sign change + sym = [0, 1] + can = canonicalize(g, dummies, sym, (base, gens, 4, 0)) + assert can == [0, 6, 8, 1, 7, 10, 4, 2, 9, 5, 3, 11, 13, 12] + +def test_graph_certificate(): + # test tensor invariants constructed from random regular graphs; + # checked graph isomorphism with networkx + import random + def randomize_graph(size, g): + p = list(range(size)) + random.shuffle(p) + g1a = {} + for k, v in g1.items(): + g1a[p[k]] = [p[i] for i in v] + return g1a + + g1 = {0: [2, 3, 7], 1: [4, 5, 7], 2: [0, 4, 6], 3: [0, 6, 7], 4: [1, 2, 5], 5: [1, 4, 6], 6: [2, 3, 5], 7: [0, 1, 3]} + g2 = {0: [2, 3, 7], 1: [2, 4, 5], 2: [0, 1, 5], 3: [0, 6, 7], 4: [1, 5, 6], 5: [1, 2, 4], 6: [3, 4, 7], 7: [0, 3, 6]} + + c1 = graph_certificate(g1) + c2 = graph_certificate(g2) + assert c1 != c2 + g1a = randomize_graph(8, g1) + c1a = graph_certificate(g1a) + assert c1 == c1a + + g1 = {0: [8, 1, 9, 7], 1: [0, 9, 3, 4], 2: [3, 4, 6, 7], 3: [1, 2, 5, 6], 4: [8, 1, 2, 5], 5: [9, 3, 4, 7], 6: [8, 2, 3, 7], 7: [0, 2, 5, 6], 8: 
[0, 9, 4, 6], 9: [8, 0, 5, 1]} + g2 = {0: [1, 2, 5, 6], 1: [0, 9, 5, 7], 2: [0, 4, 6, 7], 3: [8, 9, 6, 7], 4: [8, 2, 6, 7], 5: [0, 9, 8, 1], 6: [0, 2, 3, 4], 7: [1, 2, 3, 4], 8: [9, 3, 4, 5], 9: [8, 1, 3, 5]} + c1 = graph_certificate(g1) + c2 = graph_certificate(g2) + assert c1 != c2 + g1a = randomize_graph(10, g1) + c1a = graph_certificate(g1a) + assert c1 == c1a diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/testutil.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/testutil.py new file mode 100644 index 0000000000000000000000000000000000000000..5b036ac29665744710e5552b6fe999bb63cf062d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/testutil.py @@ -0,0 +1,358 @@ +from sympy.combinatorics import Permutation +from sympy.combinatorics.util import _distribute_gens_by_base + +rmul = Permutation.rmul + + +def _cmp_perm_lists(first, second): + """ + Compare two lists of permutations as sets. + + Explanation + =========== + + This is used for testing purposes. Since the array form of a + permutation is currently a list, Permutation is not hashable + and cannot be put into a set. + + Examples + ======== + + >>> from sympy.combinatorics.permutations import Permutation + >>> from sympy.combinatorics.testutil import _cmp_perm_lists + >>> a = Permutation([0, 2, 3, 4, 1]) + >>> b = Permutation([1, 2, 0, 4, 3]) + >>> c = Permutation([3, 4, 0, 1, 2]) + >>> ls1 = [a, b, c] + >>> ls2 = [b, c, a] + >>> _cmp_perm_lists(ls1, ls2) + True + + """ + return {tuple(a) for a in first} == \ + {tuple(a) for a in second} + + +def _naive_list_centralizer(self, other, af=False): + from sympy.combinatorics.perm_groups import PermutationGroup + """ + Return a list of elements for the centralizer of a subgroup/set/element. + + Explanation + =========== + + This is a brute force implementation that goes over all elements of the + group and checks for membership in the centralizer. It is used to + test ``.centralizer()`` from ``sympy.combinatorics.perm_groups``. + + Examples + ======== + + >>> from sympy.combinatorics.testutil import _naive_list_centralizer + >>> from sympy.combinatorics.named_groups import DihedralGroup + >>> D = DihedralGroup(4) + >>> _naive_list_centralizer(D, D) + [Permutation([0, 1, 2, 3]), Permutation([2, 3, 0, 1])] + + See Also + ======== + + sympy.combinatorics.perm_groups.centralizer + + """ + from sympy.combinatorics.permutations import _af_commutes_with + if hasattr(other, 'generators'): + elements = list(self.generate_dimino(af=True)) + gens = [x._array_form for x in other.generators] + commutes_with_gens = lambda x: all(_af_commutes_with(x, gen) for gen in gens) + centralizer_list = [] + if not af: + for element in elements: + if commutes_with_gens(element): + centralizer_list.append(Permutation._af_new(element)) + else: + for element in elements: + if commutes_with_gens(element): + centralizer_list.append(element) + return centralizer_list + elif hasattr(other, 'getitem'): + return _naive_list_centralizer(self, PermutationGroup(other), af) + elif hasattr(other, 'array_form'): + return _naive_list_centralizer(self, PermutationGroup([other]), af) + + +def _verify_bsgs(group, base, gens): + """ + Verify the correctness of a base and strong generating set. + + Explanation + =========== + + This is a naive implementation using the definition of a base and a strong + generating set relative to it. There are other procedures for + verifying a base and strong generating set, but this one will + serve for more robust testing. 
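+    Concretely, the strong generators are distributed along the base and, at
+    each level of the stabilizer chain, the group they generate must have the
+    same order as the corresponding pointwise stabilizer; the chain must end
+    in the trivial group.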
+ + Examples + ======== + + >>> from sympy.combinatorics.named_groups import AlternatingGroup + >>> from sympy.combinatorics.testutil import _verify_bsgs + >>> A = AlternatingGroup(4) + >>> A.schreier_sims() + >>> _verify_bsgs(A, A.base, A.strong_gens) + True + + See Also + ======== + + sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims + + """ + from sympy.combinatorics.perm_groups import PermutationGroup + strong_gens_distr = _distribute_gens_by_base(base, gens) + current_stabilizer = group + for i in range(len(base)): + candidate = PermutationGroup(strong_gens_distr[i]) + if current_stabilizer.order() != candidate.order(): + return False + current_stabilizer = current_stabilizer.stabilizer(base[i]) + if current_stabilizer.order() != 1: + return False + return True + + +def _verify_centralizer(group, arg, centr=None): + """ + Verify the centralizer of a group/set/element inside another group. + + This is used for testing ``.centralizer()`` from + ``sympy.combinatorics.perm_groups`` + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import (SymmetricGroup, + ... AlternatingGroup) + >>> from sympy.combinatorics.perm_groups import PermutationGroup + >>> from sympy.combinatorics.permutations import Permutation + >>> from sympy.combinatorics.testutil import _verify_centralizer + >>> S = SymmetricGroup(5) + >>> A = AlternatingGroup(5) + >>> centr = PermutationGroup([Permutation([0, 1, 2, 3, 4])]) + >>> _verify_centralizer(S, A, centr) + True + + See Also + ======== + + _naive_list_centralizer, + sympy.combinatorics.perm_groups.PermutationGroup.centralizer, + _cmp_perm_lists + + """ + if centr is None: + centr = group.centralizer(arg) + centr_list = list(centr.generate_dimino(af=True)) + centr_list_naive = _naive_list_centralizer(group, arg, af=True) + return _cmp_perm_lists(centr_list, centr_list_naive) + + +def _verify_normal_closure(group, arg, closure=None): + from sympy.combinatorics.perm_groups import PermutationGroup + """ + Verify the normal closure of a subgroup/subset/element in a group. + + This is used to test + sympy.combinatorics.perm_groups.PermutationGroup.normal_closure + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import (SymmetricGroup, + ... AlternatingGroup) + >>> from sympy.combinatorics.testutil import _verify_normal_closure + >>> S = SymmetricGroup(3) + >>> A = AlternatingGroup(3) + >>> _verify_normal_closure(S, A, closure=A) + True + + See Also + ======== + + sympy.combinatorics.perm_groups.PermutationGroup.normal_closure + + """ + if closure is None: + closure = group.normal_closure(arg) + conjugates = set() + if hasattr(arg, 'generators'): + subgr_gens = arg.generators + elif hasattr(arg, '__getitem__'): + subgr_gens = arg + elif hasattr(arg, 'array_form'): + subgr_gens = [arg] + for el in group.generate_dimino(): + for gen in subgr_gens: + conjugates.add(gen ^ el) + naive_closure = PermutationGroup(list(conjugates)) + return closure.is_subgroup(naive_closure) + + +def canonicalize_naive(g, dummies, sym, *v): + """ + Canonicalize tensor formed by tensors of the different types. + + Explanation + =========== + + sym_i symmetry under exchange of two component tensors of type `i` + None no symmetry + 0 commuting + 1 anticommuting + + Parameters + ========== + + g : Permutation representing the tensor. + dummies : List of dummy indices. + msym : Symmetry of the metric. + v : A list of (base_i, gens_i, n_i, sym_i) for tensors of type `i`. 
+ base_i, gens_i BSGS for tensors of this type + n_i number of tensors of type `i` + + Returns + ======= + + Returns 0 if the tensor is zero, else returns the array form of + the permutation representing the canonical form of the tensor. + + Examples + ======== + + >>> from sympy.combinatorics.testutil import canonicalize_naive + >>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs + >>> from sympy.combinatorics import Permutation + >>> g = Permutation([1, 3, 2, 0, 4, 5]) + >>> base2, gens2 = get_symmetric_group_sgs(2) + >>> canonicalize_naive(g, [2, 3], 0, (base2, gens2, 2, 0)) + [0, 2, 1, 3, 4, 5] + """ + from sympy.combinatorics.perm_groups import PermutationGroup + from sympy.combinatorics.tensor_can import gens_products, dummy_sgs + from sympy.combinatorics.permutations import _af_rmul + v1 = [] + for i in range(len(v)): + base_i, gens_i, n_i, sym_i = v[i] + v1.append((base_i, gens_i, [[]]*n_i, sym_i)) + size, sbase, sgens = gens_products(*v1) + dgens = dummy_sgs(dummies, sym, size-2) + if isinstance(sym, int): + num_types = 1 + dummies = [dummies] + sym = [sym] + else: + num_types = len(sym) + dgens = [] + for i in range(num_types): + dgens.extend(dummy_sgs(dummies[i], sym[i], size - 2)) + S = PermutationGroup(sgens) + D = PermutationGroup([Permutation(x) for x in dgens]) + dlist = list(D.generate(af=True)) + g = g.array_form + st = set() + for s in S.generate(af=True): + h = _af_rmul(g, s) + for d in dlist: + q = tuple(_af_rmul(d, h)) + st.add(q) + a = list(st) + a.sort() + prev = (0,)*size + for h in a: + if h[:-2] == prev[:-2]: + if h[-1] != prev[-1]: + return 0 + prev = h + return list(a[0]) + + +def graph_certificate(gr): + """ + Return a certificate for the graph + + Parameters + ========== + + gr : adjacency list + + Explanation + =========== + + The graph is assumed to be unoriented and without + external lines. + + Associate to each vertex of the graph a symmetric tensor with + number of indices equal to the degree of the vertex; indices + are contracted when they correspond to the same line of the graph. + The canonical form of the tensor gives a certificate for the graph. + + This is not an efficient algorithm to get the certificate of a graph. 
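+ Each line of the graph contributes two contracted indices, and two trailing
+ entries are appended for the sign, so the certificate of a triangle (a small
+ hypothetical graph, used here only as a sketch) has length 8:
+
+ >>> from sympy.combinatorics.testutil import graph_certificate
+ >>> len(graph_certificate({0: [1, 2], 1: [0, 2], 2: [0, 1]}))
+ 8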
+ + Examples + ======== + + >>> from sympy.combinatorics.testutil import graph_certificate + >>> gr1 = {0:[1, 2, 3, 5], 1:[0, 2, 4], 2:[0, 1, 3, 4], 3:[0, 2, 4], 4:[1, 2, 3, 5], 5:[0, 4]} + >>> gr2 = {0:[1, 5], 1:[0, 2, 3, 4], 2:[1, 3, 5], 3:[1, 2, 4, 5], 4:[1, 3, 5], 5:[0, 2, 3, 4]} + >>> c1 = graph_certificate(gr1) + >>> c2 = graph_certificate(gr2) + >>> c1 + [0, 2, 4, 6, 1, 8, 10, 12, 3, 14, 16, 18, 5, 9, 15, 7, 11, 17, 13, 19, 20, 21] + >>> c1 == c2 + True + """ + from sympy.combinatorics.permutations import _af_invert + from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize + items = list(gr.items()) + items.sort(key=lambda x: len(x[1]), reverse=True) + pvert = [x[0] for x in items] + pvert = _af_invert(pvert) + + # the indices of the tensor are twice the number of lines of the graph + num_indices = 0 + for v, neigh in items: + num_indices += len(neigh) + # associate to each vertex its indices; for each line + # between two vertices assign the + # even index to the vertex which comes first in items, + # the odd index to the other vertex + vertices = [[] for i in items] + i = 0 + for v, neigh in items: + for v2 in neigh: + if pvert[v] < pvert[v2]: + vertices[pvert[v]].append(i) + vertices[pvert[v2]].append(i+1) + i += 2 + g = [] + for v in vertices: + g.extend(v) + assert len(g) == num_indices + g += [num_indices, num_indices + 1] + size = num_indices + 2 + assert sorted(g) == list(range(size)) + g = Permutation(g) + vlen = [0]*(len(vertices[0])+1) + for neigh in vertices: + vlen[len(neigh)] += 1 + v = [] + for i in range(len(vlen)): + n = vlen[i] + if n: + base, gens = get_symmetric_group_sgs(i) + v.append((base, gens, n, 0)) + v.reverse() + dummies = list(range(num_indices)) + can = canonicalize(g, dummies, 0, *v) + return can diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/util.py b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/util.py new file mode 100644 index 0000000000000000000000000000000000000000..94e736f56e4f10184da8df0ebe65f58c78079048 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/combinatorics/util.py @@ -0,0 +1,536 @@ +from sympy.combinatorics.permutations import Permutation, _af_invert, _af_rmul +from sympy.ntheory import isprime + +rmul = Permutation.rmul +_af_new = Permutation._af_new + +############################################ +# +# Utilities for computational group theory +# +############################################ + + +def _base_ordering(base, degree): + r""" + Order `\{0, 1, \dots, n-1\}` so that base points come first and in order. + + Parameters + ========== + + base : the base + degree : the degree of the associated permutation group + + Returns + ======= + + A list ``base_ordering`` such that ``base_ordering[point]`` is the + number of ``point`` in the ordering. 
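+ For a base that is not in natural order, the base points still come first,
+ in base order, followed by the remaining points in increasing order (an
+ extra sketch with an arbitrary base):
+
+ >>> from sympy.combinatorics.util import _base_ordering
+ >>> _base_ordering([2, 0], 4)
+ [1, 2, 0, 3]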
+ + Examples + ======== + + >>> from sympy.combinatorics import SymmetricGroup + >>> from sympy.combinatorics.util import _base_ordering + >>> S = SymmetricGroup(4) + >>> S.schreier_sims() + >>> _base_ordering(S.base, S.degree) + [0, 1, 2, 3] + + Notes + ===== + + This is used in backtrack searches, when we define a relation `\ll` on + the underlying set for a permutation group of degree `n`, + `\{0, 1, \dots, n-1\}`, so that if `(b_1, b_2, \dots, b_k)` is a base we + have `b_i \ll b_j` whenever `i>> from sympy.combinatorics.util import _check_cycles_alt_sym + >>> from sympy.combinatorics import Permutation + >>> a = Permutation([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [11, 12]]) + >>> _check_cycles_alt_sym(a) + False + >>> b = Permutation([[0, 1, 2, 3, 4, 5, 6], [7, 8, 9, 10]]) + >>> _check_cycles_alt_sym(b) + True + + See Also + ======== + + sympy.combinatorics.perm_groups.PermutationGroup.is_alt_sym + + """ + n = perm.size + af = perm.array_form + current_len = 0 + total_len = 0 + used = set() + for i in range(n//2): + if i not in used and i < n//2 - total_len: + current_len = 1 + used.add(i) + j = i + while af[j] != i: + current_len += 1 + j = af[j] + used.add(j) + total_len += current_len + if current_len > n//2 and current_len < n - 2 and isprime(current_len): + return True + return False + + +def _distribute_gens_by_base(base, gens): + r""" + Distribute the group elements ``gens`` by membership in basic stabilizers. + + Explanation + =========== + + Notice that for a base `(b_1, b_2, \dots, b_k)`, the basic stabilizers + are defined as `G^{(i)} = G_{b_1, \dots, b_{i-1}}` for + `i \in\{1, 2, \dots, k\}`. + + Parameters + ========== + + base : a sequence of points in `\{0, 1, \dots, n-1\}` + gens : a list of elements of a permutation group of degree `n`. + + Returns + ======= + + List of length `k`, where `k` is + the length of ``base``. The `i`-th entry contains those elements in + ``gens`` which fix the first `i` elements of ``base`` (so that the + `0`-th entry is equal to ``gens`` itself). If no element fixes the first + `i` elements of ``base``, the `i`-th element is set to a list containing + the identity element. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import DihedralGroup + >>> from sympy.combinatorics.util import _distribute_gens_by_base + >>> D = DihedralGroup(3) + >>> D.schreier_sims() + >>> D.strong_gens + [(0 1 2), (0 2), (1 2)] + >>> D.base + [0, 1] + >>> _distribute_gens_by_base(D.base, D.strong_gens) + [[(0 1 2), (0 2), (1 2)], + [(1 2)]] + + See Also + ======== + + _strong_gens_from_distr, _orbits_transversals_from_bsgs, + _handle_precomputed_bsgs + + """ + base_len = len(base) + degree = gens[0].size + stabs = [[] for _ in range(base_len)] + max_stab_index = 0 + for gen in gens: + j = 0 + while j < base_len - 1 and gen._array_form[base[j]] == base[j]: + j += 1 + if j > max_stab_index: + max_stab_index = j + for k in range(j + 1): + stabs[k].append(gen) + for i in range(max_stab_index + 1, base_len): + stabs[i].append(_af_new(list(range(degree)))) + return stabs + + +def _handle_precomputed_bsgs(base, strong_gens, transversals=None, + basic_orbits=None, strong_gens_distr=None): + """ + Calculate BSGS-related structures from those present. + + Explanation + =========== + + The base and strong generating set must be provided; if any of the + transversals, basic orbits or distributed strong generators are not + provided, they will be calculated from the base and strong generating set. 
+ + Parameters + ========== + + ``base`` - the base + ``strong_gens`` - the strong generators + ``transversals`` - basic transversals + ``basic_orbits`` - basic orbits + ``strong_gens_distr`` - strong generators distributed by membership in basic + stabilizers + + Returns + ======= + + ``(transversals, basic_orbits, strong_gens_distr)`` where ``transversals`` + are the basic transversals, ``basic_orbits`` are the basic orbits, and + ``strong_gens_distr`` are the strong generators distributed by membership + in basic stabilizers. + + Examples + ======== + + >>> from sympy.combinatorics.named_groups import DihedralGroup + >>> from sympy.combinatorics.util import _handle_precomputed_bsgs + >>> D = DihedralGroup(3) + >>> D.schreier_sims() + >>> _handle_precomputed_bsgs(D.base, D.strong_gens, + ... basic_orbits=D.basic_orbits) + ([{0: (2), 1: (0 1 2), 2: (0 2)}, {1: (2), 2: (1 2)}], [[0, 1, 2], [1, 2]], [[(0 1 2), (0 2), (1 2)], [(1 2)]]) + + See Also + ======== + + _orbits_transversals_from_bsgs, _distribute_gens_by_base + + """ + if strong_gens_distr is None: + strong_gens_distr = _distribute_gens_by_base(base, strong_gens) + if transversals is None: + if basic_orbits is None: + basic_orbits, transversals = \ + _orbits_transversals_from_bsgs(base, strong_gens_distr) + else: + transversals = \ + _orbits_transversals_from_bsgs(base, strong_gens_distr, + transversals_only=True) + else: + if basic_orbits is None: + base_len = len(base) + basic_orbits = [None]*base_len + for i in range(base_len): + basic_orbits[i] = list(transversals[i].keys()) + return transversals, basic_orbits, strong_gens_distr + + +def _orbits_transversals_from_bsgs(base, strong_gens_distr, + transversals_only=False, slp=False): + """ + Compute basic orbits and transversals from a base and strong generating set. + + Explanation + =========== + + The generators are provided as distributed across the basic stabilizers. + If the optional argument ``transversals_only`` is set to True, only the + transversals are returned. + + Parameters + ========== + + ``base`` - The base. + ``strong_gens_distr`` - Strong generators distributed by membership in basic + stabilizers. + ``transversals_only`` - bool + A flag switching between returning only the + transversals and both orbits and transversals. + ``slp`` - + If ``True``, return a list of dictionaries containing the + generator presentations of the elements of the transversals, + i.e. the list of indices of generators from ``strong_gens_distr[i]`` + such that their product is the relevant transversal element. 
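+ A minimal sketch of a full call (the group is arbitrary), returning the
+ basic orbits together with the transversal dictionaries:
+
+ >>> from sympy.combinatorics import SymmetricGroup
+ >>> from sympy.combinatorics.util import (_distribute_gens_by_base,
+ ...     _orbits_transversals_from_bsgs)
+ >>> S = SymmetricGroup(3)
+ >>> S.schreier_sims()
+ >>> distr = _distribute_gens_by_base(S.base, S.strong_gens)
+ >>> orbits, transversals = _orbits_transversals_from_bsgs(S.base, distr)
+ >>> orbits
+ [[0, 1, 2], [1, 2]]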
+ + Examples + ======== + + >>> from sympy.combinatorics import SymmetricGroup + >>> from sympy.combinatorics.util import _distribute_gens_by_base + >>> S = SymmetricGroup(3) + >>> S.schreier_sims() + >>> strong_gens_distr = _distribute_gens_by_base(S.base, S.strong_gens) + >>> (S.base, strong_gens_distr) + ([0, 1], [[(0 1 2), (2)(0 1), (1 2)], [(1 2)]]) + + See Also + ======== + + _distribute_gens_by_base, _handle_precomputed_bsgs + + """ + from sympy.combinatorics.perm_groups import _orbit_transversal + base_len = len(base) + degree = strong_gens_distr[0][0].size + transversals = [None]*base_len + slps = [None]*base_len + if transversals_only is False: + basic_orbits = [None]*base_len + for i in range(base_len): + transversals[i], slps[i] = _orbit_transversal(degree, strong_gens_distr[i], + base[i], pairs=True, slp=True) + transversals[i] = dict(transversals[i]) + if transversals_only is False: + basic_orbits[i] = list(transversals[i].keys()) + if transversals_only: + return transversals + else: + if not slp: + return basic_orbits, transversals + return basic_orbits, transversals, slps + + +def _remove_gens(base, strong_gens, basic_orbits=None, strong_gens_distr=None): + """ + Remove redundant generators from a strong generating set. + + Parameters + ========== + + ``base`` - a base + ``strong_gens`` - a strong generating set relative to ``base`` + ``basic_orbits`` - basic orbits + ``strong_gens_distr`` - strong generators distributed by membership in basic + stabilizers + + Returns + ======= + + A strong generating set with respect to ``base`` which is a subset of + ``strong_gens``. + + Examples + ======== + + >>> from sympy.combinatorics import SymmetricGroup + >>> from sympy.combinatorics.util import _remove_gens + >>> from sympy.combinatorics.testutil import _verify_bsgs + >>> S = SymmetricGroup(15) + >>> base, strong_gens = S.schreier_sims_incremental() + >>> new_gens = _remove_gens(base, strong_gens) + >>> len(new_gens) + 14 + >>> _verify_bsgs(S, base, new_gens) + True + + Notes + ===== + + This procedure is outlined in [1],p.95. + + References + ========== + + .. [1] Holt, D., Eick, B., O'Brien, E. + "Handbook of computational group theory" + + """ + from sympy.combinatorics.perm_groups import _orbit + base_len = len(base) + degree = strong_gens[0].size + if strong_gens_distr is None: + strong_gens_distr = _distribute_gens_by_base(base, strong_gens) + if basic_orbits is None: + basic_orbits = [] + for i in range(base_len): + basic_orbit = _orbit(degree, strong_gens_distr[i], base[i]) + basic_orbits.append(basic_orbit) + strong_gens_distr.append([]) + res = strong_gens[:] + for i in range(base_len - 1, -1, -1): + gens_copy = strong_gens_distr[i][:] + for gen in strong_gens_distr[i]: + if gen not in strong_gens_distr[i + 1]: + temp_gens = gens_copy[:] + temp_gens.remove(gen) + if temp_gens == []: + continue + temp_orbit = _orbit(degree, temp_gens, base[i]) + if temp_orbit == basic_orbits[i]: + gens_copy.remove(gen) + res.remove(gen) + return res + + +def _strip(g, base, orbits, transversals): + """ + Attempt to decompose a permutation using a (possibly partial) BSGS + structure. + + Explanation + =========== + + This is done by treating the sequence ``base`` as an actual base, and + the orbits ``orbits`` and transversals ``transversals`` as basic orbits and + transversals relative to it. + + This process is called "sifting". A sift is unsuccessful when a certain + orbit element is not found or when after the sift the decomposition + does not end with the identity element. 
+ + The argument ``transversals`` is a list of dictionaries that provides + transversal elements for the orbits ``orbits``. + + Parameters + ========== + + ``g`` - permutation to be decomposed + ``base`` - sequence of points + ``orbits`` - a list in which the ``i``-th entry is an orbit of ``base[i]`` + under some subgroup of the pointwise stabilizer of ` + `base[0], base[1], ..., base[i - 1]``. The groups themselves are implicit + in this function since the only information we need is encoded in the orbits + and transversals + ``transversals`` - a list of orbit transversals associated with the orbits + ``orbits``. + + Examples + ======== + + >>> from sympy.combinatorics import Permutation, SymmetricGroup + >>> from sympy.combinatorics.util import _strip + >>> S = SymmetricGroup(5) + >>> S.schreier_sims() + >>> g = Permutation([0, 2, 3, 1, 4]) + >>> _strip(g, S.base, S.basic_orbits, S.basic_transversals) + ((4), 5) + + Notes + ===== + + The algorithm is described in [1],pp.89-90. The reason for returning + both the current state of the element being decomposed and the level + at which the sifting ends is that they provide important information for + the randomized version of the Schreier-Sims algorithm. + + References + ========== + + .. [1] Holt, D., Eick, B., O'Brien, E."Handbook of computational group theory" + + See Also + ======== + + sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims + sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims_random + + """ + h = g._array_form + base_len = len(base) + for i in range(base_len): + beta = h[base[i]] + if beta == base[i]: + continue + if beta not in orbits[i]: + return _af_new(h), i + 1 + u = transversals[i][beta]._array_form + h = _af_rmul(_af_invert(u), h) + return _af_new(h), base_len + 1 + + +def _strip_af(h, base, orbits, transversals, j, slp=[], slps={}): + """ + optimized _strip, with h, transversals and result in array form + if the stripped elements is the identity, it returns False, base_len + 1 + + j h[base[i]] == base[i] for i <= j + + """ + base_len = len(base) + for i in range(j+1, base_len): + beta = h[base[i]] + if beta == base[i]: + continue + if beta not in orbits[i]: + if not slp: + return h, i + 1 + return h, i + 1, slp + u = transversals[i][beta] + if h == u: + if not slp: + return False, base_len + 1 + return False, base_len + 1, slp + h = _af_rmul(_af_invert(u), h) + if slp: + u_slp = slps[i][beta][:] + u_slp.reverse() + u_slp = [(i, (g,)) for g in u_slp] + slp = u_slp + slp + if not slp: + return h, base_len + 1 + return h, base_len + 1, slp + + +def _strong_gens_from_distr(strong_gens_distr): + """ + Retrieve strong generating set from generators of basic stabilizers. + + This is just the union of the generators of the first and second basic + stabilizers. + + Parameters + ========== + + ``strong_gens_distr`` - strong generators distributed by membership in basic + stabilizers + + Examples + ======== + + >>> from sympy.combinatorics import SymmetricGroup + >>> from sympy.combinatorics.util import (_strong_gens_from_distr, + ... 
_distribute_gens_by_base) + >>> S = SymmetricGroup(3) + >>> S.schreier_sims() + >>> S.strong_gens + [(0 1 2), (2)(0 1), (1 2)] + >>> strong_gens_distr = _distribute_gens_by_base(S.base, S.strong_gens) + >>> _strong_gens_from_distr(strong_gens_distr) + [(0 1 2), (2)(0 1), (1 2)] + + See Also + ======== + + _distribute_gens_by_base + + """ + if len(strong_gens_distr) == 1: + return strong_gens_distr[0][:] + else: + result = strong_gens_distr[0] + for gen in strong_gens_distr[1]: + if gen not in result: + result.append(gen) + return result diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__init__.py b/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c253e3276a976d9d6ae351c41dc629dc3efa0a4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__pycache__/bench_arit.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__pycache__/bench_arit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ed94bbb3e48699767cff59a63594b40b4e7bb8b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__pycache__/bench_arit.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__pycache__/bench_assumptions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__pycache__/bench_assumptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47ad71ec4eba3382696e09232fc4548ab3a2bd13 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__pycache__/bench_assumptions.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__pycache__/bench_basic.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__pycache__/bench_basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..574b021b967a07989bc96f4d22b261a7f9f39a4e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__pycache__/bench_basic.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__pycache__/bench_expand.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__pycache__/bench_expand.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6142763d302916747b51637bc0e56ce0d00815cb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__pycache__/bench_expand.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__pycache__/bench_numbers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__pycache__/bench_numbers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf634a37f2ffa11a6fe9809839b65972b873818c Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__pycache__/bench_numbers.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__pycache__/bench_sympify.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__pycache__/bench_sympify.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3cb89e0883d3d280ed1c616f04d32d1b5cb36b1c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/__pycache__/bench_sympify.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/bench_arit.py b/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/bench_arit.py new file mode 100644 index 0000000000000000000000000000000000000000..39860943b763a30cf4f91578dbac37dc7e6e444e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/bench_arit.py @@ -0,0 +1,43 @@ +from sympy.core import Add, Mul, symbols + +x, y, z = symbols('x,y,z') + + +def timeit_neg(): + -x + + +def timeit_Add_x1(): + x + 1 + + +def timeit_Add_1x(): + 1 + x + + +def timeit_Add_x05(): + x + 0.5 + + +def timeit_Add_xy(): + x + y + + +def timeit_Add_xyz(): + Add(*[x, y, z]) + + +def timeit_Mul_xy(): + x*y + + +def timeit_Mul_xyz(): + Mul(*[x, y, z]) + + +def timeit_Div_xy(): + x/y + + +def timeit_Div_2y(): + 2/y diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/bench_expand.py b/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/bench_expand.py new file mode 100644 index 0000000000000000000000000000000000000000..4f5ac513e368cb7e9b542926bc25a5695de6d914 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/bench_expand.py @@ -0,0 +1,23 @@ +from sympy.core import symbols, I + +x, y, z = symbols('x,y,z') + +p = 3*x**2*y*z**7 + 7*x*y*z**2 + 4*x + x*y**4 +e = (x + y + z + 1)**32 + + +def timeit_expand_nothing_todo(): + p.expand() + + +def bench_expand_32(): + """(x+y+z+1)**32 -> expand""" + e.expand() + + +def timeit_expand_complex_number_1(): + ((2 + 3*I)**1000).expand(complex=True) + + +def timeit_expand_complex_number_2(): + ((2 + 3*I/4)**1000).expand(complex=True) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/bench_sympify.py b/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/bench_sympify.py new file mode 100644 index 0000000000000000000000000000000000000000..d8cc0abc1e35439a1a495454abf87769d5b40d04 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/core/benchmarks/bench_sympify.py @@ -0,0 +1,11 @@ +from sympy.core import sympify, Symbol + +x = Symbol('x') + + +def timeit_sympify_1(): + sympify(1) + + +def timeit_sympify_x(): + sympify(x) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/__init__.py b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8846a99510601c9675103e21ef5a0a1e839fdd11 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/__init__.py @@ -0,0 +1,19 @@ +from .diffgeom import ( + BaseCovarDerivativeOp, BaseScalarField, BaseVectorField, Commutator, + contravariant_order, CoordSystem, CoordinateSymbol, + CovarDerivativeOp, covariant_order, Differential, intcurve_diffequ, + intcurve_series, LieDerivative, Manifold, metric_to_Christoffel_1st, + metric_to_Christoffel_2nd, metric_to_Ricci_components, + metric_to_Riemann_components, Patch, Point, 
TensorProduct, twoform_to_matrix, + vectors_in_basis, WedgeProduct, +) + +__all__ = [ + 'BaseCovarDerivativeOp', 'BaseScalarField', 'BaseVectorField', 'Commutator', + 'contravariant_order', 'CoordSystem', 'CoordinateSymbol', + 'CovarDerivativeOp', 'covariant_order', 'Differential', 'intcurve_diffequ', + 'intcurve_series', 'LieDerivative', 'Manifold', 'metric_to_Christoffel_1st', + 'metric_to_Christoffel_2nd', 'metric_to_Ricci_components', + 'metric_to_Riemann_components', 'Patch', 'Point', 'TensorProduct', + 'twoform_to_matrix', 'vectors_in_basis', 'WedgeProduct', +] diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..891d3b9ea65c8b3d599d550e4f3bec717bf540d5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/diffgeom.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/diffgeom.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4eb7817990b7400363781e3e286126a9d1b1abd4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/diffgeom.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/rn.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/rn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7505ba04e12bf54bf82b95b039eda4f86b3fdf8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/rn.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/diffgeom.py b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/diffgeom.py new file mode 100644 index 0000000000000000000000000000000000000000..3c7c3feec9f7add1109e56901bdc9973007384cf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/diffgeom.py @@ -0,0 +1,2273 @@ +from __future__ import annotations +from typing import Any + +from functools import reduce +from itertools import permutations + +from sympy.combinatorics import Permutation +from sympy.core import ( + Basic, Expr, Function, diff, + Pow, Mul, Add, Lambda, S, Tuple, Dict +) +from sympy.core.cache import cacheit + +from sympy.core.symbol import Symbol, Dummy +from sympy.core.symbol import Str +from sympy.core.sympify import _sympify +from sympy.functions import factorial +from sympy.matrices import ImmutableDenseMatrix as Matrix +from sympy.solvers import solve + +from sympy.utilities.exceptions import (sympy_deprecation_warning, + SymPyDeprecationWarning, + ignore_warnings) + + +# TODO you are a bit excessive in the use of Dummies +# TODO dummy point, literal field +# TODO too often one needs to call doit or simplify on the output, check the +# tests and find out why +from sympy.tensor.array import ImmutableDenseNDimArray + + +class Manifold(Basic): + """ + A mathematical manifold. + + Explanation + =========== + + A manifold is a topological space that locally resembles + Euclidean space near each point [1]. + This class does not provide any means to study the topological + characteristics of the manifold that it represents, though. 
+ + Parameters + ========== + + name : str + The name of the manifold. + + dim : int + The dimension of the manifold. + + Examples + ======== + + >>> from sympy.diffgeom import Manifold + >>> m = Manifold('M', 2) + >>> m + M + >>> m.dim + 2 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Manifold + """ + + def __new__(cls, name, dim, **kwargs): + if not isinstance(name, Str): + name = Str(name) + dim = _sympify(dim) + obj = super().__new__(cls, name, dim) + + obj.patches = _deprecated_list( + """ + Manifold.patches is deprecated. The Manifold object is now + immutable. Instead use a separate list to keep track of the + patches. + """, []) + return obj + + @property + def name(self): + return self.args[0] + + @property + def dim(self): + return self.args[1] + + +class Patch(Basic): + """ + A patch on a manifold. + + Explanation + =========== + + Coordinate patch, or patch in short, is a simply-connected open set around + a point in the manifold [1]. On a manifold one can have many patches that + do not always include the whole manifold. On these patches coordinate + charts can be defined that permit the parameterization of any point on the + patch in terms of a tuple of real numbers (the coordinates). + + This class does not provide any means to study the topological + characteristics of the patch that it represents. + + Parameters + ========== + + name : str + The name of the patch. + + manifold : Manifold + The manifold on which the patch is defined. + + Examples + ======== + + >>> from sympy.diffgeom import Manifold, Patch + >>> m = Manifold('M', 2) + >>> p = Patch('P', m) + >>> p + P + >>> p.dim + 2 + + References + ========== + + .. [1] G. Sussman, J. Wisdom, W. Farr, Functional Differential Geometry + (2013) + + """ + def __new__(cls, name, manifold, **kwargs): + if not isinstance(name, Str): + name = Str(name) + obj = super().__new__(cls, name, manifold) + + obj.manifold.patches.append(obj) # deprecated + obj.coord_systems = _deprecated_list( + """ + Patch.coord_systms is deprecated. The Patch class is now + immutable. Instead use a separate list to keep track of coordinate + systems. + """, []) + return obj + + @property + def name(self): + return self.args[0] + + @property + def manifold(self): + return self.args[1] + + @property + def dim(self): + return self.manifold.dim + + +class CoordSystem(Basic): + """ + A coordinate system defined on the patch. + + Explanation + =========== + + Coordinate system is a system that uses one or more coordinates to uniquely + determine the position of the points or other geometric elements on a + manifold [1]. + + By passing ``Symbols`` to *symbols* parameter, user can define the name and + assumptions of coordinate symbols of the coordinate system. If not passed, + these symbols are generated automatically and are assumed to be real valued. + + By passing *relations* parameter, user can define the transform relations of + coordinate systems. Inverse transformation and indirect transformation can + be found automatically. If this parameter is not passed, coordinate + transformation cannot be done. + + Parameters + ========== + + name : str + The name of the coordinate system. + + patch : Patch + The patch where the coordinate system is defined. + + symbols : list of Symbols, optional + Defines the names and assumptions of coordinate symbols. + + relations : dict, optional + Key is a tuple of two strings, who are the names of the systems where + the coordinates transform from and transform to. 
+ Value is a tuple of the symbols before transformation and a tuple of + the expressions after transformation. + + Examples + ======== + + We define two-dimensional Cartesian coordinate system and polar coordinate + system. + + >>> from sympy import symbols, pi, sqrt, atan2, cos, sin + >>> from sympy.diffgeom import Manifold, Patch, CoordSystem + >>> m = Manifold('M', 2) + >>> p = Patch('P', m) + >>> x, y = symbols('x y', real=True) + >>> r, theta = symbols('r theta', nonnegative=True) + >>> relation_dict = { + ... ('Car2D', 'Pol'): [(x, y), (sqrt(x**2 + y**2), atan2(y, x))], + ... ('Pol', 'Car2D'): [(r, theta), (r*cos(theta), r*sin(theta))] + ... } + >>> Car2D = CoordSystem('Car2D', p, (x, y), relation_dict) + >>> Pol = CoordSystem('Pol', p, (r, theta), relation_dict) + + ``symbols`` property returns ``CoordinateSymbol`` instances. These symbols + are not same with the symbols used to construct the coordinate system. + + >>> Car2D + Car2D + >>> Car2D.dim + 2 + >>> Car2D.symbols + (x, y) + >>> _[0].func + + + ``transformation()`` method returns the transformation function from + one coordinate system to another. ``transform()`` method returns the + transformed coordinates. + + >>> Car2D.transformation(Pol) + Lambda((x, y), Matrix([ + [sqrt(x**2 + y**2)], + [ atan2(y, x)]])) + >>> Car2D.transform(Pol) + Matrix([ + [sqrt(x**2 + y**2)], + [ atan2(y, x)]]) + >>> Car2D.transform(Pol, [1, 2]) + Matrix([ + [sqrt(5)], + [atan(2)]]) + + ``jacobian()`` method returns the Jacobian matrix of coordinate + transformation between two systems. ``jacobian_determinant()`` method + returns the Jacobian determinant of coordinate transformation between two + systems. + + >>> Pol.jacobian(Car2D) + Matrix([ + [cos(theta), -r*sin(theta)], + [sin(theta), r*cos(theta)]]) + >>> Pol.jacobian(Car2D, [1, pi/2]) + Matrix([ + [0, -1], + [1, 0]]) + >>> Car2D.jacobian_determinant(Pol) + 1/sqrt(x**2 + y**2) + >>> Car2D.jacobian_determinant(Pol, [1,0]) + 1 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Coordinate_system + + """ + def __new__(cls, name, patch, symbols=None, relations={}, **kwargs): + if not isinstance(name, Str): + name = Str(name) + + # canonicallize the symbols + if symbols is None: + names = kwargs.get('names', None) + if names is None: + symbols = Tuple( + *[Symbol('%s_%s' % (name.name, i), real=True) + for i in range(patch.dim)] + ) + else: + sympy_deprecation_warning( + f""" +The 'names' argument to CoordSystem is deprecated. Use 'symbols' instead. That +is, replace + + CoordSystem(..., names={names}) + +with + + CoordSystem(..., symbols=[{', '.join(["Symbol(" + repr(n) + ", real=True)" for n in names])}]) + """, + deprecated_since_version="1.7", + active_deprecations_target="deprecated-diffgeom-mutable", + ) + symbols = Tuple( + *[Symbol(n, real=True) for n in names] + ) + else: + syms = [] + for s in symbols: + if isinstance(s, Symbol): + syms.append(Symbol(s.name, **s._assumptions.generator)) + elif isinstance(s, str): + sympy_deprecation_warning( + f""" + +Passing a string as the coordinate symbol name to CoordSystem is deprecated. +Pass a Symbol with the appropriate name and assumptions instead. + +That is, replace {s} with Symbol({s!r}, real=True). 
+ """, + + deprecated_since_version="1.7", + active_deprecations_target="deprecated-diffgeom-mutable", + ) + syms.append(Symbol(s, real=True)) + symbols = Tuple(*syms) + + # canonicallize the relations + rel_temp = {} + for k,v in relations.items(): + s1, s2 = k + if not isinstance(s1, Str): + s1 = Str(s1) + if not isinstance(s2, Str): + s2 = Str(s2) + key = Tuple(s1, s2) + + # Old version used Lambda as a value. + if isinstance(v, Lambda): + v = (tuple(v.signature), tuple(v.expr)) + else: + v = (tuple(v[0]), tuple(v[1])) + rel_temp[key] = v + relations = Dict(rel_temp) + + # construct the object + obj = super().__new__(cls, name, patch, symbols, relations) + + # Add deprecated attributes + obj.transforms = _deprecated_dict( + """ + CoordSystem.transforms is deprecated. The CoordSystem class is now + immutable. Use the 'relations' keyword argument to the + CoordSystems() constructor to specify relations. + """, {}) + obj._names = [str(n) for n in symbols] + obj.patch.coord_systems.append(obj) # deprecated + obj._dummies = [Dummy(str(n)) for n in symbols] # deprecated + obj._dummy = Dummy() + + return obj + + @property + def name(self): + return self.args[0] + + @property + def patch(self): + return self.args[1] + + @property + def manifold(self): + return self.patch.manifold + + @property + def symbols(self): + return tuple(CoordinateSymbol(self, i, **s._assumptions.generator) + for i,s in enumerate(self.args[2])) + + @property + def relations(self): + return self.args[3] + + @property + def dim(self): + return self.patch.dim + + ########################################################################## + # Finding transformation relation + ########################################################################## + + def transformation(self, sys): + """ + Return coordinate transformation function from *self* to *sys*. + + Parameters + ========== + + sys : CoordSystem + + Returns + ======= + + sympy.Lambda + + Examples + ======== + + >>> from sympy.diffgeom.rn import R2_r, R2_p + >>> R2_r.transformation(R2_p) + Lambda((x, y), Matrix([ + [sqrt(x**2 + y**2)], + [ atan2(y, x)]])) + + """ + signature = self.args[2] + + key = Tuple(self.name, sys.name) + if self == sys: + expr = Matrix(self.symbols) + elif key in self.relations: + expr = Matrix(self.relations[key][1]) + elif key[::-1] in self.relations: + expr = Matrix(self._inverse_transformation(sys, self)) + else: + expr = Matrix(self._indirect_transformation(self, sys)) + return Lambda(signature, expr) + + @staticmethod + def _solve_inverse(sym1, sym2, exprs, sys1_name, sys2_name): + ret = solve( + [t[0] - t[1] for t in zip(sym2, exprs)], + list(sym1), dict=True) + + if len(ret) == 0: + temp = "Cannot solve inverse relation from {} to {}." + raise NotImplementedError(temp.format(sys1_name, sys2_name)) + elif len(ret) > 1: + temp = "Obtained multiple inverse relation from {} to {}." 
+ raise ValueError(temp.format(sys1_name, sys2_name)) + + return ret[0] + + @classmethod + def _inverse_transformation(cls, sys1, sys2): + # Find the transformation relation from sys2 to sys1 + forward = sys1.transform(sys2) + inv_results = cls._solve_inverse(sys1.symbols, sys2.symbols, forward, + sys1.name, sys2.name) + signature = tuple(sys1.symbols) + return [inv_results[s] for s in signature] + + @classmethod + @cacheit + def _indirect_transformation(cls, sys1, sys2): + # Find the transformation relation between two indirectly connected + # coordinate systems + rel = sys1.relations + path = cls._dijkstra(sys1, sys2) + + transforms = [] + for s1, s2 in zip(path, path[1:]): + if (s1, s2) in rel: + transforms.append(rel[(s1, s2)]) + else: + sym2, inv_exprs = rel[(s2, s1)] + sym1 = tuple(Dummy() for i in sym2) + ret = cls._solve_inverse(sym2, sym1, inv_exprs, s2, s1) + ret = tuple(ret[s] for s in sym2) + transforms.append((sym1, ret)) + syms = sys1.args[2] + exprs = syms + for newsyms, newexprs in transforms: + exprs = tuple(e.subs(zip(newsyms, exprs)) for e in newexprs) + return exprs + + @staticmethod + def _dijkstra(sys1, sys2): + # Use Dijkstra algorithm to find the shortest path between two indirectly-connected + # coordinate systems + # return value is the list of the names of the systems. + relations = sys1.relations + graph = {} + for s1, s2 in relations.keys(): + if s1 not in graph: + graph[s1] = {s2} + else: + graph[s1].add(s2) + if s2 not in graph: + graph[s2] = {s1} + else: + graph[s2].add(s1) + + path_dict = {sys:[0, [], 0] for sys in graph} # minimum distance, path, times of visited + + def visit(sys): + path_dict[sys][2] = 1 + for newsys in graph[sys]: + distance = path_dict[sys][0] + 1 + if path_dict[newsys][0] >= distance or not path_dict[newsys][1]: + path_dict[newsys][0] = distance + path_dict[newsys][1] = list(path_dict[sys][1]) + path_dict[newsys][1].append(sys) + + visit(sys1.name) + + while True: + min_distance = max(path_dict.values(), key=lambda x:x[0])[0] + newsys = None + for sys, lst in path_dict.items(): + if 0 < lst[0] <= min_distance and not lst[2]: + min_distance = lst[0] + newsys = sys + if newsys is None: + break + visit(newsys) + + result = path_dict[sys2.name][1] + result.append(sys2.name) + + if result == [sys2.name]: + raise KeyError("Two coordinate systems are not connected.") + return result + + def connect_to(self, to_sys, from_coords, to_exprs, inverse=True, fill_in_gaps=False): + sympy_deprecation_warning( + """ + The CoordSystem.connect_to() method is deprecated. Instead, + generate a new instance of CoordSystem with the 'relations' + keyword argument (CoordSystem classes are now immutable). 
+ """, + deprecated_since_version="1.7", + active_deprecations_target="deprecated-diffgeom-mutable", + ) + + from_coords, to_exprs = dummyfy(from_coords, to_exprs) + self.transforms[to_sys] = Matrix(from_coords), Matrix(to_exprs) + + if inverse: + to_sys.transforms[self] = self._inv_transf(from_coords, to_exprs) + + if fill_in_gaps: + self._fill_gaps_in_transformations() + + @staticmethod + def _inv_transf(from_coords, to_exprs): + # Will be removed when connect_to is removed + inv_from = [i.as_dummy() for i in from_coords] + inv_to = solve( + [t[0] - t[1] for t in zip(inv_from, to_exprs)], + list(from_coords), dict=True)[0] + inv_to = [inv_to[fc] for fc in from_coords] + return Matrix(inv_from), Matrix(inv_to) + + @staticmethod + def _fill_gaps_in_transformations(): + # Will be removed when connect_to is removed + raise NotImplementedError + + ########################################################################## + # Coordinate transformations + ########################################################################## + + def transform(self, sys, coordinates=None): + """ + Return the result of coordinate transformation from *self* to *sys*. + If coordinates are not given, coordinate symbols of *self* are used. + + Parameters + ========== + + sys : CoordSystem + + coordinates : Any iterable, optional. + + Returns + ======= + + sympy.ImmutableDenseMatrix containing CoordinateSymbol + + Examples + ======== + + >>> from sympy.diffgeom.rn import R2_r, R2_p + >>> R2_r.transform(R2_p) + Matrix([ + [sqrt(x**2 + y**2)], + [ atan2(y, x)]]) + >>> R2_r.transform(R2_p, [0, 1]) + Matrix([ + [ 1], + [pi/2]]) + + """ + if coordinates is None: + coordinates = self.symbols + if self != sys: + transf = self.transformation(sys) + coordinates = transf(*coordinates) + else: + coordinates = Matrix(coordinates) + return coordinates + + def coord_tuple_transform_to(self, to_sys, coords): + """Transform ``coords`` to coord system ``to_sys``.""" + sympy_deprecation_warning( + """ + The CoordSystem.coord_tuple_transform_to() method is deprecated. + Use the CoordSystem.transform() method instead. + """, + deprecated_since_version="1.7", + active_deprecations_target="deprecated-diffgeom-mutable", + ) + + coords = Matrix(coords) + if self != to_sys: + with ignore_warnings(SymPyDeprecationWarning): + transf = self.transforms[to_sys] + coords = transf[1].subs(list(zip(transf[0], coords))) + return coords + + def jacobian(self, sys, coordinates=None): + """ + Return the jacobian matrix of a transformation on given coordinates. + If coordinates are not given, coordinate symbols of *self* are used. + + Parameters + ========== + + sys : CoordSystem + + coordinates : Any iterable, optional. + + Returns + ======= + + sympy.ImmutableDenseMatrix + + Examples + ======== + + >>> from sympy.diffgeom.rn import R2_r, R2_p + >>> R2_p.jacobian(R2_r) + Matrix([ + [cos(theta), -rho*sin(theta)], + [sin(theta), rho*cos(theta)]]) + >>> R2_p.jacobian(R2_r, [1, 0]) + Matrix([ + [1, 0], + [0, 1]]) + + """ + result = self.transform(sys).jacobian(self.symbols) + if coordinates is not None: + result = result.subs(list(zip(self.symbols, coordinates))) + return result + jacobian_matrix = jacobian + + def jacobian_determinant(self, sys, coordinates=None): + """ + Return the jacobian determinant of a transformation on given + coordinates. If coordinates are not given, coordinate symbols of *self* + are used. + + Parameters + ========== + + sys : CoordSystem + + coordinates : Any iterable, optional. 
+ + Returns + ======= + + sympy.Expr + + Examples + ======== + + >>> from sympy.diffgeom.rn import R2_r, R2_p + >>> R2_r.jacobian_determinant(R2_p) + 1/sqrt(x**2 + y**2) + >>> R2_r.jacobian_determinant(R2_p, [1, 0]) + 1 + + """ + return self.jacobian(sys, coordinates).det() + + + ########################################################################## + # Points + ########################################################################## + + def point(self, coords): + """Create a ``Point`` with coordinates given in this coord system.""" + return Point(self, coords) + + def point_to_coords(self, point): + """Calculate the coordinates of a point in this coord system.""" + return point.coords(self) + + ########################################################################## + # Base fields. + ########################################################################## + + def base_scalar(self, coord_index): + """Return ``BaseScalarField`` that takes a point and returns one of the coordinates.""" + return BaseScalarField(self, coord_index) + coord_function = base_scalar + + def base_scalars(self): + """Returns a list of all coordinate functions. + For more details see the ``base_scalar`` method of this class.""" + return [self.base_scalar(i) for i in range(self.dim)] + coord_functions = base_scalars + + def base_vector(self, coord_index): + """Return a basis vector field. + The basis vector field for this coordinate system. It is also an + operator on scalar fields.""" + return BaseVectorField(self, coord_index) + + def base_vectors(self): + """Returns a list of all base vectors. + For more details see the ``base_vector`` method of this class.""" + return [self.base_vector(i) for i in range(self.dim)] + + def base_oneform(self, coord_index): + """Return a basis 1-form field. + The basis one-form field for this coordinate system. It is also an + operator on vector fields.""" + return Differential(self.coord_function(coord_index)) + + def base_oneforms(self): + """Returns a list of all base oneforms. + For more details see the ``base_oneform`` method of this class.""" + return [self.base_oneform(i) for i in range(self.dim)] + + +class CoordinateSymbol(Symbol): + """A symbol which denotes an abstract value of i-th coordinate of + the coordinate system with given context. + + Explanation + =========== + + Each coordinates in coordinate system are represented by unique symbol, + such as x, y, z in Cartesian coordinate system. + + You may not construct this class directly. Instead, use `symbols` method + of CoordSystem. + + Parameters + ========== + + coord_sys : CoordSystem + + index : integer + + Examples + ======== + + >>> from sympy import symbols, Lambda, Matrix, sqrt, atan2, cos, sin + >>> from sympy.diffgeom import Manifold, Patch, CoordSystem + >>> m = Manifold('M', 2) + >>> p = Patch('P', m) + >>> x, y = symbols('x y', real=True) + >>> r, theta = symbols('r theta', nonnegative=True) + >>> relation_dict = { + ... ('Car2D', 'Pol'): Lambda((x, y), Matrix([sqrt(x**2 + y**2), atan2(y, x)])), + ... ('Pol', 'Car2D'): Lambda((r, theta), Matrix([r*cos(theta), r*sin(theta)])) + ... } + >>> Car2D = CoordSystem('Car2D', p, [x, y], relation_dict) + >>> Pol = CoordSystem('Pol', p, [r, theta], relation_dict) + >>> x, y = Car2D.symbols + + ``CoordinateSymbol`` contains its coordinate symbol and index. + + >>> x.name + 'x' + >>> x.coord_sys == Car2D + True + >>> x.index + 0 + >>> x.is_real + True + + You can transform ``CoordinateSymbol`` into other coordinate system using + ``rewrite()`` method. 
+ + >>> x.rewrite(Pol) + r*cos(theta) + >>> sqrt(x**2 + y**2).rewrite(Pol).simplify() + r + + """ + def __new__(cls, coord_sys, index, **assumptions): + name = coord_sys.args[2][index].name + obj = super().__new__(cls, name, **assumptions) + obj.coord_sys = coord_sys + obj.index = index + return obj + + def __getnewargs__(self): + return (self.coord_sys, self.index) + + def _hashable_content(self): + return ( + self.coord_sys, self.index + ) + tuple(sorted(self.assumptions0.items())) + + def _eval_rewrite(self, rule, args, **hints): + if isinstance(rule, CoordSystem): + return rule.transform(self.coord_sys)[self.index] + return super()._eval_rewrite(rule, args, **hints) + + +class Point(Basic): + """Point defined in a coordinate system. + + Explanation + =========== + + Mathematically, point is defined in the manifold and does not have any coordinates + by itself. Coordinate system is what imbues the coordinates to the point by coordinate + chart. However, due to the difficulty of realizing such logic, you must supply + a coordinate system and coordinates to define a Point here. + + The usage of this object after its definition is independent of the + coordinate system that was used in order to define it, however due to + limitations in the simplification routines you can arrive at complicated + expressions if you use inappropriate coordinate systems. + + Parameters + ========== + + coord_sys : CoordSystem + + coords : list + The coordinates of the point. + + Examples + ======== + + >>> from sympy import pi + >>> from sympy.diffgeom import Point + >>> from sympy.diffgeom.rn import R2, R2_r, R2_p + >>> rho, theta = R2_p.symbols + + >>> p = Point(R2_p, [rho, 3*pi/4]) + + >>> p.manifold == R2 + True + + >>> p.coords() + Matrix([ + [ rho], + [3*pi/4]]) + >>> p.coords(R2_r) + Matrix([ + [-sqrt(2)*rho/2], + [ sqrt(2)*rho/2]]) + + """ + + def __new__(cls, coord_sys, coords, **kwargs): + coords = Matrix(coords) + obj = super().__new__(cls, coord_sys, coords) + obj._coord_sys = coord_sys + obj._coords = coords + return obj + + @property + def patch(self): + return self._coord_sys.patch + + @property + def manifold(self): + return self._coord_sys.manifold + + @property + def dim(self): + return self.manifold.dim + + def coords(self, sys=None): + """ + Coordinates of the point in given coordinate system. If coordinate system + is not passed, it returns the coordinates in the coordinate system in which + the poin was defined. + """ + if sys is None: + return self._coords + else: + return self._coord_sys.transform(sys, self._coords) + + @property + def free_symbols(self): + return self._coords.free_symbols + + +class BaseScalarField(Expr): + """Base scalar field over a manifold for a given coordinate system. + + Explanation + =========== + + A scalar field takes a point as an argument and returns a scalar. + A base scalar field of a coordinate system takes a point and returns one of + the coordinates of that point in the coordinate system in question. + + To define a scalar field you need to choose the coordinate system and the + index of the coordinate. + + The use of the scalar field after its definition is independent of the + coordinate system in which it was defined, however due to limitations in + the simplification routines you may arrive at more complicated + expression if you use unappropriate coordinate systems. + You can build complicated scalar fields by just building up SymPy + expressions containing ``BaseScalarField`` instances. 
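+ For instance (a small sketch; the point and values are arbitrary), a
+ composite expression built from base scalar fields evaluates pointwise
+ through ``rcall``:
+
+ >>> from sympy.diffgeom.rn import R2_r
+ >>> fx, fy = R2_r.base_scalars()
+ >>> p = R2_r.point([3, 4])
+ >>> (fx**2 + fy**2).rcall(p)
+ 25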
+ + Parameters + ========== + + coord_sys : CoordSystem + + index : integer + + Examples + ======== + + >>> from sympy import Function, pi + >>> from sympy.diffgeom import BaseScalarField + >>> from sympy.diffgeom.rn import R2_r, R2_p + >>> rho, _ = R2_p.symbols + >>> point = R2_p.point([rho, 0]) + >>> fx, fy = R2_r.base_scalars() + >>> ftheta = BaseScalarField(R2_r, 1) + + >>> fx(point) + rho + >>> fy(point) + 0 + + >>> (fx**2+fy**2).rcall(point) + rho**2 + + >>> g = Function('g') + >>> fg = g(ftheta-pi) + >>> fg.rcall(point) + g(-pi) + + """ + + is_commutative = True + + def __new__(cls, coord_sys, index, **kwargs): + index = _sympify(index) + obj = super().__new__(cls, coord_sys, index) + obj._coord_sys = coord_sys + obj._index = index + return obj + + @property + def coord_sys(self): + return self.args[0] + + @property + def index(self): + return self.args[1] + + @property + def patch(self): + return self.coord_sys.patch + + @property + def manifold(self): + return self.coord_sys.manifold + + @property + def dim(self): + return self.manifold.dim + + def __call__(self, *args): + """Evaluating the field at a point or doing nothing. + If the argument is a ``Point`` instance, the field is evaluated at that + point. The field is returned itself if the argument is any other + object. It is so in order to have working recursive calling mechanics + for all fields (check the ``__call__`` method of ``Expr``). + """ + point = args[0] + if len(args) != 1 or not isinstance(point, Point): + return self + coords = point.coords(self._coord_sys) + # XXX Calling doit is necessary with all the Subs expressions + # XXX Calling simplify is necessary with all the trig expressions + return simplify(coords[self._index]).doit() + + # XXX Workaround for limitations on the content of args + free_symbols: set[Any] = set() + + +class BaseVectorField(Expr): + r"""Base vector field over a manifold for a given coordinate system. + + Explanation + =========== + + A vector field is an operator taking a scalar field and returning a + directional derivative (which is also a scalar field). + A base vector field is the same type of operator, however the derivation is + specifically done with respect to a chosen coordinate. + + To define a base vector field you need to choose the coordinate system and + the index of the coordinate. + + The use of the vector field after its definition is independent of the + coordinate system in which it was defined, however due to limitations in the + simplification routines you may arrive at more complicated expression if you + use unappropriate coordinate systems. 
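+ For instance (an extra sketch using the predefined Cartesian system
+ ``R2_r``), applying a base vector field to a scalar field differentiates it
+ along the corresponding coordinate:
+
+ >>> from sympy.diffgeom.rn import R2_r
+ >>> fx, fy = R2_r.base_scalars()
+ >>> e_x, e_y = R2_r.base_vectors()
+ >>> e_x(fx**2 + fy)
+ 2*x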
+ + Parameters + ========== + coord_sys : CoordSystem + + index : integer + + Examples + ======== + + >>> from sympy import Function + >>> from sympy.diffgeom.rn import R2_p, R2_r + >>> from sympy.diffgeom import BaseVectorField + >>> from sympy import pprint + + >>> x, y = R2_r.symbols + >>> rho, theta = R2_p.symbols + >>> fx, fy = R2_r.base_scalars() + >>> point_p = R2_p.point([rho, theta]) + >>> point_r = R2_r.point([x, y]) + + >>> g = Function('g') + >>> s_field = g(fx, fy) + + >>> v = BaseVectorField(R2_r, 1) + >>> pprint(v(s_field)) + / d \| + |---(g(x, xi))|| + \dxi /|xi=y + >>> pprint(v(s_field).rcall(point_r).doit()) + d + --(g(x, y)) + dy + >>> pprint(v(s_field).rcall(point_p)) + / d \| + |---(g(rho*cos(theta), xi))|| + \dxi /|xi=rho*sin(theta) + + """ + + is_commutative = False + + def __new__(cls, coord_sys, index, **kwargs): + index = _sympify(index) + obj = super().__new__(cls, coord_sys, index) + obj._coord_sys = coord_sys + obj._index = index + return obj + + @property + def coord_sys(self): + return self.args[0] + + @property + def index(self): + return self.args[1] + + @property + def patch(self): + return self.coord_sys.patch + + @property + def manifold(self): + return self.coord_sys.manifold + + @property + def dim(self): + return self.manifold.dim + + def __call__(self, scalar_field): + """Apply on a scalar field. + The action of a vector field on a scalar field is a directional + differentiation. + If the argument is not a scalar field an error is raised. + """ + if covariant_order(scalar_field) or contravariant_order(scalar_field): + raise ValueError('Only scalar fields can be supplied as arguments to vector fields.') + + if scalar_field is None: + return self + + base_scalars = list(scalar_field.atoms(BaseScalarField)) + + # First step: e_x(x+r**2) -> e_x(x) + 2*r*e_x(r) + d_var = self._coord_sys._dummy + # TODO: you need a real dummy function for the next line + d_funcs = [Function('_#_%s' % i)(d_var) for i, + b in enumerate(base_scalars)] + d_result = scalar_field.subs(list(zip(base_scalars, d_funcs))) + d_result = d_result.diff(d_var) + + # Second step: e_x(x) -> 1 and e_x(r) -> cos(atan2(x, y)) + coords = self._coord_sys.symbols + d_funcs_deriv = [f.diff(d_var) for f in d_funcs] + d_funcs_deriv_sub = [] + for b in base_scalars: + jac = self._coord_sys.jacobian(b._coord_sys, coords) + d_funcs_deriv_sub.append(jac[b._index, self._index]) + d_result = d_result.subs(list(zip(d_funcs_deriv, d_funcs_deriv_sub))) + + # Remove the dummies + result = d_result.subs(list(zip(d_funcs, base_scalars))) + result = result.subs(list(zip(coords, self._coord_sys.coord_functions()))) + return result.doit() + + +def _find_coords(expr): + # Finds CoordinateSystems existing in expr + fields = expr.atoms(BaseScalarField, BaseVectorField) + result = set() + for f in fields: + result.add(f._coord_sys) + return result + + +class Commutator(Expr): + r"""Commutator of two vector fields. + + Explanation + =========== + + The commutator of two vector fields `v_1` and `v_2` is defined as the + vector field `[v_1, v_2]` that evaluated on each scalar field `f` is equal + to `v_1(v_2(f)) - v_2(v_1(f))`. 
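+
+    When both fields are expressed over the base vectors of a single
+    coordinate system, `v_1 = \sum_i a^i e_i` and `v_2 = \sum_j b^j e_j`, the
+    commutator reduces to `\sum_{i, j} \big(a^i e_i(b^j) - b^i e_i(a^j)\big) e_j`,
+    and this is the formula used whenever the commutator can be evaluated
+    explicitly.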
+ + Examples + ======== + + + >>> from sympy.diffgeom.rn import R2_p, R2_r + >>> from sympy.diffgeom import Commutator + >>> from sympy import simplify + + >>> fx, fy = R2_r.base_scalars() + >>> e_x, e_y = R2_r.base_vectors() + >>> e_r = R2_p.base_vector(0) + + >>> c_xy = Commutator(e_x, e_y) + >>> c_xr = Commutator(e_x, e_r) + >>> c_xy + 0 + + Unfortunately, the current code is not able to compute everything: + + >>> c_xr + Commutator(e_x, e_rho) + >>> simplify(c_xr(fy**2)) + -2*cos(theta)*y**2/(x**2 + y**2) + + """ + def __new__(cls, v1, v2): + if (covariant_order(v1) or contravariant_order(v1) != 1 + or covariant_order(v2) or contravariant_order(v2) != 1): + raise ValueError( + 'Only commutators of vector fields are supported.') + if v1 == v2: + return S.Zero + coord_sys = set().union(*[_find_coords(v) for v in (v1, v2)]) + if len(coord_sys) == 1: + # Only one coordinate systems is used, hence it is easy enough to + # actually evaluate the commutator. + if all(isinstance(v, BaseVectorField) for v in (v1, v2)): + return S.Zero + bases_1, bases_2 = [list(v.atoms(BaseVectorField)) + for v in (v1, v2)] + coeffs_1 = [v1.expand().coeff(b) for b in bases_1] + coeffs_2 = [v2.expand().coeff(b) for b in bases_2] + res = 0 + for c1, b1 in zip(coeffs_1, bases_1): + for c2, b2 in zip(coeffs_2, bases_2): + res += c1*b1(c2)*b2 - c2*b2(c1)*b1 + return res + else: + obj = super().__new__(cls, v1, v2) + obj._v1 = v1 # deprecated assignment + obj._v2 = v2 # deprecated assignment + return obj + + @property + def v1(self): + return self.args[0] + + @property + def v2(self): + return self.args[1] + + def __call__(self, scalar_field): + """Apply on a scalar field. + If the argument is not a scalar field an error is raised. + """ + return self.v1(self.v2(scalar_field)) - self.v2(self.v1(scalar_field)) + + +class Differential(Expr): + r"""Return the differential (exterior derivative) of a form field. + + Explanation + =========== + + The differential of a form (i.e. the exterior derivative) has a complicated + definition in the general case. + The differential `df` of the 0-form `f` is defined for any vector field `v` + as `df(v) = v(f)`. + + Examples + ======== + + >>> from sympy import Function + >>> from sympy.diffgeom.rn import R2_r + >>> from sympy.diffgeom import Differential + >>> from sympy import pprint + + >>> fx, fy = R2_r.base_scalars() + >>> e_x, e_y = R2_r.base_vectors() + >>> g = Function('g') + >>> s_field = g(fx, fy) + >>> dg = Differential(s_field) + + >>> dg + d(g(x, y)) + >>> pprint(dg(e_x)) + / d \| + |---(g(xi, y))|| + \dxi /|xi=x + >>> pprint(dg(e_y)) + / d \| + |---(g(x, xi))|| + \dxi /|xi=y + + Applying the exterior derivative operator twice always results in: + + >>> Differential(dg) + 0 + """ + + is_commutative = False + + def __new__(cls, form_field): + if contravariant_order(form_field): + raise ValueError( + 'A vector field was supplied as an argument to Differential.') + if isinstance(form_field, Differential): + return S.Zero + else: + obj = super().__new__(cls, form_field) + obj._form_field = form_field # deprecated assignment + return obj + + @property + def form_field(self): + return self.args[0] + + def __call__(self, *vector_fields): + """Apply on a list of vector_fields. + + Explanation + =========== + + If the number of vector fields supplied is not equal to 1 + the order of + the form field inside the differential the result is undefined. + + For 1-forms (i.e. differentials of scalar fields) the evaluation is + done as `df(v)=v(f)`. 
However if `v` is ``None`` instead of a vector + field, the differential is returned unchanged. This is done in order to + permit partial contractions for higher forms. + + In the general case the evaluation is done by applying the form field + inside the differential on a list with one less elements than the number + of elements in the original list. Lowering the number of vector fields + is achieved through replacing each pair of fields by their + commutator. + + If the arguments are not vectors or ``None``s an error is raised. + """ + if any((contravariant_order(a) != 1 or covariant_order(a)) and a is not None + for a in vector_fields): + raise ValueError('The arguments supplied to Differential should be vector fields or Nones.') + k = len(vector_fields) + if k == 1: + if vector_fields[0]: + return vector_fields[0].rcall(self._form_field) + return self + else: + # For higher form it is more complicated: + # Invariant formula: + # https://en.wikipedia.org/wiki/Exterior_derivative#Invariant_formula + # df(v1, ... vn) = +/- vi(f(v1..no i..vn)) + # +/- f([vi,vj],v1..no i, no j..vn) + f = self._form_field + v = vector_fields + ret = 0 + for i in range(k): + t = v[i].rcall(f.rcall(*v[:i] + v[i + 1:])) + ret += (-1)**i*t + for j in range(i + 1, k): + c = Commutator(v[i], v[j]) + if c: # TODO this is ugly - the Commutator can be Zero and + # this causes the next line to fail + t = f.rcall(*(c,) + v[:i] + v[i + 1:j] + v[j + 1:]) + ret += (-1)**(i + j)*t + return ret + + +class TensorProduct(Expr): + """Tensor product of forms. + + Explanation + =========== + + The tensor product permits the creation of multilinear functionals (i.e. + higher order tensors) out of lower order fields (e.g. 1-forms and vector + fields). However, the higher tensors thus created lack the interesting + features provided by the other type of product, the wedge product, namely + they are not antisymmetric and hence are not form fields. + + Examples + ======== + + >>> from sympy.diffgeom.rn import R2_r + >>> from sympy.diffgeom import TensorProduct + + >>> fx, fy = R2_r.base_scalars() + >>> e_x, e_y = R2_r.base_vectors() + >>> dx, dy = R2_r.base_oneforms() + + >>> TensorProduct(dx, dy)(e_x, e_y) + 1 + >>> TensorProduct(dx, dy)(e_y, e_x) + 0 + >>> TensorProduct(dx, fx*dy)(fx*e_x, e_y) + x**2 + >>> TensorProduct(e_x, e_y)(fx**2, fy**2) + 4*x*y + >>> TensorProduct(e_y, dx)(fy) + dx + + You can nest tensor products. + + >>> tp1 = TensorProduct(dx, dy) + >>> TensorProduct(tp1, dx)(e_x, e_y, e_x) + 1 + + You can make partial contraction for instance when 'raising an index'. + Putting ``None`` in the second argument of ``rcall`` means that the + respective position in the tensor product is left as it is. + + >>> TP = TensorProduct + >>> metric = TP(dx, dx) + 3*TP(dy, dy) + >>> metric.rcall(e_y, None) + 3*dy + + Or automatically pad the args with ``None`` without specifying them. + + >>> metric.rcall(e_y) + 3*dy + + """ + def __new__(cls, *args): + scalar = Mul(*[m for m in args if covariant_order(m) + contravariant_order(m) == 0]) + multifields = [m for m in args if covariant_order(m) + contravariant_order(m)] + if multifields: + if len(multifields) == 1: + return scalar*multifields[0] + return scalar*super().__new__(cls, *multifields) + else: + return scalar + + def __call__(self, *fields): + """Apply on a list of fields. + + If the number of input fields supplied is not equal to the order of + the tensor product field, the list of arguments is padded with ``None``'s. 
+ + The list of arguments is divided in sublists depending on the order of + the forms inside the tensor product. The sublists are provided as + arguments to these forms and the resulting expressions are given to the + constructor of ``TensorProduct``. + + """ + tot_order = covariant_order(self) + contravariant_order(self) + tot_args = len(fields) + if tot_args != tot_order: + fields = list(fields) + [None]*(tot_order - tot_args) + orders = [covariant_order(f) + contravariant_order(f) for f in self._args] + indices = [sum(orders[:i + 1]) for i in range(len(orders) - 1)] + fields = [fields[i:j] for i, j in zip([0] + indices, indices + [None])] + multipliers = [t[0].rcall(*t[1]) for t in zip(self._args, fields)] + return TensorProduct(*multipliers) + + +class WedgeProduct(TensorProduct): + """Wedge product of forms. + + Explanation + =========== + + In the context of integration only completely antisymmetric forms make + sense. The wedge product permits the creation of such forms. + + Examples + ======== + + >>> from sympy.diffgeom.rn import R2_r + >>> from sympy.diffgeom import WedgeProduct + + >>> fx, fy = R2_r.base_scalars() + >>> e_x, e_y = R2_r.base_vectors() + >>> dx, dy = R2_r.base_oneforms() + + >>> WedgeProduct(dx, dy)(e_x, e_y) + 1 + >>> WedgeProduct(dx, dy)(e_y, e_x) + -1 + >>> WedgeProduct(dx, fx*dy)(fx*e_x, e_y) + x**2 + >>> WedgeProduct(e_x, e_y)(fy, None) + -e_x + + You can nest wedge products. + + >>> wp1 = WedgeProduct(dx, dy) + >>> WedgeProduct(wp1, dx)(e_x, e_y, e_x) + 0 + + """ + # TODO the calculation of signatures is slow + # TODO you do not need all these permutations (neither the prefactor) + def __call__(self, *fields): + """Apply on a list of vector_fields. + The expression is rewritten internally in terms of tensor products and evaluated.""" + orders = (covariant_order(e) + contravariant_order(e) for e in self.args) + mul = 1/Mul(*(factorial(o) for o in orders)) + perms = permutations(fields) + perms_par = (Permutation( + p).signature() for p in permutations(range(len(fields)))) + tensor_prod = TensorProduct(*self.args) + return mul*Add(*[tensor_prod(*p[0])*p[1] for p in zip(perms, perms_par)]) + + +class LieDerivative(Expr): + """Lie derivative with respect to a vector field. + + Explanation + =========== + + The transport operator that defines the Lie derivative is the pushforward of + the field to be derived along the integral curve of the field with respect + to which one derives. + + Examples + ======== + + >>> from sympy.diffgeom.rn import R2_r, R2_p + >>> from sympy.diffgeom import (LieDerivative, TensorProduct) + + >>> fx, fy = R2_r.base_scalars() + >>> e_x, e_y = R2_r.base_vectors() + >>> e_rho, e_theta = R2_p.base_vectors() + >>> dx, dy = R2_r.base_oneforms() + + >>> LieDerivative(e_x, fy) + 0 + >>> LieDerivative(e_x, fx) + 1 + >>> LieDerivative(e_x, e_x) + 0 + + The Lie derivative of a tensor field by another tensor field is equal to + their commutator: + + >>> LieDerivative(e_x, e_rho) + Commutator(e_x, e_rho) + >>> LieDerivative(e_x + e_y, fx) + 1 + + >>> tp = TensorProduct(dx, dy) + >>> LieDerivative(e_x, tp) + LieDerivative(e_x, TensorProduct(dx, dy)) + >>> LieDerivative(e_x, tp) + LieDerivative(e_x, TensorProduct(dx, dy)) + + """ + def __new__(cls, v_field, expr): + expr_form_ord = covariant_order(expr) + if contravariant_order(v_field) != 1 or covariant_order(v_field): + raise ValueError('Lie derivatives are defined only with respect to' + ' vector fields. 
The supplied argument was not a ' + 'vector field.') + if expr_form_ord > 0: + obj = super().__new__(cls, v_field, expr) + # deprecated assignments + obj._v_field = v_field + obj._expr = expr + return obj + if expr.atoms(BaseVectorField): + return Commutator(v_field, expr) + else: + return v_field.rcall(expr) + + @property + def v_field(self): + return self.args[0] + + @property + def expr(self): + return self.args[1] + + def __call__(self, *args): + v = self.v_field + expr = self.expr + lead_term = v(expr(*args)) + rest = Add(*[Mul(*args[:i] + (Commutator(v, args[i]),) + args[i + 1:]) + for i in range(len(args))]) + return lead_term - rest + + +class BaseCovarDerivativeOp(Expr): + """Covariant derivative operator with respect to a base vector. + + Examples + ======== + + >>> from sympy.diffgeom.rn import R2_r + >>> from sympy.diffgeom import BaseCovarDerivativeOp + >>> from sympy.diffgeom import metric_to_Christoffel_2nd, TensorProduct + + >>> TP = TensorProduct + >>> fx, fy = R2_r.base_scalars() + >>> e_x, e_y = R2_r.base_vectors() + >>> dx, dy = R2_r.base_oneforms() + + >>> ch = metric_to_Christoffel_2nd(TP(dx, dx) + TP(dy, dy)) + >>> ch + [[[0, 0], [0, 0]], [[0, 0], [0, 0]]] + >>> cvd = BaseCovarDerivativeOp(R2_r, 0, ch) + >>> cvd(fx) + 1 + >>> cvd(fx*e_x) + e_x + """ + + def __new__(cls, coord_sys, index, christoffel): + index = _sympify(index) + christoffel = ImmutableDenseNDimArray(christoffel) + obj = super().__new__(cls, coord_sys, index, christoffel) + # deprecated assignments + obj._coord_sys = coord_sys + obj._index = index + obj._christoffel = christoffel + return obj + + @property + def coord_sys(self): + return self.args[0] + + @property + def index(self): + return self.args[1] + + @property + def christoffel(self): + return self.args[2] + + def __call__(self, field): + """Apply on a scalar field. + + The action of a vector field on a scalar field is a directional + differentiation. + If the argument is not a scalar field the behaviour is undefined. + """ + if covariant_order(field) != 0: + raise NotImplementedError() + + field = vectors_in_basis(field, self._coord_sys) + + wrt_vector = self._coord_sys.base_vector(self._index) + wrt_scalar = self._coord_sys.coord_function(self._index) + vectors = list(field.atoms(BaseVectorField)) + + # First step: replace all vectors with something susceptible to + # derivation and do the derivation + # TODO: you need a real dummy function for the next line + d_funcs = [Function('_#_%s' % i)(wrt_scalar) for i, + b in enumerate(vectors)] + d_result = field.subs(list(zip(vectors, d_funcs))) + d_result = wrt_vector(d_result) + + # Second step: backsubstitute the vectors in + d_result = d_result.subs(list(zip(d_funcs, vectors))) + + # Third step: evaluate the derivatives of the vectors + derivs = [] + for v in vectors: + d = Add(*[(self._christoffel[k, wrt_vector._index, v._index] + *v._coord_sys.base_vector(k)) + for k in range(v._coord_sys.dim)]) + derivs.append(d) + to_subs = [wrt_vector(d) for d in d_funcs] + # XXX: This substitution can fail when there are Dummy symbols and the + # cache is disabled: https://github.com/sympy/sympy/issues/17794 + result = d_result.subs(list(zip(to_subs, derivs))) + + # Remove the dummies + result = result.subs(list(zip(d_funcs, vectors))) + return result.doit() + + +class CovarDerivativeOp(Expr): + """Covariant derivative operator. 
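+
+    Explanation
+    ===========
+
+    The operator differentiates along the vector field ``wrt`` using the
+    connection described by the supplied Christoffel symbols.  Internally the
+    field ``wrt`` is decomposed over its base vector fields and each of them
+    is replaced by the corresponding ``BaseCovarDerivativeOp``.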
+ + Examples + ======== + + >>> from sympy.diffgeom.rn import R2_r + >>> from sympy.diffgeom import CovarDerivativeOp + >>> from sympy.diffgeom import metric_to_Christoffel_2nd, TensorProduct + >>> TP = TensorProduct + >>> fx, fy = R2_r.base_scalars() + >>> e_x, e_y = R2_r.base_vectors() + >>> dx, dy = R2_r.base_oneforms() + >>> ch = metric_to_Christoffel_2nd(TP(dx, dx) + TP(dy, dy)) + + >>> ch + [[[0, 0], [0, 0]], [[0, 0], [0, 0]]] + >>> cvd = CovarDerivativeOp(fx*e_x, ch) + >>> cvd(fx) + x + >>> cvd(fx*e_x) + x*e_x + + """ + + def __new__(cls, wrt, christoffel): + if len({v._coord_sys for v in wrt.atoms(BaseVectorField)}) > 1: + raise NotImplementedError() + if contravariant_order(wrt) != 1 or covariant_order(wrt): + raise ValueError('Covariant derivatives are defined only with ' + 'respect to vector fields. The supplied argument ' + 'was not a vector field.') + christoffel = ImmutableDenseNDimArray(christoffel) + obj = super().__new__(cls, wrt, christoffel) + # deprecated assignments + obj._wrt = wrt + obj._christoffel = christoffel + return obj + + @property + def wrt(self): + return self.args[0] + + @property + def christoffel(self): + return self.args[1] + + def __call__(self, field): + vectors = list(self._wrt.atoms(BaseVectorField)) + base_ops = [BaseCovarDerivativeOp(v._coord_sys, v._index, self._christoffel) + for v in vectors] + return self._wrt.subs(list(zip(vectors, base_ops))).rcall(field) + + +############################################################################### +# Integral curves on vector fields +############################################################################### +def intcurve_series(vector_field, param, start_point, n=6, coord_sys=None, coeffs=False): + r"""Return the series expansion for an integral curve of the field. + + Explanation + =========== + + Integral curve is a function `\gamma` taking a parameter in `R` to a point + in the manifold. It verifies the equation: + + `V(f)\big(\gamma(t)\big) = \frac{d}{dt}f\big(\gamma(t)\big)` + + where the given ``vector_field`` is denoted as `V`. This holds for any + value `t` for the parameter and any scalar field `f`. + + This equation can also be decomposed of a basis of coordinate functions + `V(f_i)\big(\gamma(t)\big) = \frac{d}{dt}f_i\big(\gamma(t)\big) \quad \forall i` + + This function returns a series expansion of `\gamma(t)` in terms of the + coordinate system ``coord_sys``. The equations and expansions are necessarily + done in coordinate-system-dependent way as there is no other way to + represent movement between points on the manifold (i.e. there is no such + thing as a difference of points for a general manifold). 
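+
+    In the chosen coordinate system the `i`-th coordinate of the curve is
+    expanded as
+
+    `f_i\big(\gamma(t)\big) \approx \sum_{k=0}^{n-1} \frac{t^k}{k!} V^k(f_i)\big(\gamma(0)\big)`
+
+    where `V^k(f_i)` denotes `k` successive applications of the vector field
+    to the coordinate function `f_i`.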
+ + Parameters + ========== + vector_field + the vector field for which an integral curve will be given + + param + the argument of the function `\gamma` from R to the curve + + start_point + the point which corresponds to `\gamma(0)` + + n + the order to which to expand + + coord_sys + the coordinate system in which to expand + coeffs (default False) - if True return a list of elements of the expansion + + Examples + ======== + + Use the predefined R2 manifold: + + >>> from sympy.abc import t, x, y + >>> from sympy.diffgeom.rn import R2_p, R2_r + >>> from sympy.diffgeom import intcurve_series + + Specify a starting point and a vector field: + + >>> start_point = R2_r.point([x, y]) + >>> vector_field = R2_r.e_x + + Calculate the series: + + >>> intcurve_series(vector_field, t, start_point, n=3) + Matrix([ + [t + x], + [ y]]) + + Or get the elements of the expansion in a list: + + >>> series = intcurve_series(vector_field, t, start_point, n=3, coeffs=True) + >>> series[0] + Matrix([ + [x], + [y]]) + >>> series[1] + Matrix([ + [t], + [0]]) + >>> series[2] + Matrix([ + [0], + [0]]) + + The series in the polar coordinate system: + + >>> series = intcurve_series(vector_field, t, start_point, + ... n=3, coord_sys=R2_p, coeffs=True) + >>> series[0] + Matrix([ + [sqrt(x**2 + y**2)], + [ atan2(y, x)]]) + >>> series[1] + Matrix([ + [t*x/sqrt(x**2 + y**2)], + [ -t*y/(x**2 + y**2)]]) + >>> series[2] + Matrix([ + [t**2*(-x**2/(x**2 + y**2)**(3/2) + 1/sqrt(x**2 + y**2))/2], + [ t**2*x*y/(x**2 + y**2)**2]]) + + See Also + ======== + + intcurve_diffequ + + """ + if contravariant_order(vector_field) != 1 or covariant_order(vector_field): + raise ValueError('The supplied field was not a vector field.') + + def iter_vfield(scalar_field, i): + """Return ``vector_field`` called `i` times on ``scalar_field``.""" + return reduce(lambda s, v: v.rcall(s), [vector_field, ]*i, scalar_field) + + def taylor_terms_per_coord(coord_function): + """Return the series for one of the coordinates.""" + return [param**i*iter_vfield(coord_function, i).rcall(start_point)/factorial(i) + for i in range(n)] + coord_sys = coord_sys if coord_sys else start_point._coord_sys + coord_functions = coord_sys.coord_functions() + taylor_terms = [taylor_terms_per_coord(f) for f in coord_functions] + if coeffs: + return [Matrix(t) for t in zip(*taylor_terms)] + else: + return Matrix([sum(c) for c in taylor_terms]) + + +def intcurve_diffequ(vector_field, param, start_point, coord_sys=None): + r"""Return the differential equation for an integral curve of the field. + + Explanation + =========== + + Integral curve is a function `\gamma` taking a parameter in `R` to a point + in the manifold. It verifies the equation: + + `V(f)\big(\gamma(t)\big) = \frac{d}{dt}f\big(\gamma(t)\big)` + + where the given ``vector_field`` is denoted as `V`. This holds for any + value `t` for the parameter and any scalar field `f`. + + This function returns the differential equation of `\gamma(t)` in terms of the + coordinate system ``coord_sys``. The equations and expansions are necessarily + done in coordinate-system-dependent way as there is no other way to + represent movement between points on the manifold (i.e. there is no such + thing as a difference of points for a general manifold). 
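+
+    Writing `f_i(t)` for the coordinates of `\gamma(t)` in ``coord_sys``, the
+    returned equations are the expressions
+
+    `\frac{d}{dt} f_i(t) - V(f_i)\big(\gamma(t)\big)`
+
+    and the returned initial conditions are the expressions
+    `f_i(0) - f_i\big(\gamma(0)\big)`, all of which should equal zero along an
+    integral curve.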
+ + Parameters + ========== + + vector_field + the vector field for which an integral curve will be given + + param + the argument of the function `\gamma` from R to the curve + + start_point + the point which corresponds to `\gamma(0)` + + coord_sys + the coordinate system in which to give the equations + + Returns + ======= + + a tuple of (equations, initial conditions) + + Examples + ======== + + Use the predefined R2 manifold: + + >>> from sympy.abc import t + >>> from sympy.diffgeom.rn import R2, R2_p, R2_r + >>> from sympy.diffgeom import intcurve_diffequ + + Specify a starting point and a vector field: + + >>> start_point = R2_r.point([0, 1]) + >>> vector_field = -R2.y*R2.e_x + R2.x*R2.e_y + + Get the equation: + + >>> equations, init_cond = intcurve_diffequ(vector_field, t, start_point) + >>> equations + [f_1(t) + Derivative(f_0(t), t), -f_0(t) + Derivative(f_1(t), t)] + >>> init_cond + [f_0(0), f_1(0) - 1] + + The series in the polar coordinate system: + + >>> equations, init_cond = intcurve_diffequ(vector_field, t, start_point, R2_p) + >>> equations + [Derivative(f_0(t), t), Derivative(f_1(t), t) - 1] + >>> init_cond + [f_0(0) - 1, f_1(0) - pi/2] + + See Also + ======== + + intcurve_series + + """ + if contravariant_order(vector_field) != 1 or covariant_order(vector_field): + raise ValueError('The supplied field was not a vector field.') + coord_sys = coord_sys if coord_sys else start_point._coord_sys + gammas = [Function('f_%d' % i)(param) for i in range( + start_point._coord_sys.dim)] + arbitrary_p = Point(coord_sys, gammas) + coord_functions = coord_sys.coord_functions() + equations = [simplify(diff(cf.rcall(arbitrary_p), param) - vector_field.rcall(cf).rcall(arbitrary_p)) + for cf in coord_functions] + init_cond = [simplify(cf.rcall(arbitrary_p).subs(param, 0) - cf.rcall(start_point)) + for cf in coord_functions] + return equations, init_cond + + +############################################################################### +# Helpers +############################################################################### +def dummyfy(args, exprs): + # TODO Is this a good idea? + d_args = Matrix([s.as_dummy() for s in args]) + reps = dict(zip(args, d_args)) + d_exprs = Matrix([_sympify(expr).subs(reps) for expr in exprs]) + return d_args, d_exprs + +############################################################################### +# Helpers +############################################################################### +def contravariant_order(expr, _strict=False): + """Return the contravariant order of an expression. + + Examples + ======== + + >>> from sympy.diffgeom import contravariant_order + >>> from sympy.diffgeom.rn import R2 + >>> from sympy.abc import a + + >>> contravariant_order(a) + 0 + >>> contravariant_order(a*R2.x + 2) + 0 + >>> contravariant_order(a*R2.x*R2.e_y + R2.e_x) + 1 + + """ + # TODO move some of this to class methods. 
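+    # The order is determined by structural recursion: all terms of an Add
+    # must have the same order, a Mul may contain at most one factor of
+    # nonzero order, a BaseVectorField contributes order 1 and a
+    # TensorProduct sums the orders of its arguments.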
+ # TODO rewrite using the .as_blah_blah methods + if isinstance(expr, Add): + orders = [contravariant_order(e) for e in expr.args] + if len(set(orders)) != 1: + raise ValueError('Misformed expression containing contravariant fields of varying order.') + return orders[0] + elif isinstance(expr, Mul): + orders = [contravariant_order(e) for e in expr.args] + not_zero = [o for o in orders if o != 0] + if len(not_zero) > 1: + raise ValueError('Misformed expression containing multiplication between vectors.') + return 0 if not not_zero else not_zero[0] + elif isinstance(expr, Pow): + if covariant_order(expr.base) or covariant_order(expr.exp): + raise ValueError( + 'Misformed expression containing a power of a vector.') + return 0 + elif isinstance(expr, BaseVectorField): + return 1 + elif isinstance(expr, TensorProduct): + return sum(contravariant_order(a) for a in expr.args) + elif not _strict or expr.atoms(BaseScalarField): + return 0 + else: # If it does not contain anything related to the diffgeom module and it is _strict + return -1 + + +def covariant_order(expr, _strict=False): + """Return the covariant order of an expression. + + Examples + ======== + + >>> from sympy.diffgeom import covariant_order + >>> from sympy.diffgeom.rn import R2 + >>> from sympy.abc import a + + >>> covariant_order(a) + 0 + >>> covariant_order(a*R2.x + 2) + 0 + >>> covariant_order(a*R2.x*R2.dy + R2.dx) + 1 + + """ + # TODO move some of this to class methods. + # TODO rewrite using the .as_blah_blah methods + if isinstance(expr, Add): + orders = [covariant_order(e) for e in expr.args] + if len(set(orders)) != 1: + raise ValueError('Misformed expression containing form fields of varying order.') + return orders[0] + elif isinstance(expr, Mul): + orders = [covariant_order(e) for e in expr.args] + not_zero = [o for o in orders if o != 0] + if len(not_zero) > 1: + raise ValueError('Misformed expression containing multiplication between forms.') + return 0 if not not_zero else not_zero[0] + elif isinstance(expr, Pow): + if covariant_order(expr.base) or covariant_order(expr.exp): + raise ValueError( + 'Misformed expression containing a power of a form.') + return 0 + elif isinstance(expr, Differential): + return covariant_order(*expr.args) + 1 + elif isinstance(expr, TensorProduct): + return sum(covariant_order(a) for a in expr.args) + elif not _strict or expr.atoms(BaseScalarField): + return 0 + else: # If it does not contain anything related to the diffgeom module and it is _strict + return -1 + + +############################################################################### +# Coordinate transformation functions +############################################################################### +def vectors_in_basis(expr, to_sys): + """Transform all base vectors in base vectors of a specified coord basis. + While the new base vectors are in the new coordinate system basis, any + coefficients are kept in the old system. 
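+
+    The substitution is computed from the Jacobian of the transformation from
+    each vector's own coordinate system to ``to_sys``: every old base vector
+    is rewritten as a linear combination of the base vectors of ``to_sys``,
+    with coefficients taken from the transposed Jacobian.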
+ + Examples + ======== + + >>> from sympy.diffgeom import vectors_in_basis + >>> from sympy.diffgeom.rn import R2_r, R2_p + + >>> vectors_in_basis(R2_r.e_x, R2_p) + -y*e_theta/(x**2 + y**2) + x*e_rho/sqrt(x**2 + y**2) + >>> vectors_in_basis(R2_p.e_r, R2_r) + sin(theta)*e_y + cos(theta)*e_x + + """ + vectors = list(expr.atoms(BaseVectorField)) + new_vectors = [] + for v in vectors: + cs = v._coord_sys + jac = cs.jacobian(to_sys, cs.coord_functions()) + new = (jac.T*Matrix(to_sys.base_vectors()))[v._index] + new_vectors.append(new) + return expr.subs(list(zip(vectors, new_vectors))) + + +############################################################################### +# Coordinate-dependent functions +############################################################################### +def twoform_to_matrix(expr): + """Return the matrix representing the twoform. + + For the twoform `w` return the matrix `M` such that `M[i,j]=w(e_i, e_j)`, + where `e_i` is the i-th base vector field for the coordinate system in + which the expression of `w` is given. + + Examples + ======== + + >>> from sympy.diffgeom.rn import R2 + >>> from sympy.diffgeom import twoform_to_matrix, TensorProduct + >>> TP = TensorProduct + + >>> twoform_to_matrix(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) + Matrix([ + [1, 0], + [0, 1]]) + >>> twoform_to_matrix(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) + Matrix([ + [x, 0], + [0, 1]]) + >>> twoform_to_matrix(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy) - TP(R2.dx, R2.dy)/2) + Matrix([ + [ 1, 0], + [-1/2, 1]]) + + """ + if covariant_order(expr) != 2 or contravariant_order(expr): + raise ValueError('The input expression is not a two-form.') + coord_sys = _find_coords(expr) + if len(coord_sys) != 1: + raise ValueError('The input expression concerns more than one ' + 'coordinate systems, hence there is no unambiguous ' + 'way to choose a coordinate system for the matrix.') + coord_sys = coord_sys.pop() + vectors = coord_sys.base_vectors() + expr = expr.expand() + matrix_content = [[expr.rcall(v1, v2) for v1 in vectors] + for v2 in vectors] + return Matrix(matrix_content) + + +def metric_to_Christoffel_1st(expr): + """Return the nested list of Christoffel symbols for the given metric. + This returns the Christoffel symbol of first kind that represents the + Levi-Civita connection for the given metric. + + Examples + ======== + + >>> from sympy.diffgeom.rn import R2 + >>> from sympy.diffgeom import metric_to_Christoffel_1st, TensorProduct + >>> TP = TensorProduct + + >>> metric_to_Christoffel_1st(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) + [[[0, 0], [0, 0]], [[0, 0], [0, 0]]] + >>> metric_to_Christoffel_1st(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) + [[[1/2, 0], [0, 0]], [[0, 0], [0, 0]]] + + """ + matrix = twoform_to_matrix(expr) + if not matrix.is_symmetric(): + raise ValueError( + 'The two-form representing the metric is not symmetric.') + coord_sys = _find_coords(expr).pop() + deriv_matrices = [matrix.applyfunc(d) for d in coord_sys.base_vectors()] + indices = list(range(coord_sys.dim)) + christoffel = [[[(deriv_matrices[k][i, j] + deriv_matrices[j][i, k] - deriv_matrices[i][j, k])/2 + for k in indices] + for j in indices] + for i in indices] + return ImmutableDenseNDimArray(christoffel) + + +def metric_to_Christoffel_2nd(expr): + """Return the nested list of Christoffel symbols for the given metric. + This returns the Christoffel symbol of second kind that represents the + Levi-Civita connection for the given metric. 
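+
+    The symbols of the second kind are obtained from those of the first kind
+    by raising the first index with the inverse of the metric matrix: entry
+    ``[i, j, k]`` is the sum over ``l`` of the ``[i, l]`` entry of the inverse
+    of the matrix returned by ``twoform_to_matrix`` times the first-kind
+    symbol ``[l, j, k]``.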
+ + Examples + ======== + + >>> from sympy.diffgeom.rn import R2 + >>> from sympy.diffgeom import metric_to_Christoffel_2nd, TensorProduct + >>> TP = TensorProduct + + >>> metric_to_Christoffel_2nd(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) + [[[0, 0], [0, 0]], [[0, 0], [0, 0]]] + >>> metric_to_Christoffel_2nd(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) + [[[1/(2*x), 0], [0, 0]], [[0, 0], [0, 0]]] + + """ + ch_1st = metric_to_Christoffel_1st(expr) + coord_sys = _find_coords(expr).pop() + indices = list(range(coord_sys.dim)) + # XXX workaround, inverting a matrix does not work if it contains non + # symbols + #matrix = twoform_to_matrix(expr).inv() + matrix = twoform_to_matrix(expr) + s_fields = set() + for e in matrix: + s_fields.update(e.atoms(BaseScalarField)) + s_fields = list(s_fields) + dums = coord_sys.symbols + matrix = matrix.subs(list(zip(s_fields, dums))).inv().subs(list(zip(dums, s_fields))) + # XXX end of workaround + christoffel = [[[Add(*[matrix[i, l]*ch_1st[l, j, k] for l in indices]) + for k in indices] + for j in indices] + for i in indices] + return ImmutableDenseNDimArray(christoffel) + + +def metric_to_Riemann_components(expr): + """Return the components of the Riemann tensor expressed in a given basis. + + Given a metric it calculates the components of the Riemann tensor in the + canonical basis of the coordinate system in which the metric expression is + given. + + Examples + ======== + + >>> from sympy import exp + >>> from sympy.diffgeom.rn import R2 + >>> from sympy.diffgeom import metric_to_Riemann_components, TensorProduct + >>> TP = TensorProduct + + >>> metric_to_Riemann_components(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) + [[[[0, 0], [0, 0]], [[0, 0], [0, 0]]], [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]] + >>> non_trivial_metric = exp(2*R2.r)*TP(R2.dr, R2.dr) + \ + R2.r**2*TP(R2.dtheta, R2.dtheta) + >>> non_trivial_metric + exp(2*rho)*TensorProduct(drho, drho) + rho**2*TensorProduct(dtheta, dtheta) + >>> riemann = metric_to_Riemann_components(non_trivial_metric) + >>> riemann[0, :, :, :] + [[[0, 0], [0, 0]], [[0, exp(-2*rho)*rho], [-exp(-2*rho)*rho, 0]]] + >>> riemann[1, :, :, :] + [[[0, -1/rho], [1/rho, 0]], [[0, 0], [0, 0]]] + + """ + ch_2nd = metric_to_Christoffel_2nd(expr) + coord_sys = _find_coords(expr).pop() + indices = list(range(coord_sys.dim)) + deriv_ch = [[[[d(ch_2nd[i, j, k]) + for d in coord_sys.base_vectors()] + for k in indices] + for j in indices] + for i in indices] + riemann_a = [[[[deriv_ch[rho][sig][nu][mu] - deriv_ch[rho][sig][mu][nu] + for nu in indices] + for mu in indices] + for sig in indices] + for rho in indices] + riemann_b = [[[[Add(*[ch_2nd[rho, l, mu]*ch_2nd[l, sig, nu] - ch_2nd[rho, l, nu]*ch_2nd[l, sig, mu] for l in indices]) + for nu in indices] + for mu in indices] + for sig in indices] + for rho in indices] + riemann = [[[[riemann_a[rho][sig][mu][nu] + riemann_b[rho][sig][mu][nu] + for nu in indices] + for mu in indices] + for sig in indices] + for rho in indices] + return ImmutableDenseNDimArray(riemann) + + +def metric_to_Ricci_components(expr): + + """Return the components of the Ricci tensor expressed in a given basis. + + Given a metric it calculates the components of the Ricci tensor in the + canonical basis of the coordinate system in which the metric expression is + given. 
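+
+    The components are obtained by contracting the tensor returned by
+    ``metric_to_Riemann_components``: entry ``[i, j]`` is the sum over ``k``
+    of the Riemann component ``[k, i, k, j]``.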
+ + Examples + ======== + + >>> from sympy import exp + >>> from sympy.diffgeom.rn import R2 + >>> from sympy.diffgeom import metric_to_Ricci_components, TensorProduct + >>> TP = TensorProduct + + >>> metric_to_Ricci_components(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) + [[0, 0], [0, 0]] + >>> non_trivial_metric = exp(2*R2.r)*TP(R2.dr, R2.dr) + \ + R2.r**2*TP(R2.dtheta, R2.dtheta) + >>> non_trivial_metric + exp(2*rho)*TensorProduct(drho, drho) + rho**2*TensorProduct(dtheta, dtheta) + >>> metric_to_Ricci_components(non_trivial_metric) + [[1/rho, 0], [0, exp(-2*rho)*rho]] + + """ + riemann = metric_to_Riemann_components(expr) + coord_sys = _find_coords(expr).pop() + indices = list(range(coord_sys.dim)) + ricci = [[Add(*[riemann[k, i, k, j] for k in indices]) + for j in indices] + for i in indices] + return ImmutableDenseNDimArray(ricci) + +############################################################################### +# Classes for deprecation +############################################################################### + +class _deprecated_container: + # This class gives deprecation warning. + # When deprecated features are completely deleted, this should be removed as well. + # See https://github.com/sympy/sympy/pull/19368 + def __init__(self, message, data): + super().__init__(data) + self.message = message + + def warn(self): + sympy_deprecation_warning( + self.message, + deprecated_since_version="1.7", + active_deprecations_target="deprecated-diffgeom-mutable", + stacklevel=4 + ) + + def __iter__(self): + self.warn() + return super().__iter__() + + def __getitem__(self, key): + self.warn() + return super().__getitem__(key) + + def __contains__(self, key): + self.warn() + return super().__contains__(key) + + +class _deprecated_list(_deprecated_container, list): + pass + + +class _deprecated_dict(_deprecated_container, dict): + pass + + +# Import at end to avoid cyclic imports +from sympy.simplify.simplify import simplify diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/rn.py b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/rn.py new file mode 100644 index 0000000000000000000000000000000000000000..897c7e82bc804d260612f79c820af92632f3b281 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/rn.py @@ -0,0 +1,143 @@ +"""Predefined R^n manifolds together with common coord. systems. + +Coordinate systems are predefined as well as the transformation laws between +them. + +Coordinate functions can be accessed as attributes of the manifold (eg `R2.x`), +as attributes of the coordinate systems (eg `R2_r.x` and `R2_p.theta`), or by +using the usual `coord_sys.coord_function(index, name)` interface. 
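+
+The base vector fields (eg `R2.e_x` and `R2_p.e_theta`) and the base oneform
+fields (eg `R2.dx` and `R2_p.dtheta`) are exposed through the same kinds of
+shortcuts.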
+""" + +from typing import Any +import warnings + +from sympy.core.symbol import (Dummy, symbols) +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.functions.elementary.trigonometric import (acos, atan2, cos, sin) +from .diffgeom import Manifold, Patch, CoordSystem + +__all__ = [ + 'R2', 'R2_origin', 'relations_2d', 'R2_r', 'R2_p', + 'R3', 'R3_origin', 'relations_3d', 'R3_r', 'R3_c', 'R3_s' +] + +############################################################################### +# R2 +############################################################################### +R2: Any = Manifold('R^2', 2) + +R2_origin: Any = Patch('origin', R2) + +x, y = symbols('x y', real=True) +r, theta = symbols('rho theta', nonnegative=True) + +relations_2d = { + ('rectangular', 'polar'): [(x, y), (sqrt(x**2 + y**2), atan2(y, x))], + ('polar', 'rectangular'): [(r, theta), (r*cos(theta), r*sin(theta))], +} + +R2_r: Any = CoordSystem('rectangular', R2_origin, (x, y), relations_2d) +R2_p: Any = CoordSystem('polar', R2_origin, (r, theta), relations_2d) + +# support deprecated feature +with warnings.catch_warnings(): + warnings.simplefilter("ignore") + x, y, r, theta = symbols('x y r theta', cls=Dummy) + R2_r.connect_to(R2_p, [x, y], + [sqrt(x**2 + y**2), atan2(y, x)], + inverse=False, fill_in_gaps=False) + R2_p.connect_to(R2_r, [r, theta], + [r*cos(theta), r*sin(theta)], + inverse=False, fill_in_gaps=False) + +# Defining the basis coordinate functions and adding shortcuts for them to the +# manifold and the patch. +R2.x, R2.y = R2_origin.x, R2_origin.y = R2_r.x, R2_r.y = R2_r.coord_functions() +R2.r, R2.theta = R2_origin.r, R2_origin.theta = R2_p.r, R2_p.theta = R2_p.coord_functions() + +# Defining the basis vector fields and adding shortcuts for them to the +# manifold and the patch. +R2.e_x, R2.e_y = R2_origin.e_x, R2_origin.e_y = R2_r.e_x, R2_r.e_y = R2_r.base_vectors() +R2.e_r, R2.e_theta = R2_origin.e_r, R2_origin.e_theta = R2_p.e_r, R2_p.e_theta = R2_p.base_vectors() + +# Defining the basis oneform fields and adding shortcuts for them to the +# manifold and the patch. 
+R2.dx, R2.dy = R2_origin.dx, R2_origin.dy = R2_r.dx, R2_r.dy = R2_r.base_oneforms() +R2.dr, R2.dtheta = R2_origin.dr, R2_origin.dtheta = R2_p.dr, R2_p.dtheta = R2_p.base_oneforms() + +############################################################################### +# R3 +############################################################################### +R3: Any = Manifold('R^3', 3) + +R3_origin: Any = Patch('origin', R3) + +x, y, z = symbols('x y z', real=True) +rho, psi, r, theta, phi = symbols('rho psi r theta phi', nonnegative=True) + +relations_3d = { + ('rectangular', 'cylindrical'): [(x, y, z), + (sqrt(x**2 + y**2), atan2(y, x), z)], + ('cylindrical', 'rectangular'): [(rho, psi, z), + (rho*cos(psi), rho*sin(psi), z)], + ('rectangular', 'spherical'): [(x, y, z), + (sqrt(x**2 + y**2 + z**2), + acos(z/sqrt(x**2 + y**2 + z**2)), + atan2(y, x))], + ('spherical', 'rectangular'): [(r, theta, phi), + (r*sin(theta)*cos(phi), + r*sin(theta)*sin(phi), + r*cos(theta))], + ('cylindrical', 'spherical'): [(rho, psi, z), + (sqrt(rho**2 + z**2), + acos(z/sqrt(rho**2 + z**2)), + psi)], + ('spherical', 'cylindrical'): [(r, theta, phi), + (r*sin(theta), phi, r*cos(theta))], +} + +R3_r: Any = CoordSystem('rectangular', R3_origin, (x, y, z), relations_3d) +R3_c: Any = CoordSystem('cylindrical', R3_origin, (rho, psi, z), relations_3d) +R3_s: Any = CoordSystem('spherical', R3_origin, (r, theta, phi), relations_3d) + +# support deprecated feature +with warnings.catch_warnings(): + warnings.simplefilter("ignore") + x, y, z, rho, psi, r, theta, phi = symbols('x y z rho psi r theta phi', cls=Dummy) + R3_r.connect_to(R3_c, [x, y, z], + [sqrt(x**2 + y**2), atan2(y, x), z], + inverse=False, fill_in_gaps=False) + R3_c.connect_to(R3_r, [rho, psi, z], + [rho*cos(psi), rho*sin(psi), z], + inverse=False, fill_in_gaps=False) + ## rectangular <-> spherical + R3_r.connect_to(R3_s, [x, y, z], + [sqrt(x**2 + y**2 + z**2), acos(z/ + sqrt(x**2 + y**2 + z**2)), atan2(y, x)], + inverse=False, fill_in_gaps=False) + R3_s.connect_to(R3_r, [r, theta, phi], + [r*sin(theta)*cos(phi), r*sin( + theta)*sin(phi), r*cos(theta)], + inverse=False, fill_in_gaps=False) + ## cylindrical <-> spherical + R3_c.connect_to(R3_s, [rho, psi, z], + [sqrt(rho**2 + z**2), acos(z/sqrt(rho**2 + z**2)), psi], + inverse=False, fill_in_gaps=False) + R3_s.connect_to(R3_c, [r, theta, phi], + [r*sin(theta), phi, r*cos(theta)], + inverse=False, fill_in_gaps=False) + +# Defining the basis coordinate functions. +R3_r.x, R3_r.y, R3_r.z = R3_r.coord_functions() +R3_c.rho, R3_c.psi, R3_c.z = R3_c.coord_functions() +R3_s.r, R3_s.theta, R3_s.phi = R3_s.coord_functions() + +# Defining the basis vector fields. +R3_r.e_x, R3_r.e_y, R3_r.e_z = R3_r.base_vectors() +R3_c.e_rho, R3_c.e_psi, R3_c.e_z = R3_c.base_vectors() +R3_s.e_r, R3_s.e_theta, R3_s.e_phi = R3_s.base_vectors() + +# Defining the basis oneform fields. 
+R3_r.dx, R3_r.dy, R3_r.dz = R3_r.base_oneforms() +R3_c.drho, R3_c.dpsi, R3_c.dz = R3_c.base_oneforms() +R3_s.dr, R3_s.dtheta, R3_s.dphi = R3_s.base_oneforms() diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a892a2c3ad9615c5a72bd8056f2fb9ab39dd0c0f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_class_structure.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_class_structure.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e040e341e39f1d2eb076b981579eb5e0bac0607c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_class_structure.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_diffgeom.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_diffgeom.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71c13228735d553b01c74b2ace1fc56f259e833d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_diffgeom.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_function_diffgeom_book.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_function_diffgeom_book.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b8f9ed364a277e847a162ca004d1c05c0ca64f4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_function_diffgeom_book.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_hyperbolic_space.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_hyperbolic_space.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93ffd5da5dca2a29835d60f22e59ab0527796cd2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_hyperbolic_space.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/test_class_structure.py b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/test_class_structure.py new file mode 100644 index 0000000000000000000000000000000000000000..c649fd9fcb9acdf1f410a021966c6e0fee62cc2b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/test_class_structure.py @@ -0,0 +1,33 @@ +from sympy.diffgeom import Manifold, Patch, CoordSystem, Point +from sympy.core.function import Function +from sympy.core.symbol import symbols +from sympy.testing.pytest import warns_deprecated_sympy + +m = Manifold('m', 2) +p = Patch('p', m) +a, b = symbols('a 
b') +cs = CoordSystem('cs', p, [a, b]) +x, y = symbols('x y') +f = Function('f') +s1, s2 = cs.coord_functions() +v1, v2 = cs.base_vectors() +f1, f2 = cs.base_oneforms() + +def test_point(): + point = Point(cs, [x, y]) + assert point != Point(cs, [2, y]) + #TODO assert point.subs(x, 2) == Point(cs, [2, y]) + #TODO assert point.free_symbols == set([x, y]) + +def test_subs(): + assert s1.subs(s1, s2) == s2 + assert v1.subs(v1, v2) == v2 + assert f1.subs(f1, f2) == f2 + assert (x*f(s1) + y).subs(s1, s2) == x*f(s2) + y + assert (f(s1)*v1).subs(v1, v2) == f(s1)*v2 + assert (y*f(s1)*f1).subs(f1, f2) == y*f(s1)*f2 + +def test_deprecated(): + with warns_deprecated_sympy(): + cs_wname = CoordSystem('cs', p, ['a', 'b']) + assert cs_wname == cs_wname.func(*cs_wname.args) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/test_diffgeom.py b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/test_diffgeom.py new file mode 100644 index 0000000000000000000000000000000000000000..7c3c9265785896b8f4ffa3a2b41816ca90579758 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/test_diffgeom.py @@ -0,0 +1,342 @@ +from sympy.core import Lambda, Symbol, symbols +from sympy.diffgeom.rn import R2, R2_p, R2_r, R3_r, R3_c, R3_s, R2_origin +from sympy.diffgeom import (Manifold, Patch, CoordSystem, Commutator, Differential, TensorProduct, + WedgeProduct, BaseCovarDerivativeOp, CovarDerivativeOp, LieDerivative, + covariant_order, contravariant_order, twoform_to_matrix, metric_to_Christoffel_1st, + metric_to_Christoffel_2nd, metric_to_Riemann_components, + metric_to_Ricci_components, intcurve_diffequ, intcurve_series) +from sympy.simplify import trigsimp, simplify +from sympy.functions import sqrt, atan2, sin +from sympy.matrices import Matrix +from sympy.testing.pytest import raises, nocache_fail +from sympy.testing.pytest import warns_deprecated_sympy + +TP = TensorProduct + + +def test_coordsys_transform(): + # test inverse transforms + p, q, r, s = symbols('p q r s') + rel = {('first', 'second'): [(p, q), (q, -p)]} + R2_pq = CoordSystem('first', R2_origin, [p, q], rel) + R2_rs = CoordSystem('second', R2_origin, [r, s], rel) + r, s = R2_rs.symbols + assert R2_rs.transform(R2_pq) == Matrix([[-s], [r]]) + + # inverse transform impossible case + a, b = symbols('a b', positive=True) + rel = {('first', 'second'): [(a,), (-a,)]} + R2_a = CoordSystem('first', R2_origin, [a], rel) + R2_b = CoordSystem('second', R2_origin, [b], rel) + # This transformation is uninvertible because there is no positive a, b satisfying a = -b + with raises(NotImplementedError): + R2_b.transform(R2_a) + + # inverse transform ambiguous case + c, d = symbols('c d') + rel = {('first', 'second'): [(c,), (c**2,)]} + R2_c = CoordSystem('first', R2_origin, [c], rel) + R2_d = CoordSystem('second', R2_origin, [d], rel) + # The transform method should throw if it finds multiple inverses for a coordinate transformation. 
+ with raises(ValueError): + R2_d.transform(R2_c) + + # test indirect transformation + a, b, c, d, e, f = symbols('a, b, c, d, e, f') + rel = {('C1', 'C2'): [(a, b), (2*a, 3*b)], + ('C2', 'C3'): [(c, d), (3*c, 2*d)]} + C1 = CoordSystem('C1', R2_origin, (a, b), rel) + C2 = CoordSystem('C2', R2_origin, (c, d), rel) + C3 = CoordSystem('C3', R2_origin, (e, f), rel) + a, b = C1.symbols + c, d = C2.symbols + e, f = C3.symbols + assert C2.transform(C1) == Matrix([c/2, d/3]) + assert C1.transform(C3) == Matrix([6*a, 6*b]) + assert C3.transform(C1) == Matrix([e/6, f/6]) + assert C3.transform(C2) == Matrix([e/3, f/2]) + + a, b, c, d, e, f = symbols('a, b, c, d, e, f') + rel = {('C1', 'C2'): [(a, b), (2*a, 3*b + 1)], + ('C3', 'C2'): [(e, f), (-e - 2, 2*f)]} + C1 = CoordSystem('C1', R2_origin, (a, b), rel) + C2 = CoordSystem('C2', R2_origin, (c, d), rel) + C3 = CoordSystem('C3', R2_origin, (e, f), rel) + a, b = C1.symbols + c, d = C2.symbols + e, f = C3.symbols + assert C2.transform(C1) == Matrix([c/2, (d - 1)/3]) + assert C1.transform(C3) == Matrix([-2*a - 2, (3*b + 1)/2]) + assert C3.transform(C1) == Matrix([-e/2 - 1, (2*f - 1)/3]) + assert C3.transform(C2) == Matrix([-e - 2, 2*f]) + + # old signature uses Lambda + a, b, c, d, e, f = symbols('a, b, c, d, e, f') + rel = {('C1', 'C2'): Lambda((a, b), (2*a, 3*b + 1)), + ('C3', 'C2'): Lambda((e, f), (-e - 2, 2*f))} + C1 = CoordSystem('C1', R2_origin, (a, b), rel) + C2 = CoordSystem('C2', R2_origin, (c, d), rel) + C3 = CoordSystem('C3', R2_origin, (e, f), rel) + a, b = C1.symbols + c, d = C2.symbols + e, f = C3.symbols + assert C2.transform(C1) == Matrix([c/2, (d - 1)/3]) + assert C1.transform(C3) == Matrix([-2*a - 2, (3*b + 1)/2]) + assert C3.transform(C1) == Matrix([-e/2 - 1, (2*f - 1)/3]) + assert C3.transform(C2) == Matrix([-e - 2, 2*f]) + + +def test_R2(): + x0, y0, r0, theta0 = symbols('x0, y0, r0, theta0', real=True) + point_r = R2_r.point([x0, y0]) + point_p = R2_p.point([r0, theta0]) + + # r**2 = x**2 + y**2 + assert (R2.r**2 - R2.x**2 - R2.y**2).rcall(point_r) == 0 + assert trigsimp( (R2.r**2 - R2.x**2 - R2.y**2).rcall(point_p) ) == 0 + assert trigsimp(R2.e_r(R2.x**2 + R2.y**2).rcall(point_p).doit()) == 2*r0 + + # polar->rect->polar == Id + a, b = symbols('a b', positive=True) + m = Matrix([[a], [b]]) + + #TODO assert m == R2_r.transform(R2_p, R2_p.transform(R2_r, [a, b])).applyfunc(simplify) + assert m == R2_p.transform(R2_r, R2_r.transform(R2_p, m)).applyfunc(simplify) + + # deprecated method + with warns_deprecated_sympy(): + assert m == R2_p.coord_tuple_transform_to( + R2_r, R2_r.coord_tuple_transform_to(R2_p, m)).applyfunc(simplify) + + +def test_R3(): + a, b, c = symbols('a b c', positive=True) + m = Matrix([[a], [b], [c]]) + + assert m == R3_c.transform(R3_r, R3_r.transform(R3_c, m)).applyfunc(simplify) + #TODO assert m == R3_r.transform(R3_c, R3_c.transform(R3_r, m)).applyfunc(simplify) + assert m == R3_s.transform( + R3_r, R3_r.transform(R3_s, m)).applyfunc(simplify) + #TODO assert m == R3_r.transform(R3_s, R3_s.transform(R3_r, m)).applyfunc(simplify) + assert m == R3_s.transform( + R3_c, R3_c.transform(R3_s, m)).applyfunc(simplify) + #TODO assert m == R3_c.transform(R3_s, R3_s.transform(R3_c, m)).applyfunc(simplify) + + with warns_deprecated_sympy(): + assert m == R3_c.coord_tuple_transform_to( + R3_r, R3_r.coord_tuple_transform_to(R3_c, m)).applyfunc(simplify) + #TODO assert m == R3_r.coord_tuple_transform_to(R3_c, R3_c.coord_tuple_transform_to(R3_r, m)).applyfunc(simplify) + assert m == R3_s.coord_tuple_transform_to( + R3_r, 
R3_r.coord_tuple_transform_to(R3_s, m)).applyfunc(simplify) + #TODO assert m == R3_r.coord_tuple_transform_to(R3_s, R3_s.coord_tuple_transform_to(R3_r, m)).applyfunc(simplify) + assert m == R3_s.coord_tuple_transform_to( + R3_c, R3_c.coord_tuple_transform_to(R3_s, m)).applyfunc(simplify) + #TODO assert m == R3_c.coord_tuple_transform_to(R3_s, R3_s.coord_tuple_transform_to(R3_c, m)).applyfunc(simplify) + + +def test_CoordinateSymbol(): + x, y = R2_r.symbols + r, theta = R2_p.symbols + assert y.rewrite(R2_p) == r*sin(theta) + + +def test_point(): + x, y = symbols('x, y') + p = R2_r.point([x, y]) + assert p.free_symbols == {x, y} + assert p.coords(R2_r) == p.coords() == Matrix([x, y]) + assert p.coords(R2_p) == Matrix([sqrt(x**2 + y**2), atan2(y, x)]) + + +def test_commutator(): + assert Commutator(R2.e_x, R2.e_y) == 0 + assert Commutator(R2.x*R2.e_x, R2.x*R2.e_x) == 0 + assert Commutator(R2.x*R2.e_x, R2.x*R2.e_y) == R2.x*R2.e_y + c = Commutator(R2.e_x, R2.e_r) + assert c(R2.x) == R2.y*(R2.x**2 + R2.y**2)**(-1)*sin(R2.theta) + + +def test_differential(): + xdy = R2.x*R2.dy + dxdy = Differential(xdy) + assert xdy.rcall(None) == xdy + assert dxdy(R2.e_x, R2.e_y) == 1 + assert dxdy(R2.e_x, R2.x*R2.e_y) == R2.x + assert Differential(dxdy) == 0 + + +def test_products(): + assert TensorProduct( + R2.dx, R2.dy)(R2.e_x, R2.e_y) == R2.dx(R2.e_x)*R2.dy(R2.e_y) == 1 + assert TensorProduct(R2.dx, R2.dy)(None, R2.e_y) == R2.dx + assert TensorProduct(R2.dx, R2.dy)(R2.e_x, None) == R2.dy + assert TensorProduct(R2.dx, R2.dy)(R2.e_x) == R2.dy + assert TensorProduct(R2.x, R2.dx) == R2.x*R2.dx + assert TensorProduct( + R2.e_x, R2.e_y)(R2.x, R2.y) == R2.e_x(R2.x) * R2.e_y(R2.y) == 1 + assert TensorProduct(R2.e_x, R2.e_y)(None, R2.y) == R2.e_x + assert TensorProduct(R2.e_x, R2.e_y)(R2.x, None) == R2.e_y + assert TensorProduct(R2.e_x, R2.e_y)(R2.x) == R2.e_y + assert TensorProduct(R2.x, R2.e_x) == R2.x * R2.e_x + assert TensorProduct( + R2.dx, R2.e_y)(R2.e_x, R2.y) == R2.dx(R2.e_x) * R2.e_y(R2.y) == 1 + assert TensorProduct(R2.dx, R2.e_y)(None, R2.y) == R2.dx + assert TensorProduct(R2.dx, R2.e_y)(R2.e_x, None) == R2.e_y + assert TensorProduct(R2.dx, R2.e_y)(R2.e_x) == R2.e_y + assert TensorProduct(R2.x, R2.e_x) == R2.x * R2.e_x + assert TensorProduct( + R2.e_x, R2.dy)(R2.x, R2.e_y) == R2.e_x(R2.x) * R2.dy(R2.e_y) == 1 + assert TensorProduct(R2.e_x, R2.dy)(None, R2.e_y) == R2.e_x + assert TensorProduct(R2.e_x, R2.dy)(R2.x, None) == R2.dy + assert TensorProduct(R2.e_x, R2.dy)(R2.x) == R2.dy + assert TensorProduct(R2.e_y,R2.e_x)(R2.x**2 + R2.y**2,R2.x**2 + R2.y**2) == 4*R2.x*R2.y + + assert WedgeProduct(R2.dx, R2.dy)(R2.e_x, R2.e_y) == 1 + assert WedgeProduct(R2.e_x, R2.e_y)(R2.x, R2.y) == 1 + + +def test_lie_derivative(): + assert LieDerivative(R2.e_x, R2.y) == R2.e_x(R2.y) == 0 + assert LieDerivative(R2.e_x, R2.x) == R2.e_x(R2.x) == 1 + assert LieDerivative(R2.e_x, R2.e_x) == Commutator(R2.e_x, R2.e_x) == 0 + assert LieDerivative(R2.e_x, R2.e_r) == Commutator(R2.e_x, R2.e_r) + assert LieDerivative(R2.e_x + R2.e_y, R2.x) == 1 + assert LieDerivative( + R2.e_x, TensorProduct(R2.dx, R2.dy))(R2.e_x, R2.e_y) == 0 + + +@nocache_fail +def test_covar_deriv(): + ch = metric_to_Christoffel_2nd(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) + cvd = BaseCovarDerivativeOp(R2_r, 0, ch) + assert cvd(R2.x) == 1 + # This line fails if the cache is disabled: + assert cvd(R2.x*R2.e_x) == R2.e_x + cvd = CovarDerivativeOp(R2.x*R2.e_x, ch) + assert cvd(R2.x) == R2.x + assert cvd(R2.x*R2.e_x) == R2.x*R2.e_x + + +def test_intcurve_diffequ(): + t = 
symbols('t') + start_point = R2_r.point([1, 0]) + vector_field = -R2.y*R2.e_x + R2.x*R2.e_y + equations, init_cond = intcurve_diffequ(vector_field, t, start_point) + assert str(equations) == '[f_1(t) + Derivative(f_0(t), t), -f_0(t) + Derivative(f_1(t), t)]' + assert str(init_cond) == '[f_0(0) - 1, f_1(0)]' + equations, init_cond = intcurve_diffequ(vector_field, t, start_point, R2_p) + assert str( + equations) == '[Derivative(f_0(t), t), Derivative(f_1(t), t) - 1]' + assert str(init_cond) == '[f_0(0) - 1, f_1(0)]' + + +def test_helpers_and_coordinate_dependent(): + one_form = R2.dr + R2.dx + two_form = Differential(R2.x*R2.dr + R2.r*R2.dx) + three_form = Differential( + R2.y*two_form) + Differential(R2.x*Differential(R2.r*R2.dr)) + metric = TensorProduct(R2.dx, R2.dx) + TensorProduct(R2.dy, R2.dy) + metric_ambig = TensorProduct(R2.dx, R2.dx) + TensorProduct(R2.dr, R2.dr) + misform_a = TensorProduct(R2.dr, R2.dr) + R2.dr + misform_b = R2.dr**4 + misform_c = R2.dx*R2.dy + twoform_not_sym = TensorProduct(R2.dx, R2.dx) + TensorProduct(R2.dx, R2.dy) + twoform_not_TP = WedgeProduct(R2.dx, R2.dy) + + one_vector = R2.e_x + R2.e_y + two_vector = TensorProduct(R2.e_x, R2.e_y) + three_vector = TensorProduct(R2.e_x, R2.e_y, R2.e_x) + two_wp = WedgeProduct(R2.e_x,R2.e_y) + + assert covariant_order(one_form) == 1 + assert covariant_order(two_form) == 2 + assert covariant_order(three_form) == 3 + assert covariant_order(two_form + metric) == 2 + assert covariant_order(two_form + metric_ambig) == 2 + assert covariant_order(two_form + twoform_not_sym) == 2 + assert covariant_order(two_form + twoform_not_TP) == 2 + + assert contravariant_order(one_vector) == 1 + assert contravariant_order(two_vector) == 2 + assert contravariant_order(three_vector) == 3 + assert contravariant_order(two_vector + two_wp) == 2 + + raises(ValueError, lambda: covariant_order(misform_a)) + raises(ValueError, lambda: covariant_order(misform_b)) + raises(ValueError, lambda: covariant_order(misform_c)) + + assert twoform_to_matrix(metric) == Matrix([[1, 0], [0, 1]]) + assert twoform_to_matrix(twoform_not_sym) == Matrix([[1, 0], [1, 0]]) + assert twoform_to_matrix(twoform_not_TP) == Matrix([[0, -1], [1, 0]]) + + raises(ValueError, lambda: twoform_to_matrix(one_form)) + raises(ValueError, lambda: twoform_to_matrix(three_form)) + raises(ValueError, lambda: twoform_to_matrix(metric_ambig)) + + raises(ValueError, lambda: metric_to_Christoffel_1st(twoform_not_sym)) + raises(ValueError, lambda: metric_to_Christoffel_2nd(twoform_not_sym)) + raises(ValueError, lambda: metric_to_Riemann_components(twoform_not_sym)) + raises(ValueError, lambda: metric_to_Ricci_components(twoform_not_sym)) + + +def test_correct_arguments(): + raises(ValueError, lambda: R2.e_x(R2.e_x)) + raises(ValueError, lambda: R2.e_x(R2.dx)) + + raises(ValueError, lambda: Commutator(R2.e_x, R2.x)) + raises(ValueError, lambda: Commutator(R2.dx, R2.e_x)) + + raises(ValueError, lambda: Differential(Differential(R2.e_x))) + + raises(ValueError, lambda: R2.dx(R2.x)) + + raises(ValueError, lambda: LieDerivative(R2.dx, R2.dx)) + raises(ValueError, lambda: LieDerivative(R2.x, R2.dx)) + + raises(ValueError, lambda: CovarDerivativeOp(R2.dx, [])) + raises(ValueError, lambda: CovarDerivativeOp(R2.x, [])) + + a = Symbol('a') + raises(ValueError, lambda: intcurve_series(R2.dx, a, R2_r.point([1, 2]))) + raises(ValueError, lambda: intcurve_series(R2.x, a, R2_r.point([1, 2]))) + + raises(ValueError, lambda: intcurve_diffequ(R2.dx, a, R2_r.point([1, 2]))) + raises(ValueError, lambda: 
intcurve_diffequ(R2.x, a, R2_r.point([1, 2]))) + + raises(ValueError, lambda: contravariant_order(R2.e_x + R2.dx)) + raises(ValueError, lambda: covariant_order(R2.e_x + R2.dx)) + + raises(ValueError, lambda: contravariant_order(R2.e_x*R2.e_y)) + raises(ValueError, lambda: covariant_order(R2.dx*R2.dy)) + +def test_simplify(): + x, y = R2_r.coord_functions() + dx, dy = R2_r.base_oneforms() + ex, ey = R2_r.base_vectors() + assert simplify(x) == x + assert simplify(x*y) == x*y + assert simplify(dx*dy) == dx*dy + assert simplify(ex*ey) == ex*ey + assert ((1-x)*dx)/(1-x)**2 == dx/(1-x) + + +def test_issue_17917(): + X = R2.x*R2.e_x - R2.y*R2.e_y + Y = (R2.x**2 + R2.y**2)*R2.e_x - R2.x*R2.y*R2.e_y + assert LieDerivative(X, Y).expand() == ( + R2.x**2*R2.e_x - 3*R2.y**2*R2.e_x - R2.x*R2.y*R2.e_y) + +def test_deprecations(): + m = Manifold('M', 2) + p = Patch('P', m) + with warns_deprecated_sympy(): + CoordSystem('Car2d', p, names=['x', 'y']) + + with warns_deprecated_sympy(): + c = CoordSystem('Car2d', p, ['x', 'y']) + + with warns_deprecated_sympy(): + list(m.patches) + + with warns_deprecated_sympy(): + list(c.transforms) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/test_function_diffgeom_book.py b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/test_function_diffgeom_book.py new file mode 100644 index 0000000000000000000000000000000000000000..a7317a7d1191e61a074fcaa07e1859b11bdc0d10 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/test_function_diffgeom_book.py @@ -0,0 +1,145 @@ +from sympy.diffgeom.rn import R2, R2_p, R2_r, R3_r +from sympy.diffgeom import intcurve_series, Differential, WedgeProduct +from sympy.core import symbols, Function, Derivative +from sympy.simplify import trigsimp, simplify +from sympy.functions import sqrt, atan2, sin, cos +from sympy.matrices import Matrix + +# Most of the functionality is covered in the +# test_functional_diffgeom_ch* tests which are based on the +# example from the paper of Sussman and Wisdom. +# If they do not cover something, additional tests are added in other test +# functions. + +# From "Functional Differential Geometry" as of 2011 +# by Sussman and Wisdom. 
+ + +def test_functional_diffgeom_ch2(): + x0, y0, r0, theta0 = symbols('x0, y0, r0, theta0', real=True) + x, y = symbols('x, y', real=True) + f = Function('f') + + assert (R2_p.point_to_coords(R2_r.point([x0, y0])) == + Matrix([sqrt(x0**2 + y0**2), atan2(y0, x0)])) + assert (R2_r.point_to_coords(R2_p.point([r0, theta0])) == + Matrix([r0*cos(theta0), r0*sin(theta0)])) + + assert R2_p.jacobian(R2_r, [r0, theta0]) == Matrix( + [[cos(theta0), -r0*sin(theta0)], [sin(theta0), r0*cos(theta0)]]) + + field = f(R2.x, R2.y) + p1_in_rect = R2_r.point([x0, y0]) + p1_in_polar = R2_p.point([sqrt(x0**2 + y0**2), atan2(y0, x0)]) + assert field.rcall(p1_in_rect) == f(x0, y0) + assert field.rcall(p1_in_polar) == f(x0, y0) + + p_r = R2_r.point([x0, y0]) + p_p = R2_p.point([r0, theta0]) + assert R2.x(p_r) == x0 + assert R2.x(p_p) == r0*cos(theta0) + assert R2.r(p_p) == r0 + assert R2.r(p_r) == sqrt(x0**2 + y0**2) + assert R2.theta(p_r) == atan2(y0, x0) + + h = R2.x*R2.r**2 + R2.y**3 + assert h.rcall(p_r) == x0*(x0**2 + y0**2) + y0**3 + assert h.rcall(p_p) == r0**3*sin(theta0)**3 + r0**3*cos(theta0) + + +def test_functional_diffgeom_ch3(): + x0, y0 = symbols('x0, y0', real=True) + x, y, t = symbols('x, y, t', real=True) + f = Function('f') + b1 = Function('b1') + b2 = Function('b2') + p_r = R2_r.point([x0, y0]) + + s_field = f(R2.x, R2.y) + v_field = b1(R2.x)*R2.e_x + b2(R2.y)*R2.e_y + assert v_field.rcall(s_field).rcall(p_r).doit() == b1( + x0)*Derivative(f(x0, y0), x0) + b2(y0)*Derivative(f(x0, y0), y0) + + assert R2.e_x(R2.r**2).rcall(p_r) == 2*x0 + v = R2.e_x + 2*R2.e_y + s = R2.r**2 + 3*R2.x + assert v.rcall(s).rcall(p_r).doit() == 2*x0 + 4*y0 + 3 + + circ = -R2.y*R2.e_x + R2.x*R2.e_y + series = intcurve_series(circ, t, R2_r.point([1, 0]), coeffs=True) + series_x, series_y = zip(*series) + assert all( + [term == cos(t).taylor_term(i, t) for i, term in enumerate(series_x)]) + assert all( + [term == sin(t).taylor_term(i, t) for i, term in enumerate(series_y)]) + + +def test_functional_diffgeom_ch4(): + x0, y0, theta0 = symbols('x0, y0, theta0', real=True) + x, y, r, theta = symbols('x, y, r, theta', real=True) + r0 = symbols('r0', positive=True) + f = Function('f') + b1 = Function('b1') + b2 = Function('b2') + p_r = R2_r.point([x0, y0]) + p_p = R2_p.point([r0, theta0]) + + f_field = b1(R2.x, R2.y)*R2.dx + b2(R2.x, R2.y)*R2.dy + assert f_field.rcall(R2.e_x).rcall(p_r) == b1(x0, y0) + assert f_field.rcall(R2.e_y).rcall(p_r) == b2(x0, y0) + + s_field_r = f(R2.x, R2.y) + df = Differential(s_field_r) + assert df(R2.e_x).rcall(p_r).doit() == Derivative(f(x0, y0), x0) + assert df(R2.e_y).rcall(p_r).doit() == Derivative(f(x0, y0), y0) + + s_field_p = f(R2.r, R2.theta) + df = Differential(s_field_p) + assert trigsimp(df(R2.e_x).rcall(p_p).doit()) == ( + cos(theta0)*Derivative(f(r0, theta0), r0) - + sin(theta0)*Derivative(f(r0, theta0), theta0)/r0) + assert trigsimp(df(R2.e_y).rcall(p_p).doit()) == ( + sin(theta0)*Derivative(f(r0, theta0), r0) + + cos(theta0)*Derivative(f(r0, theta0), theta0)/r0) + + assert R2.dx(R2.e_x).rcall(p_r) == 1 + assert R2.dx(R2.e_x) == 1 + assert R2.dx(R2.e_y).rcall(p_r) == 0 + assert R2.dx(R2.e_y) == 0 + + circ = -R2.y*R2.e_x + R2.x*R2.e_y + assert R2.dx(circ).rcall(p_r).doit() == -y0 + assert R2.dy(circ).rcall(p_r) == x0 + assert R2.dr(circ).rcall(p_r) == 0 + assert simplify(R2.dtheta(circ).rcall(p_r)) == 1 + + assert (circ - R2.e_theta).rcall(s_field_r).rcall(p_r) == 0 + + +def test_functional_diffgeom_ch6(): + u0, u1, u2, v0, v1, v2, w0, w1, w2 = symbols('u0:3, v0:3, w0:3', real=True) + 
+ u = u0*R2.e_x + u1*R2.e_y + v = v0*R2.e_x + v1*R2.e_y + wp = WedgeProduct(R2.dx, R2.dy) + assert wp(u, v) == u0*v1 - u1*v0 + + u = u0*R3_r.e_x + u1*R3_r.e_y + u2*R3_r.e_z + v = v0*R3_r.e_x + v1*R3_r.e_y + v2*R3_r.e_z + w = w0*R3_r.e_x + w1*R3_r.e_y + w2*R3_r.e_z + wp = WedgeProduct(R3_r.dx, R3_r.dy, R3_r.dz) + assert wp( + u, v, w) == Matrix(3, 3, [u0, u1, u2, v0, v1, v2, w0, w1, w2]).det() + + a, b, c = symbols('a, b, c', cls=Function) + a_f = a(R3_r.x, R3_r.y, R3_r.z) + b_f = b(R3_r.x, R3_r.y, R3_r.z) + c_f = c(R3_r.x, R3_r.y, R3_r.z) + theta = a_f*R3_r.dx + b_f*R3_r.dy + c_f*R3_r.dz + dtheta = Differential(theta) + da = Differential(a_f) + db = Differential(b_f) + dc = Differential(c_f) + expr = dtheta - WedgeProduct( + da, R3_r.dx) - WedgeProduct(db, R3_r.dy) - WedgeProduct(dc, R3_r.dz) + assert expr.rcall(R3_r.e_x, R3_r.e_y) == 0 diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/test_hyperbolic_space.py b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/test_hyperbolic_space.py new file mode 100644 index 0000000000000000000000000000000000000000..48ddc7f8065f2b69bcd8eca4726a21c5901514ec --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/diffgeom/tests/test_hyperbolic_space.py @@ -0,0 +1,91 @@ +r''' +unit test describing the hyperbolic half-plane with the Poincare metric. This +is a basic model of hyperbolic geometry on the (positive) half-space + +{(x,y) \in R^2 | y > 0} + +with the Riemannian metric + +ds^2 = (dx^2 + dy^2)/y^2 + +It has constant negative scalar curvature = -2 + +https://en.wikipedia.org/wiki/Poincare_half-plane_model +''' +from sympy.matrices.dense import diag +from sympy.diffgeom import (twoform_to_matrix, + metric_to_Christoffel_1st, metric_to_Christoffel_2nd, + metric_to_Riemann_components, metric_to_Ricci_components) +import sympy.diffgeom.rn +from sympy.tensor.array import ImmutableDenseNDimArray + + +def test_H2(): + TP = sympy.diffgeom.TensorProduct + R2 = sympy.diffgeom.rn.R2 + y = R2.y + dy = R2.dy + dx = R2.dx + g = (TP(dx, dx) + TP(dy, dy))*y**(-2) + automat = twoform_to_matrix(g) + mat = diag(y**(-2), y**(-2)) + assert mat == automat + + gamma1 = metric_to_Christoffel_1st(g) + assert gamma1[0, 0, 0] == 0 + assert gamma1[0, 0, 1] == -y**(-3) + assert gamma1[0, 1, 0] == -y**(-3) + assert gamma1[0, 1, 1] == 0 + + assert gamma1[1, 1, 1] == -y**(-3) + assert gamma1[1, 1, 0] == 0 + assert gamma1[1, 0, 1] == 0 + assert gamma1[1, 0, 0] == y**(-3) + + gamma2 = metric_to_Christoffel_2nd(g) + assert gamma2[0, 0, 0] == 0 + assert gamma2[0, 0, 1] == -y**(-1) + assert gamma2[0, 1, 0] == -y**(-1) + assert gamma2[0, 1, 1] == 0 + + assert gamma2[1, 1, 1] == -y**(-1) + assert gamma2[1, 1, 0] == 0 + assert gamma2[1, 0, 1] == 0 + assert gamma2[1, 0, 0] == y**(-1) + + Rm = metric_to_Riemann_components(g) + assert Rm[0, 0, 0, 0] == 0 + assert Rm[0, 0, 0, 1] == 0 + assert Rm[0, 0, 1, 0] == 0 + assert Rm[0, 0, 1, 1] == 0 + + assert Rm[0, 1, 0, 0] == 0 + assert Rm[0, 1, 0, 1] == -y**(-2) + assert Rm[0, 1, 1, 0] == y**(-2) + assert Rm[0, 1, 1, 1] == 0 + + assert Rm[1, 0, 0, 0] == 0 + assert Rm[1, 0, 0, 1] == y**(-2) + assert Rm[1, 0, 1, 0] == -y**(-2) + assert Rm[1, 0, 1, 1] == 0 + + assert Rm[1, 1, 0, 0] == 0 + assert Rm[1, 1, 0, 1] == 0 + assert Rm[1, 1, 1, 0] == 0 + assert Rm[1, 1, 1, 1] == 0 + + Ric = metric_to_Ricci_components(g) + assert Ric[0, 0] == -y**(-2) + assert Ric[0, 1] == 0 + assert Ric[1, 0] == 0 + assert Ric[0, 0] == -y**(-2) + + assert Ric == ImmutableDenseNDimArray([-y**(-2), 0, 0, -y**(-2)], (2, 2)) + + ## 
scalar curvature is -2 + #TODO - it would be nice to have index contraction built-in + R = (Ric[0, 0] + Ric[1, 1])*y**2 + assert R == -2 + + ## Gauss curvature is -1 + assert R/2 == -1
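
The #TODO above notes that a built-in index contraction would make the scalar-curvature check less ad hoc. A minimal sketch of that contraction, kept outside the patch itself and assuming only the public diffgeom helpers the test already uses (TensorProduct, twoform_to_matrix, metric_to_Ricci_components), computes R = g^{ij} R_{ij} explicitly from the inverse metric rather than hard-coding the y**2 factor:

from sympy import simplify
from sympy.diffgeom import (TensorProduct as TP, twoform_to_matrix,
                            metric_to_Ricci_components)
from sympy.diffgeom.rn import R2

y, dx, dy = R2.y, R2.dx, R2.dy
g = (TP(dx, dx) + TP(dy, dy))*y**(-2)   # Poincare half-plane metric ds^2 = (dx^2 + dy^2)/y^2

g_mat = twoform_to_matrix(g)            # component matrix g_ij = diag(1/y**2, 1/y**2)
g_inv = g_mat.inv()                     # inverse metric g^ij = diag(y**2, y**2)
Ric = metric_to_Ricci_components(g)     # Ricci tensor components R_ij

# Scalar curvature R = g^{ij} R_{ij}, contracted explicitly over both indices.
R = simplify(sum(g_inv[i, j]*Ric[i, j] for i in range(2) for j in range(2)))
assert R == -2        # constant negative scalar curvature of the half-plane
assert R/2 == -1      # equivalently, Gauss curvature -1

Under these assumptions the contraction reproduces the hard-coded result in test_H2; it is only an illustration of the missing helper, not part of the sympy test suite added by the patch.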