diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce74a2f2c2a20dcf138beb5968ea2622a4367c6e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/diffgeom.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/diffgeom.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f54f09b3ed8315c3c63c2cb010692cb2028ca98 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/diffgeom.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/rn.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/rn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3aa183a1df99fc6fcc1702fd2cc233f2a012e9d5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/rn.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..889e5ddb9bb0db16056686dcb17605a212d723f6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_class_structure.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_class_structure.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24c15c91f8bd9424da145458444155176d7c591b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_class_structure.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_diffgeom.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_diffgeom.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6a2ae68c37f606c8e742402ad214df324c05fd1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_diffgeom.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_function_diffgeom_book.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_function_diffgeom_book.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c29f66e02313cf016f54bf5d33b5cd66af686a4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_function_diffgeom_book.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_hyperbolic_space.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_hyperbolic_space.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..c9c96497af7b936bb2d1c10cbc615a176500eae6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_hyperbolic_space.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/external/__init__.py b/env-llmeval/lib/python3.10/site-packages/sympy/external/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..549b4b96cdce0ee4d31960e89cb9dc26af0e105d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/external/__init__.py @@ -0,0 +1,20 @@ +""" +Unified place for determining if external dependencies are installed or not. + +You should import all external modules using the import_module() function. + +For example + +>>> from sympy.external import import_module +>>> numpy = import_module('numpy') + +If the resulting library is not installed, or if the installed version +is less than a given minimum version, the function will return None. +Otherwise, it will return the library. See the docstring of +import_module() for more information. + +""" + +from sympy.external.importtools import import_module + +__all__ = ['import_module'] diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/external/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/external/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8eb9ae37d9c3a698e8d05cf7fefab946d5fc0c05 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/external/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/external/__pycache__/gmpy.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/external/__pycache__/gmpy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ff7d406bc17aeae2972a9a5240ac9b6c9a6da75 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/external/__pycache__/gmpy.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/external/__pycache__/importtools.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/external/__pycache__/importtools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7636ab297618926bdedb4fae405b878e72ba12cb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/external/__pycache__/importtools.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/external/__pycache__/pythonmpq.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/external/__pycache__/pythonmpq.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3482d65b1aa69de6d1f643e2b55a2b95a50c2604 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/external/__pycache__/pythonmpq.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/external/pythonmpq.py b/env-llmeval/lib/python3.10/site-packages/sympy/external/pythonmpq.py new file mode 100644 index 0000000000000000000000000000000000000000..b8efd18a40a75b8a4f4de444b0c10d6347dbca6a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/external/pythonmpq.py @@ -0,0 +1,341 @@ +""" +PythonMPQ: Rational number type based on Python integers. + +This class is intended as a pure Python fallback for when gmpy2 is not +installed. If gmpy2 is installed then its mpq type will be used instead. 
The +mpq type is around 20x faster. We could just use the stdlib Fraction class +here but that is slower: + + from fractions import Fraction + from sympy.external.pythonmpq import PythonMPQ + nums = range(1000) + dens = range(5, 1005) + rats = [Fraction(n, d) for n, d in zip(nums, dens)] + sum(rats) # <--- 24 milliseconds + rats = [PythonMPQ(n, d) for n, d in zip(nums, dens)] + sum(rats) # <--- 7 milliseconds + +Both mpq and Fraction have some awkward features like the behaviour of +division with // and %: + + >>> from fractions import Fraction + >>> Fraction(2, 3) % Fraction(1, 4) + 1/6 + +For the QQ domain we do not want this behaviour because there should be no +remainder when dividing rational numbers. SymPy does not make use of this +aspect of mpq when gmpy2 is installed. Since this class is a fallback for that +case we do not bother implementing e.g. __mod__ so that we can be sure we +are not using it when gmpy2 is installed either. +""" + + +import operator +from math import gcd +from decimal import Decimal +from fractions import Fraction +import sys +from typing import Tuple as tTuple, Type + + +# Used for __hash__ +_PyHASH_MODULUS = sys.hash_info.modulus +_PyHASH_INF = sys.hash_info.inf + + +class PythonMPQ: + """Rational number implementation that is intended to be compatible with + gmpy2's mpq. + + Also slightly faster than fractions.Fraction. + + PythonMPQ should be treated as immutable although no effort is made to + prevent mutation (since that might slow down calculations). + """ + __slots__ = ('numerator', 'denominator') + + def __new__(cls, numerator, denominator=None): + """Construct PythonMPQ with gcd computation and checks""" + if denominator is not None: + # + # PythonMPQ(n, d): require n and d to be int and d != 0 + # + if isinstance(numerator, int) and isinstance(denominator, int): + # This is the slow part: + divisor = gcd(numerator, denominator) + numerator //= divisor + denominator //= divisor + return cls._new_check(numerator, denominator) + else: + # + # PythonMPQ(q) + # + # Here q can be PythonMPQ, int, Decimal, float, Fraction or str + # + if isinstance(numerator, int): + return cls._new(numerator, 1) + elif isinstance(numerator, PythonMPQ): + return cls._new(numerator.numerator, numerator.denominator) + + # Let Fraction handle Decimal/float conversion and str parsing + if isinstance(numerator, (Decimal, float, str)): + numerator = Fraction(numerator) + if isinstance(numerator, Fraction): + return cls._new(numerator.numerator, numerator.denominator) + # + # Reject everything else. This is more strict than mpq which allows + # things like mpq(Fraction, Fraction) or mpq(Decimal, any). The mpq + # behaviour is somewhat inconsistent so we choose to accept only a + # more strict subset of what mpq allows. 
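As an illustration of the constructor contract spelled out in the comment above, the sketch below (values follow the construction and normalisation checks in test_pythonmpq further down in this diff) shows what is accepted and how arguments are canonicalised:

    from fractions import Fraction
    from decimal import Decimal
    from sympy.external.pythonmpq import PythonMPQ

    assert (PythonMPQ(3).numerator, PythonMPQ(3).denominator) == (3, 1)
    assert PythonMPQ(12, 8) == PythonMPQ(3, 2)    # reduced by the gcd
    assert PythonMPQ(2, -3) == PythonMPQ(-2, 3)   # sign moved onto the numerator
    assert PythonMPQ(0.5) == PythonMPQ('0.5') == PythonMPQ(1, 2)
    assert PythonMPQ(Decimal('0.6')) == PythonMPQ(Fraction(3, 5))
    # Anything else (lists, a two-argument call with non-int arguments, ...)
    # raises TypeError, whereas mpq also accepts e.g. mpq(Fraction, Fraction).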
+ # + raise TypeError("PythonMPQ() requires numeric or string argument") + + @classmethod + def _new_check(cls, numerator, denominator): + """Construct PythonMPQ, check divide by zero and canonicalize signs""" + if not denominator: + raise ZeroDivisionError(f'Zero divisor {numerator}/{denominator}') + elif denominator < 0: + numerator = -numerator + denominator = -denominator + return cls._new(numerator, denominator) + + @classmethod + def _new(cls, numerator, denominator): + """Construct PythonMPQ efficiently (no checks)""" + obj = super().__new__(cls) + obj.numerator = numerator + obj.denominator = denominator + return obj + + def __int__(self): + """Convert to int (truncates towards zero)""" + p, q = self.numerator, self.denominator + if p < 0: + return -(-p//q) + return p//q + + def __float__(self): + """Convert to float (approximately)""" + return self.numerator / self.denominator + + def __bool__(self): + """True/False if nonzero/zero""" + return bool(self.numerator) + + def __eq__(self, other): + """Compare equal with PythonMPQ, int, float, Decimal or Fraction""" + if isinstance(other, PythonMPQ): + return (self.numerator == other.numerator + and self.denominator == other.denominator) + elif isinstance(other, self._compatible_types): + return self.__eq__(PythonMPQ(other)) + else: + return NotImplemented + + def __hash__(self): + """hash - same as mpq/Fraction""" + try: + dinv = pow(self.denominator, -1, _PyHASH_MODULUS) + except ValueError: + hash_ = _PyHASH_INF + else: + hash_ = hash(hash(abs(self.numerator)) * dinv) + result = hash_ if self.numerator >= 0 else -hash_ + return -2 if result == -1 else result + + def __reduce__(self): + """Deconstruct for pickling""" + return type(self), (self.numerator, self.denominator) + + def __str__(self): + """Convert to string""" + if self.denominator != 1: + return f"{self.numerator}/{self.denominator}" + else: + return f"{self.numerator}" + + def __repr__(self): + """Convert to string""" + return f"MPQ({self.numerator},{self.denominator})" + + def _cmp(self, other, op): + """Helper for lt/le/gt/ge""" + if not isinstance(other, self._compatible_types): + return NotImplemented + lhs = self.numerator * other.denominator + rhs = other.numerator * self.denominator + return op(lhs, rhs) + + def __lt__(self, other): + """self < other""" + return self._cmp(other, operator.lt) + + def __le__(self, other): + """self <= other""" + return self._cmp(other, operator.le) + + def __gt__(self, other): + """self > other""" + return self._cmp(other, operator.gt) + + def __ge__(self, other): + """self >= other""" + return self._cmp(other, operator.ge) + + def __abs__(self): + """abs(q)""" + return self._new(abs(self.numerator), self.denominator) + + def __pos__(self): + """+q""" + return self + + def __neg__(self): + """-q""" + return self._new(-self.numerator, self.denominator) + + def __add__(self, other): + """q1 + q2""" + if isinstance(other, PythonMPQ): + # + # This is much faster than the naive method used in the stdlib + # fractions module. Not sure where this method comes from + # though... 
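The method in question looks like the classic gcd-splitting addition for rationals (cf. Knuth, TAOCP Vol. 2, Sec. 4.5.1; GMP's mpq uses the same scheme): because both operands are stored in lowest terms, the provisional numerator ap*q2 + bp*q1 can share factors with the denominator only through g, so the single small gcd(p, g) finishes the reduction instead of a gcd over the full-size products. A worked instance, using the same variable names as the code above:

    from math import gcd

    # 1/6 + 1/10 through the g != 1 branch:
    ap, aq, bp, bq = 1, 6, 1, 10
    g = gcd(aq, bq)                   # 2
    q1, q2 = aq // g, bq // g         # 3, 5
    p, q = ap*q2 + bp*q1, q1*q2       # 8, 15
    g2 = gcd(p, g)                    # 2
    p, q = p // g2, q * (g // g2)     # 4, 15
    assert (p, q) == (4, 15)          # 1/6 + 1/10 == 4/15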
+ # + # Compare timings for something like: + # nums = range(1000) + # rats = [PythonMPQ(n, d) for n, d in zip(nums[:-5], nums[5:])] + # sum(rats) # <-- time this + # + ap, aq = self.numerator, self.denominator + bp, bq = other.numerator, other.denominator + g = gcd(aq, bq) + if g == 1: + p = ap*bq + aq*bp + q = bq*aq + else: + q1, q2 = aq//g, bq//g + p, q = ap*q2 + bp*q1, q1*q2 + g2 = gcd(p, g) + p, q = (p // g2), q * (g // g2) + + elif isinstance(other, int): + p = self.numerator + self.denominator * other + q = self.denominator + else: + return NotImplemented + + return self._new(p, q) + + def __radd__(self, other): + """z1 + q2""" + if isinstance(other, int): + p = self.numerator + self.denominator * other + q = self.denominator + return self._new(p, q) + else: + return NotImplemented + + def __sub__(self ,other): + """q1 - q2""" + if isinstance(other, PythonMPQ): + ap, aq = self.numerator, self.denominator + bp, bq = other.numerator, other.denominator + g = gcd(aq, bq) + if g == 1: + p = ap*bq - aq*bp + q = bq*aq + else: + q1, q2 = aq//g, bq//g + p, q = ap*q2 - bp*q1, q1*q2 + g2 = gcd(p, g) + p, q = (p // g2), q * (g // g2) + elif isinstance(other, int): + p = self.numerator - self.denominator*other + q = self.denominator + else: + return NotImplemented + + return self._new(p, q) + + def __rsub__(self, other): + """z1 - q2""" + if isinstance(other, int): + p = self.denominator * other - self.numerator + q = self.denominator + return self._new(p, q) + else: + return NotImplemented + + def __mul__(self, other): + """q1 * q2""" + if isinstance(other, PythonMPQ): + ap, aq = self.numerator, self.denominator + bp, bq = other.numerator, other.denominator + x1 = gcd(ap, bq) + x2 = gcd(bp, aq) + p, q = ((ap//x1)*(bp//x2), (aq//x2)*(bq//x1)) + elif isinstance(other, int): + x = gcd(other, self.denominator) + p = self.numerator*(other//x) + q = self.denominator//x + else: + return NotImplemented + + return self._new(p, q) + + def __rmul__(self, other): + """z1 * q2""" + if isinstance(other, int): + x = gcd(self.denominator, other) + p = self.numerator*(other//x) + q = self.denominator//x + return self._new(p, q) + else: + return NotImplemented + + def __pow__(self, exp): + """q ** z""" + p, q = self.numerator, self.denominator + + if exp < 0: + p, q, exp = q, p, -exp + + return self._new_check(p**exp, q**exp) + + def __truediv__(self, other): + """q1 / q2""" + if isinstance(other, PythonMPQ): + ap, aq = self.numerator, self.denominator + bp, bq = other.numerator, other.denominator + x1 = gcd(ap, bp) + x2 = gcd(bq, aq) + p, q = ((ap//x1)*(bq//x2), (aq//x2)*(bp//x1)) + elif isinstance(other, int): + x = gcd(other, self.numerator) + p = self.numerator//x + q = self.denominator*(other//x) + else: + return NotImplemented + + return self._new_check(p, q) + + def __rtruediv__(self, other): + """z / q""" + if isinstance(other, int): + x = gcd(self.numerator, other) + p = self.denominator*(other//x) + q = self.numerator//x + return self._new_check(p, q) + else: + return NotImplemented + + _compatible_types: tTuple[Type, ...] = () + +# +# These are the types that PythonMPQ will interoperate with for operations +# and comparisons such as ==, + etc. We define this down here so that we can +# include PythonMPQ in the list as well. 
+# +PythonMPQ._compatible_types = (PythonMPQ, int, Decimal, Fraction) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcb6612a21b448235e2f11fdd034117b67449ff4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__pycache__/test_autowrap.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__pycache__/test_autowrap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de7cae95367046e30bd2ec16b6c96914b59e2089 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__pycache__/test_autowrap.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__pycache__/test_codegen.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__pycache__/test_codegen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7914cf0fae7054314abc61cee2fef447c30729f2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__pycache__/test_codegen.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__pycache__/test_importtools.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__pycache__/test_importtools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8f536f1d9994a104fd53872927752dad1c135e6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__pycache__/test_importtools.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__pycache__/test_numpy.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__pycache__/test_numpy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aeb39d5885d7327d22b79f6115a342533a0aa115 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__pycache__/test_numpy.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__pycache__/test_pythonmpq.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__pycache__/test_pythonmpq.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f692174fae14c63e3f8d61b8ef8e1afced0add6c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__pycache__/test_pythonmpq.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__pycache__/test_scipy.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__pycache__/test_scipy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c00a487ec437a6e16e9831fdc44c7e91d8c38706 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/__pycache__/test_scipy.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/test_autowrap.py b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/test_autowrap.py new file mode 100644 index 0000000000000000000000000000000000000000..50ba10f9de911a8f9532af2c3e0cd1979cad1aa4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/test_autowrap.py @@ -0,0 +1,313 @@ +import sympy +import tempfile +import os +from sympy.core.mod import Mod +from sympy.core.relational import Eq +from sympy.core.symbol import symbols +from sympy.external import import_module +from sympy.tensor import IndexedBase, Idx +from sympy.utilities.autowrap import autowrap, ufuncify, CodeWrapError +from sympy.testing.pytest import skip + +numpy = import_module('numpy', min_module_version='1.6.1') +Cython = import_module('Cython', min_module_version='0.15.1') +f2py = import_module('numpy.f2py', import_kwargs={'fromlist': ['f2py']}) + +f2pyworks = False +if f2py: + try: + autowrap(symbols('x'), 'f95', 'f2py') + except (CodeWrapError, ImportError, OSError): + f2pyworks = False + else: + f2pyworks = True + +a, b, c = symbols('a b c') +n, m, d = symbols('n m d', integer=True) +A, B, C = symbols('A B C', cls=IndexedBase) +i = Idx('i', m) +j = Idx('j', n) +k = Idx('k', d) + + +def has_module(module): + """ + Return True if module exists, otherwise run skip(). + + module should be a string. + """ + # To give a string of the module name to skip(), this function takes a + # string. So we don't waste time running import_module() more than once, + # just map the three modules tested here in this dict. + modnames = {'numpy': numpy, 'Cython': Cython, 'f2py': f2py} + + if modnames[module]: + if module == 'f2py' and not f2pyworks: + skip("Couldn't run f2py.") + return True + skip("Couldn't import %s." % module) + +# +# test runners used by several language-backend combinations +# + +def runtest_autowrap_twice(language, backend): + f = autowrap((((a + b)/c)**5).expand(), language, backend) + g = autowrap((((a + b)/c)**4).expand(), language, backend) + + # check that autowrap updates the module name. 
Else, g gives the same as f + assert f(1, -2, 1) == -1.0 + assert g(1, -2, 1) == 1.0 + + +def runtest_autowrap_trace(language, backend): + has_module('numpy') + trace = autowrap(A[i, i], language, backend) + assert trace(numpy.eye(100)) == 100 + + +def runtest_autowrap_matrix_vector(language, backend): + has_module('numpy') + x, y = symbols('x y', cls=IndexedBase) + expr = Eq(y[i], A[i, j]*x[j]) + mv = autowrap(expr, language, backend) + + # compare with numpy's dot product + M = numpy.random.rand(10, 20) + x = numpy.random.rand(20) + y = numpy.dot(M, x) + assert numpy.sum(numpy.abs(y - mv(M, x))) < 1e-13 + + +def runtest_autowrap_matrix_matrix(language, backend): + has_module('numpy') + expr = Eq(C[i, j], A[i, k]*B[k, j]) + matmat = autowrap(expr, language, backend) + + # compare with numpy's dot product + M1 = numpy.random.rand(10, 20) + M2 = numpy.random.rand(20, 15) + M3 = numpy.dot(M1, M2) + assert numpy.sum(numpy.abs(M3 - matmat(M1, M2))) < 1e-13 + + +def runtest_ufuncify(language, backend): + has_module('numpy') + a, b, c = symbols('a b c') + fabc = ufuncify([a, b, c], a*b + c, backend=backend) + facb = ufuncify([a, c, b], a*b + c, backend=backend) + grid = numpy.linspace(-2, 2, 50) + b = numpy.linspace(-5, 4, 50) + c = numpy.linspace(-1, 1, 50) + expected = grid*b + c + numpy.testing.assert_allclose(fabc(grid, b, c), expected) + numpy.testing.assert_allclose(facb(grid, c, b), expected) + + +def runtest_issue_10274(language, backend): + expr = (a - b + c)**(13) + tmp = tempfile.mkdtemp() + f = autowrap(expr, language, backend, tempdir=tmp, + helpers=('helper', a - b + c, (a, b, c))) + assert f(1, 1, 1) == 1 + + for file in os.listdir(tmp): + if not (file.startswith("wrapped_code_") and file.endswith(".c")): + continue + + with open(tmp + '/' + file) as fil: + lines = fil.readlines() + assert lines[0] == "/******************************************************************************\n" + assert "Code generated with SymPy " + sympy.__version__ in lines[1] + assert lines[2:] == [ + " * *\n", + " * See http://www.sympy.org/ for more information. *\n", + " * *\n", + " * This file is part of 'autowrap' *\n", + " ******************************************************************************/\n", + "#include " + '"' + file[:-1]+ 'h"' + "\n", + "#include \n", + "\n", + "double helper(double a, double b, double c) {\n", + "\n", + " double helper_result;\n", + " helper_result = a - b + c;\n", + " return helper_result;\n", + "\n", + "}\n", + "\n", + "double autofunc(double a, double b, double c) {\n", + "\n", + " double autofunc_result;\n", + " autofunc_result = pow(helper(a, b, c), 13);\n", + " return autofunc_result;\n", + "\n", + "}\n", + ] + + +def runtest_issue_15337(language, backend): + has_module('numpy') + # NOTE : autowrap was originally designed to only accept an iterable for + # the kwarg "helpers", but in issue 10274 the user mistakenly thought that + # if there was only a single helper it did not need to be passed via an + # iterable that wrapped the helper tuple. There were no tests for this + # behavior so when the code was changed to accept a single tuple it broke + # the original behavior. These tests below ensure that both now work. + a, b, c, d, e = symbols('a, b, c, d, e') + expr = (a - b + c - d + e)**13 + exp_res = (1. - 2. + 3. - 4. 
+ 5.)**13 + + f = autowrap(expr, language, backend, args=(a, b, c, d, e), + helpers=('f1', a - b + c, (a, b, c))) + numpy.testing.assert_allclose(f(1, 2, 3, 4, 5), exp_res) + + f = autowrap(expr, language, backend, args=(a, b, c, d, e), + helpers=(('f1', a - b, (a, b)), ('f2', c - d, (c, d)))) + numpy.testing.assert_allclose(f(1, 2, 3, 4, 5), exp_res) + + +def test_issue_15230(): + has_module('f2py') + + x, y = symbols('x, y') + expr = Mod(x, 3.0) - Mod(y, -2.0) + f = autowrap(expr, args=[x, y], language='F95') + exp_res = float(expr.xreplace({x: 3.5, y: 2.7}).evalf()) + assert abs(f(3.5, 2.7) - exp_res) < 1e-14 + + x, y = symbols('x, y', integer=True) + expr = Mod(x, 3) - Mod(y, -2) + f = autowrap(expr, args=[x, y], language='F95') + assert f(3, 2) == expr.xreplace({x: 3, y: 2}) + +# +# tests of language-backend combinations +# + +# f2py + + +def test_wrap_twice_f95_f2py(): + has_module('f2py') + runtest_autowrap_twice('f95', 'f2py') + + +def test_autowrap_trace_f95_f2py(): + has_module('f2py') + runtest_autowrap_trace('f95', 'f2py') + + +def test_autowrap_matrix_vector_f95_f2py(): + has_module('f2py') + runtest_autowrap_matrix_vector('f95', 'f2py') + + +def test_autowrap_matrix_matrix_f95_f2py(): + has_module('f2py') + runtest_autowrap_matrix_matrix('f95', 'f2py') + + +def test_ufuncify_f95_f2py(): + has_module('f2py') + runtest_ufuncify('f95', 'f2py') + + +def test_issue_15337_f95_f2py(): + has_module('f2py') + runtest_issue_15337('f95', 'f2py') + +# Cython + + +def test_wrap_twice_c_cython(): + has_module('Cython') + runtest_autowrap_twice('C', 'cython') + + +def test_autowrap_trace_C_Cython(): + has_module('Cython') + runtest_autowrap_trace('C99', 'cython') + + +def test_autowrap_matrix_vector_C_cython(): + has_module('Cython') + runtest_autowrap_matrix_vector('C99', 'cython') + + +def test_autowrap_matrix_matrix_C_cython(): + has_module('Cython') + runtest_autowrap_matrix_matrix('C99', 'cython') + + +def test_ufuncify_C_Cython(): + has_module('Cython') + runtest_ufuncify('C99', 'cython') + + +def test_issue_10274_C_cython(): + has_module('Cython') + runtest_issue_10274('C89', 'cython') + + +def test_issue_15337_C_cython(): + has_module('Cython') + runtest_issue_15337('C89', 'cython') + + +def test_autowrap_custom_printer(): + has_module('Cython') + + from sympy.core.numbers import pi + from sympy.utilities.codegen import C99CodeGen + from sympy.printing.c import C99CodePrinter + + class PiPrinter(C99CodePrinter): + def _print_Pi(self, expr): + return "S_PI" + + printer = PiPrinter() + gen = C99CodeGen(printer=printer) + gen.preprocessor_statements.append('#include "shortpi.h"') + + expr = pi * a + + expected = ( + '#include "%s"\n' + '#include \n' + '#include "shortpi.h"\n' + '\n' + 'double autofunc(double a) {\n' + '\n' + ' double autofunc_result;\n' + ' autofunc_result = S_PI*a;\n' + ' return autofunc_result;\n' + '\n' + '}\n' + ) + + tmpdir = tempfile.mkdtemp() + # write a trivial header file to use in the generated code + with open(os.path.join(tmpdir, 'shortpi.h'), 'w') as f: + f.write('#define S_PI 3.14') + + func = autowrap(expr, backend='cython', tempdir=tmpdir, code_gen=gen) + + assert func(4.2) == 3.14 * 4.2 + + # check that the generated code is correct + for filename in os.listdir(tmpdir): + if filename.startswith('wrapped_code') and filename.endswith('.c'): + with open(os.path.join(tmpdir, filename)) as f: + lines = f.readlines() + expected = expected % filename.replace('.c', '.h') + assert ''.join(lines[7:]) == expected + + +# Numpy + +def test_ufuncify_numpy(): + # 
This test doesn't use Cython, but if Cython works, then there is a valid + # C compiler, which is needed. + has_module('Cython') + runtest_ufuncify('C99', 'numpy') diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/test_codegen.py b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/test_codegen.py new file mode 100644 index 0000000000000000000000000000000000000000..5449531a1217753c3c9d68cfec4d38852aeb0f8c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/test_codegen.py @@ -0,0 +1,379 @@ +# This tests the compilation and execution of the source code generated with +# utilities.codegen. The compilation takes place in a temporary directory that +# is removed after the test. By default the test directory is always removed, +# but this behavior can be changed by setting the environment variable +# SYMPY_TEST_CLEAN_TEMP to: +# export SYMPY_TEST_CLEAN_TEMP=always : the default behavior. +# export SYMPY_TEST_CLEAN_TEMP=success : only remove the directories of working tests. +# export SYMPY_TEST_CLEAN_TEMP=never : never remove the directories with the test code. +# When a directory is not removed, the necessary information is printed on +# screen to find the files that belong to the (failed) tests. If a test does +# not fail, py.test captures all the output and you will not see the directories +# corresponding to the successful tests. Use the --nocapture option to see all +# the output. + +# All tests below have a counterpart in utilities/test/test_codegen.py. In the +# latter file, the resulting code is compared with predefined strings, without +# compilation or execution. + +# All the generated Fortran code should conform with the Fortran 95 standard, +# and all the generated C code should be ANSI C, which facilitates the +# incorporation in various projects. The tests below assume that the binary cc +# is somewhere in the path and that it can compile ANSI C code. + +from sympy.abc import x, y, z +from sympy.external import import_module +from sympy.testing.pytest import skip +from sympy.utilities.codegen import codegen, make_routine, get_code_generator +import sys +import os +import tempfile +import subprocess + + +pyodide_js = import_module('pyodide_js') + +# templates for the main program that will test the generated code. 
+ +main_template = {} +main_template['F95'] = """ +program main + include "codegen.h" + integer :: result; + result = 0 + + %(statements)s + + call exit(result) +end program +""" + +main_template['C89'] = """ +#include "codegen.h" +#include +#include + +int main() { + int result = 0; + + %(statements)s + + return result; +} +""" +main_template['C99'] = main_template['C89'] +# templates for the numerical tests + +numerical_test_template = {} +numerical_test_template['C89'] = """ + if (fabs(%(call)s)>%(threshold)s) { + printf("Numerical validation failed: %(call)s=%%e threshold=%(threshold)s\\n", %(call)s); + result = -1; + } +""" +numerical_test_template['C99'] = numerical_test_template['C89'] + +numerical_test_template['F95'] = """ + if (abs(%(call)s)>%(threshold)s) then + write(6,"('Numerical validation failed:')") + write(6,"('%(call)s=',e15.5,'threshold=',e15.5)") %(call)s, %(threshold)s + result = -1; + end if +""" +# command sequences for supported compilers + +compile_commands = {} +compile_commands['cc'] = [ + "cc -c codegen.c -o codegen.o", + "cc -c main.c -o main.o", + "cc main.o codegen.o -lm -o test.exe" +] + +compile_commands['gfortran'] = [ + "gfortran -c codegen.f90 -o codegen.o", + "gfortran -ffree-line-length-none -c main.f90 -o main.o", + "gfortran main.o codegen.o -o test.exe" +] + +compile_commands['g95'] = [ + "g95 -c codegen.f90 -o codegen.o", + "g95 -ffree-line-length-huge -c main.f90 -o main.o", + "g95 main.o codegen.o -o test.exe" +] + +compile_commands['ifort'] = [ + "ifort -c codegen.f90 -o codegen.o", + "ifort -c main.f90 -o main.o", + "ifort main.o codegen.o -o test.exe" +] + +combinations_lang_compiler = [ + ('C89', 'cc'), + ('C99', 'cc'), + ('F95', 'ifort'), + ('F95', 'gfortran'), + ('F95', 'g95') +] + + +def try_run(commands): + """Run a series of commands and only return True if all ran fine.""" + if pyodide_js: + return False + with open(os.devnull, 'w') as null: + for command in commands: + retcode = subprocess.call(command, stdout=null, shell=True, + stderr=subprocess.STDOUT) + if retcode != 0: + return False + return True + + +def run_test(label, routines, numerical_tests, language, commands, friendly=True): + """A driver for the codegen tests. + + This driver assumes that a compiler ifort is present in the PATH and that + ifort is (at least) a Fortran 90 compiler. The generated code is written in + a temporary directory, together with a main program that validates the + generated code. The test passes when the compilation and the validation + run correctly. 
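For orientation, each entry in the numerical_tests list handed to this driver is a (fn_name, args, expected, threshold) tuple; run_test formats it into a call string of the form fn(args)-(expected), substitutes that into numerical_test_template for the chosen language, and for F95 additionally pushes the literals through fortranize_double_constants. A sketch using the first entry of test_basic_codegen below:

    entry = ("test", (1.0, 6.0, 3.0), 21.0, 1e-15)
    fn_name, args, expected, threshold = entry
    call_string = "%s(%s)-(%s)" % (fn_name, ",".join(str(a) for a in args), expected)
    assert call_string == "test(1.0,6.0,3.0)-(21.0)"
    # C89/C99 check emitted into main.c:
    #   if (fabs(test(1.0,6.0,3.0)-(21.0))>1e-15) { ... result = -1; }
    # F95 gets the same call after fortranize_double_constants:
    #   test(1.0d0,6.0d0,3.0d0)-(21.0d0), compared against 1d-15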
+ """ + + # Check input arguments before touching the file system + language = language.upper() + assert language in main_template + assert language in numerical_test_template + + # Check that environment variable makes sense + clean = os.getenv('SYMPY_TEST_CLEAN_TEMP', 'always').lower() + if clean not in ('always', 'success', 'never'): + raise ValueError("SYMPY_TEST_CLEAN_TEMP must be one of the following: 'always', 'success' or 'never'.") + + # Do all the magic to compile, run and validate the test code + # 1) prepare the temporary working directory, switch to that dir + work = tempfile.mkdtemp("_sympy_%s_test" % language, "%s_" % label) + oldwork = os.getcwd() + os.chdir(work) + + # 2) write the generated code + if friendly: + # interpret the routines as a name_expr list and call the friendly + # function codegen + codegen(routines, language, "codegen", to_files=True) + else: + code_gen = get_code_generator(language, "codegen") + code_gen.write(routines, "codegen", to_files=True) + + # 3) write a simple main program that links to the generated code, and that + # includes the numerical tests + test_strings = [] + for fn_name, args, expected, threshold in numerical_tests: + call_string = "%s(%s)-(%s)" % ( + fn_name, ",".join(str(arg) for arg in args), expected) + if language == "F95": + call_string = fortranize_double_constants(call_string) + threshold = fortranize_double_constants(str(threshold)) + test_strings.append(numerical_test_template[language] % { + "call": call_string, + "threshold": threshold, + }) + + if language == "F95": + f_name = "main.f90" + elif language.startswith("C"): + f_name = "main.c" + else: + raise NotImplementedError( + "FIXME: filename extension unknown for language: %s" % language) + + with open(f_name, "w") as f: + f.write( + main_template[language] % {'statements': "".join(test_strings)}) + + # 4) Compile and link + compiled = try_run(commands) + + # 5) Run if compiled + if compiled: + executed = try_run(["./test.exe"]) + else: + executed = False + + # 6) Clean up stuff + if clean == 'always' or (clean == 'success' and compiled and executed): + def safe_remove(filename): + if os.path.isfile(filename): + os.remove(filename) + safe_remove("codegen.f90") + safe_remove("codegen.c") + safe_remove("codegen.h") + safe_remove("codegen.o") + safe_remove("main.f90") + safe_remove("main.c") + safe_remove("main.o") + safe_remove("test.exe") + os.chdir(oldwork) + os.rmdir(work) + else: + print("TEST NOT REMOVED: %s" % work, file=sys.stderr) + os.chdir(oldwork) + + # 7) Do the assertions in the end + assert compiled, "failed to compile %s code with:\n%s" % ( + language, "\n".join(commands)) + assert executed, "failed to execute %s code from:\n%s" % ( + language, "\n".join(commands)) + + +def fortranize_double_constants(code_string): + """ + Replaces every literal float with literal doubles + """ + import re + pattern_exp = re.compile(r'\d+(\.)?\d*[eE]-?\d+') + pattern_float = re.compile(r'\d+\.\d*(?!\d*d)') + + def subs_exp(matchobj): + return re.sub('[eE]', 'd', matchobj.group(0)) + + def subs_float(matchobj): + return "%sd0" % matchobj.group(0) + + code_string = pattern_exp.sub(subs_exp, code_string) + code_string = pattern_float.sub(subs_float, code_string) + + return code_string + + +def is_feasible(language, commands): + # This test should always work, otherwise the compiler is not present. 
+ routine = make_routine("test", x) + numerical_tests = [ + ("test", ( 1.0,), 1.0, 1e-15), + ("test", (-1.0,), -1.0, 1e-15), + ] + try: + run_test("is_feasible", [routine], numerical_tests, language, commands, + friendly=False) + return True + except AssertionError: + return False + +valid_lang_commands = [] +invalid_lang_compilers = [] +for lang, compiler in combinations_lang_compiler: + commands = compile_commands[compiler] + if is_feasible(lang, commands): + valid_lang_commands.append((lang, commands)) + else: + invalid_lang_compilers.append((lang, compiler)) + +# We test all language-compiler combinations, just to report what is skipped + +def test_C89_cc(): + if ("C89", 'cc') in invalid_lang_compilers: + skip("`cc' command didn't work as expected (C89)") + + +def test_C99_cc(): + if ("C99", 'cc') in invalid_lang_compilers: + skip("`cc' command didn't work as expected (C99)") + + +def test_F95_ifort(): + if ("F95", 'ifort') in invalid_lang_compilers: + skip("`ifort' command didn't work as expected") + + +def test_F95_gfortran(): + if ("F95", 'gfortran') in invalid_lang_compilers: + skip("`gfortran' command didn't work as expected") + + +def test_F95_g95(): + if ("F95", 'g95') in invalid_lang_compilers: + skip("`g95' command didn't work as expected") + +# Here comes the actual tests + + +def test_basic_codegen(): + numerical_tests = [ + ("test", (1.0, 6.0, 3.0), 21.0, 1e-15), + ("test", (-1.0, 2.0, -2.5), -2.5, 1e-15), + ] + name_expr = [("test", (x + y)*z)] + for lang, commands in valid_lang_commands: + run_test("basic_codegen", name_expr, numerical_tests, lang, commands) + + +def test_intrinsic_math1_codegen(): + # not included: log10 + from sympy.core.evalf import N + from sympy.functions import ln + from sympy.functions.elementary.exponential import log + from sympy.functions.elementary.hyperbolic import (cosh, sinh, tanh) + from sympy.functions.elementary.integers import (ceiling, floor) + from sympy.functions.elementary.miscellaneous import sqrt + from sympy.functions.elementary.trigonometric import (acos, asin, atan, cos, sin, tan) + name_expr = [ + ("test_fabs", abs(x)), + ("test_acos", acos(x)), + ("test_asin", asin(x)), + ("test_atan", atan(x)), + ("test_cos", cos(x)), + ("test_cosh", cosh(x)), + ("test_log", log(x)), + ("test_ln", ln(x)), + ("test_sin", sin(x)), + ("test_sinh", sinh(x)), + ("test_sqrt", sqrt(x)), + ("test_tan", tan(x)), + ("test_tanh", tanh(x)), + ] + numerical_tests = [] + for name, expr in name_expr: + for xval in 0.2, 0.5, 0.8: + expected = N(expr.subs(x, xval)) + numerical_tests.append((name, (xval,), expected, 1e-14)) + for lang, commands in valid_lang_commands: + if lang.startswith("C"): + name_expr_C = [("test_floor", floor(x)), ("test_ceil", ceiling(x))] + else: + name_expr_C = [] + run_test("intrinsic_math1", name_expr + name_expr_C, + numerical_tests, lang, commands) + + +def test_instrinsic_math2_codegen(): + # not included: frexp, ldexp, modf, fmod + from sympy.core.evalf import N + from sympy.functions.elementary.trigonometric import atan2 + name_expr = [ + ("test_atan2", atan2(x, y)), + ("test_pow", x**y), + ] + numerical_tests = [] + for name, expr in name_expr: + for xval, yval in (0.2, 1.3), (0.5, -0.2), (0.8, 0.8): + expected = N(expr.subs(x, xval).subs(y, yval)) + numerical_tests.append((name, (xval, yval), expected, 1e-14)) + for lang, commands in valid_lang_commands: + run_test("intrinsic_math2", name_expr, numerical_tests, lang, commands) + + +def test_complicated_codegen(): + from sympy.core.evalf import N + from 
sympy.functions.elementary.trigonometric import (cos, sin, tan) + name_expr = [ + ("test1", ((sin(x) + cos(y) + tan(z))**7).expand()), + ("test2", cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))))), + ] + numerical_tests = [] + for name, expr in name_expr: + for xval, yval, zval in (0.2, 1.3, -0.3), (0.5, -0.2, 0.0), (0.8, 2.1, 0.8): + expected = N(expr.subs(x, xval).subs(y, yval).subs(z, zval)) + numerical_tests.append((name, (xval, yval, zval), expected, 1e-12)) + for lang, commands in valid_lang_commands: + run_test( + "complicated_codegen", name_expr, numerical_tests, lang, commands) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/test_importtools.py b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/test_importtools.py new file mode 100644 index 0000000000000000000000000000000000000000..0b954070c179282ed2bcf5735d802c5f22a3a261 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/test_importtools.py @@ -0,0 +1,40 @@ +from sympy.external import import_module +from sympy.testing.pytest import warns + +# fixes issue that arose in addressing issue 6533 +def test_no_stdlib_collections(): + ''' + make sure we get the right collections when it is not part of a + larger list + ''' + import collections + matplotlib = import_module('matplotlib', + import_kwargs={'fromlist': ['cm', 'collections']}, + min_module_version='1.1.0', catch=(RuntimeError,)) + if matplotlib: + assert collections != matplotlib.collections + +def test_no_stdlib_collections2(): + ''' + make sure we get the right collections when it is not part of a + larger list + ''' + import collections + matplotlib = import_module('matplotlib', + import_kwargs={'fromlist': ['collections']}, + min_module_version='1.1.0', catch=(RuntimeError,)) + if matplotlib: + assert collections != matplotlib.collections + +def test_no_stdlib_collections3(): + '''make sure we get the right collections with no catch''' + import collections + matplotlib = import_module('matplotlib', + import_kwargs={'fromlist': ['cm', 'collections']}, + min_module_version='1.1.0') + if matplotlib: + assert collections != matplotlib.collections + +def test_min_module_version_python3_basestring_error(): + with warns(UserWarning): + import_module('mpmath', min_module_version='1000.0.1') diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/test_numpy.py b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/test_numpy.py new file mode 100644 index 0000000000000000000000000000000000000000..2cd3f4bbadfe38f64080ea3088c8e1e7f92ab46c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/test_numpy.py @@ -0,0 +1,333 @@ +# This testfile tests SymPy <-> NumPy compatibility + +# Don't test any SymPy features here. Just pure interaction with NumPy. +# Always write regular SymPy tests for anything, that can be tested in pure +# Python (without numpy). 
Here we test everything, that a user may need when +# using SymPy with NumPy +from sympy.external.importtools import version_tuple +from sympy.external import import_module + +numpy = import_module('numpy') +if numpy: + array, matrix, ndarray = numpy.array, numpy.matrix, numpy.ndarray +else: + #bin/test will not execute any tests now + disabled = True + + +from sympy.core.numbers import (Float, Integer, Rational) +from sympy.core.symbol import (Symbol, symbols) +from sympy.functions.elementary.trigonometric import sin +from sympy.matrices.dense import (Matrix, list2numpy, matrix2numpy, symarray) +from sympy.utilities.lambdify import lambdify +import sympy + +import mpmath +from sympy.abc import x, y, z +from sympy.utilities.decorator import conserve_mpmath_dps +from sympy.utilities.exceptions import ignore_warnings +from sympy.testing.pytest import raises + + +# first, systematically check, that all operations are implemented and don't +# raise an exception + + +def test_systematic_basic(): + def s(sympy_object, numpy_array): + _ = [sympy_object + numpy_array, + numpy_array + sympy_object, + sympy_object - numpy_array, + numpy_array - sympy_object, + sympy_object * numpy_array, + numpy_array * sympy_object, + sympy_object / numpy_array, + numpy_array / sympy_object, + sympy_object ** numpy_array, + numpy_array ** sympy_object] + x = Symbol("x") + y = Symbol("y") + sympy_objs = [ + Rational(2, 3), + Float("1.3"), + x, + y, + pow(x, y)*y, + Integer(5), + Float(5.5), + ] + numpy_objs = [ + array([1]), + array([3, 8, -1]), + array([x, x**2, Rational(5)]), + array([x/y*sin(y), 5, Rational(5)]), + ] + for x in sympy_objs: + for y in numpy_objs: + s(x, y) + + +# now some random tests, that test particular problems and that also +# check that the results of the operations are correct + +def test_basics(): + one = Rational(1) + zero = Rational(0) + assert array(1) == array(one) + assert array([one]) == array([one]) + assert array([x]) == array([x]) + assert array(x) == array(Symbol("x")) + assert array(one + x) == array(1 + x) + + X = array([one, zero, zero]) + assert (X == array([one, zero, zero])).all() + assert (X == array([one, 0, 0])).all() + + +def test_arrays(): + one = Rational(1) + zero = Rational(0) + X = array([one, zero, zero]) + Y = one*X + X = array([Symbol("a") + Rational(1, 2)]) + Y = X + X + assert Y == array([1 + 2*Symbol("a")]) + Y = Y + 1 + assert Y == array([2 + 2*Symbol("a")]) + Y = X - X + assert Y == array([0]) + + +def test_conversion1(): + a = list2numpy([x**2, x]) + #looks like an array? 
+ assert isinstance(a, ndarray) + assert a[0] == x**2 + assert a[1] == x + assert len(a) == 2 + #yes, it's the array + + +def test_conversion2(): + a = 2*list2numpy([x**2, x]) + b = list2numpy([2*x**2, 2*x]) + assert (a == b).all() + + one = Rational(1) + zero = Rational(0) + X = list2numpy([one, zero, zero]) + Y = one*X + X = list2numpy([Symbol("a") + Rational(1, 2)]) + Y = X + X + assert Y == array([1 + 2*Symbol("a")]) + Y = Y + 1 + assert Y == array([2 + 2*Symbol("a")]) + Y = X - X + assert Y == array([0]) + + +def test_list2numpy(): + assert (array([x**2, x]) == list2numpy([x**2, x])).all() + + +def test_Matrix1(): + m = Matrix([[x, x**2], [5, 2/x]]) + assert (array(m.subs(x, 2)) == array([[2, 4], [5, 1]])).all() + m = Matrix([[sin(x), x**2], [5, 2/x]]) + assert (array(m.subs(x, 2)) == array([[sin(2), 4], [5, 1]])).all() + + +def test_Matrix2(): + m = Matrix([[x, x**2], [5, 2/x]]) + with ignore_warnings(PendingDeprecationWarning): + assert (matrix(m.subs(x, 2)) == matrix([[2, 4], [5, 1]])).all() + m = Matrix([[sin(x), x**2], [5, 2/x]]) + with ignore_warnings(PendingDeprecationWarning): + assert (matrix(m.subs(x, 2)) == matrix([[sin(2), 4], [5, 1]])).all() + + +def test_Matrix3(): + a = array([[2, 4], [5, 1]]) + assert Matrix(a) == Matrix([[2, 4], [5, 1]]) + assert Matrix(a) != Matrix([[2, 4], [5, 2]]) + a = array([[sin(2), 4], [5, 1]]) + assert Matrix(a) == Matrix([[sin(2), 4], [5, 1]]) + assert Matrix(a) != Matrix([[sin(0), 4], [5, 1]]) + + +def test_Matrix4(): + with ignore_warnings(PendingDeprecationWarning): + a = matrix([[2, 4], [5, 1]]) + assert Matrix(a) == Matrix([[2, 4], [5, 1]]) + assert Matrix(a) != Matrix([[2, 4], [5, 2]]) + with ignore_warnings(PendingDeprecationWarning): + a = matrix([[sin(2), 4], [5, 1]]) + assert Matrix(a) == Matrix([[sin(2), 4], [5, 1]]) + assert Matrix(a) != Matrix([[sin(0), 4], [5, 1]]) + + +def test_Matrix_sum(): + M = Matrix([[1, 2, 3], [x, y, x], [2*y, -50, z*x]]) + with ignore_warnings(PendingDeprecationWarning): + m = matrix([[2, 3, 4], [x, 5, 6], [x, y, z**2]]) + assert M + m == Matrix([[3, 5, 7], [2*x, y + 5, x + 6], [2*y + x, y - 50, z*x + z**2]]) + assert m + M == Matrix([[3, 5, 7], [2*x, y + 5, x + 6], [2*y + x, y - 50, z*x + z**2]]) + assert M + m == M.add(m) + + +def test_Matrix_mul(): + M = Matrix([[1, 2, 3], [x, y, x]]) + with ignore_warnings(PendingDeprecationWarning): + m = matrix([[2, 4], [x, 6], [x, z**2]]) + assert M*m == Matrix([ + [ 2 + 5*x, 16 + 3*z**2], + [2*x + x*y + x**2, 4*x + 6*y + x*z**2], + ]) + + assert m*M == Matrix([ + [ 2 + 4*x, 4 + 4*y, 6 + 4*x], + [ 7*x, 2*x + 6*y, 9*x], + [x + x*z**2, 2*x + y*z**2, 3*x + x*z**2], + ]) + a = array([2]) + assert a[0] * M == 2 * M + assert M * a[0] == 2 * M + + +def test_Matrix_array(): + class matarray: + def __array__(self): + from numpy import array + return array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + matarr = matarray() + assert Matrix(matarr) == Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + + +def test_matrix2numpy(): + a = matrix2numpy(Matrix([[1, x**2], [3*sin(x), 0]])) + assert isinstance(a, ndarray) + assert a.shape == (2, 2) + assert a[0, 0] == 1 + assert a[0, 1] == x**2 + assert a[1, 0] == 3*sin(x) + assert a[1, 1] == 0 + + +def test_matrix2numpy_conversion(): + a = Matrix([[1, 2, sin(x)], [x**2, x, Rational(1, 2)]]) + b = array([[1, 2, sin(x)], [x**2, x, Rational(1, 2)]]) + assert (matrix2numpy(a) == b).all() + assert matrix2numpy(a).dtype == numpy.dtype('object') + + c = matrix2numpy(Matrix([[1, 2], [10, 20]]), dtype='int8') + d = matrix2numpy(Matrix([[1, 2], [10, 20]]), 
dtype='float64') + assert c.dtype == numpy.dtype('int8') + assert d.dtype == numpy.dtype('float64') + + +def test_issue_3728(): + assert (Rational(1, 2)*array([2*x, 0]) == array([x, 0])).all() + assert (Rational(1, 2) + array( + [2*x, 0]) == array([2*x + Rational(1, 2), Rational(1, 2)])).all() + assert (Float("0.5")*array([2*x, 0]) == array([Float("1.0")*x, 0])).all() + assert (Float("0.5") + array( + [2*x, 0]) == array([2*x + Float("0.5"), Float("0.5")])).all() + + +@conserve_mpmath_dps +def test_lambdify(): + mpmath.mp.dps = 16 + sin02 = mpmath.mpf("0.198669330795061215459412627") + f = lambdify(x, sin(x), "numpy") + prec = 1e-15 + assert -prec < f(0.2) - sin02 < prec + + # if this succeeds, it can't be a numpy function + + if version_tuple(numpy.__version__) >= version_tuple('1.17'): + with raises(TypeError): + f(x) + else: + with raises(AttributeError): + f(x) + + +def test_lambdify_matrix(): + f = lambdify(x, Matrix([[x, 2*x], [1, 2]]), [{'ImmutableMatrix': numpy.array}, "numpy"]) + assert (f(1) == array([[1, 2], [1, 2]])).all() + + +def test_lambdify_matrix_multi_input(): + M = sympy.Matrix([[x**2, x*y, x*z], + [y*x, y**2, y*z], + [z*x, z*y, z**2]]) + f = lambdify((x, y, z), M, [{'ImmutableMatrix': numpy.array}, "numpy"]) + + xh, yh, zh = 1.0, 2.0, 3.0 + expected = array([[xh**2, xh*yh, xh*zh], + [yh*xh, yh**2, yh*zh], + [zh*xh, zh*yh, zh**2]]) + actual = f(xh, yh, zh) + assert numpy.allclose(actual, expected) + + +def test_lambdify_matrix_vec_input(): + X = sympy.DeferredVector('X') + M = Matrix([ + [X[0]**2, X[0]*X[1], X[0]*X[2]], + [X[1]*X[0], X[1]**2, X[1]*X[2]], + [X[2]*X[0], X[2]*X[1], X[2]**2]]) + f = lambdify(X, M, [{'ImmutableMatrix': numpy.array}, "numpy"]) + + Xh = array([1.0, 2.0, 3.0]) + expected = array([[Xh[0]**2, Xh[0]*Xh[1], Xh[0]*Xh[2]], + [Xh[1]*Xh[0], Xh[1]**2, Xh[1]*Xh[2]], + [Xh[2]*Xh[0], Xh[2]*Xh[1], Xh[2]**2]]) + actual = f(Xh) + assert numpy.allclose(actual, expected) + + +def test_lambdify_transl(): + from sympy.utilities.lambdify import NUMPY_TRANSLATIONS + for sym, mat in NUMPY_TRANSLATIONS.items(): + assert sym in sympy.__dict__ + assert mat in numpy.__dict__ + + +def test_symarray(): + """Test creation of numpy arrays of SymPy symbols.""" + + import numpy as np + import numpy.testing as npt + + syms = symbols('_0,_1,_2') + s1 = symarray("", 3) + s2 = symarray("", 3) + npt.assert_array_equal(s1, np.array(syms, dtype=object)) + assert s1[0] == s2[0] + + a = symarray('a', 3) + b = symarray('b', 3) + assert not(a[0] == b[0]) + + asyms = symbols('a_0,a_1,a_2') + npt.assert_array_equal(a, np.array(asyms, dtype=object)) + + # Multidimensional checks + a2d = symarray('a', (2, 3)) + assert a2d.shape == (2, 3) + a00, a12 = symbols('a_0_0,a_1_2') + assert a2d[0, 0] == a00 + assert a2d[1, 2] == a12 + + a3d = symarray('a', (2, 3, 2)) + assert a3d.shape == (2, 3, 2) + a000, a120, a121 = symbols('a_0_0_0,a_1_2_0,a_1_2_1') + assert a3d[0, 0, 0] == a000 + assert a3d[1, 2, 0] == a120 + assert a3d[1, 2, 1] == a121 + + +def test_vectorize(): + assert (numpy.vectorize( + sin)([1, 2, 3]) == numpy.array([sin(1), sin(2), sin(3)])).all() diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/test_pythonmpq.py b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/test_pythonmpq.py new file mode 100644 index 0000000000000000000000000000000000000000..137cfdf5c858544f0811ae666f000cfb368787a0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/test_pythonmpq.py @@ -0,0 +1,176 @@ +""" +test_pythonmpq.py + +Test the PythonMPQ 
class for consistency with gmpy2's mpq type. If gmpy2 is +installed run the same tests for both. +""" +from fractions import Fraction +from decimal import Decimal +import pickle +from typing import Callable, List, Tuple, Type + +from sympy.testing.pytest import raises + +from sympy.external.pythonmpq import PythonMPQ + +# +# If gmpy2 is installed then run the tests for both mpq and PythonMPQ. +# That should ensure consistency between the implementation here and mpq. +# +rational_types: List[Tuple[Callable, Type, Callable, Type]] +rational_types = [(PythonMPQ, PythonMPQ, int, int)] +try: + from gmpy2 import mpq, mpz + rational_types.append((mpq, type(mpq(1)), mpz, type(mpz(1)))) +except ImportError: + pass + + +def test_PythonMPQ(): + # + # Test PythonMPQ and also mpq if gmpy/gmpy2 is installed. + # + for Q, TQ, Z, TZ in rational_types: + + def check_Q(q): + assert isinstance(q, TQ) + assert isinstance(q.numerator, TZ) + assert isinstance(q.denominator, TZ) + return q.numerator, q.denominator + + # Check construction from different types + assert check_Q(Q(3)) == (3, 1) + assert check_Q(Q(3, 5)) == (3, 5) + assert check_Q(Q(Q(3, 5))) == (3, 5) + assert check_Q(Q(0.5)) == (1, 2) + assert check_Q(Q('0.5')) == (1, 2) + assert check_Q(Q(Fraction(3, 5))) == (3, 5) + + # https://github.com/aleaxit/gmpy/issues/327 + if Q is PythonMPQ: + assert check_Q(Q(Decimal('0.6'))) == (3, 5) + + # Invalid types + raises(TypeError, lambda: Q([])) + raises(TypeError, lambda: Q([], [])) + + # Check normalisation of signs + assert check_Q(Q(2, 3)) == (2, 3) + assert check_Q(Q(-2, 3)) == (-2, 3) + assert check_Q(Q(2, -3)) == (-2, 3) + assert check_Q(Q(-2, -3)) == (2, 3) + + # Check gcd calculation + assert check_Q(Q(12, 8)) == (3, 2) + + # __int__/__float__ + assert int(Q(5, 3)) == 1 + assert int(Q(-5, 3)) == -1 + assert float(Q(5, 2)) == 2.5 + assert float(Q(-5, 2)) == -2.5 + + # __str__/__repr__ + assert str(Q(2, 1)) == "2" + assert str(Q(1, 2)) == "1/2" + if Q is PythonMPQ: + assert repr(Q(2, 1)) == "MPQ(2,1)" + assert repr(Q(1, 2)) == "MPQ(1,2)" + else: + assert repr(Q(2, 1)) == "mpq(2,1)" + assert repr(Q(1, 2)) == "mpq(1,2)" + + # __bool__ + assert bool(Q(1, 2)) is True + assert bool(Q(0)) is False + + # __eq__/__ne__ + assert (Q(2, 3) == Q(2, 3)) is True + assert (Q(2, 3) == Q(2, 5)) is False + assert (Q(2, 3) != Q(2, 3)) is False + assert (Q(2, 3) != Q(2, 5)) is True + + # __hash__ + assert hash(Q(3, 5)) == hash(Fraction(3, 5)) + + # __reduce__ + q = Q(2, 3) + assert pickle.loads(pickle.dumps(q)) == q + + # __ge__/__gt__/__le__/__lt__ + assert (Q(1, 3) < Q(2, 3)) is True + assert (Q(2, 3) < Q(2, 3)) is False + assert (Q(2, 3) < Q(1, 3)) is False + assert (Q(-2, 3) < Q(1, 3)) is True + assert (Q(1, 3) < Q(-2, 3)) is False + + assert (Q(1, 3) <= Q(2, 3)) is True + assert (Q(2, 3) <= Q(2, 3)) is True + assert (Q(2, 3) <= Q(1, 3)) is False + assert (Q(-2, 3) <= Q(1, 3)) is True + assert (Q(1, 3) <= Q(-2, 3)) is False + + assert (Q(1, 3) > Q(2, 3)) is False + assert (Q(2, 3) > Q(2, 3)) is False + assert (Q(2, 3) > Q(1, 3)) is True + assert (Q(-2, 3) > Q(1, 3)) is False + assert (Q(1, 3) > Q(-2, 3)) is True + + assert (Q(1, 3) >= Q(2, 3)) is False + assert (Q(2, 3) >= Q(2, 3)) is True + assert (Q(2, 3) >= Q(1, 3)) is True + assert (Q(-2, 3) >= Q(1, 3)) is False + assert (Q(1, 3) >= Q(-2, 3)) is True + + # __abs__/__pos__/__neg__ + assert abs(Q(2, 3)) == abs(Q(-2, 3)) == Q(2, 3) + assert +Q(2, 3) == Q(2, 3) + assert -Q(2, 3) == Q(-2, 3) + + # __add__/__radd__ + assert Q(2, 3) + Q(5, 7) == Q(29, 21) + assert Q(2, 
3) + 1 == Q(5, 3) + assert 1 + Q(2, 3) == Q(5, 3) + raises(TypeError, lambda: [] + Q(1)) + raises(TypeError, lambda: Q(1) + []) + + # __sub__/__rsub__ + assert Q(2, 3) - Q(5, 7) == Q(-1, 21) + assert Q(2, 3) - 1 == Q(-1, 3) + assert 1 - Q(2, 3) == Q(1, 3) + raises(TypeError, lambda: [] - Q(1)) + raises(TypeError, lambda: Q(1) - []) + + # __mul__/__rmul__ + assert Q(2, 3) * Q(5, 7) == Q(10, 21) + assert Q(2, 3) * 1 == Q(2, 3) + assert 1 * Q(2, 3) == Q(2, 3) + raises(TypeError, lambda: [] * Q(1)) + raises(TypeError, lambda: Q(1) * []) + + # __pow__/__rpow__ + assert Q(2, 3) ** 2 == Q(4, 9) + assert Q(2, 3) ** 1 == Q(2, 3) + assert Q(-2, 3) ** 2 == Q(4, 9) + assert Q(-2, 3) ** -1 == Q(-3, 2) + if Q is PythonMPQ: + raises(TypeError, lambda: 1 ** Q(2, 3)) + raises(TypeError, lambda: Q(1, 4) ** Q(1, 2)) + raises(TypeError, lambda: [] ** Q(1)) + raises(TypeError, lambda: Q(1) ** []) + + # __div__/__rdiv__ + assert Q(2, 3) / Q(5, 7) == Q(14, 15) + assert Q(2, 3) / 1 == Q(2, 3) + assert 1 / Q(2, 3) == Q(3, 2) + raises(TypeError, lambda: [] / Q(1)) + raises(TypeError, lambda: Q(1) / []) + raises(ZeroDivisionError, lambda: Q(1, 2) / Q(0)) + + # __divmod__ + if Q is PythonMPQ: + raises(TypeError, lambda: Q(2, 3) // Q(1, 3)) + raises(TypeError, lambda: Q(2, 3) % Q(1, 3)) + raises(TypeError, lambda: 1 // Q(1, 3)) + raises(TypeError, lambda: 1 % Q(1, 3)) + raises(TypeError, lambda: Q(2, 3) // 1) + raises(TypeError, lambda: Q(2, 3) % 1) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/test_scipy.py b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/test_scipy.py new file mode 100644 index 0000000000000000000000000000000000000000..3746d1a311eb68bb1af16e18ab152c7236b42bb5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/external/tests/test_scipy.py @@ -0,0 +1,35 @@ +# This testfile tests SymPy <-> SciPy compatibility + +# Don't test any SymPy features here. Just pure interaction with SciPy. +# Always write regular SymPy tests for anything, that can be tested in pure +# Python (without scipy). Here we test everything, that a user may need when +# using SymPy with SciPy + +from sympy.external import import_module + +scipy = import_module('scipy') +if not scipy: + #bin/test will not execute any tests now + disabled = True + +from sympy.functions.special.bessel import jn_zeros + + +def eq(a, b, tol=1e-6): + for x, y in zip(a, b): + if not (abs(x - y) < tol): + return False + return True + + +def test_jn_zeros(): + assert eq(jn_zeros(0, 4, method="scipy"), + [3.141592, 6.283185, 9.424777, 12.566370]) + assert eq(jn_zeros(1, 4, method="scipy"), + [4.493409, 7.725251, 10.904121, 14.066193]) + assert eq(jn_zeros(2, 4, method="scipy"), + [5.763459, 9.095011, 12.322940, 15.514603]) + assert eq(jn_zeros(3, 4, method="scipy"), + [6.987932, 10.417118, 13.698023, 16.923621]) + assert eq(jn_zeros(4, 4, method="scipy"), + [8.182561, 11.704907, 15.039664, 18.301255]) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/__init__.py b/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3a84b7517819bb2fc9886274e09d955a74cabca1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/__init__.py @@ -0,0 +1,8 @@ +""" +Sandbox module of SymPy. + +This module contains experimental code, use at your own risk! + +There is no warranty that this code will still be located here in future +versions of SymPy. 
+""" diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa4da2ac93b640927b309d62b898b4806ab06db7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/__pycache__/indexed_integrals.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/__pycache__/indexed_integrals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c531e6dd3f82c8705d52d69add2416b0548c31a4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/__pycache__/indexed_integrals.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/indexed_integrals.py b/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/indexed_integrals.py new file mode 100644 index 0000000000000000000000000000000000000000..c0c17d141448b5a71cb814bff76a710a5bd43f88 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/indexed_integrals.py @@ -0,0 +1,72 @@ +from sympy.tensor import Indexed +from sympy.core.containers import Tuple +from sympy.core.symbol import Dummy +from sympy.core.sympify import sympify +from sympy.integrals.integrals import Integral + + +class IndexedIntegral(Integral): + """ + Experimental class to test integration by indexed variables. + + Usage is analogue to ``Integral``, it simply adds awareness of + integration over indices. + + Contraction of non-identical index symbols referring to the same + ``IndexedBase`` is not yet supported. 
+ + Examples + ======== + + >>> from sympy.sandbox.indexed_integrals import IndexedIntegral + >>> from sympy import IndexedBase, symbols + >>> A = IndexedBase('A') + >>> i, j = symbols('i j', integer=True) + >>> ii = IndexedIntegral(A[i], A[i]) + >>> ii + Integral(_A[i], _A[i]) + >>> ii.doit() + A[i]**2/2 + + If the indices are different, indexed objects are considered to be + different variables: + + >>> i2 = IndexedIntegral(A[j], A[i]) + >>> i2 + Integral(A[j], _A[i]) + >>> i2.doit() + A[i]*A[j] + """ + + def __new__(cls, function, *limits, **assumptions): + repl, limits = IndexedIntegral._indexed_process_limits(limits) + function = sympify(function) + function = function.xreplace(repl) + obj = Integral.__new__(cls, function, *limits, **assumptions) + obj._indexed_repl = repl + obj._indexed_reverse_repl = {val: key for key, val in repl.items()} + return obj + + def doit(self): + res = super().doit() + return res.xreplace(self._indexed_reverse_repl) + + @staticmethod + def _indexed_process_limits(limits): + repl = {} + newlimits = [] + for i in limits: + if isinstance(i, (tuple, list, Tuple)): + v = i[0] + vrest = i[1:] + else: + v = i + vrest = () + if isinstance(v, Indexed): + if v not in repl: + r = Dummy(str(v)) + repl[v] = r + newlimits.append((r,)+vrest) + else: + newlimits.append(i) + return repl, newlimits diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..766b6ccc4db60cc19ffa3e47f9dbd1d422346522 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/tests/__pycache__/test_indexed_integrals.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/tests/__pycache__/test_indexed_integrals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..622be70b0792a2974752bafaf9e3a27844542b20 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/tests/__pycache__/test_indexed_integrals.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/tests/test_indexed_integrals.py b/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/tests/test_indexed_integrals.py new file mode 100644 index 0000000000000000000000000000000000000000..61b98f0ffec29e026f6dfe8e16fde8b5818b0b09 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/sandbox/tests/test_indexed_integrals.py @@ -0,0 +1,25 @@ +from sympy.sandbox.indexed_integrals import IndexedIntegral +from sympy.core.symbol import symbols +from sympy.functions.elementary.trigonometric import (cos, sin) +from sympy.tensor.indexed import (Idx, IndexedBase) + + +def test_indexed_integrals(): + A = IndexedBase('A') + i, j = symbols('i j', integer=True) + a1, a2 = symbols('a1:3', cls=Idx) + assert isinstance(a1, Idx) + + assert IndexedIntegral(1, A[i]).doit() == A[i] + assert IndexedIntegral(A[i], A[i]).doit() == A[i] ** 2 / 2 + assert IndexedIntegral(A[j], A[i]).doit() == A[i] * A[j] + assert 
IndexedIntegral(A[i] * A[j], A[i]).doit() == A[i] ** 2 * A[j] / 2 + assert IndexedIntegral(sin(A[i]), A[i]).doit() == -cos(A[i]) + assert IndexedIntegral(sin(A[j]), A[i]).doit() == sin(A[j]) * A[i] + + assert IndexedIntegral(1, A[a1]).doit() == A[a1] + assert IndexedIntegral(A[a1], A[a1]).doit() == A[a1] ** 2 / 2 + assert IndexedIntegral(A[a2], A[a1]).doit() == A[a1] * A[a2] + assert IndexedIntegral(A[a1] * A[a2], A[a1]).doit() == A[a1] ** 2 * A[a2] / 2 + assert IndexedIntegral(sin(A[a1]), A[a1]).doit() == -cos(A[a1]) + assert IndexedIntegral(sin(A[a2]), A[a1]).doit() == sin(A[a2]) * A[a1] diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/__init__.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a832614b1d48e26bf01e16f040f34dd412e8e32b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/__init__.py @@ -0,0 +1,23 @@ +"""A module to manipulate symbolic objects with indices including tensors + +""" +from .indexed import IndexedBase, Idx, Indexed +from .index_methods import get_contraction_structure, get_indices +from .functions import shape +from .array import (MutableDenseNDimArray, ImmutableDenseNDimArray, + MutableSparseNDimArray, ImmutableSparseNDimArray, NDimArray, tensorproduct, + tensorcontraction, tensordiagonal, derive_by_array, permutedims, Array, + DenseNDimArray, SparseNDimArray,) + +__all__ = [ + 'IndexedBase', 'Idx', 'Indexed', + + 'get_contraction_structure', 'get_indices', + + 'shape', + + 'MutableDenseNDimArray', 'ImmutableDenseNDimArray', + 'MutableSparseNDimArray', 'ImmutableSparseNDimArray', 'NDimArray', + 'tensorproduct', 'tensorcontraction', 'tensordiagonal', 'derive_by_array', 'permutedims', + 'Array', 'DenseNDimArray', 'SparseNDimArray', +] diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e92def3afef8a85cd1791ac7c934e46e563985c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/__pycache__/functions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/__pycache__/functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73a5d746061e6ba6f76340070f33da1a4f29f893 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/__pycache__/functions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/__pycache__/index_methods.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/__pycache__/index_methods.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5333e1337eed7ffc04fc3bcfbe5adadd054092c7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/__pycache__/index_methods.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/__pycache__/indexed.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/__pycache__/indexed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45d68d57db73eca3c2a11ca7fb5b2bad4a69d81c Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/__pycache__/indexed.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/__pycache__/tensor.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/__pycache__/tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4970ef05cdf3162cd19aacad3a3e882c84315c2e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/__pycache__/tensor.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/__pycache__/toperators.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/__pycache__/toperators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfa667da286b01b04d8ebca0e2b5afdd3fcb5395 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/__pycache__/toperators.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__init__.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..eca2eb4c6c58cb113517b6e41737e9d97abbb84e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__init__.py @@ -0,0 +1,271 @@ +r""" +N-dim array module for SymPy. + +Four classes are provided to handle N-dim arrays, given by the combinations +dense/sparse (i.e. whether to store all elements or only the non-zero ones in +memory) and mutable/immutable (immutable classes are SymPy objects, but cannot +change after they have been created). + +Examples +======== + +The following examples show the usage of ``Array``. This is an abbreviation for +``ImmutableDenseNDimArray``, that is an immutable and dense N-dim array, the +other classes are analogous. For mutable classes it is also possible to change +element values after the object has been constructed. 
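+
+A minimal sketch of the mutable/immutable difference (illustrative only):
+element assignment is available on the mutable classes, while the immutable
+classes reject it:
+
+>>> from sympy import MutableDenseNDimArray, ImmutableDenseNDimArray
+>>> m = MutableDenseNDimArray([[1, 2], [3, 4]])
+>>> m[0, 0] = 10  # item assignment is defined only on the mutable classes
+>>> m
+[[10, 2], [3, 4]]
+>>> ImmutableDenseNDimArray([[1, 2], [3, 4]])[0, 0] = 10
+Traceback (most recent call last):
+...
+TypeError: immutable N-dim array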
+ +Array construction can detect the shape of nested lists and tuples: + +>>> from sympy import Array +>>> a1 = Array([[1, 2], [3, 4], [5, 6]]) +>>> a1 +[[1, 2], [3, 4], [5, 6]] +>>> a1.shape +(3, 2) +>>> a1.rank() +2 +>>> from sympy.abc import x, y, z +>>> a2 = Array([[[x, y], [z, x*z]], [[1, x*y], [1/x, x/y]]]) +>>> a2 +[[[x, y], [z, x*z]], [[1, x*y], [1/x, x/y]]] +>>> a2.shape +(2, 2, 2) +>>> a2.rank() +3 + +Otherwise one could pass a 1-dim array followed by a shape tuple: + +>>> m1 = Array(range(12), (3, 4)) +>>> m1 +[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]] +>>> m2 = Array(range(12), (3, 2, 2)) +>>> m2 +[[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9], [10, 11]]] +>>> m2[1,1,1] +7 +>>> m2.reshape(4, 3) +[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]] + +Slice support: + +>>> m2[:, 1, 1] +[3, 7, 11] + +Elementwise derivative: + +>>> from sympy.abc import x, y, z +>>> m3 = Array([x**3, x*y, z]) +>>> m3.diff(x) +[3*x**2, y, 0] +>>> m3.diff(z) +[0, 0, 1] + +Multiplication with other SymPy expressions is applied elementwisely: + +>>> (1+x)*m3 +[x**3*(x + 1), x*y*(x + 1), z*(x + 1)] + +To apply a function to each element of the N-dim array, use ``applyfunc``: + +>>> m3.applyfunc(lambda x: x/2) +[x**3/2, x*y/2, z/2] + +N-dim arrays can be converted to nested lists by the ``tolist()`` method: + +>>> m2.tolist() +[[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9], [10, 11]]] +>>> isinstance(m2.tolist(), list) +True + +If the rank is 2, it is possible to convert them to matrices with ``tomatrix()``: + +>>> m1.tomatrix() +Matrix([ +[0, 1, 2, 3], +[4, 5, 6, 7], +[8, 9, 10, 11]]) + +Products and contractions +------------------------- + +Tensor product between arrays `A_{i_1,\ldots,i_n}` and `B_{j_1,\ldots,j_m}` +creates the combined array `P = A \otimes B` defined as + +`P_{i_1,\ldots,i_n,j_1,\ldots,j_m} := A_{i_1,\ldots,i_n}\cdot B_{j_1,\ldots,j_m}.` + +It is available through ``tensorproduct(...)``: + +>>> from sympy import Array, tensorproduct +>>> from sympy.abc import x,y,z,t +>>> A = Array([x, y, z, t]) +>>> B = Array([1, 2, 3, 4]) +>>> tensorproduct(A, B) +[[x, 2*x, 3*x, 4*x], [y, 2*y, 3*y, 4*y], [z, 2*z, 3*z, 4*z], [t, 2*t, 3*t, 4*t]] + +In case you don't want to evaluate the tensor product immediately, you can use +``ArrayTensorProduct``, which creates an unevaluated tensor product expression: + +>>> from sympy.tensor.array.expressions import ArrayTensorProduct +>>> ArrayTensorProduct(A, B) +ArrayTensorProduct([x, y, z, t], [1, 2, 3, 4]) + +Calling ``.as_explicit()`` on ``ArrayTensorProduct`` is equivalent to just calling +``tensorproduct(...)``: + +>>> ArrayTensorProduct(A, B).as_explicit() +[[x, 2*x, 3*x, 4*x], [y, 2*y, 3*y, 4*y], [z, 2*z, 3*z, 4*z], [t, 2*t, 3*t, 4*t]] + +Tensor product between a rank-1 array and a matrix creates a rank-3 array: + +>>> from sympy import eye +>>> p1 = tensorproduct(A, eye(4)) +>>> p1 +[[[x, 0, 0, 0], [0, x, 0, 0], [0, 0, x, 0], [0, 0, 0, x]], [[y, 0, 0, 0], [0, y, 0, 0], [0, 0, y, 0], [0, 0, 0, y]], [[z, 0, 0, 0], [0, z, 0, 0], [0, 0, z, 0], [0, 0, 0, z]], [[t, 0, 0, 0], [0, t, 0, 0], [0, 0, t, 0], [0, 0, 0, t]]] + +Now, to get back `A_0 \otimes \mathbf{1}` one can access `p_{0,m,n}` by slicing: + +>>> p1[0,:,:] +[[x, 0, 0, 0], [0, x, 0, 0], [0, 0, x, 0], [0, 0, 0, x]] + +Tensor contraction sums over the specified axes, for example contracting +positions `a` and `b` means + +`A_{i_1,\ldots,i_a,\ldots,i_b,\ldots,i_n} \implies \sum_k A_{i_1,\ldots,k,\ldots,k,\ldots,i_n}` + +Remember that Python indexing is zero starting, to contract the a-th and b-th +axes it is 
therefore necessary to specify `a-1` and `b-1` + +>>> from sympy import tensorcontraction +>>> C = Array([[x, y], [z, t]]) + +The matrix trace is equivalent to the contraction of a rank-2 array: + +`A_{m,n} \implies \sum_k A_{k,k}` + +>>> tensorcontraction(C, (0, 1)) +t + x + +To create an expression representing a tensor contraction that does not get +evaluated immediately, use ``ArrayContraction``, which is equivalent to +``tensorcontraction(...)`` if it is followed by ``.as_explicit()``: + +>>> from sympy.tensor.array.expressions import ArrayContraction +>>> ArrayContraction(C, (0, 1)) +ArrayContraction([[x, y], [z, t]], (0, 1)) +>>> ArrayContraction(C, (0, 1)).as_explicit() +t + x + +Matrix product is equivalent to a tensor product of two rank-2 arrays, followed +by a contraction of the 2nd and 3rd axes (in Python indexing axes number 1, 2). + +`A_{m,n}\cdot B_{i,j} \implies \sum_k A_{m, k}\cdot B_{k, j}` + +>>> D = Array([[2, 1], [0, -1]]) +>>> tensorcontraction(tensorproduct(C, D), (1, 2)) +[[2*x, x - y], [2*z, -t + z]] + +One may verify that the matrix product is equivalent: + +>>> from sympy import Matrix +>>> Matrix([[x, y], [z, t]])*Matrix([[2, 1], [0, -1]]) +Matrix([ +[2*x, x - y], +[2*z, -t + z]]) + +or equivalently + +>>> C.tomatrix()*D.tomatrix() +Matrix([ +[2*x, x - y], +[2*z, -t + z]]) + +Diagonal operator +----------------- + +The ``tensordiagonal`` function acts in a similar manner to ``tensorcontraction``, +but the joined indices are not summed over, for example diagonalizing +positions `a` and `b` means + +`A_{i_1,\ldots,i_a,\ldots,i_b,\ldots,i_n} \implies A_{i_1,\ldots,k,\ldots,k,\ldots,i_n} +\implies \tilde{A}_{i_1,\ldots,i_{a-1},i_{a+1},\ldots,i_{b-1},i_{b+1},\ldots,i_n,k}` + +where `\tilde{A}` is the array equivalent to the diagonal of `A` at positions +`a` and `b` moved to the last index slot. + +Compare the difference between contraction and diagonal operators: + +>>> from sympy import tensordiagonal +>>> from sympy.abc import a, b, c, d +>>> m = Matrix([[a, b], [c, d]]) +>>> tensorcontraction(m, [0, 1]) +a + d +>>> tensordiagonal(m, [0, 1]) +[a, d] + +In short, no summation occurs with ``tensordiagonal``. + + +Derivatives by array +-------------------- + +The usual derivative operation may be extended to support derivation with +respect to arrays, provided that all elements in that array are symbols or +expressions suitable for derivations.
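+
+As a minimal sketch of this restriction (using ``derive_by_array``, which is
+introduced below), an array containing an entry that is not a valid
+differentiation variable, such as a plain number, is rejected:
+
+>>> from sympy import derive_by_array
+>>> from sympy.abc import x
+>>> derive_by_array(x**2, [x, 2])  # 2 is not a valid differentiation variable
+Traceback (most recent call last):
+...
+ValueError: cannot derive by this array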
+ +The definition of a derivative by an array is as follows: given the array +`A_{i_1, \ldots, i_N}` and the array `X_{j_1, \ldots, j_M}` +the derivative of arrays will return a new array `B` defined by + +`B_{j_1,\ldots,j_M,i_1,\ldots,i_N} := \frac{\partial A_{i_1,\ldots,i_N}}{\partial X_{j_1,\ldots,j_M}}` + +The function ``derive_by_array`` performs such an operation: + +>>> from sympy import derive_by_array +>>> from sympy.abc import x, y, z, t +>>> from sympy import sin, exp + +With scalars, it behaves exactly as the ordinary derivative: + +>>> derive_by_array(sin(x*y), x) +y*cos(x*y) + +Scalar derived by an array basis: + +>>> derive_by_array(sin(x*y), [x, y, z]) +[y*cos(x*y), x*cos(x*y), 0] + +Deriving array by an array basis: `B^{nm} := \frac{\partial A^m}{\partial x^n}` + +>>> basis = [x, y, z] +>>> ax = derive_by_array([exp(x), sin(y*z), t], basis) +>>> ax +[[exp(x), 0, 0], [0, z*cos(y*z), 0], [0, y*cos(y*z), 0]] + +Contraction of the resulting array: `\sum_m \frac{\partial A^m}{\partial x^m}` + +>>> tensorcontraction(ax, (0, 1)) +z*cos(y*z) + exp(x) + +""" + +from .dense_ndim_array import MutableDenseNDimArray, ImmutableDenseNDimArray, DenseNDimArray +from .sparse_ndim_array import MutableSparseNDimArray, ImmutableSparseNDimArray, SparseNDimArray +from .ndim_array import NDimArray, ArrayKind +from .arrayop import tensorproduct, tensorcontraction, tensordiagonal, derive_by_array, permutedims +from .array_comprehension import ArrayComprehension, ArrayComprehensionMap + +Array = ImmutableDenseNDimArray + +__all__ = [ + 'MutableDenseNDimArray', 'ImmutableDenseNDimArray', 'DenseNDimArray', + + 'MutableSparseNDimArray', 'ImmutableSparseNDimArray', 'SparseNDimArray', + + 'NDimArray', 'ArrayKind', + + 'tensorproduct', 'tensorcontraction', 'tensordiagonal', 'derive_by_array', + + 'permutedims', 'ArrayComprehension', 'ArrayComprehensionMap', + + 'Array', +] diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/array_comprehension.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/array_comprehension.py new file mode 100644 index 0000000000000000000000000000000000000000..95702f499f3e40597fd0144929138ac1329962ee --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/array_comprehension.py @@ -0,0 +1,399 @@ +import functools, itertools +from sympy.core.sympify import _sympify, sympify +from sympy.core.expr import Expr +from sympy.core import Basic, Tuple +from sympy.tensor.array import ImmutableDenseNDimArray +from sympy.core.symbol import Symbol +from sympy.core.numbers import Integer + + +class ArrayComprehension(Basic): + """ + Generate a list comprehension. + + Explanation + =========== + + If there is a symbolic dimension, for example, say [i for i in range(1, N)] where + N is a Symbol, then the expression will not be expanded to an array. Otherwise, + calling the doit() function will launch the expansion. 
+ + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehension + >>> from sympy import symbols + >>> i, j, k = symbols('i j k') + >>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> a + ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> a.doit() + [[11, 12, 13], [21, 22, 23], [31, 32, 33], [41, 42, 43]] + >>> b = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, k)) + >>> b.doit() + ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, k)) + """ + def __new__(cls, function, *symbols, **assumptions): + if any(len(l) != 3 or None for l in symbols): + raise ValueError('ArrayComprehension requires values lower and upper bound' + ' for the expression') + arglist = [sympify(function)] + arglist.extend(cls._check_limits_validity(function, symbols)) + obj = Basic.__new__(cls, *arglist, **assumptions) + obj._limits = obj._args[1:] + obj._shape = cls._calculate_shape_from_limits(obj._limits) + obj._rank = len(obj._shape) + obj._loop_size = cls._calculate_loop_size(obj._shape) + return obj + + @property + def function(self): + """The function applied across limits. + + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehension + >>> from sympy import symbols + >>> i, j = symbols('i j') + >>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> a.function + 10*i + j + """ + return self._args[0] + + @property + def limits(self): + """ + The list of limits that will be applied while expanding the array. + + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehension + >>> from sympy import symbols + >>> i, j = symbols('i j') + >>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> a.limits + ((i, 1, 4), (j, 1, 3)) + """ + return self._limits + + @property + def free_symbols(self): + """ + The set of the free_symbols in the array. + Variables appeared in the bounds are supposed to be excluded + from the free symbol set. + + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehension + >>> from sympy import symbols + >>> i, j, k = symbols('i j k') + >>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> a.free_symbols + set() + >>> b = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, k+3)) + >>> b.free_symbols + {k} + """ + expr_free_sym = self.function.free_symbols + for var, inf, sup in self._limits: + expr_free_sym.discard(var) + curr_free_syms = inf.free_symbols.union(sup.free_symbols) + expr_free_sym = expr_free_sym.union(curr_free_syms) + return expr_free_sym + + @property + def variables(self): + """The tuples of the variables in the limits. + + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehension + >>> from sympy import symbols + >>> i, j, k = symbols('i j k') + >>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> a.variables + [i, j] + """ + return [l[0] for l in self._limits] + + @property + def bound_symbols(self): + """The list of dummy variables. + + Note + ==== + + Note that all variables are dummy variables since a limit without + lower bound or upper bound is not accepted. + """ + return [l[0] for l in self._limits if len(l) != 1] + + @property + def shape(self): + """ + The shape of the expanded array, which may have symbols. + + Note + ==== + + Both the lower and the upper bounds are included while + calculating the shape. 
+ + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehension + >>> from sympy import symbols + >>> i, j, k = symbols('i j k') + >>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> a.shape + (4, 3) + >>> b = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, k+3)) + >>> b.shape + (4, k + 3) + """ + return self._shape + + @property + def is_shape_numeric(self): + """ + Test if the array is shape-numeric which means there is no symbolic + dimension. + + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehension + >>> from sympy import symbols + >>> i, j, k = symbols('i j k') + >>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> a.is_shape_numeric + True + >>> b = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, k+3)) + >>> b.is_shape_numeric + False + """ + for _, inf, sup in self._limits: + if Basic(inf, sup).atoms(Symbol): + return False + return True + + def rank(self): + """The rank of the expanded array. + + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehension + >>> from sympy import symbols + >>> i, j, k = symbols('i j k') + >>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> a.rank() + 2 + """ + return self._rank + + def __len__(self): + """ + The length of the expanded array which means the number + of elements in the array. + + Raises + ====== + + ValueError : When the length of the array is symbolic + + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehension + >>> from sympy import symbols + >>> i, j = symbols('i j') + >>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> len(a) + 12 + """ + if self._loop_size.free_symbols: + raise ValueError('Symbolic length is not supported') + return self._loop_size + + @classmethod + def _check_limits_validity(cls, function, limits): + #limits = sympify(limits) + new_limits = [] + for var, inf, sup in limits: + var = _sympify(var) + inf = _sympify(inf) + #since this is stored as an argument, it should be + #a Tuple + if isinstance(sup, list): + sup = Tuple(*sup) + else: + sup = _sympify(sup) + new_limits.append(Tuple(var, inf, sup)) + if any((not isinstance(i, Expr)) or i.atoms(Symbol, Integer) != i.atoms() + for i in [inf, sup]): + raise TypeError('Bounds should be an Expression(combination of Integer and Symbol)') + if (inf > sup) == True: + raise ValueError('Lower bound should be inferior to upper bound') + if var in inf.free_symbols or var in sup.free_symbols: + raise ValueError('Variable should not be part of its bounds') + return new_limits + + @classmethod + def _calculate_shape_from_limits(cls, limits): + return tuple([sup - inf + 1 for _, inf, sup in limits]) + + @classmethod + def _calculate_loop_size(cls, shape): + if not shape: + return 0 + loop_size = 1 + for l in shape: + loop_size = loop_size * l + + return loop_size + + def doit(self, **hints): + if not self.is_shape_numeric: + return self + + return self._expand_array() + + def _expand_array(self): + res = [] + for values in itertools.product(*[range(inf, sup+1) + for var, inf, sup + in self._limits]): + res.append(self._get_element(values)) + + return ImmutableDenseNDimArray(res, self.shape) + + def _get_element(self, values): + temp = self.function + for var, val in zip(self.variables, values): + temp = temp.subs(var, val) + return temp + + def tolist(self): + """Transform the expanded array to a list. 
+ + Raises + ====== + + ValueError : When there is a symbolic dimension + + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehension + >>> from sympy import symbols + >>> i, j = symbols('i j') + >>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> a.tolist() + [[11, 12, 13], [21, 22, 23], [31, 32, 33], [41, 42, 43]] + """ + if self.is_shape_numeric: + return self._expand_array().tolist() + + raise ValueError("A symbolic array cannot be expanded to a list") + + def tomatrix(self): + """Transform the expanded array to a matrix. + + Raises + ====== + + ValueError : When there is a symbolic dimension + ValueError : When the rank of the expanded array is not equal to 2 + + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehension + >>> from sympy import symbols + >>> i, j = symbols('i j') + >>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> a.tomatrix() + Matrix([ + [11, 12, 13], + [21, 22, 23], + [31, 32, 33], + [41, 42, 43]]) + """ + from sympy.matrices import Matrix + + if not self.is_shape_numeric: + raise ValueError("A symbolic array cannot be expanded to a matrix") + if self._rank != 2: + raise ValueError('Dimensions must be of size of 2') + + return Matrix(self._expand_array().tomatrix()) + + +def isLambda(v): + LAMBDA = lambda: 0 + return isinstance(v, type(LAMBDA)) and v.__name__ == LAMBDA.__name__ + +class ArrayComprehensionMap(ArrayComprehension): + ''' + A subclass of ArrayComprehension dedicated to map external function lambda. + + Notes + ===== + + Only the lambda function is considered. + At most one argument in lambda function is accepted in order to avoid ambiguity + in value assignment. + + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehensionMap + >>> from sympy import symbols + >>> i, j, k = symbols('i j k') + >>> a = ArrayComprehensionMap(lambda: 1, (i, 1, 4)) + >>> a.doit() + [1, 1, 1, 1] + >>> b = ArrayComprehensionMap(lambda a: a+1, (j, 1, 4)) + >>> b.doit() + [2, 3, 4, 5] + + ''' + def __new__(cls, function, *symbols, **assumptions): + if any(len(l) != 3 or None for l in symbols): + raise ValueError('ArrayComprehension requires values lower and upper bound' + ' for the expression') + + if not isLambda(function): + raise ValueError('Data type not supported') + + arglist = cls._check_limits_validity(function, symbols) + obj = Basic.__new__(cls, *arglist, **assumptions) + obj._limits = obj._args + obj._shape = cls._calculate_shape_from_limits(obj._limits) + obj._rank = len(obj._shape) + obj._loop_size = cls._calculate_loop_size(obj._shape) + obj._lambda = function + return obj + + @property + def func(self): + class _(ArrayComprehensionMap): + def __new__(cls, *args, **kwargs): + return ArrayComprehensionMap(self._lambda, *args, **kwargs) + return _ + + def _get_element(self, values): + temp = self._lambda + if self._lambda.__code__.co_argcount == 0: + temp = temp() + elif self._lambda.__code__.co_argcount == 1: + temp = temp(functools.reduce(lambda a, b: a*b, values)) + return temp diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/array_derivatives.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/array_derivatives.py new file mode 100644 index 0000000000000000000000000000000000000000..797d5c1f6b3e95aa36c40d205bab5e3f8c9fceec --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/array_derivatives.py @@ -0,0 +1,129 @@ +from __future__ import annotations + +from sympy.core.expr import Expr +from sympy.core.function import 
Derivative +from sympy.core.numbers import Integer +from sympy.matrices.common import MatrixCommon +from .ndim_array import NDimArray +from .arrayop import derive_by_array +from sympy.matrices.expressions.matexpr import MatrixExpr +from sympy.matrices.expressions.special import ZeroMatrix +from sympy.matrices.expressions.matexpr import _matrix_derivative + + +class ArrayDerivative(Derivative): + + is_scalar = False + + def __new__(cls, expr, *variables, **kwargs): + obj = super().__new__(cls, expr, *variables, **kwargs) + if isinstance(obj, ArrayDerivative): + obj._shape = obj._get_shape() + return obj + + def _get_shape(self): + shape = () + for v, count in self.variable_count: + if hasattr(v, "shape"): + for i in range(count): + shape += v.shape + if hasattr(self.expr, "shape"): + shape += self.expr.shape + return shape + + @property + def shape(self): + return self._shape + + @classmethod + def _get_zero_with_shape_like(cls, expr): + if isinstance(expr, (MatrixCommon, NDimArray)): + return expr.zeros(*expr.shape) + elif isinstance(expr, MatrixExpr): + return ZeroMatrix(*expr.shape) + else: + raise RuntimeError("Unable to determine shape of array-derivative.") + + @staticmethod + def _call_derive_scalar_by_matrix(expr: Expr, v: MatrixCommon) -> Expr: + return v.applyfunc(lambda x: expr.diff(x)) + + @staticmethod + def _call_derive_scalar_by_matexpr(expr: Expr, v: MatrixExpr) -> Expr: + if expr.has(v): + return _matrix_derivative(expr, v) + else: + return ZeroMatrix(*v.shape) + + @staticmethod + def _call_derive_scalar_by_array(expr: Expr, v: NDimArray) -> Expr: + return v.applyfunc(lambda x: expr.diff(x)) + + @staticmethod + def _call_derive_matrix_by_scalar(expr: MatrixCommon, v: Expr) -> Expr: + return _matrix_derivative(expr, v) + + @staticmethod + def _call_derive_matexpr_by_scalar(expr: MatrixExpr, v: Expr) -> Expr: + return expr._eval_derivative(v) + + @staticmethod + def _call_derive_array_by_scalar(expr: NDimArray, v: Expr) -> Expr: + return expr.applyfunc(lambda x: x.diff(v)) + + @staticmethod + def _call_derive_default(expr: Expr, v: Expr) -> Expr | None: + if expr.has(v): + return _matrix_derivative(expr, v) + else: + return None + + @classmethod + def _dispatch_eval_derivative_n_times(cls, expr, v, count): + # Evaluate the derivative `n` times. 
If + # `_eval_derivative_n_times` is not overridden by the current + # object, the default in `Basic` will call a loop over + # `_eval_derivative`: + + if not isinstance(count, (int, Integer)) or ((count <= 0) == True): + return None + + # TODO: this could be done with multiple-dispatching: + if expr.is_scalar: + if isinstance(v, MatrixCommon): + result = cls._call_derive_scalar_by_matrix(expr, v) + elif isinstance(v, MatrixExpr): + result = cls._call_derive_scalar_by_matexpr(expr, v) + elif isinstance(v, NDimArray): + result = cls._call_derive_scalar_by_array(expr, v) + elif v.is_scalar: + # scalar by scalar has a special + return super()._dispatch_eval_derivative_n_times(expr, v, count) + else: + return None + elif v.is_scalar: + if isinstance(expr, MatrixCommon): + result = cls._call_derive_matrix_by_scalar(expr, v) + elif isinstance(expr, MatrixExpr): + result = cls._call_derive_matexpr_by_scalar(expr, v) + elif isinstance(expr, NDimArray): + result = cls._call_derive_array_by_scalar(expr, v) + else: + return None + else: + # Both `expr` and `v` are some array/matrix type: + if isinstance(expr, MatrixCommon) or isinstance(expr, MatrixCommon): + result = derive_by_array(expr, v) + elif isinstance(expr, MatrixExpr) and isinstance(v, MatrixExpr): + result = cls._call_derive_default(expr, v) + elif isinstance(expr, MatrixExpr) or isinstance(v, MatrixExpr): + # if one expression is a symbolic matrix expression while the other isn't, don't evaluate: + return None + else: + result = derive_by_array(expr, v) + if result is None: + return None + if count == 1: + return result + else: + return cls._dispatch_eval_derivative_n_times(result, v, count - 1) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/arrayop.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/arrayop.py new file mode 100644 index 0000000000000000000000000000000000000000..2bac3a0af88ac83c624c79f16db7d88d8d74a7db --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/arrayop.py @@ -0,0 +1,528 @@ +import itertools +from collections.abc import Iterable + +from sympy.core._print_helpers import Printable +from sympy.core.containers import Tuple +from sympy.core.function import diff +from sympy.core.singleton import S +from sympy.core.sympify import _sympify + +from sympy.tensor.array.ndim_array import NDimArray +from sympy.tensor.array.dense_ndim_array import DenseNDimArray, ImmutableDenseNDimArray +from sympy.tensor.array.sparse_ndim_array import SparseNDimArray + + +def _arrayfy(a): + from sympy.matrices import MatrixBase + + if isinstance(a, NDimArray): + return a + if isinstance(a, (MatrixBase, list, tuple, Tuple)): + return ImmutableDenseNDimArray(a) + return a + + +def tensorproduct(*args): + """ + Tensor product among scalars or array-like objects. + + The equivalent operator for array expressions is ``ArrayTensorProduct``, + which can be used to keep the expression unevaluated. + + Examples + ======== + + >>> from sympy.tensor.array import tensorproduct, Array + >>> from sympy.abc import x, y, z, t + >>> A = Array([[1, 2], [3, 4]]) + >>> B = Array([x, y]) + >>> tensorproduct(A, B) + [[[x, y], [2*x, 2*y]], [[3*x, 3*y], [4*x, 4*y]]] + >>> tensorproduct(A, x) + [[x, 2*x], [3*x, 4*x]] + >>> tensorproduct(A, B, B) + [[[[x**2, x*y], [x*y, y**2]], [[2*x**2, 2*x*y], [2*x*y, 2*y**2]]], [[[3*x**2, 3*x*y], [3*x*y, 3*y**2]], [[4*x**2, 4*x*y], [4*x*y, 4*y**2]]]] + + Applying this function on two matrices will result in a rank 4 array. 
+ + >>> from sympy import Matrix, eye + >>> m = Matrix([[x, y], [z, t]]) + >>> p = tensorproduct(eye(3), m) + >>> p + [[[[x, y], [z, t]], [[0, 0], [0, 0]], [[0, 0], [0, 0]]], [[[0, 0], [0, 0]], [[x, y], [z, t]], [[0, 0], [0, 0]]], [[[0, 0], [0, 0]], [[0, 0], [0, 0]], [[x, y], [z, t]]]] + + See Also + ======== + + sympy.tensor.array.expressions.array_expressions.ArrayTensorProduct + + """ + from sympy.tensor.array import SparseNDimArray, ImmutableSparseNDimArray + + if len(args) == 0: + return S.One + if len(args) == 1: + return _arrayfy(args[0]) + from sympy.tensor.array.expressions.array_expressions import _CodegenArrayAbstract + from sympy.tensor.array.expressions.array_expressions import ArrayTensorProduct + from sympy.tensor.array.expressions.array_expressions import _ArrayExpr + from sympy.matrices.expressions.matexpr import MatrixSymbol + if any(isinstance(arg, (_ArrayExpr, _CodegenArrayAbstract, MatrixSymbol)) for arg in args): + return ArrayTensorProduct(*args) + if len(args) > 2: + return tensorproduct(tensorproduct(args[0], args[1]), *args[2:]) + + # length of args is 2: + a, b = map(_arrayfy, args) + + if not isinstance(a, NDimArray) or not isinstance(b, NDimArray): + return a*b + + if isinstance(a, SparseNDimArray) and isinstance(b, SparseNDimArray): + lp = len(b) + new_array = {k1*lp + k2: v1*v2 for k1, v1 in a._sparse_array.items() for k2, v2 in b._sparse_array.items()} + return ImmutableSparseNDimArray(new_array, a.shape + b.shape) + + product_list = [i*j for i in Flatten(a) for j in Flatten(b)] + return ImmutableDenseNDimArray(product_list, a.shape + b.shape) + + +def _util_contraction_diagonal(array, *contraction_or_diagonal_axes): + array = _arrayfy(array) + + # Verify contraction_axes: + taken_dims = set() + for axes_group in contraction_or_diagonal_axes: + if not isinstance(axes_group, Iterable): + raise ValueError("collections of contraction/diagonal axes expected") + + dim = array.shape[axes_group[0]] + + for d in axes_group: + if d in taken_dims: + raise ValueError("dimension specified more than once") + if dim != array.shape[d]: + raise ValueError("cannot contract or diagonalize between axes of different dimension") + taken_dims.add(d) + + rank = array.rank() + + remaining_shape = [dim for i, dim in enumerate(array.shape) if i not in taken_dims] + cum_shape = [0]*rank + _cumul = 1 + for i in range(rank): + cum_shape[rank - i - 1] = _cumul + _cumul *= int(array.shape[rank - i - 1]) + + # DEFINITION: by absolute position it is meant the position along the one + # dimensional array containing all the tensor components. + + # Possible future work on this module: move computation of absolute + # positions to a class method. + + # Determine absolute positions of the uncontracted indices: + remaining_indices = [[cum_shape[i]*j for j in range(array.shape[i])] + for i in range(rank) if i not in taken_dims] + + # Determine absolute positions of the contracted indices: + summed_deltas = [] + for axes_group in contraction_or_diagonal_axes: + lidx = [] + for js in range(array.shape[axes_group[0]]): + lidx.append(sum([cum_shape[ig] * js for ig in axes_group])) + summed_deltas.append(lidx) + + return array, remaining_indices, remaining_shape, summed_deltas + + +def tensorcontraction(array, *contraction_axes): + """ + Contraction of an array-like object on the specified axes. + + The equivalent operator for array expressions is ``ArrayContraction``, + which can be used to keep the expression unevaluated. 
+ + Examples + ======== + + >>> from sympy import Array, tensorcontraction + >>> from sympy import Matrix, eye + >>> tensorcontraction(eye(3), (0, 1)) + 3 + >>> A = Array(range(18), (3, 2, 3)) + >>> A + [[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]], [[12, 13, 14], [15, 16, 17]]] + >>> tensorcontraction(A, (0, 2)) + [21, 30] + + Matrix multiplication may be emulated with a proper combination of + ``tensorcontraction`` and ``tensorproduct`` + + >>> from sympy import tensorproduct + >>> from sympy.abc import a,b,c,d,e,f,g,h + >>> m1 = Matrix([[a, b], [c, d]]) + >>> m2 = Matrix([[e, f], [g, h]]) + >>> p = tensorproduct(m1, m2) + >>> p + [[[[a*e, a*f], [a*g, a*h]], [[b*e, b*f], [b*g, b*h]]], [[[c*e, c*f], [c*g, c*h]], [[d*e, d*f], [d*g, d*h]]]] + >>> tensorcontraction(p, (1, 2)) + [[a*e + b*g, a*f + b*h], [c*e + d*g, c*f + d*h]] + >>> m1*m2 + Matrix([ + [a*e + b*g, a*f + b*h], + [c*e + d*g, c*f + d*h]]) + + See Also + ======== + + sympy.tensor.array.expressions.array_expressions.ArrayContraction + + """ + from sympy.tensor.array.expressions.array_expressions import _array_contraction + from sympy.tensor.array.expressions.array_expressions import _CodegenArrayAbstract + from sympy.tensor.array.expressions.array_expressions import _ArrayExpr + from sympy.matrices.expressions.matexpr import MatrixSymbol + if isinstance(array, (_ArrayExpr, _CodegenArrayAbstract, MatrixSymbol)): + return _array_contraction(array, *contraction_axes) + + array, remaining_indices, remaining_shape, summed_deltas = _util_contraction_diagonal(array, *contraction_axes) + + # Compute the contracted array: + # + # 1. external for loops on all uncontracted indices. + # Uncontracted indices are determined by the combinatorial product of + # the absolute positions of the remaining indices. + # 2. internal loop on all contracted indices. + # It sums the values of the absolute contracted index and the absolute + # uncontracted index for the external loop. + contracted_array = [] + for icontrib in itertools.product(*remaining_indices): + index_base_position = sum(icontrib) + isum = S.Zero + for sum_to_index in itertools.product(*summed_deltas): + idx = array._get_tuple_index(index_base_position + sum(sum_to_index)) + isum += array[idx] + + contracted_array.append(isum) + + if len(remaining_indices) == 0: + assert len(contracted_array) == 1 + return contracted_array[0] + + return type(array)(contracted_array, remaining_shape) + + +def tensordiagonal(array, *diagonal_axes): + """ + Diagonalization of an array-like object on the specified axes. + + This is equivalent to multiplying the expression by Kronecker deltas + uniting the axes. + + The diagonal indices are put at the end of the axes. + + The equivalent operator for array expressions is ``ArrayDiagonal``, which + can be used to keep the expression unevaluated. 
+ + Examples + ======== + + ``tensordiagonal`` acting on a 2-dimensional array by axes 0 and 1 is + equivalent to the diagonal of the matrix: + + >>> from sympy import Array, tensordiagonal + >>> from sympy import Matrix, eye + >>> tensordiagonal(eye(3), (0, 1)) + [1, 1, 1] + + >>> from sympy.abc import a,b,c,d + >>> m1 = Matrix([[a, b], [c, d]]) + >>> tensordiagonal(m1, [0, 1]) + [a, d] + + In case of higher dimensional arrays, the diagonalized out dimensions + are removed and appended as a single dimension at the end: + + >>> A = Array(range(18), (3, 2, 3)) + >>> A + [[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]], [[12, 13, 14], [15, 16, 17]]] + >>> tensordiagonal(A, (0, 2)) + [[0, 7, 14], [3, 10, 17]] + >>> from sympy import permutedims + >>> tensordiagonal(A, (0, 2)) == permutedims(Array([A[0, :, 0], A[1, :, 1], A[2, :, 2]]), [1, 0]) + True + + See Also + ======== + + sympy.tensor.array.expressions.array_expressions.ArrayDiagonal + + """ + if any(len(i) <= 1 for i in diagonal_axes): + raise ValueError("need at least two axes to diagonalize") + + from sympy.tensor.array.expressions.array_expressions import _ArrayExpr + from sympy.tensor.array.expressions.array_expressions import _CodegenArrayAbstract + from sympy.tensor.array.expressions.array_expressions import ArrayDiagonal, _array_diagonal + from sympy.matrices.expressions.matexpr import MatrixSymbol + if isinstance(array, (_ArrayExpr, _CodegenArrayAbstract, MatrixSymbol)): + return _array_diagonal(array, *diagonal_axes) + + ArrayDiagonal._validate(array, *diagonal_axes) + + array, remaining_indices, remaining_shape, diagonal_deltas = _util_contraction_diagonal(array, *diagonal_axes) + + # Compute the diagonalized array: + # + # 1. external for loops on all undiagonalized indices. + # Undiagonalized indices are determined by the combinatorial product of + # the absolute positions of the remaining indices. + # 2. internal loop on all diagonal indices. + # It appends the values of the absolute diagonalized index and the absolute + # undiagonalized index for the external loop. + diagonalized_array = [] + diagonal_shape = [len(i) for i in diagonal_deltas] + for icontrib in itertools.product(*remaining_indices): + index_base_position = sum(icontrib) + isum = [] + for sum_to_index in itertools.product(*diagonal_deltas): + idx = array._get_tuple_index(index_base_position + sum(sum_to_index)) + isum.append(array[idx]) + + isum = type(array)(isum).reshape(*diagonal_shape) + diagonalized_array.append(isum) + + return type(array)(diagonalized_array, remaining_shape + diagonal_shape) + + +def derive_by_array(expr, dx): + r""" + Derivative by arrays. Supports both arrays and scalars. + + The equivalent operator for array expressions is ``array_derive``.
+ + Explanation + =========== + + Given the array `A_{i_1, \ldots, i_N}` and the array `X_{j_1, \ldots, j_M}` + this function will return a new array `B` defined by + + `B_{j_1,\ldots,j_M,i_1,\ldots,i_N} := \frac{\partial A_{i_1,\ldots,i_N}}{\partial X_{j_1,\ldots,j_M}}` + + Examples + ======== + + >>> from sympy import derive_by_array + >>> from sympy.abc import x, y, z, t + >>> from sympy import cos + >>> derive_by_array(cos(x*t), x) + -t*sin(t*x) + >>> derive_by_array(cos(x*t), [x, y, z, t]) + [-t*sin(t*x), 0, 0, -x*sin(t*x)] + >>> derive_by_array([x, y**2*z], [[x, y], [z, t]]) + [[[1, 0], [0, 2*y*z]], [[0, y**2], [0, 0]]] + + """ + from sympy.matrices import MatrixBase + from sympy.tensor.array import SparseNDimArray + array_types = (Iterable, MatrixBase, NDimArray) + + if isinstance(dx, array_types): + dx = ImmutableDenseNDimArray(dx) + for i in dx: + if not i._diff_wrt: + raise ValueError("cannot derive by this array") + + if isinstance(expr, array_types): + if isinstance(expr, NDimArray): + expr = expr.as_immutable() + else: + expr = ImmutableDenseNDimArray(expr) + + if isinstance(dx, array_types): + if isinstance(expr, SparseNDimArray): + lp = len(expr) + new_array = {k + i*lp: v + for i, x in enumerate(Flatten(dx)) + for k, v in expr.diff(x)._sparse_array.items()} + else: + new_array = [[y.diff(x) for y in Flatten(expr)] for x in Flatten(dx)] + return type(expr)(new_array, dx.shape + expr.shape) + else: + return expr.diff(dx) + else: + expr = _sympify(expr) + if isinstance(dx, array_types): + return ImmutableDenseNDimArray([expr.diff(i) for i in Flatten(dx)], dx.shape) + else: + dx = _sympify(dx) + return diff(expr, dx) + + +def permutedims(expr, perm=None, index_order_old=None, index_order_new=None): + """ + Permutes the indices of an array. + + Parameter specifies the permutation of the indices. + + The equivalent operator for array expressions is ``PermuteDims``, which can + be used to keep the expression unevaluated. 
+ + Examples + ======== + + >>> from sympy.abc import x, y, z, t + >>> from sympy import sin + >>> from sympy import Array, permutedims + >>> a = Array([[x, y, z], [t, sin(x), 0]]) + >>> a + [[x, y, z], [t, sin(x), 0]] + >>> permutedims(a, (1, 0)) + [[x, t], [y, sin(x)], [z, 0]] + + If the array is of second order, ``transpose`` can be used: + + >>> from sympy import transpose + >>> transpose(a) + [[x, t], [y, sin(x)], [z, 0]] + + Examples on higher dimensions: + + >>> b = Array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) + >>> permutedims(b, (2, 1, 0)) + [[[1, 5], [3, 7]], [[2, 6], [4, 8]]] + >>> permutedims(b, (1, 2, 0)) + [[[1, 5], [2, 6]], [[3, 7], [4, 8]]] + + An alternative way to specify the same permutations as in the previous + lines involves passing the *old* and *new* indices, either as a list or as + a string: + + >>> permutedims(b, index_order_old="cba", index_order_new="abc") + [[[1, 5], [3, 7]], [[2, 6], [4, 8]]] + >>> permutedims(b, index_order_old="cab", index_order_new="abc") + [[[1, 5], [2, 6]], [[3, 7], [4, 8]]] + + ``Permutation`` objects are also allowed: + + >>> from sympy.combinatorics import Permutation + >>> permutedims(b, Permutation([1, 2, 0])) + [[[1, 5], [2, 6]], [[3, 7], [4, 8]]] + + See Also + ======== + + sympy.tensor.array.expressions.array_expressions.PermuteDims + + """ + from sympy.tensor.array import SparseNDimArray + + from sympy.tensor.array.expressions.array_expressions import _ArrayExpr + from sympy.tensor.array.expressions.array_expressions import _CodegenArrayAbstract + from sympy.tensor.array.expressions.array_expressions import _permute_dims + from sympy.matrices.expressions.matexpr import MatrixSymbol + from sympy.tensor.array.expressions import PermuteDims + from sympy.tensor.array.expressions.array_expressions import get_rank + perm = PermuteDims._get_permutation_from_arguments(perm, index_order_old, index_order_new, get_rank(expr)) + if isinstance(expr, (_ArrayExpr, _CodegenArrayAbstract, MatrixSymbol)): + return _permute_dims(expr, perm) + + if not isinstance(expr, NDimArray): + expr = ImmutableDenseNDimArray(expr) + + from sympy.combinatorics import Permutation + if not isinstance(perm, Permutation): + perm = Permutation(list(perm)) + + if perm.size != expr.rank(): + raise ValueError("wrong permutation size") + + # Get the inverse permutation: + iperm = ~perm + new_shape = perm(expr.shape) + + if isinstance(expr, SparseNDimArray): + return type(expr)({tuple(perm(expr._get_tuple_index(k))): v + for k, v in expr._sparse_array.items()}, new_shape) + + indices_span = perm([range(i) for i in expr.shape]) + + new_array = [None]*len(expr) + for i, idx in enumerate(itertools.product(*indices_span)): + t = iperm(idx) + new_array[i] = expr[t] + + return type(expr)(new_array, new_shape) + + +class Flatten(Printable): + """ + Flatten an iterable object to a list in a lazy-evaluation way. + + Notes + ===== + + This class is an iterator with which the memory cost can be economised. + Optimisation has been considered to ameliorate the performance for some + specific data types like DenseNDimArray and SparseNDimArray. 
+ + Examples + ======== + + >>> from sympy.tensor.array.arrayop import Flatten + >>> from sympy.tensor.array import Array + >>> A = Array(range(6)).reshape(2, 3) + >>> Flatten(A) + Flatten([[0, 1, 2], [3, 4, 5]]) + >>> [i for i in Flatten(A)] + [0, 1, 2, 3, 4, 5] + """ + def __init__(self, iterable): + from sympy.matrices.matrices import MatrixBase + from sympy.tensor.array import NDimArray + + if not isinstance(iterable, (Iterable, MatrixBase)): + raise NotImplementedError("Data type not yet supported") + + if isinstance(iterable, list): + iterable = NDimArray(iterable) + + self._iter = iterable + self._idx = 0 + + def __iter__(self): + return self + + def __next__(self): + from sympy.matrices.matrices import MatrixBase + + if len(self._iter) > self._idx: + if isinstance(self._iter, DenseNDimArray): + result = self._iter._array[self._idx] + + elif isinstance(self._iter, SparseNDimArray): + if self._idx in self._iter._sparse_array: + result = self._iter._sparse_array[self._idx] + else: + result = 0 + + elif isinstance(self._iter, MatrixBase): + result = self._iter[self._idx] + + elif hasattr(self._iter, '__next__'): + result = next(self._iter) + + else: + result = self._iter[self._idx] + + else: + raise StopIteration + + self._idx += 1 + return result + + def next(self): + return self.__next__() + + def _sympystr(self, printer): + return type(self).__name__ + '(' + printer._print(self._iter) + ')' diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/dense_ndim_array.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/dense_ndim_array.py new file mode 100644 index 0000000000000000000000000000000000000000..576e452c55d8d374ca1f72c553f3a64de7227d43 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/dense_ndim_array.py @@ -0,0 +1,206 @@ +import functools +from typing import List + +from sympy.core.basic import Basic +from sympy.core.containers import Tuple +from sympy.core.singleton import S +from sympy.core.sympify import _sympify +from sympy.tensor.array.mutable_ndim_array import MutableNDimArray +from sympy.tensor.array.ndim_array import NDimArray, ImmutableNDimArray, ArrayKind +from sympy.utilities.iterables import flatten + + +class DenseNDimArray(NDimArray): + + _array: List[Basic] + + def __new__(self, *args, **kwargs): + return ImmutableDenseNDimArray(*args, **kwargs) + + @property + def kind(self) -> ArrayKind: + return ArrayKind._union(self._array) + + def __getitem__(self, index): + """ + Allows to get items from N-dim array. 
+ + Examples + ======== + + >>> from sympy import MutableDenseNDimArray + >>> a = MutableDenseNDimArray([0, 1, 2, 3], (2, 2)) + >>> a + [[0, 1], [2, 3]] + >>> a[0, 0] + 0 + >>> a[1, 1] + 3 + >>> a[0] + [0, 1] + >>> a[1] + [2, 3] + + + Symbolic index: + + >>> from sympy.abc import i, j + >>> a[i, j] + [[0, 1], [2, 3]][i, j] + + Replace `i` and `j` to get element `(1, 1)`: + + >>> a[i, j].subs({i: 1, j: 1}) + 3 + + """ + syindex = self._check_symbolic_index(index) + if syindex is not None: + return syindex + + index = self._check_index_for_getitem(index) + + if isinstance(index, tuple) and any(isinstance(i, slice) for i in index): + sl_factors, eindices = self._get_slice_data_for_array_access(index) + array = [self._array[self._parse_index(i)] for i in eindices] + nshape = [len(el) for i, el in enumerate(sl_factors) if isinstance(index[i], slice)] + return type(self)(array, nshape) + else: + index = self._parse_index(index) + return self._array[index] + + @classmethod + def zeros(cls, *shape): + list_length = functools.reduce(lambda x, y: x*y, shape, S.One) + return cls._new(([0]*list_length,), shape) + + def tomatrix(self): + """ + Converts MutableDenseNDimArray to Matrix. Can convert only 2-dim array, else will raise error. + + Examples + ======== + + >>> from sympy import MutableDenseNDimArray + >>> a = MutableDenseNDimArray([1 for i in range(9)], (3, 3)) + >>> b = a.tomatrix() + >>> b + Matrix([ + [1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + + """ + from sympy.matrices import Matrix + + if self.rank() != 2: + raise ValueError('Dimensions must be of size of 2') + + return Matrix(self.shape[0], self.shape[1], self._array) + + def reshape(self, *newshape): + """ + Returns MutableDenseNDimArray instance with new shape. Elements number + must be suitable to new shape. The only argument of method sets + new shape. 
+ + Examples + ======== + + >>> from sympy import MutableDenseNDimArray + >>> a = MutableDenseNDimArray([1, 2, 3, 4, 5, 6], (2, 3)) + >>> a.shape + (2, 3) + >>> a + [[1, 2, 3], [4, 5, 6]] + >>> b = a.reshape(3, 2) + >>> b.shape + (3, 2) + >>> b + [[1, 2], [3, 4], [5, 6]] + + """ + new_total_size = functools.reduce(lambda x,y: x*y, newshape) + if new_total_size != self._loop_size: + raise ValueError('Expecting reshape size to %d but got prod(%s) = %d' % ( + self._loop_size, str(newshape), new_total_size)) + + # there is no `.func` as this class does not subtype `Basic`: + return type(self)(self._array, newshape) + + +class ImmutableDenseNDimArray(DenseNDimArray, ImmutableNDimArray): # type: ignore + def __new__(cls, iterable, shape=None, **kwargs): + return cls._new(iterable, shape, **kwargs) + + @classmethod + def _new(cls, iterable, shape, **kwargs): + shape, flat_list = cls._handle_ndarray_creation_inputs(iterable, shape, **kwargs) + shape = Tuple(*map(_sympify, shape)) + cls._check_special_bounds(flat_list, shape) + flat_list = flatten(flat_list) + flat_list = Tuple(*flat_list) + self = Basic.__new__(cls, flat_list, shape, **kwargs) + self._shape = shape + self._array = list(flat_list) + self._rank = len(shape) + self._loop_size = functools.reduce(lambda x,y: x*y, shape, 1) + return self + + def __setitem__(self, index, value): + raise TypeError('immutable N-dim array') + + def as_mutable(self): + return MutableDenseNDimArray(self) + + def _eval_simplify(self, **kwargs): + from sympy.simplify.simplify import simplify + return self.applyfunc(simplify) + +class MutableDenseNDimArray(DenseNDimArray, MutableNDimArray): + + def __new__(cls, iterable=None, shape=None, **kwargs): + return cls._new(iterable, shape, **kwargs) + + @classmethod + def _new(cls, iterable, shape, **kwargs): + shape, flat_list = cls._handle_ndarray_creation_inputs(iterable, shape, **kwargs) + flat_list = flatten(flat_list) + self = object.__new__(cls) + self._shape = shape + self._array = list(flat_list) + self._rank = len(shape) + self._loop_size = functools.reduce(lambda x,y: x*y, shape) if shape else len(flat_list) + return self + + def __setitem__(self, index, value): + """Allows to set items to MutableDenseNDimArray. + + Examples + ======== + + >>> from sympy import MutableDenseNDimArray + >>> a = MutableDenseNDimArray.zeros(2, 2) + >>> a[0,0] = 1 + >>> a[1,1] = 1 + >>> a + [[1, 0], [0, 1]] + + """ + if isinstance(index, tuple) and any(isinstance(i, slice) for i in index): + value, eindices, slice_offsets = self._get_slice_data_for_array_assignment(index, value) + for i in eindices: + other_i = [ind - j for ind, j in zip(i, slice_offsets) if j is not None] + self._array[self._parse_index(i)] = value[other_i] + else: + index = self._parse_index(index) + self._setter_iterable_check(value) + value = _sympify(value) + self._array[index] = value + + def as_immutable(self): + return ImmutableDenseNDimArray(self) + + @property + def free_symbols(self): + return {i for j in self._array for i in j.free_symbols} diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__init__.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f1658241782cdf0e38a30c43a6d67f9811297f4c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__init__.py @@ -0,0 +1,178 @@ +r""" +Array expressions are expressions representing N-dimensional arrays, without +evaluating them. 
These expressions represent in a certain way abstract syntax +trees of operations on N-dimensional arrays. + +Every N-dimensional array operator has a corresponding array expression object. + +Table of correspondences: + +=============================== ============================= + Array operator Array expression operator +=============================== ============================= + tensorproduct ArrayTensorProduct + tensorcontraction ArrayContraction + tensordiagonal ArrayDiagonal + permutedims PermuteDims +=============================== ============================= + +Examples +======== + +``ArraySymbol`` objects are the N-dimensional equivalent of ``MatrixSymbol`` +objects in the matrix module: + +>>> from sympy.tensor.array.expressions import ArraySymbol +>>> from sympy.abc import i, j, k +>>> A = ArraySymbol("A", (3, 2, 4)) +>>> A.shape +(3, 2, 4) +>>> A[i, j, k] +A[i, j, k] +>>> A.as_explicit() +[[[A[0, 0, 0], A[0, 0, 1], A[0, 0, 2], A[0, 0, 3]], + [A[0, 1, 0], A[0, 1, 1], A[0, 1, 2], A[0, 1, 3]]], + [[A[1, 0, 0], A[1, 0, 1], A[1, 0, 2], A[1, 0, 3]], + [A[1, 1, 0], A[1, 1, 1], A[1, 1, 2], A[1, 1, 3]]], + [[A[2, 0, 0], A[2, 0, 1], A[2, 0, 2], A[2, 0, 3]], + [A[2, 1, 0], A[2, 1, 1], A[2, 1, 2], A[2, 1, 3]]]] + +Component-explicit arrays can be added inside array expressions: + +>>> from sympy import Array +>>> from sympy import tensorproduct +>>> from sympy.tensor.array.expressions import ArrayTensorProduct +>>> a = Array([1, 2, 3]) +>>> b = Array([i, j, k]) +>>> expr = ArrayTensorProduct(a, b, b) +>>> expr +ArrayTensorProduct([1, 2, 3], [i, j, k], [i, j, k]) +>>> expr.as_explicit() == tensorproduct(a, b, b) +True + +Constructing array expressions from index-explicit forms +-------------------------------------------------------- + +Array expressions are index-implicit. This means they do not use any indices to +represent array operations. The function ``convert_indexed_to_array( ... )`` +may be used to convert index-explicit expressions to array expressions. 
+It takes as input two parameters: the index-explicit expression and the order +of the indices: + +>>> from sympy.tensor.array.expressions import convert_indexed_to_array +>>> from sympy import Sum +>>> A = ArraySymbol("A", (3, 3)) +>>> B = ArraySymbol("B", (3, 3)) +>>> convert_indexed_to_array(A[i, j], [i, j]) +A +>>> convert_indexed_to_array(A[i, j], [j, i]) +PermuteDims(A, (0 1)) +>>> convert_indexed_to_array(A[i, j] + B[j, i], [i, j]) +ArrayAdd(A, PermuteDims(B, (0 1))) +>>> convert_indexed_to_array(Sum(A[i, j]*B[j, k], (j, 0, 2)), [i, k]) +ArrayContraction(ArrayTensorProduct(A, B), (1, 2)) + +The diagonal of a matrix in the array expression form: + +>>> convert_indexed_to_array(A[i, i], [i]) +ArrayDiagonal(A, (0, 1)) + +The trace of a matrix in the array expression form: + +>>> convert_indexed_to_array(Sum(A[i, i], (i, 0, 2)), [i]) +ArrayContraction(A, (0, 1)) + +Compatibility with matrices +--------------------------- + +Array expressions can be mixed with objects from the matrix module: + +>>> from sympy import MatrixSymbol +>>> from sympy.tensor.array.expressions import ArrayContraction +>>> M = MatrixSymbol("M", 3, 3) +>>> N = MatrixSymbol("N", 3, 3) + +Express the matrix product in the array expression form: + +>>> from sympy.tensor.array.expressions import convert_matrix_to_array +>>> expr = convert_matrix_to_array(M*N) +>>> expr +ArrayContraction(ArrayTensorProduct(M, N), (1, 2)) + +The expression can be converted back to matrix form: + +>>> from sympy.tensor.array.expressions import convert_array_to_matrix +>>> convert_array_to_matrix(expr) +M*N + +Add a second contraction on the remaining axes in order to get the trace of `M \cdot N`: + +>>> expr_tr = ArrayContraction(expr, (0, 1)) +>>> expr_tr +ArrayContraction(ArrayContraction(ArrayTensorProduct(M, N), (1, 2)), (0, 1)) + +Flatten the expression by calling ``.doit()`` and remove the nested array contraction operations: + +>>> expr_tr.doit() +ArrayContraction(ArrayTensorProduct(M, N), (0, 3), (1, 2)) + +Get the explicit form of the array expression: + +>>> expr.as_explicit() +[[M[0, 0]*N[0, 0] + M[0, 1]*N[1, 0] + M[0, 2]*N[2, 0], M[0, 0]*N[0, 1] + M[0, 1]*N[1, 1] + M[0, 2]*N[2, 1], M[0, 0]*N[0, 2] + M[0, 1]*N[1, 2] + M[0, 2]*N[2, 2]], + [M[1, 0]*N[0, 0] + M[1, 1]*N[1, 0] + M[1, 2]*N[2, 0], M[1, 0]*N[0, 1] + M[1, 1]*N[1, 1] + M[1, 2]*N[2, 1], M[1, 0]*N[0, 2] + M[1, 1]*N[1, 2] + M[1, 2]*N[2, 2]], + [M[2, 0]*N[0, 0] + M[2, 1]*N[1, 0] + M[2, 2]*N[2, 0], M[2, 0]*N[0, 1] + M[2, 1]*N[1, 1] + M[2, 2]*N[2, 1], M[2, 0]*N[0, 2] + M[2, 1]*N[1, 2] + M[2, 2]*N[2, 2]]] + +Express the trace of a matrix: + +>>> from sympy import Trace +>>> convert_matrix_to_array(Trace(M)) +ArrayContraction(M, (0, 1)) +>>> convert_matrix_to_array(Trace(M*N)) +ArrayContraction(ArrayTensorProduct(M, N), (0, 3), (1, 2)) + +Express the transposition of a matrix (will be expressed as a permutation of the axes: + +>>> convert_matrix_to_array(M.T) +PermuteDims(M, (0 1)) + +Compute the derivative array expressions: + +>>> from sympy.tensor.array.expressions import array_derive +>>> d = array_derive(M, M) +>>> d +PermuteDims(ArrayTensorProduct(I, I), (3)(1 2)) + +Verify that the derivative corresponds to the form computed with explicit matrices: + +>>> d.as_explicit() +[[[[1, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 1, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 1], [0, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [1, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 1, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 1], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 0], [1, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 1, 0]], [[0, 0, 0], [0, 0, 
0], [0, 0, 1]]]] +>>> Me = M.as_explicit() +>>> Me.diff(Me) +[[[[1, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 1, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 1], [0, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [1, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 1, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 1], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 0], [1, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 1, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 1]]]] + +""" + +__all__ = [ + "ArraySymbol", "ArrayElement", "ZeroArray", "OneArray", + "ArrayTensorProduct", + "ArrayContraction", + "ArrayDiagonal", + "PermuteDims", + "ArrayAdd", + "ArrayElementwiseApplyFunc", + "Reshape", + "convert_array_to_matrix", + "convert_matrix_to_array", + "convert_array_to_indexed", + "convert_indexed_to_array", + "array_derive", +] + +from sympy.tensor.array.expressions.array_expressions import ArrayTensorProduct, ArrayAdd, PermuteDims, ArrayDiagonal, \ + ArrayContraction, Reshape, ArraySymbol, ArrayElement, ZeroArray, OneArray, ArrayElementwiseApplyFunc +from sympy.tensor.array.expressions.arrayexpr_derivatives import array_derive +from sympy.tensor.array.expressions.from_array_to_indexed import convert_array_to_indexed +from sympy.tensor.array.expressions.from_array_to_matrix import convert_array_to_matrix +from sympy.tensor.array.expressions.from_indexed_to_array import convert_indexed_to_array +from sympy.tensor.array.expressions.from_matrix_to_array import convert_matrix_to_array diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/arrayexpr_derivatives.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/arrayexpr_derivatives.py new file mode 100644 index 0000000000000000000000000000000000000000..ab44a6fbf715ac7f2b8c287dcc84a49289f2dd76 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/arrayexpr_derivatives.py @@ -0,0 +1,194 @@ +import operator +from functools import reduce, singledispatch + +from sympy.core.expr import Expr +from sympy.core.singleton import S +from sympy.matrices.expressions.hadamard import HadamardProduct +from sympy.matrices.expressions.inverse import Inverse +from sympy.matrices.expressions.matexpr import (MatrixExpr, MatrixSymbol) +from sympy.matrices.expressions.special import Identity, OneMatrix +from sympy.matrices.expressions.transpose import Transpose +from sympy.combinatorics.permutations import _af_invert +from sympy.matrices.expressions.applyfunc import ElementwiseApplyFunction +from sympy.tensor.array.expressions.array_expressions import ( + _ArrayExpr, ZeroArray, ArraySymbol, ArrayTensorProduct, ArrayAdd, + PermuteDims, ArrayDiagonal, ArrayElementwiseApplyFunc, get_rank, + get_shape, ArrayContraction, _array_tensor_product, _array_contraction, + _array_diagonal, _array_add, _permute_dims, Reshape) +from sympy.tensor.array.expressions.from_matrix_to_array import convert_matrix_to_array + + +@singledispatch +def array_derive(expr, x): + """ + Derivatives (gradients) for array expressions. 
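+
+    Examples
+    ========
+
+    A minimal usage sketch, mirroring the module-level documentation
+    (``M`` is assumed to be a square ``MatrixSymbol``):
+
+    >>> from sympy import MatrixSymbol
+    >>> from sympy.tensor.array.expressions import array_derive
+    >>> M = MatrixSymbol("M", 3, 3)
+    >>> array_derive(M, M)
+    PermuteDims(ArrayTensorProduct(I, I), (3)(1 2))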
+ """ + raise NotImplementedError(f"not implemented for type {type(expr)}") + + +@array_derive.register(Expr) +def _(expr: Expr, x: _ArrayExpr): + return ZeroArray(*x.shape) + + +@array_derive.register(ArrayTensorProduct) +def _(expr: ArrayTensorProduct, x: Expr): + args = expr.args + addend_list = [] + for i, arg in enumerate(expr.args): + darg = array_derive(arg, x) + if darg == 0: + continue + args_prev = args[:i] + args_succ = args[i+1:] + shape_prev = reduce(operator.add, map(get_shape, args_prev), ()) + shape_succ = reduce(operator.add, map(get_shape, args_succ), ()) + addend = _array_tensor_product(*args_prev, darg, *args_succ) + tot1 = len(get_shape(x)) + tot2 = tot1 + len(shape_prev) + tot3 = tot2 + len(get_shape(arg)) + tot4 = tot3 + len(shape_succ) + perm = list(range(tot1, tot2)) + \ + list(range(tot1)) + list(range(tot2, tot3)) + \ + list(range(tot3, tot4)) + addend = _permute_dims(addend, _af_invert(perm)) + addend_list.append(addend) + if len(addend_list) == 1: + return addend_list[0] + elif len(addend_list) == 0: + return S.Zero + else: + return _array_add(*addend_list) + + +@array_derive.register(ArraySymbol) +def _(expr: ArraySymbol, x: _ArrayExpr): + if expr == x: + return _permute_dims( + ArrayTensorProduct.fromiter(Identity(i) for i in expr.shape), + [2*i for i in range(len(expr.shape))] + [2*i+1 for i in range(len(expr.shape))] + ) + return ZeroArray(*(x.shape + expr.shape)) + + +@array_derive.register(MatrixSymbol) +def _(expr: MatrixSymbol, x: _ArrayExpr): + m, n = expr.shape + if expr == x: + return _permute_dims( + _array_tensor_product(Identity(m), Identity(n)), + [0, 2, 1, 3] + ) + return ZeroArray(*(x.shape + expr.shape)) + + +@array_derive.register(Identity) +def _(expr: Identity, x: _ArrayExpr): + return ZeroArray(*(x.shape + expr.shape)) + + +@array_derive.register(OneMatrix) +def _(expr: OneMatrix, x: _ArrayExpr): + return ZeroArray(*(x.shape + expr.shape)) + + +@array_derive.register(Transpose) +def _(expr: Transpose, x: Expr): + # D(A.T, A) ==> (m,n,i,j) ==> D(A_ji, A_mn) = d_mj d_ni + # D(B.T, A) ==> (m,n,i,j) ==> D(B_ji, A_mn) + fd = array_derive(expr.arg, x) + return _permute_dims(fd, [0, 1, 3, 2]) + + +@array_derive.register(Inverse) +def _(expr: Inverse, x: Expr): + mat = expr.I + dexpr = array_derive(mat, x) + tp = _array_tensor_product(-expr, dexpr, expr) + mp = _array_contraction(tp, (1, 4), (5, 6)) + pp = _permute_dims(mp, [1, 2, 0, 3]) + return pp + + +@array_derive.register(ElementwiseApplyFunction) +def _(expr: ElementwiseApplyFunction, x: Expr): + assert get_rank(expr) == 2 + assert get_rank(x) == 2 + fdiff = expr._get_function_fdiff() + dexpr = array_derive(expr.expr, x) + tp = _array_tensor_product( + ElementwiseApplyFunction(fdiff, expr.expr), + dexpr + ) + td = _array_diagonal( + tp, (0, 4), (1, 5) + ) + return td + + +@array_derive.register(ArrayElementwiseApplyFunc) +def _(expr: ArrayElementwiseApplyFunc, x: Expr): + fdiff = expr._get_function_fdiff() + subexpr = expr.expr + dsubexpr = array_derive(subexpr, x) + tp = _array_tensor_product( + dsubexpr, + ArrayElementwiseApplyFunc(fdiff, subexpr) + ) + b = get_rank(x) + c = get_rank(expr) + diag_indices = [(b + i, b + c + i) for i in range(c)] + return _array_diagonal(tp, *diag_indices) + + +@array_derive.register(MatrixExpr) +def _(expr: MatrixExpr, x: Expr): + cg = convert_matrix_to_array(expr) + return array_derive(cg, x) + + +@array_derive.register(HadamardProduct) +def _(expr: HadamardProduct, x: Expr): + raise NotImplementedError() + + +@array_derive.register(ArrayContraction) +def 
_(expr: ArrayContraction, x: Expr): + fd = array_derive(expr.expr, x) + rank_x = len(get_shape(x)) + contraction_indices = expr.contraction_indices + new_contraction_indices = [tuple(j + rank_x for j in i) for i in contraction_indices] + return _array_contraction(fd, *new_contraction_indices) + + +@array_derive.register(ArrayDiagonal) +def _(expr: ArrayDiagonal, x: Expr): + dsubexpr = array_derive(expr.expr, x) + rank_x = len(get_shape(x)) + diag_indices = [[j + rank_x for j in i] for i in expr.diagonal_indices] + return _array_diagonal(dsubexpr, *diag_indices) + + +@array_derive.register(ArrayAdd) +def _(expr: ArrayAdd, x: Expr): + return _array_add(*[array_derive(arg, x) for arg in expr.args]) + + +@array_derive.register(PermuteDims) +def _(expr: PermuteDims, x: Expr): + de = array_derive(expr.expr, x) + perm = [0, 1] + [i + 2 for i in expr.permutation.array_form] + return _permute_dims(de, perm) + + +@array_derive.register(Reshape) +def _(expr: Reshape, x: Expr): + de = array_derive(expr.expr, x) + return Reshape(de, get_shape(x) + expr.shape) + + +def matrix_derive(expr, x): + from sympy.tensor.array.expressions.from_array_to_matrix import convert_array_to_matrix + ce = convert_matrix_to_array(expr) + dce = array_derive(ce, x) + return convert_array_to_matrix(dce).doit() diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/from_indexed_to_array.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/from_indexed_to_array.py new file mode 100644 index 0000000000000000000000000000000000000000..c219a205c4305bd7070e5117978146224521c58c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/from_indexed_to_array.py @@ -0,0 +1,257 @@ +from collections import defaultdict + +from sympy import Function +from sympy.combinatorics.permutations import _af_invert +from sympy.concrete.summations import Sum +from sympy.core.add import Add +from sympy.core.mul import Mul +from sympy.core.numbers import Integer +from sympy.core.power import Pow +from sympy.core.sorting import default_sort_key +from sympy.functions.special.tensor_functions import KroneckerDelta +from sympy.tensor.array.expressions import ArrayElementwiseApplyFunc +from sympy.tensor.indexed import (Indexed, IndexedBase) +from sympy.combinatorics import Permutation +from sympy.matrices.expressions.matexpr import MatrixElement +from sympy.tensor.array.expressions.array_expressions import ArrayDiagonal, \ + get_shape, ArrayElement, _array_tensor_product, _array_diagonal, _array_contraction, _array_add, \ + _permute_dims, OneArray, ArrayAdd +from sympy.tensor.array.expressions.utils import _get_argindex, _get_diagonal_indices + + +def convert_indexed_to_array(expr, first_indices=None): + r""" + Parse indexed expression into a form useful for code generation. 
+ + Examples + ======== + + >>> from sympy.tensor.array.expressions.from_indexed_to_array import convert_indexed_to_array + >>> from sympy import MatrixSymbol, Sum, symbols + + >>> i, j, k, d = symbols("i j k d") + >>> M = MatrixSymbol("M", d, d) + >>> N = MatrixSymbol("N", d, d) + + Recognize the trace in summation form: + + >>> expr = Sum(M[i, i], (i, 0, d-1)) + >>> convert_indexed_to_array(expr) + ArrayContraction(M, (0, 1)) + + Recognize the extraction of the diagonal by using the same index `i` on + both axes of the matrix: + + >>> expr = M[i, i] + >>> convert_indexed_to_array(expr) + ArrayDiagonal(M, (0, 1)) + + This function can help perform the transformation expressed in two + different mathematical notations as: + + `\sum_{j=0}^{N-1} A_{i,j} B_{j,k} \Longrightarrow \mathbf{A}\cdot \mathbf{B}` + + Recognize the matrix multiplication in summation form: + + >>> expr = Sum(M[i, j]*N[j, k], (j, 0, d-1)) + >>> convert_indexed_to_array(expr) + ArrayContraction(ArrayTensorProduct(M, N), (1, 2)) + + Specify that ``k`` has to be the starting index: + + >>> convert_indexed_to_array(expr, first_indices=[k]) + ArrayContraction(ArrayTensorProduct(N, M), (0, 3)) + """ + + result, indices = _convert_indexed_to_array(expr) + + if any(isinstance(i, (int, Integer)) for i in indices): + result = ArrayElement(result, indices) + indices = [] + + if not first_indices: + return result + + def _check_is_in(elem, indices): + if elem in indices: + return True + if any(elem in i for i in indices if isinstance(i, frozenset)): + return True + return False + + repl = {j: i for i in indices if isinstance(i, frozenset) for j in i} + first_indices = [repl.get(i, i) for i in first_indices] + for i in first_indices: + if not _check_is_in(i, indices): + first_indices.remove(i) + first_indices.extend([i for i in indices if not _check_is_in(i, first_indices)]) + + def _get_pos(elem, indices): + if elem in indices: + return indices.index(elem) + for i, e in enumerate(indices): + if not isinstance(e, frozenset): + continue + if elem in e: + return i + raise ValueError("not found") + + permutation = _af_invert([_get_pos(i, first_indices) for i in indices]) + if isinstance(result, ArrayAdd): + return _array_add(*[_permute_dims(arg, permutation) for arg in result.args]) + else: + return _permute_dims(result, permutation) + + +def _convert_indexed_to_array(expr): + if isinstance(expr, Sum): + function = expr.function + summation_indices = expr.variables + subexpr, subindices = _convert_indexed_to_array(function) + subindicessets = {j: i for i in subindices if isinstance(i, frozenset) for j in i} + summation_indices = sorted({subindicessets.get(i, i) for i in summation_indices}, key=default_sort_key) + # TODO: check that Kronecker delta is only contracted to one other element: + kronecker_indices = set() + if isinstance(function, Mul): + for arg in function.args: + if not isinstance(arg, KroneckerDelta): + continue + arg_indices = sorted(set(arg.indices), key=default_sort_key) + if len(arg_indices) == 2: + kronecker_indices.update(arg_indices) + kronecker_indices = sorted(kronecker_indices, key=default_sort_key) + # Check dimensional consistency: + shape = get_shape(subexpr) + if shape: + for ind, istart, iend in expr.limits: + i = _get_argindex(subindices, ind) + if istart != 0 or iend+1 != shape[i]: + raise ValueError("summation index and array dimension mismatch: %s" % ind) + contraction_indices = [] + subindices = list(subindices) + if isinstance(subexpr, ArrayDiagonal): + diagonal_indices = 
list(subexpr.diagonal_indices) + dindices = subindices[-len(diagonal_indices):] + subindices = subindices[:-len(diagonal_indices)] + for index in summation_indices: + if index in dindices: + position = dindices.index(index) + contraction_indices.append(diagonal_indices[position]) + diagonal_indices[position] = None + diagonal_indices = [i for i in diagonal_indices if i is not None] + for i, ind in enumerate(subindices): + if ind in summation_indices: + pass + if diagonal_indices: + subexpr = _array_diagonal(subexpr.expr, *diagonal_indices) + else: + subexpr = subexpr.expr + + axes_contraction = defaultdict(list) + for i, ind in enumerate(subindices): + include = all(j not in kronecker_indices for j in ind) if isinstance(ind, frozenset) else ind not in kronecker_indices + if ind in summation_indices and include: + axes_contraction[ind].append(i) + subindices[i] = None + for k, v in axes_contraction.items(): + if any(i in kronecker_indices for i in k) if isinstance(k, frozenset) else k in kronecker_indices: + continue + contraction_indices.append(tuple(v)) + free_indices = [i for i in subindices if i is not None] + indices_ret = list(free_indices) + indices_ret.sort(key=lambda x: free_indices.index(x)) + return _array_contraction( + subexpr, + *contraction_indices, + free_indices=free_indices + ), tuple(indices_ret) + if isinstance(expr, Mul): + args, indices = zip(*[_convert_indexed_to_array(arg) for arg in expr.args]) + # Check if there are KroneckerDelta objects: + kronecker_delta_repl = {} + for arg in args: + if not isinstance(arg, KroneckerDelta): + continue + # Diagonalize two indices: + i, j = arg.indices + kindices = set(arg.indices) + if i in kronecker_delta_repl: + kindices.update(kronecker_delta_repl[i]) + if j in kronecker_delta_repl: + kindices.update(kronecker_delta_repl[j]) + kindices = frozenset(kindices) + for index in kindices: + kronecker_delta_repl[index] = kindices + # Remove KroneckerDelta objects, their relations should be handled by + # ArrayDiagonal: + newargs = [] + newindices = [] + for arg, loc_indices in zip(args, indices): + if isinstance(arg, KroneckerDelta): + continue + newargs.append(arg) + newindices.append(loc_indices) + flattened_indices = [kronecker_delta_repl.get(j, j) for i in newindices for j in i] + diagonal_indices, ret_indices = _get_diagonal_indices(flattened_indices) + tp = _array_tensor_product(*newargs) + if diagonal_indices: + return _array_diagonal(tp, *diagonal_indices), ret_indices + else: + return tp, ret_indices + if isinstance(expr, MatrixElement): + indices = expr.args[1:] + diagonal_indices, ret_indices = _get_diagonal_indices(indices) + if diagonal_indices: + return _array_diagonal(expr.args[0], *diagonal_indices), ret_indices + else: + return expr.args[0], ret_indices + if isinstance(expr, ArrayElement): + indices = expr.indices + diagonal_indices, ret_indices = _get_diagonal_indices(indices) + if diagonal_indices: + return _array_diagonal(expr.name, *diagonal_indices), ret_indices + else: + return expr.name, ret_indices + if isinstance(expr, Indexed): + indices = expr.indices + diagonal_indices, ret_indices = _get_diagonal_indices(indices) + if diagonal_indices: + return _array_diagonal(expr.base, *diagonal_indices), ret_indices + else: + return expr.args[0], ret_indices + if isinstance(expr, IndexedBase): + raise NotImplementedError + if isinstance(expr, KroneckerDelta): + return expr, expr.indices + if isinstance(expr, Add): + args, indices = zip(*[_convert_indexed_to_array(arg) for arg in expr.args]) + args = list(args) + # 
Check if all indices are compatible. Otherwise expand the dimensions: + index0 = [] + shape0 = [] + for arg, arg_indices in zip(args, indices): + arg_indices_set = set(arg_indices) + arg_indices_missing = arg_indices_set.difference(index0) + index0.extend([i for i in arg_indices if i in arg_indices_missing]) + arg_shape = get_shape(arg) + shape0.extend([arg_shape[i] for i, e in enumerate(arg_indices) if e in arg_indices_missing]) + for i, (arg, arg_indices) in enumerate(zip(args, indices)): + if len(arg_indices) < len(index0): + missing_indices_pos = [i for i, e in enumerate(index0) if e not in arg_indices] + missing_shape = [shape0[i] for i in missing_indices_pos] + arg_indices = tuple(index0[j] for j in missing_indices_pos) + arg_indices + args[i] = _array_tensor_product(OneArray(*missing_shape), args[i]) + permutation = Permutation([arg_indices.index(j) for j in index0]) + # Perform index permutations: + args[i] = _permute_dims(args[i], permutation) + return _array_add(*args), tuple(index0) + if isinstance(expr, Pow): + subexpr, subindices = _convert_indexed_to_array(expr.base) + if isinstance(expr.exp, (int, Integer)): + diags = zip(*[(2*i, 2*i + 1) for i in range(expr.exp)]) + arr = _array_diagonal(_array_tensor_product(*[subexpr for i in range(expr.exp)]), *diags) + return arr, subindices + if isinstance(expr, Function): + subexpr, subindices = _convert_indexed_to_array(expr.args[0]) + return ArrayElementwiseApplyFunc(type(expr), subexpr), subindices + return expr, () diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/mutable_ndim_array.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/mutable_ndim_array.py new file mode 100644 index 0000000000000000000000000000000000000000..e1eaaf7241bc3b4a48234178d18da3aa5736e189 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/mutable_ndim_array.py @@ -0,0 +1,13 @@ +from sympy.tensor.array.ndim_array import NDimArray + + +class MutableNDimArray(NDimArray): + + def as_immutable(self): + raise NotImplementedError("abstract method") + + def as_mutable(self): + return self + + def _sympy_(self): + return self.as_immutable() diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/ndim_array.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/ndim_array.py new file mode 100644 index 0000000000000000000000000000000000000000..dbb1e0ed2cd2db942ef5e510291485411982e35c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/ndim_array.py @@ -0,0 +1,600 @@ +from sympy.core.basic import Basic +from sympy.core.containers import (Dict, Tuple) +from sympy.core.expr import Expr +from sympy.core.kind import Kind, NumberKind, UndefinedKind +from sympy.core.numbers import Integer +from sympy.core.singleton import S +from sympy.core.sympify import sympify +from sympy.external.gmpy import SYMPY_INTS +from sympy.printing.defaults import Printable + +import itertools +from collections.abc import Iterable + + +class ArrayKind(Kind): + """ + Kind for N-dimensional array in SymPy. + + This kind represents the multidimensional array that algebraic + operations are defined. Basic class for this kind is ``NDimArray``, + but any expression representing the array can have this. + + Parameters + ========== + + element_kind : Kind + Kind of the element. Default is :obj:NumberKind ``, + which means that the array contains only numbers. + + Examples + ======== + + Any instance of array class has ``ArrayKind``. 
+ + >>> from sympy import NDimArray + >>> NDimArray([1,2,3]).kind + ArrayKind(NumberKind) + + Although expressions representing an array may be not instance of + array class, it will have ``ArrayKind`` as well. + + >>> from sympy import Integral + >>> from sympy.tensor.array import NDimArray + >>> from sympy.abc import x + >>> intA = Integral(NDimArray([1,2,3]), x) + >>> isinstance(intA, NDimArray) + False + >>> intA.kind + ArrayKind(NumberKind) + + Use ``isinstance()`` to check for ``ArrayKind` without specifying + the element kind. Use ``is`` with specifying the element kind. + + >>> from sympy.tensor.array import ArrayKind + >>> from sympy.core import NumberKind + >>> boolA = NDimArray([True, False]) + >>> isinstance(boolA.kind, ArrayKind) + True + >>> boolA.kind is ArrayKind(NumberKind) + False + + See Also + ======== + + shape : Function to return the shape of objects with ``MatrixKind``. + + """ + def __new__(cls, element_kind=NumberKind): + obj = super().__new__(cls, element_kind) + obj.element_kind = element_kind + return obj + + def __repr__(self): + return "ArrayKind(%s)" % self.element_kind + + @classmethod + def _union(cls, kinds) -> 'ArrayKind': + elem_kinds = {e.kind for e in kinds} + if len(elem_kinds) == 1: + elemkind, = elem_kinds + else: + elemkind = UndefinedKind + return ArrayKind(elemkind) + + +class NDimArray(Printable): + """N-dimensional array. + + Examples + ======== + + Create an N-dim array of zeros: + + >>> from sympy import MutableDenseNDimArray + >>> a = MutableDenseNDimArray.zeros(2, 3, 4) + >>> a + [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] + + Create an N-dim array from a list; + + >>> a = MutableDenseNDimArray([[2, 3], [4, 5]]) + >>> a + [[2, 3], [4, 5]] + + >>> b = MutableDenseNDimArray([[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, 12]]]) + >>> b + [[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, 12]]] + + Create an N-dim array from a flat list with dimension shape: + + >>> a = MutableDenseNDimArray([1, 2, 3, 4, 5, 6], (2, 3)) + >>> a + [[1, 2, 3], [4, 5, 6]] + + Create an N-dim array from a matrix: + + >>> from sympy import Matrix + >>> a = Matrix([[1,2],[3,4]]) + >>> a + Matrix([ + [1, 2], + [3, 4]]) + >>> b = MutableDenseNDimArray(a) + >>> b + [[1, 2], [3, 4]] + + Arithmetic operations on N-dim arrays + + >>> a = MutableDenseNDimArray([1, 1, 1, 1], (2, 2)) + >>> b = MutableDenseNDimArray([4, 4, 4, 4], (2, 2)) + >>> c = a + b + >>> c + [[5, 5], [5, 5]] + >>> a - b + [[-3, -3], [-3, -3]] + + """ + + _diff_wrt = True + is_scalar = False + + def __new__(cls, iterable, shape=None, **kwargs): + from sympy.tensor.array import ImmutableDenseNDimArray + return ImmutableDenseNDimArray(iterable, shape, **kwargs) + + def __getitem__(self, index): + raise NotImplementedError("A subclass of NDimArray should implement __getitem__") + + def _parse_index(self, index): + if isinstance(index, (SYMPY_INTS, Integer)): + if index >= self._loop_size: + raise ValueError("Only a tuple index is accepted") + return index + + if self._loop_size == 0: + raise ValueError("Index not valid with an empty array") + + if len(index) != self._rank: + raise ValueError('Wrong number of array axes') + + real_index = 0 + # check if input index can exist in current indexing + for i in range(self._rank): + if (index[i] >= self.shape[i]) or (index[i] < -self.shape[i]): + raise ValueError('Index ' + str(index) + ' out of border') + if index[i] < 0: + real_index += 1 + real_index = real_index*self.shape[i] + index[i] + + return real_index + + def 
_get_tuple_index(self, integer_index): + index = [] + for i, sh in enumerate(reversed(self.shape)): + index.append(integer_index % sh) + integer_index //= sh + index.reverse() + return tuple(index) + + def _check_symbolic_index(self, index): + # Check if any index is symbolic: + tuple_index = (index if isinstance(index, tuple) else (index,)) + if any((isinstance(i, Expr) and (not i.is_number)) for i in tuple_index): + for i, nth_dim in zip(tuple_index, self.shape): + if ((i < 0) == True) or ((i >= nth_dim) == True): + raise ValueError("index out of range") + from sympy.tensor import Indexed + return Indexed(self, *tuple_index) + return None + + def _setter_iterable_check(self, value): + from sympy.matrices.matrices import MatrixBase + if isinstance(value, (Iterable, MatrixBase, NDimArray)): + raise NotImplementedError + + @classmethod + def _scan_iterable_shape(cls, iterable): + def f(pointer): + if not isinstance(pointer, Iterable): + return [pointer], () + + if len(pointer) == 0: + return [], (0,) + + result = [] + elems, shapes = zip(*[f(i) for i in pointer]) + if len(set(shapes)) != 1: + raise ValueError("could not determine shape unambiguously") + for i in elems: + result.extend(i) + return result, (len(shapes),)+shapes[0] + + return f(iterable) + + @classmethod + def _handle_ndarray_creation_inputs(cls, iterable=None, shape=None, **kwargs): + from sympy.matrices.matrices import MatrixBase + from sympy.tensor.array import SparseNDimArray + + if shape is None: + if iterable is None: + shape = () + iterable = () + # Construction of a sparse array from a sparse array + elif isinstance(iterable, SparseNDimArray): + return iterable._shape, iterable._sparse_array + + # Construct N-dim array from another N-dim array: + elif isinstance(iterable, NDimArray): + shape = iterable.shape + + # Construct N-dim array from an iterable (numpy arrays included): + elif isinstance(iterable, Iterable): + iterable, shape = cls._scan_iterable_shape(iterable) + + # Construct N-dim array from a Matrix: + elif isinstance(iterable, MatrixBase): + shape = iterable.shape + + else: + shape = () + iterable = (iterable,) + + if isinstance(iterable, (Dict, dict)) and shape is not None: + new_dict = iterable.copy() + for k, v in new_dict.items(): + if isinstance(k, (tuple, Tuple)): + new_key = 0 + for i, idx in enumerate(k): + new_key = new_key * shape[i] + idx + iterable[new_key] = iterable[k] + del iterable[k] + + if isinstance(shape, (SYMPY_INTS, Integer)): + shape = (shape,) + + if not all(isinstance(dim, (SYMPY_INTS, Integer)) for dim in shape): + raise TypeError("Shape should contain integers only.") + + return tuple(shape), iterable + + def __len__(self): + """Overload common function len(). Returns number of elements in array. + + Examples + ======== + + >>> from sympy import MutableDenseNDimArray + >>> a = MutableDenseNDimArray.zeros(3, 3) + >>> a + [[0, 0, 0], [0, 0, 0], [0, 0, 0]] + >>> len(a) + 9 + + """ + return self._loop_size + + @property + def shape(self): + """ + Returns array shape (dimension). + + Examples + ======== + + >>> from sympy import MutableDenseNDimArray + >>> a = MutableDenseNDimArray.zeros(3, 3) + >>> a.shape + (3, 3) + + """ + return self._shape + + def rank(self): + """ + Returns rank of array. + + Examples + ======== + + >>> from sympy import MutableDenseNDimArray + >>> a = MutableDenseNDimArray.zeros(3,4,5,6,3) + >>> a.rank() + 5 + + """ + return self._rank + + def diff(self, *args, **kwargs): + """ + Calculate the derivative of each element in the array. 
+ + Examples + ======== + + >>> from sympy import ImmutableDenseNDimArray + >>> from sympy.abc import x, y + >>> M = ImmutableDenseNDimArray([[x, y], [1, x*y]]) + >>> M.diff(x) + [[1, 0], [0, y]] + + """ + from sympy.tensor.array.array_derivatives import ArrayDerivative + kwargs.setdefault('evaluate', True) + return ArrayDerivative(self.as_immutable(), *args, **kwargs) + + def _eval_derivative(self, base): + # Types are (base: scalar, self: array) + return self.applyfunc(lambda x: base.diff(x)) + + def _eval_derivative_n_times(self, s, n): + return Basic._eval_derivative_n_times(self, s, n) + + def applyfunc(self, f): + """Apply a function to each element of the N-dim array. + + Examples + ======== + + >>> from sympy import ImmutableDenseNDimArray + >>> m = ImmutableDenseNDimArray([i*2+j for i in range(2) for j in range(2)], (2, 2)) + >>> m + [[0, 1], [2, 3]] + >>> m.applyfunc(lambda i: 2*i) + [[0, 2], [4, 6]] + """ + from sympy.tensor.array import SparseNDimArray + from sympy.tensor.array.arrayop import Flatten + + if isinstance(self, SparseNDimArray) and f(S.Zero) == 0: + return type(self)({k: f(v) for k, v in self._sparse_array.items() if f(v) != 0}, self.shape) + + return type(self)(map(f, Flatten(self)), self.shape) + + def _sympystr(self, printer): + def f(sh, shape_left, i, j): + if len(shape_left) == 1: + return "["+", ".join([printer._print(self[self._get_tuple_index(e)]) for e in range(i, j)])+"]" + + sh //= shape_left[0] + return "[" + ", ".join([f(sh, shape_left[1:], i+e*sh, i+(e+1)*sh) for e in range(shape_left[0])]) + "]" # + "\n"*len(shape_left) + + if self.rank() == 0: + return printer._print(self[()]) + + return f(self._loop_size, self.shape, 0, self._loop_size) + + def tolist(self): + """ + Converting MutableDenseNDimArray to one-dim list + + Examples + ======== + + >>> from sympy import MutableDenseNDimArray + >>> a = MutableDenseNDimArray([1, 2, 3, 4], (2, 2)) + >>> a + [[1, 2], [3, 4]] + >>> b = a.tolist() + >>> b + [[1, 2], [3, 4]] + """ + + def f(sh, shape_left, i, j): + if len(shape_left) == 1: + return [self[self._get_tuple_index(e)] for e in range(i, j)] + result = [] + sh //= shape_left[0] + for e in range(shape_left[0]): + result.append(f(sh, shape_left[1:], i+e*sh, i+(e+1)*sh)) + return result + + return f(self._loop_size, self.shape, 0, self._loop_size) + + def __add__(self, other): + from sympy.tensor.array.arrayop import Flatten + + if not isinstance(other, NDimArray): + return NotImplemented + + if self.shape != other.shape: + raise ValueError("array shape mismatch") + result_list = [i+j for i,j in zip(Flatten(self), Flatten(other))] + + return type(self)(result_list, self.shape) + + def __sub__(self, other): + from sympy.tensor.array.arrayop import Flatten + + if not isinstance(other, NDimArray): + return NotImplemented + + if self.shape != other.shape: + raise ValueError("array shape mismatch") + result_list = [i-j for i,j in zip(Flatten(self), Flatten(other))] + + return type(self)(result_list, self.shape) + + def __mul__(self, other): + from sympy.matrices.matrices import MatrixBase + from sympy.tensor.array import SparseNDimArray + from sympy.tensor.array.arrayop import Flatten + + if isinstance(other, (Iterable, NDimArray, MatrixBase)): + raise ValueError("scalar expected, use tensorproduct(...) 
for tensorial product") + + other = sympify(other) + if isinstance(self, SparseNDimArray): + if other.is_zero: + return type(self)({}, self.shape) + return type(self)({k: other*v for (k, v) in self._sparse_array.items()}, self.shape) + + result_list = [i*other for i in Flatten(self)] + return type(self)(result_list, self.shape) + + def __rmul__(self, other): + from sympy.matrices.matrices import MatrixBase + from sympy.tensor.array import SparseNDimArray + from sympy.tensor.array.arrayop import Flatten + + if isinstance(other, (Iterable, NDimArray, MatrixBase)): + raise ValueError("scalar expected, use tensorproduct(...) for tensorial product") + + other = sympify(other) + if isinstance(self, SparseNDimArray): + if other.is_zero: + return type(self)({}, self.shape) + return type(self)({k: other*v for (k, v) in self._sparse_array.items()}, self.shape) + + result_list = [other*i for i in Flatten(self)] + return type(self)(result_list, self.shape) + + def __truediv__(self, other): + from sympy.matrices.matrices import MatrixBase + from sympy.tensor.array import SparseNDimArray + from sympy.tensor.array.arrayop import Flatten + + if isinstance(other, (Iterable, NDimArray, MatrixBase)): + raise ValueError("scalar expected") + + other = sympify(other) + if isinstance(self, SparseNDimArray) and other != S.Zero: + return type(self)({k: v/other for (k, v) in self._sparse_array.items()}, self.shape) + + result_list = [i/other for i in Flatten(self)] + return type(self)(result_list, self.shape) + + def __rtruediv__(self, other): + raise NotImplementedError('unsupported operation on NDimArray') + + def __neg__(self): + from sympy.tensor.array import SparseNDimArray + from sympy.tensor.array.arrayop import Flatten + + if isinstance(self, SparseNDimArray): + return type(self)({k: -v for (k, v) in self._sparse_array.items()}, self.shape) + + result_list = [-i for i in Flatten(self)] + return type(self)(result_list, self.shape) + + def __iter__(self): + def iterator(): + if self._shape: + for i in range(self._shape[0]): + yield self[i] + else: + yield self[()] + + return iterator() + + def __eq__(self, other): + """ + NDimArray instances can be compared to each other. + Instances equal if they have same shape and data. 
+ + Examples + ======== + + >>> from sympy import MutableDenseNDimArray + >>> a = MutableDenseNDimArray.zeros(2, 3) + >>> b = MutableDenseNDimArray.zeros(2, 3) + >>> a == b + True + >>> c = a.reshape(3, 2) + >>> c == b + False + >>> a[0,0] = 1 + >>> b[0,0] = 2 + >>> a == b + False + """ + from sympy.tensor.array import SparseNDimArray + if not isinstance(other, NDimArray): + return False + + if not self.shape == other.shape: + return False + + if isinstance(self, SparseNDimArray) and isinstance(other, SparseNDimArray): + return dict(self._sparse_array) == dict(other._sparse_array) + + return list(self) == list(other) + + def __ne__(self, other): + return not self == other + + def _eval_transpose(self): + if self.rank() != 2: + raise ValueError("array rank not 2") + from .arrayop import permutedims + return permutedims(self, (1, 0)) + + def transpose(self): + return self._eval_transpose() + + def _eval_conjugate(self): + from sympy.tensor.array.arrayop import Flatten + + return self.func([i.conjugate() for i in Flatten(self)], self.shape) + + def conjugate(self): + return self._eval_conjugate() + + def _eval_adjoint(self): + return self.transpose().conjugate() + + def adjoint(self): + return self._eval_adjoint() + + def _slice_expand(self, s, dim): + if not isinstance(s, slice): + return (s,) + start, stop, step = s.indices(dim) + return [start + i*step for i in range((stop-start)//step)] + + def _get_slice_data_for_array_access(self, index): + sl_factors = [self._slice_expand(i, dim) for (i, dim) in zip(index, self.shape)] + eindices = itertools.product(*sl_factors) + return sl_factors, eindices + + def _get_slice_data_for_array_assignment(self, index, value): + if not isinstance(value, NDimArray): + value = type(self)(value) + sl_factors, eindices = self._get_slice_data_for_array_access(index) + slice_offsets = [min(i) if isinstance(i, list) else None for i in sl_factors] + # TODO: add checks for dimensions for `value`? 
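+        # Note: `slice_offsets` stores the smallest index produced by each
+        # slice (None for axes indexed by a single integer); __setitem__
+        # subtracts these offsets to translate destination indices into
+        # indices of `value`.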
+ return value, eindices, slice_offsets + + @classmethod + def _check_special_bounds(cls, flat_list, shape): + if shape == () and len(flat_list) != 1: + raise ValueError("arrays without shape need one scalar value") + if shape == (0,) and len(flat_list) > 0: + raise ValueError("if array shape is (0,) there cannot be elements") + + def _check_index_for_getitem(self, index): + if isinstance(index, (SYMPY_INTS, Integer, slice)): + index = (index,) + + if len(index) < self.rank(): + index = tuple(index) + \ + tuple(slice(None) for i in range(len(index), self.rank())) + + if len(index) > self.rank(): + raise ValueError('Dimension of index greater than rank of array') + + return index + + +class ImmutableNDimArray(NDimArray, Basic): + _op_priority = 11.0 + + def __hash__(self): + return Basic.__hash__(self) + + def as_immutable(self): + return self + + def as_mutable(self): + raise NotImplementedError("abstract method") diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/sparse_ndim_array.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/sparse_ndim_array.py new file mode 100644 index 0000000000000000000000000000000000000000..f11aa95be8ec9d10a9104d48fb28f406fe43845e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/sparse_ndim_array.py @@ -0,0 +1,196 @@ +from sympy.core.basic import Basic +from sympy.core.containers import (Dict, Tuple) +from sympy.core.singleton import S +from sympy.core.sympify import _sympify +from sympy.tensor.array.mutable_ndim_array import MutableNDimArray +from sympy.tensor.array.ndim_array import NDimArray, ImmutableNDimArray +from sympy.utilities.iterables import flatten + +import functools + +class SparseNDimArray(NDimArray): + + def __new__(self, *args, **kwargs): + return ImmutableSparseNDimArray(*args, **kwargs) + + def __getitem__(self, index): + """ + Get an element from a sparse N-dim array. + + Examples + ======== + + >>> from sympy import MutableSparseNDimArray + >>> a = MutableSparseNDimArray(range(4), (2, 2)) + >>> a + [[0, 1], [2, 3]] + >>> a[0, 0] + 0 + >>> a[1, 1] + 3 + >>> a[0] + [0, 1] + >>> a[1] + [2, 3] + + Symbolic indexing: + + >>> from sympy.abc import i, j + >>> a[i, j] + [[0, 1], [2, 3]][i, j] + + Replace `i` and `j` to get element `(0, 0)`: + + >>> a[i, j].subs({i: 0, j: 0}) + 0 + + """ + syindex = self._check_symbolic_index(index) + if syindex is not None: + return syindex + + index = self._check_index_for_getitem(index) + + # `index` is a tuple with one or more slices: + if isinstance(index, tuple) and any(isinstance(i, slice) for i in index): + sl_factors, eindices = self._get_slice_data_for_array_access(index) + array = [self._sparse_array.get(self._parse_index(i), S.Zero) for i in eindices] + nshape = [len(el) for i, el in enumerate(sl_factors) if isinstance(index[i], slice)] + return type(self)(array, nshape) + else: + index = self._parse_index(index) + return self._sparse_array.get(index, S.Zero) + + @classmethod + def zeros(cls, *shape): + """ + Return a sparse N-dim array of zeros. + """ + return cls({}, shape) + + def tomatrix(self): + """ + Converts MutableDenseNDimArray to Matrix. Can convert only 2-dim array, else will raise error. 
+ + Examples + ======== + + >>> from sympy import MutableSparseNDimArray + >>> a = MutableSparseNDimArray([1 for i in range(9)], (3, 3)) + >>> b = a.tomatrix() + >>> b + Matrix([ + [1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + """ + from sympy.matrices import SparseMatrix + if self.rank() != 2: + raise ValueError('Dimensions must be of size of 2') + + mat_sparse = {} + for key, value in self._sparse_array.items(): + mat_sparse[self._get_tuple_index(key)] = value + + return SparseMatrix(self.shape[0], self.shape[1], mat_sparse) + + def reshape(self, *newshape): + new_total_size = functools.reduce(lambda x,y: x*y, newshape) + if new_total_size != self._loop_size: + raise ValueError("Invalid reshape parameters " + newshape) + + return type(self)(self._sparse_array, newshape) + +class ImmutableSparseNDimArray(SparseNDimArray, ImmutableNDimArray): # type: ignore + + def __new__(cls, iterable=None, shape=None, **kwargs): + shape, flat_list = cls._handle_ndarray_creation_inputs(iterable, shape, **kwargs) + shape = Tuple(*map(_sympify, shape)) + cls._check_special_bounds(flat_list, shape) + loop_size = functools.reduce(lambda x,y: x*y, shape) if shape else len(flat_list) + + # Sparse array: + if isinstance(flat_list, (dict, Dict)): + sparse_array = Dict(flat_list) + else: + sparse_array = {} + for i, el in enumerate(flatten(flat_list)): + if el != 0: + sparse_array[i] = _sympify(el) + + sparse_array = Dict(sparse_array) + + self = Basic.__new__(cls, sparse_array, shape, **kwargs) + self._shape = shape + self._rank = len(shape) + self._loop_size = loop_size + self._sparse_array = sparse_array + + return self + + def __setitem__(self, index, value): + raise TypeError("immutable N-dim array") + + def as_mutable(self): + return MutableSparseNDimArray(self) + + +class MutableSparseNDimArray(MutableNDimArray, SparseNDimArray): + + def __new__(cls, iterable=None, shape=None, **kwargs): + shape, flat_list = cls._handle_ndarray_creation_inputs(iterable, shape, **kwargs) + self = object.__new__(cls) + self._shape = shape + self._rank = len(shape) + self._loop_size = functools.reduce(lambda x,y: x*y, shape) if shape else len(flat_list) + + # Sparse array: + if isinstance(flat_list, (dict, Dict)): + self._sparse_array = dict(flat_list) + return self + + self._sparse_array = {} + + for i, el in enumerate(flatten(flat_list)): + if el != 0: + self._sparse_array[i] = _sympify(el) + + return self + + def __setitem__(self, index, value): + """Allows to set items to MutableDenseNDimArray. 
+ + Examples + ======== + + >>> from sympy import MutableSparseNDimArray + >>> a = MutableSparseNDimArray.zeros(2, 2) + >>> a[0, 0] = 1 + >>> a[1, 1] = 1 + >>> a + [[1, 0], [0, 1]] + """ + if isinstance(index, tuple) and any(isinstance(i, slice) for i in index): + value, eindices, slice_offsets = self._get_slice_data_for_array_assignment(index, value) + for i in eindices: + other_i = [ind - j for ind, j in zip(i, slice_offsets) if j is not None] + other_value = value[other_i] + complete_index = self._parse_index(i) + if other_value != 0: + self._sparse_array[complete_index] = other_value + elif complete_index in self._sparse_array: + self._sparse_array.pop(complete_index) + else: + index = self._parse_index(index) + value = _sympify(value) + if value == 0 and index in self._sparse_array: + self._sparse_array.pop(index) + else: + self._sparse_array[index] = value + + def as_immutable(self): + return ImmutableSparseNDimArray(self) + + @property + def free_symbols(self): + return {i for j in self._sparse_array.values() for i in j.free_symbols} diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/functions.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/functions.py new file mode 100644 index 0000000000000000000000000000000000000000..9434b2035ef17f66e80c032a3d513dd07dc2e79f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/functions.py @@ -0,0 +1,154 @@ +from collections.abc import Iterable +from functools import singledispatch + +from sympy.core.expr import Expr +from sympy.core.mul import Mul +from sympy.core.singleton import S +from sympy.core.sympify import sympify +from sympy.core.parameters import global_parameters + + +class TensorProduct(Expr): + """ + Generic class for tensor products. + """ + is_number = False + + def __new__(cls, *args, **kwargs): + from sympy.tensor.array import NDimArray, tensorproduct, Array + from sympy.matrices.expressions.matexpr import MatrixExpr + from sympy.matrices.matrices import MatrixBase + from sympy.strategies import flatten + + args = [sympify(arg) for arg in args] + evaluate = kwargs.get("evaluate", global_parameters.evaluate) + + if not evaluate: + obj = Expr.__new__(cls, *args) + return obj + + arrays = [] + other = [] + scalar = S.One + for arg in args: + if isinstance(arg, (Iterable, MatrixBase, NDimArray)): + arrays.append(Array(arg)) + elif isinstance(arg, (MatrixExpr,)): + other.append(arg) + else: + scalar *= arg + + coeff = scalar*tensorproduct(*arrays) + if len(other) == 0: + return coeff + if coeff != 1: + newargs = [coeff] + other + else: + newargs = other + obj = Expr.__new__(cls, *newargs, **kwargs) + return flatten(obj) + + def rank(self): + return len(self.shape) + + def _get_args_shapes(self): + from sympy.tensor.array import Array + return [i.shape if hasattr(i, "shape") else Array(i).shape for i in self.args] + + @property + def shape(self): + shape_list = self._get_args_shapes() + return sum(shape_list, ()) + + def __getitem__(self, index): + index = iter(index) + return Mul.fromiter( + arg.__getitem__(tuple(next(index) for i in shp)) + for arg, shp in zip(self.args, self._get_args_shapes()) + ) + + +@singledispatch +def shape(expr): + """ + Return the shape of the *expr* as a tuple. *expr* should represent + suitable object such as matrix or array. + + Parameters + ========== + + expr : SymPy object having ``MatrixKind`` or ``ArrayKind``. + + Raises + ====== + + NoShapeError : Raised when object with wrong kind is passed. 
+ + Examples + ======== + + This function returns the shape of any object representing matrix or array. + + >>> from sympy import shape, Array, ImmutableDenseMatrix, Integral + >>> from sympy.abc import x + >>> A = Array([1, 2]) + >>> shape(A) + (2,) + >>> shape(Integral(A, x)) + (2,) + >>> M = ImmutableDenseMatrix([1, 2]) + >>> shape(M) + (2, 1) + >>> shape(Integral(M, x)) + (2, 1) + + You can support new type by dispatching. + + >>> from sympy import Expr + >>> class NewExpr(Expr): + ... pass + >>> @shape.register(NewExpr) + ... def _(expr): + ... return shape(expr.args[0]) + >>> shape(NewExpr(M)) + (2, 1) + + If unsuitable expression is passed, ``NoShapeError()`` will be raised. + + >>> shape(Integral(x, x)) + Traceback (most recent call last): + ... + sympy.tensor.functions.NoShapeError: shape() called on non-array object: Integral(x, x) + + Notes + ===== + + Array-like classes (such as ``Matrix`` or ``NDimArray``) has ``shape`` + property which returns its shape, but it cannot be used for non-array + classes containing array. This function returns the shape of any + registered object representing array. + + """ + if hasattr(expr, "shape"): + return expr.shape + raise NoShapeError( + "%s does not have shape, or its type is not registered to shape()." % expr) + + +class NoShapeError(Exception): + """ + Raised when ``shape()`` is called on non-array object. + + This error can be imported from ``sympy.tensor.functions``. + + Examples + ======== + + >>> from sympy import shape + >>> from sympy.abc import x + >>> shape(x) + Traceback (most recent call last): + ... + sympy.tensor.functions.NoShapeError: shape() called on non-array object: x + """ + pass diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/index_methods.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/index_methods.py new file mode 100644 index 0000000000000000000000000000000000000000..12f707b60b4ad0bcadc35a222d9abe0cc5e033fc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/index_methods.py @@ -0,0 +1,469 @@ +"""Module with functions operating on IndexedBase, Indexed and Idx objects + + - Check shape conformance + - Determine indices in resulting expression + + etc. + + Methods in this module could be implemented by calling methods on Expr + objects instead. When things stabilize this could be a useful + refactoring. +""" + +from functools import reduce + +from sympy.core.function import Function +from sympy.functions import exp, Piecewise +from sympy.tensor.indexed import Idx, Indexed +from sympy.utilities import sift + +from collections import OrderedDict + +class IndexConformanceException(Exception): + pass + +def _unique_and_repeated(inds): + """ + Returns the unique and repeated indices. Also note, from the examples given below + that the order of indices is maintained as given in the input. + + Examples + ======== + + >>> from sympy.tensor.index_methods import _unique_and_repeated + >>> _unique_and_repeated([2, 3, 1, 3, 0, 4, 0]) + ([2, 1, 4], [3, 0]) + """ + uniq = OrderedDict() + for i in inds: + if i in uniq: + uniq[i] = 0 + else: + uniq[i] = 1 + return sift(uniq, lambda x: uniq[x], binary=True) + +def _remove_repeated(inds): + """ + Removes repeated objects from sequences + + Returns a set of the unique objects and a tuple of all that have been + removed. 
+ + Examples + ======== + + >>> from sympy.tensor.index_methods import _remove_repeated + >>> l1 = [1, 2, 3, 2] + >>> _remove_repeated(l1) + ({1, 3}, (2,)) + + """ + u, r = _unique_and_repeated(inds) + return set(u), tuple(r) + + +def _get_indices_Mul(expr, return_dummies=False): + """Determine the outer indices of a Mul object. + + Examples + ======== + + >>> from sympy.tensor.index_methods import _get_indices_Mul + >>> from sympy.tensor.indexed import IndexedBase, Idx + >>> i, j, k = map(Idx, ['i', 'j', 'k']) + >>> x = IndexedBase('x') + >>> y = IndexedBase('y') + >>> _get_indices_Mul(x[i, k]*y[j, k]) + ({i, j}, {}) + >>> _get_indices_Mul(x[i, k]*y[j, k], return_dummies=True) + ({i, j}, {}, (k,)) + + """ + + inds = list(map(get_indices, expr.args)) + inds, syms = list(zip(*inds)) + + inds = list(map(list, inds)) + inds = list(reduce(lambda x, y: x + y, inds)) + inds, dummies = _remove_repeated(inds) + + symmetry = {} + for s in syms: + for pair in s: + if pair in symmetry: + symmetry[pair] *= s[pair] + else: + symmetry[pair] = s[pair] + + if return_dummies: + return inds, symmetry, dummies + else: + return inds, symmetry + + +def _get_indices_Pow(expr): + """Determine outer indices of a power or an exponential. + + A power is considered a universal function, so that the indices of a Pow is + just the collection of indices present in the expression. This may be + viewed as a bit inconsistent in the special case: + + x[i]**2 = x[i]*x[i] (1) + + The above expression could have been interpreted as the contraction of x[i] + with itself, but we choose instead to interpret it as a function + + lambda y: y**2 + + applied to each element of x (a universal function in numpy terms). In + order to allow an interpretation of (1) as a contraction, we need + contravariant and covariant Idx subclasses. (FIXME: this is not yet + implemented) + + Expressions in the base or exponent are subject to contraction as usual, + but an index that is present in the exponent, will not be considered + contractable with its own base. Note however, that indices in the same + exponent can be contracted with each other. + + Examples + ======== + + >>> from sympy.tensor.index_methods import _get_indices_Pow + >>> from sympy import Pow, exp, IndexedBase, Idx + >>> A = IndexedBase('A') + >>> x = IndexedBase('x') + >>> i, j, k = map(Idx, ['i', 'j', 'k']) + >>> _get_indices_Pow(exp(A[i, j]*x[j])) + ({i}, {}) + >>> _get_indices_Pow(Pow(x[i], x[i])) + ({i}, {}) + >>> _get_indices_Pow(Pow(A[i, j]*x[j], x[i])) + ({i}, {}) + + """ + base, exp = expr.as_base_exp() + binds, bsyms = get_indices(base) + einds, esyms = get_indices(exp) + + inds = binds | einds + + # FIXME: symmetries from power needs to check special cases, else nothing + symmetries = {} + + return inds, symmetries + + +def _get_indices_Add(expr): + """Determine outer indices of an Add object. + + In a sum, each term must have the same set of outer indices. 
A valid + expression could be + + x(i)*y(j) - x(j)*y(i) + + But we do not allow expressions like: + + x(i)*y(j) - z(j)*z(j) + + FIXME: Add support for Numpy broadcasting + + Examples + ======== + + >>> from sympy.tensor.index_methods import _get_indices_Add + >>> from sympy.tensor.indexed import IndexedBase, Idx + >>> i, j, k = map(Idx, ['i', 'j', 'k']) + >>> x = IndexedBase('x') + >>> y = IndexedBase('y') + >>> _get_indices_Add(x[i] + x[k]*y[i, k]) + ({i}, {}) + + """ + + inds = list(map(get_indices, expr.args)) + inds, syms = list(zip(*inds)) + + # allow broadcast of scalars + non_scalars = [x for x in inds if x != set()] + if not non_scalars: + return set(), {} + + if not all(x == non_scalars[0] for x in non_scalars[1:]): + raise IndexConformanceException("Indices are not consistent: %s" % expr) + if not reduce(lambda x, y: x != y or y, syms): + symmetries = syms[0] + else: + # FIXME: search for symmetries + symmetries = {} + + return non_scalars[0], symmetries + + +def get_indices(expr): + """Determine the outer indices of expression ``expr`` + + By *outer* we mean indices that are not summation indices. Returns a set + and a dict. The set contains outer indices and the dict contains + information about index symmetries. + + Examples + ======== + + >>> from sympy.tensor.index_methods import get_indices + >>> from sympy import symbols + >>> from sympy.tensor import IndexedBase + >>> x, y, A = map(IndexedBase, ['x', 'y', 'A']) + >>> i, j, a, z = symbols('i j a z', integer=True) + + The indices of the total expression is determined, Repeated indices imply a + summation, for instance the trace of a matrix A: + + >>> get_indices(A[i, i]) + (set(), {}) + + In the case of many terms, the terms are required to have identical + outer indices. Else an IndexConformanceException is raised. + + >>> get_indices(x[i] + A[i, j]*y[j]) + ({i}, {}) + + :Exceptions: + + An IndexConformanceException means that the terms ar not compatible, e.g. + + >>> get_indices(x[i] + y[j]) #doctest: +SKIP + (...) + IndexConformanceException: Indices are not consistent: x(i) + y(j) + + .. warning:: + The concept of *outer* indices applies recursively, starting on the deepest + level. This implies that dummies inside parenthesis are assumed to be + summed first, so that the following expression is handled gracefully: + + >>> get_indices((x[i] + A[i, j]*y[j])*x[j]) + ({i, j}, {}) + + This is correct and may appear convenient, but you need to be careful + with this as SymPy will happily .expand() the product, if requested. The + resulting expression would mix the outer ``j`` with the dummies inside + the parenthesis, which makes it a different expression. To be on the + safe side, it is best to avoid such ambiguities by using unique indices + for all contractions that should be held separate. + + """ + # We call ourself recursively to determine indices of sub expressions. 
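+    # The second element of the returned tuple carries index-symmetry
+    # information; most branches below currently return an empty dict
+    # (see the FIXME notes in the helper functions).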
+ + # break recursion + if isinstance(expr, Indexed): + c = expr.indices + inds, dummies = _remove_repeated(c) + return inds, {} + elif expr is None: + return set(), {} + elif isinstance(expr, Idx): + return {expr}, {} + elif expr.is_Atom: + return set(), {} + + + # recurse via specialized functions + else: + if expr.is_Mul: + return _get_indices_Mul(expr) + elif expr.is_Add: + return _get_indices_Add(expr) + elif expr.is_Pow or isinstance(expr, exp): + return _get_indices_Pow(expr) + + elif isinstance(expr, Piecewise): + # FIXME: No support for Piecewise yet + return set(), {} + elif isinstance(expr, Function): + # Support ufunc like behaviour by returning indices from arguments. + # Functions do not interpret repeated indices across arguments + # as summation + ind0 = set() + for arg in expr.args: + ind, sym = get_indices(arg) + ind0 |= ind + return ind0, sym + + # this test is expensive, so it should be at the end + elif not expr.has(Indexed): + return set(), {} + raise NotImplementedError( + "FIXME: No specialized handling of type %s" % type(expr)) + + +def get_contraction_structure(expr): + """Determine dummy indices of ``expr`` and describe its structure + + By *dummy* we mean indices that are summation indices. + + The structure of the expression is determined and described as follows: + + 1) A conforming summation of Indexed objects is described with a dict where + the keys are summation indices and the corresponding values are sets + containing all terms for which the summation applies. All Add objects + in the SymPy expression tree are described like this. + + 2) For all nodes in the SymPy expression tree that are *not* of type Add, the + following applies: + + If a node discovers contractions in one of its arguments, the node + itself will be stored as a key in the dict. For that key, the + corresponding value is a list of dicts, each of which is the result of a + recursive call to get_contraction_structure(). The list contains only + dicts for the non-trivial deeper contractions, omitting dicts with None + as the one and only key. + + .. Note:: The presence of expressions among the dictionary keys indicates + multiple levels of index contractions. A nested dict displays nested + contractions and may itself contain dicts from a deeper level. In + practical calculations the summation in the deepest nested level must be + calculated first so that the outer expression can access the resulting + indexed object. + + Examples + ======== + + >>> from sympy.tensor.index_methods import get_contraction_structure + >>> from sympy import default_sort_key + >>> from sympy.tensor import IndexedBase, Idx + >>> x, y, A = map(IndexedBase, ['x', 'y', 'A']) + >>> i, j, k, l = map(Idx, ['i', 'j', 'k', 'l']) + >>> get_contraction_structure(x[i]*y[i] + A[j, j]) + {(i,): {x[i]*y[i]}, (j,): {A[j, j]}} + >>> get_contraction_structure(x[i]*y[j]) + {None: {x[i]*y[j]}} + + A multiplication of contracted factors results in nested dicts representing + the internal contractions. + + >>> d = get_contraction_structure(x[i, i]*y[j, j]) + >>> sorted(d.keys(), key=default_sort_key) + [None, x[i, i]*y[j, j]] + + In this case, the product has no contractions: + + >>> d[None] + {x[i, i]*y[j, j]} + + Factors are contracted "first": + + >>> sorted(d[x[i, i]*y[j, j]], key=default_sort_key) + [{(i,): {x[i, i]}}, {(j,): {y[j, j]}}] + + A parenthesized Add object is also returned as a nested dictionary. 
The + term containing the parenthesis is a Mul with a contraction among the + arguments, so it will be found as a key in the result. It stores the + dictionary resulting from a recursive call on the Add expression. + + >>> d = get_contraction_structure(x[i]*(y[i] + A[i, j]*x[j])) + >>> sorted(d.keys(), key=default_sort_key) + [(A[i, j]*x[j] + y[i])*x[i], (i,)] + >>> d[(i,)] + {(A[i, j]*x[j] + y[i])*x[i]} + >>> d[x[i]*(A[i, j]*x[j] + y[i])] + [{None: {y[i]}, (j,): {A[i, j]*x[j]}}] + + Powers with contractions in either base or exponent will also be found as + keys in the dictionary, mapping to a list of results from recursive calls: + + >>> d = get_contraction_structure(A[j, j]**A[i, i]) + >>> d[None] + {A[j, j]**A[i, i]} + >>> nested_contractions = d[A[j, j]**A[i, i]] + >>> nested_contractions[0] + {(j,): {A[j, j]}} + >>> nested_contractions[1] + {(i,): {A[i, i]}} + + The description of the contraction structure may appear complicated when + represented with a string in the above examples, but it is easy to iterate + over: + + >>> from sympy import Expr + >>> for key in d: + ... if isinstance(key, Expr): + ... continue + ... for term in d[key]: + ... if term in d: + ... # treat deepest contraction first + ... pass + ... # treat outermost contactions here + + """ + + # We call ourself recursively to inspect sub expressions. + + if isinstance(expr, Indexed): + junk, key = _remove_repeated(expr.indices) + return {key or None: {expr}} + elif expr.is_Atom: + return {None: {expr}} + elif expr.is_Mul: + junk, junk, key = _get_indices_Mul(expr, return_dummies=True) + result = {key or None: {expr}} + # recurse on every factor + nested = [] + for fac in expr.args: + facd = get_contraction_structure(fac) + if not (None in facd and len(facd) == 1): + nested.append(facd) + if nested: + result[expr] = nested + return result + elif expr.is_Pow or isinstance(expr, exp): + # recurse in base and exp separately. If either has internal + # contractions we must include ourselves as a key in the returned dict + b, e = expr.as_base_exp() + dbase = get_contraction_structure(b) + dexp = get_contraction_structure(e) + + dicts = [] + for d in dbase, dexp: + if not (None in d and len(d) == 1): + dicts.append(d) + result = {None: {expr}} + if dicts: + result[expr] = dicts + return result + elif expr.is_Add: + # Note: we just collect all terms with identical summation indices, We + # do nothing to identify equivalent terms here, as this would require + # substitutions or pattern matching in expressions of unknown + # complexity. 
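+        # Editorial illustration (restating the docstring example above): each
+        # term is keyed by its own tuple of dummy indices, so
+        #     x[i]*y[i] + A[j, j]
+        # merges into {(i,): {x[i]*y[i]}, (j,): {A[j, j]}}.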
+ result = {} + for term in expr.args: + # recurse on every term + d = get_contraction_structure(term) + for key in d: + if key in result: + result[key] |= d[key] + else: + result[key] = d[key] + return result + + elif isinstance(expr, Piecewise): + # FIXME: No support for Piecewise yet + return {None: expr} + elif isinstance(expr, Function): + # Collect non-trivial contraction structures in each argument + # We do not report repeated indices in separate arguments as a + # contraction + deeplist = [] + for arg in expr.args: + deep = get_contraction_structure(arg) + if not (None in deep and len(deep) == 1): + deeplist.append(deep) + d = {None: {expr}} + if deeplist: + d[expr] = deeplist + return d + + # this test is expensive, so it should be at the end + elif not expr.has(Indexed): + return {None: {expr}} + raise NotImplementedError( + "FIXME: No specialized handling of type %s" % type(expr)) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/indexed.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/indexed.py new file mode 100644 index 0000000000000000000000000000000000000000..4a050a24b241bb19604462fdf1bd17e43c83f354 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/indexed.py @@ -0,0 +1,797 @@ +r"""Module that defines indexed objects. + +The classes ``IndexedBase``, ``Indexed``, and ``Idx`` represent a +matrix element ``M[i, j]`` as in the following diagram:: + + 1) The Indexed class represents the entire indexed object. + | + ___|___ + ' ' + M[i, j] + / \__\______ + | | + | | + | 2) The Idx class represents indices; each Idx can + | optionally contain information about its range. + | + 3) IndexedBase represents the 'stem' of an indexed object, here `M`. + The stem used by itself is usually taken to represent the entire + array. + +There can be any number of indices on an Indexed object. No +transformation properties are implemented in these Base objects, but +implicit contraction of repeated indices is supported. + +Note that the support for complicated (i.e. non-atomic) integer +expressions as indices is limited. (This should be improved in +future releases.) + +Examples +======== + +To express the above matrix element example you would write: + +>>> from sympy import symbols, IndexedBase, Idx +>>> M = IndexedBase('M') +>>> i, j = symbols('i j', cls=Idx) +>>> M[i, j] +M[i, j] + +Repeated indices in a product implies a summation, so to express a +matrix-vector product in terms of Indexed objects: + +>>> x = IndexedBase('x') +>>> M[i, j]*x[j] +M[i, j]*x[j] + +If the indexed objects will be converted to component based arrays, e.g. +with the code printers or the autowrap framework, you also need to provide +(symbolic or numerical) dimensions. 
This can be done by passing an +optional shape parameter to IndexedBase upon construction: + +>>> dim1, dim2 = symbols('dim1 dim2', integer=True) +>>> A = IndexedBase('A', shape=(dim1, 2*dim1, dim2)) +>>> A.shape +(dim1, 2*dim1, dim2) +>>> A[i, j, 3].shape +(dim1, 2*dim1, dim2) + +If an IndexedBase object has no shape information, it is assumed that the +array is as large as the ranges of its indices: + +>>> n, m = symbols('n m', integer=True) +>>> i = Idx('i', m) +>>> j = Idx('j', n) +>>> M[i, j].shape +(m, n) +>>> M[i, j].ranges +[(0, m - 1), (0, n - 1)] + +The above can be compared with the following: + +>>> A[i, 2, j].shape +(dim1, 2*dim1, dim2) +>>> A[i, 2, j].ranges +[(0, m - 1), None, (0, n - 1)] + +To analyze the structure of indexed expressions, you can use the methods +get_indices() and get_contraction_structure(): + +>>> from sympy.tensor import get_indices, get_contraction_structure +>>> get_indices(A[i, j, j]) +({i}, {}) +>>> get_contraction_structure(A[i, j, j]) +{(j,): {A[i, j, j]}} + +See the appropriate docstrings for a detailed explanation of the output. +""" + +# TODO: (some ideas for improvement) +# +# o test and guarantee numpy compatibility +# - implement full support for broadcasting +# - strided arrays +# +# o more functions to analyze indexed expressions +# - identify standard constructs, e.g matrix-vector product in a subexpression +# +# o functions to generate component based arrays (numpy and sympy.Matrix) +# - generate a single array directly from Indexed +# - convert simple sub-expressions +# +# o sophisticated indexing (possibly in subclasses to preserve simplicity) +# - Idx with range smaller than dimension of Indexed +# - Idx with stepsize != 1 +# - Idx with step determined by function call +from collections.abc import Iterable + +from sympy.core.numbers import Number +from sympy.core.assumptions import StdFactKB +from sympy.core import Expr, Tuple, sympify, S +from sympy.core.symbol import _filter_assumptions, Symbol +from sympy.core.logic import fuzzy_bool, fuzzy_not +from sympy.core.sympify import _sympify +from sympy.functions.special.tensor_functions import KroneckerDelta +from sympy.multipledispatch import dispatch +from sympy.utilities.iterables import is_sequence, NotIterable +from sympy.utilities.misc import filldedent + + +class IndexException(Exception): + pass + + +class Indexed(Expr): + """Represents a mathematical object with indices. + + >>> from sympy import Indexed, IndexedBase, Idx, symbols + >>> i, j = symbols('i j', cls=Idx) + >>> Indexed('A', i, j) + A[i, j] + + It is recommended that ``Indexed`` objects be created by indexing ``IndexedBase``: + ``IndexedBase('A')[i, j]`` instead of ``Indexed(IndexedBase('A'), i, j)``. + + >>> A = IndexedBase('A') + >>> a_ij = A[i, j] # Prefer this, + >>> b_ij = Indexed(A, i, j) # over this. + >>> a_ij == b_ij + True + + """ + is_commutative = True + is_Indexed = True + is_symbol = True + is_Atom = True + + def __new__(cls, base, *args, **kw_args): + from sympy.tensor.array.ndim_array import NDimArray + from sympy.matrices.matrices import MatrixBase + + if not args: + raise IndexException("Indexed needs at least one index.") + if isinstance(base, (str, Symbol)): + base = IndexedBase(base) + elif not hasattr(base, '__getitem__') and not isinstance(base, IndexedBase): + raise TypeError(filldedent(""" + The base can only be replaced with a string, Symbol, + IndexedBase or an object with a method for getting + items (i.e. an object with a `__getitem__` method). 
+ """)) + args = list(map(sympify, args)) + if isinstance(base, (NDimArray, Iterable, Tuple, MatrixBase)) and all(i.is_number for i in args): + if len(args) == 1: + return base[args[0]] + else: + return base[args] + + base = _sympify(base) + + obj = Expr.__new__(cls, base, *args, **kw_args) + + try: + IndexedBase._set_assumptions(obj, base.assumptions0) + except AttributeError: + IndexedBase._set_assumptions(obj, {}) + return obj + + def _hashable_content(self): + return super()._hashable_content() + tuple(sorted(self.assumptions0.items())) + + @property + def name(self): + return str(self) + + @property + def _diff_wrt(self): + """Allow derivatives with respect to an ``Indexed`` object.""" + return True + + def _eval_derivative(self, wrt): + from sympy.tensor.array.ndim_array import NDimArray + + if isinstance(wrt, Indexed) and wrt.base == self.base: + if len(self.indices) != len(wrt.indices): + msg = "Different # of indices: d({!s})/d({!s})".format(self, + wrt) + raise IndexException(msg) + result = S.One + for index1, index2 in zip(self.indices, wrt.indices): + result *= KroneckerDelta(index1, index2) + return result + elif isinstance(self.base, NDimArray): + from sympy.tensor.array import derive_by_array + return Indexed(derive_by_array(self.base, wrt), *self.args[1:]) + else: + if Tuple(self.indices).has(wrt): + return S.NaN + return S.Zero + + @property + def assumptions0(self): + return {k: v for k, v in self._assumptions.items() if v is not None} + + @property + def base(self): + """Returns the ``IndexedBase`` of the ``Indexed`` object. + + Examples + ======== + + >>> from sympy import Indexed, IndexedBase, Idx, symbols + >>> i, j = symbols('i j', cls=Idx) + >>> Indexed('A', i, j).base + A + >>> B = IndexedBase('B') + >>> B == B[i, j].base + True + + """ + return self.args[0] + + @property + def indices(self): + """ + Returns the indices of the ``Indexed`` object. + + Examples + ======== + + >>> from sympy import Indexed, Idx, symbols + >>> i, j = symbols('i j', cls=Idx) + >>> Indexed('A', i, j).indices + (i, j) + + """ + return self.args[1:] + + @property + def rank(self): + """ + Returns the rank of the ``Indexed`` object. + + Examples + ======== + + >>> from sympy import Indexed, Idx, symbols + >>> i, j, k, l, m = symbols('i:m', cls=Idx) + >>> Indexed('A', i, j).rank + 2 + >>> q = Indexed('A', i, j, k, l, m) + >>> q.rank + 5 + >>> q.rank == len(q.indices) + True + + """ + return len(self.args) - 1 + + @property + def shape(self): + """Returns a list with dimensions of each index. + + Dimensions is a property of the array, not of the indices. Still, if + the ``IndexedBase`` does not define a shape attribute, it is assumed + that the ranges of the indices correspond to the shape of the array. 
+ + >>> from sympy import IndexedBase, Idx, symbols + >>> n, m = symbols('n m', integer=True) + >>> i = Idx('i', m) + >>> j = Idx('j', m) + >>> A = IndexedBase('A', shape=(n, n)) + >>> B = IndexedBase('B') + >>> A[i, j].shape + (n, n) + >>> B[i, j].shape + (m, m) + """ + + if self.base.shape: + return self.base.shape + sizes = [] + for i in self.indices: + upper = getattr(i, 'upper', None) + lower = getattr(i, 'lower', None) + if None in (upper, lower): + raise IndexException(filldedent(""" + Range is not defined for all indices in: %s""" % self)) + try: + size = upper - lower + 1 + except TypeError: + raise IndexException(filldedent(""" + Shape cannot be inferred from Idx with + undefined range: %s""" % self)) + sizes.append(size) + return Tuple(*sizes) + + @property + def ranges(self): + """Returns a list of tuples with lower and upper range of each index. + + If an index does not define the data members upper and lower, the + corresponding slot in the list contains ``None`` instead of a tuple. + + Examples + ======== + + >>> from sympy import Indexed,Idx, symbols + >>> Indexed('A', Idx('i', 2), Idx('j', 4), Idx('k', 8)).ranges + [(0, 1), (0, 3), (0, 7)] + >>> Indexed('A', Idx('i', 3), Idx('j', 3), Idx('k', 3)).ranges + [(0, 2), (0, 2), (0, 2)] + >>> x, y, z = symbols('x y z', integer=True) + >>> Indexed('A', x, y, z).ranges + [None, None, None] + + """ + ranges = [] + sentinel = object() + for i in self.indices: + upper = getattr(i, 'upper', sentinel) + lower = getattr(i, 'lower', sentinel) + if sentinel not in (upper, lower): + ranges.append((lower, upper)) + else: + ranges.append(None) + return ranges + + def _sympystr(self, p): + indices = list(map(p.doprint, self.indices)) + return "%s[%s]" % (p.doprint(self.base), ", ".join(indices)) + + @property + def free_symbols(self): + base_free_symbols = self.base.free_symbols + indices_free_symbols = { + fs for i in self.indices for fs in i.free_symbols} + if base_free_symbols: + return {self} | base_free_symbols | indices_free_symbols + else: + return indices_free_symbols + + @property + def expr_free_symbols(self): + from sympy.utilities.exceptions import sympy_deprecation_warning + sympy_deprecation_warning(""" + The expr_free_symbols property is deprecated. Use free_symbols to get + the free symbols of an expression. + """, + deprecated_since_version="1.9", + active_deprecations_target="deprecated-expr-free-symbols") + + return {self} + + +class IndexedBase(Expr, NotIterable): + """Represent the base or stem of an indexed object + + The IndexedBase class represent an array that contains elements. The main purpose + of this class is to allow the convenient creation of objects of the Indexed + class. The __getitem__ method of IndexedBase returns an instance of + Indexed. Alone, without indices, the IndexedBase class can be used as a + notation for e.g. matrix equations, resembling what you could do with the + Symbol class. But, the IndexedBase class adds functionality that is not + available for Symbol instances: + + - An IndexedBase object can optionally store shape information. This can + be used in to check array conformance and conditions for numpy + broadcasting. (TODO) + - An IndexedBase object implements syntactic sugar that allows easy symbolic + representation of array operations, using implicit summation of + repeated indices. + - The IndexedBase object symbolizes a mathematical structure equivalent + to arrays, and is recognized as such for code generation and automatic + compilation and wrapping. 
+ + >>> from sympy.tensor import IndexedBase, Idx + >>> from sympy import symbols + >>> A = IndexedBase('A'); A + A + >>> type(A) + + + When an IndexedBase object receives indices, it returns an array with named + axes, represented by an Indexed object: + + >>> i, j = symbols('i j', integer=True) + >>> A[i, j, 2] + A[i, j, 2] + >>> type(A[i, j, 2]) + + + The IndexedBase constructor takes an optional shape argument. If given, + it overrides any shape information in the indices. (But not the index + ranges!) + + >>> m, n, o, p = symbols('m n o p', integer=True) + >>> i = Idx('i', m) + >>> j = Idx('j', n) + >>> A[i, j].shape + (m, n) + >>> B = IndexedBase('B', shape=(o, p)) + >>> B[i, j].shape + (o, p) + + Assumptions can be specified with keyword arguments the same way as for Symbol: + + >>> A_real = IndexedBase('A', real=True) + >>> A_real.is_real + True + >>> A != A_real + True + + Assumptions can also be inherited if a Symbol is used to initialize the IndexedBase: + + >>> I = symbols('I', integer=True) + >>> C_inherit = IndexedBase(I) + >>> C_explicit = IndexedBase('I', integer=True) + >>> C_inherit == C_explicit + True + """ + is_commutative = True + is_symbol = True + is_Atom = True + + @staticmethod + def _set_assumptions(obj, assumptions): + """Set assumptions on obj, making sure to apply consistent values.""" + tmp_asm_copy = assumptions.copy() + is_commutative = fuzzy_bool(assumptions.get('commutative', True)) + assumptions['commutative'] = is_commutative + obj._assumptions = StdFactKB(assumptions) + obj._assumptions._generator = tmp_asm_copy # Issue #8873 + + def __new__(cls, label, shape=None, *, offset=S.Zero, strides=None, **kw_args): + from sympy.matrices.matrices import MatrixBase + from sympy.tensor.array.ndim_array import NDimArray + + assumptions, kw_args = _filter_assumptions(kw_args) + if isinstance(label, str): + label = Symbol(label, **assumptions) + elif isinstance(label, Symbol): + assumptions = label._merge(assumptions) + elif isinstance(label, (MatrixBase, NDimArray)): + return label + elif isinstance(label, Iterable): + return _sympify(label) + else: + label = _sympify(label) + + if is_sequence(shape): + shape = Tuple(*shape) + elif shape is not None: + shape = Tuple(shape) + + if shape is not None: + obj = Expr.__new__(cls, label, shape) + else: + obj = Expr.__new__(cls, label) + obj._shape = shape + obj._offset = offset + obj._strides = strides + obj._name = str(label) + + IndexedBase._set_assumptions(obj, assumptions) + return obj + + @property + def name(self): + return self._name + + def _hashable_content(self): + return super()._hashable_content() + tuple(sorted(self.assumptions0.items())) + + @property + def assumptions0(self): + return {k: v for k, v in self._assumptions.items() if v is not None} + + def __getitem__(self, indices, **kw_args): + if is_sequence(indices): + # Special case needed because M[*my_tuple] is a syntax error. + if self.shape and len(self.shape) != len(indices): + raise IndexException("Rank mismatch.") + return Indexed(self, *indices, **kw_args) + else: + if self.shape and len(self.shape) != 1: + raise IndexException("Rank mismatch.") + return Indexed(self, indices, **kw_args) + + @property + def shape(self): + """Returns the shape of the ``IndexedBase`` object. + + Examples + ======== + + >>> from sympy import IndexedBase, Idx + >>> from sympy.abc import x, y + >>> IndexedBase('A', shape=(x, y)).shape + (x, y) + + Note: If the shape of the ``IndexedBase`` is specified, it will override + any shape information given by the indices. 
+ + >>> A = IndexedBase('A', shape=(x, y)) + >>> B = IndexedBase('B') + >>> i = Idx('i', 2) + >>> j = Idx('j', 1) + >>> A[i, j].shape + (x, y) + >>> B[i, j].shape + (2, 1) + + """ + return self._shape + + @property + def strides(self): + """Returns the strided scheme for the ``IndexedBase`` object. + + Normally this is a tuple denoting the number of + steps to take in the respective dimension when traversing + an array. For code generation purposes strides='C' and + strides='F' can also be used. + + strides='C' would mean that code printer would unroll + in row-major order and 'F' means unroll in column major + order. + + """ + + return self._strides + + @property + def offset(self): + """Returns the offset for the ``IndexedBase`` object. + + This is the value added to the resulting index when the + 2D Indexed object is unrolled to a 1D form. Used in code + generation. + + Examples + ========== + >>> from sympy.printing import ccode + >>> from sympy.tensor import IndexedBase, Idx + >>> from sympy import symbols + >>> l, m, n, o = symbols('l m n o', integer=True) + >>> A = IndexedBase('A', strides=(l, m, n), offset=o) + >>> i, j, k = map(Idx, 'ijk') + >>> ccode(A[i, j, k]) + 'A[l*i + m*j + n*k + o]' + + """ + return self._offset + + @property + def label(self): + """Returns the label of the ``IndexedBase`` object. + + Examples + ======== + + >>> from sympy import IndexedBase + >>> from sympy.abc import x, y + >>> IndexedBase('A', shape=(x, y)).label + A + + """ + return self.args[0] + + def _sympystr(self, p): + return p.doprint(self.label) + + +class Idx(Expr): + """Represents an integer index as an ``Integer`` or integer expression. + + There are a number of ways to create an ``Idx`` object. The constructor + takes two arguments: + + ``label`` + An integer or a symbol that labels the index. + ``range`` + Optionally you can specify a range as either + + * ``Symbol`` or integer: This is interpreted as a dimension. Lower and + upper bounds are set to ``0`` and ``range - 1``, respectively. + * ``tuple``: The two elements are interpreted as the lower and upper + bounds of the range, respectively. + + Note: bounds of the range are assumed to be either integer or infinite (oo + and -oo are allowed to specify an unbounded range). If ``n`` is given as a + bound, then ``n.is_integer`` must not return false. + + For convenience, if the label is given as a string it is automatically + converted to an integer symbol. (Note: this conversion is not done for + range or dimension arguments.) 
+ + Examples + ======== + + >>> from sympy import Idx, symbols, oo + >>> n, i, L, U = symbols('n i L U', integer=True) + + If a string is given for the label an integer ``Symbol`` is created and the + bounds are both ``None``: + + >>> idx = Idx('qwerty'); idx + qwerty + >>> idx.lower, idx.upper + (None, None) + + Both upper and lower bounds can be specified: + + >>> idx = Idx(i, (L, U)); idx + i + >>> idx.lower, idx.upper + (L, U) + + When only a single bound is given it is interpreted as the dimension + and the lower bound defaults to 0: + + >>> idx = Idx(i, n); idx.lower, idx.upper + (0, n - 1) + >>> idx = Idx(i, 4); idx.lower, idx.upper + (0, 3) + >>> idx = Idx(i, oo); idx.lower, idx.upper + (0, oo) + + """ + + is_integer = True + is_finite = True + is_real = True + is_symbol = True + is_Atom = True + _diff_wrt = True + + def __new__(cls, label, range=None, **kw_args): + + if isinstance(label, str): + label = Symbol(label, integer=True) + label, range = list(map(sympify, (label, range))) + + if label.is_Number: + if not label.is_integer: + raise TypeError("Index is not an integer number.") + return label + + if not label.is_integer: + raise TypeError("Idx object requires an integer label.") + + elif is_sequence(range): + if len(range) != 2: + raise ValueError(filldedent(""" + Idx range tuple must have length 2, but got %s""" % len(range))) + for bound in range: + if (bound.is_integer is False and bound is not S.Infinity + and bound is not S.NegativeInfinity): + raise TypeError("Idx object requires integer bounds.") + args = label, Tuple(*range) + elif isinstance(range, Expr): + if range is not S.Infinity and fuzzy_not(range.is_integer): + raise TypeError("Idx object requires an integer dimension.") + args = label, Tuple(0, range - 1) + elif range: + raise TypeError(filldedent(""" + The range must be an ordered iterable or + integer SymPy expression.""")) + else: + args = label, + + obj = Expr.__new__(cls, *args, **kw_args) + obj._assumptions["finite"] = True + obj._assumptions["real"] = True + return obj + + @property + def label(self): + """Returns the label (Integer or integer expression) of the Idx object. + + Examples + ======== + + >>> from sympy import Idx, Symbol + >>> x = Symbol('x', integer=True) + >>> Idx(x).label + x + >>> j = Symbol('j', integer=True) + >>> Idx(j).label + j + >>> Idx(j + 1).label + j + 1 + + """ + return self.args[0] + + @property + def lower(self): + """Returns the lower bound of the ``Idx``. + + Examples + ======== + + >>> from sympy import Idx + >>> Idx('j', 2).lower + 0 + >>> Idx('j', 5).lower + 0 + >>> Idx('j').lower is None + True + + """ + try: + return self.args[1][0] + except IndexError: + return + + @property + def upper(self): + """Returns the upper bound of the ``Idx``. 
+ + Examples + ======== + + >>> from sympy import Idx + >>> Idx('j', 2).upper + 1 + >>> Idx('j', 5).upper + 4 + >>> Idx('j').upper is None + True + + """ + try: + return self.args[1][1] + except IndexError: + return + + def _sympystr(self, p): + return p.doprint(self.label) + + @property + def name(self): + return self.label.name if self.label.is_Symbol else str(self.label) + + @property + def free_symbols(self): + return {self} + + +@dispatch(Idx, Idx) +def _eval_is_ge(lhs, rhs): # noqa:F811 + + other_upper = rhs if rhs.upper is None else rhs.upper + other_lower = rhs if rhs.lower is None else rhs.lower + + if lhs.lower is not None and (lhs.lower >= other_upper) == True: + return True + if lhs.upper is not None and (lhs.upper < other_lower) == True: + return False + return None + + +@dispatch(Idx, Number) # type:ignore +def _eval_is_ge(lhs, rhs): # noqa:F811 + + other_upper = rhs + other_lower = rhs + + if lhs.lower is not None and (lhs.lower >= other_upper) == True: + return True + if lhs.upper is not None and (lhs.upper < other_lower) == True: + return False + return None + + +@dispatch(Number, Idx) # type:ignore +def _eval_is_ge(lhs, rhs): # noqa:F811 + + other_upper = lhs + other_lower = lhs + + if rhs.upper is not None and (rhs.upper <= other_lower) == True: + return True + if rhs.lower is not None and (rhs.lower > other_upper) == True: + return False + return None diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tensor.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..c917f4be9eb9043aa347828fc4daed766c59911d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tensor.py @@ -0,0 +1,4863 @@ +""" +This module defines tensors with abstract index notation. + +The abstract index notation has been first formalized by Penrose. + +Tensor indices are formal objects, with a tensor type; there is no +notion of index range, it is only possible to assign the dimension, +used to trace the Kronecker delta; the dimension can be a Symbol. + +The Einstein summation convention is used. +The covariant indices are indicated with a minus sign in front of the index. + +For instance the tensor ``t = p(a)*A(b,c)*q(-c)`` has the index ``c`` +contracted. + +A tensor expression ``t`` can be called; called with its +indices in sorted order it is equal to itself: +in the above example ``t(a, b) == t``; +one can call ``t`` with different indices; ``t(c, d) == p(c)*A(d,a)*q(-a)``. + +The contracted indices are dummy indices, internally they have no name, +the indices being represented by a graph-like structure. + +Tensors are put in canonical form using ``canon_bp``, which uses +the Butler-Portugal algorithm for canonicalization using the monoterm +symmetries of the tensors. + +If there is a (anti)symmetric metric, the indices can be raised and +lowered when the tensor is put in canonical form. 
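+
+A minimal usage sketch (editorial addition; the names ``Lorentz``, ``p`` and
+``q`` below are illustrative, not part of the library):
+
+>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead
+>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
+>>> a, b = tensor_indices('a b', Lorentz)
+>>> p = TensorHead('p', [Lorentz])
+>>> q = TensorHead('q', [Lorentz])
+>>> (p(a)*q(-a)).get_free_indices()
+[]
+>>> (p(a)*q(b)).get_free_indices()
+[a, b]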
+""" + +from __future__ import annotations +from typing import Any +from functools import reduce +from math import prod + +from abc import abstractmethod, ABC +from collections import defaultdict +import operator +import itertools +from sympy.core.numbers import (Integer, Rational) +from sympy.combinatorics import Permutation +from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, \ + bsgs_direct_product, canonicalize, riemann_bsgs +from sympy.core import Basic, Expr, sympify, Add, Mul, S +from sympy.core.containers import Tuple, Dict +from sympy.core.sorting import default_sort_key +from sympy.core.symbol import Symbol, symbols +from sympy.core.sympify import CantSympify, _sympify +from sympy.core.operations import AssocOp +from sympy.external.gmpy import SYMPY_INTS +from sympy.matrices import eye +from sympy.utilities.exceptions import (sympy_deprecation_warning, + SymPyDeprecationWarning, + ignore_warnings) +from sympy.utilities.decorator import memoize_property, deprecated +from sympy.utilities.iterables import sift + + +def deprecate_data(): + sympy_deprecation_warning( + """ + The data attribute of TensorIndexType is deprecated. Use The + replace_with_arrays() method instead. + """, + deprecated_since_version="1.4", + active_deprecations_target="deprecated-tensorindextype-attrs", + stacklevel=4, + ) + +def deprecate_fun_eval(): + sympy_deprecation_warning( + """ + The Tensor.fun_eval() method is deprecated. Use + Tensor.substitute_indices() instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensor-fun-eval", + stacklevel=4, + ) + + +def deprecate_call(): + sympy_deprecation_warning( + """ + Calling a tensor like Tensor(*indices) is deprecated. Use + Tensor.substitute_indices() instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensor-fun-eval", + stacklevel=4, + ) + + +class _IndexStructure(CantSympify): + """ + This class handles the indices (free and dummy ones). It contains the + algorithms to manage the dummy indices replacements and contractions of + free indices under multiplications of tensor expressions, as well as stuff + related to canonicalization sorting, getting the permutation of the + expression and so on. It also includes tools to get the ``TensorIndex`` + objects corresponding to the given index structure. + """ + + def __init__(self, free, dum, index_types, indices, canon_bp=False): + self.free = free + self.dum = dum + self.index_types = index_types + self.indices = indices + self._ext_rank = len(self.free) + 2*len(self.dum) + self.dum.sort(key=lambda x: x[0]) + + @staticmethod + def from_indices(*indices): + """ + Create a new ``_IndexStructure`` object from a list of ``indices``. + + Explanation + =========== + + ``indices`` ``TensorIndex`` objects, the indices. Contractions are + detected upon construction. 
+ + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, _IndexStructure + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz) + >>> _IndexStructure.from_indices(m0, m1, -m1, m3) + _IndexStructure([(m0, 0), (m3, 3)], [(1, 2)], [Lorentz, Lorentz, Lorentz, Lorentz]) + """ + + free, dum = _IndexStructure._free_dum_from_indices(*indices) + index_types = [i.tensor_index_type for i in indices] + indices = _IndexStructure._replace_dummy_names(indices, free, dum) + return _IndexStructure(free, dum, index_types, indices) + + @staticmethod + def from_components_free_dum(components, free, dum): + index_types = [] + for component in components: + index_types.extend(component.index_types) + indices = _IndexStructure.generate_indices_from_free_dum_index_types(free, dum, index_types) + return _IndexStructure(free, dum, index_types, indices) + + @staticmethod + def _free_dum_from_indices(*indices): + """ + Convert ``indices`` into ``free``, ``dum`` for single component tensor. + + Explanation + =========== + + ``free`` list of tuples ``(index, pos, 0)``, + where ``pos`` is the position of index in + the list of indices formed by the component tensors + + ``dum`` list of tuples ``(pos_contr, pos_cov, 0, 0)`` + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, \ + _IndexStructure + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz) + >>> _IndexStructure._free_dum_from_indices(m0, m1, -m1, m3) + ([(m0, 0), (m3, 3)], [(1, 2)]) + """ + n = len(indices) + if n == 1: + return [(indices[0], 0)], [] + + # find the positions of the free indices and of the dummy indices + free = [True]*len(indices) + index_dict = {} + dum = [] + for i, index in enumerate(indices): + name = index.name + typ = index.tensor_index_type + contr = index.is_up + if (name, typ) in index_dict: + # found a pair of dummy indices + is_contr, pos = index_dict[(name, typ)] + # check consistency and update free + if is_contr: + if contr: + raise ValueError('two equal contravariant indices in slots %d and %d' %(pos, i)) + else: + free[pos] = False + free[i] = False + else: + if contr: + free[pos] = False + free[i] = False + else: + raise ValueError('two equal covariant indices in slots %d and %d' %(pos, i)) + if contr: + dum.append((i, pos)) + else: + dum.append((pos, i)) + else: + index_dict[(name, typ)] = index.is_up, i + + free = [(index, i) for i, index in enumerate(indices) if free[i]] + free.sort() + return free, dum + + def get_indices(self): + """ + Get a list of indices, creating new tensor indices to complete dummy indices. 
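+
+        (Editorial sketch: for the structure built in the ``from_indices``
+        example above, this returns ``[m0, L_0, -L_0, m3]``; the contracted
+        pair has been renamed using the index type's ``dummy_name``.)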
+ """ + return self.indices[:] + + @staticmethod + def generate_indices_from_free_dum_index_types(free, dum, index_types): + indices = [None]*(len(free)+2*len(dum)) + for idx, pos in free: + indices[pos] = idx + + generate_dummy_name = _IndexStructure._get_generator_for_dummy_indices(free) + for pos1, pos2 in dum: + typ1 = index_types[pos1] + indname = generate_dummy_name(typ1) + indices[pos1] = TensorIndex(indname, typ1, True) + indices[pos2] = TensorIndex(indname, typ1, False) + + return _IndexStructure._replace_dummy_names(indices, free, dum) + + @staticmethod + def _get_generator_for_dummy_indices(free): + cdt = defaultdict(int) + # if the free indices have names with dummy_name, start with an + # index higher than those for the dummy indices + # to avoid name collisions + for indx, ipos in free: + if indx.name.split('_')[0] == indx.tensor_index_type.dummy_name: + cdt[indx.tensor_index_type] = max(cdt[indx.tensor_index_type], int(indx.name.split('_')[1]) + 1) + + def dummy_name_gen(tensor_index_type): + nd = str(cdt[tensor_index_type]) + cdt[tensor_index_type] += 1 + return tensor_index_type.dummy_name + '_' + nd + + return dummy_name_gen + + @staticmethod + def _replace_dummy_names(indices, free, dum): + dum.sort(key=lambda x: x[0]) + new_indices = list(indices) + assert len(indices) == len(free) + 2*len(dum) + generate_dummy_name = _IndexStructure._get_generator_for_dummy_indices(free) + for ipos1, ipos2 in dum: + typ1 = new_indices[ipos1].tensor_index_type + indname = generate_dummy_name(typ1) + new_indices[ipos1] = TensorIndex(indname, typ1, True) + new_indices[ipos2] = TensorIndex(indname, typ1, False) + return new_indices + + def get_free_indices(self) -> list[TensorIndex]: + """ + Get a list of free indices. + """ + # get sorted indices according to their position: + free = sorted(self.free, key=lambda x: x[1]) + return [i[0] for i in free] + + def __str__(self): + return "_IndexStructure({}, {}, {})".format(self.free, self.dum, self.index_types) + + def __repr__(self): + return self.__str__() + + def _get_sorted_free_indices_for_canon(self): + sorted_free = self.free[:] + sorted_free.sort(key=lambda x: x[0]) + return sorted_free + + def _get_sorted_dum_indices_for_canon(self): + return sorted(self.dum, key=lambda x: x[0]) + + def _get_lexicographically_sorted_index_types(self): + permutation = self.indices_canon_args()[0] + index_types = [None]*self._ext_rank + for i, it in enumerate(self.index_types): + index_types[permutation(i)] = it + return index_types + + def _get_lexicographically_sorted_indices(self): + permutation = self.indices_canon_args()[0] + indices = [None]*self._ext_rank + for i, it in enumerate(self.indices): + indices[permutation(i)] = it + return indices + + def perm2tensor(self, g, is_canon_bp=False): + """ + Returns a ``_IndexStructure`` instance corresponding to the permutation ``g``. 
+ + Explanation + =========== + + ``g`` permutation corresponding to the tensor in the representation + used in canonicalization + + ``is_canon_bp`` if True, then ``g`` is the permutation + corresponding to the canonical form of the tensor + """ + sorted_free = [i[0] for i in self._get_sorted_free_indices_for_canon()] + lex_index_types = self._get_lexicographically_sorted_index_types() + lex_indices = self._get_lexicographically_sorted_indices() + nfree = len(sorted_free) + rank = self._ext_rank + dum = [[None]*2 for i in range((rank - nfree)//2)] + free = [] + + index_types = [None]*rank + indices = [None]*rank + for i in range(rank): + gi = g[i] + index_types[i] = lex_index_types[gi] + indices[i] = lex_indices[gi] + if gi < nfree: + ind = sorted_free[gi] + assert index_types[i] == sorted_free[gi].tensor_index_type + free.append((ind, i)) + else: + j = gi - nfree + idum, cov = divmod(j, 2) + if cov: + dum[idum][1] = i + else: + dum[idum][0] = i + dum = [tuple(x) for x in dum] + + return _IndexStructure(free, dum, index_types, indices) + + def indices_canon_args(self): + """ + Returns ``(g, dummies, msym, v)``, the entries of ``canonicalize`` + + See ``canonicalize`` in ``tensor_can.py`` in combinatorics module. + """ + # to be called after sorted_components + from sympy.combinatorics.permutations import _af_new + n = self._ext_rank + g = [None]*n + [n, n+1] + + # Converts the symmetry of the metric into msym from .canonicalize() + # method in the combinatorics module + def metric_symmetry_to_msym(metric): + if metric is None: + return None + sym = metric.symmetry + if sym == TensorSymmetry.fully_symmetric(2): + return 0 + if sym == TensorSymmetry.fully_symmetric(-2): + return 1 + return None + + # ordered indices: first the free indices, ordered by types + # then the dummy indices, ordered by types and contravariant before + # covariant + # g[position in tensor] = position in ordered indices + for i, (indx, ipos) in enumerate(self._get_sorted_free_indices_for_canon()): + g[ipos] = i + pos = len(self.free) + j = len(self.free) + dummies = [] + prev = None + a = [] + msym = [] + for ipos1, ipos2 in self._get_sorted_dum_indices_for_canon(): + g[ipos1] = j + g[ipos2] = j + 1 + j += 2 + typ = self.index_types[ipos1] + if typ != prev: + if a: + dummies.append(a) + a = [pos, pos + 1] + prev = typ + msym.append(metric_symmetry_to_msym(typ.metric)) + else: + a.extend([pos, pos + 1]) + pos += 2 + if a: + dummies.append(a) + + return _af_new(g), dummies, msym + + +def components_canon_args(components): + numtyp = [] + prev = None + for t in components: + if t == prev: + numtyp[-1][1] += 1 + else: + prev = t + numtyp.append([prev, 1]) + v = [] + for h, n in numtyp: + if h.comm in (0, 1): + comm = h.comm + else: + comm = TensorManager.get_comm(h.comm, h.comm) + v.append((h.symmetry.base, h.symmetry.generators, n, comm)) + return v + + +class _TensorDataLazyEvaluator(CantSympify): + """ + EXPERIMENTAL: do not rely on this class, it may change without deprecation + warnings in future versions of SymPy. + + Explanation + =========== + + This object contains the logic to associate components data to a tensor + expression. Components data are set via the ``.data`` property of tensor + expressions, is stored inside this class as a mapping between the tensor + expression and the ``ndarray``. 
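+
+    (Editorial sketch of the intended, now-deprecated workflow; the names are
+    illustrative: after assigning ``Lorentz.data = [1, -1, -1, -1]`` for a
+    ``TensorIndexType`` and ``p.data = [E, px, py, pz]`` for a ``TensorHead``,
+    reading ``(p(a)*p(-a)).data`` looks up the stored arrays and performs the
+    contraction on access.)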
+ + Computations are executed lazily: whereas the tensor expressions can have + contractions, tensor products, and additions, components data are not + computed until they are accessed by reading the ``.data`` property + associated to the tensor expression. + """ + _substitutions_dict: dict[Any, Any] = {} + _substitutions_dict_tensmul: dict[Any, Any] = {} + + def __getitem__(self, key): + dat = self._get(key) + if dat is None: + return None + + from .array import NDimArray + if not isinstance(dat, NDimArray): + return dat + + if dat.rank() == 0: + return dat[()] + elif dat.rank() == 1 and len(dat) == 1: + return dat[0] + return dat + + def _get(self, key): + """ + Retrieve ``data`` associated with ``key``. + + Explanation + =========== + + This algorithm looks into ``self._substitutions_dict`` for all + ``TensorHead`` in the ``TensExpr`` (or just ``TensorHead`` if key is a + TensorHead instance). It reconstructs the components data that the + tensor expression should have by performing on components data the + operations that correspond to the abstract tensor operations applied. + + Metric tensor is handled in a different manner: it is pre-computed in + ``self._substitutions_dict_tensmul``. + """ + if key in self._substitutions_dict: + return self._substitutions_dict[key] + + if isinstance(key, TensorHead): + return None + + if isinstance(key, Tensor): + # special case to handle metrics. Metric tensors cannot be + # constructed through contraction by the metric, their + # components show if they are a matrix or its inverse. + signature = tuple([i.is_up for i in key.get_indices()]) + srch = (key.component,) + signature + if srch in self._substitutions_dict_tensmul: + return self._substitutions_dict_tensmul[srch] + array_list = [self.data_from_tensor(key)] + return self.data_contract_dum(array_list, key.dum, key.ext_rank) + + if isinstance(key, TensMul): + tensmul_args = key.args + if len(tensmul_args) == 1 and len(tensmul_args[0].components) == 1: + # special case to handle metrics. Metric tensors cannot be + # constructed through contraction by the metric, their + # components show if they are a matrix or its inverse. 
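+                # (Editorial note: the lookup key built below has the form
+                #  (TensorHead, is_up, is_up, ...), matching the keys that
+                #  add_metric_data() stores for the metric tensor.)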
+ signature = tuple([i.is_up for i in tensmul_args[0].get_indices()]) + srch = (tensmul_args[0].components[0],) + signature + if srch in self._substitutions_dict_tensmul: + return self._substitutions_dict_tensmul[srch] + #data_list = [self.data_from_tensor(i) for i in tensmul_args if isinstance(i, TensExpr)] + data_list = [self.data_from_tensor(i) if isinstance(i, Tensor) else i.data for i in tensmul_args if isinstance(i, TensExpr)] + coeff = prod([i for i in tensmul_args if not isinstance(i, TensExpr)]) + if all(i is None for i in data_list): + return None + if any(i is None for i in data_list): + raise ValueError("Mixing tensors with associated components "\ + "data with tensors without components data") + data_result = self.data_contract_dum(data_list, key.dum, key.ext_rank) + return coeff*data_result + + if isinstance(key, TensAdd): + data_list = [] + free_args_list = [] + for arg in key.args: + if isinstance(arg, TensExpr): + data_list.append(arg.data) + free_args_list.append([x[0] for x in arg.free]) + else: + data_list.append(arg) + free_args_list.append([]) + if all(i is None for i in data_list): + return None + if any(i is None for i in data_list): + raise ValueError("Mixing tensors with associated components "\ + "data with tensors without components data") + + sum_list = [] + from .array import permutedims + for data, free_args in zip(data_list, free_args_list): + if len(free_args) < 2: + sum_list.append(data) + else: + free_args_pos = {y: x for x, y in enumerate(free_args)} + axes = [free_args_pos[arg] for arg in key.free_args] + sum_list.append(permutedims(data, axes)) + return reduce(lambda x, y: x+y, sum_list) + + return None + + @staticmethod + def data_contract_dum(ndarray_list, dum, ext_rank): + from .array import tensorproduct, tensorcontraction, MutableDenseNDimArray + arrays = list(map(MutableDenseNDimArray, ndarray_list)) + prodarr = tensorproduct(*arrays) + return tensorcontraction(prodarr, *dum) + + def data_tensorhead_from_tensmul(self, data, tensmul, tensorhead): + """ + This method is used when assigning components data to a ``TensMul`` + object, it converts components data to a fully contravariant ndarray, + which is then stored according to the ``TensorHead`` key. + """ + if data is None: + return None + + return self._correct_signature_from_indices( + data, + tensmul.get_indices(), + tensmul.free, + tensmul.dum, + True) + + def data_from_tensor(self, tensor): + """ + This method corrects the components data to the right signature + (covariant/contravariant) using the metric associated with each + ``TensorIndexType``. + """ + tensorhead = tensor.component + + if tensorhead.data is None: + return None + + return self._correct_signature_from_indices( + tensorhead.data, + tensor.get_indices(), + tensor.free, + tensor.dum) + + def _assign_data_to_tensor_expr(self, key, data): + if isinstance(key, TensAdd): + raise ValueError('cannot assign data to TensAdd') + # here it is assumed that `key` is a `TensMul` instance. 
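+        # (Editorial illustration: a product of two different heads such as
+        #  A(i0)*B(i1) has len(key.components) == 2 and is rejected below;
+        #  only a single-component TensMul can be mapped back to one
+        #  TensorHead.)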
+ if len(key.components) != 1: + raise ValueError('cannot assign data to TensMul with multiple components') + tensorhead = key.components[0] + newdata = self.data_tensorhead_from_tensmul(data, key, tensorhead) + return tensorhead, newdata + + def _check_permutations_on_data(self, tens, data): + from .array import permutedims + from .array.arrayop import Flatten + + if isinstance(tens, TensorHead): + rank = tens.rank + generators = tens.symmetry.generators + elif isinstance(tens, Tensor): + rank = tens.rank + generators = tens.components[0].symmetry.generators + elif isinstance(tens, TensorIndexType): + rank = tens.metric.rank + generators = tens.metric.symmetry.generators + + # Every generator is a permutation, check that by permuting the array + # by that permutation, the array will be the same, except for a + # possible sign change if the permutation admits it. + for gener in generators: + sign_change = +1 if (gener(rank) == rank) else -1 + data_swapped = data + last_data = data + permute_axes = list(map(gener, range(rank))) + # the order of a permutation is the number of times to get the + # identity by applying that permutation. + for i in range(gener.order()-1): + data_swapped = permutedims(data_swapped, permute_axes) + # if any value in the difference array is non-zero, raise an error: + if any(Flatten(last_data - sign_change*data_swapped)): + raise ValueError("Component data symmetry structure error") + last_data = data_swapped + + def __setitem__(self, key, value): + """ + Set the components data of a tensor object/expression. + + Explanation + =========== + + Components data are transformed to the all-contravariant form and stored + with the corresponding ``TensorHead`` object. If a ``TensorHead`` object + cannot be uniquely identified, it will raise an error. + """ + data = _TensorDataLazyEvaluator.parse_data(value) + self._check_permutations_on_data(key, data) + + # TensorHead and TensorIndexType can be assigned data directly, while + # TensMul must first convert data to a fully contravariant form, and + # assign it to its corresponding TensorHead single component. + if not isinstance(key, (TensorHead, TensorIndexType)): + key, data = self._assign_data_to_tensor_expr(key, data) + + if isinstance(key, TensorHead): + for dim, indextype in zip(data.shape, key.index_types): + if indextype.data is None: + raise ValueError("index type {} has no components data"\ + " associated (needed to raise/lower index)".format(indextype)) + if not indextype.dim.is_number: + continue + if dim != indextype.dim: + raise ValueError("wrong dimension of ndarray") + self._substitutions_dict[key] = data + + def __delitem__(self, key): + del self._substitutions_dict[key] + + def __contains__(self, key): + return key in self._substitutions_dict + + def add_metric_data(self, metric, data): + """ + Assign data to the ``metric`` tensor. The metric tensor behaves in an + anomalous way when raising and lowering indices. + + Explanation + =========== + + A fully covariant metric is the inverse transpose of the fully + contravariant metric (it is meant matrix inverse). If the metric is + symmetric, the transpose is not necessary and mixed + covariant/contravariant metrics are Kronecker deltas. + """ + # hard assignment, data should not be added to `TensorHead` for metric: + # the problem with `TensorHead` is that the metric is anomalous, i.e. + # raising and lowering the index means considering the metric or its + # inverse, this is not the case for other tensors. 
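+        # (Editorial note on the four entries stored below, for a metric g:
+        #  key (g, True, True)   -> the assigned fully contravariant data;
+        #  key (g, False, False) -> its inverse transpose (fully covariant);
+        #  keys (g, True, False) and (g, False, True) -> the mixed forms,
+        #  which reduce to identity matrices when the metric is symmetric.)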
+ self._substitutions_dict_tensmul[metric, True, True] = data + inverse_transpose = self.inverse_transpose_matrix(data) + # in symmetric spaces, the transpose is the same as the original matrix, + # the full covariant metric tensor is the inverse transpose, so this + # code will be able to handle non-symmetric metrics. + self._substitutions_dict_tensmul[metric, False, False] = inverse_transpose + # now mixed cases, these are identical to the unit matrix if the metric + # is symmetric. + m = data.tomatrix() + invt = inverse_transpose.tomatrix() + self._substitutions_dict_tensmul[metric, True, False] = m * invt + self._substitutions_dict_tensmul[metric, False, True] = invt * m + + @staticmethod + def _flip_index_by_metric(data, metric, pos): + from .array import tensorproduct, tensorcontraction + + mdim = metric.rank() + ddim = data.rank() + + if pos == 0: + data = tensorcontraction( + tensorproduct( + metric, + data + ), + (1, mdim+pos) + ) + else: + data = tensorcontraction( + tensorproduct( + data, + metric + ), + (pos, ddim) + ) + return data + + @staticmethod + def inverse_matrix(ndarray): + m = ndarray.tomatrix().inv() + return _TensorDataLazyEvaluator.parse_data(m) + + @staticmethod + def inverse_transpose_matrix(ndarray): + m = ndarray.tomatrix().inv().T + return _TensorDataLazyEvaluator.parse_data(m) + + @staticmethod + def _correct_signature_from_indices(data, indices, free, dum, inverse=False): + """ + Utility function to correct the values inside the components data + ndarray according to whether indices are covariant or contravariant. + + It uses the metric matrix to lower values of covariant indices. + """ + # change the ndarray values according covariantness/contravariantness of the indices + # use the metric + for i, indx in enumerate(indices): + if not indx.is_up and not inverse: + data = _TensorDataLazyEvaluator._flip_index_by_metric(data, indx.tensor_index_type.data, i) + elif not indx.is_up and inverse: + data = _TensorDataLazyEvaluator._flip_index_by_metric( + data, + _TensorDataLazyEvaluator.inverse_matrix(indx.tensor_index_type.data), + i + ) + return data + + @staticmethod + def _sort_data_axes(old, new): + from .array import permutedims + + new_data = old.data.copy() + + old_free = [i[0] for i in old.free] + new_free = [i[0] for i in new.free] + + for i in range(len(new_free)): + for j in range(i, len(old_free)): + if old_free[j] == new_free[i]: + old_free[i], old_free[j] = old_free[j], old_free[i] + new_data = permutedims(new_data, (i, j)) + break + return new_data + + @staticmethod + def add_rearrange_tensmul_parts(new_tensmul, old_tensmul): + def sorted_compo(): + return _TensorDataLazyEvaluator._sort_data_axes(old_tensmul, new_tensmul) + + _TensorDataLazyEvaluator._substitutions_dict[new_tensmul] = sorted_compo() + + @staticmethod + def parse_data(data): + """ + Transform ``data`` to array. The parameter ``data`` may + contain data in various formats, e.g. nested lists, SymPy ``Matrix``, + and so on. 
+ + Examples + ======== + + >>> from sympy.tensor.tensor import _TensorDataLazyEvaluator + >>> _TensorDataLazyEvaluator.parse_data([1, 3, -6, 12]) + [1, 3, -6, 12] + + >>> _TensorDataLazyEvaluator.parse_data([[1, 2], [4, 7]]) + [[1, 2], [4, 7]] + """ + from .array import MutableDenseNDimArray + + if not isinstance(data, MutableDenseNDimArray): + if len(data) == 2 and hasattr(data[0], '__call__'): + data = MutableDenseNDimArray(data[0], data[1]) + else: + data = MutableDenseNDimArray(data) + return data + +_tensor_data_substitution_dict = _TensorDataLazyEvaluator() + + +class _TensorManager: + """ + Class to manage tensor properties. + + Notes + ===== + + Tensors belong to tensor commutation groups; each group has a label + ``comm``; there are predefined labels: + + ``0`` tensors commuting with any other tensor + + ``1`` tensors anticommuting among themselves + + ``2`` tensors not commuting, apart with those with ``comm=0`` + + Other groups can be defined using ``set_comm``; tensors in those + groups commute with those with ``comm=0``; by default they + do not commute with any other group. + """ + def __init__(self): + self._comm_init() + + def _comm_init(self): + self._comm = [{} for i in range(3)] + for i in range(3): + self._comm[0][i] = 0 + self._comm[i][0] = 0 + self._comm[1][1] = 1 + self._comm[2][1] = None + self._comm[1][2] = None + self._comm_symbols2i = {0:0, 1:1, 2:2} + self._comm_i2symbol = {0:0, 1:1, 2:2} + + @property + def comm(self): + return self._comm + + def comm_symbols2i(self, i): + """ + Get the commutation group number corresponding to ``i``. + + ``i`` can be a symbol or a number or a string. + + If ``i`` is not already defined its commutation group number + is set. + """ + if i not in self._comm_symbols2i: + n = len(self._comm) + self._comm.append({}) + self._comm[n][0] = 0 + self._comm[0][n] = 0 + self._comm_symbols2i[i] = n + self._comm_i2symbol[n] = i + return n + return self._comm_symbols2i[i] + + def comm_i2symbol(self, i): + """ + Returns the symbol corresponding to the commutation group number. + """ + return self._comm_i2symbol[i] + + def set_comm(self, i, j, c): + """ + Set the commutation parameter ``c`` for commutation groups ``i, j``. + + Parameters + ========== + + i, j : symbols representing commutation groups + + c : group commutation number + + Notes + ===== + + ``i, j`` can be symbols, strings or numbers, + apart from ``0, 1`` and ``2`` which are reserved respectively + for commuting, anticommuting tensors and tensors not commuting + with any other group apart with the commuting tensors. + For the remaining cases, use this method to set the commutation rules; + by default ``c=None``. + + The group commutation number ``c`` is assigned in correspondence + to the group commutation symbols; it can be + + 0 commuting + + 1 anticommuting + + None no commutation property + + Examples + ======== + + ``G`` and ``GH`` do not commute with themselves and commute with + each other; A is commuting. 
+ + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, TensorManager, TensorSymmetry + >>> Lorentz = TensorIndexType('Lorentz') + >>> i0,i1,i2,i3,i4 = tensor_indices('i0:5', Lorentz) + >>> A = TensorHead('A', [Lorentz]) + >>> G = TensorHead('G', [Lorentz], TensorSymmetry.no_symmetry(1), 'Gcomm') + >>> GH = TensorHead('GH', [Lorentz], TensorSymmetry.no_symmetry(1), 'GHcomm') + >>> TensorManager.set_comm('Gcomm', 'GHcomm', 0) + >>> (GH(i1)*G(i0)).canon_bp() + G(i0)*GH(i1) + >>> (G(i1)*G(i0)).canon_bp() + G(i1)*G(i0) + >>> (G(i1)*A(i0)).canon_bp() + A(i0)*G(i1) + """ + if c not in (0, 1, None): + raise ValueError('`c` can assume only the values 0, 1 or None') + + if i not in self._comm_symbols2i: + n = len(self._comm) + self._comm.append({}) + self._comm[n][0] = 0 + self._comm[0][n] = 0 + self._comm_symbols2i[i] = n + self._comm_i2symbol[n] = i + if j not in self._comm_symbols2i: + n = len(self._comm) + self._comm.append({}) + self._comm[0][n] = 0 + self._comm[n][0] = 0 + self._comm_symbols2i[j] = n + self._comm_i2symbol[n] = j + ni = self._comm_symbols2i[i] + nj = self._comm_symbols2i[j] + self._comm[ni][nj] = c + self._comm[nj][ni] = c + + def set_comms(self, *args): + """ + Set the commutation group numbers ``c`` for symbols ``i, j``. + + Parameters + ========== + + args : sequence of ``(i, j, c)`` + """ + for i, j, c in args: + self.set_comm(i, j, c) + + def get_comm(self, i, j): + """ + Return the commutation parameter for commutation group numbers ``i, j`` + + see ``_TensorManager.set_comm`` + """ + return self._comm[i].get(j, 0 if i == 0 or j == 0 else None) + + def clear(self): + """ + Clear the TensorManager. + """ + self._comm_init() + + +TensorManager = _TensorManager() + + +class TensorIndexType(Basic): + """ + A TensorIndexType is characterized by its name and its metric. + + Parameters + ========== + + name : name of the tensor type + dummy_name : name of the head of dummy indices + dim : dimension, it can be a symbol or an integer or ``None`` + eps_dim : dimension of the epsilon tensor + metric_symmetry : integer that denotes metric symmetry or ``None`` for no metric + metric_name : string with the name of the metric tensor + + Attributes + ========== + + ``metric`` : the metric tensor + ``delta`` : ``Kronecker delta`` + ``epsilon`` : the ``Levi-Civita epsilon`` tensor + ``data`` : (deprecated) a property to add ``ndarray`` values, to work in a specified basis. + + Notes + ===== + + The possible values of the ``metric_symmetry`` parameter are: + + ``1`` : metric tensor is fully symmetric + ``0`` : metric tensor possesses no index symmetry + ``-1`` : metric tensor is fully antisymmetric + ``None``: there is no metric tensor (metric equals to ``None``) + + The metric is assumed to be symmetric by default. It can also be set + to a custom tensor by the ``.set_metric()`` method. + + If there is a metric the metric is used to raise and lower indices. + + In the case of non-symmetric metric, the following raising and + lowering conventions will be adopted: + + ``psi(a) = g(a, b)*psi(-b); chi(-a) = chi(b)*g(-b, -a)`` + + From these it is easy to find: + + ``g(-a, b) = delta(-a, b)`` + + where ``delta(-a, b) = delta(b, -a)`` is the ``Kronecker delta`` + (see ``TensorIndex`` for the conventions on indices). + For antisymmetric metrics there is also the following equality: + + ``g(a, -b) = -delta(a, -b)`` + + If there is no metric it is not possible to raise or lower indices; + e.g. 
the index of the defining representation of ``SU(N)`` + is 'covariant' and the conjugate representation is + 'contravariant'; for ``N > 2`` they are linearly independent. + + ``eps_dim`` is by default equal to ``dim``, if the latter is an integer; + else it can be assigned (for use in naive dimensional regularization); + if ``eps_dim`` is not an integer ``epsilon`` is ``None``. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> Lorentz.metric + metric(Lorentz,Lorentz) + """ + + def __new__(cls, name, dummy_name=None, dim=None, eps_dim=None, + metric_symmetry=1, metric_name='metric', **kwargs): + if 'dummy_fmt' in kwargs: + dummy_fmt = kwargs['dummy_fmt'] + sympy_deprecation_warning( + f""" + The dummy_fmt keyword to TensorIndexType is deprecated. Use + dummy_name={dummy_fmt} instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensorindextype-dummy-fmt", + ) + dummy_name = dummy_fmt + + if isinstance(name, str): + name = Symbol(name) + + if dummy_name is None: + dummy_name = str(name)[0] + if isinstance(dummy_name, str): + dummy_name = Symbol(dummy_name) + + if dim is None: + dim = Symbol("dim_" + dummy_name.name) + else: + dim = sympify(dim) + + if eps_dim is None: + eps_dim = dim + else: + eps_dim = sympify(eps_dim) + + metric_symmetry = sympify(metric_symmetry) + + if isinstance(metric_name, str): + metric_name = Symbol(metric_name) + + if 'metric' in kwargs: + SymPyDeprecationWarning( + """ + The 'metric' keyword argument to TensorIndexType is + deprecated. Use the 'metric_symmetry' keyword argument or the + TensorIndexType.set_metric() method instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensorindextype-metric", + ) + metric = kwargs.get('metric') + if metric is not None: + if metric in (True, False, 0, 1): + metric_name = 'metric' + #metric_antisym = metric + else: + metric_name = metric.name + #metric_antisym = metric.antisym + + if metric: + metric_symmetry = -1 + else: + metric_symmetry = 1 + + obj = Basic.__new__(cls, name, dummy_name, dim, eps_dim, + metric_symmetry, metric_name) + + obj._autogenerated = [] + return obj + + @property + def name(self): + return self.args[0].name + + @property + def dummy_name(self): + return self.args[1].name + + @property + def dim(self): + return self.args[2] + + @property + def eps_dim(self): + return self.args[3] + + @memoize_property + def metric(self): + metric_symmetry = self.args[4] + metric_name = self.args[5] + if metric_symmetry is None: + return None + + if metric_symmetry == 0: + symmetry = TensorSymmetry.no_symmetry(2) + elif metric_symmetry == 1: + symmetry = TensorSymmetry.fully_symmetric(2) + elif metric_symmetry == -1: + symmetry = TensorSymmetry.fully_symmetric(-2) + + return TensorHead(metric_name, [self]*2, symmetry) + + @memoize_property + def delta(self): + return TensorHead('KD', [self]*2, TensorSymmetry.fully_symmetric(2)) + + @memoize_property + def epsilon(self): + if not isinstance(self.eps_dim, (SYMPY_INTS, Integer)): + return None + symmetry = TensorSymmetry.fully_symmetric(-self.eps_dim) + return TensorHead('Eps', [self]*self.eps_dim, symmetry) + + def set_metric(self, tensor): + self._metric = tensor + + def __lt__(self, other): + return self.name < other.name + + def __str__(self): + return self.name + + __repr__ = __str__ + + # Everything below this line is deprecated + + @property + def data(self): + deprecate_data() + with 
ignore_warnings(SymPyDeprecationWarning): + return _tensor_data_substitution_dict[self] + + @data.setter + def data(self, data): + deprecate_data() + # This assignment is a bit controversial, should metric components be assigned + # to the metric only or also to the TensorIndexType object? The advantage here + # is the ability to assign a 1D array and transform it to a 2D diagonal array. + from .array import MutableDenseNDimArray + + data = _TensorDataLazyEvaluator.parse_data(data) + if data.rank() > 2: + raise ValueError("data have to be of rank 1 (diagonal metric) or 2.") + if data.rank() == 1: + if self.dim.is_number: + nda_dim = data.shape[0] + if nda_dim != self.dim: + raise ValueError("Dimension mismatch") + + dim = data.shape[0] + newndarray = MutableDenseNDimArray.zeros(dim, dim) + for i, val in enumerate(data): + newndarray[i, i] = val + data = newndarray + dim1, dim2 = data.shape + if dim1 != dim2: + raise ValueError("Non-square matrix tensor.") + if self.dim.is_number: + if self.dim != dim1: + raise ValueError("Dimension mismatch") + _tensor_data_substitution_dict[self] = data + _tensor_data_substitution_dict.add_metric_data(self.metric, data) + with ignore_warnings(SymPyDeprecationWarning): + delta = self.get_kronecker_delta() + i1 = TensorIndex('i1', self) + i2 = TensorIndex('i2', self) + with ignore_warnings(SymPyDeprecationWarning): + delta(i1, -i2).data = _TensorDataLazyEvaluator.parse_data(eye(dim1)) + + @data.deleter + def data(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + if self in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self] + if self.metric in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self.metric] + + @deprecated( + """ + The TensorIndexType.get_kronecker_delta() method is deprecated. Use + the TensorIndexType.delta attribute instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensorindextype-methods", + ) + def get_kronecker_delta(self): + sym2 = TensorSymmetry(get_symmetric_group_sgs(2)) + delta = TensorHead('KD', [self]*2, sym2) + return delta + + @deprecated( + """ + The TensorIndexType.get_epsilon() method is deprecated. Use + the TensorIndexType.epsilon attribute instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensorindextype-methods", + ) + def get_epsilon(self): + if not isinstance(self._eps_dim, (SYMPY_INTS, Integer)): + return None + sym = TensorSymmetry(get_symmetric_group_sgs(self._eps_dim, 1)) + epsilon = TensorHead('Eps', [self]*self._eps_dim, sym) + return epsilon + + def _components_data_full_destroy(self): + """ + EXPERIMENTAL: do not rely on this API method. 
+ + This destroys components data associated to the ``TensorIndexType``, if + any, specifically: + + * metric tensor data + * Kronecker tensor data + """ + if self in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self] + + def delete_tensmul_data(key): + if key in _tensor_data_substitution_dict._substitutions_dict_tensmul: + del _tensor_data_substitution_dict._substitutions_dict_tensmul[key] + + # delete metric data: + delete_tensmul_data((self.metric, True, True)) + delete_tensmul_data((self.metric, True, False)) + delete_tensmul_data((self.metric, False, True)) + delete_tensmul_data((self.metric, False, False)) + + # delete delta tensor data: + delta = self.get_kronecker_delta() + if delta in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[delta] + + +class TensorIndex(Basic): + """ + Represents a tensor index + + Parameters + ========== + + name : name of the index, or ``True`` if you want it to be automatically assigned + tensor_index_type : ``TensorIndexType`` of the index + is_up : flag for contravariant index (is_up=True by default) + + Attributes + ========== + + ``name`` + ``tensor_index_type`` + ``is_up`` + + Notes + ===== + + Tensor indices are contracted with the Einstein summation convention. + + An index can be in contravariant or in covariant form; in the latter + case it is represented prepending a ``-`` to the index name. Adding + ``-`` to a covariant (is_up=False) index makes it contravariant. + + Dummy indices have a name with head given by + ``tensor_inde_type.dummy_name`` with underscore and a number. + + Similar to ``symbols`` multiple contravariant indices can be created + at once using ``tensor_indices(s, typ)``, where ``s`` is a string + of names. + + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, TensorIndex, TensorHead, tensor_indices + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> mu = TensorIndex('mu', Lorentz, is_up=False) + >>> nu, rho = tensor_indices('nu, rho', Lorentz) + >>> A = TensorHead('A', [Lorentz, Lorentz]) + >>> A(mu, nu) + A(-mu, nu) + >>> A(-mu, -rho) + A(mu, -rho) + >>> A(mu, -mu) + A(-L_0, L_0) + """ + def __new__(cls, name, tensor_index_type, is_up=True): + if isinstance(name, str): + name_symbol = Symbol(name) + elif isinstance(name, Symbol): + name_symbol = name + elif name is True: + name = "_i{}".format(len(tensor_index_type._autogenerated)) + name_symbol = Symbol(name) + tensor_index_type._autogenerated.append(name_symbol) + else: + raise ValueError("invalid name") + + is_up = sympify(is_up) + return Basic.__new__(cls, name_symbol, tensor_index_type, is_up) + + @property + def name(self): + return self.args[0].name + + @property + def tensor_index_type(self): + return self.args[1] + + @property + def is_up(self): + return self.args[2] + + def _print(self): + s = self.name + if not self.is_up: + s = '-%s' % s + return s + + def __lt__(self, other): + return ((self.tensor_index_type, self.name) < + (other.tensor_index_type, other.name)) + + def __neg__(self): + t1 = TensorIndex(self.name, self.tensor_index_type, + (not self.is_up)) + return t1 + + +def tensor_indices(s, typ): + """ + Returns list of tensor indices given their names and their types. 
+ + Parameters + ========== + + s : string of comma separated names of indices + + typ : ``TensorIndexType`` of the indices + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> a, b, c, d = tensor_indices('a,b,c,d', Lorentz) + """ + if isinstance(s, str): + a = [x.name for x in symbols(s, seq=True)] + else: + raise ValueError('expecting a string') + + tilist = [TensorIndex(i, typ) for i in a] + if len(tilist) == 1: + return tilist[0] + return tilist + + +class TensorSymmetry(Basic): + """ + Monoterm symmetry of a tensor (i.e. any symmetric or anti-symmetric + index permutation). For the relevant terminology see ``tensor_can.py`` + section of the combinatorics module. + + Parameters + ========== + + bsgs : tuple ``(base, sgs)`` BSGS of the symmetry of the tensor + + Attributes + ========== + + ``base`` : base of the BSGS + ``generators`` : generators of the BSGS + ``rank`` : rank of the tensor + + Notes + ===== + + A tensor can have an arbitrary monoterm symmetry provided by its BSGS. + Multiterm symmetries, like the cyclic symmetry of the Riemann tensor + (i.e., Bianchi identity), are not covered. See combinatorics module for + information on how to generate BSGS for a general index permutation group. + Simple symmetries can be generated using built-in methods. + + See Also + ======== + + sympy.combinatorics.tensor_can.get_symmetric_group_sgs + + Examples + ======== + + Define a symmetric tensor of rank 2 + + >>> from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, get_symmetric_group_sgs, TensorHead + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> sym = TensorSymmetry(get_symmetric_group_sgs(2)) + >>> T = TensorHead('T', [Lorentz]*2, sym) + + Note, that the same can also be done using built-in TensorSymmetry methods + + >>> sym2 = TensorSymmetry.fully_symmetric(2) + >>> sym == sym2 + True + """ + def __new__(cls, *args, **kw_args): + if len(args) == 1: + base, generators = args[0] + elif len(args) == 2: + base, generators = args + else: + raise TypeError("bsgs required, either two separate parameters or one tuple") + + if not isinstance(base, Tuple): + base = Tuple(*base) + if not isinstance(generators, Tuple): + generators = Tuple(*generators) + + return Basic.__new__(cls, base, generators, **kw_args) + + @property + def base(self): + return self.args[0] + + @property + def generators(self): + return self.args[1] + + @property + def rank(self): + return self.generators[0].size - 2 + + @classmethod + def fully_symmetric(cls, rank): + """ + Returns a fully symmetric (antisymmetric if ``rank``<0) + TensorSymmetry object for ``abs(rank)`` indices. + """ + if rank > 0: + bsgs = get_symmetric_group_sgs(rank, False) + elif rank < 0: + bsgs = get_symmetric_group_sgs(-rank, True) + elif rank == 0: + bsgs = ([], [Permutation(1)]) + return TensorSymmetry(bsgs) + + @classmethod + def direct_product(cls, *args): + """ + Returns a TensorSymmetry object that is being a direct product of + fully (anti-)symmetric index permutation groups. 
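+
+        A minimal usage sketch (two symmetric slots times two antisymmetric
+        slots; the BSGS bookkeeping is handled internally):
+
+        >>> from sympy.tensor.tensor import TensorSymmetry
+        >>> sym = TensorSymmetry.direct_product(2, -2)
+        >>> sym.rank
+        4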
+
+        Notes
+        =====
+
+        Some examples for different values of ``(*args)``:
+        ``(1)`` vector, equivalent to ``TensorSymmetry.fully_symmetric(1)``
+        ``(2)`` tensor with 2 symmetric indices, equivalent to ``.fully_symmetric(2)``
+        ``(-2)`` tensor with 2 antisymmetric indices, equivalent to ``.fully_symmetric(-2)``
+        ``(2, -2)`` tensor with the first 2 indices commuting and the last 2 anticommuting
+        ``(1, 1, 1)`` tensor with 3 indices without any symmetry
+        """
+        base, sgs = [], [Permutation(1)]
+        for arg in args:
+            if arg > 0:
+                bsgs2 = get_symmetric_group_sgs(arg, False)
+            elif arg < 0:
+                bsgs2 = get_symmetric_group_sgs(-arg, True)
+            else:
+                continue
+            base, sgs = bsgs_direct_product(base, sgs, *bsgs2)
+
+        return TensorSymmetry(base, sgs)
+
+    @classmethod
+    def riemann(cls):
+        """
+        Returns a monoterm symmetry of the Riemann tensor
+        """
+        return TensorSymmetry(riemann_bsgs)
+
+    @classmethod
+    def no_symmetry(cls, rank):
+        """
+        TensorSymmetry object for ``rank`` indices with no symmetry
+        """
+        return TensorSymmetry([], [Permutation(rank+1)])
+
+
+@deprecated(
+    """
+    The tensorsymmetry() function is deprecated. Use the TensorSymmetry
+    constructor instead.
+    """,
+    deprecated_since_version="1.5",
+    active_deprecations_target="deprecated-tensorsymmetry",
+)
+def tensorsymmetry(*args):
+    """
+    Returns a ``TensorSymmetry`` object. This function is deprecated, use
+    ``TensorSymmetry.direct_product()`` or ``.riemann()`` instead.
+
+    Explanation
+    ===========
+
+    One can represent a tensor with any monoterm slot symmetry group
+    using a BSGS.
+
+    ``args`` can be a BSGS
+    ``args[0]`` base
+    ``args[1]`` sgs
+
+    Usually tensors are in (direct products of) representations
+    of the symmetric group;
+    ``args`` can be a list of lists representing the shapes of Young tableaux
+
+    Notes
+    =====
+
+    For instance:
+    ``[[1]]`` vector
+    ``[[1]*n]`` symmetric tensor of rank ``n``
+    ``[[n]]`` antisymmetric tensor of rank ``n``
+    ``[[2, 2]]`` monoterm slot symmetry of the Riemann tensor
+    ``[[1],[1]]`` vector*vector
+    ``[[2],[1],[1]]`` (antisymmetric tensor)*vector*vector
+
+    Notice that with the shape ``[2, 2]`` we associate only the monoterm
+    symmetries of the Riemann tensor; this is an abuse of notation,
+    since the shape ``[2, 2]`` corresponds usually to the irreducible
+    representation characterized by the monoterm symmetries and by the
+    cyclic symmetry.
+    """
+    from sympy.combinatorics import Permutation
+
+    def tableau2bsgs(a):
+        if len(a) == 1:
+            # antisymmetric tensor of rank a[0]
+            n = a[0]
+            bsgs = get_symmetric_group_sgs(n, 1)
+        else:
+            if all(x == 1 for x in a):
+                # fully symmetric tensor of rank len(a)
+                n = len(a)
+                bsgs = get_symmetric_group_sgs(n)
+            elif a == [2, 2]:
+                bsgs = riemann_bsgs
+            else:
+                raise NotImplementedError
+        return bsgs
+
+    if not args:
+        return TensorSymmetry(Tuple(), Tuple(Permutation(1)))
+
+    if len(args) == 2 and isinstance(args[1][0], Permutation):
+        return TensorSymmetry(args)
+    base, sgs = tableau2bsgs(args[0])
+    for a in args[1:]:
+        basex, sgsx = tableau2bsgs(a)
+        base, sgs = bsgs_direct_product(base, sgs, basex, sgsx)
+    return TensorSymmetry(Tuple(base, sgs))
+
+@deprecated(
+    "TensorType is deprecated. Use tensor_heads() instead.",
+    deprecated_since_version="1.5",
+    active_deprecations_target="deprecated-tensortype",
+)
+class TensorType(Basic):
+    """
+    Class of tensor types. Deprecated, use tensor_heads() instead.
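+
+    A minimal sketch of the replacement API (the names below are only
+    illustrative):
+
+    >>> from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, tensor_heads
+    >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
+    >>> A, B = tensor_heads('A B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))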
+ + Parameters + ========== + + index_types : list of ``TensorIndexType`` of the tensor indices + symmetry : ``TensorSymmetry`` of the tensor + + Attributes + ========== + + ``index_types`` + ``symmetry`` + ``types`` : list of ``TensorIndexType`` without repetitions + """ + is_commutative = False + + def __new__(cls, index_types, symmetry, **kw_args): + assert symmetry.rank == len(index_types) + obj = Basic.__new__(cls, Tuple(*index_types), symmetry, **kw_args) + return obj + + @property + def index_types(self): + return self.args[0] + + @property + def symmetry(self): + return self.args[1] + + @property + def types(self): + return sorted(set(self.index_types), key=lambda x: x.name) + + def __str__(self): + return 'TensorType(%s)' % ([str(x) for x in self.index_types]) + + def __call__(self, s, comm=0): + """ + Return a TensorHead object or a list of TensorHead objects. + + Parameters + ========== + + s : name or string of names. + + comm : Commutation group. + + see ``_TensorManager.set_comm`` + """ + if isinstance(s, str): + names = [x.name for x in symbols(s, seq=True)] + else: + raise ValueError('expecting a string') + if len(names) == 1: + return TensorHead(names[0], self.index_types, self.symmetry, comm) + else: + return [TensorHead(name, self.index_types, self.symmetry, comm) for name in names] + + +@deprecated( + """ + The tensorhead() function is deprecated. Use tensor_heads() instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensorhead", +) +def tensorhead(name, typ, sym=None, comm=0): + """ + Function generating tensorhead(s). This method is deprecated, + use TensorHead constructor or tensor_heads() instead. + + Parameters + ========== + + name : name or sequence of names (as in ``symbols``) + + typ : index types + + sym : same as ``*args`` in ``tensorsymmetry`` + + comm : commutation group number + see ``_TensorManager.set_comm`` + """ + if sym is None: + sym = [[1] for i in range(len(typ))] + with ignore_warnings(SymPyDeprecationWarning): + sym = tensorsymmetry(*sym) + return TensorHead(name, typ, sym, comm) + + +class TensorHead(Basic): + """ + Tensor head of the tensor. + + Parameters + ========== + + name : name of the tensor + index_types : list of TensorIndexType + symmetry : TensorSymmetry of the tensor + comm : commutation group number + + Attributes + ========== + + ``name`` + ``index_types`` + ``rank`` : total number of indices + ``symmetry`` + ``comm`` : commutation group + + Notes + ===== + + Similar to ``symbols`` multiple TensorHeads can be created using + ``tensorhead(s, typ, sym=None, comm=0)`` function, where ``s`` + is the string of names and ``sym`` is the monoterm tensor symmetry + (see ``tensorsymmetry``). + + A ``TensorHead`` belongs to a commutation group, defined by a + symbol on number ``comm`` (see ``_TensorManager.set_comm``); + tensors in a commutation group have the same commutation properties; + by default ``comm`` is ``0``, the group of the commuting tensors. + + Examples + ======== + + Define a fully antisymmetric tensor of rank 2: + + >>> from sympy.tensor.tensor import TensorIndexType, TensorHead, TensorSymmetry + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> asym2 = TensorSymmetry.fully_symmetric(-2) + >>> A = TensorHead('A', [Lorentz, Lorentz], asym2) + + Examples with ndarray values, the components data assigned to the + ``TensorHead`` object are assumed to be in a fully-contravariant + representation. 
In case it is necessary to assign components data which + represents the values of a non-fully covariant tensor, see the other + examples. + + >>> from sympy.tensor.tensor import tensor_indices + >>> from sympy import diag + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> i0, i1 = tensor_indices('i0:2', Lorentz) + + Specify a replacement dictionary to keep track of the arrays to use for + replacements in the tensorial expression. The ``TensorIndexType`` is + associated to the metric used for contractions (in fully covariant form): + + >>> repl = {Lorentz: diag(1, -1, -1, -1)} + + Let's see some examples of working with components with the electromagnetic + tensor: + + >>> from sympy import symbols + >>> Ex, Ey, Ez, Bx, By, Bz = symbols('E_x E_y E_z B_x B_y B_z') + >>> c = symbols('c', positive=True) + + Let's define `F`, an antisymmetric tensor: + + >>> F = TensorHead('F', [Lorentz, Lorentz], asym2) + + Let's update the dictionary to contain the matrix to use in the + replacements: + + >>> repl.update({F(-i0, -i1): [ + ... [0, Ex/c, Ey/c, Ez/c], + ... [-Ex/c, 0, -Bz, By], + ... [-Ey/c, Bz, 0, -Bx], + ... [-Ez/c, -By, Bx, 0]]}) + + Now it is possible to retrieve the contravariant form of the Electromagnetic + tensor: + + >>> F(i0, i1).replace_with_arrays(repl, [i0, i1]) + [[0, -E_x/c, -E_y/c, -E_z/c], [E_x/c, 0, -B_z, B_y], [E_y/c, B_z, 0, -B_x], [E_z/c, -B_y, B_x, 0]] + + and the mixed contravariant-covariant form: + + >>> F(i0, -i1).replace_with_arrays(repl, [i0, -i1]) + [[0, E_x/c, E_y/c, E_z/c], [E_x/c, 0, B_z, -B_y], [E_y/c, -B_z, 0, B_x], [E_z/c, B_y, -B_x, 0]] + + Energy-momentum of a particle may be represented as: + + >>> from sympy import symbols + >>> P = TensorHead('P', [Lorentz], TensorSymmetry.no_symmetry(1)) + >>> E, px, py, pz = symbols('E p_x p_y p_z', positive=True) + >>> repl.update({P(i0): [E, px, py, pz]}) + + The contravariant and covariant components are, respectively: + + >>> P(i0).replace_with_arrays(repl, [i0]) + [E, p_x, p_y, p_z] + >>> P(-i0).replace_with_arrays(repl, [-i0]) + [E, -p_x, -p_y, -p_z] + + The contraction of a 1-index tensor by itself: + + >>> expr = P(i0)*P(-i0) + >>> expr.replace_with_arrays(repl, []) + E**2 - p_x**2 - p_y**2 - p_z**2 + """ + is_commutative = False + + def __new__(cls, name, index_types, symmetry=None, comm=0): + if isinstance(name, str): + name_symbol = Symbol(name) + elif isinstance(name, Symbol): + name_symbol = name + else: + raise ValueError("invalid name") + + if symmetry is None: + symmetry = TensorSymmetry.no_symmetry(len(index_types)) + else: + assert symmetry.rank == len(index_types) + + obj = Basic.__new__(cls, name_symbol, Tuple(*index_types), symmetry) + obj.comm = TensorManager.comm_symbols2i(comm) + return obj + + @property + def name(self): + return self.args[0].name + + @property + def index_types(self): + return list(self.args[1]) + + @property + def symmetry(self): + return self.args[2] + + @property + def rank(self): + return len(self.index_types) + + def __lt__(self, other): + return (self.name, self.index_types) < (other.name, other.index_types) + + def commutes_with(self, other): + """ + Returns ``0`` if ``self`` and ``other`` commute, ``1`` if they anticommute. + + Returns ``None`` if ``self`` and ``other`` neither commute nor anticommute. + """ + r = TensorManager.get_comm(self.comm, other.comm) + return r + + def _print(self): + return '%s(%s)' %(self.name, ','.join([str(x) for x in self.index_types])) + + def __call__(self, *indices, **kw_args): + """ + Returns a tensor with indices. 
+ + Explanation + =========== + + There is a special behavior in case of indices denoted by ``True``, + they are considered auto-matrix indices, their slots are automatically + filled, and confer to the tensor the behavior of a matrix or vector + upon multiplication with another tensor containing auto-matrix indices + of the same ``TensorIndexType``. This means indices get summed over the + same way as in matrix multiplication. For matrix behavior, define two + auto-matrix indices, for vector behavior define just one. + + Indices can also be strings, in which case the attribute + ``index_types`` is used to convert them to proper ``TensorIndex``. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorSymmetry, TensorHead + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> a, b = tensor_indices('a,b', Lorentz) + >>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.no_symmetry(2)) + >>> t = A(a, -b) + >>> t + A(a, -b) + + """ + + updated_indices = [] + for idx, typ in zip(indices, self.index_types): + if isinstance(idx, str): + idx = idx.strip().replace(" ", "") + if idx.startswith('-'): + updated_indices.append(TensorIndex(idx[1:], typ, + is_up=False)) + else: + updated_indices.append(TensorIndex(idx, typ)) + else: + updated_indices.append(idx) + + updated_indices += indices[len(updated_indices):] + + tensor = Tensor(self, updated_indices, **kw_args) + return tensor.doit() + + # Everything below this line is deprecated + + def __pow__(self, other): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + if self.data is None: + raise ValueError("No power on abstract tensors.") + from .array import tensorproduct, tensorcontraction + metrics = [_.data for _ in self.index_types] + + marray = self.data + marraydim = marray.rank() + for metric in metrics: + marray = tensorproduct(marray, metric, marray) + marray = tensorcontraction(marray, (0, marraydim), (marraydim+1, marraydim+2)) + + return marray ** (other * S.Half) + + @property + def data(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return _tensor_data_substitution_dict[self] + + @data.setter + def data(self, data): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + _tensor_data_substitution_dict[self] = data + + @data.deleter + def data(self): + deprecate_data() + if self in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self] + + def __iter__(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return self.data.__iter__() + + def _components_data_full_destroy(self): + """ + EXPERIMENTAL: do not rely on this API method. + + Destroy components data associated to the ``TensorHead`` object, this + checks for attached components data, and destroys components data too. + """ + # do not garbage collect Kronecker tensor (it should be done by + # ``TensorIndexType`` garbage collection) + deprecate_data() + if self.name == "KD": + return + + # the data attached to a tensor must be deleted only by the TensorHead + # destructor. If the TensorHead is deleted, it means that there are no + # more instances of that tensor anywhere. 
+ if self in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self] + + +def tensor_heads(s, index_types, symmetry=None, comm=0): + """ + Returns a sequence of TensorHeads from a string `s` + """ + if isinstance(s, str): + names = [x.name for x in symbols(s, seq=True)] + else: + raise ValueError('expecting a string') + + thlist = [TensorHead(name, index_types, symmetry, comm) for name in names] + if len(thlist) == 1: + return thlist[0] + return thlist + + +class TensExpr(Expr, ABC): + """ + Abstract base class for tensor expressions + + Notes + ===== + + A tensor expression is an expression formed by tensors; + currently the sums of tensors are distributed. + + A ``TensExpr`` can be a ``TensAdd`` or a ``TensMul``. + + ``TensMul`` objects are formed by products of component tensors, + and include a coefficient, which is a SymPy expression. + + + In the internal representation contracted indices are represented + by ``(ipos1, ipos2, icomp1, icomp2)``, where ``icomp1`` is the position + of the component tensor with contravariant index, ``ipos1`` is the + slot which the index occupies in that component tensor. + + Contracted indices are therefore nameless in the internal representation. + """ + + _op_priority = 12.0 + is_commutative = False + + def __neg__(self): + return self*S.NegativeOne + + def __abs__(self): + raise NotImplementedError + + def __add__(self, other): + return TensAdd(self, other).doit() + + def __radd__(self, other): + return TensAdd(other, self).doit() + + def __sub__(self, other): + return TensAdd(self, -other).doit() + + def __rsub__(self, other): + return TensAdd(other, -self).doit() + + def __mul__(self, other): + """ + Multiply two tensors using Einstein summation convention. + + Explanation + =========== + + If the two tensors have an index in common, one contravariant + and the other covariant, in their product the indices are summed + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz) + >>> g = Lorentz.metric + >>> p, q = tensor_heads('p,q', [Lorentz]) + >>> t1 = p(m0) + >>> t2 = q(-m0) + >>> t1*t2 + p(L_0)*q(-L_0) + """ + return TensMul(self, other).doit() + + def __rmul__(self, other): + return TensMul(other, self).doit() + + def __truediv__(self, other): + other = _sympify(other) + if isinstance(other, TensExpr): + raise ValueError('cannot divide by a tensor') + return TensMul(self, S.One/other).doit() + + def __rtruediv__(self, other): + raise ValueError('cannot divide by a tensor') + + def __pow__(self, other): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + if self.data is None: + raise ValueError("No power without ndarray data.") + from .array import tensorproduct, tensorcontraction + free = self.free + marray = self.data + mdim = marray.rank() + for metric in free: + marray = tensorcontraction( + tensorproduct( + marray, + metric[0].tensor_index_type.data, + marray), + (0, mdim), (mdim+1, mdim+2) + ) + return marray ** (other * S.Half) + + def __rpow__(self, other): + raise NotImplementedError + + @property + @abstractmethod + def nocoeff(self): + raise NotImplementedError("abstract method") + + @property + @abstractmethod + def coeff(self): + raise NotImplementedError("abstract method") + + @abstractmethod + def get_indices(self): + raise NotImplementedError("abstract method") + + @abstractmethod + def get_free_indices(self) -> list[TensorIndex]: 
+ raise NotImplementedError("abstract method") + + @abstractmethod + def _replace_indices(self, repl: dict[TensorIndex, TensorIndex]) -> TensExpr: + raise NotImplementedError("abstract method") + + def fun_eval(self, *index_tuples): + deprecate_fun_eval() + return self.substitute_indices(*index_tuples) + + def get_matrix(self): + """ + DEPRECATED: do not use. + + Returns ndarray components data as a matrix, if components data are + available and ndarray dimension does not exceed 2. + """ + from sympy.matrices.dense import Matrix + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + if 0 < self.rank <= 2: + rows = self.data.shape[0] + columns = self.data.shape[1] if self.rank == 2 else 1 + if self.rank == 2: + mat_list = [] * rows + for i in range(rows): + mat_list.append([]) + for j in range(columns): + mat_list[i].append(self[i, j]) + else: + mat_list = [None] * rows + for i in range(rows): + mat_list[i] = self[i] + return Matrix(mat_list) + else: + raise NotImplementedError( + "missing multidimensional reduction to matrix.") + + @staticmethod + def _get_indices_permutation(indices1, indices2): + return [indices1.index(i) for i in indices2] + + def expand(self, **hints): + return _expand(self, **hints).doit() + + def _expand(self, **kwargs): + return self + + def _get_free_indices_set(self): + indset = set() + for arg in self.args: + if isinstance(arg, TensExpr): + indset.update(arg._get_free_indices_set()) + return indset + + def _get_dummy_indices_set(self): + indset = set() + for arg in self.args: + if isinstance(arg, TensExpr): + indset.update(arg._get_dummy_indices_set()) + return indset + + def _get_indices_set(self): + indset = set() + for arg in self.args: + if isinstance(arg, TensExpr): + indset.update(arg._get_indices_set()) + return indset + + @property + def _iterate_dummy_indices(self): + dummy_set = self._get_dummy_indices_set() + + def recursor(expr, pos): + if isinstance(expr, TensorIndex): + if expr in dummy_set: + yield (expr, pos) + elif isinstance(expr, (Tuple, TensExpr)): + for p, arg in enumerate(expr.args): + yield from recursor(arg, pos+(p,)) + + return recursor(self, ()) + + @property + def _iterate_free_indices(self): + free_set = self._get_free_indices_set() + + def recursor(expr, pos): + if isinstance(expr, TensorIndex): + if expr in free_set: + yield (expr, pos) + elif isinstance(expr, (Tuple, TensExpr)): + for p, arg in enumerate(expr.args): + yield from recursor(arg, pos+(p,)) + + return recursor(self, ()) + + @property + def _iterate_indices(self): + def recursor(expr, pos): + if isinstance(expr, TensorIndex): + yield (expr, pos) + elif isinstance(expr, (Tuple, TensExpr)): + for p, arg in enumerate(expr.args): + yield from recursor(arg, pos+(p,)) + + return recursor(self, ()) + + @staticmethod + def _contract_and_permute_with_metric(metric, array, pos, dim): + # TODO: add possibility of metric after (spinors) + from .array import tensorcontraction, tensorproduct, permutedims + + array = tensorcontraction(tensorproduct(metric, array), (1, 2+pos)) + permu = list(range(dim)) + permu[0], permu[pos] = permu[pos], permu[0] + return permutedims(array, permu) + + @staticmethod + def _match_indices_with_other_tensor(array, free_ind1, free_ind2, replacement_dict): + from .array import permutedims + + index_types1 = [i.tensor_index_type for i in free_ind1] + + # Check if variance of indices needs to be fixed: + pos2up = [] + pos2down = [] + free2remaining = free_ind2[:] + for pos1, index1 in enumerate(free_ind1): + if index1 in free2remaining: + pos2 = 
free2remaining.index(index1) + free2remaining[pos2] = None + continue + if -index1 in free2remaining: + pos2 = free2remaining.index(-index1) + free2remaining[pos2] = None + free_ind2[pos2] = index1 + if index1.is_up: + pos2up.append(pos2) + else: + pos2down.append(pos2) + else: + index2 = free2remaining[pos1] + if index2 is None: + raise ValueError("incompatible indices: %s and %s" % (free_ind1, free_ind2)) + free2remaining[pos1] = None + free_ind2[pos1] = index1 + if index1.is_up ^ index2.is_up: + if index1.is_up: + pos2up.append(pos1) + else: + pos2down.append(pos1) + + if len(set(free_ind1) & set(free_ind2)) < len(free_ind1): + raise ValueError("incompatible indices: %s and %s" % (free_ind1, free_ind2)) + + # Raise indices: + for pos in pos2up: + index_type_pos = index_types1[pos] + if index_type_pos not in replacement_dict: + raise ValueError("No metric provided to lower index") + metric = replacement_dict[index_type_pos] + metric_inverse = _TensorDataLazyEvaluator.inverse_matrix(metric) + array = TensExpr._contract_and_permute_with_metric(metric_inverse, array, pos, len(free_ind1)) + # Lower indices: + for pos in pos2down: + index_type_pos = index_types1[pos] + if index_type_pos not in replacement_dict: + raise ValueError("No metric provided to lower index") + metric = replacement_dict[index_type_pos] + array = TensExpr._contract_and_permute_with_metric(metric, array, pos, len(free_ind1)) + + if free_ind1: + permutation = TensExpr._get_indices_permutation(free_ind2, free_ind1) + array = permutedims(array, permutation) + + if hasattr(array, "rank") and array.rank() == 0: + array = array[()] + + return free_ind2, array + + def replace_with_arrays(self, replacement_dict, indices=None): + """ + Replace the tensorial expressions with arrays. The final array will + correspond to the N-dimensional array with indices arranged according + to ``indices``. + + Parameters + ========== + + replacement_dict + dictionary containing the replacement rules for tensors. + indices + the index order with respect to which the array is read. The + original index order will be used if no value is passed. 
+ + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices + >>> from sympy.tensor.tensor import TensorHead + >>> from sympy import symbols, diag + + >>> L = TensorIndexType("L") + >>> i, j = tensor_indices("i j", L) + >>> A = TensorHead("A", [L]) + >>> A(i).replace_with_arrays({A(i): [1, 2]}, [i]) + [1, 2] + + Since 'indices' is optional, we can also call replace_with_arrays by + this way if no specific index order is needed: + + >>> A(i).replace_with_arrays({A(i): [1, 2]}) + [1, 2] + + >>> expr = A(i)*A(j) + >>> expr.replace_with_arrays({A(i): [1, 2]}) + [[1, 2], [2, 4]] + + For contractions, specify the metric of the ``TensorIndexType``, which + in this case is ``L``, in its covariant form: + + >>> expr = A(i)*A(-i) + >>> expr.replace_with_arrays({A(i): [1, 2], L: diag(1, -1)}) + -3 + + Symmetrization of an array: + + >>> H = TensorHead("H", [L, L]) + >>> a, b, c, d = symbols("a b c d") + >>> expr = H(i, j)/2 + H(j, i)/2 + >>> expr.replace_with_arrays({H(i, j): [[a, b], [c, d]]}) + [[a, b/2 + c/2], [b/2 + c/2, d]] + + Anti-symmetrization of an array: + + >>> expr = H(i, j)/2 - H(j, i)/2 + >>> repl = {H(i, j): [[a, b], [c, d]]} + >>> expr.replace_with_arrays(repl) + [[0, b/2 - c/2], [-b/2 + c/2, 0]] + + The same expression can be read as the transpose by inverting ``i`` and + ``j``: + + >>> expr.replace_with_arrays(repl, [j, i]) + [[0, -b/2 + c/2], [b/2 - c/2, 0]] + """ + from .array import Array + + indices = indices or [] + remap = {k.args[0] if k.is_up else -k.args[0]: k for k in self.get_free_indices()} + for i, index in enumerate(indices): + if isinstance(index, (Symbol, Mul)): + if index in remap: + indices[i] = remap[index] + else: + indices[i] = -remap[-index] + + replacement_dict = {tensor: Array(array) for tensor, array in replacement_dict.items()} + + # Check dimensions of replaced arrays: + for tensor, array in replacement_dict.items(): + if isinstance(tensor, TensorIndexType): + expected_shape = [tensor.dim for i in range(2)] + else: + expected_shape = [index_type.dim for index_type in tensor.index_types] + if len(expected_shape) != array.rank() or (not all(dim1 == dim2 if + dim1.is_number else True for dim1, dim2 in zip(expected_shape, + array.shape))): + raise ValueError("shapes for tensor %s expected to be %s, "\ + "replacement array shape is %s" % (tensor, expected_shape, + array.shape)) + + ret_indices, array = self._extract_data(replacement_dict) + + last_indices, array = self._match_indices_with_other_tensor(array, indices, ret_indices, replacement_dict) + return array + + def _check_add_Sum(self, expr, index_symbols): + from sympy.concrete.summations import Sum + indices = self.get_indices() + dum = self.dum + sum_indices = [ (index_symbols[i], 0, + indices[i].tensor_index_type.dim-1) for i, j in dum] + if sum_indices: + expr = Sum(expr, *sum_indices) + return expr + + def _expand_partial_derivative(self): + # simply delegate the _expand_partial_derivative() to + # its arguments to expand a possibly found PartialDerivative + return self.func(*[ + a._expand_partial_derivative() + if isinstance(a, TensExpr) else a + for a in self.args]) + + +class TensAdd(TensExpr, AssocOp): + """ + Sum of tensors. 
+ + Parameters + ========== + + free_args : list of the free indices + + Attributes + ========== + + ``args`` : tuple of addends + ``rank`` : rank of the tensor + ``free_args`` : list of the free indices in sorted order + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_heads, tensor_indices + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> a, b = tensor_indices('a,b', Lorentz) + >>> p, q = tensor_heads('p,q', [Lorentz]) + >>> t = p(a) + q(a); t + p(a) + q(a) + + Examples with components data added to the tensor expression: + + >>> from sympy import symbols, diag + >>> x, y, z, t = symbols("x y z t") + >>> repl = {} + >>> repl[Lorentz] = diag(1, -1, -1, -1) + >>> repl[p(a)] = [1, 2, 3, 4] + >>> repl[q(a)] = [x, y, z, t] + + The following are: 2**2 - 3**2 - 2**2 - 7**2 ==> -58 + + >>> expr = p(a) + q(a) + >>> expr.replace_with_arrays(repl, [a]) + [x + 1, y + 2, z + 3, t + 4] + """ + + def __new__(cls, *args, **kw_args): + args = [_sympify(x) for x in args if x] + args = TensAdd._tensAdd_flatten(args) + args.sort(key=default_sort_key) + if not args: + return S.Zero + if len(args) == 1: + return args[0] + + return Basic.__new__(cls, *args, **kw_args) + + @property + def coeff(self): + return S.One + + @property + def nocoeff(self): + return self + + def get_free_indices(self) -> list[TensorIndex]: + return self.free_indices + + def _replace_indices(self, repl: dict[TensorIndex, TensorIndex]) -> TensExpr: + newargs = [arg._replace_indices(repl) if isinstance(arg, TensExpr) else arg for arg in self.args] + return self.func(*newargs) + + @memoize_property + def rank(self): + if isinstance(self.args[0], TensExpr): + return self.args[0].rank + else: + return 0 + + @memoize_property + def free_args(self): + if isinstance(self.args[0], TensExpr): + return self.args[0].free_args + else: + return [] + + @memoize_property + def free_indices(self): + if isinstance(self.args[0], TensExpr): + return self.args[0].get_free_indices() + else: + return set() + + def doit(self, **hints): + deep = hints.get('deep', True) + if deep: + args = [arg.doit(**hints) for arg in self.args] + else: + args = self.args + + # if any of the args are zero (after doit), drop them. Otherwise, _tensAdd_check will complain about non-matching indices, even though the TensAdd is correctly formed. 
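+        # (illustrative sketch: in p(a) + 0*q(a) the second addend becomes
+        # S.Zero after doit; S.Zero carries no free indices, so keeping it
+        # would make the index check below fail spuriously)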
+ args = [arg for arg in args if arg != S.Zero] + + if len(args) == 0: + return S.Zero + elif len(args) == 1: + return args[0] + + # now check that all addends have the same indices: + TensAdd._tensAdd_check(args) + + # Collect terms appearing more than once, differing by their coefficients: + args = TensAdd._tensAdd_collect_terms(args) + + # collect canonicalized terms + def sort_key(t): + if not isinstance(t, TensExpr): + return [], [], [] + if hasattr(t, "_index_structure") and hasattr(t, "components"): + x = get_index_structure(t) + return t.components, x.free, x.dum + return [], [], [] + args.sort(key=sort_key) + + if not args: + return S.Zero + # it there is only a component tensor return it + if len(args) == 1: + return args[0] + + obj = self.func(*args) + return obj + + @staticmethod + def _tensAdd_flatten(args): + # flatten TensAdd, coerce terms which are not tensors to tensors + a = [] + for x in args: + if isinstance(x, (Add, TensAdd)): + a.extend(list(x.args)) + else: + a.append(x) + args = [x for x in a if x.coeff] + return args + + @staticmethod + def _tensAdd_check(args): + # check that all addends have the same free indices + + def get_indices_set(x: Expr) -> set[TensorIndex]: + if isinstance(x, TensExpr): + return set(x.get_free_indices()) + return set() + + indices0 = get_indices_set(args[0]) + list_indices = [get_indices_set(arg) for arg in args[1:]] + if not all(x == indices0 for x in list_indices): + raise ValueError('all tensors must have the same indices') + + @staticmethod + def _tensAdd_collect_terms(args): + # collect TensMul terms differing at most by their coefficient + terms_dict = defaultdict(list) + scalars = S.Zero + if isinstance(args[0], TensExpr): + free_indices = set(args[0].get_free_indices()) + else: + free_indices = set() + + for arg in args: + if not isinstance(arg, TensExpr): + if free_indices != set(): + raise ValueError("wrong valence") + scalars += arg + continue + if free_indices != set(arg.get_free_indices()): + raise ValueError("wrong valence") + # TODO: what is the part which is not a coeff? + # needs an implementation similar to .as_coeff_Mul() + terms_dict[arg.nocoeff].append(arg.coeff) + + new_args = [TensMul(Add(*coeff), t).doit() for t, coeff in terms_dict.items() if Add(*coeff) != 0] + if isinstance(scalars, Add): + new_args = list(scalars.args) + new_args + elif scalars != 0: + new_args = [scalars] + new_args + return new_args + + def get_indices(self): + indices = [] + for arg in self.args: + indices.extend([i for i in get_indices(arg) if i not in indices]) + return indices + + def _expand(self, **hints): + return TensAdd(*[_expand(i, **hints) for i in self.args]) + + def __call__(self, *indices): + deprecate_call() + free_args = self.free_args + indices = list(indices) + if [x.tensor_index_type for x in indices] != [x.tensor_index_type for x in free_args]: + raise ValueError('incompatible types') + if indices == free_args: + return self + index_tuples = list(zip(free_args, indices)) + a = [x.func(*x.substitute_indices(*index_tuples).args) for x in self.args] + res = TensAdd(*a).doit() + return res + + def canon_bp(self): + """ + Canonicalize using the Butler-Portugal algorithm for canonicalization + under monoterm symmetries. 
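+
+        A small sketch: for an antisymmetric ``A`` the two addends below
+        cancel after canonicalization.
+
+        >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, TensorSymmetry
+        >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
+        >>> a, b = tensor_indices('a,b', Lorentz)
+        >>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2))
+        >>> (A(a, b) + A(b, a)).canon_bp()
+        0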
+ """ + expr = self.expand() + args = [canon_bp(x) for x in expr.args] + res = TensAdd(*args).doit() + return res + + def equals(self, other): + other = _sympify(other) + if isinstance(other, TensMul) and other.coeff == 0: + return all(x.coeff == 0 for x in self.args) + if isinstance(other, TensExpr): + if self.rank != other.rank: + return False + if isinstance(other, TensAdd): + if set(self.args) != set(other.args): + return False + else: + return True + t = self - other + if not isinstance(t, TensExpr): + return t == 0 + else: + if isinstance(t, TensMul): + return t.coeff == 0 + else: + return all(x.coeff == 0 for x in t.args) + + def __getitem__(self, item): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return self.data[item] + + def contract_delta(self, delta): + args = [x.contract_delta(delta) for x in self.args] + t = TensAdd(*args).doit() + return canon_bp(t) + + def contract_metric(self, g): + """ + Raise or lower indices with the metric ``g``. + + Parameters + ========== + + g : metric + + contract_all : if True, eliminate all ``g`` which are contracted + + Notes + ===== + + see the ``TensorIndexType`` docstring for the contraction conventions + """ + + args = [contract_metric(x, g) for x in self.args] + t = TensAdd(*args).doit() + return canon_bp(t) + + def substitute_indices(self, *index_tuples): + new_args = [] + for arg in self.args: + if isinstance(arg, TensExpr): + arg = arg.substitute_indices(*index_tuples) + new_args.append(arg) + return TensAdd(*new_args).doit() + + def _print(self): + a = [] + args = self.args + for x in args: + a.append(str(x)) + s = ' + '.join(a) + s = s.replace('+ -', '- ') + return s + + def _extract_data(self, replacement_dict): + from sympy.tensor.array import Array, permutedims + args_indices, arrays = zip(*[ + arg._extract_data(replacement_dict) if + isinstance(arg, TensExpr) else ([], arg) for arg in self.args + ]) + arrays = [Array(i) for i in arrays] + ref_indices = args_indices[0] + for i in range(1, len(args_indices)): + indices = args_indices[i] + array = arrays[i] + permutation = TensMul._get_indices_permutation(indices, ref_indices) + arrays[i] = permutedims(array, permutation) + return ref_indices, sum(arrays, Array.zeros(*array.shape)) + + @property + def data(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return _tensor_data_substitution_dict[self.expand()] + + @data.setter + def data(self, data): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + _tensor_data_substitution_dict[self] = data + + @data.deleter + def data(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + if self in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self] + + def __iter__(self): + deprecate_data() + if not self.data: + raise ValueError("No iteration on abstract tensors") + return self.data.flatten().__iter__() + + def _eval_rewrite_as_Indexed(self, *args): + return Add.fromiter(args) + + def _eval_partial_derivative(self, s): + # Evaluation like Add + list_addends = [] + for a in self.args: + if isinstance(a, TensExpr): + list_addends.append(a._eval_partial_derivative(s)) + # do not call diff if s is no symbol + elif s._diff_wrt: + list_addends.append(a._eval_derivative(s)) + + return self.func(*list_addends) + + +class Tensor(TensExpr): + """ + Base tensor class, i.e. this represents a tensor, the single unit to be + put into an expression. 
+ + Explanation + =========== + + This object is usually created from a ``TensorHead``, by attaching indices + to it. Indices preceded by a minus sign are considered contravariant, + otherwise covariant. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead + >>> Lorentz = TensorIndexType("Lorentz", dummy_name="L") + >>> mu, nu = tensor_indices('mu nu', Lorentz) + >>> A = TensorHead("A", [Lorentz, Lorentz]) + >>> A(mu, -nu) + A(mu, -nu) + >>> A(mu, -mu) + A(L_0, -L_0) + + It is also possible to use symbols instead of inidices (appropriate indices + are then generated automatically). + + >>> from sympy import Symbol + >>> x = Symbol('x') + >>> A(x, mu) + A(x, mu) + >>> A(x, -x) + A(L_0, -L_0) + + """ + + is_commutative = False + + _index_structure = None # type: _IndexStructure + args: tuple[TensorHead, Tuple] + + def __new__(cls, tensor_head, indices, *, is_canon_bp=False, **kw_args): + indices = cls._parse_indices(tensor_head, indices) + obj = Basic.__new__(cls, tensor_head, Tuple(*indices), **kw_args) + obj._index_structure = _IndexStructure.from_indices(*indices) + obj._free = obj._index_structure.free[:] + obj._dum = obj._index_structure.dum[:] + obj._ext_rank = obj._index_structure._ext_rank + obj._coeff = S.One + obj._nocoeff = obj + obj._component = tensor_head + obj._components = [tensor_head] + if tensor_head.rank != len(indices): + raise ValueError("wrong number of indices") + obj.is_canon_bp = is_canon_bp + obj._index_map = Tensor._build_index_map(indices, obj._index_structure) + return obj + + @property + def free(self): + return self._free + + @property + def dum(self): + return self._dum + + @property + def ext_rank(self): + return self._ext_rank + + @property + def coeff(self): + return self._coeff + + @property + def nocoeff(self): + return self._nocoeff + + @property + def component(self): + return self._component + + @property + def components(self): + return self._components + + @property + def head(self): + return self.args[0] + + @property + def indices(self): + return self.args[1] + + @property + def free_indices(self): + return set(self._index_structure.get_free_indices()) + + @property + def index_types(self): + return self.head.index_types + + @property + def rank(self): + return len(self.free_indices) + + @staticmethod + def _build_index_map(indices, index_structure): + index_map = {} + for idx in indices: + index_map[idx] = (indices.index(idx),) + return index_map + + def doit(self, **hints): + args, indices, free, dum = TensMul._tensMul_contract_indices([self]) + return args[0] + + @staticmethod + def _parse_indices(tensor_head, indices): + if not isinstance(indices, (tuple, list, Tuple)): + raise TypeError("indices should be an array, got %s" % type(indices)) + indices = list(indices) + for i, index in enumerate(indices): + if isinstance(index, Symbol): + indices[i] = TensorIndex(index, tensor_head.index_types[i], True) + elif isinstance(index, Mul): + c, e = index.as_coeff_Mul() + if c == -1 and isinstance(e, Symbol): + indices[i] = TensorIndex(e, tensor_head.index_types[i], False) + else: + raise ValueError("index not understood: %s" % index) + elif not isinstance(index, TensorIndex): + raise TypeError("wrong type for index: %s is %s" % (index, type(index))) + return indices + + def _set_new_index_structure(self, im, is_canon_bp=False): + indices = im.get_indices() + return self._set_indices(*indices, is_canon_bp=is_canon_bp) + + def _set_indices(self, *indices, is_canon_bp=False, **kw_args): + if 
len(indices) != self.ext_rank: + raise ValueError("indices length mismatch") + return self.func(self.args[0], indices, is_canon_bp=is_canon_bp).doit() + + def _get_free_indices_set(self): + return {i[0] for i in self._index_structure.free} + + def _get_dummy_indices_set(self): + dummy_pos = set(itertools.chain(*self._index_structure.dum)) + return {idx for i, idx in enumerate(self.args[1]) if i in dummy_pos} + + def _get_indices_set(self): + return set(self.args[1].args) + + @property + def free_in_args(self): + return [(ind, pos, 0) for ind, pos in self.free] + + @property + def dum_in_args(self): + return [(p1, p2, 0, 0) for p1, p2 in self.dum] + + @property + def free_args(self): + return sorted([x[0] for x in self.free]) + + def commutes_with(self, other): + """ + :param other: + :return: + 0 commute + 1 anticommute + None neither commute nor anticommute + """ + if not isinstance(other, TensExpr): + return 0 + elif isinstance(other, Tensor): + return self.component.commutes_with(other.component) + return NotImplementedError + + def perm2tensor(self, g, is_canon_bp=False): + """ + Returns the tensor corresponding to the permutation ``g``. + + For further details, see the method in ``TIDS`` with the same name. + """ + return perm2tensor(self, g, is_canon_bp) + + def canon_bp(self): + if self.is_canon_bp: + return self + expr = self.expand() + g, dummies, msym = expr._index_structure.indices_canon_args() + v = components_canon_args([expr.component]) + can = canonicalize(g, dummies, msym, *v) + if can == 0: + return S.Zero + tensor = self.perm2tensor(can, True) + return tensor + + def split(self): + return [self] + + def _expand(self, **kwargs): + return self + + def sorted_components(self): + return self + + def get_indices(self) -> list[TensorIndex]: + """ + Get a list of indices, corresponding to those of the tensor. + """ + return list(self.args[1]) + + def get_free_indices(self) -> list[TensorIndex]: + """ + Get a list of free indices, corresponding to those of the tensor. + """ + return self._index_structure.get_free_indices() + + def _replace_indices(self, repl: dict[TensorIndex, TensorIndex]) -> TensExpr: + # TODO: this could be optimized by only swapping the indices + # instead of visiting the whole expression tree: + return self.xreplace(repl) + + def as_base_exp(self): + return self, S.One + + def substitute_indices(self, *index_tuples): + """ + Return a tensor with free indices substituted according to ``index_tuples``. + + ``index_types`` list of tuples ``(old_index, new_index)``. 
+ + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads, TensorSymmetry + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz) + >>> A, B = tensor_heads('A,B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2)) + >>> t = A(i, k)*B(-k, -j); t + A(i, L_0)*B(-L_0, -j) + >>> t.substitute_indices((i, k),(-j, l)) + A(k, L_0)*B(-L_0, l) + """ + indices = [] + for index in self.indices: + for ind_old, ind_new in index_tuples: + if (index.name == ind_old.name and index.tensor_index_type == + ind_old.tensor_index_type): + if index.is_up == ind_old.is_up: + indices.append(ind_new) + else: + indices.append(-ind_new) + break + else: + indices.append(index) + return self.head(*indices) + + def __call__(self, *indices): + deprecate_call() + free_args = self.free_args + indices = list(indices) + if [x.tensor_index_type for x in indices] != [x.tensor_index_type for x in free_args]: + raise ValueError('incompatible types') + if indices == free_args: + return self + t = self.substitute_indices(*list(zip(free_args, indices))) + + # object is rebuilt in order to make sure that all contracted indices + # get recognized as dummies, but only if there are contracted indices. + if len({i if i.is_up else -i for i in indices}) != len(indices): + return t.func(*t.args) + return t + + # TODO: put this into TensExpr? + def __iter__(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return self.data.__iter__() + + # TODO: put this into TensExpr? + def __getitem__(self, item): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return self.data[item] + + def _extract_data(self, replacement_dict): + from .array import Array + for k, v in replacement_dict.items(): + if isinstance(k, Tensor) and k.args[0] == self.args[0]: + other = k + array = v + break + else: + raise ValueError("%s not found in %s" % (self, replacement_dict)) + + # TODO: inefficient, this should be done at root level only: + replacement_dict = {k: Array(v) for k, v in replacement_dict.items()} + array = Array(array) + + dum1 = self.dum + dum2 = other.dum + + if len(dum2) > 0: + for pair in dum2: + # allow `dum2` if the contained values are also in `dum1`. + if pair not in dum1: + raise NotImplementedError("%s with contractions is not implemented" % other) + # Remove elements in `dum2` from `dum1`: + dum1 = [pair for pair in dum1 if pair not in dum2] + if len(dum1) > 0: + indices1 = self.get_indices() + indices2 = other.get_indices() + repl = {} + for p1, p2 in dum1: + repl[indices2[p2]] = -indices2[p1] + for pos in (p1, p2): + if indices1[pos].is_up ^ indices2[pos].is_up: + metric = replacement_dict[indices1[pos].tensor_index_type] + if indices1[pos].is_up: + metric = _TensorDataLazyEvaluator.inverse_matrix(metric) + array = self._contract_and_permute_with_metric(metric, array, pos, len(indices2)) + other = other.xreplace(repl).doit() + array = _TensorDataLazyEvaluator.data_contract_dum([array], dum1, len(indices2)) + + free_ind1 = self.get_free_indices() + free_ind2 = other.get_free_indices() + + return self._match_indices_with_other_tensor(array, free_ind1, free_ind2, replacement_dict) + + @property + def data(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return _tensor_data_substitution_dict[self] + + @data.setter + def data(self, data): + deprecate_data() + # TODO: check data compatibility with properties of tensor. 
+ with ignore_warnings(SymPyDeprecationWarning): + _tensor_data_substitution_dict[self] = data + + @data.deleter + def data(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + if self in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self] + if self.metric in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self.metric] + + def _print(self): + indices = [str(ind) for ind in self.indices] + component = self.component + if component.rank > 0: + return ('%s(%s)' % (component.name, ', '.join(indices))) + else: + return ('%s' % component.name) + + def equals(self, other): + if other == 0: + return self.coeff == 0 + other = _sympify(other) + if not isinstance(other, TensExpr): + assert not self.components + return S.One == other + + def _get_compar_comp(self): + t = self.canon_bp() + r = (t.coeff, tuple(t.components), \ + tuple(sorted(t.free)), tuple(sorted(t.dum))) + return r + + return _get_compar_comp(self) == _get_compar_comp(other) + + def contract_metric(self, g): + # if metric is not the same, ignore this step: + if self.component != g: + return self + # in case there are free components, do not perform anything: + if len(self.free) != 0: + return self + + #antisym = g.index_types[0].metric_antisym + if g.symmetry == TensorSymmetry.fully_symmetric(-2): + antisym = 1 + elif g.symmetry == TensorSymmetry.fully_symmetric(2): + antisym = 0 + elif g.symmetry == TensorSymmetry.no_symmetry(2): + antisym = None + else: + raise NotImplementedError + sign = S.One + typ = g.index_types[0] + + if not antisym: + # g(i, -i) + sign = sign*typ.dim + else: + # g(i, -i) + sign = sign*typ.dim + + dp0, dp1 = self.dum[0] + if dp0 < dp1: + # g(i, -i) = -D with antisymmetric metric + sign = -sign + + return sign + + def contract_delta(self, metric): + return self.contract_metric(metric) + + def _eval_rewrite_as_Indexed(self, tens, indices): + from sympy.tensor.indexed import Indexed + # TODO: replace .args[0] with .name: + index_symbols = [i.args[0] for i in self.get_indices()] + expr = Indexed(tens.args[0], *index_symbols) + return self._check_add_Sum(expr, index_symbols) + + def _eval_partial_derivative(self, s): # type: (Tensor) -> Expr + + if not isinstance(s, Tensor): + return S.Zero + else: + + # @a_i/@a_k = delta_i^k + # @a_i/@a^k = g_ij delta^j_k + # @a^i/@a^k = delta^i_k + # @a^i/@a_k = g^ij delta_j^k + # TODO: if there is no metric present, the derivative should be zero? 
+ + if self.head != s.head: + return S.Zero + + # if heads are the same, provide delta and/or metric products + # for every free index pair in the appropriate tensor + # assumed that the free indices are in proper order + # A contravariante index in the derivative becomes covariant + # after performing the derivative and vice versa + + kronecker_delta_list = [1] + + # not guarantee a correct index order + + for (count, (iself, iother)) in enumerate(zip(self.get_free_indices(), s.get_free_indices())): + if iself.tensor_index_type != iother.tensor_index_type: + raise ValueError("index types not compatible") + else: + tensor_index_type = iself.tensor_index_type + tensor_metric = tensor_index_type.metric + dummy = TensorIndex("d_" + str(count), tensor_index_type, + is_up=iself.is_up) + if iself.is_up == iother.is_up: + kroneckerdelta = tensor_index_type.delta(iself, -iother) + else: + kroneckerdelta = ( + TensMul(tensor_metric(iself, dummy), + tensor_index_type.delta(-dummy, -iother)) + ) + kronecker_delta_list.append(kroneckerdelta) + return TensMul.fromiter(kronecker_delta_list).doit() + # doit necessary to rename dummy indices accordingly + + +class TensMul(TensExpr, AssocOp): + """ + Product of tensors. + + Parameters + ========== + + coeff : SymPy coefficient of the tensor + args + + Attributes + ========== + + ``components`` : list of ``TensorHead`` of the component tensors + ``types`` : list of nonrepeated ``TensorIndexType`` + ``free`` : list of ``(ind, ipos, icomp)``, see Notes + ``dum`` : list of ``(ipos1, ipos2, icomp1, icomp2)``, see Notes + ``ext_rank`` : rank of the tensor counting the dummy indices + ``rank`` : rank of the tensor + ``coeff`` : SymPy coefficient of the tensor + ``free_args`` : list of the free indices in sorted order + ``is_canon_bp`` : ``True`` if the tensor in in canonical form + + Notes + ===== + + ``args[0]`` list of ``TensorHead`` of the component tensors. + + ``args[1]`` list of ``(ind, ipos, icomp)`` + where ``ind`` is a free index, ``ipos`` is the slot position + of ``ind`` in the ``icomp``-th component tensor. + + ``args[2]`` list of tuples representing dummy indices. + ``(ipos1, ipos2, icomp1, icomp2)`` indicates that the contravariant + dummy index is the ``ipos1``-th slot position in the ``icomp1``-th + component tensor; the corresponding covariant index is + in the ``ipos2`` slot position in the ``icomp2``-th component tensor. + + """ + identity = S.One + + _index_structure = None # type: _IndexStructure + + def __new__(cls, *args, **kw_args): + is_canon_bp = kw_args.get('is_canon_bp', False) + args = list(map(_sympify, args)) + + """ + If the internal dummy indices in one arg conflict with the free indices + of the remaining args, we need to rename those internal dummy indices. 
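+ For example, if one argument is the already-contracted product
+ ``A(i, L_0)*B(-L_0, -j)`` and ``L_0`` also occurs as a free index of another
+ argument, the internal dummy ``L_0`` of the first argument is renamed (via
+ ``_dedupe_indices``) before the product is assembled.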
+ """ + free = [get_free_indices(arg) for arg in args] + free = set(itertools.chain(*free)) #flatten free + newargs = [] + for arg in args: + dum_this = set(get_dummy_indices(arg)) + dum_other = [get_dummy_indices(a) for a in newargs] + dum_other = set(itertools.chain(*dum_other)) #flatten dum_other + free_this = set(get_free_indices(arg)) + if len(dum_this.intersection(free)) > 0: + exclude = free_this.union(free, dum_other) + newarg = TensMul._dedupe_indices(arg, exclude) + else: + newarg = arg + newargs.append(newarg) + + args = newargs + + # Flatten: + args = [i for arg in args for i in (arg.args if isinstance(arg, (TensMul, Mul)) else [arg])] + + args, indices, free, dum = TensMul._tensMul_contract_indices(args, replace_indices=False) + + # Data for indices: + index_types = [i.tensor_index_type for i in indices] + index_structure = _IndexStructure(free, dum, index_types, indices, canon_bp=is_canon_bp) + + obj = TensExpr.__new__(cls, *args) + obj._indices = indices + obj._index_types = index_types[:] + obj._index_structure = index_structure + obj._free = index_structure.free[:] + obj._dum = index_structure.dum[:] + obj._free_indices = {x[0] for x in obj.free} + obj._rank = len(obj.free) + obj._ext_rank = len(obj._index_structure.free) + 2*len(obj._index_structure.dum) + obj._coeff = S.One + obj._is_canon_bp = is_canon_bp + return obj + + index_types = property(lambda self: self._index_types) + free = property(lambda self: self._free) + dum = property(lambda self: self._dum) + free_indices = property(lambda self: self._free_indices) + rank = property(lambda self: self._rank) + ext_rank = property(lambda self: self._ext_rank) + + @staticmethod + def _indices_to_free_dum(args_indices): + free2pos1 = {} + free2pos2 = {} + dummy_data = [] + indices = [] + + # Notation for positions (to better understand the code): + # `pos1`: position in the `args`. + # `pos2`: position in the indices. + + # Example: + # A(i, j)*B(k, m, n)*C(p) + # `pos1` of `n` is 1 because it's in `B` (second `args` of TensMul). + # `pos2` of `n` is 4 because it's the fifth overall index. 
+ + # Counter for the index position wrt the whole expression: + pos2 = 0 + + for pos1, arg_indices in enumerate(args_indices): + + for index_pos, index in enumerate(arg_indices): + if not isinstance(index, TensorIndex): + raise TypeError("expected TensorIndex") + if -index in free2pos1: + # Dummy index detected: + other_pos1 = free2pos1.pop(-index) + other_pos2 = free2pos2.pop(-index) + if index.is_up: + dummy_data.append((index, pos1, other_pos1, pos2, other_pos2)) + else: + dummy_data.append((-index, other_pos1, pos1, other_pos2, pos2)) + indices.append(index) + elif index in free2pos1: + raise ValueError("Repeated index: %s" % index) + else: + free2pos1[index] = pos1 + free2pos2[index] = pos2 + indices.append(index) + pos2 += 1 + + free = [(i, p) for (i, p) in free2pos2.items()] + free_names = [i.name for i in free2pos2.keys()] + + dummy_data.sort(key=lambda x: x[3]) + return indices, free, free_names, dummy_data + + @staticmethod + def _dummy_data_to_dum(dummy_data): + return [(p2a, p2b) for (i, p1a, p1b, p2a, p2b) in dummy_data] + + @staticmethod + def _tensMul_contract_indices(args, replace_indices=True): + replacements = [{} for _ in args] + + #_index_order = all(_has_index_order(arg) for arg in args) + + args_indices = [get_indices(arg) for arg in args] + indices, free, free_names, dummy_data = TensMul._indices_to_free_dum(args_indices) + + cdt = defaultdict(int) + + def dummy_name_gen(tensor_index_type): + nd = str(cdt[tensor_index_type]) + cdt[tensor_index_type] += 1 + return tensor_index_type.dummy_name + '_' + nd + + if replace_indices: + for old_index, pos1cov, pos1contra, pos2cov, pos2contra in dummy_data: + index_type = old_index.tensor_index_type + while True: + dummy_name = dummy_name_gen(index_type) + if dummy_name not in free_names: + break + dummy = TensorIndex(dummy_name, index_type, True) + replacements[pos1cov][old_index] = dummy + replacements[pos1contra][-old_index] = -dummy + indices[pos2cov] = dummy + indices[pos2contra] = -dummy + args = [ + arg._replace_indices(repl) if isinstance(arg, TensExpr) else arg + for arg, repl in zip(args, replacements)] + + dum = TensMul._dummy_data_to_dum(dummy_data) + return args, indices, free, dum + + @staticmethod + def _get_components_from_args(args): + """ + Get a list of ``Tensor`` objects having the same ``TIDS`` if multiplied + by one another. + """ + components = [] + for arg in args: + if not isinstance(arg, TensExpr): + continue + if isinstance(arg, TensAdd): + continue + components.extend(arg.components) + return components + + @staticmethod + def _rebuild_tensors_list(args, index_structure): + indices = index_structure.get_indices() + #tensors = [None for i in components] # pre-allocate list + ind_pos = 0 + for i, arg in enumerate(args): + if not isinstance(arg, TensExpr): + continue + prev_pos = ind_pos + ind_pos += arg.ext_rank + args[i] = Tensor(arg.component, indices[prev_pos:ind_pos]) + + def doit(self, **hints): + is_canon_bp = self._is_canon_bp + deep = hints.get('deep', True) + if deep: + args = [arg.doit(**hints) for arg in self.args] + + """ + There may now be conflicts between dummy indices of different args + (each arg's doit method does not have any information about which + dummy indices are already used in the other args), so we + deduplicate them. 
+ """ + rule = dict(zip(self.args, args)) + rule = self._dedupe_indices_in_rule(rule) + args = [rule[a] for a in self.args] + + else: + args = self.args + + args = [arg for arg in args if arg != self.identity] + + # Extract non-tensor coefficients: + coeff = reduce(lambda a, b: a*b, [arg for arg in args if not isinstance(arg, TensExpr)], S.One) + args = [arg for arg in args if isinstance(arg, TensExpr)] + + if len(args) == 0: + return coeff + + if coeff != self.identity: + args = [coeff] + args + if coeff == 0: + return S.Zero + + if len(args) == 1: + return args[0] + + args, indices, free, dum = TensMul._tensMul_contract_indices(args) + + # Data for indices: + index_types = [i.tensor_index_type for i in indices] + index_structure = _IndexStructure(free, dum, index_types, indices, canon_bp=is_canon_bp) + + obj = self.func(*args) + obj._index_types = index_types + obj._index_structure = index_structure + obj._ext_rank = len(obj._index_structure.free) + 2*len(obj._index_structure.dum) + obj._coeff = coeff + obj._is_canon_bp = is_canon_bp + return obj + + # TODO: this method should be private + # TODO: should this method be renamed _from_components_free_dum ? + @staticmethod + def from_data(coeff, components, free, dum, **kw_args): + return TensMul(coeff, *TensMul._get_tensors_from_components_free_dum(components, free, dum), **kw_args).doit() + + @staticmethod + def _get_tensors_from_components_free_dum(components, free, dum): + """ + Get a list of ``Tensor`` objects by distributing ``free`` and ``dum`` indices on the ``components``. + """ + index_structure = _IndexStructure.from_components_free_dum(components, free, dum) + indices = index_structure.get_indices() + tensors = [None for i in components] # pre-allocate list + + # distribute indices on components to build a list of tensors: + ind_pos = 0 + for i, component in enumerate(components): + prev_pos = ind_pos + ind_pos += component.rank + tensors[i] = Tensor(component, indices[prev_pos:ind_pos]) + return tensors + + def _get_free_indices_set(self): + return {i[0] for i in self.free} + + def _get_dummy_indices_set(self): + dummy_pos = set(itertools.chain(*self.dum)) + return {idx for i, idx in enumerate(self._index_structure.get_indices()) if i in dummy_pos} + + def _get_position_offset_for_indices(self): + arg_offset = [None for i in range(self.ext_rank)] + counter = 0 + for i, arg in enumerate(self.args): + if not isinstance(arg, TensExpr): + continue + for j in range(arg.ext_rank): + arg_offset[j + counter] = counter + counter += arg.ext_rank + return arg_offset + + @property + def free_args(self): + return sorted([x[0] for x in self.free]) + + @property + def components(self): + return self._get_components_from_args(self.args) + + @property + def free_in_args(self): + arg_offset = self._get_position_offset_for_indices() + argpos = self._get_indices_to_args_pos() + return [(ind, pos-arg_offset[pos], argpos[pos]) for (ind, pos) in self.free] + + @property + def coeff(self): + # return Mul.fromiter([c for c in self.args if not isinstance(c, TensExpr)]) + return self._coeff + + @property + def nocoeff(self): + return self.func(*[t for t in self.args if isinstance(t, TensExpr)]).doit() + + @property + def dum_in_args(self): + arg_offset = self._get_position_offset_for_indices() + argpos = self._get_indices_to_args_pos() + return [(p1-arg_offset[p1], p2-arg_offset[p2], argpos[p1], argpos[p2]) for p1, p2 in self.dum] + + def equals(self, other): + if other == 0: + return self.coeff == 0 + other = _sympify(other) + if not isinstance(other, 
TensExpr): + assert not self.components + return self.coeff == other + + return self.canon_bp() == other.canon_bp() + + def get_indices(self): + """ + Returns the list of indices of the tensor. + + Explanation + =========== + + The indices are listed in the order in which they appear in the + component tensors. + The dummy indices are given a name which does not collide with + the names of the free indices. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz) + >>> g = Lorentz.metric + >>> p, q = tensor_heads('p,q', [Lorentz]) + >>> t = p(m1)*g(m0,m2) + >>> t.get_indices() + [m1, m0, m2] + >>> t2 = p(m1)*g(-m1, m2) + >>> t2.get_indices() + [L_0, -L_0, m2] + """ + return self._indices + + def get_free_indices(self) -> list[TensorIndex]: + """ + Returns the list of free indices of the tensor. + + Explanation + =========== + + The indices are listed in the order in which they appear in the + component tensors. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz) + >>> g = Lorentz.metric + >>> p, q = tensor_heads('p,q', [Lorentz]) + >>> t = p(m1)*g(m0,m2) + >>> t.get_free_indices() + [m1, m0, m2] + >>> t2 = p(m1)*g(-m1, m2) + >>> t2.get_free_indices() + [m2] + """ + return self._index_structure.get_free_indices() + + def _replace_indices(self, repl: dict[TensorIndex, TensorIndex]) -> TensExpr: + return self.func(*[arg._replace_indices(repl) if isinstance(arg, TensExpr) else arg for arg in self.args]) + + def split(self): + """ + Returns a list of tensors, whose product is ``self``. + + Explanation + =========== + + Dummy indices contracted among different tensor components + become free indices with the same name as the one used to + represent the dummy indices. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads, TensorSymmetry + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> a, b, c, d = tensor_indices('a,b,c,d', Lorentz) + >>> A, B = tensor_heads('A,B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2)) + >>> t = A(a,b)*B(-b,c) + >>> t + A(a, L_0)*B(-L_0, c) + >>> t.split() + [A(a, L_0), B(-L_0, c)] + """ + if self.args == (): + return [self] + splitp = [] + res = 1 + for arg in self.args: + if isinstance(arg, Tensor): + splitp.append(res*arg) + res = 1 + else: + res *= arg + return splitp + + def _expand(self, **hints): + # TODO: temporary solution, in the future this should be linked to + # `Expr.expand`. 
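+ # Expand each factor, then distribute the product over any Add/TensAdd
+ # factors: the Cartesian product of the addends of every factor gives
+ # the terms of the expanded sum.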
+ args = [_expand(arg, **hints) for arg in self.args] + args1 = [arg.args if isinstance(arg, (Add, TensAdd)) else (arg,) for arg in args] + return TensAdd(*[ + TensMul(*i) for i in itertools.product(*args1)] + ) + + def __neg__(self): + return TensMul(S.NegativeOne, self, is_canon_bp=self._is_canon_bp).doit() + + def __getitem__(self, item): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return self.data[item] + + def _get_args_for_traditional_printer(self): + args = list(self.args) + if self.coeff.could_extract_minus_sign(): + # expressions like "-A(a)" + sign = "-" + if args[0] == S.NegativeOne: + args = args[1:] + else: + args[0] = -args[0] + else: + sign = "" + return sign, args + + def _sort_args_for_sorted_components(self): + """ + Returns the ``args`` sorted according to the components commutation + properties. + + Explanation + =========== + + The sorting is done taking into account the commutation group + of the component tensors. + """ + cv = [arg for arg in self.args if isinstance(arg, TensExpr)] + sign = 1 + n = len(cv) - 1 + for i in range(n): + for j in range(n, i, -1): + c = cv[j-1].commutes_with(cv[j]) + # if `c` is `None`, it does neither commute nor anticommute, skip: + if c not in (0, 1): + continue + typ1 = sorted(set(cv[j-1].component.index_types), key=lambda x: x.name) + typ2 = sorted(set(cv[j].component.index_types), key=lambda x: x.name) + if (typ1, cv[j-1].component.name) > (typ2, cv[j].component.name): + cv[j-1], cv[j] = cv[j], cv[j-1] + # if `c` is 1, the anticommute, so change sign: + if c: + sign = -sign + + coeff = sign * self.coeff + if coeff != 1: + return [coeff] + cv + return cv + + def sorted_components(self): + """ + Returns a tensor product with sorted components. + """ + return TensMul(*self._sort_args_for_sorted_components()).doit() + + def perm2tensor(self, g, is_canon_bp=False): + """ + Returns the tensor corresponding to the permutation ``g`` + + For further details, see the method in ``TIDS`` with the same name. + """ + return perm2tensor(self, g, is_canon_bp=is_canon_bp) + + def canon_bp(self): + """ + Canonicalize using the Butler-Portugal algorithm for canonicalization + under monoterm symmetries. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, TensorSymmetry + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz) + >>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2)) + >>> t = A(m0,-m1)*A(m1,-m0) + >>> t.canon_bp() + -A(L_0, L_1)*A(-L_0, -L_1) + >>> t = A(m0,-m1)*A(m1,-m2)*A(m2,-m0) + >>> t.canon_bp() + 0 + """ + if self._is_canon_bp: + return self + expr = self.expand() + if isinstance(expr, TensAdd): + return expr.canon_bp() + if not expr.components: + return expr + t = expr.sorted_components() + g, dummies, msym = t._index_structure.indices_canon_args() + v = components_canon_args(t.components) + can = canonicalize(g, dummies, msym, *v) + if can == 0: + return S.Zero + tmul = t.perm2tensor(can, True) + return tmul + + def contract_delta(self, delta): + t = self.contract_metric(delta) + return t + + def _get_indices_to_args_pos(self): + """ + Get a dict mapping the index position to TensMul's argument number. 
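+ For example, for a product of a rank-1, a rank-1 and a rank-2 tensor (in that
+ order, and assuming no scalar coefficient among the args) the returned dict is
+ ``{0: 0, 1: 1, 2: 2, 3: 2}``.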
+ """ + pos_map = {} + pos_counter = 0 + for arg_i, arg in enumerate(self.args): + if not isinstance(arg, TensExpr): + continue + assert isinstance(arg, Tensor) + for i in range(arg.ext_rank): + pos_map[pos_counter] = arg_i + pos_counter += 1 + return pos_map + + def contract_metric(self, g): + """ + Raise or lower indices with the metric ``g``. + + Parameters + ========== + + g : metric + + Notes + ===== + + See the ``TensorIndexType`` docstring for the contraction conventions. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz) + >>> g = Lorentz.metric + >>> p, q = tensor_heads('p,q', [Lorentz]) + >>> t = p(m0)*q(m1)*g(-m0, -m1) + >>> t.canon_bp() + metric(L_0, L_1)*p(-L_0)*q(-L_1) + >>> t.contract_metric(g).canon_bp() + p(L_0)*q(-L_0) + """ + expr = self.expand() + if self != expr: + expr = canon_bp(expr) + return contract_metric(expr, g) + pos_map = self._get_indices_to_args_pos() + args = list(self.args) + + #antisym = g.index_types[0].metric_antisym + if g.symmetry == TensorSymmetry.fully_symmetric(-2): + antisym = 1 + elif g.symmetry == TensorSymmetry.fully_symmetric(2): + antisym = 0 + elif g.symmetry == TensorSymmetry.no_symmetry(2): + antisym = None + else: + raise NotImplementedError + + # list of positions of the metric ``g`` inside ``args`` + gpos = [i for i, x in enumerate(self.args) if isinstance(x, Tensor) and x.component == g] + if not gpos: + return self + + # Sign is either 1 or -1, to correct the sign after metric contraction + # (for spinor indices). + sign = 1 + dum = self.dum[:] + free = self.free[:] + elim = set() + for gposx in gpos: + if gposx in elim: + continue + free1 = [x for x in free if pos_map[x[1]] == gposx] + dum1 = [x for x in dum if pos_map[x[0]] == gposx or pos_map[x[1]] == gposx] + if not dum1: + continue + elim.add(gposx) + # subs with the multiplication neutral element, that is, remove it: + args[gposx] = 1 + if len(dum1) == 2: + if not antisym: + dum10, dum11 = dum1 + if pos_map[dum10[1]] == gposx: + # the index with pos p0 contravariant + p0 = dum10[0] + else: + # the index with pos p0 is covariant + p0 = dum10[1] + if pos_map[dum11[1]] == gposx: + # the index with pos p1 is contravariant + p1 = dum11[0] + else: + # the index with pos p1 is covariant + p1 = dum11[1] + + dum.append((p0, p1)) + else: + dum10, dum11 = dum1 + # change the sign to bring the indices of the metric to contravariant + # form; change the sign if dum10 has the metric index in position 0 + if pos_map[dum10[1]] == gposx: + # the index with pos p0 is contravariant + p0 = dum10[0] + if dum10[1] == 1: + sign = -sign + else: + # the index with pos p0 is covariant + p0 = dum10[1] + if dum10[0] == 0: + sign = -sign + if pos_map[dum11[1]] == gposx: + # the index with pos p1 is contravariant + p1 = dum11[0] + sign = -sign + else: + # the index with pos p1 is covariant + p1 = dum11[1] + + dum.append((p0, p1)) + + elif len(dum1) == 1: + if not antisym: + dp0, dp1 = dum1[0] + if pos_map[dp0] == pos_map[dp1]: + # g(i, -i) + typ = g.index_types[0] + sign = sign*typ.dim + + else: + # g(i0, i1)*p(-i1) + if pos_map[dp0] == gposx: + p1 = dp1 + else: + p1 = dp0 + + ind, p = free1[0] + free.append((ind, p1)) + else: + dp0, dp1 = dum1[0] + if pos_map[dp0] == pos_map[dp1]: + # g(i, -i) + typ = g.index_types[0] + sign = sign*typ.dim + + if dp0 < dp1: + # g(i, -i) = -D with antisymmetric metric + sign = -sign + else: + # g(i0, 
i1)*p(-i1) + if pos_map[dp0] == gposx: + p1 = dp1 + if dp0 == 0: + sign = -sign + else: + p1 = dp0 + ind, p = free1[0] + free.append((ind, p1)) + dum = [x for x in dum if x not in dum1] + free = [x for x in free if x not in free1] + + # shift positions: + shift = 0 + shifts = [0]*len(args) + for i in range(len(args)): + if i in elim: + shift += 2 + continue + shifts[i] = shift + free = [(ind, p - shifts[pos_map[p]]) for (ind, p) in free if pos_map[p] not in elim] + dum = [(p0 - shifts[pos_map[p0]], p1 - shifts[pos_map[p1]]) for i, (p0, p1) in enumerate(dum) if pos_map[p0] not in elim and pos_map[p1] not in elim] + + res = sign*TensMul(*args).doit() + if not isinstance(res, TensExpr): + return res + im = _IndexStructure.from_components_free_dum(res.components, free, dum) + return res._set_new_index_structure(im) + + def _set_new_index_structure(self, im, is_canon_bp=False): + indices = im.get_indices() + return self._set_indices(*indices, is_canon_bp=is_canon_bp) + + def _set_indices(self, *indices, is_canon_bp=False, **kw_args): + if len(indices) != self.ext_rank: + raise ValueError("indices length mismatch") + args = list(self.args)[:] + pos = 0 + for i, arg in enumerate(args): + if not isinstance(arg, TensExpr): + continue + assert isinstance(arg, Tensor) + ext_rank = arg.ext_rank + args[i] = arg._set_indices(*indices[pos:pos+ext_rank]) + pos += ext_rank + return TensMul(*args, is_canon_bp=is_canon_bp).doit() + + @staticmethod + def _index_replacement_for_contract_metric(args, free, dum): + for arg in args: + if not isinstance(arg, TensExpr): + continue + assert isinstance(arg, Tensor) + + def substitute_indices(self, *index_tuples): + new_args = [] + for arg in self.args: + if isinstance(arg, TensExpr): + arg = arg.substitute_indices(*index_tuples) + new_args.append(arg) + return TensMul(*new_args).doit() + + def __call__(self, *indices): + deprecate_call() + free_args = self.free_args + indices = list(indices) + if [x.tensor_index_type for x in indices] != [x.tensor_index_type for x in free_args]: + raise ValueError('incompatible types') + if indices == free_args: + return self + t = self.substitute_indices(*list(zip(free_args, indices))) + + # object is rebuilt in order to make sure that all contracted indices + # get recognized as dummies, but only if there are contracted indices. 
+ if len({i if i.is_up else -i for i in indices}) != len(indices): + return t.func(*t.args) + return t + + def _extract_data(self, replacement_dict): + args_indices, arrays = zip(*[arg._extract_data(replacement_dict) for arg in self.args if isinstance(arg, TensExpr)]) + coeff = reduce(operator.mul, [a for a in self.args if not isinstance(a, TensExpr)], S.One) + indices, free, free_names, dummy_data = TensMul._indices_to_free_dum(args_indices) + dum = TensMul._dummy_data_to_dum(dummy_data) + ext_rank = self.ext_rank + free.sort(key=lambda x: x[1]) + free_indices = [i[0] for i in free] + return free_indices, coeff*_TensorDataLazyEvaluator.data_contract_dum(arrays, dum, ext_rank) + + @property + def data(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + dat = _tensor_data_substitution_dict[self.expand()] + return dat + + @data.setter + def data(self, data): + deprecate_data() + raise ValueError("Not possible to set component data to a tensor expression") + + @data.deleter + def data(self): + deprecate_data() + raise ValueError("Not possible to delete component data to a tensor expression") + + def __iter__(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + if self.data is None: + raise ValueError("No iteration on abstract tensors") + return self.data.__iter__() + + @staticmethod + def _dedupe_indices(new, exclude): + """ + exclude: set + new: TensExpr + + If ``new`` has any dummy indices that are in ``exclude``, return a version + of new with those indices replaced. If no replacements are needed, + return None + + """ + exclude = set(exclude) + dums_new = set(get_dummy_indices(new)) + free_new = set(get_free_indices(new)) + + conflicts = dums_new.intersection(exclude) + if len(conflicts) == 0: + return None + + """ + ``exclude_for_gen`` is to be passed to ``_IndexStructure._get_generator_for_dummy_indices()``. + Since the latter does not use the index position for anything, we just + set it as ``None`` here. + """ + exclude.update(dums_new) + exclude.update(free_new) + exclude_for_gen = [(i, None) for i in exclude] + gen = _IndexStructure._get_generator_for_dummy_indices(exclude_for_gen) + repl = {} + for d in conflicts: + if -d in repl.keys(): + continue + newname = gen(d.tensor_index_type) + new_d = d.func(newname, *d.args[1:]) + repl[d] = new_d + repl[-d] = -new_d + + if len(repl) == 0: + return None + + new_renamed = new._replace_indices(repl) + return new_renamed + + def _dedupe_indices_in_rule(self, rule): + """ + rule: dict + + This applies TensMul._dedupe_indices on all values of rule. 
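+ Entries whose keys are ``TensorIndex`` objects are kept as they are; for the
+ remaining entries, the replacement value is renamed so that its dummy indices
+ do not collide with the indices of ``self`` or with the index replacements
+ already collected.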
+ + """ + index_rules = {k:v for k,v in rule.items() if isinstance(k, TensorIndex)} + other_rules = {k:v for k,v in rule.items() if k not in index_rules.keys()} + exclude = set(self.get_indices()) + + newrule = {} + newrule.update(index_rules) + exclude.update(index_rules.keys()) + exclude.update(index_rules.values()) + for old, new in other_rules.items(): + new_renamed = TensMul._dedupe_indices(new, exclude) + if old == new or new_renamed is None: + newrule[old] = new + else: + newrule[old] = new_renamed + exclude.update(get_indices(new_renamed)) + return newrule + + def _eval_rewrite_as_Indexed(self, *args): + from sympy.concrete.summations import Sum + index_symbols = [i.args[0] for i in self.get_indices()] + args = [arg.args[0] if isinstance(arg, Sum) else arg for arg in args] + expr = Mul.fromiter(args) + return self._check_add_Sum(expr, index_symbols) + + def _eval_partial_derivative(self, s): + # Evaluation like Mul + terms = [] + for i, arg in enumerate(self.args): + # checking whether some tensor instance is differentiated + # or some other thing is necessary, but ugly + if isinstance(arg, TensExpr): + d = arg._eval_partial_derivative(s) + else: + # do not call diff is s is no symbol + if s._diff_wrt: + d = arg._eval_derivative(s) + else: + d = S.Zero + if d: + terms.append(TensMul.fromiter(self.args[:i] + (d,) + self.args[i + 1:])) + return TensAdd.fromiter(terms) + + +class TensorElement(TensExpr): + """ + Tensor with evaluated components. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, TensorHead, TensorSymmetry + >>> from sympy import symbols + >>> L = TensorIndexType("L") + >>> i, j, k = symbols("i j k") + >>> A = TensorHead("A", [L, L], TensorSymmetry.fully_symmetric(2)) + >>> A(i, j).get_free_indices() + [i, j] + + If we want to set component ``i`` to a specific value, use the + ``TensorElement`` class: + + >>> from sympy.tensor.tensor import TensorElement + >>> te = TensorElement(A(i, j), {i: 2}) + + As index ``i`` has been accessed (``{i: 2}`` is the evaluation of its 3rd + element), the free indices will only contain ``j``: + + >>> te.get_free_indices() + [j] + """ + + def __new__(cls, expr, index_map): + if not isinstance(expr, Tensor): + # remap + if not isinstance(expr, TensExpr): + raise TypeError("%s is not a tensor expression" % expr) + return expr.func(*[TensorElement(arg, index_map) for arg in expr.args]) + expr_free_indices = expr.get_free_indices() + name_translation = {i.args[0]: i for i in expr_free_indices} + index_map = {name_translation.get(index, index): value for index, value in index_map.items()} + index_map = {index: value for index, value in index_map.items() if index in expr_free_indices} + if len(index_map) == 0: + return expr + free_indices = [i for i in expr_free_indices if i not in index_map.keys()] + index_map = Dict(index_map) + obj = TensExpr.__new__(cls, expr, index_map) + obj._free_indices = free_indices + return obj + + @property + def free(self): + return [(index, i) for i, index in enumerate(self.get_free_indices())] + + @property + def dum(self): + # TODO: inherit dummies from expr + return [] + + @property + def expr(self): + return self._args[0] + + @property + def index_map(self): + return self._args[1] + + @property + def coeff(self): + return S.One + + @property + def nocoeff(self): + return self + + def get_free_indices(self): + return self._free_indices + + def _replace_indices(self, repl: dict[TensorIndex, TensorIndex]) -> TensExpr: + # TODO: can be improved: + return self.xreplace(repl) + + def 
get_indices(self): + return self.get_free_indices() + + def _extract_data(self, replacement_dict): + ret_indices, array = self.expr._extract_data(replacement_dict) + index_map = self.index_map + slice_tuple = tuple(index_map.get(i, slice(None)) for i in ret_indices) + ret_indices = [i for i in ret_indices if i not in index_map] + array = array.__getitem__(slice_tuple) + return ret_indices, array + + +class WildTensorHead(TensorHead): + """ + A wild object that is used to create ``WildTensor`` instances + + Explanation + =========== + + Examples + ======== + >>> from sympy.tensor.tensor import TensorHead, TensorIndex, WildTensorHead, TensorIndexType + >>> R3 = TensorIndexType('R3', dim=3) + >>> p = TensorIndex('p', R3) + >>> q = TensorIndex('q', R3) + + A WildTensorHead can be created without specifying a ``TensorIndexType`` + + >>> W = WildTensorHead("W") + + Calling it with a ``TensorIndex`` creates a ``WildTensor`` instance. + + >>> type(W(p)) + + + The ``TensorIndexType`` is automatically detected from the index that is passed + + >>> W(p).component + W(R3) + + Calling it with no indices returns an object that can match tensors with any number of indices. + + >>> K = TensorHead('K', [R3]) + >>> Q = TensorHead('Q', [R3, R3]) + >>> W().matches(K(p)) + {W: K(p)} + >>> W().matches(Q(p,q)) + {W: Q(p, q)} + + If you want to ignore the order of indices while matching, pass ``unordered_indices=True``. + + >>> U = WildTensorHead("U", unordered_indices=True) + >>> W(p,q).matches(Q(q,p)) + >>> U(p,q).matches(Q(q,p)) + {U(R3,R3): _WildTensExpr(Q(q, p))} + + Parameters + ========== + name : name of the tensor + unordered_indices : whether the order of the indices matters for matching + (default: False) + + See also + ======== + ``WildTensor`` + ``TensorHead`` + + """ + def __new__(cls, name, index_types=None, symmetry=None, comm=0, unordered_indices=False): + if isinstance(name, str): + name_symbol = Symbol(name) + elif isinstance(name, Symbol): + name_symbol = name + else: + raise ValueError("invalid name") + + if index_types is None: + index_types = [] + + if symmetry is None: + symmetry = TensorSymmetry.no_symmetry(len(index_types)) + else: + assert symmetry.rank == len(index_types) + + if symmetry != TensorSymmetry.no_symmetry(len(index_types)): + raise NotImplementedError("Wild matching based on symmetry is not implemented.") + + obj = Basic.__new__(cls, name_symbol, Tuple(*index_types), sympify(symmetry), sympify(comm), sympify(unordered_indices)) + obj.comm = TensorManager.comm_symbols2i(comm) + obj.unordered_indices = unordered_indices + + return obj + + def __call__(self, *indices, **kwargs): + tensor = WildTensor(self, indices, **kwargs) + return tensor.doit() + + +class WildTensor(Tensor): + """ + A wild object which matches ``Tensor`` instances + + Explanation + =========== + This is instantiated by attaching indices to a ``WildTensorHead`` instance. + + Examples + ======== + >>> from sympy.tensor.tensor import TensorHead, TensorIndex, WildTensorHead, TensorIndexType + >>> W = WildTensorHead("W") + >>> R3 = TensorIndexType('R3', dim=3) + >>> p = TensorIndex('p', R3) + >>> q = TensorIndex('q', R3) + >>> K = TensorHead('K', [R3]) + >>> Q = TensorHead('Q', [R3, R3]) + + Matching also takes the indices into account + >>> W(p).matches(K(p)) + {W(R3): _WildTensExpr(K(p))} + >>> W(p).matches(K(q)) + >>> W(p).matches(K(-p)) + + If you want to match objects with any number of indices, just use a ``WildTensor`` with no indices. 
+ >>> W().matches(K(p)) + {W: K(p)} + >>> W().matches(Q(p,q)) + {W: Q(p, q)} + + See Also + ======== + ``WildTensorHead`` + ``Tensor`` + + """ + def __new__(cls, tensor_head, indices, **kw_args): + is_canon_bp = kw_args.pop("is_canon_bp", False) + + if tensor_head.func == TensorHead: + """ + If someone tried to call WildTensor by supplying a TensorHead (not a WildTensorHead), return a normal tensor instead. This is helpful when using subs on an expression to replace occurrences of a WildTensorHead with a TensorHead. + """ + return Tensor(tensor_head, indices, is_canon_bp=is_canon_bp, **kw_args) + elif tensor_head.func == _WildTensExpr: + return tensor_head(*indices) + + indices = cls._parse_indices(tensor_head, indices) + index_types = [ind.tensor_index_type for ind in indices] + tensor_head = tensor_head.func( + tensor_head.name, + index_types, + symmetry=None, + comm=tensor_head.comm, + unordered_indices=tensor_head.unordered_indices, + ) + + obj = Basic.__new__(cls, tensor_head, Tuple(*indices)) + obj.name = tensor_head.name + obj._index_structure = _IndexStructure.from_indices(*indices) + obj._free = obj._index_structure.free[:] + obj._dum = obj._index_structure.dum[:] + obj._ext_rank = obj._index_structure._ext_rank + obj._coeff = S.One + obj._nocoeff = obj + obj._component = tensor_head + obj._components = [tensor_head] + if tensor_head.rank != len(indices): + raise ValueError("wrong number of indices") + obj.is_canon_bp = is_canon_bp + obj._index_map = obj._build_index_map(indices, obj._index_structure) + + return obj + + + def matches(self, expr, repl_dict=None, old=False): + if not isinstance(expr, TensExpr) and expr != S(1): + return None + + if repl_dict is None: + repl_dict = {} + else: + repl_dict = repl_dict.copy() + + if len(self.indices) > 0: + if not hasattr(expr, "get_free_indices"): + return None + expr_indices = expr.get_free_indices() + if len(expr_indices) != len(self.indices): + return None + if self._component.unordered_indices: + m = self._match_indices_ignoring_order(expr) + if m is None: + return None + else: + repl_dict.update(m) + else: + for i in range(len(expr_indices)): + m = self.indices[i].matches(expr_indices[i]) + if m is None: + return None + else: + repl_dict.update(m) + + repl_dict[self.component] = _WildTensExpr(expr) + else: + #If no indices were passed to the WildTensor, it may match tensors with any number of indices. + repl_dict[self] = expr + + return repl_dict + + def _match_indices_ignoring_order(self, expr, repl_dict=None, old=False): + """ + Helper method for matches. Checks if the indices of self and expr + match disregarding index ordering. 
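+ Non-wild indices of ``self`` are matched first, then wild indices that respect
+ the co/contra-variant position, and finally wild indices with
+ ``ignore_updown=True``; expression indices matched in an earlier stage are
+ removed from consideration in the later ones.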
+ """ + if repl_dict is None: + repl_dict = {} + else: + repl_dict = repl_dict.copy() + + def siftkey(ind): + if isinstance(ind, WildTensorIndex): + if ind.ignore_updown: + return "wild, updown" + else: + return "wild" + else: + return "nonwild" + + indices_sifted = sift(self.indices, siftkey) + + matched_indices = [] + expr_indices_remaining = expr.get_indices() + for ind in indices_sifted["nonwild"]: + matched_this_ind = False + for e_ind in expr_indices_remaining: + if e_ind in matched_indices: + continue + m = ind.matches(e_ind) + if m is not None: + matched_this_ind = True + repl_dict.update(m) + matched_indices.append(e_ind) + break + if not matched_this_ind: + return None + + expr_indices_remaining = [i for i in expr_indices_remaining if i not in matched_indices] + for ind in indices_sifted["wild"]: + matched_this_ind = False + for e_ind in expr_indices_remaining: + m = ind.matches(e_ind) + if m is not None: + if -ind in repl_dict.keys() and -repl_dict[-ind] != m[ind]: + return None + matched_this_ind = True + repl_dict.update(m) + matched_indices.append(e_ind) + break + if not matched_this_ind: + return None + + expr_indices_remaining = [i for i in expr_indices_remaining if i not in matched_indices] + for ind in indices_sifted["wild, updown"]: + matched_this_ind = False + for e_ind in expr_indices_remaining: + m = ind.matches(e_ind) + if m is not None: + if -ind in repl_dict.keys() and -repl_dict[-ind] != m[ind]: + return None + matched_this_ind = True + repl_dict.update(m) + matched_indices.append(e_ind) + break + if not matched_this_ind: + return None + + if len(matched_indices) < len(self.indices): + return None + else: + return repl_dict + +class WildTensorIndex(TensorIndex): + """ + A wild object that matches TensorIndex instances. + + Examples + ======== + >>> from sympy.tensor.tensor import TensorIndex, TensorIndexType, WildTensorIndex + >>> R3 = TensorIndexType('R3', dim=3) + >>> p = TensorIndex("p", R3) + + By default, covariant indices only match with covariant indices (and + similarly for contravariant) + + >>> q = WildTensorIndex("q", R3) + >>> (q).matches(p) + {q: p} + >>> (q).matches(-p) + + If you want matching to ignore whether the index is co/contra-variant, set + ignore_updown=True + + >>> r = WildTensorIndex("r", R3, ignore_updown=True) + >>> (r).matches(-p) + {r: -p} + >>> (r).matches(p) + {r: p} + + Parameters + ========== + name : name of the index (string), or ``True`` if you want it to be + automatically assigned + tensor_index_type : ``TensorIndexType`` of the index + is_up : flag for contravariant index (is_up=True by default) + ignore_updown : bool, Whether this should match both co- and contra-variant + indices (default:False) + """ + def __new__(cls, name, tensor_index_type, is_up=True, ignore_updown=False): + if isinstance(name, str): + name_symbol = Symbol(name) + elif isinstance(name, Symbol): + name_symbol = name + elif name is True: + name = "_i{}".format(len(tensor_index_type._autogenerated)) + name_symbol = Symbol(name) + tensor_index_type._autogenerated.append(name_symbol) + else: + raise ValueError("invalid name") + + is_up = sympify(is_up) + ignore_updown = sympify(ignore_updown) + return Basic.__new__(cls, name_symbol, tensor_index_type, is_up, ignore_updown) + + @property + def ignore_updown(self): + return self.args[3] + + def __neg__(self): + t1 = WildTensorIndex(self.name, self.tensor_index_type, + (not self.is_up), self.ignore_updown) + return t1 + + def matches(self, expr, repl_dict=None, old=False): + if not isinstance(expr, 
TensorIndex): + return None + if self.tensor_index_type != expr.tensor_index_type: + return None + if not self.ignore_updown: + if self.is_up != expr.is_up: + return None + + if repl_dict is None: + repl_dict = {} + else: + repl_dict = repl_dict.copy() + + repl_dict[self] = expr + return repl_dict + + +class _WildTensExpr(Basic): + """ + INTERNAL USE ONLY + + This is an object that helps with replacement of WildTensors in expressions. + When this object is set as the tensor_head of a WildTensor, it replaces the + WildTensor by a TensExpr (passed when initializing this object). + + Examples + ======== + >>> from sympy.tensor.tensor import WildTensorHead, TensorIndex, TensorHead, TensorIndexType + >>> W = WildTensorHead("W") + >>> R3 = TensorIndexType('R3', dim=3) + >>> p = TensorIndex('p', R3) + >>> q = TensorIndex('q', R3) + >>> K = TensorHead('K', [R3]) + >>> print( ( K(p) ).replace( W(p), W(q)*W(-q)*W(p) ) ) + K(R_0)*K(-R_0)*K(p) + + """ + def __init__(self, expr): + if not isinstance(expr, TensExpr): + raise TypeError("_WildTensExpr expects a TensExpr as argument") + self.expr = expr + + def __call__(self, *indices): + return self.expr._replace_indices(dict(zip(self.expr.get_free_indices(), indices))) + + def __neg__(self): + return self.func(self.expr*S.NegativeOne) + + def __abs__(self): + raise NotImplementedError + + def __add__(self, other): + if other.func != self.func: + raise TypeError(f"Cannot add {self.func} to {other.func}") + return self.func(self.expr+other.expr) + + def __radd__(self, other): + if other.func != self.func: + raise TypeError(f"Cannot add {self.func} to {other.func}") + return self.func(other.expr+self.expr) + + def __sub__(self, other): + return self + (-other) + + def __rsub__(self, other): + return other + (-self) + + def __mul__(self, other): + raise NotImplementedError + + def __rmul__(self, other): + raise NotImplementedError + + def __truediv__(self, other): + raise NotImplementedError + + def __rtruediv__(self, other): + raise NotImplementedError + + def __pow__(self, other): + raise NotImplementedError + + def __rpow__(self, other): + raise NotImplementedError + + +def canon_bp(p): + """ + Butler-Portugal canonicalization. See ``tensor_can.py`` from the + combinatorics module for the details. + """ + if isinstance(p, TensExpr): + return p.canon_bp() + return p + + +def tensor_mul(*a): + """ + product of tensors + """ + if not a: + return TensMul.from_data(S.One, [], [], []) + t = a[0] + for tx in a[1:]: + t = t*tx + return t + + +def riemann_cyclic_replace(t_r): + """ + replace Riemann tensor with an equivalent expression + + ``R(m,n,p,q) -> 2/3*R(m,n,p,q) - 1/3*R(m,q,n,p) + 1/3*R(m,p,n,q)`` + + """ + free = sorted(t_r.free, key=lambda x: x[1]) + m, n, p, q = [x[0] for x in free] + t0 = t_r*Rational(2, 3) + t1 = -t_r.substitute_indices((m,m),(n,q),(p,n),(q,p))*Rational(1, 3) + t2 = t_r.substitute_indices((m,m),(n,p),(p,n),(q,q))*Rational(1, 3) + t3 = t0 + t1 + t2 + return t3 + +def riemann_cyclic(t2): + """ + Replace each Riemann tensor with an equivalent expression + satisfying the cyclic identity. + + This trick is discussed in the reference guide to Cadabra. 
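+ The replacement used by ``riemann_cyclic_replace`` differs from the original
+ tensor by one third of the cyclic sum
+ ``R(m,n,p,q) + R(m,p,q,n) + R(m,q,n,p)``, which vanishes by the first Bianchi
+ (cyclic) identity, so expressions that are zero modulo this identity
+ canonicalize to zero.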
+ + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, riemann_cyclic, TensorSymmetry + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz) + >>> R = TensorHead('R', [Lorentz]*4, TensorSymmetry.riemann()) + >>> t = R(i,j,k,l)*(R(-i,-j,-k,-l) - 2*R(-i,-k,-j,-l)) + >>> riemann_cyclic(t) + 0 + """ + t2 = t2.expand() + if isinstance(t2, (TensMul, Tensor)): + args = [t2] + else: + args = t2.args + a1 = [x.split() for x in args] + a2 = [[riemann_cyclic_replace(tx) for tx in y] for y in a1] + a3 = [tensor_mul(*v) for v in a2] + t3 = TensAdd(*a3).doit() + if not t3: + return t3 + else: + return canon_bp(t3) + + +def get_lines(ex, index_type): + """ + Returns ``(lines, traces, rest)`` for an index type, + where ``lines`` is the list of list of positions of a matrix line, + ``traces`` is the list of list of traced matrix lines, + ``rest`` is the rest of the elements of the tensor. + """ + def _join_lines(a): + i = 0 + while i < len(a): + x = a[i] + xend = x[-1] + xstart = x[0] + hit = True + while hit: + hit = False + for j in range(i + 1, len(a)): + if j >= len(a): + break + if a[j][0] == xend: + hit = True + x.extend(a[j][1:]) + xend = x[-1] + a.pop(j) + continue + if a[j][0] == xstart: + hit = True + a[i] = reversed(a[j][1:]) + x + x = a[i] + xstart = a[i][0] + a.pop(j) + continue + if a[j][-1] == xend: + hit = True + x.extend(reversed(a[j][:-1])) + xend = x[-1] + a.pop(j) + continue + if a[j][-1] == xstart: + hit = True + a[i] = a[j][:-1] + x + x = a[i] + xstart = x[0] + a.pop(j) + continue + i += 1 + return a + + arguments = ex.args + dt = {} + for c in ex.args: + if not isinstance(c, TensExpr): + continue + if c in dt: + continue + index_types = c.index_types + a = [] + for i in range(len(index_types)): + if index_types[i] is index_type: + a.append(i) + if len(a) > 2: + raise ValueError('at most two indices of type %s allowed' % index_type) + if len(a) == 2: + dt[c] = a + #dum = ex.dum + lines = [] + traces = [] + traces1 = [] + #indices_to_args_pos = ex._get_indices_to_args_pos() + # TODO: add a dum_to_components_map ? 
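+ # Each contraction (p0, p1, c0, c1) links slot p0 of argument c0 to slot p1 of
+ # argument c1; chains of such links between matrix-like (two-index) arguments
+ # are collected into ``lines``, and closed chains into ``traces``.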
+ for p0, p1, c0, c1 in ex.dum_in_args: + if arguments[c0] not in dt: + continue + if c0 == c1: + traces.append([c0]) + continue + ta0 = dt[arguments[c0]] + ta1 = dt[arguments[c1]] + if p0 not in ta0: + continue + if ta0.index(p0) == ta1.index(p1): + # case gamma(i,s0,-s1) in c0, gamma(j,-s0,s2) in c1; + # to deal with this case one could add to the position + # a flag for transposition; + # one could write [(c0, False), (c1, True)] + raise NotImplementedError + # if p0 == ta0[1] then G in pos c0 is mult on the right by G in c1 + # if p0 == ta0[0] then G in pos c1 is mult on the right by G in c0 + ta0 = dt[arguments[c0]] + b0, b1 = (c0, c1) if p0 == ta0[1] else (c1, c0) + lines1 = lines[:] + for line in lines: + if line[-1] == b0: + if line[0] == b1: + n = line.index(min(line)) + traces1.append(line) + traces.append(line[n:] + line[:n]) + else: + line.append(b1) + break + elif line[0] == b1: + line.insert(0, b0) + break + else: + lines1.append([b0, b1]) + + lines = [x for x in lines1 if x not in traces1] + lines = _join_lines(lines) + rest = [] + for line in lines: + for y in line: + rest.append(y) + for line in traces: + for y in line: + rest.append(y) + rest = [x for x in range(len(arguments)) if x not in rest] + + return lines, traces, rest + + +def get_free_indices(t): + if not isinstance(t, TensExpr): + return () + return t.get_free_indices() + + +def get_indices(t): + if not isinstance(t, TensExpr): + return () + return t.get_indices() + +def get_dummy_indices(t): + if not isinstance(t, TensExpr): + return () + inds = t.get_indices() + free = t.get_free_indices() + return [i for i in inds if i not in free] + +def get_index_structure(t): + if isinstance(t, TensExpr): + return t._index_structure + return _IndexStructure([], [], [], []) + + +def get_coeff(t): + if isinstance(t, Tensor): + return S.One + if isinstance(t, TensMul): + return t.coeff + if isinstance(t, TensExpr): + raise ValueError("no coefficient associated to this tensor expression") + return t + +def contract_metric(t, g): + if isinstance(t, TensExpr): + return t.contract_metric(g) + return t + + +def perm2tensor(t, g, is_canon_bp=False): + """ + Returns the tensor corresponding to the permutation ``g`` + + For further details, see the method in ``TIDS`` with the same name. 
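+ The last point of ``g`` encodes the overall sign of the permutation: when
+ ``g[-1] != len(g) - 1``, the resulting tensor is negated.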
+ """ + if not isinstance(t, TensExpr): + return t + elif isinstance(t, (Tensor, TensMul)): + nim = get_index_structure(t).perm2tensor(g, is_canon_bp=is_canon_bp) + res = t._set_new_index_structure(nim, is_canon_bp=is_canon_bp) + if g[-1] != len(g) - 1: + return -res + + return res + raise NotImplementedError() + + +def substitute_indices(t, *index_tuples): + if not isinstance(t, TensExpr): + return t + return t.substitute_indices(*index_tuples) + + +def _expand(expr, **kwargs): + if isinstance(expr, TensExpr): + return expr._expand(**kwargs) + else: + return expr.expand(**kwargs) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ceda7a85a4ba05bde0545cbe25159c3d0c327522 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/test_functions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/test_functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3beec07d5bf95c2bdd2c335db6ea52aa1387ecb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/test_functions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/test_index_methods.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/test_index_methods.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d21d81e84979b29706279b991a33fde1317ee59b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/test_index_methods.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/test_indexed.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/test_indexed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8826dd3d0d78dd867cd3203d6b6585fc4b5ad207 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/test_indexed.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/test_printing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/test_printing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d155dbb983c43c36df58555cdacffd66b69b0dc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/test_printing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/test_tensor.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/test_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e3fdad16737dcbc25f6b5950842be928d767e5a Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/test_tensor.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/test_tensor_element.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/test_tensor_element.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51e19d7290c0d978c01a14b3501db62ffc38bf9e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/test_tensor_element.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/test_tensor_operators.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/test_tensor_operators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1a4e730aa682d45954e6b51494268c268861a06 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/__pycache__/test_tensor_operators.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/test_functions.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/test_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..ae40865d1bddffaa976dc3d94ae1ef1b6c97ca35 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/test_functions.py @@ -0,0 +1,57 @@ +from sympy.tensor.functions import TensorProduct +from sympy.matrices.dense import Matrix +from sympy.matrices.expressions.matexpr import MatrixSymbol +from sympy.tensor.array import Array +from sympy.abc import x, y, z +from sympy.abc import i, j, k, l + + +A = MatrixSymbol("A", 3, 3) +B = MatrixSymbol("B", 3, 3) +C = MatrixSymbol("C", 3, 3) + + +def test_TensorProduct_construction(): + assert TensorProduct(3, 4) == 12 + assert isinstance(TensorProduct(A, A), TensorProduct) + + expr = TensorProduct(TensorProduct(x, y), z) + assert expr == x*y*z + + expr = TensorProduct(TensorProduct(A, B), C) + assert expr == TensorProduct(A, B, C) + + expr = TensorProduct(Matrix.eye(2), Array([[0, -1], [1, 0]])) + assert expr == Array([ + [ + [[0, -1], [1, 0]], + [[0, 0], [0, 0]] + ], + [ + [[0, 0], [0, 0]], + [[0, -1], [1, 0]] + ] + ]) + + +def test_TensorProduct_shape(): + + expr = TensorProduct(3, 4, evaluate=False) + assert expr.shape == () + assert expr.rank() == 0 + + expr = TensorProduct(Array([1, 2]), Array([x, y]), evaluate=False) + assert expr.shape == (2, 2) + assert expr.rank() == 2 + expr = TensorProduct(expr, expr, evaluate=False) + assert expr.shape == (2, 2, 2, 2) + assert expr.rank() == 4 + + expr = TensorProduct(Matrix.eye(2), Array([[0, -1], [1, 0]]), evaluate=False) + assert expr.shape == (2, 2, 2, 2) + assert expr.rank() == 4 + + +def test_TensorProduct_getitem(): + expr = TensorProduct(A, B) + assert expr[i, j, k, l] == A[i, j]*B[k, l] diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/test_index_methods.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/test_index_methods.py new file mode 100644 index 0000000000000000000000000000000000000000..df20f7e7c1ab392321e8350b95dd07c5639c1865 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/test_index_methods.py @@ -0,0 +1,227 @@ +from sympy.core import symbols, S, Pow, Function +from sympy.functions import exp +from sympy.testing.pytest import raises +from sympy.tensor.indexed import Idx, IndexedBase +from 
sympy.tensor.index_methods import IndexConformanceException + +from sympy.tensor.index_methods import (get_contraction_structure, get_indices) + + +def test_trivial_indices(): + x, y = symbols('x y') + assert get_indices(x) == (set(), {}) + assert get_indices(x*y) == (set(), {}) + assert get_indices(x + y) == (set(), {}) + assert get_indices(x**y) == (set(), {}) + + +def test_get_indices_Indexed(): + x = IndexedBase('x') + i, j = Idx('i'), Idx('j') + assert get_indices(x[i, j]) == ({i, j}, {}) + assert get_indices(x[j, i]) == ({j, i}, {}) + + +def test_get_indices_Idx(): + f = Function('f') + i, j = Idx('i'), Idx('j') + assert get_indices(f(i)*j) == ({i, j}, {}) + assert get_indices(f(j, i)) == ({j, i}, {}) + assert get_indices(f(i)*i) == (set(), {}) + + +def test_get_indices_mul(): + x = IndexedBase('x') + y = IndexedBase('y') + i, j = Idx('i'), Idx('j') + assert get_indices(x[j]*y[i]) == ({i, j}, {}) + assert get_indices(x[i]*y[j]) == ({i, j}, {}) + + +def test_get_indices_exceptions(): + x = IndexedBase('x') + y = IndexedBase('y') + i, j = Idx('i'), Idx('j') + raises(IndexConformanceException, lambda: get_indices(x[i] + y[j])) + + +def test_scalar_broadcast(): + x = IndexedBase('x') + y = IndexedBase('y') + i, j = Idx('i'), Idx('j') + assert get_indices(x[i] + y[i, i]) == ({i}, {}) + assert get_indices(x[i] + y[j, j]) == ({i}, {}) + + +def test_get_indices_add(): + x = IndexedBase('x') + y = IndexedBase('y') + A = IndexedBase('A') + i, j, k = Idx('i'), Idx('j'), Idx('k') + assert get_indices(x[i] + 2*y[i]) == ({i}, {}) + assert get_indices(y[i] + 2*A[i, j]*x[j]) == ({i}, {}) + assert get_indices(y[i] + 2*(x[i] + A[i, j]*x[j])) == ({i}, {}) + assert get_indices(y[i] + x[i]*(A[j, j] + 1)) == ({i}, {}) + assert get_indices( + y[i] + x[i]*x[j]*(y[j] + A[j, k]*x[k])) == ({i}, {}) + + +def test_get_indices_Pow(): + x = IndexedBase('x') + y = IndexedBase('y') + A = IndexedBase('A') + i, j, k = Idx('i'), Idx('j'), Idx('k') + assert get_indices(Pow(x[i], y[j])) == ({i, j}, {}) + assert get_indices(Pow(x[i, k], y[j, k])) == ({i, j, k}, {}) + assert get_indices(Pow(A[i, k], y[k] + A[k, j]*x[j])) == ({i, k}, {}) + assert get_indices(Pow(2, x[i])) == get_indices(exp(x[i])) + + # test of a design decision, this may change: + assert get_indices(Pow(x[i], 2)) == ({i}, {}) + + +def test_get_contraction_structure_basic(): + x = IndexedBase('x') + y = IndexedBase('y') + i, j = Idx('i'), Idx('j') + assert get_contraction_structure(x[i]*y[j]) == {None: {x[i]*y[j]}} + assert get_contraction_structure(x[i] + y[j]) == {None: {x[i], y[j]}} + assert get_contraction_structure(x[i]*y[i]) == {(i,): {x[i]*y[i]}} + assert get_contraction_structure( + 1 + x[i]*y[i]) == {None: {S.One}, (i,): {x[i]*y[i]}} + assert get_contraction_structure(x[i]**y[i]) == {None: {x[i]**y[i]}} + + +def test_get_contraction_structure_complex(): + x = IndexedBase('x') + y = IndexedBase('y') + A = IndexedBase('A') + i, j, k = Idx('i'), Idx('j'), Idx('k') + expr1 = y[i] + A[i, j]*x[j] + d1 = {None: {y[i]}, (j,): {A[i, j]*x[j]}} + assert get_contraction_structure(expr1) == d1 + expr2 = expr1*A[k, i] + x[k] + d2 = {None: {x[k]}, (i,): {expr1*A[k, i]}, expr1*A[k, i]: [d1]} + assert get_contraction_structure(expr2) == d2 + + +def test_contraction_structure_simple_Pow(): + x = IndexedBase('x') + y = IndexedBase('y') + i, j, k = Idx('i'), Idx('j'), Idx('k') + ii_jj = x[i, i]**y[j, j] + assert get_contraction_structure(ii_jj) == { + None: {ii_jj}, + ii_jj: [ + {(i,): {x[i, i]}}, + {(j,): {y[j, j]}} + ] + } + + ii_jk = x[i, i]**y[j, k] + assert 
get_contraction_structure(ii_jk) == { + None: {x[i, i]**y[j, k]}, + x[i, i]**y[j, k]: [ + {(i,): {x[i, i]}} + ] + } + + +def test_contraction_structure_Mul_and_Pow(): + x = IndexedBase('x') + y = IndexedBase('y') + i, j, k = Idx('i'), Idx('j'), Idx('k') + + i_ji = x[i]**(y[j]*x[i]) + assert get_contraction_structure(i_ji) == {None: {i_ji}} + ij_i = (x[i]*y[j])**(y[i]) + assert get_contraction_structure(ij_i) == {None: {ij_i}} + j_ij_i = x[j]*(x[i]*y[j])**(y[i]) + assert get_contraction_structure(j_ij_i) == {(j,): {j_ij_i}} + j_i_ji = x[j]*x[i]**(y[j]*x[i]) + assert get_contraction_structure(j_i_ji) == {(j,): {j_i_ji}} + ij_exp_kki = x[i]*y[j]*exp(y[i]*y[k, k]) + result = get_contraction_structure(ij_exp_kki) + expected = { + (i,): {ij_exp_kki}, + ij_exp_kki: [{ + None: {exp(y[i]*y[k, k])}, + exp(y[i]*y[k, k]): [{ + None: {y[i]*y[k, k]}, + y[i]*y[k, k]: [{(k,): {y[k, k]}}] + }]} + ] + } + assert result == expected + + +def test_contraction_structure_Add_in_Pow(): + x = IndexedBase('x') + y = IndexedBase('y') + i, j, k = Idx('i'), Idx('j'), Idx('k') + s_ii_jj_s = (1 + x[i, i])**(1 + y[j, j]) + expected = { + None: {s_ii_jj_s}, + s_ii_jj_s: [ + {None: {S.One}, (i,): {x[i, i]}}, + {None: {S.One}, (j,): {y[j, j]}} + ] + } + result = get_contraction_structure(s_ii_jj_s) + assert result == expected + + s_ii_jk_s = (1 + x[i, i]) ** (1 + y[j, k]) + expected_2 = { + None: {(x[i, i] + 1)**(y[j, k] + 1)}, + s_ii_jk_s: [ + {None: {S.One}, (i,): {x[i, i]}} + ] + } + result_2 = get_contraction_structure(s_ii_jk_s) + assert result_2 == expected_2 + + +def test_contraction_structure_Pow_in_Pow(): + x = IndexedBase('x') + y = IndexedBase('y') + z = IndexedBase('z') + i, j, k = Idx('i'), Idx('j'), Idx('k') + ii_jj_kk = x[i, i]**y[j, j]**z[k, k] + expected = { + None: {ii_jj_kk}, + ii_jj_kk: [ + {(i,): {x[i, i]}}, + { + None: {y[j, j]**z[k, k]}, + y[j, j]**z[k, k]: [ + {(j,): {y[j, j]}}, + {(k,): {z[k, k]}} + ] + } + ] + } + assert get_contraction_structure(ii_jj_kk) == expected + + +def test_ufunc_support(): + f = Function('f') + g = Function('g') + x = IndexedBase('x') + y = IndexedBase('y') + i, j = Idx('i'), Idx('j') + a = symbols('a') + + assert get_indices(f(x[i])) == ({i}, {}) + assert get_indices(f(x[i], y[j])) == ({i, j}, {}) + assert get_indices(f(y[i])*g(x[i])) == (set(), {}) + assert get_indices(f(a, x[i])) == ({i}, {}) + assert get_indices(f(a, y[i], x[j])*g(x[i])) == ({j}, {}) + assert get_indices(g(f(x[i]))) == ({i}, {}) + + assert get_contraction_structure(f(x[i])) == {None: {f(x[i])}} + assert get_contraction_structure( + f(y[i])*g(x[i])) == {(i,): {f(y[i])*g(x[i])}} + assert get_contraction_structure( + f(y[i])*g(f(x[i]))) == {(i,): {f(y[i])*g(f(x[i]))}} + assert get_contraction_structure( + f(x[j], y[i])*g(x[i])) == {(i,): {f(x[j], y[i])*g(x[i])}} diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/test_indexed.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/test_indexed.py new file mode 100644 index 0000000000000000000000000000000000000000..0dc53763876219f928820fbabfb51baeff3ddd4d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/test_indexed.py @@ -0,0 +1,503 @@ +from sympy.core import symbols, Symbol, Tuple, oo, Dummy +from sympy.tensor.indexed import IndexException +from sympy.testing.pytest import raises +from sympy.utilities.iterables import iterable + +# import test: +from sympy.concrete.summations import Sum +from sympy.core.function import Function, Subs, Derivative +from sympy.core.relational import 
(StrictLessThan, GreaterThan, + StrictGreaterThan, LessThan) +from sympy.core.singleton import S +from sympy.functions.elementary.exponential import exp, log +from sympy.functions.elementary.trigonometric import cos, sin +from sympy.functions.special.tensor_functions import KroneckerDelta +from sympy.series.order import Order +from sympy.sets.fancysets import Range +from sympy.tensor.indexed import IndexedBase, Idx, Indexed + + +def test_Idx_construction(): + i, a, b = symbols('i a b', integer=True) + assert Idx(i) != Idx(i, 1) + assert Idx(i, a) == Idx(i, (0, a - 1)) + assert Idx(i, oo) == Idx(i, (0, oo)) + + x = symbols('x', integer=False) + raises(TypeError, lambda: Idx(x)) + raises(TypeError, lambda: Idx(0.5)) + raises(TypeError, lambda: Idx(i, x)) + raises(TypeError, lambda: Idx(i, 0.5)) + raises(TypeError, lambda: Idx(i, (x, 5))) + raises(TypeError, lambda: Idx(i, (2, x))) + raises(TypeError, lambda: Idx(i, (2, 3.5))) + + +def test_Idx_properties(): + i, a, b = symbols('i a b', integer=True) + assert Idx(i).is_integer + assert Idx(i).name == 'i' + assert Idx(i + 2).name == 'i + 2' + assert Idx('foo').name == 'foo' + + +def test_Idx_bounds(): + i, a, b = symbols('i a b', integer=True) + assert Idx(i).lower is None + assert Idx(i).upper is None + assert Idx(i, a).lower == 0 + assert Idx(i, a).upper == a - 1 + assert Idx(i, 5).lower == 0 + assert Idx(i, 5).upper == 4 + assert Idx(i, oo).lower == 0 + assert Idx(i, oo).upper is oo + assert Idx(i, (a, b)).lower == a + assert Idx(i, (a, b)).upper == b + assert Idx(i, (1, 5)).lower == 1 + assert Idx(i, (1, 5)).upper == 5 + assert Idx(i, (-oo, oo)).lower is -oo + assert Idx(i, (-oo, oo)).upper is oo + + +def test_Idx_fixed_bounds(): + i, a, b, x = symbols('i a b x', integer=True) + assert Idx(x).lower is None + assert Idx(x).upper is None + assert Idx(x, a).lower == 0 + assert Idx(x, a).upper == a - 1 + assert Idx(x, 5).lower == 0 + assert Idx(x, 5).upper == 4 + assert Idx(x, oo).lower == 0 + assert Idx(x, oo).upper is oo + assert Idx(x, (a, b)).lower == a + assert Idx(x, (a, b)).upper == b + assert Idx(x, (1, 5)).lower == 1 + assert Idx(x, (1, 5)).upper == 5 + assert Idx(x, (-oo, oo)).lower is -oo + assert Idx(x, (-oo, oo)).upper is oo + + +def test_Idx_inequalities(): + i14 = Idx("i14", (1, 4)) + i79 = Idx("i79", (7, 9)) + i46 = Idx("i46", (4, 6)) + i35 = Idx("i35", (3, 5)) + + assert i14 <= 5 + assert i14 < 5 + assert not (i14 >= 5) + assert not (i14 > 5) + + assert 5 >= i14 + assert 5 > i14 + assert not (5 <= i14) + assert not (5 < i14) + + assert LessThan(i14, 5) + assert StrictLessThan(i14, 5) + assert not GreaterThan(i14, 5) + assert not StrictGreaterThan(i14, 5) + + assert i14 <= 4 + assert isinstance(i14 < 4, StrictLessThan) + assert isinstance(i14 >= 4, GreaterThan) + assert not (i14 > 4) + + assert isinstance(i14 <= 1, LessThan) + assert not (i14 < 1) + assert i14 >= 1 + assert isinstance(i14 > 1, StrictGreaterThan) + + assert not (i14 <= 0) + assert not (i14 < 0) + assert i14 >= 0 + assert i14 > 0 + + from sympy.abc import x + + assert isinstance(i14 < x, StrictLessThan) + assert isinstance(i14 > x, StrictGreaterThan) + assert isinstance(i14 <= x, LessThan) + assert isinstance(i14 >= x, GreaterThan) + + assert i14 < i79 + assert i14 <= i79 + assert not (i14 > i79) + assert not (i14 >= i79) + + assert i14 <= i46 + assert isinstance(i14 < i46, StrictLessThan) + assert isinstance(i14 >= i46, GreaterThan) + assert not (i14 > i46) + + assert isinstance(i14 < i35, StrictLessThan) + assert isinstance(i14 > i35, StrictGreaterThan) + 
assert isinstance(i14 <= i35, LessThan) + assert isinstance(i14 >= i35, GreaterThan) + + iNone1 = Idx("iNone1") + iNone2 = Idx("iNone2") + + assert isinstance(iNone1 < iNone2, StrictLessThan) + assert isinstance(iNone1 > iNone2, StrictGreaterThan) + assert isinstance(iNone1 <= iNone2, LessThan) + assert isinstance(iNone1 >= iNone2, GreaterThan) + + +def test_Idx_inequalities_current_fails(): + i14 = Idx("i14", (1, 4)) + + assert S(5) >= i14 + assert S(5) > i14 + assert not (S(5) <= i14) + assert not (S(5) < i14) + + +def test_Idx_func_args(): + i, a, b = symbols('i a b', integer=True) + ii = Idx(i) + assert ii.func(*ii.args) == ii + ii = Idx(i, a) + assert ii.func(*ii.args) == ii + ii = Idx(i, (a, b)) + assert ii.func(*ii.args) == ii + + +def test_Idx_subs(): + i, a, b = symbols('i a b', integer=True) + assert Idx(i, a).subs(a, b) == Idx(i, b) + assert Idx(i, a).subs(i, b) == Idx(b, a) + + assert Idx(i).subs(i, 2) == Idx(2) + assert Idx(i, a).subs(a, 2) == Idx(i, 2) + assert Idx(i, (a, b)).subs(i, 2) == Idx(2, (a, b)) + + +def test_IndexedBase_sugar(): + i, j = symbols('i j', integer=True) + a = symbols('a') + A1 = Indexed(a, i, j) + A2 = IndexedBase(a) + assert A1 == A2[i, j] + assert A1 == A2[(i, j)] + assert A1 == A2[[i, j]] + assert A1 == A2[Tuple(i, j)] + assert all(a.is_Integer for a in A2[1, 0].args[1:]) + + +def test_IndexedBase_subs(): + i = symbols('i', integer=True) + a, b = symbols('a b') + A = IndexedBase(a) + B = IndexedBase(b) + assert A[i] == B[i].subs(b, a) + C = {1: 2} + assert C[1] == A[1].subs(A, C) + + +def test_IndexedBase_shape(): + i, j, m, n = symbols('i j m n', integer=True) + a = IndexedBase('a', shape=(m, m)) + b = IndexedBase('a', shape=(m, n)) + assert b.shape == Tuple(m, n) + assert a[i, j] != b[i, j] + assert a[i, j] == b[i, j].subs(n, m) + assert b.func(*b.args) == b + assert b[i, j].func(*b[i, j].args) == b[i, j] + raises(IndexException, lambda: b[i]) + raises(IndexException, lambda: b[i, i, j]) + F = IndexedBase("F", shape=m) + assert F.shape == Tuple(m) + assert F[i].subs(i, j) == F[j] + raises(IndexException, lambda: F[i, j]) + + +def test_IndexedBase_assumptions(): + i = Symbol('i', integer=True) + a = Symbol('a') + A = IndexedBase(a, positive=True) + for c in (A, A[i]): + assert c.is_real + assert c.is_complex + assert not c.is_imaginary + assert c.is_nonnegative + assert c.is_nonzero + assert c.is_commutative + assert log(exp(c)) == c + + assert A != IndexedBase(a) + assert A == IndexedBase(a, positive=True, real=True) + assert A[i] != Indexed(a, i) + + +def test_IndexedBase_assumptions_inheritance(): + I = Symbol('I', integer=True) + I_inherit = IndexedBase(I) + I_explicit = IndexedBase('I', integer=True) + + assert I_inherit.is_integer + assert I_explicit.is_integer + assert I_inherit.label.is_integer + assert I_explicit.label.is_integer + assert I_inherit == I_explicit + + +def test_issue_17652(): + """Regression test issue #17652. 
+ + IndexedBase.label should not upcast subclasses of Symbol + """ + class SubClass(Symbol): + pass + + x = SubClass('X') + assert type(x) == SubClass + base = IndexedBase(x) + assert type(x) == SubClass + assert type(base.label) == SubClass + + +def test_Indexed_constructor(): + i, j = symbols('i j', integer=True) + A = Indexed('A', i, j) + assert A == Indexed(Symbol('A'), i, j) + assert A == Indexed(IndexedBase('A'), i, j) + raises(TypeError, lambda: Indexed(A, i, j)) + raises(IndexException, lambda: Indexed("A")) + assert A.free_symbols == {A, A.base.label, i, j} + + +def test_Indexed_func_args(): + i, j = symbols('i j', integer=True) + a = symbols('a') + A = Indexed(a, i, j) + assert A == A.func(*A.args) + + +def test_Indexed_subs(): + i, j, k = symbols('i j k', integer=True) + a, b = symbols('a b') + A = IndexedBase(a) + B = IndexedBase(b) + assert A[i, j] == B[i, j].subs(b, a) + assert A[i, j] == A[i, k].subs(k, j) + + +def test_Indexed_properties(): + i, j = symbols('i j', integer=True) + A = Indexed('A', i, j) + assert A.name == 'A[i, j]' + assert A.rank == 2 + assert A.indices == (i, j) + assert A.base == IndexedBase('A') + assert A.ranges == [None, None] + raises(IndexException, lambda: A.shape) + + n, m = symbols('n m', integer=True) + assert Indexed('A', Idx( + i, m), Idx(j, n)).ranges == [Tuple(0, m - 1), Tuple(0, n - 1)] + assert Indexed('A', Idx(i, m), Idx(j, n)).shape == Tuple(m, n) + raises(IndexException, lambda: Indexed("A", Idx(i, m), Idx(j)).shape) + + +def test_Indexed_shape_precedence(): + i, j = symbols('i j', integer=True) + o, p = symbols('o p', integer=True) + n, m = symbols('n m', integer=True) + a = IndexedBase('a', shape=(o, p)) + assert a.shape == Tuple(o, p) + assert Indexed( + a, Idx(i, m), Idx(j, n)).ranges == [Tuple(0, m - 1), Tuple(0, n - 1)] + assert Indexed(a, Idx(i, m), Idx(j, n)).shape == Tuple(o, p) + assert Indexed( + a, Idx(i, m), Idx(j)).ranges == [Tuple(0, m - 1), (None, None)] + assert Indexed(a, Idx(i, m), Idx(j)).shape == Tuple(o, p) + + +def test_complex_indices(): + i, j = symbols('i j', integer=True) + A = Indexed('A', i, i + j) + assert A.rank == 2 + assert A.indices == (i, i + j) + + +def test_not_interable(): + i, j = symbols('i j', integer=True) + A = Indexed('A', i, i + j) + assert not iterable(A) + + +def test_Indexed_coeff(): + N = Symbol('N', integer=True) + len_y = N + i = Idx('i', len_y-1) + y = IndexedBase('y', shape=(len_y,)) + a = (1/y[i+1]*y[i]).coeff(y[i]) + b = (y[i]/y[i+1]).coeff(y[i]) + assert a == b + + +def test_differentiation(): + from sympy.functions.special.tensor_functions import KroneckerDelta + i, j, k, l = symbols('i j k l', cls=Idx) + a = symbols('a') + m, n = symbols("m, n", integer=True, finite=True) + assert m.is_real + h, L = symbols('h L', cls=IndexedBase) + hi, hj = h[i], h[j] + + expr = hi + assert expr.diff(hj) == KroneckerDelta(i, j) + assert expr.diff(hi) == KroneckerDelta(i, i) + + expr = S(2) * hi + assert expr.diff(hj) == S(2) * KroneckerDelta(i, j) + assert expr.diff(hi) == S(2) * KroneckerDelta(i, i) + assert expr.diff(a) is S.Zero + + assert Sum(expr, (i, -oo, oo)).diff(hj) == Sum(2*KroneckerDelta(i, j), (i, -oo, oo)) + assert Sum(expr.diff(hj), (i, -oo, oo)) == Sum(2*KroneckerDelta(i, j), (i, -oo, oo)) + assert Sum(expr, (i, -oo, oo)).diff(hj).doit() == 2 + + assert Sum(expr.diff(hi), (i, -oo, oo)).doit() == Sum(2, (i, -oo, oo)).doit() + assert Sum(expr, (i, -oo, oo)).diff(hi).doit() is oo + + expr = a * hj * hj / S(2) + assert expr.diff(hi) == a * h[j] * KroneckerDelta(i, j) + assert 
expr.diff(a) == hj * hj / S(2) + assert expr.diff(a, 2) is S.Zero + + assert Sum(expr, (i, -oo, oo)).diff(hi) == Sum(a*KroneckerDelta(i, j)*h[j], (i, -oo, oo)) + assert Sum(expr.diff(hi), (i, -oo, oo)) == Sum(a*KroneckerDelta(i, j)*h[j], (i, -oo, oo)) + assert Sum(expr, (i, -oo, oo)).diff(hi).doit() == a*h[j] + + assert Sum(expr, (j, -oo, oo)).diff(hi) == Sum(a*KroneckerDelta(i, j)*h[j], (j, -oo, oo)) + assert Sum(expr.diff(hi), (j, -oo, oo)) == Sum(a*KroneckerDelta(i, j)*h[j], (j, -oo, oo)) + assert Sum(expr, (j, -oo, oo)).diff(hi).doit() == a*h[i] + + expr = a * sin(hj * hj) + assert expr.diff(hi) == 2*a*cos(hj * hj) * hj * KroneckerDelta(i, j) + assert expr.diff(hj) == 2*a*cos(hj * hj) * hj + + expr = a * L[i, j] * h[j] + assert expr.diff(hi) == a*L[i, j]*KroneckerDelta(i, j) + assert expr.diff(hj) == a*L[i, j] + assert expr.diff(L[i, j]) == a*h[j] + assert expr.diff(L[k, l]) == a*KroneckerDelta(i, k)*KroneckerDelta(j, l)*h[j] + assert expr.diff(L[i, l]) == a*KroneckerDelta(j, l)*h[j] + + assert Sum(expr, (j, -oo, oo)).diff(L[k, l]) == Sum(a * KroneckerDelta(i, k) * KroneckerDelta(j, l) * h[j], (j, -oo, oo)) + assert Sum(expr, (j, -oo, oo)).diff(L[k, l]).doit() == a * KroneckerDelta(i, k) * h[l] + + assert h[m].diff(h[m]) == 1 + assert h[m].diff(h[n]) == KroneckerDelta(m, n) + assert Sum(a*h[m], (m, -oo, oo)).diff(h[n]) == Sum(a*KroneckerDelta(m, n), (m, -oo, oo)) + assert Sum(a*h[m], (m, -oo, oo)).diff(h[n]).doit() == a + assert Sum(a*h[m], (n, -oo, oo)).diff(h[n]) == Sum(a*KroneckerDelta(m, n), (n, -oo, oo)) + assert Sum(a*h[m], (m, -oo, oo)).diff(h[m]).doit() == oo*a + + +def test_indexed_series(): + A = IndexedBase("A") + i = symbols("i", integer=True) + assert sin(A[i]).series(A[i]) == A[i] - A[i]**3/6 + A[i]**5/120 + Order(A[i]**6, A[i]) + + +def test_indexed_is_constant(): + A = IndexedBase("A") + i, j, k = symbols("i,j,k") + assert not A[i].is_constant() + assert A[i].is_constant(j) + assert not A[1+2*i, k].is_constant() + assert not A[1+2*i, k].is_constant(i) + assert A[1+2*i, k].is_constant(j) + assert not A[1+2*i, k].is_constant(k) + + +def test_issue_12533(): + d = IndexedBase('d') + assert IndexedBase(range(5)) == Range(0, 5, 1) + assert d[0].subs(Symbol("d"), range(5)) == 0 + assert d[0].subs(d, range(5)) == 0 + assert d[1].subs(d, range(5)) == 1 + assert Indexed(Range(5), 2) == 2 + + +def test_issue_12780(): + n = symbols("n") + i = Idx("i", (0, n)) + raises(TypeError, lambda: i.subs(n, 1.5)) + + +def test_issue_18604(): + m = symbols("m") + assert Idx("i", m).name == 'i' + assert Idx("i", m).lower == 0 + assert Idx("i", m).upper == m - 1 + m = symbols("m", real=False) + raises(TypeError, lambda: Idx("i", m)) + +def test_Subs_with_Indexed(): + A = IndexedBase("A") + i, j, k = symbols("i,j,k") + x, y, z = symbols("x,y,z") + f = Function("f") + + assert Subs(A[i], A[i], A[j]).diff(A[j]) == 1 + assert Subs(A[i], A[i], x).diff(A[i]) == 0 + assert Subs(A[i], A[i], x).diff(A[j]) == 0 + assert Subs(A[i], A[i], x).diff(x) == 1 + assert Subs(A[i], A[i], x).diff(y) == 0 + assert Subs(A[i], A[i], A[j]).diff(A[k]) == KroneckerDelta(j, k) + assert Subs(x, x, A[i]).diff(A[j]) == KroneckerDelta(i, j) + assert Subs(f(A[i]), A[i], x).diff(A[j]) == 0 + assert Subs(f(A[i]), A[i], A[k]).diff(A[j]) == Derivative(f(A[k]), A[k])*KroneckerDelta(j, k) + assert Subs(x, x, A[i]**2).diff(A[j]) == 2*KroneckerDelta(i, j)*A[i] + assert Subs(A[i], A[i], A[j]**2).diff(A[k]) == 2*KroneckerDelta(j, k)*A[j] + + assert Subs(A[i]*x, x, A[i]).diff(A[i]) == 2*A[i] + assert Subs(A[i]*x, x, A[i]).diff(A[j]) == 
2*A[i]*KroneckerDelta(i, j) + assert Subs(A[i]*x, x, A[j]).diff(A[i]) == A[j] + A[i]*KroneckerDelta(i, j) + assert Subs(A[i]*x, x, A[j]).diff(A[j]) == A[i] + A[j]*KroneckerDelta(i, j) + assert Subs(A[i]*x, x, A[i]).diff(A[k]) == 2*A[i]*KroneckerDelta(i, k) + assert Subs(A[i]*x, x, A[j]).diff(A[k]) == KroneckerDelta(i, k)*A[j] + KroneckerDelta(j, k)*A[i] + + assert Subs(A[i]*x, A[i], x).diff(A[i]) == 0 + assert Subs(A[i]*x, A[i], x).diff(A[j]) == 0 + assert Subs(A[i]*x, A[j], x).diff(A[i]) == x + assert Subs(A[i]*x, A[j], x).diff(A[j]) == x*KroneckerDelta(i, j) + assert Subs(A[i]*x, A[i], x).diff(A[k]) == 0 + assert Subs(A[i]*x, A[j], x).diff(A[k]) == x*KroneckerDelta(i, k) + + +def test_complicated_derivative_with_Indexed(): + x, y = symbols("x,y", cls=IndexedBase) + sigma = symbols("sigma") + i, j, k = symbols("i,j,k") + m0,m1,m2,m3,m4,m5 = symbols("m0:6") + f = Function("f") + + expr = f((x[i] - y[i])**2/sigma) + _xi_1 = symbols("xi_1", cls=Dummy) + assert expr.diff(x[m0]).dummy_eq( + (x[i] - y[i])*KroneckerDelta(i, m0)*\ + 2*Subs( + Derivative(f(_xi_1), _xi_1), + (_xi_1,), + ((x[i] - y[i])**2/sigma,) + )/sigma + ) + assert expr.diff(x[m0]).diff(x[m1]).dummy_eq( + 2*KroneckerDelta(i, m0)*\ + KroneckerDelta(i, m1)*Subs( + Derivative(f(_xi_1), _xi_1), + (_xi_1,), + ((x[i] - y[i])**2/sigma,) + )/sigma + \ + 4*(x[i] - y[i])**2*KroneckerDelta(i, m0)*KroneckerDelta(i, m1)*\ + Subs( + Derivative(f(_xi_1), _xi_1, _xi_1), + (_xi_1,), + ((x[i] - y[i])**2/sigma,) + )/sigma**2 + ) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/test_printing.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/test_printing.py new file mode 100644 index 0000000000000000000000000000000000000000..9f3cf7f0591a7012c93354ab7b8d7e010def38bb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/test_printing.py @@ -0,0 +1,13 @@ +from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead +from sympy import I + +def test_printing_TensMul(): + R3 = TensorIndexType('R3', dim=3) + p, q = tensor_indices("p q", R3) + K = TensorHead("K", [R3]) + + assert repr(2*K(p)) == "2*K(p)" + assert repr(-K(p)) == "-K(p)" + assert repr(-2*K(p)*K(q)) == "-2*K(p)*K(q)" + assert repr(-I*K(p)) == "-I*K(p)" + assert repr(I*K(p)) == "I*K(p)" diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/test_tensor.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/test_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..ae9bb5aad8350edd9ad389c88f024141344b6396 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/tests/test_tensor.py @@ -0,0 +1,2042 @@ +from sympy.concrete.summations import Sum +from sympy.core.function import expand +from sympy.core.numbers import Integer +from sympy.matrices.dense import (Matrix, eye) +from sympy.tensor.indexed import Indexed +from sympy.combinatorics import Permutation +from sympy.core import S, Rational, Symbol, Basic, Add +from sympy.core.containers import Tuple +from sympy.core.symbol import symbols +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.tensor.array import Array +from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorSymmetry, \ + get_symmetric_group_sgs, TensorIndex, tensor_mul, TensAdd, \ + riemann_cyclic_replace, riemann_cyclic, TensMul, tensor_heads, \ + TensorManager, TensExpr, TensorHead, canon_bp, \ + tensorhead, tensorsymmetry, TensorType, substitute_indices, \ + WildTensorIndex, WildTensorHead, 
_WildTensExpr +from sympy.testing.pytest import raises, XFAIL, warns_deprecated_sympy +from sympy.matrices import diag + +def _is_equal(arg1, arg2): + if isinstance(arg1, TensExpr): + return arg1.equals(arg2) + elif isinstance(arg2, TensExpr): + return arg2.equals(arg1) + return arg1 == arg2 + + +#################### Tests from tensor_can.py ####################### +def test_canonicalize_no_slot_sym(): + # A_d0 * B^d0; T_c = A^d0*B_d0 + Lorentz = TensorIndexType('Lorentz', dummy_name='L') + a, b, d0, d1 = tensor_indices('a,b,d0,d1', Lorentz) + A, B = tensor_heads('A,B', [Lorentz], TensorSymmetry.no_symmetry(1)) + t = A(-d0)*B(d0) + tc = t.canon_bp() + assert str(tc) == 'A(L_0)*B(-L_0)' + + # A^a * B^b; T_c = T + t = A(a)*B(b) + tc = t.canon_bp() + assert tc == t + # B^b * A^a + t1 = B(b)*A(a) + tc = t1.canon_bp() + assert str(tc) == 'A(a)*B(b)' + + # A symmetric + # A^{b}_{d0}*A^{d0, a}; T_c = A^{a d0}*A{b}_{d0} + A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(2)) + t = A(b, -d0)*A(d0, a) + tc = t.canon_bp() + assert str(tc) == 'A(a, L_0)*A(b, -L_0)' + + # A^{d1}_{d0}*B^d0*C_d1 + # T_c = A^{d0 d1}*B_d0*C_d1 + B, C = tensor_heads('B,C', [Lorentz], TensorSymmetry.no_symmetry(1)) + t = A(d1, -d0)*B(d0)*C(-d1) + tc = t.canon_bp() + assert str(tc) == 'A(L_0, L_1)*B(-L_0)*C(-L_1)' + + # A without symmetry + # A^{d1}_{d0}*B^d0*C_d1 ord=[d0,-d0,d1,-d1]; g = [2,1,0,3,4,5] + # T_c = A^{d0 d1}*B_d1*C_d0; can = [0,2,3,1,4,5] + A = TensorHead('A', [Lorentz]*2, TensorSymmetry.no_symmetry(2)) + t = A(d1, -d0)*B(d0)*C(-d1) + tc = t.canon_bp() + assert str(tc) == 'A(L_0, L_1)*B(-L_1)*C(-L_0)' + + # A, B without symmetry + # A^{d1}_{d0}*B_{d1}^{d0} + # T_c = A^{d0 d1}*B_{d0 d1} + B = TensorHead('B', [Lorentz]*2, TensorSymmetry.no_symmetry(2)) + t = A(d1, -d0)*B(-d1, d0) + tc = t.canon_bp() + assert str(tc) == 'A(L_0, L_1)*B(-L_0, -L_1)' + # A_{d0}^{d1}*B_{d1}^{d0} + # T_c = A^{d0 d1}*B_{d1 d0} + t = A(-d0, d1)*B(-d1, d0) + tc = t.canon_bp() + assert str(tc) == 'A(L_0, L_1)*B(-L_1, -L_0)' + + # A, B, C without symmetry + # A^{d1 d0}*B_{a d0}*C_{d1 b} + # T_c=A^{d0 d1}*B_{a d1}*C_{d0 b} + C = TensorHead('C', [Lorentz]*2, TensorSymmetry.no_symmetry(2)) + t = A(d1, d0)*B(-a, -d0)*C(-d1, -b) + tc = t.canon_bp() + assert str(tc) == 'A(L_0, L_1)*B(-a, -L_1)*C(-L_0, -b)' + + # A symmetric, B and C without symmetry + # A^{d1 d0}*B_{a d0}*C_{d1 b} + # T_c = A^{d0 d1}*B_{a d0}*C_{d1 b} + A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(2)) + t = A(d1, d0)*B(-a, -d0)*C(-d1, -b) + tc = t.canon_bp() + assert str(tc) == 'A(L_0, L_1)*B(-a, -L_0)*C(-L_1, -b)' + + # A and C symmetric, B without symmetry + # A^{d1 d0}*B_{a d0}*C_{d1 b} ord=[a,b,d0,-d0,d1,-d1] + # T_c = A^{d0 d1}*B_{a d0}*C_{b d1} + C = TensorHead('C', [Lorentz]*2, TensorSymmetry.fully_symmetric(2)) + t = A(d1, d0)*B(-a, -d0)*C(-d1, -b) + tc = t.canon_bp() + assert str(tc) == 'A(L_0, L_1)*B(-a, -L_0)*C(-b, -L_1)' + +def test_canonicalize_no_dummies(): + Lorentz = TensorIndexType('Lorentz', dummy_name='L') + a, b, c, d = tensor_indices('a, b, c, d', Lorentz) + + # A commuting + # A^c A^b A^a + # T_c = A^a A^b A^c + A = TensorHead('A', [Lorentz], TensorSymmetry.no_symmetry(1)) + t = A(c)*A(b)*A(a) + tc = t.canon_bp() + assert str(tc) == 'A(a)*A(b)*A(c)' + + # A anticommuting + # A^c A^b A^a + # T_c = -A^a A^b A^c + A = TensorHead('A', [Lorentz], TensorSymmetry.no_symmetry(1), 1) + t = A(c)*A(b)*A(a) + tc = t.canon_bp() + assert str(tc) == '-A(a)*A(b)*A(c)' + + # A commuting and symmetric + # A^{b,d}*A^{c,a} + # T_c = A^{a c}*A^{b 
d} + A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(2)) + t = A(b, d)*A(c, a) + tc = t.canon_bp() + assert str(tc) == 'A(a, c)*A(b, d)' + + # A anticommuting and symmetric + # A^{b,d}*A^{c,a} + # T_c = -A^{a c}*A^{b d} + A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(2), 1) + t = A(b, d)*A(c, a) + tc = t.canon_bp() + assert str(tc) == '-A(a, c)*A(b, d)' + + # A^{c,a}*A^{b,d} + # T_c = A^{a c}*A^{b d} + t = A(c, a)*A(b, d) + tc = t.canon_bp() + assert str(tc) == 'A(a, c)*A(b, d)' + +def test_tensorhead_construction_without_symmetry(): + L = TensorIndexType('Lorentz') + A1 = TensorHead('A', [L, L]) + A2 = TensorHead('A', [L, L], TensorSymmetry.no_symmetry(2)) + assert A1 == A2 + A3 = TensorHead('A', [L, L], TensorSymmetry.fully_symmetric(2)) # Symmetric + assert A1 != A3 + +def test_no_metric_symmetry(): + # no metric symmetry; A no symmetry + # A^d1_d0 * A^d0_d1 + # T_c = A^d0_d1 * A^d1_d0 + Lorentz = TensorIndexType('Lorentz', dummy_name='L', metric_symmetry=0) + d0, d1, d2, d3 = tensor_indices('d:4', Lorentz) + A = TensorHead('A', [Lorentz]*2, TensorSymmetry.no_symmetry(2)) + t = A(d1, -d0)*A(d0, -d1) + tc = t.canon_bp() + assert str(tc) == 'A(L_0, -L_1)*A(L_1, -L_0)' + + # A^d1_d2 * A^d0_d3 * A^d2_d1 * A^d3_d0 + # T_c = A^d0_d1 * A^d1_d0 * A^d2_d3 * A^d3_d2 + t = A(d1, -d2)*A(d0, -d3)*A(d2, -d1)*A(d3, -d0) + tc = t.canon_bp() + assert str(tc) == 'A(L_0, -L_1)*A(L_1, -L_0)*A(L_2, -L_3)*A(L_3, -L_2)' + + # A^d0_d2 * A^d1_d3 * A^d3_d0 * A^d2_d1 + # T_c = A^d0_d1 * A^d1_d2 * A^d2_d3 * A^d3_d0 + t = A(d0, -d1)*A(d1, -d2)*A(d2, -d3)*A(d3, -d0) + tc = t.canon_bp() + assert str(tc) == 'A(L_0, -L_1)*A(L_1, -L_2)*A(L_2, -L_3)*A(L_3, -L_0)' + +def test_canonicalize1(): + Lorentz = TensorIndexType('Lorentz', dummy_name='L') + a, a0, a1, a2, a3, b, d0, d1, d2, d3 = \ + tensor_indices('a,a0,a1,a2,a3,b,d0,d1,d2,d3', Lorentz) + + # A_d0*A^d0; ord = [d0,-d0] + # T_c = A^d0*A_d0 + A = TensorHead('A', [Lorentz], TensorSymmetry.no_symmetry(1)) + t = A(-d0)*A(d0) + tc = t.canon_bp() + assert str(tc) == 'A(L_0)*A(-L_0)' + + # A commuting + # A_d0*A_d1*A_d2*A^d2*A^d1*A^d0 + # T_c = A^d0*A_d0*A^d1*A_d1*A^d2*A_d2 + t = A(-d0)*A(-d1)*A(-d2)*A(d2)*A(d1)*A(d0) + tc = t.canon_bp() + assert str(tc) == 'A(L_0)*A(-L_0)*A(L_1)*A(-L_1)*A(L_2)*A(-L_2)' + + # A anticommuting + # A_d0*A_d1*A_d2*A^d2*A^d1*A^d0 + # T_c 0 + A = TensorHead('A', [Lorentz], TensorSymmetry.no_symmetry(1), 1) + t = A(-d0)*A(-d1)*A(-d2)*A(d2)*A(d1)*A(d0) + tc = t.canon_bp() + assert tc == 0 + + # A commuting symmetric + # A^{d0 b}*A^a_d1*A^d1_d0 + # T_c = A^{a d0}*A^{b d1}*A_{d0 d1} + A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(2)) + t = A(d0, b)*A(a, -d1)*A(d1, -d0) + tc = t.canon_bp() + assert str(tc) == 'A(a, L_0)*A(b, L_1)*A(-L_0, -L_1)' + + # A, B commuting symmetric + # A^{d0 b}*A^d1_d0*B^a_d1 + # T_c = A^{b d0}*A_d0^d1*B^a_d1 + B = TensorHead('B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2)) + t = A(d0, b)*A(d1, -d0)*B(a, -d1) + tc = t.canon_bp() + assert str(tc) == 'A(b, L_0)*A(-L_0, L_1)*B(a, -L_1)' + + # A commuting symmetric + # A^{d1 d0 b}*A^{a}_{d1 d0}; ord=[a,b, d0,-d0,d1,-d1] + # T_c = A^{a d0 d1}*A^{b}_{d0 d1} + A = TensorHead('A', [Lorentz]*3, TensorSymmetry.fully_symmetric(3)) + t = A(d1, d0, b)*A(a, -d1, -d0) + tc = t.canon_bp() + assert str(tc) == 'A(a, L_0, L_1)*A(b, -L_0, -L_1)' + + # A^{d3 d0 d2}*A^a0_{d1 d2}*A^d1_d3^a1*A^{a2 a3}_d0 + # T_c = A^{a0 d0 d1}*A^a1_d0^d2*A^{a2 a3 d3}*A_{d1 d2 d3} + t = A(d3, d0, d2)*A(a0, -d1, -d2)*A(d1, -d3, a1)*A(a2, a3, -d0) + tc = 
t.canon_bp() + assert str(tc) == 'A(a0, L_0, L_1)*A(a1, -L_0, L_2)*A(a2, a3, L_3)*A(-L_1, -L_2, -L_3)' + + # A commuting symmetric, B antisymmetric + # A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3 + # in this esxample and in the next three, + # renaming dummy indices and using symmetry of A, + # T = A^{d0 d1 d2} * A_{d0 d1 d3} * B_d2^d3 + # can = 0 + A = TensorHead('A', [Lorentz]*3, TensorSymmetry.fully_symmetric(3)) + B = TensorHead('B', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2)) + t = A(d0, d1, d2)*A(-d2, -d3, -d1)*B(-d0, d3) + tc = t.canon_bp() + assert tc == 0 + + # A anticommuting symmetric, B antisymmetric + # A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3 + # T_c = A^{d0 d1 d2} * A_{d0 d1}^d3 * B_{d2 d3} + A = TensorHead('A', [Lorentz]*3, TensorSymmetry.fully_symmetric(3), 1) + B = TensorHead('B', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2)) + t = A(d0, d1, d2)*A(-d2, -d3, -d1)*B(-d0, d3) + tc = t.canon_bp() + assert str(tc) == 'A(L_0, L_1, L_2)*A(-L_0, -L_1, L_3)*B(-L_2, -L_3)' + + # A anticommuting symmetric, B antisymmetric commuting, antisymmetric metric + # A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3 + # T_c = -A^{d0 d1 d2} * A_{d0 d1}^d3 * B_{d2 d3} + Spinor = TensorIndexType('Spinor', dummy_name='S', metric_symmetry=-1) + a, a0, a1, a2, a3, b, d0, d1, d2, d3 = \ + tensor_indices('a,a0,a1,a2,a3,b,d0,d1,d2,d3', Spinor) + A = TensorHead('A', [Spinor]*3, TensorSymmetry.fully_symmetric(3), 1) + B = TensorHead('B', [Spinor]*2, TensorSymmetry.fully_symmetric(-2)) + t = A(d0, d1, d2)*A(-d2, -d3, -d1)*B(-d0, d3) + tc = t.canon_bp() + assert str(tc) == '-A(S_0, S_1, S_2)*A(-S_0, -S_1, S_3)*B(-S_2, -S_3)' + + # A anticommuting symmetric, B antisymmetric anticommuting, + # no metric symmetry + # A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3 + # T_c = A^{d0 d1 d2} * A_{d0 d1 d3} * B_d2^d3 + Mat = TensorIndexType('Mat', metric_symmetry=0, dummy_name='M') + a, a0, a1, a2, a3, b, d0, d1, d2, d3 = \ + tensor_indices('a,a0,a1,a2,a3,b,d0,d1,d2,d3', Mat) + A = TensorHead('A', [Mat]*3, TensorSymmetry.fully_symmetric(3), 1) + B = TensorHead('B', [Mat]*2, TensorSymmetry.fully_symmetric(-2)) + t = A(d0, d1, d2)*A(-d2, -d3, -d1)*B(-d0, d3) + tc = t.canon_bp() + assert str(tc) == 'A(M_0, M_1, M_2)*A(-M_0, -M_1, -M_3)*B(-M_2, M_3)' + + # Gamma anticommuting + # Gamma_{mu nu} * gamma^rho * Gamma^{nu mu alpha} + # T_c = -Gamma^{mu nu} * gamma^rho * Gamma_{alpha mu nu} + alpha, beta, gamma, mu, nu, rho = \ + tensor_indices('alpha,beta,gamma,mu,nu,rho', Lorentz) + Gamma = TensorHead('Gamma', [Lorentz], + TensorSymmetry.fully_symmetric(1), 2) + Gamma2 = TensorHead('Gamma', [Lorentz]*2, + TensorSymmetry.fully_symmetric(-2), 2) + Gamma3 = TensorHead('Gamma', [Lorentz]*3, + TensorSymmetry.fully_symmetric(-3), 2) + t = Gamma2(-mu, -nu)*Gamma(rho)*Gamma3(nu, mu, alpha) + tc = t.canon_bp() + assert str(tc) == '-Gamma(L_0, L_1)*Gamma(rho)*Gamma(alpha, -L_0, -L_1)' + + # Gamma_{mu nu} * Gamma^{gamma beta} * gamma_rho * Gamma^{nu mu alpha} + # T_c = Gamma^{mu nu} * Gamma^{beta gamma} * gamma_rho * Gamma^alpha_{mu nu} + t = Gamma2(mu, nu)*Gamma2(beta, gamma)*Gamma(-rho)*Gamma3(alpha, -mu, -nu) + tc = t.canon_bp() + assert str(tc) == 'Gamma(L_0, L_1)*Gamma(beta, gamma)*Gamma(-rho)*Gamma(alpha, -L_0, -L_1)' + + # f^a_{b,c} antisymmetric in b,c; A_mu^a no symmetry + # f^c_{d a} * f_{c e b} * A_mu^d * A_nu^a * A^{nu e} * A^{mu b} + # g = [8,11,5, 9,13,7, 1,10, 3,4, 2,12, 0,6, 14,15] + # T_c = -f^{a b c} * f_a^{d e} * A^mu_b * A_{mu d} * A^nu_c * A_{nu e} + Flavor = TensorIndexType('Flavor', dummy_name='F') + a, b, c, d, e, ff = 
tensor_indices('a,b,c,d,e,f', Flavor) + mu, nu = tensor_indices('mu,nu', Lorentz) + f = TensorHead('f', [Flavor]*3, TensorSymmetry.direct_product(1, -2)) + A = TensorHead('A', [Lorentz, Flavor], TensorSymmetry.no_symmetry(2)) + t = f(c, -d, -a)*f(-c, -e, -b)*A(-mu, d)*A(-nu, a)*A(nu, e)*A(mu, b) + tc = t.canon_bp() + assert str(tc) == '-f(F_0, F_1, F_2)*f(-F_0, F_3, F_4)*A(L_0, -F_1)*A(-L_0, -F_3)*A(L_1, -F_2)*A(-L_1, -F_4)' + + +def test_bug_correction_tensor_indices(): + # to make sure that tensor_indices does not return a list if creating + # only one index: + A = TensorIndexType("A") + i = tensor_indices('i', A) + assert not isinstance(i, (tuple, list)) + assert isinstance(i, TensorIndex) + + +def test_riemann_invariants(): + Lorentz = TensorIndexType('Lorentz', dummy_name='L') + d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11 = \ + tensor_indices('d0:12', Lorentz) + # R^{d0 d1}_{d1 d0}; ord = [d0,-d0,d1,-d1] + # T_c = -R^{d0 d1}_{d0 d1} + R = TensorHead('R', [Lorentz]*4, TensorSymmetry.riemann()) + t = R(d0, d1, -d1, -d0) + tc = t.canon_bp() + assert str(tc) == '-R(L_0, L_1, -L_0, -L_1)' + + # R_d11^d1_d0^d5 * R^{d6 d4 d0}_d5 * R_{d7 d2 d8 d9} * + # R_{d10 d3 d6 d4} * R^{d2 d7 d11}_d1 * R^{d8 d9 d3 d10} + # can = [0,2,4,6, 1,3,8,10, 5,7,12,14, 9,11,16,18, 13,15,20,22, + # 17,19,21>> from sympy.tensor.tensor import TensorIndexType, TensorHead + >>> from sympy.tensor.toperators import PartialDerivative + >>> from sympy import symbols + >>> L = TensorIndexType("L") + >>> A = TensorHead("A", [L]) + >>> B = TensorHead("B", [L]) + >>> i, j, k = symbols("i j k") + + >>> expr = PartialDerivative(A(i), A(j)) + >>> expr + PartialDerivative(A(i), A(j)) + + The ``PartialDerivative`` object behaves like a tensorial expression: + + >>> expr.get_indices() + [i, -j] + + Notice that the deriving variables have opposite valence than the + printed one: ``A(j)`` is printed as covariant, but the index of the + derivative is actually contravariant, i.e. ``-j``. + + Indices can be contracted: + + >>> expr = PartialDerivative(A(i), A(i)) + >>> expr + PartialDerivative(A(L_0), A(L_0)) + >>> expr.get_indices() + [L_0, -L_0] + + The method ``.get_indices()`` always returns all indices (even the + contracted ones). If only uncontracted indices are needed, call + ``.get_free_indices()``: + + >>> expr.get_free_indices() + [] + + Nested partial derivatives are flattened: + + >>> expr = PartialDerivative(PartialDerivative(A(i), A(j)), A(k)) + >>> expr + PartialDerivative(A(i), A(j), A(k)) + >>> expr.get_indices() + [i, -j, -k] + + Replace a derivative with array values: + + >>> from sympy.abc import x, y + >>> from sympy import sin, log + >>> compA = [sin(x), log(x)*y**3] + >>> compB = [x, y] + >>> expr = PartialDerivative(A(i), B(j)) + >>> expr.replace_with_arrays({A(i): compA, B(i): compB}) + [[cos(x), 0], [y**3/x, 3*y**2*log(x)]] + + The returned array is indexed by `(i, -j)`. + + Be careful that other SymPy modules put the indices of the deriving + variables before the indices of the derivand in the derivative result. + For example: + + >>> expr.get_free_indices() + [i, -j] + + >>> from sympy import Matrix, Array + >>> Matrix(compA).diff(Matrix(compB)).reshape(2, 2) + [[cos(x), y**3/x], [0, 3*y**2*log(x)]] + >>> Array(compA).diff(Array(compB)) + [[cos(x), y**3/x], [0, 3*y**2*log(x)]] + + These are the transpose of the result of ``PartialDerivative``, + as the matrix and the array modules put the index `-j` before `i` in the + derivative result. 
An array read with index order `(-j, i)` is indeed the + transpose of the same array read with index order `(i, -j)`. By specifying + the index order to ``.replace_with_arrays`` one can get a compatible + expression: + + >>> expr.replace_with_arrays({A(i): compA, B(i): compB}, [-j, i]) + [[cos(x), y**3/x], [0, 3*y**2*log(x)]] + """ + + def __new__(cls, expr, *variables): + + # Flatten: + if isinstance(expr, PartialDerivative): + variables = expr.variables + variables + expr = expr.expr + + args, indices, free, dum = cls._contract_indices_for_derivative( + S(expr), variables) + + obj = TensExpr.__new__(cls, *args) + + obj._indices = indices + obj._free = free + obj._dum = dum + return obj + + @property + def coeff(self): + return S.One + + @property + def nocoeff(self): + return self + + @classmethod + def _contract_indices_for_derivative(cls, expr, variables): + variables_opposite_valence = [] + + for i in variables: + if isinstance(i, Tensor): + i_free_indices = i.get_free_indices() + variables_opposite_valence.append( + i.xreplace({k: -k for k in i_free_indices})) + elif isinstance(i, Symbol): + variables_opposite_valence.append(i) + + args, indices, free, dum = TensMul._tensMul_contract_indices( + [expr] + variables_opposite_valence, replace_indices=True) + + for i in range(1, len(args)): + args_i = args[i] + if isinstance(args_i, Tensor): + i_indices = args[i].get_free_indices() + args[i] = args[i].xreplace({k: -k for k in i_indices}) + + return args, indices, free, dum + + def doit(self, **hints): + args, indices, free, dum = self._contract_indices_for_derivative(self.expr, self.variables) + + obj = self.func(*args) + obj._indices = indices + obj._free = free + obj._dum = dum + + return obj + + def _expand_partial_derivative(self): + args, indices, free, dum = self._contract_indices_for_derivative(self.expr, self.variables) + + obj = self.func(*args) + obj._indices = indices + obj._free = free + obj._dum = dum + + result = obj + + if not args[0].free_symbols: + return S.Zero + elif isinstance(obj.expr, TensAdd): + # take care of sums of multi PDs + result = obj.expr.func(*[ + self.func(a, *obj.variables)._expand_partial_derivative() + for a in result.expr.args]) + elif isinstance(obj.expr, TensMul): + # take care of products of multi PDs + if len(obj.variables) == 1: + # derivative with respect to single variable + terms = [] + mulargs = list(obj.expr.args) + for ind in range(len(mulargs)): + if not isinstance(sympify(mulargs[ind]), Number): + # a number coefficient is not considered for + # expansion of PartialDerivative + d = self.func(mulargs[ind], *obj.variables)._expand_partial_derivative() + terms.append(TensMul(*(mulargs[:ind] + + [d] + + mulargs[(ind + 1):]))) + result = TensAdd.fromiter(terms) + else: + # derivative with respect to multiple variables + # decompose: + # partial(expr, (u, v)) + # = partial(partial(expr, u).doit(), v).doit() + result = obj.expr # init with expr + for v in obj.variables: + result = self.func(result, v)._expand_partial_derivative() + # then throw PD on it + + return result + + def _perform_derivative(self): + result = self.expr + for v in self.variables: + if isinstance(result, TensExpr): + result = result._eval_partial_derivative(v) + else: + if v._diff_wrt: + result = result._eval_derivative(v) + else: + result = S.Zero + return result + + def get_indices(self): + return self._indices + + def get_free_indices(self): + free = sorted(self._free, key=lambda x: x[1]) + return [i[0] for i in free] + + def _replace_indices(self, repl): + expr = 
self.expr.xreplace(repl) + mirrored = {-k: -v for k, v in repl.items()} + variables = [i.xreplace(mirrored) for i in self.variables] + return self.func(expr, *variables) + + @property + def expr(self): + return self.args[0] + + @property + def variables(self): + return self.args[1:] + + def _extract_data(self, replacement_dict): + from .array import derive_by_array, tensorcontraction + indices, array = self.expr._extract_data(replacement_dict) + for variable in self.variables: + var_indices, var_array = variable._extract_data(replacement_dict) + var_indices = [-i for i in var_indices] + coeff_array, var_array = zip(*[i.as_coeff_Mul() for i in var_array]) + dim_before = len(array.shape) + array = derive_by_array(array, var_array) + dim_after = len(array.shape) + dim_increase = dim_after - dim_before + array = permutedims(array, [i + dim_increase for i in range(dim_before)] + list(range(dim_increase))) + array = array.as_mutable() + varindex = var_indices[0] + # Remove coefficients of base vector: + coeff_index = [0] + [slice(None) for i in range(len(indices))] + for i, coeff in enumerate(coeff_array): + coeff_index[0] = i + array[tuple(coeff_index)] /= coeff + if -varindex in indices: + pos = indices.index(-varindex) + array = tensorcontraction(array, (0, pos+1)) + indices.pop(pos) + else: + indices.append(varindex) + return indices, array
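For reference, a minimal sketch of how the expansion helper defined in the class body above behaves, assuming the `sympy.tensor.toperators` module from this tree is importable; the index type `L` and the heads `A` and `B` are illustrative names chosen for the example, and `_expand_partial_derivative` is the internal method shown above (it applies linearity over `TensAdd` and the product rule over `TensMul`):

from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead
from sympy.tensor.toperators import PartialDerivative

# Illustrative index type and tensor heads (no symmetry).
L = TensorIndexType("L", dummy_name="L")
i, j, k = tensor_indices("i j k", L)
A, B = TensorHead("A", [L]), TensorHead("B", [L])

# Linearity over TensAdd: the derivative distributes over the sum,
# giving (up to term order) something of the form
#   PartialDerivative(A(i), A(j)) + PartialDerivative(B(i), A(j))
sum_expr = PartialDerivative(A(i) + B(i), A(j))
expanded_sum = sum_expr._expand_partial_derivative()
print(expanded_sum)

# Product rule over TensMul: one term per differentiated factor,
# expected to take the form
#   B(j)*PartialDerivative(A(i), A(k)) + A(i)*PartialDerivative(B(j), A(k))
prod_expr = PartialDerivative(A(i)*B(j), A(k))
expanded_prod = prod_expr._expand_partial_derivative()
print(expanded_prod)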