diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39a2188c20cc0bae9d333292fcaa6f04d2980f7c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/__pycache__/abc.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/__pycache__/abc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d45c6623fc48911d4d88553071368f3cc19db8c8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/__pycache__/abc.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/__pycache__/conftest.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8263ef2542a0341e164f71d54e3f1fabd017ba01 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/__pycache__/conftest.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/__pycache__/galgebra.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/__pycache__/galgebra.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce94dfe675fd5b4f6469ed9db25240b99c49fbb1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/__pycache__/galgebra.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/__pycache__/release.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/__pycache__/release.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5aff9cb742d46436c035f2b7fc0b25369560557 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/__pycache__/release.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/__pycache__/this.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/__pycache__/this.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9f95874290cace58f5cf77061ce627472ae2dbf Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/__pycache__/this.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/core/_print_helpers.py b/env-llmeval/lib/python3.10/site-packages/sympy/core/_print_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..d704ed220d444e2d8510b280dca85c8ae6149d4c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/core/_print_helpers.py @@ -0,0 +1,65 @@ +""" +Base class to provide str and repr hooks that `init_printing` can overwrite. + +This is exposed publicly in the `printing.defaults` module, +but cannot be defined there without causing circular imports. +""" + +class Printable: + """ + The default implementation of printing for SymPy classes. + + This implements a hack that allows us to print elements of built-in + Python containers in a readable way. Natively Python uses ``repr()`` + even if ``str()`` was explicitly requested. Mix in this trait into + a class to get proper default printing. + + This also adds support for LaTeX printing in jupyter notebooks. + """ + + # Since this class is used as a mixin we set empty slots. 
That means that + # instances of any subclasses that use slots will not need to have a + # __dict__. + __slots__ = () + + # Note, we always use the default ordering (lex) in __str__ and __repr__, + # regardless of the global setting. See issue 5487. + def __str__(self): + from sympy.printing.str import sstr + return sstr(self, order=None) + + __repr__ = __str__ + + def _repr_disabled(self): + """ + No-op repr function used to disable jupyter display hooks. + + When :func:`sympy.init_printing` is used to disable certain display + formats, this function is copied into the appropriate ``_repr_*_`` + attributes. + + While we could just set the attributes to `None``, doing it this way + allows derived classes to call `super()`. + """ + return None + + # We don't implement _repr_png_ here because it would add a large amount of + # data to any notebook containing SymPy expressions, without adding + # anything useful to the notebook. It can still enabled manually, e.g., + # for the qtconsole, with init_printing(). + _repr_png_ = _repr_disabled + + _repr_svg_ = _repr_disabled + + def _repr_latex_(self): + """ + IPython/Jupyter LaTeX printing + + To change the behavior of this (e.g., pass in some settings to LaTeX), + use init_printing(). init_printing() will also enable LaTeX printing + for built in numeric types like ints and container types that contain + SymPy objects, like lists and dictionaries of expressions. + """ + from sympy.printing.latex import latex + s = latex(self, mode='plain') + return "$\\displaystyle %s$" % s diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/core/add.py b/env-llmeval/lib/python3.10/site-packages/sympy/core/add.py new file mode 100644 index 0000000000000000000000000000000000000000..f63869d4815c05f7b29c3e739ec2d8b965c3e092 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/core/add.py @@ -0,0 +1,1287 @@ +from typing import Tuple as tTuple +from collections import defaultdict +from functools import cmp_to_key, reduce +from operator import attrgetter +from .basic import Basic +from .parameters import global_parameters +from .logic import _fuzzy_group, fuzzy_or, fuzzy_not +from .singleton import S +from .operations import AssocOp, AssocOpDispatcher +from .cache import cacheit +from .numbers import ilcm, igcd, equal_valued +from .expr import Expr +from .kind import UndefinedKind +from sympy.utilities.iterables import is_sequence, sift + +# Key for sorting commutative args in canonical order +_args_sortkey = cmp_to_key(Basic.compare) + + +def _could_extract_minus_sign(expr): + # assume expr is Add-like + # We choose the one with less arguments with minus signs + negative_args = sum(1 for i in expr.args + if i.could_extract_minus_sign()) + positive_args = len(expr.args) - negative_args + if positive_args > negative_args: + return False + elif positive_args < negative_args: + return True + # choose based on .sort_key() to prefer + # x - 1 instead of 1 - x and + # 3 - sqrt(2) instead of -3 + sqrt(2) + return bool(expr.sort_key() < (-expr).sort_key()) + + +def _addsort(args): + # in-place sorting of args + args.sort(key=_args_sortkey) + + +def _unevaluated_Add(*args): + """Return a well-formed unevaluated Add: Numbers are collected and + put in slot 0 and args are sorted. Use this when args have changed + but you still want to return an unevaluated Add. 
+ + Examples + ======== + + >>> from sympy.core.add import _unevaluated_Add as uAdd + >>> from sympy import S, Add + >>> from sympy.abc import x, y + >>> a = uAdd(*[S(1.0), x, S(2)]) + >>> a.args[0] + 3.00000000000000 + >>> a.args[1] + x + + Beyond the Number being in slot 0, there is no other assurance of + order for the arguments since they are hash sorted. So, for testing + purposes, output produced by this in some other function can only + be tested against the output of this function or as one of several + options: + + >>> opts = (Add(x, y, evaluate=False), Add(y, x, evaluate=False)) + >>> a = uAdd(x, y) + >>> assert a in opts and a == uAdd(x, y) + >>> uAdd(x + 1, x + 2) + x + x + 3 + """ + args = list(args) + newargs = [] + co = S.Zero + while args: + a = args.pop() + if a.is_Add: + # this will keep nesting from building up + # so that x + (x + 1) -> x + x + 1 (3 args) + args.extend(a.args) + elif a.is_Number: + co += a + else: + newargs.append(a) + _addsort(newargs) + if co: + newargs.insert(0, co) + return Add._from_args(newargs) + + +class Add(Expr, AssocOp): + """ + Expression representing addition operation for algebraic group. + + .. deprecated:: 1.7 + + Using arguments that aren't subclasses of :class:`~.Expr` in core + operators (:class:`~.Mul`, :class:`~.Add`, and :class:`~.Pow`) is + deprecated. See :ref:`non-expr-args-deprecated` for details. + + Every argument of ``Add()`` must be ``Expr``. Infix operator ``+`` + on most scalar objects in SymPy calls this class. + + Another use of ``Add()`` is to represent the structure of abstract + addition so that its arguments can be substituted to return different + class. Refer to examples section for this. + + ``Add()`` evaluates the argument unless ``evaluate=False`` is passed. + The evaluation logic includes: + + 1. Flattening + ``Add(x, Add(y, z))`` -> ``Add(x, y, z)`` + + 2. Identity removing + ``Add(x, 0, y)`` -> ``Add(x, y)`` + + 3. Coefficient collecting by ``.as_coeff_Mul()`` + ``Add(x, 2*x)`` -> ``Mul(3, x)`` + + 4. Term sorting + ``Add(y, x, 2)`` -> ``Add(2, x, y)`` + + If no argument is passed, identity element 0 is returned. If single + element is passed, that element is returned. + + Note that ``Add(*args)`` is more efficient than ``sum(args)`` because + it flattens the arguments. ``sum(a, b, c, ...)`` recursively adds the + arguments as ``a + (b + (c + ...))``, which has quadratic complexity. + On the other hand, ``Add(a, b, c, d)`` does not assume nested + structure, making the complexity linear. + + Since addition is group operation, every argument should have the + same :obj:`sympy.core.kind.Kind()`. + + Examples + ======== + + >>> from sympy import Add, I + >>> from sympy.abc import x, y + >>> Add(x, 1) + x + 1 + >>> Add(x, x) + 2*x + >>> 2*x**2 + 3*x + I*y + 2*y + 2*x/5 + 1.0*y + 1 + 2*x**2 + 17*x/5 + 3.0*y + I*y + 1 + + If ``evaluate=False`` is passed, result is not evaluated. + + >>> Add(1, 2, evaluate=False) + 1 + 2 + >>> Add(x, x, evaluate=False) + x + x + + ``Add()`` also represents the general structure of addition operation. + + >>> from sympy import MatrixSymbol + >>> A,B = MatrixSymbol('A', 2,2), MatrixSymbol('B', 2,2) + >>> expr = Add(x,y).subs({x:A, y:B}) + >>> expr + A + B + >>> type(expr) + + + Note that the printers do not display in args order. + + >>> Add(x, 1) + x + 1 + >>> Add(x, 1).args + (1, x) + + See Also + ======== + + MatAdd + + """ + + __slots__ = () + + args: tTuple[Expr, ...] 
+ + is_Add = True + + _args_type = Expr + + @classmethod + def flatten(cls, seq): + """ + Takes the sequence "seq" of nested Adds and returns a flatten list. + + Returns: (commutative_part, noncommutative_part, order_symbols) + + Applies associativity, all terms are commutable with respect to + addition. + + NB: the removal of 0 is already handled by AssocOp.__new__ + + See Also + ======== + + sympy.core.mul.Mul.flatten + + """ + from sympy.calculus.accumulationbounds import AccumBounds + from sympy.matrices.expressions import MatrixExpr + from sympy.tensor.tensor import TensExpr + rv = None + if len(seq) == 2: + a, b = seq + if b.is_Rational: + a, b = b, a + if a.is_Rational: + if b.is_Mul: + rv = [a, b], [], None + if rv: + if all(s.is_commutative for s in rv[0]): + return rv + return [], rv[0], None + + terms = {} # term -> coeff + # e.g. x**2 -> 5 for ... + 5*x**2 + ... + + coeff = S.Zero # coefficient (Number or zoo) to always be in slot 0 + # e.g. 3 + ... + order_factors = [] + + extra = [] + + for o in seq: + + # O(x) + if o.is_Order: + if o.expr.is_zero: + continue + for o1 in order_factors: + if o1.contains(o): + o = None + break + if o is None: + continue + order_factors = [o] + [ + o1 for o1 in order_factors if not o.contains(o1)] + continue + + # 3 or NaN + elif o.is_Number: + if (o is S.NaN or coeff is S.ComplexInfinity and + o.is_finite is False) and not extra: + # we know for sure the result will be nan + return [S.NaN], [], None + if coeff.is_Number or isinstance(coeff, AccumBounds): + coeff += o + if coeff is S.NaN and not extra: + # we know for sure the result will be nan + return [S.NaN], [], None + continue + + elif isinstance(o, AccumBounds): + coeff = o.__add__(coeff) + continue + + elif isinstance(o, MatrixExpr): + # can't add 0 to Matrix so make sure coeff is not 0 + extra.append(o) + continue + + elif isinstance(o, TensExpr): + coeff = o.__add__(coeff) if coeff else o + continue + + elif o is S.ComplexInfinity: + if coeff.is_finite is False and not extra: + # we know for sure the result will be nan + return [S.NaN], [], None + coeff = S.ComplexInfinity + continue + + # Add([...]) + elif o.is_Add: + # NB: here we assume Add is always commutative + seq.extend(o.args) # TODO zerocopy? + continue + + # Mul([...]) + elif o.is_Mul: + c, s = o.as_coeff_Mul() + + # check for unevaluated Pow, e.g. 2**3 or 2**(-1/2) + elif o.is_Pow: + b, e = o.as_base_exp() + if b.is_Number and (e.is_Integer or + (e.is_Rational and e.is_negative)): + seq.append(b**e) + continue + c, s = S.One, o + + else: + # everything else + c = S.One + s = o + + # now we have: + # o = c*s, where + # + # c is a Number + # s is an expression with number factor extracted + # let's collect terms with the same s, so e.g. + # 2*x**2 + 3*x**2 -> 5*x**2 + if s in terms: + terms[s] += c + if terms[s] is S.NaN and not extra: + # we know for sure the result will be nan + return [S.NaN], [], None + else: + terms[s] = c + + # now let's construct new args: + # [2*x**2, x**3, 7*x**4, pi, ...] + newseq = [] + noncommutative = False + for s, c in terms.items(): + # 0*s + if c.is_zero: + continue + # 1*s + elif c is S.One: + newseq.append(s) + # c*s + else: + if s.is_Mul: + # Mul, already keeps its arguments in perfect order. + # so we can simply put c in slot0 and go the fast way. 
+ cs = s._new_rawargs(*((c,) + s.args)) + newseq.append(cs) + elif s.is_Add: + # we just re-create the unevaluated Mul + newseq.append(Mul(c, s, evaluate=False)) + else: + # alternatively we have to call all Mul's machinery (slow) + newseq.append(Mul(c, s)) + + noncommutative = noncommutative or not s.is_commutative + + # oo, -oo + if coeff is S.Infinity: + newseq = [f for f in newseq if not (f.is_extended_nonnegative or f.is_real)] + + elif coeff is S.NegativeInfinity: + newseq = [f for f in newseq if not (f.is_extended_nonpositive or f.is_real)] + + if coeff is S.ComplexInfinity: + # zoo might be + # infinite_real + finite_im + # finite_real + infinite_im + # infinite_real + infinite_im + # addition of a finite real or imaginary number won't be able to + # change the zoo nature; adding an infinite qualtity would result + # in a NaN condition if it had sign opposite of the infinite + # portion of zoo, e.g., infinite_real - infinite_real. + newseq = [c for c in newseq if not (c.is_finite and + c.is_extended_real is not None)] + + # process O(x) + if order_factors: + newseq2 = [] + for t in newseq: + for o in order_factors: + # x + O(x) -> O(x) + if o.contains(t): + t = None + break + # x + O(x**2) -> x + O(x**2) + if t is not None: + newseq2.append(t) + newseq = newseq2 + order_factors + # 1 + O(1) -> O(1) + for o in order_factors: + if o.contains(coeff): + coeff = S.Zero + break + + # order args canonically + _addsort(newseq) + + # current code expects coeff to be first + if coeff is not S.Zero: + newseq.insert(0, coeff) + + if extra: + newseq += extra + noncommutative = True + + # we are done + if noncommutative: + return [], newseq, None + else: + return newseq, [], None + + @classmethod + def class_key(cls): + return 3, 1, cls.__name__ + + @property + def kind(self): + k = attrgetter('kind') + kinds = map(k, self.args) + kinds = frozenset(kinds) + if len(kinds) != 1: + # Since addition is group operator, kind must be same. + # We know that this is unexpected signature, so return this. + result = UndefinedKind + else: + result, = kinds + return result + + def could_extract_minus_sign(self): + return _could_extract_minus_sign(self) + + @cacheit + def as_coeff_add(self, *deps): + """ + Returns a tuple (coeff, args) where self is treated as an Add and coeff + is the Number term and args is a tuple of all other terms. + + Examples + ======== + + >>> from sympy.abc import x + >>> (7 + 3*x).as_coeff_add() + (7, (3*x,)) + >>> (7*x).as_coeff_add() + (0, (7*x,)) + """ + if deps: + l1, l2 = sift(self.args, lambda x: x.has_free(*deps), binary=True) + return self._new_rawargs(*l2), tuple(l1) + coeff, notrat = self.args[0].as_coeff_add() + if coeff is not S.Zero: + return coeff, notrat + self.args[1:] + return S.Zero, self.args + + def as_coeff_Add(self, rational=False, deps=None): + """ + Efficiently extract the coefficient of a summation. + """ + coeff, args = self.args[0], self.args[1:] + + if coeff.is_Number and not rational or coeff.is_Rational: + return coeff, self._new_rawargs(*args) + return S.Zero, self + + # Note, we intentionally do not implement Add.as_coeff_mul(). Rather, we + # let Expr.as_coeff_mul() just always return (S.One, self) for an Add. See + # issue 5524. 
+ + def _eval_power(self, e): + from .evalf import pure_complex + from .relational import is_eq + if len(self.args) == 2 and any(_.is_infinite for _ in self.args): + if e.is_zero is False and is_eq(e, S.One) is False: + # looking for literal a + I*b + a, b = self.args + if a.coeff(S.ImaginaryUnit): + a, b = b, a + ico = b.coeff(S.ImaginaryUnit) + if ico and ico.is_extended_real and a.is_extended_real: + if e.is_extended_negative: + return S.Zero + if e.is_extended_positive: + return S.ComplexInfinity + return + if e.is_Rational and self.is_number: + ri = pure_complex(self) + if ri: + r, i = ri + if e.q == 2: + from sympy.functions.elementary.miscellaneous import sqrt + D = sqrt(r**2 + i**2) + if D.is_Rational: + from .exprtools import factor_terms + from sympy.functions.elementary.complexes import sign + from .function import expand_multinomial + # (r, i, D) is a Pythagorean triple + root = sqrt(factor_terms((D - r)/2))**e.p + return root*expand_multinomial(( + # principle value + (D + r)/abs(i) + sign(i)*S.ImaginaryUnit)**e.p) + elif e == -1: + return _unevaluated_Mul( + r - i*S.ImaginaryUnit, + 1/(r**2 + i**2)) + elif e.is_Number and abs(e) != 1: + # handle the Float case: (2.0 + 4*x)**e -> 4**e*(0.5 + x)**e + c, m = zip(*[i.as_coeff_Mul() for i in self.args]) + if any(i.is_Float for i in c): # XXX should this always be done? + big = -1 + for i in c: + if abs(i) >= big: + big = abs(i) + if big > 0 and not equal_valued(big, 1): + from sympy.functions.elementary.complexes import sign + bigs = (big, -big) + c = [sign(i) if i in bigs else i/big for i in c] + addpow = Add(*[c*m for c, m in zip(c, m)])**e + return big**e*addpow + + @cacheit + def _eval_derivative(self, s): + return self.func(*[a.diff(s) for a in self.args]) + + def _eval_nseries(self, x, n, logx, cdir=0): + terms = [t.nseries(x, n=n, logx=logx, cdir=cdir) for t in self.args] + return self.func(*terms) + + def _matches_simple(self, expr, repl_dict): + # handle (w+3).matches('x+5') -> {w: x+2} + coeff, terms = self.as_coeff_add() + if len(terms) == 1: + return terms[0].matches(expr - coeff, repl_dict) + return + + def matches(self, expr, repl_dict=None, old=False): + return self._matches_commutative(expr, repl_dict, old) + + @staticmethod + def _combine_inverse(lhs, rhs): + """ + Returns lhs - rhs, but treats oo like a symbol so oo - oo + returns 0, instead of a nan. + """ + from sympy.simplify.simplify import signsimp + inf = (S.Infinity, S.NegativeInfinity) + if lhs.has(*inf) or rhs.has(*inf): + from .symbol import Dummy + oo = Dummy('oo') + reps = { + S.Infinity: oo, + S.NegativeInfinity: -oo} + ireps = {v: k for k, v in reps.items()} + eq = lhs.xreplace(reps) - rhs.xreplace(reps) + if eq.has(oo): + eq = eq.replace( + lambda x: x.is_Pow and x.base is oo, + lambda x: x.base) + rv = eq.xreplace(ireps) + else: + rv = lhs - rhs + srv = signsimp(rv) + return srv if srv.is_Number else rv + + @cacheit + def as_two_terms(self): + """Return head and tail of self. + + This is the most efficient way to get the head and tail of an + expression. + + - if you want only the head, use self.args[0]; + - if you want to process the arguments of the tail then use + self.as_coef_add() which gives the head and a tuple containing + the arguments of the tail when treated as an Add. 
+ - if you want the coefficient when self is treated as a Mul + then use self.as_coeff_mul()[0] + + >>> from sympy.abc import x, y + >>> (3*x - 2*y + 5).as_two_terms() + (5, 3*x - 2*y) + """ + return self.args[0], self._new_rawargs(*self.args[1:]) + + def as_numer_denom(self): + """ + Decomposes an expression to its numerator part and its + denominator part. + + Examples + ======== + + >>> from sympy.abc import x, y, z + >>> (x*y/z).as_numer_denom() + (x*y, z) + >>> (x*(y + 1)/y**7).as_numer_denom() + (x*(y + 1), y**7) + + See Also + ======== + + sympy.core.expr.Expr.as_numer_denom + """ + # clear rational denominator + content, expr = self.primitive() + if not isinstance(expr, Add): + return Mul(content, expr, evaluate=False).as_numer_denom() + ncon, dcon = content.as_numer_denom() + + # collect numerators and denominators of the terms + nd = defaultdict(list) + for f in expr.args: + ni, di = f.as_numer_denom() + nd[di].append(ni) + + # check for quick exit + if len(nd) == 1: + d, n = nd.popitem() + return self.func( + *[_keep_coeff(ncon, ni) for ni in n]), _keep_coeff(dcon, d) + + # sum up the terms having a common denominator + for d, n in nd.items(): + if len(n) == 1: + nd[d] = n[0] + else: + nd[d] = self.func(*n) + + # assemble single numerator and denominator + denoms, numers = [list(i) for i in zip(*iter(nd.items()))] + n, d = self.func(*[Mul(*(denoms[:i] + [numers[i]] + denoms[i + 1:])) + for i in range(len(numers))]), Mul(*denoms) + + return _keep_coeff(ncon, n), _keep_coeff(dcon, d) + + def _eval_is_polynomial(self, syms): + return all(term._eval_is_polynomial(syms) for term in self.args) + + def _eval_is_rational_function(self, syms): + return all(term._eval_is_rational_function(syms) for term in self.args) + + def _eval_is_meromorphic(self, x, a): + return _fuzzy_group((arg.is_meromorphic(x, a) for arg in self.args), + quick_exit=True) + + def _eval_is_algebraic_expr(self, syms): + return all(term._eval_is_algebraic_expr(syms) for term in self.args) + + # assumption methods + _eval_is_real = lambda self: _fuzzy_group( + (a.is_real for a in self.args), quick_exit=True) + _eval_is_extended_real = lambda self: _fuzzy_group( + (a.is_extended_real for a in self.args), quick_exit=True) + _eval_is_complex = lambda self: _fuzzy_group( + (a.is_complex for a in self.args), quick_exit=True) + _eval_is_antihermitian = lambda self: _fuzzy_group( + (a.is_antihermitian for a in self.args), quick_exit=True) + _eval_is_finite = lambda self: _fuzzy_group( + (a.is_finite for a in self.args), quick_exit=True) + _eval_is_hermitian = lambda self: _fuzzy_group( + (a.is_hermitian for a in self.args), quick_exit=True) + _eval_is_integer = lambda self: _fuzzy_group( + (a.is_integer for a in self.args), quick_exit=True) + _eval_is_rational = lambda self: _fuzzy_group( + (a.is_rational for a in self.args), quick_exit=True) + _eval_is_algebraic = lambda self: _fuzzy_group( + (a.is_algebraic for a in self.args), quick_exit=True) + _eval_is_commutative = lambda self: _fuzzy_group( + a.is_commutative for a in self.args) + + def _eval_is_infinite(self): + sawinf = False + for a in self.args: + ainf = a.is_infinite + if ainf is None: + return None + elif ainf is True: + # infinite+infinite might not be infinite + if sawinf is True: + return None + sawinf = True + return sawinf + + def _eval_is_imaginary(self): + nz = [] + im_I = [] + for a in self.args: + if a.is_extended_real: + if a.is_zero: + pass + elif a.is_zero is False: + nz.append(a) + else: + return + elif a.is_imaginary: + 
im_I.append(a*S.ImaginaryUnit) + elif a.is_Mul and S.ImaginaryUnit in a.args: + coeff, ai = a.as_coeff_mul(S.ImaginaryUnit) + if ai == (S.ImaginaryUnit,) and coeff.is_extended_real: + im_I.append(-coeff) + else: + return + else: + return + b = self.func(*nz) + if b != self: + if b.is_zero: + return fuzzy_not(self.func(*im_I).is_zero) + elif b.is_zero is False: + return False + + def _eval_is_zero(self): + if self.is_commutative is False: + # issue 10528: there is no way to know if a nc symbol + # is zero or not + return + nz = [] + z = 0 + im_or_z = False + im = 0 + for a in self.args: + if a.is_extended_real: + if a.is_zero: + z += 1 + elif a.is_zero is False: + nz.append(a) + else: + return + elif a.is_imaginary: + im += 1 + elif a.is_Mul and S.ImaginaryUnit in a.args: + coeff, ai = a.as_coeff_mul(S.ImaginaryUnit) + if ai == (S.ImaginaryUnit,) and coeff.is_extended_real: + im_or_z = True + else: + return + else: + return + if z == len(self.args): + return True + if len(nz) in [0, len(self.args)]: + return None + b = self.func(*nz) + if b.is_zero: + if not im_or_z: + if im == 0: + return True + elif im == 1: + return False + if b.is_zero is False: + return False + + def _eval_is_odd(self): + l = [f for f in self.args if not (f.is_even is True)] + if not l: + return False + if l[0].is_odd: + return self._new_rawargs(*l[1:]).is_even + + def _eval_is_irrational(self): + for t in self.args: + a = t.is_irrational + if a: + others = list(self.args) + others.remove(t) + if all(x.is_rational is True for x in others): + return True + return None + if a is None: + return + return False + + def _all_nonneg_or_nonppos(self): + nn = np = 0 + for a in self.args: + if a.is_nonnegative: + if np: + return False + nn = 1 + elif a.is_nonpositive: + if nn: + return False + np = 1 + else: + break + else: + return True + + def _eval_is_extended_positive(self): + if self.is_number: + return super()._eval_is_extended_positive() + c, a = self.as_coeff_Add() + if not c.is_zero: + from .exprtools import _monotonic_sign + v = _monotonic_sign(a) + if v is not None: + s = v + c + if s != self and s.is_extended_positive and a.is_extended_nonnegative: + return True + if len(self.free_symbols) == 1: + v = _monotonic_sign(self) + if v is not None and v != self and v.is_extended_positive: + return True + pos = nonneg = nonpos = unknown_sign = False + saw_INF = set() + args = [a for a in self.args if not a.is_zero] + if not args: + return False + for a in args: + ispos = a.is_extended_positive + infinite = a.is_infinite + if infinite: + saw_INF.add(fuzzy_or((ispos, a.is_extended_nonnegative))) + if True in saw_INF and False in saw_INF: + return + if ispos: + pos = True + continue + elif a.is_extended_nonnegative: + nonneg = True + continue + elif a.is_extended_nonpositive: + nonpos = True + continue + + if infinite is None: + return + unknown_sign = True + + if saw_INF: + if len(saw_INF) > 1: + return + return saw_INF.pop() + elif unknown_sign: + return + elif not nonpos and not nonneg and pos: + return True + elif not nonpos and pos: + return True + elif not pos and not nonneg: + return False + + def _eval_is_extended_nonnegative(self): + if not self.is_number: + c, a = self.as_coeff_Add() + if not c.is_zero and a.is_extended_nonnegative: + from .exprtools import _monotonic_sign + v = _monotonic_sign(a) + if v is not None: + s = v + c + if s != self and s.is_extended_nonnegative: + return True + if len(self.free_symbols) == 1: + v = _monotonic_sign(self) + if v is not None and v != self and v.is_extended_nonnegative: + 
return True + + def _eval_is_extended_nonpositive(self): + if not self.is_number: + c, a = self.as_coeff_Add() + if not c.is_zero and a.is_extended_nonpositive: + from .exprtools import _monotonic_sign + v = _monotonic_sign(a) + if v is not None: + s = v + c + if s != self and s.is_extended_nonpositive: + return True + if len(self.free_symbols) == 1: + v = _monotonic_sign(self) + if v is not None and v != self and v.is_extended_nonpositive: + return True + + def _eval_is_extended_negative(self): + if self.is_number: + return super()._eval_is_extended_negative() + c, a = self.as_coeff_Add() + if not c.is_zero: + from .exprtools import _monotonic_sign + v = _monotonic_sign(a) + if v is not None: + s = v + c + if s != self and s.is_extended_negative and a.is_extended_nonpositive: + return True + if len(self.free_symbols) == 1: + v = _monotonic_sign(self) + if v is not None and v != self and v.is_extended_negative: + return True + neg = nonpos = nonneg = unknown_sign = False + saw_INF = set() + args = [a for a in self.args if not a.is_zero] + if not args: + return False + for a in args: + isneg = a.is_extended_negative + infinite = a.is_infinite + if infinite: + saw_INF.add(fuzzy_or((isneg, a.is_extended_nonpositive))) + if True in saw_INF and False in saw_INF: + return + if isneg: + neg = True + continue + elif a.is_extended_nonpositive: + nonpos = True + continue + elif a.is_extended_nonnegative: + nonneg = True + continue + + if infinite is None: + return + unknown_sign = True + + if saw_INF: + if len(saw_INF) > 1: + return + return saw_INF.pop() + elif unknown_sign: + return + elif not nonneg and not nonpos and neg: + return True + elif not nonneg and neg: + return True + elif not neg and not nonpos: + return False + + def _eval_subs(self, old, new): + if not old.is_Add: + if old is S.Infinity and -old in self.args: + # foo - oo is foo + (-oo) internally + return self.xreplace({-old: -new}) + return None + + coeff_self, terms_self = self.as_coeff_Add() + coeff_old, terms_old = old.as_coeff_Add() + + if coeff_self.is_Rational and coeff_old.is_Rational: + if terms_self == terms_old: # (2 + a).subs( 3 + a, y) -> -1 + y + return self.func(new, coeff_self, -coeff_old) + if terms_self == -terms_old: # (2 + a).subs(-3 - a, y) -> -1 - y + return self.func(-new, coeff_self, coeff_old) + + if coeff_self.is_Rational and coeff_old.is_Rational \ + or coeff_self == coeff_old: + args_old, args_self = self.func.make_args( + terms_old), self.func.make_args(terms_self) + if len(args_old) < len(args_self): # (a+b+c).subs(b+c,x) -> a+x + self_set = set(args_self) + old_set = set(args_old) + + if old_set < self_set: + ret_set = self_set - old_set + return self.func(new, coeff_self, -coeff_old, + *[s._subs(old, new) for s in ret_set]) + + args_old = self.func.make_args( + -terms_old) # (a+b+c+d).subs(-b-c,x) -> a-x+d + old_set = set(args_old) + if old_set < self_set: + ret_set = self_set - old_set + return self.func(-new, coeff_self, coeff_old, + *[s._subs(old, new) for s in ret_set]) + + def removeO(self): + args = [a for a in self.args if not a.is_Order] + return self._new_rawargs(*args) + + def getO(self): + args = [a for a in self.args if a.is_Order] + if args: + return self._new_rawargs(*args) + + @cacheit + def extract_leading_order(self, symbols, point=None): + """ + Returns the leading term and its order. 
+ + Examples + ======== + + >>> from sympy.abc import x + >>> (x + 1 + 1/x**5).extract_leading_order(x) + ((x**(-5), O(x**(-5))),) + >>> (1 + x).extract_leading_order(x) + ((1, O(1)),) + >>> (x + x**2).extract_leading_order(x) + ((x, O(x)),) + + """ + from sympy.series.order import Order + lst = [] + symbols = list(symbols if is_sequence(symbols) else [symbols]) + if not point: + point = [0]*len(symbols) + seq = [(f, Order(f, *zip(symbols, point))) for f in self.args] + for ef, of in seq: + for e, o in lst: + if o.contains(of) and o != of: + of = None + break + if of is None: + continue + new_lst = [(ef, of)] + for e, o in lst: + if of.contains(o) and o != of: + continue + new_lst.append((e, o)) + lst = new_lst + return tuple(lst) + + def as_real_imag(self, deep=True, **hints): + """ + Return a tuple representing a complex number. + + Examples + ======== + + >>> from sympy import I + >>> (7 + 9*I).as_real_imag() + (7, 9) + >>> ((1 + I)/(1 - I)).as_real_imag() + (0, 1) + >>> ((1 + 2*I)*(1 + 3*I)).as_real_imag() + (-5, 5) + """ + sargs = self.args + re_part, im_part = [], [] + for term in sargs: + re, im = term.as_real_imag(deep=deep) + re_part.append(re) + im_part.append(im) + return (self.func(*re_part), self.func(*im_part)) + + def _eval_as_leading_term(self, x, logx=None, cdir=0): + from sympy.core.symbol import Dummy, Symbol + from sympy.series.order import Order + from sympy.functions.elementary.exponential import log + from sympy.functions.elementary.piecewise import Piecewise, piecewise_fold + from .function import expand_mul + + o = self.getO() + if o is None: + o = Order(0) + old = self.removeO() + + if old.has(Piecewise): + old = piecewise_fold(old) + + # This expansion is the last part of expand_log. expand_log also calls + # expand_mul with factor=True, which would be more expensive + if any(isinstance(a, log) for a in self.args): + logflags = {"deep": True, "log": True, "mul": False, "power_exp": False, + "power_base": False, "multinomial": False, "basic": False, "force": False, + "factor": False} + old = old.expand(**logflags) + expr = expand_mul(old) + + if not expr.is_Add: + return expr.as_leading_term(x, logx=logx, cdir=cdir) + + infinite = [t for t in expr.args if t.is_infinite] + + _logx = Dummy('logx') if logx is None else logx + leading_terms = [t.as_leading_term(x, logx=_logx, cdir=cdir) for t in expr.args] + + min, new_expr = Order(0), 0 + + try: + for term in leading_terms: + order = Order(term, x) + if not min or order not in min: + min = order + new_expr = term + elif min in order: + new_expr += term + + except TypeError: + return expr + + if logx is None: + new_expr = new_expr.subs(_logx, log(x)) + + is_zero = new_expr.is_zero + if is_zero is None: + new_expr = new_expr.trigsimp().cancel() + is_zero = new_expr.is_zero + if is_zero is True: + # simple leading term analysis gave us cancelled terms but we have to send + # back a term, so compute the leading term (via series) + try: + n0 = min.getn() + except NotImplementedError: + n0 = S.One + if n0.has(Symbol): + n0 = S.One + res = Order(1) + incr = S.One + while res.is_Order: + res = old._eval_nseries(x, n=n0+incr, logx=logx, cdir=cdir).cancel().powsimp().trigsimp() + incr *= 2 + return res.as_leading_term(x, logx=logx, cdir=cdir) + + elif new_expr is S.NaN: + return old.func._from_args(infinite) + o + + else: + return new_expr + + def _eval_adjoint(self): + return self.func(*[t.adjoint() for t in self.args]) + + def _eval_conjugate(self): + return self.func(*[t.conjugate() for t in self.args]) + + def 
_eval_transpose(self): + return self.func(*[t.transpose() for t in self.args]) + + def primitive(self): + """ + Return ``(R, self/R)`` where ``R``` is the Rational GCD of ``self```. + + ``R`` is collected only from the leading coefficient of each term. + + Examples + ======== + + >>> from sympy.abc import x, y + + >>> (2*x + 4*y).primitive() + (2, x + 2*y) + + >>> (2*x/3 + 4*y/9).primitive() + (2/9, 3*x + 2*y) + + >>> (2*x/3 + 4.2*y).primitive() + (1/3, 2*x + 12.6*y) + + No subprocessing of term factors is performed: + + >>> ((2 + 2*x)*x + 2).primitive() + (1, x*(2*x + 2) + 2) + + Recursive processing can be done with the ``as_content_primitive()`` + method: + + >>> ((2 + 2*x)*x + 2).as_content_primitive() + (2, x*(x + 1) + 1) + + See also: primitive() function in polytools.py + + """ + + terms = [] + inf = False + for a in self.args: + c, m = a.as_coeff_Mul() + if not c.is_Rational: + c = S.One + m = a + inf = inf or m is S.ComplexInfinity + terms.append((c.p, c.q, m)) + + if not inf: + ngcd = reduce(igcd, [t[0] for t in terms], 0) + dlcm = reduce(ilcm, [t[1] for t in terms], 1) + else: + ngcd = reduce(igcd, [t[0] for t in terms if t[1]], 0) + dlcm = reduce(ilcm, [t[1] for t in terms if t[1]], 1) + + if ngcd == dlcm == 1: + return S.One, self + if not inf: + for i, (p, q, term) in enumerate(terms): + terms[i] = _keep_coeff(Rational((p//ngcd)*(dlcm//q)), term) + else: + for i, (p, q, term) in enumerate(terms): + if q: + terms[i] = _keep_coeff(Rational((p//ngcd)*(dlcm//q)), term) + else: + terms[i] = _keep_coeff(Rational(p, q), term) + + # we don't need a complete re-flattening since no new terms will join + # so we just use the same sort as is used in Add.flatten. When the + # coefficient changes, the ordering of terms may change, e.g. + # (3*x, 6*y) -> (2*y, x) + # + # We do need to make sure that term[0] stays in position 0, however. + # + if terms[0].is_Number or terms[0] is S.ComplexInfinity: + c = terms.pop(0) + else: + c = None + _addsort(terms) + if c: + terms.insert(0, c) + return Rational(ngcd, dlcm), self._new_rawargs(*terms) + + def as_content_primitive(self, radical=False, clear=True): + """Return the tuple (R, self/R) where R is the positive Rational + extracted from self. If radical is True (default is False) then + common radicals will be removed and included as a factor of the + primitive expression. + + Examples + ======== + + >>> from sympy import sqrt + >>> (3 + 3*sqrt(2)).as_content_primitive() + (3, 1 + sqrt(2)) + + Radical content can also be factored out of the primitive: + + >>> (2*sqrt(2) + 4*sqrt(10)).as_content_primitive(radical=True) + (2, sqrt(2)*(1 + 2*sqrt(5))) + + See docstring of Expr.as_content_primitive for more examples. 
+ """ + con, prim = self.func(*[_keep_coeff(*a.as_content_primitive( + radical=radical, clear=clear)) for a in self.args]).primitive() + if not clear and not con.is_Integer and prim.is_Add: + con, d = con.as_numer_denom() + _p = prim/d + if any(a.as_coeff_Mul()[0].is_Integer for a in _p.args): + prim = _p + else: + con /= d + if radical and prim.is_Add: + # look for common radicals that can be removed + args = prim.args + rads = [] + common_q = None + for m in args: + term_rads = defaultdict(list) + for ai in Mul.make_args(m): + if ai.is_Pow: + b, e = ai.as_base_exp() + if e.is_Rational and b.is_Integer: + term_rads[e.q].append(abs(int(b))**e.p) + if not term_rads: + break + if common_q is None: + common_q = set(term_rads.keys()) + else: + common_q = common_q & set(term_rads.keys()) + if not common_q: + break + rads.append(term_rads) + else: + # process rads + # keep only those in common_q + for r in rads: + for q in list(r.keys()): + if q not in common_q: + r.pop(q) + for q in r: + r[q] = Mul(*r[q]) + # find the gcd of bases for each q + G = [] + for q in common_q: + g = reduce(igcd, [r[q] for r in rads], 0) + if g != 1: + G.append(g**Rational(1, q)) + if G: + G = Mul(*G) + args = [ai/G for ai in args] + prim = G*prim.func(*args) + + return con, prim + + @property + def _sorted_args(self): + from .sorting import default_sort_key + return tuple(sorted(self.args, key=default_sort_key)) + + def _eval_difference_delta(self, n, step): + from sympy.series.limitseq import difference_delta as dd + return self.func(*[dd(a, n, step) for a in self.args]) + + @property + def _mpc_(self): + """ + Convert self to an mpmath mpc if possible + """ + from .numbers import Float + re_part, rest = self.as_coeff_Add() + im_part, imag_unit = rest.as_coeff_Mul() + if not imag_unit == S.ImaginaryUnit: + # ValueError may seem more reasonable but since it's a @property, + # we need to use AttributeError to keep from confusing things like + # hasattr. + raise AttributeError("Cannot convert Add to mpc. Must be of the form Number + Number*I") + + return (Float(re_part)._mpf_, Float(im_part)._mpf_) + + def __neg__(self): + if not global_parameters.distribute: + return super().__neg__() + return Mul(S.NegativeOne, self) + +add = AssocOpDispatcher('add') + +from .mul import Mul, _keep_coeff, _unevaluated_Mul +from .numbers import Rational diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/core/alphabets.py b/env-llmeval/lib/python3.10/site-packages/sympy/core/alphabets.py new file mode 100644 index 0000000000000000000000000000000000000000..1ea2ae1c410ccd30e7ec9551f4cd8b19a36cdba1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/core/alphabets.py @@ -0,0 +1,4 @@ +greeks = ('alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta', + 'eta', 'theta', 'iota', 'kappa', 'lambda', 'mu', 'nu', + 'xi', 'omicron', 'pi', 'rho', 'sigma', 'tau', 'upsilon', + 'phi', 'chi', 'psi', 'omega') diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/core/assumptions_generated.py b/env-llmeval/lib/python3.10/site-packages/sympy/core/assumptions_generated.py new file mode 100644 index 0000000000000000000000000000000000000000..b4b2597a72b500155370db385b58e61f0f951984 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/core/assumptions_generated.py @@ -0,0 +1,1615 @@ +""" +Do NOT manually edit this file. +Instead, run ./bin/ask_update.py. 
+""" + +defined_facts = [ + 'algebraic', + 'antihermitian', + 'commutative', + 'complex', + 'composite', + 'even', + 'extended_negative', + 'extended_nonnegative', + 'extended_nonpositive', + 'extended_nonzero', + 'extended_positive', + 'extended_real', + 'finite', + 'hermitian', + 'imaginary', + 'infinite', + 'integer', + 'irrational', + 'negative', + 'noninteger', + 'nonnegative', + 'nonpositive', + 'nonzero', + 'odd', + 'positive', + 'prime', + 'rational', + 'real', + 'transcendental', + 'zero', +] # defined_facts + + +full_implications = dict( [ + # Implications of algebraic = True: + (('algebraic', True), set( ( + ('commutative', True), + ('complex', True), + ('finite', True), + ('infinite', False), + ('transcendental', False), + ) ), + ), + # Implications of algebraic = False: + (('algebraic', False), set( ( + ('composite', False), + ('even', False), + ('integer', False), + ('odd', False), + ('prime', False), + ('rational', False), + ('zero', False), + ) ), + ), + # Implications of antihermitian = True: + (('antihermitian', True), set( ( + ) ), + ), + # Implications of antihermitian = False: + (('antihermitian', False), set( ( + ('imaginary', False), + ) ), + ), + # Implications of commutative = True: + (('commutative', True), set( ( + ) ), + ), + # Implications of commutative = False: + (('commutative', False), set( ( + ('algebraic', False), + ('complex', False), + ('composite', False), + ('even', False), + ('extended_negative', False), + ('extended_nonnegative', False), + ('extended_nonpositive', False), + ('extended_nonzero', False), + ('extended_positive', False), + ('extended_real', False), + ('imaginary', False), + ('integer', False), + ('irrational', False), + ('negative', False), + ('noninteger', False), + ('nonnegative', False), + ('nonpositive', False), + ('nonzero', False), + ('odd', False), + ('positive', False), + ('prime', False), + ('rational', False), + ('real', False), + ('transcendental', False), + ('zero', False), + ) ), + ), + # Implications of complex = True: + (('complex', True), set( ( + ('commutative', True), + ('finite', True), + ('infinite', False), + ) ), + ), + # Implications of complex = False: + (('complex', False), set( ( + ('algebraic', False), + ('composite', False), + ('even', False), + ('imaginary', False), + ('integer', False), + ('irrational', False), + ('negative', False), + ('nonnegative', False), + ('nonpositive', False), + ('nonzero', False), + ('odd', False), + ('positive', False), + ('prime', False), + ('rational', False), + ('real', False), + ('transcendental', False), + ('zero', False), + ) ), + ), + # Implications of composite = True: + (('composite', True), set( ( + ('algebraic', True), + ('commutative', True), + ('complex', True), + ('extended_negative', False), + ('extended_nonnegative', True), + ('extended_nonpositive', False), + ('extended_nonzero', True), + ('extended_positive', True), + ('extended_real', True), + ('finite', True), + ('hermitian', True), + ('imaginary', False), + ('infinite', False), + ('integer', True), + ('irrational', False), + ('negative', False), + ('noninteger', False), + ('nonnegative', True), + ('nonpositive', False), + ('nonzero', True), + ('positive', True), + ('prime', False), + ('rational', True), + ('real', True), + ('transcendental', False), + ('zero', False), + ) ), + ), + # Implications of composite = False: + (('composite', False), set( ( + ) ), + ), + # Implications of even = True: + (('even', True), set( ( + ('algebraic', True), + ('commutative', True), + ('complex', True), + ('extended_real', 
True), + ('finite', True), + ('hermitian', True), + ('imaginary', False), + ('infinite', False), + ('integer', True), + ('irrational', False), + ('noninteger', False), + ('odd', False), + ('rational', True), + ('real', True), + ('transcendental', False), + ) ), + ), + # Implications of even = False: + (('even', False), set( ( + ('zero', False), + ) ), + ), + # Implications of extended_negative = True: + (('extended_negative', True), set( ( + ('commutative', True), + ('composite', False), + ('extended_nonnegative', False), + ('extended_nonpositive', True), + ('extended_nonzero', True), + ('extended_positive', False), + ('extended_real', True), + ('imaginary', False), + ('nonnegative', False), + ('positive', False), + ('prime', False), + ('zero', False), + ) ), + ), + # Implications of extended_negative = False: + (('extended_negative', False), set( ( + ('negative', False), + ) ), + ), + # Implications of extended_nonnegative = True: + (('extended_nonnegative', True), set( ( + ('commutative', True), + ('extended_negative', False), + ('extended_real', True), + ('imaginary', False), + ('negative', False), + ) ), + ), + # Implications of extended_nonnegative = False: + (('extended_nonnegative', False), set( ( + ('composite', False), + ('extended_positive', False), + ('nonnegative', False), + ('positive', False), + ('prime', False), + ('zero', False), + ) ), + ), + # Implications of extended_nonpositive = True: + (('extended_nonpositive', True), set( ( + ('commutative', True), + ('composite', False), + ('extended_positive', False), + ('extended_real', True), + ('imaginary', False), + ('positive', False), + ('prime', False), + ) ), + ), + # Implications of extended_nonpositive = False: + (('extended_nonpositive', False), set( ( + ('extended_negative', False), + ('negative', False), + ('nonpositive', False), + ('zero', False), + ) ), + ), + # Implications of extended_nonzero = True: + (('extended_nonzero', True), set( ( + ('commutative', True), + ('extended_real', True), + ('imaginary', False), + ('zero', False), + ) ), + ), + # Implications of extended_nonzero = False: + (('extended_nonzero', False), set( ( + ('composite', False), + ('extended_negative', False), + ('extended_positive', False), + ('negative', False), + ('nonzero', False), + ('positive', False), + ('prime', False), + ) ), + ), + # Implications of extended_positive = True: + (('extended_positive', True), set( ( + ('commutative', True), + ('extended_negative', False), + ('extended_nonnegative', True), + ('extended_nonpositive', False), + ('extended_nonzero', True), + ('extended_real', True), + ('imaginary', False), + ('negative', False), + ('nonpositive', False), + ('zero', False), + ) ), + ), + # Implications of extended_positive = False: + (('extended_positive', False), set( ( + ('composite', False), + ('positive', False), + ('prime', False), + ) ), + ), + # Implications of extended_real = True: + (('extended_real', True), set( ( + ('commutative', True), + ('imaginary', False), + ) ), + ), + # Implications of extended_real = False: + (('extended_real', False), set( ( + ('composite', False), + ('even', False), + ('extended_negative', False), + ('extended_nonnegative', False), + ('extended_nonpositive', False), + ('extended_nonzero', False), + ('extended_positive', False), + ('integer', False), + ('irrational', False), + ('negative', False), + ('noninteger', False), + ('nonnegative', False), + ('nonpositive', False), + ('nonzero', False), + ('odd', False), + ('positive', False), + ('prime', False), + ('rational', False), + ('real', 
False), + ('zero', False), + ) ), + ), + # Implications of finite = True: + (('finite', True), set( ( + ('infinite', False), + ) ), + ), + # Implications of finite = False: + (('finite', False), set( ( + ('algebraic', False), + ('complex', False), + ('composite', False), + ('even', False), + ('imaginary', False), + ('infinite', True), + ('integer', False), + ('irrational', False), + ('negative', False), + ('nonnegative', False), + ('nonpositive', False), + ('nonzero', False), + ('odd', False), + ('positive', False), + ('prime', False), + ('rational', False), + ('real', False), + ('transcendental', False), + ('zero', False), + ) ), + ), + # Implications of hermitian = True: + (('hermitian', True), set( ( + ) ), + ), + # Implications of hermitian = False: + (('hermitian', False), set( ( + ('composite', False), + ('even', False), + ('integer', False), + ('irrational', False), + ('negative', False), + ('nonnegative', False), + ('nonpositive', False), + ('nonzero', False), + ('odd', False), + ('positive', False), + ('prime', False), + ('rational', False), + ('real', False), + ('zero', False), + ) ), + ), + # Implications of imaginary = True: + (('imaginary', True), set( ( + ('antihermitian', True), + ('commutative', True), + ('complex', True), + ('composite', False), + ('even', False), + ('extended_negative', False), + ('extended_nonnegative', False), + ('extended_nonpositive', False), + ('extended_nonzero', False), + ('extended_positive', False), + ('extended_real', False), + ('finite', True), + ('infinite', False), + ('integer', False), + ('irrational', False), + ('negative', False), + ('noninteger', False), + ('nonnegative', False), + ('nonpositive', False), + ('nonzero', False), + ('odd', False), + ('positive', False), + ('prime', False), + ('rational', False), + ('real', False), + ('zero', False), + ) ), + ), + # Implications of imaginary = False: + (('imaginary', False), set( ( + ) ), + ), + # Implications of infinite = True: + (('infinite', True), set( ( + ('algebraic', False), + ('complex', False), + ('composite', False), + ('even', False), + ('finite', False), + ('imaginary', False), + ('integer', False), + ('irrational', False), + ('negative', False), + ('nonnegative', False), + ('nonpositive', False), + ('nonzero', False), + ('odd', False), + ('positive', False), + ('prime', False), + ('rational', False), + ('real', False), + ('transcendental', False), + ('zero', False), + ) ), + ), + # Implications of infinite = False: + (('infinite', False), set( ( + ('finite', True), + ) ), + ), + # Implications of integer = True: + (('integer', True), set( ( + ('algebraic', True), + ('commutative', True), + ('complex', True), + ('extended_real', True), + ('finite', True), + ('hermitian', True), + ('imaginary', False), + ('infinite', False), + ('irrational', False), + ('noninteger', False), + ('rational', True), + ('real', True), + ('transcendental', False), + ) ), + ), + # Implications of integer = False: + (('integer', False), set( ( + ('composite', False), + ('even', False), + ('odd', False), + ('prime', False), + ('zero', False), + ) ), + ), + # Implications of irrational = True: + (('irrational', True), set( ( + ('commutative', True), + ('complex', True), + ('composite', False), + ('even', False), + ('extended_nonzero', True), + ('extended_real', True), + ('finite', True), + ('hermitian', True), + ('imaginary', False), + ('infinite', False), + ('integer', False), + ('noninteger', True), + ('nonzero', True), + ('odd', False), + ('prime', False), + ('rational', False), + ('real', True), + 
('zero', False), + ) ), + ), + # Implications of irrational = False: + (('irrational', False), set( ( + ) ), + ), + # Implications of negative = True: + (('negative', True), set( ( + ('commutative', True), + ('complex', True), + ('composite', False), + ('extended_negative', True), + ('extended_nonnegative', False), + ('extended_nonpositive', True), + ('extended_nonzero', True), + ('extended_positive', False), + ('extended_real', True), + ('finite', True), + ('hermitian', True), + ('imaginary', False), + ('infinite', False), + ('nonnegative', False), + ('nonpositive', True), + ('nonzero', True), + ('positive', False), + ('prime', False), + ('real', True), + ('zero', False), + ) ), + ), + # Implications of negative = False: + (('negative', False), set( ( + ) ), + ), + # Implications of noninteger = True: + (('noninteger', True), set( ( + ('commutative', True), + ('composite', False), + ('even', False), + ('extended_nonzero', True), + ('extended_real', True), + ('imaginary', False), + ('integer', False), + ('odd', False), + ('prime', False), + ('zero', False), + ) ), + ), + # Implications of noninteger = False: + (('noninteger', False), set( ( + ) ), + ), + # Implications of nonnegative = True: + (('nonnegative', True), set( ( + ('commutative', True), + ('complex', True), + ('extended_negative', False), + ('extended_nonnegative', True), + ('extended_real', True), + ('finite', True), + ('hermitian', True), + ('imaginary', False), + ('infinite', False), + ('negative', False), + ('real', True), + ) ), + ), + # Implications of nonnegative = False: + (('nonnegative', False), set( ( + ('composite', False), + ('positive', False), + ('prime', False), + ('zero', False), + ) ), + ), + # Implications of nonpositive = True: + (('nonpositive', True), set( ( + ('commutative', True), + ('complex', True), + ('composite', False), + ('extended_nonpositive', True), + ('extended_positive', False), + ('extended_real', True), + ('finite', True), + ('hermitian', True), + ('imaginary', False), + ('infinite', False), + ('positive', False), + ('prime', False), + ('real', True), + ) ), + ), + # Implications of nonpositive = False: + (('nonpositive', False), set( ( + ('negative', False), + ('zero', False), + ) ), + ), + # Implications of nonzero = True: + (('nonzero', True), set( ( + ('commutative', True), + ('complex', True), + ('extended_nonzero', True), + ('extended_real', True), + ('finite', True), + ('hermitian', True), + ('imaginary', False), + ('infinite', False), + ('real', True), + ('zero', False), + ) ), + ), + # Implications of nonzero = False: + (('nonzero', False), set( ( + ('composite', False), + ('negative', False), + ('positive', False), + ('prime', False), + ) ), + ), + # Implications of odd = True: + (('odd', True), set( ( + ('algebraic', True), + ('commutative', True), + ('complex', True), + ('even', False), + ('extended_nonzero', True), + ('extended_real', True), + ('finite', True), + ('hermitian', True), + ('imaginary', False), + ('infinite', False), + ('integer', True), + ('irrational', False), + ('noninteger', False), + ('nonzero', True), + ('rational', True), + ('real', True), + ('transcendental', False), + ('zero', False), + ) ), + ), + # Implications of odd = False: + (('odd', False), set( ( + ) ), + ), + # Implications of positive = True: + (('positive', True), set( ( + ('commutative', True), + ('complex', True), + ('extended_negative', False), + ('extended_nonnegative', True), + ('extended_nonpositive', False), + ('extended_nonzero', True), + ('extended_positive', True), + ('extended_real', 
True), + ('finite', True), + ('hermitian', True), + ('imaginary', False), + ('infinite', False), + ('negative', False), + ('nonnegative', True), + ('nonpositive', False), + ('nonzero', True), + ('real', True), + ('zero', False), + ) ), + ), + # Implications of positive = False: + (('positive', False), set( ( + ('composite', False), + ('prime', False), + ) ), + ), + # Implications of prime = True: + (('prime', True), set( ( + ('algebraic', True), + ('commutative', True), + ('complex', True), + ('composite', False), + ('extended_negative', False), + ('extended_nonnegative', True), + ('extended_nonpositive', False), + ('extended_nonzero', True), + ('extended_positive', True), + ('extended_real', True), + ('finite', True), + ('hermitian', True), + ('imaginary', False), + ('infinite', False), + ('integer', True), + ('irrational', False), + ('negative', False), + ('noninteger', False), + ('nonnegative', True), + ('nonpositive', False), + ('nonzero', True), + ('positive', True), + ('rational', True), + ('real', True), + ('transcendental', False), + ('zero', False), + ) ), + ), + # Implications of prime = False: + (('prime', False), set( ( + ) ), + ), + # Implications of rational = True: + (('rational', True), set( ( + ('algebraic', True), + ('commutative', True), + ('complex', True), + ('extended_real', True), + ('finite', True), + ('hermitian', True), + ('imaginary', False), + ('infinite', False), + ('irrational', False), + ('real', True), + ('transcendental', False), + ) ), + ), + # Implications of rational = False: + (('rational', False), set( ( + ('composite', False), + ('even', False), + ('integer', False), + ('odd', False), + ('prime', False), + ('zero', False), + ) ), + ), + # Implications of real = True: + (('real', True), set( ( + ('commutative', True), + ('complex', True), + ('extended_real', True), + ('finite', True), + ('hermitian', True), + ('imaginary', False), + ('infinite', False), + ) ), + ), + # Implications of real = False: + (('real', False), set( ( + ('composite', False), + ('even', False), + ('integer', False), + ('irrational', False), + ('negative', False), + ('nonnegative', False), + ('nonpositive', False), + ('nonzero', False), + ('odd', False), + ('positive', False), + ('prime', False), + ('rational', False), + ('zero', False), + ) ), + ), + # Implications of transcendental = True: + (('transcendental', True), set( ( + ('algebraic', False), + ('commutative', True), + ('complex', True), + ('composite', False), + ('even', False), + ('finite', True), + ('infinite', False), + ('integer', False), + ('odd', False), + ('prime', False), + ('rational', False), + ('zero', False), + ) ), + ), + # Implications of transcendental = False: + (('transcendental', False), set( ( + ) ), + ), + # Implications of zero = True: + (('zero', True), set( ( + ('algebraic', True), + ('commutative', True), + ('complex', True), + ('composite', False), + ('even', True), + ('extended_negative', False), + ('extended_nonnegative', True), + ('extended_nonpositive', True), + ('extended_nonzero', False), + ('extended_positive', False), + ('extended_real', True), + ('finite', True), + ('hermitian', True), + ('imaginary', False), + ('infinite', False), + ('integer', True), + ('irrational', False), + ('negative', False), + ('noninteger', False), + ('nonnegative', True), + ('nonpositive', True), + ('nonzero', False), + ('odd', False), + ('positive', False), + ('prime', False), + ('rational', True), + ('real', True), + ('transcendental', False), + ) ), + ), + # Implications of zero = False: + (('zero', False), 
set( ( + ) ), + ), + ] ) # full_implications + + +prereq = { + + # facts that could determine the value of algebraic + 'algebraic': { + 'commutative', + 'complex', + 'composite', + 'even', + 'finite', + 'infinite', + 'integer', + 'odd', + 'prime', + 'rational', + 'transcendental', + 'zero', + }, + + # facts that could determine the value of antihermitian + 'antihermitian': { + 'imaginary', + }, + + # facts that could determine the value of commutative + 'commutative': { + 'algebraic', + 'complex', + 'composite', + 'even', + 'extended_negative', + 'extended_nonnegative', + 'extended_nonpositive', + 'extended_nonzero', + 'extended_positive', + 'extended_real', + 'imaginary', + 'integer', + 'irrational', + 'negative', + 'noninteger', + 'nonnegative', + 'nonpositive', + 'nonzero', + 'odd', + 'positive', + 'prime', + 'rational', + 'real', + 'transcendental', + 'zero', + }, + + # facts that could determine the value of complex + 'complex': { + 'algebraic', + 'commutative', + 'composite', + 'even', + 'finite', + 'imaginary', + 'infinite', + 'integer', + 'irrational', + 'negative', + 'nonnegative', + 'nonpositive', + 'nonzero', + 'odd', + 'positive', + 'prime', + 'rational', + 'real', + 'transcendental', + 'zero', + }, + + # facts that could determine the value of composite + 'composite': { + 'algebraic', + 'commutative', + 'complex', + 'extended_negative', + 'extended_nonnegative', + 'extended_nonpositive', + 'extended_nonzero', + 'extended_positive', + 'extended_real', + 'finite', + 'hermitian', + 'imaginary', + 'infinite', + 'integer', + 'irrational', + 'negative', + 'noninteger', + 'nonnegative', + 'nonpositive', + 'nonzero', + 'positive', + 'prime', + 'rational', + 'real', + 'transcendental', + 'zero', + }, + + # facts that could determine the value of even + 'even': { + 'algebraic', + 'commutative', + 'complex', + 'extended_real', + 'finite', + 'hermitian', + 'imaginary', + 'infinite', + 'integer', + 'irrational', + 'noninteger', + 'odd', + 'rational', + 'real', + 'transcendental', + 'zero', + }, + + # facts that could determine the value of extended_negative + 'extended_negative': { + 'commutative', + 'composite', + 'extended_nonnegative', + 'extended_nonpositive', + 'extended_nonzero', + 'extended_positive', + 'extended_real', + 'imaginary', + 'negative', + 'nonnegative', + 'positive', + 'prime', + 'zero', + }, + + # facts that could determine the value of extended_nonnegative + 'extended_nonnegative': { + 'commutative', + 'composite', + 'extended_negative', + 'extended_positive', + 'extended_real', + 'imaginary', + 'negative', + 'nonnegative', + 'positive', + 'prime', + 'zero', + }, + + # facts that could determine the value of extended_nonpositive + 'extended_nonpositive': { + 'commutative', + 'composite', + 'extended_negative', + 'extended_positive', + 'extended_real', + 'imaginary', + 'negative', + 'nonpositive', + 'positive', + 'prime', + 'zero', + }, + + # facts that could determine the value of extended_nonzero + 'extended_nonzero': { + 'commutative', + 'composite', + 'extended_negative', + 'extended_positive', + 'extended_real', + 'imaginary', + 'irrational', + 'negative', + 'noninteger', + 'nonzero', + 'odd', + 'positive', + 'prime', + 'zero', + }, + + # facts that could determine the value of extended_positive + 'extended_positive': { + 'commutative', + 'composite', + 'extended_negative', + 'extended_nonnegative', + 'extended_nonpositive', + 'extended_nonzero', + 'extended_real', + 'imaginary', + 'negative', + 'nonpositive', + 'positive', + 'prime', + 'zero', + }, + + # facts 
that could determine the value of extended_real + 'extended_real': { + 'commutative', + 'composite', + 'even', + 'extended_negative', + 'extended_nonnegative', + 'extended_nonpositive', + 'extended_nonzero', + 'extended_positive', + 'imaginary', + 'integer', + 'irrational', + 'negative', + 'noninteger', + 'nonnegative', + 'nonpositive', + 'nonzero', + 'odd', + 'positive', + 'prime', + 'rational', + 'real', + 'zero', + }, + + # facts that could determine the value of finite + 'finite': { + 'algebraic', + 'complex', + 'composite', + 'even', + 'imaginary', + 'infinite', + 'integer', + 'irrational', + 'negative', + 'nonnegative', + 'nonpositive', + 'nonzero', + 'odd', + 'positive', + 'prime', + 'rational', + 'real', + 'transcendental', + 'zero', + }, + + # facts that could determine the value of hermitian + 'hermitian': { + 'composite', + 'even', + 'integer', + 'irrational', + 'negative', + 'nonnegative', + 'nonpositive', + 'nonzero', + 'odd', + 'positive', + 'prime', + 'rational', + 'real', + 'zero', + }, + + # facts that could determine the value of imaginary + 'imaginary': { + 'antihermitian', + 'commutative', + 'complex', + 'composite', + 'even', + 'extended_negative', + 'extended_nonnegative', + 'extended_nonpositive', + 'extended_nonzero', + 'extended_positive', + 'extended_real', + 'finite', + 'infinite', + 'integer', + 'irrational', + 'negative', + 'noninteger', + 'nonnegative', + 'nonpositive', + 'nonzero', + 'odd', + 'positive', + 'prime', + 'rational', + 'real', + 'zero', + }, + + # facts that could determine the value of infinite + 'infinite': { + 'algebraic', + 'complex', + 'composite', + 'even', + 'finite', + 'imaginary', + 'integer', + 'irrational', + 'negative', + 'nonnegative', + 'nonpositive', + 'nonzero', + 'odd', + 'positive', + 'prime', + 'rational', + 'real', + 'transcendental', + 'zero', + }, + + # facts that could determine the value of integer + 'integer': { + 'algebraic', + 'commutative', + 'complex', + 'composite', + 'even', + 'extended_real', + 'finite', + 'hermitian', + 'imaginary', + 'infinite', + 'irrational', + 'noninteger', + 'odd', + 'prime', + 'rational', + 'real', + 'transcendental', + 'zero', + }, + + # facts that could determine the value of irrational + 'irrational': { + 'commutative', + 'complex', + 'composite', + 'even', + 'extended_real', + 'finite', + 'hermitian', + 'imaginary', + 'infinite', + 'integer', + 'odd', + 'prime', + 'rational', + 'real', + 'zero', + }, + + # facts that could determine the value of negative + 'negative': { + 'commutative', + 'complex', + 'composite', + 'extended_negative', + 'extended_nonnegative', + 'extended_nonpositive', + 'extended_nonzero', + 'extended_positive', + 'extended_real', + 'finite', + 'hermitian', + 'imaginary', + 'infinite', + 'nonnegative', + 'nonpositive', + 'nonzero', + 'positive', + 'prime', + 'real', + 'zero', + }, + + # facts that could determine the value of noninteger + 'noninteger': { + 'commutative', + 'composite', + 'even', + 'extended_real', + 'imaginary', + 'integer', + 'irrational', + 'odd', + 'prime', + 'zero', + }, + + # facts that could determine the value of nonnegative + 'nonnegative': { + 'commutative', + 'complex', + 'composite', + 'extended_negative', + 'extended_nonnegative', + 'extended_real', + 'finite', + 'hermitian', + 'imaginary', + 'infinite', + 'negative', + 'positive', + 'prime', + 'real', + 'zero', + }, + + # facts that could determine the value of nonpositive + 'nonpositive': { + 'commutative', + 'complex', + 'composite', + 'extended_nonpositive', + 'extended_positive', + 
'extended_real', + 'finite', + 'hermitian', + 'imaginary', + 'infinite', + 'negative', + 'positive', + 'prime', + 'real', + 'zero', + }, + + # facts that could determine the value of nonzero + 'nonzero': { + 'commutative', + 'complex', + 'composite', + 'extended_nonzero', + 'extended_real', + 'finite', + 'hermitian', + 'imaginary', + 'infinite', + 'irrational', + 'negative', + 'odd', + 'positive', + 'prime', + 'real', + 'zero', + }, + + # facts that could determine the value of odd + 'odd': { + 'algebraic', + 'commutative', + 'complex', + 'even', + 'extended_real', + 'finite', + 'hermitian', + 'imaginary', + 'infinite', + 'integer', + 'irrational', + 'noninteger', + 'rational', + 'real', + 'transcendental', + 'zero', + }, + + # facts that could determine the value of positive + 'positive': { + 'commutative', + 'complex', + 'composite', + 'extended_negative', + 'extended_nonnegative', + 'extended_nonpositive', + 'extended_nonzero', + 'extended_positive', + 'extended_real', + 'finite', + 'hermitian', + 'imaginary', + 'infinite', + 'negative', + 'nonnegative', + 'nonpositive', + 'nonzero', + 'prime', + 'real', + 'zero', + }, + + # facts that could determine the value of prime + 'prime': { + 'algebraic', + 'commutative', + 'complex', + 'composite', + 'extended_negative', + 'extended_nonnegative', + 'extended_nonpositive', + 'extended_nonzero', + 'extended_positive', + 'extended_real', + 'finite', + 'hermitian', + 'imaginary', + 'infinite', + 'integer', + 'irrational', + 'negative', + 'noninteger', + 'nonnegative', + 'nonpositive', + 'nonzero', + 'positive', + 'rational', + 'real', + 'transcendental', + 'zero', + }, + + # facts that could determine the value of rational + 'rational': { + 'algebraic', + 'commutative', + 'complex', + 'composite', + 'even', + 'extended_real', + 'finite', + 'hermitian', + 'imaginary', + 'infinite', + 'integer', + 'irrational', + 'odd', + 'prime', + 'real', + 'transcendental', + 'zero', + }, + + # facts that could determine the value of real + 'real': { + 'commutative', + 'complex', + 'composite', + 'even', + 'extended_real', + 'finite', + 'hermitian', + 'imaginary', + 'infinite', + 'integer', + 'irrational', + 'negative', + 'nonnegative', + 'nonpositive', + 'nonzero', + 'odd', + 'positive', + 'prime', + 'rational', + 'zero', + }, + + # facts that could determine the value of transcendental + 'transcendental': { + 'algebraic', + 'commutative', + 'complex', + 'composite', + 'even', + 'finite', + 'infinite', + 'integer', + 'odd', + 'prime', + 'rational', + 'zero', + }, + + # facts that could determine the value of zero + 'zero': { + 'algebraic', + 'commutative', + 'complex', + 'composite', + 'even', + 'extended_negative', + 'extended_nonnegative', + 'extended_nonpositive', + 'extended_nonzero', + 'extended_positive', + 'extended_real', + 'finite', + 'hermitian', + 'imaginary', + 'infinite', + 'integer', + 'irrational', + 'negative', + 'noninteger', + 'nonnegative', + 'nonpositive', + 'nonzero', + 'odd', + 'positive', + 'prime', + 'rational', + 'real', + 'transcendental', + }, + +} # prereq + + +# Note: the order of the beta rules is used in the beta_triggers +beta_rules = [ + + # Rules implying composite = True + ({('even', True), ('positive', True), ('prime', False)}, + ('composite', True)), + + # Rules implying even = False + ({('composite', False), ('positive', True), ('prime', False)}, + ('even', False)), + + # Rules implying even = True + ({('integer', True), ('odd', False)}, + ('even', True)), + + # Rules implying extended_negative = True + 
({('extended_positive', False), ('extended_real', True), ('zero', False)}, + ('extended_negative', True)), + ({('extended_nonpositive', True), ('extended_nonzero', True)}, + ('extended_negative', True)), + + # Rules implying extended_nonnegative = True + ({('extended_negative', False), ('extended_real', True)}, + ('extended_nonnegative', True)), + + # Rules implying extended_nonpositive = True + ({('extended_positive', False), ('extended_real', True)}, + ('extended_nonpositive', True)), + + # Rules implying extended_nonzero = True + ({('extended_real', True), ('zero', False)}, + ('extended_nonzero', True)), + + # Rules implying extended_positive = True + ({('extended_negative', False), ('extended_real', True), ('zero', False)}, + ('extended_positive', True)), + ({('extended_nonnegative', True), ('extended_nonzero', True)}, + ('extended_positive', True)), + + # Rules implying extended_real = False + ({('infinite', False), ('real', False)}, + ('extended_real', False)), + ({('extended_negative', False), ('extended_positive', False), ('zero', False)}, + ('extended_real', False)), + + # Rules implying infinite = True + ({('extended_real', True), ('real', False)}, + ('infinite', True)), + + # Rules implying irrational = True + ({('rational', False), ('real', True)}, + ('irrational', True)), + + # Rules implying negative = True + ({('positive', False), ('real', True), ('zero', False)}, + ('negative', True)), + ({('nonpositive', True), ('nonzero', True)}, + ('negative', True)), + ({('extended_negative', True), ('finite', True)}, + ('negative', True)), + + # Rules implying noninteger = True + ({('extended_real', True), ('integer', False)}, + ('noninteger', True)), + + # Rules implying nonnegative = True + ({('negative', False), ('real', True)}, + ('nonnegative', True)), + ({('extended_nonnegative', True), ('finite', True)}, + ('nonnegative', True)), + + # Rules implying nonpositive = True + ({('positive', False), ('real', True)}, + ('nonpositive', True)), + ({('extended_nonpositive', True), ('finite', True)}, + ('nonpositive', True)), + + # Rules implying nonzero = True + ({('extended_nonzero', True), ('finite', True)}, + ('nonzero', True)), + + # Rules implying odd = True + ({('even', False), ('integer', True)}, + ('odd', True)), + + # Rules implying positive = False + ({('composite', False), ('even', True), ('prime', False)}, + ('positive', False)), + + # Rules implying positive = True + ({('negative', False), ('real', True), ('zero', False)}, + ('positive', True)), + ({('nonnegative', True), ('nonzero', True)}, + ('positive', True)), + ({('extended_positive', True), ('finite', True)}, + ('positive', True)), + + # Rules implying prime = True + ({('composite', False), ('even', True), ('positive', True)}, + ('prime', True)), + + # Rules implying real = False + ({('negative', False), ('positive', False), ('zero', False)}, + ('real', False)), + + # Rules implying real = True + ({('extended_real', True), ('infinite', False)}, + ('real', True)), + ({('extended_real', True), ('finite', True)}, + ('real', True)), + + # Rules implying transcendental = True + ({('algebraic', False), ('complex', True)}, + ('transcendental', True)), + + # Rules implying zero = True + ({('extended_negative', False), ('extended_positive', False), ('extended_real', True)}, + ('zero', True)), + ({('negative', False), ('positive', False), ('real', True)}, + ('zero', True)), + ({('extended_nonnegative', True), ('extended_nonpositive', True)}, + ('zero', True)), + ({('nonnegative', True), ('nonpositive', True)}, + ('zero', True)), 
+ +] # beta_rules +beta_triggers = { + ('algebraic', False): [32, 11, 3, 8, 29, 14, 25, 13, 17, 7], + ('algebraic', True): [10, 30, 31, 27, 16, 21, 19, 22], + ('antihermitian', False): [], + ('commutative', False): [], + ('complex', False): [10, 12, 11, 3, 8, 17, 7], + ('complex', True): [32, 10, 30, 31, 27, 16, 21, 19, 22], + ('composite', False): [1, 28, 24], + ('composite', True): [23, 2], + ('even', False): [23, 11, 3, 8, 29, 14, 25, 7], + ('even', True): [3, 33, 8, 6, 5, 14, 34, 25, 20, 18, 27, 16, 21, 19, 22, 0, 28, 24, 7], + ('extended_negative', False): [11, 33, 8, 5, 29, 34, 25, 18], + ('extended_negative', True): [30, 12, 31, 29, 14, 20, 16, 21, 22, 17], + ('extended_nonnegative', False): [11, 3, 6, 29, 14, 20, 7], + ('extended_nonnegative', True): [30, 12, 31, 33, 8, 9, 6, 29, 34, 25, 18, 19, 35, 17, 7], + ('extended_nonpositive', False): [11, 8, 5, 29, 25, 18, 7], + ('extended_nonpositive', True): [30, 12, 31, 3, 33, 4, 5, 29, 14, 34, 20, 21, 35, 17, 7], + ('extended_nonzero', False): [11, 33, 6, 5, 29, 34, 20, 18], + ('extended_nonzero', True): [30, 12, 31, 3, 8, 4, 9, 6, 5, 29, 14, 25, 22, 17], + ('extended_positive', False): [11, 3, 33, 6, 29, 14, 34, 20], + ('extended_positive', True): [30, 12, 31, 29, 25, 18, 27, 19, 22, 17], + ('extended_real', False): [], + ('extended_real', True): [30, 12, 31, 3, 33, 8, 6, 5, 17, 7], + ('finite', False): [11, 3, 8, 17, 7], + ('finite', True): [10, 30, 31, 27, 16, 21, 19, 22], + ('hermitian', False): [10, 12, 11, 3, 8, 17, 7], + ('imaginary', True): [32], + ('infinite', False): [10, 30, 31, 27, 16, 21, 19, 22], + ('infinite', True): [11, 3, 8, 17, 7], + ('integer', False): [11, 3, 8, 29, 14, 25, 17, 7], + ('integer', True): [23, 2, 3, 33, 8, 6, 5, 14, 34, 25, 20, 18, 27, 16, 21, 19, 22, 7], + ('irrational', True): [32, 3, 8, 4, 9, 6, 5, 14, 25, 15, 26, 20, 18, 27, 16, 21, 19], + ('negative', False): [29, 34, 25, 18], + ('negative', True): [32, 13, 17], + ('noninteger', True): [30, 12, 31, 3, 8, 4, 9, 6, 5, 29, 14, 25, 22], + ('nonnegative', False): [11, 3, 8, 29, 14, 20, 7], + ('nonnegative', True): [32, 33, 8, 9, 6, 34, 25, 26, 20, 27, 21, 22, 35, 36, 13, 17, 7], + ('nonpositive', False): [11, 3, 8, 29, 25, 18, 7], + ('nonpositive', True): [32, 3, 33, 4, 5, 14, 34, 15, 18, 16, 19, 22, 35, 36, 13, 17, 7], + ('nonzero', False): [29, 34, 20, 18], + ('nonzero', True): [32, 3, 8, 4, 9, 6, 5, 14, 25, 15, 26, 20, 18, 27, 16, 21, 19, 13, 17], + ('odd', False): [2], + ('odd', True): [3, 8, 4, 9, 6, 5, 14, 25, 15, 26, 20, 18, 27, 16, 21, 19], + ('positive', False): [29, 14, 34, 20], + ('positive', True): [32, 0, 1, 28, 13, 17], + ('prime', False): [0, 1, 24], + ('prime', True): [23, 2], + ('rational', False): [11, 3, 8, 29, 14, 25, 13, 17, 7], + ('rational', True): [3, 33, 8, 6, 5, 14, 34, 25, 20, 18, 27, 16, 21, 19, 22, 17, 7], + ('real', False): [10, 12, 11, 3, 8, 17, 7], + ('real', True): [32, 3, 33, 8, 6, 5, 14, 34, 25, 20, 18, 27, 16, 21, 19, 22, 13, 17, 7], + ('transcendental', True): [10, 30, 31, 11, 3, 8, 29, 14, 25, 27, 16, 21, 19, 22, 13, 17, 7], + ('zero', False): [11, 3, 8, 29, 14, 25, 7], + ('zero', True): [], +} # beta_triggers + + +generated_assumptions = {'defined_facts': defined_facts, 'full_implications': full_implications, + 'prereq': prereq, 'beta_rules': beta_rules, 'beta_triggers': beta_triggers} diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/core/decorators.py b/env-llmeval/lib/python3.10/site-packages/sympy/core/decorators.py new file mode 100644 index 
0000000000000000000000000000000000000000..1da77d2be6b8351ad16ef142f7add51f316e58ef --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/core/decorators.py @@ -0,0 +1,238 @@ +""" +SymPy core decorators. + +The purpose of this module is to expose decorators without any other +dependencies, so that they can be easily imported anywhere in sympy/core. +""" + +from functools import wraps +from .sympify import SympifyError, sympify + + +def _sympifyit(arg, retval=None): + """ + decorator to smartly _sympify function arguments + + Explanation + =========== + + @_sympifyit('other', NotImplemented) + def add(self, other): + ... + + In add, other can be thought of as already being a SymPy object. + + If it is not, the code is likely to catch an exception, then other will + be explicitly _sympified, and the whole code restarted. + + if _sympify(arg) fails, NotImplemented will be returned + + See also + ======== + + __sympifyit + """ + def deco(func): + return __sympifyit(func, arg, retval) + + return deco + + +def __sympifyit(func, arg, retval=None): + """Decorator to _sympify `arg` argument for function `func`. + + Do not use directly -- use _sympifyit instead. + """ + + # we support f(a,b) only + if not func.__code__.co_argcount: + raise LookupError("func not found") + # only b is _sympified + assert func.__code__.co_varnames[1] == arg + if retval is None: + @wraps(func) + def __sympifyit_wrapper(a, b): + return func(a, sympify(b, strict=True)) + + else: + @wraps(func) + def __sympifyit_wrapper(a, b): + try: + # If an external class has _op_priority, it knows how to deal + # with SymPy objects. Otherwise, it must be converted. + if not hasattr(b, '_op_priority'): + b = sympify(b, strict=True) + return func(a, b) + except SympifyError: + return retval + + return __sympifyit_wrapper + + +def call_highest_priority(method_name): + """A decorator for binary special methods to handle _op_priority. + + Explanation + =========== + + Binary special methods in Expr and its subclasses use a special attribute + '_op_priority' to determine whose special method will be called to + handle the operation. In general, the object having the highest value of + '_op_priority' will handle the operation. Expr and subclasses that define + custom binary special methods (__mul__, etc.) should decorate those + methods with this decorator to add the priority logic. + + The ``method_name`` argument is the name of the method of the other class + that will be called. Use this decorator in the following manner:: + + # Call other.__rmul__ if other._op_priority > self._op_priority + @call_highest_priority('__rmul__') + def __mul__(self, other): + ... + + # Call other.__mul__ if other._op_priority > self._op_priority + @call_highest_priority('__mul__') + def __rmul__(self, other): + ... + """ + def priority_decorator(func): + @wraps(func) + def binary_op_wrapper(self, other): + if hasattr(other, '_op_priority'): + if other._op_priority > self._op_priority: + f = getattr(other, method_name, None) + if f is not None: + return f(self) + return func(self, other) + return binary_op_wrapper + return priority_decorator + + +def sympify_method_args(cls): + '''Decorator for a class with methods that sympify arguments. + + Explanation + =========== + + The sympify_method_args decorator is to be used with the sympify_return + decorator for automatic sympification of method arguments. 
This is + intended for the common idiom of writing a class like : + + Examples + ======== + + >>> from sympy import Basic, SympifyError, S + >>> from sympy.core.sympify import _sympify + + >>> class MyTuple(Basic): + ... def __add__(self, other): + ... try: + ... other = _sympify(other) + ... except SympifyError: + ... return NotImplemented + ... if not isinstance(other, MyTuple): + ... return NotImplemented + ... return MyTuple(*(self.args + other.args)) + + >>> MyTuple(S(1), S(2)) + MyTuple(S(3), S(4)) + MyTuple(1, 2, 3, 4) + + In the above it is important that we return NotImplemented when other is + not sympifiable and also when the sympified result is not of the expected + type. This allows the MyTuple class to be used cooperatively with other + classes that overload __add__ and want to do something else in combination + with instance of Tuple. + + Using this decorator the above can be written as + + >>> from sympy.core.decorators import sympify_method_args, sympify_return + + >>> @sympify_method_args + ... class MyTuple(Basic): + ... @sympify_return([('other', 'MyTuple')], NotImplemented) + ... def __add__(self, other): + ... return MyTuple(*(self.args + other.args)) + + >>> MyTuple(S(1), S(2)) + MyTuple(S(3), S(4)) + MyTuple(1, 2, 3, 4) + + The idea here is that the decorators take care of the boiler-plate code + for making this happen in each method that potentially needs to accept + unsympified arguments. Then the body of e.g. the __add__ method can be + written without needing to worry about calling _sympify or checking the + type of the resulting object. + + The parameters for sympify_return are a list of tuples of the form + (parameter_name, expected_type) and the value to return (e.g. + NotImplemented). The expected_type parameter can be a type e.g. Tuple or a + string 'Tuple'. Using a string is useful for specifying a Type within its + class body (as in the above example). + + Notes: Currently sympify_return only works for methods that take a single + argument (not including self). Specifying an expected_type as a string + only works for the class in which the method is defined. + ''' + # Extract the wrapped methods from each of the wrapper objects created by + # the sympify_return decorator. Doing this here allows us to provide the + # cls argument which is used for forward string referencing. + for attrname, obj in cls.__dict__.items(): + if isinstance(obj, _SympifyWrapper): + setattr(cls, attrname, obj.make_wrapped(cls)) + return cls + + +def sympify_return(*args): + '''Function/method decorator to sympify arguments automatically + + See the docstring of sympify_method_args for explanation. + ''' + # Store a wrapper object for the decorated method + def wrapper(func): + return _SympifyWrapper(func, args) + return wrapper + + +class _SympifyWrapper: + '''Internal class used by sympify_return and sympify_method_args''' + + def __init__(self, func, args): + self.func = func + self.args = args + + def make_wrapped(self, cls): + func = self.func + parameters, retval = self.args + + # XXX: Handle more than one parameter? + [(parameter, expectedcls)] = parameters + + # Handle forward references to the current class using strings + if expectedcls == cls.__name__: + expectedcls = cls + + # Raise RuntimeError since this is a failure at import time and should + # not be recoverable. 
+ nargs = func.__code__.co_argcount + # we support f(a, b) only + if nargs != 2: + raise RuntimeError('sympify_return can only be used with 2 argument functions') + # only b is _sympified + if func.__code__.co_varnames[1] != parameter: + raise RuntimeError('parameter name mismatch "%s" in %s' % + (parameter, func.__name__)) + + @wraps(func) + def _func(self, other): + # XXX: The check for _op_priority here should be removed. It is + # needed to stop mutable matrices from being sympified to + # immutable matrices which breaks things in quantum... + if not hasattr(other, '_op_priority'): + try: + other = sympify(other, strict=True) + except SympifyError: + return retval + if not isinstance(other, expectedcls): + return retval + return func(self, other) + + return _func diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/core/expr.py b/env-llmeval/lib/python3.10/site-packages/sympy/core/expr.py new file mode 100644 index 0000000000000000000000000000000000000000..7b3b7cc574c890b8226dd714f4784d379fb5adb9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/core/expr.py @@ -0,0 +1,4165 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from collections.abc import Iterable +from functools import reduce +import re + +from .sympify import sympify, _sympify +from .basic import Basic, Atom +from .singleton import S +from .evalf import EvalfMixin, pure_complex, DEFAULT_MAXPREC +from .decorators import call_highest_priority, sympify_method_args, sympify_return +from .cache import cacheit +from .sorting import default_sort_key +from .kind import NumberKind +from sympy.utilities.exceptions import sympy_deprecation_warning +from sympy.utilities.misc import as_int, func_name, filldedent +from sympy.utilities.iterables import has_variety, sift +from mpmath.libmp import mpf_log, prec_to_dps +from mpmath.libmp.libintmath import giant_steps + + +if TYPE_CHECKING: + from .numbers import Number + +from collections import defaultdict + + +def _corem(eq, c): # helper for extract_additively + # return co, diff from co*c + diff + co = [] + non = [] + for i in Add.make_args(eq): + ci = i.coeff(c) + if not ci: + non.append(i) + else: + co.append(ci) + return Add(*co), Add(*non) + + +@sympify_method_args +class Expr(Basic, EvalfMixin): + """ + Base class for algebraic expressions. + + Explanation + =========== + + Everything that requires arithmetic operations to be defined + should subclass this class, instead of Basic (which should be + used only for argument storage and expression manipulation, i.e. + pattern matching, substitutions, etc). + + If you want to override the comparisons of expressions: + Should use _eval_is_ge for inequality, or _eval_is_eq, with multiple dispatch. + _eval_is_ge return true if x >= y, false if x < y, and None if the two types + are not comparable or the comparison is indeterminate + + See Also + ======== + + sympy.core.basic.Basic + """ + + __slots__: tuple[str, ...] = () + + is_scalar = True # self derivative is 1 + + @property + def _diff_wrt(self): + """Return True if one can differentiate with respect to this + object, else False. + + Explanation + =========== + + Subclasses such as Symbol, Function and Derivative return True + to enable derivatives wrt them. The implementation in Derivative + separates the Symbol and non-Symbol (_diff_wrt=True) variables and + temporarily converts the non-Symbols into Symbols when performing + the differentiation. 
By default, any object deriving from Expr + will behave like a scalar with self.diff(self) == 1. If this is + not desired then the object must also set `is_scalar = False` or + else define an _eval_derivative routine. + + Note, see the docstring of Derivative for how this should work + mathematically. In particular, note that expr.subs(yourclass, Symbol) + should be well-defined on a structural level, or this will lead to + inconsistent results. + + Examples + ======== + + >>> from sympy import Expr + >>> e = Expr() + >>> e._diff_wrt + False + >>> class MyScalar(Expr): + ... _diff_wrt = True + ... + >>> MyScalar().diff(MyScalar()) + 1 + >>> class MySymbol(Expr): + ... _diff_wrt = True + ... is_scalar = False + ... + >>> MySymbol().diff(MySymbol()) + Derivative(MySymbol(), MySymbol()) + """ + return False + + @cacheit + def sort_key(self, order=None): + + coeff, expr = self.as_coeff_Mul() + + if expr.is_Pow: + if expr.base is S.Exp1: + # If we remove this, many doctests will go crazy: + # (keeps E**x sorted like the exp(x) function, + # part of exp(x) to E**x transition) + expr, exp = Function("exp")(expr.exp), S.One + else: + expr, exp = expr.args + else: + exp = S.One + + if expr.is_Dummy: + args = (expr.sort_key(),) + elif expr.is_Atom: + args = (str(expr),) + else: + if expr.is_Add: + args = expr.as_ordered_terms(order=order) + elif expr.is_Mul: + args = expr.as_ordered_factors(order=order) + else: + args = expr.args + + args = tuple( + [ default_sort_key(arg, order=order) for arg in args ]) + + args = (len(args), tuple(args)) + exp = exp.sort_key(order=order) + + return expr.class_key(), args, exp, coeff + + def _hashable_content(self): + """Return a tuple of information about self that can be used to + compute the hash. If a class defines additional attributes, + like ``name`` in Symbol, then this method should be updated + accordingly to return such relevant attributes. + Defining more than _hashable_content is necessary if __eq__ has + been defined by a class. See note about this in Basic.__eq__.""" + return self._args + + # *************** + # * Arithmetics * + # *************** + # Expr and its subclasses use _op_priority to determine which object + # passed to a binary special method (__mul__, etc.) will handle the + # operation. In general, the 'call_highest_priority' decorator will choose + # the object with the highest _op_priority to handle the call. + # Custom subclasses that want to define their own binary special methods + # should set an _op_priority value that is higher than the default. + # + # **NOTE**: + # This is a temporary fix, and will eventually be replaced with + # something better and more powerful. See issue 5510. + _op_priority = 10.0 + + @property + def _add_handler(self): + return Add + + @property + def _mul_handler(self): + return Mul + + def __pos__(self): + return self + + def __neg__(self): + # Mul has its own __neg__ routine, so we just + # create a 2-args Mul with the -1 in the canonical + # slot 0. 
+ c = self.is_commutative + return Mul._from_args((S.NegativeOne, self), c) + + def __abs__(self) -> Expr: + from sympy.functions.elementary.complexes import Abs + return Abs(self) + + @sympify_return([('other', 'Expr')], NotImplemented) + @call_highest_priority('__radd__') + def __add__(self, other): + return Add(self, other) + + @sympify_return([('other', 'Expr')], NotImplemented) + @call_highest_priority('__add__') + def __radd__(self, other): + return Add(other, self) + + @sympify_return([('other', 'Expr')], NotImplemented) + @call_highest_priority('__rsub__') + def __sub__(self, other): + return Add(self, -other) + + @sympify_return([('other', 'Expr')], NotImplemented) + @call_highest_priority('__sub__') + def __rsub__(self, other): + return Add(other, -self) + + @sympify_return([('other', 'Expr')], NotImplemented) + @call_highest_priority('__rmul__') + def __mul__(self, other): + return Mul(self, other) + + @sympify_return([('other', 'Expr')], NotImplemented) + @call_highest_priority('__mul__') + def __rmul__(self, other): + return Mul(other, self) + + @sympify_return([('other', 'Expr')], NotImplemented) + @call_highest_priority('__rpow__') + def _pow(self, other): + return Pow(self, other) + + def __pow__(self, other, mod=None) -> Expr: + if mod is None: + return self._pow(other) + try: + _self, other, mod = as_int(self), as_int(other), as_int(mod) + if other >= 0: + return _sympify(pow(_self, other, mod)) + else: + from .numbers import mod_inverse + return _sympify(mod_inverse(pow(_self, -other, mod), mod)) + except ValueError: + power = self._pow(other) + try: + return power%mod + except TypeError: + return NotImplemented + + @sympify_return([('other', 'Expr')], NotImplemented) + @call_highest_priority('__pow__') + def __rpow__(self, other): + return Pow(other, self) + + @sympify_return([('other', 'Expr')], NotImplemented) + @call_highest_priority('__rtruediv__') + def __truediv__(self, other): + denom = Pow(other, S.NegativeOne) + if self is S.One: + return denom + else: + return Mul(self, denom) + + @sympify_return([('other', 'Expr')], NotImplemented) + @call_highest_priority('__truediv__') + def __rtruediv__(self, other): + denom = Pow(self, S.NegativeOne) + if other is S.One: + return denom + else: + return Mul(other, denom) + + @sympify_return([('other', 'Expr')], NotImplemented) + @call_highest_priority('__rmod__') + def __mod__(self, other): + return Mod(self, other) + + @sympify_return([('other', 'Expr')], NotImplemented) + @call_highest_priority('__mod__') + def __rmod__(self, other): + return Mod(other, self) + + @sympify_return([('other', 'Expr')], NotImplemented) + @call_highest_priority('__rfloordiv__') + def __floordiv__(self, other): + from sympy.functions.elementary.integers import floor + return floor(self / other) + + @sympify_return([('other', 'Expr')], NotImplemented) + @call_highest_priority('__floordiv__') + def __rfloordiv__(self, other): + from sympy.functions.elementary.integers import floor + return floor(other / self) + + + @sympify_return([('other', 'Expr')], NotImplemented) + @call_highest_priority('__rdivmod__') + def __divmod__(self, other): + from sympy.functions.elementary.integers import floor + return floor(self / other), Mod(self, other) + + @sympify_return([('other', 'Expr')], NotImplemented) + @call_highest_priority('__divmod__') + def __rdivmod__(self, other): + from sympy.functions.elementary.integers import floor + return floor(other / self), Mod(other, self) + + def __int__(self): + # Although we only need to round to the units position, 
we'll + # get one more digit so the extra testing below can be avoided + # unless the rounded value rounded to an integer, e.g. if an + # expression were equal to 1.9 and we rounded to the unit position + # we would get a 2 and would not know if this rounded up or not + # without doing a test (as done below). But if we keep an extra + # digit we know that 1.9 is not the same as 1 and there is no + # need for further testing: our int value is correct. If the value + # were 1.99, however, this would round to 2.0 and our int value is + # off by one. So...if our round value is the same as the int value + # (regardless of how much extra work we do to calculate extra decimal + # places) we need to test whether we are off by one. + from .symbol import Dummy + if not self.is_number: + raise TypeError("Cannot convert symbols to int") + r = self.round(2) + if not r.is_Number: + raise TypeError("Cannot convert complex to int") + if r in (S.NaN, S.Infinity, S.NegativeInfinity): + raise TypeError("Cannot convert %s to int" % r) + i = int(r) + if not i: + return 0 + # off-by-one check + if i == r and not (self - i).equals(0): + isign = 1 if i > 0 else -1 + x = Dummy() + # in the following (self - i).evalf(2) will not always work while + # (self - r).evalf(2) and the use of subs does; if the test that + # was added when this comment was added passes, it might be safe + # to simply use sign to compute this rather than doing this by hand: + diff_sign = 1 if (self - x).evalf(2, subs={x: i}) > 0 else -1 + if diff_sign != isign: + i -= isign + return i + + def __float__(self): + # Don't bother testing if it's a number; if it's not this is going + # to fail, and if it is we still need to check that it evalf'ed to + # a number. + result = self.evalf() + if result.is_Number: + return float(result) + if result.is_number and result.as_real_imag()[1]: + raise TypeError("Cannot convert complex to float") + raise TypeError("Cannot convert expression to float") + + def __complex__(self): + result = self.evalf() + re, im = result.as_real_imag() + return complex(float(re), float(im)) + + @sympify_return([('other', 'Expr')], NotImplemented) + def __ge__(self, other): + from .relational import GreaterThan + return GreaterThan(self, other) + + @sympify_return([('other', 'Expr')], NotImplemented) + def __le__(self, other): + from .relational import LessThan + return LessThan(self, other) + + @sympify_return([('other', 'Expr')], NotImplemented) + def __gt__(self, other): + from .relational import StrictGreaterThan + return StrictGreaterThan(self, other) + + @sympify_return([('other', 'Expr')], NotImplemented) + def __lt__(self, other): + from .relational import StrictLessThan + return StrictLessThan(self, other) + + def __trunc__(self): + if not self.is_number: + raise TypeError("Cannot truncate symbols and expressions") + else: + return Integer(self) + + def __format__(self, format_spec: str): + if self.is_number: + mt = re.match(r'\+?\d*\.(\d+)f', format_spec) + if mt: + prec = int(mt.group(1)) + rounded = self.round(prec) + if rounded.is_Integer: + return format(int(rounded), format_spec) + if rounded.is_Float: + return format(rounded, format_spec) + return super().__format__(format_spec) + + @staticmethod + def _from_mpmath(x, prec): + if hasattr(x, "_mpf_"): + return Float._new(x._mpf_, prec) + elif hasattr(x, "_mpc_"): + re, im = x._mpc_ + re = Float._new(re, prec) + im = Float._new(im, prec)*S.ImaginaryUnit + return re + im + else: + raise TypeError("expected mpmath number (mpf or mpc)") + + @property + def 
is_number(self): + """Returns True if ``self`` has no free symbols and no + undefined functions (AppliedUndef, to be precise). It will be + faster than ``if not self.free_symbols``, however, since + ``is_number`` will fail as soon as it hits a free symbol + or undefined function. + + Examples + ======== + + >>> from sympy import Function, Integral, cos, sin, pi + >>> from sympy.abc import x + >>> f = Function('f') + + >>> x.is_number + False + >>> f(1).is_number + False + >>> (2*x).is_number + False + >>> (2 + Integral(2, x)).is_number + False + >>> (2 + Integral(2, (x, 1, 2))).is_number + True + + Not all numbers are Numbers in the SymPy sense: + + >>> pi.is_number, pi.is_Number + (True, False) + + If something is a number it should evaluate to a number with + real and imaginary parts that are Numbers; the result may not + be comparable, however, since the real and/or imaginary part + of the result may not have precision. + + >>> cos(1).is_number and cos(1).is_comparable + True + + >>> z = cos(1)**2 + sin(1)**2 - 1 + >>> z.is_number + True + >>> z.is_comparable + False + + See Also + ======== + + sympy.core.basic.Basic.is_comparable + """ + return all(obj.is_number for obj in self.args) + + def _random(self, n=None, re_min=-1, im_min=-1, re_max=1, im_max=1): + """Return self evaluated, if possible, replacing free symbols with + random complex values, if necessary. + + Explanation + =========== + + The random complex value for each free symbol is generated + by the random_complex_number routine giving real and imaginary + parts in the range given by the re_min, re_max, im_min, and im_max + values. The returned value is evaluated to a precision of n + (if given) else the maximum of 15 and the precision needed + to get more than 1 digit of precision. If the expression + could not be evaluated to a number, or could not be evaluated + to more than 1 digit of precision, then None is returned. + + Examples + ======== + + >>> from sympy import sqrt + >>> from sympy.abc import x, y + >>> x._random() # doctest: +SKIP + 0.0392918155679172 + 0.916050214307199*I + >>> x._random(2) # doctest: +SKIP + -0.77 - 0.87*I + >>> (x + y/2)._random(2) # doctest: +SKIP + -0.57 + 0.16*I + >>> sqrt(2)._random(2) + 1.4 + + See Also + ======== + + sympy.core.random.random_complex_number + """ + + free = self.free_symbols + prec = 1 + if free: + from sympy.core.random import random_complex_number + a, c, b, d = re_min, re_max, im_min, im_max + reps = dict(list(zip(free, [random_complex_number(a, b, c, d, rational=True) + for zi in free]))) + try: + nmag = abs(self.evalf(2, subs=reps)) + except (ValueError, TypeError): + # if an out of range value resulted in evalf problems + # then return None -- XXX is there a way to know how to + # select a good random number for a given expression? + # e.g. when calculating n! negative values for n should not + # be used + return None + else: + reps = {} + nmag = abs(self.evalf(2)) + + if not hasattr(nmag, '_prec'): + # e.g. 
exp_polar(2*I*pi) doesn't evaluate but is_number is True + return None + + if nmag._prec == 1: + # increase the precision up to the default maximum + # precision to see if we can get any significance + + # evaluate + for prec in giant_steps(2, DEFAULT_MAXPREC): + nmag = abs(self.evalf(prec, subs=reps)) + if nmag._prec != 1: + break + + if nmag._prec != 1: + if n is None: + n = max(prec, 15) + return self.evalf(n, subs=reps) + + # never got any significance + return None + + def is_constant(self, *wrt, **flags): + """Return True if self is constant, False if not, or None if + the constancy could not be determined conclusively. + + Explanation + =========== + + If an expression has no free symbols then it is a constant. If + there are free symbols it is possible that the expression is a + constant, perhaps (but not necessarily) zero. To test such + expressions, a few strategies are tried: + + 1) numerical evaluation at two random points. If two such evaluations + give two different values and the values have a precision greater than + 1 then self is not constant. If the evaluations agree or could not be + obtained with any precision, no decision is made. The numerical testing + is done only if ``wrt`` is different than the free symbols. + + 2) differentiation with respect to variables in 'wrt' (or all free + symbols if omitted) to see if the expression is constant or not. This + will not always lead to an expression that is zero even though an + expression is constant (see added test in test_expr.py). If + all derivatives are zero then self is constant with respect to the + given symbols. + + 3) finding out zeros of denominator expression with free_symbols. + It will not be constant if there are zeros. It gives more negative + answers for expression that are not constant. + + If neither evaluation nor differentiation can prove the expression is + constant, None is returned unless two numerical values happened to be + the same and the flag ``failing_number`` is True -- in that case the + numerical value will be returned. + + If flag simplify=False is passed, self will not be simplified; + the default is True since self should be simplified before testing. 
+ + Examples + ======== + + >>> from sympy import cos, sin, Sum, S, pi + >>> from sympy.abc import a, n, x, y + >>> x.is_constant() + False + >>> S(2).is_constant() + True + >>> Sum(x, (x, 1, 10)).is_constant() + True + >>> Sum(x, (x, 1, n)).is_constant() + False + >>> Sum(x, (x, 1, n)).is_constant(y) + True + >>> Sum(x, (x, 1, n)).is_constant(n) + False + >>> Sum(x, (x, 1, n)).is_constant(x) + True + >>> eq = a*cos(x)**2 + a*sin(x)**2 - a + >>> eq.is_constant() + True + >>> eq.subs({x: pi, a: 2}) == eq.subs({x: pi, a: 3}) == 0 + True + + >>> (0**x).is_constant() + False + >>> x.is_constant() + False + >>> (x**x).is_constant() + False + >>> one = cos(x)**2 + sin(x)**2 + >>> one.is_constant() + True + >>> ((one - 1)**(x + 1)).is_constant() in (True, False) # could be 0 or 1 + True + """ + + def check_denominator_zeros(expression): + from sympy.solvers.solvers import denoms + + retNone = False + for den in denoms(expression): + z = den.is_zero + if z is True: + return True + if z is None: + retNone = True + if retNone: + return None + return False + + simplify = flags.get('simplify', True) + + if self.is_number: + return True + free = self.free_symbols + if not free: + return True # assume f(1) is some constant + + # if we are only interested in some symbols and they are not in the + # free symbols then this expression is constant wrt those symbols + wrt = set(wrt) + if wrt and not wrt & free: + return True + wrt = wrt or free + + # simplify unless this has already been done + expr = self + if simplify: + expr = expr.simplify() + + # is_zero should be a quick assumptions check; it can be wrong for + # numbers (see test_is_not_constant test), giving False when it + # shouldn't, but hopefully it will never give True unless it is sure. + if expr.is_zero: + return True + + # Don't attempt substitution or differentiation with non-number symbols + wrt_number = {sym for sym in wrt if sym.kind is NumberKind} + + # try numerical evaluation to see if we get two different values + failing_number = None + if wrt_number == free: + # try 0 (for a) and 1 (for b) + try: + a = expr.subs(list(zip(free, [0]*len(free))), + simultaneous=True) + if a is S.NaN: + # evaluation may succeed when substitution fails + a = expr._random(None, 0, 0, 0, 0) + except ZeroDivisionError: + a = None + if a is not None and a is not S.NaN: + try: + b = expr.subs(list(zip(free, [1]*len(free))), + simultaneous=True) + if b is S.NaN: + # evaluation may succeed when substitution fails + b = expr._random(None, 1, 0, 1, 0) + except ZeroDivisionError: + b = None + if b is not None and b is not S.NaN and b.equals(a) is False: + return False + # try random real + b = expr._random(None, -1, 0, 1, 0) + if b is not None and b is not S.NaN and b.equals(a) is False: + return False + # try random complex + b = expr._random() + if b is not None and b is not S.NaN: + if b.equals(a) is False: + return False + failing_number = a if a.is_number else b + + # now we will test each wrt symbol (or all free symbols) to see if the + # expression depends on them or not using differentiation. This is + # not sufficient for all expressions, however, so we don't return + # False if we get a derivative other than 0 with free symbols. 
+ for w in wrt_number: + deriv = expr.diff(w) + if simplify: + deriv = deriv.simplify() + if deriv != 0: + if not (pure_complex(deriv, or_real=True)): + if flags.get('failing_number', False): + return failing_number + return False + cd = check_denominator_zeros(self) + if cd is True: + return False + elif cd is None: + return None + return True + + def equals(self, other, failing_expression=False): + """Return True if self == other, False if it does not, or None. If + failing_expression is True then the expression which did not simplify + to a 0 will be returned instead of None. + + Explanation + =========== + + If ``self`` is a Number (or complex number) that is not zero, then + the result is False. + + If ``self`` is a number and has not evaluated to zero, evalf will be + used to test whether the expression evaluates to zero. If it does so + and the result has significance (i.e. the precision is either -1, for + a Rational result, or is greater than 1) then the evalf value will be + used to return True or False. + + """ + from sympy.simplify.simplify import nsimplify, simplify + from sympy.solvers.solvers import solve + from sympy.polys.polyerrors import NotAlgebraic + from sympy.polys.numberfields import minimal_polynomial + + other = sympify(other) + if self == other: + return True + + # they aren't the same so see if we can make the difference 0; + # don't worry about doing simplification steps one at a time + # because if the expression ever goes to 0 then the subsequent + # simplification steps that are done will be very fast. + diff = factor_terms(simplify(self - other), radical=True) + + if not diff: + return True + + if not diff.has(Add, Mod): + # if there is no expanding to be done after simplifying + # then this can't be a zero + return False + + factors = diff.as_coeff_mul()[1] + if len(factors) > 1: # avoid infinity recursion + fac_zero = [fac.equals(0) for fac in factors] + if None not in fac_zero: # every part can be decided + return any(fac_zero) + + constant = diff.is_constant(simplify=False, failing_number=True) + + if constant is False: + return False + + if not diff.is_number: + if constant is None: + # e.g. unless the right simplification is done, a symbolic + # zero is possible (see expression of issue 6829: without + # simplification constant will be None). + return + + if constant is True: + # this gives a number whether there are free symbols or not + ndiff = diff._random() + # is_comparable will work whether the result is real + # or complex; it could be None, however. + if ndiff and ndiff.is_comparable: + return False + + # sometimes we can use a simplified result to give a clue as to + # what the expression should be; if the expression is *not* zero + # then we should have been able to compute that and so now + # we can just consider the cases where the approximation appears + # to be zero -- we try to prove it via minimal_polynomial. + # + # removed + # ns = nsimplify(diff) + # if diff.is_number and (not ns or ns == diff): + # + # The thought was that if it nsimplifies to 0 that's a sure sign + # to try the following to prove it; or if it changed but wasn't + # zero that might be a sign that it's not going to be easy to + # prove. But tests seem to be working without that logic. 
+ # + if diff.is_number: + # try to prove via self-consistency + surds = [s for s in diff.atoms(Pow) if s.args[0].is_Integer] + # it seems to work better to try big ones first + surds.sort(key=lambda x: -x.args[0]) + for s in surds: + try: + # simplify is False here -- this expression has already + # been identified as being hard to identify as zero; + # we will handle the checking ourselves using nsimplify + # to see if we are in the right ballpark or not and if so + # *then* the simplification will be attempted. + sol = solve(diff, s, simplify=False) + if sol: + if s in sol: + # the self-consistent result is present + return True + if all(si.is_Integer for si in sol): + # perfect powers are removed at instantiation + # so surd s cannot be an integer + return False + if all(i.is_algebraic is False for i in sol): + # a surd is algebraic + return False + if any(si in surds for si in sol): + # it wasn't equal to s but it is in surds + # and different surds are not equal + return False + if any(nsimplify(s - si) == 0 and + simplify(s - si) == 0 for si in sol): + return True + if s.is_real: + if any(nsimplify(si, [s]) == s and simplify(si) == s + for si in sol): + return True + except NotImplementedError: + pass + + # try to prove with minimal_polynomial but know when + # *not* to use this or else it can take a long time. e.g. issue 8354 + if True: # change True to condition that assures non-hang + try: + mp = minimal_polynomial(diff) + if mp.is_Symbol: + return True + return False + except (NotAlgebraic, NotImplementedError): + pass + + # diff has not simplified to zero; constant is either None, True + # or the number with significance (is_comparable) that was randomly + # calculated twice as the same value. + if constant not in (True, None) and constant != 0: + return False + + if failing_expression: + return diff + return None + + def _eval_is_extended_positive_negative(self, positive): + from sympy.polys.numberfields import minimal_polynomial + from sympy.polys.polyerrors import NotAlgebraic + if self.is_number: + # check to see that we can get a value + try: + n2 = self._eval_evalf(2) + # XXX: This shouldn't be caught here + # Catches ValueError: hypsum() failed to converge to the requested + # 34 bits of accuracy + except ValueError: + return None + if n2 is None: + return None + if getattr(n2, '_prec', 1) == 1: # no significance + return None + if n2 is S.NaN: + return None + + f = self.evalf(2) + if f.is_Float: + match = f, S.Zero + else: + match = pure_complex(f) + if match is None: + return False + r, i = match + if not (i.is_Number and r.is_Number): + return False + if r._prec != 1 and i._prec != 1: + return bool(not i and ((r > 0) if positive else (r < 0))) + elif r._prec == 1 and (not i or i._prec == 1) and \ + self._eval_is_algebraic() and not self.has(Function): + try: + if minimal_polynomial(self).is_Symbol: + return False + except (NotAlgebraic, NotImplementedError): + pass + + def _eval_is_extended_positive(self): + return self._eval_is_extended_positive_negative(positive=True) + + def _eval_is_extended_negative(self): + return self._eval_is_extended_positive_negative(positive=False) + + def _eval_interval(self, x, a, b): + """ + Returns evaluation over an interval. For most functions this is: + + self.subs(x, b) - self.subs(x, a), + + possibly using limit() if NaN is returned from subs, or if + singularities are found between a and b. + + If b or a is None, it only evaluates -self.subs(x, a) or self.subs(b, x), + respectively. 
+ + """ + from sympy.calculus.accumulationbounds import AccumBounds + from sympy.functions.elementary.exponential import log + from sympy.series.limits import limit, Limit + from sympy.sets.sets import Interval + from sympy.solvers.solveset import solveset + + if (a is None and b is None): + raise ValueError('Both interval ends cannot be None.') + + def _eval_endpoint(left): + c = a if left else b + if c is None: + return S.Zero + else: + C = self.subs(x, c) + if C.has(S.NaN, S.Infinity, S.NegativeInfinity, + S.ComplexInfinity, AccumBounds): + if (a < b) != False: + C = limit(self, x, c, "+" if left else "-") + else: + C = limit(self, x, c, "-" if left else "+") + + if isinstance(C, Limit): + raise NotImplementedError("Could not compute limit") + return C + + if a == b: + return S.Zero + + A = _eval_endpoint(left=True) + if A is S.NaN: + return A + + B = _eval_endpoint(left=False) + + if (a and b) is None: + return B - A + + value = B - A + + if a.is_comparable and b.is_comparable: + if a < b: + domain = Interval(a, b) + else: + domain = Interval(b, a) + # check the singularities of self within the interval + # if singularities is a ConditionSet (not iterable), catch the exception and pass + singularities = solveset(self.cancel().as_numer_denom()[1], x, + domain=domain) + for logterm in self.atoms(log): + singularities = singularities | solveset(logterm.args[0], x, + domain=domain) + try: + for s in singularities: + if value is S.NaN: + # no need to keep adding, it will stay NaN + break + if not s.is_comparable: + continue + if (a < s) == (s < b) == True: + value += -limit(self, x, s, "+") + limit(self, x, s, "-") + elif (b < s) == (s < a) == True: + value += limit(self, x, s, "+") - limit(self, x, s, "-") + except TypeError: + pass + + return value + + def _eval_power(self, other): + # subclass to compute self**other for cases when + # other is not NaN, 0, or 1 + return None + + def _eval_conjugate(self): + if self.is_extended_real: + return self + elif self.is_imaginary: + return -self + + def conjugate(self): + """Returns the complex conjugate of 'self'.""" + from sympy.functions.elementary.complexes import conjugate as c + return c(self) + + def dir(self, x, cdir): + if self.is_zero: + return S.Zero + from sympy.functions.elementary.exponential import log + minexp = S.Zero + arg = self + while arg: + minexp += S.One + arg = arg.diff(x) + coeff = arg.subs(x, 0) + if coeff is S.NaN: + coeff = arg.limit(x, 0) + if coeff is S.ComplexInfinity: + try: + coeff, _ = arg.leadterm(x) + if coeff.has(log(x)): + raise ValueError() + except ValueError: + coeff = arg.limit(x, 0) + if coeff != S.Zero: + break + return coeff*cdir**minexp + + def _eval_transpose(self): + from sympy.functions.elementary.complexes import conjugate + if (self.is_complex or self.is_infinite): + return self + elif self.is_hermitian: + return conjugate(self) + elif self.is_antihermitian: + return -conjugate(self) + + def transpose(self): + from sympy.functions.elementary.complexes import transpose + return transpose(self) + + def _eval_adjoint(self): + from sympy.functions.elementary.complexes import conjugate, transpose + if self.is_hermitian: + return self + elif self.is_antihermitian: + return -self + obj = self._eval_conjugate() + if obj is not None: + return transpose(obj) + obj = self._eval_transpose() + if obj is not None: + return conjugate(obj) + + def adjoint(self): + from sympy.functions.elementary.complexes import adjoint + return adjoint(self) + + @classmethod + def _parse_order(cls, order): + """Parse and 
configure the ordering of terms. """ + from sympy.polys.orderings import monomial_key + + startswith = getattr(order, "startswith", None) + if startswith is None: + reverse = False + else: + reverse = startswith('rev-') + if reverse: + order = order[4:] + + monom_key = monomial_key(order) + + def neg(monom): + return tuple([neg(m) if isinstance(m, tuple) else -m for m in monom]) + + def key(term): + _, ((re, im), monom, ncpart) = term + + monom = neg(monom_key(monom)) + ncpart = tuple([e.sort_key(order=order) for e in ncpart]) + coeff = ((bool(im), im), (re, im)) + + return monom, ncpart, coeff + + return key, reverse + + def as_ordered_factors(self, order=None): + """Return list of ordered factors (if Mul) else [self].""" + return [self] + + def as_poly(self, *gens, **args): + """Converts ``self`` to a polynomial or returns ``None``. + + Explanation + =========== + + >>> from sympy import sin + >>> from sympy.abc import x, y + + >>> print((x**2 + x*y).as_poly()) + Poly(x**2 + x*y, x, y, domain='ZZ') + + >>> print((x**2 + x*y).as_poly(x, y)) + Poly(x**2 + x*y, x, y, domain='ZZ') + + >>> print((x**2 + sin(y)).as_poly(x, y)) + None + + """ + from sympy.polys.polyerrors import PolynomialError, GeneratorsNeeded + from sympy.polys.polytools import Poly + + try: + poly = Poly(self, *gens, **args) + + if not poly.is_Poly: + return None + else: + return poly + except (PolynomialError, GeneratorsNeeded): + # PolynomialError is caught for e.g. exp(x).as_poly(x) + # GeneratorsNeeded is caught for e.g. S(2).as_poly() + return None + + def as_ordered_terms(self, order=None, data=False): + """ + Transform an expression to an ordered list of terms. + + Examples + ======== + + >>> from sympy import sin, cos + >>> from sympy.abc import x + + >>> (sin(x)**2*cos(x) + sin(x)**2 + 1).as_ordered_terms() + [sin(x)**2*cos(x), sin(x)**2, 1] + + """ + + from .numbers import Number, NumberSymbol + + if order is None and self.is_Add: + # Spot the special case of Add(Number, Mul(Number, expr)) with the + # first number positive and the second number negative + key = lambda x:not isinstance(x, (Number, NumberSymbol)) + add_args = sorted(Add.make_args(self), key=key) + if (len(add_args) == 2 + and isinstance(add_args[0], (Number, NumberSymbol)) + and isinstance(add_args[1], Mul)): + mul_args = sorted(Mul.make_args(add_args[1]), key=key) + if (len(mul_args) == 2 + and isinstance(mul_args[0], Number) + and add_args[0].is_positive + and mul_args[0].is_negative): + return add_args + + key, reverse = self._parse_order(order) + terms, gens = self.as_terms() + + if not any(term.is_Order for term, _ in terms): + ordered = sorted(terms, key=key, reverse=reverse) + else: + _terms, _order = [], [] + + for term, repr in terms: + if not term.is_Order: + _terms.append((term, repr)) + else: + _order.append((term, repr)) + + ordered = sorted(_terms, key=key, reverse=True) \ + + sorted(_order, key=key, reverse=True) + + if data: + return ordered, gens + else: + return [term for term, _ in ordered] + + def as_terms(self): + """Transform an expression to a list of terms. 
""" + from .exprtools import decompose_power + + gens, terms = set(), [] + + for term in Add.make_args(self): + coeff, _term = term.as_coeff_Mul() + + coeff = complex(coeff) + cpart, ncpart = {}, [] + + if _term is not S.One: + for factor in Mul.make_args(_term): + if factor.is_number: + try: + coeff *= complex(factor) + except (TypeError, ValueError): + pass + else: + continue + + if factor.is_commutative: + base, exp = decompose_power(factor) + + cpart[base] = exp + gens.add(base) + else: + ncpart.append(factor) + + coeff = coeff.real, coeff.imag + ncpart = tuple(ncpart) + + terms.append((term, (coeff, cpart, ncpart))) + + gens = sorted(gens, key=default_sort_key) + + k, indices = len(gens), {} + + for i, g in enumerate(gens): + indices[g] = i + + result = [] + + for term, (coeff, cpart, ncpart) in terms: + monom = [0]*k + + for base, exp in cpart.items(): + monom[indices[base]] = exp + + result.append((term, (coeff, tuple(monom), ncpart))) + + return result, gens + + def removeO(self): + """Removes the additive O(..) symbol if there is one""" + return self + + def getO(self): + """Returns the additive O(..) symbol if there is one, else None.""" + return None + + def getn(self): + """ + Returns the order of the expression. + + Explanation + =========== + + The order is determined either from the O(...) term. If there + is no O(...) term, it returns None. + + Examples + ======== + + >>> from sympy import O + >>> from sympy.abc import x + >>> (1 + x + O(x**2)).getn() + 2 + >>> (1 + x).getn() + + """ + o = self.getO() + if o is None: + return None + elif o.is_Order: + o = o.expr + if o is S.One: + return S.Zero + if o.is_Symbol: + return S.One + if o.is_Pow: + return o.args[1] + if o.is_Mul: # x**n*log(x)**n or x**n/log(x)**n + for oi in o.args: + if oi.is_Symbol: + return S.One + if oi.is_Pow: + from .symbol import Dummy, Symbol + syms = oi.atoms(Symbol) + if len(syms) == 1: + x = syms.pop() + oi = oi.subs(x, Dummy('x', positive=True)) + if oi.base.is_Symbol and oi.exp.is_Rational: + return abs(oi.exp) + + raise NotImplementedError('not sure of order of %s' % o) + + def count_ops(self, visual=None): + from .function import count_ops + return count_ops(self, visual) + + def args_cnc(self, cset=False, warn=True, split_1=True): + """Return [commutative factors, non-commutative factors] of self. + + Explanation + =========== + + self is treated as a Mul and the ordering of the factors is maintained. + If ``cset`` is True the commutative factors will be returned in a set. + If there were repeated factors (as may happen with an unevaluated Mul) + then an error will be raised unless it is explicitly suppressed by + setting ``warn`` to False. + + Note: -1 is always separated from a Number unless split_1 is False. 
+ + Examples + ======== + + >>> from sympy import symbols, oo + >>> A, B = symbols('A B', commutative=0) + >>> x, y = symbols('x y') + >>> (-2*x*y).args_cnc() + [[-1, 2, x, y], []] + >>> (-2.5*x).args_cnc() + [[-1, 2.5, x], []] + >>> (-2*x*A*B*y).args_cnc() + [[-1, 2, x, y], [A, B]] + >>> (-2*x*A*B*y).args_cnc(split_1=False) + [[-2, x, y], [A, B]] + >>> (-2*x*y).args_cnc(cset=True) + [{-1, 2, x, y}, []] + + The arg is always treated as a Mul: + + >>> (-2 + x + A).args_cnc() + [[], [x - 2 + A]] + >>> (-oo).args_cnc() # -oo is a singleton + [[-1, oo], []] + """ + + if self.is_Mul: + args = list(self.args) + else: + args = [self] + for i, mi in enumerate(args): + if not mi.is_commutative: + c = args[:i] + nc = args[i:] + break + else: + c = args + nc = [] + + if c and split_1 and ( + c[0].is_Number and + c[0].is_extended_negative and + c[0] is not S.NegativeOne): + c[:1] = [S.NegativeOne, -c[0]] + + if cset: + clen = len(c) + c = set(c) + if clen and warn and len(c) != clen: + raise ValueError('repeated commutative arguments: %s' % + [ci for ci in c if list(self.args).count(ci) > 1]) + return [c, nc] + + def coeff(self, x, n=1, right=False, _first=True): + """ + Returns the coefficient from the term(s) containing ``x**n``. If ``n`` + is zero then all terms independent of ``x`` will be returned. + + Explanation + =========== + + When ``x`` is noncommutative, the coefficient to the left (default) or + right of ``x`` can be returned. The keyword 'right' is ignored when + ``x`` is commutative. + + Examples + ======== + + >>> from sympy import symbols + >>> from sympy.abc import x, y, z + + You can select terms that have an explicit negative in front of them: + + >>> (-x + 2*y).coeff(-1) + x + >>> (x - 2*y).coeff(-1) + 2*y + + You can select terms with no Rational coefficient: + + >>> (x + 2*y).coeff(1) + x + >>> (3 + 2*x + 4*x**2).coeff(1) + 0 + + You can select terms independent of x by making n=0; in this case + expr.as_independent(x)[0] is returned (and 0 will be returned instead + of None): + + >>> (3 + 2*x + 4*x**2).coeff(x, 0) + 3 + >>> eq = ((x + 1)**3).expand() + 1 + >>> eq + x**3 + 3*x**2 + 3*x + 2 + >>> [eq.coeff(x, i) for i in reversed(range(4))] + [1, 3, 3, 2] + >>> eq -= 2 + >>> [eq.coeff(x, i) for i in reversed(range(4))] + [1, 3, 3, 0] + + You can select terms that have a numerical term in front of them: + + >>> (-x - 2*y).coeff(2) + -y + >>> from sympy import sqrt + >>> (x + sqrt(2)*x).coeff(sqrt(2)) + x + + The matching is exact: + + >>> (3 + 2*x + 4*x**2).coeff(x) + 2 + >>> (3 + 2*x + 4*x**2).coeff(x**2) + 4 + >>> (3 + 2*x + 4*x**2).coeff(x**3) + 0 + >>> (z*(x + y)**2).coeff((x + y)**2) + z + >>> (z*(x + y)**2).coeff(x + y) + 0 + + In addition, no factoring is done, so 1 + z*(1 + y) is not obtained + from the following: + + >>> (x + z*(x + x*y)).coeff(x) + 1 + + If such factoring is desired, factor_terms can be used first: + + >>> from sympy import factor_terms + >>> factor_terms(x + z*(x + x*y)).coeff(x) + z*(y + 1) + 1 + + >>> n, m, o = symbols('n m o', commutative=False) + >>> n.coeff(n) + 1 + >>> (3*n).coeff(n) + 3 + >>> (n*m + m*n*m).coeff(n) # = (1 + m)*n*m + 1 + m + >>> (n*m + m*n*m).coeff(n, right=True) # = (1 + m)*n*m + m + + If there is more than one possible coefficient 0 is returned: + + >>> (n*m + m*n).coeff(n) + 0 + + If there is only one possible coefficient, it is returned: + + >>> (n*m + x*m*n).coeff(m*n) + x + >>> (n*m + x*m*n).coeff(m*n, right=1) + 1 + + See Also + ======== + + as_coefficient: separate the expression into a coefficient and factor + 
as_coeff_Add: separate the additive constant from an expression + as_coeff_Mul: separate the multiplicative constant from an expression + as_independent: separate x-dependent terms/factors from others + sympy.polys.polytools.Poly.coeff_monomial: efficiently find the single coefficient of a monomial in Poly + sympy.polys.polytools.Poly.nth: like coeff_monomial but powers of monomial terms are used + """ + x = sympify(x) + if not isinstance(x, Basic): + return S.Zero + + n = as_int(n) + + if not x: + return S.Zero + + if x == self: + if n == 1: + return S.One + return S.Zero + + if x is S.One: + co = [a for a in Add.make_args(self) + if a.as_coeff_Mul()[0] is S.One] + if not co: + return S.Zero + return Add(*co) + + if n == 0: + if x.is_Add and self.is_Add: + c = self.coeff(x, right=right) + if not c: + return S.Zero + if not right: + return self - Add(*[a*x for a in Add.make_args(c)]) + return self - Add(*[x*a for a in Add.make_args(c)]) + return self.as_independent(x, as_Add=True)[0] + + # continue with the full method, looking for this power of x: + x = x**n + + def incommon(l1, l2): + if not l1 or not l2: + return [] + n = min(len(l1), len(l2)) + for i in range(n): + if l1[i] != l2[i]: + return l1[:i] + return l1[:] + + def find(l, sub, first=True): + """ Find where list sub appears in list l. When ``first`` is True + the first occurrence from the left is returned, else the last + occurrence is returned. Return None if sub is not in l. + + Examples + ======== + + >> l = range(5)*2 + >> find(l, [2, 3]) + 2 + >> find(l, [2, 3], first=0) + 7 + >> find(l, [2, 4]) + None + + """ + if not sub or not l or len(sub) > len(l): + return None + n = len(sub) + if not first: + l.reverse() + sub.reverse() + for i in range(len(l) - n + 1): + if all(l[i + j] == sub[j] for j in range(n)): + break + else: + i = None + if not first: + l.reverse() + sub.reverse() + if i is not None and not first: + i = len(l) - (i + n) + return i + + co = [] + args = Add.make_args(self) + self_c = self.is_commutative + x_c = x.is_commutative + if self_c and not x_c: + return S.Zero + if _first and self.is_Add and not self_c and not x_c: + # get the part that depends on x exactly + xargs = Mul.make_args(x) + d = Add(*[i for i in Add.make_args(self.as_independent(x)[1]) + if all(xi in Mul.make_args(i) for xi in xargs)]) + rv = d.coeff(x, right=right, _first=False) + if not rv.is_Add or not right: + return rv + c_part, nc_part = zip(*[i.args_cnc() for i in rv.args]) + if has_variety(c_part): + return rv + return Add(*[Mul._from_args(i) for i in nc_part]) + + one_c = self_c or x_c + xargs, nx = x.args_cnc(cset=True, warn=bool(not x_c)) + # find the parts that pass the commutative terms + for a in args: + margs, nc = a.args_cnc(cset=True, warn=bool(not self_c)) + if nc is None: + nc = [] + if len(xargs) > len(margs): + continue + resid = margs.difference(xargs) + if len(resid) + len(xargs) == len(margs): + if one_c: + co.append(Mul(*(list(resid) + nc))) + else: + co.append((resid, nc)) + if one_c: + if co == []: + return S.Zero + elif co: + return Add(*co) + else: # both nc + # now check the non-comm parts + if not co: + return S.Zero + if all(n == co[0][1] for r, n in co): + ii = find(co[0][1], nx, right) + if ii is not None: + if not right: + return Mul(Add(*[Mul(*r) for r, c in co]), Mul(*co[0][1][:ii])) + else: + return Mul(*co[0][1][ii + len(nx):]) + beg = reduce(incommon, (n[1] for n in co)) + if beg: + ii = find(beg, nx, right) + if ii is not None: + if not right: + gcdc = co[0][0] + for i in range(1, len(co)): + gcdc = 
gcdc.intersection(co[i][0]) + if not gcdc: + break + return Mul(*(list(gcdc) + beg[:ii])) + else: + m = ii + len(nx) + return Add(*[Mul(*(list(r) + n[m:])) for r, n in co]) + end = list(reversed( + reduce(incommon, (list(reversed(n[1])) for n in co)))) + if end: + ii = find(end, nx, right) + if ii is not None: + if not right: + return Add(*[Mul(*(list(r) + n[:-len(end) + ii])) for r, n in co]) + else: + return Mul(*end[ii + len(nx):]) + # look for single match + hit = None + for i, (r, n) in enumerate(co): + ii = find(n, nx, right) + if ii is not None: + if not hit: + hit = ii, r, n + else: + break + else: + if hit: + ii, r, n = hit + if not right: + return Mul(*(list(r) + n[:ii])) + else: + return Mul(*n[ii + len(nx):]) + + return S.Zero + + def as_expr(self, *gens): + """ + Convert a polynomial to a SymPy expression. + + Examples + ======== + + >>> from sympy import sin + >>> from sympy.abc import x, y + + >>> f = (x**2 + x*y).as_poly(x, y) + >>> f.as_expr() + x**2 + x*y + + >>> sin(x).as_expr() + sin(x) + + """ + return self + + def as_coefficient(self, expr): + """ + Extracts symbolic coefficient at the given expression. In + other words, this functions separates 'self' into the product + of 'expr' and 'expr'-free coefficient. If such separation + is not possible it will return None. + + Examples + ======== + + >>> from sympy import E, pi, sin, I, Poly + >>> from sympy.abc import x + + >>> E.as_coefficient(E) + 1 + >>> (2*E).as_coefficient(E) + 2 + >>> (2*sin(E)*E).as_coefficient(E) + + Two terms have E in them so a sum is returned. (If one were + desiring the coefficient of the term exactly matching E then + the constant from the returned expression could be selected. + Or, for greater precision, a method of Poly can be used to + indicate the desired term from which the coefficient is + desired.) + + >>> (2*E + x*E).as_coefficient(E) + x + 2 + >>> _.args[0] # just want the exact match + 2 + >>> p = Poly(2*E + x*E); p + Poly(x*E + 2*E, x, E, domain='ZZ') + >>> p.coeff_monomial(E) + 2 + >>> p.nth(0, 1) + 2 + + Since the following cannot be written as a product containing + E as a factor, None is returned. (If the coefficient ``2*x`` is + desired then the ``coeff`` method should be used.) + + >>> (2*E*x + x).as_coefficient(E) + >>> (2*E*x + x).coeff(E) + 2*x + + >>> (E*(x + 1) + x).as_coefficient(E) + + >>> (2*pi*I).as_coefficient(pi*I) + 2 + >>> (2*I).as_coefficient(pi*I) + + See Also + ======== + + coeff: return sum of terms have a given factor + as_coeff_Add: separate the additive constant from an expression + as_coeff_Mul: separate the multiplicative constant from an expression + as_independent: separate x-dependent terms/factors from others + sympy.polys.polytools.Poly.coeff_monomial: efficiently find the single coefficient of a monomial in Poly + sympy.polys.polytools.Poly.nth: like coeff_monomial but powers of monomial terms are used + + + """ + + r = self.extract_multiplicatively(expr) + if r and not r.has(expr): + return r + + def as_independent(self, *deps, **hint) -> tuple[Expr, Expr]: + """ + A mostly naive separation of a Mul or Add into arguments that are not + are dependent on deps. 
To obtain as complete a separation of variables + as possible, use a separation method first, e.g.: + + * separatevars() to change Mul, Add and Pow (including exp) into Mul + * .expand(mul=True) to change Add or Mul into Add + * .expand(log=True) to change log expr into an Add + + The only non-naive thing that is done here is to respect noncommutative + ordering of variables and to always return (0, 0) for `self` of zero + regardless of hints. + + For nonzero `self`, the returned tuple (i, d) has the + following interpretation: + + * i will has no variable that appears in deps + * d will either have terms that contain variables that are in deps, or + be equal to 0 (when self is an Add) or 1 (when self is a Mul) + * if self is an Add then self = i + d + * if self is a Mul then self = i*d + * otherwise (self, S.One) or (S.One, self) is returned. + + To force the expression to be treated as an Add, use the hint as_Add=True + + Examples + ======== + + -- self is an Add + + >>> from sympy import sin, cos, exp + >>> from sympy.abc import x, y, z + + >>> (x + x*y).as_independent(x) + (0, x*y + x) + >>> (x + x*y).as_independent(y) + (x, x*y) + >>> (2*x*sin(x) + y + x + z).as_independent(x) + (y + z, 2*x*sin(x) + x) + >>> (2*x*sin(x) + y + x + z).as_independent(x, y) + (z, 2*x*sin(x) + x + y) + + -- self is a Mul + + >>> (x*sin(x)*cos(y)).as_independent(x) + (cos(y), x*sin(x)) + + non-commutative terms cannot always be separated out when self is a Mul + + >>> from sympy import symbols + >>> n1, n2, n3 = symbols('n1 n2 n3', commutative=False) + >>> (n1 + n1*n2).as_independent(n2) + (n1, n1*n2) + >>> (n2*n1 + n1*n2).as_independent(n2) + (0, n1*n2 + n2*n1) + >>> (n1*n2*n3).as_independent(n1) + (1, n1*n2*n3) + >>> (n1*n2*n3).as_independent(n2) + (n1, n2*n3) + >>> ((x-n1)*(x-y)).as_independent(x) + (1, (x - y)*(x - n1)) + + -- self is anything else: + + >>> (sin(x)).as_independent(x) + (1, sin(x)) + >>> (sin(x)).as_independent(y) + (sin(x), 1) + >>> exp(x+y).as_independent(x) + (1, exp(x + y)) + + -- force self to be treated as an Add: + + >>> (3*x).as_independent(x, as_Add=True) + (0, 3*x) + + -- force self to be treated as a Mul: + + >>> (3+x).as_independent(x, as_Add=False) + (1, x + 3) + >>> (-3+x).as_independent(x, as_Add=False) + (1, x - 3) + + Note how the below differs from the above in making the + constant on the dep term positive. + + >>> (y*(-3+x)).as_independent(x) + (y, x - 3) + + -- use .as_independent() for true independence testing instead + of .has(). The former considers only symbols in the free + symbols while the latter considers all symbols + + >>> from sympy import Integral + >>> I = Integral(x, (x, 1, 2)) + >>> I.has(x) + True + >>> x in I.free_symbols + False + >>> I.as_independent(x) == (I, 1) + True + >>> (I + x).as_independent(x) == (I, x) + True + + Note: when trying to get independent terms, a separation method + might need to be used first. 
In this case, it is important to keep + track of what you send to this routine so you know how to interpret + the returned values + + >>> from sympy import separatevars, log + >>> separatevars(exp(x+y)).as_independent(x) + (exp(y), exp(x)) + >>> (x + x*y).as_independent(y) + (x, x*y) + >>> separatevars(x + x*y).as_independent(y) + (x, y + 1) + >>> (x*(1 + y)).as_independent(y) + (x, y + 1) + >>> (x*(1 + y)).expand(mul=True).as_independent(y) + (x, x*y) + >>> a, b=symbols('a b', positive=True) + >>> (log(a*b).expand(log=True)).as_independent(b) + (log(a), log(b)) + + See Also + ======== + + separatevars + expand_log + sympy.core.add.Add.as_two_terms + sympy.core.mul.Mul.as_two_terms + as_coeff_mul + """ + from .symbol import Symbol + from .add import _unevaluated_Add + from .mul import _unevaluated_Mul + + if self is S.Zero: + return (self, self) + + func = self.func + if hint.get('as_Add', isinstance(self, Add) ): + want = Add + else: + want = Mul + + # sift out deps into symbolic and other and ignore + # all symbols but those that are in the free symbols + sym = set() + other = [] + for d in deps: + if isinstance(d, Symbol): # Symbol.is_Symbol is True + sym.add(d) + else: + other.append(d) + + def has(e): + """return the standard has() if there are no literal symbols, else + check to see that symbol-deps are in the free symbols.""" + has_other = e.has(*other) + if not sym: + return has_other + return has_other or e.has(*(e.free_symbols & sym)) + + if (want is not func or + func is not Add and func is not Mul): + if has(self): + return (want.identity, self) + else: + return (self, want.identity) + else: + if func is Add: + args = list(self.args) + else: + args, nc = self.args_cnc() + + d = sift(args, has) + depend = d[True] + indep = d[False] + if func is Add: # all terms were treated as commutative + return (Add(*indep), _unevaluated_Add(*depend)) + else: # handle noncommutative by stopping at first dependent term + for i, n in enumerate(nc): + if has(n): + depend.extend(nc[i:]) + break + indep.append(n) + return Mul(*indep), ( + Mul(*depend, evaluate=False) if nc else + _unevaluated_Mul(*depend)) + + def as_real_imag(self, deep=True, **hints): + """Performs complex expansion on 'self' and returns a tuple + containing collected both real and imaginary parts. This + method cannot be confused with re() and im() functions, + which does not perform complex expansion at evaluation. + + However it is possible to expand both re() and im() + functions and get exactly the same results as with + a single call to this function. + + >>> from sympy import symbols, I + + >>> x, y = symbols('x,y', real=True) + + >>> (x + y*I).as_real_imag() + (x, y) + + >>> from sympy.abc import z, w + + >>> (z + w*I).as_real_imag() + (re(z) - im(w), re(w) + im(z)) + + """ + if hints.get('ignore') == self: + return None + else: + from sympy.functions.elementary.complexes import im, re + return (re(self), im(self)) + + def as_powers_dict(self): + """Return self as a dictionary of factors with each factor being + treated as a power. The keys are the bases of the factors and the + values, the corresponding exponents. The resulting dictionary should + be used with caution if the expression is a Mul and contains non- + commutative factors since the order that they appeared will be lost in + the dictionary. + + See Also + ======== + as_ordered_factors: An alternative for noncommutative applications, + returning an ordered list of factors. 
+ args_cnc: Similar to as_ordered_factors, but guarantees separation + of commutative and noncommutative factors. + """ + d = defaultdict(int) + d.update([self.as_base_exp()]) + return d + + def as_coefficients_dict(self, *syms): + """Return a dictionary mapping terms to their Rational coefficient. + Since the dictionary is a defaultdict, inquiries about terms which + were not present will return a coefficient of 0. + + If symbols ``syms`` are provided, any multiplicative terms + independent of them will be considered a coefficient and a + regular dictionary of syms-dependent generators as keys and + their corresponding coefficients as values will be returned. + + Examples + ======== + + >>> from sympy.abc import a, x, y + >>> (3*x + a*x + 4).as_coefficients_dict() + {1: 4, x: 3, a*x: 1} + >>> _[a] + 0 + >>> (3*a*x).as_coefficients_dict() + {a*x: 3} + >>> (3*a*x).as_coefficients_dict(x) + {x: 3*a} + >>> (3*a*x).as_coefficients_dict(y) + {1: 3*a*x} + + """ + d = defaultdict(list) + if not syms: + for ai in Add.make_args(self): + c, m = ai.as_coeff_Mul() + d[m].append(c) + for k, v in d.items(): + if len(v) == 1: + d[k] = v[0] + else: + d[k] = Add(*v) + else: + ind, dep = self.as_independent(*syms, as_Add=True) + for i in Add.make_args(dep): + if i.is_Mul: + c, x = i.as_coeff_mul(*syms) + if c is S.One: + d[i].append(c) + else: + d[i._new_rawargs(*x)].append(c) + elif i: + d[i].append(S.One) + d = {k: Add(*d[k]) for k in d} + if ind is not S.Zero: + d.update({S.One: ind}) + di = defaultdict(int) + di.update(d) + return di + + def as_base_exp(self) -> tuple[Expr, Expr]: + # a -> b ** e + return self, S.One + + def as_coeff_mul(self, *deps, **kwargs) -> tuple[Expr, tuple[Expr, ...]]: + """Return the tuple (c, args) where self is written as a Mul, ``m``. + + c should be a Rational multiplied by any factors of the Mul that are + independent of deps. + + args should be a tuple of all other factors of m; args is empty + if self is a Number or if self is independent of deps (when given). + + This should be used when you do not know if self is a Mul or not but + you want to treat self as a Mul or if you want to process the + individual arguments of the tail of self as a Mul. + + - if you know self is a Mul and want only the head, use self.args[0]; + - if you do not want to process the arguments of the tail but need the + tail then use self.as_two_terms() which gives the head and tail; + - if you want to split self into an independent and dependent parts + use ``self.as_independent(*deps)`` + + >>> from sympy import S + >>> from sympy.abc import x, y + >>> (S(3)).as_coeff_mul() + (3, ()) + >>> (3*x*y).as_coeff_mul() + (3, (x, y)) + >>> (3*x*y).as_coeff_mul(x) + (3*y, (x,)) + >>> (3*y).as_coeff_mul(x) + (3*y, ()) + """ + if deps: + if not self.has(*deps): + return self, () + return S.One, (self,) + + def as_coeff_add(self, *deps) -> tuple[Expr, tuple[Expr, ...]]: + """Return the tuple (c, args) where self is written as an Add, ``a``. + + c should be a Rational added to any terms of the Add that are + independent of deps. + + args should be a tuple of all other terms of ``a``; args is empty + if self is a Number or if self is independent of deps (when given). + + This should be used when you do not know if self is an Add or not but + you want to treat self as an Add or if you want to process the + individual arguments of the tail of self as an Add. 
+ + - if you know self is an Add and want only the head, use self.args[0]; + - if you do not want to process the arguments of the tail but need the + tail then use self.as_two_terms() which gives the head and tail. + - if you want to split self into an independent and dependent parts + use ``self.as_independent(*deps)`` + + >>> from sympy import S + >>> from sympy.abc import x, y + >>> (S(3)).as_coeff_add() + (3, ()) + >>> (3 + x).as_coeff_add() + (3, (x,)) + >>> (3 + x + y).as_coeff_add(x) + (y + 3, (x,)) + >>> (3 + y).as_coeff_add(x) + (y + 3, ()) + + """ + if deps: + if not self.has_free(*deps): + return self, () + return S.Zero, (self,) + + def primitive(self): + """Return the positive Rational that can be extracted non-recursively + from every term of self (i.e., self is treated like an Add). This is + like the as_coeff_Mul() method but primitive always extracts a positive + Rational (never a negative or a Float). + + Examples + ======== + + >>> from sympy.abc import x + >>> (3*(x + 1)**2).primitive() + (3, (x + 1)**2) + >>> a = (6*x + 2); a.primitive() + (2, 3*x + 1) + >>> b = (x/2 + 3); b.primitive() + (1/2, x + 6) + >>> (a*b).primitive() == (1, a*b) + True + """ + if not self: + return S.One, S.Zero + c, r = self.as_coeff_Mul(rational=True) + if c.is_negative: + c, r = -c, -r + return c, r + + def as_content_primitive(self, radical=False, clear=True): + """This method should recursively remove a Rational from all arguments + and return that (content) and the new self (primitive). The content + should always be positive and ``Mul(*foo.as_content_primitive()) == foo``. + The primitive need not be in canonical form and should try to preserve + the underlying structure if possible (i.e. expand_mul should not be + applied to self). + + Examples + ======== + + >>> from sympy import sqrt + >>> from sympy.abc import x, y, z + + >>> eq = 2 + 2*x + 2*y*(3 + 3*y) + + The as_content_primitive function is recursive and retains structure: + + >>> eq.as_content_primitive() + (2, x + 3*y*(y + 1) + 1) + + Integer powers will have Rationals extracted from the base: + + >>> ((2 + 6*x)**2).as_content_primitive() + (4, (3*x + 1)**2) + >>> ((2 + 6*x)**(2*y)).as_content_primitive() + (1, (2*(3*x + 1))**(2*y)) + + Terms may end up joining once their as_content_primitives are added: + + >>> ((5*(x*(1 + y)) + 2*x*(3 + 3*y))).as_content_primitive() + (11, x*(y + 1)) + >>> ((3*(x*(1 + y)) + 2*x*(3 + 3*y))).as_content_primitive() + (9, x*(y + 1)) + >>> ((3*(z*(1 + y)) + 2.0*x*(3 + 3*y))).as_content_primitive() + (1, 6.0*x*(y + 1) + 3*z*(y + 1)) + >>> ((5*(x*(1 + y)) + 2*x*(3 + 3*y))**2).as_content_primitive() + (121, x**2*(y + 1)**2) + >>> ((x*(1 + y) + 0.4*x*(3 + 3*y))**2).as_content_primitive() + (1, 4.84*x**2*(y + 1)**2) + + Radical content can also be factored out of the primitive: + + >>> (2*sqrt(2) + 4*sqrt(10)).as_content_primitive(radical=True) + (2, sqrt(2)*(1 + 2*sqrt(5))) + + If clear=False (default is True) then content will not be removed + from an Add if it can be distributed to leave one or more + terms with integer coefficients. + + >>> (x/2 + y).as_content_primitive() + (1/2, x + 2*y) + >>> (x/2 + y).as_content_primitive(clear=False) + (1, x/2 + y) + """ + return S.One, self + + def as_numer_denom(self): + """Return the numerator and the denominator of an expression. + + expression -> a/b -> a, b + + This is just a stub that should be defined by + an object's class methods to get anything else. 
+ + See Also + ======== + + normal: return ``a/b`` instead of ``(a, b)`` + + """ + return self, S.One + + def normal(self): + """Return the expression as a fraction. + + expression -> a/b + + See Also + ======== + + as_numer_denom: return ``(a, b)`` instead of ``a/b`` + + """ + from .mul import _unevaluated_Mul + n, d = self.as_numer_denom() + if d is S.One: + return n + if d.is_Number: + return _unevaluated_Mul(n, 1/d) + else: + return n/d + + def extract_multiplicatively(self, c): + """Return None if it's not possible to make self in the form + c * something in a nice way, i.e. preserving the properties + of arguments of self. + + Examples + ======== + + >>> from sympy import symbols, Rational + + >>> x, y = symbols('x,y', real=True) + + >>> ((x*y)**3).extract_multiplicatively(x**2 * y) + x*y**2 + + >>> ((x*y)**3).extract_multiplicatively(x**4 * y) + + >>> (2*x).extract_multiplicatively(2) + x + + >>> (2*x).extract_multiplicatively(3) + + >>> (Rational(1, 2)*x).extract_multiplicatively(3) + x/6 + + """ + from sympy.functions.elementary.exponential import exp + from .add import _unevaluated_Add + c = sympify(c) + if self is S.NaN: + return None + if c is S.One: + return self + elif c == self: + return S.One + + if c.is_Add: + cc, pc = c.primitive() + if cc is not S.One: + c = Mul(cc, pc, evaluate=False) + + if c.is_Mul: + a, b = c.as_two_terms() + x = self.extract_multiplicatively(a) + if x is not None: + return x.extract_multiplicatively(b) + else: + return x + + quotient = self / c + if self.is_Number: + if self is S.Infinity: + if c.is_positive: + return S.Infinity + elif self is S.NegativeInfinity: + if c.is_negative: + return S.Infinity + elif c.is_positive: + return S.NegativeInfinity + elif self is S.ComplexInfinity: + if not c.is_zero: + return S.ComplexInfinity + elif self.is_Integer: + if not quotient.is_Integer: + return None + elif self.is_positive and quotient.is_negative: + return None + else: + return quotient + elif self.is_Rational: + if not quotient.is_Rational: + return None + elif self.is_positive and quotient.is_negative: + return None + else: + return quotient + elif self.is_Float: + if not quotient.is_Float: + return None + elif self.is_positive and quotient.is_negative: + return None + else: + return quotient + elif self.is_NumberSymbol or self.is_Symbol or self is S.ImaginaryUnit: + if quotient.is_Mul and len(quotient.args) == 2: + if quotient.args[0].is_Integer and quotient.args[0].is_positive and quotient.args[1] == self: + return quotient + elif quotient.is_Integer and c.is_Number: + return quotient + elif self.is_Add: + cs, ps = self.primitive() + # assert cs >= 1 + if c.is_Number and c is not S.NegativeOne: + # assert c != 1 (handled at top) + if cs is not S.One: + if c.is_negative: + xc = -(cs.extract_multiplicatively(-c)) + else: + xc = cs.extract_multiplicatively(c) + if xc is not None: + return xc*ps # rely on 2-arg Mul to restore Add + return # |c| != 1 can only be extracted from cs + if c == ps: + return cs + # check args of ps + newargs = [] + for arg in ps.args: + newarg = arg.extract_multiplicatively(c) + if newarg is None: + return # all or nothing + newargs.append(newarg) + if cs is not S.One: + args = [cs*t for t in newargs] + # args may be in different order + return _unevaluated_Add(*args) + else: + return Add._from_args(newargs) + elif self.is_Mul: + args = list(self.args) + for i, arg in enumerate(args): + newarg = arg.extract_multiplicatively(c) + if newarg is not None: + args[i] = newarg + return Mul(*args) + elif self.is_Pow or 
isinstance(self, exp): + sb, se = self.as_base_exp() + cb, ce = c.as_base_exp() + if cb == sb: + new_exp = se.extract_additively(ce) + if new_exp is not None: + return Pow(sb, new_exp) + elif c == sb: + new_exp = self.exp.extract_additively(1) + if new_exp is not None: + return Pow(sb, new_exp) + + def extract_additively(self, c): + """Return self - c if it's possible to subtract c from self and + make all matching coefficients move towards zero, else return None. + + Examples + ======== + + >>> from sympy.abc import x, y + >>> e = 2*x + 3 + >>> e.extract_additively(x + 1) + x + 2 + >>> e.extract_additively(3*x) + >>> e.extract_additively(4) + >>> (y*(x + 1)).extract_additively(x + 1) + >>> ((x + 1)*(x + 2*y + 1) + 3).extract_additively(x + 1) + (x + 1)*(x + 2*y) + 3 + + See Also + ======== + extract_multiplicatively + coeff + as_coefficient + + """ + + c = sympify(c) + if self is S.NaN: + return None + if c.is_zero: + return self + elif c == self: + return S.Zero + elif self == S.Zero: + return None + + if self.is_Number: + if not c.is_Number: + return None + co = self + diff = co - c + # XXX should we match types? i.e should 3 - .1 succeed? + if (co > 0 and diff >= 0 and diff < co or + co < 0 and diff <= 0 and diff > co): + return diff + return None + + if c.is_Number: + co, t = self.as_coeff_Add() + xa = co.extract_additively(c) + if xa is None: + return None + return xa + t + + # handle the args[0].is_Number case separately + # since we will have trouble looking for the coeff of + # a number. + if c.is_Add and c.args[0].is_Number: + # whole term as a term factor + co = self.coeff(c) + xa0 = (co.extract_additively(1) or 0)*c + if xa0: + diff = self - co*c + return (xa0 + (diff.extract_additively(c) or diff)) or None + # term-wise + h, t = c.as_coeff_Add() + sh, st = self.as_coeff_Add() + xa = sh.extract_additively(h) + if xa is None: + return None + xa2 = st.extract_additively(t) + if xa2 is None: + return None + return xa + xa2 + + # whole term as a term factor + co, diff = _corem(self, c) + xa0 = (co.extract_additively(1) or 0)*c + if xa0: + return (xa0 + (diff.extract_additively(c) or diff)) or None + # term-wise + coeffs = [] + for a in Add.make_args(c): + ac, at = a.as_coeff_Mul() + co = self.coeff(at) + if not co: + return None + coc, cot = co.as_coeff_Add() + xa = coc.extract_additively(ac) + if xa is None: + return None + self -= co*at + coeffs.append((cot + xa)*at) + coeffs.append(self) + return Add(*coeffs) + + @property + def expr_free_symbols(self): + """ + Like ``free_symbols``, but returns the free symbols only if + they are contained in an expression node. + + Examples + ======== + + >>> from sympy.abc import x, y + >>> (x + y).expr_free_symbols # doctest: +SKIP + {x, y} + + If the expression is contained in a non-expression object, do not return + the free symbols. Compare: + + >>> from sympy import Tuple + >>> t = Tuple(x + y) + >>> t.expr_free_symbols # doctest: +SKIP + set() + >>> t.free_symbols + {x, y} + """ + sympy_deprecation_warning(""" + The expr_free_symbols property is deprecated. Use free_symbols to get + the free symbols of an expression. + """, + deprecated_since_version="1.9", + active_deprecations_target="deprecated-expr-free-symbols") + return {j for i in self.args for j in i.expr_free_symbols} + + def could_extract_minus_sign(self): + """Return True if self has -1 as a leading factor or has + more literal negative signs than positive signs in a sum, + otherwise False. 
+ + Examples + ======== + + >>> from sympy.abc import x, y + >>> e = x - y + >>> {i.could_extract_minus_sign() for i in (e, -e)} + {False, True} + + Though the ``y - x`` is considered like ``-(x - y)``, since it + is in a product without a leading factor of -1, the result is + false below: + + >>> (x*(y - x)).could_extract_minus_sign() + False + + To put something in canonical form wrt to sign, use `signsimp`: + + >>> from sympy import signsimp + >>> signsimp(x*(y - x)) + -x*(x - y) + >>> _.could_extract_minus_sign() + True + """ + return False + + def extract_branch_factor(self, allow_half=False): + """ + Try to write self as ``exp_polar(2*pi*I*n)*z`` in a nice way. + Return (z, n). + + >>> from sympy import exp_polar, I, pi + >>> from sympy.abc import x, y + >>> exp_polar(I*pi).extract_branch_factor() + (exp_polar(I*pi), 0) + >>> exp_polar(2*I*pi).extract_branch_factor() + (1, 1) + >>> exp_polar(-pi*I).extract_branch_factor() + (exp_polar(I*pi), -1) + >>> exp_polar(3*pi*I + x).extract_branch_factor() + (exp_polar(x + I*pi), 1) + >>> (y*exp_polar(-5*pi*I)*exp_polar(3*pi*I + 2*pi*x)).extract_branch_factor() + (y*exp_polar(2*pi*x), -1) + >>> exp_polar(-I*pi/2).extract_branch_factor() + (exp_polar(-I*pi/2), 0) + + If allow_half is True, also extract exp_polar(I*pi): + + >>> exp_polar(I*pi).extract_branch_factor(allow_half=True) + (1, 1/2) + >>> exp_polar(2*I*pi).extract_branch_factor(allow_half=True) + (1, 1) + >>> exp_polar(3*I*pi).extract_branch_factor(allow_half=True) + (1, 3/2) + >>> exp_polar(-I*pi).extract_branch_factor(allow_half=True) + (1, -1/2) + """ + from sympy.functions.elementary.exponential import exp_polar + from sympy.functions.elementary.integers import ceiling + + n = S.Zero + res = S.One + args = Mul.make_args(self) + exps = [] + for arg in args: + if isinstance(arg, exp_polar): + exps += [arg.exp] + else: + res *= arg + piimult = S.Zero + extras = [] + + ipi = S.Pi*S.ImaginaryUnit + while exps: + exp = exps.pop() + if exp.is_Add: + exps += exp.args + continue + if exp.is_Mul: + coeff = exp.as_coefficient(ipi) + if coeff is not None: + piimult += coeff + continue + extras += [exp] + if piimult.is_number: + coeff = piimult + tail = () + else: + coeff, tail = piimult.as_coeff_add(*piimult.free_symbols) + # round down to nearest multiple of 2 + branchfact = ceiling(coeff/2 - S.Half)*2 + n += branchfact/2 + c = coeff - branchfact + if allow_half: + nc = c.extract_additively(1) + if nc is not None: + n += S.Half + c = nc + newexp = ipi*Add(*((c, ) + tail)) + Add(*extras) + if newexp != 0: + res *= exp_polar(newexp) + return res, n + + def is_polynomial(self, *syms): + r""" + Return True if self is a polynomial in syms and False otherwise. + + This checks if self is an exact polynomial in syms. This function + returns False for expressions that are "polynomials" with symbolic + exponents. Thus, you should be able to apply polynomial algorithms to + expressions for which this returns True, and Poly(expr, \*syms) should + work if and only if expr.is_polynomial(\*syms) returns True. The + polynomial does not have to be in expanded form. If no symbols are + given, all free symbols in the expression will be used. + + This is not part of the assumptions system. You cannot do + Symbol('z', polynomial=True). 
+ + Examples + ======== + + >>> from sympy import Symbol, Function + >>> x = Symbol('x') + >>> ((x**2 + 1)**4).is_polynomial(x) + True + >>> ((x**2 + 1)**4).is_polynomial() + True + >>> (2**x + 1).is_polynomial(x) + False + >>> (2**x + 1).is_polynomial(2**x) + True + >>> f = Function('f') + >>> (f(x) + 1).is_polynomial(x) + False + >>> (f(x) + 1).is_polynomial(f(x)) + True + >>> (1/f(x) + 1).is_polynomial(f(x)) + False + + >>> n = Symbol('n', nonnegative=True, integer=True) + >>> (x**n + 1).is_polynomial(x) + False + + This function does not attempt any nontrivial simplifications that may + result in an expression that does not appear to be a polynomial to + become one. + + >>> from sympy import sqrt, factor, cancel + >>> y = Symbol('y', positive=True) + >>> a = sqrt(y**2 + 2*y + 1) + >>> a.is_polynomial(y) + False + >>> factor(a) + y + 1 + >>> factor(a).is_polynomial(y) + True + + >>> b = (y**2 + 2*y + 1)/(y + 1) + >>> b.is_polynomial(y) + False + >>> cancel(b) + y + 1 + >>> cancel(b).is_polynomial(y) + True + + See also .is_rational_function() + + """ + if syms: + syms = set(map(sympify, syms)) + else: + syms = self.free_symbols + if not syms: + return True + + return self._eval_is_polynomial(syms) + + def _eval_is_polynomial(self, syms): + if self in syms: + return True + if not self.has_free(*syms): + # constant polynomial + return True + # subclasses should return True or False + + def is_rational_function(self, *syms): + """ + Test whether function is a ratio of two polynomials in the given + symbols, syms. When syms is not given, all free symbols will be used. + The rational function does not have to be in expanded or in any kind of + canonical form. + + This function returns False for expressions that are "rational + functions" with symbolic exponents. Thus, you should be able to call + .as_numer_denom() and apply polynomial algorithms to the result for + expressions for which this returns True. + + This is not part of the assumptions system. You cannot do + Symbol('z', rational_function=True). + + Examples + ======== + + >>> from sympy import Symbol, sin + >>> from sympy.abc import x, y + + >>> (x/y).is_rational_function() + True + + >>> (x**2).is_rational_function() + True + + >>> (x/sin(y)).is_rational_function(y) + False + + >>> n = Symbol('n', integer=True) + >>> (x**n + 1).is_rational_function(x) + False + + This function does not attempt any nontrivial simplifications that may + result in an expression that does not appear to be a rational function + to become one. + + >>> from sympy import sqrt, factor + >>> y = Symbol('y', positive=True) + >>> a = sqrt(y**2 + 2*y + 1)/y + >>> a.is_rational_function(y) + False + >>> factor(a) + (y + 1)/y + >>> factor(a).is_rational_function(y) + True + + See also is_algebraic_expr(). + + """ + if syms: + syms = set(map(sympify, syms)) + else: + syms = self.free_symbols + if not syms: + return self not in _illegal + + return self._eval_is_rational_function(syms) + + def _eval_is_rational_function(self, syms): + if self in syms: + return True + if not self.has_xfree(syms): + return True + # subclasses should return True or False + + def is_meromorphic(self, x, a): + """ + This tests whether an expression is meromorphic as + a function of the given symbol ``x`` at the point ``a``. + + This method is intended as a quick test that will return + None if no decision can be made without simplification or + more detailed analysis. 
+ + Examples + ======== + + >>> from sympy import zoo, log, sin, sqrt + >>> from sympy.abc import x + + >>> f = 1/x**2 + 1 - 2*x**3 + >>> f.is_meromorphic(x, 0) + True + >>> f.is_meromorphic(x, 1) + True + >>> f.is_meromorphic(x, zoo) + True + + >>> g = x**log(3) + >>> g.is_meromorphic(x, 0) + False + >>> g.is_meromorphic(x, 1) + True + >>> g.is_meromorphic(x, zoo) + False + + >>> h = sin(1/x)*x**2 + >>> h.is_meromorphic(x, 0) + False + >>> h.is_meromorphic(x, 1) + True + >>> h.is_meromorphic(x, zoo) + True + + Multivalued functions are considered meromorphic when their + branches are meromorphic. Thus most functions are meromorphic + everywhere except at essential singularities and branch points. + In particular, they will be meromorphic also on branch cuts + except at their endpoints. + + >>> log(x).is_meromorphic(x, -1) + True + >>> log(x).is_meromorphic(x, 0) + False + >>> sqrt(x).is_meromorphic(x, -1) + True + >>> sqrt(x).is_meromorphic(x, 0) + False + + """ + if not x.is_symbol: + raise TypeError("{} should be of symbol type".format(x)) + a = sympify(a) + + return self._eval_is_meromorphic(x, a) + + def _eval_is_meromorphic(self, x, a): + if self == x: + return True + if not self.has_free(x): + return True + # subclasses should return True or False + + def is_algebraic_expr(self, *syms): + """ + This tests whether a given expression is algebraic or not, in the + given symbols, syms. When syms is not given, all free symbols + will be used. The rational function does not have to be in expanded + or in any kind of canonical form. + + This function returns False for expressions that are "algebraic + expressions" with symbolic exponents. This is a simple extension to the + is_rational_function, including rational exponentiation. + + Examples + ======== + + >>> from sympy import Symbol, sqrt + >>> x = Symbol('x', real=True) + >>> sqrt(1 + x).is_rational_function() + False + >>> sqrt(1 + x).is_algebraic_expr() + True + + This function does not attempt any nontrivial simplifications that may + result in an expression that does not appear to be an algebraic + expression to become one. + + >>> from sympy import exp, factor + >>> a = sqrt(exp(x)**2 + 2*exp(x) + 1)/(exp(x) + 1) + >>> a.is_algebraic_expr(x) + False + >>> factor(a).is_algebraic_expr() + True + + See Also + ======== + + is_rational_function + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Algebraic_expression + + """ + if syms: + syms = set(map(sympify, syms)) + else: + syms = self.free_symbols + if not syms: + return True + + return self._eval_is_algebraic_expr(syms) + + def _eval_is_algebraic_expr(self, syms): + if self in syms: + return True + if not self.has_free(*syms): + return True + # subclasses should return True or False + + ################################################################################### + ##################### SERIES, LEADING TERM, LIMIT, ORDER METHODS ################## + ################################################################################### + + def series(self, x=None, x0=0, n=6, dir="+", logx=None, cdir=0): + """ + Series expansion of "self" around ``x = x0`` yielding either terms of + the series one by one (the lazy series given when n=None), else + all the terms at once when n != None. + + Returns the series expansion of "self" around the point ``x = x0`` + with respect to ``x`` up to ``O((x - x0)**n, x, x0)`` (default n is 6). + + If ``x=None`` and ``self`` is univariate, the univariate symbol will + be supplied, otherwise an error will be raised. 
+ + Parameters + ========== + + expr : Expression + The expression whose series is to be expanded. + + x : Symbol + It is the variable of the expression to be calculated. + + x0 : Value + The value around which ``x`` is calculated. Can be any value + from ``-oo`` to ``oo``. + + n : Value + The value used to represent the order in terms of ``x**n``, + up to which the series is to be expanded. + + dir : String, optional + The series-expansion can be bi-directional. If ``dir="+"``, + then (x->x0+). If ``dir="-", then (x->x0-). For infinite + ``x0`` (``oo`` or ``-oo``), the ``dir`` argument is determined + from the direction of the infinity (i.e., ``dir="-"`` for + ``oo``). + + logx : optional + It is used to replace any log(x) in the returned series with a + symbolic value rather than evaluating the actual value. + + cdir : optional + It stands for complex direction, and indicates the direction + from which the expansion needs to be evaluated. + + Examples + ======== + + >>> from sympy import cos, exp, tan + >>> from sympy.abc import x, y + >>> cos(x).series() + 1 - x**2/2 + x**4/24 + O(x**6) + >>> cos(x).series(n=4) + 1 - x**2/2 + O(x**4) + >>> cos(x).series(x, x0=1, n=2) + cos(1) - (x - 1)*sin(1) + O((x - 1)**2, (x, 1)) + >>> e = cos(x + exp(y)) + >>> e.series(y, n=2) + cos(x + 1) - y*sin(x + 1) + O(y**2) + >>> e.series(x, n=2) + cos(exp(y)) - x*sin(exp(y)) + O(x**2) + + If ``n=None`` then a generator of the series terms will be returned. + + >>> term=cos(x).series(n=None) + >>> [next(term) for i in range(2)] + [1, -x**2/2] + + For ``dir=+`` (default) the series is calculated from the right and + for ``dir=-`` the series from the left. For smooth functions this + flag will not alter the results. + + >>> abs(x).series(dir="+") + x + >>> abs(x).series(dir="-") + -x + >>> f = tan(x) + >>> f.series(x, 2, 6, "+") + tan(2) + (1 + tan(2)**2)*(x - 2) + (x - 2)**2*(tan(2)**3 + tan(2)) + + (x - 2)**3*(1/3 + 4*tan(2)**2/3 + tan(2)**4) + (x - 2)**4*(tan(2)**5 + + 5*tan(2)**3/3 + 2*tan(2)/3) + (x - 2)**5*(2/15 + 17*tan(2)**2/15 + + 2*tan(2)**4 + tan(2)**6) + O((x - 2)**6, (x, 2)) + + >>> f.series(x, 2, 3, "-") + tan(2) + (2 - x)*(-tan(2)**2 - 1) + (2 - x)**2*(tan(2)**3 + tan(2)) + + O((x - 2)**3, (x, 2)) + + For rational expressions this method may return original expression without the Order term. 
+ >>> (1/x).series(x, n=8) + 1/x + + Returns + ======= + + Expr : Expression + Series expansion of the expression about x0 + + Raises + ====== + + TypeError + If "n" and "x0" are infinity objects + + PoleError + If "x0" is an infinity object + + """ + if x is None: + syms = self.free_symbols + if not syms: + return self + elif len(syms) > 1: + raise ValueError('x must be given for multivariate functions.') + x = syms.pop() + + from .symbol import Dummy, Symbol + if isinstance(x, Symbol): + dep = x in self.free_symbols + else: + d = Dummy() + dep = d in self.xreplace({x: d}).free_symbols + if not dep: + if n is None: + return (s for s in [self]) + else: + return self + + if len(dir) != 1 or dir not in '+-': + raise ValueError("Dir must be '+' or '-'") + + x0 = sympify(x0) + cdir = sympify(cdir) + from sympy.functions.elementary.complexes import im, sign + + if not cdir.is_zero: + if cdir.is_real: + dir = '+' if cdir.is_positive else '-' + else: + dir = '+' if im(cdir).is_positive else '-' + else: + if x0 and x0.is_infinite: + cdir = sign(x0).simplify() + elif str(dir) == "+": + cdir = S.One + elif str(dir) == "-": + cdir = S.NegativeOne + elif cdir == S.Zero: + cdir = S.One + + cdir = cdir/abs(cdir) + + if x0 and x0.is_infinite: + from .function import PoleError + try: + s = self.subs(x, cdir/x).series(x, n=n, dir='+', cdir=1) + if n is None: + return (si.subs(x, cdir/x) for si in s) + return s.subs(x, cdir/x) + except PoleError: + s = self.subs(x, cdir*x).aseries(x, n=n) + return s.subs(x, cdir*x) + + # use rep to shift origin to x0 and change sign (if dir is negative) + # and undo the process with rep2 + if x0 or cdir != 1: + s = self.subs({x: x0 + cdir*x}).series(x, x0=0, n=n, dir='+', logx=logx, cdir=1) + if n is None: # lseries... + return (si.subs({x: x/cdir - x0/cdir}) for si in s) + return s.subs({x: x/cdir - x0/cdir}) + + # from here on it's x0=0 and dir='+' handling + + if x.is_positive is x.is_negative is None or x.is_Symbol is not True: + # replace x with an x that has a positive assumption + xpos = Dummy('x', positive=True) + rv = self.subs(x, xpos).series(xpos, x0, n, dir, logx=logx, cdir=cdir) + if n is None: + return (s.subs(xpos, x) for s in rv) + else: + return rv.subs(xpos, x) + + from sympy.series.order import Order + if n is not None: # nseries handling + s1 = self._eval_nseries(x, n=n, logx=logx, cdir=cdir) + o = s1.getO() or S.Zero + if o: + # make sure the requested order is returned + ngot = o.getn() + if ngot > n: + # leave o in its current form (e.g. 
with x*log(x)) so + # it eats terms properly, then replace it below + if n != 0: + s1 += o.subs(x, x**Rational(n, ngot)) + else: + s1 += Order(1, x) + elif ngot < n: + # increase the requested number of terms to get the desired + # number keep increasing (up to 9) until the received order + # is different than the original order and then predict how + # many additional terms are needed + from sympy.functions.elementary.integers import ceiling + for more in range(1, 9): + s1 = self._eval_nseries(x, n=n + more, logx=logx, cdir=cdir) + newn = s1.getn() + if newn != ngot: + ndo = n + ceiling((n - ngot)*more/(newn - ngot)) + s1 = self._eval_nseries(x, n=ndo, logx=logx, cdir=cdir) + while s1.getn() < n: + s1 = self._eval_nseries(x, n=ndo, logx=logx, cdir=cdir) + ndo += 1 + break + else: + raise ValueError('Could not calculate %s terms for %s' + % (str(n), self)) + s1 += Order(x**n, x) + o = s1.getO() + s1 = s1.removeO() + elif s1.has(Order): + # asymptotic expansion + return s1 + else: + o = Order(x**n, x) + s1done = s1.doit() + try: + if (s1done + o).removeO() == s1done: + o = S.Zero + except NotImplementedError: + return s1 + + try: + from sympy.simplify.radsimp import collect + return collect(s1, x) + o + except NotImplementedError: + return s1 + o + + else: # lseries handling + def yield_lseries(s): + """Return terms of lseries one at a time.""" + for si in s: + if not si.is_Add: + yield si + continue + # yield terms 1 at a time if possible + # by increasing order until all the + # terms have been returned + yielded = 0 + o = Order(si, x)*x + ndid = 0 + ndo = len(si.args) + while 1: + do = (si - yielded + o).removeO() + o *= x + if not do or do.is_Order: + continue + if do.is_Add: + ndid += len(do.args) + else: + ndid += 1 + yield do + if ndid == ndo: + break + yielded += do + + return yield_lseries(self.removeO()._eval_lseries(x, logx=logx, cdir=cdir)) + + def aseries(self, x=None, n=6, bound=0, hir=False): + """Asymptotic Series expansion of self. + This is equivalent to ``self.series(x, oo, n)``. + + Parameters + ========== + + self : Expression + The expression whose series is to be expanded. + + x : Symbol + It is the variable of the expression to be calculated. + + n : Value + The value used to represent the order in terms of ``x**n``, + up to which the series is to be expanded. + + hir : Boolean + Set this parameter to be True to produce hierarchical series. + It stops the recursion at an early level and may provide nicer + and more useful results. + + bound : Value, Integer + Use the ``bound`` parameter to give limit on rewriting + coefficients in its normalised form. + + Examples + ======== + + >>> from sympy import sin, exp + >>> from sympy.abc import x + + >>> e = sin(1/x + exp(-x)) - sin(1/x) + + >>> e.aseries(x) + (1/(24*x**4) - 1/(2*x**2) + 1 + O(x**(-6), (x, oo)))*exp(-x) + + >>> e.aseries(x, n=3, hir=True) + -exp(-2*x)*sin(1/x)/2 + exp(-x)*cos(1/x) + O(exp(-3*x), (x, oo)) + + >>> e = exp(exp(x)/(1 - 1/x)) + + >>> e.aseries(x) + exp(exp(x)/(1 - 1/x)) + + >>> e.aseries(x, bound=3) # doctest: +SKIP + exp(exp(x)/x**2)*exp(exp(x)/x)*exp(-exp(x) + exp(x)/(1 - 1/x) - exp(x)/x - exp(x)/x**2)*exp(exp(x)) + + For rational expressions this method may return original expression without the Order term. + >>> (1/x).aseries(x, n=8) + 1/x + + Returns + ======= + + Expr + Asymptotic series expansion of the expression. + + Notes + ===== + + This algorithm is directly induced from the limit computational algorithm provided by Gruntz. + It majorly uses the mrv and rewrite sub-routines. 
The overall idea of this algorithm is first + to look for the most rapidly varying subexpression w of a given expression f and then expands f + in a series in w. Then same thing is recursively done on the leading coefficient + till we get constant coefficients. + + If the most rapidly varying subexpression of a given expression f is f itself, + the algorithm tries to find a normalised representation of the mrv set and rewrites f + using this normalised representation. + + If the expansion contains an order term, it will be either ``O(x ** (-n))`` or ``O(w ** (-n))`` + where ``w`` belongs to the most rapidly varying expression of ``self``. + + References + ========== + + .. [1] Gruntz, Dominik. A new algorithm for computing asymptotic series. + In: Proc. 1993 Int. Symp. Symbolic and Algebraic Computation. 1993. + pp. 239-244. + .. [2] Gruntz thesis - p90 + .. [3] https://en.wikipedia.org/wiki/Asymptotic_expansion + + See Also + ======== + + Expr.aseries: See the docstring of this function for complete details of this wrapper. + """ + + from .symbol import Dummy + + if x.is_positive is x.is_negative is None: + xpos = Dummy('x', positive=True) + return self.subs(x, xpos).aseries(xpos, n, bound, hir).subs(xpos, x) + + from .function import PoleError + from sympy.series.gruntz import mrv, rewrite + + try: + om, exps = mrv(self, x) + except PoleError: + return self + + # We move one level up by replacing `x` by `exp(x)`, and then + # computing the asymptotic series for f(exp(x)). Then asymptotic series + # can be obtained by moving one-step back, by replacing x by ln(x). + + from sympy.functions.elementary.exponential import exp, log + from sympy.series.order import Order + + if x in om: + s = self.subs(x, exp(x)).aseries(x, n, bound, hir).subs(x, log(x)) + if s.getO(): + return s + Order(1/x**n, (x, S.Infinity)) + return s + + k = Dummy('k', positive=True) + # f is rewritten in terms of omega + func, logw = rewrite(exps, om, x, k) + + if self in om: + if bound <= 0: + return self + s = (self.exp).aseries(x, n, bound=bound) + s = s.func(*[t.removeO() for t in s.args]) + try: + res = exp(s.subs(x, 1/x).as_leading_term(x).subs(x, 1/x)) + except PoleError: + res = self + + func = exp(self.args[0] - res.args[0]) / k + logw = log(1/res) + + s = func.series(k, 0, n) + + # Hierarchical series + if hir: + return s.subs(k, exp(logw)) + + o = s.getO() + terms = sorted(Add.make_args(s.removeO()), key=lambda i: int(i.as_coeff_exponent(k)[1])) + s = S.Zero + has_ord = False + + # Then we recursively expand these coefficients one by one into + # their asymptotic series in terms of their most rapidly varying subexpressions. + for t in terms: + coeff, expo = t.as_coeff_exponent(k) + if coeff.has(x): + # Recursive step + snew = coeff.aseries(x, n, bound=bound-1) + if has_ord and snew.getO(): + break + elif snew.getO(): + has_ord = True + s += (snew * k**expo) + else: + s += t + + if not o or has_ord: + return s.subs(k, exp(logw)) + return (s + o).subs(k, exp(logw)) + + + def taylor_term(self, n, x, *previous_terms): + """General method for the taylor term. + + This method is slow, because it differentiates n-times. Subclasses can + redefine it to make it faster by using the "previous_terms". 
+ """ + from .symbol import Dummy + from sympy.functions.combinatorial.factorials import factorial + + x = sympify(x) + _x = Dummy('x') + return self.subs(x, _x).diff(_x, n).subs(_x, x).subs(x, 0) * x**n / factorial(n) + + def lseries(self, x=None, x0=0, dir='+', logx=None, cdir=0): + """ + Wrapper for series yielding an iterator of the terms of the series. + + Note: an infinite series will yield an infinite iterator. The following, + for exaxmple, will never terminate. It will just keep printing terms + of the sin(x) series:: + + for term in sin(x).lseries(x): + print term + + The advantage of lseries() over nseries() is that many times you are + just interested in the next term in the series (i.e. the first term for + example), but you do not know how many you should ask for in nseries() + using the "n" parameter. + + See also nseries(). + """ + return self.series(x, x0, n=None, dir=dir, logx=logx, cdir=cdir) + + def _eval_lseries(self, x, logx=None, cdir=0): + # default implementation of lseries is using nseries(), and adaptively + # increasing the "n". As you can see, it is not very efficient, because + # we are calculating the series over and over again. Subclasses should + # override this method and implement much more efficient yielding of + # terms. + n = 0 + series = self._eval_nseries(x, n=n, logx=logx, cdir=cdir) + + while series.is_Order: + n += 1 + series = self._eval_nseries(x, n=n, logx=logx, cdir=cdir) + + e = series.removeO() + yield e + if e is S.Zero: + return + + while 1: + while 1: + n += 1 + series = self._eval_nseries(x, n=n, logx=logx, cdir=cdir).removeO() + if e != series: + break + if (series - self).cancel() is S.Zero: + return + yield series - e + e = series + + def nseries(self, x=None, x0=0, n=6, dir='+', logx=None, cdir=0): + """ + Wrapper to _eval_nseries if assumptions allow, else to series. + + If x is given, x0 is 0, dir='+', and self has x, then _eval_nseries is + called. This calculates "n" terms in the innermost expressions and + then builds up the final series just by "cross-multiplying" everything + out. + + The optional ``logx`` parameter can be used to replace any log(x) in the + returned series with a symbolic value to avoid evaluating log(x) at 0. A + symbol to use in place of log(x) should be provided. + + Advantage -- it's fast, because we do not have to determine how many + terms we need to calculate in advance. + + Disadvantage -- you may end up with less terms than you may have + expected, but the O(x**n) term appended will always be correct and + so the result, though perhaps shorter, will also be correct. + + If any of those assumptions is not met, this is treated like a + wrapper to series which will try harder to return the correct + number of terms. + + See also lseries(). + + Examples + ======== + + >>> from sympy import sin, log, Symbol + >>> from sympy.abc import x, y + >>> sin(x).nseries(x, 0, 6) + x - x**3/6 + x**5/120 + O(x**6) + >>> log(x+1).nseries(x, 0, 5) + x - x**2/2 + x**3/3 - x**4/4 + O(x**5) + + Handling of the ``logx`` parameter --- in the following example the + expansion fails since ``sin`` does not have an asymptotic expansion + at -oo (the limit of log(x) as x approaches 0): + + >>> e = sin(log(x)) + >>> e.nseries(x, 0, 6) + Traceback (most recent call last): + ... + PoleError: ... + ... 
+ >>> logx = Symbol('logx') + >>> e.nseries(x, 0, 6, logx=logx) + sin(logx) + + In the following example, the expansion works but only returns self + unless the ``logx`` parameter is used: + + >>> e = x**y + >>> e.nseries(x, 0, 2) + x**y + >>> e.nseries(x, 0, 2, logx=logx) + exp(logx*y) + + """ + if x and x not in self.free_symbols: + return self + if x is None or x0 or dir != '+': # {see XPOS above} or (x.is_positive == x.is_negative == None): + return self.series(x, x0, n, dir, cdir=cdir) + else: + return self._eval_nseries(x, n=n, logx=logx, cdir=cdir) + + def _eval_nseries(self, x, n, logx, cdir): + """ + Return terms of series for self up to O(x**n) at x=0 + from the positive direction. + + This is a method that should be overridden in subclasses. Users should + never call this method directly (use .nseries() instead), so you do not + have to write docstrings for _eval_nseries(). + """ + raise NotImplementedError(filldedent(""" + The _eval_nseries method should be added to + %s to give terms up to O(x**n) at x=0 + from the positive direction so it is available when + nseries calls it.""" % self.func) + ) + + def limit(self, x, xlim, dir='+'): + """ Compute limit x->xlim. + """ + from sympy.series.limits import limit + return limit(self, x, xlim, dir) + + def compute_leading_term(self, x, logx=None): + """Deprecated function to compute the leading term of a series. + + as_leading_term is only allowed for results of .series() + This is a wrapper to compute a series first. + """ + from sympy.utilities.exceptions import SymPyDeprecationWarning + + SymPyDeprecationWarning( + feature="compute_leading_term", + useinstead="as_leading_term", + issue=21843, + deprecated_since_version="1.12" + ).warn() + + from sympy.functions.elementary.piecewise import Piecewise, piecewise_fold + if self.has(Piecewise): + expr = piecewise_fold(self) + else: + expr = self + if self.removeO() == 0: + return self + + from .symbol import Dummy + from sympy.functions.elementary.exponential import log + from sympy.series.order import Order + + _logx = logx + logx = Dummy('logx') if logx is None else logx + res = Order(1) + incr = S.One + while res.is_Order: + res = expr._eval_nseries(x, n=1+incr, logx=logx).cancel().powsimp().trigsimp() + incr *= 2 + + if _logx is None: + res = res.subs(logx, log(x)) + + return res.as_leading_term(x) + + @cacheit + def as_leading_term(self, *symbols, logx=None, cdir=0): + """ + Returns the leading (nonzero) term of the series expansion of self. + + The _eval_as_leading_term routines are used to do this, and they must + always return a non-zero value. + + Examples + ======== + + >>> from sympy.abc import x + >>> (1 + x + x**2).as_leading_term(x) + 1 + >>> (1/x**2 + x + x**2).as_leading_term(x) + x**(-2) + + """ + if len(symbols) > 1: + c = self + for x in symbols: + c = c.as_leading_term(x, logx=logx, cdir=cdir) + return c + elif not symbols: + return self + x = sympify(symbols[0]) + if not x.is_symbol: + raise ValueError('expecting a Symbol but got %s' % x) + if x not in self.free_symbols: + return self + obj = self._eval_as_leading_term(x, logx=logx, cdir=cdir) + if obj is not None: + from sympy.simplify.powsimp import powsimp + return powsimp(obj, deep=True, combine='exp') + raise NotImplementedError('as_leading_term(%s, %s)' % (self, x)) + + def _eval_as_leading_term(self, x, logx=None, cdir=0): + return self + + def as_coeff_exponent(self, x) -> tuple[Expr, Expr]: + """ ``c*x**e -> c,e`` where x can be any symbolic expression. 
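+
+        For illustration (a minimal sketch): when no single power of ``x`` can
+        be extracted, the exponent defaults to zero:
+
+        >>> from sympy.abc import x
+        >>> (3*x**2).as_coeff_exponent(x)
+        (3, 2)
+        >>> (x + 1).as_coeff_exponent(x)
+        (x + 1, 0)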
+ """ + from sympy.simplify.radsimp import collect + s = collect(self, x) + c, p = s.as_coeff_mul(x) + if len(p) == 1: + b, e = p[0].as_base_exp() + if b == x: + return c, e + return s, S.Zero + + def leadterm(self, x, logx=None, cdir=0): + """ + Returns the leading term a*x**b as a tuple (a, b). + + Examples + ======== + + >>> from sympy.abc import x + >>> (1+x+x**2).leadterm(x) + (1, 0) + >>> (1/x**2+x+x**2).leadterm(x) + (1, -2) + + """ + from .symbol import Dummy + from sympy.functions.elementary.exponential import log + l = self.as_leading_term(x, logx=logx, cdir=cdir) + d = Dummy('logx') + if l.has(log(x)): + l = l.subs(log(x), d) + c, e = l.as_coeff_exponent(x) + if x in c.free_symbols: + raise ValueError(filldedent(""" + cannot compute leadterm(%s, %s). The coefficient + should have been free of %s but got %s""" % (self, x, x, c))) + c = c.subs(d, log(x)) + return c, e + + def as_coeff_Mul(self, rational: bool = False) -> tuple['Number', Expr]: + """Efficiently extract the coefficient of a product.""" + return S.One, self + + def as_coeff_Add(self, rational=False) -> tuple['Number', Expr]: + """Efficiently extract the coefficient of a summation.""" + return S.Zero, self + + def fps(self, x=None, x0=0, dir=1, hyper=True, order=4, rational=True, + full=False): + """ + Compute formal power power series of self. + + See the docstring of the :func:`fps` function in sympy.series.formal for + more information. + """ + from sympy.series.formal import fps + + return fps(self, x, x0, dir, hyper, order, rational, full) + + def fourier_series(self, limits=None): + """Compute fourier sine/cosine series of self. + + See the docstring of the :func:`fourier_series` in sympy.series.fourier + for more information. + """ + from sympy.series.fourier import fourier_series + + return fourier_series(self, limits) + + ################################################################################### + ##################### DERIVATIVE, INTEGRAL, FUNCTIONAL METHODS #################### + ################################################################################### + + def diff(self, *symbols, **assumptions): + assumptions.setdefault("evaluate", True) + return _derivative_dispatch(self, *symbols, **assumptions) + + ########################################################################### + ###################### EXPRESSION EXPANSION METHODS ####################### + ########################################################################### + + # Relevant subclasses should override _eval_expand_hint() methods. See + # the docstring of expand() for more info. + + def _eval_expand_complex(self, **hints): + real, imag = self.as_real_imag(**hints) + return real + S.ImaginaryUnit*imag + + @staticmethod + def _expand_hint(expr, hint, deep=True, **hints): + """ + Helper for ``expand()``. Recursively calls ``expr._eval_expand_hint()``. + + Returns ``(expr, hit)``, where expr is the (possibly) expanded + ``expr`` and ``hit`` is ``True`` if ``expr`` was truly expanded and + ``False`` otherwise. 
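+
+        A rough sketch of the contract, using the standard ``mul`` hint (the
+        outputs shown below are illustrative):
+
+        >>> from sympy import Expr
+        >>> from sympy.abc import x, y
+        >>> Expr._expand_hint(x*(y + 1), '_eval_expand_mul')
+        (x*y + x, True)
+        >>> Expr._expand_hint(x + y, '_eval_expand_mul')
+        (x + y, False)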
+ """ + hit = False + # XXX: Hack to support non-Basic args + # | + # V + if deep and getattr(expr, 'args', ()) and not expr.is_Atom: + sargs = [] + for arg in expr.args: + arg, arghit = Expr._expand_hint(arg, hint, **hints) + hit |= arghit + sargs.append(arg) + + if hit: + expr = expr.func(*sargs) + + if hasattr(expr, hint): + newexpr = getattr(expr, hint)(**hints) + if newexpr != expr: + return (newexpr, True) + + return (expr, hit) + + @cacheit + def expand(self, deep=True, modulus=None, power_base=True, power_exp=True, + mul=True, log=True, multinomial=True, basic=True, **hints): + """ + Expand an expression using hints. + + See the docstring of the expand() function in sympy.core.function for + more information. + + """ + from sympy.simplify.radsimp import fraction + + hints.update(power_base=power_base, power_exp=power_exp, mul=mul, + log=log, multinomial=multinomial, basic=basic) + + expr = self + if hints.pop('frac', False): + n, d = [a.expand(deep=deep, modulus=modulus, **hints) + for a in fraction(self)] + return n/d + elif hints.pop('denom', False): + n, d = fraction(self) + return n/d.expand(deep=deep, modulus=modulus, **hints) + elif hints.pop('numer', False): + n, d = fraction(self) + return n.expand(deep=deep, modulus=modulus, **hints)/d + + # Although the hints are sorted here, an earlier hint may get applied + # at a given node in the expression tree before another because of how + # the hints are applied. e.g. expand(log(x*(y + z))) -> log(x*y + + # x*z) because while applying log at the top level, log and mul are + # applied at the deeper level in the tree so that when the log at the + # upper level gets applied, the mul has already been applied at the + # lower level. + + # Additionally, because hints are only applied once, the expression + # may not be expanded all the way. For example, if mul is applied + # before multinomial, x*(x + 1)**2 won't be expanded all the way. For + # now, we just use a special case to make multinomial run before mul, + # so that at least polynomials will be expanded all the way. In the + # future, smarter heuristics should be applied. 
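+        #
+        # Rough illustration of why this ordering matters: for
+        #   expand(x*(x + 1)**2)
+        # multinomial must run first, turning the power into
+        #   x*(x**2 + 2*x + 1),
+        # so that the subsequent mul pass can distribute x and return
+        #   x**3 + 2*x**2 + x.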
+ # TODO: Smarter heuristics + + def _expand_hint_key(hint): + """Make multinomial come before mul""" + if hint == 'mul': + return 'mulz' + return hint + + for hint in sorted(hints.keys(), key=_expand_hint_key): + use_hint = hints[hint] + if use_hint: + hint = '_eval_expand_' + hint + expr, hit = Expr._expand_hint(expr, hint, deep=deep, **hints) + + while True: + was = expr + if hints.get('multinomial', False): + expr, _ = Expr._expand_hint( + expr, '_eval_expand_multinomial', deep=deep, **hints) + if hints.get('mul', False): + expr, _ = Expr._expand_hint( + expr, '_eval_expand_mul', deep=deep, **hints) + if hints.get('log', False): + expr, _ = Expr._expand_hint( + expr, '_eval_expand_log', deep=deep, **hints) + if expr == was: + break + + if modulus is not None: + modulus = sympify(modulus) + + if not modulus.is_Integer or modulus <= 0: + raise ValueError( + "modulus must be a positive integer, got %s" % modulus) + + terms = [] + + for term in Add.make_args(expr): + coeff, tail = term.as_coeff_Mul(rational=True) + + coeff %= modulus + + if coeff: + terms.append(coeff*tail) + + expr = Add(*terms) + + return expr + + ########################################################################### + ################### GLOBAL ACTION VERB WRAPPER METHODS #################### + ########################################################################### + + def integrate(self, *args, **kwargs): + """See the integrate function in sympy.integrals""" + from sympy.integrals.integrals import integrate + return integrate(self, *args, **kwargs) + + def nsimplify(self, constants=(), tolerance=None, full=False): + """See the nsimplify function in sympy.simplify""" + from sympy.simplify.simplify import nsimplify + return nsimplify(self, constants, tolerance, full) + + def separate(self, deep=False, force=False): + """See the separate function in sympy.simplify""" + from .function import expand_power_base + return expand_power_base(self, deep=deep, force=force) + + def collect(self, syms, func=None, evaluate=True, exact=False, distribute_order_term=True): + """See the collect function in sympy.simplify""" + from sympy.simplify.radsimp import collect + return collect(self, syms, func, evaluate, exact, distribute_order_term) + + def together(self, *args, **kwargs): + """See the together function in sympy.polys""" + from sympy.polys.rationaltools import together + return together(self, *args, **kwargs) + + def apart(self, x=None, **args): + """See the apart function in sympy.polys""" + from sympy.polys.partfrac import apart + return apart(self, x, **args) + + def ratsimp(self): + """See the ratsimp function in sympy.simplify""" + from sympy.simplify.ratsimp import ratsimp + return ratsimp(self) + + def trigsimp(self, **args): + """See the trigsimp function in sympy.simplify""" + from sympy.simplify.trigsimp import trigsimp + return trigsimp(self, **args) + + def radsimp(self, **kwargs): + """See the radsimp function in sympy.simplify""" + from sympy.simplify.radsimp import radsimp + return radsimp(self, **kwargs) + + def powsimp(self, *args, **kwargs): + """See the powsimp function in sympy.simplify""" + from sympy.simplify.powsimp import powsimp + return powsimp(self, *args, **kwargs) + + def combsimp(self): + """See the combsimp function in sympy.simplify""" + from sympy.simplify.combsimp import combsimp + return combsimp(self) + + def gammasimp(self): + """See the gammasimp function in sympy.simplify""" + from sympy.simplify.gammasimp import gammasimp + return gammasimp(self) + + def factor(self, *gens, 
**args): + """See the factor() function in sympy.polys.polytools""" + from sympy.polys.polytools import factor + return factor(self, *gens, **args) + + def cancel(self, *gens, **args): + """See the cancel function in sympy.polys""" + from sympy.polys.polytools import cancel + return cancel(self, *gens, **args) + + def invert(self, g, *gens, **args): + """Return the multiplicative inverse of ``self`` mod ``g`` + where ``self`` (and ``g``) may be symbolic expressions). + + See Also + ======== + sympy.core.numbers.mod_inverse, sympy.polys.polytools.invert + """ + if self.is_number and getattr(g, 'is_number', True): + from .numbers import mod_inverse + return mod_inverse(self, g) + from sympy.polys.polytools import invert + return invert(self, g, *gens, **args) + + def round(self, n=None): + """Return x rounded to the given decimal place. + + If a complex number would results, apply round to the real + and imaginary components of the number. + + Examples + ======== + + >>> from sympy import pi, E, I, S, Number + >>> pi.round() + 3 + >>> pi.round(2) + 3.14 + >>> (2*pi + E*I).round() + 6 + 3*I + + The round method has a chopping effect: + + >>> (2*pi + I/10).round() + 6 + >>> (pi/10 + 2*I).round() + 2*I + >>> (pi/10 + E*I).round(2) + 0.31 + 2.72*I + + Notes + ===== + + The Python ``round`` function uses the SymPy ``round`` method so it + will always return a SymPy number (not a Python float or int): + + >>> isinstance(round(S(123), -2), Number) + True + """ + x = self + + if not x.is_number: + raise TypeError("Cannot round symbolic expression") + if not x.is_Atom: + if not pure_complex(x.n(2), or_real=True): + raise TypeError( + 'Expected a number but got %s:' % func_name(x)) + elif x in _illegal: + return x + if x.is_extended_real is False: + r, i = x.as_real_imag() + return r.round(n) + S.ImaginaryUnit*i.round(n) + if not x: + return S.Zero if n is None else x + + p = as_int(n or 0) + + if x.is_Integer: + return Integer(round(int(x), p)) + + digits_to_decimal = _mag(x) # _mag(12) = 2, _mag(.012) = -1 + allow = digits_to_decimal + p + precs = [f._prec for f in x.atoms(Float)] + dps = prec_to_dps(max(precs)) if precs else None + if dps is None: + # assume everything is exact so use the Python + # float default or whatever was requested + dps = max(15, allow) + else: + allow = min(allow, dps) + # this will shift all digits to right of decimal + # and give us dps to work with as an int + shift = -digits_to_decimal + dps + extra = 1 # how far we look past known digits + # NOTE + # mpmath will calculate the binary representation to + # an arbitrary number of digits but we must base our + # answer on a finite number of those digits, e.g. + # .575 2589569785738035/2**52 in binary. + # mpmath shows us that the first 18 digits are + # >>> Float(.575).n(18) + # 0.574999999999999956 + # The default precision is 15 digits and if we ask + # for 15 we get + # >>> Float(.575).n(15) + # 0.575000000000000 + # mpmath handles rounding at the 15th digit. But we + # need to be careful since the user might be asking + # for rounding at the last digit and our semantics + # are to round toward the even final digit when there + # is a tie. So the extra digit will be used to make + # that decision. In this case, the value is the same + # to 15 digits: + # >>> Float(.575).n(16) + # 0.5750000000000000 + # Now converting this to the 15 known digits gives + # 575000000000000.0 + # which rounds to integer + # 5750000000000000 + # And now we can round to the desired digt, e.g. 
at + # the second from the left and we get + # 5800000000000000 + # and rescaling that gives + # 0.58 + # as the final result. + # If the value is made slightly less than 0.575 we might + # still obtain the same value: + # >>> Float(.575-1e-16).n(16)*10**15 + # 574999999999999.8 + # What 15 digits best represents the known digits (which are + # to the left of the decimal? 5750000000000000, the same as + # before. The only way we will round down (in this case) is + # if we declared that we had more than 15 digits of precision. + # For example, if we use 16 digits of precision, the integer + # we deal with is + # >>> Float(.575-1e-16).n(17)*10**16 + # 5749999999999998.4 + # and this now rounds to 5749999999999998 and (if we round to + # the 2nd digit from the left) we get 5700000000000000. + # + xf = x.n(dps + extra)*Pow(10, shift) + xi = Integer(xf) + # use the last digit to select the value of xi + # nearest to x before rounding at the desired digit + sign = 1 if x > 0 else -1 + dif2 = sign*(xf - xi).n(extra) + if dif2 < 0: + raise NotImplementedError( + 'not expecting int(x) to round away from 0') + if dif2 > .5: + xi += sign # round away from 0 + elif dif2 == .5: + xi += sign if xi%2 else -sign # round toward even + # shift p to the new position + ip = p - shift + # let Python handle the int rounding then rescale + xr = round(xi.p, ip) + # restore scale + rv = Rational(xr, Pow(10, shift)) + # return Float or Integer + if rv.is_Integer: + if n is None: # the single-arg case + return rv + # use str or else it won't be a float + return Float(str(rv), dps) # keep same precision + else: + if not allow and rv > self: + allow += 1 + return Float(rv, allow) + + __round__ = round + + def _eval_derivative_matrix_lines(self, x): + from sympy.matrices.expressions.matexpr import _LeftRightArgs + return [_LeftRightArgs([S.One, S.One], higher=self._eval_derivative(x))] + + +class AtomicExpr(Atom, Expr): + """ + A parent class for object which are both atoms and Exprs. + + For example: Symbol, Number, Rational, Integer, ... + But not: Add, Mul, Pow, ... + """ + is_number = False + is_Atom = True + + __slots__ = () + + def _eval_derivative(self, s): + if self == s: + return S.One + return S.Zero + + def _eval_derivative_n_times(self, s, n): + from .containers import Tuple + from sympy.matrices.expressions.matexpr import MatrixExpr + from sympy.matrices.common import MatrixCommon + if isinstance(s, (MatrixCommon, Tuple, Iterable, MatrixExpr)): + return super()._eval_derivative_n_times(s, n) + from .relational import Eq + from sympy.functions.elementary.piecewise import Piecewise + if self == s: + return Piecewise((self, Eq(n, 0)), (1, Eq(n, 1)), (0, True)) + else: + return Piecewise((self, Eq(n, 0)), (0, True)) + + def _eval_is_polynomial(self, syms): + return True + + def _eval_is_rational_function(self, syms): + return self not in _illegal + + def _eval_is_meromorphic(self, x, a): + from sympy.calculus.accumulationbounds import AccumBounds + return (not self.is_Number or self.is_finite) and not isinstance(self, AccumBounds) + + def _eval_is_algebraic_expr(self, syms): + return True + + def _eval_nseries(self, x, n, logx, cdir=0): + return self + + @property + def expr_free_symbols(self): + sympy_deprecation_warning(""" + The expr_free_symbols property is deprecated. Use free_symbols to get + the free symbols of an expression. 
+ """, + deprecated_since_version="1.9", + active_deprecations_target="deprecated-expr-free-symbols") + return {self} + + +def _mag(x): + r"""Return integer $i$ such that $0.1 \le x/10^i < 1$ + + Examples + ======== + + >>> from sympy.core.expr import _mag + >>> from sympy import Float + >>> _mag(Float(.1)) + 0 + >>> _mag(Float(.01)) + -1 + >>> _mag(Float(1234)) + 4 + """ + from math import log10, ceil, log + xpos = abs(x.n()) + if not xpos: + return S.Zero + try: + mag_first_dig = int(ceil(log10(xpos))) + except (ValueError, OverflowError): + mag_first_dig = int(ceil(Float(mpf_log(xpos._mpf_, 53))/log(10))) + # check that we aren't off by 1 + if (xpos/10**mag_first_dig) >= 1: + assert 1 <= (xpos/10**mag_first_dig) < 10 + mag_first_dig += 1 + return mag_first_dig + + +class UnevaluatedExpr(Expr): + """ + Expression that is not evaluated unless released. + + Examples + ======== + + >>> from sympy import UnevaluatedExpr + >>> from sympy.abc import x + >>> x*(1/x) + 1 + >>> x*UnevaluatedExpr(1/x) + x*1/x + + """ + + def __new__(cls, arg, **kwargs): + arg = _sympify(arg) + obj = Expr.__new__(cls, arg, **kwargs) + return obj + + def doit(self, **hints): + if hints.get("deep", True): + return self.args[0].doit(**hints) + else: + return self.args[0] + + + +def unchanged(func, *args): + """Return True if `func` applied to the `args` is unchanged. + Can be used instead of `assert foo == foo`. + + Examples + ======== + + >>> from sympy import Piecewise, cos, pi + >>> from sympy.core.expr import unchanged + >>> from sympy.abc import x + + >>> unchanged(cos, 1) # instead of assert cos(1) == cos(1) + True + + >>> unchanged(cos, pi) + False + + Comparison of args uses the builtin capabilities of the object's + arguments to test for equality so args can be defined loosely. 
Here, + the ExprCondPair arguments of Piecewise compare as equal to the + tuples that can be used to create the Piecewise: + + >>> unchanged(Piecewise, (x, x > 1), (0, True)) + True + """ + f = func(*args) + return f.func == func and f.args == args + + +class ExprBuilder: + def __init__(self, op, args=None, validator=None, check=True): + if not hasattr(op, "__call__"): + raise TypeError("op {} needs to be callable".format(op)) + self.op = op + if args is None: + self.args = [] + else: + self.args = args + self.validator = validator + if (validator is not None) and check: + self.validate() + + @staticmethod + def _build_args(args): + return [i.build() if isinstance(i, ExprBuilder) else i for i in args] + + def validate(self): + if self.validator is None: + return + args = self._build_args(self.args) + self.validator(*args) + + def build(self, check=True): + args = self._build_args(self.args) + if self.validator and check: + self.validator(*args) + return self.op(*args) + + def append_argument(self, arg, check=True): + self.args.append(arg) + if self.validator and check: + self.validate(*self.args) + + def __getitem__(self, item): + if item == 0: + return self.op + else: + return self.args[item-1] + + def __repr__(self): + return str(self.build()) + + def search_element(self, elem): + for i, arg in enumerate(self.args): + if isinstance(arg, ExprBuilder): + ret = arg.search_index(elem) + if ret is not None: + return (i,) + ret + elif id(arg) == id(elem): + return (i,) + return None + + +from .mul import Mul +from .add import Add +from .power import Pow +from .function import Function, _derivative_dispatch +from .mod import Mod +from .exprtools import factor_terms +from .numbers import Float, Integer, Rational, _illegal diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/core/function.py b/env-llmeval/lib/python3.10/site-packages/sympy/core/function.py new file mode 100644 index 0000000000000000000000000000000000000000..db15050173e843b1d6d601f9bf556ec667e8188a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/core/function.py @@ -0,0 +1,3385 @@ +""" +There are three types of functions implemented in SymPy: + + 1) defined functions (in the sense that they can be evaluated) like + exp or sin; they have a name and a body: + f = exp + 2) undefined function which have a name but no body. Undefined + functions can be defined using a Function class as follows: + f = Function('f') + (the result will be a Function instance) + 3) anonymous function (or lambda function) which have a body (defined + with dummy variables) but have no name: + f = Lambda(x, exp(x)*x) + f = Lambda((x, y), exp(x)*y) + The fourth type of functions are composites, like (sin + cos)(x); these work in + SymPy core, but are not yet part of SymPy. 
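+
+    For instance, a Lambda can be called directly and evaluates immediately
+    (a minimal sketch):
+
+    >>> from sympy import Lambda, exp
+    >>> from sympy.abc import x
+    >>> Lambda(x, exp(x)*x)(0)
+    0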
+ + Examples + ======== + + >>> import sympy + >>> f = sympy.Function("f") + >>> from sympy.abc import x + >>> f(x) + f(x) + >>> print(sympy.srepr(f(x).func)) + Function('f') + >>> f(x).args + (x,) + +""" + +from __future__ import annotations +from typing import Any +from collections.abc import Iterable + +from .add import Add +from .basic import Basic, _atomic +from .cache import cacheit +from .containers import Tuple, Dict +from .decorators import _sympifyit +from .evalf import pure_complex +from .expr import Expr, AtomicExpr +from .logic import fuzzy_and, fuzzy_or, fuzzy_not, FuzzyBool +from .mul import Mul +from .numbers import Rational, Float, Integer +from .operations import LatticeOp +from .parameters import global_parameters +from .rules import Transform +from .singleton import S +from .sympify import sympify, _sympify + +from .sorting import default_sort_key, ordered +from sympy.utilities.exceptions import (sympy_deprecation_warning, + SymPyDeprecationWarning, ignore_warnings) +from sympy.utilities.iterables import (has_dups, sift, iterable, + is_sequence, uniq, topological_sort) +from sympy.utilities.lambdify import MPMATH_TRANSLATIONS +from sympy.utilities.misc import as_int, filldedent, func_name + +import mpmath +from mpmath.libmp.libmpf import prec_to_dps + +import inspect +from collections import Counter + +def _coeff_isneg(a): + """Return True if the leading Number is negative. + + Examples + ======== + + >>> from sympy.core.function import _coeff_isneg + >>> from sympy import S, Symbol, oo, pi + >>> _coeff_isneg(-3*pi) + True + >>> _coeff_isneg(S(3)) + False + >>> _coeff_isneg(-oo) + True + >>> _coeff_isneg(Symbol('n', negative=True)) # coeff is 1 + False + + For matrix expressions: + + >>> from sympy import MatrixSymbol, sqrt + >>> A = MatrixSymbol("A", 3, 3) + >>> _coeff_isneg(-sqrt(2)*A) + True + >>> _coeff_isneg(sqrt(2)*A) + False + """ + + if a.is_MatMul: + a = a.args[0] + if a.is_Mul: + a = a.args[0] + return a.is_Number and a.is_extended_negative + + +class PoleError(Exception): + pass + + +class ArgumentIndexError(ValueError): + def __str__(self): + return ("Invalid operation with argument number %s for Function %s" % + (self.args[1], self.args[0])) + + +class BadSignatureError(TypeError): + '''Raised when a Lambda is created with an invalid signature''' + pass + + +class BadArgumentsError(TypeError): + '''Raised when a Lambda is called with an incorrect number of arguments''' + pass + + +# Python 3 version that does not raise a Deprecation warning +def arity(cls): + """Return the arity of the function if it is known, else None. + + Explanation + =========== + + When default values are specified for some arguments, they are + optional and the arity is reported as a tuple of possible values. + + Examples + ======== + + >>> from sympy import arity, log + >>> arity(lambda x: x) + 1 + >>> arity(log) + (1, 2) + >>> arity(lambda *x: sum(x)) is None + True + """ + eval_ = getattr(cls, 'eval', cls) + + parameters = inspect.signature(eval_).parameters.items() + if [p for _, p in parameters if p.kind == p.VAR_POSITIONAL]: + return + p_or_k = [p for _, p in parameters if p.kind == p.POSITIONAL_OR_KEYWORD] + # how many have no default and how many have a default value + no, yes = map(len, sift(p_or_k, + lambda p:p.default == p.empty, binary=True)) + return no if not yes else tuple(range(no, no + yes + 1)) + +class FunctionClass(type): + """ + Base class for function classes. FunctionClass is a subclass of type. 
+ + Use Function('' [ , signature ]) to create + undefined function classes. + """ + _new = type.__new__ + + def __init__(cls, *args, **kwargs): + # honor kwarg value or class-defined value before using + # the number of arguments in the eval function (if present) + nargs = kwargs.pop('nargs', cls.__dict__.get('nargs', arity(cls))) + if nargs is None and 'nargs' not in cls.__dict__: + for supcls in cls.__mro__: + if hasattr(supcls, '_nargs'): + nargs = supcls._nargs + break + else: + continue + + # Canonicalize nargs here; change to set in nargs. + if is_sequence(nargs): + if not nargs: + raise ValueError(filldedent(''' + Incorrectly specified nargs as %s: + if there are no arguments, it should be + `nargs = 0`; + if there are any number of arguments, + it should be + `nargs = None`''' % str(nargs))) + nargs = tuple(ordered(set(nargs))) + elif nargs is not None: + nargs = (as_int(nargs),) + cls._nargs = nargs + + # When __init__ is called from UndefinedFunction it is called with + # just one arg but when it is called from subclassing Function it is + # called with the usual (name, bases, namespace) type() signature. + if len(args) == 3: + namespace = args[2] + if 'eval' in namespace and not isinstance(namespace['eval'], classmethod): + raise TypeError("eval on Function subclasses should be a class method (defined with @classmethod)") + + @property + def __signature__(self): + """ + Allow Python 3's inspect.signature to give a useful signature for + Function subclasses. + """ + # Python 3 only, but backports (like the one in IPython) still might + # call this. + try: + from inspect import signature + except ImportError: + return None + + # TODO: Look at nargs + return signature(self.eval) + + @property + def free_symbols(self): + return set() + + @property + def xreplace(self): + # Function needs args so we define a property that returns + # a function that takes args...and then use that function + # to return the right value + return lambda rule, **_: rule.get(self, self) + + @property + def nargs(self): + """Return a set of the allowed number of arguments for the function. + + Examples + ======== + + >>> from sympy import Function + >>> f = Function('f') + + If the function can take any number of arguments, the set of whole + numbers is returned: + + >>> Function('f').nargs + Naturals0 + + If the function was initialized to accept one or more arguments, a + corresponding set will be returned: + + >>> Function('f', nargs=1).nargs + {1} + >>> Function('f', nargs=(2, 1)).nargs + {1, 2} + + The undefined function, after application, also has the nargs + attribute; the actual number of arguments is always available by + checking the ``args`` attribute: + + >>> f = Function('f') + >>> f(1).nargs + Naturals0 + >>> len(f(1).args) + 1 + """ + from sympy.sets.sets import FiniteSet + # XXX it would be nice to handle this in __init__ but there are import + # problems with trying to import FiniteSet there + return FiniteSet(*self._nargs) if self._nargs else S.Naturals0 + + def _valid_nargs(self, n : int) -> bool: + """ Return True if the specified integer is a valid number of arguments + + The number of arguments n is guaranteed to be an integer and positive + + """ + if self._nargs: + return n in self._nargs + + nargs = self.nargs + return nargs is S.Naturals0 or n in nargs + + def __repr__(cls): + return cls.__name__ + + +class Application(Basic, metaclass=FunctionClass): + """ + Base class for applied functions. 
+ + Explanation + =========== + + Instances of Application represent the result of applying an application of + any type to any object. + """ + + is_Function = True + + @cacheit + def __new__(cls, *args, **options): + from sympy.sets.fancysets import Naturals0 + from sympy.sets.sets import FiniteSet + + args = list(map(sympify, args)) + evaluate = options.pop('evaluate', global_parameters.evaluate) + # WildFunction (and anything else like it) may have nargs defined + # and we throw that value away here + options.pop('nargs', None) + + if options: + raise ValueError("Unknown options: %s" % options) + + if evaluate: + evaluated = cls.eval(*args) + if evaluated is not None: + return evaluated + + obj = super().__new__(cls, *args, **options) + + # make nargs uniform here + sentinel = object() + objnargs = getattr(obj, "nargs", sentinel) + if objnargs is not sentinel: + # things passing through here: + # - functions subclassed from Function (e.g. myfunc(1).nargs) + # - functions like cos(1).nargs + # - AppliedUndef with given nargs like Function('f', nargs=1)(1).nargs + # Canonicalize nargs here + if is_sequence(objnargs): + nargs = tuple(ordered(set(objnargs))) + elif objnargs is not None: + nargs = (as_int(objnargs),) + else: + nargs = None + else: + # things passing through here: + # - WildFunction('f').nargs + # - AppliedUndef with no nargs like Function('f')(1).nargs + nargs = obj._nargs # note the underscore here + # convert to FiniteSet + obj.nargs = FiniteSet(*nargs) if nargs else Naturals0() + return obj + + @classmethod + def eval(cls, *args): + """ + Returns a canonical form of cls applied to arguments args. + + Explanation + =========== + + The ``eval()`` method is called when the class ``cls`` is about to be + instantiated and it should return either some simplified instance + (possible of some other class), or if the class ``cls`` should be + unmodified, return None. + + Examples of ``eval()`` for the function "sign" + + .. code-block:: python + + @classmethod + def eval(cls, arg): + if arg is S.NaN: + return S.NaN + if arg.is_zero: return S.Zero + if arg.is_positive: return S.One + if arg.is_negative: return S.NegativeOne + if isinstance(arg, Mul): + coeff, terms = arg.as_coeff_Mul(rational=True) + if coeff is not S.One: + return cls(coeff) * cls(terms) + + """ + return + + @property + def func(self): + return self.__class__ + + def _eval_subs(self, old, new): + if (old.is_Function and new.is_Function and + callable(old) and callable(new) and + old == self.func and len(self.args) in new.nargs): + return new(*[i._subs(old, new) for i in self.args]) + + +class Function(Application, Expr): + r""" + Base class for applied mathematical functions. + + It also serves as a constructor for undefined function classes. + + See the :ref:`custom-functions` guide for details on how to subclass + ``Function`` and what methods can be defined. + + Examples + ======== + + **Undefined Functions** + + To create an undefined function, pass a string of the function name to + ``Function``. + + >>> from sympy import Function, Symbol + >>> x = Symbol('x') + >>> f = Function('f') + >>> g = Function('g')(x) + >>> f + f + >>> f(x) + f(x) + >>> g + g(x) + >>> f(x).diff(x) + Derivative(f(x), x) + >>> g.diff(x) + Derivative(g(x), x) + + Assumptions can be passed to ``Function`` the same as with a + :class:`~.Symbol`. 
Alternatively, you can use a ``Symbol`` with + assumptions for the function name and the function will inherit the name + and assumptions associated with the ``Symbol``: + + >>> f_real = Function('f', real=True) + >>> f_real(x).is_real + True + >>> f_real_inherit = Function(Symbol('f', real=True)) + >>> f_real_inherit(x).is_real + True + + Note that assumptions on a function are unrelated to the assumptions on + the variables it is called on. If you want to add a relationship, subclass + ``Function`` and define custom assumptions handler methods. See the + :ref:`custom-functions-assumptions` section of the :ref:`custom-functions` + guide for more details. + + **Custom Function Subclasses** + + The :ref:`custom-functions` guide has several + :ref:`custom-functions-complete-examples` of how to subclass ``Function`` + to create a custom function. + + """ + + @property + def _diff_wrt(self): + return False + + @cacheit + def __new__(cls, *args, **options): + # Handle calls like Function('f') + if cls is Function: + return UndefinedFunction(*args, **options) + + n = len(args) + + if not cls._valid_nargs(n): + # XXX: exception message must be in exactly this format to + # make it work with NumPy's functions like vectorize(). See, + # for example, https://github.com/numpy/numpy/issues/1697. + # The ideal solution would be just to attach metadata to + # the exception and change NumPy to take advantage of this. + temp = ('%(name)s takes %(qual)s %(args)s ' + 'argument%(plural)s (%(given)s given)') + raise TypeError(temp % { + 'name': cls, + 'qual': 'exactly' if len(cls.nargs) == 1 else 'at least', + 'args': min(cls.nargs), + 'plural': 's'*(min(cls.nargs) != 1), + 'given': n}) + + evaluate = options.get('evaluate', global_parameters.evaluate) + result = super().__new__(cls, *args, **options) + if evaluate and isinstance(result, cls) and result.args: + _should_evalf = [cls._should_evalf(a) for a in result.args] + pr2 = min(_should_evalf) + if pr2 > 0: + pr = max(_should_evalf) + result = result.evalf(prec_to_dps(pr)) + + return _sympify(result) + + @classmethod + def _should_evalf(cls, arg): + """ + Decide if the function should automatically evalf(). + + Explanation + =========== + + By default (in this implementation), this happens if (and only if) the + ARG is a floating point number (including complex numbers). + This function is used by __new__. + + Returns the precision to evalf to, or -1 if it should not evalf. 
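+
+        For illustration, a Float argument triggers automatic evaluation while
+        an exact Integer does not:
+
+        >>> from sympy import cos
+        >>> cos(1)
+        cos(1)
+        >>> cos(1.0)
+        0.540302305868140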
+ """ + if arg.is_Float: + return arg._prec + if not arg.is_Add: + return -1 + m = pure_complex(arg) + if m is None: + return -1 + # the elements of m are of type Number, so have a _prec + return max(m[0]._prec, m[1]._prec) + + @classmethod + def class_key(cls): + from sympy.sets.fancysets import Naturals0 + funcs = { + 'exp': 10, + 'log': 11, + 'sin': 20, + 'cos': 21, + 'tan': 22, + 'cot': 23, + 'sinh': 30, + 'cosh': 31, + 'tanh': 32, + 'coth': 33, + 'conjugate': 40, + 're': 41, + 'im': 42, + 'arg': 43, + } + name = cls.__name__ + + try: + i = funcs[name] + except KeyError: + i = 0 if isinstance(cls.nargs, Naturals0) else 10000 + + return 4, i, name + + def _eval_evalf(self, prec): + + def _get_mpmath_func(fname): + """Lookup mpmath function based on name""" + if isinstance(self, AppliedUndef): + # Shouldn't lookup in mpmath but might have ._imp_ + return None + + if not hasattr(mpmath, fname): + fname = MPMATH_TRANSLATIONS.get(fname, None) + if fname is None: + return None + return getattr(mpmath, fname) + + _eval_mpmath = getattr(self, '_eval_mpmath', None) + if _eval_mpmath is None: + func = _get_mpmath_func(self.func.__name__) + args = self.args + else: + func, args = _eval_mpmath() + + # Fall-back evaluation + if func is None: + imp = getattr(self, '_imp_', None) + if imp is None: + return None + try: + return Float(imp(*[i.evalf(prec) for i in self.args]), prec) + except (TypeError, ValueError): + return None + + # Convert all args to mpf or mpc + # Convert the arguments to *higher* precision than requested for the + # final result. + # XXX + 5 is a guess, it is similar to what is used in evalf.py. Should + # we be more intelligent about it? + try: + args = [arg._to_mpmath(prec + 5) for arg in args] + def bad(m): + from mpmath import mpf, mpc + # the precision of an mpf value is the last element + # if that is 1 (and m[1] is not 1 which would indicate a + # power of 2), then the eval failed; so check that none of + # the arguments failed to compute to a finite precision. + # Note: An mpc value has two parts, the re and imag tuple; + # check each of those parts, too. Anything else is allowed to + # pass + if isinstance(m, mpf): + m = m._mpf_ + return m[1] !=1 and m[-1] == 1 + elif isinstance(m, mpc): + m, n = m._mpc_ + return m[1] !=1 and m[-1] == 1 and \ + n[1] !=1 and n[-1] == 1 + else: + return False + if any(bad(a) for a in args): + raise ValueError # one or more args failed to compute with significance + except ValueError: + return + + with mpmath.workprec(prec): + v = func(*args) + + return Expr._from_mpmath(v, prec) + + def _eval_derivative(self, s): + # f(x).diff(s) -> x.diff(s) * f.fdiff(1)(s) + i = 0 + l = [] + for a in self.args: + i += 1 + da = a.diff(s) + if da.is_zero: + continue + try: + df = self.fdiff(i) + except ArgumentIndexError: + df = Function.fdiff(self, i) + l.append(df * da) + return Add(*l) + + def _eval_is_commutative(self): + return fuzzy_and(a.is_commutative for a in self.args) + + def _eval_is_meromorphic(self, x, a): + if not self.args: + return True + if any(arg.has(x) for arg in self.args[1:]): + return False + + arg = self.args[0] + if not arg._eval_is_meromorphic(x, a): + return None + + return fuzzy_not(type(self).is_singular(arg.subs(x, a))) + + _singularities: FuzzyBool | tuple[Expr, ...] = None + + @classmethod + def is_singular(cls, a): + """ + Tests whether the argument is an essential singularity + or a branch point, or the functions is non-holomorphic. 
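+
+        A minimal sketch with a hypothetical subclass (``myf`` is not a real
+        SymPy function) declaring a single singular point:
+
+        >>> from sympy import Function, S
+        >>> class myf(Function):
+        ...     _singularities = (S.One,)
+        >>> myf.is_singular(S.One)
+        True
+        >>> myf.is_singular(S(2))
+        False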
+ """ + ss = cls._singularities + if ss in (True, None, False): + return ss + + return fuzzy_or(a.is_infinite if s is S.ComplexInfinity + else (a - s).is_zero for s in ss) + + def as_base_exp(self): + """ + Returns the method as the 2-tuple (base, exponent). + """ + return self, S.One + + def _eval_aseries(self, n, args0, x, logx): + """ + Compute an asymptotic expansion around args0, in terms of self.args. + This function is only used internally by _eval_nseries and should not + be called directly; derived classes can overwrite this to implement + asymptotic expansions. + """ + raise PoleError(filldedent(''' + Asymptotic expansion of %s around %s is + not implemented.''' % (type(self), args0))) + + def _eval_nseries(self, x, n, logx, cdir=0): + """ + This function does compute series for multivariate functions, + but the expansion is always in terms of *one* variable. + + Examples + ======== + + >>> from sympy import atan2 + >>> from sympy.abc import x, y + >>> atan2(x, y).series(x, n=2) + atan2(0, y) + x/y + O(x**2) + >>> atan2(x, y).series(y, n=2) + -y/x + atan2(x, 0) + O(y**2) + + This function also computes asymptotic expansions, if necessary + and possible: + + >>> from sympy import loggamma + >>> loggamma(1/x)._eval_nseries(x,0,None) + -1/x - log(x)/x + log(x)/2 + O(1) + + """ + from .symbol import uniquely_named_symbol + from sympy.series.order import Order + from sympy.sets.sets import FiniteSet + args = self.args + args0 = [t.limit(x, 0) for t in args] + if any(t.is_finite is False for t in args0): + from .numbers import oo, zoo, nan + a = [t.as_leading_term(x, logx=logx) for t in args] + a0 = [t.limit(x, 0) for t in a] + if any(t.has(oo, -oo, zoo, nan) for t in a0): + return self._eval_aseries(n, args0, x, logx) + # Careful: the argument goes to oo, but only logarithmically so. We + # are supposed to do a power series expansion "around the + # logarithmic term". e.g. 
+ # f(1+x+log(x)) + # -> f(1+logx) + x*f'(1+logx) + O(x**2) + # where 'logx' is given in the argument + a = [t._eval_nseries(x, n, logx) for t in args] + z = [r - r0 for (r, r0) in zip(a, a0)] + p = [Dummy() for _ in z] + q = [] + v = None + for ai, zi, pi in zip(a0, z, p): + if zi.has(x): + if v is not None: + raise NotImplementedError + q.append(ai + pi) + v = pi + else: + q.append(ai) + e1 = self.func(*q) + if v is None: + return e1 + s = e1._eval_nseries(v, n, logx) + o = s.getO() + s = s.removeO() + s = s.subs(v, zi).expand() + Order(o.expr.subs(v, zi), x) + return s + if (self.func.nargs is S.Naturals0 + or (self.func.nargs == FiniteSet(1) and args0[0]) + or any(c > 1 for c in self.func.nargs)): + e = self + e1 = e.expand() + if e == e1: + #for example when e = sin(x+1) or e = sin(cos(x)) + #let's try the general algorithm + if len(e.args) == 1: + # issue 14411 + e = e.func(e.args[0].cancel()) + term = e.subs(x, S.Zero) + if term.is_finite is False or term is S.NaN: + raise PoleError("Cannot expand %s around 0" % (self)) + series = term + fact = S.One + + _x = uniquely_named_symbol('xi', self) + e = e.subs(x, _x) + for i in range(1, n): + fact *= Rational(i) + e = e.diff(_x) + subs = e.subs(_x, S.Zero) + if subs is S.NaN: + # try to evaluate a limit if we have to + subs = e.limit(_x, S.Zero) + if subs.is_finite is False: + raise PoleError("Cannot expand %s around 0" % (self)) + term = subs*(x**i)/fact + term = term.expand() + series += term + return series + Order(x**n, x) + return e1.nseries(x, n=n, logx=logx) + arg = self.args[0] + l = [] + g = None + # try to predict a number of terms needed + nterms = n + 2 + cf = Order(arg.as_leading_term(x), x).getn() + if cf != 0: + nterms = (n/cf).ceiling() + for i in range(nterms): + g = self.taylor_term(i, arg, g) + g = g.nseries(x, n=n, logx=logx) + l.append(g) + return Add(*l) + Order(x**n, x) + + def fdiff(self, argindex=1): + """ + Returns the first derivative of the function. + """ + if not (1 <= argindex <= len(self.args)): + raise ArgumentIndexError(self, argindex) + ix = argindex - 1 + A = self.args[ix] + if A._diff_wrt: + if len(self.args) == 1 or not A.is_Symbol: + return _derivative_dispatch(self, A) + for i, v in enumerate(self.args): + if i != ix and A in v.free_symbols: + # it can't be in any other argument's free symbols + # issue 8510 + break + else: + return _derivative_dispatch(self, A) + + # See issue 4624 and issue 4719, 5600 and 8510 + D = Dummy('xi_%i' % argindex, dummy_index=hash(A)) + args = self.args[:ix] + (D,) + self.args[ix + 1:] + return Subs(Derivative(self.func(*args), D), D, A) + + def _eval_as_leading_term(self, x, logx=None, cdir=0): + """Stub that should be overridden by new Functions to return + the first non-zero term in a series if ever an x-dependent + argument whose leading term vanishes as x -> 0 might be encountered. + See, for example, cos._eval_as_leading_term. + """ + from sympy.series.order import Order + args = [a.as_leading_term(x, logx=logx) for a in self.args] + o = Order(1, x) + if any(x in a.free_symbols and o.contains(a) for a in args): + # Whereas x and any finite number are contained in O(1, x), + # expressions like 1/x are not. If any arg simplified to a + # vanishing expression as x -> 0 (like x or x**2, but not + # 3, 1/x, etc...) then the _eval_as_leading_term is needed + # to supply the first non-zero term of the series, + # + # e.g. 
expression leading term + # ---------- ------------ + # cos(1/x) cos(1/x) + # cos(cos(x)) cos(1) + # cos(x) 1 <- _eval_as_leading_term needed + # sin(x) x <- _eval_as_leading_term needed + # + raise NotImplementedError( + '%s has no _eval_as_leading_term routine' % self.func) + else: + return self.func(*args) + + +class AppliedUndef(Function): + """ + Base class for expressions resulting from the application of an undefined + function. + """ + + is_number = False + + def __new__(cls, *args, **options): + args = list(map(sympify, args)) + u = [a.name for a in args if isinstance(a, UndefinedFunction)] + if u: + raise TypeError('Invalid argument: expecting an expression, not UndefinedFunction%s: %s' % ( + 's'*(len(u) > 1), ', '.join(u))) + obj = super().__new__(cls, *args, **options) + return obj + + def _eval_as_leading_term(self, x, logx=None, cdir=0): + return self + + @property + def _diff_wrt(self): + """ + Allow derivatives wrt to undefined functions. + + Examples + ======== + + >>> from sympy import Function, Symbol + >>> f = Function('f') + >>> x = Symbol('x') + >>> f(x)._diff_wrt + True + >>> f(x).diff(x) + Derivative(f(x), x) + """ + return True + + +class UndefSageHelper: + """ + Helper to facilitate Sage conversion. + """ + def __get__(self, ins, typ): + import sage.all as sage + if ins is None: + return lambda: sage.function(typ.__name__) + else: + args = [arg._sage_() for arg in ins.args] + return lambda : sage.function(ins.__class__.__name__)(*args) + +_undef_sage_helper = UndefSageHelper() + +class UndefinedFunction(FunctionClass): + """ + The (meta)class of undefined functions. + """ + def __new__(mcl, name, bases=(AppliedUndef,), __dict__=None, **kwargs): + from .symbol import _filter_assumptions + # Allow Function('f', real=True) + # and/or Function(Symbol('f', real=True)) + assumptions, kwargs = _filter_assumptions(kwargs) + if isinstance(name, Symbol): + assumptions = name._merge(assumptions) + name = name.name + elif not isinstance(name, str): + raise TypeError('expecting string or Symbol for name') + else: + commutative = assumptions.get('commutative', None) + assumptions = Symbol(name, **assumptions).assumptions0 + if commutative is None: + assumptions.pop('commutative') + __dict__ = __dict__ or {} + # put the `is_*` for into __dict__ + __dict__.update({'is_%s' % k: v for k, v in assumptions.items()}) + # You can add other attributes, although they do have to be hashable + # (but seriously, if you want to add anything other than assumptions, + # just subclass Function) + __dict__.update(kwargs) + # add back the sanitized assumptions without the is_ prefix + kwargs.update(assumptions) + # Save these for __eq__ + __dict__.update({'_kwargs': kwargs}) + # do this for pickling + __dict__['__module__'] = None + obj = super().__new__(mcl, name, bases, __dict__) + obj.name = name + obj._sage_ = _undef_sage_helper + return obj + + def __instancecheck__(cls, instance): + return cls in type(instance).__mro__ + + _kwargs: dict[str, bool | None] = {} + + def __hash__(self): + return hash((self.class_key(), frozenset(self._kwargs.items()))) + + def __eq__(self, other): + return (isinstance(other, self.__class__) and + self.class_key() == other.class_key() and + self._kwargs == other._kwargs) + + def __ne__(self, other): + return not self == other + + @property + def _diff_wrt(self): + return False + + +# XXX: The type: ignore on WildFunction is because mypy complains: +# +# sympy/core/function.py:939: error: Cannot determine type of 'sort_key' in +# base class 'Expr' +# +# Somehow 
this is because of the @cacheit decorator but it is not clear how to +# fix it. + + +class WildFunction(Function, AtomicExpr): # type: ignore + """ + A WildFunction function matches any function (with its arguments). + + Examples + ======== + + >>> from sympy import WildFunction, Function, cos + >>> from sympy.abc import x, y + >>> F = WildFunction('F') + >>> f = Function('f') + >>> F.nargs + Naturals0 + >>> x.match(F) + >>> F.match(F) + {F_: F_} + >>> f(x).match(F) + {F_: f(x)} + >>> cos(x).match(F) + {F_: cos(x)} + >>> f(x, y).match(F) + {F_: f(x, y)} + + To match functions with a given number of arguments, set ``nargs`` to the + desired value at instantiation: + + >>> F = WildFunction('F', nargs=2) + >>> F.nargs + {2} + >>> f(x).match(F) + >>> f(x, y).match(F) + {F_: f(x, y)} + + To match functions with a range of arguments, set ``nargs`` to a tuple + containing the desired number of arguments, e.g. if ``nargs = (1, 2)`` + then functions with 1 or 2 arguments will be matched. + + >>> F = WildFunction('F', nargs=(1, 2)) + >>> F.nargs + {1, 2} + >>> f(x).match(F) + {F_: f(x)} + >>> f(x, y).match(F) + {F_: f(x, y)} + >>> f(x, y, 1).match(F) + + """ + + # XXX: What is this class attribute used for? + include: set[Any] = set() + + def __init__(cls, name, **assumptions): + from sympy.sets.sets import Set, FiniteSet + cls.name = name + nargs = assumptions.pop('nargs', S.Naturals0) + if not isinstance(nargs, Set): + # Canonicalize nargs here. See also FunctionClass. + if is_sequence(nargs): + nargs = tuple(ordered(set(nargs))) + elif nargs is not None: + nargs = (as_int(nargs),) + nargs = FiniteSet(*nargs) + cls.nargs = nargs + + def matches(self, expr, repl_dict=None, old=False): + if not isinstance(expr, (AppliedUndef, Function)): + return None + if len(expr.args) not in self.nargs: + return None + + if repl_dict is None: + repl_dict = {} + else: + repl_dict = repl_dict.copy() + + repl_dict[self] = expr + return repl_dict + + +class Derivative(Expr): + """ + Carries out differentiation of the given expression with respect to symbols. 
+ + Examples + ======== + + >>> from sympy import Derivative, Function, symbols, Subs + >>> from sympy.abc import x, y + >>> f, g = symbols('f g', cls=Function) + + >>> Derivative(x**2, x, evaluate=True) + 2*x + + Denesting of derivatives retains the ordering of variables: + + >>> Derivative(Derivative(f(x, y), y), x) + Derivative(f(x, y), y, x) + + Contiguously identical symbols are merged into a tuple giving + the symbol and the count: + + >>> Derivative(f(x), x, x, y, x) + Derivative(f(x), (x, 2), y, x) + + If the derivative cannot be performed, and evaluate is True, the + order of the variables of differentiation will be made canonical: + + >>> Derivative(f(x, y), y, x, evaluate=True) + Derivative(f(x, y), x, y) + + Derivatives with respect to undefined functions can be calculated: + + >>> Derivative(f(x)**2, f(x), evaluate=True) + 2*f(x) + + Such derivatives will show up when the chain rule is used to + evalulate a derivative: + + >>> f(g(x)).diff(x) + Derivative(f(g(x)), g(x))*Derivative(g(x), x) + + Substitution is used to represent derivatives of functions with + arguments that are not symbols or functions: + + >>> f(2*x + 3).diff(x) == 2*Subs(f(y).diff(y), y, 2*x + 3) + True + + Notes + ===== + + Simplification of high-order derivatives: + + Because there can be a significant amount of simplification that can be + done when multiple differentiations are performed, results will be + automatically simplified in a fairly conservative fashion unless the + keyword ``simplify`` is set to False. + + >>> from sympy import sqrt, diff, Function, symbols + >>> from sympy.abc import x, y, z + >>> f, g = symbols('f,g', cls=Function) + + >>> e = sqrt((x + 1)**2 + x) + >>> diff(e, (x, 5), simplify=False).count_ops() + 136 + >>> diff(e, (x, 5)).count_ops() + 30 + + Ordering of variables: + + If evaluate is set to True and the expression cannot be evaluated, the + list of differentiation symbols will be sorted, that is, the expression is + assumed to have continuous derivatives up to the order asked. + + Derivative wrt non-Symbols: + + For the most part, one may not differentiate wrt non-symbols. + For example, we do not allow differentiation wrt `x*y` because + there are multiple ways of structurally defining where x*y appears + in an expression: a very strict definition would make + (x*y*z).diff(x*y) == 0. Derivatives wrt defined functions (like + cos(x)) are not allowed, either: + + >>> (x*y*z).diff(x*y) + Traceback (most recent call last): + ... + ValueError: Can't calculate derivative wrt x*y. + + To make it easier to work with variational calculus, however, + derivatives wrt AppliedUndef and Derivatives are allowed. + For example, in the Euler-Lagrange method one may write + F(t, u, v) where u = f(t) and v = f'(t). 
These variables can be + written explicitly as functions of time:: + + >>> from sympy.abc import t + >>> F = Function('F') + >>> U = f(t) + >>> V = U.diff(t) + + The derivative wrt f(t) can be obtained directly: + + >>> direct = F(t, U, V).diff(U) + + When differentiation wrt a non-Symbol is attempted, the non-Symbol + is temporarily converted to a Symbol while the differentiation + is performed and the same answer is obtained: + + >>> indirect = F(t, U, V).subs(U, x).diff(x).subs(x, U) + >>> assert direct == indirect + + The implication of this non-symbol replacement is that all + functions are treated as independent of other functions and the + symbols are independent of the functions that contain them:: + + >>> x.diff(f(x)) + 0 + >>> g(x).diff(f(x)) + 0 + + It also means that derivatives are assumed to depend only + on the variables of differentiation, not on anything contained + within the expression being differentiated:: + + >>> F = f(x) + >>> Fx = F.diff(x) + >>> Fx.diff(F) # derivative depends on x, not F + 0 + >>> Fxx = Fx.diff(x) + >>> Fxx.diff(Fx) # derivative depends on x, not Fx + 0 + + The last example can be made explicit by showing the replacement + of Fx in Fxx with y: + + >>> Fxx.subs(Fx, y) + Derivative(y, x) + + Since that in itself will evaluate to zero, differentiating + wrt Fx will also be zero: + + >>> _.doit() + 0 + + Replacing undefined functions with concrete expressions + + One must be careful to replace undefined functions with expressions + that contain variables consistent with the function definition and + the variables of differentiation or else insconsistent result will + be obtained. Consider the following example: + + >>> eq = f(x)*g(y) + >>> eq.subs(f(x), x*y).diff(x, y).doit() + y*Derivative(g(y), y) + g(y) + >>> eq.diff(x, y).subs(f(x), x*y).doit() + y*Derivative(g(y), y) + + The results differ because `f(x)` was replaced with an expression + that involved both variables of differentiation. In the abstract + case, differentiation of `f(x)` by `y` is 0; in the concrete case, + the presence of `y` made that derivative nonvanishing and produced + the extra `g(y)` term. + + Defining differentiation for an object + + An object must define ._eval_derivative(symbol) method that returns + the differentiation result. This function only needs to consider the + non-trivial case where expr contains symbol and it should call the diff() + method internally (not _eval_derivative); Derivative should be the only + one to call _eval_derivative. + + Any class can allow derivatives to be taken with respect to + itself (while indicating its scalar nature). See the + docstring of Expr._diff_wrt. + + See Also + ======== + _sort_variable_count + """ + + is_Derivative = True + + @property + def _diff_wrt(self): + """An expression may be differentiated wrt a Derivative if + it is in elementary form. + + Examples + ======== + + >>> from sympy import Function, Derivative, cos + >>> from sympy.abc import x + >>> f = Function('f') + + >>> Derivative(f(x), x)._diff_wrt + True + >>> Derivative(cos(x), x)._diff_wrt + False + >>> Derivative(x + 1, x)._diff_wrt + False + + A Derivative might be an unevaluated form of what will not be + a valid variable of differentiation if evaluated. 
For example, + + >>> Derivative(f(f(x)), x).doit() + Derivative(f(x), x)*Derivative(f(f(x)), f(x)) + + Such an expression will present the same ambiguities as arise + when dealing with any other product, like ``2*x``, so ``_diff_wrt`` + is False: + + >>> Derivative(f(f(x)), x)._diff_wrt + False + """ + return self.expr._diff_wrt and isinstance(self.doit(), Derivative) + + def __new__(cls, expr, *variables, **kwargs): + expr = sympify(expr) + symbols_or_none = getattr(expr, "free_symbols", None) + has_symbol_set = isinstance(symbols_or_none, set) + + if not has_symbol_set: + raise ValueError(filldedent(''' + Since there are no variables in the expression %s, + it cannot be differentiated.''' % expr)) + + # determine value for variables if it wasn't given + if not variables: + variables = expr.free_symbols + if len(variables) != 1: + if expr.is_number: + return S.Zero + if len(variables) == 0: + raise ValueError(filldedent(''' + Since there are no variables in the expression, + the variable(s) of differentiation must be supplied + to differentiate %s''' % expr)) + else: + raise ValueError(filldedent(''' + Since there is more than one variable in the + expression, the variable(s) of differentiation + must be supplied to differentiate %s''' % expr)) + + # Split the list of variables into a list of the variables we are diff + # wrt, where each element of the list has the form (s, count) where + # s is the entity to diff wrt and count is the order of the + # derivative. + variable_count = [] + array_likes = (tuple, list, Tuple) + + from sympy.tensor.array import Array, NDimArray + + for i, v in enumerate(variables): + if isinstance(v, UndefinedFunction): + raise TypeError( + "cannot differentiate wrt " + "UndefinedFunction: %s" % v) + + if isinstance(v, array_likes): + if len(v) == 0: + # Ignore empty tuples: Derivative(expr, ... , (), ... ) + continue + if isinstance(v[0], array_likes): + # Derive by array: Derivative(expr, ... , [[x, y, z]], ... ) + if len(v) == 1: + v = Array(v[0]) + count = 1 + else: + v, count = v + v = Array(v) + else: + v, count = v + if count == 0: + continue + variable_count.append(Tuple(v, count)) + continue + + v = sympify(v) + if isinstance(v, Integer): + if i == 0: + raise ValueError("First variable cannot be a number: %i" % v) + count = v + prev, prevcount = variable_count[-1] + if prevcount != 1: + raise TypeError("tuple {} followed by number {}".format((prev, prevcount), v)) + if count == 0: + variable_count.pop() + else: + variable_count[-1] = Tuple(prev, count) + else: + count = 1 + variable_count.append(Tuple(v, count)) + + # light evaluation of contiguous, identical + # items: (x, 1), (x, 1) -> (x, 2) + merged = [] + for t in variable_count: + v, c = t + if c.is_negative: + raise ValueError( + 'order of differentiation must be nonnegative') + if merged and merged[-1][0] == v: + c += merged[-1][1] + if not c: + merged.pop() + else: + merged[-1] = Tuple(v, c) + else: + merged.append(t) + variable_count = merged + + # sanity check of variables of differentation; we waited + # until the counts were computed since some variables may + # have been removed because the count was 0 + for v, c in variable_count: + # v must have _diff_wrt True + if not v._diff_wrt: + __ = '' # filler to make error message neater + raise ValueError(filldedent(''' + Can't calculate derivative wrt %s.%s''' % (v, + __))) + + # We make a special case for 0th derivative, because there is no + # good way to unambiguously print this. 
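+        # For example, Derivative(f(x), (x, 0)) simply returns f(x): the
+        # (x, 0) entry was dropped above, so no differentiation is requested.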
+ if len(variable_count) == 0: + return expr + + evaluate = kwargs.get('evaluate', False) + + if evaluate: + if isinstance(expr, Derivative): + expr = expr.canonical + variable_count = [ + (v.canonical if isinstance(v, Derivative) else v, c) + for v, c in variable_count] + + # Look for a quick exit if there are symbols that don't appear in + # expression at all. Note, this cannot check non-symbols like + # Derivatives as those can be created by intermediate + # derivatives. + zero = False + free = expr.free_symbols + from sympy.matrices.expressions.matexpr import MatrixExpr + + for v, c in variable_count: + vfree = v.free_symbols + if c.is_positive and vfree: + if isinstance(v, AppliedUndef): + # these match exactly since + # x.diff(f(x)) == g(x).diff(f(x)) == 0 + # and are not created by differentiation + D = Dummy() + if not expr.xreplace({v: D}).has(D): + zero = True + break + elif isinstance(v, MatrixExpr): + zero = False + break + elif isinstance(v, Symbol) and v not in free: + zero = True + break + else: + if not free & vfree: + # e.g. v is IndexedBase or Matrix + zero = True + break + if zero: + return cls._get_zero_with_shape_like(expr) + + # make the order of symbols canonical + #TODO: check if assumption of discontinuous derivatives exist + variable_count = cls._sort_variable_count(variable_count) + + # denest + if isinstance(expr, Derivative): + variable_count = list(expr.variable_count) + variable_count + expr = expr.expr + return _derivative_dispatch(expr, *variable_count, **kwargs) + + # we return here if evaluate is False or if there is no + # _eval_derivative method + if not evaluate or not hasattr(expr, '_eval_derivative'): + # return an unevaluated Derivative + if evaluate and variable_count == [(expr, 1)] and expr.is_scalar: + # special hack providing evaluation for classes + # that have defined is_scalar=True but have no + # _eval_derivative defined + return S.One + return Expr.__new__(cls, expr, *variable_count) + + # evaluate the derivative by calling _eval_derivative method + # of expr for each variable + # ------------------------------------------------------------- + nderivs = 0 # how many derivatives were performed + unhandled = [] + from sympy.matrices.common import MatrixCommon + for i, (v, count) in enumerate(variable_count): + + old_expr = expr + old_v = None + + is_symbol = v.is_symbol or isinstance(v, + (Iterable, Tuple, MatrixCommon, NDimArray)) + + if not is_symbol: + old_v = v + v = Dummy('xi') + expr = expr.xreplace({old_v: v}) + # Derivatives and UndefinedFunctions are independent + # of all others + clashing = not (isinstance(old_v, Derivative) or \ + isinstance(old_v, AppliedUndef)) + if v not in expr.free_symbols and not clashing: + return expr.diff(v) # expr's version of 0 + if not old_v.is_scalar and not hasattr( + old_v, '_eval_derivative'): + # special hack providing evaluation for classes + # that have defined is_scalar=True but have no + # _eval_derivative defined + expr *= old_v.diff(old_v) + + obj = cls._dispatch_eval_derivative_n_times(expr, v, count) + if obj is not None and obj.is_zero: + return obj + + nderivs += count + + if old_v is not None: + if obj is not None: + # remove the dummy that was used + obj = obj.subs(v, old_v) + # restore expr + expr = old_expr + + if obj is None: + # we've already checked for quick-exit conditions + # that give 0 so the remaining variables + # are contained in the expression but the expression + # did not compute a derivative so we stop taking + # derivatives + unhandled = variable_count[i:] + break + + 
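+            # differentiation wrt this variable produced a result; continue
+            # the loop with the (partially) differentiated expression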
expr = obj + + # what we have so far can be made canonical + expr = expr.replace( + lambda x: isinstance(x, Derivative), + lambda x: x.canonical) + + if unhandled: + if isinstance(expr, Derivative): + unhandled = list(expr.variable_count) + unhandled + expr = expr.expr + expr = Expr.__new__(cls, expr, *unhandled) + + if (nderivs > 1) == True and kwargs.get('simplify', True): + from .exprtools import factor_terms + from sympy.simplify.simplify import signsimp + expr = factor_terms(signsimp(expr)) + return expr + + @property + def canonical(cls): + return cls.func(cls.expr, + *Derivative._sort_variable_count(cls.variable_count)) + + @classmethod + def _sort_variable_count(cls, vc): + """ + Sort (variable, count) pairs into canonical order while + retaining order of variables that do not commute during + differentiation: + + * symbols and functions commute with each other + * derivatives commute with each other + * a derivative does not commute with anything it contains + * any other object is not allowed to commute if it has + free symbols in common with another object + + Examples + ======== + + >>> from sympy import Derivative, Function, symbols + >>> vsort = Derivative._sort_variable_count + >>> x, y, z = symbols('x y z') + >>> f, g, h = symbols('f g h', cls=Function) + + Contiguous items are collapsed into one pair: + + >>> vsort([(x, 1), (x, 1)]) + [(x, 2)] + >>> vsort([(y, 1), (f(x), 1), (y, 1), (f(x), 1)]) + [(y, 2), (f(x), 2)] + + Ordering is canonical. + + >>> def vsort0(*v): + ... # docstring helper to + ... # change vi -> (vi, 0), sort, and return vi vals + ... return [i[0] for i in vsort([(i, 0) for i in v])] + + >>> vsort0(y, x) + [x, y] + >>> vsort0(g(y), g(x), f(y)) + [f(y), g(x), g(y)] + + Symbols are sorted as far to the left as possible but never + move to the left of a derivative having the same symbol in + its variables; the same applies to AppliedUndef which are + always sorted after Symbols: + + >>> dfx = f(x).diff(x) + >>> assert vsort0(dfx, y) == [y, dfx] + >>> assert vsort0(dfx, x) == [dfx, x] + """ + if not vc: + return [] + vc = list(vc) + if len(vc) == 1: + return [Tuple(*vc[0])] + V = list(range(len(vc))) + E = [] + v = lambda i: vc[i][0] + D = Dummy() + def _block(d, v, wrt=False): + # return True if v should not come before d else False + if d == v: + return wrt + if d.is_Symbol: + return False + if isinstance(d, Derivative): + # a derivative blocks if any of it's variables contain + # v; the wrt flag will return True for an exact match + # and will cause an AppliedUndef to block if v is in + # the arguments + if any(_block(k, v, wrt=True) + for k in d._wrt_variables): + return True + return False + if not wrt and isinstance(d, AppliedUndef): + return False + if v.is_Symbol: + return v in d.free_symbols + if isinstance(v, AppliedUndef): + return _block(d.xreplace({v: D}), D) + return d.free_symbols & v.free_symbols + for i in range(len(vc)): + for j in range(i): + if _block(v(j), v(i)): + E.append((j,i)) + # this is the default ordering to use in case of ties + O = dict(zip(ordered(uniq([i for i, c in vc])), range(len(vc)))) + ix = topological_sort((V, E), key=lambda i: O[v(i)]) + # merge counts of contiguously identical items + merged = [] + for v, c in [vc[i] for i in ix]: + if merged and merged[-1][0] == v: + merged[-1][1] += c + else: + merged.append([v, c]) + return [Tuple(*i) for i in merged] + + def _eval_is_commutative(self): + return self.expr.is_commutative + + def _eval_derivative(self, v): + # If v (the variable of differentiation) is not in + # 
self.variables, we might be able to take the derivative. + if v not in self._wrt_variables: + dedv = self.expr.diff(v) + if isinstance(dedv, Derivative): + return dedv.func(dedv.expr, *(self.variable_count + dedv.variable_count)) + # dedv (d(self.expr)/dv) could have simplified things such that the + # derivative wrt things in self.variables can now be done. Thus, + # we set evaluate=True to see if there are any other derivatives + # that can be done. The most common case is when dedv is a simple + # number so that the derivative wrt anything else will vanish. + return self.func(dedv, *self.variables, evaluate=True) + # In this case v was in self.variables so the derivative wrt v has + # already been attempted and was not computed, either because it + # couldn't be or evaluate=False originally. + variable_count = list(self.variable_count) + variable_count.append((v, 1)) + return self.func(self.expr, *variable_count, evaluate=False) + + def doit(self, **hints): + expr = self.expr + if hints.get('deep', True): + expr = expr.doit(**hints) + hints['evaluate'] = True + rv = self.func(expr, *self.variable_count, **hints) + if rv!= self and rv.has(Derivative): + rv = rv.doit(**hints) + return rv + + @_sympifyit('z0', NotImplementedError) + def doit_numerically(self, z0): + """ + Evaluate the derivative at z numerically. + + When we can represent derivatives at a point, this should be folded + into the normal evalf. For now, we need a special method. + """ + if len(self.free_symbols) != 1 or len(self.variables) != 1: + raise NotImplementedError('partials and higher order derivatives') + z = list(self.free_symbols)[0] + + def eval(x): + f0 = self.expr.subs(z, Expr._from_mpmath(x, prec=mpmath.mp.prec)) + f0 = f0.evalf(prec_to_dps(mpmath.mp.prec)) + return f0._to_mpmath(mpmath.mp.prec) + return Expr._from_mpmath(mpmath.diff(eval, + z0._to_mpmath(mpmath.mp.prec)), + mpmath.mp.prec) + + @property + def expr(self): + return self._args[0] + + @property + def _wrt_variables(self): + # return the variables of differentiation without + # respect to the type of count (int or symbolic) + return [i[0] for i in self.variable_count] + + @property + def variables(self): + # TODO: deprecate? YES, make this 'enumerated_variables' and + # name _wrt_variables as variables + # TODO: support for `d^n`? + rv = [] + for v, count in self.variable_count: + if not count.is_Integer: + raise TypeError(filldedent(''' + Cannot give expansion for symbolic count. If you just + want a list of all variables of differentiation, use + _wrt_variables.''')) + rv.extend([v]*count) + return tuple(rv) + + @property + def variable_count(self): + return self._args[1:] + + @property + def derivative_count(self): + return sum([count for _, count in self.variable_count], 0) + + @property + def free_symbols(self): + ret = self.expr.free_symbols + # Add symbolic counts to free_symbols + for _, count in self.variable_count: + ret.update(count.free_symbols) + return ret + + @property + def kind(self): + return self.args[0].kind + + def _eval_subs(self, old, new): + # The substitution (old, new) cannot be done inside + # Derivative(expr, vars) for a variety of reasons + # as handled below. 
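+        # e.g. Derivative(f(x), x).subs(x, 0): 0 is not a valid variable of
+        # differentiation, so case (0) below returns the substitution wrapped
+        # as Subs(Derivative(f(x), x), x, 0)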
+ if old in self._wrt_variables: + # first handle the counts + expr = self.func(self.expr, *[(v, c.subs(old, new)) + for v, c in self.variable_count]) + if expr != self: + return expr._eval_subs(old, new) + # quick exit case + if not getattr(new, '_diff_wrt', False): + # case (0): new is not a valid variable of + # differentiation + if isinstance(old, Symbol): + # don't introduce a new symbol if the old will do + return Subs(self, old, new) + else: + xi = Dummy('xi') + return Subs(self.xreplace({old: xi}), xi, new) + + # If both are Derivatives with the same expr, check if old is + # equivalent to self or if old is a subderivative of self. + if old.is_Derivative and old.expr == self.expr: + if self.canonical == old.canonical: + return new + + # collections.Counter doesn't have __le__ + def _subset(a, b): + return all((a[i] <= b[i]) == True for i in a) + + old_vars = Counter(dict(reversed(old.variable_count))) + self_vars = Counter(dict(reversed(self.variable_count))) + if _subset(old_vars, self_vars): + return _derivative_dispatch(new, *(self_vars - old_vars).items()).canonical + + args = list(self.args) + newargs = [x._subs(old, new) for x in args] + if args[0] == old: + # complete replacement of self.expr + # we already checked that the new is valid so we know + # it won't be a problem should it appear in variables + return _derivative_dispatch(*newargs) + + if newargs[0] != args[0]: + # case (1) can't change expr by introducing something that is in + # the _wrt_variables if it was already in the expr + # e.g. + # for Derivative(f(x, g(y)), y), x cannot be replaced with + # anything that has y in it; for f(g(x), g(y)).diff(g(y)) + # g(x) cannot be replaced with anything that has g(y) + syms = {vi: Dummy() for vi in self._wrt_variables + if not vi.is_Symbol} + wrt = {syms.get(vi, vi) for vi in self._wrt_variables} + forbidden = args[0].xreplace(syms).free_symbols & wrt + nfree = new.xreplace(syms).free_symbols + ofree = old.xreplace(syms).free_symbols + if (nfree - ofree) & forbidden: + return Subs(self, old, new) + + viter = ((i, j) for ((i, _), (j, _)) in zip(newargs[1:], args[1:])) + if any(i != j for i, j in viter): # a wrt-variable change + # case (2) can't change vars by introducing a variable + # that is contained in expr, e.g. 
+ # for Derivative(f(z, g(h(x), y)), y), y cannot be changed to + # x, h(x), or g(h(x), y) + for a in _atomic(self.expr, recursive=True): + for i in range(1, len(newargs)): + vi, _ = newargs[i] + if a == vi and vi != args[i][0]: + return Subs(self, old, new) + # more arg-wise checks + vc = newargs[1:] + oldv = self._wrt_variables + newe = self.expr + subs = [] + for i, (vi, ci) in enumerate(vc): + if not vi._diff_wrt: + # case (3) invalid differentiation expression so + # create a replacement dummy + xi = Dummy('xi_%i' % i) + # replace the old valid variable with the dummy + # in the expression + newe = newe.xreplace({oldv[i]: xi}) + # and replace the bad variable with the dummy + vc[i] = (xi, ci) + # and record the dummy with the new (invalid) + # differentiation expression + subs.append((xi, vi)) + + if subs: + # handle any residual substitution in the expression + newe = newe._subs(old, new) + # return the Subs-wrapped derivative + return Subs(Derivative(newe, *vc), *zip(*subs)) + + # everything was ok + return _derivative_dispatch(*newargs) + + def _eval_lseries(self, x, logx, cdir=0): + dx = self.variables + for term in self.expr.lseries(x, logx=logx, cdir=cdir): + yield self.func(term, *dx) + + def _eval_nseries(self, x, n, logx, cdir=0): + arg = self.expr.nseries(x, n=n, logx=logx) + o = arg.getO() + dx = self.variables + rv = [self.func(a, *dx) for a in Add.make_args(arg.removeO())] + if o: + rv.append(o/x) + return Add(*rv) + + def _eval_as_leading_term(self, x, logx=None, cdir=0): + series_gen = self.expr.lseries(x) + d = S.Zero + for leading_term in series_gen: + d = diff(leading_term, *self.variables) + if d != 0: + break + return d + + def as_finite_difference(self, points=1, x0=None, wrt=None): + """ Expresses a Derivative instance as a finite difference. + + Parameters + ========== + + points : sequence or coefficient, optional + If sequence: discrete values (length >= order+1) of the + independent variable used for generating the finite + difference weights. + If it is a coefficient, it will be used as the step-size + for generating an equidistant sequence of length order+1 + centered around ``x0``. Default: 1 (step-size 1) + + x0 : number or Symbol, optional + the value of the independent variable (``wrt``) at which the + derivative is to be approximated. Default: same as ``wrt``. + + wrt : Symbol, optional + "with respect to" the variable for which the (partial) + derivative is to be approximated for. If not provided it + is required that the derivative is ordinary. Default: ``None``. + + + Examples + ======== + + >>> from sympy import symbols, Function, exp, sqrt, Symbol + >>> x, h = symbols('x h') + >>> f = Function('f') + >>> f(x).diff(x).as_finite_difference() + -f(x - 1/2) + f(x + 1/2) + + The default step size and number of points are 1 and + ``order + 1`` respectively. 
We can change the step size by + passing a symbol as a parameter: + + >>> f(x).diff(x).as_finite_difference(h) + -f(-h/2 + x)/h + f(h/2 + x)/h + + We can also specify the discretized values to be used in a + sequence: + + >>> f(x).diff(x).as_finite_difference([x, x+h, x+2*h]) + -3*f(x)/(2*h) + 2*f(h + x)/h - f(2*h + x)/(2*h) + + The algorithm is not restricted to use equidistant spacing, nor + do we need to make the approximation around ``x0``, but we can get + an expression estimating the derivative at an offset: + + >>> e, sq2 = exp(1), sqrt(2) + >>> xl = [x-h, x+h, x+e*h] + >>> f(x).diff(x, 1).as_finite_difference(xl, x+h*sq2) # doctest: +ELLIPSIS + 2*h*((h + sqrt(2)*h)/(2*h) - (-sqrt(2)*h + h)/(2*h))*f(E*h + x)/... + + To approximate ``Derivative`` around ``x0`` using a non-equidistant + spacing step, the algorithm supports assignment of undefined + functions to ``points``: + + >>> dx = Function('dx') + >>> f(x).diff(x).as_finite_difference(points=dx(x), x0=x-h) + -f(-h + x - dx(-h + x)/2)/dx(-h + x) + f(-h + x + dx(-h + x)/2)/dx(-h + x) + + Partial derivatives are also supported: + + >>> y = Symbol('y') + >>> d2fdxdy=f(x,y).diff(x,y) + >>> d2fdxdy.as_finite_difference(wrt=x) + -Derivative(f(x - 1/2, y), y) + Derivative(f(x + 1/2, y), y) + + We can apply ``as_finite_difference`` to ``Derivative`` instances in + compound expressions using ``replace``: + + >>> (1 + 42**f(x).diff(x)).replace(lambda arg: arg.is_Derivative, + ... lambda arg: arg.as_finite_difference()) + 42**(-f(x - 1/2) + f(x + 1/2)) + 1 + + + See also + ======== + + sympy.calculus.finite_diff.apply_finite_diff + sympy.calculus.finite_diff.differentiate_finite + sympy.calculus.finite_diff.finite_diff_weights + + """ + from sympy.calculus.finite_diff import _as_finite_diff + return _as_finite_diff(self, points, x0, wrt) + + @classmethod + def _get_zero_with_shape_like(cls, expr): + return S.Zero + + @classmethod + def _dispatch_eval_derivative_n_times(cls, expr, v, count): + # Evaluate the derivative `n` times. If + # `_eval_derivative_n_times` is not overridden by the current + # object, the default in `Basic` will call a loop over + # `_eval_derivative`: + return expr._eval_derivative_n_times(v, count) + + +def _derivative_dispatch(expr, *variables, **kwargs): + from sympy.matrices.common import MatrixCommon + from sympy.matrices.expressions.matexpr import MatrixExpr + from sympy.tensor.array import NDimArray + array_types = (MatrixCommon, MatrixExpr, NDimArray, list, tuple, Tuple) + if isinstance(expr, array_types) or any(isinstance(i[0], array_types) if isinstance(i, (tuple, list, Tuple)) else isinstance(i, array_types) for i in variables): + from sympy.tensor.array.array_derivatives import ArrayDerivative + return ArrayDerivative(expr, *variables, **kwargs) + return Derivative(expr, *variables, **kwargs) + + +class Lambda(Expr): + """ + Lambda(x, expr) represents a lambda function similar to Python's + 'lambda x: expr'. A function of several variables is written as + Lambda((x, y, ...), expr). 
+ + Examples + ======== + + A simple example: + + >>> from sympy import Lambda + >>> from sympy.abc import x + >>> f = Lambda(x, x**2) + >>> f(4) + 16 + + For multivariate functions, use: + + >>> from sympy.abc import y, z, t + >>> f2 = Lambda((x, y, z, t), x + y**z + t**z) + >>> f2(1, 2, 3, 4) + 73 + + It is also possible to unpack tuple arguments: + + >>> f = Lambda(((x, y), z), x + y + z) + >>> f((1, 2), 3) + 6 + + A handy shortcut for lots of arguments: + + >>> p = x, y, z + >>> f = Lambda(p, x + y*z) + >>> f(*p) + x + y*z + + """ + is_Function = True + + def __new__(cls, signature, expr): + if iterable(signature) and not isinstance(signature, (tuple, Tuple)): + sympy_deprecation_warning( + """ + Using a non-tuple iterable as the first argument to Lambda + is deprecated. Use Lambda(tuple(args), expr) instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-non-tuple-lambda", + ) + signature = tuple(signature) + sig = signature if iterable(signature) else (signature,) + sig = sympify(sig) + cls._check_signature(sig) + + if len(sig) == 1 and sig[0] == expr: + return S.IdentityFunction + + return Expr.__new__(cls, sig, sympify(expr)) + + @classmethod + def _check_signature(cls, sig): + syms = set() + + def rcheck(args): + for a in args: + if a.is_symbol: + if a in syms: + raise BadSignatureError("Duplicate symbol %s" % a) + syms.add(a) + elif isinstance(a, Tuple): + rcheck(a) + else: + raise BadSignatureError("Lambda signature should be only tuples" + " and symbols, not %s" % a) + + if not isinstance(sig, Tuple): + raise BadSignatureError("Lambda signature should be a tuple not %s" % sig) + # Recurse through the signature: + rcheck(sig) + + @property + def signature(self): + """The expected form of the arguments to be unpacked into variables""" + return self._args[0] + + @property + def expr(self): + """The return value of the function""" + return self._args[1] + + @property + def variables(self): + """The variables used in the internal representation of the function""" + def _variables(args): + if isinstance(args, Tuple): + for arg in args: + yield from _variables(arg) + else: + yield args + return tuple(_variables(self.signature)) + + @property + def nargs(self): + from sympy.sets.sets import FiniteSet + return FiniteSet(len(self.signature)) + + bound_symbols = variables + + @property + def free_symbols(self): + return self.expr.free_symbols - set(self.variables) + + def __call__(self, *args): + n = len(args) + if n not in self.nargs: # Lambda only ever has 1 value in nargs + # XXX: exception message must be in exactly this format to + # make it work with NumPy's functions like vectorize(). See, + # for example, https://github.com/numpy/numpy/issues/1697. + # The ideal solution would be just to attach metadata to + # the exception and change NumPy to take advantage of this. + ## XXX does this apply to Lambda? If not, remove this comment. 
+ temp = ('%(name)s takes exactly %(args)s ' + 'argument%(plural)s (%(given)s given)') + raise BadArgumentsError(temp % { + 'name': self, + 'args': list(self.nargs)[0], + 'plural': 's'*(list(self.nargs)[0] != 1), + 'given': n}) + + d = self._match_signature(self.signature, args) + + return self.expr.xreplace(d) + + def _match_signature(self, sig, args): + + symargmap = {} + + def rmatch(pars, args): + for par, arg in zip(pars, args): + if par.is_symbol: + symargmap[par] = arg + elif isinstance(par, Tuple): + if not isinstance(arg, (tuple, Tuple)) or len(args) != len(pars): + raise BadArgumentsError("Can't match %s and %s" % (args, pars)) + rmatch(par, arg) + + rmatch(sig, args) + + return symargmap + + @property + def is_identity(self): + """Return ``True`` if this ``Lambda`` is an identity function. """ + return self.signature == self.expr + + def _eval_evalf(self, prec): + return self.func(self.args[0], self.args[1].evalf(n=prec_to_dps(prec))) + + +class Subs(Expr): + """ + Represents unevaluated substitutions of an expression. + + ``Subs(expr, x, x0)`` represents the expression resulting + from substituting x with x0 in expr. + + Parameters + ========== + + expr : Expr + An expression. + + x : tuple, variable + A variable or list of distinct variables. + + x0 : tuple or list of tuples + A point or list of evaluation points + corresponding to those variables. + + Examples + ======== + + >>> from sympy import Subs, Function, sin, cos + >>> from sympy.abc import x, y, z + >>> f = Function('f') + + Subs are created when a particular substitution cannot be made. The + x in the derivative cannot be replaced with 0 because 0 is not a + valid variables of differentiation: + + >>> f(x).diff(x).subs(x, 0) + Subs(Derivative(f(x), x), x, 0) + + Once f is known, the derivative and evaluation at 0 can be done: + + >>> _.subs(f, sin).doit() == sin(x).diff(x).subs(x, 0) == cos(0) + True + + Subs can also be created directly with one or more variables: + + >>> Subs(f(x)*sin(y) + z, (x, y), (0, 1)) + Subs(z + f(x)*sin(y), (x, y), (0, 1)) + >>> _.doit() + z + f(0)*sin(1) + + Notes + ===== + + ``Subs`` objects are generally useful to represent unevaluated derivatives + calculated at a point. + + The variables may be expressions, but they are subjected to the limitations + of subs(), so it is usually a good practice to use only symbols for + variables, since in that case there can be no ambiguity. + + There's no automatic expansion - use the method .doit() to effect all + possible substitutions of the object and also of objects inside the + expression. + + When evaluating derivatives at a point that is not a symbol, a Subs object + is returned. One is also able to calculate derivatives of Subs objects - in + this case the expression is always expanded (for the unevaluated form, use + Derivative()). 
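+    For instance, differentiating a ``Subs`` whose point is a symbol
+    carries out the substitution and expands the derivative (an
+    illustrative sketch of the behaviour just described):
+
+    >>> Subs(f(x), x, y).diff(y)
+    Derivative(f(y), y)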
+ + In order to allow expressions to combine before doit is done, a + representation of the Subs expression is used internally to make + expressions that are superficially different compare the same: + + >>> a, b = Subs(x, x, 0), Subs(y, y, 0) + >>> a + b + 2*Subs(x, x, 0) + + This can lead to unexpected consequences when using methods + like `has` that are cached: + + >>> s = Subs(x, x, 0) + >>> s.has(x), s.has(y) + (True, False) + >>> ss = s.subs(x, y) + >>> ss.has(x), ss.has(y) + (True, False) + >>> s, ss + (Subs(x, x, 0), Subs(y, y, 0)) + """ + def __new__(cls, expr, variables, point, **assumptions): + if not is_sequence(variables, Tuple): + variables = [variables] + variables = Tuple(*variables) + + if has_dups(variables): + repeated = [str(v) for v, i in Counter(variables).items() if i > 1] + __ = ', '.join(repeated) + raise ValueError(filldedent(''' + The following expressions appear more than once: %s + ''' % __)) + + point = Tuple(*(point if is_sequence(point, Tuple) else [point])) + + if len(point) != len(variables): + raise ValueError('Number of point values must be the same as ' + 'the number of variables.') + + if not point: + return sympify(expr) + + # denest + if isinstance(expr, Subs): + variables = expr.variables + variables + point = expr.point + point + expr = expr.expr + else: + expr = sympify(expr) + + # use symbols with names equal to the point value (with prepended _) + # to give a variable-independent expression + pre = "_" + pts = sorted(set(point), key=default_sort_key) + from sympy.printing.str import StrPrinter + class CustomStrPrinter(StrPrinter): + def _print_Dummy(self, expr): + return str(expr) + str(expr.dummy_index) + def mystr(expr, **settings): + p = CustomStrPrinter(settings) + return p.doprint(expr) + while 1: + s_pts = {p: Symbol(pre + mystr(p)) for p in pts} + reps = [(v, s_pts[p]) + for v, p in zip(variables, point)] + # if any underscore-prepended symbol is already a free symbol + # and is a variable with a different point value, then there + # is a clash, e.g. _0 clashes in Subs(_0 + _1, (_0, _1), (1, 0)) + # because the new symbol that would be created is _1 but _1 + # is already mapped to 0 so __0 and __1 are used for the new + # symbols + if any(r in expr.free_symbols and + r in variables and + Symbol(pre + mystr(point[variables.index(r)])) != r + for _, r in reps): + pre += "_" + continue + break + + obj = Expr.__new__(cls, expr, Tuple(*variables), point) + obj._expr = expr.xreplace(dict(reps)) + return obj + + def _eval_is_commutative(self): + return self.expr.is_commutative + + def doit(self, **hints): + e, v, p = self.args + + # remove self mappings + for i, (vi, pi) in enumerate(zip(v, p)): + if vi == pi: + v = v[:i] + v[i + 1:] + p = p[:i] + p[i + 1:] + if not v: + return self.expr + + if isinstance(e, Derivative): + # apply functions first, e.g. 
f -> cos + undone = [] + for i, vi in enumerate(v): + if isinstance(vi, FunctionClass): + e = e.subs(vi, p[i]) + else: + undone.append((vi, p[i])) + if not isinstance(e, Derivative): + e = e.doit() + if isinstance(e, Derivative): + # do Subs that aren't related to differentiation + undone2 = [] + D = Dummy() + arg = e.args[0] + for vi, pi in undone: + if D not in e.xreplace({vi: D}).free_symbols: + if arg.has(vi): + e = e.subs(vi, pi) + else: + undone2.append((vi, pi)) + undone = undone2 + # differentiate wrt variables that are present + wrt = [] + D = Dummy() + expr = e.expr + free = expr.free_symbols + for vi, ci in e.variable_count: + if isinstance(vi, Symbol) and vi in free: + expr = expr.diff((vi, ci)) + elif D in expr.subs(vi, D).free_symbols: + expr = expr.diff((vi, ci)) + else: + wrt.append((vi, ci)) + # inject remaining subs + rv = expr.subs(undone) + # do remaining differentiation *in order given* + for vc in wrt: + rv = rv.diff(vc) + else: + # inject remaining subs + rv = e.subs(undone) + else: + rv = e.doit(**hints).subs(list(zip(v, p))) + + if hints.get('deep', True) and rv != self: + rv = rv.doit(**hints) + return rv + + def evalf(self, prec=None, **options): + return self.doit().evalf(prec, **options) + + n = evalf # type:ignore + + @property + def variables(self): + """The variables to be evaluated""" + return self._args[1] + + bound_symbols = variables + + @property + def expr(self): + """The expression on which the substitution operates""" + return self._args[0] + + @property + def point(self): + """The values for which the variables are to be substituted""" + return self._args[2] + + @property + def free_symbols(self): + return (self.expr.free_symbols - set(self.variables) | + set(self.point.free_symbols)) + + @property + def expr_free_symbols(self): + sympy_deprecation_warning(""" + The expr_free_symbols property is deprecated. Use free_symbols to get + the free symbols of an expression. 
+ """, + deprecated_since_version="1.9", + active_deprecations_target="deprecated-expr-free-symbols") + # Don't show the warning twice from the recursive call + with ignore_warnings(SymPyDeprecationWarning): + return (self.expr.expr_free_symbols - set(self.variables) | + set(self.point.expr_free_symbols)) + + def __eq__(self, other): + if not isinstance(other, Subs): + return False + return self._hashable_content() == other._hashable_content() + + def __ne__(self, other): + return not(self == other) + + def __hash__(self): + return super().__hash__() + + def _hashable_content(self): + return (self._expr.xreplace(self.canonical_variables), + ) + tuple(ordered([(v, p) for v, p in + zip(self.variables, self.point) if not self.expr.has(v)])) + + def _eval_subs(self, old, new): + # Subs doit will do the variables in order; the semantics + # of subs for Subs is have the following invariant for + # Subs object foo: + # foo.doit().subs(reps) == foo.subs(reps).doit() + pt = list(self.point) + if old in self.variables: + if _atomic(new) == {new} and not any( + i.has(new) for i in self.args): + # the substitution is neutral + return self.xreplace({old: new}) + # any occurrence of old before this point will get + # handled by replacements from here on + i = self.variables.index(old) + for j in range(i, len(self.variables)): + pt[j] = pt[j]._subs(old, new) + return self.func(self.expr, self.variables, pt) + v = [i._subs(old, new) for i in self.variables] + if v != list(self.variables): + return self.func(self.expr, self.variables + (old,), pt + [new]) + expr = self.expr._subs(old, new) + pt = [i._subs(old, new) for i in self.point] + return self.func(expr, v, pt) + + def _eval_derivative(self, s): + # Apply the chain rule of the derivative on the substitution variables: + f = self.expr + vp = V, P = self.variables, self.point + val = Add.fromiter(p.diff(s)*Subs(f.diff(v), *vp).doit() + for v, p in zip(V, P)) + + # these are all the free symbols in the expr + efree = f.free_symbols + # some symbols like IndexedBase include themselves and args + # as free symbols + compound = {i for i in efree if len(i.free_symbols) > 1} + # hide them and see what independent free symbols remain + dums = {Dummy() for i in compound} + masked = f.xreplace(dict(zip(compound, dums))) + ifree = masked.free_symbols - dums + # include the compound symbols + free = ifree | compound + # remove the variables already handled + free -= set(V) + # add back any free symbols of remaining compound symbols + free |= {i for j in free & compound for i in j.free_symbols} + # if symbols of s are in free then there is more to do + if free & s.free_symbols: + val += Subs(f.diff(s), self.variables, self.point).doit() + return val + + def _eval_nseries(self, x, n, logx, cdir=0): + if x in self.point: + # x is the variable being substituted into + apos = self.point.index(x) + other = self.variables[apos] + else: + other = x + arg = self.expr.nseries(other, n=n, logx=logx) + o = arg.getO() + terms = Add.make_args(arg.removeO()) + rv = Add(*[self.func(a, *self.args[1:]) for a in terms]) + if o: + rv += o.subs(other, x) + return rv + + def _eval_as_leading_term(self, x, logx=None, cdir=0): + if x in self.point: + ipos = self.point.index(x) + xvar = self.variables[ipos] + return self.expr.as_leading_term(xvar) + if x in self.variables: + # if `x` is a dummy variable, it means it won't exist after the + # substitution has been performed: + return self + # The variable is independent of the substitution: + return self.expr.as_leading_term(x) + + +def 
diff(f, *symbols, **kwargs): + """ + Differentiate f with respect to symbols. + + Explanation + =========== + + This is just a wrapper to unify .diff() and the Derivative class; its + interface is similar to that of integrate(). You can use the same + shortcuts for multiple variables as with Derivative. For example, + diff(f(x), x, x, x) and diff(f(x), x, 3) both return the third derivative + of f(x). + + You can pass evaluate=False to get an unevaluated Derivative class. Note + that if there are 0 symbols (such as diff(f(x), x, 0), then the result will + be the function (the zeroth derivative), even if evaluate=False. + + Examples + ======== + + >>> from sympy import sin, cos, Function, diff + >>> from sympy.abc import x, y + >>> f = Function('f') + + >>> diff(sin(x), x) + cos(x) + >>> diff(f(x), x, x, x) + Derivative(f(x), (x, 3)) + >>> diff(f(x), x, 3) + Derivative(f(x), (x, 3)) + >>> diff(sin(x)*cos(y), x, 2, y, 2) + sin(x)*cos(y) + + >>> type(diff(sin(x), x)) + cos + >>> type(diff(sin(x), x, evaluate=False)) + + >>> type(diff(sin(x), x, 0)) + sin + >>> type(diff(sin(x), x, 0, evaluate=False)) + sin + + >>> diff(sin(x)) + cos(x) + >>> diff(sin(x*y)) + Traceback (most recent call last): + ... + ValueError: specify differentiation variables to differentiate sin(x*y) + + Note that ``diff(sin(x))`` syntax is meant only for convenience + in interactive sessions and should be avoided in library code. + + References + ========== + + .. [1] https://reference.wolfram.com/legacy/v5_2/Built-inFunctions/AlgebraicComputation/Calculus/D.html + + See Also + ======== + + Derivative + idiff: computes the derivative implicitly + + """ + if hasattr(f, 'diff'): + return f.diff(*symbols, **kwargs) + kwargs.setdefault('evaluate', True) + return _derivative_dispatch(f, *symbols, **kwargs) + + +def expand(e, deep=True, modulus=None, power_base=True, power_exp=True, + mul=True, log=True, multinomial=True, basic=True, **hints): + r""" + Expand an expression using methods given as hints. + + Explanation + =========== + + Hints evaluated unless explicitly set to False are: ``basic``, ``log``, + ``multinomial``, ``mul``, ``power_base``, and ``power_exp`` The following + hints are supported but not applied unless set to True: ``complex``, + ``func``, and ``trig``. In addition, the following meta-hints are + supported by some or all of the other hints: ``frac``, ``numer``, + ``denom``, ``modulus``, and ``force``. ``deep`` is supported by all + hints. Additionally, subclasses of Expr may define their own hints or + meta-hints. + + The ``basic`` hint is used for any special rewriting of an object that + should be done automatically (along with the other hints like ``mul``) + when expand is called. This is a catch-all hint to handle any sort of + expansion that may not be described by the existing hint names. To use + this hint an object should override the ``_eval_expand_basic`` method. + Objects may also define their own expand methods, which are not run by + default. See the API section below. + + If ``deep`` is set to ``True`` (the default), things like arguments of + functions are recursively expanded. Use ``deep=False`` to only expand on + the top level. + + If the ``force`` hint is used, assumptions about variables will be ignored + in making the expansion. 
+ + Hints + ===== + + These hints are run by default + + mul + --- + + Distributes multiplication over addition: + + >>> from sympy import cos, exp, sin + >>> from sympy.abc import x, y, z + >>> (y*(x + z)).expand(mul=True) + x*y + y*z + + multinomial + ----------- + + Expand (x + y + ...)**n where n is a positive integer. + + >>> ((x + y + z)**2).expand(multinomial=True) + x**2 + 2*x*y + 2*x*z + y**2 + 2*y*z + z**2 + + power_exp + --------- + + Expand addition in exponents into multiplied bases. + + >>> exp(x + y).expand(power_exp=True) + exp(x)*exp(y) + >>> (2**(x + y)).expand(power_exp=True) + 2**x*2**y + + power_base + ---------- + + Split powers of multiplied bases. + + This only happens by default if assumptions allow, or if the + ``force`` meta-hint is used: + + >>> ((x*y)**z).expand(power_base=True) + (x*y)**z + >>> ((x*y)**z).expand(power_base=True, force=True) + x**z*y**z + >>> ((2*y)**z).expand(power_base=True) + 2**z*y**z + + Note that in some cases where this expansion always holds, SymPy performs + it automatically: + + >>> (x*y)**2 + x**2*y**2 + + log + --- + + Pull out power of an argument as a coefficient and split logs products + into sums of logs. + + Note that these only work if the arguments of the log function have the + proper assumptions--the arguments must be positive and the exponents must + be real--or else the ``force`` hint must be True: + + >>> from sympy import log, symbols + >>> log(x**2*y).expand(log=True) + log(x**2*y) + >>> log(x**2*y).expand(log=True, force=True) + 2*log(x) + log(y) + >>> x, y = symbols('x,y', positive=True) + >>> log(x**2*y).expand(log=True) + 2*log(x) + log(y) + + basic + ----- + + This hint is intended primarily as a way for custom subclasses to enable + expansion by default. + + These hints are not run by default: + + complex + ------- + + Split an expression into real and imaginary parts. + + >>> x, y = symbols('x,y') + >>> (x + y).expand(complex=True) + re(x) + re(y) + I*im(x) + I*im(y) + >>> cos(x).expand(complex=True) + -I*sin(re(x))*sinh(im(x)) + cos(re(x))*cosh(im(x)) + + Note that this is just a wrapper around ``as_real_imag()``. Most objects + that wish to redefine ``_eval_expand_complex()`` should consider + redefining ``as_real_imag()`` instead. + + func + ---- + + Expand other functions. + + >>> from sympy import gamma + >>> gamma(x + 1).expand(func=True) + x*gamma(x) + + trig + ---- + + Do trigonometric expansions. + + >>> cos(x + y).expand(trig=True) + -sin(x)*sin(y) + cos(x)*cos(y) + >>> sin(2*x).expand(trig=True) + 2*sin(x)*cos(x) + + Note that the forms of ``sin(n*x)`` and ``cos(n*x)`` in terms of ``sin(x)`` + and ``cos(x)`` are not unique, due to the identity `\sin^2(x) + \cos^2(x) + = 1`. The current implementation uses the form obtained from Chebyshev + polynomials, but this may change. See `this MathWorld article + `_ for more + information. + + Notes + ===== + + - You can shut off unwanted methods:: + + >>> (exp(x + y)*(x + y)).expand() + x*exp(x)*exp(y) + y*exp(x)*exp(y) + >>> (exp(x + y)*(x + y)).expand(power_exp=False) + x*exp(x + y) + y*exp(x + y) + >>> (exp(x + y)*(x + y)).expand(mul=False) + (x + y)*exp(x)*exp(y) + + - Use deep=False to only expand on the top level:: + + >>> exp(x + exp(x + y)).expand() + exp(x)*exp(exp(x)*exp(y)) + >>> exp(x + exp(x + y)).expand(deep=False) + exp(x)*exp(exp(x + y)) + + - Hints are applied in an arbitrary, but consistent order (in the current + implementation, they are applied in alphabetical order, except + multinomial comes before mul, but this may change). 
Because of this, + some hints may prevent expansion by other hints if they are applied + first. For example, ``mul`` may distribute multiplications and prevent + ``log`` and ``power_base`` from expanding them. Also, if ``mul`` is + applied before ``multinomial`, the expression might not be fully + distributed. The solution is to use the various ``expand_hint`` helper + functions or to use ``hint=False`` to this function to finely control + which hints are applied. Here are some examples:: + + >>> from sympy import expand, expand_mul, expand_power_base + >>> x, y, z = symbols('x,y,z', positive=True) + + >>> expand(log(x*(y + z))) + log(x) + log(y + z) + + Here, we see that ``log`` was applied before ``mul``. To get the mul + expanded form, either of the following will work:: + + >>> expand_mul(log(x*(y + z))) + log(x*y + x*z) + >>> expand(log(x*(y + z)), log=False) + log(x*y + x*z) + + A similar thing can happen with the ``power_base`` hint:: + + >>> expand((x*(y + z))**x) + (x*y + x*z)**x + + To get the ``power_base`` expanded form, either of the following will + work:: + + >>> expand((x*(y + z))**x, mul=False) + x**x*(y + z)**x + >>> expand_power_base((x*(y + z))**x) + x**x*(y + z)**x + + >>> expand((x + y)*y/x) + y + y**2/x + + The parts of a rational expression can be targeted:: + + >>> expand((x + y)*y/x/(x + 1), frac=True) + (x*y + y**2)/(x**2 + x) + >>> expand((x + y)*y/x/(x + 1), numer=True) + (x*y + y**2)/(x*(x + 1)) + >>> expand((x + y)*y/x/(x + 1), denom=True) + y*(x + y)/(x**2 + x) + + - The ``modulus`` meta-hint can be used to reduce the coefficients of an + expression post-expansion:: + + >>> expand((3*x + 1)**2) + 9*x**2 + 6*x + 1 + >>> expand((3*x + 1)**2, modulus=5) + 4*x**2 + x + 1 + + - Either ``expand()`` the function or ``.expand()`` the method can be + used. Both are equivalent:: + + >>> expand((x + 1)**2) + x**2 + 2*x + 1 + >>> ((x + 1)**2).expand() + x**2 + 2*x + 1 + + API + === + + Objects can define their own expand hints by defining + ``_eval_expand_hint()``. The function should take the form:: + + def _eval_expand_hint(self, **hints): + # Only apply the method to the top-level expression + ... + + See also the example below. Objects should define ``_eval_expand_hint()`` + methods only if ``hint`` applies to that specific object. The generic + ``_eval_expand_hint()`` method defined in Expr will handle the no-op case. + + Each hint should be responsible for expanding that hint only. + Furthermore, the expansion should be applied to the top-level expression + only. ``expand()`` takes care of the recursion that happens when + ``deep=True``. + + You should only call ``_eval_expand_hint()`` methods directly if you are + 100% sure that the object has the method, as otherwise you are liable to + get unexpected ``AttributeError``s. Note, again, that you do not need to + recursively apply the hint to args of your object: this is handled + automatically by ``expand()``. ``_eval_expand_hint()`` should + generally not be used at all outside of an ``_eval_expand_hint()`` method. + If you want to apply a specific expansion from within another method, use + the public ``expand()`` function, method, or ``expand_hint()`` functions. + + In order for expand to work, objects must be rebuildable by their args, + i.e., ``obj.func(*obj.args) == obj`` must hold. + + Expand methods are passed ``**hints`` so that expand hints may use + 'metahints'--hints that control how different expand methods are applied. 
+ For example, the ``force=True`` hint described above that causes + ``expand(log=True)`` to ignore assumptions is such a metahint. The + ``deep`` meta-hint is handled exclusively by ``expand()`` and is not + passed to ``_eval_expand_hint()`` methods. + + Note that expansion hints should generally be methods that perform some + kind of 'expansion'. For hints that simply rewrite an expression, use the + .rewrite() API. + + Examples + ======== + + >>> from sympy import Expr, sympify + >>> class MyClass(Expr): + ... def __new__(cls, *args): + ... args = sympify(args) + ... return Expr.__new__(cls, *args) + ... + ... def _eval_expand_double(self, *, force=False, **hints): + ... ''' + ... Doubles the args of MyClass. + ... + ... If there more than four args, doubling is not performed, + ... unless force=True is also used (False by default). + ... ''' + ... if not force and len(self.args) > 4: + ... return self + ... return self.func(*(self.args + self.args)) + ... + >>> a = MyClass(1, 2, MyClass(3, 4)) + >>> a + MyClass(1, 2, MyClass(3, 4)) + >>> a.expand(double=True) + MyClass(1, 2, MyClass(3, 4, 3, 4), 1, 2, MyClass(3, 4, 3, 4)) + >>> a.expand(double=True, deep=False) + MyClass(1, 2, MyClass(3, 4), 1, 2, MyClass(3, 4)) + + >>> b = MyClass(1, 2, 3, 4, 5) + >>> b.expand(double=True) + MyClass(1, 2, 3, 4, 5) + >>> b.expand(double=True, force=True) + MyClass(1, 2, 3, 4, 5, 1, 2, 3, 4, 5) + + See Also + ======== + + expand_log, expand_mul, expand_multinomial, expand_complex, expand_trig, + expand_power_base, expand_power_exp, expand_func, sympy.simplify.hyperexpand.hyperexpand + + """ + # don't modify this; modify the Expr.expand method + hints['power_base'] = power_base + hints['power_exp'] = power_exp + hints['mul'] = mul + hints['log'] = log + hints['multinomial'] = multinomial + hints['basic'] = basic + return sympify(e).expand(deep=deep, modulus=modulus, **hints) + +# This is a special application of two hints + +def _mexpand(expr, recursive=False): + # expand multinomials and then expand products; this may not always + # be sufficient to give a fully expanded expression (see + # test_issue_8247_8354 in test_arit) + if expr is None: + return + was = None + while was != expr: + was, expr = expr, expand_mul(expand_multinomial(expr)) + if not recursive: + break + return expr + + +# These are simple wrappers around single hints. + + +def expand_mul(expr, deep=True): + """ + Wrapper around expand that only uses the mul hint. See the expand + docstring for more information. + + Examples + ======== + + >>> from sympy import symbols, expand_mul, exp, log + >>> x, y = symbols('x,y', positive=True) + >>> expand_mul(exp(x+y)*(x+y)*log(x*y**2)) + x*exp(x + y)*log(x*y**2) + y*exp(x + y)*log(x*y**2) + + """ + return sympify(expr).expand(deep=deep, mul=True, power_exp=False, + power_base=False, basic=False, multinomial=False, log=False) + + +def expand_multinomial(expr, deep=True): + """ + Wrapper around expand that only uses the multinomial hint. See the expand + docstring for more information. + + Examples + ======== + + >>> from sympy import symbols, expand_multinomial, exp + >>> x, y = symbols('x y', positive=True) + >>> expand_multinomial((x + exp(x + 1))**2) + x**2 + 2*x*exp(x + 1) + exp(2*x + 2) + + """ + return sympify(expr).expand(deep=deep, mul=False, power_exp=False, + power_base=False, basic=False, multinomial=True, log=False) + + +def expand_log(expr, deep=True, force=False, factor=False): + """ + Wrapper around expand that only uses the log hint. 
See the expand + docstring for more information. + + Examples + ======== + + >>> from sympy import symbols, expand_log, exp, log + >>> x, y = symbols('x,y', positive=True) + >>> expand_log(exp(x+y)*(x+y)*log(x*y**2)) + (x + y)*(log(x) + 2*log(y))*exp(x + y) + + """ + from sympy.functions.elementary.exponential import log + if factor is False: + def _handle(x): + x1 = expand_mul(expand_log(x, deep=deep, force=force, factor=True)) + if x1.count(log) <= x.count(log): + return x1 + return x + + expr = expr.replace( + lambda x: x.is_Mul and all(any(isinstance(i, log) and i.args[0].is_Rational + for i in Mul.make_args(j)) for j in x.as_numer_denom()), + _handle) + + return sympify(expr).expand(deep=deep, log=True, mul=False, + power_exp=False, power_base=False, multinomial=False, + basic=False, force=force, factor=factor) + + +def expand_func(expr, deep=True): + """ + Wrapper around expand that only uses the func hint. See the expand + docstring for more information. + + Examples + ======== + + >>> from sympy import expand_func, gamma + >>> from sympy.abc import x + >>> expand_func(gamma(x + 2)) + x*(x + 1)*gamma(x) + + """ + return sympify(expr).expand(deep=deep, func=True, basic=False, + log=False, mul=False, power_exp=False, power_base=False, multinomial=False) + + +def expand_trig(expr, deep=True): + """ + Wrapper around expand that only uses the trig hint. See the expand + docstring for more information. + + Examples + ======== + + >>> from sympy import expand_trig, sin + >>> from sympy.abc import x, y + >>> expand_trig(sin(x+y)*(x+y)) + (x + y)*(sin(x)*cos(y) + sin(y)*cos(x)) + + """ + return sympify(expr).expand(deep=deep, trig=True, basic=False, + log=False, mul=False, power_exp=False, power_base=False, multinomial=False) + + +def expand_complex(expr, deep=True): + """ + Wrapper around expand that only uses the complex hint. See the expand + docstring for more information. + + Examples + ======== + + >>> from sympy import expand_complex, exp, sqrt, I + >>> from sympy.abc import z + >>> expand_complex(exp(z)) + I*exp(re(z))*sin(im(z)) + exp(re(z))*cos(im(z)) + >>> expand_complex(sqrt(I)) + sqrt(2)/2 + sqrt(2)*I/2 + + See Also + ======== + + sympy.core.expr.Expr.as_real_imag + """ + return sympify(expr).expand(deep=deep, complex=True, basic=False, + log=False, mul=False, power_exp=False, power_base=False, multinomial=False) + + +def expand_power_base(expr, deep=True, force=False): + """ + Wrapper around expand that only uses the power_base hint. + + A wrapper to expand(power_base=True) which separates a power with a base + that is a Mul into a product of powers, without performing any other + expansions, provided that assumptions about the power's base and exponent + allow. + + deep=False (default is True) will only apply to the top-level expression. + + force=True (default is False) will cause the expansion to ignore + assumptions about the base and exponent. When False, the expansion will + only happen if the base is non-negative or the exponent is an integer. 
+ + >>> from sympy.abc import x, y, z + >>> from sympy import expand_power_base, sin, cos, exp, Symbol + + >>> (x*y)**2 + x**2*y**2 + + >>> (2*x)**y + (2*x)**y + >>> expand_power_base(_) + 2**y*x**y + + >>> expand_power_base((x*y)**z) + (x*y)**z + >>> expand_power_base((x*y)**z, force=True) + x**z*y**z + >>> expand_power_base(sin((x*y)**z), deep=False) + sin((x*y)**z) + >>> expand_power_base(sin((x*y)**z), force=True) + sin(x**z*y**z) + + >>> expand_power_base((2*sin(x))**y + (2*cos(x))**y) + 2**y*sin(x)**y + 2**y*cos(x)**y + + >>> expand_power_base((2*exp(y))**x) + 2**x*exp(y)**x + + >>> expand_power_base((2*cos(x))**y) + 2**y*cos(x)**y + + Notice that sums are left untouched. If this is not the desired behavior, + apply full ``expand()`` to the expression: + + >>> expand_power_base(((x+y)*z)**2) + z**2*(x + y)**2 + >>> (((x+y)*z)**2).expand() + x**2*z**2 + 2*x*y*z**2 + y**2*z**2 + + >>> expand_power_base((2*y)**(1+z)) + 2**(z + 1)*y**(z + 1) + >>> ((2*y)**(1+z)).expand() + 2*2**z*y**(z + 1) + + The power that is unexpanded can be expanded safely when + ``y != 0``, otherwise different values might be obtained for the expression: + + >>> prev = _ + + If we indicate that ``y`` is positive but then replace it with + a value of 0 after expansion, the expression becomes 0: + + >>> p = Symbol('p', positive=True) + >>> prev.subs(y, p).expand().subs(p, 0) + 0 + + But if ``z = -1`` the expression would not be zero: + + >>> prev.subs(y, 0).subs(z, -1) + 1 + + See Also + ======== + + expand + + """ + return sympify(expr).expand(deep=deep, log=False, mul=False, + power_exp=False, power_base=True, multinomial=False, + basic=False, force=force) + + +def expand_power_exp(expr, deep=True): + """ + Wrapper around expand that only uses the power_exp hint. + + See the expand docstring for more information. + + Examples + ======== + + >>> from sympy import expand_power_exp, Symbol + >>> from sympy.abc import x, y + >>> expand_power_exp(3**(y + 2)) + 9*3**y + >>> expand_power_exp(x**(y + 2)) + x**(y + 2) + + If ``x = 0`` the value of the expression depends on the + value of ``y``; if the expression were expanded the result + would be 0. So expansion is only done if ``x != 0``: + + >>> expand_power_exp(Symbol('x', zero=False)**(y + 2)) + x**2*x**y + """ + return sympify(expr).expand(deep=deep, complex=False, basic=False, + log=False, mul=False, power_exp=True, power_base=False, multinomial=False) + + +def count_ops(expr, visual=False): + """ + Return a representation (integer or expression) of the operations in expr. + + Parameters + ========== + + expr : Expr + If expr is an iterable, the sum of the op counts of the + items will be returned. + + visual : bool, optional + If ``False`` (default) then the sum of the coefficients of the + visual expression will be returned. + If ``True`` then the number of each type of operation is shown + with the core class types (or their virtual equivalent) multiplied by the + number of times they occur. 
+ + Examples + ======== + + >>> from sympy.abc import a, b, x, y + >>> from sympy import sin, count_ops + + Although there is not a SUB object, minus signs are interpreted as + either negations or subtractions: + + >>> (x - y).count_ops(visual=True) + SUB + >>> (-x).count_ops(visual=True) + NEG + + Here, there are two Adds and a Pow: + + >>> (1 + a + b**2).count_ops(visual=True) + 2*ADD + POW + + In the following, an Add, Mul, Pow and two functions: + + >>> (sin(x)*x + sin(x)**2).count_ops(visual=True) + ADD + MUL + POW + 2*SIN + + for a total of 5: + + >>> (sin(x)*x + sin(x)**2).count_ops(visual=False) + 5 + + Note that "what you type" is not always what you get. The expression + 1/x/y is translated by sympy into 1/(x*y) so it gives a DIV and MUL rather + than two DIVs: + + >>> (1/x/y).count_ops(visual=True) + DIV + MUL + + The visual option can be used to demonstrate the difference in + operations for expressions in different forms. Here, the Horner + representation is compared with the expanded form of a polynomial: + + >>> eq=x*(1 + x*(2 + x*(3 + x))) + >>> count_ops(eq.expand(), visual=True) - count_ops(eq, visual=True) + -MUL + 3*POW + + The count_ops function also handles iterables: + + >>> count_ops([x, sin(x), None, True, x + 2], visual=False) + 2 + >>> count_ops([x, sin(x), None, True, x + 2], visual=True) + ADD + SIN + >>> count_ops({x: sin(x), x + 2: y + 1}, visual=True) + 2*ADD + SIN + + """ + from .relational import Relational + from sympy.concrete.summations import Sum + from sympy.integrals.integrals import Integral + from sympy.logic.boolalg import BooleanFunction + from sympy.simplify.radsimp import fraction + + expr = sympify(expr) + if isinstance(expr, Expr) and not expr.is_Relational: + + ops = [] + args = [expr] + NEG = Symbol('NEG') + DIV = Symbol('DIV') + SUB = Symbol('SUB') + ADD = Symbol('ADD') + EXP = Symbol('EXP') + while args: + a = args.pop() + + # if the following fails because the object is + # not Basic type, then the object should be fixed + # since it is the intention that all args of Basic + # should themselves be Basic + if a.is_Rational: + #-1/3 = NEG + DIV + if a is not S.One: + if a.p < 0: + ops.append(NEG) + if a.q != 1: + ops.append(DIV) + continue + elif a.is_Mul or a.is_MatMul: + if _coeff_isneg(a): + ops.append(NEG) + if a.args[0] is S.NegativeOne: + a = a.as_two_terms()[1] + else: + a = -a + n, d = fraction(a) + if n.is_Integer: + ops.append(DIV) + if n < 0: + ops.append(NEG) + args.append(d) + continue # won't be -Mul but could be Add + elif d is not S.One: + if not d.is_Integer: + args.append(d) + ops.append(DIV) + args.append(n) + continue # could be -Mul + elif a.is_Add or a.is_MatAdd: + aargs = list(a.args) + negs = 0 + for i, ai in enumerate(aargs): + if _coeff_isneg(ai): + negs += 1 + args.append(-ai) + if i > 0: + ops.append(SUB) + else: + args.append(ai) + if i > 0: + ops.append(ADD) + if negs == len(aargs): # -x - y = NEG + SUB + ops.append(NEG) + elif _coeff_isneg(aargs[0]): # -x + y = SUB, but already recorded ADD + ops.append(SUB - ADD) + continue + if a.is_Pow and a.exp is S.NegativeOne: + ops.append(DIV) + args.append(a.base) # won't be -Mul but could be Add + continue + if a == S.Exp1: + ops.append(EXP) + continue + if a.is_Pow and a.base == S.Exp1: + ops.append(EXP) + args.append(a.exp) + continue + if a.is_Mul or isinstance(a, LatticeOp): + o = Symbol(a.func.__name__.upper()) + # count the args + ops.append(o*(len(a.args) - 1)) + elif a.args and ( + a.is_Pow or + a.is_Function or + isinstance(a, Derivative) or + isinstance(a, 
Integral) or + isinstance(a, Sum)): + # if it's not in the list above we don't + # consider a.func something to count, e.g. + # Tuple, MatrixSymbol, etc... + if isinstance(a.func, UndefinedFunction): + o = Symbol("FUNC_" + a.func.__name__.upper()) + else: + o = Symbol(a.func.__name__.upper()) + ops.append(o) + + if not a.is_Symbol: + args.extend(a.args) + + elif isinstance(expr, Dict): + ops = [count_ops(k, visual=visual) + + count_ops(v, visual=visual) for k, v in expr.items()] + elif iterable(expr): + ops = [count_ops(i, visual=visual) for i in expr] + elif isinstance(expr, (Relational, BooleanFunction)): + ops = [] + for arg in expr.args: + ops.append(count_ops(arg, visual=True)) + o = Symbol(func_name(expr, short=True).upper()) + ops.append(o) + elif not isinstance(expr, Basic): + ops = [] + else: # it's Basic not isinstance(expr, Expr): + if not isinstance(expr, Basic): + raise TypeError("Invalid type of expr") + else: + ops = [] + args = [expr] + while args: + a = args.pop() + + if a.args: + o = Symbol(type(a).__name__.upper()) + if a.is_Boolean: + ops.append(o*(len(a.args)-1)) + else: + ops.append(o) + args.extend(a.args) + + if not ops: + if visual: + return S.Zero + return 0 + + ops = Add(*ops) + + if visual: + return ops + + if ops.is_Number: + return int(ops) + + return sum(int((a.args or [1])[0]) for a in Add.make_args(ops)) + + +def nfloat(expr, n=15, exponent=False, dkeys=False): + """Make all Rationals in expr Floats except those in exponents + (unless the exponents flag is set to True) and those in undefined + functions. When processing dictionaries, do not modify the keys + unless ``dkeys=True``. + + Examples + ======== + + >>> from sympy import nfloat, cos, pi, sqrt + >>> from sympy.abc import x, y + >>> nfloat(x**4 + x/2 + cos(pi/3) + 1 + sqrt(y)) + x**4 + 0.5*x + sqrt(y) + 1.5 + >>> nfloat(x**4 + sqrt(y), exponent=True) + x**4.0 + y**0.5 + + Container types are not modified: + + >>> type(nfloat((1, 2))) is tuple + True + """ + from sympy.matrices.matrices import MatrixBase + + kw = {"n": n, "exponent": exponent, "dkeys": dkeys} + + if isinstance(expr, MatrixBase): + return expr.applyfunc(lambda e: nfloat(e, **kw)) + + # handling of iterable containers + if iterable(expr, exclude=str): + if isinstance(expr, (dict, Dict)): + if dkeys: + args = [tuple((nfloat(i, **kw) for i in a)) + for a in expr.items()] + else: + args = [(k, nfloat(v, **kw)) for k, v in expr.items()] + if isinstance(expr, dict): + return type(expr)(args) + else: + return expr.func(*args) + elif isinstance(expr, Basic): + return expr.func(*[nfloat(a, **kw) for a in expr.args]) + return type(expr)([nfloat(a, **kw) for a in expr]) + + rv = sympify(expr) + + if rv.is_Number: + return Float(rv, n) + elif rv.is_number: + # evalf doesn't always set the precision + rv = rv.n(n) + if rv.is_Number: + rv = Float(rv.n(n), n) + else: + pass # pure_complex(rv) is likely True + return rv + elif rv.is_Atom: + return rv + elif rv.is_Relational: + args_nfloat = (nfloat(arg, **kw) for arg in rv.args) + return rv.func(*args_nfloat) + + + # watch out for RootOf instances that don't like to have + # their exponents replaced with Dummies and also sometimes have + # problems with evaluating at low precision (issue 6393) + from sympy.polys.rootoftools import RootOf + rv = rv.xreplace({ro: ro.n(n) for ro in rv.atoms(RootOf)}) + + from .power import Pow + if not exponent: + reps = [(p, Pow(p.base, Dummy())) for p in rv.atoms(Pow)] + rv = rv.xreplace(dict(reps)) + rv = rv.n(n) + if not exponent: + rv = rv.xreplace({d.exp: p.exp for 
p, d in reps}) + else: + # Pow._eval_evalf special cases Integer exponents so if + # exponent is suppose to be handled we have to do so here + rv = rv.xreplace(Transform( + lambda x: Pow(x.base, Float(x.exp, n)), + lambda x: x.is_Pow and x.exp.is_Integer)) + + return rv.xreplace(Transform( + lambda x: x.func(*nfloat(x.args, n, exponent)), + lambda x: isinstance(x, Function) and not isinstance(x, AppliedUndef))) + + +from .symbol import Dummy, Symbol diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/core/mod.py b/env-llmeval/lib/python3.10/site-packages/sympy/core/mod.py new file mode 100644 index 0000000000000000000000000000000000000000..78dccd3634703c8e38360bf979ab4fcd58f24763 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/core/mod.py @@ -0,0 +1,238 @@ +from .add import Add +from .exprtools import gcd_terms +from .function import Function +from .kind import NumberKind +from .logic import fuzzy_and, fuzzy_not +from .mul import Mul +from .numbers import equal_valued +from .singleton import S + + +class Mod(Function): + """Represents a modulo operation on symbolic expressions. + + Parameters + ========== + + p : Expr + Dividend. + + q : Expr + Divisor. + + Notes + ===== + + The convention used is the same as Python's: the remainder always has the + same sign as the divisor. + + Examples + ======== + + >>> from sympy.abc import x, y + >>> x**2 % y + Mod(x**2, y) + >>> _.subs({x: 5, y: 6}) + 1 + + """ + + kind = NumberKind + + @classmethod + def eval(cls, p, q): + def number_eval(p, q): + """Try to return p % q if both are numbers or +/-p is known + to be less than or equal q. + """ + + if q.is_zero: + raise ZeroDivisionError("Modulo by zero") + if p is S.NaN or q is S.NaN or p.is_finite is False or q.is_finite is False: + return S.NaN + if p is S.Zero or p in (q, -q) or (p.is_integer and q == 1): + return S.Zero + + if q.is_Number: + if p.is_Number: + return p%q + if q == 2: + if p.is_even: + return S.Zero + elif p.is_odd: + return S.One + + if hasattr(p, '_eval_Mod'): + rv = getattr(p, '_eval_Mod')(q) + if rv is not None: + return rv + + # by ratio + r = p/q + if r.is_integer: + return S.Zero + try: + d = int(r) + except TypeError: + pass + else: + if isinstance(d, int): + rv = p - d*q + if (rv*q < 0) == True: + rv += q + return rv + + # by difference + # -2|q| < p < 2|q| + d = abs(p) + for _ in range(2): + d -= abs(q) + if d.is_negative: + if q.is_positive: + if p.is_positive: + return d + q + elif p.is_negative: + return -d + elif q.is_negative: + if p.is_positive: + return d + elif p.is_negative: + return -d + q + break + + rv = number_eval(p, q) + if rv is not None: + return rv + + # denest + if isinstance(p, cls): + qinner = p.args[1] + if qinner % q == 0: + return cls(p.args[0], q) + elif (qinner*(q - qinner)).is_nonnegative: + # |qinner| < |q| and have same sign + return p + elif isinstance(-p, cls): + qinner = (-p).args[1] + if qinner % q == 0: + return cls(-(-p).args[0], q) + elif (qinner*(q + qinner)).is_nonpositive: + # |qinner| < |q| and have different sign + return p + elif isinstance(p, Add): + # separating into modulus and non modulus + both_l = non_mod_l, mod_l = [], [] + for arg in p.args: + both_l[isinstance(arg, cls)].append(arg) + # if q same for all + if mod_l and all(inner.args[1] == q for inner in mod_l): + net = Add(*non_mod_l) + Add(*[i.args[0] for i in mod_l]) + return cls(net, q) + + elif isinstance(p, Mul): + # separating into modulus and non modulus + both_l = non_mod_l, mod_l = [], [] + for arg in p.args: + both_l[isinstance(arg, 
cls)].append(arg) + + if mod_l and all(inner.args[1] == q for inner in mod_l) and all(t.is_integer for t in p.args) and q.is_integer: + # finding distributive term + non_mod_l = [cls(x, q) for x in non_mod_l] + mod = [] + non_mod = [] + for j in non_mod_l: + if isinstance(j, cls): + mod.append(j.args[0]) + else: + non_mod.append(j) + prod_mod = Mul(*mod) + prod_non_mod = Mul(*non_mod) + prod_mod1 = Mul(*[i.args[0] for i in mod_l]) + net = prod_mod1*prod_mod + return prod_non_mod*cls(net, q) + + if q.is_Integer and q is not S.One: + non_mod_l = [i % q if i.is_Integer and (i % q is not S.Zero) else i for + i in non_mod_l] + + p = Mul(*(non_mod_l + mod_l)) + + # XXX other possibilities? + + from sympy.polys.polyerrors import PolynomialError + from sympy.polys.polytools import gcd + + # extract gcd; any further simplification should be done by the user + try: + G = gcd(p, q) + if not equal_valued(G, 1): + p, q = [gcd_terms(i/G, clear=False, fraction=False) + for i in (p, q)] + except PolynomialError: # issue 21373 + G = S.One + pwas, qwas = p, q + + # simplify terms + # (x + y + 2) % x -> Mod(y + 2, x) + if p.is_Add: + args = [] + for i in p.args: + a = cls(i, q) + if a.count(cls) > i.count(cls): + args.append(i) + else: + args.append(a) + if args != list(p.args): + p = Add(*args) + + else: + # handle coefficients if they are not Rational + # since those are not handled by factor_terms + # e.g. Mod(.6*x, .3*y) -> 0.3*Mod(2*x, y) + cp, p = p.as_coeff_Mul() + cq, q = q.as_coeff_Mul() + ok = False + if not cp.is_Rational or not cq.is_Rational: + r = cp % cq + if equal_valued(r, 0): + G *= cq + p *= int(cp/cq) + ok = True + if not ok: + p = cp*p + q = cq*q + + # simple -1 extraction + if p.could_extract_minus_sign() and q.could_extract_minus_sign(): + G, p, q = [-i for i in (G, p, q)] + + # check again to see if p and q can now be handled as numbers + rv = number_eval(p, q) + if rv is not None: + return rv*G + + # put 1.0 from G on inside + if G.is_Float and equal_valued(G, 1): + p *= G + return cls(p, q, evaluate=False) + elif G.is_Mul and G.args[0].is_Float and equal_valued(G.args[0], 1): + p = G.args[0]*p + G = Mul._from_args(G.args[1:]) + return G*cls(p, q, evaluate=(p, q) != (pwas, qwas)) + + def _eval_is_integer(self): + p, q = self.args + if fuzzy_and([p.is_integer, q.is_integer, fuzzy_not(q.is_zero)]): + return True + + def _eval_is_nonnegative(self): + if self.args[1].is_positive: + return True + + def _eval_is_nonpositive(self): + if self.args[1].is_negative: + return True + + def _eval_rewrite_as_floor(self, a, b, **kwargs): + from sympy.functions.elementary.integers import floor + return a - b*floor(a/b) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/core/multidimensional.py b/env-llmeval/lib/python3.10/site-packages/sympy/core/multidimensional.py new file mode 100644 index 0000000000000000000000000000000000000000..133e0ab6cba6a87c627feb6f6034a6daed1128c5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/core/multidimensional.py @@ -0,0 +1,131 @@ +""" +Provides functionality for multidimensional usage of scalar-functions. + +Read the vectorize docstring for more details. +""" + +from functools import wraps + + +def apply_on_element(f, args, kwargs, n): + """ + Returns a structure with the same dimension as the specified argument, + where each basic element is replaced by the function f applied on it. All + other arguments stay the same. + """ + # Get the specified argument. 
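+    # ``n`` selects which argument carries the structure: an int indexes
+    # into ``args`` (a positional argument), a str keys into ``kwargs``.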
+ if isinstance(n, int): + structure = args[n] + is_arg = True + elif isinstance(n, str): + structure = kwargs[n] + is_arg = False + + # Define reduced function that is only dependent on the specified argument. + def f_reduced(x): + if hasattr(x, "__iter__"): + return list(map(f_reduced, x)) + else: + if is_arg: + args[n] = x + else: + kwargs[n] = x + return f(*args, **kwargs) + + # f_reduced will call itself recursively so that in the end f is applied to + # all basic elements. + return list(map(f_reduced, structure)) + + +def iter_copy(structure): + """ + Returns a copy of an iterable object (also copying all embedded iterables). + """ + return [iter_copy(i) if hasattr(i, "__iter__") else i for i in structure] + + +def structure_copy(structure): + """ + Returns a copy of the given structure (numpy-array, list, iterable, ..). + """ + if hasattr(structure, "copy"): + return structure.copy() + return iter_copy(structure) + + +class vectorize: + """ + Generalizes a function taking scalars to accept multidimensional arguments. + + Examples + ======== + + >>> from sympy import vectorize, diff, sin, symbols, Function + >>> x, y, z = symbols('x y z') + >>> f, g, h = list(map(Function, 'fgh')) + + >>> @vectorize(0) + ... def vsin(x): + ... return sin(x) + + >>> vsin([1, x, y]) + [sin(1), sin(x), sin(y)] + + >>> @vectorize(0, 1) + ... def vdiff(f, y): + ... return diff(f, y) + + >>> vdiff([f(x, y, z), g(x, y, z), h(x, y, z)], [x, y, z]) + [[Derivative(f(x, y, z), x), Derivative(f(x, y, z), y), Derivative(f(x, y, z), z)], [Derivative(g(x, y, z), x), Derivative(g(x, y, z), y), Derivative(g(x, y, z), z)], [Derivative(h(x, y, z), x), Derivative(h(x, y, z), y), Derivative(h(x, y, z), z)]] + """ + def __init__(self, *mdargs): + """ + The given numbers and strings characterize the arguments that will be + treated as data structures, where the decorated function will be applied + to every single element. + If no argument is given, everything is treated multidimensional. + """ + for a in mdargs: + if not isinstance(a, (int, str)): + raise TypeError("a is of invalid type") + self.mdargs = mdargs + + def __call__(self, f): + """ + Returns a wrapper for the one-dimensional function that can handle + multidimensional arguments. + """ + @wraps(f) + def wrapper(*args, **kwargs): + # Get arguments that should be treated multidimensional + if self.mdargs: + mdargs = self.mdargs + else: + mdargs = range(len(args)) + kwargs.keys() + + arglength = len(args) + + for n in mdargs: + if isinstance(n, int): + if n >= arglength: + continue + entry = args[n] + is_arg = True + elif isinstance(n, str): + try: + entry = kwargs[n] + except KeyError: + continue + is_arg = False + if hasattr(entry, "__iter__"): + # Create now a copy of the given array and manipulate then + # the entries directly. 
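+                    # ``apply_on_element`` maps ``wrapper`` itself over the
+                    # copied structure, so any remaining multidimensional
+                    # arguments are expanded on those recursive calls.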
+ if is_arg: + args = list(args) + args[n] = structure_copy(entry) + else: + kwargs[n] = structure_copy(entry) + result = apply_on_element(wrapper, args, kwargs, n) + return result + return f(*args, **kwargs) + return wrapper diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/core/power.py b/env-llmeval/lib/python3.10/site-packages/sympy/core/power.py new file mode 100644 index 0000000000000000000000000000000000000000..d8f4206d5107ee3ca28817369df93c08547506fe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/core/power.py @@ -0,0 +1,2004 @@ +from __future__ import annotations +from typing import Callable +from math import log as _log, sqrt as _sqrt +from itertools import product + +from .sympify import _sympify +from .cache import cacheit +from .singleton import S +from .expr import Expr +from .evalf import PrecisionExhausted +from .function import (expand_complex, expand_multinomial, + expand_mul, _mexpand, PoleError) +from .logic import fuzzy_bool, fuzzy_not, fuzzy_and, fuzzy_or +from .parameters import global_parameters +from .relational import is_gt, is_lt +from .kind import NumberKind, UndefinedKind +from sympy.external.gmpy import HAS_GMPY, gmpy +from sympy.utilities.iterables import sift +from sympy.utilities.exceptions import sympy_deprecation_warning +from sympy.utilities.misc import as_int +from sympy.multipledispatch import Dispatcher + +from mpmath.libmp import sqrtrem as mpmath_sqrtrem + + + +def isqrt(n): + """Return the largest integer less than or equal to sqrt(n).""" + if n < 0: + raise ValueError("n must be nonnegative") + n = int(n) + + # Fast path: with IEEE 754 binary64 floats and a correctly-rounded + # math.sqrt, int(math.sqrt(n)) works for any integer n satisfying 0 <= n < + # 4503599761588224 = 2**52 + 2**27. But Python doesn't guarantee either + # IEEE 754 format floats *or* correct rounding of math.sqrt, so check the + # answer and fall back to the slow method if necessary. + if n < 4503599761588224: + s = int(_sqrt(n)) + if 0 <= n - s*s <= 2*s: + return s + + return integer_nthroot(n, 2)[0] + + +def integer_nthroot(y, n): + """ + Return a tuple containing x = floor(y**(1/n)) + and a boolean indicating whether the result is exact (that is, + whether x**n == y). + + Examples + ======== + + >>> from sympy import integer_nthroot + >>> integer_nthroot(16, 2) + (4, True) + >>> integer_nthroot(26, 2) + (5, False) + + To simply determine if a number is a perfect square, the is_square + function should be used: + + >>> from sympy.ntheory.primetest import is_square + >>> is_square(26) + False + + See Also + ======== + sympy.ntheory.primetest.is_square + integer_log + """ + y, n = as_int(y), as_int(n) + if y < 0: + raise ValueError("y must be nonnegative") + if n < 1: + raise ValueError("n must be positive") + if HAS_GMPY and n < 2**63: + # Currently it works only for n < 2**63, else it produces TypeError + # sympy issue: https://github.com/sympy/sympy/issues/18374 + # gmpy2 issue: https://github.com/aleaxit/gmpy/issues/257 + if HAS_GMPY >= 2: + x, t = gmpy.iroot(y, n) + else: + x, t = gmpy.root(y, n) + return as_int(x), bool(t) + return _integer_nthroot_python(y, n) + +def _integer_nthroot_python(y, n): + if y in (0, 1): + return y, True + if n == 1: + return y, True + if n == 2: + x, rem = mpmath_sqrtrem(y) + return int(x), not rem + if n >= y.bit_length(): + return 1, False + # Get initial estimate for Newton's method. 
Care must be taken to + # avoid overflow + try: + guess = int(y**(1./n) + 0.5) + except OverflowError: + exp = _log(y, 2)/n + if exp > 53: + shift = int(exp - 53) + guess = int(2.0**(exp - shift) + 1) << shift + else: + guess = int(2.0**exp) + if guess > 2**50: + # Newton iteration + xprev, x = -1, guess + while 1: + t = x**(n - 1) + xprev, x = x, ((n - 1)*x + y//t)//n + if abs(x - xprev) < 2: + break + else: + x = guess + # Compensate + t = x**n + while t < y: + x += 1 + t = x**n + while t > y: + x -= 1 + t = x**n + return int(x), t == y # int converts long to int if possible + + +def integer_log(y, x): + r""" + Returns ``(e, bool)`` where e is the largest nonnegative integer + such that :math:`|y| \geq |x^e|` and ``bool`` is True if $y = x^e$. + + Examples + ======== + + >>> from sympy import integer_log + >>> integer_log(125, 5) + (3, True) + >>> integer_log(17, 9) + (1, False) + >>> integer_log(4, -2) + (2, True) + >>> integer_log(-125,-5) + (3, True) + + See Also + ======== + integer_nthroot + sympy.ntheory.primetest.is_square + sympy.ntheory.factor_.multiplicity + sympy.ntheory.factor_.perfect_power + """ + if x == 1: + raise ValueError('x cannot take value as 1') + if y == 0: + raise ValueError('y cannot take value as 0') + + if x in (-2, 2): + x = int(x) + y = as_int(y) + e = y.bit_length() - 1 + return e, x**e == y + if x < 0: + n, b = integer_log(y if y > 0 else -y, -x) + return n, b and bool(n % 2 if y < 0 else not n % 2) + + x = as_int(x) + y = as_int(y) + r = e = 0 + while y >= x: + d = x + m = 1 + while y >= d: + y, rem = divmod(y, d) + r = r or rem + e += m + if y > d: + d *= d + m *= 2 + return e, r == 0 and y == 1 + + +class Pow(Expr): + """ + Defines the expression x**y as "x raised to a power y" + + .. deprecated:: 1.7 + + Using arguments that aren't subclasses of :class:`~.Expr` in core + operators (:class:`~.Mul`, :class:`~.Add`, and :class:`~.Pow`) is + deprecated. See :ref:`non-expr-args-deprecated` for details. + + Singleton definitions involving (0, 1, -1, oo, -oo, I, -I): + + +--------------+---------+-----------------------------------------------+ + | expr | value | reason | + +==============+=========+===============================================+ + | z**0 | 1 | Although arguments over 0**0 exist, see [2]. | + +--------------+---------+-----------------------------------------------+ + | z**1 | z | | + +--------------+---------+-----------------------------------------------+ + | (-oo)**(-1) | 0 | | + +--------------+---------+-----------------------------------------------+ + | (-1)**-1 | -1 | | + +--------------+---------+-----------------------------------------------+ + | S.Zero**-1 | zoo | This is not strictly true, as 0**-1 may be | + | | | undefined, but is convenient in some contexts | + | | | where the base is assumed to be positive. | + +--------------+---------+-----------------------------------------------+ + | 1**-1 | 1 | | + +--------------+---------+-----------------------------------------------+ + | oo**-1 | 0 | | + +--------------+---------+-----------------------------------------------+ + | 0**oo | 0 | Because for all complex numbers z near | + | | | 0, z**oo -> 0. | + +--------------+---------+-----------------------------------------------+ + | 0**-oo | zoo | This is not strictly true, as 0**oo may be | + | | | oscillating between positive and negative | + | | | values or rotating in the complex plane. | + | | | It is convenient, however, when the base | + | | | is positive. 
| + +--------------+---------+-----------------------------------------------+ + | 1**oo | nan | Because there are various cases where | + | 1**-oo | | lim(x(t),t)=1, lim(y(t),t)=oo (or -oo), | + | | | but lim( x(t)**y(t), t) != 1. See [3]. | + +--------------+---------+-----------------------------------------------+ + | b**zoo | nan | Because b**z has no limit as z -> zoo | + +--------------+---------+-----------------------------------------------+ + | (-1)**oo | nan | Because of oscillations in the limit. | + | (-1)**(-oo) | | | + +--------------+---------+-----------------------------------------------+ + | oo**oo | oo | | + +--------------+---------+-----------------------------------------------+ + | oo**-oo | 0 | | + +--------------+---------+-----------------------------------------------+ + | (-oo)**oo | nan | | + | (-oo)**-oo | | | + +--------------+---------+-----------------------------------------------+ + | oo**I | nan | oo**e could probably be best thought of as | + | (-oo)**I | | the limit of x**e for real x as x tends to | + | | | oo. If e is I, then the limit does not exist | + | | | and nan is used to indicate that. | + +--------------+---------+-----------------------------------------------+ + | oo**(1+I) | zoo | If the real part of e is positive, then the | + | (-oo)**(1+I) | | limit of abs(x**e) is oo. So the limit value | + | | | is zoo. | + +--------------+---------+-----------------------------------------------+ + | oo**(-1+I) | 0 | If the real part of e is negative, then the | + | -oo**(-1+I) | | limit is 0. | + +--------------+---------+-----------------------------------------------+ + + Because symbolic computations are more flexible than floating point + calculations and we prefer to never return an incorrect answer, + we choose not to conform to all IEEE 754 conventions. This helps + us avoid extra test-case code in the calculation of limits. + + See Also + ======== + + sympy.core.numbers.Infinity + sympy.core.numbers.NegativeInfinity + sympy.core.numbers.NaN + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Exponentiation + .. [2] https://en.wikipedia.org/wiki/Zero_to_the_power_of_zero + .. [3] https://en.wikipedia.org/wiki/Indeterminate_forms + + """ + is_Pow = True + + __slots__ = ('is_commutative',) + + args: tuple[Expr, Expr] + _args: tuple[Expr, Expr] + + @cacheit + def __new__(cls, b, e, evaluate=None): + if evaluate is None: + evaluate = global_parameters.evaluate + + b = _sympify(b) + e = _sympify(e) + + # XXX: This can be removed when non-Expr args are disallowed rather + # than deprecated. + from .relational import Relational + if isinstance(b, Relational) or isinstance(e, Relational): + raise TypeError('Relational cannot be used in Pow') + + # XXX: This should raise TypeError once deprecation period is over: + for arg in [b, e]: + if not isinstance(arg, Expr): + sympy_deprecation_warning( + f""" + Using non-Expr arguments in Pow is deprecated (in this case, one of the + arguments is of type {type(arg).__name__!r}). 
+ + If you really did intend to construct a power with this base, use the ** + operator instead.""", + deprecated_since_version="1.7", + active_deprecations_target="non-expr-args-deprecated", + stacklevel=4, + ) + + if evaluate: + if e is S.ComplexInfinity: + return S.NaN + if e is S.Infinity: + if is_gt(b, S.One): + return S.Infinity + if is_gt(b, S.NegativeOne) and is_lt(b, S.One): + return S.Zero + if is_lt(b, S.NegativeOne): + if b.is_finite: + return S.ComplexInfinity + if b.is_finite is False: + return S.NaN + if e is S.Zero: + return S.One + elif e is S.One: + return b + elif e == -1 and not b: + return S.ComplexInfinity + elif e.__class__.__name__ == "AccumulationBounds": + if b == S.Exp1: + from sympy.calculus.accumulationbounds import AccumBounds + return AccumBounds(Pow(b, e.min), Pow(b, e.max)) + # autosimplification if base is a number and exp odd/even + # if base is Number then the base will end up positive; we + # do not do this with arbitrary expressions since symbolic + # cancellation might occur as in (x - 1)/(1 - x) -> -1. If + # we returned Piecewise((-1, Ne(x, 1))) for such cases then + # we could do this...but we don't + elif (e.is_Symbol and e.is_integer or e.is_Integer + ) and (b.is_number and b.is_Mul or b.is_Number + ) and b.could_extract_minus_sign(): + if e.is_even: + b = -b + elif e.is_odd: + return -Pow(-b, e) + if S.NaN in (b, e): # XXX S.NaN**x -> S.NaN under assumption that x != 0 + return S.NaN + elif b is S.One: + if abs(e).is_infinite: + return S.NaN + return S.One + else: + # recognize base as E + from sympy.functions.elementary.exponential import exp_polar + if not e.is_Atom and b is not S.Exp1 and not isinstance(b, exp_polar): + from .exprtools import factor_terms + from sympy.functions.elementary.exponential import log + from sympy.simplify.radsimp import fraction + c, ex = factor_terms(e, sign=False).as_coeff_Mul() + num, den = fraction(ex) + if isinstance(den, log) and den.args[0] == b: + return S.Exp1**(c*num) + elif den.is_Add: + from sympy.functions.elementary.complexes import sign, im + s = sign(im(b)) + if s.is_Number and s and den == \ + log(-factor_terms(b, sign=False)) + s*S.ImaginaryUnit*S.Pi: + return S.Exp1**(c*num) + + obj = b._eval_power(e) + if obj is not None: + return obj + obj = Expr.__new__(cls, b, e) + obj = cls._exec_constructor_postprocessors(obj) + if not isinstance(obj, Pow): + return obj + obj.is_commutative = (b.is_commutative and e.is_commutative) + return obj + + def inverse(self, argindex=1): + if self.base == S.Exp1: + from sympy.functions.elementary.exponential import log + return log + return None + + @property + def base(self) -> Expr: + return self._args[0] + + @property + def exp(self) -> Expr: + return self._args[1] + + @property + def kind(self): + if self.exp.kind is NumberKind: + return self.base.kind + else: + return UndefinedKind + + @classmethod + def class_key(cls): + return 3, 2, cls.__name__ + + def _eval_refine(self, assumptions): + from sympy.assumptions.ask import ask, Q + b, e = self.as_base_exp() + if ask(Q.integer(e), assumptions) and b.could_extract_minus_sign(): + if ask(Q.even(e), assumptions): + return Pow(-b, e) + elif ask(Q.odd(e), assumptions): + return -Pow(-b, e) + + def _eval_power(self, other): + b, e = self.as_base_exp() + if b is S.NaN: + return (b**e)**other # let __new__ handle it + + s = None + if other.is_integer: + s = 1 + elif b.is_polar: # e.g. exp_polar, besselj, var('p', polar=True)... 
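+            # polar objects live on the Riemann surface of the logarithm,
+            # so (b**e)**other == b**(e*other) holds without a correction
+            # factor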
+ s = 1 + elif e.is_extended_real is not None: + from sympy.functions.elementary.complexes import arg, im, re, sign + from sympy.functions.elementary.exponential import exp, log + from sympy.functions.elementary.integers import floor + # helper functions =========================== + def _half(e): + """Return True if the exponent has a literal 2 as the + denominator, else None.""" + if getattr(e, 'q', None) == 2: + return True + n, d = e.as_numer_denom() + if n.is_integer and d == 2: + return True + def _n2(e): + """Return ``e`` evaluated to a Number with 2 significant + digits, else None.""" + try: + rv = e.evalf(2, strict=True) + if rv.is_Number: + return rv + except PrecisionExhausted: + pass + # =================================================== + if e.is_extended_real: + # we need _half(other) with constant floor or + # floor(S.Half - e*arg(b)/2/pi) == 0 + + + # handle -1 as special case + if e == -1: + # floor arg. is 1/2 + arg(b)/2/pi + if _half(other): + if b.is_negative is True: + return S.NegativeOne**other*Pow(-b, e*other) + elif b.is_negative is False: # XXX ok if im(b) != 0? + return Pow(b, -other) + elif e.is_even: + if b.is_extended_real: + b = abs(b) + if b.is_imaginary: + b = abs(im(b))*S.ImaginaryUnit + + if (abs(e) < 1) == True or e == 1: + s = 1 # floor = 0 + elif b.is_extended_nonnegative: + s = 1 # floor = 0 + elif re(b).is_extended_nonnegative and (abs(e) < 2) == True: + s = 1 # floor = 0 + elif _half(other): + s = exp(2*S.Pi*S.ImaginaryUnit*other*floor( + S.Half - e*arg(b)/(2*S.Pi))) + if s.is_extended_real and _n2(sign(s) - s) == 0: + s = sign(s) + else: + s = None + else: + # e.is_extended_real is False requires: + # _half(other) with constant floor or + # floor(S.Half - im(e*log(b))/2/pi) == 0 + try: + s = exp(2*S.ImaginaryUnit*S.Pi*other* + floor(S.Half - im(e*log(b))/2/S.Pi)) + # be careful to test that s is -1 or 1 b/c sign(I) == I: + # so check that s is real + if s.is_extended_real and _n2(sign(s) - s) == 0: + s = sign(s) + else: + s = None + except PrecisionExhausted: + s = None + + if s is not None: + return s*Pow(b, e*other) + + def _eval_Mod(self, q): + r"""A dispatched function to compute `b^e \bmod q`, dispatched + by ``Mod``. + + Notes + ===== + + Algorithms: + + 1. For unevaluated integer power, use built-in ``pow`` function + with 3 arguments, if powers are not too large wrt base. + + 2. For very large powers, use totient reduction if $e \ge \log(m)$. + Bound on m, is for safe factorization memory wise i.e. $m^{1/4}$. + For pollard-rho to be faster than built-in pow $\log(e) > m^{1/4}$ + check is added. + + 3. For any unevaluated power found in `b` or `e`, the step 2 + will be recursed down to the base and the exponent + such that the $b \bmod q$ becomes the new base and + $\phi(q) + e \bmod \phi(q)$ becomes the new exponent, and then + the computation for the reduced expression can be done. 
+ """ + + base, exp = self.base, self.exp + + if exp.is_integer and exp.is_positive: + if q.is_integer and base % q == 0: + return S.Zero + + from sympy.ntheory.factor_ import totient + + if base.is_Integer and exp.is_Integer and q.is_Integer: + b, e, m = int(base), int(exp), int(q) + mb = m.bit_length() + if mb <= 80 and e >= mb and e.bit_length()**4 >= m: + phi = int(totient(m)) + return Integer(pow(b, phi + e%phi, m)) + return Integer(pow(b, e, m)) + + from .mod import Mod + + if isinstance(base, Pow) and base.is_integer and base.is_number: + base = Mod(base, q) + return Mod(Pow(base, exp, evaluate=False), q) + + if isinstance(exp, Pow) and exp.is_integer and exp.is_number: + bit_length = int(q).bit_length() + # XXX Mod-Pow actually attempts to do a hanging evaluation + # if this dispatched function returns None. + # May need some fixes in the dispatcher itself. + if bit_length <= 80: + phi = totient(q) + exp = phi + Mod(exp, phi) + return Mod(Pow(base, exp, evaluate=False), q) + + def _eval_is_even(self): + if self.exp.is_integer and self.exp.is_positive: + return self.base.is_even + + def _eval_is_negative(self): + ext_neg = Pow._eval_is_extended_negative(self) + if ext_neg is True: + return self.is_finite + return ext_neg + + def _eval_is_extended_positive(self): + if self.base == self.exp: + if self.base.is_extended_nonnegative: + return True + elif self.base.is_positive: + if self.exp.is_real: + return True + elif self.base.is_extended_negative: + if self.exp.is_even: + return True + if self.exp.is_odd: + return False + elif self.base.is_zero: + if self.exp.is_extended_real: + return self.exp.is_zero + elif self.base.is_extended_nonpositive: + if self.exp.is_odd: + return False + elif self.base.is_imaginary: + if self.exp.is_integer: + m = self.exp % 4 + if m.is_zero: + return True + if m.is_integer and m.is_zero is False: + return False + if self.exp.is_imaginary: + from sympy.functions.elementary.exponential import log + return log(self.base).is_imaginary + + def _eval_is_extended_negative(self): + if self.exp is S.Half: + if self.base.is_complex or self.base.is_extended_real: + return False + if self.base.is_extended_negative: + if self.exp.is_odd and self.base.is_finite: + return True + if self.exp.is_even: + return False + elif self.base.is_extended_positive: + if self.exp.is_extended_real: + return False + elif self.base.is_zero: + if self.exp.is_extended_real: + return False + elif self.base.is_extended_nonnegative: + if self.exp.is_extended_nonnegative: + return False + elif self.base.is_extended_nonpositive: + if self.exp.is_even: + return False + elif self.base.is_extended_real: + if self.exp.is_even: + return False + + def _eval_is_zero(self): + if self.base.is_zero: + if self.exp.is_extended_positive: + return True + elif self.exp.is_extended_nonpositive: + return False + elif self.base == S.Exp1: + return self.exp is S.NegativeInfinity + elif self.base.is_zero is False: + if self.base.is_finite and self.exp.is_finite: + return False + elif self.exp.is_negative: + return self.base.is_infinite + elif self.exp.is_nonnegative: + return False + elif self.exp.is_infinite and self.exp.is_extended_real: + if (1 - abs(self.base)).is_extended_positive: + return self.exp.is_extended_positive + elif (1 - abs(self.base)).is_extended_negative: + return self.exp.is_extended_negative + elif self.base.is_finite and self.exp.is_negative: + # when self.base.is_zero is None + return False + + def _eval_is_integer(self): + b, e = self.args + if b.is_rational: + if b.is_integer is False and 
e.is_positive: + return False # rat**nonneg + if b.is_integer and e.is_integer: + if b is S.NegativeOne: + return True + if e.is_nonnegative or e.is_positive: + return True + if b.is_integer and e.is_negative and (e.is_finite or e.is_integer): + if fuzzy_not((b - 1).is_zero) and fuzzy_not((b + 1).is_zero): + return False + if b.is_Number and e.is_Number: + check = self.func(*self.args) + return check.is_Integer + if e.is_negative and b.is_positive and (b - 1).is_positive: + return False + if e.is_negative and b.is_negative and (b + 1).is_negative: + return False + + def _eval_is_extended_real(self): + if self.base is S.Exp1: + if self.exp.is_extended_real: + return True + elif self.exp.is_imaginary: + return (2*S.ImaginaryUnit*self.exp/S.Pi).is_even + + from sympy.functions.elementary.exponential import log, exp + real_b = self.base.is_extended_real + if real_b is None: + if self.base.func == exp and self.base.exp.is_imaginary: + return self.exp.is_imaginary + if self.base.func == Pow and self.base.base is S.Exp1 and self.base.exp.is_imaginary: + return self.exp.is_imaginary + return + real_e = self.exp.is_extended_real + if real_e is None: + return + if real_b and real_e: + if self.base.is_extended_positive: + return True + elif self.base.is_extended_nonnegative and self.exp.is_extended_nonnegative: + return True + elif self.exp.is_integer and self.base.is_extended_nonzero: + return True + elif self.exp.is_integer and self.exp.is_nonnegative: + return True + elif self.base.is_extended_negative: + if self.exp.is_Rational: + return False + if real_e and self.exp.is_extended_negative and self.base.is_zero is False: + return Pow(self.base, -self.exp).is_extended_real + im_b = self.base.is_imaginary + im_e = self.exp.is_imaginary + if im_b: + if self.exp.is_integer: + if self.exp.is_even: + return True + elif self.exp.is_odd: + return False + elif im_e and log(self.base).is_imaginary: + return True + elif self.exp.is_Add: + c, a = self.exp.as_coeff_Add() + if c and c.is_Integer: + return Mul( + self.base**c, self.base**a, evaluate=False).is_extended_real + elif self.base in (-S.ImaginaryUnit, S.ImaginaryUnit): + if (self.exp/2).is_integer is False: + return False + if real_b and im_e: + if self.base is S.NegativeOne: + return True + c = self.exp.coeff(S.ImaginaryUnit) + if c: + if self.base.is_rational and c.is_rational: + if self.base.is_nonzero and (self.base - 1).is_nonzero and c.is_nonzero: + return False + ok = (c*log(self.base)/S.Pi).is_integer + if ok is not None: + return ok + + if real_b is False and real_e: # we already know it's not imag + from sympy.functions.elementary.complexes import arg + i = arg(self.base)*self.exp/S.Pi + if i.is_complex: # finite + return i.is_integer + + def _eval_is_complex(self): + + if self.base == S.Exp1: + return fuzzy_or([self.exp.is_complex, self.exp.is_extended_negative]) + + if all(a.is_complex for a in self.args) and self._eval_is_finite(): + return True + + def _eval_is_imaginary(self): + if self.base.is_commutative is False: + return False + + if self.base.is_imaginary: + if self.exp.is_integer: + odd = self.exp.is_odd + if odd is not None: + return odd + return + + if self.base == S.Exp1: + f = 2 * self.exp / (S.Pi*S.ImaginaryUnit) + # exp(pi*integer) = 1 or -1, so not imaginary + if f.is_even: + return False + # exp(pi*integer + pi/2) = I or -I, so it is imaginary + if f.is_odd: + return True + return None + + if self.exp.is_imaginary: + from sympy.functions.elementary.exponential import log + imlog = log(self.base).is_imaginary + if imlog is 
not None: + return False # I**i -> real; (2*I)**i -> complex ==> not imaginary + + if self.base.is_extended_real and self.exp.is_extended_real: + if self.base.is_positive: + return False + else: + rat = self.exp.is_rational + if not rat: + return rat + if self.exp.is_integer: + return False + else: + half = (2*self.exp).is_integer + if half: + return self.base.is_negative + return half + + if self.base.is_extended_real is False: # we already know it's not imag + from sympy.functions.elementary.complexes import arg + i = arg(self.base)*self.exp/S.Pi + isodd = (2*i).is_odd + if isodd is not None: + return isodd + + def _eval_is_odd(self): + if self.exp.is_integer: + if self.exp.is_positive: + return self.base.is_odd + elif self.exp.is_nonnegative and self.base.is_odd: + return True + elif self.base is S.NegativeOne: + return True + + def _eval_is_finite(self): + if self.exp.is_negative: + if self.base.is_zero: + return False + if self.base.is_infinite or self.base.is_nonzero: + return True + c1 = self.base.is_finite + if c1 is None: + return + c2 = self.exp.is_finite + if c2 is None: + return + if c1 and c2: + if self.exp.is_nonnegative or fuzzy_not(self.base.is_zero): + return True + + def _eval_is_prime(self): + ''' + An integer raised to the n(>=2)-th power cannot be a prime. + ''' + if self.base.is_integer and self.exp.is_integer and (self.exp - 1).is_positive: + return False + + def _eval_is_composite(self): + """ + A power is composite if both base and exponent are greater than 1 + """ + if (self.base.is_integer and self.exp.is_integer and + ((self.base - 1).is_positive and (self.exp - 1).is_positive or + (self.base + 1).is_negative and self.exp.is_positive and self.exp.is_even)): + return True + + def _eval_is_polar(self): + return self.base.is_polar + + def _eval_subs(self, old, new): + from sympy.calculus.accumulationbounds import AccumBounds + + if isinstance(self.exp, AccumBounds): + b = self.base.subs(old, new) + e = self.exp.subs(old, new) + if isinstance(e, AccumBounds): + return e.__rpow__(b) + return self.func(b, e) + + from sympy.functions.elementary.exponential import exp, log + + def _check(ct1, ct2, old): + """Return (bool, pow, remainder_pow) where, if bool is True, then the + exponent of Pow `old` will combine with `pow` so the substitution + is valid, otherwise bool will be False. + + For noncommutative objects, `pow` will be an integer, and a factor + `Pow(old.base, remainder_pow)` needs to be included. If there is + no such factor, None is returned. For commutative objects, + remainder_pow is always None. + + cti are the coefficient and terms of an exponent of self or old + In this _eval_subs routine a change like (b**(2*x)).subs(b**x, y) + will give y**2 since (b**x)**2 == b**(2*x); if that equality does + not hold then the substitution should not occur so `bool` will be + False. 
+ + """ + coeff1, terms1 = ct1 + coeff2, terms2 = ct2 + if terms1 == terms2: + if old.is_commutative: + # Allow fractional powers for commutative objects + pow = coeff1/coeff2 + try: + as_int(pow, strict=False) + combines = True + except ValueError: + b, e = old.as_base_exp() + # These conditions ensure that (b**e)**f == b**(e*f) for any f + combines = b.is_positive and e.is_real or b.is_nonnegative and e.is_nonnegative + + return combines, pow, None + else: + # With noncommutative symbols, substitute only integer powers + if not isinstance(terms1, tuple): + terms1 = (terms1,) + if not all(term.is_integer for term in terms1): + return False, None, None + + try: + # Round pow toward zero + pow, remainder = divmod(as_int(coeff1), as_int(coeff2)) + if pow < 0 and remainder != 0: + pow += 1 + remainder -= as_int(coeff2) + + if remainder == 0: + remainder_pow = None + else: + remainder_pow = Mul(remainder, *terms1) + + return True, pow, remainder_pow + except ValueError: + # Can't substitute + pass + + return False, None, None + + if old == self.base or (old == exp and self.base == S.Exp1): + if new.is_Function and isinstance(new, Callable): + return new(self.exp._subs(old, new)) + else: + return new**self.exp._subs(old, new) + + # issue 10829: (4**x - 3*y + 2).subs(2**x, y) -> y**2 - 3*y + 2 + if isinstance(old, self.func) and self.exp == old.exp: + l = log(self.base, old.base) + if l.is_Number: + return Pow(new, l) + + if isinstance(old, self.func) and self.base == old.base: + if self.exp.is_Add is False: + ct1 = self.exp.as_independent(Symbol, as_Add=False) + ct2 = old.exp.as_independent(Symbol, as_Add=False) + ok, pow, remainder_pow = _check(ct1, ct2, old) + if ok: + # issue 5180: (x**(6*y)).subs(x**(3*y),z)->z**2 + result = self.func(new, pow) + if remainder_pow is not None: + result = Mul(result, Pow(old.base, remainder_pow)) + return result + else: # b**(6*x + a).subs(b**(3*x), y) -> y**2 * b**a + # exp(exp(x) + exp(x**2)).subs(exp(exp(x)), w) -> w * exp(exp(x**2)) + oarg = old.exp + new_l = [] + o_al = [] + ct2 = oarg.as_coeff_mul() + for a in self.exp.args: + newa = a._subs(old, new) + ct1 = newa.as_coeff_mul() + ok, pow, remainder_pow = _check(ct1, ct2, old) + if ok: + new_l.append(new**pow) + if remainder_pow is not None: + o_al.append(remainder_pow) + continue + elif not old.is_commutative and not newa.is_integer: + # If any term in the exponent is non-integer, + # we do not do any substitutions in the noncommutative case + return + o_al.append(newa) + if new_l: + expo = Add(*o_al) + new_l.append(Pow(self.base, expo, evaluate=False) if expo != 1 else self.base) + return Mul(*new_l) + + if (isinstance(old, exp) or (old.is_Pow and old.base is S.Exp1)) and self.exp.is_extended_real and self.base.is_positive: + ct1 = old.exp.as_independent(Symbol, as_Add=False) + ct2 = (self.exp*log(self.base)).as_independent( + Symbol, as_Add=False) + ok, pow, remainder_pow = _check(ct1, ct2, old) + if ok: + result = self.func(new, pow) # (2**x).subs(exp(x*log(2)), z) -> z + if remainder_pow is not None: + result = Mul(result, Pow(old.base, remainder_pow)) + return result + + def as_base_exp(self): + """Return base and exp of self. + + Explanation + =========== + + If base a Rational less than 1, then return 1/Rational, -exp. + If this extra processing is not needed, the base and exp + properties will give the raw arguments. 
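+
+        The inversion is applied only when the base is a positive Rational
+        strictly between 0 and 1.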
+ + Examples + ======== + + >>> from sympy import Pow, S + >>> p = Pow(S.Half, 2, evaluate=False) + >>> p.as_base_exp() + (2, -2) + >>> p.args + (1/2, 2) + >>> p.base, p.exp + (1/2, 2) + + """ + + b, e = self.args + if b.is_Rational and b.p < b.q and b.p > 0: + return 1/b, -e + return b, e + + def _eval_adjoint(self): + from sympy.functions.elementary.complexes import adjoint + i, p = self.exp.is_integer, self.base.is_positive + if i: + return adjoint(self.base)**self.exp + if p: + return self.base**adjoint(self.exp) + if i is False and p is False: + expanded = expand_complex(self) + if expanded != self: + return adjoint(expanded) + + def _eval_conjugate(self): + from sympy.functions.elementary.complexes import conjugate as c + i, p = self.exp.is_integer, self.base.is_positive + if i: + return c(self.base)**self.exp + if p: + return self.base**c(self.exp) + if i is False and p is False: + expanded = expand_complex(self) + if expanded != self: + return c(expanded) + if self.is_extended_real: + return self + + def _eval_transpose(self): + from sympy.functions.elementary.complexes import transpose + if self.base == S.Exp1: + return self.func(S.Exp1, self.exp.transpose()) + i, p = self.exp.is_integer, (self.base.is_complex or self.base.is_infinite) + if p: + return self.base**self.exp + if i: + return transpose(self.base)**self.exp + if i is False and p is False: + expanded = expand_complex(self) + if expanded != self: + return transpose(expanded) + + def _eval_expand_power_exp(self, **hints): + """a**(n + m) -> a**n*a**m""" + b = self.base + e = self.exp + if b == S.Exp1: + from sympy.concrete.summations import Sum + if isinstance(e, Sum) and e.is_commutative: + from sympy.concrete.products import Product + return Product(self.func(b, e.function), *e.limits) + if e.is_Add and (hints.get('force', False) or + b.is_zero is False or e._all_nonneg_or_nonppos()): + if e.is_commutative: + return Mul(*[self.func(b, x) for x in e.args]) + if b.is_commutative: + c, nc = sift(e.args, lambda x: x.is_commutative, binary=True) + if c: + return Mul(*[self.func(b, x) for x in c] + )*b**Add._from_args(nc) + return self + + def _eval_expand_power_base(self, **hints): + """(a*b)**n -> a**n * b**n""" + force = hints.get('force', False) + + b = self.base + e = self.exp + if not b.is_Mul: + return self + + cargs, nc = b.args_cnc(split_1=False) + + # expand each term - this is top-level-only + # expansion but we have to watch out for things + # that don't have an _eval_expand method + if nc: + nc = [i._eval_expand_power_base(**hints) + if hasattr(i, '_eval_expand_power_base') else i + for i in nc] + + if e.is_Integer: + if e.is_positive: + rv = Mul(*nc*e) + else: + rv = Mul(*[i**-1 for i in nc[::-1]]*-e) + if cargs: + rv *= Mul(*cargs)**e + return rv + + if not cargs: + return self.func(Mul(*nc), e, evaluate=False) + + nc = [Mul(*nc)] + + # sift the commutative bases + other, maybe_real = sift(cargs, lambda x: x.is_extended_real is False, + binary=True) + def pred(x): + if x is S.ImaginaryUnit: + return S.ImaginaryUnit + polar = x.is_polar + if polar: + return True + if polar is None: + return fuzzy_bool(x.is_extended_nonnegative) + sifted = sift(maybe_real, pred) + nonneg = sifted[True] + other += sifted[None] + neg = sifted[False] + imag = sifted[S.ImaginaryUnit] + if imag: + I = S.ImaginaryUnit + i = len(imag) % 4 + if i == 0: + pass + elif i == 1: + other.append(I) + elif i == 2: + if neg: + nonn = -neg.pop() + if nonn is not S.One: + nonneg.append(nonn) + else: + neg.append(S.NegativeOne) + else: + if neg: + 
nonn = -neg.pop() + if nonn is not S.One: + nonneg.append(nonn) + else: + neg.append(S.NegativeOne) + other.append(I) + del imag + + # bring out the bases that can be separated from the base + + if force or e.is_integer: + # treat all commutatives the same and put nc in other + cargs = nonneg + neg + other + other = nc + else: + # this is just like what is happening automatically, except + # that now we are doing it for an arbitrary exponent for which + # no automatic expansion is done + + assert not e.is_Integer + + # handle negatives by making them all positive and putting + # the residual -1 in other + if len(neg) > 1: + o = S.One + if not other and neg[0].is_Number: + o *= neg.pop(0) + if len(neg) % 2: + o = -o + for n in neg: + nonneg.append(-n) + if o is not S.One: + other.append(o) + elif neg and other: + if neg[0].is_Number and neg[0] is not S.NegativeOne: + other.append(S.NegativeOne) + nonneg.append(-neg[0]) + else: + other.extend(neg) + else: + other.extend(neg) + del neg + + cargs = nonneg + other += nc + + rv = S.One + if cargs: + if e.is_Rational: + npow, cargs = sift(cargs, lambda x: x.is_Pow and + x.exp.is_Rational and x.base.is_number, + binary=True) + rv = Mul(*[self.func(b.func(*b.args), e) for b in npow]) + rv *= Mul(*[self.func(b, e, evaluate=False) for b in cargs]) + if other: + rv *= self.func(Mul(*other), e, evaluate=False) + return rv + + def _eval_expand_multinomial(self, **hints): + """(a + b + ..)**n -> a**n + n*a**(n-1)*b + .., n is nonzero integer""" + + base, exp = self.args + result = self + + if exp.is_Rational and exp.p > 0 and base.is_Add: + if not exp.is_Integer: + n = Integer(exp.p // exp.q) + + if not n: + return result + else: + radical, result = self.func(base, exp - n), [] + + expanded_base_n = self.func(base, n) + if expanded_base_n.is_Pow: + expanded_base_n = \ + expanded_base_n._eval_expand_multinomial() + for term in Add.make_args(expanded_base_n): + result.append(term*radical) + + return Add(*result) + + n = int(exp) + + if base.is_commutative: + order_terms, other_terms = [], [] + + for b in base.args: + if b.is_Order: + order_terms.append(b) + else: + other_terms.append(b) + + if order_terms: + # (f(x) + O(x^n))^m -> f(x)^m + m*f(x)^{m-1} *O(x^n) + f = Add(*other_terms) + o = Add(*order_terms) + + if n == 2: + return expand_multinomial(f**n, deep=False) + n*f*o + else: + g = expand_multinomial(f**(n - 1), deep=False) + return expand_mul(f*g, deep=False) + n*g*o + + if base.is_number: + # Efficiently expand expressions of the form (a + b*I)**n + # where 'a' and 'b' are real numbers and 'n' is integer. 
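+                    # When both parts are rational they are first scaled to an
+                    # integer pair (with the common denominator collected in
+                    # ``k``); the loop below then raises a + b*I to the n by
+                    # binary exponentiation on the accumulator pair (c, d).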
+ a, b = base.as_real_imag() + + if a.is_Rational and b.is_Rational: + if not a.is_Integer: + if not b.is_Integer: + k = self.func(a.q * b.q, n) + a, b = a.p*b.q, a.q*b.p + else: + k = self.func(a.q, n) + a, b = a.p, a.q*b + elif not b.is_Integer: + k = self.func(b.q, n) + a, b = a*b.q, b.p + else: + k = 1 + + a, b, c, d = int(a), int(b), 1, 0 + + while n: + if n & 1: + c, d = a*c - b*d, b*c + a*d + n -= 1 + a, b = a*a - b*b, 2*a*b + n //= 2 + + I = S.ImaginaryUnit + + if k == 1: + return c + I*d + else: + return Integer(c)/k + I*d/k + + p = other_terms + # (x + y)**3 -> x**3 + 3*x**2*y + 3*x*y**2 + y**3 + # in this particular example: + # p = [x,y]; n = 3 + # so now it's easy to get the correct result -- we get the + # coefficients first: + from sympy.ntheory.multinomial import multinomial_coefficients + from sympy.polys.polyutils import basic_from_dict + expansion_dict = multinomial_coefficients(len(p), n) + # in our example: {(3, 0): 1, (1, 2): 3, (0, 3): 1, (2, 1): 3} + # and now construct the expression. + return basic_from_dict(expansion_dict, *p) + else: + if n == 2: + return Add(*[f*g for f in base.args for g in base.args]) + else: + multi = (base**(n - 1))._eval_expand_multinomial() + if multi.is_Add: + return Add(*[f*g for f in base.args + for g in multi.args]) + else: + # XXX can this ever happen if base was an Add? + return Add(*[f*multi for f in base.args]) + elif (exp.is_Rational and exp.p < 0 and base.is_Add and + abs(exp.p) > exp.q): + return 1 / self.func(base, -exp)._eval_expand_multinomial() + elif exp.is_Add and base.is_Number and (hints.get('force', False) or + base.is_zero is False or exp._all_nonneg_or_nonppos()): + # a + b a b + # n --> n n, where n, a, b are Numbers + # XXX should be in expand_power_exp? + coeff, tail = [], [] + for term in exp.args: + if term.is_Number: + coeff.append(self.func(base, term)) + else: + tail.append(term) + return Mul(*(coeff + [self.func(base, Add._from_args(tail))])) + else: + return result + + def as_real_imag(self, deep=True, **hints): + if self.exp.is_Integer: + from sympy.polys.polytools import poly + + exp = self.exp + re_e, im_e = self.base.as_real_imag(deep=deep) + if not im_e: + return self, S.Zero + a, b = symbols('a b', cls=Dummy) + if exp >= 0: + if re_e.is_Number and im_e.is_Number: + # We can be more efficient in this case + expr = expand_multinomial(self.base**exp) + if expr != self: + return expr.as_real_imag() + + expr = poly( + (a + b)**exp) # a = re, b = im; expr = (a + b*I)**exp + else: + mag = re_e**2 + im_e**2 + re_e, im_e = re_e/mag, -im_e/mag + if re_e.is_Number and im_e.is_Number: + # We can be more efficient in this case + expr = expand_multinomial((re_e + im_e*S.ImaginaryUnit)**-exp) + if expr != self: + return expr.as_real_imag() + + expr = poly((a + b)**-exp) + + # Terms with even b powers will be real + r = [i for i in expr.terms() if not i[0][1] % 2] + re_part = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r]) + # Terms with odd b powers will be imaginary + r = [i for i in expr.terms() if i[0][1] % 4 == 1] + im_part1 = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r]) + r = [i for i in expr.terms() if i[0][1] % 4 == 3] + im_part3 = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r]) + + return (re_part.subs({a: re_e, b: S.ImaginaryUnit*im_e}), + im_part1.subs({a: re_e, b: im_e}) + im_part3.subs({a: re_e, b: -im_e})) + + from sympy.functions.elementary.trigonometric import atan2, cos, sin + + if self.exp.is_Rational: + re_e, im_e = self.base.as_real_imag(deep=deep) + + if im_e.is_zero and self.exp is S.Half: + if 
re_e.is_extended_nonnegative: + return self, S.Zero + if re_e.is_extended_nonpositive: + return S.Zero, (-self.base)**self.exp + + # XXX: This is not totally correct since for x**(p/q) with + # x being imaginary there are actually q roots, but + # only a single one is returned from here. + r = self.func(self.func(re_e, 2) + self.func(im_e, 2), S.Half) + + t = atan2(im_e, re_e) + + rp, tp = self.func(r, self.exp), t*self.exp + + return rp*cos(tp), rp*sin(tp) + elif self.base is S.Exp1: + from sympy.functions.elementary.exponential import exp + re_e, im_e = self.exp.as_real_imag() + if deep: + re_e = re_e.expand(deep, **hints) + im_e = im_e.expand(deep, **hints) + c, s = cos(im_e), sin(im_e) + return exp(re_e)*c, exp(re_e)*s + else: + from sympy.functions.elementary.complexes import im, re + if deep: + hints['complex'] = False + + expanded = self.expand(deep, **hints) + if hints.get('ignore') == expanded: + return None + else: + return (re(expanded), im(expanded)) + else: + return re(self), im(self) + + def _eval_derivative(self, s): + from sympy.functions.elementary.exponential import log + dbase = self.base.diff(s) + dexp = self.exp.diff(s) + return self * (dexp * log(self.base) + dbase * self.exp/self.base) + + def _eval_evalf(self, prec): + base, exp = self.as_base_exp() + if base == S.Exp1: + # Use mpmath function associated to class "exp": + from sympy.functions.elementary.exponential import exp as exp_function + return exp_function(self.exp, evaluate=False)._eval_evalf(prec) + base = base._evalf(prec) + if not exp.is_Integer: + exp = exp._evalf(prec) + if exp.is_negative and base.is_number and base.is_extended_real is False: + base = base.conjugate() / (base * base.conjugate())._evalf(prec) + exp = -exp + return self.func(base, exp).expand() + return self.func(base, exp) + + def _eval_is_polynomial(self, syms): + if self.exp.has(*syms): + return False + + if self.base.has(*syms): + return bool(self.base._eval_is_polynomial(syms) and + self.exp.is_Integer and (self.exp >= 0)) + else: + return True + + def _eval_is_rational(self): + # The evaluation of self.func below can be very expensive in the case + # of integer**integer if the exponent is large. 
We should try to exit + # before that if possible: + if (self.exp.is_integer and self.base.is_rational + and fuzzy_not(fuzzy_and([self.exp.is_negative, self.base.is_zero]))): + return True + p = self.func(*self.as_base_exp()) # in case it's unevaluated + if not p.is_Pow: + return p.is_rational + b, e = p.as_base_exp() + if e.is_Rational and b.is_Rational: + # we didn't check that e is not an Integer + # because Rational**Integer autosimplifies + return False + if e.is_integer: + if b.is_rational: + if fuzzy_not(b.is_zero) or e.is_nonnegative: + return True + if b == e: # always rational, even for 0**0 + return True + elif b.is_irrational: + return e.is_zero + if b is S.Exp1: + if e.is_rational and e.is_nonzero: + return False + + def _eval_is_algebraic(self): + def _is_one(expr): + try: + return (expr - 1).is_zero + except ValueError: + # when the operation is not allowed + return False + + if self.base.is_zero or _is_one(self.base): + return True + elif self.base is S.Exp1: + s = self.func(*self.args) + if s.func == self.func: + if self.exp.is_nonzero: + if self.exp.is_algebraic: + return False + elif (self.exp/S.Pi).is_rational: + return False + elif (self.exp/(S.ImaginaryUnit*S.Pi)).is_rational: + return True + else: + return s.is_algebraic + elif self.exp.is_rational: + if self.base.is_algebraic is False: + return self.exp.is_zero + if self.base.is_zero is False: + if self.exp.is_nonzero: + return self.base.is_algebraic + elif self.base.is_algebraic: + return True + if self.exp.is_positive: + return self.base.is_algebraic + elif self.base.is_algebraic and self.exp.is_algebraic: + if ((fuzzy_not(self.base.is_zero) + and fuzzy_not(_is_one(self.base))) + or self.base.is_integer is False + or self.base.is_irrational): + return self.exp.is_rational + + def _eval_is_rational_function(self, syms): + if self.exp.has(*syms): + return False + + if self.base.has(*syms): + return self.base._eval_is_rational_function(syms) and \ + self.exp.is_Integer + else: + return True + + def _eval_is_meromorphic(self, x, a): + # f**g is meromorphic if g is an integer and f is meromorphic. + # E**(log(f)*g) is meromorphic if log(f)*g is meromorphic + # and finite. + base_merom = self.base._eval_is_meromorphic(x, a) + exp_integer = self.exp.is_Integer + if exp_integer: + return base_merom + + exp_merom = self.exp._eval_is_meromorphic(x, a) + if base_merom is False: + # f**g = E**(log(f)*g) may be meromorphic if the + # singularities of log(f) and g cancel each other, + # for example, if g = 1/log(f). Hence, + return False if exp_merom else None + elif base_merom is None: + return None + + b = self.base.subs(x, a) + # b is extended complex as base is meromorphic. + # log(base) is finite and meromorphic when b != 0, zoo. 
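+        # ``log_defined`` below is fuzzy three-valued: False means the base
+        # has a zero or pole at ``a``, None means it could not be decided.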
+ b_zero = b.is_zero + if b_zero: + log_defined = False + else: + log_defined = fuzzy_and((b.is_finite, fuzzy_not(b_zero))) + + if log_defined is False: # zero or pole of base + return exp_integer # False or None + elif log_defined is None: + return None + + if not exp_merom: + return exp_merom # False or None + + return self.exp.subs(x, a).is_finite + + def _eval_is_algebraic_expr(self, syms): + if self.exp.has(*syms): + return False + + if self.base.has(*syms): + return self.base._eval_is_algebraic_expr(syms) and \ + self.exp.is_Rational + else: + return True + + def _eval_rewrite_as_exp(self, base, expo, **kwargs): + from sympy.functions.elementary.exponential import exp, log + + if base.is_zero or base.has(exp) or expo.has(exp): + return base**expo + + if base.has(Symbol): + # delay evaluation if expo is non symbolic + # (as exp(x*log(5)) automatically reduces to x**5) + if global_parameters.exp_is_pow: + return Pow(S.Exp1, log(base)*expo, evaluate=expo.has(Symbol)) + else: + return exp(log(base)*expo, evaluate=expo.has(Symbol)) + + else: + from sympy.functions.elementary.complexes import arg, Abs + return exp((log(Abs(base)) + S.ImaginaryUnit*arg(base))*expo) + + def as_numer_denom(self): + if not self.is_commutative: + return self, S.One + base, exp = self.as_base_exp() + n, d = base.as_numer_denom() + # this should be the same as ExpBase.as_numer_denom wrt + # exponent handling + neg_exp = exp.is_negative + if exp.is_Mul and not neg_exp and not exp.is_positive: + neg_exp = exp.could_extract_minus_sign() + int_exp = exp.is_integer + # the denominator cannot be separated from the numerator if + # its sign is unknown unless the exponent is an integer, e.g. + # sqrt(a/b) != sqrt(a)/sqrt(b) when a=1 and b=-1. But if the + # denominator is negative the numerator and denominator can + # be negated and the denominator (now positive) separated. + if not (d.is_extended_real or int_exp): + n = base + d = S.One + dnonpos = d.is_nonpositive + if dnonpos: + n, d = -n, -d + elif dnonpos is None and not int_exp: + n = base + d = S.One + if neg_exp: + n, d = d, n + exp = -exp + if exp.is_infinite: + if n is S.One and d is not S.One: + return n, self.func(d, exp) + if n is not S.One and d is S.One: + return self.func(n, exp), d + return self.func(n, exp), self.func(d, exp) + + def matches(self, expr, repl_dict=None, old=False): + expr = _sympify(expr) + if repl_dict is None: + repl_dict = {} + + # special case, pattern = 1 and expr.exp can match to 0 + if expr is S.One: + d = self.exp.matches(S.Zero, repl_dict) + if d is not None: + return d + + # make sure the expression to be matched is an Expr + if not isinstance(expr, Expr): + return None + + b, e = expr.as_base_exp() + + # special case number + sb, se = self.as_base_exp() + if sb.is_Symbol and se.is_Integer and expr: + if e.is_rational: + return sb.matches(b**(e/se), repl_dict) + return sb.matches(expr**(1/se), repl_dict) + + d = repl_dict.copy() + d = self.base.matches(b, d) + if d is None: + return None + + d = self.exp.xreplace(d).matches(e, d) + if d is None: + return Expr.matches(self, expr, repl_dict) + return d + + def _eval_nseries(self, x, n, logx, cdir=0): + # NOTE! This function is an important part of the gruntz algorithm + # for computing limits. It has to return a generalized power + # series with coefficients in C(log, log(x)). In more detail: + # It has to return an expression + # c_0*x**e_0 + c_1*x**e_1 + ... 
(finitely many terms) + # where e_i are numbers (not necessarily integers) and c_i are + # expressions involving only numbers, the log function, and log(x). + # The series expansion of b**e is computed as follows: + # 1) We express b as f*(1 + g) where f is the leading term of b. + # g has order O(x**d) where d is strictly positive. + # 2) Then b**e = (f**e)*((1 + g)**e). + # (1 + g)**e is computed using binomial series. + from sympy.functions.elementary.exponential import exp, log + from sympy.series.limits import limit + from sympy.series.order import Order + from sympy.core.sympify import sympify + if self.base is S.Exp1: + e_series = self.exp.nseries(x, n=n, logx=logx) + if e_series.is_Order: + return 1 + e_series + e0 = limit(e_series.removeO(), x, 0) + if e0 is S.NegativeInfinity: + return Order(x**n, x) + if e0 is S.Infinity: + return self + t = e_series - e0 + exp_series = term = exp(e0) + # series of exp(e0 + t) in t + for i in range(1, n): + term *= t/i + term = term.nseries(x, n=n, logx=logx) + exp_series += term + exp_series += Order(t**n, x) + from sympy.simplify.powsimp import powsimp + return powsimp(exp_series, deep=True, combine='exp') + from sympy.simplify.powsimp import powdenest + from .numbers import _illegal + self = powdenest(self, force=True).trigsimp() + b, e = self.as_base_exp() + + if e.has(*_illegal): + raise PoleError() + + if e.has(x): + return exp(e*log(b))._eval_nseries(x, n=n, logx=logx, cdir=cdir) + + if logx is not None and b.has(log): + from .symbol import Wild + c, ex = symbols('c, ex', cls=Wild, exclude=[x]) + b = b.replace(log(c*x**ex), log(c) + ex*logx) + self = b**e + + b = b.removeO() + try: + from sympy.functions.special.gamma_functions import polygamma + if b.has(polygamma, S.EulerGamma) and logx is not None: + raise ValueError() + _, m = b.leadterm(x) + except (ValueError, NotImplementedError, PoleError): + b = b._eval_nseries(x, n=max(2, n), logx=logx, cdir=cdir).removeO() + if b.has(S.NaN, S.ComplexInfinity): + raise NotImplementedError() + _, m = b.leadterm(x) + + if e.has(log): + from sympy.simplify.simplify import logcombine + e = logcombine(e).cancel() + + if not (m.is_zero or e.is_number and e.is_real): + if self == self._eval_as_leading_term(x, logx=logx, cdir=cdir): + res = exp(e*log(b))._eval_nseries(x, n=n, logx=logx, cdir=cdir) + if res == exp(e*log(b)): + return self + return res + + f = b.as_leading_term(x, logx=logx) + g = (b/f - S.One).cancel(expand=False) + if not m.is_number: + raise NotImplementedError() + maxpow = n - m*e + if maxpow.has(Symbol): + maxpow = sympify(n) + + if maxpow.is_negative: + return Order(x**(m*e), x) + + if g.is_zero: + r = f**e + if r != self: + r += Order(x**n, x) + return r + + def coeff_exp(term, x): + coeff, exp = S.One, S.Zero + for factor in Mul.make_args(term): + if factor.has(x): + base, exp = factor.as_base_exp() + if base != x: + try: + return term.leadterm(x) + except ValueError: + return term, S.Zero + else: + coeff *= factor + return coeff, exp + + def mul(d1, d2): + res = {} + for e1, e2 in product(d1, d2): + ex = e1 + e2 + if ex < maxpow: + res[ex] = res.get(ex, S.Zero) + d1[e1]*d2[e2] + return res + + try: + c, d = g.leadterm(x, logx=logx) + except (ValueError, NotImplementedError): + if limit(g/x**maxpow, x, 0) == 0: + # g has higher order zero + return f**e + e*f**e*g # first term of binomial series + else: + raise NotImplementedError() + if c.is_Float and d == S.Zero: + # Convert floats like 0.5 to exact SymPy numbers like S.Half, to + # prevent rounding errors which can induce wrong 
values of d leading + # to a NotImplementedError being returned from the block below. + from sympy.simplify.simplify import nsimplify + _, d = nsimplify(g).leadterm(x, logx=logx) + if not d.is_positive: + g = g.simplify() + if g.is_zero: + return f**e + _, d = g.leadterm(x, logx=logx) + if not d.is_positive: + g = ((b - f)/f).expand() + _, d = g.leadterm(x, logx=logx) + if not d.is_positive: + raise NotImplementedError() + + from sympy.functions.elementary.integers import ceiling + gpoly = g._eval_nseries(x, n=ceiling(maxpow), logx=logx, cdir=cdir).removeO() + gterms = {} + + for term in Add.make_args(gpoly): + co1, e1 = coeff_exp(term, x) + gterms[e1] = gterms.get(e1, S.Zero) + co1 + + k = S.One + terms = {S.Zero: S.One} + tk = gterms + + from sympy.functions.combinatorial.factorials import factorial, ff + + while (k*d - maxpow).is_negative: + coeff = ff(e, k)/factorial(k) + for ex in tk: + terms[ex] = terms.get(ex, S.Zero) + coeff*tk[ex] + tk = mul(tk, gterms) + k += S.One + + from sympy.functions.elementary.complexes import im + + if not e.is_integer and m.is_zero and f.is_negative: + ndir = (b - f).dir(x, cdir) + if im(ndir).is_negative: + inco, inex = coeff_exp(f**e*(-1)**(-2*e), x) + elif im(ndir).is_zero: + inco, inex = coeff_exp(exp(e*log(b)).as_leading_term(x, logx=logx, cdir=cdir), x) + else: + inco, inex = coeff_exp(f**e, x) + else: + inco, inex = coeff_exp(f**e, x) + res = S.Zero + + for e1 in terms: + ex = e1 + inex + res += terms[e1]*inco*x**(ex) + + if not (e.is_integer and e.is_positive and (e*d - n).is_nonpositive and + res == _mexpand(self)): + try: + res += Order(x**n, x) + except NotImplementedError: + return exp(e*log(b))._eval_nseries(x, n=n, logx=logx, cdir=cdir) + return res + + def _eval_as_leading_term(self, x, logx=None, cdir=0): + from sympy.functions.elementary.exponential import exp, log + e = self.exp + b = self.base + if self.base is S.Exp1: + arg = e.as_leading_term(x, logx=logx) + arg0 = arg.subs(x, 0) + if arg0 is S.NaN: + arg0 = arg.limit(x, 0) + if arg0.is_infinite is False: + return S.Exp1**arg0 + raise PoleError("Cannot expand %s around 0" % (self)) + elif e.has(x): + lt = exp(e * log(b)) + return lt.as_leading_term(x, logx=logx, cdir=cdir) + else: + from sympy.functions.elementary.complexes import im + try: + f = b.as_leading_term(x, logx=logx, cdir=cdir) + except PoleError: + return self + if not e.is_integer and f.is_negative and not f.has(x): + ndir = (b - f).dir(x, cdir) + if im(ndir).is_negative: + # Normally, f**e would evaluate to exp(e*log(f)) but on branch cuts + # an other value is expected through the following computation + # exp(e*(log(f) - 2*pi*I)) == f**e*exp(-2*e*pi*I) == f**e*(-1)**(-2*e). 
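+                    # Concrete check of the identity above (illustrative
+                    # comment only): with f = -1 and e = 1/3, approaching the
+                    # cut from below gives exp((log(-1) - 2*pi*I)/3)
+                    # = exp(-pi*I/3), and indeed (-1)**(1/3) * (-1)**(-2/3)
+                    # = exp(pi*I/3) * exp(-2*pi*I/3) = exp(-pi*I/3).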
+ return self.func(f, e) * (-1)**(-2*e) + elif im(ndir).is_zero: + log_leadterm = log(b)._eval_as_leading_term(x, logx=logx, cdir=cdir) + if log_leadterm.is_infinite is False: + return exp(e*log_leadterm) + return self.func(f, e) + + @cacheit + def _taylor_term(self, n, x, *previous_terms): # of (1 + x)**e + from sympy.functions.combinatorial.factorials import binomial + return binomial(self.exp, n) * self.func(x, n) + + def taylor_term(self, n, x, *previous_terms): + if self.base is not S.Exp1: + return super().taylor_term(n, x, *previous_terms) + if n < 0: + return S.Zero + if n == 0: + return S.One + from .sympify import sympify + x = sympify(x) + if previous_terms: + p = previous_terms[-1] + if p is not None: + return p * x / n + from sympy.functions.combinatorial.factorials import factorial + return x**n/factorial(n) + + def _eval_rewrite_as_sin(self, base, exp): + if self.base is S.Exp1: + from sympy.functions.elementary.trigonometric import sin + return sin(S.ImaginaryUnit*self.exp + S.Pi/2) - S.ImaginaryUnit*sin(S.ImaginaryUnit*self.exp) + + def _eval_rewrite_as_cos(self, base, exp): + if self.base is S.Exp1: + from sympy.functions.elementary.trigonometric import cos + return cos(S.ImaginaryUnit*self.exp) + S.ImaginaryUnit*cos(S.ImaginaryUnit*self.exp + S.Pi/2) + + def _eval_rewrite_as_tanh(self, base, exp): + if self.base is S.Exp1: + from sympy.functions.elementary.hyperbolic import tanh + return (1 + tanh(self.exp/2))/(1 - tanh(self.exp/2)) + + def _eval_rewrite_as_sqrt(self, base, exp, **kwargs): + from sympy.functions.elementary.trigonometric import sin, cos + if base is not S.Exp1: + return None + if exp.is_Mul: + coeff = exp.coeff(S.Pi * S.ImaginaryUnit) + if coeff and coeff.is_number: + cosine, sine = cos(S.Pi*coeff), sin(S.Pi*coeff) + if not isinstance(cosine, cos) and not isinstance (sine, sin): + return cosine + S.ImaginaryUnit*sine + + def as_content_primitive(self, radical=False, clear=True): + """Return the tuple (R, self/R) where R is the positive Rational + extracted from self. + + Examples + ======== + + >>> from sympy import sqrt + >>> sqrt(4 + 4*sqrt(2)).as_content_primitive() + (2, sqrt(1 + sqrt(2))) + >>> sqrt(3 + 3*sqrt(2)).as_content_primitive() + (1, sqrt(3)*sqrt(1 + sqrt(2))) + + >>> from sympy import expand_power_base, powsimp, Mul + >>> from sympy.abc import x, y + + >>> ((2*x + 2)**2).as_content_primitive() + (4, (x + 1)**2) + >>> (4**((1 + y)/2)).as_content_primitive() + (2, 4**(y/2)) + >>> (3**((1 + y)/2)).as_content_primitive() + (1, 3**((y + 1)/2)) + >>> (3**((5 + y)/2)).as_content_primitive() + (9, 3**((y + 1)/2)) + >>> eq = 3**(2 + 2*x) + >>> powsimp(eq) == eq + True + >>> eq.as_content_primitive() + (9, 3**(2*x)) + >>> powsimp(Mul(*_)) + 3**(2*x + 2) + + >>> eq = (2 + 2*x)**y + >>> s = expand_power_base(eq); s.is_Mul, s + (False, (2*x + 2)**y) + >>> eq.as_content_primitive() + (1, (2*(x + 1))**y) + >>> s = expand_power_base(_[1]); s.is_Mul, s + (True, 2**y*(x + 1)**y) + + See docstring of Expr.as_content_primitive for more examples. 
+ """ + + b, e = self.as_base_exp() + b = _keep_coeff(*b.as_content_primitive(radical=radical, clear=clear)) + ce, pe = e.as_content_primitive(radical=radical, clear=clear) + if b.is_Rational: + #e + #= ce*pe + #= ce*(h + t) + #= ce*h + ce*t + #=> self + #= b**(ce*h)*b**(ce*t) + #= b**(cehp/cehq)*b**(ce*t) + #= b**(iceh + r/cehq)*b**(ce*t) + #= b**(iceh)*b**(r/cehq)*b**(ce*t) + #= b**(iceh)*b**(ce*t + r/cehq) + h, t = pe.as_coeff_Add() + if h.is_Rational and b != S.Zero: + ceh = ce*h + c = self.func(b, ceh) + r = S.Zero + if not c.is_Rational: + iceh, r = divmod(ceh.p, ceh.q) + c = self.func(b, iceh) + return c, self.func(b, _keep_coeff(ce, t + r/ce/ceh.q)) + e = _keep_coeff(ce, pe) + # b**e = (h*t)**e = h**e*t**e = c*m*t**e + if e.is_Rational and b.is_Mul: + h, t = b.as_content_primitive(radical=radical, clear=clear) # h is positive + c, m = self.func(h, e).as_coeff_Mul() # so c is positive + m, me = m.as_base_exp() + if m is S.One or me == e: # probably always true + # return the following, not return c, m*Pow(t, e) + # which would change Pow into Mul; we let SymPy + # decide what to do by using the unevaluated Mul, e.g + # should it stay as sqrt(2 + 2*sqrt(5)) or become + # sqrt(2)*sqrt(1 + sqrt(5)) + return c, self.func(_keep_coeff(m, t), e) + return S.One, self.func(b, e) + + def is_constant(self, *wrt, **flags): + expr = self + if flags.get('simplify', True): + expr = expr.simplify() + b, e = expr.as_base_exp() + bz = b.equals(0) + if bz: # recalculate with assumptions in case it's unevaluated + new = b**e + if new != expr: + return new.is_constant() + econ = e.is_constant(*wrt) + bcon = b.is_constant(*wrt) + if bcon: + if econ: + return True + bz = b.equals(0) + if bz is False: + return False + elif bcon is None: + return None + + return e.equals(0) + + def _eval_difference_delta(self, n, step): + b, e = self.args + if e.has(n) and not b.has(n): + new_e = e.subs(n, n + step) + return (b**(new_e - e) - 1) * self + +power = Dispatcher('power') +power.add((object, object), Pow) + +from .add import Add +from .numbers import Integer +from .mul import Mul, _keep_coeff +from .symbol import Symbol, Dummy, symbols diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/core/rules.py b/env-llmeval/lib/python3.10/site-packages/sympy/core/rules.py new file mode 100644 index 0000000000000000000000000000000000000000..5ae331f71b21c8a6ef35f499c5c5c89239349e9c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/core/rules.py @@ -0,0 +1,66 @@ +""" +Replacement rules. +""" + +class Transform: + """ + Immutable mapping that can be used as a generic transformation rule. + + Parameters + ========== + + transform : callable + Computes the value corresponding to any key. + + filter : callable, optional + If supplied, specifies which objects are in the mapping. + + Examples + ======== + + >>> from sympy.core.rules import Transform + >>> from sympy.abc import x + + This Transform will return, as a value, one more than the key: + + >>> add1 = Transform(lambda x: x + 1) + >>> add1[1] + 2 + >>> add1[x] + x + 1 + + By default, all values are considered to be in the dictionary. 
If a filter + is supplied, only the objects for which it returns True are considered as + being in the dictionary: + + >>> add1_odd = Transform(lambda x: x + 1, lambda x: x%2 == 1) + >>> 2 in add1_odd + False + >>> add1_odd.get(2, 0) + 0 + >>> 3 in add1_odd + True + >>> add1_odd[3] + 4 + >>> add1_odd.get(3, 0) + 4 + """ + + def __init__(self, transform, filter=lambda x: True): + self._transform = transform + self._filter = filter + + def __contains__(self, item): + return self._filter(item) + + def __getitem__(self, key): + if self._filter(key): + return self._transform(key) + else: + raise KeyError(key) + + def get(self, item, default=None): + if item in self: + return self[item] + else: + return default diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/core/symbol.py b/env-llmeval/lib/python3.10/site-packages/sympy/core/symbol.py new file mode 100644 index 0000000000000000000000000000000000000000..68c89874c97f78de2d3d97184f5db94177112a4e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/core/symbol.py @@ -0,0 +1,945 @@ +from __future__ import annotations + +from .assumptions import StdFactKB, _assume_defined +from .basic import Basic, Atom +from .cache import cacheit +from .containers import Tuple +from .expr import Expr, AtomicExpr +from .function import AppliedUndef, FunctionClass +from .kind import NumberKind, UndefinedKind +from .logic import fuzzy_bool +from .singleton import S +from .sorting import ordered +from .sympify import sympify +from sympy.logic.boolalg import Boolean +from sympy.utilities.iterables import sift, is_sequence +from sympy.utilities.misc import filldedent + +import string +import re as _re +import random +from itertools import product +from typing import Any + + +class Str(Atom): + """ + Represents string in SymPy. + + Explanation + =========== + + Previously, ``Symbol`` was used where string is needed in ``args`` of SymPy + objects, e.g. denoting the name of the instance. However, since ``Symbol`` + represents mathematical scalar, this class should be used instead. + + """ + __slots__ = ('name',) + + def __new__(cls, name, **kwargs): + if not isinstance(name, str): + raise TypeError("name should be a string, not %s" % repr(type(name))) + obj = Expr.__new__(cls, **kwargs) + obj.name = name + return obj + + def __getnewargs__(self): + return (self.name,) + + def _hashable_content(self): + return (self.name,) + + +def _filter_assumptions(kwargs): + """Split the given dict into assumptions and non-assumptions. + Keys are taken as assumptions if they correspond to an + entry in ``_assume_defined``. + """ + assumptions, nonassumptions = map(dict, sift(kwargs.items(), + lambda i: i[0] in _assume_defined, + binary=True)) + Symbol._sanitize(assumptions) + return assumptions, nonassumptions + +def _symbol(s, matching_symbol=None, **assumptions): + """Return s if s is a Symbol, else if s is a string, return either + the matching_symbol if the names are the same or else a new symbol + with the same assumptions as the matching symbol (or the + assumptions as provided). 
+ + Examples + ======== + + >>> from sympy import Symbol + >>> from sympy.core.symbol import _symbol + >>> _symbol('y') + y + >>> _.is_real is None + True + >>> _symbol('y', real=True).is_real + True + + >>> x = Symbol('x') + >>> _symbol(x, real=True) + x + >>> _.is_real is None # ignore attribute if s is a Symbol + True + + Below, the variable sym has the name 'foo': + + >>> sym = Symbol('foo', real=True) + + Since 'x' is not the same as sym's name, a new symbol is created: + + >>> _symbol('x', sym).name + 'x' + + It will acquire any assumptions give: + + >>> _symbol('x', sym, real=False).is_real + False + + Since 'foo' is the same as sym's name, sym is returned + + >>> _symbol('foo', sym) + foo + + Any assumptions given are ignored: + + >>> _symbol('foo', sym, real=False).is_real + True + + NB: the symbol here may not be the same as a symbol with the same + name defined elsewhere as a result of different assumptions. + + See Also + ======== + + sympy.core.symbol.Symbol + + """ + if isinstance(s, str): + if matching_symbol and matching_symbol.name == s: + return matching_symbol + return Symbol(s, **assumptions) + elif isinstance(s, Symbol): + return s + else: + raise ValueError('symbol must be string for symbol name or Symbol') + +def uniquely_named_symbol(xname, exprs=(), compare=str, modify=None, **assumptions): + """ + Return a symbol whose name is derivated from *xname* but is unique + from any other symbols in *exprs*. + + *xname* and symbol names in *exprs* are passed to *compare* to be + converted to comparable forms. If ``compare(xname)`` is not unique, + it is recursively passed to *modify* until unique name is acquired. + + Parameters + ========== + + xname : str or Symbol + Base name for the new symbol. + + exprs : Expr or iterable of Expr + Expressions whose symbols are compared to *xname*. + + compare : function + Unary function which transforms *xname* and symbol names from + *exprs* to comparable form. + + modify : function + Unary function which modifies the string. Default is appending + the number, or increasing the number if exists. + + Examples + ======== + + By default, a number is appended to *xname* to generate unique name. + If the number already exists, it is recursively increased. + + >>> from sympy.core.symbol import uniquely_named_symbol, Symbol + >>> uniquely_named_symbol('x', Symbol('x')) + x0 + >>> uniquely_named_symbol('x', (Symbol('x'), Symbol('x0'))) + x1 + >>> uniquely_named_symbol('x0', (Symbol('x1'), Symbol('x0'))) + x2 + + Name generation can be controlled by passing *modify* parameter. 
+ + >>> from sympy.abc import x + >>> uniquely_named_symbol('x', x, modify=lambda s: 2*s) + xx + + """ + def numbered_string_incr(s, start=0): + if not s: + return str(start) + i = len(s) - 1 + while i != -1: + if not s[i].isdigit(): + break + i -= 1 + n = str(int(s[i + 1:] or start - 1) + 1) + return s[:i + 1] + n + + default = None + if is_sequence(xname): + xname, default = xname + x = compare(xname) + if not exprs: + return _symbol(x, default, **assumptions) + if not is_sequence(exprs): + exprs = [exprs] + names = set().union( + [i.name for e in exprs for i in e.atoms(Symbol)] + + [i.func.name for e in exprs for i in e.atoms(AppliedUndef)]) + if modify is None: + modify = numbered_string_incr + while any(x == compare(s) for s in names): + x = modify(x) + return _symbol(x, default, **assumptions) +_uniquely_named_symbol = uniquely_named_symbol + +class Symbol(AtomicExpr, Boolean): + """ + Assumptions: + commutative = True + + You can override the default assumptions in the constructor. + + Examples + ======== + + >>> from sympy import symbols + >>> A,B = symbols('A,B', commutative = False) + >>> bool(A*B != B*A) + True + >>> bool(A*B*2 == 2*A*B) == True # multiplication by scalars is commutative + True + + """ + + is_comparable = False + + __slots__ = ('name', '_assumptions_orig', '_assumptions0') + + name: str + + is_Symbol = True + is_symbol = True + + @property + def kind(self): + if self.is_commutative: + return NumberKind + return UndefinedKind + + @property + def _diff_wrt(self): + """Allow derivatives wrt Symbols. + + Examples + ======== + + >>> from sympy import Symbol + >>> x = Symbol('x') + >>> x._diff_wrt + True + """ + return True + + @staticmethod + def _sanitize(assumptions, obj=None): + """Remove None, convert values to bool, check commutativity *in place*. + """ + + # be strict about commutativity: cannot be None + is_commutative = fuzzy_bool(assumptions.get('commutative', True)) + if is_commutative is None: + whose = '%s ' % obj.__name__ if obj else '' + raise ValueError( + '%scommutativity must be True or False.' % whose) + + # sanitize other assumptions so 1 -> True and 0 -> False + for key in list(assumptions.keys()): + v = assumptions[key] + if v is None: + assumptions.pop(key) + continue + assumptions[key] = bool(v) + + def _merge(self, assumptions): + base = self.assumptions0 + for k in set(assumptions) & set(base): + if assumptions[k] != base[k]: + raise ValueError(filldedent(''' + non-matching assumptions for %s: existing value + is %s and new value is %s''' % ( + k, base[k], assumptions[k]))) + base.update(assumptions) + return base + + def __new__(cls, name, **assumptions): + """Symbols are identified by name and assumptions:: + + >>> from sympy import Symbol + >>> Symbol("x") == Symbol("x") + True + >>> Symbol("x", real=True) == Symbol("x", real=False) + False + + """ + cls._sanitize(assumptions, cls) + return Symbol.__xnew_cached_(cls, name, **assumptions) + + @staticmethod + def __xnew__(cls, name, **assumptions): # never cached (e.g. dummy) + if not isinstance(name, str): + raise TypeError("name should be a string, not %s" % repr(type(name))) + + # This is retained purely so that srepr can include commutative=True if + # that was explicitly specified but not if it was not. Ideally srepr + # should not distinguish these cases because the symbols otherwise + # compare equal and are considered equivalent. 
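+        # For example (illustrative only, following the note above):
+        # Symbol('x') and Symbol('x', commutative=True) compare equal and
+        # hash the same, but only the latter records the assumption in
+        # _assumptions_orig, so srepr() can reproduce exactly what was
+        # passed to the constructor.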
+ # + # See https://github.com/sympy/sympy/issues/8873 + # + assumptions_orig = assumptions.copy() + + # The only assumption that is assumed by default is comutative=True: + assumptions.setdefault('commutative', True) + + assumptions_kb = StdFactKB(assumptions) + assumptions0 = dict(assumptions_kb) + + obj = Expr.__new__(cls) + obj.name = name + + obj._assumptions = assumptions_kb + obj._assumptions_orig = assumptions_orig + obj._assumptions0 = assumptions0 + + # The three assumptions dicts are all a little different: + # + # >>> from sympy import Symbol + # >>> x = Symbol('x', finite=True) + # >>> x.is_positive # query an assumption + # >>> x._assumptions + # {'finite': True, 'infinite': False, 'commutative': True, 'positive': None} + # >>> x._assumptions0 + # {'finite': True, 'infinite': False, 'commutative': True} + # >>> x._assumptions_orig + # {'finite': True} + # + # Two symbols with the same name are equal if their _assumptions0 are + # the same. Arguably it should be _assumptions_orig that is being + # compared because that is more transparent to the user (it is + # what was passed to the constructor modulo changes made by _sanitize). + + return obj + + @staticmethod + @cacheit + def __xnew_cached_(cls, name, **assumptions): # symbols are always cached + return Symbol.__xnew__(cls, name, **assumptions) + + def __getnewargs_ex__(self): + return ((self.name,), self._assumptions_orig) + + # NOTE: __setstate__ is not needed for pickles created by __getnewargs_ex__ + # but was used before Symbol was changed to use __getnewargs_ex__ in v1.9. + # Pickles created in previous SymPy versions will still need __setstate__ + # so that they can be unpickled in SymPy > v1.9. + + def __setstate__(self, state): + for name, value in state.items(): + setattr(self, name, value) + + def _hashable_content(self): + # Note: user-specified assumptions not hashed, just derived ones + return (self.name,) + tuple(sorted(self.assumptions0.items())) + + def _eval_subs(self, old, new): + if old.is_Pow: + from sympy.core.power import Pow + return Pow(self, S.One, evaluate=False)._eval_subs(old, new) + + def _eval_refine(self, assumptions): + return self + + @property + def assumptions0(self): + return self._assumptions0.copy() + + @cacheit + def sort_key(self, order=None): + return self.class_key(), (1, (self.name,)), S.One.sort_key(), S.One + + def as_dummy(self): + # only put commutativity in explicitly if it is False + return Dummy(self.name) if self.is_commutative is not False \ + else Dummy(self.name, commutative=self.is_commutative) + + def as_real_imag(self, deep=True, **hints): + if hints.get('ignore') == self: + return None + else: + from sympy.functions.elementary.complexes import im, re + return (re(self), im(self)) + + def is_constant(self, *wrt, **flags): + if not wrt: + return False + return self not in wrt + + @property + def free_symbols(self): + return {self} + + binary_symbols = free_symbols # in this case, not always + + def as_set(self): + return S.UniversalSet + + +class Dummy(Symbol): + """Dummy symbols are each unique, even if they have the same name: + + Examples + ======== + + >>> from sympy import Dummy + >>> Dummy("x") == Dummy("x") + False + + If a name is not supplied then a string value of an internal count will be + used. This is useful when a temporary variable is needed and the name + of the variable used in the expression is not important. 
+ + >>> Dummy() #doctest: +SKIP + _Dummy_10 + + """ + + # In the rare event that a Dummy object needs to be recreated, both the + # `name` and `dummy_index` should be passed. This is used by `srepr` for + # example: + # >>> d1 = Dummy() + # >>> d2 = eval(srepr(d1)) + # >>> d2 == d1 + # True + # + # If a new session is started between `srepr` and `eval`, there is a very + # small chance that `d2` will be equal to a previously-created Dummy. + + _count = 0 + _prng = random.Random() + _base_dummy_index = _prng.randint(10**6, 9*10**6) + + __slots__ = ('dummy_index',) + + is_Dummy = True + + def __new__(cls, name=None, dummy_index=None, **assumptions): + if dummy_index is not None: + assert name is not None, "If you specify a dummy_index, you must also provide a name" + + if name is None: + name = "Dummy_" + str(Dummy._count) + + if dummy_index is None: + dummy_index = Dummy._base_dummy_index + Dummy._count + Dummy._count += 1 + + cls._sanitize(assumptions, cls) + obj = Symbol.__xnew__(cls, name, **assumptions) + + obj.dummy_index = dummy_index + + return obj + + def __getnewargs_ex__(self): + return ((self.name, self.dummy_index), self._assumptions_orig) + + @cacheit + def sort_key(self, order=None): + return self.class_key(), ( + 2, (self.name, self.dummy_index)), S.One.sort_key(), S.One + + def _hashable_content(self): + return Symbol._hashable_content(self) + (self.dummy_index,) + + +class Wild(Symbol): + """ + A Wild symbol matches anything, or anything + without whatever is explicitly excluded. + + Parameters + ========== + + name : str + Name of the Wild instance. + + exclude : iterable, optional + Instances in ``exclude`` will not be matched. + + properties : iterable of functions, optional + Functions, each taking an expressions as input + and returns a ``bool``. All functions in ``properties`` + need to return ``True`` in order for the Wild instance + to match the expression. + + Examples + ======== + + >>> from sympy import Wild, WildFunction, cos, pi + >>> from sympy.abc import x, y, z + >>> a = Wild('a') + >>> x.match(a) + {a_: x} + >>> pi.match(a) + {a_: pi} + >>> (3*x**2).match(a*x) + {a_: 3*x} + >>> cos(x).match(a) + {a_: cos(x)} + >>> b = Wild('b', exclude=[x]) + >>> (3*x**2).match(b*x) + >>> b.match(a) + {a_: b_} + >>> A = WildFunction('A') + >>> A.match(a) + {a_: A_} + + Tips + ==== + + When using Wild, be sure to use the exclude + keyword to make the pattern more precise. + Without the exclude pattern, you may get matches + that are technically correct, but not what you + wanted. For example, using the above without + exclude: + + >>> from sympy import symbols + >>> a, b = symbols('a b', cls=Wild) + >>> (2 + 3*y).match(a*x + b*y) + {a_: 2/x, b_: 3} + + This is technically correct, because + (2/x)*x + 3*y == 2 + 3*y, but you probably + wanted it to not match at all. The issue is that + you really did not want a and b to include x and y, + and the exclude parameter lets you specify exactly + this. With the exclude parameter, the pattern will + not match. + + >>> a = Wild('a', exclude=[x, y]) + >>> b = Wild('b', exclude=[x, y]) + >>> (2 + 3*y).match(a*x + b*y) + + Exclude also helps remove ambiguity from matches. 
+ + >>> E = 2*x**3*y*z + >>> a, b = symbols('a b', cls=Wild) + >>> E.match(a*b) + {a_: 2*y*z, b_: x**3} + >>> a = Wild('a', exclude=[x, y]) + >>> E.match(a*b) + {a_: z, b_: 2*x**3*y} + >>> a = Wild('a', exclude=[x, y, z]) + >>> E.match(a*b) + {a_: 2, b_: x**3*y*z} + + Wild also accepts a ``properties`` parameter: + + >>> a = Wild('a', properties=[lambda k: k.is_Integer]) + >>> E.match(a*b) + {a_: 2, b_: x**3*y*z} + + """ + is_Wild = True + + __slots__ = ('exclude', 'properties') + + def __new__(cls, name, exclude=(), properties=(), **assumptions): + exclude = tuple([sympify(x) for x in exclude]) + properties = tuple(properties) + cls._sanitize(assumptions, cls) + return Wild.__xnew__(cls, name, exclude, properties, **assumptions) + + def __getnewargs__(self): + return (self.name, self.exclude, self.properties) + + @staticmethod + @cacheit + def __xnew__(cls, name, exclude, properties, **assumptions): + obj = Symbol.__xnew__(cls, name, **assumptions) + obj.exclude = exclude + obj.properties = properties + return obj + + def _hashable_content(self): + return super()._hashable_content() + (self.exclude, self.properties) + + # TODO add check against another Wild + def matches(self, expr, repl_dict=None, old=False): + if any(expr.has(x) for x in self.exclude): + return None + if not all(f(expr) for f in self.properties): + return None + if repl_dict is None: + repl_dict = {} + else: + repl_dict = repl_dict.copy() + repl_dict[self] = expr + return repl_dict + + +_range = _re.compile('([0-9]*:[0-9]+|[a-zA-Z]?:[a-zA-Z])') + + +def symbols(names, *, cls=Symbol, **args) -> Any: + r""" + Transform strings into instances of :class:`Symbol` class. + + :func:`symbols` function returns a sequence of symbols with names taken + from ``names`` argument, which can be a comma or whitespace delimited + string, or a sequence of strings:: + + >>> from sympy import symbols, Function + + >>> x, y, z = symbols('x,y,z') + >>> a, b, c = symbols('a b c') + + The type of output is dependent on the properties of input arguments:: + + >>> symbols('x') + x + >>> symbols('x,') + (x,) + >>> symbols('x,y') + (x, y) + >>> symbols(('a', 'b', 'c')) + (a, b, c) + >>> symbols(['a', 'b', 'c']) + [a, b, c] + >>> symbols({'a', 'b', 'c'}) + {a, b, c} + + If an iterable container is needed for a single symbol, set the ``seq`` + argument to ``True`` or terminate the symbol name with a comma:: + + >>> symbols('x', seq=True) + (x,) + + To reduce typing, range syntax is supported to create indexed symbols. + Ranges are indicated by a colon and the type of range is determined by + the character to the right of the colon. 
If the character is a digit + then all contiguous digits to the left are taken as the nonnegative + starting value (or 0 if there is no digit left of the colon) and all + contiguous digits to the right are taken as 1 greater than the ending + value:: + + >>> symbols('x:10') + (x0, x1, x2, x3, x4, x5, x6, x7, x8, x9) + + >>> symbols('x5:10') + (x5, x6, x7, x8, x9) + >>> symbols('x5(:2)') + (x50, x51) + + >>> symbols('x5:10,y:5') + (x5, x6, x7, x8, x9, y0, y1, y2, y3, y4) + + >>> symbols(('x5:10', 'y:5')) + ((x5, x6, x7, x8, x9), (y0, y1, y2, y3, y4)) + + If the character to the right of the colon is a letter, then the single + letter to the left (or 'a' if there is none) is taken as the start + and all characters in the lexicographic range *through* the letter to + the right are used as the range:: + + >>> symbols('x:z') + (x, y, z) + >>> symbols('x:c') # null range + () + >>> symbols('x(:c)') + (xa, xb, xc) + + >>> symbols(':c') + (a, b, c) + + >>> symbols('a:d, x:z') + (a, b, c, d, x, y, z) + + >>> symbols(('a:d', 'x:z')) + ((a, b, c, d), (x, y, z)) + + Multiple ranges are supported; contiguous numerical ranges should be + separated by parentheses to disambiguate the ending number of one + range from the starting number of the next:: + + >>> symbols('x:2(1:3)') + (x01, x02, x11, x12) + >>> symbols(':3:2') # parsing is from left to right + (00, 01, 10, 11, 20, 21) + + Only one pair of parentheses surrounding ranges are removed, so to + include parentheses around ranges, double them. And to include spaces, + commas, or colons, escape them with a backslash:: + + >>> symbols('x((a:b))') + (x(a), x(b)) + >>> symbols(r'x(:1\,:2)') # or r'x((:1)\,(:2))' + (x(0,0), x(0,1)) + + All newly created symbols have assumptions set according to ``args``:: + + >>> a = symbols('a', integer=True) + >>> a.is_integer + True + + >>> x, y, z = symbols('x,y,z', real=True) + >>> x.is_real and y.is_real and z.is_real + True + + Despite its name, :func:`symbols` can create symbol-like objects like + instances of Function or Wild classes. 
To achieve this, set ``cls`` + keyword argument to the desired type:: + + >>> symbols('f,g,h', cls=Function) + (f, g, h) + + >>> type(_[0]) + + + """ + result = [] + + if isinstance(names, str): + marker = 0 + splitters = r'\,', r'\:', r'\ ' + literals: list[tuple[str, str]] = [] + for splitter in splitters: + if splitter in names: + while chr(marker) in names: + marker += 1 + lit_char = chr(marker) + marker += 1 + names = names.replace(splitter, lit_char) + literals.append((lit_char, splitter[1:])) + def literal(s): + if literals: + for c, l in literals: + s = s.replace(c, l) + return s + + names = names.strip() + as_seq = names.endswith(',') + if as_seq: + names = names[:-1].rstrip() + if not names: + raise ValueError('no symbols given') + + # split on commas + names = [n.strip() for n in names.split(',')] + if not all(n for n in names): + raise ValueError('missing symbol between commas') + # split on spaces + for i in range(len(names) - 1, -1, -1): + names[i: i + 1] = names[i].split() + + seq = args.pop('seq', as_seq) + + for name in names: + if not name: + raise ValueError('missing symbol') + + if ':' not in name: + symbol = cls(literal(name), **args) + result.append(symbol) + continue + + split: list[str] = _range.split(name) + split_list: list[list[str]] = [] + # remove 1 layer of bounding parentheses around ranges + for i in range(len(split) - 1): + if i and ':' in split[i] and split[i] != ':' and \ + split[i - 1].endswith('(') and \ + split[i + 1].startswith(')'): + split[i - 1] = split[i - 1][:-1] + split[i + 1] = split[i + 1][1:] + for s in split: + if ':' in s: + if s.endswith(':'): + raise ValueError('missing end range') + a, b = s.split(':') + if b[-1] in string.digits: + a_i = 0 if not a else int(a) + b_i = int(b) + split_list.append([str(c) for c in range(a_i, b_i)]) + else: + a = a or 'a' + split_list.append([string.ascii_letters[c] for c in range( + string.ascii_letters.index(a), + string.ascii_letters.index(b) + 1)]) # inclusive + if not split_list[-1]: + break + else: + split_list.append([s]) + else: + seq = True + if len(split_list) == 1: + names = split_list[0] + else: + names = [''.join(s) for s in product(*split_list)] + if literals: + result.extend([cls(literal(s), **args) for s in names]) + else: + result.extend([cls(s, **args) for s in names]) + + if not seq and len(result) <= 1: + if not result: + return () + return result[0] + + return tuple(result) + else: + for name in names: + result.append(symbols(name, cls=cls, **args)) + + return type(names)(result) + + +def var(names, **args): + """ + Create symbols and inject them into the global namespace. + + Explanation + =========== + + This calls :func:`symbols` with the same arguments and puts the results + into the *global* namespace. It's recommended not to use :func:`var` in + library code, where :func:`symbols` has to be used:: + + Examples + ======== + + >>> from sympy import var + + >>> var('x') + x + >>> x # noqa: F821 + x + + >>> var('a,ab,abc') + (a, ab, abc) + >>> abc # noqa: F821 + abc + + >>> var('x,y', real=True) + (x, y) + >>> x.is_real and y.is_real # noqa: F821 + True + + See :func:`symbols` documentation for more details on what kinds of + arguments can be passed to :func:`var`. + + """ + def traverse(symbols, frame): + """Recursively inject symbols to the global namespace. 
""" + for symbol in symbols: + if isinstance(symbol, Basic): + frame.f_globals[symbol.name] = symbol + elif isinstance(symbol, FunctionClass): + frame.f_globals[symbol.__name__] = symbol + else: + traverse(symbol, frame) + + from inspect import currentframe + frame = currentframe().f_back + + try: + syms = symbols(names, **args) + + if syms is not None: + if isinstance(syms, Basic): + frame.f_globals[syms.name] = syms + elif isinstance(syms, FunctionClass): + frame.f_globals[syms.__name__] = syms + else: + traverse(syms, frame) + finally: + del frame # break cyclic dependencies as stated in inspect docs + + return syms + +def disambiguate(*iter): + """ + Return a Tuple containing the passed expressions with symbols + that appear the same when printed replaced with numerically + subscripted symbols, and all Dummy symbols replaced with Symbols. + + Parameters + ========== + + iter: list of symbols or expressions. + + Examples + ======== + + >>> from sympy.core.symbol import disambiguate + >>> from sympy import Dummy, Symbol, Tuple + >>> from sympy.abc import y + + >>> tup = Symbol('_x'), Dummy('x'), Dummy('x') + >>> disambiguate(*tup) + (x_2, x, x_1) + + >>> eqs = Tuple(Symbol('x')/y, Dummy('x')/y) + >>> disambiguate(*eqs) + (x_1/y, x/y) + + >>> ix = Symbol('x', integer=True) + >>> vx = Symbol('x') + >>> disambiguate(vx + ix) + (x + x_1,) + + To make your own mapping of symbols to use, pass only the free symbols + of the expressions and create a dictionary: + + >>> free = eqs.free_symbols + >>> mapping = dict(zip(free, disambiguate(*free))) + >>> eqs.xreplace(mapping) + (x_1/y, x/y) + + """ + new_iter = Tuple(*iter) + key = lambda x:tuple(sorted(x.assumptions0.items())) + syms = ordered(new_iter.free_symbols, keys=key) + mapping = {} + for s in syms: + mapping.setdefault(str(s).lstrip('_'), []).append(s) + reps = {} + for k in mapping: + # the first or only symbol doesn't get subscripted but make + # sure that it's a Symbol, not a Dummy + mapk0 = Symbol("%s" % (k), **mapping[k][0].assumptions0) + if mapping[k][0] != mapk0: + reps[mapping[k][0]] = mapk0 + # the others get subscripts (and are made into Symbols) + skip = 0 + for i in range(1, len(mapping[k])): + while True: + name = "%s_%i" % (k, i + skip) + if name not in mapping: + break + skip += 1 + ki = mapping[k][i] + reps[ki] = Symbol(name, **ki.assumptions0) + return new_iter.xreplace(reps) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/__init__.py b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8846a99510601c9675103e21ef5a0a1e839fdd11 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/__init__.py @@ -0,0 +1,19 @@ +from .diffgeom import ( + BaseCovarDerivativeOp, BaseScalarField, BaseVectorField, Commutator, + contravariant_order, CoordSystem, CoordinateSymbol, + CovarDerivativeOp, covariant_order, Differential, intcurve_diffequ, + intcurve_series, LieDerivative, Manifold, metric_to_Christoffel_1st, + metric_to_Christoffel_2nd, metric_to_Ricci_components, + metric_to_Riemann_components, Patch, Point, TensorProduct, twoform_to_matrix, + vectors_in_basis, WedgeProduct, +) + +__all__ = [ + 'BaseCovarDerivativeOp', 'BaseScalarField', 'BaseVectorField', 'Commutator', + 'contravariant_order', 'CoordSystem', 'CoordinateSymbol', + 'CovarDerivativeOp', 'covariant_order', 'Differential', 'intcurve_diffequ', + 'intcurve_series', 'LieDerivative', 'Manifold', 'metric_to_Christoffel_1st', + 
'metric_to_Christoffel_2nd', 'metric_to_Ricci_components', + 'metric_to_Riemann_components', 'Patch', 'Point', 'TensorProduct', + 'twoform_to_matrix', 'vectors_in_basis', 'WedgeProduct', +] diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/diffgeom.py b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/diffgeom.py new file mode 100644 index 0000000000000000000000000000000000000000..3c7c3feec9f7add1109e56901bdc9973007384cf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/diffgeom.py @@ -0,0 +1,2273 @@ +from __future__ import annotations +from typing import Any + +from functools import reduce +from itertools import permutations + +from sympy.combinatorics import Permutation +from sympy.core import ( + Basic, Expr, Function, diff, + Pow, Mul, Add, Lambda, S, Tuple, Dict +) +from sympy.core.cache import cacheit + +from sympy.core.symbol import Symbol, Dummy +from sympy.core.symbol import Str +from sympy.core.sympify import _sympify +from sympy.functions import factorial +from sympy.matrices import ImmutableDenseMatrix as Matrix +from sympy.solvers import solve + +from sympy.utilities.exceptions import (sympy_deprecation_warning, + SymPyDeprecationWarning, + ignore_warnings) + + +# TODO you are a bit excessive in the use of Dummies +# TODO dummy point, literal field +# TODO too often one needs to call doit or simplify on the output, check the +# tests and find out why +from sympy.tensor.array import ImmutableDenseNDimArray + + +class Manifold(Basic): + """ + A mathematical manifold. + + Explanation + =========== + + A manifold is a topological space that locally resembles + Euclidean space near each point [1]. + This class does not provide any means to study the topological + characteristics of the manifold that it represents, though. + + Parameters + ========== + + name : str + The name of the manifold. + + dim : int + The dimension of the manifold. + + Examples + ======== + + >>> from sympy.diffgeom import Manifold + >>> m = Manifold('M', 2) + >>> m + M + >>> m.dim + 2 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Manifold + """ + + def __new__(cls, name, dim, **kwargs): + if not isinstance(name, Str): + name = Str(name) + dim = _sympify(dim) + obj = super().__new__(cls, name, dim) + + obj.patches = _deprecated_list( + """ + Manifold.patches is deprecated. The Manifold object is now + immutable. Instead use a separate list to keep track of the + patches. + """, []) + return obj + + @property + def name(self): + return self.args[0] + + @property + def dim(self): + return self.args[1] + + +class Patch(Basic): + """ + A patch on a manifold. + + Explanation + =========== + + Coordinate patch, or patch in short, is a simply-connected open set around + a point in the manifold [1]. On a manifold one can have many patches that + do not always include the whole manifold. On these patches coordinate + charts can be defined that permit the parameterization of any point on the + patch in terms of a tuple of real numbers (the coordinates). + + This class does not provide any means to study the topological + characteristics of the patch that it represents. + + Parameters + ========== + + name : str + The name of the patch. + + manifold : Manifold + The manifold on which the patch is defined. + + Examples + ======== + + >>> from sympy.diffgeom import Manifold, Patch + >>> m = Manifold('M', 2) + >>> p = Patch('P', m) + >>> p + P + >>> p.dim + 2 + + References + ========== + + .. [1] G. Sussman, J. Wisdom, W. 
Farr, Functional Differential Geometry + (2013) + + """ + def __new__(cls, name, manifold, **kwargs): + if not isinstance(name, Str): + name = Str(name) + obj = super().__new__(cls, name, manifold) + + obj.manifold.patches.append(obj) # deprecated + obj.coord_systems = _deprecated_list( + """ + Patch.coord_systms is deprecated. The Patch class is now + immutable. Instead use a separate list to keep track of coordinate + systems. + """, []) + return obj + + @property + def name(self): + return self.args[0] + + @property + def manifold(self): + return self.args[1] + + @property + def dim(self): + return self.manifold.dim + + +class CoordSystem(Basic): + """ + A coordinate system defined on the patch. + + Explanation + =========== + + Coordinate system is a system that uses one or more coordinates to uniquely + determine the position of the points or other geometric elements on a + manifold [1]. + + By passing ``Symbols`` to *symbols* parameter, user can define the name and + assumptions of coordinate symbols of the coordinate system. If not passed, + these symbols are generated automatically and are assumed to be real valued. + + By passing *relations* parameter, user can define the transform relations of + coordinate systems. Inverse transformation and indirect transformation can + be found automatically. If this parameter is not passed, coordinate + transformation cannot be done. + + Parameters + ========== + + name : str + The name of the coordinate system. + + patch : Patch + The patch where the coordinate system is defined. + + symbols : list of Symbols, optional + Defines the names and assumptions of coordinate symbols. + + relations : dict, optional + Key is a tuple of two strings, who are the names of the systems where + the coordinates transform from and transform to. + Value is a tuple of the symbols before transformation and a tuple of + the expressions after transformation. + + Examples + ======== + + We define two-dimensional Cartesian coordinate system and polar coordinate + system. + + >>> from sympy import symbols, pi, sqrt, atan2, cos, sin + >>> from sympy.diffgeom import Manifold, Patch, CoordSystem + >>> m = Manifold('M', 2) + >>> p = Patch('P', m) + >>> x, y = symbols('x y', real=True) + >>> r, theta = symbols('r theta', nonnegative=True) + >>> relation_dict = { + ... ('Car2D', 'Pol'): [(x, y), (sqrt(x**2 + y**2), atan2(y, x))], + ... ('Pol', 'Car2D'): [(r, theta), (r*cos(theta), r*sin(theta))] + ... } + >>> Car2D = CoordSystem('Car2D', p, (x, y), relation_dict) + >>> Pol = CoordSystem('Pol', p, (r, theta), relation_dict) + + ``symbols`` property returns ``CoordinateSymbol`` instances. These symbols + are not same with the symbols used to construct the coordinate system. + + >>> Car2D + Car2D + >>> Car2D.dim + 2 + >>> Car2D.symbols + (x, y) + >>> _[0].func + + + ``transformation()`` method returns the transformation function from + one coordinate system to another. ``transform()`` method returns the + transformed coordinates. + + >>> Car2D.transformation(Pol) + Lambda((x, y), Matrix([ + [sqrt(x**2 + y**2)], + [ atan2(y, x)]])) + >>> Car2D.transform(Pol) + Matrix([ + [sqrt(x**2 + y**2)], + [ atan2(y, x)]]) + >>> Car2D.transform(Pol, [1, 2]) + Matrix([ + [sqrt(5)], + [atan(2)]]) + + ``jacobian()`` method returns the Jacobian matrix of coordinate + transformation between two systems. ``jacobian_determinant()`` method + returns the Jacobian determinant of coordinate transformation between two + systems. 
+ + >>> Pol.jacobian(Car2D) + Matrix([ + [cos(theta), -r*sin(theta)], + [sin(theta), r*cos(theta)]]) + >>> Pol.jacobian(Car2D, [1, pi/2]) + Matrix([ + [0, -1], + [1, 0]]) + >>> Car2D.jacobian_determinant(Pol) + 1/sqrt(x**2 + y**2) + >>> Car2D.jacobian_determinant(Pol, [1,0]) + 1 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Coordinate_system + + """ + def __new__(cls, name, patch, symbols=None, relations={}, **kwargs): + if not isinstance(name, Str): + name = Str(name) + + # canonicallize the symbols + if symbols is None: + names = kwargs.get('names', None) + if names is None: + symbols = Tuple( + *[Symbol('%s_%s' % (name.name, i), real=True) + for i in range(patch.dim)] + ) + else: + sympy_deprecation_warning( + f""" +The 'names' argument to CoordSystem is deprecated. Use 'symbols' instead. That +is, replace + + CoordSystem(..., names={names}) + +with + + CoordSystem(..., symbols=[{', '.join(["Symbol(" + repr(n) + ", real=True)" for n in names])}]) + """, + deprecated_since_version="1.7", + active_deprecations_target="deprecated-diffgeom-mutable", + ) + symbols = Tuple( + *[Symbol(n, real=True) for n in names] + ) + else: + syms = [] + for s in symbols: + if isinstance(s, Symbol): + syms.append(Symbol(s.name, **s._assumptions.generator)) + elif isinstance(s, str): + sympy_deprecation_warning( + f""" + +Passing a string as the coordinate symbol name to CoordSystem is deprecated. +Pass a Symbol with the appropriate name and assumptions instead. + +That is, replace {s} with Symbol({s!r}, real=True). + """, + + deprecated_since_version="1.7", + active_deprecations_target="deprecated-diffgeom-mutable", + ) + syms.append(Symbol(s, real=True)) + symbols = Tuple(*syms) + + # canonicallize the relations + rel_temp = {} + for k,v in relations.items(): + s1, s2 = k + if not isinstance(s1, Str): + s1 = Str(s1) + if not isinstance(s2, Str): + s2 = Str(s2) + key = Tuple(s1, s2) + + # Old version used Lambda as a value. + if isinstance(v, Lambda): + v = (tuple(v.signature), tuple(v.expr)) + else: + v = (tuple(v[0]), tuple(v[1])) + rel_temp[key] = v + relations = Dict(rel_temp) + + # construct the object + obj = super().__new__(cls, name, patch, symbols, relations) + + # Add deprecated attributes + obj.transforms = _deprecated_dict( + """ + CoordSystem.transforms is deprecated. The CoordSystem class is now + immutable. Use the 'relations' keyword argument to the + CoordSystems() constructor to specify relations. + """, {}) + obj._names = [str(n) for n in symbols] + obj.patch.coord_systems.append(obj) # deprecated + obj._dummies = [Dummy(str(n)) for n in symbols] # deprecated + obj._dummy = Dummy() + + return obj + + @property + def name(self): + return self.args[0] + + @property + def patch(self): + return self.args[1] + + @property + def manifold(self): + return self.patch.manifold + + @property + def symbols(self): + return tuple(CoordinateSymbol(self, i, **s._assumptions.generator) + for i,s in enumerate(self.args[2])) + + @property + def relations(self): + return self.args[3] + + @property + def dim(self): + return self.patch.dim + + ########################################################################## + # Finding transformation relation + ########################################################################## + + def transformation(self, sys): + """ + Return coordinate transformation function from *self* to *sys*. 
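+
+        The returned ``Lambda`` takes the coordinate symbols of *self* and
+        returns a column matrix of the corresponding coordinates in *sys*.
+        The relation is taken directly from ``relations`` when present,
+        inverted when only the reverse direction is given, and composed
+        through intermediate systems otherwise.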
+ + Parameters + ========== + + sys : CoordSystem + + Returns + ======= + + sympy.Lambda + + Examples + ======== + + >>> from sympy.diffgeom.rn import R2_r, R2_p + >>> R2_r.transformation(R2_p) + Lambda((x, y), Matrix([ + [sqrt(x**2 + y**2)], + [ atan2(y, x)]])) + + """ + signature = self.args[2] + + key = Tuple(self.name, sys.name) + if self == sys: + expr = Matrix(self.symbols) + elif key in self.relations: + expr = Matrix(self.relations[key][1]) + elif key[::-1] in self.relations: + expr = Matrix(self._inverse_transformation(sys, self)) + else: + expr = Matrix(self._indirect_transformation(self, sys)) + return Lambda(signature, expr) + + @staticmethod + def _solve_inverse(sym1, sym2, exprs, sys1_name, sys2_name): + ret = solve( + [t[0] - t[1] for t in zip(sym2, exprs)], + list(sym1), dict=True) + + if len(ret) == 0: + temp = "Cannot solve inverse relation from {} to {}." + raise NotImplementedError(temp.format(sys1_name, sys2_name)) + elif len(ret) > 1: + temp = "Obtained multiple inverse relation from {} to {}." + raise ValueError(temp.format(sys1_name, sys2_name)) + + return ret[0] + + @classmethod + def _inverse_transformation(cls, sys1, sys2): + # Find the transformation relation from sys2 to sys1 + forward = sys1.transform(sys2) + inv_results = cls._solve_inverse(sys1.symbols, sys2.symbols, forward, + sys1.name, sys2.name) + signature = tuple(sys1.symbols) + return [inv_results[s] for s in signature] + + @classmethod + @cacheit + def _indirect_transformation(cls, sys1, sys2): + # Find the transformation relation between two indirectly connected + # coordinate systems + rel = sys1.relations + path = cls._dijkstra(sys1, sys2) + + transforms = [] + for s1, s2 in zip(path, path[1:]): + if (s1, s2) in rel: + transforms.append(rel[(s1, s2)]) + else: + sym2, inv_exprs = rel[(s2, s1)] + sym1 = tuple(Dummy() for i in sym2) + ret = cls._solve_inverse(sym2, sym1, inv_exprs, s2, s1) + ret = tuple(ret[s] for s in sym2) + transforms.append((sym1, ret)) + syms = sys1.args[2] + exprs = syms + for newsyms, newexprs in transforms: + exprs = tuple(e.subs(zip(newsyms, exprs)) for e in newexprs) + return exprs + + @staticmethod + def _dijkstra(sys1, sys2): + # Use Dijkstra algorithm to find the shortest path between two indirectly-connected + # coordinate systems + # return value is the list of the names of the systems. 
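+        # Rough illustration (comment added for clarity, names are made up):
+        # if the only relations are keyed ('Car2D', 'Pol') and ('Pol', 'Cyl'),
+        # the graph built below is {Car2D: {Pol}, Pol: {Car2D, Cyl},
+        # Cyl: {Pol}} and the path returned for Car2D -> Cyl is
+        # ['Car2D', 'Pol', 'Cyl'].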
+ relations = sys1.relations + graph = {} + for s1, s2 in relations.keys(): + if s1 not in graph: + graph[s1] = {s2} + else: + graph[s1].add(s2) + if s2 not in graph: + graph[s2] = {s1} + else: + graph[s2].add(s1) + + path_dict = {sys:[0, [], 0] for sys in graph} # minimum distance, path, times of visited + + def visit(sys): + path_dict[sys][2] = 1 + for newsys in graph[sys]: + distance = path_dict[sys][0] + 1 + if path_dict[newsys][0] >= distance or not path_dict[newsys][1]: + path_dict[newsys][0] = distance + path_dict[newsys][1] = list(path_dict[sys][1]) + path_dict[newsys][1].append(sys) + + visit(sys1.name) + + while True: + min_distance = max(path_dict.values(), key=lambda x:x[0])[0] + newsys = None + for sys, lst in path_dict.items(): + if 0 < lst[0] <= min_distance and not lst[2]: + min_distance = lst[0] + newsys = sys + if newsys is None: + break + visit(newsys) + + result = path_dict[sys2.name][1] + result.append(sys2.name) + + if result == [sys2.name]: + raise KeyError("Two coordinate systems are not connected.") + return result + + def connect_to(self, to_sys, from_coords, to_exprs, inverse=True, fill_in_gaps=False): + sympy_deprecation_warning( + """ + The CoordSystem.connect_to() method is deprecated. Instead, + generate a new instance of CoordSystem with the 'relations' + keyword argument (CoordSystem classes are now immutable). + """, + deprecated_since_version="1.7", + active_deprecations_target="deprecated-diffgeom-mutable", + ) + + from_coords, to_exprs = dummyfy(from_coords, to_exprs) + self.transforms[to_sys] = Matrix(from_coords), Matrix(to_exprs) + + if inverse: + to_sys.transforms[self] = self._inv_transf(from_coords, to_exprs) + + if fill_in_gaps: + self._fill_gaps_in_transformations() + + @staticmethod + def _inv_transf(from_coords, to_exprs): + # Will be removed when connect_to is removed + inv_from = [i.as_dummy() for i in from_coords] + inv_to = solve( + [t[0] - t[1] for t in zip(inv_from, to_exprs)], + list(from_coords), dict=True)[0] + inv_to = [inv_to[fc] for fc in from_coords] + return Matrix(inv_from), Matrix(inv_to) + + @staticmethod + def _fill_gaps_in_transformations(): + # Will be removed when connect_to is removed + raise NotImplementedError + + ########################################################################## + # Coordinate transformations + ########################################################################## + + def transform(self, sys, coordinates=None): + """ + Return the result of coordinate transformation from *self* to *sys*. + If coordinates are not given, coordinate symbols of *self* are used. + + Parameters + ========== + + sys : CoordSystem + + coordinates : Any iterable, optional. + + Returns + ======= + + sympy.ImmutableDenseMatrix containing CoordinateSymbol + + Examples + ======== + + >>> from sympy.diffgeom.rn import R2_r, R2_p + >>> R2_r.transform(R2_p) + Matrix([ + [sqrt(x**2 + y**2)], + [ atan2(y, x)]]) + >>> R2_r.transform(R2_p, [0, 1]) + Matrix([ + [ 1], + [pi/2]]) + + """ + if coordinates is None: + coordinates = self.symbols + if self != sys: + transf = self.transformation(sys) + coordinates = transf(*coordinates) + else: + coordinates = Matrix(coordinates) + return coordinates + + def coord_tuple_transform_to(self, to_sys, coords): + """Transform ``coords`` to coord system ``to_sys``.""" + sympy_deprecation_warning( + """ + The CoordSystem.coord_tuple_transform_to() method is deprecated. + Use the CoordSystem.transform() method instead. 
+ """, + deprecated_since_version="1.7", + active_deprecations_target="deprecated-diffgeom-mutable", + ) + + coords = Matrix(coords) + if self != to_sys: + with ignore_warnings(SymPyDeprecationWarning): + transf = self.transforms[to_sys] + coords = transf[1].subs(list(zip(transf[0], coords))) + return coords + + def jacobian(self, sys, coordinates=None): + """ + Return the jacobian matrix of a transformation on given coordinates. + If coordinates are not given, coordinate symbols of *self* are used. + + Parameters + ========== + + sys : CoordSystem + + coordinates : Any iterable, optional. + + Returns + ======= + + sympy.ImmutableDenseMatrix + + Examples + ======== + + >>> from sympy.diffgeom.rn import R2_r, R2_p + >>> R2_p.jacobian(R2_r) + Matrix([ + [cos(theta), -rho*sin(theta)], + [sin(theta), rho*cos(theta)]]) + >>> R2_p.jacobian(R2_r, [1, 0]) + Matrix([ + [1, 0], + [0, 1]]) + + """ + result = self.transform(sys).jacobian(self.symbols) + if coordinates is not None: + result = result.subs(list(zip(self.symbols, coordinates))) + return result + jacobian_matrix = jacobian + + def jacobian_determinant(self, sys, coordinates=None): + """ + Return the jacobian determinant of a transformation on given + coordinates. If coordinates are not given, coordinate symbols of *self* + are used. + + Parameters + ========== + + sys : CoordSystem + + coordinates : Any iterable, optional. + + Returns + ======= + + sympy.Expr + + Examples + ======== + + >>> from sympy.diffgeom.rn import R2_r, R2_p + >>> R2_r.jacobian_determinant(R2_p) + 1/sqrt(x**2 + y**2) + >>> R2_r.jacobian_determinant(R2_p, [1, 0]) + 1 + + """ + return self.jacobian(sys, coordinates).det() + + + ########################################################################## + # Points + ########################################################################## + + def point(self, coords): + """Create a ``Point`` with coordinates given in this coord system.""" + return Point(self, coords) + + def point_to_coords(self, point): + """Calculate the coordinates of a point in this coord system.""" + return point.coords(self) + + ########################################################################## + # Base fields. + ########################################################################## + + def base_scalar(self, coord_index): + """Return ``BaseScalarField`` that takes a point and returns one of the coordinates.""" + return BaseScalarField(self, coord_index) + coord_function = base_scalar + + def base_scalars(self): + """Returns a list of all coordinate functions. + For more details see the ``base_scalar`` method of this class.""" + return [self.base_scalar(i) for i in range(self.dim)] + coord_functions = base_scalars + + def base_vector(self, coord_index): + """Return a basis vector field. + The basis vector field for this coordinate system. It is also an + operator on scalar fields.""" + return BaseVectorField(self, coord_index) + + def base_vectors(self): + """Returns a list of all base vectors. + For more details see the ``base_vector`` method of this class.""" + return [self.base_vector(i) for i in range(self.dim)] + + def base_oneform(self, coord_index): + """Return a basis 1-form field. + The basis one-form field for this coordinate system. It is also an + operator on vector fields.""" + return Differential(self.coord_function(coord_index)) + + def base_oneforms(self): + """Returns a list of all base oneforms. 
+ For more details see the ``base_oneform`` method of this class.""" + return [self.base_oneform(i) for i in range(self.dim)] + + +class CoordinateSymbol(Symbol): + """A symbol which denotes an abstract value of i-th coordinate of + the coordinate system with given context. + + Explanation + =========== + + Each coordinates in coordinate system are represented by unique symbol, + such as x, y, z in Cartesian coordinate system. + + You may not construct this class directly. Instead, use `symbols` method + of CoordSystem. + + Parameters + ========== + + coord_sys : CoordSystem + + index : integer + + Examples + ======== + + >>> from sympy import symbols, Lambda, Matrix, sqrt, atan2, cos, sin + >>> from sympy.diffgeom import Manifold, Patch, CoordSystem + >>> m = Manifold('M', 2) + >>> p = Patch('P', m) + >>> x, y = symbols('x y', real=True) + >>> r, theta = symbols('r theta', nonnegative=True) + >>> relation_dict = { + ... ('Car2D', 'Pol'): Lambda((x, y), Matrix([sqrt(x**2 + y**2), atan2(y, x)])), + ... ('Pol', 'Car2D'): Lambda((r, theta), Matrix([r*cos(theta), r*sin(theta)])) + ... } + >>> Car2D = CoordSystem('Car2D', p, [x, y], relation_dict) + >>> Pol = CoordSystem('Pol', p, [r, theta], relation_dict) + >>> x, y = Car2D.symbols + + ``CoordinateSymbol`` contains its coordinate symbol and index. + + >>> x.name + 'x' + >>> x.coord_sys == Car2D + True + >>> x.index + 0 + >>> x.is_real + True + + You can transform ``CoordinateSymbol`` into other coordinate system using + ``rewrite()`` method. + + >>> x.rewrite(Pol) + r*cos(theta) + >>> sqrt(x**2 + y**2).rewrite(Pol).simplify() + r + + """ + def __new__(cls, coord_sys, index, **assumptions): + name = coord_sys.args[2][index].name + obj = super().__new__(cls, name, **assumptions) + obj.coord_sys = coord_sys + obj.index = index + return obj + + def __getnewargs__(self): + return (self.coord_sys, self.index) + + def _hashable_content(self): + return ( + self.coord_sys, self.index + ) + tuple(sorted(self.assumptions0.items())) + + def _eval_rewrite(self, rule, args, **hints): + if isinstance(rule, CoordSystem): + return rule.transform(self.coord_sys)[self.index] + return super()._eval_rewrite(rule, args, **hints) + + +class Point(Basic): + """Point defined in a coordinate system. + + Explanation + =========== + + Mathematically, point is defined in the manifold and does not have any coordinates + by itself. Coordinate system is what imbues the coordinates to the point by coordinate + chart. However, due to the difficulty of realizing such logic, you must supply + a coordinate system and coordinates to define a Point here. + + The usage of this object after its definition is independent of the + coordinate system that was used in order to define it, however due to + limitations in the simplification routines you can arrive at complicated + expressions if you use inappropriate coordinate systems. + + Parameters + ========== + + coord_sys : CoordSystem + + coords : list + The coordinates of the point. 
+ + Examples + ======== + + >>> from sympy import pi + >>> from sympy.diffgeom import Point + >>> from sympy.diffgeom.rn import R2, R2_r, R2_p + >>> rho, theta = R2_p.symbols + + >>> p = Point(R2_p, [rho, 3*pi/4]) + + >>> p.manifold == R2 + True + + >>> p.coords() + Matrix([ + [ rho], + [3*pi/4]]) + >>> p.coords(R2_r) + Matrix([ + [-sqrt(2)*rho/2], + [ sqrt(2)*rho/2]]) + + """ + + def __new__(cls, coord_sys, coords, **kwargs): + coords = Matrix(coords) + obj = super().__new__(cls, coord_sys, coords) + obj._coord_sys = coord_sys + obj._coords = coords + return obj + + @property + def patch(self): + return self._coord_sys.patch + + @property + def manifold(self): + return self._coord_sys.manifold + + @property + def dim(self): + return self.manifold.dim + + def coords(self, sys=None): + """ + Coordinates of the point in given coordinate system. If coordinate system + is not passed, it returns the coordinates in the coordinate system in which + the poin was defined. + """ + if sys is None: + return self._coords + else: + return self._coord_sys.transform(sys, self._coords) + + @property + def free_symbols(self): + return self._coords.free_symbols + + +class BaseScalarField(Expr): + """Base scalar field over a manifold for a given coordinate system. + + Explanation + =========== + + A scalar field takes a point as an argument and returns a scalar. + A base scalar field of a coordinate system takes a point and returns one of + the coordinates of that point in the coordinate system in question. + + To define a scalar field you need to choose the coordinate system and the + index of the coordinate. + + The use of the scalar field after its definition is independent of the + coordinate system in which it was defined, however due to limitations in + the simplification routines you may arrive at more complicated + expression if you use unappropriate coordinate systems. + You can build complicated scalar fields by just building up SymPy + expressions containing ``BaseScalarField`` instances. + + Parameters + ========== + + coord_sys : CoordSystem + + index : integer + + Examples + ======== + + >>> from sympy import Function, pi + >>> from sympy.diffgeom import BaseScalarField + >>> from sympy.diffgeom.rn import R2_r, R2_p + >>> rho, _ = R2_p.symbols + >>> point = R2_p.point([rho, 0]) + >>> fx, fy = R2_r.base_scalars() + >>> ftheta = BaseScalarField(R2_r, 1) + + >>> fx(point) + rho + >>> fy(point) + 0 + + >>> (fx**2+fy**2).rcall(point) + rho**2 + + >>> g = Function('g') + >>> fg = g(ftheta-pi) + >>> fg.rcall(point) + g(-pi) + + """ + + is_commutative = True + + def __new__(cls, coord_sys, index, **kwargs): + index = _sympify(index) + obj = super().__new__(cls, coord_sys, index) + obj._coord_sys = coord_sys + obj._index = index + return obj + + @property + def coord_sys(self): + return self.args[0] + + @property + def index(self): + return self.args[1] + + @property + def patch(self): + return self.coord_sys.patch + + @property + def manifold(self): + return self.coord_sys.manifold + + @property + def dim(self): + return self.manifold.dim + + def __call__(self, *args): + """Evaluating the field at a point or doing nothing. + If the argument is a ``Point`` instance, the field is evaluated at that + point. The field is returned itself if the argument is any other + object. It is so in order to have working recursive calling mechanics + for all fields (check the ``__call__`` method of ``Expr``). 
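+
+        A short sketch of both behaviours (reusing ``fx`` and ``point`` from
+        the class-level examples above; evaluation at a ``Point`` yields the
+        coordinate, anything else returns the field unchanged):
+
+        >>> fx(point)
+        rho
+        >>> fx(0)
+        x
+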
+ """ + point = args[0] + if len(args) != 1 or not isinstance(point, Point): + return self + coords = point.coords(self._coord_sys) + # XXX Calling doit is necessary with all the Subs expressions + # XXX Calling simplify is necessary with all the trig expressions + return simplify(coords[self._index]).doit() + + # XXX Workaround for limitations on the content of args + free_symbols: set[Any] = set() + + +class BaseVectorField(Expr): + r"""Base vector field over a manifold for a given coordinate system. + + Explanation + =========== + + A vector field is an operator taking a scalar field and returning a + directional derivative (which is also a scalar field). + A base vector field is the same type of operator, however the derivation is + specifically done with respect to a chosen coordinate. + + To define a base vector field you need to choose the coordinate system and + the index of the coordinate. + + The use of the vector field after its definition is independent of the + coordinate system in which it was defined, however due to limitations in the + simplification routines you may arrive at more complicated expression if you + use unappropriate coordinate systems. + + Parameters + ========== + coord_sys : CoordSystem + + index : integer + + Examples + ======== + + >>> from sympy import Function + >>> from sympy.diffgeom.rn import R2_p, R2_r + >>> from sympy.diffgeom import BaseVectorField + >>> from sympy import pprint + + >>> x, y = R2_r.symbols + >>> rho, theta = R2_p.symbols + >>> fx, fy = R2_r.base_scalars() + >>> point_p = R2_p.point([rho, theta]) + >>> point_r = R2_r.point([x, y]) + + >>> g = Function('g') + >>> s_field = g(fx, fy) + + >>> v = BaseVectorField(R2_r, 1) + >>> pprint(v(s_field)) + / d \| + |---(g(x, xi))|| + \dxi /|xi=y + >>> pprint(v(s_field).rcall(point_r).doit()) + d + --(g(x, y)) + dy + >>> pprint(v(s_field).rcall(point_p)) + / d \| + |---(g(rho*cos(theta), xi))|| + \dxi /|xi=rho*sin(theta) + + """ + + is_commutative = False + + def __new__(cls, coord_sys, index, **kwargs): + index = _sympify(index) + obj = super().__new__(cls, coord_sys, index) + obj._coord_sys = coord_sys + obj._index = index + return obj + + @property + def coord_sys(self): + return self.args[0] + + @property + def index(self): + return self.args[1] + + @property + def patch(self): + return self.coord_sys.patch + + @property + def manifold(self): + return self.coord_sys.manifold + + @property + def dim(self): + return self.manifold.dim + + def __call__(self, scalar_field): + """Apply on a scalar field. + The action of a vector field on a scalar field is a directional + differentiation. + If the argument is not a scalar field an error is raised. 
+ """ + if covariant_order(scalar_field) or contravariant_order(scalar_field): + raise ValueError('Only scalar fields can be supplied as arguments to vector fields.') + + if scalar_field is None: + return self + + base_scalars = list(scalar_field.atoms(BaseScalarField)) + + # First step: e_x(x+r**2) -> e_x(x) + 2*r*e_x(r) + d_var = self._coord_sys._dummy + # TODO: you need a real dummy function for the next line + d_funcs = [Function('_#_%s' % i)(d_var) for i, + b in enumerate(base_scalars)] + d_result = scalar_field.subs(list(zip(base_scalars, d_funcs))) + d_result = d_result.diff(d_var) + + # Second step: e_x(x) -> 1 and e_x(r) -> cos(atan2(x, y)) + coords = self._coord_sys.symbols + d_funcs_deriv = [f.diff(d_var) for f in d_funcs] + d_funcs_deriv_sub = [] + for b in base_scalars: + jac = self._coord_sys.jacobian(b._coord_sys, coords) + d_funcs_deriv_sub.append(jac[b._index, self._index]) + d_result = d_result.subs(list(zip(d_funcs_deriv, d_funcs_deriv_sub))) + + # Remove the dummies + result = d_result.subs(list(zip(d_funcs, base_scalars))) + result = result.subs(list(zip(coords, self._coord_sys.coord_functions()))) + return result.doit() + + +def _find_coords(expr): + # Finds CoordinateSystems existing in expr + fields = expr.atoms(BaseScalarField, BaseVectorField) + result = set() + for f in fields: + result.add(f._coord_sys) + return result + + +class Commutator(Expr): + r"""Commutator of two vector fields. + + Explanation + =========== + + The commutator of two vector fields `v_1` and `v_2` is defined as the + vector field `[v_1, v_2]` that evaluated on each scalar field `f` is equal + to `v_1(v_2(f)) - v_2(v_1(f))`. + + Examples + ======== + + + >>> from sympy.diffgeom.rn import R2_p, R2_r + >>> from sympy.diffgeom import Commutator + >>> from sympy import simplify + + >>> fx, fy = R2_r.base_scalars() + >>> e_x, e_y = R2_r.base_vectors() + >>> e_r = R2_p.base_vector(0) + + >>> c_xy = Commutator(e_x, e_y) + >>> c_xr = Commutator(e_x, e_r) + >>> c_xy + 0 + + Unfortunately, the current code is not able to compute everything: + + >>> c_xr + Commutator(e_x, e_rho) + >>> simplify(c_xr(fy**2)) + -2*cos(theta)*y**2/(x**2 + y**2) + + """ + def __new__(cls, v1, v2): + if (covariant_order(v1) or contravariant_order(v1) != 1 + or covariant_order(v2) or contravariant_order(v2) != 1): + raise ValueError( + 'Only commutators of vector fields are supported.') + if v1 == v2: + return S.Zero + coord_sys = set().union(*[_find_coords(v) for v in (v1, v2)]) + if len(coord_sys) == 1: + # Only one coordinate systems is used, hence it is easy enough to + # actually evaluate the commutator. + if all(isinstance(v, BaseVectorField) for v in (v1, v2)): + return S.Zero + bases_1, bases_2 = [list(v.atoms(BaseVectorField)) + for v in (v1, v2)] + coeffs_1 = [v1.expand().coeff(b) for b in bases_1] + coeffs_2 = [v2.expand().coeff(b) for b in bases_2] + res = 0 + for c1, b1 in zip(coeffs_1, bases_1): + for c2, b2 in zip(coeffs_2, bases_2): + res += c1*b1(c2)*b2 - c2*b2(c1)*b1 + return res + else: + obj = super().__new__(cls, v1, v2) + obj._v1 = v1 # deprecated assignment + obj._v2 = v2 # deprecated assignment + return obj + + @property + def v1(self): + return self.args[0] + + @property + def v2(self): + return self.args[1] + + def __call__(self, scalar_field): + """Apply on a scalar field. + If the argument is not a scalar field an error is raised. 
+ """ + return self.v1(self.v2(scalar_field)) - self.v2(self.v1(scalar_field)) + + +class Differential(Expr): + r"""Return the differential (exterior derivative) of a form field. + + Explanation + =========== + + The differential of a form (i.e. the exterior derivative) has a complicated + definition in the general case. + The differential `df` of the 0-form `f` is defined for any vector field `v` + as `df(v) = v(f)`. + + Examples + ======== + + >>> from sympy import Function + >>> from sympy.diffgeom.rn import R2_r + >>> from sympy.diffgeom import Differential + >>> from sympy import pprint + + >>> fx, fy = R2_r.base_scalars() + >>> e_x, e_y = R2_r.base_vectors() + >>> g = Function('g') + >>> s_field = g(fx, fy) + >>> dg = Differential(s_field) + + >>> dg + d(g(x, y)) + >>> pprint(dg(e_x)) + / d \| + |---(g(xi, y))|| + \dxi /|xi=x + >>> pprint(dg(e_y)) + / d \| + |---(g(x, xi))|| + \dxi /|xi=y + + Applying the exterior derivative operator twice always results in: + + >>> Differential(dg) + 0 + """ + + is_commutative = False + + def __new__(cls, form_field): + if contravariant_order(form_field): + raise ValueError( + 'A vector field was supplied as an argument to Differential.') + if isinstance(form_field, Differential): + return S.Zero + else: + obj = super().__new__(cls, form_field) + obj._form_field = form_field # deprecated assignment + return obj + + @property + def form_field(self): + return self.args[0] + + def __call__(self, *vector_fields): + """Apply on a list of vector_fields. + + Explanation + =========== + + If the number of vector fields supplied is not equal to 1 + the order of + the form field inside the differential the result is undefined. + + For 1-forms (i.e. differentials of scalar fields) the evaluation is + done as `df(v)=v(f)`. However if `v` is ``None`` instead of a vector + field, the differential is returned unchanged. This is done in order to + permit partial contractions for higher forms. + + In the general case the evaluation is done by applying the form field + inside the differential on a list with one less elements than the number + of elements in the original list. Lowering the number of vector fields + is achieved through replacing each pair of fields by their + commutator. + + If the arguments are not vectors or ``None``s an error is raised. + """ + if any((contravariant_order(a) != 1 or covariant_order(a)) and a is not None + for a in vector_fields): + raise ValueError('The arguments supplied to Differential should be vector fields or Nones.') + k = len(vector_fields) + if k == 1: + if vector_fields[0]: + return vector_fields[0].rcall(self._form_field) + return self + else: + # For higher form it is more complicated: + # Invariant formula: + # https://en.wikipedia.org/wiki/Exterior_derivative#Invariant_formula + # df(v1, ... vn) = +/- vi(f(v1..no i..vn)) + # +/- f([vi,vj],v1..no i, no j..vn) + f = self._form_field + v = vector_fields + ret = 0 + for i in range(k): + t = v[i].rcall(f.rcall(*v[:i] + v[i + 1:])) + ret += (-1)**i*t + for j in range(i + 1, k): + c = Commutator(v[i], v[j]) + if c: # TODO this is ugly - the Commutator can be Zero and + # this causes the next line to fail + t = f.rcall(*(c,) + v[:i] + v[i + 1:j] + v[j + 1:]) + ret += (-1)**(i + j)*t + return ret + + +class TensorProduct(Expr): + """Tensor product of forms. + + Explanation + =========== + + The tensor product permits the creation of multilinear functionals (i.e. + higher order tensors) out of lower order fields (e.g. 1-forms and vector + fields). 
However, the higher tensors thus created lack the interesting + features provided by the other type of product, the wedge product, namely + they are not antisymmetric and hence are not form fields. + + Examples + ======== + + >>> from sympy.diffgeom.rn import R2_r + >>> from sympy.diffgeom import TensorProduct + + >>> fx, fy = R2_r.base_scalars() + >>> e_x, e_y = R2_r.base_vectors() + >>> dx, dy = R2_r.base_oneforms() + + >>> TensorProduct(dx, dy)(e_x, e_y) + 1 + >>> TensorProduct(dx, dy)(e_y, e_x) + 0 + >>> TensorProduct(dx, fx*dy)(fx*e_x, e_y) + x**2 + >>> TensorProduct(e_x, e_y)(fx**2, fy**2) + 4*x*y + >>> TensorProduct(e_y, dx)(fy) + dx + + You can nest tensor products. + + >>> tp1 = TensorProduct(dx, dy) + >>> TensorProduct(tp1, dx)(e_x, e_y, e_x) + 1 + + You can make partial contraction for instance when 'raising an index'. + Putting ``None`` in the second argument of ``rcall`` means that the + respective position in the tensor product is left as it is. + + >>> TP = TensorProduct + >>> metric = TP(dx, dx) + 3*TP(dy, dy) + >>> metric.rcall(e_y, None) + 3*dy + + Or automatically pad the args with ``None`` without specifying them. + + >>> metric.rcall(e_y) + 3*dy + + """ + def __new__(cls, *args): + scalar = Mul(*[m for m in args if covariant_order(m) + contravariant_order(m) == 0]) + multifields = [m for m in args if covariant_order(m) + contravariant_order(m)] + if multifields: + if len(multifields) == 1: + return scalar*multifields[0] + return scalar*super().__new__(cls, *multifields) + else: + return scalar + + def __call__(self, *fields): + """Apply on a list of fields. + + If the number of input fields supplied is not equal to the order of + the tensor product field, the list of arguments is padded with ``None``'s. + + The list of arguments is divided in sublists depending on the order of + the forms inside the tensor product. The sublists are provided as + arguments to these forms and the resulting expressions are given to the + constructor of ``TensorProduct``. + + """ + tot_order = covariant_order(self) + contravariant_order(self) + tot_args = len(fields) + if tot_args != tot_order: + fields = list(fields) + [None]*(tot_order - tot_args) + orders = [covariant_order(f) + contravariant_order(f) for f in self._args] + indices = [sum(orders[:i + 1]) for i in range(len(orders) - 1)] + fields = [fields[i:j] for i, j in zip([0] + indices, indices + [None])] + multipliers = [t[0].rcall(*t[1]) for t in zip(self._args, fields)] + return TensorProduct(*multipliers) + + +class WedgeProduct(TensorProduct): + """Wedge product of forms. + + Explanation + =========== + + In the context of integration only completely antisymmetric forms make + sense. The wedge product permits the creation of such forms. + + Examples + ======== + + >>> from sympy.diffgeom.rn import R2_r + >>> from sympy.diffgeom import WedgeProduct + + >>> fx, fy = R2_r.base_scalars() + >>> e_x, e_y = R2_r.base_vectors() + >>> dx, dy = R2_r.base_oneforms() + + >>> WedgeProduct(dx, dy)(e_x, e_y) + 1 + >>> WedgeProduct(dx, dy)(e_y, e_x) + -1 + >>> WedgeProduct(dx, fx*dy)(fx*e_x, e_y) + x**2 + >>> WedgeProduct(e_x, e_y)(fy, None) + -e_x + + You can nest wedge products. + + >>> wp1 = WedgeProduct(dx, dy) + >>> WedgeProduct(wp1, dx)(e_x, e_y, e_x) + 0 + + """ + # TODO the calculation of signatures is slow + # TODO you do not need all these permutations (neither the prefactor) + def __call__(self, *fields): + """Apply on a list of vector_fields. 
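+
+        For two one-forms ``a`` and ``b`` this reduces to the antisymmetric
+        combination ``WedgeProduct(a, b)(u, v) == a(u)*b(v) - a(v)*b(u)``
+        (a worked special case, consistent with the antisymmetry examples
+        above).
+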
+ The expression is rewritten internally in terms of tensor products and evaluated.""" + orders = (covariant_order(e) + contravariant_order(e) for e in self.args) + mul = 1/Mul(*(factorial(o) for o in orders)) + perms = permutations(fields) + perms_par = (Permutation( + p).signature() for p in permutations(range(len(fields)))) + tensor_prod = TensorProduct(*self.args) + return mul*Add(*[tensor_prod(*p[0])*p[1] for p in zip(perms, perms_par)]) + + +class LieDerivative(Expr): + """Lie derivative with respect to a vector field. + + Explanation + =========== + + The transport operator that defines the Lie derivative is the pushforward of + the field to be derived along the integral curve of the field with respect + to which one derives. + + Examples + ======== + + >>> from sympy.diffgeom.rn import R2_r, R2_p + >>> from sympy.diffgeom import (LieDerivative, TensorProduct) + + >>> fx, fy = R2_r.base_scalars() + >>> e_x, e_y = R2_r.base_vectors() + >>> e_rho, e_theta = R2_p.base_vectors() + >>> dx, dy = R2_r.base_oneforms() + + >>> LieDerivative(e_x, fy) + 0 + >>> LieDerivative(e_x, fx) + 1 + >>> LieDerivative(e_x, e_x) + 0 + + The Lie derivative of a tensor field by another tensor field is equal to + their commutator: + + >>> LieDerivative(e_x, e_rho) + Commutator(e_x, e_rho) + >>> LieDerivative(e_x + e_y, fx) + 1 + + >>> tp = TensorProduct(dx, dy) + >>> LieDerivative(e_x, tp) + LieDerivative(e_x, TensorProduct(dx, dy)) + >>> LieDerivative(e_x, tp) + LieDerivative(e_x, TensorProduct(dx, dy)) + + """ + def __new__(cls, v_field, expr): + expr_form_ord = covariant_order(expr) + if contravariant_order(v_field) != 1 or covariant_order(v_field): + raise ValueError('Lie derivatives are defined only with respect to' + ' vector fields. The supplied argument was not a ' + 'vector field.') + if expr_form_ord > 0: + obj = super().__new__(cls, v_field, expr) + # deprecated assignments + obj._v_field = v_field + obj._expr = expr + return obj + if expr.atoms(BaseVectorField): + return Commutator(v_field, expr) + else: + return v_field.rcall(expr) + + @property + def v_field(self): + return self.args[0] + + @property + def expr(self): + return self.args[1] + + def __call__(self, *args): + v = self.v_field + expr = self.expr + lead_term = v(expr(*args)) + rest = Add(*[Mul(*args[:i] + (Commutator(v, args[i]),) + args[i + 1:]) + for i in range(len(args))]) + return lead_term - rest + + +class BaseCovarDerivativeOp(Expr): + """Covariant derivative operator with respect to a base vector. 
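+
+    Explanation
+    ===========
+
+    Acting on a vector field written in the coordinate basis, ``X = X^j e_j``,
+    the operator differentiates the components along the chosen base vector
+    and corrects them with the supplied Christoffel symbols (a sketch of the
+    rule implemented below): ``e_i(X^j)*e_j + X^j*Gamma^k_{i j}*e_k``. For the
+    flat metric in the example all Christoffel symbols vanish, so the result
+    reduces to the ordinary directional derivative.
+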
+ + Examples + ======== + + >>> from sympy.diffgeom.rn import R2_r + >>> from sympy.diffgeom import BaseCovarDerivativeOp + >>> from sympy.diffgeom import metric_to_Christoffel_2nd, TensorProduct + + >>> TP = TensorProduct + >>> fx, fy = R2_r.base_scalars() + >>> e_x, e_y = R2_r.base_vectors() + >>> dx, dy = R2_r.base_oneforms() + + >>> ch = metric_to_Christoffel_2nd(TP(dx, dx) + TP(dy, dy)) + >>> ch + [[[0, 0], [0, 0]], [[0, 0], [0, 0]]] + >>> cvd = BaseCovarDerivativeOp(R2_r, 0, ch) + >>> cvd(fx) + 1 + >>> cvd(fx*e_x) + e_x + """ + + def __new__(cls, coord_sys, index, christoffel): + index = _sympify(index) + christoffel = ImmutableDenseNDimArray(christoffel) + obj = super().__new__(cls, coord_sys, index, christoffel) + # deprecated assignments + obj._coord_sys = coord_sys + obj._index = index + obj._christoffel = christoffel + return obj + + @property + def coord_sys(self): + return self.args[0] + + @property + def index(self): + return self.args[1] + + @property + def christoffel(self): + return self.args[2] + + def __call__(self, field): + """Apply on a scalar field. + + The action of a vector field on a scalar field is a directional + differentiation. + If the argument is not a scalar field the behaviour is undefined. + """ + if covariant_order(field) != 0: + raise NotImplementedError() + + field = vectors_in_basis(field, self._coord_sys) + + wrt_vector = self._coord_sys.base_vector(self._index) + wrt_scalar = self._coord_sys.coord_function(self._index) + vectors = list(field.atoms(BaseVectorField)) + + # First step: replace all vectors with something susceptible to + # derivation and do the derivation + # TODO: you need a real dummy function for the next line + d_funcs = [Function('_#_%s' % i)(wrt_scalar) for i, + b in enumerate(vectors)] + d_result = field.subs(list(zip(vectors, d_funcs))) + d_result = wrt_vector(d_result) + + # Second step: backsubstitute the vectors in + d_result = d_result.subs(list(zip(d_funcs, vectors))) + + # Third step: evaluate the derivatives of the vectors + derivs = [] + for v in vectors: + d = Add(*[(self._christoffel[k, wrt_vector._index, v._index] + *v._coord_sys.base_vector(k)) + for k in range(v._coord_sys.dim)]) + derivs.append(d) + to_subs = [wrt_vector(d) for d in d_funcs] + # XXX: This substitution can fail when there are Dummy symbols and the + # cache is disabled: https://github.com/sympy/sympy/issues/17794 + result = d_result.subs(list(zip(to_subs, derivs))) + + # Remove the dummies + result = result.subs(list(zip(d_funcs, vectors))) + return result.doit() + + +class CovarDerivativeOp(Expr): + """Covariant derivative operator. + + Examples + ======== + + >>> from sympy.diffgeom.rn import R2_r + >>> from sympy.diffgeom import CovarDerivativeOp + >>> from sympy.diffgeom import metric_to_Christoffel_2nd, TensorProduct + >>> TP = TensorProduct + >>> fx, fy = R2_r.base_scalars() + >>> e_x, e_y = R2_r.base_vectors() + >>> dx, dy = R2_r.base_oneforms() + >>> ch = metric_to_Christoffel_2nd(TP(dx, dx) + TP(dy, dy)) + + >>> ch + [[[0, 0], [0, 0]], [[0, 0], [0, 0]]] + >>> cvd = CovarDerivativeOp(fx*e_x, ch) + >>> cvd(fx) + x + >>> cvd(fx*e_x) + x*e_x + + """ + + def __new__(cls, wrt, christoffel): + if len({v._coord_sys for v in wrt.atoms(BaseVectorField)}) > 1: + raise NotImplementedError() + if contravariant_order(wrt) != 1 or covariant_order(wrt): + raise ValueError('Covariant derivatives are defined only with ' + 'respect to vector fields. 
The supplied argument ' + 'was not a vector field.') + christoffel = ImmutableDenseNDimArray(christoffel) + obj = super().__new__(cls, wrt, christoffel) + # deprecated assignments + obj._wrt = wrt + obj._christoffel = christoffel + return obj + + @property + def wrt(self): + return self.args[0] + + @property + def christoffel(self): + return self.args[1] + + def __call__(self, field): + vectors = list(self._wrt.atoms(BaseVectorField)) + base_ops = [BaseCovarDerivativeOp(v._coord_sys, v._index, self._christoffel) + for v in vectors] + return self._wrt.subs(list(zip(vectors, base_ops))).rcall(field) + + +############################################################################### +# Integral curves on vector fields +############################################################################### +def intcurve_series(vector_field, param, start_point, n=6, coord_sys=None, coeffs=False): + r"""Return the series expansion for an integral curve of the field. + + Explanation + =========== + + Integral curve is a function `\gamma` taking a parameter in `R` to a point + in the manifold. It verifies the equation: + + `V(f)\big(\gamma(t)\big) = \frac{d}{dt}f\big(\gamma(t)\big)` + + where the given ``vector_field`` is denoted as `V`. This holds for any + value `t` for the parameter and any scalar field `f`. + + This equation can also be decomposed of a basis of coordinate functions + `V(f_i)\big(\gamma(t)\big) = \frac{d}{dt}f_i\big(\gamma(t)\big) \quad \forall i` + + This function returns a series expansion of `\gamma(t)` in terms of the + coordinate system ``coord_sys``. The equations and expansions are necessarily + done in coordinate-system-dependent way as there is no other way to + represent movement between points on the manifold (i.e. there is no such + thing as a difference of points for a general manifold). + + Parameters + ========== + vector_field + the vector field for which an integral curve will be given + + param + the argument of the function `\gamma` from R to the curve + + start_point + the point which corresponds to `\gamma(0)` + + n + the order to which to expand + + coord_sys + the coordinate system in which to expand + coeffs (default False) - if True return a list of elements of the expansion + + Examples + ======== + + Use the predefined R2 manifold: + + >>> from sympy.abc import t, x, y + >>> from sympy.diffgeom.rn import R2_p, R2_r + >>> from sympy.diffgeom import intcurve_series + + Specify a starting point and a vector field: + + >>> start_point = R2_r.point([x, y]) + >>> vector_field = R2_r.e_x + + Calculate the series: + + >>> intcurve_series(vector_field, t, start_point, n=3) + Matrix([ + [t + x], + [ y]]) + + Or get the elements of the expansion in a list: + + >>> series = intcurve_series(vector_field, t, start_point, n=3, coeffs=True) + >>> series[0] + Matrix([ + [x], + [y]]) + >>> series[1] + Matrix([ + [t], + [0]]) + >>> series[2] + Matrix([ + [0], + [0]]) + + The series in the polar coordinate system: + + >>> series = intcurve_series(vector_field, t, start_point, + ... 
n=3, coord_sys=R2_p, coeffs=True) + >>> series[0] + Matrix([ + [sqrt(x**2 + y**2)], + [ atan2(y, x)]]) + >>> series[1] + Matrix([ + [t*x/sqrt(x**2 + y**2)], + [ -t*y/(x**2 + y**2)]]) + >>> series[2] + Matrix([ + [t**2*(-x**2/(x**2 + y**2)**(3/2) + 1/sqrt(x**2 + y**2))/2], + [ t**2*x*y/(x**2 + y**2)**2]]) + + See Also + ======== + + intcurve_diffequ + + """ + if contravariant_order(vector_field) != 1 or covariant_order(vector_field): + raise ValueError('The supplied field was not a vector field.') + + def iter_vfield(scalar_field, i): + """Return ``vector_field`` called `i` times on ``scalar_field``.""" + return reduce(lambda s, v: v.rcall(s), [vector_field, ]*i, scalar_field) + + def taylor_terms_per_coord(coord_function): + """Return the series for one of the coordinates.""" + return [param**i*iter_vfield(coord_function, i).rcall(start_point)/factorial(i) + for i in range(n)] + coord_sys = coord_sys if coord_sys else start_point._coord_sys + coord_functions = coord_sys.coord_functions() + taylor_terms = [taylor_terms_per_coord(f) for f in coord_functions] + if coeffs: + return [Matrix(t) for t in zip(*taylor_terms)] + else: + return Matrix([sum(c) for c in taylor_terms]) + + +def intcurve_diffequ(vector_field, param, start_point, coord_sys=None): + r"""Return the differential equation for an integral curve of the field. + + Explanation + =========== + + Integral curve is a function `\gamma` taking a parameter in `R` to a point + in the manifold. It verifies the equation: + + `V(f)\big(\gamma(t)\big) = \frac{d}{dt}f\big(\gamma(t)\big)` + + where the given ``vector_field`` is denoted as `V`. This holds for any + value `t` for the parameter and any scalar field `f`. + + This function returns the differential equation of `\gamma(t)` in terms of the + coordinate system ``coord_sys``. The equations and expansions are necessarily + done in coordinate-system-dependent way as there is no other way to + represent movement between points on the manifold (i.e. there is no such + thing as a difference of points for a general manifold). 
+ + Parameters + ========== + + vector_field + the vector field for which an integral curve will be given + + param + the argument of the function `\gamma` from R to the curve + + start_point + the point which corresponds to `\gamma(0)` + + coord_sys + the coordinate system in which to give the equations + + Returns + ======= + + a tuple of (equations, initial conditions) + + Examples + ======== + + Use the predefined R2 manifold: + + >>> from sympy.abc import t + >>> from sympy.diffgeom.rn import R2, R2_p, R2_r + >>> from sympy.diffgeom import intcurve_diffequ + + Specify a starting point and a vector field: + + >>> start_point = R2_r.point([0, 1]) + >>> vector_field = -R2.y*R2.e_x + R2.x*R2.e_y + + Get the equation: + + >>> equations, init_cond = intcurve_diffequ(vector_field, t, start_point) + >>> equations + [f_1(t) + Derivative(f_0(t), t), -f_0(t) + Derivative(f_1(t), t)] + >>> init_cond + [f_0(0), f_1(0) - 1] + + The series in the polar coordinate system: + + >>> equations, init_cond = intcurve_diffequ(vector_field, t, start_point, R2_p) + >>> equations + [Derivative(f_0(t), t), Derivative(f_1(t), t) - 1] + >>> init_cond + [f_0(0) - 1, f_1(0) - pi/2] + + See Also + ======== + + intcurve_series + + """ + if contravariant_order(vector_field) != 1 or covariant_order(vector_field): + raise ValueError('The supplied field was not a vector field.') + coord_sys = coord_sys if coord_sys else start_point._coord_sys + gammas = [Function('f_%d' % i)(param) for i in range( + start_point._coord_sys.dim)] + arbitrary_p = Point(coord_sys, gammas) + coord_functions = coord_sys.coord_functions() + equations = [simplify(diff(cf.rcall(arbitrary_p), param) - vector_field.rcall(cf).rcall(arbitrary_p)) + for cf in coord_functions] + init_cond = [simplify(cf.rcall(arbitrary_p).subs(param, 0) - cf.rcall(start_point)) + for cf in coord_functions] + return equations, init_cond + + +############################################################################### +# Helpers +############################################################################### +def dummyfy(args, exprs): + # TODO Is this a good idea? + d_args = Matrix([s.as_dummy() for s in args]) + reps = dict(zip(args, d_args)) + d_exprs = Matrix([_sympify(expr).subs(reps) for expr in exprs]) + return d_args, d_exprs + +############################################################################### +# Helpers +############################################################################### +def contravariant_order(expr, _strict=False): + """Return the contravariant order of an expression. + + Examples + ======== + + >>> from sympy.diffgeom import contravariant_order + >>> from sympy.diffgeom.rn import R2 + >>> from sympy.abc import a + + >>> contravariant_order(a) + 0 + >>> contravariant_order(a*R2.x + 2) + 0 + >>> contravariant_order(a*R2.x*R2.e_y + R2.e_x) + 1 + + """ + # TODO move some of this to class methods. 
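+    # Summary of the dispatch below: the order of an Add is the common order
+    # of its terms (mixed orders are rejected), a Mul may contain at most one
+    # factor of nonzero order, a BaseVectorField has order 1, a TensorProduct
+    # sums the orders of its arguments, and anything else that still contains
+    # BaseScalarField atoms (or any expression when not _strict) has order 0.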
+ # TODO rewrite using the .as_blah_blah methods + if isinstance(expr, Add): + orders = [contravariant_order(e) for e in expr.args] + if len(set(orders)) != 1: + raise ValueError('Misformed expression containing contravariant fields of varying order.') + return orders[0] + elif isinstance(expr, Mul): + orders = [contravariant_order(e) for e in expr.args] + not_zero = [o for o in orders if o != 0] + if len(not_zero) > 1: + raise ValueError('Misformed expression containing multiplication between vectors.') + return 0 if not not_zero else not_zero[0] + elif isinstance(expr, Pow): + if covariant_order(expr.base) or covariant_order(expr.exp): + raise ValueError( + 'Misformed expression containing a power of a vector.') + return 0 + elif isinstance(expr, BaseVectorField): + return 1 + elif isinstance(expr, TensorProduct): + return sum(contravariant_order(a) for a in expr.args) + elif not _strict or expr.atoms(BaseScalarField): + return 0 + else: # If it does not contain anything related to the diffgeom module and it is _strict + return -1 + + +def covariant_order(expr, _strict=False): + """Return the covariant order of an expression. + + Examples + ======== + + >>> from sympy.diffgeom import covariant_order + >>> from sympy.diffgeom.rn import R2 + >>> from sympy.abc import a + + >>> covariant_order(a) + 0 + >>> covariant_order(a*R2.x + 2) + 0 + >>> covariant_order(a*R2.x*R2.dy + R2.dx) + 1 + + """ + # TODO move some of this to class methods. + # TODO rewrite using the .as_blah_blah methods + if isinstance(expr, Add): + orders = [covariant_order(e) for e in expr.args] + if len(set(orders)) != 1: + raise ValueError('Misformed expression containing form fields of varying order.') + return orders[0] + elif isinstance(expr, Mul): + orders = [covariant_order(e) for e in expr.args] + not_zero = [o for o in orders if o != 0] + if len(not_zero) > 1: + raise ValueError('Misformed expression containing multiplication between forms.') + return 0 if not not_zero else not_zero[0] + elif isinstance(expr, Pow): + if covariant_order(expr.base) or covariant_order(expr.exp): + raise ValueError( + 'Misformed expression containing a power of a form.') + return 0 + elif isinstance(expr, Differential): + return covariant_order(*expr.args) + 1 + elif isinstance(expr, TensorProduct): + return sum(covariant_order(a) for a in expr.args) + elif not _strict or expr.atoms(BaseScalarField): + return 0 + else: # If it does not contain anything related to the diffgeom module and it is _strict + return -1 + + +############################################################################### +# Coordinate transformation functions +############################################################################### +def vectors_in_basis(expr, to_sys): + """Transform all base vectors in base vectors of a specified coord basis. + While the new base vectors are in the new coordinate system basis, any + coefficients are kept in the old system. 
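+
+    As a worked instance (matching the first example below), the rectangular
+    base vector e_x expands in the polar basis as
+    e_x = (d(rho)/dx)*e_rho + (d(theta)/dx)*e_theta
+        = x/sqrt(x**2 + y**2)*e_rho - y/(x**2 + y**2)*e_theta,
+    with the coefficients still written in terms of x and y.
+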
+ + Examples + ======== + + >>> from sympy.diffgeom import vectors_in_basis + >>> from sympy.diffgeom.rn import R2_r, R2_p + + >>> vectors_in_basis(R2_r.e_x, R2_p) + -y*e_theta/(x**2 + y**2) + x*e_rho/sqrt(x**2 + y**2) + >>> vectors_in_basis(R2_p.e_r, R2_r) + sin(theta)*e_y + cos(theta)*e_x + + """ + vectors = list(expr.atoms(BaseVectorField)) + new_vectors = [] + for v in vectors: + cs = v._coord_sys + jac = cs.jacobian(to_sys, cs.coord_functions()) + new = (jac.T*Matrix(to_sys.base_vectors()))[v._index] + new_vectors.append(new) + return expr.subs(list(zip(vectors, new_vectors))) + + +############################################################################### +# Coordinate-dependent functions +############################################################################### +def twoform_to_matrix(expr): + """Return the matrix representing the twoform. + + For the twoform `w` return the matrix `M` such that `M[i,j]=w(e_i, e_j)`, + where `e_i` is the i-th base vector field for the coordinate system in + which the expression of `w` is given. + + Examples + ======== + + >>> from sympy.diffgeom.rn import R2 + >>> from sympy.diffgeom import twoform_to_matrix, TensorProduct + >>> TP = TensorProduct + + >>> twoform_to_matrix(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) + Matrix([ + [1, 0], + [0, 1]]) + >>> twoform_to_matrix(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) + Matrix([ + [x, 0], + [0, 1]]) + >>> twoform_to_matrix(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy) - TP(R2.dx, R2.dy)/2) + Matrix([ + [ 1, 0], + [-1/2, 1]]) + + """ + if covariant_order(expr) != 2 or contravariant_order(expr): + raise ValueError('The input expression is not a two-form.') + coord_sys = _find_coords(expr) + if len(coord_sys) != 1: + raise ValueError('The input expression concerns more than one ' + 'coordinate systems, hence there is no unambiguous ' + 'way to choose a coordinate system for the matrix.') + coord_sys = coord_sys.pop() + vectors = coord_sys.base_vectors() + expr = expr.expand() + matrix_content = [[expr.rcall(v1, v2) for v1 in vectors] + for v2 in vectors] + return Matrix(matrix_content) + + +def metric_to_Christoffel_1st(expr): + """Return the nested list of Christoffel symbols for the given metric. + This returns the Christoffel symbol of first kind that represents the + Levi-Civita connection for the given metric. + + Examples + ======== + + >>> from sympy.diffgeom.rn import R2 + >>> from sympy.diffgeom import metric_to_Christoffel_1st, TensorProduct + >>> TP = TensorProduct + + >>> metric_to_Christoffel_1st(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) + [[[0, 0], [0, 0]], [[0, 0], [0, 0]]] + >>> metric_to_Christoffel_1st(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) + [[[1/2, 0], [0, 0]], [[0, 0], [0, 0]]] + + """ + matrix = twoform_to_matrix(expr) + if not matrix.is_symmetric(): + raise ValueError( + 'The two-form representing the metric is not symmetric.') + coord_sys = _find_coords(expr).pop() + deriv_matrices = [matrix.applyfunc(d) for d in coord_sys.base_vectors()] + indices = list(range(coord_sys.dim)) + christoffel = [[[(deriv_matrices[k][i, j] + deriv_matrices[j][i, k] - deriv_matrices[i][j, k])/2 + for k in indices] + for j in indices] + for i in indices] + return ImmutableDenseNDimArray(christoffel) + + +def metric_to_Christoffel_2nd(expr): + """Return the nested list of Christoffel symbols for the given metric. + This returns the Christoffel symbol of second kind that represents the + Levi-Civita connection for the given metric. 
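+
+    Concretely (a sketch of the computation below), the symbols of the second
+    kind are obtained from those of the first kind by contracting with the
+    inverse metric: Gamma^i_{j k} = sum_l g^{i l} * Gamma_{l, j k}, where
+    g^{i l} is the inverse of the matrix returned by ``twoform_to_matrix``.
+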
+ + Examples + ======== + + >>> from sympy.diffgeom.rn import R2 + >>> from sympy.diffgeom import metric_to_Christoffel_2nd, TensorProduct + >>> TP = TensorProduct + + >>> metric_to_Christoffel_2nd(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) + [[[0, 0], [0, 0]], [[0, 0], [0, 0]]] + >>> metric_to_Christoffel_2nd(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) + [[[1/(2*x), 0], [0, 0]], [[0, 0], [0, 0]]] + + """ + ch_1st = metric_to_Christoffel_1st(expr) + coord_sys = _find_coords(expr).pop() + indices = list(range(coord_sys.dim)) + # XXX workaround, inverting a matrix does not work if it contains non + # symbols + #matrix = twoform_to_matrix(expr).inv() + matrix = twoform_to_matrix(expr) + s_fields = set() + for e in matrix: + s_fields.update(e.atoms(BaseScalarField)) + s_fields = list(s_fields) + dums = coord_sys.symbols + matrix = matrix.subs(list(zip(s_fields, dums))).inv().subs(list(zip(dums, s_fields))) + # XXX end of workaround + christoffel = [[[Add(*[matrix[i, l]*ch_1st[l, j, k] for l in indices]) + for k in indices] + for j in indices] + for i in indices] + return ImmutableDenseNDimArray(christoffel) + + +def metric_to_Riemann_components(expr): + """Return the components of the Riemann tensor expressed in a given basis. + + Given a metric it calculates the components of the Riemann tensor in the + canonical basis of the coordinate system in which the metric expression is + given. + + Examples + ======== + + >>> from sympy import exp + >>> from sympy.diffgeom.rn import R2 + >>> from sympy.diffgeom import metric_to_Riemann_components, TensorProduct + >>> TP = TensorProduct + + >>> metric_to_Riemann_components(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) + [[[[0, 0], [0, 0]], [[0, 0], [0, 0]]], [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]] + >>> non_trivial_metric = exp(2*R2.r)*TP(R2.dr, R2.dr) + \ + R2.r**2*TP(R2.dtheta, R2.dtheta) + >>> non_trivial_metric + exp(2*rho)*TensorProduct(drho, drho) + rho**2*TensorProduct(dtheta, dtheta) + >>> riemann = metric_to_Riemann_components(non_trivial_metric) + >>> riemann[0, :, :, :] + [[[0, 0], [0, 0]], [[0, exp(-2*rho)*rho], [-exp(-2*rho)*rho, 0]]] + >>> riemann[1, :, :, :] + [[[0, -1/rho], [1/rho, 0]], [[0, 0], [0, 0]]] + + """ + ch_2nd = metric_to_Christoffel_2nd(expr) + coord_sys = _find_coords(expr).pop() + indices = list(range(coord_sys.dim)) + deriv_ch = [[[[d(ch_2nd[i, j, k]) + for d in coord_sys.base_vectors()] + for k in indices] + for j in indices] + for i in indices] + riemann_a = [[[[deriv_ch[rho][sig][nu][mu] - deriv_ch[rho][sig][mu][nu] + for nu in indices] + for mu in indices] + for sig in indices] + for rho in indices] + riemann_b = [[[[Add(*[ch_2nd[rho, l, mu]*ch_2nd[l, sig, nu] - ch_2nd[rho, l, nu]*ch_2nd[l, sig, mu] for l in indices]) + for nu in indices] + for mu in indices] + for sig in indices] + for rho in indices] + riemann = [[[[riemann_a[rho][sig][mu][nu] + riemann_b[rho][sig][mu][nu] + for nu in indices] + for mu in indices] + for sig in indices] + for rho in indices] + return ImmutableDenseNDimArray(riemann) + + +def metric_to_Ricci_components(expr): + + """Return the components of the Ricci tensor expressed in a given basis. + + Given a metric it calculates the components of the Ricci tensor in the + canonical basis of the coordinate system in which the metric expression is + given. 
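+
+    The components are obtained by contracting the Riemann tensor computed by
+    ``metric_to_Riemann_components``: R_{i j} = sum_k Riemann[k, i, k, j].
+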
+ + Examples + ======== + + >>> from sympy import exp + >>> from sympy.diffgeom.rn import R2 + >>> from sympy.diffgeom import metric_to_Ricci_components, TensorProduct + >>> TP = TensorProduct + + >>> metric_to_Ricci_components(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) + [[0, 0], [0, 0]] + >>> non_trivial_metric = exp(2*R2.r)*TP(R2.dr, R2.dr) + \ + R2.r**2*TP(R2.dtheta, R2.dtheta) + >>> non_trivial_metric + exp(2*rho)*TensorProduct(drho, drho) + rho**2*TensorProduct(dtheta, dtheta) + >>> metric_to_Ricci_components(non_trivial_metric) + [[1/rho, 0], [0, exp(-2*rho)*rho]] + + """ + riemann = metric_to_Riemann_components(expr) + coord_sys = _find_coords(expr).pop() + indices = list(range(coord_sys.dim)) + ricci = [[Add(*[riemann[k, i, k, j] for k in indices]) + for j in indices] + for i in indices] + return ImmutableDenseNDimArray(ricci) + +############################################################################### +# Classes for deprecation +############################################################################### + +class _deprecated_container: + # This class gives deprecation warning. + # When deprecated features are completely deleted, this should be removed as well. + # See https://github.com/sympy/sympy/pull/19368 + def __init__(self, message, data): + super().__init__(data) + self.message = message + + def warn(self): + sympy_deprecation_warning( + self.message, + deprecated_since_version="1.7", + active_deprecations_target="deprecated-diffgeom-mutable", + stacklevel=4 + ) + + def __iter__(self): + self.warn() + return super().__iter__() + + def __getitem__(self, key): + self.warn() + return super().__getitem__(key) + + def __contains__(self, key): + self.warn() + return super().__contains__(key) + + +class _deprecated_list(_deprecated_container, list): + pass + + +class _deprecated_dict(_deprecated_container, dict): + pass + + +# Import at end to avoid cyclic imports +from sympy.simplify.simplify import simplify diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/tests/test_function_diffgeom_book.py b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/tests/test_function_diffgeom_book.py new file mode 100644 index 0000000000000000000000000000000000000000..a7317a7d1191e61a074fcaa07e1859b11bdc0d10 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/tests/test_function_diffgeom_book.py @@ -0,0 +1,145 @@ +from sympy.diffgeom.rn import R2, R2_p, R2_r, R3_r +from sympy.diffgeom import intcurve_series, Differential, WedgeProduct +from sympy.core import symbols, Function, Derivative +from sympy.simplify import trigsimp, simplify +from sympy.functions import sqrt, atan2, sin, cos +from sympy.matrices import Matrix + +# Most of the functionality is covered in the +# test_functional_diffgeom_ch* tests which are based on the +# example from the paper of Sussman and Wisdom. +# If they do not cover something, additional tests are added in other test +# functions. + +# From "Functional Differential Geometry" as of 2011 +# by Sussman and Wisdom. 
+ + +def test_functional_diffgeom_ch2(): + x0, y0, r0, theta0 = symbols('x0, y0, r0, theta0', real=True) + x, y = symbols('x, y', real=True) + f = Function('f') + + assert (R2_p.point_to_coords(R2_r.point([x0, y0])) == + Matrix([sqrt(x0**2 + y0**2), atan2(y0, x0)])) + assert (R2_r.point_to_coords(R2_p.point([r0, theta0])) == + Matrix([r0*cos(theta0), r0*sin(theta0)])) + + assert R2_p.jacobian(R2_r, [r0, theta0]) == Matrix( + [[cos(theta0), -r0*sin(theta0)], [sin(theta0), r0*cos(theta0)]]) + + field = f(R2.x, R2.y) + p1_in_rect = R2_r.point([x0, y0]) + p1_in_polar = R2_p.point([sqrt(x0**2 + y0**2), atan2(y0, x0)]) + assert field.rcall(p1_in_rect) == f(x0, y0) + assert field.rcall(p1_in_polar) == f(x0, y0) + + p_r = R2_r.point([x0, y0]) + p_p = R2_p.point([r0, theta0]) + assert R2.x(p_r) == x0 + assert R2.x(p_p) == r0*cos(theta0) + assert R2.r(p_p) == r0 + assert R2.r(p_r) == sqrt(x0**2 + y0**2) + assert R2.theta(p_r) == atan2(y0, x0) + + h = R2.x*R2.r**2 + R2.y**3 + assert h.rcall(p_r) == x0*(x0**2 + y0**2) + y0**3 + assert h.rcall(p_p) == r0**3*sin(theta0)**3 + r0**3*cos(theta0) + + +def test_functional_diffgeom_ch3(): + x0, y0 = symbols('x0, y0', real=True) + x, y, t = symbols('x, y, t', real=True) + f = Function('f') + b1 = Function('b1') + b2 = Function('b2') + p_r = R2_r.point([x0, y0]) + + s_field = f(R2.x, R2.y) + v_field = b1(R2.x)*R2.e_x + b2(R2.y)*R2.e_y + assert v_field.rcall(s_field).rcall(p_r).doit() == b1( + x0)*Derivative(f(x0, y0), x0) + b2(y0)*Derivative(f(x0, y0), y0) + + assert R2.e_x(R2.r**2).rcall(p_r) == 2*x0 + v = R2.e_x + 2*R2.e_y + s = R2.r**2 + 3*R2.x + assert v.rcall(s).rcall(p_r).doit() == 2*x0 + 4*y0 + 3 + + circ = -R2.y*R2.e_x + R2.x*R2.e_y + series = intcurve_series(circ, t, R2_r.point([1, 0]), coeffs=True) + series_x, series_y = zip(*series) + assert all( + [term == cos(t).taylor_term(i, t) for i, term in enumerate(series_x)]) + assert all( + [term == sin(t).taylor_term(i, t) for i, term in enumerate(series_y)]) + + +def test_functional_diffgeom_ch4(): + x0, y0, theta0 = symbols('x0, y0, theta0', real=True) + x, y, r, theta = symbols('x, y, r, theta', real=True) + r0 = symbols('r0', positive=True) + f = Function('f') + b1 = Function('b1') + b2 = Function('b2') + p_r = R2_r.point([x0, y0]) + p_p = R2_p.point([r0, theta0]) + + f_field = b1(R2.x, R2.y)*R2.dx + b2(R2.x, R2.y)*R2.dy + assert f_field.rcall(R2.e_x).rcall(p_r) == b1(x0, y0) + assert f_field.rcall(R2.e_y).rcall(p_r) == b2(x0, y0) + + s_field_r = f(R2.x, R2.y) + df = Differential(s_field_r) + assert df(R2.e_x).rcall(p_r).doit() == Derivative(f(x0, y0), x0) + assert df(R2.e_y).rcall(p_r).doit() == Derivative(f(x0, y0), y0) + + s_field_p = f(R2.r, R2.theta) + df = Differential(s_field_p) + assert trigsimp(df(R2.e_x).rcall(p_p).doit()) == ( + cos(theta0)*Derivative(f(r0, theta0), r0) - + sin(theta0)*Derivative(f(r0, theta0), theta0)/r0) + assert trigsimp(df(R2.e_y).rcall(p_p).doit()) == ( + sin(theta0)*Derivative(f(r0, theta0), r0) + + cos(theta0)*Derivative(f(r0, theta0), theta0)/r0) + + assert R2.dx(R2.e_x).rcall(p_r) == 1 + assert R2.dx(R2.e_x) == 1 + assert R2.dx(R2.e_y).rcall(p_r) == 0 + assert R2.dx(R2.e_y) == 0 + + circ = -R2.y*R2.e_x + R2.x*R2.e_y + assert R2.dx(circ).rcall(p_r).doit() == -y0 + assert R2.dy(circ).rcall(p_r) == x0 + assert R2.dr(circ).rcall(p_r) == 0 + assert simplify(R2.dtheta(circ).rcall(p_r)) == 1 + + assert (circ - R2.e_theta).rcall(s_field_r).rcall(p_r) == 0 + + +def test_functional_diffgeom_ch6(): + u0, u1, u2, v0, v1, v2, w0, w1, w2 = symbols('u0:3, v0:3, w0:3', real=True) + 
+ u = u0*R2.e_x + u1*R2.e_y + v = v0*R2.e_x + v1*R2.e_y + wp = WedgeProduct(R2.dx, R2.dy) + assert wp(u, v) == u0*v1 - u1*v0 + + u = u0*R3_r.e_x + u1*R3_r.e_y + u2*R3_r.e_z + v = v0*R3_r.e_x + v1*R3_r.e_y + v2*R3_r.e_z + w = w0*R3_r.e_x + w1*R3_r.e_y + w2*R3_r.e_z + wp = WedgeProduct(R3_r.dx, R3_r.dy, R3_r.dz) + assert wp( + u, v, w) == Matrix(3, 3, [u0, u1, u2, v0, v1, v2, w0, w1, w2]).det() + + a, b, c = symbols('a, b, c', cls=Function) + a_f = a(R3_r.x, R3_r.y, R3_r.z) + b_f = b(R3_r.x, R3_r.y, R3_r.z) + c_f = c(R3_r.x, R3_r.y, R3_r.z) + theta = a_f*R3_r.dx + b_f*R3_r.dy + c_f*R3_r.dz + dtheta = Differential(theta) + da = Differential(a_f) + db = Differential(b_f) + dc = Differential(c_f) + expr = dtheta - WedgeProduct( + da, R3_r.dx) - WedgeProduct(db, R3_r.dy) - WedgeProduct(dc, R3_r.dz) + assert expr.rcall(R3_r.e_x, R3_r.e_y) == 0 diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/tests/test_hyperbolic_space.py b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/tests/test_hyperbolic_space.py new file mode 100644 index 0000000000000000000000000000000000000000..48ddc7f8065f2b69bcd8eca4726a21c5901514ec --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/diffgeom/tests/test_hyperbolic_space.py @@ -0,0 +1,91 @@ +r''' +unit test describing the hyperbolic half-plane with the Poincare metric. This +is a basic model of hyperbolic geometry on the (positive) half-space + +{(x,y) \in R^2 | y > 0} + +with the Riemannian metric + +ds^2 = (dx^2 + dy^2)/y^2 + +It has constant negative scalar curvature = -2 + +https://en.wikipedia.org/wiki/Poincare_half-plane_model +''' +from sympy.matrices.dense import diag +from sympy.diffgeom import (twoform_to_matrix, + metric_to_Christoffel_1st, metric_to_Christoffel_2nd, + metric_to_Riemann_components, metric_to_Ricci_components) +import sympy.diffgeom.rn +from sympy.tensor.array import ImmutableDenseNDimArray + + +def test_H2(): + TP = sympy.diffgeom.TensorProduct + R2 = sympy.diffgeom.rn.R2 + y = R2.y + dy = R2.dy + dx = R2.dx + g = (TP(dx, dx) + TP(dy, dy))*y**(-2) + automat = twoform_to_matrix(g) + mat = diag(y**(-2), y**(-2)) + assert mat == automat + + gamma1 = metric_to_Christoffel_1st(g) + assert gamma1[0, 0, 0] == 0 + assert gamma1[0, 0, 1] == -y**(-3) + assert gamma1[0, 1, 0] == -y**(-3) + assert gamma1[0, 1, 1] == 0 + + assert gamma1[1, 1, 1] == -y**(-3) + assert gamma1[1, 1, 0] == 0 + assert gamma1[1, 0, 1] == 0 + assert gamma1[1, 0, 0] == y**(-3) + + gamma2 = metric_to_Christoffel_2nd(g) + assert gamma2[0, 0, 0] == 0 + assert gamma2[0, 0, 1] == -y**(-1) + assert gamma2[0, 1, 0] == -y**(-1) + assert gamma2[0, 1, 1] == 0 + + assert gamma2[1, 1, 1] == -y**(-1) + assert gamma2[1, 1, 0] == 0 + assert gamma2[1, 0, 1] == 0 + assert gamma2[1, 0, 0] == y**(-1) + + Rm = metric_to_Riemann_components(g) + assert Rm[0, 0, 0, 0] == 0 + assert Rm[0, 0, 0, 1] == 0 + assert Rm[0, 0, 1, 0] == 0 + assert Rm[0, 0, 1, 1] == 0 + + assert Rm[0, 1, 0, 0] == 0 + assert Rm[0, 1, 0, 1] == -y**(-2) + assert Rm[0, 1, 1, 0] == y**(-2) + assert Rm[0, 1, 1, 1] == 0 + + assert Rm[1, 0, 0, 0] == 0 + assert Rm[1, 0, 0, 1] == y**(-2) + assert Rm[1, 0, 1, 0] == -y**(-2) + assert Rm[1, 0, 1, 1] == 0 + + assert Rm[1, 1, 0, 0] == 0 + assert Rm[1, 1, 0, 1] == 0 + assert Rm[1, 1, 1, 0] == 0 + assert Rm[1, 1, 1, 1] == 0 + + Ric = metric_to_Ricci_components(g) + assert Ric[0, 0] == -y**(-2) + assert Ric[0, 1] == 0 + assert Ric[1, 0] == 0 + assert Ric[0, 0] == -y**(-2) + + assert Ric == ImmutableDenseNDimArray([-y**(-2), 0, 0, -y**(-2)], (2, 2)) + + ## 
scalar curvature is -2 + #TODO - it would be nice to have index contraction built-in + R = (Ric[0, 0] + Ric[1, 1])*y**2 + assert R == -2 + + ## Gauss curvature is -1 + assert R/2 == -1 diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..736e3ee9960884742a09809dc8b951864ee53fe0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/array_comprehension.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/array_comprehension.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee7d23765aa57efc9d8285c5dbe6bb36bd7346a3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/array_comprehension.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/array_derivatives.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/array_derivatives.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d663344a3ab565d654b2c68d56baadb9f44ef49 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/array_derivatives.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/arrayop.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/arrayop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c152facb0baee674f83a57573a9706740ce54ef Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/arrayop.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/dense_ndim_array.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/dense_ndim_array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3af38eefc422555cf9652bb7d1fdadc7f228b98d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/dense_ndim_array.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/mutable_ndim_array.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/mutable_ndim_array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e81a8b4576ea486c2eb233f8e14cc990f6185af6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/mutable_ndim_array.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/ndim_array.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/ndim_array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3328f803e207da3fb8d8caf15fb60fc8cd2bcc01 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/ndim_array.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/sparse_ndim_array.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/sparse_ndim_array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..829c89e0d54d0b0e1d8fdfd9277bbc2d44c1e04a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/__pycache__/sparse_ndim_array.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a12fdccda5d3ad7228bd9be7752c034be9494b6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/array_expressions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/array_expressions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bcb7e972cb72ddf26d6ecae0b36bfff888def726 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/array_expressions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/arrayexpr_derivatives.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/arrayexpr_derivatives.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49837b5b1799ae66ed5fa56d00fc1ed396c88572 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/arrayexpr_derivatives.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/conv_array_to_indexed.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/conv_array_to_indexed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7e1218942473af4bb8bebb7d58dcf312cfb9268 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/conv_array_to_indexed.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/conv_array_to_matrix.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/conv_array_to_matrix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b7c6cbe603dc1cbfe35be7c5e2d952150dcb23f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/conv_array_to_matrix.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/conv_indexed_to_array.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/conv_indexed_to_array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1db32ea94ccf36b63de5a1284ee661476fc39cf7 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/conv_indexed_to_array.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/conv_matrix_to_array.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/conv_matrix_to_array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0637c5f666eb927a3d4084ce8a9f03cbf319e4d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/conv_matrix_to_array.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/from_array_to_indexed.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/from_array_to_indexed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..956b32b257c4716299db8c556d775166c48fa217 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/from_array_to_indexed.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/from_array_to_matrix.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/from_array_to_matrix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4ea5ebe7b056511bcbdc48b477929e1ca5500fe Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/from_array_to_matrix.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/from_indexed_to_array.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/from_indexed_to_array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83a416f4df186d15b1c0709be145e3e9cc657377 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/from_indexed_to_array.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/from_matrix_to_array.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/from_matrix_to_array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8dbb7ad727bd12e558cd640481669f4a14d2eae6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/from_matrix_to_array.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ab0327411bb82fc6c503c56acb6d9bbca0350d5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/array_expressions.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/array_expressions.py new file mode 100644 index 0000000000000000000000000000000000000000..a8cf214f241557218fac27a9d962664d61448983 --- 
/dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/array_expressions.py @@ -0,0 +1,1967 @@ +import collections.abc +import operator +from collections import defaultdict, Counter +from functools import reduce +import itertools +from itertools import accumulate +from typing import Optional, List, Tuple as tTuple + +import typing + +from sympy.core.numbers import Integer +from sympy.core.relational import Equality +from sympy.functions.special.tensor_functions import KroneckerDelta +from sympy.core.basic import Basic +from sympy.core.containers import Tuple +from sympy.core.expr import Expr +from sympy.core.function import (Function, Lambda) +from sympy.core.mul import Mul +from sympy.core.singleton import S +from sympy.core.sorting import default_sort_key +from sympy.core.symbol import (Dummy, Symbol) +from sympy.matrices.common import MatrixCommon +from sympy.matrices.expressions.diagonal import diagonalize_vector +from sympy.matrices.expressions.matexpr import MatrixExpr +from sympy.matrices.expressions.special import ZeroMatrix +from sympy.tensor.array.arrayop import (permutedims, tensorcontraction, tensordiagonal, tensorproduct) +from sympy.tensor.array.dense_ndim_array import ImmutableDenseNDimArray +from sympy.tensor.array.ndim_array import NDimArray +from sympy.tensor.indexed import (Indexed, IndexedBase) +from sympy.matrices.expressions.matexpr import MatrixElement +from sympy.tensor.array.expressions.utils import _apply_recursively_over_nested_lists, _sort_contraction_indices, \ + _get_mapping_from_subranks, _build_push_indices_up_func_transformation, _get_contraction_links, \ + _build_push_indices_down_func_transformation +from sympy.combinatorics import Permutation +from sympy.combinatorics.permutations import _af_invert +from sympy.core.sympify import _sympify + + +class _ArrayExpr(Expr): + shape: tTuple[Expr, ...] + + def __getitem__(self, item): + if not isinstance(item, collections.abc.Iterable): + item = (item,) + ArrayElement._check_shape(self, item) + return self._get(item) + + def _get(self, item): + return _get_array_element_or_slice(self, item) + + +class ArraySymbol(_ArrayExpr): + """ + Symbol representing an array expression + """ + + def __new__(cls, symbol, shape: typing.Iterable) -> "ArraySymbol": + if isinstance(symbol, str): + symbol = Symbol(symbol) + # symbol = _sympify(symbol) + shape = Tuple(*map(_sympify, shape)) + obj = Expr.__new__(cls, symbol, shape) + return obj + + @property + def name(self): + return self._args[0] + + @property + def shape(self): + return self._args[1] + + def as_explicit(self): + if not all(i.is_Integer for i in self.shape): + raise ValueError("cannot express explicit array with symbolic shape") + data = [self[i] for i in itertools.product(*[range(j) for j in self.shape])] + return ImmutableDenseNDimArray(data).reshape(*self.shape) + + +class ArrayElement(Expr): + """ + An element of an array. 
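+
+    A minimal sketch of how such an element is constructed (the exact printed
+    form may differ):
+
+    >>> from sympy.abc import i, j
+    >>> from sympy.tensor.array.expressions.array_expressions import ArrayElement
+    >>> elem = ArrayElement("A", (i, j))
+    >>> elem.name
+    A
+    >>> elem.indices
+    (i, j)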
+ """ + + _diff_wrt = True + is_symbol = True + is_commutative = True + + def __new__(cls, name, indices): + if isinstance(name, str): + name = Symbol(name) + name = _sympify(name) + if not isinstance(indices, collections.abc.Iterable): + indices = (indices,) + indices = _sympify(tuple(indices)) + cls._check_shape(name, indices) + obj = Expr.__new__(cls, name, indices) + return obj + + @classmethod + def _check_shape(cls, name, indices): + indices = tuple(indices) + if hasattr(name, "shape"): + index_error = IndexError("number of indices does not match shape of the array") + if len(indices) != len(name.shape): + raise index_error + if any((i >= s) == True for i, s in zip(indices, name.shape)): + raise ValueError("shape is out of bounds") + if any((i < 0) == True for i in indices): + raise ValueError("shape contains negative values") + + @property + def name(self): + return self._args[0] + + @property + def indices(self): + return self._args[1] + + def _eval_derivative(self, s): + if not isinstance(s, ArrayElement): + return S.Zero + + if s == self: + return S.One + + if s.name != self.name: + return S.Zero + + return Mul.fromiter(KroneckerDelta(i, j) for i, j in zip(self.indices, s.indices)) + + +class ZeroArray(_ArrayExpr): + """ + Symbolic array of zeros. Equivalent to ``ZeroMatrix`` for matrices. + """ + + def __new__(cls, *shape): + if len(shape) == 0: + return S.Zero + shape = map(_sympify, shape) + obj = Expr.__new__(cls, *shape) + return obj + + @property + def shape(self): + return self._args + + def as_explicit(self): + if not all(i.is_Integer for i in self.shape): + raise ValueError("Cannot return explicit form for symbolic shape.") + return ImmutableDenseNDimArray.zeros(*self.shape) + + def _get(self, item): + return S.Zero + + +class OneArray(_ArrayExpr): + """ + Symbolic array of ones. + """ + + def __new__(cls, *shape): + if len(shape) == 0: + return S.One + shape = map(_sympify, shape) + obj = Expr.__new__(cls, *shape) + return obj + + @property + def shape(self): + return self._args + + def as_explicit(self): + if not all(i.is_Integer for i in self.shape): + raise ValueError("Cannot return explicit form for symbolic shape.") + return ImmutableDenseNDimArray([S.One for i in range(reduce(operator.mul, self.shape))]).reshape(*self.shape) + + def _get(self, item): + return S.One + + +class _CodegenArrayAbstract(Basic): + + @property + def subranks(self): + """ + Returns the ranks of the objects in the uppermost tensor product inside + the current object. In case no tensor products are contained, return + the atomic ranks. + + Examples + ======== + + >>> from sympy.tensor.array import tensorproduct, tensorcontraction + >>> from sympy import MatrixSymbol + >>> M = MatrixSymbol("M", 3, 3) + >>> N = MatrixSymbol("N", 3, 3) + >>> P = MatrixSymbol("P", 3, 3) + + Important: do not confuse the rank of the matrix with the rank of an array. + + >>> tp = tensorproduct(M, N, P) + >>> tp.subranks + [2, 2, 2] + + >>> co = tensorcontraction(tp, (1, 2), (3, 4)) + >>> co.subranks + [2, 2, 2] + """ + return self._subranks[:] + + def subrank(self): + """ + The sum of ``subranks``. + """ + return sum(self.subranks) + + @property + def shape(self): + return self._shape + + def doit(self, **hints): + deep = hints.get("deep", True) + if deep: + return self.func(*[arg.doit(**hints) for arg in self.args])._canonicalize() + else: + return self._canonicalize() + +class ArrayTensorProduct(_CodegenArrayAbstract): + r""" + Class to represent the tensor product of array-like objects. 
+ """ + + def __new__(cls, *args, **kwargs): + args = [_sympify(arg) for arg in args] + + canonicalize = kwargs.pop("canonicalize", False) + + ranks = [get_rank(arg) for arg in args] + + obj = Basic.__new__(cls, *args) + obj._subranks = ranks + shapes = [get_shape(i) for i in args] + + if any(i is None for i in shapes): + obj._shape = None + else: + obj._shape = tuple(j for i in shapes for j in i) + if canonicalize: + return obj._canonicalize() + return obj + + def _canonicalize(self): + args = self.args + args = self._flatten(args) + + ranks = [get_rank(arg) for arg in args] + + # Check if there are nested permutation and lift them up: + permutation_cycles = [] + for i, arg in enumerate(args): + if not isinstance(arg, PermuteDims): + continue + permutation_cycles.extend([[k + sum(ranks[:i]) for k in j] for j in arg.permutation.cyclic_form]) + args[i] = arg.expr + if permutation_cycles: + return _permute_dims(_array_tensor_product(*args), Permutation(sum(ranks)-1)*Permutation(permutation_cycles)) + + if len(args) == 1: + return args[0] + + # If any object is a ZeroArray, return a ZeroArray: + if any(isinstance(arg, (ZeroArray, ZeroMatrix)) for arg in args): + shapes = reduce(operator.add, [get_shape(i) for i in args], ()) + return ZeroArray(*shapes) + + # If there are contraction objects inside, transform the whole + # expression into `ArrayContraction`: + contractions = {i: arg for i, arg in enumerate(args) if isinstance(arg, ArrayContraction)} + if contractions: + ranks = [_get_subrank(arg) if isinstance(arg, ArrayContraction) else get_rank(arg) for arg in args] + cumulative_ranks = list(accumulate([0] + ranks))[:-1] + tp = _array_tensor_product(*[arg.expr if isinstance(arg, ArrayContraction) else arg for arg in args]) + contraction_indices = [tuple(cumulative_ranks[i] + k for k in j) for i, arg in contractions.items() for j in arg.contraction_indices] + return _array_contraction(tp, *contraction_indices) + + diagonals = {i: arg for i, arg in enumerate(args) if isinstance(arg, ArrayDiagonal)} + if diagonals: + inverse_permutation = [] + last_perm = [] + ranks = [get_rank(arg) for arg in args] + cumulative_ranks = list(accumulate([0] + ranks))[:-1] + for i, arg in enumerate(args): + if isinstance(arg, ArrayDiagonal): + i1 = get_rank(arg) - len(arg.diagonal_indices) + i2 = len(arg.diagonal_indices) + inverse_permutation.extend([cumulative_ranks[i] + j for j in range(i1)]) + last_perm.extend([cumulative_ranks[i] + j for j in range(i1, i1 + i2)]) + else: + inverse_permutation.extend([cumulative_ranks[i] + j for j in range(get_rank(arg))]) + inverse_permutation.extend(last_perm) + tp = _array_tensor_product(*[arg.expr if isinstance(arg, ArrayDiagonal) else arg for arg in args]) + ranks2 = [_get_subrank(arg) if isinstance(arg, ArrayDiagonal) else get_rank(arg) for arg in args] + cumulative_ranks2 = list(accumulate([0] + ranks2))[:-1] + diagonal_indices = [tuple(cumulative_ranks2[i] + k for k in j) for i, arg in diagonals.items() for j in arg.diagonal_indices] + return _permute_dims(_array_diagonal(tp, *diagonal_indices), _af_invert(inverse_permutation)) + + return self.func(*args, canonicalize=False) + + @classmethod + def _flatten(cls, args): + args = [i for arg in args for i in (arg.args if isinstance(arg, cls) else [arg])] + return args + + def as_explicit(self): + return tensorproduct(*[arg.as_explicit() if hasattr(arg, "as_explicit") else arg for arg in self.args]) + + +class ArrayAdd(_CodegenArrayAbstract): + r""" + Class for elementwise array additions. 
+ """ + + def __new__(cls, *args, **kwargs): + args = [_sympify(arg) for arg in args] + ranks = [get_rank(arg) for arg in args] + ranks = list(set(ranks)) + if len(ranks) != 1: + raise ValueError("summing arrays of different ranks") + shapes = [arg.shape for arg in args] + if len({i for i in shapes if i is not None}) > 1: + raise ValueError("mismatching shapes in addition") + + canonicalize = kwargs.pop("canonicalize", False) + + obj = Basic.__new__(cls, *args) + obj._subranks = ranks + if any(i is None for i in shapes): + obj._shape = None + else: + obj._shape = shapes[0] + if canonicalize: + return obj._canonicalize() + return obj + + def _canonicalize(self): + args = self.args + + # Flatten: + args = self._flatten_args(args) + + shapes = [get_shape(arg) for arg in args] + args = [arg for arg in args if not isinstance(arg, (ZeroArray, ZeroMatrix))] + if len(args) == 0: + if any(i for i in shapes if i is None): + raise NotImplementedError("cannot handle addition of ZeroMatrix/ZeroArray and undefined shape object") + return ZeroArray(*shapes[0]) + elif len(args) == 1: + return args[0] + return self.func(*args, canonicalize=False) + + @classmethod + def _flatten_args(cls, args): + new_args = [] + for arg in args: + if isinstance(arg, ArrayAdd): + new_args.extend(arg.args) + else: + new_args.append(arg) + return new_args + + def as_explicit(self): + return reduce( + operator.add, + [arg.as_explicit() if hasattr(arg, "as_explicit") else arg for arg in self.args]) + + +class PermuteDims(_CodegenArrayAbstract): + r""" + Class to represent permutation of axes of arrays. + + Examples + ======== + + >>> from sympy.tensor.array import permutedims + >>> from sympy import MatrixSymbol + >>> M = MatrixSymbol("M", 3, 3) + >>> cg = permutedims(M, [1, 0]) + + The object ``cg`` represents the transposition of ``M``, as the permutation + ``[1, 0]`` will act on its indices by switching them: + + `M_{ij} \Rightarrow M_{ji}` + + This is evident when transforming back to matrix form: + + >>> from sympy.tensor.array.expressions.from_array_to_matrix import convert_array_to_matrix + >>> convert_array_to_matrix(cg) + M.T + + >>> N = MatrixSymbol("N", 3, 2) + >>> cg = permutedims(N, [1, 0]) + >>> cg.shape + (2, 3) + + There are optional parameters that can be used as alternative to the permutation: + + >>> from sympy.tensor.array.expressions import ArraySymbol, PermuteDims + >>> M = ArraySymbol("M", (1, 2, 3, 4, 5)) + >>> expr = PermuteDims(M, index_order_old="ijklm", index_order_new="kijml") + >>> expr + PermuteDims(M, (0 2 1)(3 4)) + >>> expr.shape + (3, 1, 2, 5, 4) + + Permutations of tensor products are simplified in order to achieve a + standard form: + + >>> from sympy.tensor.array import tensorproduct + >>> M = MatrixSymbol("M", 4, 5) + >>> tp = tensorproduct(M, N) + >>> tp.shape + (4, 5, 3, 2) + >>> perm1 = permutedims(tp, [2, 3, 1, 0]) + + The args ``(M, N)`` have been sorted and the permutation has been + simplified, the expression is equivalent: + + >>> perm1.expr.args + (N, M) + >>> perm1.shape + (3, 2, 5, 4) + >>> perm1.permutation + (2 3) + + The permutation in its array form has been simplified from + ``[2, 3, 1, 0]`` to ``[0, 1, 3, 2]``, as the arguments of the tensor + product `M` and `N` have been switched: + + >>> perm1.permutation.array_form + [0, 1, 3, 2] + + We can nest a second permutation: + + >>> perm2 = permutedims(perm1, [1, 0, 2, 3]) + >>> perm2.shape + (2, 3, 5, 4) + >>> perm2.permutation.array_form + [1, 0, 3, 2] + """ + + def __new__(cls, expr, permutation=None, index_order_old=None, 
index_order_new=None, **kwargs): + from sympy.combinatorics import Permutation + expr = _sympify(expr) + expr_rank = get_rank(expr) + permutation = cls._get_permutation_from_arguments(permutation, index_order_old, index_order_new, expr_rank) + permutation = Permutation(permutation) + permutation_size = permutation.size + if permutation_size != expr_rank: + raise ValueError("Permutation size must be the length of the shape of expr") + + canonicalize = kwargs.pop("canonicalize", False) + + obj = Basic.__new__(cls, expr, permutation) + obj._subranks = [get_rank(expr)] + shape = get_shape(expr) + if shape is None: + obj._shape = None + else: + obj._shape = tuple(shape[permutation(i)] for i in range(len(shape))) + if canonicalize: + return obj._canonicalize() + return obj + + def _canonicalize(self): + expr = self.expr + permutation = self.permutation + if isinstance(expr, PermuteDims): + subexpr = expr.expr + subperm = expr.permutation + permutation = permutation * subperm + expr = subexpr + if isinstance(expr, ArrayContraction): + expr, permutation = self._PermuteDims_denestarg_ArrayContraction(expr, permutation) + if isinstance(expr, ArrayTensorProduct): + expr, permutation = self._PermuteDims_denestarg_ArrayTensorProduct(expr, permutation) + if isinstance(expr, (ZeroArray, ZeroMatrix)): + return ZeroArray(*[expr.shape[i] for i in permutation.array_form]) + plist = permutation.array_form + if plist == sorted(plist): + return expr + return self.func(expr, permutation, canonicalize=False) + + @property + def expr(self): + return self.args[0] + + @property + def permutation(self): + return self.args[1] + + @classmethod + def _PermuteDims_denestarg_ArrayTensorProduct(cls, expr, permutation): + # Get the permutation in its image-form: + perm_image_form = _af_invert(permutation.array_form) + args = list(expr.args) + # Starting index global position for every arg: + cumul = list(accumulate([0] + expr.subranks)) + # Split `perm_image_form` into a list of list corresponding to the indices + # of every argument: + perm_image_form_in_components = [perm_image_form[cumul[i]:cumul[i+1]] for i in range(len(args))] + # Create an index, target-position-key array: + ps = [(i, sorted(comp)) for i, comp in enumerate(perm_image_form_in_components)] + # Sort the array according to the target-position-key: + # In this way, we define a canonical way to sort the arguments according + # to the permutation. + ps.sort(key=lambda x: x[1]) + # Read the inverse-permutation (i.e. 
image-form) of the args: + perm_args_image_form = [i[0] for i in ps] + # Apply the args-permutation to the `args`: + args_sorted = [args[i] for i in perm_args_image_form] + # Apply the args-permutation to the array-form of the permutation of the axes (of `expr`): + perm_image_form_sorted_args = [perm_image_form_in_components[i] for i in perm_args_image_form] + new_permutation = Permutation(_af_invert([j for i in perm_image_form_sorted_args for j in i])) + return _array_tensor_product(*args_sorted), new_permutation + + @classmethod + def _PermuteDims_denestarg_ArrayContraction(cls, expr, permutation): + if not isinstance(expr, ArrayContraction): + return expr, permutation + if not isinstance(expr.expr, ArrayTensorProduct): + return expr, permutation + args = expr.expr.args + subranks = [get_rank(arg) for arg in expr.expr.args] + + contraction_indices = expr.contraction_indices + contraction_indices_flat = [j for i in contraction_indices for j in i] + cumul = list(accumulate([0] + subranks)) + + # Spread the permutation in its array form across the args in the corresponding + # tensor-product arguments with free indices: + permutation_array_blocks_up = [] + image_form = _af_invert(permutation.array_form) + counter = 0 + for i, e in enumerate(subranks): + current = [] + for j in range(cumul[i], cumul[i+1]): + if j in contraction_indices_flat: + continue + current.append(image_form[counter]) + counter += 1 + permutation_array_blocks_up.append(current) + + # Get the map of axis repositioning for every argument of tensor-product: + index_blocks = [list(range(cumul[i], cumul[i+1])) for i, e in enumerate(expr.subranks)] + index_blocks_up = expr._push_indices_up(expr.contraction_indices, index_blocks) + inverse_permutation = permutation**(-1) + index_blocks_up_permuted = [[inverse_permutation(j) for j in i if j is not None] for i in index_blocks_up] + + # Sorting key is a list of tuple, first element is the index of `args`, second element of + # the tuple is the sorting key to sort `args` of the tensor product: + sorting_keys = list(enumerate(index_blocks_up_permuted)) + sorting_keys.sort(key=lambda x: x[1]) + + # Now we can get the permutation acting on the args in its image-form: + new_perm_image_form = [i[0] for i in sorting_keys] + # Apply the args-level permutation to various elements: + new_index_blocks = [index_blocks[i] for i in new_perm_image_form] + new_index_perm_array_form = _af_invert([j for i in new_index_blocks for j in i]) + new_args = [args[i] for i in new_perm_image_form] + new_contraction_indices = [tuple(new_index_perm_array_form[j] for j in i) for i in contraction_indices] + new_expr = _array_contraction(_array_tensor_product(*new_args), *new_contraction_indices) + new_permutation = Permutation(_af_invert([j for i in [permutation_array_blocks_up[k] for k in new_perm_image_form] for j in i])) + return new_expr, new_permutation + + @classmethod + def _check_permutation_mapping(cls, expr, permutation): + subranks = expr.subranks + index2arg = [i for i, arg in enumerate(expr.args) for j in range(expr.subranks[i])] + permuted_indices = [permutation(i) for i in range(expr.subrank())] + new_args = list(expr.args) + arg_candidate_index = index2arg[permuted_indices[0]] + current_indices = [] + new_permutation = [] + inserted_arg_cand_indices = set() + for i, idx in enumerate(permuted_indices): + if index2arg[idx] != arg_candidate_index: + new_permutation.extend(current_indices) + current_indices = [] + arg_candidate_index = index2arg[idx] + current_indices.append(idx) + 
arg_candidate_rank = subranks[arg_candidate_index] + if len(current_indices) == arg_candidate_rank: + new_permutation.extend(sorted(current_indices)) + local_current_indices = [j - min(current_indices) for j in current_indices] + i1 = index2arg[i] + new_args[i1] = _permute_dims(new_args[i1], Permutation(local_current_indices)) + inserted_arg_cand_indices.add(arg_candidate_index) + current_indices = [] + new_permutation.extend(current_indices) + + # TODO: swap args positions in order to simplify the expression: + # TODO: this should be in a function + args_positions = list(range(len(new_args))) + # Get possible shifts: + maps = {} + cumulative_subranks = [0] + list(accumulate(subranks)) + for i in range(len(subranks)): + s = {index2arg[new_permutation[j]] for j in range(cumulative_subranks[i], cumulative_subranks[i+1])} + if len(s) != 1: + continue + elem = next(iter(s)) + if i != elem: + maps[i] = elem + + # Find cycles in the map: + lines = [] + current_line = [] + while maps: + if len(current_line) == 0: + k, v = maps.popitem() + current_line.append(k) + else: + k = current_line[-1] + if k not in maps: + current_line = [] + continue + v = maps.pop(k) + if v in current_line: + lines.append(current_line) + current_line = [] + continue + current_line.append(v) + for line in lines: + for i, e in enumerate(line): + args_positions[line[(i + 1) % len(line)]] = e + + # TODO: function in order to permute the args: + permutation_blocks = [[new_permutation[cumulative_subranks[i] + j] for j in range(e)] for i, e in enumerate(subranks)] + new_args = [new_args[i] for i in args_positions] + new_permutation_blocks = [permutation_blocks[i] for i in args_positions] + new_permutation2 = [j for i in new_permutation_blocks for j in i] + return _array_tensor_product(*new_args), Permutation(new_permutation2) # **(-1) + + @classmethod + def _check_if_there_are_closed_cycles(cls, expr, permutation): + args = list(expr.args) + subranks = expr.subranks + cyclic_form = permutation.cyclic_form + cumulative_subranks = [0] + list(accumulate(subranks)) + cyclic_min = [min(i) for i in cyclic_form] + cyclic_max = [max(i) for i in cyclic_form] + cyclic_keep = [] + for i, cycle in enumerate(cyclic_form): + flag = True + for j in range(len(cumulative_subranks) - 1): + if cyclic_min[i] >= cumulative_subranks[j] and cyclic_max[i] < cumulative_subranks[j+1]: + # Found a sinkable cycle. + args[j] = _permute_dims(args[j], Permutation([[k - cumulative_subranks[j] for k in cyclic_form[i]]])) + flag = False + break + if flag: + cyclic_keep.append(cyclic_form[i]) + return _array_tensor_product(*args), Permutation(cyclic_keep, size=permutation.size) + + def nest_permutation(self): + r""" + DEPRECATED. + """ + ret = self._nest_permutation(self.expr, self.permutation) + if ret is None: + return self + return ret + + @classmethod + def _nest_permutation(cls, expr, permutation): + if isinstance(expr, ArrayTensorProduct): + return _permute_dims(*cls._check_if_there_are_closed_cycles(expr, permutation)) + elif isinstance(expr, ArrayContraction): + # Invert tree hierarchy: put the contraction above. 
+ cycles = permutation.cyclic_form + newcycles = ArrayContraction._convert_outer_indices_to_inner_indices(expr, *cycles) + newpermutation = Permutation(newcycles) + new_contr_indices = [tuple(newpermutation(j) for j in i) for i in expr.contraction_indices] + return _array_contraction(PermuteDims(expr.expr, newpermutation), *new_contr_indices) + elif isinstance(expr, ArrayAdd): + return _array_add(*[PermuteDims(arg, permutation) for arg in expr.args]) + return None + + def as_explicit(self): + expr = self.expr + if hasattr(expr, "as_explicit"): + expr = expr.as_explicit() + return permutedims(expr, self.permutation) + + @classmethod + def _get_permutation_from_arguments(cls, permutation, index_order_old, index_order_new, dim): + if permutation is None: + if index_order_new is None or index_order_old is None: + raise ValueError("Permutation not defined") + return PermuteDims._get_permutation_from_index_orders(index_order_old, index_order_new, dim) + else: + if index_order_new is not None: + raise ValueError("index_order_new cannot be defined with permutation") + if index_order_old is not None: + raise ValueError("index_order_old cannot be defined with permutation") + return permutation + + @classmethod + def _get_permutation_from_index_orders(cls, index_order_old, index_order_new, dim): + if len(set(index_order_new)) != dim: + raise ValueError("wrong number of indices in index_order_new") + if len(set(index_order_old)) != dim: + raise ValueError("wrong number of indices in index_order_old") + if len(set.symmetric_difference(set(index_order_new), set(index_order_old))) > 0: + raise ValueError("index_order_new and index_order_old must have the same indices") + permutation = [index_order_old.index(i) for i in index_order_new] + return permutation + + +class ArrayDiagonal(_CodegenArrayAbstract): + r""" + Class to represent the diagonal operator. + + Explanation + =========== + + In a 2-dimensional array it returns the diagonal, this looks like the + operation: + + `A_{ij} \rightarrow A_{ii}` + + The diagonal over axes 1 and 2 (the second and third) of the tensor product + of two 2-dimensional arrays `A \otimes B` is + + `\Big[ A_{ab} B_{cd} \Big]_{abcd} \rightarrow \Big[ A_{ai} B_{id} \Big]_{adi}` + + In this last example the array expression has been reduced from + 4-dimensional to 3-dimensional. Notice that no contraction has occurred, + rather there is a new index `i` for the diagonal, contraction would have + reduced the array to 2 dimensions. + + Notice that the diagonalized out dimensions are added as new dimensions at + the end of the indices. 
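+
+    A minimal sketch, assuming a square ``MatrixSymbol``: diagonalizing both
+    axes of a matrix leaves a single diagonal axis (printed output is
+    indicative):
+
+    >>> from sympy import MatrixSymbol
+    >>> from sympy.tensor.array.expressions.array_expressions import ArrayDiagonal
+    >>> M = MatrixSymbol("M", 3, 3)
+    >>> ArrayDiagonal(M, (0, 1)).shape
+    (3,)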
+ """ + + def __new__(cls, expr, *diagonal_indices, **kwargs): + expr = _sympify(expr) + diagonal_indices = [Tuple(*sorted(i)) for i in diagonal_indices] + canonicalize = kwargs.get("canonicalize", False) + + shape = get_shape(expr) + if shape is not None: + cls._validate(expr, *diagonal_indices, **kwargs) + # Get new shape: + positions, shape = cls._get_positions_shape(shape, diagonal_indices) + else: + positions = None + if len(diagonal_indices) == 0: + return expr + obj = Basic.__new__(cls, expr, *diagonal_indices) + obj._positions = positions + obj._subranks = _get_subranks(expr) + obj._shape = shape + if canonicalize: + return obj._canonicalize() + return obj + + def _canonicalize(self): + expr = self.expr + diagonal_indices = self.diagonal_indices + trivial_diags = [i for i in diagonal_indices if len(i) == 1] + if len(trivial_diags) > 0: + trivial_pos = {e[0]: i for i, e in enumerate(diagonal_indices) if len(e) == 1} + diag_pos = {e: i for i, e in enumerate(diagonal_indices) if len(e) > 1} + diagonal_indices_short = [i for i in diagonal_indices if len(i) > 1] + rank1 = get_rank(self) + rank2 = len(diagonal_indices) + rank3 = rank1 - rank2 + inv_permutation = [] + counter1 = 0 + indices_down = ArrayDiagonal._push_indices_down(diagonal_indices_short, list(range(rank1)), get_rank(expr)) + for i in indices_down: + if i in trivial_pos: + inv_permutation.append(rank3 + trivial_pos[i]) + elif isinstance(i, (Integer, int)): + inv_permutation.append(counter1) + counter1 += 1 + else: + inv_permutation.append(rank3 + diag_pos[i]) + permutation = _af_invert(inv_permutation) + if len(diagonal_indices_short) > 0: + return _permute_dims(_array_diagonal(expr, *diagonal_indices_short), permutation) + else: + return _permute_dims(expr, permutation) + if isinstance(expr, ArrayAdd): + return self._ArrayDiagonal_denest_ArrayAdd(expr, *diagonal_indices) + if isinstance(expr, ArrayDiagonal): + return self._ArrayDiagonal_denest_ArrayDiagonal(expr, *diagonal_indices) + if isinstance(expr, PermuteDims): + return self._ArrayDiagonal_denest_PermuteDims(expr, *diagonal_indices) + if isinstance(expr, (ZeroArray, ZeroMatrix)): + positions, shape = self._get_positions_shape(expr.shape, diagonal_indices) + return ZeroArray(*shape) + return self.func(expr, *diagonal_indices, canonicalize=False) + + @staticmethod + def _validate(expr, *diagonal_indices, **kwargs): + # Check that no diagonalization happens on indices with mismatched + # dimensions: + shape = get_shape(expr) + for i in diagonal_indices: + if any(j >= len(shape) for j in i): + raise ValueError("index is larger than expression shape") + if len({shape[j] for j in i}) != 1: + raise ValueError("diagonalizing indices of different dimensions") + if not kwargs.get("allow_trivial_diags", False) and len(i) <= 1: + raise ValueError("need at least two axes to diagonalize") + if len(set(i)) != len(i): + raise ValueError("axis index cannot be repeated") + + @staticmethod + def _remove_trivial_dimensions(shape, *diagonal_indices): + return [tuple(j for j in i) for i in diagonal_indices if shape[i[0]] != 1] + + @property + def expr(self): + return self.args[0] + + @property + def diagonal_indices(self): + return self.args[1:] + + @staticmethod + def _flatten(expr, *outer_diagonal_indices): + inner_diagonal_indices = expr.diagonal_indices + all_inner = [j for i in inner_diagonal_indices for j in i] + all_inner.sort() + # TODO: add API for total rank and cumulative rank: + total_rank = _get_subrank(expr) + inner_rank = len(all_inner) + outer_rank = total_rank - 
inner_rank + shifts = [0 for i in range(outer_rank)] + counter = 0 + pointer = 0 + for i in range(outer_rank): + while pointer < inner_rank and counter >= all_inner[pointer]: + counter += 1 + pointer += 1 + shifts[i] += pointer + counter += 1 + outer_diagonal_indices = tuple(tuple(shifts[j] + j for j in i) for i in outer_diagonal_indices) + diagonal_indices = inner_diagonal_indices + outer_diagonal_indices + return _array_diagonal(expr.expr, *diagonal_indices) + + @classmethod + def _ArrayDiagonal_denest_ArrayAdd(cls, expr, *diagonal_indices): + return _array_add(*[_array_diagonal(arg, *diagonal_indices) for arg in expr.args]) + + @classmethod + def _ArrayDiagonal_denest_ArrayDiagonal(cls, expr, *diagonal_indices): + return cls._flatten(expr, *diagonal_indices) + + @classmethod + def _ArrayDiagonal_denest_PermuteDims(cls, expr: PermuteDims, *diagonal_indices): + back_diagonal_indices = [[expr.permutation(j) for j in i] for i in diagonal_indices] + nondiag = [i for i in range(get_rank(expr)) if not any(i in j for j in diagonal_indices)] + back_nondiag = [expr.permutation(i) for i in nondiag] + remap = {e: i for i, e in enumerate(sorted(back_nondiag))} + new_permutation1 = [remap[i] for i in back_nondiag] + shift = len(new_permutation1) + diag_block_perm = [i + shift for i in range(len(back_diagonal_indices))] + new_permutation = new_permutation1 + diag_block_perm + return _permute_dims( + _array_diagonal( + expr.expr, + *back_diagonal_indices + ), + new_permutation + ) + + def _push_indices_down_nonstatic(self, indices): + transform = lambda x: self._positions[x] if x < len(self._positions) else None + return _apply_recursively_over_nested_lists(transform, indices) + + def _push_indices_up_nonstatic(self, indices): + + def transform(x): + for i, e in enumerate(self._positions): + if (isinstance(e, int) and x == e) or (isinstance(e, tuple) and x in e): + return i + + return _apply_recursively_over_nested_lists(transform, indices) + + @classmethod + def _push_indices_down(cls, diagonal_indices, indices, rank): + positions, shape = cls._get_positions_shape(range(rank), diagonal_indices) + transform = lambda x: positions[x] if x < len(positions) else None + return _apply_recursively_over_nested_lists(transform, indices) + + @classmethod + def _push_indices_up(cls, diagonal_indices, indices, rank): + positions, shape = cls._get_positions_shape(range(rank), diagonal_indices) + + def transform(x): + for i, e in enumerate(positions): + if (isinstance(e, int) and x == e) or (isinstance(e, (tuple, Tuple)) and (x in e)): + return i + + return _apply_recursively_over_nested_lists(transform, indices) + + @classmethod + def _get_positions_shape(cls, shape, diagonal_indices): + data1 = tuple((i, shp) for i, shp in enumerate(shape) if not any(i in j for j in diagonal_indices)) + pos1, shp1 = zip(*data1) if data1 else ((), ()) + data2 = tuple((i, shape[i[0]]) for i in diagonal_indices) + pos2, shp2 = zip(*data2) if data2 else ((), ()) + positions = pos1 + pos2 + shape = shp1 + shp2 + return positions, shape + + def as_explicit(self): + expr = self.expr + if hasattr(expr, "as_explicit"): + expr = expr.as_explicit() + return tensordiagonal(expr, *self.diagonal_indices) + + +class ArrayElementwiseApplyFunc(_CodegenArrayAbstract): + + def __new__(cls, function, element): + + if not isinstance(function, Lambda): + d = Dummy('d') + function = Lambda(d, function(d)) + + obj = _CodegenArrayAbstract.__new__(cls, function, element) + obj._subranks = _get_subranks(element) + return obj + + @property + def 
function(self): + return self.args[0] + + @property + def expr(self): + return self.args[1] + + @property + def shape(self): + return self.expr.shape + + def _get_function_fdiff(self): + d = Dummy("d") + function = self.function(d) + fdiff = function.diff(d) + if isinstance(fdiff, Function): + fdiff = type(fdiff) + else: + fdiff = Lambda(d, fdiff) + return fdiff + + def as_explicit(self): + expr = self.expr + if hasattr(expr, "as_explicit"): + expr = expr.as_explicit() + return expr.applyfunc(self.function) + + +class ArrayContraction(_CodegenArrayAbstract): + r""" + This class is meant to represent contractions of arrays in a form easily + processable by the code printers. + """ + + def __new__(cls, expr, *contraction_indices, **kwargs): + contraction_indices = _sort_contraction_indices(contraction_indices) + expr = _sympify(expr) + + canonicalize = kwargs.get("canonicalize", False) + + obj = Basic.__new__(cls, expr, *contraction_indices) + obj._subranks = _get_subranks(expr) + obj._mapping = _get_mapping_from_subranks(obj._subranks) + + free_indices_to_position = {i: i for i in range(sum(obj._subranks)) if all(i not in cind for cind in contraction_indices)} + obj._free_indices_to_position = free_indices_to_position + + shape = get_shape(expr) + cls._validate(expr, *contraction_indices) + if shape: + shape = tuple(shp for i, shp in enumerate(shape) if not any(i in j for j in contraction_indices)) + obj._shape = shape + if canonicalize: + return obj._canonicalize() + return obj + + def _canonicalize(self): + expr = self.expr + contraction_indices = self.contraction_indices + + if len(contraction_indices) == 0: + return expr + + if isinstance(expr, ArrayContraction): + return self._ArrayContraction_denest_ArrayContraction(expr, *contraction_indices) + + if isinstance(expr, (ZeroArray, ZeroMatrix)): + return self._ArrayContraction_denest_ZeroArray(expr, *contraction_indices) + + if isinstance(expr, PermuteDims): + return self._ArrayContraction_denest_PermuteDims(expr, *contraction_indices) + + if isinstance(expr, ArrayTensorProduct): + expr, contraction_indices = self._sort_fully_contracted_args(expr, contraction_indices) + expr, contraction_indices = self._lower_contraction_to_addends(expr, contraction_indices) + if len(contraction_indices) == 0: + return expr + + if isinstance(expr, ArrayDiagonal): + return self._ArrayContraction_denest_ArrayDiagonal(expr, *contraction_indices) + + if isinstance(expr, ArrayAdd): + return self._ArrayContraction_denest_ArrayAdd(expr, *contraction_indices) + + # Check single index contractions on 1-dimensional axes: + contraction_indices = [i for i in contraction_indices if len(i) > 1 or get_shape(expr)[i[0]] != 1] + if len(contraction_indices) == 0: + return expr + + return self.func(expr, *contraction_indices, canonicalize=False) + + def __mul__(self, other): + if other == 1: + return self + else: + raise NotImplementedError("Product of N-dim arrays is not uniquely defined. Use another method.") + + def __rmul__(self, other): + if other == 1: + return self + else: + raise NotImplementedError("Product of N-dim arrays is not uniquely defined. 
Use another method.") + + @staticmethod + def _validate(expr, *contraction_indices): + shape = get_shape(expr) + if shape is None: + return + + # Check that no contraction happens when the shape is mismatched: + for i in contraction_indices: + if len({shape[j] for j in i if shape[j] != -1}) != 1: + raise ValueError("contracting indices of different dimensions") + + @classmethod + def _push_indices_down(cls, contraction_indices, indices): + flattened_contraction_indices = [j for i in contraction_indices for j in i] + flattened_contraction_indices.sort() + transform = _build_push_indices_down_func_transformation(flattened_contraction_indices) + return _apply_recursively_over_nested_lists(transform, indices) + + @classmethod + def _push_indices_up(cls, contraction_indices, indices): + flattened_contraction_indices = [j for i in contraction_indices for j in i] + flattened_contraction_indices.sort() + transform = _build_push_indices_up_func_transformation(flattened_contraction_indices) + return _apply_recursively_over_nested_lists(transform, indices) + + @classmethod + def _lower_contraction_to_addends(cls, expr, contraction_indices): + if isinstance(expr, ArrayAdd): + raise NotImplementedError() + if not isinstance(expr, ArrayTensorProduct): + return expr, contraction_indices + subranks = expr.subranks + cumranks = list(accumulate([0] + subranks)) + contraction_indices_remaining = [] + contraction_indices_args = [[] for i in expr.args] + backshift = set() + for i, contraction_group in enumerate(contraction_indices): + for j in range(len(expr.args)): + if not isinstance(expr.args[j], ArrayAdd): + continue + if all(cumranks[j] <= k < cumranks[j+1] for k in contraction_group): + contraction_indices_args[j].append([k - cumranks[j] for k in contraction_group]) + backshift.update(contraction_group) + break + else: + contraction_indices_remaining.append(contraction_group) + if len(contraction_indices_remaining) == len(contraction_indices): + return expr, contraction_indices + total_rank = get_rank(expr) + shifts = list(accumulate([1 if i in backshift else 0 for i in range(total_rank)])) + contraction_indices_remaining = [Tuple.fromiter(j - shifts[j] for j in i) for i in contraction_indices_remaining] + ret = _array_tensor_product(*[ + _array_contraction(arg, *contr) for arg, contr in zip(expr.args, contraction_indices_args) + ]) + return ret, contraction_indices_remaining + + def split_multiple_contractions(self): + """ + Recognize multiple contractions and attempt at rewriting them as paired-contractions. + + This allows some contractions involving more than two indices to be + rewritten as multiple contractions involving two indices, thus allowing + the expression to be rewritten as a matrix multiplication line. + + Examples: + + * `A_ij b_j0 C_jk` ===> `A*DiagMatrix(b)*C` + + Care for: + - matrix being diagonalized (i.e. `A_ii`) + - vectors being diagonalized (i.e. `a_i0`) + + Multiple contractions can be split into matrix multiplications if + not more than two arguments are non-diagonals or non-vectors. + Vectors get diagonalized while diagonal matrices remain diagonal. + The non-diagonal matrices can be at the beginning or at the end + of the final matrix multiplication line. 
+ """ + + editor = _EditArrayContraction(self) + + contraction_indices = self.contraction_indices + + onearray_insert = [] + + for indl, links in enumerate(contraction_indices): + if len(links) <= 2: + continue + + # Check multiple contractions: + # + # Examples: + # + # * `A_ij b_j0 C_jk` ===> `A*DiagMatrix(b)*C \otimes OneArray(1)` with permutation (1 2) + # + # Care for: + # - matrix being diagonalized (i.e. `A_ii`) + # - vectors being diagonalized (i.e. `a_i0`) + + # Multiple contractions can be split into matrix multiplications if + # not more than three arguments are non-diagonals or non-vectors. + # + # Vectors get diagonalized while diagonal matrices remain diagonal. + # The non-diagonal matrices can be at the beginning or at the end + # of the final matrix multiplication line. + + positions = editor.get_mapping_for_index(indl) + + # Also consider the case of diagonal matrices being contracted: + current_dimension = self.expr.shape[links[0]] + + not_vectors = [] + vectors = [] + for arg_ind, rel_ind in positions: + arg = editor.args_with_ind[arg_ind] + mat = arg.element + abs_arg_start, abs_arg_end = editor.get_absolute_range(arg) + other_arg_pos = 1-rel_ind + other_arg_abs = abs_arg_start + other_arg_pos + if ((1 not in mat.shape) or + ((current_dimension == 1) is True and mat.shape != (1, 1)) or + any(other_arg_abs in l for li, l in enumerate(contraction_indices) if li != indl) + ): + not_vectors.append((arg, rel_ind)) + else: + vectors.append((arg, rel_ind)) + if len(not_vectors) > 2: + # If more than two arguments in the multiple contraction are + # non-vectors and non-diagonal matrices, we cannot find a way + # to split this contraction into a matrix multiplication line: + continue + # Three cases to handle: + # - zero non-vectors + # - one non-vector + # - two non-vectors + for v, rel_ind in vectors: + v.element = diagonalize_vector(v.element) + vectors_to_loop = not_vectors[:1] + vectors + not_vectors[1:] + first_not_vector, rel_ind = vectors_to_loop[0] + new_index = first_not_vector.indices[rel_ind] + + for v, rel_ind in vectors_to_loop[1:-1]: + v.indices[rel_ind] = new_index + new_index = editor.get_new_contraction_index() + assert v.indices.index(None) == 1 - rel_ind + v.indices[v.indices.index(None)] = new_index + onearray_insert.append(v) + + last_vec, rel_ind = vectors_to_loop[-1] + last_vec.indices[rel_ind] = new_index + + for v in onearray_insert: + editor.insert_after(v, _ArgE(OneArray(1), [None])) + + return editor.to_array_contraction() + + def flatten_contraction_of_diagonal(self): + if not isinstance(self.expr, ArrayDiagonal): + return self + contraction_down = self.expr._push_indices_down(self.expr.diagonal_indices, self.contraction_indices) + new_contraction_indices = [] + diagonal_indices = self.expr.diagonal_indices[:] + for i in contraction_down: + contraction_group = list(i) + for j in i: + diagonal_with = [k for k in diagonal_indices if j in k] + contraction_group.extend([l for k in diagonal_with for l in k]) + diagonal_indices = [k for k in diagonal_indices if k not in diagonal_with] + new_contraction_indices.append(sorted(set(contraction_group))) + + new_contraction_indices = ArrayDiagonal._push_indices_up(diagonal_indices, new_contraction_indices) + return _array_contraction( + _array_diagonal( + self.expr.expr, + *diagonal_indices + ), + *new_contraction_indices + ) + + @staticmethod + def _get_free_indices_to_position_map(free_indices, contraction_indices): + free_indices_to_position = {} + flattened_contraction_indices = [j for i in 
contraction_indices for j in i] + counter = 0 + for ind in free_indices: + while counter in flattened_contraction_indices: + counter += 1 + free_indices_to_position[ind] = counter + counter += 1 + return free_indices_to_position + + @staticmethod + def _get_index_shifts(expr): + """ + Get the mapping of indices at the positions before the contraction + occurs. + + Examples + ======== + + >>> from sympy.tensor.array import tensorproduct, tensorcontraction + >>> from sympy import MatrixSymbol + >>> M = MatrixSymbol("M", 3, 3) + >>> N = MatrixSymbol("N", 3, 3) + >>> cg = tensorcontraction(tensorproduct(M, N), [1, 2]) + >>> cg._get_index_shifts(cg) + [0, 2] + + Indeed, ``cg`` after the contraction has two dimensions, 0 and 1. They + need to be shifted by 0 and 2 to get the corresponding positions before + the contraction (that is, 0 and 3). + """ + inner_contraction_indices = expr.contraction_indices + all_inner = [j for i in inner_contraction_indices for j in i] + all_inner.sort() + # TODO: add API for total rank and cumulative rank: + total_rank = _get_subrank(expr) + inner_rank = len(all_inner) + outer_rank = total_rank - inner_rank + shifts = [0 for i in range(outer_rank)] + counter = 0 + pointer = 0 + for i in range(outer_rank): + while pointer < inner_rank and counter >= all_inner[pointer]: + counter += 1 + pointer += 1 + shifts[i] += pointer + counter += 1 + return shifts + + @staticmethod + def _convert_outer_indices_to_inner_indices(expr, *outer_contraction_indices): + shifts = ArrayContraction._get_index_shifts(expr) + outer_contraction_indices = tuple(tuple(shifts[j] + j for j in i) for i in outer_contraction_indices) + return outer_contraction_indices + + @staticmethod + def _flatten(expr, *outer_contraction_indices): + inner_contraction_indices = expr.contraction_indices + outer_contraction_indices = ArrayContraction._convert_outer_indices_to_inner_indices(expr, *outer_contraction_indices) + contraction_indices = inner_contraction_indices + outer_contraction_indices + return _array_contraction(expr.expr, *contraction_indices) + + @classmethod + def _ArrayContraction_denest_ArrayContraction(cls, expr, *contraction_indices): + return cls._flatten(expr, *contraction_indices) + + @classmethod + def _ArrayContraction_denest_ZeroArray(cls, expr, *contraction_indices): + contraction_indices_flat = [j for i in contraction_indices for j in i] + shape = [e for i, e in enumerate(expr.shape) if i not in contraction_indices_flat] + return ZeroArray(*shape) + + @classmethod + def _ArrayContraction_denest_ArrayAdd(cls, expr, *contraction_indices): + return _array_add(*[_array_contraction(i, *contraction_indices) for i in expr.args]) + + @classmethod + def _ArrayContraction_denest_PermuteDims(cls, expr, *contraction_indices): + permutation = expr.permutation + plist = permutation.array_form + new_contraction_indices = [tuple(permutation(j) for j in i) for i in contraction_indices] + new_plist = [i for i in plist if not any(i in j for j in new_contraction_indices)] + new_plist = cls._push_indices_up(new_contraction_indices, new_plist) + return _permute_dims( + _array_contraction(expr.expr, *new_contraction_indices), + Permutation(new_plist) + ) + + @classmethod + def _ArrayContraction_denest_ArrayDiagonal(cls, expr: 'ArrayDiagonal', *contraction_indices): + diagonal_indices = list(expr.diagonal_indices) + down_contraction_indices = expr._push_indices_down(expr.diagonal_indices, contraction_indices, get_rank(expr.expr)) + # Flatten diagonally contracted indices: + down_contraction_indices = [[k for 
j in i for k in (j if isinstance(j, (tuple, Tuple)) else [j])] for i in down_contraction_indices] + new_contraction_indices = [] + for contr_indgrp in down_contraction_indices: + ind = contr_indgrp[:] + for j, diag_indgrp in enumerate(diagonal_indices): + if diag_indgrp is None: + continue + if any(i in diag_indgrp for i in contr_indgrp): + ind.extend(diag_indgrp) + diagonal_indices[j] = None + new_contraction_indices.append(sorted(set(ind))) + + new_diagonal_indices_down = [i for i in diagonal_indices if i is not None] + new_diagonal_indices = ArrayContraction._push_indices_up(new_contraction_indices, new_diagonal_indices_down) + return _array_diagonal( + _array_contraction(expr.expr, *new_contraction_indices), + *new_diagonal_indices + ) + + @classmethod + def _sort_fully_contracted_args(cls, expr, contraction_indices): + if expr.shape is None: + return expr, contraction_indices + cumul = list(accumulate([0] + expr.subranks)) + index_blocks = [list(range(cumul[i], cumul[i+1])) for i in range(len(expr.args))] + contraction_indices_flat = {j for i in contraction_indices for j in i} + fully_contracted = [all(j in contraction_indices_flat for j in range(cumul[i], cumul[i+1])) for i, arg in enumerate(expr.args)] + new_pos = sorted(range(len(expr.args)), key=lambda x: (0, default_sort_key(expr.args[x])) if fully_contracted[x] else (1,)) + new_args = [expr.args[i] for i in new_pos] + new_index_blocks_flat = [j for i in new_pos for j in index_blocks[i]] + index_permutation_array_form = _af_invert(new_index_blocks_flat) + new_contraction_indices = [tuple(index_permutation_array_form[j] for j in i) for i in contraction_indices] + new_contraction_indices = _sort_contraction_indices(new_contraction_indices) + return _array_tensor_product(*new_args), new_contraction_indices + + def _get_contraction_tuples(self): + r""" + Return tuples containing the argument index and position within the + argument of the index position. + + Examples + ======== + + >>> from sympy import MatrixSymbol + >>> from sympy.abc import N + >>> from sympy.tensor.array import tensorproduct, tensorcontraction + >>> A = MatrixSymbol("A", N, N) + >>> B = MatrixSymbol("B", N, N) + + >>> cg = tensorcontraction(tensorproduct(A, B), (1, 2)) + >>> cg._get_contraction_tuples() + [[(0, 1), (1, 0)]] + + Notes + ===== + + Here the contraction pair `(1, 2)` meaning that the 2nd and 3rd indices + of the tensor product `A\otimes B` are contracted, has been transformed + into `(0, 1)` and `(1, 0)`, identifying the same indices in a different + notation. `(0, 1)` is the second index (1) of the first argument (i.e. + 0 or `A`). `(1, 0)` is the first index (i.e. 0) of the second + argument (i.e. 1 or `B`). 
+ """ + mapping = self._mapping + return [[mapping[j] for j in i] for i in self.contraction_indices] + + @staticmethod + def _contraction_tuples_to_contraction_indices(expr, contraction_tuples): + # TODO: check that `expr` has `.subranks`: + ranks = expr.subranks + cumulative_ranks = [0] + list(accumulate(ranks)) + return [tuple(cumulative_ranks[j]+k for j, k in i) for i in contraction_tuples] + + @property + def free_indices(self): + return self._free_indices[:] + + @property + def free_indices_to_position(self): + return dict(self._free_indices_to_position) + + @property + def expr(self): + return self.args[0] + + @property + def contraction_indices(self): + return self.args[1:] + + def _contraction_indices_to_components(self): + expr = self.expr + if not isinstance(expr, ArrayTensorProduct): + raise NotImplementedError("only for contractions of tensor products") + ranks = expr.subranks + mapping = {} + counter = 0 + for i, rank in enumerate(ranks): + for j in range(rank): + mapping[counter] = (i, j) + counter += 1 + return mapping + + def sort_args_by_name(self): + """ + Sort arguments in the tensor product so that their order is lexicographical. + + Examples + ======== + + >>> from sympy.tensor.array.expressions.from_matrix_to_array import convert_matrix_to_array + >>> from sympy import MatrixSymbol + >>> from sympy.abc import N + >>> A = MatrixSymbol("A", N, N) + >>> B = MatrixSymbol("B", N, N) + >>> C = MatrixSymbol("C", N, N) + >>> D = MatrixSymbol("D", N, N) + + >>> cg = convert_matrix_to_array(C*D*A*B) + >>> cg + ArrayContraction(ArrayTensorProduct(A, D, C, B), (0, 3), (1, 6), (2, 5)) + >>> cg.sort_args_by_name() + ArrayContraction(ArrayTensorProduct(A, D, B, C), (0, 3), (1, 4), (2, 7)) + """ + expr = self.expr + if not isinstance(expr, ArrayTensorProduct): + return self + args = expr.args + sorted_data = sorted(enumerate(args), key=lambda x: default_sort_key(x[1])) + pos_sorted, args_sorted = zip(*sorted_data) + reordering_map = {i: pos_sorted.index(i) for i, arg in enumerate(args)} + contraction_tuples = self._get_contraction_tuples() + contraction_tuples = [[(reordering_map[j], k) for j, k in i] for i in contraction_tuples] + c_tp = _array_tensor_product(*args_sorted) + new_contr_indices = self._contraction_tuples_to_contraction_indices( + c_tp, + contraction_tuples + ) + return _array_contraction(c_tp, *new_contr_indices) + + def _get_contraction_links(self): + r""" + Returns a dictionary of links between arguments in the tensor product + being contracted. + + See the example for an explanation of the values. + + Examples + ======== + + >>> from sympy import MatrixSymbol + >>> from sympy.abc import N + >>> from sympy.tensor.array.expressions.from_matrix_to_array import convert_matrix_to_array + >>> A = MatrixSymbol("A", N, N) + >>> B = MatrixSymbol("B", N, N) + >>> C = MatrixSymbol("C", N, N) + >>> D = MatrixSymbol("D", N, N) + + Matrix multiplications are pairwise contractions between neighboring + matrices: + + `A_{ij} B_{jk} C_{kl} D_{lm}` + + >>> cg = convert_matrix_to_array(A*B*C*D) + >>> cg + ArrayContraction(ArrayTensorProduct(B, C, A, D), (0, 5), (1, 2), (3, 6)) + + >>> cg._get_contraction_links() + {0: {0: (2, 1), 1: (1, 0)}, 1: {0: (0, 1), 1: (3, 0)}, 2: {1: (0, 0)}, 3: {0: (1, 1)}} + + This dictionary is interpreted as follows: argument in position 0 (i.e. + matrix `A`) has its second index (i.e. 1) contracted to `(1, 0)`, that + is argument in position 1 (matrix `B`) on the first index slot of `B`, + this is the contraction provided by the index `j` from `A`. 
+ + The argument in position 1 (that is, matrix `B`) has two contractions, + the ones provided by the indices `j` and `k`, respectively the first + and second indices (0 and 1 in the sub-dict). The link `(0, 1)` and + `(2, 0)` respectively. `(0, 1)` is the index slot 1 (the 2nd) of + argument in position 0 (that is, `A_{\ldot j}`), and so on. + """ + args, dlinks = _get_contraction_links([self], self.subranks, *self.contraction_indices) + return dlinks + + def as_explicit(self): + expr = self.expr + if hasattr(expr, "as_explicit"): + expr = expr.as_explicit() + return tensorcontraction(expr, *self.contraction_indices) + + +class Reshape(_CodegenArrayAbstract): + """ + Reshape the dimensions of an array expression. + + Examples + ======== + + >>> from sympy.tensor.array.expressions import ArraySymbol, Reshape + >>> A = ArraySymbol("A", (6,)) + >>> A.shape + (6,) + >>> Reshape(A, (3, 2)).shape + (3, 2) + + Check the component-explicit forms: + + >>> A.as_explicit() + [A[0], A[1], A[2], A[3], A[4], A[5]] + >>> Reshape(A, (3, 2)).as_explicit() + [[A[0], A[1]], [A[2], A[3]], [A[4], A[5]]] + + """ + + def __new__(cls, expr, shape): + expr = _sympify(expr) + if not isinstance(shape, Tuple): + shape = Tuple(*shape) + if Equality(Mul.fromiter(expr.shape), Mul.fromiter(shape)) == False: + raise ValueError("shape mismatch") + obj = Expr.__new__(cls, expr, shape) + obj._shape = tuple(shape) + obj._expr = expr + return obj + + @property + def shape(self): + return self._shape + + @property + def expr(self): + return self._expr + + def doit(self, *args, **kwargs): + if kwargs.get("deep", True): + expr = self.expr.doit(*args, **kwargs) + else: + expr = self.expr + if isinstance(expr, (MatrixCommon, NDimArray)): + return expr.reshape(*self.shape) + return Reshape(expr, self.shape) + + def as_explicit(self): + ee = self.expr + if hasattr(ee, "as_explicit"): + ee = ee.as_explicit() + if isinstance(ee, MatrixCommon): + from sympy import Array + ee = Array(ee) + elif isinstance(ee, MatrixExpr): + return self + return ee.reshape(*self.shape) + + +class _ArgE: + """ + The ``_ArgE`` object contains references to the array expression + (``.element``) and a list containing the information about index + contractions (``.indices``). + + Index contractions are numbered and contracted indices show the number of + the contraction. Uncontracted indices have ``None`` value. + + For example: + ``_ArgE(M, [None, 3])`` + This object means that expression ``M`` is part of an array contraction + and has two indices, the first is not contracted (value ``None``), + the second index is contracted to the 4th (i.e. number ``3``) group of the + array contraction object. + """ + indices: List[Optional[int]] + + def __init__(self, element, indices: Optional[List[Optional[int]]] = None): + self.element = element + if indices is None: + self.indices = [None for i in range(get_rank(element))] + else: + self.indices = indices + + def __str__(self): + return "_ArgE(%s, %s)" % (self.element, self.indices) + + __repr__ = __str__ + + +class _IndPos: + """ + Index position, requiring two integers in the constructor: + + - arg: the position of the argument in the tensor product, + - rel: the relative position of the index inside the argument. 
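+
+    For example, ``_IndPos(1, 0)`` refers to the first index (``rel=0``) of
+    the second argument (``arg=1``) in the tensor product.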
+ """ + def __init__(self, arg: int, rel: int): + self.arg = arg + self.rel = rel + + def __str__(self): + return "_IndPos(%i, %i)" % (self.arg, self.rel) + + __repr__ = __str__ + + def __iter__(self): + yield from [self.arg, self.rel] + + +class _EditArrayContraction: + """ + Utility class to help manipulate array contraction objects. + + This class takes as input an ``ArrayContraction`` object and turns it into + an editable object. + + The field ``args_with_ind`` of this class is a list of ``_ArgE`` objects + which can be used to easily edit the contraction structure of the + expression. + + Once editing is finished, the ``ArrayContraction`` object may be recreated + by calling the ``.to_array_contraction()`` method. + """ + + def __init__(self, base_array: typing.Union[ArrayContraction, ArrayDiagonal, ArrayTensorProduct]): + + expr: Basic + diagonalized: tTuple[tTuple[int, ...], ...] + contraction_indices: List[tTuple[int]] + if isinstance(base_array, ArrayContraction): + mapping = _get_mapping_from_subranks(base_array.subranks) + expr = base_array.expr + contraction_indices = base_array.contraction_indices + diagonalized = () + elif isinstance(base_array, ArrayDiagonal): + + if isinstance(base_array.expr, ArrayContraction): + mapping = _get_mapping_from_subranks(base_array.expr.subranks) + expr = base_array.expr.expr + diagonalized = ArrayContraction._push_indices_down(base_array.expr.contraction_indices, base_array.diagonal_indices) + contraction_indices = base_array.expr.contraction_indices + elif isinstance(base_array.expr, ArrayTensorProduct): + mapping = {} + expr = base_array.expr + diagonalized = base_array.diagonal_indices + contraction_indices = [] + else: + mapping = {} + expr = base_array.expr + diagonalized = base_array.diagonal_indices + contraction_indices = [] + + elif isinstance(base_array, ArrayTensorProduct): + expr = base_array + contraction_indices = [] + diagonalized = () + else: + raise NotImplementedError() + + if isinstance(expr, ArrayTensorProduct): + args = list(expr.args) + else: + args = [expr] + + args_with_ind: List[_ArgE] = [_ArgE(arg) for arg in args] + for i, contraction_tuple in enumerate(contraction_indices): + for j in contraction_tuple: + arg_pos, rel_pos = mapping[j] + args_with_ind[arg_pos].indices[rel_pos] = i + self.args_with_ind: List[_ArgE] = args_with_ind + self.number_of_contraction_indices: int = len(contraction_indices) + self._track_permutation: Optional[List[List[int]]] = None + + mapping = _get_mapping_from_subranks(base_array.subranks) + + # Trick: add diagonalized indices as negative indices into the editor object: + for i, e in enumerate(diagonalized): + for j in e: + arg_pos, rel_pos = mapping[j] + self.args_with_ind[arg_pos].indices[rel_pos] = -1 - i + + def insert_after(self, arg: _ArgE, new_arg: _ArgE): + pos = self.args_with_ind.index(arg) + self.args_with_ind.insert(pos + 1, new_arg) + + def get_new_contraction_index(self): + self.number_of_contraction_indices += 1 + return self.number_of_contraction_indices - 1 + + def refresh_indices(self): + updates = {} + for arg_with_ind in self.args_with_ind: + updates.update({i: -1 for i in arg_with_ind.indices if i is not None}) + for i, e in enumerate(sorted(updates)): + updates[e] = i + self.number_of_contraction_indices = len(updates) + for arg_with_ind in self.args_with_ind: + arg_with_ind.indices = [updates.get(i, None) for i in arg_with_ind.indices] + + def merge_scalars(self): + scalars = [] + for arg_with_ind in self.args_with_ind: + if len(arg_with_ind.indices) == 0: + 
scalars.append(arg_with_ind) + for i in scalars: + self.args_with_ind.remove(i) + scalar = Mul.fromiter([i.element for i in scalars]) + if len(self.args_with_ind) == 0: + self.args_with_ind.append(_ArgE(scalar)) + else: + from sympy.tensor.array.expressions.from_array_to_matrix import _a2m_tensor_product + self.args_with_ind[0].element = _a2m_tensor_product(scalar, self.args_with_ind[0].element) + + def to_array_contraction(self): + + # Count the ranks of the arguments: + counter = 0 + # Create a collector for the new diagonal indices: + diag_indices = defaultdict(list) + + count_index_freq = Counter() + for arg_with_ind in self.args_with_ind: + count_index_freq.update(Counter(arg_with_ind.indices)) + + free_index_count = count_index_freq[None] + + # Construct the inverse permutation: + inv_perm1 = [] + inv_perm2 = [] + # Keep track of which diagonal indices have already been processed: + done = set() + + # Counter for the diagonal indices: + counter4 = 0 + + for arg_with_ind in self.args_with_ind: + # If some diagonalization axes have been removed, they should be + # permuted in order to keep the permutation. + # Add permutation here + counter2 = 0 # counter for the indices + for i in arg_with_ind.indices: + if i is None: + inv_perm1.append(counter4) + counter2 += 1 + counter4 += 1 + continue + if i >= 0: + continue + # Reconstruct the diagonal indices: + diag_indices[-1 - i].append(counter + counter2) + if count_index_freq[i] == 1 and i not in done: + inv_perm1.append(free_index_count - 1 - i) + done.add(i) + elif i not in done: + inv_perm2.append(free_index_count - 1 - i) + done.add(i) + counter2 += 1 + # Remove negative indices to restore a proper editor object: + arg_with_ind.indices = [i if i is not None and i >= 0 else None for i in arg_with_ind.indices] + counter += len([i for i in arg_with_ind.indices if i is None or i < 0]) + + inverse_permutation = inv_perm1 + inv_perm2 + permutation = _af_invert(inverse_permutation) + + # Get the diagonal indices after the detection of HadamardProduct in the expression: + diag_indices_filtered = [tuple(v) for v in diag_indices.values() if len(v) > 1] + + self.merge_scalars() + self.refresh_indices() + args = [arg.element for arg in self.args_with_ind] + contraction_indices = self.get_contraction_indices() + expr = _array_contraction(_array_tensor_product(*args), *contraction_indices) + expr2 = _array_diagonal(expr, *diag_indices_filtered) + if self._track_permutation is not None: + permutation2 = _af_invert([j for i in self._track_permutation for j in i]) + expr2 = _permute_dims(expr2, permutation2) + + expr3 = _permute_dims(expr2, permutation) + return expr3 + + def get_contraction_indices(self) -> List[List[int]]: + contraction_indices: List[List[int]] = [[] for i in range(self.number_of_contraction_indices)] + current_position: int = 0 + for i, arg_with_ind in enumerate(self.args_with_ind): + for j in arg_with_ind.indices: + if j is not None: + contraction_indices[j].append(current_position) + current_position += 1 + return contraction_indices + + def get_mapping_for_index(self, ind) -> List[_IndPos]: + if ind >= self.number_of_contraction_indices: + raise ValueError("index value exceeding the index range") + positions: List[_IndPos] = [] + for i, arg_with_ind in enumerate(self.args_with_ind): + for j, arg_ind in enumerate(arg_with_ind.indices): + if ind == arg_ind: + positions.append(_IndPos(i, j)) + return positions + + def get_contraction_indices_to_ind_rel_pos(self) -> List[List[_IndPos]]: + contraction_indices: List[List[_IndPos]] = [[] 
for i in range(self.number_of_contraction_indices)] + for i, arg_with_ind in enumerate(self.args_with_ind): + for j, ind in enumerate(arg_with_ind.indices): + if ind is not None: + contraction_indices[ind].append(_IndPos(i, j)) + return contraction_indices + + def count_args_with_index(self, index: int) -> int: + """ + Count the number of arguments that have the given index. + """ + counter: int = 0 + for arg_with_ind in self.args_with_ind: + if index in arg_with_ind.indices: + counter += 1 + return counter + + def get_args_with_index(self, index: int) -> List[_ArgE]: + """ + Get a list of arguments having the given index. + """ + ret: List[_ArgE] = [i for i in self.args_with_ind if index in i.indices] + return ret + + @property + def number_of_diagonal_indices(self): + data = set() + for arg in self.args_with_ind: + data.update({i for i in arg.indices if i is not None and i < 0}) + return len(data) + + def track_permutation_start(self): + permutation = [] + perm_diag = [] + counter = 0 + counter2 = -1 + for arg_with_ind in self.args_with_ind: + perm = [] + for i in arg_with_ind.indices: + if i is not None: + if i < 0: + perm_diag.append(counter2) + counter2 -= 1 + continue + perm.append(counter) + counter += 1 + permutation.append(perm) + max_ind = max([max(i) if i else -1 for i in permutation]) if permutation else -1 + perm_diag = [max_ind - i for i in perm_diag] + self._track_permutation = permutation + [perm_diag] + + def track_permutation_merge(self, destination: _ArgE, from_element: _ArgE): + index_destination = self.args_with_ind.index(destination) + index_element = self.args_with_ind.index(from_element) + self._track_permutation[index_destination].extend(self._track_permutation[index_element]) # type: ignore + self._track_permutation.pop(index_element) # type: ignore + + def get_absolute_free_range(self, arg: _ArgE) -> typing.Tuple[int, int]: + """ + Return the range of the free indices of the arg as absolute positions + among all free indices. + """ + counter = 0 + for arg_with_ind in self.args_with_ind: + number_free_indices = len([i for i in arg_with_ind.indices if i is None]) + if arg_with_ind == arg: + return counter, counter + number_free_indices + counter += number_free_indices + raise IndexError("argument not found") + + def get_absolute_range(self, arg: _ArgE) -> typing.Tuple[int, int]: + """ + Return the absolute range of indices for arg, disregarding dummy + indices. 
+ """ + counter = 0 + for arg_with_ind in self.args_with_ind: + number_indices = len(arg_with_ind.indices) + if arg_with_ind == arg: + return counter, counter + number_indices + counter += number_indices + raise IndexError("argument not found") + + +def get_rank(expr): + if isinstance(expr, (MatrixExpr, MatrixElement)): + return 2 + if isinstance(expr, _CodegenArrayAbstract): + return len(expr.shape) + if isinstance(expr, NDimArray): + return expr.rank() + if isinstance(expr, Indexed): + return expr.rank + if isinstance(expr, IndexedBase): + shape = expr.shape + if shape is None: + return -1 + else: + return len(shape) + if hasattr(expr, "shape"): + return len(expr.shape) + return 0 + + +def _get_subrank(expr): + if isinstance(expr, _CodegenArrayAbstract): + return expr.subrank() + return get_rank(expr) + + +def _get_subranks(expr): + if isinstance(expr, _CodegenArrayAbstract): + return expr.subranks + else: + return [get_rank(expr)] + + +def get_shape(expr): + if hasattr(expr, "shape"): + return expr.shape + return () + + +def nest_permutation(expr): + if isinstance(expr, PermuteDims): + return expr.nest_permutation() + else: + return expr + + +def _array_tensor_product(*args, **kwargs): + return ArrayTensorProduct(*args, canonicalize=True, **kwargs) + + +def _array_contraction(expr, *contraction_indices, **kwargs): + return ArrayContraction(expr, *contraction_indices, canonicalize=True, **kwargs) + + +def _array_diagonal(expr, *diagonal_indices, **kwargs): + return ArrayDiagonal(expr, *diagonal_indices, canonicalize=True, **kwargs) + + +def _permute_dims(expr, permutation, **kwargs): + return PermuteDims(expr, permutation, canonicalize=True, **kwargs) + + +def _array_add(*args, **kwargs): + return ArrayAdd(*args, canonicalize=True, **kwargs) + + +def _get_array_element_or_slice(expr, indices): + return ArrayElement(expr, indices) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/conv_array_to_indexed.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/conv_array_to_indexed.py new file mode 100644 index 0000000000000000000000000000000000000000..1929c3401e131cca0a83080131ead9198b37bcbb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/conv_array_to_indexed.py @@ -0,0 +1,12 @@ +from sympy.tensor.array.expressions import from_array_to_indexed +from sympy.utilities.decorator import deprecated + + +_conv_to_from_decorator = deprecated( + "module has been renamed by replacing 'conv_' with 'from_' in its name", + deprecated_since_version="1.11", + active_deprecations_target="deprecated-conv-array-expr-module-names", +) + + +convert_array_to_indexed = _conv_to_from_decorator(from_array_to_indexed.convert_array_to_indexed) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/conv_array_to_matrix.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/conv_array_to_matrix.py new file mode 100644 index 0000000000000000000000000000000000000000..2708e74aaa98d6ee38eae46d97d4483a546e0776 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/conv_array_to_matrix.py @@ -0,0 +1,6 @@ +from sympy.tensor.array.expressions import from_array_to_matrix +from sympy.tensor.array.expressions.conv_array_to_indexed import _conv_to_from_decorator + +convert_array_to_matrix = _conv_to_from_decorator(from_array_to_matrix.convert_array_to_matrix) +_array2matrix = _conv_to_from_decorator(from_array_to_matrix._array2matrix) 
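+# Editor's illustrative note (not part of the upstream SymPy source): the
+# conv_* modules are thin deprecation shims -- each public name is simply the
+# corresponding from_* function wrapped by sympy.utilities.decorator.deprecated,
+# so legacy imports keep working but emit a deprecation warning when used.
+# A minimal, hedged usage sketch (symbol names are illustrative only):
+#
+#     from sympy import MatrixSymbol
+#     from sympy.tensor.array.expressions.from_matrix_to_array import convert_matrix_to_array
+#     from sympy.tensor.array.expressions.conv_array_to_matrix import convert_array_to_matrix
+#
+#     A = MatrixSymbol("A", 2, 2)
+#     B = MatrixSymbol("B", 2, 2)
+#     cg = convert_matrix_to_array(A*B)   # array form of the matrix product
+#     convert_array_to_matrix(cg)         # -> A*B, with a deprecation warning
+#                                         #    coming from this shim module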
+_remove_trivial_dims = _conv_to_from_decorator(from_array_to_matrix._remove_trivial_dims) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/conv_indexed_to_array.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/conv_indexed_to_array.py new file mode 100644 index 0000000000000000000000000000000000000000..6058b31f20778834ea23a01553d594b7965eb6bb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/conv_indexed_to_array.py @@ -0,0 +1,4 @@ +from sympy.tensor.array.expressions import from_indexed_to_array +from sympy.tensor.array.expressions.conv_array_to_indexed import _conv_to_from_decorator + +convert_indexed_to_array = _conv_to_from_decorator(from_indexed_to_array.convert_indexed_to_array) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/conv_matrix_to_array.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/conv_matrix_to_array.py new file mode 100644 index 0000000000000000000000000000000000000000..46469df60703c237527c0b2834235309640afe7c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/conv_matrix_to_array.py @@ -0,0 +1,4 @@ +from sympy.tensor.array.expressions import from_matrix_to_array +from sympy.tensor.array.expressions.conv_array_to_indexed import _conv_to_from_decorator + +convert_matrix_to_array = _conv_to_from_decorator(from_matrix_to_array.convert_matrix_to_array) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/from_array_to_indexed.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/from_array_to_indexed.py new file mode 100644 index 0000000000000000000000000000000000000000..9eb86e7cfbe31ebfe7c9649803d9cb5e34b98276 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/from_array_to_indexed.py @@ -0,0 +1,84 @@ +import collections.abc +import operator +from itertools import accumulate + +from sympy import Mul, Sum, Dummy, Add +from sympy.tensor.array.expressions import PermuteDims, ArrayAdd, ArrayElementwiseApplyFunc, Reshape +from sympy.tensor.array.expressions.array_expressions import ArrayTensorProduct, get_rank, ArrayContraction, \ + ArrayDiagonal, get_shape, _get_array_element_or_slice, _ArrayExpr +from sympy.tensor.array.expressions.utils import _apply_permutation_to_list + + +def convert_array_to_indexed(expr, indices): + return _ConvertArrayToIndexed().do_convert(expr, indices) + + +class _ConvertArrayToIndexed: + + def __init__(self): + self.count_dummies = 0 + + def do_convert(self, expr, indices): + if isinstance(expr, ArrayTensorProduct): + cumul = list(accumulate([0] + [get_rank(arg) for arg in expr.args])) + indices_grp = [indices[cumul[i]:cumul[i+1]] for i in range(len(expr.args))] + return Mul.fromiter(self.do_convert(arg, ind) for arg, ind in zip(expr.args, indices_grp)) + if isinstance(expr, ArrayContraction): + new_indices = [None for i in range(get_rank(expr.expr))] + limits = [] + bottom_shape = get_shape(expr.expr) + for contraction_index_grp in expr.contraction_indices: + d = Dummy(f"d{self.count_dummies}") + self.count_dummies += 1 + dim = bottom_shape[contraction_index_grp[0]] + limits.append((d, 0, dim-1)) + for i in contraction_index_grp: + new_indices[i] = d + j = 0 + for i in range(len(new_indices)): + if new_indices[i] is None: + new_indices[i] = indices[j] + j += 1 + newexpr = self.do_convert(expr.expr, new_indices) + return Sum(newexpr, *limits) 
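+        # Editor's worked sketch (not upstream code): for a matrix-multiply style
+        # contraction such as ArrayContraction(ArrayTensorProduct(A, B), (1, 2))
+        # converted with indices [i, j], the branch above allocates one Dummy d0
+        # for the contraction group (1, 2), fills new_indices = [i, d0, d0, j]
+        # for the underlying tensor product, and returns roughly
+        #     Sum(A[i, d0]*B[d0, j], (d0, 0, k - 1))
+        # where k is the contracted dimension read from the shape of the base
+        # expression.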
+ if isinstance(expr, ArrayDiagonal): + new_indices = [None for i in range(get_rank(expr.expr))] + ind_pos = expr._push_indices_down(expr.diagonal_indices, list(range(len(indices))), get_rank(expr)) + for i, index in zip(ind_pos, indices): + if isinstance(i, collections.abc.Iterable): + for j in i: + new_indices[j] = index + else: + new_indices[i] = index + newexpr = self.do_convert(expr.expr, new_indices) + return newexpr + if isinstance(expr, PermuteDims): + permuted_indices = _apply_permutation_to_list(expr.permutation, indices) + return self.do_convert(expr.expr, permuted_indices) + if isinstance(expr, ArrayAdd): + return Add.fromiter(self.do_convert(arg, indices) for arg in expr.args) + if isinstance(expr, _ArrayExpr): + return expr.__getitem__(tuple(indices)) + if isinstance(expr, ArrayElementwiseApplyFunc): + return expr.function(self.do_convert(expr.expr, indices)) + if isinstance(expr, Reshape): + shape_up = expr.shape + shape_down = get_shape(expr.expr) + cumul = list(accumulate([1] + list(reversed(shape_up)), operator.mul)) + one_index = Add.fromiter(i*s for i, s in zip(reversed(indices), cumul)) + dest_indices = [None for _ in shape_down] + c = 1 + for i, e in enumerate(reversed(shape_down)): + if c == 1: + if i == len(shape_down) - 1: + dest_indices[i] = one_index + else: + dest_indices[i] = one_index % e + elif i == len(shape_down) - 1: + dest_indices[i] = one_index // c + else: + dest_indices[i] = one_index // c % e + c *= e + dest_indices.reverse() + return self.do_convert(expr.expr, dest_indices) + return _get_array_element_or_slice(expr, indices) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/from_array_to_matrix.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/from_array_to_matrix.py new file mode 100644 index 0000000000000000000000000000000000000000..69acd9cac52d8c8817409278f82cf41a098aaf6e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/from_array_to_matrix.py @@ -0,0 +1,1003 @@ +import itertools +from collections import defaultdict +from typing import Tuple as tTuple, Union as tUnion, FrozenSet, Dict as tDict, List, Optional +from functools import singledispatch +from itertools import accumulate + +from sympy import MatMul, Basic, Wild, KroneckerProduct +from sympy.assumptions.ask import (Q, ask) +from sympy.core.mul import Mul +from sympy.core.singleton import S +from sympy.matrices.expressions.diagonal import DiagMatrix +from sympy.matrices.expressions.hadamard import hadamard_product, HadamardPower +from sympy.matrices.expressions.matexpr import MatrixExpr +from sympy.matrices.expressions.special import (Identity, ZeroMatrix, OneMatrix) +from sympy.matrices.expressions.trace import Trace +from sympy.matrices.expressions.transpose import Transpose +from sympy.combinatorics.permutations import _af_invert, Permutation +from sympy.matrices.common import MatrixCommon +from sympy.matrices.expressions.applyfunc import ElementwiseApplyFunction +from sympy.matrices.expressions.matexpr import MatrixElement +from sympy.tensor.array.expressions.array_expressions import PermuteDims, ArrayDiagonal, \ + ArrayTensorProduct, OneArray, get_rank, _get_subrank, ZeroArray, ArrayContraction, \ + ArrayAdd, _CodegenArrayAbstract, get_shape, ArrayElementwiseApplyFunc, _ArrayExpr, _EditArrayContraction, _ArgE, \ + ArrayElement, _array_tensor_product, _array_contraction, _array_diagonal, _array_add, _permute_dims +from sympy.tensor.array.expressions.utils import 
_get_mapping_from_subranks + + +def _get_candidate_for_matmul_from_contraction(scan_indices: List[Optional[int]], remaining_args: List[_ArgE]) -> tTuple[Optional[_ArgE], bool, int]: + + scan_indices_int: List[int] = [i for i in scan_indices if i is not None] + if len(scan_indices_int) == 0: + return None, False, -1 + + transpose: bool = False + candidate: Optional[_ArgE] = None + candidate_index: int = -1 + for arg_with_ind2 in remaining_args: + if not isinstance(arg_with_ind2.element, MatrixExpr): + continue + for index in scan_indices_int: + if candidate_index != -1 and candidate_index != index: + # A candidate index has already been selected, check + # repetitions only for that index: + continue + if index in arg_with_ind2.indices: + if set(arg_with_ind2.indices) == {index}: + # Index repeated twice in arg_with_ind2 + candidate = None + break + if candidate is None: + candidate = arg_with_ind2 + candidate_index = index + transpose = (index == arg_with_ind2.indices[1]) + else: + # Index repeated more than twice, break + candidate = None + break + return candidate, transpose, candidate_index + + +def _insert_candidate_into_editor(editor: _EditArrayContraction, arg_with_ind: _ArgE, candidate: _ArgE, transpose1: bool, transpose2: bool): + other = candidate.element + other_index: Optional[int] + if transpose2: + other = Transpose(other) + other_index = candidate.indices[0] + else: + other_index = candidate.indices[1] + new_element = (Transpose(arg_with_ind.element) if transpose1 else arg_with_ind.element) * other + editor.args_with_ind.remove(candidate) + new_arge = _ArgE(new_element) + return new_arge, other_index + + +def _support_function_tp1_recognize(contraction_indices, args): + if len(contraction_indices) == 0: + return _a2m_tensor_product(*args) + + ac = _array_contraction(_array_tensor_product(*args), *contraction_indices) + editor = _EditArrayContraction(ac) + editor.track_permutation_start() + + while True: + flag_stop = True + for i, arg_with_ind in enumerate(editor.args_with_ind): + if not isinstance(arg_with_ind.element, MatrixExpr): + continue + + first_index = arg_with_ind.indices[0] + second_index = arg_with_ind.indices[1] + + first_frequency = editor.count_args_with_index(first_index) + second_frequency = editor.count_args_with_index(second_index) + + if first_index is not None and first_frequency == 1 and first_index == second_index: + flag_stop = False + arg_with_ind.element = Trace(arg_with_ind.element)._normalize() + arg_with_ind.indices = [] + break + + scan_indices = [] + if first_frequency == 2: + scan_indices.append(first_index) + if second_frequency == 2: + scan_indices.append(second_index) + + candidate, transpose, found_index = _get_candidate_for_matmul_from_contraction(scan_indices, editor.args_with_ind[i+1:]) + if candidate is not None: + flag_stop = False + editor.track_permutation_merge(arg_with_ind, candidate) + transpose1 = found_index == first_index + new_arge, other_index = _insert_candidate_into_editor(editor, arg_with_ind, candidate, transpose1, transpose) + if found_index == first_index: + new_arge.indices = [second_index, other_index] + else: + new_arge.indices = [first_index, other_index] + set_indices = set(new_arge.indices) + if len(set_indices) == 1 and set_indices != {None}: + # This is a trace: + new_arge.element = Trace(new_arge.element)._normalize() + new_arge.indices = [] + editor.args_with_ind[i] = new_arge + # TODO: is this break necessary? 
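+                # Editor's note: the break that follows looks necessary in
+                # practice -- the candidate argument was removed and
+                # args_with_ind[i] was replaced just above, so the enumerate()
+                # over the old list is stale; with flag_stop already set to
+                # False, breaking lets the outer while-loop rescan the
+                # refreshed argument list from the start.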
+ break + + if flag_stop: + break + + editor.refresh_indices() + return editor.to_array_contraction() + + +def _find_trivial_matrices_rewrite(expr: ArrayTensorProduct): + # If there are matrices of trivial shape in the tensor product (i.e. shape + # (1, 1)), try to check if there is a suitable non-trivial MatMul where the + # expression can be inserted. + + # For example, if "a" has shape (1, 1) and "b" has shape (k, 1), the + # expressions "_array_tensor_product(a, b*b.T)" can be rewritten as + # "b*a*b.T" + + trivial_matrices = [] + pos: Optional[int] = None + first: Optional[MatrixExpr] = None + second: Optional[MatrixExpr] = None + removed: List[int] = [] + counter: int = 0 + args: List[Optional[Basic]] = list(expr.args) + for i, arg in enumerate(expr.args): + if isinstance(arg, MatrixExpr): + if arg.shape == (1, 1): + trivial_matrices.append(arg) + args[i] = None + removed.extend([counter, counter+1]) + elif pos is None and isinstance(arg, MatMul): + margs = arg.args + for j, e in enumerate(margs): + if isinstance(e, MatrixExpr) and e.shape[1] == 1: + pos = i + first = MatMul.fromiter(margs[:j+1]) + second = MatMul.fromiter(margs[j+1:]) + break + counter += get_rank(arg) + if pos is None: + return expr, [] + args[pos] = (first*MatMul.fromiter(i for i in trivial_matrices)*second).doit() + return _array_tensor_product(*[i for i in args if i is not None]), removed + + +def _find_trivial_kronecker_products_broadcast(expr: ArrayTensorProduct): + newargs: List[Basic] = [] + removed = [] + count_dims = 0 + for i, arg in enumerate(expr.args): + count_dims += get_rank(arg) + shape = get_shape(arg) + current_range = [count_dims-i for i in range(len(shape), 0, -1)] + if (shape == (1, 1) and len(newargs) > 0 and 1 not in get_shape(newargs[-1]) and + isinstance(newargs[-1], MatrixExpr) and isinstance(arg, MatrixExpr)): + # KroneckerProduct object allows the trick of broadcasting: + newargs[-1] = KroneckerProduct(newargs[-1], arg) + removed.extend(current_range) + elif 1 not in shape and len(newargs) > 0 and get_shape(newargs[-1]) == (1, 1): + # Broadcast: + newargs[-1] = KroneckerProduct(newargs[-1], arg) + prev_range = [i for i in range(min(current_range)) if i not in removed] + removed.extend(prev_range[-2:]) + else: + newargs.append(arg) + return _array_tensor_product(*newargs), removed + + +@singledispatch +def _array2matrix(expr): + return expr + + +@_array2matrix.register(ZeroArray) +def _(expr: ZeroArray): + if get_rank(expr) == 2: + return ZeroMatrix(*expr.shape) + else: + return expr + + +@_array2matrix.register(ArrayTensorProduct) +def _(expr: ArrayTensorProduct): + return _a2m_tensor_product(*[_array2matrix(arg) for arg in expr.args]) + + +@_array2matrix.register(ArrayContraction) +def _(expr: ArrayContraction): + expr = expr.flatten_contraction_of_diagonal() + expr = identify_removable_identity_matrices(expr) + expr = expr.split_multiple_contractions() + expr = identify_hadamard_products(expr) + if not isinstance(expr, ArrayContraction): + return _array2matrix(expr) + subexpr = expr.expr + contraction_indices: tTuple[tTuple[int]] = expr.contraction_indices + if contraction_indices == ((0,), (1,)) or ( + contraction_indices == ((0,),) and subexpr.shape[1] == 1 + ) or ( + contraction_indices == ((1,),) and subexpr.shape[0] == 1 + ): + shape = subexpr.shape + subexpr = _array2matrix(subexpr) + if isinstance(subexpr, MatrixExpr): + return OneMatrix(1, shape[0])*subexpr*OneMatrix(shape[1], 1) + if isinstance(subexpr, ArrayTensorProduct): + newexpr = 
_array_contraction(_array2matrix(subexpr), *contraction_indices) + contraction_indices = newexpr.contraction_indices + if any(i > 2 for i in newexpr.subranks): + addends = _array_add(*[_a2m_tensor_product(*j) for j in itertools.product(*[i.args if isinstance(i, + ArrayAdd) else [i] for i in expr.expr.args])]) + newexpr = _array_contraction(addends, *contraction_indices) + if isinstance(newexpr, ArrayAdd): + ret = _array2matrix(newexpr) + return ret + assert isinstance(newexpr, ArrayContraction) + ret = _support_function_tp1_recognize(contraction_indices, list(newexpr.expr.args)) + return ret + elif not isinstance(subexpr, _CodegenArrayAbstract): + ret = _array2matrix(subexpr) + if isinstance(ret, MatrixExpr): + assert expr.contraction_indices == ((0, 1),) + return _a2m_trace(ret) + else: + return _array_contraction(ret, *expr.contraction_indices) + + +@_array2matrix.register(ArrayDiagonal) +def _(expr: ArrayDiagonal): + pexpr = _array_diagonal(_array2matrix(expr.expr), *expr.diagonal_indices) + pexpr = identify_hadamard_products(pexpr) + if isinstance(pexpr, ArrayDiagonal): + pexpr = _array_diag2contr_diagmatrix(pexpr) + if expr == pexpr: + return expr + return _array2matrix(pexpr) + + +@_array2matrix.register(PermuteDims) +def _(expr: PermuteDims): + if expr.permutation.array_form == [1, 0]: + return _a2m_transpose(_array2matrix(expr.expr)) + elif isinstance(expr.expr, ArrayTensorProduct): + ranks = expr.expr.subranks + inv_permutation = expr.permutation**(-1) + newrange = [inv_permutation(i) for i in range(sum(ranks))] + newpos = [] + counter = 0 + for rank in ranks: + newpos.append(newrange[counter:counter+rank]) + counter += rank + newargs = [] + newperm = [] + scalars = [] + for pos, arg in zip(newpos, expr.expr.args): + if len(pos) == 0: + scalars.append(_array2matrix(arg)) + elif pos == sorted(pos): + newargs.append((_array2matrix(arg), pos[0])) + newperm.extend(pos) + elif len(pos) == 2: + newargs.append((_a2m_transpose(_array2matrix(arg)), pos[0])) + newperm.extend(reversed(pos)) + else: + raise NotImplementedError() + newargs = [i[0] for i in newargs] + return _permute_dims(_a2m_tensor_product(*scalars, *newargs), _af_invert(newperm)) + elif isinstance(expr.expr, ArrayContraction): + mat_mul_lines = _array2matrix(expr.expr) + if not isinstance(mat_mul_lines, ArrayTensorProduct): + return _permute_dims(mat_mul_lines, expr.permutation) + # TODO: this assumes that all arguments are matrices, it may not be the case: + permutation = Permutation(2*len(mat_mul_lines.args)-1)*expr.permutation + permuted = [permutation(i) for i in range(2*len(mat_mul_lines.args))] + args_array = [None for i in mat_mul_lines.args] + for i in range(len(mat_mul_lines.args)): + p1 = permuted[2*i] + p2 = permuted[2*i+1] + if p1 // 2 != p2 // 2: + return _permute_dims(mat_mul_lines, permutation) + if p1 > p2: + args_array[i] = _a2m_transpose(mat_mul_lines.args[p1 // 2]) + else: + args_array[i] = mat_mul_lines.args[p1 // 2] + return _a2m_tensor_product(*args_array) + else: + return expr + + +@_array2matrix.register(ArrayAdd) +def _(expr: ArrayAdd): + addends = [_array2matrix(arg) for arg in expr.args] + return _a2m_add(*addends) + + +@_array2matrix.register(ArrayElementwiseApplyFunc) +def _(expr: ArrayElementwiseApplyFunc): + subexpr = _array2matrix(expr.expr) + if isinstance(subexpr, MatrixExpr): + if subexpr.shape != (1, 1): + d = expr.function.bound_symbols[0] + w = Wild("w", exclude=[d]) + p = Wild("p", exclude=[d]) + m = expr.function.expr.match(w*d**p) + if m is not None: + return 
m[w]*HadamardPower(subexpr, m[p]) + return ElementwiseApplyFunction(expr.function, subexpr) + else: + return ArrayElementwiseApplyFunc(expr.function, subexpr) + + +@_array2matrix.register(ArrayElement) +def _(expr: ArrayElement): + ret = _array2matrix(expr.name) + if isinstance(ret, MatrixExpr): + return MatrixElement(ret, *expr.indices) + return ArrayElement(ret, expr.indices) + + +@singledispatch +def _remove_trivial_dims(expr): + return expr, [] + + +@_remove_trivial_dims.register(ArrayTensorProduct) +def _(expr: ArrayTensorProduct): + # Recognize expressions like [x, y] with shape (k, 1, k, 1) as `x*y.T`. + # The matrix expression has to be equivalent to the tensor product of the + # matrices, with trivial dimensions (i.e. dim=1) dropped. + # That is, add contractions over trivial dimensions: + + removed = [] + newargs = [] + cumul = list(accumulate([0] + [get_rank(arg) for arg in expr.args])) + pending = None + prev_i = None + for i, arg in enumerate(expr.args): + current_range = list(range(cumul[i], cumul[i+1])) + if isinstance(arg, OneArray): + removed.extend(current_range) + continue + if not isinstance(arg, (MatrixExpr, MatrixCommon)): + rarg, rem = _remove_trivial_dims(arg) + removed.extend(rem) + newargs.append(rarg) + continue + elif getattr(arg, "is_Identity", False) and arg.shape == (1, 1): + if arg.shape == (1, 1): + # Ignore identity matrices of shape (1, 1) - they are equivalent to scalar 1. + removed.extend(current_range) + continue + elif arg.shape == (1, 1): + arg, _ = _remove_trivial_dims(arg) + # Matrix is equivalent to scalar: + if len(newargs) == 0: + newargs.append(arg) + elif 1 in get_shape(newargs[-1]): + if newargs[-1].shape[1] == 1: + newargs[-1] = newargs[-1]*arg + else: + newargs[-1] = arg*newargs[-1] + removed.extend(current_range) + else: + newargs.append(arg) + elif 1 in arg.shape: + k = [i for i in arg.shape if i != 1][0] + if pending is None: + pending = k + prev_i = i + newargs.append(arg) + elif pending == k: + prev = newargs[-1] + if prev.shape[0] == 1: + d1 = cumul[prev_i] + prev = _a2m_transpose(prev) + else: + d1 = cumul[prev_i] + 1 + if arg.shape[1] == 1: + d2 = cumul[i] + 1 + arg = _a2m_transpose(arg) + else: + d2 = cumul[i] + newargs[-1] = prev*arg + pending = None + removed.extend([d1, d2]) + else: + newargs.append(arg) + pending = k + prev_i = i + else: + newargs.append(arg) + pending = None + newexpr, newremoved = _a2m_tensor_product(*newargs), sorted(removed) + if isinstance(newexpr, ArrayTensorProduct): + newexpr, newremoved2 = _find_trivial_matrices_rewrite(newexpr) + newremoved = _combine_removed(-1, newremoved, newremoved2) + if isinstance(newexpr, ArrayTensorProduct): + newexpr, newremoved2 = _find_trivial_kronecker_products_broadcast(newexpr) + newremoved = _combine_removed(-1, newremoved, newremoved2) + return newexpr, newremoved + + +@_remove_trivial_dims.register(ArrayAdd) +def _(expr: ArrayAdd): + rec = [_remove_trivial_dims(arg) for arg in expr.args] + newargs, removed = zip(*rec) + if len({get_shape(i) for i in newargs}) > 1: + return expr, [] + if len(removed) == 0: + return expr, removed + removed1 = removed[0] + return _a2m_add(*newargs), removed1 + + +@_remove_trivial_dims.register(PermuteDims) +def _(expr: PermuteDims): + subexpr, subremoved = _remove_trivial_dims(expr.expr) + p = expr.permutation.array_form + pinv = _af_invert(expr.permutation.array_form) + shift = list(accumulate([1 if i in subremoved else 0 for i in range(len(p))])) + premoved = [pinv[i] for i in subremoved] + p2 = [e - shift[e] for i, e in enumerate(p) 
if e not in subremoved] + # TODO: check if subremoved should be permuted as well... + newexpr = _permute_dims(subexpr, p2) + premoved = sorted(premoved) + if newexpr != expr: + newexpr, removed2 = _remove_trivial_dims(_array2matrix(newexpr)) + premoved = _combine_removed(-1, premoved, removed2) + return newexpr, premoved + + +@_remove_trivial_dims.register(ArrayContraction) +def _(expr: ArrayContraction): + new_expr, removed0 = _array_contraction_to_diagonal_multiple_identity(expr) + if new_expr != expr: + new_expr2, removed1 = _remove_trivial_dims(_array2matrix(new_expr)) + removed = _combine_removed(-1, removed0, removed1) + return new_expr2, removed + rank1 = get_rank(expr) + expr, removed1 = remove_identity_matrices(expr) + if not isinstance(expr, ArrayContraction): + expr2, removed2 = _remove_trivial_dims(expr) + return expr2, _combine_removed(rank1, removed1, removed2) + newexpr, removed2 = _remove_trivial_dims(expr.expr) + shifts = list(accumulate([1 if i in removed2 else 0 for i in range(get_rank(expr.expr))])) + new_contraction_indices = [tuple(j for j in i if j not in removed2) for i in expr.contraction_indices] + # Remove possible empty tuples "()": + new_contraction_indices = [i for i in new_contraction_indices if len(i) > 0] + contraction_indices_flat = [j for i in expr.contraction_indices for j in i] + removed2 = [i for i in removed2 if i not in contraction_indices_flat] + new_contraction_indices = [tuple(j - shifts[j] for j in i) for i in new_contraction_indices] + # Shift removed2: + removed2 = ArrayContraction._push_indices_up(expr.contraction_indices, removed2) + removed = _combine_removed(rank1, removed1, removed2) + return _array_contraction(newexpr, *new_contraction_indices), list(removed) + + +def _remove_diagonalized_identity_matrices(expr: ArrayDiagonal): + assert isinstance(expr, ArrayDiagonal) + editor = _EditArrayContraction(expr) + mapping = {i: {j for j in editor.args_with_ind if i in j.indices} for i in range(-1, -1-editor.number_of_diagonal_indices, -1)} + removed = [] + counter: int = 0 + for i, arg_with_ind in enumerate(editor.args_with_ind): + counter += len(arg_with_ind.indices) + if isinstance(arg_with_ind.element, Identity): + if None in arg_with_ind.indices and any(i is not None and (i < 0) == True for i in arg_with_ind.indices): + diag_ind = [j for j in arg_with_ind.indices if j is not None][0] + other = [j for j in mapping[diag_ind] if j != arg_with_ind][0] + if not isinstance(other.element, MatrixExpr): + continue + if 1 not in other.element.shape: + continue + if None not in other.indices: + continue + editor.args_with_ind[i].element = None + none_index = other.indices.index(None) + other.element = DiagMatrix(other.element) + other_range = editor.get_absolute_range(other) + removed.extend([other_range[0] + none_index]) + editor.args_with_ind = [i for i in editor.args_with_ind if i.element is not None] + removed = ArrayDiagonal._push_indices_up(expr.diagonal_indices, removed, get_rank(expr.expr)) + return editor.to_array_contraction(), removed + + +@_remove_trivial_dims.register(ArrayDiagonal) +def _(expr: ArrayDiagonal): + newexpr, removed = _remove_trivial_dims(expr.expr) + shifts = list(accumulate([0] + [1 if i in removed else 0 for i in range(get_rank(expr.expr))])) + new_diag_indices_map = {i: tuple(j for j in i if j not in removed) for i in expr.diagonal_indices} + for old_diag_tuple, new_diag_tuple in new_diag_indices_map.items(): + if len(new_diag_tuple) == 1: + removed = [i for i in removed if i not in old_diag_tuple] + new_diag_indices = 
[tuple(j - shifts[j] for j in i) for i in new_diag_indices_map.values()] + rank = get_rank(expr.expr) + removed = ArrayDiagonal._push_indices_up(expr.diagonal_indices, removed, rank) + removed = sorted(set(removed)) + # If there are single axes to diagonalize remaining, it means that their + # corresponding dimension has been removed, they no longer need diagonalization: + new_diag_indices = [i for i in new_diag_indices if len(i) > 0] + if len(new_diag_indices) > 0: + newexpr2 = _array_diagonal(newexpr, *new_diag_indices, allow_trivial_diags=True) + else: + newexpr2 = newexpr + if isinstance(newexpr2, ArrayDiagonal): + newexpr3, removed2 = _remove_diagonalized_identity_matrices(newexpr2) + removed = _combine_removed(-1, removed, removed2) + return newexpr3, removed + else: + return newexpr2, removed + + +@_remove_trivial_dims.register(ElementwiseApplyFunction) +def _(expr: ElementwiseApplyFunction): + subexpr, removed = _remove_trivial_dims(expr.expr) + if subexpr.shape == (1, 1): + # TODO: move this to ElementwiseApplyFunction + return expr.function(subexpr), removed + [0, 1] + return ElementwiseApplyFunction(expr.function, subexpr), [] + + +@_remove_trivial_dims.register(ArrayElementwiseApplyFunc) +def _(expr: ArrayElementwiseApplyFunc): + subexpr, removed = _remove_trivial_dims(expr.expr) + return ArrayElementwiseApplyFunc(expr.function, subexpr), removed + + +def convert_array_to_matrix(expr): + r""" + Recognize matrix expressions in codegen objects. + + If more than one matrix multiplication line have been detected, return a + list with the matrix expressions. + + Examples + ======== + + >>> from sympy.tensor.array.expressions.from_indexed_to_array import convert_indexed_to_array + >>> from sympy.tensor.array import tensorcontraction, tensorproduct + >>> from sympy import MatrixSymbol, Sum + >>> from sympy.abc import i, j, k, l, N + >>> from sympy.tensor.array.expressions.from_matrix_to_array import convert_matrix_to_array + >>> from sympy.tensor.array.expressions.from_array_to_matrix import convert_array_to_matrix + >>> A = MatrixSymbol("A", N, N) + >>> B = MatrixSymbol("B", N, N) + >>> C = MatrixSymbol("C", N, N) + >>> D = MatrixSymbol("D", N, N) + + >>> expr = Sum(A[i, j]*B[j, k], (j, 0, N-1)) + >>> cg = convert_indexed_to_array(expr) + >>> convert_array_to_matrix(cg) + A*B + >>> cg = convert_indexed_to_array(expr, first_indices=[k]) + >>> convert_array_to_matrix(cg) + B.T*A.T + + Transposition is detected: + + >>> expr = Sum(A[j, i]*B[j, k], (j, 0, N-1)) + >>> cg = convert_indexed_to_array(expr) + >>> convert_array_to_matrix(cg) + A.T*B + >>> cg = convert_indexed_to_array(expr, first_indices=[k]) + >>> convert_array_to_matrix(cg) + B.T*A + + Detect the trace: + + >>> expr = Sum(A[i, i], (i, 0, N-1)) + >>> cg = convert_indexed_to_array(expr) + >>> convert_array_to_matrix(cg) + Trace(A) + + Recognize some more complex traces: + + >>> expr = Sum(A[i, j]*B[j, i], (i, 0, N-1), (j, 0, N-1)) + >>> cg = convert_indexed_to_array(expr) + >>> convert_array_to_matrix(cg) + Trace(A*B) + + More complicated expressions: + + >>> expr = Sum(A[i, j]*B[k, j]*A[l, k], (j, 0, N-1), (k, 0, N-1)) + >>> cg = convert_indexed_to_array(expr) + >>> convert_array_to_matrix(cg) + A*B.T*A.T + + Expressions constructed from matrix expressions do not contain literal + indices, the positions of free indices are returned instead: + + >>> expr = A*B + >>> cg = convert_matrix_to_array(expr) + >>> convert_array_to_matrix(cg) + A*B + + If more than one line of matrix multiplications is detected, return + separate 
matrix multiplication factors embedded in a tensor product object: + + >>> cg = tensorcontraction(tensorproduct(A, B, C, D), (1, 2), (5, 6)) + >>> convert_array_to_matrix(cg) + ArrayTensorProduct(A*B, C*D) + + The two lines have free indices at axes 0, 3 and 4, 7, respectively. + """ + rec = _array2matrix(expr) + rec, removed = _remove_trivial_dims(rec) + return rec + + +def _array_diag2contr_diagmatrix(expr: ArrayDiagonal): + if isinstance(expr.expr, ArrayTensorProduct): + args = list(expr.expr.args) + diag_indices = list(expr.diagonal_indices) + mapping = _get_mapping_from_subranks([_get_subrank(arg) for arg in args]) + tuple_links = [[mapping[j] for j in i] for i in diag_indices] + contr_indices = [] + total_rank = get_rank(expr) + replaced = [False for arg in args] + for i, (abs_pos, rel_pos) in enumerate(zip(diag_indices, tuple_links)): + if len(abs_pos) != 2: + continue + (pos1_outer, pos1_inner), (pos2_outer, pos2_inner) = rel_pos + arg1 = args[pos1_outer] + arg2 = args[pos2_outer] + if get_rank(arg1) != 2 or get_rank(arg2) != 2: + if replaced[pos1_outer]: + diag_indices[i] = None + if replaced[pos2_outer]: + diag_indices[i] = None + continue + pos1_in2 = 1 - pos1_inner + pos2_in2 = 1 - pos2_inner + if arg1.shape[pos1_in2] == 1: + if arg1.shape[pos1_inner] != 1: + darg1 = DiagMatrix(arg1) + else: + darg1 = arg1 + args.append(darg1) + contr_indices.append(((pos2_outer, pos2_inner), (len(args)-1, pos1_inner))) + total_rank += 1 + diag_indices[i] = None + args[pos1_outer] = OneArray(arg1.shape[pos1_in2]) + replaced[pos1_outer] = True + elif arg2.shape[pos2_in2] == 1: + if arg2.shape[pos2_inner] != 1: + darg2 = DiagMatrix(arg2) + else: + darg2 = arg2 + args.append(darg2) + contr_indices.append(((pos1_outer, pos1_inner), (len(args)-1, pos2_inner))) + total_rank += 1 + diag_indices[i] = None + args[pos2_outer] = OneArray(arg2.shape[pos2_in2]) + replaced[pos2_outer] = True + diag_indices_new = [i for i in diag_indices if i is not None] + cumul = list(accumulate([0] + [get_rank(arg) for arg in args])) + contr_indices2 = [tuple(cumul[a] + b for a, b in i) for i in contr_indices] + tc = _array_contraction( + _array_tensor_product(*args), *contr_indices2 + ) + td = _array_diagonal(tc, *diag_indices_new) + return td + return expr + + +def _a2m_mul(*args): + if not any(isinstance(i, _CodegenArrayAbstract) for i in args): + from sympy.matrices.expressions.matmul import MatMul + return MatMul(*args).doit() + else: + return _array_contraction( + _array_tensor_product(*args), + *[(2*i-1, 2*i) for i in range(1, len(args))] + ) + + +def _a2m_tensor_product(*args): + scalars = [] + arrays = [] + for arg in args: + if isinstance(arg, (MatrixExpr, _ArrayExpr, _CodegenArrayAbstract)): + arrays.append(arg) + else: + scalars.append(arg) + scalar = Mul.fromiter(scalars) + if len(arrays) == 0: + return scalar + if scalar != 1: + if isinstance(arrays[0], _CodegenArrayAbstract): + arrays = [scalar] + arrays + else: + arrays[0] *= scalar + return _array_tensor_product(*arrays) + + +def _a2m_add(*args): + if not any(isinstance(i, _CodegenArrayAbstract) for i in args): + from sympy.matrices.expressions.matadd import MatAdd + return MatAdd(*args).doit() + else: + return _array_add(*args) + + +def _a2m_trace(arg): + if isinstance(arg, _CodegenArrayAbstract): + return _array_contraction(arg, (0, 1)) + else: + from sympy.matrices.expressions.trace import Trace + return Trace(arg) + + +def _a2m_transpose(arg): + if isinstance(arg, _CodegenArrayAbstract): + return _permute_dims(arg, [1, 0]) + else: + from 
sympy.matrices.expressions.transpose import Transpose + return Transpose(arg).doit() + + +def identify_hadamard_products(expr: tUnion[ArrayContraction, ArrayDiagonal]): + + editor: _EditArrayContraction = _EditArrayContraction(expr) + + map_contr_to_args: tDict[FrozenSet, List[_ArgE]] = defaultdict(list) + map_ind_to_inds: tDict[Optional[int], int] = defaultdict(int) + for arg_with_ind in editor.args_with_ind: + for ind in arg_with_ind.indices: + map_ind_to_inds[ind] += 1 + if None in arg_with_ind.indices: + continue + map_contr_to_args[frozenset(arg_with_ind.indices)].append(arg_with_ind) + + k: FrozenSet[int] + v: List[_ArgE] + for k, v in map_contr_to_args.items(): + make_trace: bool = False + if len(k) == 1 and next(iter(k)) >= 0 and sum([next(iter(k)) in i for i in map_contr_to_args]) == 1: + # This is a trace: the arguments are fully contracted with only one + # index, and the index isn't used anywhere else: + make_trace = True + first_element = S.One + elif len(k) != 2: + # Hadamard product only defined for matrices: + continue + if len(v) == 1: + # Hadamard product with a single argument makes no sense: + continue + for ind in k: + if map_ind_to_inds[ind] <= 2: + # There is no other contraction, skip: + continue + + def check_transpose(x): + x = [i if i >= 0 else -1-i for i in x] + return x == sorted(x) + + # Check if expression is a trace: + if all([map_ind_to_inds[j] == len(v) and j >= 0 for j in k]) and all([j >= 0 for j in k]): + # This is a trace + make_trace = True + first_element = v[0].element + if not check_transpose(v[0].indices): + first_element = first_element.T + hadamard_factors = v[1:] + else: + hadamard_factors = v + + # This is a Hadamard product: + + hp = hadamard_product(*[i.element if check_transpose(i.indices) else Transpose(i.element) for i in hadamard_factors]) + hp_indices = v[0].indices + if not check_transpose(hadamard_factors[0].indices): + hp_indices = list(reversed(hp_indices)) + if make_trace: + hp = Trace(first_element*hp.T)._normalize() + hp_indices = [] + editor.insert_after(v[0], _ArgE(hp, hp_indices)) + for i in v: + editor.args_with_ind.remove(i) + + return editor.to_array_contraction() + + +def identify_removable_identity_matrices(expr): + editor = _EditArrayContraction(expr) + + flag = True + while flag: + flag = False + for arg_with_ind in editor.args_with_ind: + if isinstance(arg_with_ind.element, Identity): + k = arg_with_ind.element.shape[0] + # Candidate for removal: + if arg_with_ind.indices == [None, None]: + # Free identity matrix, will be cleared by _remove_trivial_dims: + continue + elif None in arg_with_ind.indices: + ind = [j for j in arg_with_ind.indices if j is not None][0] + counted = editor.count_args_with_index(ind) + if counted == 1: + # Identity matrix contracted only on one index with itself, + # transform to a OneArray(k) element: + editor.insert_after(arg_with_ind, OneArray(k)) + editor.args_with_ind.remove(arg_with_ind) + flag = True + break + elif counted > 2: + # Case counted = 2 is a matrix multiplication by identity matrix, skip it. + # Case counted > 2 is a multiple contraction, + # this is a case where the contraction becomes a diagonalization if the + # identity matrix is dropped. 
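+                        # Editor's illustration (not upstream): with counted == 3,
+                        # e.g. sum_i I_ai A_bi B_ci = A_ba * B_ca, the identity
+                        # really encodes a diagonalization over the shared free
+                        # index a rather than a plain contraction, so the
+                        # identity matrix is kept here.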
+ continue + elif arg_with_ind.indices[0] == arg_with_ind.indices[1]: + ind = arg_with_ind.indices[0] + counted = editor.count_args_with_index(ind) + if counted > 1: + editor.args_with_ind.remove(arg_with_ind) + flag = True + break + else: + # This is a trace, skip it as it will be recognized somewhere else: + pass + elif ask(Q.diagonal(arg_with_ind.element)): + if arg_with_ind.indices == [None, None]: + continue + elif None in arg_with_ind.indices: + pass + elif arg_with_ind.indices[0] == arg_with_ind.indices[1]: + ind = arg_with_ind.indices[0] + counted = editor.count_args_with_index(ind) + if counted == 3: + # A_ai B_bi D_ii ==> A_ai D_ij B_bj + ind_new = editor.get_new_contraction_index() + other_args = [j for j in editor.args_with_ind if j != arg_with_ind] + other_args[1].indices = [ind_new if j == ind else j for j in other_args[1].indices] + arg_with_ind.indices = [ind, ind_new] + flag = True + break + + return editor.to_array_contraction() + + +def remove_identity_matrices(expr: ArrayContraction): + editor = _EditArrayContraction(expr) + removed: List[int] = [] + + permutation_map = {} + + free_indices = list(accumulate([0] + [sum([i is None for i in arg.indices]) for arg in editor.args_with_ind])) + free_map = {k: v for k, v in zip(editor.args_with_ind, free_indices[:-1])} + + update_pairs = {} + + for ind in range(editor.number_of_contraction_indices): + args = editor.get_args_with_index(ind) + identity_matrices = [i for i in args if isinstance(i.element, Identity)] + number_identity_matrices = len(identity_matrices) + # If the contraction involves a non-identity matrix and multiple identity matrices: + if number_identity_matrices != len(args) - 1 or number_identity_matrices == 0: + continue + # Get the non-identity element: + non_identity = [i for i in args if not isinstance(i.element, Identity)][0] + # Check that all identity matrices have at least one free index + # (otherwise they would be contractions to some other elements) + if any([None not in i.indices for i in identity_matrices]): + continue + # Mark the identity matrices for removal: + for i in identity_matrices: + i.element = None + removed.extend(range(free_map[i], free_map[i] + len([j for j in i.indices if j is None]))) + last_removed = removed.pop(-1) + update_pairs[last_removed, ind] = non_identity.indices[:] + # Remove the indices from the non-identity matrix, as the contraction + # no longer exists: + non_identity.indices = [None if i == ind else i for i in non_identity.indices] + + removed.sort() + + shifts = list(accumulate([1 if i in removed else 0 for i in range(get_rank(expr))])) + for (last_removed, ind), non_identity_indices in update_pairs.items(): + pos = [free_map[non_identity] + i for i, e in enumerate(non_identity_indices) if e == ind] + assert len(pos) == 1 + for j in pos: + permutation_map[j] = last_removed + + editor.args_with_ind = [i for i in editor.args_with_ind if i.element is not None] + ret_expr = editor.to_array_contraction() + permutation = [] + counter = 0 + counter2 = 0 + for j in range(get_rank(expr)): + if j in removed: + continue + if counter2 in permutation_map: + target = permutation_map[counter2] + permutation.append(target - shifts[target]) + counter2 += 1 + else: + while counter in permutation_map.values(): + counter += 1 + permutation.append(counter) + counter += 1 + counter2 += 1 + ret_expr2 = _permute_dims(ret_expr, _af_invert(permutation)) + return ret_expr2, removed + + +def _combine_removed(dim: int, removed1: List[int], removed2: List[int]) -> List[int]: + # Concatenate two 
axis removal operations as performed by + # _remove_trivial_dims, + removed1 = sorted(removed1) + removed2 = sorted(removed2) + i = 0 + j = 0 + removed = [] + while True: + if j >= len(removed2): + while i < len(removed1): + removed.append(removed1[i]) + i += 1 + break + elif i < len(removed1) and removed1[i] <= i + removed2[j]: + removed.append(removed1[i]) + i += 1 + else: + removed.append(i + removed2[j]) + j += 1 + return removed + + +def _array_contraction_to_diagonal_multiple_identity(expr: ArrayContraction): + editor = _EditArrayContraction(expr) + editor.track_permutation_start() + removed: List[int] = [] + diag_index_counter: int = 0 + for i in range(editor.number_of_contraction_indices): + identities = [] + args = [] + for j, arg in enumerate(editor.args_with_ind): + if i not in arg.indices: + continue + if isinstance(arg.element, Identity): + identities.append(arg) + else: + args.append(arg) + if len(identities) == 0: + continue + if len(args) + len(identities) < 3: + continue + new_diag_ind = -1 - diag_index_counter + diag_index_counter += 1 + # Variable "flag" to control whether to skip this contraction set: + flag: bool = True + for i1, id1 in enumerate(identities): + if None not in id1.indices: + flag = True + break + free_pos = list(range(*editor.get_absolute_free_range(id1)))[0] + editor._track_permutation[-1].append(free_pos) # type: ignore + id1.element = None + flag = False + break + if flag: + continue + for arg in identities[:i1] + identities[i1+1:]: + arg.element = None + removed.extend(range(*editor.get_absolute_free_range(arg))) + for arg in args: + arg.indices = [new_diag_ind if j == i else j for j in arg.indices] + for j, e in enumerate(editor.args_with_ind): + if e.element is None: + editor._track_permutation[j] = None # type: ignore + editor._track_permutation = [i for i in editor._track_permutation if i is not None] # type: ignore + # Renumber permutation array form in order to deal with deleted positions: + remap = {e: i for i, e in enumerate(sorted({k for j in editor._track_permutation for k in j}))} + editor._track_permutation = [[remap[j] for j in i] for i in editor._track_permutation] + editor.args_with_ind = [i for i in editor.args_with_ind if i.element is not None] + new_expr = editor.to_array_contraction() + return new_expr, removed diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/from_matrix_to_array.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/from_matrix_to_array.py new file mode 100644 index 0000000000000000000000000000000000000000..8f66961727f6338318d65876a7768802773e4f2d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/from_matrix_to_array.py @@ -0,0 +1,87 @@ +from sympy import KroneckerProduct +from sympy.core.basic import Basic +from sympy.core.function import Lambda +from sympy.core.mul import Mul +from sympy.core.numbers import Integer +from sympy.core.power import Pow +from sympy.core.singleton import S +from sympy.core.symbol import (Dummy, symbols) +from sympy.matrices.expressions.hadamard import (HadamardPower, HadamardProduct) +from sympy.matrices.expressions.matadd import MatAdd +from sympy.matrices.expressions.matmul import MatMul +from sympy.matrices.expressions.matpow import MatPow +from sympy.matrices.expressions.trace import Trace +from sympy.matrices.expressions.transpose import Transpose +from sympy.matrices.expressions.matexpr import MatrixExpr +from sympy.tensor.array.expressions.array_expressions import \ + 
ArrayElementwiseApplyFunc, _array_tensor_product, _array_contraction, \ + _array_diagonal, _array_add, _permute_dims, Reshape + + +def convert_matrix_to_array(expr: Basic) -> Basic: + if isinstance(expr, MatMul): + args_nonmat = [] + args = [] + for arg in expr.args: + if isinstance(arg, MatrixExpr): + args.append(arg) + else: + args_nonmat.append(convert_matrix_to_array(arg)) + contractions = [(2*i+1, 2*i+2) for i in range(len(args)-1)] + scalar = _array_tensor_product(*args_nonmat) if args_nonmat else S.One + if scalar == 1: + tprod = _array_tensor_product( + *[convert_matrix_to_array(arg) for arg in args]) + else: + tprod = _array_tensor_product( + scalar, + *[convert_matrix_to_array(arg) for arg in args]) + return _array_contraction( + tprod, + *contractions + ) + elif isinstance(expr, MatAdd): + return _array_add( + *[convert_matrix_to_array(arg) for arg in expr.args] + ) + elif isinstance(expr, Transpose): + return _permute_dims( + convert_matrix_to_array(expr.args[0]), [1, 0] + ) + elif isinstance(expr, Trace): + inner_expr: MatrixExpr = convert_matrix_to_array(expr.arg) # type: ignore + return _array_contraction(inner_expr, (0, len(inner_expr.shape) - 1)) + elif isinstance(expr, Mul): + return _array_tensor_product(*[convert_matrix_to_array(i) for i in expr.args]) + elif isinstance(expr, Pow): + base = convert_matrix_to_array(expr.base) + if (expr.exp > 0) == True: + return _array_tensor_product(*[base for i in range(expr.exp)]) + else: + return expr + elif isinstance(expr, MatPow): + base = convert_matrix_to_array(expr.base) + if expr.exp.is_Integer != True: + b = symbols("b", cls=Dummy) + return ArrayElementwiseApplyFunc(Lambda(b, b**expr.exp), convert_matrix_to_array(base)) + elif (expr.exp > 0) == True: + return convert_matrix_to_array(MatMul.fromiter(base for i in range(expr.exp))) + else: + return expr + elif isinstance(expr, HadamardProduct): + tp = _array_tensor_product(*[convert_matrix_to_array(arg) for arg in expr.args]) + diag = [[2*i for i in range(len(expr.args))], [2*i+1 for i in range(len(expr.args))]] + return _array_diagonal(tp, *diag) + elif isinstance(expr, HadamardPower): + base, exp = expr.args + if isinstance(exp, Integer) and exp > 0: + return convert_matrix_to_array(HadamardProduct.fromiter(base for i in range(exp))) + else: + d = Dummy("d") + return ArrayElementwiseApplyFunc(Lambda(d, d**exp), base) + elif isinstance(expr, KroneckerProduct): + kp_args = [convert_matrix_to_array(arg) for arg in expr.args] + permutation = [2*i for i in range(len(kp_args))] + [2*i + 1 for i in range(len(kp_args))] + return Reshape(_permute_dims(_array_tensor_product(*kp_args), permutation), expr.shape) + else: + return expr diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ba56fa660fa95337e1feb1055390ca42b509794 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_array_expressions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_array_expressions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07fa8b89801de2625400abda93f22b2b052ff831 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_array_expressions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_arrayexpr_derivatives.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_arrayexpr_derivatives.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90eb85aad5e16e084b90298311377f8423fcce86 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_arrayexpr_derivatives.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_as_explicit.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_as_explicit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98ea41a3a1e01515ae21b650d669ee706e4b7bd2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_as_explicit.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_convert_array_to_indexed.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_convert_array_to_indexed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e35e501650d05b88f6b8d9748964fe67b6427a9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_convert_array_to_indexed.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_convert_array_to_matrix.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_convert_array_to_matrix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28f757c556ac67ea775e7a8ca6136a9f765e4ceb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_convert_array_to_matrix.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_convert_indexed_to_array.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_convert_indexed_to_array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88685902d01cecf11b0c7895158c80157d0dc205 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_convert_indexed_to_array.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_convert_matrix_to_array.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_convert_matrix_to_array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c60fddd35f11c982ff323bed0ac35d2f334bd5f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_convert_matrix_to_array.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_deprecated_conv_modules.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_deprecated_conv_modules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82541d21371440111778d329aa05815b9c93019a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_deprecated_conv_modules.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_array_expressions.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_array_expressions.py new file mode 100644 index 0000000000000000000000000000000000000000..63fb79ab7ced7bff5ecb55b1764f43e29f98609d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_array_expressions.py @@ -0,0 +1,808 @@ +import random + +from sympy import tensordiagonal, eye, KroneckerDelta, Array +from sympy.core.symbol import symbols +from sympy.functions.elementary.trigonometric import (cos, sin) +from sympy.matrices.expressions.diagonal import DiagMatrix +from sympy.matrices.expressions.matexpr import MatrixSymbol +from sympy.matrices.expressions.special import ZeroMatrix +from sympy.tensor.array.arrayop import (permutedims, tensorcontraction, tensorproduct) +from sympy.tensor.array.dense_ndim_array import ImmutableDenseNDimArray +from sympy.combinatorics import Permutation +from sympy.tensor.array.expressions.array_expressions import ZeroArray, OneArray, ArraySymbol, ArrayElement, \ + PermuteDims, ArrayContraction, ArrayTensorProduct, ArrayDiagonal, \ + ArrayAdd, nest_permutation, ArrayElementwiseApplyFunc, _EditArrayContraction, _ArgE, _array_tensor_product, \ + _array_contraction, _array_diagonal, _array_add, _permute_dims, Reshape +from sympy.testing.pytest import raises + +i, j, k, l, m, n = symbols("i j k l m n") + + +M = ArraySymbol("M", (k, k)) +N = ArraySymbol("N", (k, k)) +P = ArraySymbol("P", (k, k)) +Q = ArraySymbol("Q", (k, k)) + +A = ArraySymbol("A", (k, k)) +B = ArraySymbol("B", (k, k)) +C = ArraySymbol("C", (k, k)) +D = ArraySymbol("D", (k, k)) + +X = ArraySymbol("X", (k, k)) +Y = ArraySymbol("Y", (k, k)) + +a = ArraySymbol("a", (k, 1)) +b = ArraySymbol("b", (k, 1)) +c = ArraySymbol("c", (k, 1)) +d = ArraySymbol("d", (k, 1)) + + +def test_array_symbol_and_element(): + A = ArraySymbol("A", (2,)) + A0 = ArrayElement(A, (0,)) + A1 = ArrayElement(A, (1,)) + assert A[0] == A0 + assert A[1] != A0 + assert A.as_explicit() == ImmutableDenseNDimArray([A0, A1]) + + A2 = tensorproduct(A, A) + assert A2.shape == (2, 2) + # TODO: not yet supported: + # assert A2.as_explicit() == Array([[A[0]*A[0], A[1]*A[0]], [A[0]*A[1], A[1]*A[1]]]) + A3 = tensorcontraction(A2, (0, 1)) + assert A3.shape == () + # TODO: not yet supported: + # assert A3.as_explicit() == Array([]) + + A = ArraySymbol("A", (2, 3, 4)) + Ae = A.as_explicit() + assert Ae == ImmutableDenseNDimArray( + 
[[[ArrayElement(A, (i, j, k)) for k in range(4)] for j in range(3)] for i in range(2)]) + + p = _permute_dims(A, Permutation(0, 2, 1)) + assert isinstance(p, PermuteDims) + + A = ArraySymbol("A", (2,)) + raises(IndexError, lambda: A[()]) + raises(IndexError, lambda: A[0, 1]) + raises(ValueError, lambda: A[-1]) + raises(ValueError, lambda: A[2]) + + O = OneArray(3, 4) + Z = ZeroArray(m, n) + + raises(IndexError, lambda: O[()]) + raises(IndexError, lambda: O[1, 2, 3]) + raises(ValueError, lambda: O[3, 0]) + raises(ValueError, lambda: O[0, 4]) + + assert O[1, 2] == 1 + assert Z[1, 2] == 0 + + +def test_zero_array(): + assert ZeroArray() == 0 + assert ZeroArray().is_Integer + + za = ZeroArray(3, 2, 4) + assert za.shape == (3, 2, 4) + za_e = za.as_explicit() + assert za_e.shape == (3, 2, 4) + + m, n, k = symbols("m n k") + za = ZeroArray(m, n, k, 2) + assert za.shape == (m, n, k, 2) + raises(ValueError, lambda: za.as_explicit()) + + +def test_one_array(): + assert OneArray() == 1 + assert OneArray().is_Integer + + oa = OneArray(3, 2, 4) + assert oa.shape == (3, 2, 4) + oa_e = oa.as_explicit() + assert oa_e.shape == (3, 2, 4) + + m, n, k = symbols("m n k") + oa = OneArray(m, n, k, 2) + assert oa.shape == (m, n, k, 2) + raises(ValueError, lambda: oa.as_explicit()) + + +def test_arrayexpr_contraction_construction(): + + cg = _array_contraction(A) + assert cg == A + + cg = _array_contraction(_array_tensor_product(A, B), (1, 0)) + assert cg == _array_contraction(_array_tensor_product(A, B), (0, 1)) + + cg = _array_contraction(_array_tensor_product(M, N), (0, 1)) + indtup = cg._get_contraction_tuples() + assert indtup == [[(0, 0), (0, 1)]] + assert cg._contraction_tuples_to_contraction_indices(cg.expr, indtup) == [(0, 1)] + + cg = _array_contraction(_array_tensor_product(M, N), (1, 2)) + indtup = cg._get_contraction_tuples() + assert indtup == [[(0, 1), (1, 0)]] + assert cg._contraction_tuples_to_contraction_indices(cg.expr, indtup) == [(1, 2)] + + cg = _array_contraction(_array_tensor_product(M, M, N), (1, 4), (2, 5)) + indtup = cg._get_contraction_tuples() + assert indtup == [[(0, 0), (1, 1)], [(0, 1), (2, 0)]] + assert cg._contraction_tuples_to_contraction_indices(cg.expr, indtup) == [(0, 3), (1, 4)] + + # Test removal of trivial contraction: + assert _array_contraction(a, (1,)) == a + assert _array_contraction( + _array_tensor_product(a, b), (0, 2), (1,), (3,)) == _array_contraction( + _array_tensor_product(a, b), (0, 2)) + + +def test_arrayexpr_array_flatten(): + + # Flatten nested ArrayTensorProduct objects: + expr1 = _array_tensor_product(M, N) + expr2 = _array_tensor_product(P, Q) + expr = _array_tensor_product(expr1, expr2) + assert expr == _array_tensor_product(M, N, P, Q) + assert expr.args == (M, N, P, Q) + + # Flatten mixed ArrayTensorProduct and ArrayContraction objects: + cg1 = _array_contraction(expr1, (1, 2)) + cg2 = _array_contraction(expr2, (0, 3)) + + expr = _array_tensor_product(cg1, cg2) + assert expr == _array_contraction(_array_tensor_product(M, N, P, Q), (1, 2), (4, 7)) + + expr = _array_tensor_product(M, cg1) + assert expr == _array_contraction(_array_tensor_product(M, M, N), (3, 4)) + + # Flatten nested ArrayContraction objects: + cgnested = _array_contraction(cg1, (0, 1)) + assert cgnested == _array_contraction(_array_tensor_product(M, N), (0, 3), (1, 2)) + + cgnested = _array_contraction(_array_tensor_product(cg1, cg2), (0, 3)) + assert cgnested == _array_contraction(_array_tensor_product(M, N, P, Q), (0, 6), (1, 2), (4, 7)) + + cg3 = 
_array_contraction(_array_tensor_product(M, N, P, Q), (1, 3), (2, 4)) + cgnested = _array_contraction(cg3, (0, 1)) + assert cgnested == _array_contraction(_array_tensor_product(M, N, P, Q), (0, 5), (1, 3), (2, 4)) + + cgnested = _array_contraction(cg3, (0, 3), (1, 2)) + assert cgnested == _array_contraction(_array_tensor_product(M, N, P, Q), (0, 7), (1, 3), (2, 4), (5, 6)) + + cg4 = _array_contraction(_array_tensor_product(M, N, P, Q), (1, 5), (3, 7)) + cgnested = _array_contraction(cg4, (0, 1)) + assert cgnested == _array_contraction(_array_tensor_product(M, N, P, Q), (0, 2), (1, 5), (3, 7)) + + cgnested = _array_contraction(cg4, (0, 1), (2, 3)) + assert cgnested == _array_contraction(_array_tensor_product(M, N, P, Q), (0, 2), (1, 5), (3, 7), (4, 6)) + + cg = _array_diagonal(cg4) + assert cg == cg4 + assert isinstance(cg, type(cg4)) + + # Flatten nested ArrayDiagonal objects: + cg1 = _array_diagonal(expr1, (1, 2)) + cg2 = _array_diagonal(expr2, (0, 3)) + cg3 = _array_diagonal(_array_tensor_product(M, N, P, Q), (1, 3), (2, 4)) + cg4 = _array_diagonal(_array_tensor_product(M, N, P, Q), (1, 5), (3, 7)) + + cgnested = _array_diagonal(cg1, (0, 1)) + assert cgnested == _array_diagonal(_array_tensor_product(M, N), (1, 2), (0, 3)) + + cgnested = _array_diagonal(cg3, (1, 2)) + assert cgnested == _array_diagonal(_array_tensor_product(M, N, P, Q), (1, 3), (2, 4), (5, 6)) + + cgnested = _array_diagonal(cg4, (1, 2)) + assert cgnested == _array_diagonal(_array_tensor_product(M, N, P, Q), (1, 5), (3, 7), (2, 4)) + + cg = _array_add(M, N) + cg2 = _array_add(cg, P) + assert isinstance(cg2, ArrayAdd) + assert cg2.args == (M, N, P) + assert cg2.shape == (k, k) + + expr = _array_tensor_product(_array_diagonal(X, (0, 1)), _array_diagonal(A, (0, 1))) + assert expr == _array_diagonal(_array_tensor_product(X, A), (0, 1), (2, 3)) + + expr1 = _array_diagonal(_array_tensor_product(X, A), (1, 2)) + expr2 = _array_tensor_product(expr1, a) + assert expr2 == _permute_dims(_array_diagonal(_array_tensor_product(X, A, a), (1, 2)), [0, 1, 4, 2, 3]) + + expr1 = _array_contraction(_array_tensor_product(X, A), (1, 2)) + expr2 = _array_tensor_product(expr1, a) + assert isinstance(expr2, ArrayContraction) + assert isinstance(expr2.expr, ArrayTensorProduct) + + cg = _array_tensor_product(_array_diagonal(_array_tensor_product(A, X, Y), (0, 3), (1, 5)), a, b) + assert cg == _permute_dims(_array_diagonal(_array_tensor_product(A, X, Y, a, b), (0, 3), (1, 5)), [0, 1, 6, 7, 2, 3, 4, 5]) + + +def test_arrayexpr_array_diagonal(): + cg = _array_diagonal(M, (1, 0)) + assert cg == _array_diagonal(M, (0, 1)) + + cg = _array_diagonal(_array_tensor_product(M, N, P), (4, 1), (2, 0)) + assert cg == _array_diagonal(_array_tensor_product(M, N, P), (1, 4), (0, 2)) + + cg = _array_diagonal(_array_tensor_product(M, N), (1, 2), (3,), allow_trivial_diags=True) + assert cg == _permute_dims(_array_diagonal(_array_tensor_product(M, N), (1, 2)), [0, 2, 1]) + + Ax = ArraySymbol("Ax", shape=(1, 2, 3, 4, 3, 5, 6, 2, 7)) + cg = _array_diagonal(Ax, (1, 7), (3,), (2, 4), (6,), allow_trivial_diags=True) + assert cg == _permute_dims(_array_diagonal(Ax, (1, 7), (2, 4)), [0, 2, 4, 5, 1, 6, 3]) + + cg = _array_diagonal(M, (0,), allow_trivial_diags=True) + assert cg == _permute_dims(M, [1, 0]) + + raises(ValueError, lambda: _array_diagonal(M, (0, 0))) + + +def test_arrayexpr_array_shape(): + expr = _array_tensor_product(M, N, P, Q) + assert expr.shape == (k, k, k, k, k, k, k, k) + Z = MatrixSymbol("Z", m, n) + expr = _array_tensor_product(M, Z) + assert expr.shape == 
(k, k, m, n) + expr2 = _array_contraction(expr, (0, 1)) + assert expr2.shape == (m, n) + expr2 = _array_diagonal(expr, (0, 1)) + assert expr2.shape == (m, n, k) + exprp = _permute_dims(expr, [2, 1, 3, 0]) + assert exprp.shape == (m, k, n, k) + expr3 = _array_tensor_product(N, Z) + expr2 = _array_add(expr, expr3) + assert expr2.shape == (k, k, m, n) + + # Contraction along axes with discordant dimensions: + raises(ValueError, lambda: _array_contraction(expr, (1, 2))) + # Also diagonal needs the same dimensions: + raises(ValueError, lambda: _array_diagonal(expr, (1, 2))) + # Diagonal requires at least to axes to compute the diagonal: + raises(ValueError, lambda: _array_diagonal(expr, (1,))) + + +def test_arrayexpr_permutedims_sink(): + + cg = _permute_dims(_array_tensor_product(M, N), [0, 1, 3, 2], nest_permutation=False) + sunk = nest_permutation(cg) + assert sunk == _array_tensor_product(M, _permute_dims(N, [1, 0])) + + cg = _permute_dims(_array_tensor_product(M, N), [1, 0, 3, 2], nest_permutation=False) + sunk = nest_permutation(cg) + assert sunk == _array_tensor_product(_permute_dims(M, [1, 0]), _permute_dims(N, [1, 0])) + + cg = _permute_dims(_array_tensor_product(M, N), [3, 2, 1, 0], nest_permutation=False) + sunk = nest_permutation(cg) + assert sunk == _array_tensor_product(_permute_dims(N, [1, 0]), _permute_dims(M, [1, 0])) + + cg = _permute_dims(_array_contraction(_array_tensor_product(M, N), (1, 2)), [1, 0], nest_permutation=False) + sunk = nest_permutation(cg) + assert sunk == _array_contraction(_permute_dims(_array_tensor_product(M, N), [[0, 3]]), (1, 2)) + + cg = _permute_dims(_array_tensor_product(M, N), [1, 0, 3, 2], nest_permutation=False) + sunk = nest_permutation(cg) + assert sunk == _array_tensor_product(_permute_dims(M, [1, 0]), _permute_dims(N, [1, 0])) + + cg = _permute_dims(_array_contraction(_array_tensor_product(M, N, P), (1, 2), (3, 4)), [1, 0], nest_permutation=False) + sunk = nest_permutation(cg) + assert sunk == _array_contraction(_permute_dims(_array_tensor_product(M, N, P), [[0, 5]]), (1, 2), (3, 4)) + + +def test_arrayexpr_push_indices_up_and_down(): + + indices = list(range(12)) + + contr_diag_indices = [(0, 6), (2, 8)] + assert ArrayContraction._push_indices_down(contr_diag_indices, indices) == (1, 3, 4, 5, 7, 9, 10, 11, 12, 13, 14, 15) + assert ArrayContraction._push_indices_up(contr_diag_indices, indices) == (None, 0, None, 1, 2, 3, None, 4, None, 5, 6, 7) + + assert ArrayDiagonal._push_indices_down(contr_diag_indices, indices, 10) == (1, 3, 4, 5, 7, 9, (0, 6), (2, 8), None, None, None, None) + assert ArrayDiagonal._push_indices_up(contr_diag_indices, indices, 10) == (6, 0, 7, 1, 2, 3, 6, 4, 7, 5, None, None) + + contr_diag_indices = [(1, 2), (7, 8)] + assert ArrayContraction._push_indices_down(contr_diag_indices, indices) == (0, 3, 4, 5, 6, 9, 10, 11, 12, 13, 14, 15) + assert ArrayContraction._push_indices_up(contr_diag_indices, indices) == (0, None, None, 1, 2, 3, 4, None, None, 5, 6, 7) + + assert ArrayDiagonal._push_indices_down(contr_diag_indices, indices, 10) == (0, 3, 4, 5, 6, 9, (1, 2), (7, 8), None, None, None, None) + assert ArrayDiagonal._push_indices_up(contr_diag_indices, indices, 10) == (0, 6, 6, 1, 2, 3, 4, 7, 7, 5, None, None) + + +def test_arrayexpr_split_multiple_contractions(): + a = MatrixSymbol("a", k, 1) + b = MatrixSymbol("b", k, 1) + A = MatrixSymbol("A", k, k) + B = MatrixSymbol("B", k, k) + C = MatrixSymbol("C", k, k) + X = MatrixSymbol("X", k, k) + + cg = _array_contraction(_array_tensor_product(A.T, a, b, b.T, 
(A*X*b).applyfunc(cos)), (1, 2, 8), (5, 6, 9)) + expected = _array_contraction(_array_tensor_product(A.T, DiagMatrix(a), OneArray(1), b, b.T, (A*X*b).applyfunc(cos)), (1, 3), (2, 9), (6, 7, 10)) + assert cg.split_multiple_contractions().dummy_eq(expected) + + # Check no overlap of lines: + + cg = _array_contraction(_array_tensor_product(A, a, C, a, B), (1, 2, 4), (5, 6, 8), (3, 7)) + assert cg.split_multiple_contractions() == cg + + cg = _array_contraction(_array_tensor_product(a, b, A), (0, 2, 4), (1, 3)) + assert cg.split_multiple_contractions() == cg + + +def test_arrayexpr_nested_permutations(): + + cg = _permute_dims(_permute_dims(M, (1, 0)), (1, 0)) + assert cg == M + + times = 3 + plist1 = [list(range(6)) for i in range(times)] + plist2 = [list(range(6)) for i in range(times)] + + for i in range(times): + random.shuffle(plist1[i]) + random.shuffle(plist2[i]) + + plist1.append([2, 5, 4, 1, 0, 3]) + plist2.append([3, 5, 0, 4, 1, 2]) + + plist1.append([2, 5, 4, 0, 3, 1]) + plist2.append([3, 0, 5, 1, 2, 4]) + + plist1.append([5, 4, 2, 0, 3, 1]) + plist2.append([4, 5, 0, 2, 3, 1]) + + Me = M.subs(k, 3).as_explicit() + Ne = N.subs(k, 3).as_explicit() + Pe = P.subs(k, 3).as_explicit() + cge = tensorproduct(Me, Ne, Pe) + + for permutation_array1, permutation_array2 in zip(plist1, plist2): + p1 = Permutation(permutation_array1) + p2 = Permutation(permutation_array2) + + cg = _permute_dims( + _permute_dims( + _array_tensor_product(M, N, P), + p1), + p2 + ) + result = _permute_dims( + _array_tensor_product(M, N, P), + p2*p1 + ) + assert cg == result + + # Check that `permutedims` behaves the same way with explicit-component arrays: + result1 = _permute_dims(_permute_dims(cge, p1), p2) + result2 = _permute_dims(cge, p2*p1) + assert result1 == result2 + + +def test_arrayexpr_contraction_permutation_mix(): + + Me = M.subs(k, 3).as_explicit() + Ne = N.subs(k, 3).as_explicit() + + cg1 = _array_contraction(PermuteDims(_array_tensor_product(M, N), Permutation([0, 2, 1, 3])), (2, 3)) + cg2 = _array_contraction(_array_tensor_product(M, N), (1, 3)) + assert cg1 == cg2 + cge1 = tensorcontraction(permutedims(tensorproduct(Me, Ne), Permutation([0, 2, 1, 3])), (2, 3)) + cge2 = tensorcontraction(tensorproduct(Me, Ne), (1, 3)) + assert cge1 == cge2 + + cg1 = _permute_dims(_array_tensor_product(M, N), Permutation([0, 1, 3, 2])) + cg2 = _array_tensor_product(M, _permute_dims(N, Permutation([1, 0]))) + assert cg1 == cg2 + + cg1 = _array_contraction( + _permute_dims( + _array_tensor_product(M, N, P, Q), Permutation([0, 2, 3, 1, 4, 5, 7, 6])), + (1, 2), (3, 5) + ) + cg2 = _array_contraction( + _array_tensor_product(M, N, P, _permute_dims(Q, Permutation([1, 0]))), + (1, 5), (2, 3) + ) + assert cg1 == cg2 + + cg1 = _array_contraction( + _permute_dims( + _array_tensor_product(M, N, P, Q), Permutation([1, 0, 4, 6, 2, 7, 5, 3])), + (0, 1), (2, 6), (3, 7) + ) + cg2 = _permute_dims( + _array_contraction( + _array_tensor_product(M, P, Q, N), + (0, 1), (2, 3), (4, 7)), + [1, 0] + ) + assert cg1 == cg2 + + cg1 = _array_contraction( + _permute_dims( + _array_tensor_product(M, N, P, Q), Permutation([1, 0, 4, 6, 7, 2, 5, 3])), + (0, 1), (2, 6), (3, 7) + ) + cg2 = _permute_dims( + _array_contraction( + _array_tensor_product(_permute_dims(M, [1, 0]), N, P, Q), + (0, 1), (3, 6), (4, 5) + ), + Permutation([1, 0]) + ) + assert cg1 == cg2 + + +def test_arrayexpr_permute_tensor_product(): + cg1 = _permute_dims(_array_tensor_product(M, N, P, Q), Permutation([2, 3, 1, 0, 5, 4, 6, 7])) + cg2 = _array_tensor_product(N, _permute_dims(M, 
[1, 0]), + _permute_dims(P, [1, 0]), Q) + assert cg1 == cg2 + + # TODO: reverse operation starting with `PermuteDims` and getting down to `bb`... + cg1 = _permute_dims(_array_tensor_product(M, N, P, Q), Permutation([2, 3, 4, 5, 0, 1, 6, 7])) + cg2 = _array_tensor_product(N, P, M, Q) + assert cg1 == cg2 + + cg1 = _permute_dims(_array_tensor_product(M, N, P, Q), Permutation([2, 3, 4, 6, 5, 7, 0, 1])) + assert cg1.expr == _array_tensor_product(N, P, Q, M) + assert cg1.permutation == Permutation([0, 1, 2, 4, 3, 5, 6, 7]) + + cg1 = _array_contraction( + _permute_dims( + _array_tensor_product(N, Q, Q, M), + [2, 1, 5, 4, 0, 3, 6, 7]), + [1, 2, 6]) + cg2 = _permute_dims(_array_contraction(_array_tensor_product(Q, Q, N, M), (3, 5, 6)), [0, 2, 3, 1, 4]) + assert cg1 == cg2 + + cg1 = _array_contraction( + _array_contraction( + _array_contraction( + _array_contraction( + _permute_dims( + _array_tensor_product(N, Q, Q, M), + [2, 1, 5, 4, 0, 3, 6, 7]), + [1, 2, 6]), + [1, 3, 4]), + [1]), + [0]) + cg2 = _array_contraction(_array_tensor_product(M, N, Q, Q), (0, 3, 5), (1, 4, 7), (2,), (6,)) + assert cg1 == cg2 + + +def test_arrayexpr_canonicalize_diagonal__permute_dims(): + tp = _array_tensor_product(M, Q, N, P) + expr = _array_diagonal( + _permute_dims(tp, [0, 1, 2, 4, 7, 6, 3, 5]), (2, 4, 5), (6, 7), + (0, 3)) + result = _array_diagonal(tp, (2, 6, 7), (3, 5), (0, 4)) + assert expr == result + + tp = _array_tensor_product(M, N, P, Q) + expr = _array_diagonal(_permute_dims(tp, [0, 5, 2, 4, 1, 6, 3, 7]), (1, 2, 6), (3, 4)) + result = _array_diagonal(_array_tensor_product(M, P, N, Q), (3, 4, 5), (1, 2)) + assert expr == result + + +def test_arrayexpr_canonicalize_diagonal_contraction(): + tp = _array_tensor_product(M, N, P, Q) + expr = _array_contraction(_array_diagonal(tp, (1, 3, 4)), (0, 3)) + result = _array_diagonal(_array_contraction(_array_tensor_product(M, N, P, Q), (0, 6)), (0, 2, 3)) + assert expr == result + + expr = _array_contraction(_array_diagonal(tp, (0, 1, 2, 3, 7)), (1, 2, 3)) + result = _array_contraction(_array_tensor_product(M, N, P, Q), (0, 1, 2, 3, 5, 6, 7)) + assert expr == result + + expr = _array_contraction(_array_diagonal(tp, (0, 2, 6, 7)), (1, 2, 3)) + result = _array_diagonal(_array_contraction(tp, (3, 4, 5)), (0, 2, 3, 4)) + assert expr == result + + td = _array_diagonal(_array_tensor_product(M, N, P, Q), (0, 3)) + expr = _array_contraction(td, (2, 1), (0, 4, 6, 5, 3)) + result = _array_contraction(_array_tensor_product(M, N, P, Q), (0, 1, 3, 5, 6, 7), (2, 4)) + assert expr == result + + +def test_arrayexpr_array_wrong_permutation_size(): + cg = _array_tensor_product(M, N) + raises(ValueError, lambda: _permute_dims(cg, [1, 0])) + raises(ValueError, lambda: _permute_dims(cg, [1, 0, 2, 3, 5, 4])) + + +def test_arrayexpr_nested_array_elementwise_add(): + cg = _array_contraction(_array_add( + _array_tensor_product(M, N), + _array_tensor_product(N, M) + ), (1, 2)) + result = _array_add( + _array_contraction(_array_tensor_product(M, N), (1, 2)), + _array_contraction(_array_tensor_product(N, M), (1, 2)) + ) + assert cg == result + + cg = _array_diagonal(_array_add( + _array_tensor_product(M, N), + _array_tensor_product(N, M) + ), (1, 2)) + result = _array_add( + _array_diagonal(_array_tensor_product(M, N), (1, 2)), + _array_diagonal(_array_tensor_product(N, M), (1, 2)) + ) + assert cg == result + + +def test_arrayexpr_array_expr_zero_array(): + za1 = ZeroArray(k, l, m, n) + zm1 = ZeroMatrix(m, n) + + za2 = ZeroArray(k, m, m, n) + zm2 = ZeroMatrix(m, m) + zm3 = ZeroMatrix(k, k) + + 
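+    # Descriptive note (added comment, not part of the upstream test): the
+    # assertions below check that zero arrays and zero matrices are absorbed
+    # by tensor products, contractions, diagonals, permutations and additions: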
assert _array_tensor_product(M, N, za1) == ZeroArray(k, k, k, k, k, l, m, n) + assert _array_tensor_product(M, N, zm1) == ZeroArray(k, k, k, k, m, n) + + assert _array_contraction(za1, (3,)) == ZeroArray(k, l, m) + assert _array_contraction(zm1, (1,)) == ZeroArray(m) + assert _array_contraction(za2, (1, 2)) == ZeroArray(k, n) + assert _array_contraction(zm2, (0, 1)) == 0 + + assert _array_diagonal(za2, (1, 2)) == ZeroArray(k, n, m) + assert _array_diagonal(zm2, (0, 1)) == ZeroArray(m) + + assert _permute_dims(za1, [2, 1, 3, 0]) == ZeroArray(m, l, n, k) + assert _permute_dims(zm1, [1, 0]) == ZeroArray(n, m) + + assert _array_add(za1) == za1 + assert _array_add(zm1) == ZeroArray(m, n) + tp1 = _array_tensor_product(MatrixSymbol("A", k, l), MatrixSymbol("B", m, n)) + assert _array_add(tp1, za1) == tp1 + tp2 = _array_tensor_product(MatrixSymbol("C", k, l), MatrixSymbol("D", m, n)) + assert _array_add(tp1, za1, tp2) == _array_add(tp1, tp2) + assert _array_add(M, zm3) == M + assert _array_add(M, N, zm3) == _array_add(M, N) + + +def test_arrayexpr_array_expr_applyfunc(): + + A = ArraySymbol("A", (3, k, 2)) + aaf = ArrayElementwiseApplyFunc(sin, A) + assert aaf.shape == (3, k, 2) + + +def test_edit_array_contraction(): + cg = _array_contraction(_array_tensor_product(A, B, C, D), (1, 2, 5)) + ecg = _EditArrayContraction(cg) + assert ecg.to_array_contraction() == cg + + ecg.args_with_ind[1], ecg.args_with_ind[2] = ecg.args_with_ind[2], ecg.args_with_ind[1] + assert ecg.to_array_contraction() == _array_contraction(_array_tensor_product(A, C, B, D), (1, 3, 4)) + + ci = ecg.get_new_contraction_index() + new_arg = _ArgE(X) + new_arg.indices = [ci, ci] + ecg.args_with_ind.insert(2, new_arg) + assert ecg.to_array_contraction() == _array_contraction(_array_tensor_product(A, C, X, B, D), (1, 3, 6), (4, 5)) + + assert ecg.get_contraction_indices() == [[1, 3, 6], [4, 5]] + assert [[tuple(j) for j in i] for i in ecg.get_contraction_indices_to_ind_rel_pos()] == [[(0, 1), (1, 1), (3, 0)], [(2, 0), (2, 1)]] + assert [list(i) for i in ecg.get_mapping_for_index(0)] == [[0, 1], [1, 1], [3, 0]] + assert [list(i) for i in ecg.get_mapping_for_index(1)] == [[2, 0], [2, 1]] + raises(ValueError, lambda: ecg.get_mapping_for_index(2)) + + ecg.args_with_ind.pop(1) + assert ecg.to_array_contraction() == _array_contraction(_array_tensor_product(A, X, B, D), (1, 4), (2, 3)) + + ecg.args_with_ind[0].indices[1] = ecg.args_with_ind[1].indices[0] + ecg.args_with_ind[1].indices[1] = ecg.args_with_ind[2].indices[0] + assert ecg.to_array_contraction() == _array_contraction(_array_tensor_product(A, X, B, D), (1, 2), (3, 4)) + + ecg.insert_after(ecg.args_with_ind[1], _ArgE(C)) + assert ecg.to_array_contraction() == _array_contraction(_array_tensor_product(A, X, C, B, D), (1, 2), (3, 6)) + + +def test_array_expressions_no_canonicalization(): + + tp = _array_tensor_product(M, N, P) + + # ArrayTensorProduct: + + expr = ArrayTensorProduct(tp, N) + assert str(expr) == "ArrayTensorProduct(ArrayTensorProduct(M, N, P), N)" + assert expr.doit() == ArrayTensorProduct(M, N, P, N) + + expr = ArrayTensorProduct(ArrayContraction(M, (0, 1)), N) + assert str(expr) == "ArrayTensorProduct(ArrayContraction(M, (0, 1)), N)" + assert expr.doit() == ArrayContraction(ArrayTensorProduct(M, N), (0, 1)) + + expr = ArrayTensorProduct(ArrayDiagonal(M, (0, 1)), N) + assert str(expr) == "ArrayTensorProduct(ArrayDiagonal(M, (0, 1)), N)" + assert expr.doit() == PermuteDims(ArrayDiagonal(ArrayTensorProduct(M, N), (0, 1)), [2, 0, 1]) + + expr = 
ArrayTensorProduct(PermuteDims(M, [1, 0]), N) + assert str(expr) == "ArrayTensorProduct(PermuteDims(M, (0 1)), N)" + assert expr.doit() == PermuteDims(ArrayTensorProduct(M, N), [1, 0, 2, 3]) + + # ArrayContraction: + + expr = ArrayContraction(_array_contraction(tp, (0, 2)), (0, 1)) + assert isinstance(expr, ArrayContraction) + assert isinstance(expr.expr, ArrayContraction) + assert str(expr) == "ArrayContraction(ArrayContraction(ArrayTensorProduct(M, N, P), (0, 2)), (0, 1))" + assert expr.doit() == ArrayContraction(tp, (0, 2), (1, 3)) + + expr = ArrayContraction(ArrayContraction(ArrayContraction(tp, (0, 1)), (0, 1)), (0, 1)) + assert expr.doit() == ArrayContraction(tp, (0, 1), (2, 3), (4, 5)) + # assert expr._canonicalize() == ArrayContraction(ArrayContraction(tp, (0, 1)), (0, 1), (2, 3)) + + expr = ArrayContraction(ArrayDiagonal(tp, (0, 1)), (0, 1)) + assert str(expr) == "ArrayContraction(ArrayDiagonal(ArrayTensorProduct(M, N, P), (0, 1)), (0, 1))" + assert expr.doit() == ArrayDiagonal(ArrayContraction(ArrayTensorProduct(N, M, P), (0, 1)), (0, 1)) + + expr = ArrayContraction(PermuteDims(M, [1, 0]), (0, 1)) + assert str(expr) == "ArrayContraction(PermuteDims(M, (0 1)), (0, 1))" + assert expr.doit() == ArrayContraction(M, (0, 1)) + + # ArrayDiagonal: + + expr = ArrayDiagonal(ArrayDiagonal(tp, (0, 2)), (0, 1)) + assert str(expr) == "ArrayDiagonal(ArrayDiagonal(ArrayTensorProduct(M, N, P), (0, 2)), (0, 1))" + assert expr.doit() == ArrayDiagonal(tp, (0, 2), (1, 3)) + + expr = ArrayDiagonal(ArrayDiagonal(ArrayDiagonal(tp, (0, 1)), (0, 1)), (0, 1)) + assert expr.doit() == ArrayDiagonal(tp, (0, 1), (2, 3), (4, 5)) + assert expr._canonicalize() == expr.doit() + + expr = ArrayDiagonal(ArrayContraction(tp, (0, 1)), (0, 1)) + assert str(expr) == "ArrayDiagonal(ArrayContraction(ArrayTensorProduct(M, N, P), (0, 1)), (0, 1))" + assert expr.doit() == expr + + expr = ArrayDiagonal(PermuteDims(M, [1, 0]), (0, 1)) + assert str(expr) == "ArrayDiagonal(PermuteDims(M, (0 1)), (0, 1))" + assert expr.doit() == ArrayDiagonal(M, (0, 1)) + + # ArrayAdd: + + expr = ArrayAdd(M) + assert isinstance(expr, ArrayAdd) + assert expr.doit() == M + + expr = ArrayAdd(ArrayAdd(M, N), P) + assert str(expr) == "ArrayAdd(ArrayAdd(M, N), P)" + assert expr.doit() == ArrayAdd(M, N, P) + + expr = ArrayAdd(M, ArrayAdd(N, ArrayAdd(P, M))) + assert expr.doit() == ArrayAdd(M, N, P, M) + assert expr._canonicalize() == ArrayAdd(M, N, ArrayAdd(P, M)) + + expr = ArrayAdd(M, ZeroArray(k, k), N) + assert str(expr) == "ArrayAdd(M, ZeroArray(k, k), N)" + assert expr.doit() == ArrayAdd(M, N) + + # PermuteDims: + + expr = PermuteDims(PermuteDims(M, [1, 0]), [1, 0]) + assert str(expr) == "PermuteDims(PermuteDims(M, (0 1)), (0 1))" + assert expr.doit() == M + + expr = PermuteDims(PermuteDims(PermuteDims(M, [1, 0]), [1, 0]), [1, 0]) + assert expr.doit() == PermuteDims(M, [1, 0]) + assert expr._canonicalize() == expr.doit() + + # Reshape + + expr = Reshape(A, (k**2,)) + assert expr.shape == (k**2,) + assert isinstance(expr, Reshape) + + +def test_array_expr_construction_with_functions(): + + tp = tensorproduct(M, N) + assert tp == ArrayTensorProduct(M, N) + + expr = tensorproduct(A, eye(2)) + assert expr == ArrayTensorProduct(A, eye(2)) + + # Contraction: + + expr = tensorcontraction(M, (0, 1)) + assert expr == ArrayContraction(M, (0, 1)) + + expr = tensorcontraction(tp, (1, 2)) + assert expr == ArrayContraction(tp, (1, 2)) + + expr = tensorcontraction(tensorcontraction(tp, (1, 2)), (0, 1)) + assert expr == ArrayContraction(tp, (0, 3), (1, 2)) + + # 
Diagonalization: + + expr = tensordiagonal(M, (0, 1)) + assert expr == ArrayDiagonal(M, (0, 1)) + + expr = tensordiagonal(tensordiagonal(tp, (0, 1)), (0, 1)) + assert expr == ArrayDiagonal(tp, (0, 1), (2, 3)) + + # Permutation of dimensions: + + expr = permutedims(M, [1, 0]) + assert expr == PermuteDims(M, [1, 0]) + + expr = permutedims(PermuteDims(tp, [1, 0, 2, 3]), [0, 1, 3, 2]) + assert expr == PermuteDims(tp, [1, 0, 3, 2]) + + expr = PermuteDims(tp, index_order_new=["a", "b", "c", "d"], index_order_old=["d", "c", "b", "a"]) + assert expr == PermuteDims(tp, [3, 2, 1, 0]) + + arr = Array(range(32)).reshape(2, 2, 2, 2, 2) + expr = PermuteDims(arr, index_order_new=["a", "b", "c", "d", "e"], index_order_old=['b', 'e', 'a', 'd', 'c']) + assert expr == PermuteDims(arr, [2, 0, 4, 3, 1]) + assert expr.as_explicit() == permutedims(arr, index_order_new=["a", "b", "c", "d", "e"], index_order_old=['b', 'e', 'a', 'd', 'c']) + + +def test_array_element_expressions(): + # Check commutative property: + assert M[0, 0]*N[0, 0] == N[0, 0]*M[0, 0] + + # Check derivatives: + assert M[0, 0].diff(M[0, 0]) == 1 + assert M[0, 0].diff(M[1, 0]) == 0 + assert M[0, 0].diff(N[0, 0]) == 0 + assert M[0, 1].diff(M[i, j]) == KroneckerDelta(i, 0)*KroneckerDelta(j, 1) + assert M[0, 1].diff(N[i, j]) == 0 + + K4 = ArraySymbol("K4", shape=(k, k, k, k)) + + assert K4[i, j, k, l].diff(K4[1, 2, 3, 4]) == ( + KroneckerDelta(i, 1)*KroneckerDelta(j, 2)*KroneckerDelta(k, 3)*KroneckerDelta(l, 4) + ) + + +def test_array_expr_reshape(): + + A = MatrixSymbol("A", 2, 2) + B = ArraySymbol("B", (2, 2, 2)) + C = Array([1, 2, 3, 4]) + + expr = Reshape(A, (4,)) + assert expr.expr == A + assert expr.shape == (4,) + assert expr.as_explicit() == Array([A[0, 0], A[0, 1], A[1, 0], A[1, 1]]) + + expr = Reshape(B, (2, 4)) + assert expr.expr == B + assert expr.shape == (2, 4) + ee = expr.as_explicit() + assert isinstance(ee, ImmutableDenseNDimArray) + assert ee.shape == (2, 4) + assert ee == Array([[B[0, 0, 0], B[0, 0, 1], B[0, 1, 0], B[0, 1, 1]], [B[1, 0, 0], B[1, 0, 1], B[1, 1, 0], B[1, 1, 1]]]) + + expr = Reshape(A, (k, 2)) + assert expr.shape == (k, 2) + + raises(ValueError, lambda: Reshape(A, (2, 3))) + raises(ValueError, lambda: Reshape(A, (3,))) + + expr = Reshape(C, (2, 2)) + assert expr.expr == C + assert expr.shape == (2, 2) + assert expr.doit() == Array([[1, 2], [3, 4]]) + + +def test_array_expr_as_explicit_with_explicit_component_arrays(): + # Test if .as_explicit() works with explicit-component arrays + # nested in array expressions: + from sympy.abc import x, y, z, t + A = Array([[x, y], [z, t]]) + assert ArrayTensorProduct(A, A).as_explicit() == tensorproduct(A, A) + assert ArrayDiagonal(A, (0, 1)).as_explicit() == tensordiagonal(A, (0, 1)) + assert ArrayContraction(A, (0, 1)).as_explicit() == tensorcontraction(A, (0, 1)) + assert ArrayAdd(A, A).as_explicit() == A + A + assert ArrayElementwiseApplyFunc(sin, A).as_explicit() == A.applyfunc(sin) + assert PermuteDims(A, [1, 0]).as_explicit() == permutedims(A, [1, 0]) + assert Reshape(A, [4]).as_explicit() == A.reshape(4) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_arrayexpr_derivatives.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_arrayexpr_derivatives.py new file mode 100644 index 0000000000000000000000000000000000000000..bc0fcf63f2607b23feb38758e4f0994de4f0384b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_arrayexpr_derivatives.py @@ 
-0,0 +1,78 @@ +from sympy.core.symbol import symbols +from sympy.functions.elementary.trigonometric import (cos, sin) +from sympy.matrices.expressions.matexpr import MatrixSymbol +from sympy.matrices.expressions.special import Identity +from sympy.matrices.expressions.applyfunc import ElementwiseApplyFunction +from sympy.tensor.array.expressions.array_expressions import ArraySymbol, ArrayTensorProduct, \ + PermuteDims, ArrayDiagonal, ArrayElementwiseApplyFunc, ArrayContraction, _permute_dims, Reshape +from sympy.tensor.array.expressions.arrayexpr_derivatives import array_derive + +k = symbols("k") + +I = Identity(k) +X = MatrixSymbol("X", k, k) +x = MatrixSymbol("x", k, 1) + +A = MatrixSymbol("A", k, k) +B = MatrixSymbol("B", k, k) +C = MatrixSymbol("C", k, k) +D = MatrixSymbol("D", k, k) + +A1 = ArraySymbol("A", (3, 2, k)) + + +def test_arrayexpr_derivatives1(): + + res = array_derive(X, X) + assert res == PermuteDims(ArrayTensorProduct(I, I), [0, 2, 1, 3]) + + cg = ArrayTensorProduct(A, X, B) + res = array_derive(cg, X) + assert res == _permute_dims( + ArrayTensorProduct(I, A, I, B), + [0, 4, 2, 3, 1, 5, 6, 7]) + + cg = ArrayContraction(X, (0, 1)) + res = array_derive(cg, X) + assert res == ArrayContraction(ArrayTensorProduct(I, I), (1, 3)) + + cg = ArrayDiagonal(X, (0, 1)) + res = array_derive(cg, X) + assert res == ArrayDiagonal(ArrayTensorProduct(I, I), (1, 3)) + + cg = ElementwiseApplyFunction(sin, X) + res = array_derive(cg, X) + assert res.dummy_eq(ArrayDiagonal( + ArrayTensorProduct( + ElementwiseApplyFunction(cos, X), + I, + I + ), (0, 3), (1, 5))) + + cg = ArrayElementwiseApplyFunc(sin, X) + res = array_derive(cg, X) + assert res.dummy_eq(ArrayDiagonal( + ArrayTensorProduct( + I, + I, + ArrayElementwiseApplyFunc(cos, X) + ), (1, 4), (3, 5))) + + res = array_derive(A1, A1) + assert res == PermuteDims( + ArrayTensorProduct(Identity(3), Identity(2), Identity(k)), + [0, 2, 4, 1, 3, 5] + ) + + cg = ArrayElementwiseApplyFunc(sin, A1) + res = array_derive(cg, A1) + assert res.dummy_eq(ArrayDiagonal( + ArrayTensorProduct( + Identity(3), Identity(2), Identity(k), + ArrayElementwiseApplyFunc(cos, A1) + ), (1, 6), (3, 7), (5, 8) + )) + + cg = Reshape(A, (k**2,)) + res = array_derive(cg, A) + assert res == Reshape(PermuteDims(ArrayTensorProduct(I, I), [0, 2, 1, 3]), (k, k, k**2)) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_as_explicit.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_as_explicit.py new file mode 100644 index 0000000000000000000000000000000000000000..30cc61b1ee651ca032e165cd67926fa33c71354f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_as_explicit.py @@ -0,0 +1,63 @@ +from sympy.core.symbol import Symbol +from sympy.matrices.expressions.matexpr import MatrixSymbol +from sympy.tensor.array.arrayop import (permutedims, tensorcontraction, tensordiagonal, tensorproduct) +from sympy.tensor.array.dense_ndim_array import ImmutableDenseNDimArray +from sympy.tensor.array.expressions.array_expressions import ZeroArray, OneArray, ArraySymbol, \ + ArrayTensorProduct, PermuteDims, ArrayDiagonal, ArrayContraction, ArrayAdd +from sympy.testing.pytest import raises + + +def test_array_as_explicit_call(): + + assert ZeroArray(3, 2, 4).as_explicit() == ImmutableDenseNDimArray.zeros(3, 2, 4) + assert OneArray(3, 2, 4).as_explicit() == ImmutableDenseNDimArray([1 for i in range(3*2*4)]).reshape(3, 2, 4) + + k = Symbol("k") + X = ArraySymbol("X", (k, 
3, 2)) + raises(ValueError, lambda: X.as_explicit()) + raises(ValueError, lambda: ZeroArray(k, 2, 3).as_explicit()) + raises(ValueError, lambda: OneArray(2, k, 2).as_explicit()) + + A = ArraySymbol("A", (3, 3)) + B = ArraySymbol("B", (3, 3)) + + texpr = tensorproduct(A, B) + assert isinstance(texpr, ArrayTensorProduct) + assert texpr.as_explicit() == tensorproduct(A.as_explicit(), B.as_explicit()) + + texpr = tensorcontraction(A, (0, 1)) + assert isinstance(texpr, ArrayContraction) + assert texpr.as_explicit() == A[0, 0] + A[1, 1] + A[2, 2] + + texpr = tensordiagonal(A, (0, 1)) + assert isinstance(texpr, ArrayDiagonal) + assert texpr.as_explicit() == ImmutableDenseNDimArray([A[0, 0], A[1, 1], A[2, 2]]) + + texpr = permutedims(A, [1, 0]) + assert isinstance(texpr, PermuteDims) + assert texpr.as_explicit() == permutedims(A.as_explicit(), [1, 0]) + + +def test_array_as_explicit_matrix_symbol(): + + A = MatrixSymbol("A", 3, 3) + B = MatrixSymbol("B", 3, 3) + + texpr = tensorproduct(A, B) + assert isinstance(texpr, ArrayTensorProduct) + assert texpr.as_explicit() == tensorproduct(A.as_explicit(), B.as_explicit()) + + texpr = tensorcontraction(A, (0, 1)) + assert isinstance(texpr, ArrayContraction) + assert texpr.as_explicit() == A[0, 0] + A[1, 1] + A[2, 2] + + texpr = tensordiagonal(A, (0, 1)) + assert isinstance(texpr, ArrayDiagonal) + assert texpr.as_explicit() == ImmutableDenseNDimArray([A[0, 0], A[1, 1], A[2, 2]]) + + texpr = permutedims(A, [1, 0]) + assert isinstance(texpr, PermuteDims) + assert texpr.as_explicit() == permutedims(A.as_explicit(), [1, 0]) + + expr = ArrayAdd(ArrayTensorProduct(A, B), ArrayTensorProduct(B, A)) + assert expr.as_explicit() == expr.args[0].as_explicit() + expr.args[1].as_explicit() diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_convert_array_to_indexed.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_convert_array_to_indexed.py new file mode 100644 index 0000000000000000000000000000000000000000..a6b713fbec94ab7808c5a8a778b3313402d9d0c7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_convert_array_to_indexed.py @@ -0,0 +1,61 @@ +from sympy import Sum, Dummy, sin +from sympy.tensor.array.expressions import ArraySymbol, ArrayTensorProduct, ArrayContraction, PermuteDims, \ + ArrayDiagonal, ArrayAdd, OneArray, ZeroArray, convert_indexed_to_array, ArrayElementwiseApplyFunc, Reshape +from sympy.tensor.array.expressions.from_array_to_indexed import convert_array_to_indexed + +from sympy.abc import i, j, k, l, m, n, o + + +def test_convert_array_to_indexed_main(): + A = ArraySymbol("A", (3, 3, 3)) + B = ArraySymbol("B", (3, 3)) + C = ArraySymbol("C", (3, 3)) + + d_ = Dummy("d_") + + assert convert_array_to_indexed(A, [i, j, k]) == A[i, j, k] + + expr = ArrayTensorProduct(A, B, C) + conv = convert_array_to_indexed(expr, [i,j,k,l,m,n,o]) + assert conv == A[i,j,k]*B[l,m]*C[n,o] + assert convert_indexed_to_array(conv, [i,j,k,l,m,n,o]) == expr + + expr = ArrayContraction(A, (0, 2)) + assert convert_array_to_indexed(expr, [i]).dummy_eq(Sum(A[d_, i, d_], (d_, 0, 2))) + + expr = ArrayDiagonal(A, (0, 2)) + assert convert_array_to_indexed(expr, [i, j]) == A[j, i, j] + + expr = PermuteDims(A, [1, 2, 0]) + conv = convert_array_to_indexed(expr, [i, j, k]) + assert conv == A[k, i, j] + assert convert_indexed_to_array(conv, [i, j, k]) == expr + + expr = ArrayAdd(B, C, PermuteDims(C, [1, 0])) + conv = convert_array_to_indexed(expr, [i, j]) + 
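+    # Descriptive note (added comment, not part of the upstream test): the
+    # PermuteDims(C, [1, 0]) summand contributes the transposed element C[j, i]: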
assert conv == B[i, j] + C[i, j] + C[j, i] + assert convert_indexed_to_array(conv, [i, j]) == expr + + expr = ArrayElementwiseApplyFunc(sin, A) + conv = convert_array_to_indexed(expr, [i, j, k]) + assert conv == sin(A[i, j, k]) + assert convert_indexed_to_array(conv, [i, j, k]).dummy_eq(expr) + + assert convert_array_to_indexed(OneArray(3, 3), [i, j]) == 1 + assert convert_array_to_indexed(ZeroArray(3, 3), [i, j]) == 0 + + expr = Reshape(A, (27,)) + assert convert_array_to_indexed(expr, [i]) == A[i // 9, i // 3 % 3, i % 3] + + X = ArraySymbol("X", (2, 3, 4, 5, 6)) + expr = Reshape(X, (2*3*4*5*6,)) + assert convert_array_to_indexed(expr, [i]) == X[i // 360, i // 120 % 3, i // 30 % 4, i // 6 % 5, i % 6] + + expr = Reshape(X, (4, 9, 2, 2, 5)) + one_index = 180*i + 20*j + 10*k + 5*l + m + expected = X[one_index // (3*4*5*6), one_index // (4*5*6) % 3, one_index // (5*6) % 4, one_index // 6 % 5, one_index % 6] + assert convert_array_to_indexed(expr, [i, j, k, l, m]) == expected + + X = ArraySymbol("X", (2*3*5,)) + expr = Reshape(X, (2, 3, 5)) + assert convert_array_to_indexed(expr, [i, j, k]) == X[15*i + 5*j + k] diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_convert_array_to_matrix.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_convert_array_to_matrix.py new file mode 100644 index 0000000000000000000000000000000000000000..26839d5e7cec0554948c6b726482f9d8ca250b1c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_convert_array_to_matrix.py @@ -0,0 +1,689 @@ +from sympy import Lambda, S, Dummy, KroneckerProduct +from sympy.core.symbol import symbols +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.functions.elementary.trigonometric import cos, sin +from sympy.matrices.expressions.hadamard import HadamardProduct, HadamardPower +from sympy.matrices.expressions.special import (Identity, OneMatrix, ZeroMatrix) +from sympy.matrices.expressions.matexpr import MatrixElement +from sympy.tensor.array.expressions.from_matrix_to_array import convert_matrix_to_array +from sympy.tensor.array.expressions.from_array_to_matrix import _support_function_tp1_recognize, \ + _array_diag2contr_diagmatrix, convert_array_to_matrix, _remove_trivial_dims, _array2matrix, \ + _combine_removed, identify_removable_identity_matrices, _array_contraction_to_diagonal_multiple_identity +from sympy.matrices.expressions.matexpr import MatrixSymbol +from sympy.combinatorics import Permutation +from sympy.matrices.expressions.diagonal import DiagMatrix, DiagonalMatrix +from sympy.matrices import Trace, MatMul, Transpose +from sympy.tensor.array.expressions.array_expressions import ZeroArray, OneArray, \ + ArrayElement, ArraySymbol, ArrayElementwiseApplyFunc, _array_tensor_product, _array_contraction, \ + _array_diagonal, _permute_dims, PermuteDims, ArrayAdd, ArrayDiagonal, ArrayContraction, ArrayTensorProduct +from sympy.testing.pytest import raises + + +i, j, k, l, m, n = symbols("i j k l m n") + +I = Identity(k) +I1 = Identity(1) + +M = MatrixSymbol("M", k, k) +N = MatrixSymbol("N", k, k) +P = MatrixSymbol("P", k, k) +Q = MatrixSymbol("Q", k, k) + +A = MatrixSymbol("A", k, k) +B = MatrixSymbol("B", k, k) +C = MatrixSymbol("C", k, k) +D = MatrixSymbol("D", k, k) + +X = MatrixSymbol("X", k, k) +Y = MatrixSymbol("Y", k, k) + +a = MatrixSymbol("a", k, 1) +b = MatrixSymbol("b", k, 1) +c = MatrixSymbol("c", k, 1) +d = MatrixSymbol("d", k, 1) + +x = MatrixSymbol("x", k, 1) +y = 
MatrixSymbol("y", k, 1) + + +def test_arrayexpr_convert_array_to_matrix(): + + cg = _array_contraction(_array_tensor_product(M), (0, 1)) + assert convert_array_to_matrix(cg) == Trace(M) + + cg = _array_contraction(_array_tensor_product(M, N), (0, 1), (2, 3)) + assert convert_array_to_matrix(cg) == Trace(M) * Trace(N) + + cg = _array_contraction(_array_tensor_product(M, N), (0, 3), (1, 2)) + assert convert_array_to_matrix(cg) == Trace(M * N) + + cg = _array_contraction(_array_tensor_product(M, N), (0, 2), (1, 3)) + assert convert_array_to_matrix(cg) == Trace(M * N.T) + + cg = convert_matrix_to_array(M * N * P) + assert convert_array_to_matrix(cg) == M * N * P + + cg = convert_matrix_to_array(M * N.T * P) + assert convert_array_to_matrix(cg) == M * N.T * P + + cg = _array_contraction(_array_tensor_product(M,N,P,Q), (1, 2), (5, 6)) + assert convert_array_to_matrix(cg) == _array_tensor_product(M * N, P * Q) + + cg = _array_contraction(_array_tensor_product(-2, M, N), (1, 2)) + assert convert_array_to_matrix(cg) == -2 * M * N + + a = MatrixSymbol("a", k, 1) + b = MatrixSymbol("b", k, 1) + c = MatrixSymbol("c", k, 1) + cg = PermuteDims( + _array_contraction( + _array_tensor_product( + a, + ArrayAdd( + _array_tensor_product(b, c), + _array_tensor_product(c, b), + ) + ), (2, 4)), [0, 1, 3, 2]) + assert convert_array_to_matrix(cg) == a * (b.T * c + c.T * b) + + za = ZeroArray(m, n) + assert convert_array_to_matrix(za) == ZeroMatrix(m, n) + + cg = _array_tensor_product(3, M) + assert convert_array_to_matrix(cg) == 3 * M + + # Partial conversion to matrix multiplication: + expr = _array_contraction(_array_tensor_product(M, N, P, Q), (0, 2), (1, 4, 6)) + assert convert_array_to_matrix(expr) == _array_contraction(_array_tensor_product(M.T*N, P, Q), (0, 2, 4)) + + x = MatrixSymbol("x", k, 1) + cg = PermuteDims( + _array_contraction(_array_tensor_product(OneArray(1), x, OneArray(1), DiagMatrix(Identity(1))), + (0, 5)), Permutation(1, 2, 3)) + assert convert_array_to_matrix(cg) == x + + expr = ArrayAdd(M, PermuteDims(M, [1, 0])) + assert convert_array_to_matrix(expr) == M + Transpose(M) + + +def test_arrayexpr_convert_array_to_matrix2(): + cg = _array_contraction(_array_tensor_product(M, N), (1, 3)) + assert convert_array_to_matrix(cg) == M * N.T + + cg = PermuteDims(_array_tensor_product(M, N), Permutation([0, 1, 3, 2])) + assert convert_array_to_matrix(cg) == _array_tensor_product(M, N.T) + + cg = _array_tensor_product(M, PermuteDims(N, Permutation([1, 0]))) + assert convert_array_to_matrix(cg) == _array_tensor_product(M, N.T) + + cg = _array_contraction( + PermuteDims( + _array_tensor_product(M, N, P, Q), Permutation([0, 2, 3, 1, 4, 5, 7, 6])), + (1, 2), (3, 5) + ) + assert convert_array_to_matrix(cg) == _array_tensor_product(M * P.T * Trace(N), Q.T) + + cg = _array_contraction( + _array_tensor_product(M, N, P, PermuteDims(Q, Permutation([1, 0]))), + (1, 5), (2, 3) + ) + assert convert_array_to_matrix(cg) == _array_tensor_product(M * P.T * Trace(N), Q.T) + + cg = _array_tensor_product(M, PermuteDims(N, [1, 0])) + assert convert_array_to_matrix(cg) == _array_tensor_product(M, N.T) + + cg = _array_tensor_product(PermuteDims(M, [1, 0]), PermuteDims(N, [1, 0])) + assert convert_array_to_matrix(cg) == _array_tensor_product(M.T, N.T) + + cg = _array_tensor_product(PermuteDims(N, [1, 0]), PermuteDims(M, [1, 0])) + assert convert_array_to_matrix(cg) == _array_tensor_product(N.T, M.T) + + cg = _array_contraction(M, (0,), (1,)) + assert convert_array_to_matrix(cg) == OneMatrix(1, k)*M*OneMatrix(k, 1) + + cg = 
_array_contraction(x, (0,), (1,)) + assert convert_array_to_matrix(cg) == OneMatrix(1, k)*x + + Xm = MatrixSymbol("Xm", m, n) + cg = _array_contraction(Xm, (0,), (1,)) + assert convert_array_to_matrix(cg) == OneMatrix(1, m)*Xm*OneMatrix(n, 1) + + +def test_arrayexpr_convert_array_to_diagonalized_vector(): + + # Check matrix recognition over trivial dimensions: + + cg = _array_tensor_product(a, b) + assert convert_array_to_matrix(cg) == a * b.T + + cg = _array_tensor_product(I1, a, b) + assert convert_array_to_matrix(cg) == a * b.T + + # Recognize trace inside a tensor product: + + cg = _array_contraction(_array_tensor_product(A, B, C), (0, 3), (1, 2)) + assert convert_array_to_matrix(cg) == Trace(A * B) * C + + # Transform diagonal operator to contraction: + + cg = _array_diagonal(_array_tensor_product(A, a), (1, 2)) + assert _array_diag2contr_diagmatrix(cg) == _array_contraction(_array_tensor_product(A, OneArray(1), DiagMatrix(a)), (1, 3)) + assert convert_array_to_matrix(cg) == A * DiagMatrix(a) + + cg = _array_diagonal(_array_tensor_product(a, b), (0, 2)) + assert _array_diag2contr_diagmatrix(cg) == _permute_dims( + _array_contraction(_array_tensor_product(DiagMatrix(a), OneArray(1), b), (0, 3)), [1, 2, 0] + ) + assert convert_array_to_matrix(cg) == b.T * DiagMatrix(a) + + cg = _array_diagonal(_array_tensor_product(A, a), (0, 2)) + assert _array_diag2contr_diagmatrix(cg) == _array_contraction(_array_tensor_product(A, OneArray(1), DiagMatrix(a)), (0, 3)) + assert convert_array_to_matrix(cg) == A.T * DiagMatrix(a) + + cg = _array_diagonal(_array_tensor_product(I, x, I1), (0, 2), (3, 5)) + assert _array_diag2contr_diagmatrix(cg) == _array_contraction(_array_tensor_product(I, OneArray(1), I1, DiagMatrix(x)), (0, 5)) + assert convert_array_to_matrix(cg) == DiagMatrix(x) + + cg = _array_diagonal(_array_tensor_product(I, x, A, B), (1, 2), (5, 6)) + assert _array_diag2contr_diagmatrix(cg) == _array_diagonal(_array_contraction(_array_tensor_product(I, OneArray(1), A, B, DiagMatrix(x)), (1, 7)), (5, 6)) + # TODO: this is returning a wrong result: + # convert_array_to_matrix(cg) + + cg = _array_diagonal(_array_tensor_product(I1, a, b), (1, 3, 5)) + assert convert_array_to_matrix(cg) == a*b.T + + cg = _array_diagonal(_array_tensor_product(I1, a, b), (1, 3)) + assert _array_diag2contr_diagmatrix(cg) == _array_contraction(_array_tensor_product(OneArray(1), a, b, I1), (2, 6)) + assert convert_array_to_matrix(cg) == a*b.T + + cg = _array_diagonal(_array_tensor_product(x, I1), (1, 2)) + assert isinstance(cg, ArrayDiagonal) + assert cg.diagonal_indices == ((1, 2),) + assert convert_array_to_matrix(cg) == x + + cg = _array_diagonal(_array_tensor_product(x, I), (0, 2)) + assert _array_diag2contr_diagmatrix(cg) == _array_contraction(_array_tensor_product(OneArray(1), I, DiagMatrix(x)), (1, 3)) + assert convert_array_to_matrix(cg).doit() == DiagMatrix(x) + + raises(ValueError, lambda: _array_diagonal(x, (1,))) + + # Ignore identity matrices with contractions: + + cg = _array_contraction(_array_tensor_product(I, A, I, I), (0, 2), (1, 3), (5, 7)) + assert cg.split_multiple_contractions() == cg + assert convert_array_to_matrix(cg) == Trace(A) * I + + cg = _array_contraction(_array_tensor_product(Trace(A) * I, I, I), (1, 5), (3, 4)) + assert cg.split_multiple_contractions() == cg + assert convert_array_to_matrix(cg).doit() == Trace(A) * I + + # Add DiagMatrix when required: + + cg = _array_contraction(_array_tensor_product(A, a), (1, 2)) + assert cg.split_multiple_contractions() == cg + assert 
convert_array_to_matrix(cg) == A * a + + cg = _array_contraction(_array_tensor_product(A, a, B), (1, 2, 4)) + assert cg.split_multiple_contractions() == _array_contraction(_array_tensor_product(A, DiagMatrix(a), OneArray(1), B), (1, 2), (3, 5)) + assert convert_array_to_matrix(cg) == A * DiagMatrix(a) * B + + cg = _array_contraction(_array_tensor_product(A, a, B), (0, 2, 4)) + assert cg.split_multiple_contractions() == _array_contraction(_array_tensor_product(A, DiagMatrix(a), OneArray(1), B), (0, 2), (3, 5)) + assert convert_array_to_matrix(cg) == A.T * DiagMatrix(a) * B + + cg = _array_contraction(_array_tensor_product(A, a, b, a.T, B), (0, 2, 4, 7, 9)) + assert cg.split_multiple_contractions() == _array_contraction(_array_tensor_product(A, DiagMatrix(a), OneArray(1), + DiagMatrix(b), OneArray(1), DiagMatrix(a), OneArray(1), B), + (0, 2), (3, 5), (6, 9), (8, 12)) + assert convert_array_to_matrix(cg) == A.T * DiagMatrix(a) * DiagMatrix(b) * DiagMatrix(a) * B.T + + cg = _array_contraction(_array_tensor_product(I1, I1, I1), (1, 2, 4)) + assert cg.split_multiple_contractions() == _array_contraction(_array_tensor_product(I1, I1, OneArray(1), I1), (1, 2), (3, 5)) + assert convert_array_to_matrix(cg) == 1 + + cg = _array_contraction(_array_tensor_product(I, I, I, I, A), (1, 2, 8), (5, 6, 9)) + assert convert_array_to_matrix(cg.split_multiple_contractions()).doit() == A + + cg = _array_contraction(_array_tensor_product(A, a, C, a, B), (1, 2, 4), (5, 6, 8)) + expected = _array_contraction(_array_tensor_product(A, DiagMatrix(a), OneArray(1), C, DiagMatrix(a), OneArray(1), B), (1, 3), (2, 5), (6, 7), (8, 10)) + assert cg.split_multiple_contractions() == expected + assert convert_array_to_matrix(cg) == A * DiagMatrix(a) * C * DiagMatrix(a) * B + + cg = _array_contraction(_array_tensor_product(a, I1, b, I1, (a.T*b).applyfunc(cos)), (1, 2, 8), (5, 6, 9)) + expected = _array_contraction(_array_tensor_product(a, I1, OneArray(1), b, I1, OneArray(1), (a.T*b).applyfunc(cos)), + (1, 3), (2, 10), (6, 8), (7, 11)) + assert cg.split_multiple_contractions().dummy_eq(expected) + assert convert_array_to_matrix(cg).doit().dummy_eq(MatMul(a, (a.T * b).applyfunc(cos), b.T)) + + +def test_arrayexpr_convert_array_contraction_tp_additions(): + a = ArrayAdd( + _array_tensor_product(M, N), + _array_tensor_product(N, M) + ) + tp = _array_tensor_product(P, a, Q) + expr = _array_contraction(tp, (3, 4)) + expected = _array_tensor_product( + P, + ArrayAdd( + _array_contraction(_array_tensor_product(M, N), (1, 2)), + _array_contraction(_array_tensor_product(N, M), (1, 2)), + ), + Q + ) + assert expr == expected + assert convert_array_to_matrix(expr) == _array_tensor_product(P, M * N + N * M, Q) + + expr = _array_contraction(tp, (1, 2), (3, 4), (5, 6)) + result = _array_contraction( + _array_tensor_product( + P, + ArrayAdd( + _array_contraction(_array_tensor_product(M, N), (1, 2)), + _array_contraction(_array_tensor_product(N, M), (1, 2)), + ), + Q + ), (1, 2), (3, 4)) + assert expr == result + assert convert_array_to_matrix(expr) == P * (M * N + N * M) * Q + + +def test_arrayexpr_convert_array_to_implicit_matmul(): + # Trivial dimensions are suppressed, so the result can be expressed in matrix form: + + cg = _array_tensor_product(a, b) + assert convert_array_to_matrix(cg) == a * b.T + + cg = _array_tensor_product(a, b, I) + assert convert_array_to_matrix(cg) == _array_tensor_product(a*b.T, I) + + cg = _array_tensor_product(I, a, b) + assert convert_array_to_matrix(cg) == _array_tensor_product(I, a*b.T) + + cg = 
_array_tensor_product(a, I, b) + assert convert_array_to_matrix(cg) == _array_tensor_product(a, I, b) + + cg = _array_contraction(_array_tensor_product(I, I), (1, 2)) + assert convert_array_to_matrix(cg) == I + + cg = PermuteDims(_array_tensor_product(I, Identity(1)), [0, 2, 1, 3]) + assert convert_array_to_matrix(cg) == I + + +def test_arrayexpr_convert_array_to_matrix_remove_trivial_dims(): + + # Tensor Product: + assert _remove_trivial_dims(_array_tensor_product(a, b)) == (a * b.T, [1, 3]) + assert _remove_trivial_dims(_array_tensor_product(a.T, b)) == (a * b.T, [0, 3]) + assert _remove_trivial_dims(_array_tensor_product(a, b.T)) == (a * b.T, [1, 2]) + assert _remove_trivial_dims(_array_tensor_product(a.T, b.T)) == (a * b.T, [0, 2]) + + assert _remove_trivial_dims(_array_tensor_product(I, a.T, b.T)) == (_array_tensor_product(I, a * b.T), [2, 4]) + assert _remove_trivial_dims(_array_tensor_product(a.T, I, b.T)) == (_array_tensor_product(a.T, I, b.T), []) + + assert _remove_trivial_dims(_array_tensor_product(a, I)) == (_array_tensor_product(a, I), []) + assert _remove_trivial_dims(_array_tensor_product(I, a)) == (_array_tensor_product(I, a), []) + + assert _remove_trivial_dims(_array_tensor_product(a.T, b.T, c, d)) == ( + _array_tensor_product(a * b.T, c * d.T), [0, 2, 5, 7]) + assert _remove_trivial_dims(_array_tensor_product(a.T, I, b.T, c, d, I)) == ( + _array_tensor_product(a.T, I, b*c.T, d, I), [4, 7]) + + # Addition: + + cg = ArrayAdd(_array_tensor_product(a, b), _array_tensor_product(c, d)) + assert _remove_trivial_dims(cg) == (a * b.T + c * d.T, [1, 3]) + + # Permute Dims: + + cg = PermuteDims(_array_tensor_product(a, b), Permutation(3)(1, 2)) + assert _remove_trivial_dims(cg) == (a * b.T, [2, 3]) + + cg = PermuteDims(_array_tensor_product(a, I, b), Permutation(5)(1, 2, 3, 4)) + assert _remove_trivial_dims(cg) == (cg, []) + + cg = PermuteDims(_array_tensor_product(I, b, a), Permutation(5)(1, 2, 4, 5, 3)) + assert _remove_trivial_dims(cg) == (PermuteDims(_array_tensor_product(I, b * a.T), [0, 2, 3, 1]), [4, 5]) + + # Diagonal: + + cg = _array_diagonal(_array_tensor_product(M, a), (1, 2)) + assert _remove_trivial_dims(cg) == (cg, []) + + # Contraction: + + cg = _array_contraction(_array_tensor_product(M, a), (1, 2)) + assert _remove_trivial_dims(cg) == (cg, []) + + # A few more cases to test the removal and shift of nested removed axes + # with array contractions and array diagonals: + tp = _array_tensor_product( + OneMatrix(1, 1), + M, + x, + OneMatrix(1, 1), + Identity(1), + ) + + expr = _array_contraction(tp, (1, 8)) + rexpr, removed = _remove_trivial_dims(expr) + assert removed == [0, 5, 6, 7] + + expr = _array_contraction(tp, (1, 8), (3, 4)) + rexpr, removed = _remove_trivial_dims(expr) + assert removed == [0, 3, 4, 5] + + expr = _array_diagonal(tp, (1, 8)) + rexpr, removed = _remove_trivial_dims(expr) + assert removed == [0, 5, 6, 7, 8] + + expr = _array_diagonal(tp, (1, 8), (3, 4)) + rexpr, removed = _remove_trivial_dims(expr) + assert removed == [0, 3, 4, 5, 6] + + expr = _array_diagonal(_array_contraction(_array_tensor_product(A, x, I, I1), (1, 2, 5)), (1, 4)) + rexpr, removed = _remove_trivial_dims(expr) + assert removed == [2, 3] + + cg = _array_diagonal(_array_tensor_product(PermuteDims(_array_tensor_product(x, I1), Permutation(1, 2, 3)), (x.T*x).applyfunc(sqrt)), (2, 4), (3, 5)) + rexpr, removed = _remove_trivial_dims(cg) + assert removed == [1, 2] + + # Contractions with identity matrices need to be followed by a permutation + # in order + cg = 
_array_contraction(_array_tensor_product(A, B, C, M, I), (1, 8)) + ret, removed = _remove_trivial_dims(cg) + assert ret == PermuteDims(_array_tensor_product(A, B, C, M), [0, 2, 3, 4, 5, 6, 7, 1]) + assert removed == [] + + cg = _array_contraction(_array_tensor_product(A, B, C, M, I), (1, 8), (3, 4)) + ret, removed = _remove_trivial_dims(cg) + assert ret == PermuteDims(_array_contraction(_array_tensor_product(A, B, C, M), (3, 4)), [0, 2, 3, 4, 5, 1]) + assert removed == [] + + # Trivial matrices are sometimes inserted into MatMul expressions: + + cg = _array_tensor_product(b*b.T, a.T*a) + ret, removed = _remove_trivial_dims(cg) + assert ret == b*a.T*a*b.T + assert removed == [2, 3] + + Xs = ArraySymbol("X", (3, 2, k)) + cg = _array_tensor_product(M, Xs, b.T*c, a*a.T, b*b.T, c.T*d) + ret, removed = _remove_trivial_dims(cg) + assert ret == _array_tensor_product(M, Xs, a*b.T*c*c.T*d*a.T, b*b.T) + assert removed == [5, 6, 11, 12] + + cg = _array_diagonal(_array_tensor_product(I, I1, x), (1, 4), (3, 5)) + assert _remove_trivial_dims(cg) == (PermuteDims(_array_diagonal(_array_tensor_product(I, x), (1, 2)), Permutation(1, 2)), [1]) + + expr = _array_diagonal(_array_tensor_product(x, I, y), (0, 2)) + assert _remove_trivial_dims(expr) == (PermuteDims(_array_tensor_product(DiagMatrix(x), y), [1, 2, 3, 0]), [0]) + + expr = _array_diagonal(_array_tensor_product(x, I, y), (0, 2), (3, 4)) + assert _remove_trivial_dims(expr) == (expr, []) + + +def test_arrayexpr_convert_array_to_matrix_diag2contraction_diagmatrix(): + cg = _array_diagonal(_array_tensor_product(M, a), (1, 2)) + res = _array_diag2contr_diagmatrix(cg) + assert res.shape == cg.shape + assert res == _array_contraction(_array_tensor_product(M, OneArray(1), DiagMatrix(a)), (1, 3)) + + raises(ValueError, lambda: _array_diagonal(_array_tensor_product(a, M), (1, 2))) + + cg = _array_diagonal(_array_tensor_product(a.T, M), (1, 2)) + res = _array_diag2contr_diagmatrix(cg) + assert res.shape == cg.shape + assert res == _array_contraction(_array_tensor_product(OneArray(1), M, DiagMatrix(a.T)), (1, 4)) + + cg = _array_diagonal(_array_tensor_product(a.T, M, N, b.T), (1, 2), (4, 7)) + res = _array_diag2contr_diagmatrix(cg) + assert res.shape == cg.shape + assert res == _array_contraction( + _array_tensor_product(OneArray(1), M, N, OneArray(1), DiagMatrix(a.T), DiagMatrix(b.T)), (1, 7), (3, 9)) + + cg = _array_diagonal(_array_tensor_product(a, M, N, b.T), (0, 2), (4, 7)) + res = _array_diag2contr_diagmatrix(cg) + assert res.shape == cg.shape + assert res == _array_contraction( + _array_tensor_product(OneArray(1), M, N, OneArray(1), DiagMatrix(a), DiagMatrix(b.T)), (1, 6), (3, 9)) + + cg = _array_diagonal(_array_tensor_product(a, M, N, b.T), (0, 4), (3, 7)) + res = _array_diag2contr_diagmatrix(cg) + assert res.shape == cg.shape + assert res == _array_contraction( + _array_tensor_product(OneArray(1), M, N, OneArray(1), DiagMatrix(a), DiagMatrix(b.T)), (3, 6), (2, 9)) + + I1 = Identity(1) + x = MatrixSymbol("x", k, 1) + A = MatrixSymbol("A", k, k) + cg = _array_diagonal(_array_tensor_product(x, A.T, I1), (0, 2)) + assert _array_diag2contr_diagmatrix(cg).shape == cg.shape + assert _array2matrix(cg).shape == cg.shape + + +def test_arrayexpr_convert_array_to_matrix_support_function(): + + assert _support_function_tp1_recognize([], [2 * k]) == 2 * k + + assert _support_function_tp1_recognize([(1, 2)], [A, 2 * k, B, 3]) == 6 * k * A * B + + assert _support_function_tp1_recognize([(0, 3), (1, 2)], [A, B]) == Trace(A * B) + + assert 
_support_function_tp1_recognize([(1, 2)], [A, B]) == A * B + assert _support_function_tp1_recognize([(0, 2)], [A, B]) == A.T * B + assert _support_function_tp1_recognize([(1, 3)], [A, B]) == A * B.T + assert _support_function_tp1_recognize([(0, 3)], [A, B]) == A.T * B.T + + assert _support_function_tp1_recognize([(1, 2), (5, 6)], [A, B, C, D]) == _array_tensor_product(A * B, C * D) + assert _support_function_tp1_recognize([(1, 4), (3, 6)], [A, B, C, D]) == PermuteDims( + _array_tensor_product(A * C, B * D), [0, 2, 1, 3]) + + assert _support_function_tp1_recognize([(0, 3), (1, 4)], [A, B, C]) == B * A * C + + assert _support_function_tp1_recognize([(9, 10), (1, 2), (5, 6), (3, 4), (7, 8)], + [X, Y, A, B, C, D]) == X * Y * A * B * C * D + + assert _support_function_tp1_recognize([(9, 10), (1, 2), (5, 6), (3, 4)], + [X, Y, A, B, C, D]) == _array_tensor_product(X * Y * A * B, C * D) + + assert _support_function_tp1_recognize([(1, 7), (3, 8), (4, 11)], [X, Y, A, B, C, D]) == PermuteDims( + _array_tensor_product(X * B.T, Y * C, A.T * D.T), [0, 2, 4, 1, 3, 5] + ) + + assert _support_function_tp1_recognize([(0, 1), (3, 6), (5, 8)], [X, A, B, C, D]) == PermuteDims( + _array_tensor_product(Trace(X) * A * C, B * D), [0, 2, 1, 3]) + + assert _support_function_tp1_recognize([(1, 2), (3, 4), (5, 6), (7, 8)], [A, A, B, C, D]) == A ** 2 * B * C * D + assert _support_function_tp1_recognize([(1, 2), (3, 4), (5, 6), (7, 8)], [X, A, B, C, D]) == X * A * B * C * D + + assert _support_function_tp1_recognize([(1, 6), (3, 8), (5, 10)], [X, Y, A, B, C, D]) == PermuteDims( + _array_tensor_product(X * B, Y * C, A * D), [0, 2, 4, 1, 3, 5] + ) + + assert _support_function_tp1_recognize([(1, 4), (3, 6)], [A, B, C, D]) == PermuteDims( + _array_tensor_product(A * C, B * D), [0, 2, 1, 3]) + + assert _support_function_tp1_recognize([(0, 4), (1, 7), (2, 5), (3, 8)], [X, A, B, C, D]) == C*X.T*B*A*D + + assert _support_function_tp1_recognize([(0, 4), (1, 7), (2, 5), (3, 8)], [X, A, B, C, D]) == C*X.T*B*A*D + + +def test_convert_array_to_hadamard_products(): + + expr = HadamardProduct(M, N) + cg = convert_matrix_to_array(expr) + ret = convert_array_to_matrix(cg) + assert ret == expr + + expr = HadamardProduct(M, N)*P + cg = convert_matrix_to_array(expr) + ret = convert_array_to_matrix(cg) + assert ret == expr + + expr = Q*HadamardProduct(M, N)*P + cg = convert_matrix_to_array(expr) + ret = convert_array_to_matrix(cg) + assert ret == expr + + expr = Q*HadamardProduct(M, N.T)*P + cg = convert_matrix_to_array(expr) + ret = convert_array_to_matrix(cg) + assert ret == expr + + expr = HadamardProduct(M, N)*HadamardProduct(Q, P) + cg = convert_matrix_to_array(expr) + ret = convert_array_to_matrix(cg) + assert expr == ret + + expr = P.T*HadamardProduct(M, N)*HadamardProduct(Q, P) + cg = convert_matrix_to_array(expr) + ret = convert_array_to_matrix(cg) + assert expr == ret + + # ArrayDiagonal should be converted + cg = _array_diagonal(_array_tensor_product(M, N, Q), (1, 3), (0, 2, 4)) + ret = convert_array_to_matrix(cg) + expected = PermuteDims(_array_diagonal(_array_tensor_product(HadamardProduct(M.T, N.T), Q), (1, 2)), [1, 0, 2]) + assert expected == ret + + # Special case that should return the same expression: + cg = _array_diagonal(_array_tensor_product(HadamardProduct(M, N), Q), (0, 2)) + ret = convert_array_to_matrix(cg) + assert ret == cg + + # Hadamard products with traces: + + expr = Trace(HadamardProduct(M, N)) + cg = convert_matrix_to_array(expr) + ret = convert_array_to_matrix(cg) + assert ret == Trace(HadamardProduct(M.T, 
N.T)) + + expr = Trace(A*HadamardProduct(M, N)) + cg = convert_matrix_to_array(expr) + ret = convert_array_to_matrix(cg) + assert ret == Trace(HadamardProduct(M, N)*A) + + expr = Trace(HadamardProduct(A, M)*N) + cg = convert_matrix_to_array(expr) + ret = convert_array_to_matrix(cg) + assert ret == Trace(HadamardProduct(M.T, N)*A) + + # These should not be converted into Hadamard products: + + cg = _array_diagonal(_array_tensor_product(M, N), (0, 1, 2, 3)) + ret = convert_array_to_matrix(cg) + assert ret == cg + + cg = _array_diagonal(_array_tensor_product(A), (0, 1)) + ret = convert_array_to_matrix(cg) + assert ret == cg + + cg = _array_diagonal(_array_tensor_product(M, N, P), (0, 2, 4), (1, 3, 5)) + assert convert_array_to_matrix(cg) == HadamardProduct(M, N, P) + + cg = _array_diagonal(_array_tensor_product(M, N, P), (0, 3, 4), (1, 2, 5)) + assert convert_array_to_matrix(cg) == HadamardProduct(M, P, N.T) + + cg = _array_diagonal(_array_tensor_product(I, I1, x), (1, 4), (3, 5)) + assert convert_array_to_matrix(cg) == DiagMatrix(x) + + +def test_identify_removable_identity_matrices(): + + D = DiagonalMatrix(MatrixSymbol("D", k, k)) + + cg = _array_contraction(_array_tensor_product(A, B, I), (1, 2, 4, 5)) + expected = _array_contraction(_array_tensor_product(A, B), (1, 2)) + assert identify_removable_identity_matrices(cg) == expected + + cg = _array_contraction(_array_tensor_product(A, B, C, I), (1, 3, 5, 6, 7)) + expected = _array_contraction(_array_tensor_product(A, B, C), (1, 3, 5)) + assert identify_removable_identity_matrices(cg) == expected + + # Tests with diagonal matrices: + + cg = _array_contraction(_array_tensor_product(A, B, D), (1, 2, 4, 5)) + ret = identify_removable_identity_matrices(cg) + expected = _array_contraction(_array_tensor_product(A, B, D), (1, 4), (2, 5)) + assert ret == expected + + cg = _array_contraction(_array_tensor_product(A, B, D, M, N), (1, 2, 4, 5, 6, 8)) + ret = identify_removable_identity_matrices(cg) + assert ret == cg + + +def test_combine_removed(): + + assert _combine_removed(6, [0, 1, 2], [0, 1, 2]) == [0, 1, 2, 3, 4, 5] + assert _combine_removed(8, [2, 5], [1, 3, 4]) == [1, 2, 4, 5, 6] + assert _combine_removed(8, [7], []) == [7] + + +def test_array_contraction_to_diagonal_multiple_identities(): + + expr = _array_contraction(_array_tensor_product(A, B, I, C), (1, 2, 4), (5, 6)) + assert _array_contraction_to_diagonal_multiple_identity(expr) == (expr, []) + assert convert_array_to_matrix(expr) == _array_contraction(_array_tensor_product(A, B, C), (1, 2, 4)) + + expr = _array_contraction(_array_tensor_product(A, I, I), (1, 2, 4)) + assert _array_contraction_to_diagonal_multiple_identity(expr) == (A, [2]) + assert convert_array_to_matrix(expr) == A + + expr = _array_contraction(_array_tensor_product(A, I, I, B), (1, 2, 4), (3, 6)) + assert _array_contraction_to_diagonal_multiple_identity(expr) == (expr, []) + + expr = _array_contraction(_array_tensor_product(A, I, I, B), (1, 2, 3, 4, 6)) + assert _array_contraction_to_diagonal_multiple_identity(expr) == (expr, []) + + +def test_convert_array_element_to_matrix(): + + expr = ArrayElement(M, (i, j)) + assert convert_array_to_matrix(expr) == MatrixElement(M, i, j) + + expr = ArrayElement(_array_contraction(_array_tensor_product(M, N), (1, 3)), (i, j)) + assert convert_array_to_matrix(expr) == MatrixElement(M*N.T, i, j) + + expr = ArrayElement(_array_tensor_product(M, N), (i, j, m, n)) + assert convert_array_to_matrix(expr) == expr + + +def test_convert_array_elementwise_function_to_matrix(): + + d = 
Dummy("d") + + expr = ArrayElementwiseApplyFunc(Lambda(d, sin(d)), x.T*y) + assert convert_array_to_matrix(expr) == sin(x.T*y) + + expr = ArrayElementwiseApplyFunc(Lambda(d, d**2), x.T*y) + assert convert_array_to_matrix(expr) == (x.T*y)**2 + + expr = ArrayElementwiseApplyFunc(Lambda(d, sin(d)), x) + assert convert_array_to_matrix(expr).dummy_eq(x.applyfunc(sin)) + + expr = ArrayElementwiseApplyFunc(Lambda(d, 1 / (2 * sqrt(d))), x) + assert convert_array_to_matrix(expr) == S.Half * HadamardPower(x, -S.Half) + + +def test_array2matrix(): + # See issue https://github.com/sympy/sympy/pull/22877 + expr = PermuteDims(ArrayContraction(ArrayTensorProduct(x, I, I1, x), (0, 3), (1, 7)), Permutation(2, 3)) + expected = PermuteDims(ArrayTensorProduct(x*x.T, I1), Permutation(3)(1, 2)) + assert _array2matrix(expr) == expected + + +def test_recognize_broadcasting(): + expr = ArrayTensorProduct(x.T*x, A) + assert _remove_trivial_dims(expr) == (KroneckerProduct(x.T*x, A), [0, 1]) + + expr = ArrayTensorProduct(A, x.T*x) + assert _remove_trivial_dims(expr) == (KroneckerProduct(A, x.T*x), [2, 3]) + + expr = ArrayTensorProduct(A, B, x.T*x, C) + assert _remove_trivial_dims(expr) == (ArrayTensorProduct(A, KroneckerProduct(B, x.T*x), C), [4, 5]) + + # Always prefer matrix multiplication to Kronecker product, if possible: + expr = ArrayTensorProduct(a, b, x.T*x) + assert _remove_trivial_dims(expr) == (a*x.T*x*b.T, [1, 3, 4, 5]) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_convert_indexed_to_array.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_convert_indexed_to_array.py new file mode 100644 index 0000000000000000000000000000000000000000..258062eadeca041ae3c864dabeefd5165f1cef11 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_convert_indexed_to_array.py @@ -0,0 +1,205 @@ +from sympy import tanh +from sympy.concrete.summations import Sum +from sympy.core.symbol import symbols +from sympy.functions.special.tensor_functions import KroneckerDelta +from sympy.matrices.expressions.matexpr import MatrixSymbol +from sympy.matrices.expressions.special import Identity +from sympy.tensor.array.expressions import ArrayElementwiseApplyFunc +from sympy.tensor.indexed import IndexedBase +from sympy.combinatorics import Permutation +from sympy.tensor.array.expressions.array_expressions import ArrayContraction, ArrayTensorProduct, \ + ArrayDiagonal, ArrayAdd, PermuteDims, ArrayElement, _array_tensor_product, _array_contraction, _array_diagonal, \ + _array_add, _permute_dims, ArraySymbol, OneArray +from sympy.tensor.array.expressions.from_array_to_matrix import convert_array_to_matrix +from sympy.tensor.array.expressions.from_indexed_to_array import convert_indexed_to_array, _convert_indexed_to_array +from sympy.testing.pytest import raises + + +A, B = symbols("A B", cls=IndexedBase) +i, j, k, l, m, n = symbols("i j k l m n") +d0, d1, d2, d3 = symbols("d0:4") + +I = Identity(k) + +M = MatrixSymbol("M", k, k) +N = MatrixSymbol("N", k, k) +P = MatrixSymbol("P", k, k) +Q = MatrixSymbol("Q", k, k) + +a = MatrixSymbol("a", k, 1) +b = MatrixSymbol("b", k, 1) +c = MatrixSymbol("c", k, 1) +d = MatrixSymbol("d", k, 1) + + +def test_arrayexpr_convert_index_to_array_support_function(): + expr = M[i, j] + assert _convert_indexed_to_array(expr) == (M, (i, j)) + expr = M[i, j]*N[k, l] + assert _convert_indexed_to_array(expr) == (ArrayTensorProduct(M, N), (i, j, k, l)) + expr = M[i, j]*N[j, k] + assert 
_convert_indexed_to_array(expr) == (ArrayDiagonal(ArrayTensorProduct(M, N), (1, 2)), (i, k, j)) + expr = Sum(M[i, j]*N[j, k], (j, 0, k-1)) + assert _convert_indexed_to_array(expr) == (ArrayContraction(ArrayTensorProduct(M, N), (1, 2)), (i, k)) + expr = M[i, j] + N[i, j] + assert _convert_indexed_to_array(expr) == (ArrayAdd(M, N), (i, j)) + expr = M[i, j] + N[j, i] + assert _convert_indexed_to_array(expr) == (ArrayAdd(M, PermuteDims(N, Permutation([1, 0]))), (i, j)) + expr = M[i, j] + M[j, i] + assert _convert_indexed_to_array(expr) == (ArrayAdd(M, PermuteDims(M, Permutation([1, 0]))), (i, j)) + expr = (M*N*P)[i, j] + assert _convert_indexed_to_array(expr) == (_array_contraction(ArrayTensorProduct(M, N, P), (1, 2), (3, 4)), (i, j)) + expr = expr.function # Disregard summation in previous expression + ret1, ret2 = _convert_indexed_to_array(expr) + assert ret1 == ArrayDiagonal(ArrayTensorProduct(M, N, P), (1, 2), (3, 4)) + assert str(ret2) == "(i, j, _i_1, _i_2)" + expr = KroneckerDelta(i, j)*M[i, k] + assert _convert_indexed_to_array(expr) == (M, ({i, j}, k)) + expr = KroneckerDelta(i, j)*KroneckerDelta(j, k)*M[i, l] + assert _convert_indexed_to_array(expr) == (M, ({i, j, k}, l)) + expr = KroneckerDelta(j, k)*(M[i, j]*N[k, l] + N[i, j]*M[k, l]) + assert _convert_indexed_to_array(expr) == (_array_diagonal(_array_add( + ArrayTensorProduct(M, N), + _permute_dims(ArrayTensorProduct(M, N), Permutation(0, 2)(1, 3)) + ), (1, 2)), (i, l, frozenset({j, k}))) + expr = KroneckerDelta(j, m)*KroneckerDelta(m, k)*(M[i, j]*N[k, l] + N[i, j]*M[k, l]) + assert _convert_indexed_to_array(expr) == (_array_diagonal(_array_add( + ArrayTensorProduct(M, N), + _permute_dims(ArrayTensorProduct(M, N), Permutation(0, 2)(1, 3)) + ), (1, 2)), (i, l, frozenset({j, m, k}))) + expr = KroneckerDelta(i, j)*KroneckerDelta(j, k)*KroneckerDelta(k,m)*M[i, 0]*KroneckerDelta(m, n) + assert _convert_indexed_to_array(expr) == (M, ({i, j, k, m, n}, 0)) + expr = M[i, i] + assert _convert_indexed_to_array(expr) == (ArrayDiagonal(M, (0, 1)), (i,)) + + +def test_arrayexpr_convert_indexed_to_array_expression(): + + s = Sum(A[i]*B[i], (i, 0, 3)) + cg = convert_indexed_to_array(s) + assert cg == ArrayContraction(ArrayTensorProduct(A, B), (0, 1)) + + expr = M*N + result = ArrayContraction(ArrayTensorProduct(M, N), (1, 2)) + elem = expr[i, j] + assert convert_indexed_to_array(elem) == result + + expr = M*N*M + elem = expr[i, j] + result = _array_contraction(_array_tensor_product(M, M, N), (1, 4), (2, 5)) + cg = convert_indexed_to_array(elem) + assert cg == result + + cg = convert_indexed_to_array((M * N * P)[i, j]) + assert cg == _array_contraction(ArrayTensorProduct(M, N, P), (1, 2), (3, 4)) + + cg = convert_indexed_to_array((M * N.T * P)[i, j]) + assert cg == _array_contraction(ArrayTensorProduct(M, N, P), (1, 3), (2, 4)) + + expr = -2*M*N + elem = expr[i, j] + cg = convert_indexed_to_array(elem) + assert cg == ArrayContraction(ArrayTensorProduct(-2, M, N), (1, 2)) + + +def test_arrayexpr_convert_array_element_to_array_expression(): + A = ArraySymbol("A", (k,)) + B = ArraySymbol("B", (k,)) + + s = Sum(A[i]*B[i], (i, 0, k-1)) + cg = convert_indexed_to_array(s) + assert cg == ArrayContraction(ArrayTensorProduct(A, B), (0, 1)) + + s = A[i]*B[i] + cg = convert_indexed_to_array(s) + assert cg == ArrayDiagonal(ArrayTensorProduct(A, B), (0, 1)) + + s = A[i]*B[j] + cg = convert_indexed_to_array(s, [i, j]) + assert cg == ArrayTensorProduct(A, B) + cg = convert_indexed_to_array(s, [j, i]) + assert cg == ArrayTensorProduct(B, A) + + s = tanh(A[i]*B[j]) 
+ cg = convert_indexed_to_array(s, [i, j]) + assert cg.dummy_eq(ArrayElementwiseApplyFunc(tanh, ArrayTensorProduct(A, B))) + + +def test_arrayexpr_convert_indexed_to_array_and_back_to_matrix(): + + expr = a.T*b + elem = expr[0, 0] + cg = convert_indexed_to_array(elem) + assert cg == ArrayElement(ArrayContraction(ArrayTensorProduct(a, b), (0, 2)), [0, 0]) + + expr = M[i,j] + N[i,j] + p1, p2 = _convert_indexed_to_array(expr) + assert convert_array_to_matrix(p1) == M + N + + expr = M[i,j] + N[j,i] + p1, p2 = _convert_indexed_to_array(expr) + assert convert_array_to_matrix(p1) == M + N.T + + expr = M[i,j]*N[k,l] + N[i,j]*M[k,l] + p1, p2 = _convert_indexed_to_array(expr) + assert convert_array_to_matrix(p1) == ArrayAdd( + ArrayTensorProduct(M, N), + ArrayTensorProduct(N, M)) + + expr = (M*N*P)[i, j] + p1, p2 = _convert_indexed_to_array(expr) + assert convert_array_to_matrix(p1) == M * N * P + + expr = Sum(M[i,j]*(N*P)[j,m], (j, 0, k-1)) + p1, p2 = _convert_indexed_to_array(expr) + assert convert_array_to_matrix(p1) == M * N * P + + expr = Sum((P[j, m] + P[m, j])*(M[i,j]*N[m,n] + N[i,j]*M[m,n]), (j, 0, k-1), (m, 0, k-1)) + p1, p2 = _convert_indexed_to_array(expr) + assert convert_array_to_matrix(p1) == M * P * N + M * P.T * N + N * P * M + N * P.T * M + + +def test_arrayexpr_convert_indexed_to_array_out_of_bounds(): + + expr = Sum(M[i, i], (i, 0, 4)) + raises(ValueError, lambda: convert_indexed_to_array(expr)) + expr = Sum(M[i, i], (i, 0, k)) + raises(ValueError, lambda: convert_indexed_to_array(expr)) + expr = Sum(M[i, i], (i, 1, k-1)) + raises(ValueError, lambda: convert_indexed_to_array(expr)) + + expr = Sum(M[i, j]*N[j,m], (j, 0, 4)) + raises(ValueError, lambda: convert_indexed_to_array(expr)) + expr = Sum(M[i, j]*N[j,m], (j, 0, k)) + raises(ValueError, lambda: convert_indexed_to_array(expr)) + expr = Sum(M[i, j]*N[j,m], (j, 1, k-1)) + raises(ValueError, lambda: convert_indexed_to_array(expr)) + + +def test_arrayexpr_convert_indexed_to_array_broadcast(): + A = ArraySymbol("A", (3, 3)) + B = ArraySymbol("B", (3, 3)) + + expr = A[i, j] + B[k, l] + O2 = OneArray(3, 3) + expected = ArrayAdd(ArrayTensorProduct(A, O2), ArrayTensorProduct(O2, B)) + assert convert_indexed_to_array(expr) == expected + assert convert_indexed_to_array(expr, [i, j, k, l]) == expected + assert convert_indexed_to_array(expr, [l, k, i, j]) == ArrayAdd(PermuteDims(ArrayTensorProduct(O2, A), [1, 0, 2, 3]), PermuteDims(ArrayTensorProduct(B, O2), [1, 0, 2, 3])) + + expr = A[i, j] + B[j, k] + O1 = OneArray(3) + assert convert_indexed_to_array(expr, [i, j, k]) == ArrayAdd(ArrayTensorProduct(A, O1), ArrayTensorProduct(O1, B)) + + C = ArraySymbol("C", (d0, d1)) + D = ArraySymbol("D", (d3, d1)) + + expr = C[i, j] + D[k, j] + assert convert_indexed_to_array(expr, [i, j, k]) == ArrayAdd(ArrayTensorProduct(C, OneArray(d3)), PermuteDims(ArrayTensorProduct(OneArray(d0), D), [0, 2, 1])) + + X = ArraySymbol("X", (5, 3)) + + expr = X[i, n] - X[j, n] + assert convert_indexed_to_array(expr, [i, j, n]) == ArrayAdd(ArrayTensorProduct(-1, OneArray(5), X), PermuteDims(ArrayTensorProduct(X, OneArray(5)), [0, 2, 1])) + + raises(ValueError, lambda: convert_indexed_to_array(C[i, j] + D[i, j])) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_convert_matrix_to_array.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_convert_matrix_to_array.py new file mode 100644 index 0000000000000000000000000000000000000000..142585882588df6aa0e4648d9d8881ea755f42a0 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_convert_matrix_to_array.py @@ -0,0 +1,128 @@ +from sympy import Lambda, KroneckerProduct +from sympy.core.symbol import symbols, Dummy +from sympy.matrices.expressions.hadamard import (HadamardPower, HadamardProduct) +from sympy.matrices.expressions.inverse import Inverse +from sympy.matrices.expressions.matexpr import MatrixSymbol +from sympy.matrices.expressions.matpow import MatPow +from sympy.matrices.expressions.special import Identity +from sympy.matrices.expressions.trace import Trace +from sympy.matrices.expressions.transpose import Transpose +from sympy.tensor.array.expressions.array_expressions import ArrayTensorProduct, ArrayContraction, \ + PermuteDims, ArrayDiagonal, ArrayElementwiseApplyFunc, _array_contraction, _array_tensor_product, Reshape +from sympy.tensor.array.expressions.from_array_to_matrix import convert_array_to_matrix +from sympy.tensor.array.expressions.from_matrix_to_array import convert_matrix_to_array + +i, j, k, l, m, n = symbols("i j k l m n") + +I = Identity(k) + +M = MatrixSymbol("M", k, k) +N = MatrixSymbol("N", k, k) +P = MatrixSymbol("P", k, k) +Q = MatrixSymbol("Q", k, k) + +A = MatrixSymbol("A", k, k) +B = MatrixSymbol("B", k, k) +C = MatrixSymbol("C", k, k) +D = MatrixSymbol("D", k, k) + +X = MatrixSymbol("X", k, k) +Y = MatrixSymbol("Y", k, k) + +a = MatrixSymbol("a", k, 1) +b = MatrixSymbol("b", k, 1) +c = MatrixSymbol("c", k, 1) +d = MatrixSymbol("d", k, 1) + + +def test_arrayexpr_convert_matrix_to_array(): + + expr = M*N + result = ArrayContraction(ArrayTensorProduct(M, N), (1, 2)) + assert convert_matrix_to_array(expr) == result + + expr = M*N*M + result = _array_contraction(ArrayTensorProduct(M, N, M), (1, 2), (3, 4)) + assert convert_matrix_to_array(expr) == result + + expr = Transpose(M) + assert convert_matrix_to_array(expr) == PermuteDims(M, [1, 0]) + + expr = M*Transpose(N) + assert convert_matrix_to_array(expr) == _array_contraction(_array_tensor_product(M, PermuteDims(N, [1, 0])), (1, 2)) + + expr = 3*M*N + res = convert_matrix_to_array(expr) + rexpr = convert_array_to_matrix(res) + assert expr == rexpr + + expr = 3*M + N*M.T*M + 4*k*N + res = convert_matrix_to_array(expr) + rexpr = convert_array_to_matrix(res) + assert expr == rexpr + + expr = Inverse(M)*N + rexpr = convert_array_to_matrix(convert_matrix_to_array(expr)) + assert expr == rexpr + + expr = M**2 + rexpr = convert_array_to_matrix(convert_matrix_to_array(expr)) + assert expr == rexpr + + expr = M*(2*N + 3*M) + res = convert_matrix_to_array(expr) + rexpr = convert_array_to_matrix(res) + assert expr == rexpr + + expr = Trace(M) + result = ArrayContraction(M, (0, 1)) + assert convert_matrix_to_array(expr) == result + + expr = 3*Trace(M) + result = ArrayContraction(ArrayTensorProduct(3, M), (0, 1)) + assert convert_matrix_to_array(expr) == result + + expr = 3*Trace(Trace(M) * M) + result = ArrayContraction(ArrayTensorProduct(3, M, M), (0, 1), (2, 3)) + assert convert_matrix_to_array(expr) == result + + expr = 3*Trace(M)**2 + result = ArrayContraction(ArrayTensorProduct(3, M, M), (0, 1), (2, 3)) + assert convert_matrix_to_array(expr) == result + + expr = HadamardProduct(M, N) + result = ArrayDiagonal(ArrayTensorProduct(M, N), (0, 2), (1, 3)) + assert convert_matrix_to_array(expr) == result + + expr = HadamardProduct(M*N, N*M) + result = ArrayDiagonal(ArrayContraction(ArrayTensorProduct(M, N, N, M), (1, 2), (5, 6)), (0, 2), (1, 3)) + assert convert_matrix_to_array(expr) == result + + expr = 
HadamardPower(M, 2) + result = ArrayDiagonal(ArrayTensorProduct(M, M), (0, 2), (1, 3)) + assert convert_matrix_to_array(expr) == result + + expr = HadamardPower(M*N, 2) + result = ArrayDiagonal(ArrayContraction(ArrayTensorProduct(M, N, M, N), (1, 2), (5, 6)), (0, 2), (1, 3)) + assert convert_matrix_to_array(expr) == result + + expr = HadamardPower(M, n) + d0 = Dummy("d0") + result = ArrayElementwiseApplyFunc(Lambda(d0, d0**n), M) + assert convert_matrix_to_array(expr).dummy_eq(result) + + expr = M**2 + assert isinstance(expr, MatPow) + assert convert_matrix_to_array(expr) == ArrayContraction(ArrayTensorProduct(M, M), (1, 2)) + + expr = a.T*b + cg = convert_matrix_to_array(expr) + assert cg == ArrayContraction(ArrayTensorProduct(a, b), (0, 2)) + + expr = KroneckerProduct(A, B) + cg = convert_matrix_to_array(expr) + assert cg == Reshape(PermuteDims(ArrayTensorProduct(A, B), [0, 2, 1, 3]), (k**2, k**2)) + + expr = KroneckerProduct(A, B, C, D) + cg = convert_matrix_to_array(expr) + assert cg == Reshape(PermuteDims(ArrayTensorProduct(A, B, C, D), [0, 2, 4, 6, 1, 3, 5, 7]), (k**4, k**4)) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_deprecated_conv_modules.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_deprecated_conv_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..b41b6105410a308e7774fce760b235497d0303bb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_deprecated_conv_modules.py @@ -0,0 +1,22 @@ +from sympy import MatrixSymbol, symbols, Sum +from sympy.tensor.array.expressions import conv_array_to_indexed, from_array_to_indexed, ArrayTensorProduct, \ + ArrayContraction, conv_array_to_matrix, from_array_to_matrix, conv_matrix_to_array, from_matrix_to_array, \ + conv_indexed_to_array, from_indexed_to_array +from sympy.testing.pytest import warns +from sympy.utilities.exceptions import SymPyDeprecationWarning + + +def test_deprecated_conv_module_results(): + + M = MatrixSymbol("M", 3, 3) + N = MatrixSymbol("N", 3, 3) + i, j, d = symbols("i j d") + + x = ArrayContraction(ArrayTensorProduct(M, N), (1, 2)) + y = Sum(M[i, d]*N[d, j], (d, 0, 2)) + + with warns(SymPyDeprecationWarning, test_stacklevel=False): + assert conv_array_to_indexed.convert_array_to_indexed(x, [i, j]).dummy_eq(from_array_to_indexed.convert_array_to_indexed(x, [i, j])) + assert conv_array_to_matrix.convert_array_to_matrix(x) == from_array_to_matrix.convert_array_to_matrix(x) + assert conv_matrix_to_array.convert_matrix_to_array(M*N) == from_matrix_to_array.convert_matrix_to_array(M*N) + assert conv_indexed_to_array.convert_indexed_to_array(y) == from_indexed_to_array.convert_indexed_to_array(y) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/utils.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e55c0e6ed47cdc9ff1c24cc92f006998aeb86822 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/expressions/utils.py @@ -0,0 +1,123 @@ +import bisect +from collections import defaultdict + +from sympy.combinatorics import Permutation +from sympy.core.containers import Tuple +from sympy.core.numbers import Integer + + +def _get_mapping_from_subranks(subranks): + mapping = {} + counter = 0 + for i, rank in enumerate(subranks): + for j in range(rank): + mapping[counter] = (i, j) + counter += 1 + return 
mapping + + +def _get_contraction_links(args, subranks, *contraction_indices): + mapping = _get_mapping_from_subranks(subranks) + contraction_tuples = [[mapping[j] for j in i] for i in contraction_indices] + dlinks = defaultdict(dict) + for links in contraction_tuples: + if len(links) == 2: + (arg1, pos1), (arg2, pos2) = links + dlinks[arg1][pos1] = (arg2, pos2) + dlinks[arg2][pos2] = (arg1, pos1) + continue + + return args, dict(dlinks) + + +def _sort_contraction_indices(pairing_indices): + pairing_indices = [Tuple(*sorted(i)) for i in pairing_indices] + pairing_indices.sort(key=lambda x: min(x)) + return pairing_indices + + +def _get_diagonal_indices(flattened_indices): + axes_contraction = defaultdict(list) + for i, ind in enumerate(flattened_indices): + if isinstance(ind, (int, Integer)): + # If the indices is a number, there can be no diagonal operation: + continue + axes_contraction[ind].append(i) + axes_contraction = {k: v for k, v in axes_contraction.items() if len(v) > 1} + # Put the diagonalized indices at the end: + ret_indices = [i for i in flattened_indices if i not in axes_contraction] + diag_indices = list(axes_contraction) + diag_indices.sort(key=lambda x: flattened_indices.index(x)) + diagonal_indices = [tuple(axes_contraction[i]) for i in diag_indices] + ret_indices += diag_indices + ret_indices = tuple(ret_indices) + return diagonal_indices, ret_indices + + +def _get_argindex(subindices, ind): + for i, sind in enumerate(subindices): + if ind == sind: + return i + if isinstance(sind, (set, frozenset)) and ind in sind: + return i + raise IndexError("%s not found in %s" % (ind, subindices)) + + +def _apply_recursively_over_nested_lists(func, arr): + if isinstance(arr, (tuple, list, Tuple)): + return tuple(_apply_recursively_over_nested_lists(func, i) for i in arr) + elif isinstance(arr, Tuple): + return Tuple.fromiter(_apply_recursively_over_nested_lists(func, i) for i in arr) + else: + return func(arr) + + +def _build_push_indices_up_func_transformation(flattened_contraction_indices): + shifts = {0: 0} + i = 0 + cumulative = 0 + while i < len(flattened_contraction_indices): + j = 1 + while i+j < len(flattened_contraction_indices): + if flattened_contraction_indices[i] + j != flattened_contraction_indices[i+j]: + break + j += 1 + cumulative += j + shifts[flattened_contraction_indices[i]] = cumulative + i += j + shift_keys = sorted(shifts.keys()) + + def func(idx): + return shifts[shift_keys[bisect.bisect_right(shift_keys, idx)-1]] + + def transform(j): + if j in flattened_contraction_indices: + return None + else: + return j - func(j) + + return transform + + +def _build_push_indices_down_func_transformation(flattened_contraction_indices): + N = flattened_contraction_indices[-1]+2 + + shifts = [i for i in range(N) if i not in flattened_contraction_indices] + + def transform(j): + if j < len(shifts): + return shifts[j] + else: + return j + shifts[-1] - len(shifts) + 1 + + return transform + + +def _apply_permutation_to_list(perm: Permutation, target_list: list): + """ + Permute a list according to the given permutation. 
+ """ + new_list = [None for i in range(perm.size)] + for i, e in enumerate(target_list): + new_list[perm(i)] = e + return new_list diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/tests/__pycache__/test_array_derivatives.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/tests/__pycache__/test_array_derivatives.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68e79fa034183085f01193c744c83ff20c368a68 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/tests/__pycache__/test_array_derivatives.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/tests/__pycache__/test_arrayop.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/tests/__pycache__/test_arrayop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a23eca8168c8e0473af64207131bbcbca7267d2e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/tests/__pycache__/test_arrayop.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/tests/__pycache__/test_immutable_ndim_array.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/tests/__pycache__/test_immutable_ndim_array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66e877bdfb88132d80ca502c95ec9c6e520f3b13 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/tests/__pycache__/test_immutable_ndim_array.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/tests/__pycache__/test_mutable_ndim_array.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/tests/__pycache__/test_mutable_ndim_array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfbccb425999ec4e7932e15d81be5a1089bbef28 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/tests/__pycache__/test_mutable_ndim_array.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/tests/test_immutable_ndim_array.py b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/tests/test_immutable_ndim_array.py new file mode 100644 index 0000000000000000000000000000000000000000..c6bed4b605c424284b4752592b03b13a9178aac8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/tensor/array/tests/test_immutable_ndim_array.py @@ -0,0 +1,452 @@ +from copy import copy + +from sympy.tensor.array.dense_ndim_array import ImmutableDenseNDimArray +from sympy.core.containers import Dict +from sympy.core.function import diff +from sympy.core.numbers import Rational +from sympy.core.singleton import S +from sympy.core.symbol import (Symbol, symbols) +from sympy.matrices import SparseMatrix +from sympy.tensor.indexed import (Indexed, IndexedBase) +from sympy.matrices import Matrix +from sympy.tensor.array.sparse_ndim_array import ImmutableSparseNDimArray +from sympy.testing.pytest import raises + + +def test_ndim_array_initiation(): + arr_with_no_elements = ImmutableDenseNDimArray([], shape=(0,)) + assert 
len(arr_with_no_elements) == 0 + assert arr_with_no_elements.rank() == 1 + + raises(ValueError, lambda: ImmutableDenseNDimArray([0], shape=(0,))) + raises(ValueError, lambda: ImmutableDenseNDimArray([1, 2, 3], shape=(0,))) + raises(ValueError, lambda: ImmutableDenseNDimArray([], shape=())) + + raises(ValueError, lambda: ImmutableSparseNDimArray([0], shape=(0,))) + raises(ValueError, lambda: ImmutableSparseNDimArray([1, 2, 3], shape=(0,))) + raises(ValueError, lambda: ImmutableSparseNDimArray([], shape=())) + + arr_with_one_element = ImmutableDenseNDimArray([23]) + assert len(arr_with_one_element) == 1 + assert arr_with_one_element[0] == 23 + assert arr_with_one_element[:] == ImmutableDenseNDimArray([23]) + assert arr_with_one_element.rank() == 1 + + arr_with_symbol_element = ImmutableDenseNDimArray([Symbol('x')]) + assert len(arr_with_symbol_element) == 1 + assert arr_with_symbol_element[0] == Symbol('x') + assert arr_with_symbol_element[:] == ImmutableDenseNDimArray([Symbol('x')]) + assert arr_with_symbol_element.rank() == 1 + + number5 = 5 + vector = ImmutableDenseNDimArray.zeros(number5) + assert len(vector) == number5 + assert vector.shape == (number5,) + assert vector.rank() == 1 + + vector = ImmutableSparseNDimArray.zeros(number5) + assert len(vector) == number5 + assert vector.shape == (number5,) + assert vector._sparse_array == Dict() + assert vector.rank() == 1 + + n_dim_array = ImmutableDenseNDimArray(range(3**4), (3, 3, 3, 3,)) + assert len(n_dim_array) == 3 * 3 * 3 * 3 + assert n_dim_array.shape == (3, 3, 3, 3) + assert n_dim_array.rank() == 4 + + array_shape = (3, 3, 3, 3) + sparse_array = ImmutableSparseNDimArray.zeros(*array_shape) + assert len(sparse_array._sparse_array) == 0 + assert len(sparse_array) == 3 * 3 * 3 * 3 + assert n_dim_array.shape == array_shape + assert n_dim_array.rank() == 4 + + one_dim_array = ImmutableDenseNDimArray([2, 3, 1]) + assert len(one_dim_array) == 3 + assert one_dim_array.shape == (3,) + assert one_dim_array.rank() == 1 + assert one_dim_array.tolist() == [2, 3, 1] + + shape = (3, 3) + array_with_many_args = ImmutableSparseNDimArray.zeros(*shape) + assert len(array_with_many_args) == 3 * 3 + assert array_with_many_args.shape == shape + assert array_with_many_args[0, 0] == 0 + assert array_with_many_args.rank() == 2 + + shape = (int(3), int(3)) + array_with_long_shape = ImmutableSparseNDimArray.zeros(*shape) + assert len(array_with_long_shape) == 3 * 3 + assert array_with_long_shape.shape == shape + assert array_with_long_shape[int(0), int(0)] == 0 + assert array_with_long_shape.rank() == 2 + + vector_with_long_shape = ImmutableDenseNDimArray(range(5), int(5)) + assert len(vector_with_long_shape) == 5 + assert vector_with_long_shape.shape == (int(5),) + assert vector_with_long_shape.rank() == 1 + raises(ValueError, lambda: vector_with_long_shape[int(5)]) + + from sympy.abc import x + for ArrayType in [ImmutableDenseNDimArray, ImmutableSparseNDimArray]: + rank_zero_array = ArrayType(x) + assert len(rank_zero_array) == 1 + assert rank_zero_array.shape == () + assert rank_zero_array.rank() == 0 + assert rank_zero_array[()] == x + raises(ValueError, lambda: rank_zero_array[0]) + + +def test_reshape(): + array = ImmutableDenseNDimArray(range(50), 50) + assert array.shape == (50,) + assert array.rank() == 1 + + array = array.reshape(5, 5, 2) + assert array.shape == (5, 5, 2) + assert array.rank() == 3 + assert len(array) == 50 + + +def test_getitem(): + for ArrayType in [ImmutableDenseNDimArray, ImmutableSparseNDimArray]: + array = 
ArrayType(range(24)).reshape(2, 3, 4) + assert array.tolist() == [[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]] + assert array[0] == ArrayType([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]) + assert array[0, 0] == ArrayType([0, 1, 2, 3]) + value = 0 + for i in range(2): + for j in range(3): + for k in range(4): + assert array[i, j, k] == value + value += 1 + + raises(ValueError, lambda: array[3, 4, 5]) + raises(ValueError, lambda: array[3, 4, 5, 6]) + raises(ValueError, lambda: array[3, 4, 5, 3:4]) + + +def test_iterator(): + array = ImmutableDenseNDimArray(range(4), (2, 2)) + assert array[0] == ImmutableDenseNDimArray([0, 1]) + assert array[1] == ImmutableDenseNDimArray([2, 3]) + + array = array.reshape(4) + j = 0 + for i in array: + assert i == j + j += 1 + + +def test_sparse(): + sparse_array = ImmutableSparseNDimArray([0, 0, 0, 1], (2, 2)) + assert len(sparse_array) == 2 * 2 + # dictionary where all data is, only non-zero entries are actually stored: + assert len(sparse_array._sparse_array) == 1 + + assert sparse_array.tolist() == [[0, 0], [0, 1]] + + for i, j in zip(sparse_array, [[0, 0], [0, 1]]): + assert i == ImmutableSparseNDimArray(j) + + def sparse_assignment(): + sparse_array[0, 0] = 123 + + assert len(sparse_array._sparse_array) == 1 + raises(TypeError, sparse_assignment) + assert len(sparse_array._sparse_array) == 1 + assert sparse_array[0, 0] == 0 + assert sparse_array/0 == ImmutableSparseNDimArray([[S.NaN, S.NaN], [S.NaN, S.ComplexInfinity]], (2, 2)) + + # test for large scale sparse array + # equality test + assert ImmutableSparseNDimArray.zeros(100000, 200000) == ImmutableSparseNDimArray.zeros(100000, 200000) + + # __mul__ and __rmul__ + a = ImmutableSparseNDimArray({200001: 1}, (100000, 200000)) + assert a * 3 == ImmutableSparseNDimArray({200001: 3}, (100000, 200000)) + assert 3 * a == ImmutableSparseNDimArray({200001: 3}, (100000, 200000)) + assert a * 0 == ImmutableSparseNDimArray({}, (100000, 200000)) + assert 0 * a == ImmutableSparseNDimArray({}, (100000, 200000)) + + # __truediv__ + assert a/3 == ImmutableSparseNDimArray({200001: Rational(1, 3)}, (100000, 200000)) + + # __neg__ + assert -a == ImmutableSparseNDimArray({200001: -1}, (100000, 200000)) + + +def test_calculation(): + + a = ImmutableDenseNDimArray([1]*9, (3, 3)) + b = ImmutableDenseNDimArray([9]*9, (3, 3)) + + c = a + b + for i in c: + assert i == ImmutableDenseNDimArray([10, 10, 10]) + + assert c == ImmutableDenseNDimArray([10]*9, (3, 3)) + assert c == ImmutableSparseNDimArray([10]*9, (3, 3)) + + c = b - a + for i in c: + assert i == ImmutableDenseNDimArray([8, 8, 8]) + + assert c == ImmutableDenseNDimArray([8]*9, (3, 3)) + assert c == ImmutableSparseNDimArray([8]*9, (3, 3)) + + +def test_ndim_array_converting(): + dense_array = ImmutableDenseNDimArray([1, 2, 3, 4], (2, 2)) + alist = dense_array.tolist() + + assert alist == [[1, 2], [3, 4]] + + matrix = dense_array.tomatrix() + assert (isinstance(matrix, Matrix)) + + for i in range(len(dense_array)): + assert dense_array[dense_array._get_tuple_index(i)] == matrix[i] + assert matrix.shape == dense_array.shape + + assert ImmutableDenseNDimArray(matrix) == dense_array + assert ImmutableDenseNDimArray(matrix.as_immutable()) == dense_array + assert ImmutableDenseNDimArray(matrix.as_mutable()) == dense_array + + sparse_array = ImmutableSparseNDimArray([1, 2, 3, 4], (2, 2)) + alist = sparse_array.tolist() + + assert alist == [[1, 2], [3, 4]] + + matrix = sparse_array.tomatrix() + assert(isinstance(matrix, 
SparseMatrix)) + + for i in range(len(sparse_array)): + assert sparse_array[sparse_array._get_tuple_index(i)] == matrix[i] + assert matrix.shape == sparse_array.shape + + assert ImmutableSparseNDimArray(matrix) == sparse_array + assert ImmutableSparseNDimArray(matrix.as_immutable()) == sparse_array + assert ImmutableSparseNDimArray(matrix.as_mutable()) == sparse_array + + + def test_converting_functions(): + arr_list = [1, 2, 3, 4] + arr_matrix = Matrix(((1, 2), (3, 4))) + + # list + arr_ndim_array = ImmutableDenseNDimArray(arr_list, (2, 2)) + assert (isinstance(arr_ndim_array, ImmutableDenseNDimArray)) + assert arr_matrix.tolist() == arr_ndim_array.tolist() + + # Matrix + arr_ndim_array = ImmutableDenseNDimArray(arr_matrix) + assert (isinstance(arr_ndim_array, ImmutableDenseNDimArray)) + assert arr_matrix.tolist() == arr_ndim_array.tolist() + assert arr_matrix.shape == arr_ndim_array.shape + + + def test_equality(): + first_list = [1, 2, 3, 4] + second_list = [1, 2, 3, 4] + third_list = [4, 3, 2, 1] + assert first_list == second_list + assert first_list != third_list + + first_ndim_array = ImmutableDenseNDimArray(first_list, (2, 2)) + second_ndim_array = ImmutableDenseNDimArray(second_list, (2, 2)) + fourth_ndim_array = ImmutableDenseNDimArray(first_list, (2, 2)) + + assert first_ndim_array == second_ndim_array + + def assignment_attempt(a): + a[0, 0] = 0 + + raises(TypeError, lambda: assignment_attempt(second_ndim_array)) + assert first_ndim_array == second_ndim_array + assert first_ndim_array == fourth_ndim_array + + + def test_arithmetic(): + a = ImmutableDenseNDimArray([3 for i in range(9)], (3, 3)) + b = ImmutableDenseNDimArray([7 for i in range(9)], (3, 3)) + + c1 = a + b + c2 = b + a + assert c1 == c2 + + d1 = a - b + d2 = b - a + assert d1 == d2 * (-1) + + e1 = a * 5 + e2 = 5 * a + e3 = copy(a) + e3 *= 5 + assert e1 == e2 == e3 + + f1 = a / 5 + f2 = copy(a) + f2 /= 5 + assert f1 == f2 + assert f1[0, 0] == f1[0, 1] == f1[0, 2] == f1[1, 0] == f1[1, 1] == \ + f1[1, 2] == f1[2, 0] == f1[2, 1] == f1[2, 2] == Rational(3, 5) + + assert type(a) == type(b) == type(c1) == type(c2) == type(d1) == type(d2) \ + == type(e1) == type(e2) == type(e3) == type(f1) + + z0 = -a + assert z0 == ImmutableDenseNDimArray([-3 for i in range(9)], (3, 3)) + + + def test_higher_dimensions(): + m3 = ImmutableDenseNDimArray(range(10, 34), (2, 3, 4)) + + assert m3.tolist() == [[[10, 11, 12, 13], + [14, 15, 16, 17], + [18, 19, 20, 21]], + + [[22, 23, 24, 25], + [26, 27, 28, 29], + [30, 31, 32, 33]]] + + assert m3._get_tuple_index(0) == (0, 0, 0) + assert m3._get_tuple_index(1) == (0, 0, 1) + assert m3._get_tuple_index(4) == (0, 1, 0) + assert m3._get_tuple_index(12) == (1, 0, 0) + + assert str(m3) == '[[[10, 11, 12, 13], [14, 15, 16, 17], [18, 19, 20, 21]], [[22, 23, 24, 25], [26, 27, 28, 29], [30, 31, 32, 33]]]' + + m3_rebuilt = ImmutableDenseNDimArray([[[10, 11, 12, 13], [14, 15, 16, 17], [18, 19, 20, 21]], [[22, 23, 24, 25], [26, 27, 28, 29], [30, 31, 32, 33]]]) + assert m3 == m3_rebuilt + + m3_other = ImmutableDenseNDimArray([[[10, 11, 12, 13], [14, 15, 16, 17], [18, 19, 20, 21]], [[22, 23, 24, 25], [26, 27, 28, 29], [30, 31, 32, 33]]], (2, 3, 4)) + + assert m3 == m3_other + + + def test_rebuild_immutable_arrays(): + sparr = ImmutableSparseNDimArray(range(10, 34), (2, 3, 4)) + densarr = ImmutableDenseNDimArray(range(10, 34), (2, 3, 4)) + + assert sparr == sparr.func(*sparr.args) + assert densarr == densarr.func(*densarr.args) + + + def test_slices(): + md = ImmutableDenseNDimArray(range(10, 34), (2, 3, 4)) + + assert 
md[:] == ImmutableDenseNDimArray(range(10, 34), (2, 3, 4)) + assert md[:, :, 0].tomatrix() == Matrix([[10, 14, 18], [22, 26, 30]]) + assert md[0, 1:2, :].tomatrix() == Matrix([[14, 15, 16, 17]]) + assert md[0, 1:3, :].tomatrix() == Matrix([[14, 15, 16, 17], [18, 19, 20, 21]]) + assert md[:, :, :] == md + + sd = ImmutableSparseNDimArray(range(10, 34), (2, 3, 4)) + assert sd == ImmutableSparseNDimArray(md) + + assert sd[:] == ImmutableSparseNDimArray(range(10, 34), (2, 3, 4)) + assert sd[:, :, 0].tomatrix() == Matrix([[10, 14, 18], [22, 26, 30]]) + assert sd[0, 1:2, :].tomatrix() == Matrix([[14, 15, 16, 17]]) + assert sd[0, 1:3, :].tomatrix() == Matrix([[14, 15, 16, 17], [18, 19, 20, 21]]) + assert sd[:, :, :] == sd + + +def test_diff_and_applyfunc(): + from sympy.abc import x, y, z + md = ImmutableDenseNDimArray([[x, y], [x*z, x*y*z]]) + assert md.diff(x) == ImmutableDenseNDimArray([[1, 0], [z, y*z]]) + assert diff(md, x) == ImmutableDenseNDimArray([[1, 0], [z, y*z]]) + + sd = ImmutableSparseNDimArray(md) + assert sd == ImmutableSparseNDimArray([x, y, x*z, x*y*z], (2, 2)) + assert sd.diff(x) == ImmutableSparseNDimArray([[1, 0], [z, y*z]]) + assert diff(sd, x) == ImmutableSparseNDimArray([[1, 0], [z, y*z]]) + + mdn = md.applyfunc(lambda x: x*3) + assert mdn == ImmutableDenseNDimArray([[3*x, 3*y], [3*x*z, 3*x*y*z]]) + assert md != mdn + + sdn = sd.applyfunc(lambda x: x/2) + assert sdn == ImmutableSparseNDimArray([[x/2, y/2], [x*z/2, x*y*z/2]]) + assert sd != sdn + + sdp = sd.applyfunc(lambda x: x+1) + assert sdp == ImmutableSparseNDimArray([[x + 1, y + 1], [x*z + 1, x*y*z + 1]]) + assert sd != sdp + + +def test_op_priority(): + from sympy.abc import x + md = ImmutableDenseNDimArray([1, 2, 3]) + e1 = (1+x)*md + e2 = md*(1+x) + assert e1 == ImmutableDenseNDimArray([1+x, 2+2*x, 3+3*x]) + assert e1 == e2 + + sd = ImmutableSparseNDimArray([1, 2, 3]) + e3 = (1+x)*sd + e4 = sd*(1+x) + assert e3 == ImmutableDenseNDimArray([1+x, 2+2*x, 3+3*x]) + assert e3 == e4 + + +def test_symbolic_indexing(): + x, y, z, w = symbols("x y z w") + M = ImmutableDenseNDimArray([[x, y], [z, w]]) + i, j = symbols("i, j") + Mij = M[i, j] + assert isinstance(Mij, Indexed) + Ms = ImmutableSparseNDimArray([[2, 3*x], [4, 5]]) + msij = Ms[i, j] + assert isinstance(msij, Indexed) + for oi, oj in [(0, 0), (0, 1), (1, 0), (1, 1)]: + assert Mij.subs({i: oi, j: oj}) == M[oi, oj] + assert msij.subs({i: oi, j: oj}) == Ms[oi, oj] + A = IndexedBase("A", (0, 2)) + assert A[0, 0].subs(A, M) == x + assert A[i, j].subs(A, M) == M[i, j] + assert M[i, j].subs(M, A) == A[i, j] + + assert isinstance(M[3 * i - 2, j], Indexed) + assert M[3 * i - 2, j].subs({i: 1, j: 0}) == M[1, 0] + assert isinstance(M[i, 0], Indexed) + assert M[i, 0].subs(i, 0) == M[0, 0] + assert M[0, i].subs(i, 1) == M[0, 1] + + assert M[i, j].diff(x) == ImmutableDenseNDimArray([[1, 0], [0, 0]])[i, j] + assert Ms[i, j].diff(x) == ImmutableSparseNDimArray([[0, 3], [0, 0]])[i, j] + + Mo = ImmutableDenseNDimArray([1, 2, 3]) + assert Mo[i].subs(i, 1) == 2 + Mos = ImmutableSparseNDimArray([1, 2, 3]) + assert Mos[i].subs(i, 1) == 2 + + raises(ValueError, lambda: M[i, 2]) + raises(ValueError, lambda: M[i, -1]) + raises(ValueError, lambda: M[2, i]) + raises(ValueError, lambda: M[-1, i]) + + raises(ValueError, lambda: Ms[i, 2]) + raises(ValueError, lambda: Ms[i, -1]) + raises(ValueError, lambda: Ms[2, i]) + raises(ValueError, lambda: Ms[-1, i]) + + +def test_issue_12665(): + # Testing Python 3 hash of immutable arrays: + arr = ImmutableDenseNDimArray([1, 2, 3]) + # This should NOT 
raise an exception: + hash(arr) + + + def test_zeros_without_shape(): + arr = ImmutableDenseNDimArray.zeros() + assert arr == ImmutableDenseNDimArray(0) + + + def test_issue_21870(): + a0 = ImmutableDenseNDimArray(0) + assert a0.rank() == 0 + a1 = ImmutableDenseNDimArray(a0) + assert a1.rank() == 0