diff --git a/.gitattributes b/.gitattributes index 9e81af29e13904825c56c4b1669ff1df112418a0..62b753c51ce328f242fcdc409404f0b755dc59fa 100644 --- a/.gitattributes +++ b/.gitattributes @@ -128,3 +128,4 @@ env-llmeval/lib/python3.10/site-packages/triton/third_party/cuda/bin/ptxas filte env-llmeval/lib/python3.10/site-packages/lxml/etree.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text env-llmeval/lib/python3.10/site-packages/scipy.libs/libopenblasp-r0-24bff013.3.26.dev.so filter=lfs diff=lfs merge=lfs -text env-llmeval/lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1.so.5.0.0 filter=lfs diff=lfs merge=lfs -text +llmeval-env/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so filter=lfs diff=lfs merge=lfs -text diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__init__.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..36ec3b6fdc77aac60b7a5534a58b454c1560c619 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__init__.py @@ -0,0 +1,43 @@ +"""Integration functions that integrate a SymPy expression. + + Examples + ======== + + >>> from sympy import integrate, sin + >>> from sympy.abc import x + >>> integrate(1/x,x) + log(x) + >>> integrate(sin(x),x) + -cos(x) +""" +from .integrals import integrate, Integral, line_integrate +from .transforms import (mellin_transform, inverse_mellin_transform, + MellinTransform, InverseMellinTransform, + laplace_transform, inverse_laplace_transform, + LaplaceTransform, InverseLaplaceTransform, + fourier_transform, inverse_fourier_transform, + FourierTransform, InverseFourierTransform, + sine_transform, inverse_sine_transform, + SineTransform, InverseSineTransform, + cosine_transform, inverse_cosine_transform, + CosineTransform, InverseCosineTransform, + hankel_transform, inverse_hankel_transform, + HankelTransform, InverseHankelTransform) +from .singularityfunctions import singularityintegrate + +__all__ = [ + 'integrate', 'Integral', 'line_integrate', + + 'mellin_transform', 'inverse_mellin_transform', 'MellinTransform', + 'InverseMellinTransform', 'laplace_transform', + 'inverse_laplace_transform', 'LaplaceTransform', + 'InverseLaplaceTransform', 'fourier_transform', + 'inverse_fourier_transform', 'FourierTransform', + 'InverseFourierTransform', 'sine_transform', 'inverse_sine_transform', + 'SineTransform', 'InverseSineTransform', 'cosine_transform', + 'inverse_cosine_transform', 'CosineTransform', 'InverseCosineTransform', + 'hankel_transform', 'inverse_hankel_transform', 'HankelTransform', + 'InverseHankelTransform', + + 'singularityintegrate', +] diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b0161911414eb066311b9554bf583d5ded599cd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/deltafunctions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/deltafunctions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e21c39b8ae62c1ac97ed6c521d56f96c13e696f2 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/deltafunctions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/heurisch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/heurisch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2471b71cd93189c424f034b51ab2efc595d84dc7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/heurisch.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/integrals.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/integrals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c3af1dea5270545f2e110b802cc34754b0a4fe1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/integrals.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/intpoly.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/intpoly.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5114a8035c99ccc3e7d73bab23a6389806b31858 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/intpoly.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/laplace.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/laplace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40572e24af6d91ebb5511e4c7d53eb9f37626ad7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/laplace.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/manualintegrate.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/manualintegrate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69d694380e45deffbf8dcfc8eff46edb37037f9b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/manualintegrate.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/meijerint.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/meijerint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..585e790cf1a1ea41bd75b116a919af5ea08d1148 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/meijerint.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/meijerint_doc.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/meijerint_doc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2875ecdba5a30957c0ce7415e7d970899fca518 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/meijerint_doc.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/prde.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/prde.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..580177ecf33c4a3d7916554f6b85e1d45dd1109f 
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/prde.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/quadrature.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/quadrature.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b267350fcb93cd2979fe1f91e301a7abbf7749f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/quadrature.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/rationaltools.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/rationaltools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d75d46f4e9adcf63cc75b86128754b34017efaf0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/rationaltools.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/rde.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/rde.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..645e2cf1e9fa4a43ce368e67f37db5df7a4f8bd3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/rde.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/risch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/risch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8377c8e54e11130095f98db8c99463a5a4bb0825 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/risch.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/singularityfunctions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/singularityfunctions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3620f06ce7636c4f6c03a98c7dba636911760ff3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/singularityfunctions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/transforms.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/transforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aba656d85a2ca4ad86c20767df8a8f769d3eea43 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/transforms.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/trigonometry.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/trigonometry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..351be36ba789df5fd8afe254546d5ffb41ae37f3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/__pycache__/trigonometry.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/deltafunctions.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/deltafunctions.py new file mode 100644 index 0000000000000000000000000000000000000000..ae9fef0b0010a313e0866a54d978024dd475f882 
--- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/deltafunctions.py @@ -0,0 +1,201 @@ +from sympy.core.mul import Mul +from sympy.core.singleton import S +from sympy.core.sorting import default_sort_key +from sympy.functions import DiracDelta, Heaviside +from .integrals import Integral, integrate + + +def change_mul(node, x): + """change_mul(node, x) + + Rearranges the operands of a product, bringing to front any simple + DiracDelta expression. + + Explanation + =========== + + If no simple DiracDelta expression was found, then all the DiracDelta + expressions are simplified (using DiracDelta.expand(diracdelta=True, wrt=x)). + + Return: (dirac, new node) + Where: + o dirac is either a simple DiracDelta expression or None (if no simple + expression was found); + o new node is either a simplified DiracDelta expressions or None (if it + could not be simplified). + + Examples + ======== + + >>> from sympy import DiracDelta, cos + >>> from sympy.integrals.deltafunctions import change_mul + >>> from sympy.abc import x, y + >>> change_mul(x*y*DiracDelta(x)*cos(x), x) + (DiracDelta(x), x*y*cos(x)) + >>> change_mul(x*y*DiracDelta(x**2 - 1)*cos(x), x) + (None, x*y*cos(x)*DiracDelta(x - 1)/2 + x*y*cos(x)*DiracDelta(x + 1)/2) + >>> change_mul(x*y*DiracDelta(cos(x))*cos(x), x) + (None, None) + + See Also + ======== + + sympy.functions.special.delta_functions.DiracDelta + deltaintegrate + """ + + new_args = [] + dirac = None + + #Sorting is needed so that we consistently collapse the same delta; + #However, we must preserve the ordering of non-commutative terms + c, nc = node.args_cnc() + sorted_args = sorted(c, key=default_sort_key) + sorted_args.extend(nc) + + for arg in sorted_args: + if arg.is_Pow and isinstance(arg.base, DiracDelta): + new_args.append(arg.func(arg.base, arg.exp - 1)) + arg = arg.base + if dirac is None and (isinstance(arg, DiracDelta) and arg.is_simple(x)): + dirac = arg + else: + new_args.append(arg) + if not dirac: # there was no simple dirac + new_args = [] + for arg in sorted_args: + if isinstance(arg, DiracDelta): + new_args.append(arg.expand(diracdelta=True, wrt=x)) + elif arg.is_Pow and isinstance(arg.base, DiracDelta): + new_args.append(arg.func(arg.base.expand(diracdelta=True, wrt=x), arg.exp)) + else: + new_args.append(arg) + if new_args != sorted_args: + nnode = Mul(*new_args).expand() + else: # if the node didn't change there is nothing to do + nnode = None + return (None, nnode) + return (dirac, Mul(*new_args)) + + +def deltaintegrate(f, x): + """ + deltaintegrate(f, x) + + Explanation + =========== + + The idea for integration is the following: + + - If we are dealing with a DiracDelta expression, i.e. DiracDelta(g(x)), + we try to simplify it. + + If we could simplify it, then we integrate the resulting expression. + We already know we can integrate a simplified expression, because only + simple DiracDelta expressions are involved. + + If we couldn't simplify it, there are two cases: + + 1) The expression is a simple expression: we return the integral, + taking care if we are dealing with a Derivative or with a proper + DiracDelta. + + 2) The expression is not simple (i.e. DiracDelta(cos(x))): we can do + nothing at all. + + - If the node is a multiplication node having a DiracDelta term: + + First we expand it. + + If the expansion did work, then we try to integrate the expansion. + + If not, we try to extract a simple DiracDelta term, then we have two + cases: + + 1) We have a simple DiracDelta term, so we return the integral. 
+ + 2) We didn't have a simple term, but we do have an expression with + simplified DiracDelta terms, so we integrate this expression. + + Examples + ======== + + >>> from sympy.abc import x, y, z + >>> from sympy.integrals.deltafunctions import deltaintegrate + >>> from sympy import sin, cos, DiracDelta + >>> deltaintegrate(x*sin(x)*cos(x)*DiracDelta(x - 1), x) + sin(1)*cos(1)*Heaviside(x - 1) + >>> deltaintegrate(y**2*DiracDelta(x - z)*DiracDelta(y - z), y) + z**2*DiracDelta(x - z)*Heaviside(y - z) + + See Also + ======== + + sympy.functions.special.delta_functions.DiracDelta + sympy.integrals.integrals.Integral + """ + if not f.has(DiracDelta): + return None + + # g(x) = DiracDelta(h(x)) + if f.func == DiracDelta: + h = f.expand(diracdelta=True, wrt=x) + if h == f: # can't simplify the expression + #FIXME: the second term tells whether is DeltaDirac or Derivative + #For integrating derivatives of DiracDelta we need the chain rule + if f.is_simple(x): + if (len(f.args) <= 1 or f.args[1] == 0): + return Heaviside(f.args[0]) + else: + return (DiracDelta(f.args[0], f.args[1] - 1) / + f.args[0].as_poly().LC()) + else: # let's try to integrate the simplified expression + fh = integrate(h, x) + return fh + elif f.is_Mul or f.is_Pow: # g(x) = a*b*c*f(DiracDelta(h(x)))*d*e + g = f.expand() + if f != g: # the expansion worked + fh = integrate(g, x) + if fh is not None and not isinstance(fh, Integral): + return fh + else: + # no expansion performed, try to extract a simple DiracDelta term + deltaterm, rest_mult = change_mul(f, x) + + if not deltaterm: + if rest_mult: + fh = integrate(rest_mult, x) + return fh + else: + from sympy.solvers import solve + deltaterm = deltaterm.expand(diracdelta=True, wrt=x) + if deltaterm.is_Mul: # Take out any extracted factors + deltaterm, rest_mult_2 = change_mul(deltaterm, x) + rest_mult = rest_mult*rest_mult_2 + point = solve(deltaterm.args[0], x)[0] + + # Return the largest hyperreal term left after + # repeated integration by parts. For example, + # + # integrate(y*DiracDelta(x, 1),x) == y*DiracDelta(x,0), not 0 + # + # This is so Integral(y*DiracDelta(x).diff(x),x).doit() + # will return y*DiracDelta(x) instead of 0 or DiracDelta(x), + # both of which are correct everywhere the value is defined + # but give wrong answers for nested integration. + n = (0 if len(deltaterm.args)==1 else deltaterm.args[1]) + m = 0 + while n >= 0: + r = S.NegativeOne**n*rest_mult.diff(x, n).subs(x, point) + if r.is_zero: + n -= 1 + m += 1 + else: + if m == 0: + return r*Heaviside(x - point) + else: + return r*DiracDelta(x,m-1) + # In some very weak sense, x=0 is still a singularity, + # but we hope will not be of any practical consequence. 
+ return S.Zero + return None diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/heurisch.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/heurisch.py new file mode 100644 index 0000000000000000000000000000000000000000..344edf250a2edb24916df826114eae3c25d2207d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/heurisch.py @@ -0,0 +1,771 @@ +from __future__ import annotations + +from itertools import permutations +from functools import reduce + +from sympy.core.add import Add +from sympy.core.basic import Basic +from sympy.core.mul import Mul +from sympy.core.symbol import Wild, Dummy, Symbol +from sympy.core.basic import sympify +from sympy.core.numbers import Rational, pi, I +from sympy.core.relational import Eq, Ne +from sympy.core.singleton import S +from sympy.core.sorting import ordered +from sympy.core.traversal import iterfreeargs + +from sympy.functions import exp, sin, cos, tan, cot, asin, atan +from sympy.functions import log, sinh, cosh, tanh, coth, asinh +from sympy.functions import sqrt, erf, erfi, li, Ei +from sympy.functions import besselj, bessely, besseli, besselk +from sympy.functions import hankel1, hankel2, jn, yn +from sympy.functions.elementary.complexes import Abs, re, im, sign, arg +from sympy.functions.elementary.exponential import LambertW +from sympy.functions.elementary.integers import floor, ceiling +from sympy.functions.elementary.piecewise import Piecewise +from sympy.functions.special.delta_functions import Heaviside, DiracDelta + +from sympy.simplify.radsimp import collect + +from sympy.logic.boolalg import And, Or +from sympy.utilities.iterables import uniq + +from sympy.polys import quo, gcd, lcm, factor_list, cancel, PolynomialError +from sympy.polys.monomials import itermonomials +from sympy.polys.polyroots import root_factors + +from sympy.polys.rings import PolyRing +from sympy.polys.solvers import solve_lin_sys +from sympy.polys.constructor import construct_domain + +from sympy.integrals.integrals import integrate + + +def components(f, x): + """ + Returns a set of all functional components of the given expression + which includes symbols, function applications and compositions and + non-integer powers. Fractional powers are collected with + minimal, positive exponents. 
+ + Examples + ======== + + >>> from sympy import cos, sin + >>> from sympy.abc import x + >>> from sympy.integrals.heurisch import components + + >>> components(sin(x)*cos(x)**2, x) + {x, sin(x), cos(x)} + + See Also + ======== + + heurisch + """ + result = set() + + if f.has_free(x): + if f.is_symbol and f.is_commutative: + result.add(f) + elif f.is_Function or f.is_Derivative: + for g in f.args: + result |= components(g, x) + + result.add(f) + elif f.is_Pow: + result |= components(f.base, x) + + if not f.exp.is_Integer: + if f.exp.is_Rational: + result.add(f.base**Rational(1, f.exp.q)) + else: + result |= components(f.exp, x) | {f} + else: + for g in f.args: + result |= components(g, x) + + return result + +# name -> [] of symbols +_symbols_cache: dict[str, list[Dummy]] = {} + + +# NB @cacheit is not convenient here +def _symbols(name, n): + """get vector of symbols local to this module""" + try: + lsyms = _symbols_cache[name] + except KeyError: + lsyms = [] + _symbols_cache[name] = lsyms + + while len(lsyms) < n: + lsyms.append( Dummy('%s%i' % (name, len(lsyms))) ) + + return lsyms[:n] + + +def heurisch_wrapper(f, x, rewrite=False, hints=None, mappings=None, retries=3, + degree_offset=0, unnecessary_permutations=None, + _try_heurisch=None): + """ + A wrapper around the heurisch integration algorithm. + + Explanation + =========== + + This method takes the result from heurisch and checks for poles in the + denominator. For each of these poles, the integral is reevaluated, and + the final integration result is given in terms of a Piecewise. + + Examples + ======== + + >>> from sympy import cos, symbols + >>> from sympy.integrals.heurisch import heurisch, heurisch_wrapper + >>> n, x = symbols('n x') + >>> heurisch(cos(n*x), x) + sin(n*x)/n + >>> heurisch_wrapper(cos(n*x), x) + Piecewise((sin(n*x)/n, Ne(n, 0)), (x, True)) + + See Also + ======== + + heurisch + """ + from sympy.solvers.solvers import solve, denoms + f = sympify(f) + if not f.has_free(x): + return f*x + + res = heurisch(f, x, rewrite, hints, mappings, retries, degree_offset, + unnecessary_permutations, _try_heurisch) + if not isinstance(res, Basic): + return res + + # We consider each denominator in the expression, and try to find + # cases where one or more symbolic denominator might be zero. The + # conditions for these cases are stored in the list slns. + # + # Since denoms returns a set we use ordered. This is important because the + # ordering of slns determines the order of the resulting Piecewise so we + # need a deterministic order here to make the output deterministic. + slns = [] + for d in ordered(denoms(res)): + try: + slns += solve([d], dict=True, exclude=(x,)) + except NotImplementedError: + pass + if not slns: + return res + slns = list(uniq(slns)) + # Remove the solutions corresponding to poles in the original expression. + slns0 = [] + for d in denoms(f): + try: + slns0 += solve([d], dict=True, exclude=(x,)) + except NotImplementedError: + pass + slns = [s for s in slns if s not in slns0] + if not slns: + return res + if len(slns) > 1: + eqs = [] + for sub_dict in slns: + eqs.extend([Eq(key, value) for key, value in sub_dict.items()]) + slns = solve(eqs, dict=True, exclude=(x,)) + slns + # For each case listed in the list slns, we reevaluate the integral. 
+ pairs = [] + for sub_dict in slns: + expr = heurisch(f.subs(sub_dict), x, rewrite, hints, mappings, retries, + degree_offset, unnecessary_permutations, + _try_heurisch) + cond = And(*[Eq(key, value) for key, value in sub_dict.items()]) + generic = Or(*[Ne(key, value) for key, value in sub_dict.items()]) + if expr is None: + expr = integrate(f.subs(sub_dict),x) + pairs.append((expr, cond)) + # If there is one condition, put the generic case first. Otherwise, + # doing so may lead to longer Piecewise formulas + if len(pairs) == 1: + pairs = [(heurisch(f, x, rewrite, hints, mappings, retries, + degree_offset, unnecessary_permutations, + _try_heurisch), + generic), + (pairs[0][0], True)] + else: + pairs.append((heurisch(f, x, rewrite, hints, mappings, retries, + degree_offset, unnecessary_permutations, + _try_heurisch), + True)) + return Piecewise(*pairs) + +class BesselTable: + """ + Derivatives of Bessel functions of orders n and n-1 + in terms of each other. + + See the docstring of DiffCache. + """ + + def __init__(self): + self.table = {} + self.n = Dummy('n') + self.z = Dummy('z') + self._create_table() + + def _create_table(t): + table, n, z = t.table, t.n, t.z + for f in (besselj, bessely, hankel1, hankel2): + table[f] = (f(n-1, z) - n*f(n, z)/z, + (n-1)*f(n-1, z)/z - f(n, z)) + + f = besseli + table[f] = (f(n-1, z) - n*f(n, z)/z, + (n-1)*f(n-1, z)/z + f(n, z)) + f = besselk + table[f] = (-f(n-1, z) - n*f(n, z)/z, + (n-1)*f(n-1, z)/z - f(n, z)) + + for f in (jn, yn): + table[f] = (f(n-1, z) - (n+1)*f(n, z)/z, + (n-1)*f(n-1, z)/z - f(n, z)) + + def diffs(t, f, n, z): + if f in t.table: + diff0, diff1 = t.table[f] + repl = [(t.n, n), (t.z, z)] + return (diff0.subs(repl), diff1.subs(repl)) + + def has(t, f): + return f in t.table + +_bessel_table = None + +class DiffCache: + """ + Store for derivatives of expressions. + + Explanation + =========== + + The standard form of the derivative of a Bessel function of order n + contains two Bessel functions of orders n-1 and n+1, respectively. + Such forms cannot be used in parallel Risch algorithm, because + there is a linear recurrence relation between the three functions + while the algorithm expects that functions and derivatives are + represented in terms of algebraically independent transcendentals. + + The solution is to take two of the functions, e.g., those of orders + n and n-1, and to express the derivatives in terms of the pair. + To guarantee that the proper form is used the two derivatives are + cached as soon as one is encountered. + + Derivatives of other functions are also cached at no extra cost. + All derivatives are with respect to the same variable `x`. + """ + + def __init__(self, x): + self.cache = {} + self.x = x + + global _bessel_table + if not _bessel_table: + _bessel_table = BesselTable() + + def get_diff(self, f): + cache = self.cache + + if f in cache: + pass + elif (not hasattr(f, 'func') or + not _bessel_table.has(f.func)): + cache[f] = cancel(f.diff(self.x)) + else: + n, z = f.args + d0, d1 = _bessel_table.diffs(f.func, n, z) + dz = self.get_diff(z) + cache[f] = d0*dz + cache[f.func(n-1, z)] = d1*dz + + return cache[f] + +def heurisch(f, x, rewrite=False, hints=None, mappings=None, retries=3, + degree_offset=0, unnecessary_permutations=None, + _try_heurisch=None): + """ + Compute indefinite integral using heuristic Risch algorithm. 
+ + Explanation + =========== + + This is a heuristic approach to indefinite integration in finite + terms using the extended heuristic (parallel) Risch algorithm, based + on Manuel Bronstein's "Poor Man's Integrator". + + The algorithm supports various classes of functions including + transcendental elementary or special functions like Airy, + Bessel, Whittaker and Lambert. + + Note that this algorithm is not a decision procedure. If it isn't + able to compute the antiderivative for a given function, then this is + not a proof that such a function does not exist. One should use the + recursive Risch algorithm in such a case. It's an open question if + this algorithm can be made a full decision procedure. + + This is an internal integrator procedure. You should use the top-level + 'integrate' function in most cases, as this procedure needs some + preprocessing steps and otherwise may fail. + + Specification + ============= + + heurisch(f, x, rewrite=False, hints=None) + + where + f : expression + x : symbol + + rewrite -> force rewrite 'f' in terms of 'tan' and 'tanh' + hints -> a list of functions that may appear in the antiderivative + + - hints = None --> no suggestions at all + - hints = [ ] --> try to figure out + - hints = [f1, ..., fn] --> we know better + + Examples + ======== + + >>> from sympy import tan + >>> from sympy.integrals.heurisch import heurisch + >>> from sympy.abc import x, y + + >>> heurisch(y*tan(x), x) + y*log(tan(x)**2 + 1)/2 + + See Manuel Bronstein's "Poor Man's Integrator": + + References + ========== + + .. [1] https://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/index.html + + For more information on the implemented algorithm refer to: + + .. [2] K. Geddes, L. Stefanus, On the Risch-Norman Integration + Method and its Implementation in Maple, Proceedings of + ISSAC'89, ACM Press, 212-217. + + .. [3] J. H. Davenport, On the Parallel Risch Algorithm (I), + Proceedings of EUROCAM'82, LNCS 144, Springer, 144-157. + + .. [4] J. H. Davenport, On the Parallel Risch Algorithm (III): + Use of Tangents, SIGSAM Bulletin 16 (1982), 3-6. + + .. [5] J. H. Davenport, B. M. Trager, On the Parallel Risch + Algorithm (II), ACM Transactions on Mathematical + Software 11 (1985), 356-362. + + See Also + ======== + + sympy.integrals.integrals.Integral.doit + sympy.integrals.integrals.Integral + sympy.integrals.heurisch.components + """ + f = sympify(f) + + # There are some functions that Heurisch cannot currently handle, + # so do not even try. 
+ # Set _try_heurisch=True to skip this check + if _try_heurisch is not True: + if f.has(Abs, re, im, sign, Heaviside, DiracDelta, floor, ceiling, arg): + return + + if not f.has_free(x): + return f*x + + if not f.is_Add: + indep, f = f.as_independent(x) + else: + indep = S.One + + rewritables = { + (sin, cos, cot): tan, + (sinh, cosh, coth): tanh, + } + + if rewrite: + for candidates, rule in rewritables.items(): + f = f.rewrite(candidates, rule) + else: + for candidates in rewritables.keys(): + if f.has(*candidates): + break + else: + rewrite = True + + terms = components(f, x) + dcache = DiffCache(x) + + if hints is not None: + if not hints: + a = Wild('a', exclude=[x]) + b = Wild('b', exclude=[x]) + c = Wild('c', exclude=[x]) + + for g in set(terms): # using copy of terms + if g.is_Function: + if isinstance(g, li): + M = g.args[0].match(a*x**b) + + if M is not None: + terms.add( x*(li(M[a]*x**M[b]) - (M[a]*x**M[b])**(-1/M[b])*Ei((M[b]+1)*log(M[a]*x**M[b])/M[b])) ) + #terms.add( x*(li(M[a]*x**M[b]) - (x**M[b])**(-1/M[b])*Ei((M[b]+1)*log(M[a]*x**M[b])/M[b])) ) + #terms.add( x*(li(M[a]*x**M[b]) - x*Ei((M[b]+1)*log(M[a]*x**M[b])/M[b])) ) + #terms.add( li(M[a]*x**M[b]) - Ei((M[b]+1)*log(M[a]*x**M[b])/M[b]) ) + + elif isinstance(g, exp): + M = g.args[0].match(a*x**2) + + if M is not None: + if M[a].is_positive: + terms.add(erfi(sqrt(M[a])*x)) + else: # M[a].is_negative or unknown + terms.add(erf(sqrt(-M[a])*x)) + + M = g.args[0].match(a*x**2 + b*x + c) + + if M is not None: + if M[a].is_positive: + terms.add(sqrt(pi/4*(-M[a]))*exp(M[c] - M[b]**2/(4*M[a]))* + erfi(sqrt(M[a])*x + M[b]/(2*sqrt(M[a])))) + elif M[a].is_negative: + terms.add(sqrt(pi/4*(-M[a]))*exp(M[c] - M[b]**2/(4*M[a]))* + erf(sqrt(-M[a])*x - M[b]/(2*sqrt(-M[a])))) + + M = g.args[0].match(a*log(x)**2) + + if M is not None: + if M[a].is_positive: + terms.add(erfi(sqrt(M[a])*log(x) + 1/(2*sqrt(M[a])))) + if M[a].is_negative: + terms.add(erf(sqrt(-M[a])*log(x) - 1/(2*sqrt(-M[a])))) + + elif g.is_Pow: + if g.exp.is_Rational and g.exp.q == 2: + M = g.base.match(a*x**2 + b) + + if M is not None and M[b].is_positive: + if M[a].is_positive: + terms.add(asinh(sqrt(M[a]/M[b])*x)) + elif M[a].is_negative: + terms.add(asin(sqrt(-M[a]/M[b])*x)) + + M = g.base.match(a*x**2 - b) + + if M is not None and M[b].is_positive: + if M[a].is_positive: + dF = 1/sqrt(M[a]*x**2 - M[b]) + F = log(2*sqrt(M[a])*sqrt(M[a]*x**2 - M[b]) + 2*M[a]*x)/sqrt(M[a]) + dcache.cache[F] = dF # hack: F.diff(x) doesn't automatically simplify to f + terms.add(F) + elif M[a].is_negative: + terms.add(-M[b]/2*sqrt(-M[a])* + atan(sqrt(-M[a])*x/sqrt(M[a]*x**2 - M[b]))) + + else: + terms |= set(hints) + + for g in set(terms): # using copy of terms + terms |= components(dcache.get_diff(g), x) + + # XXX: The commented line below makes heurisch more deterministic wrt + # PYTHONHASHSEED and the iteration order of sets. There are other places + # where sets are iterated over but this one is possibly the most important. + # Theoretically the order here should not matter but different orderings + # can expose potential bugs in the different code paths so potentially it + # is better to keep the non-determinism. + # + # terms = list(ordered(terms)) + + # TODO: caching is significant factor for why permutations work at all. Change this. + V = _symbols('x', len(terms)) + + + # sort mapping expressions from largest to smallest (last is always x). 
+ mapping = list(reversed(list(zip(*ordered( # + [(a[0].as_independent(x)[1], a) for a in zip(terms, V)])))[1])) # + rev_mapping = {v: k for k, v in mapping} # + if mappings is None: # + # optimizing the number of permutations of mapping # + assert mapping[-1][0] == x # if not, find it and correct this comment + unnecessary_permutations = [mapping.pop(-1)] + mappings = permutations(mapping) + else: + unnecessary_permutations = unnecessary_permutations or [] + + def _substitute(expr): + return expr.subs(mapping) + + for mapping in mappings: + mapping = list(mapping) + mapping = mapping + unnecessary_permutations + diffs = [ _substitute(dcache.get_diff(g)) for g in terms ] + denoms = [ g.as_numer_denom()[1] for g in diffs ] + if all(h.is_polynomial(*V) for h in denoms) and _substitute(f).is_rational_function(*V): + denom = reduce(lambda p, q: lcm(p, q, *V), denoms) + break + else: + if not rewrite: + result = heurisch(f, x, rewrite=True, hints=hints, + unnecessary_permutations=unnecessary_permutations) + + if result is not None: + return indep*result + return None + + numers = [ cancel(denom*g) for g in diffs ] + def _derivation(h): + return Add(*[ d * h.diff(v) for d, v in zip(numers, V) ]) + + def _deflation(p): + for y in V: + if not p.has(y): + continue + + if _derivation(p) is not S.Zero: + c, q = p.as_poly(y).primitive() + return _deflation(c)*gcd(q, q.diff(y)).as_expr() + + return p + + def _splitter(p): + for y in V: + if not p.has(y): + continue + + if _derivation(y) is not S.Zero: + c, q = p.as_poly(y).primitive() + + q = q.as_expr() + + h = gcd(q, _derivation(q), y) + s = quo(h, gcd(q, q.diff(y), y), y) + + c_split = _splitter(c) + + if s.as_poly(y).degree() == 0: + return (c_split[0], q * c_split[1]) + + q_split = _splitter(cancel(q / s)) + + return (c_split[0]*q_split[0]*s, c_split[1]*q_split[1]) + + return (S.One, p) + + special = {} + + for term in terms: + if term.is_Function: + if isinstance(term, tan): + special[1 + _substitute(term)**2] = False + elif isinstance(term, tanh): + special[1 + _substitute(term)] = False + special[1 - _substitute(term)] = False + elif isinstance(term, LambertW): + special[_substitute(term)] = True + + F = _substitute(f) + + P, Q = F.as_numer_denom() + + u_split = _splitter(denom) + v_split = _splitter(Q) + + polys = set(list(v_split) + [ u_split[0] ] + list(special.keys())) + + s = u_split[0] * Mul(*[ k for k, v in special.items() if v ]) + polified = [ p.as_poly(*V) for p in [s, P, Q] ] + + if None in polified: + return None + + #--- definitions for _integrate + a, b, c = [ p.total_degree() for p in polified ] + + poly_denom = (s * v_split[0] * _deflation(v_split[1])).as_expr() + + def _exponent(g): + if g.is_Pow: + if g.exp.is_Rational and g.exp.q != 1: + if g.exp.p > 0: + return g.exp.p + g.exp.q - 1 + else: + return abs(g.exp.p + g.exp.q) + else: + return 1 + elif not g.is_Atom and g.args: + return max([ _exponent(h) for h in g.args ]) + else: + return 1 + + A, B = _exponent(f), a + max(b, c) + + if A > 1 and B > 1: + monoms = tuple(ordered(itermonomials(V, A + B - 1 + degree_offset))) + else: + monoms = tuple(ordered(itermonomials(V, A + B + degree_offset))) + + poly_coeffs = _symbols('A', len(monoms)) + + poly_part = Add(*[ poly_coeffs[i]*monomial + for i, monomial in enumerate(monoms) ]) + + reducibles = set() + + for poly in ordered(polys): + coeff, factors = factor_list(poly, *V) + reducibles.add(coeff) + for fact, mul in factors: + reducibles.add(fact) + + def _integrate(field=None): + atans = set() + pairs = set() + + if field == 'Q': 
+ irreducibles = set(reducibles) + else: + setV = set(V) + irreducibles = set() + for poly in ordered(reducibles): + zV = setV & set(iterfreeargs(poly)) + for z in ordered(zV): + s = set(root_factors(poly, z, filter=field)) + irreducibles |= s + break + + log_part, atan_part = [], [] + + for poly in ordered(irreducibles): + m = collect(poly, I, evaluate=False) + y = m.get(I, S.Zero) + if y: + x = m.get(S.One, S.Zero) + if x.has(I) or y.has(I): + continue # nontrivial x + I*y + pairs.add((x, y)) + irreducibles.remove(poly) + + while pairs: + x, y = pairs.pop() + if (x, -y) in pairs: + pairs.remove((x, -y)) + # Choosing b with no minus sign + if y.could_extract_minus_sign(): + y = -y + irreducibles.add(x*x + y*y) + atans.add(atan(x/y)) + else: + irreducibles.add(x + I*y) + + + B = _symbols('B', len(irreducibles)) + C = _symbols('C', len(atans)) + + # Note: the ordering matters here + for poly, b in reversed(list(zip(ordered(irreducibles), B))): + if poly.has(*V): + poly_coeffs.append(b) + log_part.append(b * log(poly)) + + for poly, c in reversed(list(zip(ordered(atans), C))): + if poly.has(*V): + poly_coeffs.append(c) + atan_part.append(c * poly) + + # TODO: Currently it's better to use symbolic expressions here instead + # of rational functions, because it's simpler and FracElement doesn't + # give big speed improvement yet. This is because cancellation is slow + # due to slow polynomial GCD algorithms. If this gets improved then + # revise this code. + candidate = poly_part/poly_denom + Add(*log_part) + Add(*atan_part) + h = F - _derivation(candidate) / denom + raw_numer = h.as_numer_denom()[0] + + # Rewrite raw_numer as a polynomial in K[coeffs][V] where K is a field + # that we have to determine. We can't use simply atoms() because log(3), + # sqrt(y) and similar expressions can appear, leading to non-trivial + # domains. + syms = set(poly_coeffs) | set(V) + non_syms = set() + + def find_non_syms(expr): + if expr.is_Integer or expr.is_Rational: + pass # ignore trivial numbers + elif expr in syms: + pass # ignore variables + elif not expr.has_free(*syms): + non_syms.add(expr) + elif expr.is_Add or expr.is_Mul or expr.is_Pow: + list(map(find_non_syms, expr.args)) + else: + # TODO: Non-polynomial expression. This should have been + # filtered out at an earlier stage. 
+ raise PolynomialError + + try: + find_non_syms(raw_numer) + except PolynomialError: + return None + else: + ground, _ = construct_domain(non_syms, field=True) + + coeff_ring = PolyRing(poly_coeffs, ground) + ring = PolyRing(V, coeff_ring) + try: + numer = ring.from_expr(raw_numer) + except ValueError: + raise PolynomialError + solution = solve_lin_sys(numer.coeffs(), coeff_ring, _raw=False) + + if solution is None: + return None + else: + return candidate.xreplace(solution).xreplace( + dict(zip(poly_coeffs, [S.Zero]*len(poly_coeffs)))) + + if all(isinstance(_, Symbol) for _ in V): + more_free = F.free_symbols - set(V) + else: + Fd = F.as_dummy() + more_free = Fd.xreplace(dict(zip(V, (Dummy() for _ in V))) + ).free_symbols & Fd.free_symbols + if not more_free: + # all free generators are identified in V + solution = _integrate('Q') + + if solution is None: + solution = _integrate() + else: + solution = _integrate() + + if solution is not None: + antideriv = solution.subs(rev_mapping) + antideriv = cancel(antideriv).expand() + + if antideriv.is_Add: + antideriv = antideriv.as_independent(x)[1] + + return indep*antideriv + else: + if retries >= 0: + result = heurisch(f, x, mappings=mappings, rewrite=rewrite, hints=hints, retries=retries - 1, unnecessary_permutations=unnecessary_permutations) + + if result is not None: + return indep*result + + return None diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/integrals.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/integrals.py new file mode 100644 index 0000000000000000000000000000000000000000..8f9e684bbd411074d1d39968e78012ab0c877918 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/integrals.py @@ -0,0 +1,1633 @@ +from typing import Tuple as tTuple + +from sympy.concrete.expr_with_limits import AddWithLimits +from sympy.core.add import Add +from sympy.core.basic import Basic +from sympy.core.containers import Tuple +from sympy.core.expr import Expr +from sympy.core.exprtools import factor_terms +from sympy.core.function import diff +from sympy.core.logic import fuzzy_bool +from sympy.core.mul import Mul +from sympy.core.numbers import oo, pi +from sympy.core.relational import Ne +from sympy.core.singleton import S +from sympy.core.symbol import (Dummy, Symbol, Wild) +from sympy.core.sympify import sympify +from sympy.functions import Piecewise, sqrt, piecewise_fold, tan, cot, atan +from sympy.functions.elementary.exponential import log +from sympy.functions.elementary.integers import floor +from sympy.functions.elementary.complexes import Abs, sign +from sympy.functions.elementary.miscellaneous import Min, Max +from .rationaltools import ratint +from sympy.matrices import MatrixBase +from sympy.polys import Poly, PolynomialError +from sympy.series.formal import FormalPowerSeries +from sympy.series.limits import limit +from sympy.series.order import Order +from sympy.tensor.functions import shape +from sympy.utilities.exceptions import sympy_deprecation_warning +from sympy.utilities.iterables import is_sequence +from sympy.utilities.misc import filldedent + + +class Integral(AddWithLimits): + """Represents unevaluated integral.""" + + __slots__ = () + + args: tTuple[Expr, Tuple] + + def __new__(cls, function, *symbols, **assumptions): + """Create an unevaluated integral. + + Explanation + =========== + + Arguments are an integrand followed by one or more limits. 
+ + If no limits are given and there is only one free symbol in the + expression, that symbol will be used, otherwise an error will be + raised. + + >>> from sympy import Integral + >>> from sympy.abc import x, y + >>> Integral(x) + Integral(x, x) + >>> Integral(y) + Integral(y, y) + + When limits are provided, they are interpreted as follows (using + ``x`` as though it were the variable of integration): + + (x,) or x - indefinite integral + (x, a) - "evaluate at" integral is an abstract antiderivative + (x, a, b) - definite integral + + The ``as_dummy`` method can be used to see which symbols cannot be + targeted by subs: those with a prepended underscore cannot be + changed with ``subs``. (Also, the integration variables themselves -- + the first element of a limit -- can never be changed by subs.) + + >>> i = Integral(x, x) + >>> at = Integral(x, (x, x)) + >>> i.as_dummy() + Integral(x, x) + >>> at.as_dummy() + Integral(_0, (_0, x)) + + """ + + #This will help other classes define their own definitions + #of behaviour with Integral. + if hasattr(function, '_eval_Integral'): + return function._eval_Integral(*symbols, **assumptions) + + if isinstance(function, Poly): + sympy_deprecation_warning( + """ + integrate(Poly) and Integral(Poly) are deprecated. Instead, + use the Poly.integrate() method, or convert the Poly to an + Expr first with the Poly.as_expr() method. + """, + deprecated_since_version="1.6", + active_deprecations_target="deprecated-integrate-poly") + + obj = AddWithLimits.__new__(cls, function, *symbols, **assumptions) + return obj + + def __getnewargs__(self): + return (self.function,) + tuple([tuple(xab) for xab in self.limits]) + + @property + def free_symbols(self): + """ + This method returns the symbols that will exist when the + integral is evaluated. This is useful if one is trying to + determine whether an integral depends on a certain + symbol or not. + + Examples + ======== + + >>> from sympy import Integral + >>> from sympy.abc import x, y + >>> Integral(x, (x, y, 1)).free_symbols + {y} + + See Also + ======== + + sympy.concrete.expr_with_limits.ExprWithLimits.function + sympy.concrete.expr_with_limits.ExprWithLimits.limits + sympy.concrete.expr_with_limits.ExprWithLimits.variables + """ + return super().free_symbols + + def _eval_is_zero(self): + # This is a very naive and quick test, not intended to do the integral to + # answer whether it is zero or not, e.g. Integral(sin(x), (x, 0, 2*pi)) + # is zero but this routine should return None for that case. But, like + # Mul, there are trivial situations for which the integral will be + # zero so we check for those. 
+ if self.function.is_zero: + return True + got_none = False + for l in self.limits: + if len(l) == 3: + z = (l[1] == l[2]) or (l[1] - l[2]).is_zero + if z: + return True + elif z is None: + got_none = True + free = self.function.free_symbols + for xab in self.limits: + if len(xab) == 1: + free.add(xab[0]) + continue + if len(xab) == 2 and xab[0] not in free: + if xab[1].is_zero: + return True + elif xab[1].is_zero is None: + got_none = True + # take integration symbol out of free since it will be replaced + # with the free symbols in the limits + free.discard(xab[0]) + # add in the new symbols + for i in xab[1:]: + free.update(i.free_symbols) + if self.function.is_zero is False and got_none is False: + return False + + def transform(self, x, u): + r""" + Performs a change of variables from `x` to `u` using the relationship + given by `x` and `u` which will define the transformations `f` and `F` + (which are inverses of each other) as follows: + + 1) If `x` is a Symbol (which is a variable of integration) then `u` + will be interpreted as some function, f(u), with inverse F(u). + This, in effect, just makes the substitution of x with f(x). + + 2) If `u` is a Symbol then `x` will be interpreted as some function, + F(x), with inverse f(u). This is commonly referred to as + u-substitution. + + Once f and F have been identified, the transformation is made as + follows: + + .. math:: \int_a^b x \mathrm{d}x \rightarrow \int_{F(a)}^{F(b)} f(x) + \frac{\mathrm{d}}{\mathrm{d}x} + + where `F(x)` is the inverse of `f(x)` and the limits and integrand have + been corrected so as to retain the same value after integration. + + Notes + ===== + + The mappings, F(x) or f(u), must lead to a unique integral. Linear + or rational linear expression, ``2*x``, ``1/x`` and ``sqrt(x)``, will + always work; quadratic expressions like ``x**2 - 1`` are acceptable + as long as the resulting integrand does not depend on the sign of + the solutions (see examples). + + The integral will be returned unchanged if ``x`` is not a variable of + integration. + + ``x`` must be (or contain) only one of of the integration variables. If + ``u`` has more than one free symbol then it should be sent as a tuple + (``u``, ``uvar``) where ``uvar`` identifies which variable is replacing + the integration variable. + XXX can it contain another integration variable? + + Examples + ======== + + >>> from sympy.abc import a, x, u + >>> from sympy import Integral, cos, sqrt + + >>> i = Integral(x*cos(x**2 - 1), (x, 0, 1)) + + transform can change the variable of integration + + >>> i.transform(x, u) + Integral(u*cos(u**2 - 1), (u, 0, 1)) + + transform can perform u-substitution as long as a unique + integrand is obtained: + + >>> i.transform(x**2 - 1, u) + Integral(cos(u)/2, (u, -1, 0)) + + This attempt fails because x = +/-sqrt(u + 1) and the + sign does not cancel out of the integrand: + + >>> Integral(cos(x**2 - 1), (x, 0, 1)).transform(x**2 - 1, u) + Traceback (most recent call last): + ... + ValueError: + The mapping between F(x) and f(u) did not give a unique integrand. + + transform can do a substitution. Here, the previous + result is transformed back into the original expression + using "u-substitution": + + >>> ui = _ + >>> _.transform(sqrt(u + 1), x) == i + True + + We can accomplish the same with a regular substitution: + + >>> ui.transform(u, x**2 - 1) == i + True + + If the `x` does not contain a symbol of integration then + the integral will be returned unchanged. 
Integral `i` does + not have an integration variable `a` so no change is made: + + >>> i.transform(a, x) == i + True + + When `u` has more than one free symbol the symbol that is + replacing `x` must be identified by passing `u` as a tuple: + + >>> Integral(x, (x, 0, 1)).transform(x, (u + a, u)) + Integral(a + u, (u, -a, 1 - a)) + >>> Integral(x, (x, 0, 1)).transform(x, (u + a, a)) + Integral(a + u, (a, -u, 1 - u)) + + See Also + ======== + + sympy.concrete.expr_with_limits.ExprWithLimits.variables : Lists the integration variables + as_dummy : Replace integration variables with dummy ones + """ + d = Dummy('d') + + xfree = x.free_symbols.intersection(self.variables) + if len(xfree) > 1: + raise ValueError( + 'F(x) can only contain one of: %s' % self.variables) + xvar = xfree.pop() if xfree else d + + if xvar not in self.variables: + return self + + u = sympify(u) + if isinstance(u, Expr): + ufree = u.free_symbols + if len(ufree) == 0: + raise ValueError(filldedent(''' + f(u) cannot be a constant''')) + if len(ufree) > 1: + raise ValueError(filldedent(''' + When f(u) has more than one free symbol, the one replacing x + must be identified: pass f(u) as (f(u), u)''')) + uvar = ufree.pop() + else: + u, uvar = u + if uvar not in u.free_symbols: + raise ValueError(filldedent(''' + Expecting a tuple (expr, symbol) where symbol identified + a free symbol in expr, but symbol is not in expr's free + symbols.''')) + if not isinstance(uvar, Symbol): + # This probably never evaluates to True + raise ValueError(filldedent(''' + Expecting a tuple (expr, symbol) but didn't get + a symbol; got %s''' % uvar)) + + if x.is_Symbol and u.is_Symbol: + return self.xreplace({x: u}) + + if not x.is_Symbol and not u.is_Symbol: + raise ValueError('either x or u must be a symbol') + + if uvar == xvar: + return self.transform(x, (u.subs(uvar, d), d)).xreplace({d: uvar}) + + if uvar in self.limits: + raise ValueError(filldedent(''' + u must contain the same variable as in x + or a variable that is not already an integration variable''')) + + from sympy.solvers.solvers import solve + if not x.is_Symbol: + F = [x.subs(xvar, d)] + soln = solve(u - x, xvar, check=False) + if not soln: + raise ValueError('no solution for solve(F(x) - f(u), x)') + f = [fi.subs(uvar, d) for fi in soln] + else: + f = [u.subs(uvar, d)] + from sympy.simplify.simplify import posify + pdiff, reps = posify(u - x) + puvar = uvar.subs([(v, k) for k, v in reps.items()]) + soln = [s.subs(reps) for s in solve(pdiff, puvar)] + if not soln: + raise ValueError('no solution for solve(F(x) - f(u), u)') + F = [fi.subs(xvar, d) for fi in soln] + + newfuncs = {(self.function.subs(xvar, fi)*fi.diff(d) + ).subs(d, uvar) for fi in f} + if len(newfuncs) > 1: + raise ValueError(filldedent(''' + The mapping between F(x) and f(u) did not give + a unique integrand.''')) + newfunc = newfuncs.pop() + + def _calc_limit_1(F, a, b): + """ + replace d with a, using subs if possible, otherwise limit + where sign of b is considered + """ + wok = F.subs(d, a) + if wok is S.NaN or wok.is_finite is False and a.is_finite: + return limit(sign(b)*F, d, a) + return wok + + def _calc_limit(a, b): + """ + replace d with a, using subs if possible, otherwise limit + where sign of b is considered + """ + avals = list({_calc_limit_1(Fi, a, b) for Fi in F}) + if len(avals) > 1: + raise ValueError(filldedent(''' + The mapping between F(x) and f(u) did not + give a unique limit.''')) + return avals[0] + + newlimits = [] + for xab in self.limits: + sym = xab[0] + if sym == xvar: + if len(xab) 
== 3: + a, b = xab[1:] + a, b = _calc_limit(a, b), _calc_limit(b, a) + if fuzzy_bool(a - b > 0): + a, b = b, a + newfunc = -newfunc + newlimits.append((uvar, a, b)) + elif len(xab) == 2: + a = _calc_limit(xab[1], 1) + newlimits.append((uvar, a)) + else: + newlimits.append(uvar) + else: + newlimits.append(xab) + + return self.func(newfunc, *newlimits) + + def doit(self, **hints): + """ + Perform the integration using any hints given. + + Examples + ======== + + >>> from sympy import Piecewise, S + >>> from sympy.abc import x, t + >>> p = x**2 + Piecewise((0, x/t < 0), (1, True)) + >>> p.integrate((t, S(4)/5, 1), (x, -1, 1)) + 1/3 + + See Also + ======== + + sympy.integrals.trigonometry.trigintegrate + sympy.integrals.heurisch.heurisch + sympy.integrals.rationaltools.ratint + as_sum : Approximate the integral using a sum + """ + if not hints.get('integrals', True): + return self + + deep = hints.get('deep', True) + meijerg = hints.get('meijerg', None) + conds = hints.get('conds', 'piecewise') + risch = hints.get('risch', None) + heurisch = hints.get('heurisch', None) + manual = hints.get('manual', None) + if len(list(filter(None, (manual, meijerg, risch, heurisch)))) > 1: + raise ValueError("At most one of manual, meijerg, risch, heurisch can be True") + elif manual: + meijerg = risch = heurisch = False + elif meijerg: + manual = risch = heurisch = False + elif risch: + manual = meijerg = heurisch = False + elif heurisch: + manual = meijerg = risch = False + eval_kwargs = {"meijerg": meijerg, "risch": risch, "manual": manual, "heurisch": heurisch, + "conds": conds} + + if conds not in ('separate', 'piecewise', 'none'): + raise ValueError('conds must be one of "separate", "piecewise", ' + '"none", got: %s' % conds) + + if risch and any(len(xab) > 1 for xab in self.limits): + raise ValueError('risch=True is only allowed for indefinite integrals.') + + # check for the trivial zero + if self.is_zero: + return S.Zero + + # hacks to handle integrals of + # nested summations + from sympy.concrete.summations import Sum + if isinstance(self.function, Sum): + if any(v in self.function.limits[0] for v in self.variables): + raise ValueError('Limit of the sum cannot be an integration variable.') + if any(l.is_infinite for l in self.function.limits[0][1:]): + return self + _i = self + _sum = self.function + return _sum.func(_i.func(_sum.function, *_i.limits).doit(), *_sum.limits).doit() + + # now compute and check the function + function = self.function + if deep: + function = function.doit(**hints) + if function.is_zero: + return S.Zero + + # hacks to handle special cases + if isinstance(function, MatrixBase): + return function.applyfunc( + lambda f: self.func(f, *self.limits).doit(**hints)) + + if isinstance(function, FormalPowerSeries): + if len(self.limits) > 1: + raise NotImplementedError + xab = self.limits[0] + if len(xab) > 1: + return function.integrate(xab, **eval_kwargs) + else: + return function.integrate(xab[0], **eval_kwargs) + + # There is no trivial answer and special handling + # is done so continue + + # first make sure any definite limits have integration + # variables with matching assumptions + reps = {} + for xab in self.limits: + if len(xab) != 3: + # it makes sense to just make + # all x real but in practice with the + # current state of integration...this + # doesn't work out well + # x = xab[0] + # if x not in reps and not x.is_real: + # reps[x] = Dummy(real=True) + continue + x, a, b = xab + l = (a, b) + if all(i.is_nonnegative for i in l) and not x.is_nonnegative: + d = 
Dummy(positive=True) + elif all(i.is_nonpositive for i in l) and not x.is_nonpositive: + d = Dummy(negative=True) + elif all(i.is_real for i in l) and not x.is_real: + d = Dummy(real=True) + else: + d = None + if d: + reps[x] = d + if reps: + undo = {v: k for k, v in reps.items()} + did = self.xreplace(reps).doit(**hints) + if isinstance(did, tuple): # when separate=True + did = tuple([i.xreplace(undo) for i in did]) + else: + did = did.xreplace(undo) + return did + + # continue with existing assumptions + undone_limits = [] + # ulj = free symbols of any undone limits' upper and lower limits + ulj = set() + for xab in self.limits: + # compute uli, the free symbols in the + # Upper and Lower limits of limit I + if len(xab) == 1: + uli = set(xab[:1]) + elif len(xab) == 2: + uli = xab[1].free_symbols + elif len(xab) == 3: + uli = xab[1].free_symbols.union(xab[2].free_symbols) + # this integral can be done as long as there is no blocking + # limit that has been undone. An undone limit is blocking if + # it contains an integration variable that is in this limit's + # upper or lower free symbols or vice versa + if xab[0] in ulj or any(v[0] in uli for v in undone_limits): + undone_limits.append(xab) + ulj.update(uli) + function = self.func(*([function] + [xab])) + factored_function = function.factor() + if not isinstance(factored_function, Integral): + function = factored_function + continue + + if function.has(Abs, sign) and ( + (len(xab) < 3 and all(x.is_extended_real for x in xab)) or + (len(xab) == 3 and all(x.is_extended_real and not x.is_infinite for + x in xab[1:]))): + # some improper integrals are better off with Abs + xr = Dummy("xr", real=True) + function = (function.xreplace({xab[0]: xr}) + .rewrite(Piecewise).xreplace({xr: xab[0]})) + elif function.has(Min, Max): + function = function.rewrite(Piecewise) + if (function.has(Piecewise) and + not isinstance(function, Piecewise)): + function = piecewise_fold(function) + if isinstance(function, Piecewise): + if len(xab) == 1: + antideriv = function._eval_integral(xab[0], + **eval_kwargs) + else: + antideriv = self._eval_integral( + function, xab[0], **eval_kwargs) + else: + # There are a number of tradeoffs in using the + # Meijer G method. It can sometimes be a lot faster + # than other methods, and sometimes slower. And + # there are certain types of integrals for which it + # is more likely to work than others. These + # heuristics are incorporated in deciding what + # integration methods to try, in what order. See the + # integrate() docstring for details. 
+ def try_meijerg(function, xab): + ret = None + if len(xab) == 3 and meijerg is not False: + x, a, b = xab + try: + res = meijerint_definite(function, x, a, b) + except NotImplementedError: + _debug('NotImplementedError ' + 'from meijerint_definite') + res = None + if res is not None: + f, cond = res + if conds == 'piecewise': + u = self.func(function, (x, a, b)) + # if Piecewise modifies cond too + # much it may not be recognized by + # _condsimp pattern matching so just + # turn off all evaluation + return Piecewise((f, cond), (u, True), + evaluate=False) + elif conds == 'separate': + if len(self.limits) != 1: + raise ValueError(filldedent(''' + conds=separate not supported in + multiple integrals''')) + ret = f, cond + else: + ret = f + return ret + + meijerg1 = meijerg + if (meijerg is not False and + len(xab) == 3 and xab[1].is_extended_real and xab[2].is_extended_real + and not function.is_Poly and + (xab[1].has(oo, -oo) or xab[2].has(oo, -oo))): + ret = try_meijerg(function, xab) + if ret is not None: + function = ret + continue + meijerg1 = False + # If the special meijerg code did not succeed in + # finding a definite integral, then the code using + # meijerint_indefinite will not either (it might + # find an antiderivative, but the answer is likely + # to be nonsensical). Thus if we are requested to + # only use Meijer G-function methods, we give up at + # this stage. Otherwise we just disable G-function + # methods. + if meijerg1 is False and meijerg is True: + antideriv = None + else: + antideriv = self._eval_integral( + function, xab[0], **eval_kwargs) + if antideriv is None and meijerg is True: + ret = try_meijerg(function, xab) + if ret is not None: + function = ret + continue + + final = hints.get('final', True) + # dotit may be iterated but floor terms making atan and acot + # continuous should only be added in the final round + if (final and not isinstance(antideriv, Integral) and + antideriv is not None): + for atan_term in antideriv.atoms(atan): + atan_arg = atan_term.args[0] + # Checking `atan_arg` to be linear combination of `tan` or `cot` + for tan_part in atan_arg.atoms(tan): + x1 = Dummy('x1') + tan_exp1 = atan_arg.subs(tan_part, x1) + # The coefficient of `tan` should be constant + coeff = tan_exp1.diff(x1) + if x1 not in coeff.free_symbols: + a = tan_part.args[0] + antideriv = antideriv.subs(atan_term, Add(atan_term, + sign(coeff)*pi*floor((a-pi/2)/pi))) + for cot_part in atan_arg.atoms(cot): + x1 = Dummy('x1') + cot_exp1 = atan_arg.subs(cot_part, x1) + # The coefficient of `cot` should be constant + coeff = cot_exp1.diff(x1) + if x1 not in coeff.free_symbols: + a = cot_part.args[0] + antideriv = antideriv.subs(atan_term, Add(atan_term, + sign(coeff)*pi*floor((a)/pi))) + + if antideriv is None: + undone_limits.append(xab) + function = self.func(*([function] + [xab])).factor() + factored_function = function.factor() + if not isinstance(factored_function, Integral): + function = factored_function + continue + else: + if len(xab) == 1: + function = antideriv + else: + if len(xab) == 3: + x, a, b = xab + elif len(xab) == 2: + x, b = xab + a = None + else: + raise NotImplementedError + + if deep: + if isinstance(a, Basic): + a = a.doit(**hints) + if isinstance(b, Basic): + b = b.doit(**hints) + + if antideriv.is_Poly: + gens = list(antideriv.gens) + gens.remove(x) + + antideriv = antideriv.as_expr() + + function = antideriv._eval_interval(x, a, b) + function = Poly(function, *gens) + else: + def is_indef_int(g, x): + return (isinstance(g, Integral) and + any(i == 
(x,) for i in g.limits)) + + def eval_factored(f, x, a, b): + # _eval_interval for integrals with + # (constant) factors + # a single indefinite integral is assumed + args = [] + for g in Mul.make_args(f): + if is_indef_int(g, x): + args.append(g._eval_interval(x, a, b)) + else: + args.append(g) + return Mul(*args) + + integrals, others, piecewises = [], [], [] + for f in Add.make_args(antideriv): + if any(is_indef_int(g, x) + for g in Mul.make_args(f)): + integrals.append(f) + elif any(isinstance(g, Piecewise) + for g in Mul.make_args(f)): + piecewises.append(piecewise_fold(f)) + else: + others.append(f) + uneval = Add(*[eval_factored(f, x, a, b) + for f in integrals]) + try: + evalued = Add(*others)._eval_interval(x, a, b) + evalued_pw = piecewise_fold(Add(*piecewises))._eval_interval(x, a, b) + function = uneval + evalued + evalued_pw + except NotImplementedError: + # This can happen if _eval_interval depends in a + # complicated way on limits that cannot be computed + undone_limits.append(xab) + function = self.func(*([function] + [xab])) + factored_function = function.factor() + if not isinstance(factored_function, Integral): + function = factored_function + return function + + def _eval_derivative(self, sym): + """Evaluate the derivative of the current Integral object by + differentiating under the integral sign [1], using the Fundamental + Theorem of Calculus [2] when possible. + + Explanation + =========== + + Whenever an Integral is encountered that is equivalent to zero or + has an integrand that is independent of the variable of integration + those integrals are performed. All others are returned as Integral + instances which can be resolved with doit() (provided they are integrable). + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Differentiation_under_the_integral_sign + .. [2] https://en.wikipedia.org/wiki/Fundamental_theorem_of_calculus + + Examples + ======== + + >>> from sympy import Integral + >>> from sympy.abc import x, y + >>> i = Integral(x + y, y, (y, 1, x)) + >>> i.diff(x) + Integral(x + y, (y, x)) + Integral(1, y, (y, 1, x)) + >>> i.doit().diff(x) == i.diff(x).doit() + True + >>> i.diff(y) + 0 + + The previous must be true since there is no y in the evaluated integral: + + >>> i.free_symbols + {x} + >>> i.doit() + 2*x**3/3 - x/2 - 1/6 + + """ + + # differentiate under the integral sign; we do not + # check for regularity conditions (TODO), see issue 4215 + + # get limits and the function + f, limits = self.function, list(self.limits) + + # the order matters if variables of integration appear in the limits + # so work our way in from the outside to the inside. 
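+ # The code below assembles the Leibniz rule (differentiation under
+ # the integral sign) for the outermost limit:
+ #
+ #   d/dsym Integral(f, (x, a, b))
+ #       = f(b)*diff(b, sym) - f(a)*diff(a, sym)
+ #         + Integral(diff(f, sym), (x, a, b))
+ #
+ # The boundary terms come from the helper _do() defined below; the
+ # last term comes from differentiating the integrand with the
+ # integration variable masked by a Dummy.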
+ limit = limits.pop(-1) + if len(limit) == 3: + x, a, b = limit + elif len(limit) == 2: + x, b = limit + a = None + else: + a = b = None + x = limit[0] + + if limits: # f is the argument to an integral + f = self.func(f, *tuple(limits)) + + # assemble the pieces + def _do(f, ab): + dab_dsym = diff(ab, sym) + if not dab_dsym: + return S.Zero + if isinstance(f, Integral): + limits = [(x, x) if (len(l) == 1 and l[0] == x) else l + for l in f.limits] + f = self.func(f.function, *limits) + return f.subs(x, ab)*dab_dsym + + rv = S.Zero + if b is not None: + rv += _do(f, b) + if a is not None: + rv -= _do(f, a) + if len(limit) == 1 and sym == x: + # the dummy variable *is* also the real-world variable + arg = f + rv += arg + else: + # the dummy variable might match sym but it's + # only a dummy and the actual variable is determined + # by the limits, so mask off the variable of integration + # while differentiating + u = Dummy('u') + arg = f.subs(x, u).diff(sym).subs(u, x) + if arg: + rv += self.func(arg, (x, a, b)) + return rv + + def _eval_integral(self, f, x, meijerg=None, risch=None, manual=None, + heurisch=None, conds='piecewise',final=None): + """ + Calculate the anti-derivative to the function f(x). + + Explanation + =========== + + The following algorithms are applied (roughly in this order): + + 1. Simple heuristics (based on pattern matching and integral table): + + - most frequently used functions (e.g. polynomials, products of + trig functions) + + 2. Integration of rational functions: + + - A complete algorithm for integrating rational functions is + implemented (the Lazard-Rioboo-Trager algorithm). The algorithm + also uses the partial fraction decomposition algorithm + implemented in apart() as a preprocessor to make this process + faster. Note that the integral of a rational function is always + elementary, but in general, it may include a RootSum. + + 3. Full Risch algorithm: + + - The Risch algorithm is a complete decision + procedure for integrating elementary functions, which means that + given any elementary function, it will either compute an + elementary antiderivative, or else prove that none exists. + Currently, part of transcendental case is implemented, meaning + elementary integrals containing exponentials, logarithms, and + (soon!) trigonometric functions can be computed. The algebraic + case, e.g., functions containing roots, is much more difficult + and is not implemented yet. + + - If the routine fails (because the integrand is not elementary, or + because a case is not implemented yet), it continues on to the + next algorithms below. If the routine proves that the integrals + is nonelementary, it still moves on to the algorithms below, + because we might be able to find a closed-form solution in terms + of special functions. If risch=True, however, it will stop here. + + 4. The Meijer G-Function algorithm: + + - This algorithm works by first rewriting the integrand in terms of + very general Meijer G-Function (meijerg in SymPy), integrating + it, and then rewriting the result back, if possible. This + algorithm is particularly powerful for definite integrals (which + is actually part of a different method of Integral), since it can + compute closed-form solutions of definite integrals even when no + closed-form indefinite integral exists. But it also is capable + of computing many indefinite integrals as well. 
+ + - Another advantage of this method is that it can use some results + about the Meijer G-Function to give a result in terms of a + Piecewise expression, which allows to express conditionally + convergent integrals. + + - Setting meijerg=True will cause integrate() to use only this + method. + + 5. The "manual integration" algorithm: + + - This algorithm tries to mimic how a person would find an + antiderivative by hand, for example by looking for a + substitution or applying integration by parts. This algorithm + does not handle as many integrands but can return results in a + more familiar form. + + - Sometimes this algorithm can evaluate parts of an integral; in + this case integrate() will try to evaluate the rest of the + integrand using the other methods here. + + - Setting manual=True will cause integrate() to use only this + method. + + 6. The Heuristic Risch algorithm: + + - This is a heuristic version of the Risch algorithm, meaning that + it is not deterministic. This is tried as a last resort because + it can be very slow. It is still used because not enough of the + full Risch algorithm is implemented, so that there are still some + integrals that can only be computed using this method. The goal + is to implement enough of the Risch and Meijer G-function methods + so that this can be deleted. + + Setting heurisch=True will cause integrate() to use only this + method. Set heurisch=False to not use it. + + """ + + from sympy.integrals.risch import risch_integrate, NonElementaryIntegral + from sympy.integrals.manualintegrate import manualintegrate + + if risch: + try: + return risch_integrate(f, x, conds=conds) + except NotImplementedError: + return None + + if manual: + try: + result = manualintegrate(f, x) + if result is not None and result.func != Integral: + return result + except (ValueError, PolynomialError): + pass + + eval_kwargs = {"meijerg": meijerg, "risch": risch, "manual": manual, + "heurisch": heurisch, "conds": conds} + + # if it is a poly(x) then let the polynomial integrate itself (fast) + # + # It is important to make this check first, otherwise the other code + # will return a SymPy expression instead of a Polynomial. + # + # see Polynomial for details. + if isinstance(f, Poly) and not (manual or meijerg or risch): + # Note: this is deprecated, but the deprecation warning is already + # issued in the Integral constructor. + return f.integrate(x) + + # Piecewise antiderivatives need to call special integrate. + if isinstance(f, Piecewise): + return f.piecewise_integrate(x, **eval_kwargs) + + # let's cut it short if `f` does not depend on `x`; if + # x is only a dummy, that will be handled below + if not f.has(x): + return f*x + + # try to convert to poly(x) and then integrate if successful (fast) + poly = f.as_poly(x) + if poly is not None and not (manual or meijerg or risch): + return poly.integrate().as_expr() + + if risch is not False: + try: + result, i = risch_integrate(f, x, separate_integral=True, + conds=conds) + except NotImplementedError: + pass + else: + if i: + # There was a nonelementary integral. Try integrating it. + + # if no part of the NonElementaryIntegral is integrated by + # the Risch algorithm, then use the original function to + # integrate, instead of re-written one + if result == 0: + return NonElementaryIntegral(f, x).doit(risch=False) + else: + return result + i.doit(risch=False) + else: + return result + + # since Integral(f=g1+g2+...) == Integral(g1) + Integral(g2) + ... 
+ # we are going to handle Add terms separately, + # if `f` is not Add -- we only have one term + + # Note that in general, this is a bad idea, because Integral(g1) + + # Integral(g2) might not be computable, even if Integral(g1 + g2) is. + # For example, Integral(x**x + x**x*log(x)). But many heuristics only + # work term-wise. So we compute this step last, after trying + # risch_integrate. We also try risch_integrate again in this loop, + # because maybe the integral is a sum of an elementary part and a + # nonelementary part (like erf(x) + exp(x)). risch_integrate() is + # quite fast, so this is acceptable. + from sympy.simplify.fu import sincos_to_sum + parts = [] + args = Add.make_args(f) + for g in args: + coeff, g = g.as_independent(x) + + # g(x) = const + if g is S.One and not meijerg: + parts.append(coeff*x) + continue + + # g(x) = expr + O(x**n) + order_term = g.getO() + + if order_term is not None: + h = self._eval_integral(g.removeO(), x, **eval_kwargs) + + if h is not None: + h_order_expr = self._eval_integral(order_term.expr, x, **eval_kwargs) + + if h_order_expr is not None: + h_order_term = order_term.func( + h_order_expr, *order_term.variables) + parts.append(coeff*(h + h_order_term)) + continue + + # NOTE: if there is O(x**n) and we fail to integrate then + # there is no point in trying other methods because they + # will fail, too. + return None + + # c + # g(x) = (a*x+b) + if g.is_Pow and not g.exp.has(x) and not meijerg: + a = Wild('a', exclude=[x]) + b = Wild('b', exclude=[x]) + + M = g.base.match(a*x + b) + + if M is not None: + if g.exp == -1: + h = log(g.base) + elif conds != 'piecewise': + h = g.base**(g.exp + 1) / (g.exp + 1) + else: + h1 = log(g.base) + h2 = g.base**(g.exp + 1) / (g.exp + 1) + h = Piecewise((h2, Ne(g.exp, -1)), (h1, True)) + + parts.append(coeff * h / M[a]) + continue + + # poly(x) + # g(x) = ------- + # poly(x) + if g.is_rational_function(x) and not (manual or meijerg or risch): + parts.append(coeff * ratint(g, x)) + continue + + if not (manual or meijerg or risch): + # g(x) = Mul(trig) + h = trigintegrate(g, x, conds=conds) + if h is not None: + parts.append(coeff * h) + continue + + # g(x) has at least a DiracDelta term + h = deltaintegrate(g, x) + if h is not None: + parts.append(coeff * h) + continue + + from .singularityfunctions import singularityintegrate + # g(x) has at least a Singularity Function term + h = singularityintegrate(g, x) + if h is not None: + parts.append(coeff * h) + continue + + # Try risch again. + if risch is not False: + try: + h, i = risch_integrate(g, x, + separate_integral=True, conds=conds) + except NotImplementedError: + h = None + else: + if i: + h = h + i.doit(risch=False) + + parts.append(coeff*h) + continue + + # fall back to heurisch + if heurisch is not False: + from sympy.integrals.heurisch import (heurisch as heurisch_, + heurisch_wrapper) + try: + if conds == 'piecewise': + h = heurisch_wrapper(g, x, hints=[]) + else: + h = heurisch_(g, x, hints=[]) + except PolynomialError: + # XXX: this exception means there is a bug in the + # implementation of heuristic Risch integration + # algorithm. 
+ h = None + else: + h = None + + if meijerg is not False and h is None: + # rewrite using G functions + try: + h = meijerint_indefinite(g, x) + except NotImplementedError: + _debug('NotImplementedError from meijerint_definite') + if h is not None: + parts.append(coeff * h) + continue + + if h is None and manual is not False: + try: + result = manualintegrate(g, x) + if result is not None and not isinstance(result, Integral): + if result.has(Integral) and not manual: + # Try to have other algorithms do the integrals + # manualintegrate can't handle, + # unless we were asked to use manual only. + # Keep the rest of eval_kwargs in case another + # method was set to False already + new_eval_kwargs = eval_kwargs + new_eval_kwargs["manual"] = False + new_eval_kwargs["final"] = False + result = result.func(*[ + arg.doit(**new_eval_kwargs) if + arg.has(Integral) else arg + for arg in result.args + ]).expand(multinomial=False, + log=False, + power_exp=False, + power_base=False) + if not result.has(Integral): + parts.append(coeff * result) + continue + except (ValueError, PolynomialError): + # can't handle some SymPy expressions + pass + + # if we failed maybe it was because we had + # a product that could have been expanded, + # so let's try an expansion of the whole + # thing before giving up; we don't try this + # at the outset because there are things + # that cannot be solved unless they are + # NOT expanded e.g., x**x*(1+log(x)). There + # should probably be a checker somewhere in this + # routine to look for such cases and try to do + # collection on the expressions if they are already + # in an expanded form + if not h and len(args) == 1: + f = sincos_to_sum(f).expand(mul=True, deep=False) + if f.is_Add: + # Note: risch will be identical on the expanded + # expression, but maybe it will be able to pick out parts, + # like x*(exp(x) + erf(x)). + return self._eval_integral(f, x, **eval_kwargs) + + if h is not None: + parts.append(coeff * h) + else: + return None + + return Add(*parts) + + def _eval_lseries(self, x, logx=None, cdir=0): + expr = self.as_dummy() + symb = x + for l in expr.limits: + if x in l[1:]: + symb = l[0] + break + for term in expr.function.lseries(symb, logx): + yield integrate(term, *expr.limits) + + def _eval_nseries(self, x, n, logx=None, cdir=0): + expr = self.as_dummy() + symb = x + for l in expr.limits: + if x in l[1:]: + symb = l[0] + break + terms, order = expr.function.nseries( + x=symb, n=n, logx=logx).as_coeff_add(Order) + order = [o.subs(symb, x) for o in order] + return integrate(terms, *expr.limits) + Add(*order)*x + + def _eval_as_leading_term(self, x, logx=None, cdir=0): + series_gen = self.args[0].lseries(x) + for leading_term in series_gen: + if leading_term != 0: + break + return integrate(leading_term, *self.args[1:]) + + def _eval_simplify(self, **kwargs): + expr = factor_terms(self) + if isinstance(expr, Integral): + from sympy.simplify.simplify import simplify + return expr.func(*[simplify(i, **kwargs) for i in expr.args]) + return expr.simplify(**kwargs) + + def as_sum(self, n=None, method="midpoint", evaluate=True): + """ + Approximates a definite integral by a sum. + + Parameters + ========== + + n : + The number of subintervals to use, optional. + method : + One of: 'left', 'right', 'midpoint', 'trapezoid'. + evaluate : bool + If False, returns an unevaluated Sum expression. The default + is True, evaluate the sum. + + Notes + ===== + + These methods of approximate integration are described in [1]. 
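+
+        For reference, with ``dx = (b - a)/n`` the sums constructed by this
+        method are (shown here only as a summary of the implementation
+        below):
+
+        - ``left``:      ``dx*Sum(f(a + (k - 1)*dx), (k, 1, n))``
+        - ``right``:     ``dx*Sum(f(a + k*dx), (k, 1, n))``
+        - ``midpoint``:  ``dx*Sum(f(a + k*dx - dx/2), (k, 1, n))``
+        - ``trapezoid``: ``dx*((f(a) + f(b))/2 + Sum(f(a + k*dx), (k, 1, n - 1)))``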
+ + Examples + ======== + + >>> from sympy import Integral, sin, sqrt + >>> from sympy.abc import x, n + >>> e = Integral(sin(x), (x, 3, 7)) + >>> e + Integral(sin(x), (x, 3, 7)) + + For demonstration purposes, this interval will only be split into 2 + regions, bounded by [3, 5] and [5, 7]. + + The left-hand rule uses function evaluations at the left of each + interval: + + >>> e.as_sum(2, 'left') + 2*sin(5) + 2*sin(3) + + The midpoint rule uses evaluations at the center of each interval: + + >>> e.as_sum(2, 'midpoint') + 2*sin(4) + 2*sin(6) + + The right-hand rule uses function evaluations at the right of each + interval: + + >>> e.as_sum(2, 'right') + 2*sin(5) + 2*sin(7) + + The trapezoid rule uses function evaluations on both sides of the + intervals. This is equivalent to taking the average of the left and + right hand rule results: + + >>> e.as_sum(2, 'trapezoid') + 2*sin(5) + sin(3) + sin(7) + >>> (e.as_sum(2, 'left') + e.as_sum(2, 'right'))/2 == _ + True + + Here, the discontinuity at x = 0 can be avoided by using the + midpoint or right-hand method: + + >>> e = Integral(1/sqrt(x), (x, 0, 1)) + >>> e.as_sum(5).n(4) + 1.730 + >>> e.as_sum(10).n(4) + 1.809 + >>> e.doit().n(4) # the actual value is 2 + 2.000 + + The left- or trapezoid method will encounter the discontinuity and + return infinity: + + >>> e.as_sum(5, 'left') + zoo + + The number of intervals can be symbolic. If omitted, a dummy symbol + will be used for it. + + >>> e = Integral(x**2, (x, 0, 2)) + >>> e.as_sum(n, 'right').expand() + 8/3 + 4/n + 4/(3*n**2) + + This shows that the midpoint rule is more accurate, as its error + term decays as the square of n: + + >>> e.as_sum(method='midpoint').expand() + 8/3 - 2/(3*_n**2) + + A symbolic sum is returned with evaluate=False: + + >>> e.as_sum(n, 'midpoint', evaluate=False) + 2*Sum((2*_k/n - 1/n)**2, (_k, 1, n))/n + + See Also + ======== + + Integral.doit : Perform the integration using any hints + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Riemann_sum#Riemann_summation_methods + """ + + from sympy.concrete.summations import Sum + limits = self.limits + if len(limits) > 1: + raise NotImplementedError( + "Multidimensional midpoint rule not implemented yet") + else: + limit = limits[0] + if (len(limit) != 3 or limit[1].is_finite is False or + limit[2].is_finite is False): + raise ValueError("Expecting a definite integral over " + "a finite interval.") + if n is None: + n = Dummy('n', integer=True, positive=True) + else: + n = sympify(n) + if (n.is_positive is False or n.is_integer is False or + n.is_finite is False): + raise ValueError("n must be a positive integer, got %s" % n) + x, a, b = limit + dx = (b - a)/n + k = Dummy('k', integer=True, positive=True) + f = self.function + + if method == "left": + result = dx*Sum(f.subs(x, a + (k-1)*dx), (k, 1, n)) + elif method == "right": + result = dx*Sum(f.subs(x, a + k*dx), (k, 1, n)) + elif method == "midpoint": + result = dx*Sum(f.subs(x, a + k*dx - dx/2), (k, 1, n)) + elif method == "trapezoid": + result = dx*((f.subs(x, a) + f.subs(x, b))/2 + + Sum(f.subs(x, a + k*dx), (k, 1, n - 1))) + else: + raise ValueError("Unknown method %s" % method) + return result.doit() if evaluate else result + + def principal_value(self, **kwargs): + """ + Compute the Cauchy Principal Value of the definite integral of a real function in the given interval + on the real axis. 
+ + Explanation + =========== + + In mathematics, the Cauchy principal value, is a method for assigning values to certain improper + integrals which would otherwise be undefined. + + Examples + ======== + + >>> from sympy import Integral, oo + >>> from sympy.abc import x + >>> Integral(x+1, (x, -oo, oo)).principal_value() + oo + >>> f = 1 / (x**3) + >>> Integral(f, (x, -oo, oo)).principal_value() + 0 + >>> Integral(f, (x, -10, 10)).principal_value() + 0 + >>> Integral(f, (x, -10, oo)).principal_value() + Integral(f, (x, -oo, 10)).principal_value() + 0 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Cauchy_principal_value + .. [2] https://mathworld.wolfram.com/CauchyPrincipalValue.html + """ + if len(self.limits) != 1 or len(list(self.limits[0])) != 3: + raise ValueError("You need to insert a variable, lower_limit, and upper_limit correctly to calculate " + "cauchy's principal value") + x, a, b = self.limits[0] + if not (a.is_comparable and b.is_comparable and a <= b): + raise ValueError("The lower_limit must be smaller than or equal to the upper_limit to calculate " + "cauchy's principal value. Also, a and b need to be comparable.") + if a == b: + return S.Zero + + from sympy.calculus.singularities import singularities + + r = Dummy('r') + f = self.function + singularities_list = [s for s in singularities(f, x) if s.is_comparable and a <= s <= b] + for i in singularities_list: + if i in (a, b): + raise ValueError( + 'The principal value is not defined in the given interval due to singularity at %d.' % (i)) + F = integrate(f, x, **kwargs) + if F.has(Integral): + return self + if a is -oo and b is oo: + I = limit(F - F.subs(x, -x), x, oo) + else: + I = limit(F, x, b, '-') - limit(F, x, a, '+') + for s in singularities_list: + I += limit(((F.subs(x, s - r)) - F.subs(x, s + r)), r, 0, '+') + return I + + + +def integrate(*args, meijerg=None, conds='piecewise', risch=None, heurisch=None, manual=None, **kwargs): + """integrate(f, var, ...) + + .. deprecated:: 1.6 + + Using ``integrate()`` with :class:`~.Poly` is deprecated. Use + :meth:`.Poly.integrate` instead. See :ref:`deprecated-integrate-poly`. + + Explanation + =========== + + Compute definite or indefinite integral of one or more variables + using Risch-Norman algorithm and table lookup. This procedure is + able to handle elementary algebraic and transcendental functions + and also a huge class of special functions, including Airy, + Bessel, Whittaker and Lambert. + + var can be: + + - a symbol -- indefinite integration + - a tuple (symbol, a) -- indefinite integration with result + given with ``a`` replacing ``symbol`` + - a tuple (symbol, a, b) -- definite integration + + Several variables can be specified, in which case the result is + multiple integration. (If var is omitted and the integrand is + univariate, the indefinite integral in that variable will be performed.) + + Indefinite integrals are returned without terms that are independent + of the integration variables. (see examples) + + Definite improper integrals often entail delicate convergence + conditions. Pass conds='piecewise', 'separate' or 'none' to have + these returned, respectively, as a Piecewise function, as a separate + result (i.e. result will be a tuple), or not at all (default is + 'piecewise'). + + **Strategy** + + SymPy uses various approaches to definite integration. One method is to + find an antiderivative for the integrand, and then use the fundamental + theorem of calculus. 
Various functions are implemented to integrate + polynomial, rational and trigonometric functions, and integrands + containing DiracDelta terms. + + SymPy also implements the part of the Risch algorithm, which is a decision + procedure for integrating elementary functions, i.e., the algorithm can + either find an elementary antiderivative, or prove that one does not + exist. There is also a (very successful, albeit somewhat slow) general + implementation of the heuristic Risch algorithm. This algorithm will + eventually be phased out as more of the full Risch algorithm is + implemented. See the docstring of Integral._eval_integral() for more + details on computing the antiderivative using algebraic methods. + + The option risch=True can be used to use only the (full) Risch algorithm. + This is useful if you want to know if an elementary function has an + elementary antiderivative. If the indefinite Integral returned by this + function is an instance of NonElementaryIntegral, that means that the + Risch algorithm has proven that integral to be non-elementary. Note that + by default, additional methods (such as the Meijer G method outlined + below) are tried on these integrals, as they may be expressible in terms + of special functions, so if you only care about elementary answers, use + risch=True. Also note that an unevaluated Integral returned by this + function is not necessarily a NonElementaryIntegral, even with risch=True, + as it may just be an indication that the particular part of the Risch + algorithm needed to integrate that function is not yet implemented. + + Another family of strategies comes from re-writing the integrand in + terms of so-called Meijer G-functions. Indefinite integrals of a + single G-function can always be computed, and the definite integral + of a product of two G-functions can be computed from zero to + infinity. Various strategies are implemented to rewrite integrands + as G-functions, and use this information to compute integrals (see + the ``meijerint`` module). + + The option manual=True can be used to use only an algorithm that tries + to mimic integration by hand. This algorithm does not handle as many + integrands as the other algorithms implemented but may return results in + a more familiar form. The ``manualintegrate`` module has functions that + return the steps used (see the module docstring for more information). + + In general, the algebraic methods work best for computing + antiderivatives of (possibly complicated) combinations of elementary + functions. The G-function methods work best for computing definite + integrals from zero to infinity of moderately complicated + combinations of special functions, or indefinite integrals of very + simple combinations of special functions. + + The strategy employed by the integration code is as follows: + + - If computing a definite integral, and both limits are real, + and at least one limit is +- oo, try the G-function method of + definite integration first. + + - Try to find an antiderivative, using all available methods, ordered + by performance (that is try fastest method first, slowest last; in + particular polynomial integration is tried first, Meijer + G-functions second to last, and heuristic Risch last). + + - If still not successful, try G-functions irrespective of the + limits. + + The option meijerg=True, False, None can be used to, respectively: + always use G-function methods and no others, never use G-function + methods, or use all available methods (in order as described above). 
+ It defaults to None. + + Examples + ======== + + >>> from sympy import integrate, log, exp, oo + >>> from sympy.abc import a, x, y + + >>> integrate(x*y, x) + x**2*y/2 + + >>> integrate(log(x), x) + x*log(x) - x + + >>> integrate(log(x), (x, 1, a)) + a*log(a) - a + 1 + + >>> integrate(x) + x**2/2 + + Terms that are independent of x are dropped by indefinite integration: + + >>> from sympy import sqrt + >>> integrate(sqrt(1 + x), (x, 0, x)) + 2*(x + 1)**(3/2)/3 - 2/3 + >>> integrate(sqrt(1 + x), x) + 2*(x + 1)**(3/2)/3 + + >>> integrate(x*y) + Traceback (most recent call last): + ... + ValueError: specify integration variables to integrate x*y + + Note that ``integrate(x)`` syntax is meant only for convenience + in interactive sessions and should be avoided in library code. + + >>> integrate(x**a*exp(-x), (x, 0, oo)) # same as conds='piecewise' + Piecewise((gamma(a + 1), re(a) > -1), + (Integral(x**a*exp(-x), (x, 0, oo)), True)) + + >>> integrate(x**a*exp(-x), (x, 0, oo), conds='none') + gamma(a + 1) + + >>> integrate(x**a*exp(-x), (x, 0, oo), conds='separate') + (gamma(a + 1), re(a) > -1) + + See Also + ======== + + Integral, Integral.doit + + """ + doit_flags = { + 'deep': False, + 'meijerg': meijerg, + 'conds': conds, + 'risch': risch, + 'heurisch': heurisch, + 'manual': manual + } + integral = Integral(*args, **kwargs) + + if isinstance(integral, Integral): + return integral.doit(**doit_flags) + else: + new_args = [a.doit(**doit_flags) if isinstance(a, Integral) else a + for a in integral.args] + return integral.func(*new_args) + + +def line_integrate(field, curve, vars): + """line_integrate(field, Curve, variables) + + Compute the line integral. + + Examples + ======== + + >>> from sympy import Curve, line_integrate, E, ln + >>> from sympy.abc import x, y, t + >>> C = Curve([E**t + 1, E**t - 1], (t, 0, ln(2))) + >>> line_integrate(x + y, C, [x, y]) + 3*sqrt(2) + + See Also + ======== + + sympy.integrals.integrals.integrate, Integral + """ + from sympy.geometry import Curve + F = sympify(field) + if not F: + raise ValueError( + "Expecting function specifying field as first argument.") + if not isinstance(curve, Curve): + raise ValueError("Expecting Curve entity as second argument.") + if not is_sequence(vars): + raise ValueError("Expecting ordered iterable for variables.") + if len(curve.functions) != len(vars): + raise ValueError("Field variable size does not match curve dimension.") + + if curve.parameter in vars: + raise ValueError("Curve parameter clashes with field parameters.") + + # Calculate derivatives for line parameter functions + # F(r) -> F(r(t)) and finally F(r(t)*r'(t)) + Ft = F + dldt = 0 + for i, var in enumerate(vars): + _f = curve.functions[i] + _dn = diff(_f, curve.parameter) + # ...arc length + dldt = dldt + (_dn * _dn) + Ft = Ft.subs(var, _f) + Ft = Ft * sqrt(dldt) + + integral = Integral(Ft, curve.limits).doit(deep=False) + return integral + + +### Property function dispatching ### + +@shape.register(Integral) +def _(expr): + return shape(expr.function) + +# Delayed imports +from .deltafunctions import deltaintegrate +from .meijerint import meijerint_definite, meijerint_indefinite, _debug +from .trigonometry import trigintegrate diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/intpoly.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/intpoly.py new file mode 100644 index 0000000000000000000000000000000000000000..f08daaa64061f495fbe3c0b73a302ff013487713 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/intpoly.py @@ -0,0 +1,1302 @@ +""" +Module to implement integration of uni/bivariate polynomials over +2D Polytopes and uni/bi/trivariate polynomials over 3D Polytopes. + +Uses evaluation techniques as described in Chin et al. (2015) [1]. + + +References +=========== + +.. [1] Chin, Eric B., Jean B. Lasserre, and N. Sukumar. "Numerical integration +of homogeneous functions on convex and nonconvex polygons and polyhedra." +Computational Mechanics 56.6 (2015): 967-981 + +PDF link : http://dilbert.engr.ucdavis.edu/~suku/quadrature/cls-integration.pdf +""" + +from functools import cmp_to_key + +from sympy.abc import x, y, z +from sympy.core import S, diff, Expr, Symbol +from sympy.core.sympify import _sympify +from sympy.geometry import Segment2D, Polygon, Point, Point2D +from sympy.polys.polytools import LC, gcd_list, degree_list, Poly +from sympy.simplify.simplify import nsimplify + + +def polytope_integrate(poly, expr=None, *, clockwise=False, max_degree=None): + """Integrates polynomials over 2/3-Polytopes. + + Explanation + =========== + + This function accepts the polytope in ``poly`` and the function in ``expr`` + (uni/bi/trivariate polynomials are implemented) and returns + the exact integral of ``expr`` over ``poly``. + + Parameters + ========== + + poly : The input Polygon. + + expr : The input polynomial. + + clockwise : Binary value to sort input points of 2-Polytope clockwise.(Optional) + + max_degree : The maximum degree of any monomial of the input polynomial.(Optional) + + Examples + ======== + + >>> from sympy.abc import x, y + >>> from sympy import Point, Polygon + >>> from sympy.integrals.intpoly import polytope_integrate + >>> polygon = Polygon(Point(0, 0), Point(0, 1), Point(1, 1), Point(1, 0)) + >>> polys = [1, x, y, x*y, x**2*y, x*y**2] + >>> expr = x*y + >>> polytope_integrate(polygon, expr) + 1/4 + >>> polytope_integrate(polygon, polys, max_degree=3) + {1: 1, x: 1/2, y: 1/2, x*y: 1/4, x*y**2: 1/6, x**2*y: 1/6} + """ + if clockwise: + if isinstance(poly, Polygon): + poly = Polygon(*point_sort(poly.vertices), evaluate=False) + else: + raise TypeError("clockwise=True works for only 2-Polytope" + "V-representation input") + + if isinstance(poly, Polygon): + # For Vertex Representation(2D case) + hp_params = hyperplane_parameters(poly) + facets = poly.sides + elif len(poly[0]) == 2: + # For Hyperplane Representation(2D case) + plen = len(poly) + if len(poly[0][0]) == 2: + intersections = [intersection(poly[(i - 1) % plen], poly[i], + "plane2D") + for i in range(0, plen)] + hp_params = poly + lints = len(intersections) + facets = [Segment2D(intersections[i], + intersections[(i + 1) % lints]) + for i in range(lints)] + else: + raise NotImplementedError("Integration for H-representation 3D" + "case not implemented yet.") + else: + # For Vertex Representation(3D case) + vertices = poly[0] + facets = poly[1:] + hp_params = hyperplane_parameters(facets, vertices) + + if max_degree is None: + if expr is None: + raise TypeError('Input expression must be a valid SymPy expression') + return main_integrate3d(expr, facets, vertices, hp_params) + + if max_degree is not None: + result = {} + if expr is not None: + f_expr = [] + for e in expr: + _ = decompose(e) + if len(_) == 1 and not _.popitem()[0]: + f_expr.append(e) + elif Poly(e).total_degree() <= max_degree: + f_expr.append(e) + expr = f_expr + + if not isinstance(expr, list) and expr is not None: + raise TypeError('Input polynomials must be list of expressions') + + if 
len(hp_params[0][0]) == 3: + result_dict = main_integrate3d(0, facets, vertices, hp_params, + max_degree) + else: + result_dict = main_integrate(0, facets, hp_params, max_degree) + + if expr is None: + return result_dict + + for poly in expr: + poly = _sympify(poly) + if poly not in result: + if poly.is_zero: + result[S.Zero] = S.Zero + continue + integral_value = S.Zero + monoms = decompose(poly, separate=True) + for monom in monoms: + monom = nsimplify(monom) + coeff, m = strip(monom) + integral_value += result_dict[m] * coeff + result[poly] = integral_value + return result + + if expr is None: + raise TypeError('Input expression must be a valid SymPy expression') + + return main_integrate(expr, facets, hp_params) + + +def strip(monom): + if monom.is_zero: + return S.Zero, S.Zero + elif monom.is_number: + return monom, S.One + else: + coeff = LC(monom) + return coeff, monom / coeff + +def _polynomial_integrate(polynomials, facets, hp_params): + dims = (x, y) + dim_length = len(dims) + integral_value = S.Zero + for deg in polynomials: + poly_contribute = S.Zero + facet_count = 0 + for hp in hp_params: + value_over_boundary = integration_reduction(facets, + facet_count, + hp[0], hp[1], + polynomials[deg], + dims, deg) + poly_contribute += value_over_boundary * (hp[1] / norm(hp[0])) + facet_count += 1 + poly_contribute /= (dim_length + deg) + integral_value += poly_contribute + + return integral_value + + +def main_integrate3d(expr, facets, vertices, hp_params, max_degree=None): + """Function to translate the problem of integrating uni/bi/tri-variate + polynomials over a 3-Polytope to integrating over its faces. + This is done using Generalized Stokes' Theorem and Euler's Theorem. + + Parameters + ========== + + expr : + The input polynomial. + facets : + Faces of the 3-Polytope(expressed as indices of `vertices`). + vertices : + Vertices that constitute the Polytope. + hp_params : + Hyperplane Parameters of the facets. + max_degree : optional + Max degree of constituent monomial in given list of polynomial. 
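+
+    (Summary of the computation below: the integral of each homogeneous
+    component ``p`` of degree ``q`` is recovered from its face integrals
+    as ``Sum_i (b_i/norm(a_i)) * Integral(p, face_i) / (3 + q)``, where
+    ``(a_i, b_i)`` are the hyperplane parameters of ``face_i``; see
+    Chin et al., referenced in the module docstring.)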
+ + Examples + ======== + + >>> from sympy.integrals.intpoly import main_integrate3d, \ + hyperplane_parameters + >>> cube = [[(0, 0, 0), (0, 0, 5), (0, 5, 0), (0, 5, 5), (5, 0, 0),\ + (5, 0, 5), (5, 5, 0), (5, 5, 5)],\ + [2, 6, 7, 3], [3, 7, 5, 1], [7, 6, 4, 5], [1, 5, 4, 0],\ + [3, 1, 0, 2], [0, 4, 6, 2]] + >>> vertices = cube[0] + >>> faces = cube[1:] + >>> hp_params = hyperplane_parameters(faces, vertices) + >>> main_integrate3d(1, faces, vertices, hp_params) + -125 + """ + result = {} + dims = (x, y, z) + dim_length = len(dims) + if max_degree: + grad_terms = gradient_terms(max_degree, 3) + flat_list = [term for z_terms in grad_terms + for x_term in z_terms + for term in x_term] + + for term in flat_list: + result[term[0]] = 0 + + for facet_count, hp in enumerate(hp_params): + a, b = hp[0], hp[1] + x0 = vertices[facets[facet_count][0]] + + for i, monom in enumerate(flat_list): + # Every monomial is a tuple : + # (term, x_degree, y_degree, z_degree, value over boundary) + expr, x_d, y_d, z_d, z_index, y_index, x_index, _ = monom + degree = x_d + y_d + z_d + if b.is_zero: + value_over_face = S.Zero + else: + value_over_face = \ + integration_reduction_dynamic(facets, facet_count, a, + b, expr, degree, dims, + x_index, y_index, + z_index, x0, grad_terms, + i, vertices, hp) + monom[7] = value_over_face + result[expr] += value_over_face * \ + (b / norm(a)) / (dim_length + x_d + y_d + z_d) + return result + else: + integral_value = S.Zero + polynomials = decompose(expr) + for deg in polynomials: + poly_contribute = S.Zero + facet_count = 0 + for i, facet in enumerate(facets): + hp = hp_params[i] + if hp[1].is_zero: + continue + pi = polygon_integrate(facet, hp, i, facets, vertices, expr, deg) + poly_contribute += pi *\ + (hp[1] / norm(tuple(hp[0]))) + facet_count += 1 + poly_contribute /= (dim_length + deg) + integral_value += poly_contribute + return integral_value + + +def main_integrate(expr, facets, hp_params, max_degree=None): + """Function to translate the problem of integrating univariate/bivariate + polynomials over a 2-Polytope to integrating over its boundary facets. + This is done using Generalized Stokes's Theorem and Euler's Theorem. + + Parameters + ========== + + expr : + The input polynomial. + facets : + Facets(Line Segments) of the 2-Polytope. + hp_params : + Hyperplane Parameters of the facets. + max_degree : optional + The maximum degree of any monomial of the input polynomial. 
+ + >>> from sympy.abc import x, y + >>> from sympy.integrals.intpoly import main_integrate,\ + hyperplane_parameters + >>> from sympy import Point, Polygon + >>> triangle = Polygon(Point(0, 3), Point(5, 3), Point(1, 1)) + >>> facets = triangle.sides + >>> hp_params = hyperplane_parameters(triangle) + >>> main_integrate(x**2 + y**2, facets, hp_params) + 325/6 + """ + dims = (x, y) + dim_length = len(dims) + result = {} + + if max_degree: + grad_terms = [[0, 0, 0, 0]] + gradient_terms(max_degree) + + for facet_count, hp in enumerate(hp_params): + a, b = hp[0], hp[1] + x0 = facets[facet_count].points[0] + + for i, monom in enumerate(grad_terms): + # Every monomial is a tuple : + # (term, x_degree, y_degree, value over boundary) + m, x_d, y_d, _ = monom + value = result.get(m, None) + degree = S.Zero + if b.is_zero: + value_over_boundary = S.Zero + else: + degree = x_d + y_d + value_over_boundary = \ + integration_reduction_dynamic(facets, facet_count, a, + b, m, degree, dims, x_d, + y_d, max_degree, x0, + grad_terms, i) + monom[3] = value_over_boundary + if value is not None: + result[m] += value_over_boundary * \ + (b / norm(a)) / (dim_length + degree) + else: + result[m] = value_over_boundary * \ + (b / norm(a)) / (dim_length + degree) + return result + else: + if not isinstance(expr, list): + polynomials = decompose(expr) + return _polynomial_integrate(polynomials, facets, hp_params) + else: + return {e: _polynomial_integrate(decompose(e), facets, hp_params) for e in expr} + + +def polygon_integrate(facet, hp_param, index, facets, vertices, expr, degree): + """Helper function to integrate the input uni/bi/trivariate polynomial + over a certain face of the 3-Polytope. + + Parameters + ========== + + facet : + Particular face of the 3-Polytope over which ``expr`` is integrated. + index : + The index of ``facet`` in ``facets``. + facets : + Faces of the 3-Polytope(expressed as indices of `vertices`). + vertices : + Vertices that constitute the facet. + expr : + The input polynomial. + degree : + Degree of ``expr``. + + Examples + ======== + + >>> from sympy.integrals.intpoly import polygon_integrate + >>> cube = [[(0, 0, 0), (0, 0, 5), (0, 5, 0), (0, 5, 5), (5, 0, 0),\ + (5, 0, 5), (5, 5, 0), (5, 5, 5)],\ + [2, 6, 7, 3], [3, 7, 5, 1], [7, 6, 4, 5], [1, 5, 4, 0],\ + [3, 1, 0, 2], [0, 4, 6, 2]] + >>> facet = cube[1] + >>> facets = cube[1:] + >>> vertices = cube[0] + >>> polygon_integrate(facet, [(0, 1, 0), 5], 0, facets, vertices, 1, 0) + -25 + """ + expr = S(expr) + if expr.is_zero: + return S.Zero + result = S.Zero + x0 = vertices[facet[0]] + facet_len = len(facet) + for i, fac in enumerate(facet): + side = (vertices[fac], vertices[facet[(i + 1) % facet_len]]) + result += distance_to_side(x0, side, hp_param[0]) *\ + lineseg_integrate(facet, i, side, expr, degree) + if not expr.is_number: + expr = diff(expr, x) * x0[0] + diff(expr, y) * x0[1] +\ + diff(expr, z) * x0[2] + result += polygon_integrate(facet, hp_param, index, facets, vertices, + expr, degree - 1) + result /= (degree + 2) + return result + + +def distance_to_side(point, line_seg, A): + """Helper function to compute the signed distance between given 3D point + and a line segment. 
+ + Parameters + ========== + + point : 3D Point + line_seg : Line Segment + + Examples + ======== + + >>> from sympy.integrals.intpoly import distance_to_side + >>> point = (0, 0, 0) + >>> distance_to_side(point, [(0, 0, 1), (0, 1, 0)], (1, 0, 0)) + -sqrt(2)/2 + """ + x1, x2 = line_seg + rev_normal = [-1 * S(i)/norm(A) for i in A] + vector = [x2[i] - x1[i] for i in range(0, 3)] + vector = [vector[i]/norm(vector) for i in range(0, 3)] + + n_side = cross_product((0, 0, 0), rev_normal, vector) + vectorx0 = [line_seg[0][i] - point[i] for i in range(0, 3)] + dot_product = sum([vectorx0[i] * n_side[i] for i in range(0, 3)]) + + return dot_product + + +def lineseg_integrate(polygon, index, line_seg, expr, degree): + """Helper function to compute the line integral of ``expr`` over ``line_seg``. + + Parameters + =========== + + polygon : + Face of a 3-Polytope. + index : + Index of line_seg in polygon. + line_seg : + Line Segment. + + Examples + ======== + + >>> from sympy.integrals.intpoly import lineseg_integrate + >>> polygon = [(0, 5, 0), (5, 5, 0), (5, 5, 5), (0, 5, 5)] + >>> line_seg = [(0, 5, 0), (5, 5, 0)] + >>> lineseg_integrate(polygon, 0, line_seg, 1, 0) + 5 + """ + expr = _sympify(expr) + if expr.is_zero: + return S.Zero + result = S.Zero + x0 = line_seg[0] + distance = norm(tuple([line_seg[1][i] - line_seg[0][i] for i in + range(3)])) + if isinstance(expr, Expr): + expr_dict = {x: line_seg[1][0], + y: line_seg[1][1], + z: line_seg[1][2]} + result += distance * expr.subs(expr_dict) + else: + result += distance * expr + + expr = diff(expr, x) * x0[0] + diff(expr, y) * x0[1] +\ + diff(expr, z) * x0[2] + + result += lineseg_integrate(polygon, index, line_seg, expr, degree - 1) + result /= (degree + 1) + return result + + +def integration_reduction(facets, index, a, b, expr, dims, degree): + """Helper method for main_integrate. Returns the value of the input + expression evaluated over the polytope facet referenced by a given index. + + Parameters + =========== + + facets : + List of facets of the polytope. + index : + Index referencing the facet to integrate the expression over. + a : + Hyperplane parameter denoting direction. + b : + Hyperplane parameter denoting distance. + expr : + The expression to integrate over the facet. + dims : + List of symbols denoting axes. + degree : + Degree of the homogeneous polynomial. + + Examples + ======== + + >>> from sympy.abc import x, y + >>> from sympy.integrals.intpoly import integration_reduction,\ + hyperplane_parameters + >>> from sympy import Point, Polygon + >>> triangle = Polygon(Point(0, 3), Point(5, 3), Point(1, 1)) + >>> facets = triangle.sides + >>> a, b = hyperplane_parameters(triangle)[0] + >>> integration_reduction(facets, 0, a, b, 1, (x, y), 0) + 5 + """ + expr = _sympify(expr) + if expr.is_zero: + return expr + + value = S.Zero + x0 = facets[index].points[0] + m = len(facets) + gens = (x, y) + + inner_product = diff(expr, gens[0]) * x0[0] + diff(expr, gens[1]) * x0[1] + + if inner_product != 0: + value += integration_reduction(facets, index, a, b, + inner_product, dims, degree - 1) + + value += left_integral2D(m, index, facets, x0, expr, gens) + + return value/(len(dims) + degree - 1) + + +def left_integral2D(m, index, facets, x0, expr, gens): + """Computes the left integral of Eq 10 in Chin et al. + For the 2D case, the integral is just an evaluation of the polynomial + at the intersection of two facets which is multiplied by the distance + between the first point of facet and that intersection. 
+ + Parameters + ========== + + m : + No. of hyperplanes. + index : + Index of facet to find intersections with. + facets : + List of facets(Line Segments in 2D case). + x0 : + First point on facet referenced by index. + expr : + Input polynomial + gens : + Generators which generate the polynomial + + Examples + ======== + + >>> from sympy.abc import x, y + >>> from sympy.integrals.intpoly import left_integral2D + >>> from sympy import Point, Polygon + >>> triangle = Polygon(Point(0, 3), Point(5, 3), Point(1, 1)) + >>> facets = triangle.sides + >>> left_integral2D(3, 0, facets, facets[0].points[0], 1, (x, y)) + 5 + """ + value = S.Zero + for j in range(m): + intersect = () + if j in ((index - 1) % m, (index + 1) % m): + intersect = intersection(facets[index], facets[j], "segment2D") + if intersect: + distance_origin = norm(tuple(map(lambda x, y: x - y, + intersect, x0))) + if is_vertex(intersect): + if isinstance(expr, Expr): + if len(gens) == 3: + expr_dict = {gens[0]: intersect[0], + gens[1]: intersect[1], + gens[2]: intersect[2]} + else: + expr_dict = {gens[0]: intersect[0], + gens[1]: intersect[1]} + value += distance_origin * expr.subs(expr_dict) + else: + value += distance_origin * expr + return value + + +def integration_reduction_dynamic(facets, index, a, b, expr, degree, dims, + x_index, y_index, max_index, x0, + monomial_values, monom_index, vertices=None, + hp_param=None): + """The same integration_reduction function which uses a dynamic + programming approach to compute terms by using the values of the integral + of previously computed terms. + + Parameters + ========== + + facets : + Facets of the Polytope. + index : + Index of facet to find intersections with.(Used in left_integral()). + a, b : + Hyperplane parameters. + expr : + Input monomial. + degree : + Total degree of ``expr``. + dims : + Tuple denoting axes variables. + x_index : + Exponent of 'x' in ``expr``. + y_index : + Exponent of 'y' in ``expr``. + max_index : + Maximum exponent of any monomial in ``monomial_values``. + x0 : + First point on ``facets[index]``. + monomial_values : + List of monomial values constituting the polynomial. + monom_index : + Index of monomial whose integration is being found. + vertices : optional + Coordinates of vertices constituting the 3-Polytope. + hp_param : optional + Hyperplane Parameter of the face of the facets[index]. 
+ + Examples + ======== + + >>> from sympy.abc import x, y + >>> from sympy.integrals.intpoly import (integration_reduction_dynamic, \ + hyperplane_parameters) + >>> from sympy import Point, Polygon + >>> triangle = Polygon(Point(0, 3), Point(5, 3), Point(1, 1)) + >>> facets = triangle.sides + >>> a, b = hyperplane_parameters(triangle)[0] + >>> x0 = facets[0].points[0] + >>> monomial_values = [[0, 0, 0, 0], [1, 0, 0, 5],\ + [y, 0, 1, 15], [x, 1, 0, None]] + >>> integration_reduction_dynamic(facets, 0, a, b, x, 1, (x, y), 1, 0, 1,\ + x0, monomial_values, 3) + 25/2 + """ + value = S.Zero + m = len(facets) + + if expr == S.Zero: + return expr + + if len(dims) == 2: + if not expr.is_number: + _, x_degree, y_degree, _ = monomial_values[monom_index] + x_index = monom_index - max_index + \ + x_index - 2 if x_degree > 0 else 0 + y_index = monom_index - 1 if y_degree > 0 else 0 + x_value, y_value =\ + monomial_values[x_index][3], monomial_values[y_index][3] + + value += x_degree * x_value * x0[0] + y_degree * y_value * x0[1] + + value += left_integral2D(m, index, facets, x0, expr, dims) + else: + # For 3D use case the max_index contains the z_degree of the term + z_index = max_index + if not expr.is_number: + x_degree, y_degree, z_degree = y_index,\ + z_index - x_index - y_index, x_index + x_value = monomial_values[z_index - 1][y_index - 1][x_index][7]\ + if x_degree > 0 else 0 + y_value = monomial_values[z_index - 1][y_index][x_index][7]\ + if y_degree > 0 else 0 + z_value = monomial_values[z_index - 1][y_index][x_index - 1][7]\ + if z_degree > 0 else 0 + + value += x_degree * x_value * x0[0] + y_degree * y_value * x0[1] \ + + z_degree * z_value * x0[2] + + value += left_integral3D(facets, index, expr, + vertices, hp_param, degree) + return value / (len(dims) + degree - 1) + + +def left_integral3D(facets, index, expr, vertices, hp_param, degree): + """Computes the left integral of Eq 10 in Chin et al. + + Explanation + =========== + + For the 3D case, this is the sum of the integral values over constituting + line segments of the face (which is accessed by facets[index]) multiplied + by the distance between the first point of facet and that line segment. + + Parameters + ========== + + facets : + List of faces of the 3-Polytope. + index : + Index of face over which integral is to be calculated. + expr : + Input polynomial. + vertices : + List of vertices that constitute the 3-Polytope. + hp_param : + The hyperplane parameters of the face. + degree : + Degree of the ``expr``. + + Examples + ======== + + >>> from sympy.integrals.intpoly import left_integral3D + >>> cube = [[(0, 0, 0), (0, 0, 5), (0, 5, 0), (0, 5, 5), (5, 0, 0),\ + (5, 0, 5), (5, 5, 0), (5, 5, 5)],\ + [2, 6, 7, 3], [3, 7, 5, 1], [7, 6, 4, 5], [1, 5, 4, 0],\ + [3, 1, 0, 2], [0, 4, 6, 2]] + >>> facets = cube[1:] + >>> vertices = cube[0] + >>> left_integral3D(facets, 3, 1, vertices, ([0, -1, 0], -5), 0) + -50 + """ + value = S.Zero + facet = facets[index] + x0 = vertices[facet[0]] + facet_len = len(facet) + for i, fac in enumerate(facet): + side = (vertices[fac], vertices[facet[(i + 1) % facet_len]]) + value += distance_to_side(x0, side, hp_param[0]) * \ + lineseg_integrate(facet, i, side, expr, degree) + return value + + +def gradient_terms(binomial_power=0, no_of_gens=2): + """Returns a list of all the possible monomials between + 0 and y**binomial_power for 2D case and z**binomial_power + for 3D case. + + Parameters + ========== + + binomial_power : + Power upto which terms are generated. 
+ no_of_gens : + Denotes whether terms are being generated for 2D or 3D case. + + Examples + ======== + + >>> from sympy.integrals.intpoly import gradient_terms + >>> gradient_terms(2) + [[1, 0, 0, 0], [y, 0, 1, 0], [y**2, 0, 2, 0], [x, 1, 0, 0], + [x*y, 1, 1, 0], [x**2, 2, 0, 0]] + >>> gradient_terms(2, 3) + [[[[1, 0, 0, 0, 0, 0, 0, 0]]], [[[y, 0, 1, 0, 1, 0, 0, 0], + [z, 0, 0, 1, 1, 0, 1, 0]], [[x, 1, 0, 0, 1, 1, 0, 0]]], + [[[y**2, 0, 2, 0, 2, 0, 0, 0], [y*z, 0, 1, 1, 2, 0, 1, 0], + [z**2, 0, 0, 2, 2, 0, 2, 0]], [[x*y, 1, 1, 0, 2, 1, 0, 0], + [x*z, 1, 0, 1, 2, 1, 1, 0]], [[x**2, 2, 0, 0, 2, 2, 0, 0]]]] + """ + if no_of_gens == 2: + count = 0 + terms = [None] * int((binomial_power ** 2 + 3 * binomial_power + 2) / 2) + for x_count in range(0, binomial_power + 1): + for y_count in range(0, binomial_power - x_count + 1): + terms[count] = [x**x_count*y**y_count, + x_count, y_count, 0] + count += 1 + else: + terms = [[[[x ** x_count * y ** y_count * + z ** (z_count - y_count - x_count), + x_count, y_count, z_count - y_count - x_count, + z_count, x_count, z_count - y_count - x_count, 0] + for y_count in range(z_count - x_count, -1, -1)] + for x_count in range(0, z_count + 1)] + for z_count in range(0, binomial_power + 1)] + return terms + + +def hyperplane_parameters(poly, vertices=None): + """A helper function to return the hyperplane parameters + of which the facets of the polytope are a part of. + + Parameters + ========== + + poly : + The input 2/3-Polytope. + vertices : + Vertex indices of 3-Polytope. + + Examples + ======== + + >>> from sympy import Point, Polygon + >>> from sympy.integrals.intpoly import hyperplane_parameters + >>> hyperplane_parameters(Polygon(Point(0, 3), Point(5, 3), Point(1, 1))) + [((0, 1), 3), ((1, -2), -1), ((-2, -1), -3)] + >>> cube = [[(0, 0, 0), (0, 0, 5), (0, 5, 0), (0, 5, 5), (5, 0, 0),\ + (5, 0, 5), (5, 5, 0), (5, 5, 5)],\ + [2, 6, 7, 3], [3, 7, 5, 1], [7, 6, 4, 5], [1, 5, 4, 0],\ + [3, 1, 0, 2], [0, 4, 6, 2]] + >>> hyperplane_parameters(cube[1:], cube[0]) + [([0, -1, 0], -5), ([0, 0, -1], -5), ([-1, 0, 0], -5), + ([0, 1, 0], 0), ([1, 0, 0], 0), ([0, 0, 1], 0)] + """ + if isinstance(poly, Polygon): + vertices = list(poly.vertices) + [poly.vertices[0]] # Close the polygon + params = [None] * (len(vertices) - 1) + + for i in range(len(vertices) - 1): + v1 = vertices[i] + v2 = vertices[i + 1] + + a1 = v1[1] - v2[1] + a2 = v2[0] - v1[0] + b = v2[0] * v1[1] - v2[1] * v1[0] + + factor = gcd_list([a1, a2, b]) + + b = S(b) / factor + a = (S(a1) / factor, S(a2) / factor) + params[i] = (a, b) + else: + params = [None] * len(poly) + for i, polygon in enumerate(poly): + v1, v2, v3 = [vertices[vertex] for vertex in polygon[:3]] + normal = cross_product(v1, v2, v3) + b = sum([normal[j] * v1[j] for j in range(0, 3)]) + fac = gcd_list(normal) + if fac.is_zero: + fac = 1 + normal = [j / fac for j in normal] + b = b / fac + params[i] = (normal, b) + return params + + +def cross_product(v1, v2, v3): + """Returns the cross-product of vectors (v2 - v1) and (v3 - v1) + That is : (v2 - v1) X (v3 - v1) + """ + v2 = [v2[j] - v1[j] for j in range(0, 3)] + v3 = [v3[j] - v1[j] for j in range(0, 3)] + return [v3[2] * v2[1] - v3[1] * v2[2], + v3[0] * v2[2] - v3[2] * v2[0], + v3[1] * v2[0] - v3[0] * v2[1]] + + +def best_origin(a, b, lineseg, expr): + """Helper method for polytope_integrate. Currently not used in the main + algorithm. 
+ + Explanation + =========== + + Returns a point on the lineseg whose vector inner product with the + divergence of `expr` yields an expression with the least maximum + total power. + + Parameters + ========== + + a : + Hyperplane parameter denoting direction. + b : + Hyperplane parameter denoting distance. + lineseg : + Line segment on which to find the origin. + expr : + The expression which determines the best point. + + Algorithm(currently works only for 2D use case) + =============================================== + + 1 > Firstly, check for edge cases. Here that would refer to vertical + or horizontal lines. + + 2 > If input expression is a polynomial containing more than one generator + then find out the total power of each of the generators. + + x**2 + 3 + x*y + x**4*y**5 ---> {x: 7, y: 6} + + If expression is a constant value then pick the first boundary point + of the line segment. + + 3 > First check if a point exists on the line segment where the value of + the highest power generator becomes 0. If not check if the value of + the next highest becomes 0. If none becomes 0 within line segment + constraints then pick the first boundary point of the line segment. + Actually, any point lying on the segment can be picked as best origin + in the last case. + + Examples + ======== + + >>> from sympy.integrals.intpoly import best_origin + >>> from sympy.abc import x, y + >>> from sympy import Point, Segment2D + >>> l = Segment2D(Point(0, 3), Point(1, 1)) + >>> expr = x**3*y**7 + >>> best_origin((2, 1), 3, l, expr) + (0, 3.0) + """ + a1, b1 = lineseg.points[0] + + def x_axis_cut(ls): + """Returns the point where the input line segment + intersects the x-axis. + + Parameters + ========== + + ls : + Line segment + """ + p, q = ls.points + if p.y.is_zero: + return tuple(p) + elif q.y.is_zero: + return tuple(q) + elif p.y/q.y < S.Zero: + return p.y * (p.x - q.x)/(q.y - p.y) + p.x, S.Zero + else: + return () + + def y_axis_cut(ls): + """Returns the point where the input line segment + intersects the y-axis. + + Parameters + ========== + + ls : + Line segment + """ + p, q = ls.points + if p.x.is_zero: + return tuple(p) + elif q.x.is_zero: + return tuple(q) + elif p.x/q.x < S.Zero: + return S.Zero, p.x * (p.y - q.y)/(q.x - p.x) + p.y + else: + return () + + gens = (x, y) + power_gens = {} + + for i in gens: + power_gens[i] = S.Zero + + if len(gens) > 1: + # Special case for vertical and horizontal lines + if len(gens) == 2: + if a[0] == 0: + if y_axis_cut(lineseg): + return S.Zero, b/a[1] + else: + return a1, b1 + elif a[1] == 0: + if x_axis_cut(lineseg): + return b/a[0], S.Zero + else: + return a1, b1 + + if isinstance(expr, Expr): # Find the sum total of power of each + if expr.is_Add: # generator and store in a dictionary. 
+ for monomial in expr.args: + if monomial.is_Pow: + if monomial.args[0] in gens: + power_gens[monomial.args[0]] += monomial.args[1] + else: + for univariate in monomial.args: + term_type = len(univariate.args) + if term_type == 0 and univariate in gens: + power_gens[univariate] += 1 + elif term_type == 2 and univariate.args[0] in gens: + power_gens[univariate.args[0]] +=\ + univariate.args[1] + elif expr.is_Mul: + for term in expr.args: + term_type = len(term.args) + if term_type == 0 and term in gens: + power_gens[term] += 1 + elif term_type == 2 and term.args[0] in gens: + power_gens[term.args[0]] += term.args[1] + elif expr.is_Pow: + power_gens[expr.args[0]] = expr.args[1] + elif expr.is_Symbol: + power_gens[expr] += 1 + else: # If `expr` is a constant take first vertex of the line segment. + return a1, b1 + + # TODO : This part is quite hacky. Should be made more robust with + # TODO : respect to symbol names and scalable w.r.t higher dimensions. + power_gens = sorted(power_gens.items(), key=lambda k: str(k[0])) + if power_gens[0][1] >= power_gens[1][1]: + if y_axis_cut(lineseg): + x0 = (S.Zero, b / a[1]) + elif x_axis_cut(lineseg): + x0 = (b / a[0], S.Zero) + else: + x0 = (a1, b1) + else: + if x_axis_cut(lineseg): + x0 = (b/a[0], S.Zero) + elif y_axis_cut(lineseg): + x0 = (S.Zero, b/a[1]) + else: + x0 = (a1, b1) + else: + x0 = (b/a[0]) + return x0 + + +def decompose(expr, separate=False): + """Decomposes an input polynomial into homogeneous ones of + smaller or equal degree. + + Explanation + =========== + + Returns a dictionary with keys as the degree of the smaller + constituting polynomials. Values are the constituting polynomials. + + Parameters + ========== + + expr : Expr + Polynomial(SymPy expression). + separate : bool + If True then simply return a list of the constituent monomials + If not then break up the polynomial into constituent homogeneous + polynomials. + + Examples + ======== + + >>> from sympy.abc import x, y + >>> from sympy.integrals.intpoly import decompose + >>> decompose(x**2 + x*y + x + y + x**3*y**2 + y**5) + {1: x + y, 2: x**2 + x*y, 5: x**3*y**2 + y**5} + >>> decompose(x**2 + x*y + x + y + x**3*y**2 + y**5, True) + {x, x**2, y, y**5, x*y, x**3*y**2} + """ + poly_dict = {} + + if isinstance(expr, Expr) and not expr.is_number: + if expr.is_Symbol: + poly_dict[1] = expr + elif expr.is_Add: + symbols = expr.atoms(Symbol) + degrees = [(sum(degree_list(monom, *symbols)), monom) + for monom in expr.args] + if separate: + return {monom[1] for monom in degrees} + else: + for monom in degrees: + degree, term = monom + if poly_dict.get(degree): + poly_dict[degree] += term + else: + poly_dict[degree] = term + elif expr.is_Pow: + _, degree = expr.args + poly_dict[degree] = expr + else: # Now expr can only be of `Mul` type + degree = 0 + for term in expr.args: + term_type = len(term.args) + if term_type == 0 and term.is_Symbol: + degree += 1 + elif term_type == 2: + degree += term.args[1] + poly_dict[degree] = expr + else: + poly_dict[0] = expr + + if separate: + return set(poly_dict.values()) + return poly_dict + + +def point_sort(poly, normal=None, clockwise=True): + """Returns the same polygon with points sorted in clockwise or + anti-clockwise order. + + Note that it's necessary for input points to be sorted in some order + (clockwise or anti-clockwise) for the integration algorithm to work. + As a convention algorithm has been implemented keeping clockwise + orientation in mind. + + Parameters + ========== + + poly: + 2D or 3D Polygon. 
+ normal : optional + The normal of the plane which the 3-Polytope is a part of. + clockwise : bool, optional + Returns points sorted in clockwise order if True and + anti-clockwise if False. + + Examples + ======== + + >>> from sympy.integrals.intpoly import point_sort + >>> from sympy import Point + >>> point_sort([Point(0, 0), Point(1, 0), Point(1, 1)]) + [Point2D(1, 1), Point2D(1, 0), Point2D(0, 0)] + """ + pts = poly.vertices if isinstance(poly, Polygon) else poly + n = len(pts) + if n < 2: + return list(pts) + + order = S.One if clockwise else S.NegativeOne + dim = len(pts[0]) + if dim == 2: + center = Point(sum((vertex.x for vertex in pts)) / n, + sum((vertex.y for vertex in pts)) / n) + else: + center = Point(sum((vertex.x for vertex in pts)) / n, + sum((vertex.y for vertex in pts)) / n, + sum((vertex.z for vertex in pts)) / n) + + def compare(a, b): + if a.x - center.x >= S.Zero and b.x - center.x < S.Zero: + return -order + elif a.x - center.x < 0 and b.x - center.x >= 0: + return order + elif a.x - center.x == 0 and b.x - center.x == 0: + if a.y - center.y >= 0 or b.y - center.y >= 0: + return -order if a.y > b.y else order + return -order if b.y > a.y else order + + det = (a.x - center.x) * (b.y - center.y) -\ + (b.x - center.x) * (a.y - center.y) + if det < 0: + return -order + elif det > 0: + return order + + first = (a.x - center.x) * (a.x - center.x) +\ + (a.y - center.y) * (a.y - center.y) + second = (b.x - center.x) * (b.x - center.x) +\ + (b.y - center.y) * (b.y - center.y) + return -order if first > second else order + + def compare3d(a, b): + det = cross_product(center, a, b) + dot_product = sum([det[i] * normal[i] for i in range(0, 3)]) + if dot_product < 0: + return -order + elif dot_product > 0: + return order + + return sorted(pts, key=cmp_to_key(compare if dim==2 else compare3d)) + + +def norm(point): + """Returns the Euclidean norm of a point from origin. + + Parameters + ========== + + point: + This denotes a point in the dimension_al spac_e. + + Examples + ======== + + >>> from sympy.integrals.intpoly import norm + >>> from sympy import Point + >>> norm(Point(2, 7)) + sqrt(53) + """ + half = S.Half + if isinstance(point, (list, tuple)): + return sum([coord ** 2 for coord in point]) ** half + elif isinstance(point, Point): + if isinstance(point, Point2D): + return (point.x ** 2 + point.y ** 2) ** half + else: + return (point.x ** 2 + point.y ** 2 + point.z) ** half + elif isinstance(point, dict): + return sum(i**2 for i in point.values()) ** half + + +def intersection(geom_1, geom_2, intersection_type): + """Returns intersection between geometric objects. + + Explanation + =========== + + Note that this function is meant for use in integration_reduction and + at that point in the calling function the lines denoted by the segments + surely intersect within segment boundaries. Coincident lines are taken + to be non-intersecting. Also, the hyperplane intersection for 2D case is + also implemented. + + Parameters + ========== + + geom_1, geom_2: + The input line segments. 
+ + Examples + ======== + + >>> from sympy.integrals.intpoly import intersection + >>> from sympy import Point, Segment2D + >>> l1 = Segment2D(Point(1, 1), Point(3, 5)) + >>> l2 = Segment2D(Point(2, 0), Point(2, 5)) + >>> intersection(l1, l2, "segment2D") + (2, 3) + >>> p1 = ((-1, 0), 0) + >>> p2 = ((0, 1), 1) + >>> intersection(p1, p2, "plane2D") + (0, 1) + """ + if intersection_type[:-2] == "segment": + if intersection_type == "segment2D": + x1, y1 = geom_1.points[0] + x2, y2 = geom_1.points[1] + x3, y3 = geom_2.points[0] + x4, y4 = geom_2.points[1] + elif intersection_type == "segment3D": + x1, y1, z1 = geom_1.points[0] + x2, y2, z2 = geom_1.points[1] + x3, y3, z3 = geom_2.points[0] + x4, y4, z4 = geom_2.points[1] + + denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4) + if denom: + t1 = x1 * y2 - y1 * x2 + t2 = x3 * y4 - x4 * y3 + return (S(t1 * (x3 - x4) - t2 * (x1 - x2)) / denom, + S(t1 * (y3 - y4) - t2 * (y1 - y2)) / denom) + if intersection_type[:-2] == "plane": + if intersection_type == "plane2D": # Intersection of hyperplanes + a1x, a1y = geom_1[0] + a2x, a2y = geom_2[0] + b1, b2 = geom_1[1], geom_2[1] + + denom = a1x * a2y - a2x * a1y + if denom: + return (S(b1 * a2y - b2 * a1y) / denom, + S(b2 * a1x - b1 * a2x) / denom) + + +def is_vertex(ent): + """If the input entity is a vertex return True. + + Parameter + ========= + + ent : + Denotes a geometric entity representing a point. + + Examples + ======== + + >>> from sympy import Point + >>> from sympy.integrals.intpoly import is_vertex + >>> is_vertex((2, 3)) + True + >>> is_vertex((2, 3, 6)) + True + >>> is_vertex(Point(2, 3)) + True + """ + if isinstance(ent, tuple): + if len(ent) in [2, 3]: + return True + elif isinstance(ent, Point): + return True + return False + + +def plot_polytope(poly): + """Plots the 2D polytope using the functions written in plotting + module which in turn uses matplotlib backend. + + Parameter + ========= + + poly: + Denotes a 2-Polytope. + """ + from sympy.plotting.plot import Plot, List2DSeries + + xl = [vertex.x for vertex in poly.vertices] + yl = [vertex.y for vertex in poly.vertices] + + xl.append(poly.vertices[0].x) # Closing the polygon + yl.append(poly.vertices[0].y) + + l2ds = List2DSeries(xl, yl) + p = Plot(l2ds, axes='label_axes=True') + p.show() + + +def plot_polynomial(expr): + """Plots the polynomial using the functions written in + plotting module which in turn uses matplotlib backend. + + Parameter + ========= + + expr: + Denotes a polynomial(SymPy expression). 
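+
+    A minimal usage sketch (illustrative only; it assumes a working
+    matplotlib backend and opens a plot window instead of returning a
+    value):
+
+    >>> from sympy.abc import x, y
+    >>> from sympy.integrals.intpoly import plot_polynomial
+    >>> plot_polynomial(x**2 + y**2)  # doctest: +SKIP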
+ """ + from sympy.plotting.plot import plot3d, plot + gens = expr.free_symbols + if len(gens) == 2: + plot3d(expr) + else: + plot(expr) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/laplace.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/laplace.py new file mode 100644 index 0000000000000000000000000000000000000000..8f95b848bb3332fe0dcbdf9e5b46dc18d6554408 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/laplace.py @@ -0,0 +1,1761 @@ +"""Laplace Transforms""" +from sympy.core import S, pi, I +from sympy.core.add import Add +from sympy.core.cache import cacheit +from sympy.core.function import ( + AppliedUndef, Derivative, expand, expand_complex, expand_mul, expand_trig, + Lambda, WildFunction, diff) +from sympy.core.mul import Mul, prod +from sympy.core.relational import _canonical, Ge, Gt, Lt, Unequality, Eq +from sympy.core.sorting import ordered +from sympy.core.symbol import Dummy, symbols, Wild +from sympy.functions.elementary.complexes import ( + re, im, arg, Abs, polar_lift, periodic_argument) +from sympy.functions.elementary.exponential import exp, log +from sympy.functions.elementary.hyperbolic import cosh, coth, sinh, asinh +from sympy.functions.elementary.miscellaneous import Max, Min, sqrt +from sympy.functions.elementary.piecewise import Piecewise +from sympy.functions.elementary.trigonometric import cos, sin, atan +from sympy.functions.special.bessel import besseli, besselj, besselk, bessely +from sympy.functions.special.delta_functions import DiracDelta, Heaviside +from sympy.functions.special.error_functions import erf, erfc, Ei +from sympy.functions.special.gamma_functions import digamma, gamma, lowergamma +from sympy.integrals import integrate, Integral +from sympy.integrals.transforms import ( + _simplify, IntegralTransform, IntegralTransformError) +from sympy.logic.boolalg import to_cnf, conjuncts, disjuncts, Or, And +from sympy.matrices.matrices import MatrixBase +from sympy.polys.matrices.linsolve import _lin_eq2dict +from sympy.polys.polyerrors import PolynomialError +from sympy.polys.polyroots import roots +from sympy.polys.polytools import Poly +from sympy.polys.rationaltools import together +from sympy.polys.rootoftools import RootSum +from sympy.utilities.exceptions import ( + sympy_deprecation_warning, SymPyDeprecationWarning, ignore_warnings) +from sympy.utilities.misc import debug, debugf + + +def _simplifyconds(expr, s, a): + r""" + Naively simplify some conditions occurring in ``expr``, + given that `\operatorname{Re}(s) > a`. + + Examples + ======== + + >>> from sympy.integrals.laplace import _simplifyconds + >>> from sympy.abc import x + >>> from sympy import sympify as S + >>> _simplifyconds(abs(x**2) < 1, x, 1) + False + >>> _simplifyconds(abs(x**2) < 1, x, 2) + False + >>> _simplifyconds(abs(x**2) < 1, x, 0) + Abs(x**2) < 1 + >>> _simplifyconds(abs(1/x**2) < 1, x, 1) + True + >>> _simplifyconds(S(1) < abs(x), x, 1) + True + >>> _simplifyconds(S(1) < abs(1/x), x, 1) + False + + >>> from sympy import Ne + >>> _simplifyconds(Ne(1, x**3), x, 1) + True + >>> _simplifyconds(Ne(1, x**3), x, 2) + True + >>> _simplifyconds(Ne(1, x**3), x, 0) + Ne(1, x**3) + """ + + def power(ex): + if ex == s: + return 1 + if ex.is_Pow and ex.base == s: + return ex.exp + return None + + def bigger(ex1, ex2): + """ Return True only if |ex1| > |ex2|, False only if |ex1| < |ex2|. + Else return None. 
""" + if ex1.has(s) and ex2.has(s): + return None + if isinstance(ex1, Abs): + ex1 = ex1.args[0] + if isinstance(ex2, Abs): + ex2 = ex2.args[0] + if ex1.has(s): + return bigger(1/ex2, 1/ex1) + n = power(ex2) + if n is None: + return None + try: + if n > 0 and (Abs(ex1) <= Abs(a)**n) == S.true: + return False + if n < 0 and (Abs(ex1) >= Abs(a)**n) == S.true: + return True + except TypeError: + pass + + def replie(x, y): + """ simplify x < y """ + if (not (x.is_positive or isinstance(x, Abs)) + or not (y.is_positive or isinstance(y, Abs))): + return (x < y) + r = bigger(x, y) + if r is not None: + return not r + return (x < y) + + def replue(x, y): + b = bigger(x, y) + if b in (True, False): + return True + return Unequality(x, y) + + def repl(ex, *args): + if ex in (True, False): + return bool(ex) + return ex.replace(*args) + from sympy.simplify.radsimp import collect_abs + expr = collect_abs(expr) + expr = repl(expr, Lt, replie) + expr = repl(expr, Gt, lambda x, y: replie(y, x)) + expr = repl(expr, Unequality, replue) + return S(expr) + + +def expand_dirac_delta(expr): + """ + Expand an expression involving DiractDelta to get it as a linear + combination of DiracDelta functions. + """ + return _lin_eq2dict(expr, expr.atoms(DiracDelta)) + + +def _laplace_transform_integration(f, t, s_, simplify=True): + """ The backend function for doing Laplace transforms by integration. + + This backend assumes that the frontend has already split sums + such that `f` is to an addition anymore. + """ + s = Dummy('s') + debugf('[LT _l_t_i ] started with (%s, %s, %s)', (f, t, s)) + debugf('[LT _l_t_i ] and simplify=%s', (simplify, )) + + if f.has(DiracDelta): + return None + + F = integrate(f*exp(-s*t), (t, S.Zero, S.Infinity)) + debugf('[LT _l_t_i ] integrated: %s', (F, )) + + if not F.has(Integral): + return _simplify(F.subs(s, s_), simplify), S.NegativeInfinity, S.true + + if not F.is_Piecewise: + debug('[LT _l_t_i ] not piecewise.') + return None + + F, cond = F.args[0] + if F.has(Integral): + debug('[LT _l_t_i ] integral in unexpected form.') + return None + + def process_conds(conds): + """ Turn ``conds`` into a strip and auxiliary conditions. 
""" + from sympy.solvers.inequalities import _solve_inequality + a = S.NegativeInfinity + aux = S.true + conds = conjuncts(to_cnf(conds)) + p, q, w1, w2, w3, w4, w5 = symbols( + 'p q w1 w2 w3 w4 w5', cls=Wild, exclude=[s]) + patterns = ( + p*Abs(arg((s + w3)*q)) < w2, + p*Abs(arg((s + w3)*q)) <= w2, + Abs(periodic_argument((s + w3)**p*q, w1)) < w2, + Abs(periodic_argument((s + w3)**p*q, w1)) <= w2, + Abs(periodic_argument((polar_lift(s + w3))**p*q, w1)) < w2, + Abs(periodic_argument((polar_lift(s + w3))**p*q, w1)) <= w2) + for c in conds: + a_ = S.Infinity + aux_ = [] + for d in disjuncts(c): + if d.is_Relational and s in d.rhs.free_symbols: + d = d.reversed + if d.is_Relational and isinstance(d, (Ge, Gt)): + d = d.reversedsign + for pat in patterns: + m = d.match(pat) + if m: + break + if m and m[q].is_positive and m[w2]/m[p] == pi/2: + d = -re(s + m[w3]) < 0 + m = d.match(p - cos(w1*Abs(arg(s*w5))*w2)*Abs(s**w3)**w4 < 0) + if not m: + m = d.match( + cos(p - Abs(periodic_argument(s**w1*w5, q))*w2) * + Abs(s**w3)**w4 < 0) + if not m: + m = d.match( + p - cos( + Abs(periodic_argument(polar_lift(s)**w1*w5, q))*w2 + )*Abs(s**w3)**w4 < 0) + if m and all(m[wild].is_positive for wild in [ + w1, w2, w3, w4, w5]): + d = re(s) > m[p] + d_ = d.replace( + re, lambda x: x.expand().as_real_imag()[0]).subs(re(s), t) + if ( + not d.is_Relational or d.rel_op in ('==', '!=') + or d_.has(s) or not d_.has(t)): + aux_ += [d] + continue + soln = _solve_inequality(d_, t) + if not soln.is_Relational or soln.rel_op in ('==', '!='): + aux_ += [d] + continue + if soln.lts == t: + debug('[LT _l_t_i ] convergence not in half-plane.') + return None + else: + a_ = Min(soln.lts, a_) + if a_ is not S.Infinity: + a = Max(a_, a) + else: + aux = And(aux, Or(*aux_)) + return a, aux.canonical if aux.is_Relational else aux + + conds = [process_conds(c) for c in disjuncts(cond)] + conds2 = [x for x in conds if x[1] != + S.false and x[0] is not S.NegativeInfinity] + if not conds2: + conds2 = [x for x in conds if x[1] != S.false] + conds = list(ordered(conds2)) + + def cnt(expr): + if expr in (True, False): + return 0 + return expr.count_ops() + conds.sort(key=lambda x: (-x[0], cnt(x[1]))) + + if not conds: + debug('[LT _l_t_i ] no convergence found.') + return None + a, aux = conds[0] # XXX is [0] always the right one? + + def sbs(expr): + return expr.subs(s, s_) + if simplify: + F = _simplifyconds(F, s, a) + aux = _simplifyconds(aux, s, a) + return _simplify(F.subs(s, s_), simplify), sbs(a), _canonical(sbs(aux)) + + +def _laplace_deep_collect(f, t): + """ + This is an internal helper function that traverses through the epression + tree of `f(t)` and collects arguments. The purpose of it is that + anything like `f(w*t-1*t-c)` will be written as `f((w-1)*t-c)` such that + it can match `f(a*t+b)`. + """ + func = f.func + args = list(f.args) + if len(f.args) == 0: + return f + else: + args = [_laplace_deep_collect(arg, t) for arg in args] + if func.is_Add: + return func(*args).collect(t) + else: + return func(*args) + + +@cacheit +def _laplace_build_rules(): + """ + This is an internal helper function that returns the table of Laplace + transform rules in terms of the time variable `t` and the frequency + variable `s`. It is used by ``_laplace_apply_rules``. 
Each entry is a + tuple containing: + + (time domain pattern, + frequency-domain replacement, + condition for the rule to be applied, + convergence plane, + preparation function) + + The preparation function is a function with one argument that is applied + to the expression before matching. For most rules it should be + ``_laplace_deep_collect``. + """ + t = Dummy('t') + s = Dummy('s') + a = Wild('a', exclude=[t]) + b = Wild('b', exclude=[t]) + n = Wild('n', exclude=[t]) + tau = Wild('tau', exclude=[t]) + omega = Wild('omega', exclude=[t]) + def dco(f): return _laplace_deep_collect(f, t) + debug('_laplace_build_rules is building rules') + + laplace_transform_rules = [ + (a, a/s, + S.true, S.Zero, dco), # 4.2.1 + (DiracDelta(a*t-b), exp(-s*b/a)/Abs(a), + Or(And(a > 0, b >= 0), And(a < 0, b <= 0)), + S.NegativeInfinity, dco), # Not in Bateman54 + (DiracDelta(a*t-b), S(0), + Or(And(a < 0, b >= 0), And(a > 0, b <= 0)), + S.NegativeInfinity, dco), # Not in Bateman54 + (Heaviside(a*t-b), exp(-s*b/a)/s, + And(a > 0, b > 0), S.Zero, dco), # 4.4.1 + (Heaviside(a*t-b), (1-exp(-s*b/a))/s, + And(a < 0, b < 0), S.Zero, dco), # 4.4.1 + (Heaviside(a*t-b), 1/s, + And(a > 0, b <= 0), S.Zero, dco), # 4.4.1 + (Heaviside(a*t-b), 0, + And(a < 0, b > 0), S.Zero, dco), # 4.4.1 + (t, 1/s**2, + S.true, S.Zero, dco), # 4.2.3 + (1/(a*t+b), -exp(-b/a*s)*Ei(-b/a*s)/a, + Abs(arg(b/a)) < pi, S.Zero, dco), # 4.2.6 + (1/sqrt(a*t+b), sqrt(a*pi/s)*exp(b/a*s)*erfc(sqrt(b/a*s))/a, + Abs(arg(b/a)) < pi, S.Zero, dco), # 4.2.18 + ((a*t+b)**(-S(3)/2), + 2*b**(-S(1)/2)-2*(pi*s/a)**(S(1)/2)*exp(b/a*s) * erfc(sqrt(b/a*s))/a, + Abs(arg(b/a)) < pi, S.Zero, dco), # 4.2.20 + (sqrt(t)/(t+b), sqrt(pi/s)-pi*sqrt(b)*exp(b*s)*erfc(sqrt(b*s)), + Abs(arg(b)) < pi, S.Zero, dco), # 4.2.22 + (1/(a*sqrt(t) + t**(3/2)), pi*a**(S(1)/2)*exp(a*s)*erfc(sqrt(a*s)), + S.true, S.Zero, dco), # Not in Bateman54 + (t**n, gamma(n+1)/s**(n+1), + n > -1, S.Zero, dco), # 4.3.1 + ((a*t+b)**n, lowergamma(n+1, b/a*s)*exp(-b/a*s)/s**(n+1)/a, + And(n > -1, Abs(arg(b/a)) < pi), S.Zero, dco), # 4.3.4 + (t**n/(t+a), a**n*gamma(n+1)*lowergamma(-n, a*s), + And(n > -1, Abs(arg(a)) < pi), S.Zero, dco), # 4.3.7 + (exp(a*t-tau), exp(-tau)/(s-a), + S.true, re(a), dco), # 4.5.1 + (t*exp(a*t-tau), exp(-tau)/(s-a)**2, + S.true, re(a), dco), # 4.5.2 + (t**n*exp(a*t), gamma(n+1)/(s-a)**(n+1), + re(n) > -1, re(a), dco), # 4.5.3 + (exp(-a*t**2), sqrt(pi/4/a)*exp(s**2/4/a)*erfc(s/sqrt(4*a)), + re(a) > 0, S.Zero, dco), # 4.5.21 + (t*exp(-a*t**2), + 1/(2*a)-2/sqrt(pi)/(4*a)**(S(3)/2)*s*erfc(s/sqrt(4*a)), + re(a) > 0, S.Zero, dco), # 4.5.22 + (exp(-a/t), 2*sqrt(a/s)*besselk(1, 2*sqrt(a*s)), + re(a) >= 0, S.Zero, dco), # 4.5.25 + (sqrt(t)*exp(-a/t), + S(1)/2*sqrt(pi/s**3)*(1+2*sqrt(a*s))*exp(-2*sqrt(a*s)), + re(a) >= 0, S.Zero, dco), # 4.5.26 + (exp(-a/t)/sqrt(t), sqrt(pi/s)*exp(-2*sqrt(a*s)), + re(a) >= 0, S.Zero, dco), # 4.5.27 + (exp(-a/t)/(t*sqrt(t)), sqrt(pi/a)*exp(-2*sqrt(a*s)), + re(a) > 0, S.Zero, dco), # 4.5.28 + (t**n*exp(-a/t), 2*(a/s)**((n+1)/2)*besselk(n+1, 2*sqrt(a*s)), + re(a) > 0, S.Zero, dco), # 4.5.29 + (exp(-2*sqrt(a*t)), + s**(-1)-sqrt(pi*a)*s**(-S(3)/2)*exp(a/s) * erfc(sqrt(a/s)), + Abs(arg(a)) < pi, S.Zero, dco), # 4.5.31 + (exp(-2*sqrt(a*t))/sqrt(t), (pi/s)**(S(1)/2)*exp(a/s)*erfc(sqrt(a/s)), + Abs(arg(a)) < pi, S.Zero, dco), # 4.5.33 + (log(a*t), -log(exp(S.EulerGamma)*s/a)/s, + a > 0, S.Zero, dco), # 4.6.1 + (log(1+a*t), -exp(s/a)/s*Ei(-s/a), + Abs(arg(a)) < pi, S.Zero, dco), # 4.6.4 + (log(a*t+b), (log(b)-exp(s/b/a)/s*a*Ei(-s/b))/s*a, + And(a > 0, Abs(arg(b)) < 
pi), S.Zero, dco), # 4.6.5 + (log(t)/sqrt(t), -sqrt(pi/s)*log(4*s*exp(S.EulerGamma)), + S.true, S.Zero, dco), # 4.6.9 + (t**n*log(t), gamma(n+1)*s**(-n-1)*(digamma(n+1)-log(s)), + re(n) > -1, S.Zero, dco), # 4.6.11 + (log(a*t)**2, (log(exp(S.EulerGamma)*s/a)**2+pi**2/6)/s, + a > 0, S.Zero, dco), # 4.6.13 + (sin(omega*t), omega/(s**2+omega**2), + S.true, Abs(im(omega)), dco), # 4,7,1 + (Abs(sin(omega*t)), omega/(s**2+omega**2)*coth(pi*s/2/omega), + omega > 0, S.Zero, dco), # 4.7.2 + (sin(omega*t)/t, atan(omega/s), + S.true, Abs(im(omega)), dco), # 4.7.16 + (sin(omega*t)**2/t, log(1+4*omega**2/s**2)/4, + S.true, 2*Abs(im(omega)), dco), # 4.7.17 + (sin(omega*t)**2/t**2, + omega*atan(2*omega/s)-s*log(1+4*omega**2/s**2)/4, + S.true, 2*Abs(im(omega)), dco), # 4.7.20 + (sin(2*sqrt(a*t)), sqrt(pi*a)/s/sqrt(s)*exp(-a/s), + S.true, S.Zero, dco), # 4.7.32 + (sin(2*sqrt(a*t))/t, pi*erf(sqrt(a/s)), + S.true, S.Zero, dco), # 4.7.34 + (cos(omega*t), s/(s**2+omega**2), + S.true, Abs(im(omega)), dco), # 4.7.43 + (cos(omega*t)**2, (s**2+2*omega**2)/(s**2+4*omega**2)/s, + S.true, 2*Abs(im(omega)), dco), # 4.7.45 + (sqrt(t)*cos(2*sqrt(a*t)), sqrt(pi)/2*s**(-S(5)/2)*(s-2*a)*exp(-a/s), + S.true, S.Zero, dco), # 4.7.66 + (cos(2*sqrt(a*t))/sqrt(t), sqrt(pi/s)*exp(-a/s), + S.true, S.Zero, dco), # 4.7.67 + (sin(a*t)*sin(b*t), 2*a*b*s/(s**2+(a+b)**2)/(s**2+(a-b)**2), + S.true, Abs(im(a))+Abs(im(b)), dco), # 4.7.78 + (cos(a*t)*sin(b*t), b*(s**2-a**2+b**2)/(s**2+(a+b)**2)/(s**2+(a-b)**2), + S.true, Abs(im(a))+Abs(im(b)), dco), # 4.7.79 + (cos(a*t)*cos(b*t), s*(s**2+a**2+b**2)/(s**2+(a+b)**2)/(s**2+(a-b)**2), + S.true, Abs(im(a))+Abs(im(b)), dco), # 4.7.80 + (sinh(a*t), a/(s**2-a**2), + S.true, Abs(re(a)), dco), # 4.9.1 + (cosh(a*t), s/(s**2-a**2), + S.true, Abs(re(a)), dco), # 4.9.2 + (sinh(a*t)**2, 2*a**2/(s**3-4*a**2*s), + S.true, 2*Abs(re(a)), dco), # 4.9.3 + (cosh(a*t)**2, (s**2-2*a**2)/(s**3-4*a**2*s), + S.true, 2*Abs(re(a)), dco), # 4.9.4 + (sinh(a*t)/t, log((s+a)/(s-a))/2, + S.true, Abs(re(a)), dco), # 4.9.12 + (t**n*sinh(a*t), gamma(n+1)/2*((s-a)**(-n-1)-(s+a)**(-n-1)), + n > -2, Abs(a), dco), # 4.9.18 + (t**n*cosh(a*t), gamma(n+1)/2*((s-a)**(-n-1)+(s+a)**(-n-1)), + n > -1, Abs(a), dco), # 4.9.19 + (sinh(2*sqrt(a*t)), sqrt(pi*a)/s/sqrt(s)*exp(a/s), + S.true, S.Zero, dco), # 4.9.34 + (cosh(2*sqrt(a*t)), 1/s+sqrt(pi*a)/s/sqrt(s)*exp(a/s)*erf(sqrt(a/s)), + S.true, S.Zero, dco), # 4.9.35 + ( + sqrt(t)*sinh(2*sqrt(a*t)), + pi**(S(1)/2)*s**(-S(5)/2)*(s/2+a) * + exp(a/s)*erf(sqrt(a/s))-a**(S(1)/2)*s**(-2), + S.true, S.Zero, dco), # 4.9.36 + (sqrt(t)*cosh(2*sqrt(a*t)), pi**(S(1)/2)*s**(-S(5)/2)*(s/2+a)*exp(a/s), + S.true, S.Zero, dco), # 4.9.37 + (sinh(2*sqrt(a*t))/sqrt(t), + pi**(S(1)/2)*s**(-S(1)/2)*exp(a/s) * erf(sqrt(a/s)), + S.true, S.Zero, dco), # 4.9.38 + (cosh(2*sqrt(a*t))/sqrt(t), pi**(S(1)/2)*s**(-S(1)/2)*exp(a/s), + S.true, S.Zero, dco), # 4.9.39 + (sinh(sqrt(a*t))**2/sqrt(t), pi**(S(1)/2)/2*s**(-S(1)/2)*(exp(a/s)-1), + S.true, S.Zero, dco), # 4.9.40 + (cosh(sqrt(a*t))**2/sqrt(t), pi**(S(1)/2)/2*s**(-S(1)/2)*(exp(a/s)+1), + S.true, S.Zero, dco), # 4.9.41 + (erf(a*t), exp(s**2/(2*a)**2)*erfc(s/(2*a))/s, + 4*Abs(arg(a)) < pi, S.Zero, dco), # 4.12.2 + (erf(sqrt(a*t)), sqrt(a)/sqrt(s+a)/s, + S.true, Max(S.Zero, -re(a)), dco), # 4.12.4 + (exp(a*t)*erf(sqrt(a*t)), sqrt(a)/sqrt(s)/(s-a), + S.true, Max(S.Zero, re(a)), dco), # 4.12.5 + (erf(sqrt(a/t)/2), (1-exp(-sqrt(a*s)))/s, + re(a) > 0, S.Zero, dco), # 4.12.6 + (erfc(sqrt(a*t)), (sqrt(s+a)-sqrt(a))/sqrt(s+a)/s, + S.true, -re(a), dco), # 4.12.9 + 
(exp(a*t)*erfc(sqrt(a*t)), 1/(s+sqrt(a*s)), + S.true, S.Zero, dco), # 4.12.10 + (erfc(sqrt(a/t)/2), exp(-sqrt(a*s))/s, + re(a) > 0, S.Zero, dco), # 4.2.11 + (besselj(n, a*t), a**n/(sqrt(s**2+a**2)*(s+sqrt(s**2+a**2))**n), + re(n) > -1, Abs(im(a)), dco), # 4.14.1 + (t**b*besselj(n, a*t), + 2**n/sqrt(pi)*gamma(n+S.Half)*a**n*(s**2+a**2)**(-n-S.Half), + And(re(n) > -S.Half, Eq(b, n)), Abs(im(a)), dco), # 4.14.7 + (t**b*besselj(n, a*t), + 2**(n+1)/sqrt(pi)*gamma(n+S(3)/2)*a**n*s*(s**2+a**2)**(-n-S(3)/2), + And(re(n) > -1, Eq(b, n+1)), Abs(im(a)), dco), # 4.14.8 + (besselj(0, 2*sqrt(a*t)), exp(-a/s)/s, + S.true, S.Zero, dco), # 4.14.25 + (t**(b)*besselj(n, 2*sqrt(a*t)), a**(n/2)*s**(-n-1)*exp(-a/s), + And(re(n) > -1, Eq(b, n*S.Half)), S.Zero, dco), # 4.14.30 + (besselj(0, a*sqrt(t**2+b*t)), + exp(b*s-b*sqrt(s**2+a**2))/sqrt(s**2+a**2), + Abs(arg(b)) < pi, Abs(im(a)), dco), # 4.15.19 + (besseli(n, a*t), a**n/(sqrt(s**2-a**2)*(s+sqrt(s**2-a**2))**n), + re(n) > -1, Abs(re(a)), dco), # 4.16.1 + (t**b*besseli(n, a*t), + 2**n/sqrt(pi)*gamma(n+S.Half)*a**n*(s**2-a**2)**(-n-S.Half), + And(re(n) > -S.Half, Eq(b, n)), Abs(re(a)), dco), # 4.16.6 + (t**b*besseli(n, a*t), + 2**(n+1)/sqrt(pi)*gamma(n+S(3)/2)*a**n*s*(s**2-a**2)**(-n-S(3)/2), + And(re(n) > -1, Eq(b, n+1)), Abs(re(a)), dco), # 4.16.7 + (t**(b)*besseli(n, 2*sqrt(a*t)), a**(n/2)*s**(-n-1)*exp(a/s), + And(re(n) > -1, Eq(b, n*S.Half)), S.Zero, dco), # 4.16.18 + (bessely(0, a*t), -2/pi*asinh(s/a)/sqrt(s**2+a**2), + S.true, Abs(im(a)), dco), # 4.15.44 + (besselk(0, a*t), log((s + sqrt(s**2-a**2))/a)/(sqrt(s**2-a**2)), + S.true, -re(a), dco) # 4.16.23 + ] + return laplace_transform_rules, t, s + + +def _laplace_rule_timescale(f, t, s): + """ + This function applies the time-scaling rule of the Laplace transform in + a straight-forward way. For example, if it gets ``(f(a*t), t, s)``, it will + compute ``LaplaceTransform(f(t)/a, t, s/a)`` if ``a>0``. + """ + + a = Wild('a', exclude=[t]) + g = WildFunction('g', nargs=1) + ma1 = f.match(g) + if ma1: + arg = ma1[g].args[0].collect(t) + ma2 = arg.match(a*t) + if ma2 and ma2[a].is_positive and ma2[a] != 1: + debug('_laplace_apply_prog rules match:') + debugf(' f: %s _ %s, %s )', (f, ma1, ma2)) + debug(' rule: time scaling (4.1.4)') + r, pr, cr = _laplace_transform(1/ma2[a]*ma1[g].func(t), + t, s/ma2[a], simplify=False) + return (r, pr, cr) + return None + + +def _laplace_rule_heaviside(f, t, s): + """ + This function deals with time-shifted Heaviside step functions. If the time + shift is positive, it applies the time-shift rule of the Laplace transform. + For example, if it gets ``(Heaviside(t-a)*f(t), t, s)``, it will compute + ``exp(-a*s)*LaplaceTransform(f(t+a), t, s)``. + + If the time shift is negative, the Heaviside function is simply removed + as it means nothing to the Laplace transform. + + The function does not remove a factor ``Heaviside(t)``; this is done by + the simple rules. 
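+
+    For illustration, the positive time shift corresponds to results of the
+    following kind when reached through the public API (a sketch; the exact
+    printed form may differ between versions):
+
+    >>> from sympy import Heaviside, exp, laplace_transform
+    >>> from sympy.abc import t, s
+    >>> laplace_transform(Heaviside(t - 3)*exp(-t), t, s, noconds=True)  # doctest: +SKIP
+    exp(-3*s - 3)/(s + 1)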
+ """ + + a = Wild('a', exclude=[t]) + y = Wild('y') + g = Wild('g') + ma1 = f.match(Heaviside(y)*g) + if ma1: + ma2 = ma1[y].match(t-a) + if ma2 and ma2[a].is_positive: + debug('_laplace_apply_prog_rules match:') + debugf(' f: %s ( %s, %s )', (f, ma1, ma2)) + debug(' rule: time shift (4.1.4)') + r, pr, cr = _laplace_transform(ma1[g].subs(t, t+ma2[a]), t, s, + simplify=False) + return (exp(-ma2[a]*s)*r, pr, cr) + if ma2 and ma2[a].is_negative: + debug('_laplace_apply_prog_rules match:') + debugf(' f: %s ( %s, %s )', (f, ma1, ma2)) + debug(' rule: Heaviside factor, negative time shift (4.1.4)') + r, pr, cr = _laplace_transform(ma1[g], t, s, simplify=False) + return (r, pr, cr) + return None + + +def _laplace_rule_exp(f, t, s): + """ + If this function finds a factor ``exp(a*t)``, it applies the + frequency-shift rule of the Laplace transform and adjusts the convergence + plane accordingly. For example, if it gets ``(exp(-a*t)*f(t), t, s)``, it + will compute ``LaplaceTransform(f(t), t, s+a)``. + """ + + a = Wild('a', exclude=[t]) + y = Wild('y') + z = Wild('z') + ma1 = f.match(exp(y)*z) + if ma1: + ma2 = ma1[y].collect(t).match(a*t) + if ma2: + debug('_laplace_apply_prog_rules match:') + debugf(' f: %s ( %s, %s )', (f, ma1, ma2)) + debug(' rule: multiply with exp (4.1.5)') + r, pr, cr = _laplace_transform(ma1[z], t, s-ma2[a], + simplify=False) + return (r, pr+re(ma2[a]), cr) + return None + + +def _laplace_rule_delta(f, t, s): + """ + If this function finds a factor ``DiracDelta(b*t-a)``, it applies the + masking property of the delta distribution. For example, if it gets + ``(DiracDelta(t-a)*f(t), t, s)``, it will return + ``(f(a)*exp(-a*s), -a, True)``. + """ + # This rule is not in Bateman54 + + a = Wild('a', exclude=[t]) + b = Wild('b', exclude=[t]) + + y = Wild('y') + z = Wild('z') + ma1 = f.match(DiracDelta(y)*z) + if ma1 and not ma1[z].has(DiracDelta): + ma2 = ma1[y].collect(t).match(b*t-a) + if ma2: + debug('_laplace_apply_prog_rules match:') + debugf(' f: %s ( %s, %s )', (f, ma1, ma2)) + debug(' rule: multiply with DiracDelta') + loc = ma2[a]/ma2[b] + if re(loc) >= 0 and im(loc) == 0: + r = exp(-ma2[a]/ma2[b]*s)*ma1[z].subs(t, ma2[a]/ma2[b])/ma2[b] + return (r, S.NegativeInfinity, S.true) + else: + return (0, S.NegativeInfinity, S.true) + if ma1[y].is_polynomial(t): + ro = roots(ma1[y], t) + if roots is not {} and set(ro.values()) == {1}: + slope = diff(ma1[y], t) + r = Add( + *[exp(-x*s)*ma1[z].subs(t, s)/slope.subs(t, x) + for x in list(ro.keys()) if im(x) == 0 and re(x) >= 0]) + return (r, S.NegativeInfinity, S.true) + return None + + +def _laplace_trig_split(fn): + """ + Helper function for `_laplace_rule_trig`. This function returns two terms + `f` and `g`. `f` contains all product terms with sin, cos, sinh, cosh in + them; `g` contains everything else. + """ + trigs = [S.One] + other = [S.One] + for term in Mul.make_args(fn): + if term.has(sin, cos, sinh, cosh, exp): + trigs.append(term) + else: + other.append(term) + f = Mul(*trigs) + g = Mul(*other) + return f, g + + +def _laplace_trig_expsum(f, t): + """ + Helper function for `_laplace_rule_trig`. This function expects the `f` + from `_laplace_trig_split`. It returns two lists `xm` and `xn`. `xm` is + a list of dictionaries with keys `k` and `a` representing a function + `k*exp(a*t)`. `xn` is a list of all terms that cannot be brought into + that form, which may happen, e.g., when a trigonometric function has + another function in its argument. 
+ """ + m = Wild('m') + p = Wild('p', exclude=[t]) + xm = [] + xn = [] + + x1 = f.rewrite(exp).expand() + + for term in Add.make_args(x1): + if not term.has(t): + xm.append({'k': term, 'a': 0, re: 0, im: 0}) + continue + term = term.powsimp(combine='exp') + if (r := term.match(p*exp(m))) is not None: + if (mp := r[m].as_poly(t)) is not None: + mc = mp.all_coeffs() + if len(mc) == 2: + xm.append({ + 'k': r[p]*exp(mc[1]), 'a': mc[0], + re: re(mc[0]), im: im(mc[0])}) + else: + xn.append(term) + else: + xn.append(term) + else: + xn.append(term) + return xm, xn + + +def _laplace_trig_ltex(xm, t, s): + """ + Helper function for `_laplace_rule_trig`. This function takes the list of + exponentials `xm` from `_laplace_trig_expsum` and simplifies complex + conjugate and real symmetric poles. It returns the result as a sum and + the convergence plane. + """ + results = [] + planes = [] + + def _simpc(coeffs): + nc = coeffs.copy() + for k in range(len(nc)): + ri = nc[k].as_real_imag() + if ri[0].has(im): + nc[k] = nc[k].rewrite(cos) + else: + nc[k] = (ri[0] + I*ri[1]).rewrite(cos) + return nc + + def _quadpole(t1, k1, k2, k3, s): + a, k0, a_r, a_i = t1['a'], t1['k'], t1[re], t1[im] + nc = [ + k0 + k1 + k2 + k3, + a*(k0 + k1 - k2 - k3) - 2*I*a_i*k1 + 2*I*a_i*k2, + ( + a**2*(-k0 - k1 - k2 - k3) + + a*(4*I*a_i*k0 + 4*I*a_i*k3) + + 4*a_i**2*k0 + 4*a_i**2*k3), + ( + a**3*(-k0 - k1 + k2 + k3) + + a**2*(4*I*a_i*k0 + 2*I*a_i*k1 - 2*I*a_i*k2 - 4*I*a_i*k3) + + a*(4*a_i**2*k0 - 4*a_i**2*k3)) + ] + dc = [ + S.One, S.Zero, 2*a_i**2 - 2*a_r**2, + S.Zero, a_i**4 + 2*a_i**2*a_r**2 + a_r**4] + n = Add( + *[x*s**y for x, y in zip(_simpc(nc), range(len(nc))[::-1])]) + d = Add( + *[x*s**y for x, y in zip(dc, range(len(dc))[::-1])]) + debugf(' quadpole: (%s) / (%s)', (n, d)) + return n/d + + def _ccpole(t1, k1, s): + a, k0, a_r, a_i = t1['a'], t1['k'], t1[re], t1[im] + nc = [k0 + k1, -a*k0 - a*k1 + 2*I*a_i*k0] + dc = [S.One, -2*a_r, a_i**2 + a_r**2] + n = Add( + *[x*s**y for x, y in zip(_simpc(nc), range(len(nc))[::-1])]) + d = Add( + *[x*s**y for x, y in zip(dc, range(len(dc))[::-1])]) + debugf(' ccpole: (%s) / (%s)', (n, d)) + return n/d + + def _rspole(t1, k2, s): + a, k0, a_r, a_i = t1['a'], t1['k'], t1[re], t1[im] + nc = [k0 + k2, a*k0 - a*k2 - 2*I*a_i*k0] + dc = [S.One, -2*I*a_i, -a_i**2 - a_r**2] + n = Add( + *[x*s**y for x, y in zip(_simpc(nc), range(len(nc))[::-1])]) + d = Add( + *[x*s**y for x, y in zip(dc, range(len(dc))[::-1])]) + debugf(' rspole: (%s) / (%s)', (n, d)) + return n/d + + def _sypole(t1, k3, s): + a, k0 = t1['a'], t1['k'] + nc = [k0 + k3, a*(k0 - k3)] + dc = [S.One, S.Zero, -a**2] + n = Add( + *[x*s**y for x, y in zip(_simpc(nc), range(len(nc))[::-1])]) + d = Add( + *[x*s**y for x, y in zip(dc, range(len(dc))[::-1])]) + debugf(' sypole: (%s) / (%s)', (n, d)) + return n/d + + def _simplepole(t1, s): + a, k0 = t1['a'], t1['k'] + n = k0 + d = s - a + debugf(' simplepole: (%s) / (%s)', (n, d)) + return n/d + + while len(xm) > 0: + t1 = xm.pop() + i_imagsym = None + i_realsym = None + i_pointsym = None + # The following code checks all remaining poles. If t1 is a pole at + # a+b*I, then we check for a-b*I, -a+b*I, and -a-b*I, and + # assign the respective indices to i_imagsym, i_realsym, i_pointsym. + # -a-b*I / i_pointsym only applies if both a and b are != 0. 
+ for i in range(len(xm)): + real_eq = t1[re] == xm[i][re] + realsym = t1[re] == -xm[i][re] + imag_eq = t1[im] == xm[i][im] + imagsym = t1[im] == -xm[i][im] + if realsym and imagsym and t1[re] != 0 and t1[im] != 0: + i_pointsym = i + elif realsym and imag_eq and t1[re] != 0: + i_realsym = i + elif real_eq and imagsym and t1[im] != 0: + i_imagsym = i + + # The next part looks for four possible pole constellations: + # quad: a+b*I, a-b*I, -a+b*I, -a-b*I + # cc: a+b*I, a-b*I (a may be zero) + # quad: a+b*I, -a+b*I (b may be zero) + # point: a+b*I, -a-b*I (a!=0 and b!=0 is needed, but that has been + # asserted when finding i_pointsym above.) + # If none apply, then t1 is a simple pole. + if ( + i_imagsym is not None and i_realsym is not None + and i_pointsym is not None): + results.append( + _quadpole(t1, + xm[i_imagsym]['k'], xm[i_realsym]['k'], + xm[i_pointsym]['k'], s)) + planes.append(Abs(re(t1['a']))) + # The three additional poles have now been used; to pop them + # easily we have to do it from the back. + indices_to_pop = [i_imagsym, i_realsym, i_pointsym] + indices_to_pop.sort(reverse=True) + for i in indices_to_pop: + xm.pop(i) + elif i_imagsym is not None: + results.append(_ccpole(t1, xm[i_imagsym]['k'], s)) + planes.append(t1[re]) + xm.pop(i_imagsym) + elif i_realsym is not None: + results.append(_rspole(t1, xm[i_realsym]['k'], s)) + planes.append(Abs(t1[re])) + xm.pop(i_realsym) + elif i_pointsym is not None: + results.append(_sypole(t1, xm[i_pointsym]['k'], s)) + planes.append(Abs(t1[re])) + xm.pop(i_pointsym) + else: + results.append(_simplepole(t1, s)) + planes.append(t1[re]) + + return Add(*results), Max(*planes) + + +def _laplace_rule_trig(fn, t_, s, doit=True, **hints): + """ + This rule covers trigonometric factors by splitting everything into a + sum of exponential functions and collecting complex conjugate poles and + real symmetric poles. + """ + t = Dummy('t', real=True) + + if not fn.has(sin, cos, sinh, cosh): + return None + + debugf('_laplace_rule_trig: (%s, %s, %s)', (fn, t_, s)) + + f, g = _laplace_trig_split(fn.subs(t_, t)) + debugf(' f = %s\n g = %s', (f, g)) + + xm, xn = _laplace_trig_expsum(f, t) + debugf(' xm = %s\n xn = %s', (xm, xn)) + + if len(xn) > 0: + # not implemented yet + debug(' --> xn is not empty; giving up.') + return None + + if not g.has(t): + r, p = _laplace_trig_ltex(xm, t, s) + return g*r, p, S.true + else: + # Just transform `g` and make frequency-shifted copies + planes = [] + results = [] + G, G_plane, G_cond = _laplace_transform(g, t, s) + for x1 in xm: + results.append(x1['k']*G.subs(s, s-x1['a'])) + planes.append(G_plane+re(x1['a'])) + return Add(*results).subs(t, t_), Max(*planes), G_cond + + +def _laplace_rule_diff(f, t, s, doit=True, **hints): + """ + This function looks for derivatives in the time domain and replaces it + by factors of `s` and initial conditions in the frequency domain. For + example, if it gets ``(diff(f(t), t), t, s)``, it will compute + ``s*LaplaceTransform(f(t), t, s) - f(0)``. 
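+
+    A sketch of the effect as seen through the public interface (the exact
+    printed form may vary between versions):
+
+    >>> from sympy import laplace_transform, diff, Function
+    >>> from sympy.abc import t, s
+    >>> f = Function('f')
+    >>> laplace_transform(diff(f(t), t), t, s, noconds=True)  # doctest: +SKIP
+    s*LaplaceTransform(f(t), t, s) - f(0)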
+ """ + + a = Wild('a', exclude=[t]) + n = Wild('n', exclude=[t]) + g = WildFunction('g') + ma1 = f.match(a*Derivative(g, (t, n))) + if ma1 and ma1[n].is_integer: + m = [z.has(t) for z in ma1[g].args] + if sum(m) == 1: + debug('_laplace_apply_rules match:') + debugf(' f, n: %s, %s', (f, ma1[n])) + debug(' rule: time derivative (4.1.8)') + d = [] + for k in range(ma1[n]): + if k == 0: + y = ma1[g].subs(t, 0) + else: + y = Derivative(ma1[g], (t, k)).subs(t, 0) + d.append(s**(ma1[n]-k-1)*y) + r, pr, cr = _laplace_transform(ma1[g], t, s, simplify=False) + return (ma1[a]*(s**ma1[n]*r - Add(*d)), pr, cr) + return None + + +def _laplace_rule_sdiff(f, t, s, doit=True, **hints): + """ + This function looks for multiplications with polynoimials in `t` as they + correspond to differentiation in the frequency domain. For example, if it + gets ``(t*f(t), t, s)``, it will compute + ``-Derivative(LaplaceTransform(f(t), t, s), s)``. + """ + + if f.is_Mul: + pfac = [1] + ofac = [1] + for fac in Mul.make_args(f): + if fac.is_polynomial(t): + pfac.append(fac) + else: + ofac.append(fac) + if len(pfac) > 1: + pex = prod(pfac) + pc = Poly(pex, t).all_coeffs() + N = len(pc) + if N > 1: + debug('_laplace_apply_rules match:') + debugf(' f, n: %s, %s', (f, pfac)) + debug(' rule: frequency derivative (4.1.6)') + oex = prod(ofac) + r_, p_, c_ = _laplace_transform(oex, t, s, simplify=False) + deri = [r_] + d1 = False + try: + d1 = -diff(deri[-1], s) + except ValueError: + d1 = False + if r_.has(LaplaceTransform): + for k in range(N-1): + deri.append((-1)**(k+1)*Derivative(r_, s, k+1)) + else: + if d1: + deri.append(d1) + for k in range(N-2): + deri.append(-diff(deri[-1], s)) + if d1: + r = Add(*[pc[N-n-1]*deri[n] for n in range(N)]) + return (r, p_, c_) + return None + + +def _laplace_expand(f, t, s, doit=True, **hints): + """ + This function tries to expand its argument with successively stronger + methods: first it will expand on the top level, then it will expand any + multiplications in depth, then it will try all avilable expansion methods, + and finally it will try to expand trigonometric functions. + + If it can expand, it will then compute the Laplace transform of the + expanded term. + """ + + if f.is_Add: + return None + r = expand(f, deep=False) + if r.is_Add: + return _laplace_transform(r, t, s, simplify=False) + r = expand_mul(f) + if r.is_Add: + return _laplace_transform(r, t, s, simplify=False) + r = expand(f) + if r.is_Add: + return _laplace_transform(r, t, s, simplify=False) + if r != f: + return _laplace_transform(r, t, s, simplify=False) + r = expand(expand_trig(f)) + if r.is_Add: + return _laplace_transform(r, t, s, simplify=False) + return None + + +def _laplace_apply_prog_rules(f, t, s): + """ + This function applies all program rules and returns the result if one + of them gives a result. + """ + + prog_rules = [_laplace_rule_heaviside, _laplace_rule_delta, + _laplace_rule_timescale, _laplace_rule_exp, + _laplace_rule_trig, + _laplace_rule_diff, _laplace_rule_sdiff] + + for p_rule in prog_rules: + if (L := p_rule(f, t, s)) is not None: + return L + return None + + +def _laplace_apply_simple_rules(f, t, s): + """ + This function applies all simple rules and returns the result if one + of them gives a result. 
+ """ + simple_rules, t_, s_ = _laplace_build_rules() + prep_old = '' + prep_f = '' + for t_dom, s_dom, check, plane, prep in simple_rules: + if prep_old != prep: + prep_f = prep(f.subs({t: t_})) + prep_old = prep + ma = prep_f.match(t_dom) + if ma: + try: + c = check.xreplace(ma) + except TypeError: + # This may happen if the time function has imaginary + # numbers in it. Then we give up. + continue + if c == S.true: + debug('_laplace_apply_simple_rules match:') + debugf(' f: %s', (f,)) + debugf(' rule: %s o---o %s', (t_dom, s_dom)) + debugf(' match: %s', (ma, )) + return (s_dom.xreplace(ma).subs({s_: s}), + plane.xreplace(ma), S.true) + return None + + +def _laplace_transform(fn, t_, s_, simplify=True): + """ + Front-end function of the Laplace transform. It tries to apply all known + rules recursively, and if everything else fails, it tries to integrate. + """ + debugf('[LT _l_t] (%s, %s, %s)', (fn, t_, s_)) + + terms = Add.make_args(fn) + terms_s = [] + planes = [] + conditions = [] + for ff in terms: + k, ft = ff.as_independent(t_, as_Add=False) + if (r := _laplace_apply_simple_rules(ft, t_, s_)) is not None: + pass + elif (r := _laplace_apply_prog_rules(ft, t_, s_)) is not None: + pass + elif (r := _laplace_expand(ft, t_, s_)) is not None: + pass + elif any(undef.has(t_) for undef in ft.atoms(AppliedUndef)): + # If there are undefined functions f(t) then integration is + # unlikely to do anything useful so we skip it and given an + # unevaluated LaplaceTransform. + r = (LaplaceTransform(ft, t_, s_), S.NegativeInfinity, True) + elif (r := _laplace_transform_integration( + ft, t_, s_, simplify=simplify)) is not None: + pass + else: + r = (LaplaceTransform(ft, t_, s_), S.NegativeInfinity, True) + (ri_, pi_, ci_) = r + terms_s.append(k*ri_) + planes.append(pi_) + conditions.append(ci_) + + result = Add(*terms_s) + if simplify: + result = result.simplify(doit=False) + plane = Max(*planes) + condition = And(*conditions) + + return result, plane, condition + + +class LaplaceTransform(IntegralTransform): + """ + Class representing unevaluated Laplace transforms. + + For usage of this class, see the :class:`IntegralTransform` docstring. + + For how to compute Laplace transforms, see the :func:`laplace_transform` + docstring. + + If this is called with ``.doit()``, it returns the Laplace transform as an + expression. If it is called with ``.doit(noconds=False)``, it returns a + tuple containing the same expression, a convergence plane, and conditions. + """ + + _name = 'Laplace' + + def _compute_transform(self, f, t, s, **hints): + _simplify = hints.get('simplify', False) + LT = _laplace_transform_integration(f, t, s, simplify=_simplify) + return LT + + def _as_integral(self, f, t, s): + return Integral(f*exp(-s*t), (t, S.Zero, S.Infinity)) + + def _collapse_extra(self, extra): + conds = [] + planes = [] + for plane, cond in extra: + conds.append(cond) + planes.append(plane) + cond = And(*conds) + plane = Max(*planes) + if cond == S.false: + raise IntegralTransformError( + 'Laplace', None, 'No combined convergence.') + return plane, cond + + def doit(self, **hints): + """ + Try to evaluate the transform in closed form. + + Explanation + =========== + + Standard hints are the following: + - ``noconds``: if True, do not return convergence conditions. The + default setting is `True`. + - ``simplify``: if True, it simplifies the final result. The + default setting is `False`. 
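+
+        A usage sketch (illustrative only; exact output may differ):
+
+        >>> from sympy.integrals.laplace import LaplaceTransform
+        >>> from sympy.abc import t, s
+        >>> LaplaceTransform(t**2, t, s).doit()  # doctest: +SKIP
+        2/s**3
+        >>> LaplaceTransform(t**2, t, s).doit(noconds=False)  # doctest: +SKIP
+        (2/s**3, 0, True)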
+ """ + _noconds = hints.get('noconds', True) + _simplify = hints.get('simplify', False) + + debugf('[LT doit] (%s, %s, %s)', (self.function, + self.function_variable, + self.transform_variable)) + + t_ = self.function_variable + s_ = self.transform_variable + fn = self.function + + r = _laplace_transform(fn, t_, s_, simplify=_simplify) + + if _noconds: + return r[0] + else: + return r + + +def laplace_transform(f, t, s, legacy_matrix=True, **hints): + r""" + Compute the Laplace Transform `F(s)` of `f(t)`, + + .. math :: F(s) = \int_{0^{-}}^\infty e^{-st} f(t) \mathrm{d}t. + + Explanation + =========== + + For all sensible functions, this converges absolutely in a + half-plane + + .. math :: a < \operatorname{Re}(s) + + This function returns ``(F, a, cond)`` where ``F`` is the Laplace + transform of ``f``, `a` is the half-plane of convergence, and `cond` are + auxiliary convergence conditions. + + The implementation is rule-based, and if you are interested in which + rules are applied, and whether integration is attempted, you can switch + debug information on by setting ``sympy.SYMPY_DEBUG=True``. The numbers + of the rules in the debug information (and the code) refer to Bateman's + Tables of Integral Transforms [1]. + + The lower bound is `0-`, meaning that this bound should be approached + from the lower side. This is only necessary if distributions are involved. + At present, it is only done if `f(t)` contains ``DiracDelta``, in which + case the Laplace transform is computed implicitly as + + .. math :: + F(s) = \lim_{\tau\to 0^{-}} \int_{\tau}^\infty e^{-st} + f(t) \mathrm{d}t + + by applying rules. + + If the Laplace transform cannot be fully computed in closed form, this + function returns expressions containing unevaluated + :class:`LaplaceTransform` objects. + + For a description of possible hints, refer to the docstring of + :func:`sympy.integrals.transforms.IntegralTransform.doit`. If + ``noconds=True``, only `F` will be returned (i.e. not ``cond``, and also + not the plane ``a``). + + .. deprecated:: 1.9 + Legacy behavior for matrices where ``laplace_transform`` with + ``noconds=False`` (the default) returns a Matrix whose elements are + tuples. The behavior of ``laplace_transform`` for matrices will change + in a future release of SymPy to return a tuple of the transformed + Matrix and the convergence conditions for the matrix as a whole. Use + ``legacy_matrix=False`` to enable the new behavior. + + Examples + ======== + + >>> from sympy import DiracDelta, exp, laplace_transform + >>> from sympy.abc import t, s, a + >>> laplace_transform(t**4, t, s) + (24/s**5, 0, True) + >>> laplace_transform(t**a, t, s) + (gamma(a + 1)/(s*s**a), 0, re(a) > -1) + >>> laplace_transform(DiracDelta(t)-a*exp(-a*t), t, s, simplify=True) + (s/(a + s), -re(a), True) + + References + ========== + + .. [1] Erdelyi, A. 
(ed.), Tables of Integral Transforms, Volume 1, + Bateman Manuscript Prooject, McGraw-Hill (1954), available: + https://resolver.caltech.edu/CaltechAUTHORS:20140123-101456353 + + See Also + ======== + + inverse_laplace_transform, mellin_transform, fourier_transform + hankel_transform, inverse_hankel_transform + + """ + + _noconds = hints.get('noconds', False) + _simplify = hints.get('simplify', False) + + if isinstance(f, MatrixBase) and hasattr(f, 'applyfunc'): + + conds = not hints.get('noconds', False) + + if conds and legacy_matrix: + adt = 'deprecated-laplace-transform-matrix' + sympy_deprecation_warning( + """ +Calling laplace_transform() on a Matrix with noconds=False (the default) is +deprecated. Either noconds=True or use legacy_matrix=False to get the new +behavior. + """, + deprecated_since_version='1.9', + active_deprecations_target=adt, + ) + # Temporarily disable the deprecation warning for non-Expr objects + # in Matrix + with ignore_warnings(SymPyDeprecationWarning): + return f.applyfunc( + lambda fij: laplace_transform(fij, t, s, **hints)) + else: + elements_trans = [laplace_transform( + fij, t, s, **hints) for fij in f] + if conds: + elements, avals, conditions = zip(*elements_trans) + f_laplace = type(f)(*f.shape, elements) + return f_laplace, Max(*avals), And(*conditions) + else: + return type(f)(*f.shape, elements_trans) + + LT = LaplaceTransform(f, t, s).doit(noconds=False, simplify=_simplify) + + if not _noconds: + return LT + else: + return LT[0] + + +def _inverse_laplace_transform_integration(F, s, t_, plane, simplify=True): + """ The backend function for inverse Laplace transforms. """ + from sympy.integrals.meijerint import meijerint_inversion, _get_coeff_exp + from sympy.integrals.transforms import inverse_mellin_transform + + # There are two strategies we can try: + # 1) Use inverse mellin transform, related by a simple change of variables. + # 2) Use the inversion integral. + + t = Dummy('t', real=True) + + def pw_simp(*args): + """ Simplify a piecewise expression from hyperexpand. """ + # XXX we break modularity here! 
+ if len(args) != 3: + return Piecewise(*args) + arg = args[2].args[0].argument + coeff, exponent = _get_coeff_exp(arg, t) + e1 = args[0].args[0] + e2 = args[1].args[0] + return ( + Heaviside(1/Abs(coeff) - t**exponent)*e1 + + Heaviside(t**exponent - 1/Abs(coeff))*e2) + + if F.is_rational_function(s): + F = F.apart(s) + + if F.is_Add: + f = Add( + *[_inverse_laplace_transform_integration(X, s, t, plane, simplify) + for X in F.args]) + return _simplify(f.subs(t, t_), simplify), True + + try: + f, cond = inverse_mellin_transform(F, s, exp(-t), (None, S.Infinity), + needeval=True, noconds=False) + except IntegralTransformError: + f = None + if f is None: + f = meijerint_inversion(F, s, t) + if f is None: + return None + if f.is_Piecewise: + f, cond = f.args[0] + if f.has(Integral): + return None + else: + cond = S.true + f = f.replace(Piecewise, pw_simp) + + if f.is_Piecewise: + # many of the functions called below can't work with piecewise + # (b/c it has a bool in args) + return f.subs(t, t_), cond + + u = Dummy('u') + + def simp_heaviside(arg, H0=S.Half): + a = arg.subs(exp(-t), u) + if a.has(t): + return Heaviside(arg, H0) + from sympy.solvers.inequalities import _solve_inequality + rel = _solve_inequality(a > 0, u) + if rel.lts == u: + k = log(rel.gts) + return Heaviside(t + k, H0) + else: + k = log(rel.lts) + return Heaviside(-(t + k), H0) + + f = f.replace(Heaviside, simp_heaviside) + + def simp_exp(arg): + return expand_complex(exp(arg)) + + f = f.replace(exp, simp_exp) + + # TODO it would be nice to fix cosh and sinh ... simplify messes these + # exponentials up + + return _simplify(f.subs(t, t_), simplify), cond + + +def _complete_the_square_in_denom(f, s): + from sympy.simplify.radsimp import fraction + [n, d] = fraction(f) + if d.is_polynomial(s): + cf = d.as_poly(s).all_coeffs() + if len(cf) == 3: + a, b, c = cf + d = a*((s+b/(2*a))**2+c/a-(b/(2*a))**2) + return n/d + + +@cacheit +def _inverse_laplace_build_rules(): + """ + This is an internal helper function that returns the table of inverse + Laplace transform rules in terms of the time variable `t` and the + frequency variable `s`. It is used by `_inverse_laplace_apply_rules`. + """ + s = Dummy('s') + t = Dummy('t') + a = Wild('a', exclude=[s]) + b = Wild('b', exclude=[s]) + c = Wild('c', exclude=[s]) + + debug('_inverse_laplace_build_rules is building rules') + + def _frac(f, s): + try: + return f.factor(s) + except PolynomialError: + return f + + def same(f): return f + # This list is sorted according to the prep function needed. + _ILT_rules = [ + (a/s, a, S.true, same, 1), + (b*(s+a)**(-c), t**(c-1)*exp(-a*t)/gamma(c), c > 0, same, 1), + (1/(s**2+a**2)**2, (sin(a*t) - a*t*cos(a*t))/(2*a**3), + S.true, same, 1), + # The next two rules must be there in that order. For the second + # one, the condition would be a != 0 or, respectively, to take the + # limit a -> 0 after the transform if a == 0. It is much simpler if + # the case a == 0 has its own rule. + (1/(s**b), t**(b - 1)/gamma(b), S.true, same, 1), + (1/(s*(s+a)**b), lowergamma(b, a*t)/(a**b*gamma(b)), + S.true, same, 1) + ] + return _ILT_rules, s, t + + +def _inverse_laplace_apply_simple_rules(f, s, t): + """ + Helper function for the class InverseLaplaceTransform. 
+ """ + if f == 1: + debug('_inverse_laplace_apply_simple_rules match:') + debugf(' f: %s', (1,)) + debugf(' rule: 1 o---o DiracDelta(%s)', (t,)) + return DiracDelta(t), S.true + + _ILT_rules, s_, t_ = _inverse_laplace_build_rules() + _prep = '' + fsubs = f.subs({s: s_}) + + for s_dom, t_dom, check, prep, fac in _ILT_rules: + if _prep != (prep, fac): + _F = prep(fsubs*fac) + _prep = (prep, fac) + ma = _F.match(s_dom) + if ma: + try: + c = check.xreplace(ma) + except TypeError: + continue + if c == S.true: + debug('_inverse_laplace_apply_simple_rules match:') + debugf(' f: %s', (f,)) + debugf(' rule: %s o---o %s', (s_dom, t_dom)) + debugf(' ma: %s', (ma,)) + return Heaviside(t)*t_dom.xreplace(ma).subs({t_: t}), S.true + + return None + + +def _inverse_laplace_time_shift(F, s, t, plane): + """ + Helper function for the class InverseLaplaceTransform. + """ + a = Wild('a', exclude=[s]) + g = Wild('g') + + if not F.has(s): + return F*DiracDelta(t), S.true + ma1 = F.match(exp(a*s)) + if ma1: + if ma1[a].is_negative: + debug('_inverse_laplace_time_shift match:') + debugf(' f: %s', (F,)) + debug(' rule: exp(-a*s) o---o DiracDelta(t-a)') + debugf(' ma: %s', (ma1,)) + return DiracDelta(t+ma1[a]), S.true + else: + debug('_inverse_laplace_time_shift match: negative time shift') + return InverseLaplaceTransform(F, s, t, plane), S.true + + ma1 = F.match(exp(a*s)*g) + if ma1: + if ma1[a].is_negative: + debug('_inverse_laplace_time_shift match:') + debugf(' f: %s', (F,)) + debug(' rule: exp(-a*s)*F(s) o---o Heaviside(t-a)*f(t-a)') + debugf(' ma: %s', (ma1,)) + return _inverse_laplace_transform(ma1[g], s, t+ma1[a], plane) + else: + debug('_inverse_laplace_time_shift match: negative time shift') + return InverseLaplaceTransform(F, s, t, plane), S.true + return None + + +def _inverse_laplace_time_diff(F, s, t, plane): + """ + Helper function for the class InverseLaplaceTransform. + """ + n = Wild('n', exclude=[s]) + g = Wild('g') + + ma1 = F.match(s**n*g) + if ma1 and ma1[n].is_integer and ma1[n].is_positive: + debug('_inverse_laplace_time_diff match:') + debugf(' f: %s', (F,)) + debug(' rule: s**n*F(s) o---o diff(f(t), t, n)') + debugf(' ma: %s', (ma1,)) + r, c = _inverse_laplace_transform(ma1[g], s, t, plane) + r = r.replace(Heaviside(t), 1) + if r.has(InverseLaplaceTransform): + return diff(r, t, ma1[n]), c + else: + return Heaviside(t)*diff(r, t, ma1[n]), c + return None + + +def _inverse_laplace_apply_prog_rules(F, s, t, plane): + """ + Helper function for the class InverseLaplaceTransform. + """ + prog_rules = [_inverse_laplace_time_shift, + _inverse_laplace_time_diff] + + for p_rule in prog_rules: + if (r := p_rule(F, s, t, plane)) is not None: + return r + return None + + +def _inverse_laplace_expand(fn, s, t, plane): + """ + Helper function for the class InverseLaplaceTransform. + """ + if fn.is_Add: + return None + r = expand(fn, deep=False) + if r.is_Add: + return _inverse_laplace_transform(r, s, t, plane) + r = expand_mul(fn) + if r.is_Add: + return _inverse_laplace_transform(r, s, t, plane) + r = expand(fn) + if r.is_Add: + return _inverse_laplace_transform(r, s, t, plane) + if fn.is_rational_function(s): + r = fn.apart(s).doit() + if r.is_Add: + return _inverse_laplace_transform(r, s, t, plane) + return None + + +def _inverse_laplace_rational(fn, s, t, plane, simplify): + """ + Helper function for the class InverseLaplaceTransform. 
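+
+    For illustration, a partial-fraction case of the kind this routine
+    handles, shown through the public API (a sketch; the result may be
+    returned in an equivalent rewritten form):
+
+    >>> from sympy import inverse_laplace_transform
+    >>> from sympy.abc import s, t
+    >>> inverse_laplace_transform(1/(s**2 + 3*s + 2), s, t)  # doctest: +SKIP
+    (exp(-t) - exp(-2*t))*Heaviside(t)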
+ """ + debugf('[ILT _i_l_r] (%s, %s, %s)', (fn, s, t)) + x_ = symbols('x_') + f = fn.apart(s) + terms = Add.make_args(f) + terms_t = [] + conditions = [S.true] + for term in terms: + [n, d] = term.as_numer_denom() + dc = d.as_poly(s).all_coeffs() + dc_lead = dc[0] + dc = [x/dc_lead for x in dc] + nc = [x/dc_lead for x in n.as_poly(s).all_coeffs()] + if len(dc) == 1: + r = nc[0]*DiracDelta(t) + terms_t.append(r) + elif len(dc) == 2: + r = nc[0]*exp(-dc[1]*t) + terms_t.append(Heaviside(t)*r) + elif len(dc) == 3: + a = dc[1]/2 + b = (dc[2]-a**2).factor() + if len(nc) == 1: + nc = [S.Zero] + nc + l, m = tuple(nc) + if b == 0: + r = (m*t+l*(1-a*t))*exp(-a*t) + else: + hyp = False + if b.is_negative: + b = -b + hyp = True + b2 = list(roots(x_**2-b, x_).keys())[0] + bs = sqrt(b).simplify() + if hyp: + r = ( + l*exp(-a*t)*cosh(b2*t) + (m-a*l) / + bs*exp(-a*t)*sinh(bs*t)) + else: + r = l*exp(-a*t)*cos(b2*t) + (m-a*l)/bs*exp(-a*t)*sin(bs*t) + terms_t.append(Heaviside(t)*r) + else: + ft, cond = _inverse_laplace_transform( + fn, s, t, plane, simplify=True, dorational=False) + terms_t.append(ft) + conditions.append(cond) + + result = Add(*terms_t) + if simplify: + result = result.simplify(doit=False) + debugf('[ILT _i_l_r] returns %s', (result,)) + return result, And(*conditions) + + +def _inverse_laplace_transform( + fn, s_, t_, plane, simplify=True, dorational=True): + """ + Front-end function of the inverse Laplace transform. It tries to apply all + known rules recursively. If everything else fails, it tries to integrate. + """ + terms = Add.make_args(fn) + terms_t = [] + conditions = [] + + debugf('[ILT _i_l_t] (%s, %s, %s)', (fn, s_, t_)) + + for term in terms: + k, f = term.as_independent(s_, as_Add=False) + if ( + dorational and term.is_rational_function(s_) and + ( + r := _inverse_laplace_rational( + f, s_, t_, plane, simplify)) is not None): + pass + elif (r := _inverse_laplace_apply_simple_rules(f, s_, t_)) is not None: + pass + elif (r := _inverse_laplace_expand(f, s_, t_, plane)) is not None: + pass + elif ( + (r := _inverse_laplace_apply_prog_rules(f, s_, t_, plane)) + is not None): + pass + elif any(undef.has(s_) for undef in f.atoms(AppliedUndef)): + # If there are undefined functions f(t) then integration is + # unlikely to do anything useful so we skip it and given an + # unevaluated LaplaceTransform. + r = (InverseLaplaceTransform(f, s_, t_, plane), S.true) + elif ( + r := _inverse_laplace_transform_integration( + f, s_, t_, plane, simplify=simplify)) is not None: + pass + else: + r = (InverseLaplaceTransform(f, s_, t_, plane), S.true) + (ri_, ci_) = r + terms_t.append(k*ri_) + conditions.append(ci_) + + result = Add(*terms_t) + if simplify: + result = result.simplify(doit=False) + condition = And(*conditions) + + return result, condition + + +class InverseLaplaceTransform(IntegralTransform): + """ + Class representing unevaluated inverse Laplace transforms. + + For usage of this class, see the :class:`IntegralTransform` docstring. + + For how to compute inverse Laplace transforms, see the + :func:`inverse_laplace_transform` docstring. 
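+
+    If this is called with ``.doit()``, it returns the inverse transform as
+    an expression, for example (a sketch; the exact form may differ):
+
+    >>> from sympy.integrals.laplace import InverseLaplaceTransform
+    >>> from sympy.abc import s, t
+    >>> InverseLaplaceTransform(1/s**2, s, t, None).doit()  # doctest: +SKIP
+    t*Heaviside(t)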
+ """ + + _name = 'Inverse Laplace' + _none_sentinel = Dummy('None') + _c = Dummy('c') + + def __new__(cls, F, s, x, plane, **opts): + if plane is None: + plane = InverseLaplaceTransform._none_sentinel + return IntegralTransform.__new__(cls, F, s, x, plane, **opts) + + @property + def fundamental_plane(self): + plane = self.args[3] + if plane is InverseLaplaceTransform._none_sentinel: + plane = None + return plane + + def _compute_transform(self, F, s, t, **hints): + return _inverse_laplace_transform_integration( + F, s, t, self.fundamental_plane, **hints) + + def _as_integral(self, F, s, t): + c = self.__class__._c + return ( + Integral(exp(s*t)*F, (s, c - S.ImaginaryUnit*S.Infinity, + c + S.ImaginaryUnit*S.Infinity)) / + (2*S.Pi*S.ImaginaryUnit)) + + def doit(self, **hints): + """ + Try to evaluate the transform in closed form. + + Explanation + =========== + + Standard hints are the following: + - ``noconds``: if True, do not return convergence conditions. The + default setting is `True`. + - ``simplify``: if True, it simplifies the final result. The + default setting is `False`. + """ + _noconds = hints.get('noconds', True) + _simplify = hints.get('simplify', False) + + debugf('[ILT doit] (%s, %s, %s)', (self.function, + self.function_variable, + self.transform_variable)) + + s_ = self.function_variable + t_ = self.transform_variable + fn = self.function + plane = self.fundamental_plane + + r = _inverse_laplace_transform(fn, s_, t_, plane, simplify=_simplify) + + if _noconds: + return r[0] + else: + return r + + +def inverse_laplace_transform(F, s, t, plane=None, **hints): + r""" + Compute the inverse Laplace transform of `F(s)`, defined as + + .. math :: + f(t) = \frac{1}{2\pi i} \int_{c-i\infty}^{c+i\infty} e^{st} + F(s) \mathrm{d}s, + + for `c` so large that `F(s)` has no singularites in the + half-plane `\operatorname{Re}(s) > c-\epsilon`. + + Explanation + =========== + + The plane can be specified by + argument ``plane``, but will be inferred if passed as None. + + Under certain regularity conditions, this recovers `f(t)` from its + Laplace Transform `F(s)`, for non-negative `t`, and vice + versa. + + If the integral cannot be computed in closed form, this function returns + an unevaluated :class:`InverseLaplaceTransform` object. + + Note that this function will always assume `t` to be real, + regardless of the SymPy assumption on `t`. + + For a description of possible hints, refer to the docstring of + :func:`sympy.integrals.transforms.IntegralTransform.doit`. 
+ + Examples + ======== + + >>> from sympy import inverse_laplace_transform, exp, Symbol + >>> from sympy.abc import s, t + >>> a = Symbol('a', positive=True) + >>> inverse_laplace_transform(exp(-a*s)/s, s, t) + Heaviside(-a + t) + + See Also + ======== + + laplace_transform + hankel_transform, inverse_hankel_transform + """ + if isinstance(F, MatrixBase) and hasattr(F, 'applyfunc'): + return F.applyfunc( + lambda Fij: inverse_laplace_transform(Fij, s, t, plane, **hints)) + return InverseLaplaceTransform(F, s, t, plane).doit(**hints) + + +def _fast_inverse_laplace(e, s, t): + """Fast inverse Laplace transform of rational function including RootSum""" + a, b, n = symbols('a, b, n', cls=Wild, exclude=[s]) + + def _ilt(e): + if not e.has(s): + return e + elif e.is_Add: + return _ilt_add(e) + elif e.is_Mul: + return _ilt_mul(e) + elif e.is_Pow: + return _ilt_pow(e) + elif isinstance(e, RootSum): + return _ilt_rootsum(e) + else: + raise NotImplementedError + + def _ilt_add(e): + return e.func(*map(_ilt, e.args)) + + def _ilt_mul(e): + coeff, expr = e.as_independent(s) + if expr.is_Mul: + raise NotImplementedError + return coeff * _ilt(expr) + + def _ilt_pow(e): + match = e.match((a*s + b)**n) + if match is not None: + nm, am, bm = match[n], match[a], match[b] + if nm.is_Integer and nm < 0: + return t**(-nm-1)*exp(-(bm/am)*t)/(am**-nm*gamma(-nm)) + if nm == 1: + return exp(-(bm/am)*t) / am + raise NotImplementedError + + def _ilt_rootsum(e): + expr = e.fun.expr + [variable] = e.fun.variables + return RootSum(e.poly, Lambda(variable, together(_ilt(expr)))) + + return _ilt(e) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/manualintegrate.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/manualintegrate.py new file mode 100644 index 0000000000000000000000000000000000000000..b555352776b4eeb6fb85fb05520225a16c280a71 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/manualintegrate.py @@ -0,0 +1,2171 @@ +"""Integration method that emulates by-hand techniques. + +This module also provides functionality to get the steps used to evaluate a +particular integral, in the ``integral_steps`` function. This will return +nested ``Rule`` s representing the integration rules used. + +Each ``Rule`` class represents a (maybe parametrized) integration rule, e.g. +``SinRule`` for integrating ``sin(x)`` and ``ReciprocalSqrtQuadraticRule`` +for integrating ``1/sqrt(a+b*x+c*x**2)``. The ``eval`` method returns the +integration result. + +The ``manualintegrate`` function computes the integral by calling ``eval`` +on the rule returned by ``integral_steps``. + +The integrator can be extended with new heuristics and evaluation +techniques. To do so, extend the ``Rule`` class, implement ``eval`` method, +then write a function that accepts an ``IntegralInfo`` object and returns +either a ``Rule`` instance or ``None``. If the new technique requires a new +match, add the key and call to the antiderivative function to integral_steps. +To enable simple substitutions, add the match to find_substitutions. 
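The module docstring above describes the split between integral_steps, which builds a tree of Rule objects, and manualintegrate, which evaluates that tree. A minimal usage sketch, assuming the module is importable as sympy.integrals.manualintegrate:

from sympy import Symbol, sin
from sympy.integrals.manualintegrate import integral_steps, manualintegrate

x = Symbol('x')

# integral_steps returns nested Rule dataclasses (a PartsRule is expected
# somewhere in the tree for x*sin(x)); calling eval() on the tree reproduces
# what manualintegrate returns.
rule = integral_steps(x*sin(x), x)
print(rule)
print(rule.eval())                      # expected: -x*cos(x) + sin(x)
print(manualintegrate(x*sin(x), x))     # same result via the convenience wrapper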
+ +""" + +from __future__ import annotations +from typing import NamedTuple, Type, Callable, Sequence +from abc import ABC, abstractmethod +from dataclasses import dataclass +from collections import defaultdict +from collections.abc import Mapping + +from sympy.core.add import Add +from sympy.core.cache import cacheit +from sympy.core.containers import Dict +from sympy.core.expr import Expr +from sympy.core.function import Derivative +from sympy.core.logic import fuzzy_not +from sympy.core.mul import Mul +from sympy.core.numbers import Integer, Number, E +from sympy.core.power import Pow +from sympy.core.relational import Eq, Ne, Boolean +from sympy.core.singleton import S +from sympy.core.symbol import Dummy, Symbol, Wild +from sympy.functions.elementary.complexes import Abs +from sympy.functions.elementary.exponential import exp, log +from sympy.functions.elementary.hyperbolic import (HyperbolicFunction, csch, + cosh, coth, sech, sinh, tanh, asinh) +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.functions.elementary.piecewise import Piecewise +from sympy.functions.elementary.trigonometric import (TrigonometricFunction, + cos, sin, tan, cot, csc, sec, acos, asin, atan, acot, acsc, asec) +from sympy.functions.special.delta_functions import Heaviside, DiracDelta +from sympy.functions.special.error_functions import (erf, erfi, fresnelc, + fresnels, Ci, Chi, Si, Shi, Ei, li) +from sympy.functions.special.gamma_functions import uppergamma +from sympy.functions.special.elliptic_integrals import elliptic_e, elliptic_f +from sympy.functions.special.polynomials import (chebyshevt, chebyshevu, + legendre, hermite, laguerre, assoc_laguerre, gegenbauer, jacobi, + OrthogonalPolynomial) +from sympy.functions.special.zeta_functions import polylog +from .integrals import Integral +from sympy.logic.boolalg import And +from sympy.ntheory.factor_ import primefactors +from sympy.polys.polytools import degree, lcm_list, gcd_list, Poly +from sympy.simplify.radsimp import fraction +from sympy.simplify.simplify import simplify +from sympy.solvers.solvers import solve +from sympy.strategies.core import switch, do_one, null_safe, condition +from sympy.utilities.iterables import iterable +from sympy.utilities.misc import debug + + +@dataclass +class Rule(ABC): + integrand: Expr + variable: Symbol + + @abstractmethod + def eval(self) -> Expr: + pass + + @abstractmethod + def contains_dont_know(self) -> bool: + pass + + +@dataclass +class AtomicRule(Rule, ABC): + """A simple rule that does not depend on other rules""" + def contains_dont_know(self) -> bool: + return False + + +@dataclass +class ConstantRule(AtomicRule): + """integrate(a, x) -> a*x""" + def eval(self) -> Expr: + return self.integrand * self.variable + + +@dataclass +class ConstantTimesRule(Rule): + """integrate(a*f(x), x) -> a*integrate(f(x), x)""" + constant: Expr + other: Expr + substep: Rule + + def eval(self) -> Expr: + return self.constant * self.substep.eval() + + def contains_dont_know(self) -> bool: + return self.substep.contains_dont_know() + + +@dataclass +class PowerRule(AtomicRule): + """integrate(x**a, x)""" + base: Expr + exp: Expr + + def eval(self) -> Expr: + return Piecewise( + ((self.base**(self.exp + 1))/(self.exp + 1), Ne(self.exp, -1)), + (log(self.base), True), + ) + + +@dataclass +class NestedPowRule(AtomicRule): + """integrate((x**a)**b, x)""" + base: Expr + exp: Expr + + def eval(self) -> Expr: + m = self.base * self.integrand + return Piecewise((m / (self.exp + 1), Ne(self.exp, -1)), + (m * 
log(self.base), True)) + + +@dataclass +class AddRule(Rule): + """integrate(f(x) + g(x), x) -> integrate(f(x), x) + integrate(g(x), x)""" + substeps: list[Rule] + + def eval(self) -> Expr: + return Add(*(substep.eval() for substep in self.substeps)) + + def contains_dont_know(self) -> bool: + return any(substep.contains_dont_know() for substep in self.substeps) + + +@dataclass +class URule(Rule): + """integrate(f(g(x))*g'(x), x) -> integrate(f(u), u), u = g(x)""" + u_var: Symbol + u_func: Expr + substep: Rule + + def eval(self) -> Expr: + result = self.substep.eval() + if self.u_func.is_Pow: + base, exp_ = self.u_func.as_base_exp() + if exp_ == -1: + # avoid needless -log(1/x) from substitution + result = result.subs(log(self.u_var), -log(base)) + return result.subs(self.u_var, self.u_func) + + def contains_dont_know(self) -> bool: + return self.substep.contains_dont_know() + + +@dataclass +class PartsRule(Rule): + """integrate(u(x)*v'(x), x) -> u(x)*v(x) - integrate(u'(x)*v(x), x)""" + u: Symbol + dv: Expr + v_step: Rule + second_step: Rule | None # None when is a substep of CyclicPartsRule + + def eval(self) -> Expr: + assert self.second_step is not None + v = self.v_step.eval() + return self.u * v - self.second_step.eval() + + def contains_dont_know(self) -> bool: + return self.v_step.contains_dont_know() or ( + self.second_step is not None and self.second_step.contains_dont_know()) + + +@dataclass +class CyclicPartsRule(Rule): + """Apply PartsRule multiple times to integrate exp(x)*sin(x)""" + parts_rules: list[PartsRule] + coefficient: Expr + + def eval(self) -> Expr: + result = [] + sign = 1 + for rule in self.parts_rules: + result.append(sign * rule.u * rule.v_step.eval()) + sign *= -1 + return Add(*result) / (1 - self.coefficient) + + def contains_dont_know(self) -> bool: + return any(substep.contains_dont_know() for substep in self.parts_rules) + + +@dataclass +class TrigRule(AtomicRule, ABC): + pass + + +@dataclass +class SinRule(TrigRule): + """integrate(sin(x), x) -> -cos(x)""" + def eval(self) -> Expr: + return -cos(self.variable) + + +@dataclass +class CosRule(TrigRule): + """integrate(cos(x), x) -> sin(x)""" + def eval(self) -> Expr: + return sin(self.variable) + + +@dataclass +class SecTanRule(TrigRule): + """integrate(sec(x)*tan(x), x) -> sec(x)""" + def eval(self) -> Expr: + return sec(self.variable) + + +@dataclass +class CscCotRule(TrigRule): + """integrate(csc(x)*cot(x), x) -> -csc(x)""" + def eval(self) -> Expr: + return -csc(self.variable) + + +@dataclass +class Sec2Rule(TrigRule): + """integrate(sec(x)**2, x) -> tan(x)""" + def eval(self) -> Expr: + return tan(self.variable) + + +@dataclass +class Csc2Rule(TrigRule): + """integrate(csc(x)**2, x) -> -cot(x)""" + def eval(self) -> Expr: + return -cot(self.variable) + + +@dataclass +class HyperbolicRule(AtomicRule, ABC): + pass + + +@dataclass +class SinhRule(HyperbolicRule): + """integrate(sinh(x), x) -> cosh(x)""" + def eval(self) -> Expr: + return cosh(self.variable) + + +@dataclass +class CoshRule(HyperbolicRule): + """integrate(cosh(x), x) -> sinh(x)""" + def eval(self): + return sinh(self.variable) + + +@dataclass +class ExpRule(AtomicRule): + """integrate(a**x, x) -> a**x/ln(a)""" + base: Expr + exp: Expr + + def eval(self) -> Expr: + return self.integrand / log(self.base) + + +@dataclass +class ReciprocalRule(AtomicRule): + """integrate(1/x, x) -> ln(x)""" + base: Expr + + def eval(self) -> Expr: + return log(self.base) + + +@dataclass +class ArcsinRule(AtomicRule): + """integrate(1/sqrt(1-x**2), x) -> 
asin(x)""" + def eval(self) -> Expr: + return asin(self.variable) + + +@dataclass +class ArcsinhRule(AtomicRule): + """integrate(1/sqrt(1+x**2), x) -> asin(x)""" + def eval(self) -> Expr: + return asinh(self.variable) + + +@dataclass +class ReciprocalSqrtQuadraticRule(AtomicRule): + """integrate(1/sqrt(a+b*x+c*x**2), x) -> log(2*sqrt(c)*sqrt(a+b*x+c*x**2)+b+2*c*x)/sqrt(c)""" + a: Expr + b: Expr + c: Expr + + def eval(self) -> Expr: + a, b, c, x = self.a, self.b, self.c, self.variable + return log(2*sqrt(c)*sqrt(a+b*x+c*x**2)+b+2*c*x)/sqrt(c) + + +@dataclass +class SqrtQuadraticDenomRule(AtomicRule): + """integrate(poly(x)/sqrt(a+b*x+c*x**2), x)""" + a: Expr + b: Expr + c: Expr + coeffs: list[Expr] + + def eval(self) -> Expr: + a, b, c, coeffs, x = self.a, self.b, self.c, self.coeffs.copy(), self.variable + # Integrate poly/sqrt(a+b*x+c*x**2) using recursion. + # coeffs are coefficients of the polynomial. + # Let I_n = x**n/sqrt(a+b*x+c*x**2), then + # I_n = A * x**(n-1)*sqrt(a+b*x+c*x**2) - B * I_{n-1} - C * I_{n-2} + # where A = 1/(n*c), B = (2*n-1)*b/(2*n*c), C = (n-1)*a/(n*c) + # See https://github.com/sympy/sympy/pull/23608 for proof. + result_coeffs = [] + coeffs = coeffs.copy() + for i in range(len(coeffs)-2): + n = len(coeffs)-1-i + coeff = coeffs[i]/(c*n) + result_coeffs.append(coeff) + coeffs[i+1] -= (2*n-1)*b/2*coeff + coeffs[i+2] -= (n-1)*a*coeff + d, e = coeffs[-1], coeffs[-2] + s = sqrt(a+b*x+c*x**2) + constant = d-b*e/(2*c) + if constant == 0: + I0 = 0 + else: + step = inverse_trig_rule(IntegralInfo(1/s, x), degenerate=False) + I0 = constant*step.eval() + return Add(*(result_coeffs[i]*x**(len(coeffs)-2-i) + for i in range(len(result_coeffs))), e/c)*s + I0 + + +@dataclass +class SqrtQuadraticRule(AtomicRule): + """integrate(sqrt(a+b*x+c*x**2), x)""" + a: Expr + b: Expr + c: Expr + + def eval(self) -> Expr: + step = sqrt_quadratic_rule(IntegralInfo(self.integrand, self.variable), degenerate=False) + return step.eval() + + +@dataclass +class AlternativeRule(Rule): + """Multiple ways to do integration.""" + alternatives: list[Rule] + + def eval(self) -> Expr: + return self.alternatives[0].eval() + + def contains_dont_know(self) -> bool: + return any(substep.contains_dont_know() for substep in self.alternatives) + + +@dataclass +class DontKnowRule(Rule): + """Leave the integral as is.""" + def eval(self) -> Expr: + return Integral(self.integrand, self.variable) + + def contains_dont_know(self) -> bool: + return True + + +@dataclass +class DerivativeRule(AtomicRule): + """integrate(f'(x), x) -> f(x)""" + def eval(self) -> Expr: + assert isinstance(self.integrand, Derivative) + variable_count = list(self.integrand.variable_count) + for i, (var, count) in enumerate(variable_count): + if var == self.variable: + variable_count[i] = (var, count - 1) + break + return Derivative(self.integrand.expr, *variable_count) + + +@dataclass +class RewriteRule(Rule): + """Rewrite integrand to another form that is easier to handle.""" + rewritten: Expr + substep: Rule + + def eval(self) -> Expr: + return self.substep.eval() + + def contains_dont_know(self) -> bool: + return self.substep.contains_dont_know() + + +@dataclass +class CompleteSquareRule(RewriteRule): + """Rewrite a+b*x+c*x**2 to a-b**2/(4*c) + c*(x+b/(2*c))**2""" + pass + + +@dataclass +class PiecewiseRule(Rule): + subfunctions: Sequence[tuple[Rule, bool | Boolean]] + + def eval(self) -> Expr: + return Piecewise(*[(substep.eval(), cond) + for substep, cond in self.subfunctions]) + + def contains_dont_know(self) -> bool: + return 
any(substep.contains_dont_know() for substep, _ in self.subfunctions) + + +@dataclass +class HeavisideRule(Rule): + harg: Expr + ibnd: Expr + substep: Rule + + def eval(self) -> Expr: + # If we are integrating over x and the integrand has the form + # Heaviside(m*x+b)*g(x) == Heaviside(harg)*g(symbol) + # then there needs to be continuity at -b/m == ibnd, + # so we subtract the appropriate term. + result = self.substep.eval() + return Heaviside(self.harg) * (result - result.subs(self.variable, self.ibnd)) + + def contains_dont_know(self) -> bool: + return self.substep.contains_dont_know() + + +@dataclass +class DiracDeltaRule(AtomicRule): + n: Expr + a: Expr + b: Expr + + def eval(self) -> Expr: + n, a, b, x = self.n, self.a, self.b, self.variable + if n == 0: + return Heaviside(a+b*x)/b + return DiracDelta(a+b*x, n-1)/b + + +@dataclass +class TrigSubstitutionRule(Rule): + theta: Expr + func: Expr + rewritten: Expr + substep: Rule + restriction: bool | Boolean + + def eval(self) -> Expr: + theta, func, x = self.theta, self.func, self.variable + func = func.subs(sec(theta), 1/cos(theta)) + func = func.subs(csc(theta), 1/sin(theta)) + func = func.subs(cot(theta), 1/tan(theta)) + + trig_function = list(func.find(TrigonometricFunction)) + assert len(trig_function) == 1 + trig_function = trig_function[0] + relation = solve(x - func, trig_function) + assert len(relation) == 1 + numer, denom = fraction(relation[0]) + + if isinstance(trig_function, sin): + opposite = numer + hypotenuse = denom + adjacent = sqrt(denom**2 - numer**2) + inverse = asin(relation[0]) + elif isinstance(trig_function, cos): + adjacent = numer + hypotenuse = denom + opposite = sqrt(denom**2 - numer**2) + inverse = acos(relation[0]) + else: # tan + opposite = numer + adjacent = denom + hypotenuse = sqrt(denom**2 + numer**2) + inverse = atan(relation[0]) + + substitution = [ + (sin(theta), opposite/hypotenuse), + (cos(theta), adjacent/hypotenuse), + (tan(theta), opposite/adjacent), + (theta, inverse) + ] + return Piecewise( + (self.substep.eval().subs(substitution).trigsimp(), self.restriction) + ) + + def contains_dont_know(self) -> bool: + return self.substep.contains_dont_know() + + +@dataclass +class ArctanRule(AtomicRule): + """integrate(a/(b*x**2+c), x) -> a/b / sqrt(c/b) * atan(x/sqrt(c/b))""" + a: Expr + b: Expr + c: Expr + + def eval(self) -> Expr: + a, b, c, x = self.a, self.b, self.c, self.variable + return a/b / sqrt(c/b) * atan(x/sqrt(c/b)) + + +@dataclass +class OrthogonalPolyRule(AtomicRule, ABC): + n: Expr + + +@dataclass +class JacobiRule(OrthogonalPolyRule): + a: Expr + b: Expr + + def eval(self) -> Expr: + n, a, b, x = self.n, self.a, self.b, self.variable + return Piecewise( + (2*jacobi(n + 1, a - 1, b - 1, x)/(n + a + b), Ne(n + a + b, 0)), + (x, Eq(n, 0)), + ((a + b + 2)*x**2/4 + (a - b)*x/2, Eq(n, 1))) + + +@dataclass +class GegenbauerRule(OrthogonalPolyRule): + a: Expr + + def eval(self) -> Expr: + n, a, x = self.n, self.a, self.variable + return Piecewise( + (gegenbauer(n + 1, a - 1, x)/(2*(a - 1)), Ne(a, 1)), + (chebyshevt(n + 1, x)/(n + 1), Ne(n, -1)), + (S.Zero, True)) + + +@dataclass +class ChebyshevTRule(OrthogonalPolyRule): + def eval(self) -> Expr: + n, x = self.n, self.variable + return Piecewise( + ((chebyshevt(n + 1, x)/(n + 1) - + chebyshevt(n - 1, x)/(n - 1))/2, Ne(Abs(n), 1)), + (x**2/2, True)) + + +@dataclass +class ChebyshevURule(OrthogonalPolyRule): + def eval(self) -> Expr: + n, x = self.n, self.variable + return Piecewise( + (chebyshevt(n + 1, x)/(n + 1), Ne(n, -1)), + (S.Zero, 
True)) + + +@dataclass +class LegendreRule(OrthogonalPolyRule): + def eval(self) -> Expr: + n, x = self.n, self.variable + return(legendre(n + 1, x) - legendre(n - 1, x))/(2*n + 1) + + +@dataclass +class HermiteRule(OrthogonalPolyRule): + def eval(self) -> Expr: + n, x = self.n, self.variable + return hermite(n + 1, x)/(2*(n + 1)) + + +@dataclass +class LaguerreRule(OrthogonalPolyRule): + def eval(self) -> Expr: + n, x = self.n, self.variable + return laguerre(n, x) - laguerre(n + 1, x) + + +@dataclass +class AssocLaguerreRule(OrthogonalPolyRule): + a: Expr + + def eval(self) -> Expr: + return -assoc_laguerre(self.n + 1, self.a - 1, self.variable) + + +@dataclass +class IRule(AtomicRule, ABC): + a: Expr + b: Expr + + +@dataclass +class CiRule(IRule): + def eval(self) -> Expr: + a, b, x = self.a, self.b, self.variable + return cos(b)*Ci(a*x) - sin(b)*Si(a*x) + + +@dataclass +class ChiRule(IRule): + def eval(self) -> Expr: + a, b, x = self.a, self.b, self.variable + return cosh(b)*Chi(a*x) + sinh(b)*Shi(a*x) + + +@dataclass +class EiRule(IRule): + def eval(self) -> Expr: + a, b, x = self.a, self.b, self.variable + return exp(b)*Ei(a*x) + + +@dataclass +class SiRule(IRule): + def eval(self) -> Expr: + a, b, x = self.a, self.b, self.variable + return sin(b)*Ci(a*x) + cos(b)*Si(a*x) + + +@dataclass +class ShiRule(IRule): + def eval(self) -> Expr: + a, b, x = self.a, self.b, self.variable + return sinh(b)*Chi(a*x) + cosh(b)*Shi(a*x) + + +@dataclass +class LiRule(IRule): + def eval(self) -> Expr: + a, b, x = self.a, self.b, self.variable + return li(a*x + b)/a + + +@dataclass +class ErfRule(AtomicRule): + a: Expr + b: Expr + c: Expr + + def eval(self) -> Expr: + a, b, c, x = self.a, self.b, self.c, self.variable + if a.is_extended_real: + return Piecewise( + (sqrt(S.Pi/(-a))/2 * exp(c - b**2/(4*a)) * + erf((-2*a*x - b)/(2*sqrt(-a))), a < 0), + (sqrt(S.Pi/a)/2 * exp(c - b**2/(4*a)) * + erfi((2*a*x + b)/(2*sqrt(a))), True)) + return sqrt(S.Pi/a)/2 * exp(c - b**2/(4*a)) * \ + erfi((2*a*x + b)/(2*sqrt(a))) + + +@dataclass +class FresnelCRule(AtomicRule): + a: Expr + b: Expr + c: Expr + + def eval(self) -> Expr: + a, b, c, x = self.a, self.b, self.c, self.variable + return sqrt(S.Pi/(2*a)) * ( + cos(b**2/(4*a) - c)*fresnelc((2*a*x + b)/sqrt(2*a*S.Pi)) + + sin(b**2/(4*a) - c)*fresnels((2*a*x + b)/sqrt(2*a*S.Pi))) + + +@dataclass +class FresnelSRule(AtomicRule): + a: Expr + b: Expr + c: Expr + + def eval(self) -> Expr: + a, b, c, x = self.a, self.b, self.c, self.variable + return sqrt(S.Pi/(2*a)) * ( + cos(b**2/(4*a) - c)*fresnels((2*a*x + b)/sqrt(2*a*S.Pi)) - + sin(b**2/(4*a) - c)*fresnelc((2*a*x + b)/sqrt(2*a*S.Pi))) + + +@dataclass +class PolylogRule(AtomicRule): + a: Expr + b: Expr + + def eval(self) -> Expr: + return polylog(self.b + 1, self.a * self.variable) + + +@dataclass +class UpperGammaRule(AtomicRule): + a: Expr + e: Expr + + def eval(self) -> Expr: + a, e, x = self.a, self.e, self.variable + return x**e * (-a*x)**(-e) * uppergamma(e + 1, -a*x)/a + + +@dataclass +class EllipticFRule(AtomicRule): + a: Expr + d: Expr + + def eval(self) -> Expr: + return elliptic_f(self.variable, self.d/self.a)/sqrt(self.a) + + +@dataclass +class EllipticERule(AtomicRule): + a: Expr + d: Expr + + def eval(self) -> Expr: + return elliptic_e(self.variable, self.d/self.a)*sqrt(self.a) + + +class IntegralInfo(NamedTuple): + integrand: Expr + symbol: Symbol + + +def manual_diff(f, symbol): + """Derivative of f in form expected by find_substitutions + + SymPy's derivatives for some trig functions (like cot) are not 
in a form + that works well with finding substitutions; this replaces the + derivatives for those particular forms with something that works better. + + """ + if f.args: + arg = f.args[0] + if isinstance(f, tan): + return arg.diff(symbol) * sec(arg)**2 + elif isinstance(f, cot): + return -arg.diff(symbol) * csc(arg)**2 + elif isinstance(f, sec): + return arg.diff(symbol) * sec(arg) * tan(arg) + elif isinstance(f, csc): + return -arg.diff(symbol) * csc(arg) * cot(arg) + elif isinstance(f, Add): + return sum([manual_diff(arg, symbol) for arg in f.args]) + elif isinstance(f, Mul): + if len(f.args) == 2 and isinstance(f.args[0], Number): + return f.args[0] * manual_diff(f.args[1], symbol) + return f.diff(symbol) + +def manual_subs(expr, *args): + """ + A wrapper for `expr.subs(*args)` with additional logic for substitution + of invertible functions. + """ + if len(args) == 1: + sequence = args[0] + if isinstance(sequence, (Dict, Mapping)): + sequence = sequence.items() + elif not iterable(sequence): + raise ValueError("Expected an iterable of (old, new) pairs") + elif len(args) == 2: + sequence = [args] + else: + raise ValueError("subs accepts either 1 or 2 arguments") + + new_subs = [] + for old, new in sequence: + if isinstance(old, log): + # If log(x) = y, then exp(a*log(x)) = exp(a*y) + # that is, x**a = exp(a*y). Replace nontrivial powers of x + # before subs turns them into `exp(y)**a`, but + # do not replace x itself yet, to avoid `log(exp(y))`. + x0 = old.args[0] + expr = expr.replace(lambda x: x.is_Pow and x.base == x0, + lambda x: exp(x.exp*new)) + new_subs.append((x0, exp(new))) + + return expr.subs(list(sequence) + new_subs) + +# Method based on that on SIN, described in "Symbolic Integration: The +# Stormy Decade" + +inverse_trig_functions = (atan, asin, acos, acot, acsc, asec) + + +def find_substitutions(integrand, symbol, u_var): + results = [] + + def test_subterm(u, u_diff): + if u_diff == 0: + return False + substituted = integrand / u_diff + debug("substituted: {}, u: {}, u_var: {}".format(substituted, u, u_var)) + substituted = manual_subs(substituted, u, u_var).cancel() + + if substituted.has_free(symbol): + return False + # avoid increasing the degree of a rational function + if integrand.is_rational_function(symbol) and substituted.is_rational_function(u_var): + deg_before = max([degree(t, symbol) for t in integrand.as_numer_denom()]) + deg_after = max([degree(t, u_var) for t in substituted.as_numer_denom()]) + if deg_after > deg_before: + return False + return substituted.as_independent(u_var, as_Add=False) + + def exp_subterms(term: Expr): + linear_coeffs = [] + terms = [] + n = Wild('n', properties=[lambda n: n.is_Integer]) + for exp_ in term.find(exp): + arg = exp_.args[0] + if symbol not in arg.free_symbols: + continue + match = arg.match(n*symbol) + if match: + linear_coeffs.append(match[n]) + else: + terms.append(exp_) + if linear_coeffs: + terms.append(exp(gcd_list(linear_coeffs)*symbol)) + return terms + + def possible_subterms(term): + if isinstance(term, (TrigonometricFunction, HyperbolicFunction, + *inverse_trig_functions, + exp, log, Heaviside)): + return [term.args[0]] + elif isinstance(term, (chebyshevt, chebyshevu, + legendre, hermite, laguerre)): + return [term.args[1]] + elif isinstance(term, (gegenbauer, assoc_laguerre)): + return [term.args[2]] + elif isinstance(term, jacobi): + return [term.args[3]] + elif isinstance(term, Mul): + r = [] + for u in term.args: + r.append(u) + r.extend(possible_subterms(u)) + return r + elif isinstance(term, Pow): + r 
= [arg for arg in term.args if arg.has(symbol)] + if term.exp.is_Integer: + r.extend([term.base**d for d in primefactors(term.exp) + if 1 < d < abs(term.args[1])]) + if term.base.is_Add: + r.extend([t for t in possible_subterms(term.base) + if t.is_Pow]) + return r + elif isinstance(term, Add): + r = [] + for arg in term.args: + r.append(arg) + r.extend(possible_subterms(arg)) + return r + return [] + + for u in list(dict.fromkeys(possible_subterms(integrand) + exp_subterms(integrand))): + if u == symbol: + continue + u_diff = manual_diff(u, symbol) + new_integrand = test_subterm(u, u_diff) + if new_integrand is not False: + constant, new_integrand = new_integrand + if new_integrand == integrand.subs(symbol, u_var): + continue + substitution = (u, constant, new_integrand) + if substitution not in results: + results.append(substitution) + + return results + +def rewriter(condition, rewrite): + """Strategy that rewrites an integrand.""" + def _rewriter(integral): + integrand, symbol = integral + debug("Integral: {} is rewritten with {} on symbol: {}".format(integrand, rewrite, symbol)) + if condition(*integral): + rewritten = rewrite(*integral) + if rewritten != integrand: + substep = integral_steps(rewritten, symbol) + if not isinstance(substep, DontKnowRule) and substep: + return RewriteRule(integrand, symbol, rewritten, substep) + return _rewriter + +def proxy_rewriter(condition, rewrite): + """Strategy that rewrites an integrand based on some other criteria.""" + def _proxy_rewriter(criteria): + criteria, integral = criteria + integrand, symbol = integral + debug("Integral: {} is rewritten with {} on symbol: {} and criteria: {}".format(integrand, rewrite, symbol, criteria)) + args = criteria + list(integral) + if condition(*args): + rewritten = rewrite(*args) + if rewritten != integrand: + return RewriteRule(integrand, symbol, rewritten, integral_steps(rewritten, symbol)) + return _proxy_rewriter + +def multiplexer(conditions): + """Apply the rule that matches the condition, else None""" + def multiplexer_rl(expr): + for key, rule in conditions.items(): + if key(expr): + return rule(expr) + return multiplexer_rl + +def alternatives(*rules): + """Strategy that makes an AlternativeRule out of multiple possible results.""" + def _alternatives(integral): + alts = [] + count = 0 + debug("List of Alternative Rules") + for rule in rules: + count = count + 1 + debug("Rule {}: {}".format(count, rule)) + + result = rule(integral) + if (result and not isinstance(result, DontKnowRule) and + result != integral and result not in alts): + alts.append(result) + if len(alts) == 1: + return alts[0] + elif alts: + doable = [rule for rule in alts if not rule.contains_dont_know()] + if doable: + return AlternativeRule(*integral, doable) + else: + return AlternativeRule(*integral, alts) + return _alternatives + +def constant_rule(integral): + return ConstantRule(*integral) + +def power_rule(integral): + integrand, symbol = integral + base, expt = integrand.as_base_exp() + + if symbol not in expt.free_symbols and isinstance(base, Symbol): + if simplify(expt + 1) == 0: + return ReciprocalRule(integrand, symbol, base) + return PowerRule(integrand, symbol, base, expt) + elif symbol not in base.free_symbols and isinstance(expt, Symbol): + rule = ExpRule(integrand, symbol, base, expt) + + if fuzzy_not(log(base).is_zero): + return rule + elif log(base).is_zero: + return ConstantRule(1, symbol) + + return PiecewiseRule(integrand, symbol, [ + (rule, Ne(log(base), 0)), + (ConstantRule(1, symbol), True) + ]) + +def 
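find_substitutions proposes candidate u-functions and test_subterm keeps only those for which the integrand divided by du becomes free of the original symbol. A short sketch of how that machinery shows up in practice (module path as in this file):

from sympy import Symbol, exp
from sympy.integrals.manualintegrate import integral_steps, manualintegrate

x = Symbol('x')

# For x*exp(x**2) the candidate u = x**2 works: the rule tree is expected to
# contain a URule, and the antiderivative should be equivalent to exp(x**2)/2.
print(integral_steps(x*exp(x**2), x))
print(manualintegrate(x*exp(x**2), x))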
exp_rule(integral): + integrand, symbol = integral + if isinstance(integrand.args[0], Symbol): + return ExpRule(integrand, symbol, E, integrand.args[0]) + + +def orthogonal_poly_rule(integral): + orthogonal_poly_classes = { + jacobi: JacobiRule, + gegenbauer: GegenbauerRule, + chebyshevt: ChebyshevTRule, + chebyshevu: ChebyshevURule, + legendre: LegendreRule, + hermite: HermiteRule, + laguerre: LaguerreRule, + assoc_laguerre: AssocLaguerreRule + } + orthogonal_poly_var_index = { + jacobi: 3, + gegenbauer: 2, + assoc_laguerre: 2 + } + integrand, symbol = integral + for klass in orthogonal_poly_classes: + if isinstance(integrand, klass): + var_index = orthogonal_poly_var_index.get(klass, 1) + if (integrand.args[var_index] is symbol and not + any(v.has(symbol) for v in integrand.args[:var_index])): + return orthogonal_poly_classes[klass](integrand, symbol, *integrand.args[:var_index]) + + +_special_function_patterns: list[tuple[Type, Expr, Callable | None, tuple]] = [] +_wilds = [] +_symbol = Dummy('x') + + +def special_function_rule(integral): + integrand, symbol = integral + if not _special_function_patterns: + a = Wild('a', exclude=[_symbol], properties=[lambda x: not x.is_zero]) + b = Wild('b', exclude=[_symbol]) + c = Wild('c', exclude=[_symbol]) + d = Wild('d', exclude=[_symbol], properties=[lambda x: not x.is_zero]) + e = Wild('e', exclude=[_symbol], properties=[ + lambda x: not (x.is_nonnegative and x.is_integer)]) + _wilds.extend((a, b, c, d, e)) + # patterns consist of a SymPy class, a wildcard expr, an optional + # condition coded as a lambda (when Wild properties are not enough), + # followed by an applicable rule + linear_pattern = a*_symbol + b + quadratic_pattern = a*_symbol**2 + b*_symbol + c + _special_function_patterns.extend(( + (Mul, exp(linear_pattern, evaluate=False)/_symbol, None, EiRule), + (Mul, cos(linear_pattern, evaluate=False)/_symbol, None, CiRule), + (Mul, cosh(linear_pattern, evaluate=False)/_symbol, None, ChiRule), + (Mul, sin(linear_pattern, evaluate=False)/_symbol, None, SiRule), + (Mul, sinh(linear_pattern, evaluate=False)/_symbol, None, ShiRule), + (Pow, 1/log(linear_pattern, evaluate=False), None, LiRule), + (exp, exp(quadratic_pattern, evaluate=False), None, ErfRule), + (sin, sin(quadratic_pattern, evaluate=False), None, FresnelSRule), + (cos, cos(quadratic_pattern, evaluate=False), None, FresnelCRule), + (Mul, _symbol**e*exp(a*_symbol, evaluate=False), None, UpperGammaRule), + (Mul, polylog(b, a*_symbol, evaluate=False)/_symbol, None, PolylogRule), + (Pow, 1/sqrt(a - d*sin(_symbol, evaluate=False)**2), + lambda a, d: a != d, EllipticFRule), + (Pow, sqrt(a - d*sin(_symbol, evaluate=False)**2), + lambda a, d: a != d, EllipticERule), + )) + _integrand = integrand.subs(symbol, _symbol) + for type_, pattern, constraint, rule in _special_function_patterns: + if isinstance(_integrand, type_): + match = _integrand.match(pattern) + if match: + wild_vals = tuple(match.get(w) for w in _wilds + if match.get(w) is not None) + if constraint is None or constraint(*wild_vals): + return rule(integrand, symbol, *wild_vals) + + +def _add_degenerate_step(generic_cond, generic_step: Rule, degenerate_step: Rule | None) -> Rule: + if degenerate_step is None: + return generic_step + if isinstance(generic_step, PiecewiseRule): + subfunctions = [(substep, (cond & generic_cond).simplify()) + for substep, cond in generic_step.subfunctions] + else: + subfunctions = [(generic_step, generic_cond)] + if isinstance(degenerate_step, PiecewiseRule): + subfunctions += 
degenerate_step.subfunctions + else: + subfunctions.append((degenerate_step, S.true)) + return PiecewiseRule(generic_step.integrand, generic_step.variable, subfunctions) + + +def nested_pow_rule(integral: IntegralInfo): + # nested (c*(a+b*x)**d)**e + integrand, x = integral + + a_ = Wild('a', exclude=[x]) + b_ = Wild('b', exclude=[x, 0]) + pattern = a_+b_*x + generic_cond = S.true + + class NoMatch(Exception): + pass + + def _get_base_exp(expr: Expr) -> tuple[Expr, Expr]: + if not expr.has_free(x): + return S.One, S.Zero + if expr.is_Mul: + _, terms = expr.as_coeff_mul() + if not terms: + return S.One, S.Zero + results = [_get_base_exp(term) for term in terms] + bases = {b for b, _ in results} + bases.discard(S.One) + if len(bases) == 1: + return bases.pop(), Add(*(e for _, e in results)) + raise NoMatch + if expr.is_Pow: + b, e = expr.base, expr.exp # type: ignore + if e.has_free(x): + raise NoMatch + base_, sub_exp = _get_base_exp(b) + return base_, sub_exp * e + match = expr.match(pattern) + if match: + a, b = match[a_], match[b_] + base_ = x + a/b + nonlocal generic_cond + generic_cond = Ne(b, 0) + return base_, S.One + raise NoMatch + + try: + base, exp_ = _get_base_exp(integrand) + except NoMatch: + return + if generic_cond is S.true: + degenerate_step = None + else: + # equivalent with subs(b, 0) but no need to find b + degenerate_step = ConstantRule(integrand.subs(x, 0), x) + generic_step = NestedPowRule(integrand, x, base, exp_) + return _add_degenerate_step(generic_cond, generic_step, degenerate_step) + + +def inverse_trig_rule(integral: IntegralInfo, degenerate=True): + """ + Set degenerate=False on recursive call where coefficient of quadratic term + is assumed non-zero. + """ + integrand, symbol = integral + base, exp = integrand.as_base_exp() + a = Wild('a', exclude=[symbol]) + b = Wild('b', exclude=[symbol]) + c = Wild('c', exclude=[symbol, 0]) + match = base.match(a + b*symbol + c*symbol**2) + + if not match: + return + + def make_inverse_trig(RuleClass, a, sign_a, c, sign_c, h) -> Rule: + u_var = Dummy("u") + rewritten = 1/sqrt(sign_a*a + sign_c*c*(symbol-h)**2) # a>0, c>0 + quadratic_base = sqrt(c/a)*(symbol-h) + constant = 1/sqrt(c) + u_func = None + if quadratic_base is not symbol: + u_func = quadratic_base + quadratic_base = u_var + standard_form = 1/sqrt(sign_a + sign_c*quadratic_base**2) + substep = RuleClass(standard_form, quadratic_base) + if constant != 1: + substep = ConstantTimesRule(constant*standard_form, symbol, constant, standard_form, substep) + if u_func is not None: + substep = URule(rewritten, symbol, u_var, u_func, substep) + if h != 0: + substep = CompleteSquareRule(integrand, symbol, rewritten, substep) + return substep + + a, b, c = [match.get(i, S.Zero) for i in (a, b, c)] + generic_cond = Ne(c, 0) + if not degenerate or generic_cond is S.true: + degenerate_step = None + elif b.is_zero: + degenerate_step = ConstantRule(a ** exp, symbol) + else: + degenerate_step = sqrt_linear_rule(IntegralInfo((a + b * symbol) ** exp, symbol)) + + if simplify(2*exp + 1) == 0: + h, k = -b/(2*c), a - b**2/(4*c) # rewrite base to k + c*(symbol-h)**2 + non_square_cond = Ne(k, 0) + square_step = None + if non_square_cond is not S.true: + square_step = NestedPowRule(1/sqrt(c*(symbol-h)**2), symbol, symbol-h, S.NegativeOne) + if non_square_cond is S.false: + return square_step + generic_step = ReciprocalSqrtQuadraticRule(integrand, symbol, a, b, c) + step = _add_degenerate_step(non_square_cond, generic_step, square_step) + if k.is_real and c.is_real: + # list of ((rule, 
base_exp, a, sign_a, b, sign_b), condition) + rules = [] + for args, cond in ( # don't apply ArccoshRule to x**2-1 + ((ArcsinRule, k, 1, -c, -1, h), And(k > 0, c < 0)), # 1-x**2 + ((ArcsinhRule, k, 1, c, 1, h), And(k > 0, c > 0)), # 1+x**2 + ): + if cond is S.true: + return make_inverse_trig(*args) + if cond is not S.false: + rules.append((make_inverse_trig(*args), cond)) + if rules: + if not k.is_positive: # conditions are not thorough, need fall back rule + rules.append((generic_step, S.true)) + step = PiecewiseRule(integrand, symbol, rules) + else: + step = generic_step + return _add_degenerate_step(generic_cond, step, degenerate_step) + if exp == S.Half: + step = SqrtQuadraticRule(integrand, symbol, a, b, c) + return _add_degenerate_step(generic_cond, step, degenerate_step) + + +def add_rule(integral): + integrand, symbol = integral + results = [integral_steps(g, symbol) + for g in integrand.as_ordered_terms()] + return None if None in results else AddRule(integrand, symbol, results) + + +def mul_rule(integral: IntegralInfo): + integrand, symbol = integral + + # Constant times function case + coeff, f = integrand.as_independent(symbol) + if coeff != 1: + next_step = integral_steps(f, symbol) + if next_step is not None: + return ConstantTimesRule(integrand, symbol, coeff, f, next_step) + + +def _parts_rule(integrand, symbol) -> tuple[Expr, Expr, Expr, Expr, Rule] | None: + # LIATE rule: + # log, inverse trig, algebraic, trigonometric, exponential + def pull_out_algebraic(integrand): + integrand = integrand.cancel().together() + # iterating over Piecewise args would not work here + algebraic = ([] if isinstance(integrand, Piecewise) or not integrand.is_Mul + else [arg for arg in integrand.args if arg.is_algebraic_expr(symbol)]) + if algebraic: + u = Mul(*algebraic) + dv = (integrand / u).cancel() + return u, dv + + def pull_out_u(*functions) -> Callable[[Expr], tuple[Expr, Expr] | None]: + def pull_out_u_rl(integrand: Expr) -> tuple[Expr, Expr] | None: + if any(integrand.has(f) for f in functions): + args = [arg for arg in integrand.args + if any(isinstance(arg, cls) for cls in functions)] + if args: + u = Mul(*args) + dv = integrand / u + return u, dv + return None + + return pull_out_u_rl + + liate_rules = [pull_out_u(log), pull_out_u(*inverse_trig_functions), + pull_out_algebraic, pull_out_u(sin, cos), + pull_out_u(exp)] + + + dummy = Dummy("temporary") + # we can integrate log(x) and atan(x) by setting dv = 1 + if isinstance(integrand, (log, *inverse_trig_functions)): + integrand = dummy * integrand + + for index, rule in enumerate(liate_rules): + result = rule(integrand) + + if result: + u, dv = result + + # Don't pick u to be a constant if possible + if symbol not in u.free_symbols and not u.has(dummy): + return None + + u = u.subs(dummy, 1) + dv = dv.subs(dummy, 1) + + # Don't pick a non-polynomial algebraic to be differentiated + if rule == pull_out_algebraic and not u.is_polynomial(symbol): + return None + # Don't trade one logarithm for another + if isinstance(u, log): + rec_dv = 1/dv + if (rec_dv.is_polynomial(symbol) and + degree(rec_dv, symbol) == 1): + return None + + # Can integrate a polynomial times OrthogonalPolynomial + if rule == pull_out_algebraic: + if dv.is_Derivative or dv.has(TrigonometricFunction) or \ + isinstance(dv, OrthogonalPolynomial): + v_step = integral_steps(dv, symbol) + if v_step.contains_dont_know(): + return None + else: + du = u.diff(symbol) + v = v_step.eval() + return u, dv, v, du, v_step + + # make sure dv is amenable to integration + accept = 
False + if index < 2: # log and inverse trig are usually worth trying + accept = True + elif (rule == pull_out_algebraic and dv.args and + all(isinstance(a, (sin, cos, exp)) + for a in dv.args)): + accept = True + else: + for lrule in liate_rules[index + 1:]: + r = lrule(integrand) + if r and r[0].subs(dummy, 1).equals(dv): + accept = True + break + + if accept: + du = u.diff(symbol) + v_step = integral_steps(simplify(dv), symbol) + if not v_step.contains_dont_know(): + v = v_step.eval() + return u, dv, v, du, v_step + return None + + +def parts_rule(integral): + integrand, symbol = integral + constant, integrand = integrand.as_coeff_Mul() + + result = _parts_rule(integrand, symbol) + + steps = [] + if result: + u, dv, v, du, v_step = result + debug("u : {}, dv : {}, v : {}, du : {}, v_step: {}".format(u, dv, v, du, v_step)) + steps.append(result) + + if isinstance(v, Integral): + return + + # Set a limit on the number of times u can be used + if isinstance(u, (sin, cos, exp, sinh, cosh)): + cachekey = u.xreplace({symbol: _cache_dummy}) + if _parts_u_cache[cachekey] > 2: + return + _parts_u_cache[cachekey] += 1 + + # Try cyclic integration by parts a few times + for _ in range(4): + debug("Cyclic integration {} with v: {}, du: {}, integrand: {}".format(_, v, du, integrand)) + coefficient = ((v * du) / integrand).cancel() + if coefficient == 1: + break + if symbol not in coefficient.free_symbols: + rule = CyclicPartsRule(integrand, symbol, + [PartsRule(None, None, u, dv, v_step, None) + for (u, dv, v, du, v_step) in steps], + (-1) ** len(steps) * coefficient) + if (constant != 1) and rule: + rule = ConstantTimesRule(constant * integrand, symbol, constant, integrand, rule) + return rule + + # _parts_rule is sensitive to constants, factor it out + next_constant, next_integrand = (v * du).as_coeff_Mul() + result = _parts_rule(next_integrand, symbol) + + if result: + u, dv, v, du, v_step = result + u *= next_constant + du *= next_constant + steps.append((u, dv, v, du, v_step)) + else: + break + + def make_second_step(steps, integrand): + if steps: + u, dv, v, du, v_step = steps[0] + return PartsRule(integrand, symbol, u, dv, v_step, make_second_step(steps[1:], v * du)) + return integral_steps(integrand, symbol) + + if steps: + u, dv, v, du, v_step = steps[0] + rule = PartsRule(integrand, symbol, u, dv, v_step, make_second_step(steps[1:], v * du)) + if (constant != 1) and rule: + rule = ConstantTimesRule(constant * integrand, symbol, constant, integrand, rule) + return rule + + +def trig_rule(integral): + integrand, symbol = integral + if integrand == sin(symbol): + return SinRule(integrand, symbol) + if integrand == cos(symbol): + return CosRule(integrand, symbol) + if integrand == sec(symbol)**2: + return Sec2Rule(integrand, symbol) + if integrand == csc(symbol)**2: + return Csc2Rule(integrand, symbol) + + if isinstance(integrand, tan): + rewritten = sin(*integrand.args) / cos(*integrand.args) + elif isinstance(integrand, cot): + rewritten = cos(*integrand.args) / sin(*integrand.args) + elif isinstance(integrand, sec): + arg = integrand.args[0] + rewritten = ((sec(arg)**2 + tan(arg) * sec(arg)) / + (sec(arg) + tan(arg))) + elif isinstance(integrand, csc): + arg = integrand.args[0] + rewritten = ((csc(arg)**2 + cot(arg) * csc(arg)) / + (csc(arg) + cot(arg))) + else: + return + + return RewriteRule(integrand, symbol, rewritten, integral_steps(rewritten, symbol)) + +def trig_product_rule(integral: IntegralInfo): + integrand, symbol = integral + if integrand == sec(symbol) * tan(symbol): + return 
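_parts_rule ranks candidates for u by the LIATE heuristic (log, inverse trig, algebraic, trigonometric, exponential). A small sketch of the resulting split:

from sympy import Symbol, log
from sympy.integrals.manualintegrate import manualintegrate

x = Symbol('x')

# LIATE puts log ahead of algebraic, so u = log(x) and dv = x is the expected
# choice, which should give x**2*log(x)/2 - x**2/4.
print(manualintegrate(x*log(x), x))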
SecTanRule(integrand, symbol) + if integrand == csc(symbol) * cot(symbol): + return CscCotRule(integrand, symbol) + + +def quadratic_denom_rule(integral): + integrand, symbol = integral + a = Wild('a', exclude=[symbol]) + b = Wild('b', exclude=[symbol]) + c = Wild('c', exclude=[symbol]) + + match = integrand.match(a / (b * symbol ** 2 + c)) + + if match: + a, b, c = match[a], match[b], match[c] + general_rule = ArctanRule(integrand, symbol, a, b, c) + if b.is_extended_real and c.is_extended_real: + positive_cond = c/b > 0 + if positive_cond is S.true: + return general_rule + coeff = a/(2*sqrt(-c)*sqrt(b)) + constant = sqrt(-c/b) + r1 = 1/(symbol-constant) + r2 = 1/(symbol+constant) + log_steps = [ReciprocalRule(r1, symbol, symbol-constant), + ConstantTimesRule(-r2, symbol, -1, r2, ReciprocalRule(r2, symbol, symbol+constant))] + rewritten = sub = r1 - r2 + negative_step = AddRule(sub, symbol, log_steps) + if coeff != 1: + rewritten = Mul(coeff, sub, evaluate=False) + negative_step = ConstantTimesRule(rewritten, symbol, coeff, sub, negative_step) + negative_step = RewriteRule(integrand, symbol, rewritten, negative_step) + if positive_cond is S.false: + return negative_step + return PiecewiseRule(integrand, symbol, [(general_rule, positive_cond), (negative_step, S.true)]) + return general_rule + + d = Wild('d', exclude=[symbol]) + match2 = integrand.match(a / (b * symbol ** 2 + c * symbol + d)) + if match2: + b, c = match2[b], match2[c] + if b.is_zero: + return + u = Dummy('u') + u_func = symbol + c/(2*b) + integrand2 = integrand.subs(symbol, u - c / (2*b)) + next_step = integral_steps(integrand2, u) + if next_step: + return URule(integrand2, symbol, u, u_func, next_step) + else: + return + e = Wild('e', exclude=[symbol]) + match3 = integrand.match((a* symbol + b) / (c * symbol ** 2 + d * symbol + e)) + if match3: + a, b, c, d, e = match3[a], match3[b], match3[c], match3[d], match3[e] + if c.is_zero: + return + denominator = c * symbol**2 + d * symbol + e + const = a/(2*c) + numer1 = (2*c*symbol+d) + numer2 = - const*d + b + u = Dummy('u') + step1 = URule(integrand, symbol, + u, denominator, integral_steps(u**(-1), u)) + if const != 1: + step1 = ConstantTimesRule(const*numer1/denominator, symbol, + const, numer1/denominator, step1) + if numer2.is_zero: + return step1 + step2 = integral_steps(numer2/denominator, symbol) + substeps = AddRule(integrand, symbol, [step1, step2]) + rewriten = const*numer1/denominator+numer2/denominator + return RewriteRule(integrand, symbol, rewriten, substeps) + + return + + +def sqrt_linear_rule(integral: IntegralInfo): + """ + Substitute common (a+b*x)**(1/n) + """ + integrand, x = integral + a = Wild('a', exclude=[x]) + b = Wild('b', exclude=[x, 0]) + a0 = b0 = 0 + bases, qs, bs = [], [], [] + for pow_ in integrand.find(Pow): # collect all (a+b*x)**(p/q) + base, exp_ = pow_.base, pow_.exp + if exp_.is_Integer or x not in base.free_symbols: # skip 1/x and sqrt(2) + continue + if not exp_.is_Rational: # exclude x**pi + return + match = base.match(a+b*x) + if not match: # skip non-linear + continue # for sqrt(x+sqrt(x)), although base is non-linear, we can still substitute sqrt(x) + a1, b1 = match[a], match[b] + if a0*b1 != a1*b0 or not (b0/b1).is_nonnegative: # cannot transform sqrt(x) to sqrt(x+1) or sqrt(-x) + return + if b0 == 0 or (b0/b1 > 1) is S.true: # choose the latter of sqrt(2*x) and sqrt(x) as representative + a0, b0 = a1, b1 + bases.append(base) + bs.append(b1) + qs.append(exp_.q) + if b0 == 0: # no such pattern found + return + q0: Integer = 
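quadratic_denom_rule sends a/(b*x**2 + c) with c/b > 0 to ArctanRule and otherwise rewrites the integrand as a difference of reciprocals. A brief sketch of the arctangent branch:

from sympy import Symbol
from sympy.integrals.manualintegrate import manualintegrate

x = Symbol('x')

# Here b = 1 and c = 4, so c/b > 0 and ArctanRule applies:
# a/b/sqrt(c/b)*atan(x/sqrt(c/b)), i.e. atan(x/2)/2.
print(manualintegrate(1/(x**2 + 4), x))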
lcm_list(qs) + u_x = (a0 + b0*x)**(1/q0) + u = Dummy("u") + substituted = integrand.subs({base**(S.One/q): (b/b0)**(S.One/q)*u**(q0/q) + for base, b, q in zip(bases, bs, qs)}).subs(x, (u**q0-a0)/b0) + substep = integral_steps(substituted*u**(q0-1)*q0/b0, u) + if not substep.contains_dont_know(): + step: Rule = URule(integrand, x, u, u_x, substep) + generic_cond = Ne(b0, 0) + if generic_cond is not S.true: # possible degenerate case + simplified = integrand.subs({b: 0 for b in bs}) + degenerate_step = integral_steps(simplified, x) + step = PiecewiseRule(integrand, x, [(step, generic_cond), (degenerate_step, S.true)]) + return step + + +def sqrt_quadratic_rule(integral: IntegralInfo, degenerate=True): + integrand, x = integral + a = Wild('a', exclude=[x]) + b = Wild('b', exclude=[x]) + c = Wild('c', exclude=[x, 0]) + f = Wild('f') + n = Wild('n', properties=[lambda n: n.is_Integer and n.is_odd]) + match = integrand.match(f*sqrt(a+b*x+c*x**2)**n) + if not match: + return + a, b, c, f, n = match[a], match[b], match[c], match[f], match[n] + f_poly = f.as_poly(x) + if f_poly is None: + return + + generic_cond = Ne(c, 0) + if not degenerate or generic_cond is S.true: + degenerate_step = None + elif b.is_zero: + degenerate_step = integral_steps(f*sqrt(a)**n, x) + else: + degenerate_step = sqrt_linear_rule(IntegralInfo(f*sqrt(a+b*x)**n, x)) + + def sqrt_quadratic_denom_rule(numer_poly: Poly, integrand: Expr): + denom = sqrt(a+b*x+c*x**2) + deg = numer_poly.degree() + if deg <= 1: + # integrand == (d+e*x)/sqrt(a+b*x+c*x**2) + e, d = numer_poly.all_coeffs() if deg == 1 else (S.Zero, numer_poly.as_expr()) + # rewrite numerator to A*(2*c*x+b) + B + A = e/(2*c) + B = d-A*b + pre_substitute = (2*c*x+b)/denom + constant_step: Rule | None = None + linear_step: Rule | None = None + if A != 0: + u = Dummy("u") + pow_rule = PowerRule(1/sqrt(u), u, u, -S.Half) + linear_step = URule(pre_substitute, x, u, a+b*x+c*x**2, pow_rule) + if A != 1: + linear_step = ConstantTimesRule(A*pre_substitute, x, A, pre_substitute, linear_step) + if B != 0: + constant_step = inverse_trig_rule(IntegralInfo(1/denom, x), degenerate=False) + if B != 1: + constant_step = ConstantTimesRule(B/denom, x, B, 1/denom, constant_step) # type: ignore + if linear_step and constant_step: + add = Add(A*pre_substitute, B/denom, evaluate=False) + step: Rule | None = RewriteRule(integrand, x, add, AddRule(add, x, [linear_step, constant_step])) + else: + step = linear_step or constant_step + else: + coeffs = numer_poly.all_coeffs() + step = SqrtQuadraticDenomRule(integrand, x, a, b, c, coeffs) + return step + + if n > 0: # rewrite poly * sqrt(s)**(2*k-1) to poly*s**k / sqrt(s) + numer_poly = f_poly * (a+b*x+c*x**2)**((n+1)/2) + rewritten = numer_poly.as_expr()/sqrt(a+b*x+c*x**2) + substep = sqrt_quadratic_denom_rule(numer_poly, rewritten) + generic_step = RewriteRule(integrand, x, rewritten, substep) + elif n == -1: + generic_step = sqrt_quadratic_denom_rule(f_poly, integrand) + else: + return # todo: handle n < -1 case + return _add_degenerate_step(generic_cond, generic_step, degenerate_step) + + +def hyperbolic_rule(integral: tuple[Expr, Symbol]): + integrand, symbol = integral + if isinstance(integrand, HyperbolicFunction) and integrand.args[0] == symbol: + if integrand.func == sinh: + return SinhRule(integrand, symbol) + if integrand.func == cosh: + return CoshRule(integrand, symbol) + u = Dummy('u') + if integrand.func == tanh: + rewritten = sinh(symbol)/cosh(symbol) + return RewriteRule(integrand, symbol, rewritten, + URule(rewritten, symbol, 
u, cosh(symbol), ReciprocalRule(1/u, u, u))) + if integrand.func == coth: + rewritten = cosh(symbol)/sinh(symbol) + return RewriteRule(integrand, symbol, rewritten, + URule(rewritten, symbol, u, sinh(symbol), ReciprocalRule(1/u, u, u))) + else: + rewritten = integrand.rewrite(tanh) + if integrand.func == sech: + return RewriteRule(integrand, symbol, rewritten, + URule(rewritten, symbol, u, tanh(symbol/2), + ArctanRule(2/(u**2 + 1), u, S(2), S.One, S.One))) + if integrand.func == csch: + return RewriteRule(integrand, symbol, rewritten, + URule(rewritten, symbol, u, tanh(symbol/2), + ReciprocalRule(1/u, u, u))) + +@cacheit +def make_wilds(symbol): + a = Wild('a', exclude=[symbol]) + b = Wild('b', exclude=[symbol]) + m = Wild('m', exclude=[symbol], properties=[lambda n: isinstance(n, Integer)]) + n = Wild('n', exclude=[symbol], properties=[lambda n: isinstance(n, Integer)]) + + return a, b, m, n + +@cacheit +def sincos_pattern(symbol): + a, b, m, n = make_wilds(symbol) + pattern = sin(a*symbol)**m * cos(b*symbol)**n + + return pattern, a, b, m, n + +@cacheit +def tansec_pattern(symbol): + a, b, m, n = make_wilds(symbol) + pattern = tan(a*symbol)**m * sec(b*symbol)**n + + return pattern, a, b, m, n + +@cacheit +def cotcsc_pattern(symbol): + a, b, m, n = make_wilds(symbol) + pattern = cot(a*symbol)**m * csc(b*symbol)**n + + return pattern, a, b, m, n + +@cacheit +def heaviside_pattern(symbol): + m = Wild('m', exclude=[symbol]) + b = Wild('b', exclude=[symbol]) + g = Wild('g') + pattern = Heaviside(m*symbol + b) * g + + return pattern, m, b, g + +def uncurry(func): + def uncurry_rl(args): + return func(*args) + return uncurry_rl + +def trig_rewriter(rewrite): + def trig_rewriter_rl(args): + a, b, m, n, integrand, symbol = args + rewritten = rewrite(a, b, m, n, integrand, symbol) + if rewritten != integrand: + return RewriteRule(integrand, symbol, rewritten, integral_steps(rewritten, symbol)) + return trig_rewriter_rl + +sincos_botheven_condition = uncurry( + lambda a, b, m, n, i, s: m.is_even and n.is_even and + m.is_nonnegative and n.is_nonnegative) + +sincos_botheven = trig_rewriter( + lambda a, b, m, n, i, symbol: ( (((1 - cos(2*a*symbol)) / 2) ** (m / 2)) * + (((1 + cos(2*b*symbol)) / 2) ** (n / 2)) )) + +sincos_sinodd_condition = uncurry(lambda a, b, m, n, i, s: m.is_odd and m >= 3) + +sincos_sinodd = trig_rewriter( + lambda a, b, m, n, i, symbol: ( (1 - cos(a*symbol)**2)**((m - 1) / 2) * + sin(a*symbol) * + cos(b*symbol) ** n)) + +sincos_cosodd_condition = uncurry(lambda a, b, m, n, i, s: n.is_odd and n >= 3) + +sincos_cosodd = trig_rewriter( + lambda a, b, m, n, i, symbol: ( (1 - sin(b*symbol)**2)**((n - 1) / 2) * + cos(b*symbol) * + sin(a*symbol) ** m)) + +tansec_seceven_condition = uncurry(lambda a, b, m, n, i, s: n.is_even and n >= 4) +tansec_seceven = trig_rewriter( + lambda a, b, m, n, i, symbol: ( (1 + tan(b*symbol)**2) ** (n/2 - 1) * + sec(b*symbol)**2 * + tan(a*symbol) ** m )) + +tansec_tanodd_condition = uncurry(lambda a, b, m, n, i, s: m.is_odd) +tansec_tanodd = trig_rewriter( + lambda a, b, m, n, i, symbol: ( (sec(a*symbol)**2 - 1) ** ((m - 1) / 2) * + tan(a*symbol) * + sec(b*symbol) ** n )) + +tan_tansquared_condition = uncurry(lambda a, b, m, n, i, s: m == 2 and n == 0) +tan_tansquared = trig_rewriter( + lambda a, b, m, n, i, symbol: ( sec(a*symbol)**2 - 1)) + +cotcsc_csceven_condition = uncurry(lambda a, b, m, n, i, s: n.is_even and n >= 4) +cotcsc_csceven = trig_rewriter( + lambda a, b, m, n, i, symbol: ( (1 + cot(b*symbol)**2) ** (n/2 - 1) * + csc(b*symbol)**2 * + 
cot(a*symbol) ** m )) + +cotcsc_cotodd_condition = uncurry(lambda a, b, m, n, i, s: m.is_odd) +cotcsc_cotodd = trig_rewriter( + lambda a, b, m, n, i, symbol: ( (csc(a*symbol)**2 - 1) ** ((m - 1) / 2) * + cot(a*symbol) * + csc(b*symbol) ** n )) + +def trig_sincos_rule(integral): + integrand, symbol = integral + + if any(integrand.has(f) for f in (sin, cos)): + pattern, a, b, m, n = sincos_pattern(symbol) + match = integrand.match(pattern) + if not match: + return + + return multiplexer({ + sincos_botheven_condition: sincos_botheven, + sincos_sinodd_condition: sincos_sinodd, + sincos_cosodd_condition: sincos_cosodd + })(tuple( + [match.get(i, S.Zero) for i in (a, b, m, n)] + + [integrand, symbol])) + +def trig_tansec_rule(integral): + integrand, symbol = integral + + integrand = integrand.subs({ + 1 / cos(symbol): sec(symbol) + }) + + if any(integrand.has(f) for f in (tan, sec)): + pattern, a, b, m, n = tansec_pattern(symbol) + match = integrand.match(pattern) + if not match: + return + + return multiplexer({ + tansec_tanodd_condition: tansec_tanodd, + tansec_seceven_condition: tansec_seceven, + tan_tansquared_condition: tan_tansquared + })(tuple( + [match.get(i, S.Zero) for i in (a, b, m, n)] + + [integrand, symbol])) + +def trig_cotcsc_rule(integral): + integrand, symbol = integral + integrand = integrand.subs({ + 1 / sin(symbol): csc(symbol), + 1 / tan(symbol): cot(symbol), + cos(symbol) / tan(symbol): cot(symbol) + }) + + if any(integrand.has(f) for f in (cot, csc)): + pattern, a, b, m, n = cotcsc_pattern(symbol) + match = integrand.match(pattern) + if not match: + return + + return multiplexer({ + cotcsc_cotodd_condition: cotcsc_cotodd, + cotcsc_csceven_condition: cotcsc_csceven + })(tuple( + [match.get(i, S.Zero) for i in (a, b, m, n)] + + [integrand, symbol])) + +def trig_sindouble_rule(integral): + integrand, symbol = integral + a = Wild('a', exclude=[sin(2*symbol)]) + match = integrand.match(sin(2*symbol)*a) + if match: + sin_double = 2*sin(symbol)*cos(symbol)/sin(2*symbol) + return integral_steps(integrand * sin_double, symbol) + +def trig_powers_products_rule(integral): + return do_one(null_safe(trig_sincos_rule), + null_safe(trig_tansec_rule), + null_safe(trig_cotcsc_rule), + null_safe(trig_sindouble_rule))(integral) + +def trig_substitution_rule(integral): + integrand, symbol = integral + A = Wild('a', exclude=[0, symbol]) + B = Wild('b', exclude=[0, symbol]) + theta = Dummy("theta") + target_pattern = A + B*symbol**2 + + matches = integrand.find(target_pattern) + for expr in matches: + match = expr.match(target_pattern) + a = match.get(A, S.Zero) + b = match.get(B, S.Zero) + + a_positive = ((a.is_number and a > 0) or a.is_positive) + b_positive = ((b.is_number and b > 0) or b.is_positive) + a_negative = ((a.is_number and a < 0) or a.is_negative) + b_negative = ((b.is_number and b < 0) or b.is_negative) + x_func = None + if a_positive and b_positive: + # a**2 + b*x**2. Assume sec(theta) > 0, -pi/2 < theta < pi/2 + x_func = (sqrt(a)/sqrt(b)) * tan(theta) + # Do not restrict the domain: tan(theta) takes on any real + # value on the interval -pi/2 < theta < pi/2 so x takes on + # any value + restriction = True + elif a_positive and b_negative: + # a**2 - b*x**2. Assume cos(theta) > 0, -pi/2 < theta < pi/2 + constant = sqrt(a)/sqrt(-b) + x_func = constant * sin(theta) + restriction = And(symbol > -constant, symbol < constant) + elif a_negative and b_positive: + # b*x**2 - a**2. 
Assume sin(theta) > 0, 0 < theta < pi + constant = sqrt(-a)/sqrt(b) + x_func = constant * sec(theta) + restriction = And(symbol > -constant, symbol < constant) + if x_func: + # Manually simplify sqrt(trig(theta)**2) to trig(theta) + # Valid due to assumed domain restriction + substitutions = {} + for f in [sin, cos, tan, + sec, csc, cot]: + substitutions[sqrt(f(theta)**2)] = f(theta) + substitutions[sqrt(f(theta)**(-2))] = 1/f(theta) + + replaced = integrand.subs(symbol, x_func).trigsimp() + replaced = manual_subs(replaced, substitutions) + if not replaced.has(symbol): + replaced *= manual_diff(x_func, theta) + replaced = replaced.trigsimp() + secants = replaced.find(1/cos(theta)) + if secants: + replaced = replaced.xreplace({ + 1/cos(theta): sec(theta) + }) + + substep = integral_steps(replaced, theta) + if not substep.contains_dont_know(): + return TrigSubstitutionRule(integrand, symbol, + theta, x_func, replaced, substep, restriction) + +def heaviside_rule(integral): + integrand, symbol = integral + pattern, m, b, g = heaviside_pattern(symbol) + match = integrand.match(pattern) + if match and 0 != match[g]: + # f = Heaviside(m*x + b)*g + substep = integral_steps(match[g], symbol) + m, b = match[m], match[b] + return HeavisideRule(integrand, symbol, m*symbol + b, -b/m, substep) + + +def dirac_delta_rule(integral: IntegralInfo): + integrand, x = integral + if len(integrand.args) == 1: + n = S.Zero + else: + n = integrand.args[1] + if not n.is_Integer or n < 0: + return + a, b = Wild('a', exclude=[x]), Wild('b', exclude=[x, 0]) + match = integrand.args[0].match(a+b*x) + if not match: + return + a, b = match[a], match[b] + generic_cond = Ne(b, 0) + if generic_cond is S.true: + degenerate_step = None + else: + degenerate_step = ConstantRule(DiracDelta(a, n), x) + generic_step = DiracDeltaRule(integrand, x, n, a, b) + return _add_degenerate_step(generic_cond, generic_step, degenerate_step) + + +def substitution_rule(integral): + integrand, symbol = integral + + u_var = Dummy("u") + substitutions = find_substitutions(integrand, symbol, u_var) + count = 0 + if substitutions: + debug("List of Substitution Rules") + ways = [] + for u_func, c, substituted in substitutions: + subrule = integral_steps(substituted, u_var) + count = count + 1 + debug("Rule {}: {}".format(count, subrule)) + + if subrule.contains_dont_know(): + continue + + if simplify(c - 1) != 0: + _, denom = c.as_numer_denom() + if subrule: + subrule = ConstantTimesRule(c * substituted, u_var, c, substituted, subrule) + + if denom.free_symbols: + piecewise = [] + could_be_zero = [] + + if isinstance(denom, Mul): + could_be_zero = denom.args + else: + could_be_zero.append(denom) + + for expr in could_be_zero: + if not fuzzy_not(expr.is_zero): + substep = integral_steps(manual_subs(integrand, expr, 0), symbol) + + if substep: + piecewise.append(( + substep, + Eq(expr, 0) + )) + piecewise.append((subrule, True)) + subrule = PiecewiseRule(substituted, symbol, piecewise) + + ways.append(URule(integrand, symbol, u_var, u_func, subrule)) + + if len(ways) > 1: + return AlternativeRule(integrand, symbol, ways) + elif ways: + return ways[0] + + +partial_fractions_rule = rewriter( + lambda integrand, symbol: integrand.is_rational_function(), + lambda integrand, symbol: integrand.apart(symbol)) + +cancel_rule = rewriter( + # lambda integrand, symbol: integrand.is_algebraic_expr(), + # lambda integrand, symbol: isinstance(integrand, Mul), + lambda integrand, symbol: True, + lambda integrand, symbol: integrand.cancel()) + +distribute_expand_rule = 
rewriter( + lambda integrand, symbol: ( + all(arg.is_Pow or arg.is_polynomial(symbol) for arg in integrand.args) + or isinstance(integrand, Pow) + or isinstance(integrand, Mul)), + lambda integrand, symbol: integrand.expand()) + +trig_expand_rule = rewriter( + # If there are trig functions with different arguments, expand them + lambda integrand, symbol: ( + len({a.args[0] for a in integrand.atoms(TrigonometricFunction)}) > 1), + lambda integrand, symbol: integrand.expand(trig=True)) + +def derivative_rule(integral): + integrand = integral[0] + diff_variables = integrand.variables + undifferentiated_function = integrand.expr + integrand_variables = undifferentiated_function.free_symbols + + if integral.symbol in integrand_variables: + if integral.symbol in diff_variables: + return DerivativeRule(*integral) + else: + return DontKnowRule(integrand, integral.symbol) + else: + return ConstantRule(*integral) + +def rewrites_rule(integral): + integrand, symbol = integral + + if integrand.match(1/cos(symbol)): + rewritten = integrand.subs(1/cos(symbol), sec(symbol)) + return RewriteRule(integrand, symbol, rewritten, integral_steps(rewritten, symbol)) + +def fallback_rule(integral): + return DontKnowRule(*integral) + +# Cache is used to break cyclic integrals. +# Need to use the same dummy variable in cached expressions for them to match. +# Also record "u" of integration by parts, to avoid infinite repetition. +_integral_cache: dict[Expr, Expr | None] = {} +_parts_u_cache: dict[Expr, int] = defaultdict(int) +_cache_dummy = Dummy("z") + +def integral_steps(integrand, symbol, **options): + """Returns the steps needed to compute an integral. + + Explanation + =========== + + This function attempts to mirror what a student would do by hand as + closely as possible. + + SymPy Gamma uses this to provide a step-by-step explanation of an + integral. The code it uses to format the results of this function can be + found at + https://github.com/sympy/sympy_gamma/blob/master/app/logic/intsteps.py. + + Examples + ======== + + >>> from sympy import exp, sin + >>> from sympy.integrals.manualintegrate import integral_steps + >>> from sympy.abc import x + >>> print(repr(integral_steps(exp(x) / (1 + exp(2 * x)), x))) \ + # doctest: +NORMALIZE_WHITESPACE + URule(integrand=exp(x)/(exp(2*x) + 1), variable=x, u_var=_u, u_func=exp(x), + substep=ArctanRule(integrand=1/(_u**2 + 1), variable=_u, a=1, b=1, c=1)) + >>> print(repr(integral_steps(sin(x), x))) \ + # doctest: +NORMALIZE_WHITESPACE + SinRule(integrand=sin(x), variable=x) + >>> print(repr(integral_steps((x**2 + 3)**2, x))) \ + # doctest: +NORMALIZE_WHITESPACE + RewriteRule(integrand=(x**2 + 3)**2, variable=x, rewritten=x**4 + 6*x**2 + 9, + substep=AddRule(integrand=x**4 + 6*x**2 + 9, variable=x, + substeps=[PowerRule(integrand=x**4, variable=x, base=x, exp=4), + ConstantTimesRule(integrand=6*x**2, variable=x, constant=6, other=x**2, + substep=PowerRule(integrand=x**2, variable=x, base=x, exp=2)), + ConstantRule(integrand=9, variable=x)])) + + Returns + ======= + + rule : Rule + The first step; most rules have substeps that must also be + considered. These substeps can be evaluated using ``manualintegrate`` + to obtain a result. 
+ + """ + cachekey = integrand.xreplace({symbol: _cache_dummy}) + if cachekey in _integral_cache: + if _integral_cache[cachekey] is None: + # Stop this attempt, because it leads around in a loop + return DontKnowRule(integrand, symbol) + else: + # TODO: This is for future development, as currently + # _integral_cache gets no values other than None + return (_integral_cache[cachekey].xreplace(_cache_dummy, symbol), + symbol) + else: + _integral_cache[cachekey] = None + + integral = IntegralInfo(integrand, symbol) + + def key(integral): + integrand = integral.integrand + + if symbol not in integrand.free_symbols: + return Number + for cls in (Symbol, TrigonometricFunction, OrthogonalPolynomial): + if isinstance(integrand, cls): + return cls + return type(integrand) + + def integral_is_subclass(*klasses): + def _integral_is_subclass(integral): + k = key(integral) + return k and issubclass(k, klasses) + return _integral_is_subclass + + result = do_one( + null_safe(special_function_rule), + null_safe(switch(key, { + Pow: do_one(null_safe(power_rule), null_safe(inverse_trig_rule), + null_safe(sqrt_linear_rule), + null_safe(quadratic_denom_rule)), + Symbol: power_rule, + exp: exp_rule, + Add: add_rule, + Mul: do_one(null_safe(mul_rule), null_safe(trig_product_rule), + null_safe(heaviside_rule), null_safe(quadratic_denom_rule), + null_safe(sqrt_linear_rule), + null_safe(sqrt_quadratic_rule)), + Derivative: derivative_rule, + TrigonometricFunction: trig_rule, + Heaviside: heaviside_rule, + DiracDelta: dirac_delta_rule, + OrthogonalPolynomial: orthogonal_poly_rule, + Number: constant_rule + })), + do_one( + null_safe(trig_rule), + null_safe(hyperbolic_rule), + null_safe(alternatives( + rewrites_rule, + substitution_rule, + condition( + integral_is_subclass(Mul, Pow), + partial_fractions_rule), + condition( + integral_is_subclass(Mul, Pow), + cancel_rule), + condition( + integral_is_subclass(Mul, log, + *inverse_trig_functions), + parts_rule), + condition( + integral_is_subclass(Mul, Pow), + distribute_expand_rule), + trig_powers_products_rule, + trig_expand_rule + )), + null_safe(condition(integral_is_subclass(Mul, Pow), nested_pow_rule)), + null_safe(trig_substitution_rule) + ), + fallback_rule)(integral) + del _integral_cache[cachekey] + return result + + +def manualintegrate(f, var): + """manualintegrate(f, var) + + Explanation + =========== + + Compute indefinite integral of a single variable using an algorithm that + resembles what a student would do by hand. + + Unlike :func:`~.integrate`, var can only be a single symbol. 
+ + Examples + ======== + + >>> from sympy import sin, cos, tan, exp, log, integrate + >>> from sympy.integrals.manualintegrate import manualintegrate + >>> from sympy.abc import x + >>> manualintegrate(1 / x, x) + log(x) + >>> integrate(1/x) + log(x) + >>> manualintegrate(log(x), x) + x*log(x) - x + >>> integrate(log(x)) + x*log(x) - x + >>> manualintegrate(exp(x) / (1 + exp(2 * x)), x) + atan(exp(x)) + >>> integrate(exp(x) / (1 + exp(2 * x))) + RootSum(4*_z**2 + 1, Lambda(_i, _i*log(2*_i + exp(x)))) + >>> manualintegrate(cos(x)**4 * sin(x), x) + -cos(x)**5/5 + >>> integrate(cos(x)**4 * sin(x), x) + -cos(x)**5/5 + >>> manualintegrate(cos(x)**4 * sin(x)**3, x) + cos(x)**7/7 - cos(x)**5/5 + >>> integrate(cos(x)**4 * sin(x)**3, x) + cos(x)**7/7 - cos(x)**5/5 + >>> manualintegrate(tan(x), x) + -log(cos(x)) + >>> integrate(tan(x), x) + -log(cos(x)) + + See Also + ======== + + sympy.integrals.integrals.integrate + sympy.integrals.integrals.Integral.doit + sympy.integrals.integrals.Integral + """ + result = integral_steps(f, var).eval() + # Clear the cache of u-parts + _parts_u_cache.clear() + # If we got Piecewise with two parts, put generic first + if isinstance(result, Piecewise) and len(result.args) == 2: + cond = result.args[0][1] + if isinstance(cond, Eq) and result.args[1][1] == True: + result = result.func( + (result.args[1][0], Ne(*cond.args)), + (result.args[0][0], True)) + return result diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/meijerint.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/meijerint.py new file mode 100644 index 0000000000000000000000000000000000000000..0e2ab3e89ab79472a52ea5e84ee28736cbc78e9f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/meijerint.py @@ -0,0 +1,2190 @@ +""" +Integrate functions by rewriting them as Meijer G-functions. + +There are three user-visible functions that can be used by other parts of the +sympy library to solve various integration problems: + +- meijerint_indefinite +- meijerint_definite +- meijerint_inversion + +They can be used to compute, respectively, indefinite integrals, definite +integrals over intervals of the real line, and inverse laplace-type integrals +(from c-I*oo to c+I*oo). See the respective docstrings for details. + +The main references for this are: + +[L] Luke, Y. L. (1969), The Special Functions and Their Approximations, + Volume 1 + +[R] Kelly B. Roach. Meijer G Function Representations. + In: Proceedings of the 1997 International Symposium on Symbolic and + Algebraic Computation, pages 205-211, New York, 1997. ACM. + +[P] A. P. Prudnikov, Yu. A. Brychkov and O. I. Marichev (1990). + Integrals and Series: More Special Functions, Vol. 3,. 
+ Gordon and Breach Science Publisher +""" + +from __future__ import annotations +import itertools + +from sympy import SYMPY_DEBUG +from sympy.core import S, Expr +from sympy.core.add import Add +from sympy.core.basic import Basic +from sympy.core.cache import cacheit +from sympy.core.containers import Tuple +from sympy.core.exprtools import factor_terms +from sympy.core.function import (expand, expand_mul, expand_power_base, + expand_trig, Function) +from sympy.core.mul import Mul +from sympy.core.numbers import ilcm, Rational, pi +from sympy.core.relational import Eq, Ne, _canonical_coeff +from sympy.core.sorting import default_sort_key, ordered +from sympy.core.symbol import Dummy, symbols, Wild, Symbol +from sympy.core.sympify import sympify +from sympy.functions.combinatorial.factorials import factorial +from sympy.functions.elementary.complexes import (re, im, arg, Abs, sign, + unpolarify, polarify, polar_lift, principal_branch, unbranched_argument, + periodic_argument) +from sympy.functions.elementary.exponential import exp, exp_polar, log +from sympy.functions.elementary.integers import ceiling +from sympy.functions.elementary.hyperbolic import (cosh, sinh, + _rewrite_hyperbolics_as_exp, HyperbolicFunction) +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.functions.elementary.piecewise import Piecewise, piecewise_fold +from sympy.functions.elementary.trigonometric import (cos, sin, sinc, + TrigonometricFunction) +from sympy.functions.special.bessel import besselj, bessely, besseli, besselk +from sympy.functions.special.delta_functions import DiracDelta, Heaviside +from sympy.functions.special.elliptic_integrals import elliptic_k, elliptic_e +from sympy.functions.special.error_functions import (erf, erfc, erfi, Ei, + expint, Si, Ci, Shi, Chi, fresnels, fresnelc) +from sympy.functions.special.gamma_functions import gamma +from sympy.functions.special.hyper import hyper, meijerg +from sympy.functions.special.singularity_functions import SingularityFunction +from .integrals import Integral +from sympy.logic.boolalg import And, Or, BooleanAtom, Not, BooleanFunction +from sympy.polys import cancel, factor +from sympy.utilities.iterables import multiset_partitions +from sympy.utilities.misc import debug as _debug +from sympy.utilities.misc import debugf as _debugf + +# keep this at top for easy reference +z = Dummy('z') + + +def _has(res, *f): + # return True if res has f; in the case of Piecewise + # only return True if *all* pieces have f + res = piecewise_fold(res) + if getattr(res, 'is_Piecewise', False): + return all(_has(i, *f) for i in res.args) + return res.has(*f) + + +def _create_lookup_table(table): + """ Add formulae for the function -> meijerg lookup table. """ + def wild(n): + return Wild(n, exclude=[z]) + p, q, a, b, c = list(map(wild, 'pqabc')) + n = Wild('n', properties=[lambda x: x.is_Integer and x > 0]) + t = p*z**q + + def add(formula, an, ap, bm, bq, arg=t, fac=S.One, cond=True, hint=True): + table.setdefault(_mytype(formula, z), []).append((formula, + [(fac, meijerg(an, ap, bm, bq, arg))], cond, hint)) + + def addi(formula, inst, cond, hint=True): + table.setdefault( + _mytype(formula, z), []).append((formula, inst, cond, hint)) + + def constant(a): + return [(a, meijerg([1], [], [], [0], z)), + (a, meijerg([], [1], [0], [], z))] + table[()] = [(a, constant(a), True, True)] + + # [P], Section 8. 
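+    # Editorial note (illustrative, not part of the upstream SymPy source):
+    # every entry stored in ``table`` under the key _mytype(formula, z) has the
+    # shape (formula, terms, cond, hint), where ``terms`` is either a list of
+    # (fac, meijerg(...)) pairs (entries created by add()) or a callable that
+    # maps a match dict to such a list (entries created by addi()); both forms
+    # are consumed by _rewrite_single() further below.  A classical instance of
+    # such a formula is exp(-x) == G^{1,0}_{0,1}(x | -; 0), added in section
+    # 8.4.3 via add(exp(polar_lift(-1)*t), [], [], [0], []), and it can be
+    # sanity-checked with hyperexpand:
+    #     >>> from sympy import hyperexpand, meijerg
+    #     >>> from sympy.abc import x
+    #     >>> hyperexpand(meijerg([], [], [0], [], x))
+    #     exp(-x)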
+ class IsNonPositiveInteger(Function): + + @classmethod + def eval(cls, arg): + arg = unpolarify(arg) + if arg.is_Integer is True: + return arg <= 0 + + # Section 8.4.2 + # TODO this needs more polar_lift (c/f entry for exp) + add(Heaviside(t - b)*(t - b)**(a - 1), [a], [], [], [0], t/b, + gamma(a)*b**(a - 1), And(b > 0)) + add(Heaviside(b - t)*(b - t)**(a - 1), [], [a], [0], [], t/b, + gamma(a)*b**(a - 1), And(b > 0)) + add(Heaviside(z - (b/p)**(1/q))*(t - b)**(a - 1), [a], [], [], [0], t/b, + gamma(a)*b**(a - 1), And(b > 0)) + add(Heaviside((b/p)**(1/q) - z)*(b - t)**(a - 1), [], [a], [0], [], t/b, + gamma(a)*b**(a - 1), And(b > 0)) + add((b + t)**(-a), [1 - a], [], [0], [], t/b, b**(-a)/gamma(a), + hint=Not(IsNonPositiveInteger(a))) + add(Abs(b - t)**(-a), [1 - a], [(1 - a)/2], [0], [(1 - a)/2], t/b, + 2*sin(pi*a/2)*gamma(1 - a)*Abs(b)**(-a), re(a) < 1) + add((t**a - b**a)/(t - b), [0, a], [], [0, a], [], t/b, + b**(a - 1)*sin(a*pi)/pi) + + # 12 + def A1(r, sign, nu): + return pi**Rational(-1, 2)*(-sign*nu/2)**(1 - 2*r) + + def tmpadd(r, sgn): + # XXX the a**2 is bad for matching + add((sqrt(a**2 + t) + sgn*a)**b/(a**2 + t)**r, + [(1 + b)/2, 1 - 2*r + b/2], [], + [(b - sgn*b)/2], [(b + sgn*b)/2], t/a**2, + a**(b - 2*r)*A1(r, sgn, b)) + tmpadd(0, 1) + tmpadd(0, -1) + tmpadd(S.Half, 1) + tmpadd(S.Half, -1) + + # 13 + def tmpadd(r, sgn): + add((sqrt(a + p*z**q) + sgn*sqrt(p)*z**(q/2))**b/(a + p*z**q)**r, + [1 - r + sgn*b/2], [1 - r - sgn*b/2], [0, S.Half], [], + p*z**q/a, a**(b/2 - r)*A1(r, sgn, b)) + tmpadd(0, 1) + tmpadd(0, -1) + tmpadd(S.Half, 1) + tmpadd(S.Half, -1) + # (those after look obscure) + + # Section 8.4.3 + add(exp(polar_lift(-1)*t), [], [], [0], []) + + # TODO can do sin^n, sinh^n by expansion ... where? + # 8.4.4 (hyperbolic functions) + add(sinh(t), [], [1], [S.Half], [1, 0], t**2/4, pi**Rational(3, 2)) + add(cosh(t), [], [S.Half], [0], [S.Half, S.Half], t**2/4, pi**Rational(3, 2)) + + # Section 8.4.5 + # TODO can do t + a. but can also do by expansion... (XXX not really) + add(sin(t), [], [], [S.Half], [0], t**2/4, sqrt(pi)) + add(cos(t), [], [], [0], [S.Half], t**2/4, sqrt(pi)) + + # Section 8.4.6 (sinc function) + add(sinc(t), [], [], [0], [Rational(-1, 2)], t**2/4, sqrt(pi)/2) + + # Section 8.5.5 + def make_log1(subs): + N = subs[n] + return [(S.NegativeOne**N*factorial(N), + meijerg([], [1]*(N + 1), [0]*(N + 1), [], t))] + + def make_log2(subs): + N = subs[n] + return [(factorial(N), + meijerg([1]*(N + 1), [], [], [0]*(N + 1), t))] + # TODO these only hold for positive p, and can be made more general + # but who uses log(x)*Heaviside(a-x) anyway ... + # TODO also it would be nice to derive them recursively ... + addi(log(t)**n*Heaviside(1 - t), make_log1, True) + addi(log(t)**n*Heaviside(t - 1), make_log2, True) + + def make_log3(subs): + return make_log1(subs) + make_log2(subs) + addi(log(t)**n, make_log3, True) + addi(log(t + a), + constant(log(a)) + [(S.One, meijerg([1, 1], [], [1], [0], t/a))], + True) + addi(log(Abs(t - a)), constant(log(Abs(a))) + + [(pi, meijerg([1, 1], [S.Half], [1], [0, S.Half], t/a))], + True) + # TODO log(x)/(x+a) and log(x)/(x-1) can also be done. should they + # be derivable? 
+ # TODO further formulae in this section seem obscure + + # Sections 8.4.9-10 + # TODO + + # Section 8.4.11 + addi(Ei(t), + constant(-S.ImaginaryUnit*pi) + [(S.NegativeOne, meijerg([], [1], [0, 0], [], + t*polar_lift(-1)))], + True) + + # Section 8.4.12 + add(Si(t), [1], [], [S.Half], [0, 0], t**2/4, sqrt(pi)/2) + add(Ci(t), [], [1], [0, 0], [S.Half], t**2/4, -sqrt(pi)/2) + + # Section 8.4.13 + add(Shi(t), [S.Half], [], [0], [Rational(-1, 2), Rational(-1, 2)], polar_lift(-1)*t**2/4, + t*sqrt(pi)/4) + add(Chi(t), [], [S.Half, 1], [0, 0], [S.Half, S.Half], t**2/4, - + pi**S('3/2')/2) + + # generalized exponential integral + add(expint(a, t), [], [a], [a - 1, 0], [], t) + + # Section 8.4.14 + add(erf(t), [1], [], [S.Half], [0], t**2, 1/sqrt(pi)) + # TODO exp(-x)*erf(I*x) does not work + add(erfc(t), [], [1], [0, S.Half], [], t**2, 1/sqrt(pi)) + # This formula for erfi(z) yields a wrong(?) minus sign + #add(erfi(t), [1], [], [S.Half], [0], -t**2, I/sqrt(pi)) + add(erfi(t), [S.Half], [], [0], [Rational(-1, 2)], -t**2, t/sqrt(pi)) + + # Fresnel Integrals + add(fresnels(t), [1], [], [Rational(3, 4)], [0, Rational(1, 4)], pi**2*t**4/16, S.Half) + add(fresnelc(t), [1], [], [Rational(1, 4)], [0, Rational(3, 4)], pi**2*t**4/16, S.Half) + + ##### bessel-type functions ##### + # Section 8.4.19 + add(besselj(a, t), [], [], [a/2], [-a/2], t**2/4) + + # all of the following are derivable + #add(sin(t)*besselj(a, t), [Rational(1, 4), Rational(3, 4)], [], [(1+a)/2], + # [-a/2, a/2, (1-a)/2], t**2, 1/sqrt(2)) + #add(cos(t)*besselj(a, t), [Rational(1, 4), Rational(3, 4)], [], [a/2], + # [-a/2, (1+a)/2, (1-a)/2], t**2, 1/sqrt(2)) + #add(besselj(a, t)**2, [S.Half], [], [a], [-a, 0], t**2, 1/sqrt(pi)) + #add(besselj(a, t)*besselj(b, t), [0, S.Half], [], [(a + b)/2], + # [-(a+b)/2, (a - b)/2, (b - a)/2], t**2, 1/sqrt(pi)) + + # Section 8.4.20 + add(bessely(a, t), [], [-(a + 1)/2], [a/2, -a/2], [-(a + 1)/2], t**2/4) + + # TODO all of the following should be derivable + #add(sin(t)*bessely(a, t), [Rational(1, 4), Rational(3, 4)], [(1 - a - 1)/2], + # [(1 + a)/2, (1 - a)/2], [(1 - a - 1)/2, (1 - 1 - a)/2, (1 - 1 + a)/2], + # t**2, 1/sqrt(2)) + #add(cos(t)*bessely(a, t), [Rational(1, 4), Rational(3, 4)], [(0 - a - 1)/2], + # [(0 + a)/2, (0 - a)/2], [(0 - a - 1)/2, (1 - 0 - a)/2, (1 - 0 + a)/2], + # t**2, 1/sqrt(2)) + #add(besselj(a, t)*bessely(b, t), [0, S.Half], [(a - b - 1)/2], + # [(a + b)/2, (a - b)/2], [(a - b - 1)/2, -(a + b)/2, (b - a)/2], + # t**2, 1/sqrt(pi)) + #addi(bessely(a, t)**2, + # [(2/sqrt(pi), meijerg([], [S.Half, S.Half - a], [0, a, -a], + # [S.Half - a], t**2)), + # (1/sqrt(pi), meijerg([S.Half], [], [a], [-a, 0], t**2))], + # True) + #addi(bessely(a, t)*bessely(b, t), + # [(2/sqrt(pi), meijerg([], [0, S.Half, (1 - a - b)/2], + # [(a + b)/2, (a - b)/2, (b - a)/2, -(a + b)/2], + # [(1 - a - b)/2], t**2)), + # (1/sqrt(pi), meijerg([0, S.Half], [], [(a + b)/2], + # [-(a + b)/2, (a - b)/2, (b - a)/2], t**2))], + # True) + + # Section 8.4.21 ? + # Section 8.4.22 + add(besseli(a, t), [], [(1 + a)/2], [a/2], [-a/2, (1 + a)/2], t**2/4, pi) + # TODO many more formulas. should all be derivable + + # Section 8.4.23 + add(besselk(a, t), [], [], [a/2, -a/2], [], t**2/4, S.Half) + # TODO many more formulas. 
should all be derivable + + # Complete elliptic integrals K(z) and E(z) + add(elliptic_k(t), [S.Half, S.Half], [], [0], [0], -t, S.Half) + add(elliptic_e(t), [S.Half, 3*S.Half], [], [0], [0], -t, Rational(-1, 2)/2) + + +#################################################################### +# First some helper functions. +#################################################################### + +from sympy.utilities.timeutils import timethis +timeit = timethis('meijerg') + + +def _mytype(f: Basic, x: Symbol) -> tuple[type[Basic], ...]: + """ Create a hashable entity describing the type of f. """ + def key(x: type[Basic]) -> tuple[int, int, str]: + return x.class_key() + + if x not in f.free_symbols: + return () + elif f.is_Function: + return type(f), + return tuple(sorted((t for a in f.args for t in _mytype(a, x)), key=key)) + + +class _CoeffExpValueError(ValueError): + """ + Exception raised by _get_coeff_exp, for internal use only. + """ + pass + + +def _get_coeff_exp(expr, x): + """ + When expr is known to be of the form c*x**b, with c and/or b possibly 1, + return c, b. + + Examples + ======== + + >>> from sympy.abc import x, a, b + >>> from sympy.integrals.meijerint import _get_coeff_exp + >>> _get_coeff_exp(a*x**b, x) + (a, b) + >>> _get_coeff_exp(x, x) + (1, 1) + >>> _get_coeff_exp(2*x, x) + (2, 1) + >>> _get_coeff_exp(x**3, x) + (1, 3) + """ + from sympy.simplify import powsimp + (c, m) = expand_power_base(powsimp(expr)).as_coeff_mul(x) + if not m: + return c, S.Zero + [m] = m + if m.is_Pow: + if m.base != x: + raise _CoeffExpValueError('expr not of form a*x**b') + return c, m.exp + elif m == x: + return c, S.One + else: + raise _CoeffExpValueError('expr not of form a*x**b: %s' % expr) + + +def _exponents(expr, x): + """ + Find the exponents of ``x`` (not including zero) in ``expr``. + + Examples + ======== + + >>> from sympy.integrals.meijerint import _exponents + >>> from sympy.abc import x, y + >>> from sympy import sin + >>> _exponents(x, x) + {1} + >>> _exponents(x**2, x) + {2} + >>> _exponents(x**2 + x, x) + {1, 2} + >>> _exponents(x**3*sin(x + x**y) + 1/x, x) + {-1, 1, 3, y} + """ + def _exponents_(expr, x, res): + if expr == x: + res.update([1]) + return + if expr.is_Pow and expr.base == x: + res.update([expr.exp]) + return + for argument in expr.args: + _exponents_(argument, x, res) + res = set() + _exponents_(expr, x, res) + return res + + +def _functions(expr, x): + """ Find the types of functions in expr, to estimate the complexity. """ + return {e.func for e in expr.atoms(Function) if x in e.free_symbols} + + +def _find_splitting_points(expr, x): + """ + Find numbers a such that a linear substitution x -> x + a would + (hopefully) simplify expr. + + Examples + ======== + + >>> from sympy.integrals.meijerint import _find_splitting_points as fsp + >>> from sympy import sin + >>> from sympy.abc import x + >>> fsp(x, x) + {0} + >>> fsp((x-1)**3, x) + {1} + >>> fsp(sin(x+3)*x, x) + {-3, 0} + """ + p, q = [Wild(n, exclude=[x]) for n in 'pq'] + + def compute_innermost(expr, res): + if not isinstance(expr, Expr): + return + m = expr.match(p*x + q) + if m and m[p] != 0: + res.add(-m[q]/m[p]) + return + if expr.is_Atom: + return + for argument in expr.args: + compute_innermost(argument, res) + innermost = set() + compute_innermost(expr, innermost) + return innermost + + +def _split_mul(f, x): + """ + Split expression ``f`` into fac, po, g, where fac is a constant factor, + po = x**s for some s independent of s, and g is "the rest". 
+ + Examples + ======== + + >>> from sympy.integrals.meijerint import _split_mul + >>> from sympy import sin + >>> from sympy.abc import s, x + >>> _split_mul((3*x)**s*sin(x**2)*x, x) + (3**s, x*x**s, sin(x**2)) + """ + fac = S.One + po = S.One + g = S.One + f = expand_power_base(f) + + args = Mul.make_args(f) + for a in args: + if a == x: + po *= x + elif x not in a.free_symbols: + fac *= a + else: + if a.is_Pow and x not in a.exp.free_symbols: + c, t = a.base.as_coeff_mul(x) + if t != (x,): + c, t = expand_mul(a.base).as_coeff_mul(x) + if t == (x,): + po *= x**a.exp + fac *= unpolarify(polarify(c**a.exp, subs=False)) + continue + g *= a + + return fac, po, g + + +def _mul_args(f): + """ + Return a list ``L`` such that ``Mul(*L) == f``. + + If ``f`` is not a ``Mul`` or ``Pow``, ``L=[f]``. + If ``f=g**n`` for an integer ``n``, ``L=[g]*n``. + If ``f`` is a ``Mul``, ``L`` comes from applying ``_mul_args`` to all factors of ``f``. + """ + args = Mul.make_args(f) + gs = [] + for g in args: + if g.is_Pow and g.exp.is_Integer: + n = g.exp + base = g.base + if n < 0: + n = -n + base = 1/base + gs += [base]*n + else: + gs.append(g) + return gs + + +def _mul_as_two_parts(f): + """ + Find all the ways to split ``f`` into a product of two terms. + Return None on failure. + + Explanation + =========== + + Although the order is canonical from multiset_partitions, this is + not necessarily the best order to process the terms. For example, + if the case of len(gs) == 2 is removed and multiset is allowed to + sort the terms, some tests fail. + + Examples + ======== + + >>> from sympy.integrals.meijerint import _mul_as_two_parts + >>> from sympy import sin, exp, ordered + >>> from sympy.abc import x + >>> list(ordered(_mul_as_two_parts(x*sin(x)*exp(x)))) + [(x, exp(x)*sin(x)), (x*exp(x), sin(x)), (x*sin(x), exp(x))] + """ + + gs = _mul_args(f) + if len(gs) < 2: + return None + if len(gs) == 2: + return [tuple(gs)] + return [(Mul(*x), Mul(*y)) for (x, y) in multiset_partitions(gs, 2)] + + +def _inflate_g(g, n): + """ Return C, h such that h is a G function of argument z**n and + g = C*h. """ + # TODO should this be a method of meijerg? + # See: [L, page 150, equation (5)] + def inflate(params, n): + """ (a1, .., ak) -> (a1/n, (a1+1)/n, ..., (ak + n-1)/n) """ + return [(a + i)/n for a, i in itertools.product(params, range(n))] + v = S(len(g.ap) - len(g.bq)) + C = n**(1 + g.nu + v/2) + C /= (2*pi)**((n - 1)*g.delta) + return C, meijerg(inflate(g.an, n), inflate(g.aother, n), + inflate(g.bm, n), inflate(g.bother, n), + g.argument**n * n**(n*v)) + + +def _flip_g(g): + """ Turn the G function into one of inverse argument + (i.e. G(1/x) -> G'(x)) """ + # See [L], section 5.2 + def tr(l): + return [1 - a for a in l] + return meijerg(tr(g.bm), tr(g.bother), tr(g.an), tr(g.aother), 1/g.argument) + + +def _inflate_fox_h(g, a): + r""" + Let d denote the integrand in the definition of the G function ``g``. + Consider the function H which is defined in the same way, but with + integrand d/Gamma(a*s) (contour conventions as usual). + + If ``a`` is rational, the function H can be written as C*G, for a constant C + and a G-function G. + + This function returns C, G. + """ + if a < 0: + return _inflate_fox_h(_flip_g(g), -a) + p = S(a.p) + q = S(a.q) + # We use the substitution s->qs, i.e. inflate g by q. We are left with an + # extra factor of Gamma(p*s), for which we use Gauss' multiplication + # theorem. 
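+    # Editorial note (illustrative, not part of the upstream SymPy source):
+    # Gauss' multiplication theorem reads
+    #     Gamma(p*s) = (2*pi)**((1 - p)/2) * p**(p*s - 1/2)
+    #                  * Product(Gamma(s + k/p), (k, 0, p - 1)),
+    # which is where the constant (2*pi)**((1 - p)/2)*p**(-1/2) divided out of
+    # D below, the rescaling of the argument by p**p, and the p extra
+    # denominator parameters ``bs`` come from.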
+ D, g = _inflate_g(g, q) + z = g.argument + D /= (2*pi)**((1 - p)/2)*p**Rational(-1, 2) + z /= p**p + bs = [(n + 1)/p for n in range(p)] + return D, meijerg(g.an, g.aother, g.bm, list(g.bother) + bs, z) + +_dummies: dict[tuple[str, str], Dummy] = {} + + +def _dummy(name, token, expr, **kwargs): + """ + Return a dummy. This will return the same dummy if the same token+name is + requested more than once, and it is not already in expr. + This is for being cache-friendly. + """ + d = _dummy_(name, token, **kwargs) + if d in expr.free_symbols: + return Dummy(name, **kwargs) + return d + + +def _dummy_(name, token, **kwargs): + """ + Return a dummy associated to name and token. Same effect as declaring + it globally. + """ + global _dummies + if not (name, token) in _dummies: + _dummies[(name, token)] = Dummy(name, **kwargs) + return _dummies[(name, token)] + + +def _is_analytic(f, x): + """ Check if f(x), when expressed using G functions on the positive reals, + will in fact agree with the G functions almost everywhere """ + return not any(x in expr.free_symbols for expr in f.atoms(Heaviside, Abs)) + + +def _condsimp(cond, first=True): + """ + Do naive simplifications on ``cond``. + + Explanation + =========== + + Note that this routine is completely ad-hoc, simplification rules being + added as need arises rather than following any logical pattern. + + Examples + ======== + + >>> from sympy.integrals.meijerint import _condsimp as simp + >>> from sympy import Or, Eq + >>> from sympy.abc import x, y + >>> simp(Or(x < y, Eq(x, y))) + x <= y + """ + if first: + cond = cond.replace(lambda _: _.is_Relational, _canonical_coeff) + first = False + if not isinstance(cond, BooleanFunction): + return cond + p, q, r = symbols('p q r', cls=Wild) + # transforms tests use 0, 4, 5 and 11-14 + # meijer tests use 0, 2, 11, 14 + # joint_rv uses 6, 7 + rules = [ + (Or(p < q, Eq(p, q)), p <= q), # 0 + # The next two obviously are instances of a general pattern, but it is + # easier to spell out the few cases we care about. 
+ (And(Abs(arg(p)) <= pi, Abs(arg(p) - 2*pi) <= pi), + Eq(arg(p) - pi, 0)), # 1 + (And(Abs(2*arg(p) + pi) <= pi, Abs(2*arg(p) - pi) <= pi), + Eq(arg(p), 0)), # 2 + (And(Abs(2*arg(p) + pi) < pi, Abs(2*arg(p) - pi) <= pi), + S.false), # 3 + (And(Abs(arg(p) - pi/2) <= pi/2, Abs(arg(p) + pi/2) <= pi/2), + Eq(arg(p), 0)), # 4 + (And(Abs(arg(p) - pi/2) <= pi/2, Abs(arg(p) + pi/2) < pi/2), + S.false), # 5 + (And(Abs(arg(p**2/2 + 1)) < pi, Ne(Abs(arg(p**2/2 + 1)), pi)), + S.true), # 6 + (Or(Abs(arg(p**2/2 + 1)) < pi, Ne(1/(p**2/2 + 1), 0)), + S.true), # 7 + (And(Abs(unbranched_argument(p)) <= pi, + Abs(unbranched_argument(exp_polar(-2*pi*S.ImaginaryUnit)*p)) <= pi), + Eq(unbranched_argument(exp_polar(-S.ImaginaryUnit*pi)*p), 0)), # 8 + (And(Abs(unbranched_argument(p)) <= pi/2, + Abs(unbranched_argument(exp_polar(-pi*S.ImaginaryUnit)*p)) <= pi/2), + Eq(unbranched_argument(exp_polar(-S.ImaginaryUnit*pi/2)*p), 0)), # 9 + (Or(p <= q, And(p < q, r)), p <= q), # 10 + (Ne(p**2, 1) & (p**2 > 1), p**2 > 1), # 11 + (Ne(1/p, 1) & (cos(Abs(arg(p)))*Abs(p) > 1), Abs(p) > 1), # 12 + (Ne(p, 2) & (cos(Abs(arg(p)))*Abs(p) > 2), Abs(p) > 2), # 13 + ((Abs(arg(p)) < pi/2) & (cos(Abs(arg(p)))*sqrt(Abs(p**2)) > 1), p**2 > 1), # 14 + ] + cond = cond.func(*[_condsimp(_, first) for _ in cond.args]) + change = True + while change: + change = False + for irule, (fro, to) in enumerate(rules): + if fro.func != cond.func: + continue + for n, arg1 in enumerate(cond.args): + if r in fro.args[0].free_symbols: + m = arg1.match(fro.args[1]) + num = 1 + else: + num = 0 + m = arg1.match(fro.args[0]) + if not m: + continue + otherargs = [x.subs(m) for x in fro.args[:num] + fro.args[num + 1:]] + otherlist = [n] + for arg2 in otherargs: + for k, arg3 in enumerate(cond.args): + if k in otherlist: + continue + if arg2 == arg3: + otherlist += [k] + break + if isinstance(arg3, And) and arg2.args[1] == r and \ + isinstance(arg2, And) and arg2.args[0] in arg3.args: + otherlist += [k] + break + if isinstance(arg3, And) and arg2.args[0] == r and \ + isinstance(arg2, And) and arg2.args[1] in arg3.args: + otherlist += [k] + break + if len(otherlist) != len(otherargs) + 1: + continue + newargs = [arg_ for (k, arg_) in enumerate(cond.args) + if k not in otherlist] + [to.subs(m)] + if SYMPY_DEBUG: + if irule not in (0, 2, 4, 5, 6, 7, 11, 12, 13, 14): + print('used new rule:', irule) + cond = cond.func(*newargs) + change = True + break + + # final tweak + def rel_touchup(rel): + if rel.rel_op != '==' or rel.rhs != 0: + return rel + + # handle Eq(*, 0) + LHS = rel.lhs + m = LHS.match(arg(p)**q) + if not m: + m = LHS.match(unbranched_argument(polar_lift(p)**q)) + if not m: + if isinstance(LHS, periodic_argument) and not LHS.args[0].is_polar \ + and LHS.args[1] is S.Infinity: + return (LHS.args[0] > 0) + return rel + return (m[p] > 0) + cond = cond.replace(lambda _: _.is_Relational, rel_touchup) + if SYMPY_DEBUG: + print('_condsimp: ', cond) + return cond + +def _eval_cond(cond): + """ Re-evaluate the conditions. """ + if isinstance(cond, bool): + return cond + return _condsimp(cond.doit()) + +#################################################################### +# Now the "backbone" functions to do actual integration. +#################################################################### + + +def _my_principal_branch(expr, period, full_pb=False): + """ Bring expr nearer to its principal branch by removing superfluous + factors. 
+ This function does *not* guarantee to yield the principal branch, + to avoid introducing opaque principal_branch() objects, + unless full_pb=True. """ + res = principal_branch(expr, period) + if not full_pb: + res = res.replace(principal_branch, lambda x, y: x) + return res + + +def _rewrite_saxena_1(fac, po, g, x): + """ + Rewrite the integral fac*po*g dx, from zero to infinity, as + integral fac*G, where G has argument a*x. Note po=x**s. + Return fac, G. + """ + _, s = _get_coeff_exp(po, x) + a, b = _get_coeff_exp(g.argument, x) + period = g.get_period() + a = _my_principal_branch(a, period) + + # We substitute t = x**b. + C = fac/(Abs(b)*a**((s + 1)/b - 1)) + # Absorb a factor of (at)**((1 + s)/b - 1). + + def tr(l): + return [a + (1 + s)/b - 1 for a in l] + return C, meijerg(tr(g.an), tr(g.aother), tr(g.bm), tr(g.bother), + a*x) + + +def _check_antecedents_1(g, x, helper=False): + r""" + Return a condition under which the mellin transform of g exists. + Any power of x has already been absorbed into the G function, + so this is just $\int_0^\infty g\, dx$. + + See [L, section 5.6.1]. (Note that s=1.) + + If ``helper`` is True, only check if the MT exists at infinity, i.e. if + $\int_1^\infty g\, dx$ exists. + """ + # NOTE if you update these conditions, please update the documentation as well + delta = g.delta + eta, _ = _get_coeff_exp(g.argument, x) + m, n, p, q = S([len(g.bm), len(g.an), len(g.ap), len(g.bq)]) + + if p > q: + def tr(l): + return [1 - x for x in l] + return _check_antecedents_1(meijerg(tr(g.bm), tr(g.bother), + tr(g.an), tr(g.aother), x/eta), + x) + + tmp = [-re(b) < 1 for b in g.bm] + [1 < 1 - re(a) for a in g.an] + cond_3 = And(*tmp) + + tmp += [-re(b) < 1 for b in g.bother] + tmp += [1 < 1 - re(a) for a in g.aother] + cond_3_star = And(*tmp) + + cond_4 = (-re(g.nu) + (q + 1 - p)/2 > q - p) + + def debug(*msg): + _debug(*msg) + + def debugf(string, arg): + _debugf(string, arg) + + debug('Checking antecedents for 1 function:') + debugf(' delta=%s, eta=%s, m=%s, n=%s, p=%s, q=%s', + (delta, eta, m, n, p, q)) + debugf(' ap = %s, %s', (list(g.an), list(g.aother))) + debugf(' bq = %s, %s', (list(g.bm), list(g.bother))) + debugf(' cond_3=%s, cond_3*=%s, cond_4=%s', (cond_3, cond_3_star, cond_4)) + + conds = [] + + # case 1 + case1 = [] + tmp1 = [1 <= n, p < q, 1 <= m] + tmp2 = [1 <= p, 1 <= m, Eq(q, p + 1), Not(And(Eq(n, 0), Eq(m, p + 1)))] + tmp3 = [1 <= p, Eq(q, p)] + for k in range(ceiling(delta/2) + 1): + tmp3 += [Ne(Abs(unbranched_argument(eta)), (delta - 2*k)*pi)] + tmp = [delta > 0, Abs(unbranched_argument(eta)) < delta*pi] + extra = [Ne(eta, 0), cond_3] + if helper: + extra = [] + for t in [tmp1, tmp2, tmp3]: + case1 += [And(*(t + tmp + extra))] + conds += case1 + debug(' case 1:', case1) + + # case 2 + extra = [cond_3] + if helper: + extra = [] + case2 = [And(Eq(n, 0), p + 1 <= m, m <= q, + Abs(unbranched_argument(eta)) < delta*pi, *extra)] + conds += case2 + debug(' case 2:', case2) + + # case 3 + extra = [cond_3, cond_4] + if helper: + extra = [] + case3 = [And(p < q, 1 <= m, delta > 0, Eq(Abs(unbranched_argument(eta)), delta*pi), + *extra)] + case3 += [And(p <= q - 2, Eq(delta, 0), Eq(Abs(unbranched_argument(eta)), 0), *extra)] + conds += case3 + debug(' case 3:', case3) + + # TODO altered cases 4-7 + + # extra case from wofram functions site: + # (reproduced verbatim from Prudnikov, section 2.24.2) + # https://functions.wolfram.com/HypergeometricFunctions/MeijerG/21/02/01/ + case_extra = [] + case_extra += [Eq(p, q), Eq(delta, 0), 
Eq(unbranched_argument(eta), 0), Ne(eta, 0)] + if not helper: + case_extra += [cond_3] + s = [] + for a, b in zip(g.ap, g.bq): + s += [b - a] + case_extra += [re(Add(*s)) < 0] + case_extra = And(*case_extra) + conds += [case_extra] + debug(' extra case:', [case_extra]) + + case_extra_2 = [And(delta > 0, Abs(unbranched_argument(eta)) < delta*pi)] + if not helper: + case_extra_2 += [cond_3] + case_extra_2 = And(*case_extra_2) + conds += [case_extra_2] + debug(' second extra case:', [case_extra_2]) + + # TODO This leaves only one case from the three listed by Prudnikov. + # Investigate if these indeed cover everything; if so, remove the rest. + + return Or(*conds) + + +def _int0oo_1(g, x): + r""" + Evaluate $\int_0^\infty g\, dx$ using G functions, + assuming the necessary conditions are fulfilled. + + Examples + ======== + + >>> from sympy.abc import a, b, c, d, x, y + >>> from sympy import meijerg + >>> from sympy.integrals.meijerint import _int0oo_1 + >>> _int0oo_1(meijerg([a], [b], [c], [d], x*y), x) + gamma(-a)*gamma(c + 1)/(y*gamma(-d)*gamma(b + 1)) + """ + from sympy.simplify import gammasimp + # See [L, section 5.6.1]. Note that s=1. + eta, _ = _get_coeff_exp(g.argument, x) + res = 1/eta + # XXX TODO we should reduce order first + for b in g.bm: + res *= gamma(b + 1) + for a in g.an: + res *= gamma(1 - a - 1) + for b in g.bother: + res /= gamma(1 - b - 1) + for a in g.aother: + res /= gamma(a + 1) + return gammasimp(unpolarify(res)) + + +def _rewrite_saxena(fac, po, g1, g2, x, full_pb=False): + """ + Rewrite the integral ``fac*po*g1*g2`` from 0 to oo in terms of G + functions with argument ``c*x``. + + Explanation + =========== + + Return C, f1, f2 such that integral C f1 f2 from 0 to infinity equals + integral fac ``po``, ``g1``, ``g2`` from 0 to infinity. + + Examples + ======== + + >>> from sympy.integrals.meijerint import _rewrite_saxena + >>> from sympy.abc import s, t, m + >>> from sympy import meijerg + >>> g1 = meijerg([], [], [0], [], s*t) + >>> g2 = meijerg([], [], [m/2], [-m/2], t**2/4) + >>> r = _rewrite_saxena(1, t**0, g1, g2, t) + >>> r[0] + s/(4*sqrt(pi)) + >>> r[1] + meijerg(((), ()), ((-1/2, 0), ()), s**2*t/4) + >>> r[2] + meijerg(((), ()), ((m/2,), (-m/2,)), t/4) + """ + def pb(g): + a, b = _get_coeff_exp(g.argument, x) + per = g.get_period() + return meijerg(g.an, g.aother, g.bm, g.bother, + _my_principal_branch(a, per, full_pb)*x**b) + + _, s = _get_coeff_exp(po, x) + _, b1 = _get_coeff_exp(g1.argument, x) + _, b2 = _get_coeff_exp(g2.argument, x) + if (b1 < 0) == True: + b1 = -b1 + g1 = _flip_g(g1) + if (b2 < 0) == True: + b2 = -b2 + g2 = _flip_g(g2) + if not b1.is_Rational or not b2.is_Rational: + return + m1, n1 = b1.p, b1.q + m2, n2 = b2.p, b2.q + tau = ilcm(m1*n2, m2*n1) + r1 = tau//(m1*n2) + r2 = tau//(m2*n1) + + C1, g1 = _inflate_g(g1, r1) + C2, g2 = _inflate_g(g2, r2) + g1 = pb(g1) + g2 = pb(g2) + + fac *= C1*C2 + a1, b = _get_coeff_exp(g1.argument, x) + a2, _ = _get_coeff_exp(g2.argument, x) + + # arbitrarily tack on the x**s part to g1 + # TODO should we try both? + exp = (s + 1)/b - 1 + fac = fac/(Abs(b) * a1**exp) + + def tr(l): + return [a + exp for a in l] + g1 = meijerg(tr(g1.an), tr(g1.aother), tr(g1.bm), tr(g1.bother), a1*x) + g2 = meijerg(g2.an, g2.aother, g2.bm, g2.bother, a2*x) + + from sympy.simplify import powdenest + return powdenest(fac, polar=True), g1, g2 + + +def _check_antecedents(g1, g2, x): + """ Return a condition under which the integral theorem applies. """ + # Yes, this is madness. 
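+    # Editorial note (paraphrase, not part of the upstream SymPy source): the
+    # "integral theorem" referred to above is the classical evaluation of
+    #     Integral(G1(sigma*x)*G2(omega*x), (x, 0, oo))
+    # as a single Meijer G-function of omega/sigma (see [P], section 2.24.1 and
+    # [L], section 5.6.2).  The evaluation itself is carried out by _int0oo();
+    # this function only assembles the (many) validity conditions.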
+ # XXX TODO this is a testing *nightmare* + # NOTE if you update these conditions, please update the documentation as well + + # The following conditions are found in + # [P], Section 2.24.1 + # + # They are also reproduced (verbatim!) at + # https://functions.wolfram.com/HypergeometricFunctions/MeijerG/21/02/03/ + # + # Note: k=l=r=alpha=1 + sigma, _ = _get_coeff_exp(g1.argument, x) + omega, _ = _get_coeff_exp(g2.argument, x) + s, t, u, v = S([len(g1.bm), len(g1.an), len(g1.ap), len(g1.bq)]) + m, n, p, q = S([len(g2.bm), len(g2.an), len(g2.ap), len(g2.bq)]) + bstar = s + t - (u + v)/2 + cstar = m + n - (p + q)/2 + rho = g1.nu + (u - v)/2 + 1 + mu = g2.nu + (p - q)/2 + 1 + phi = q - p - (v - u) + eta = 1 - (v - u) - mu - rho + psi = (pi*(q - m - n) + Abs(unbranched_argument(omega)))/(q - p) + theta = (pi*(v - s - t) + Abs(unbranched_argument(sigma)))/(v - u) + + _debug('Checking antecedents:') + _debugf(' sigma=%s, s=%s, t=%s, u=%s, v=%s, b*=%s, rho=%s', + (sigma, s, t, u, v, bstar, rho)) + _debugf(' omega=%s, m=%s, n=%s, p=%s, q=%s, c*=%s, mu=%s,', + (omega, m, n, p, q, cstar, mu)) + _debugf(' phi=%s, eta=%s, psi=%s, theta=%s', (phi, eta, psi, theta)) + + def _c1(): + for g in [g1, g2]: + for i, j in itertools.product(g.an, g.bm): + diff = i - j + if diff.is_integer and diff.is_positive: + return False + return True + c1 = _c1() + c2 = And(*[re(1 + i + j) > 0 for i in g1.bm for j in g2.bm]) + c3 = And(*[re(1 + i + j) < 1 + 1 for i in g1.an for j in g2.an]) + c4 = And(*[(p - q)*re(1 + i - 1) - re(mu) > Rational(-3, 2) for i in g1.an]) + c5 = And(*[(p - q)*re(1 + i) - re(mu) > Rational(-3, 2) for i in g1.bm]) + c6 = And(*[(u - v)*re(1 + i - 1) - re(rho) > Rational(-3, 2) for i in g2.an]) + c7 = And(*[(u - v)*re(1 + i) - re(rho) > Rational(-3, 2) for i in g2.bm]) + c8 = (Abs(phi) + 2*re((rho - 1)*(q - p) + (v - u)*(q - p) + (mu - + 1)*(v - u)) > 0) + c9 = (Abs(phi) - 2*re((rho - 1)*(q - p) + (v - u)*(q - p) + (mu - + 1)*(v - u)) > 0) + c10 = (Abs(unbranched_argument(sigma)) < bstar*pi) + c11 = Eq(Abs(unbranched_argument(sigma)), bstar*pi) + c12 = (Abs(unbranched_argument(omega)) < cstar*pi) + c13 = Eq(Abs(unbranched_argument(omega)), cstar*pi) + + # The following condition is *not* implemented as stated on the wolfram + # function site. In the book of Prudnikov there is an additional part + # (the And involving re()). However, I only have this book in russian, and + # I don't read any russian. The following condition is what other people + # have told me it means. + # Worryingly, it is different from the condition implemented in REDUCE. + # The REDUCE implementation: + # https://reduce-algebra.svn.sourceforge.net/svnroot/reduce-algebra/trunk/packages/defint/definta.red + # (search for tst14) + # The Wolfram alpha version: + # https://functions.wolfram.com/HypergeometricFunctions/MeijerG/21/02/03/03/0014/ + z0 = exp(-(bstar + cstar)*pi*S.ImaginaryUnit) + zos = unpolarify(z0*omega/sigma) + zso = unpolarify(z0*sigma/omega) + if zos == 1/zso: + c14 = And(Eq(phi, 0), bstar + cstar <= 1, + Or(Ne(zos, 1), re(mu + rho + v - u) < 1, + re(mu + rho + q - p) < 1)) + else: + def _cond(z): + '''Returns True if abs(arg(1-z)) < pi, avoiding arg(0). + + Explanation + =========== + + If ``z`` is 1 then arg is NaN. This raises a + TypeError on `NaN < pi`. Previously this gave `False` so + this behavior has been hardcoded here but someone should + check if this NaN is more serious! 
This NaN is triggered by + test_meijerint() in test_meijerint.py: + `meijerint_definite(exp(x), x, 0, I)` + ''' + return z != 1 and Abs(arg(1 - z)) < pi + + c14 = And(Eq(phi, 0), bstar - 1 + cstar <= 0, + Or(And(Ne(zos, 1), _cond(zos)), + And(re(mu + rho + v - u) < 1, Eq(zos, 1)))) + + c14_alt = And(Eq(phi, 0), cstar - 1 + bstar <= 0, + Or(And(Ne(zso, 1), _cond(zso)), + And(re(mu + rho + q - p) < 1, Eq(zso, 1)))) + + # Since r=k=l=1, in our case there is c14_alt which is the same as calling + # us with (g1, g2) = (g2, g1). The conditions below enumerate all cases + # (i.e. we don't have to try arguments reversed by hand), and indeed try + # all symmetric cases. (i.e. whenever there is a condition involving c14, + # there is also a dual condition which is exactly what we would get when g1, + # g2 were interchanged, *but c14 was unaltered*). + # Hence the following seems correct: + c14 = Or(c14, c14_alt) + + ''' + When `c15` is NaN (e.g. from `psi` being NaN as happens during + 'test_issue_4992' and/or `theta` is NaN as in 'test_issue_6253', + both in `test_integrals.py`) the comparison to 0 formerly gave False + whereas now an error is raised. To keep the old behavior, the value + of NaN is replaced with False but perhaps a closer look at this condition + should be made: XXX how should conditions leading to c15=NaN be handled? + ''' + try: + lambda_c = (q - p)*Abs(omega)**(1/(q - p))*cos(psi) \ + + (v - u)*Abs(sigma)**(1/(v - u))*cos(theta) + # the TypeError might be raised here, e.g. if lambda_c is NaN + if _eval_cond(lambda_c > 0) != False: + c15 = (lambda_c > 0) + else: + def lambda_s0(c1, c2): + return c1*(q - p)*Abs(omega)**(1/(q - p))*sin(psi) \ + + c2*(v - u)*Abs(sigma)**(1/(v - u))*sin(theta) + lambda_s = Piecewise( + ((lambda_s0(+1, +1)*lambda_s0(-1, -1)), + And(Eq(unbranched_argument(sigma), 0), Eq(unbranched_argument(omega), 0))), + (lambda_s0(sign(unbranched_argument(omega)), +1)*lambda_s0(sign(unbranched_argument(omega)), -1), + And(Eq(unbranched_argument(sigma), 0), Ne(unbranched_argument(omega), 0))), + (lambda_s0(+1, sign(unbranched_argument(sigma)))*lambda_s0(-1, sign(unbranched_argument(sigma))), + And(Ne(unbranched_argument(sigma), 0), Eq(unbranched_argument(omega), 0))), + (lambda_s0(sign(unbranched_argument(omega)), sign(unbranched_argument(sigma))), True)) + tmp = [lambda_c > 0, + And(Eq(lambda_c, 0), Ne(lambda_s, 0), re(eta) > -1), + And(Eq(lambda_c, 0), Eq(lambda_s, 0), re(eta) > 0)] + c15 = Or(*tmp) + except TypeError: + c15 = False + for cond, i in [(c1, 1), (c2, 2), (c3, 3), (c4, 4), (c5, 5), (c6, 6), + (c7, 7), (c8, 8), (c9, 9), (c10, 10), (c11, 11), + (c12, 12), (c13, 13), (c14, 14), (c15, 15)]: + _debugf(' c%s: %s', (i, cond)) + + # We will return Or(*conds) + conds = [] + + def pr(count): + _debugf(' case %s: %s', (count, conds[-1])) + conds += [And(m*n*s*t != 0, bstar.is_positive is True, cstar.is_positive is True, c1, c2, c3, c10, + c12)] # 1 + pr(1) + conds += [And(Eq(u, v), Eq(bstar, 0), cstar.is_positive is True, sigma.is_positive is True, re(rho) < 1, + c1, c2, c3, c12)] # 2 + pr(2) + conds += [And(Eq(p, q), Eq(cstar, 0), bstar.is_positive is True, omega.is_positive is True, re(mu) < 1, + c1, c2, c3, c10)] # 3 + pr(3) + conds += [And(Eq(p, q), Eq(u, v), Eq(bstar, 0), Eq(cstar, 0), + sigma.is_positive is True, omega.is_positive is True, re(mu) < 1, re(rho) < 1, + Ne(sigma, omega), c1, c2, c3)] # 4 + pr(4) + conds += [And(Eq(p, q), Eq(u, v), Eq(bstar, 0), Eq(cstar, 0), + sigma.is_positive is True, omega.is_positive is True, re(mu + rho) < 1, + Ne(omega, 
sigma), c1, c2, c3)] # 5 + pr(5) + conds += [And(p > q, s.is_positive is True, bstar.is_positive is True, cstar >= 0, + c1, c2, c3, c5, c10, c13)] # 6 + pr(6) + conds += [And(p < q, t.is_positive is True, bstar.is_positive is True, cstar >= 0, + c1, c2, c3, c4, c10, c13)] # 7 + pr(7) + conds += [And(u > v, m.is_positive is True, cstar.is_positive is True, bstar >= 0, + c1, c2, c3, c7, c11, c12)] # 8 + pr(8) + conds += [And(u < v, n.is_positive is True, cstar.is_positive is True, bstar >= 0, + c1, c2, c3, c6, c11, c12)] # 9 + pr(9) + conds += [And(p > q, Eq(u, v), Eq(bstar, 0), cstar >= 0, sigma.is_positive is True, + re(rho) < 1, c1, c2, c3, c5, c13)] # 10 + pr(10) + conds += [And(p < q, Eq(u, v), Eq(bstar, 0), cstar >= 0, sigma.is_positive is True, + re(rho) < 1, c1, c2, c3, c4, c13)] # 11 + pr(11) + conds += [And(Eq(p, q), u > v, bstar >= 0, Eq(cstar, 0), omega.is_positive is True, + re(mu) < 1, c1, c2, c3, c7, c11)] # 12 + pr(12) + conds += [And(Eq(p, q), u < v, bstar >= 0, Eq(cstar, 0), omega.is_positive is True, + re(mu) < 1, c1, c2, c3, c6, c11)] # 13 + pr(13) + conds += [And(p < q, u > v, bstar >= 0, cstar >= 0, + c1, c2, c3, c4, c7, c11, c13)] # 14 + pr(14) + conds += [And(p > q, u < v, bstar >= 0, cstar >= 0, + c1, c2, c3, c5, c6, c11, c13)] # 15 + pr(15) + conds += [And(p > q, u > v, bstar >= 0, cstar >= 0, + c1, c2, c3, c5, c7, c8, c11, c13, c14)] # 16 + pr(16) + conds += [And(p < q, u < v, bstar >= 0, cstar >= 0, + c1, c2, c3, c4, c6, c9, c11, c13, c14)] # 17 + pr(17) + conds += [And(Eq(t, 0), s.is_positive is True, bstar.is_positive is True, phi.is_positive is True, c1, c2, c10)] # 18 + pr(18) + conds += [And(Eq(s, 0), t.is_positive is True, bstar.is_positive is True, phi.is_negative is True, c1, c3, c10)] # 19 + pr(19) + conds += [And(Eq(n, 0), m.is_positive is True, cstar.is_positive is True, phi.is_negative is True, c1, c2, c12)] # 20 + pr(20) + conds += [And(Eq(m, 0), n.is_positive is True, cstar.is_positive is True, phi.is_positive is True, c1, c3, c12)] # 21 + pr(21) + conds += [And(Eq(s*t, 0), bstar.is_positive is True, cstar.is_positive is True, + c1, c2, c3, c10, c12)] # 22 + pr(22) + conds += [And(Eq(m*n, 0), bstar.is_positive is True, cstar.is_positive is True, + c1, c2, c3, c10, c12)] # 23 + pr(23) + + # The following case is from [Luke1969]. As far as I can tell, it is *not* + # covered by Prudnikov's. + # Let G1 and G2 be the two G-functions. Suppose the integral exists from + # 0 to a > 0 (this is easy the easy part), that G1 is exponential decay at + # infinity, and that the mellin transform of G2 exists. + # Then the integral exists. + mt1_exists = _check_antecedents_1(g1, x, helper=True) + mt2_exists = _check_antecedents_1(g2, x, helper=True) + conds += [And(mt2_exists, Eq(t, 0), u < s, bstar.is_positive is True, c10, c1, c2, c3)] + pr('E1') + conds += [And(mt2_exists, Eq(s, 0), v < t, bstar.is_positive is True, c10, c1, c2, c3)] + pr('E2') + conds += [And(mt1_exists, Eq(n, 0), p < m, cstar.is_positive is True, c12, c1, c2, c3)] + pr('E3') + conds += [And(mt1_exists, Eq(m, 0), q < n, cstar.is_positive is True, c12, c1, c2, c3)] + pr('E4') + + # Let's short-circuit if this worked ... + # the rest is corner-cases and terrible to read. 
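+    # Editorial note (paraphrase, not part of the upstream SymPy source):
+    # _eval_cond() may return a symbolic condition rather than a bool, so the
+    # ``!= False`` test below bails out early unless the disjunction provably
+    # simplifies to False; only in that case are the remaining corner cases
+    # (24-35) appended.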
+ r = Or(*conds) + if _eval_cond(r) != False: + return r + + conds += [And(m + n > p, Eq(t, 0), Eq(phi, 0), s.is_positive is True, bstar.is_positive is True, cstar.is_negative is True, + Abs(unbranched_argument(omega)) < (m + n - p + 1)*pi, + c1, c2, c10, c14, c15)] # 24 + pr(24) + conds += [And(m + n > q, Eq(s, 0), Eq(phi, 0), t.is_positive is True, bstar.is_positive is True, cstar.is_negative is True, + Abs(unbranched_argument(omega)) < (m + n - q + 1)*pi, + c1, c3, c10, c14, c15)] # 25 + pr(25) + conds += [And(Eq(p, q - 1), Eq(t, 0), Eq(phi, 0), s.is_positive is True, bstar.is_positive is True, + cstar >= 0, cstar*pi < Abs(unbranched_argument(omega)), + c1, c2, c10, c14, c15)] # 26 + pr(26) + conds += [And(Eq(p, q + 1), Eq(s, 0), Eq(phi, 0), t.is_positive is True, bstar.is_positive is True, + cstar >= 0, cstar*pi < Abs(unbranched_argument(omega)), + c1, c3, c10, c14, c15)] # 27 + pr(27) + conds += [And(p < q - 1, Eq(t, 0), Eq(phi, 0), s.is_positive is True, bstar.is_positive is True, + cstar >= 0, cstar*pi < Abs(unbranched_argument(omega)), + Abs(unbranched_argument(omega)) < (m + n - p + 1)*pi, + c1, c2, c10, c14, c15)] # 28 + pr(28) + conds += [And( + p > q + 1, Eq(s, 0), Eq(phi, 0), t.is_positive is True, bstar.is_positive is True, cstar >= 0, + cstar*pi < Abs(unbranched_argument(omega)), + Abs(unbranched_argument(omega)) < (m + n - q + 1)*pi, + c1, c3, c10, c14, c15)] # 29 + pr(29) + conds += [And(Eq(n, 0), Eq(phi, 0), s + t > 0, m.is_positive is True, cstar.is_positive is True, bstar.is_negative is True, + Abs(unbranched_argument(sigma)) < (s + t - u + 1)*pi, + c1, c2, c12, c14, c15)] # 30 + pr(30) + conds += [And(Eq(m, 0), Eq(phi, 0), s + t > v, n.is_positive is True, cstar.is_positive is True, bstar.is_negative is True, + Abs(unbranched_argument(sigma)) < (s + t - v + 1)*pi, + c1, c3, c12, c14, c15)] # 31 + pr(31) + conds += [And(Eq(n, 0), Eq(phi, 0), Eq(u, v - 1), m.is_positive is True, cstar.is_positive is True, + bstar >= 0, bstar*pi < Abs(unbranched_argument(sigma)), + Abs(unbranched_argument(sigma)) < (bstar + 1)*pi, + c1, c2, c12, c14, c15)] # 32 + pr(32) + conds += [And(Eq(m, 0), Eq(phi, 0), Eq(u, v + 1), n.is_positive is True, cstar.is_positive is True, + bstar >= 0, bstar*pi < Abs(unbranched_argument(sigma)), + Abs(unbranched_argument(sigma)) < (bstar + 1)*pi, + c1, c3, c12, c14, c15)] # 33 + pr(33) + conds += [And( + Eq(n, 0), Eq(phi, 0), u < v - 1, m.is_positive is True, cstar.is_positive is True, bstar >= 0, + bstar*pi < Abs(unbranched_argument(sigma)), + Abs(unbranched_argument(sigma)) < (s + t - u + 1)*pi, + c1, c2, c12, c14, c15)] # 34 + pr(34) + conds += [And( + Eq(m, 0), Eq(phi, 0), u > v + 1, n.is_positive is True, cstar.is_positive is True, bstar >= 0, + bstar*pi < Abs(unbranched_argument(sigma)), + Abs(unbranched_argument(sigma)) < (s + t - v + 1)*pi, + c1, c3, c12, c14, c15)] # 35 + pr(35) + + return Or(*conds) + + # NOTE An alternative, but as far as I can tell weaker, set of conditions + # can be found in [L, section 5.6.2]. + + +def _int0oo(g1, g2, x): + """ + Express integral from zero to infinity g1*g2 using a G function, + assuming the necessary conditions are fulfilled. 
+ + Examples + ======== + + >>> from sympy.integrals.meijerint import _int0oo + >>> from sympy.abc import s, t, m + >>> from sympy import meijerg, S + >>> g1 = meijerg([], [], [-S(1)/2, 0], [], s**2*t/4) + >>> g2 = meijerg([], [], [m/2], [-m/2], t/4) + >>> _int0oo(g1, g2, t) + 4*meijerg(((1/2, 0), ()), ((m/2,), (-m/2,)), s**(-2))/s**2 + """ + # See: [L, section 5.6.2, equation (1)] + eta, _ = _get_coeff_exp(g1.argument, x) + omega, _ = _get_coeff_exp(g2.argument, x) + + def neg(l): + return [-x for x in l] + a1 = neg(g1.bm) + list(g2.an) + a2 = list(g2.aother) + neg(g1.bother) + b1 = neg(g1.an) + list(g2.bm) + b2 = list(g2.bother) + neg(g1.aother) + return meijerg(a1, a2, b1, b2, omega/eta)/eta + + +def _rewrite_inversion(fac, po, g, x): + """ Absorb ``po`` == x**s into g. """ + _, s = _get_coeff_exp(po, x) + a, b = _get_coeff_exp(g.argument, x) + + def tr(l): + return [t + s/b for t in l] + from sympy.simplify import powdenest + return (powdenest(fac/a**(s/b), polar=True), + meijerg(tr(g.an), tr(g.aother), tr(g.bm), tr(g.bother), g.argument)) + + +def _check_antecedents_inversion(g, x): + """ Check antecedents for the laplace inversion integral. """ + _debug('Checking antecedents for inversion:') + z = g.argument + _, e = _get_coeff_exp(z, x) + if e < 0: + _debug(' Flipping G.') + # We want to assume that argument gets large as |x| -> oo + return _check_antecedents_inversion(_flip_g(g), x) + + def statement_half(a, b, c, z, plus): + coeff, exponent = _get_coeff_exp(z, x) + a *= exponent + b *= coeff**c + c *= exponent + conds = [] + wp = b*exp(S.ImaginaryUnit*re(c)*pi/2) + wm = b*exp(-S.ImaginaryUnit*re(c)*pi/2) + if plus: + w = wp + else: + w = wm + conds += [And(Or(Eq(b, 0), re(c) <= 0), re(a) <= -1)] + conds += [And(Ne(b, 0), Eq(im(c), 0), re(c) > 0, re(w) < 0)] + conds += [And(Ne(b, 0), Eq(im(c), 0), re(c) > 0, re(w) <= 0, + re(a) <= -1)] + return Or(*conds) + + def statement(a, b, c, z): + """ Provide a convergence statement for z**a * exp(b*z**c), + c/f sphinx docs. """ + return And(statement_half(a, b, c, z, True), + statement_half(a, b, c, z, False)) + + # Notations from [L], section 5.7-10 + m, n, p, q = S([len(g.bm), len(g.an), len(g.ap), len(g.bq)]) + tau = m + n - p + nu = q - m - n + rho = (tau - nu)/2 + sigma = q - p + if sigma == 1: + epsilon = S.Half + elif sigma > 1: + epsilon = 1 + else: + epsilon = S.NaN + theta = ((1 - sigma)/2 + Add(*g.bq) - Add(*g.ap))/sigma + delta = g.delta + _debugf(' m=%s, n=%s, p=%s, q=%s, tau=%s, nu=%s, rho=%s, sigma=%s', + (m, n, p, q, tau, nu, rho, sigma)) + _debugf(' epsilon=%s, theta=%s, delta=%s', (epsilon, theta, delta)) + + # First check if the computation is valid. + if not (g.delta >= e/2 or (p >= 1 and p >= q)): + _debug(' Computation not valid for these parameters.') + return False + + # Now check if the inversion integral exists. + + # Test "condition A" + for a, b in itertools.product(g.an, g.bm): + if (a - b).is_integer and a > b: + _debug(' Not a valid G function.') + return False + + # There are two cases. If p >= q, we can directly use a slater expansion + # like [L], 5.2 (11). Note in particular that the asymptotics of such an + # expansion even hold when some of the parameters differ by integers, i.e. + # the formula itself would not be valid! (b/c G functions are cts. in their + # parameters) + # When p < q, we need to use the theorems of [L], 5.10. 
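+    # Editorial note (paraphrase, not part of the upstream SymPy source): for
+    # p >= q the Slater expansion makes G(z) behave, for large |z|, like a
+    # combination of powers z**(a - 1) with a in g.an, so convergence of the
+    # inversion integral reduces to statement(a - 1, 0, 0, z) for each such a,
+    # which is exactly what the branch below returns; the p < q branch instead
+    # builds its conditions from the theorems of [L], section 5.10.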
+ + if p >= q: + _debug(' Using asymptotic Slater expansion.') + return And(*[statement(a - 1, 0, 0, z) for a in g.an]) + + def E(z): + return And(*[statement(a - 1, 0, 0, z) for a in g.an]) + + def H(z): + return statement(theta, -sigma, 1/sigma, z) + + def Hp(z): + return statement_half(theta, -sigma, 1/sigma, z, True) + + def Hm(z): + return statement_half(theta, -sigma, 1/sigma, z, False) + + # [L], section 5.10 + conds = [] + # Theorem 1 -- p < q from test above + conds += [And(1 <= n, 1 <= m, rho*pi - delta >= pi/2, delta > 0, + E(z*exp(S.ImaginaryUnit*pi*(nu + 1))))] + # Theorem 2, statements (2) and (3) + conds += [And(p + 1 <= m, m + 1 <= q, delta > 0, delta < pi/2, n == 0, + (m - p + 1)*pi - delta >= pi/2, + Hp(z*exp(S.ImaginaryUnit*pi*(q - m))), + Hm(z*exp(-S.ImaginaryUnit*pi*(q - m))))] + # Theorem 2, statement (5) -- p < q from test above + conds += [And(m == q, n == 0, delta > 0, + (sigma + epsilon)*pi - delta >= pi/2, H(z))] + # Theorem 3, statements (6) and (7) + conds += [And(Or(And(p <= q - 2, 1 <= tau, tau <= sigma/2), + And(p + 1 <= m + n, m + n <= (p + q)/2)), + delta > 0, delta < pi/2, (tau + 1)*pi - delta >= pi/2, + Hp(z*exp(S.ImaginaryUnit*pi*nu)), + Hm(z*exp(-S.ImaginaryUnit*pi*nu)))] + # Theorem 4, statements (10) and (11) -- p < q from test above + conds += [And(1 <= m, rho > 0, delta > 0, delta + rho*pi < pi/2, + (tau + epsilon)*pi - delta >= pi/2, + Hp(z*exp(S.ImaginaryUnit*pi*nu)), + Hm(z*exp(-S.ImaginaryUnit*pi*nu)))] + # Trivial case + conds += [m == 0] + + # TODO + # Theorem 5 is quite general + # Theorem 6 contains special cases for q=p+1 + + return Or(*conds) + + +def _int_inversion(g, x, t): + """ + Compute the laplace inversion integral, assuming the formula applies. + """ + b, a = _get_coeff_exp(g.argument, x) + C, g = _inflate_fox_h(meijerg(g.an, g.aother, g.bm, g.bother, b/t**a), -a) + return C/t*g + + +#################################################################### +# Finally, the real meat. +#################################################################### + +_lookup_table = None + + +@cacheit +@timeit +def _rewrite_single(f, x, recursive=True): + """ + Try to rewrite f as a sum of single G functions of the form + C*x**s*G(a*x**b), where b is a rational number and C is independent of x. + We guarantee that result.argument.as_coeff_mul(x) returns (a, (x**b,)) + or (a, ()). + Returns a list of tuples (C, s, G) and a condition cond. + Returns None on failure. 
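+
+    Two strategies are used: a lookup table of known G-function
+    representations (built on first use by ``_create_lookup_table``), and, if
+    that fails and ``recursive`` is True, a recursive Mellin transform whose
+    inverse is computed directly as a G function.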
+ """ + from .transforms import (mellin_transform, inverse_mellin_transform, + IntegralTransformError, MellinTransformStripError) + + global _lookup_table + if not _lookup_table: + _lookup_table = {} + _create_lookup_table(_lookup_table) + + if isinstance(f, meijerg): + coeff, m = factor(f.argument, x).as_coeff_mul(x) + if len(m) > 1: + return None + m = m[0] + if m.is_Pow: + if m.base != x or not m.exp.is_Rational: + return None + elif m != x: + return None + return [(1, 0, meijerg(f.an, f.aother, f.bm, f.bother, coeff*m))], True + + f_ = f + f = f.subs(x, z) + t = _mytype(f, z) + if t in _lookup_table: + l = _lookup_table[t] + for formula, terms, cond, hint in l: + subs = f.match(formula, old=True) + if subs: + subs_ = {} + for fro, to in subs.items(): + subs_[fro] = unpolarify(polarify(to, lift=True), + exponents_only=True) + subs = subs_ + if not isinstance(hint, bool): + hint = hint.subs(subs) + if hint == False: + continue + if not isinstance(cond, (bool, BooleanAtom)): + cond = unpolarify(cond.subs(subs)) + if _eval_cond(cond) == False: + continue + if not isinstance(terms, list): + terms = terms(subs) + res = [] + for fac, g in terms: + r1 = _get_coeff_exp(unpolarify(fac.subs(subs).subs(z, x), + exponents_only=True), x) + try: + g = g.subs(subs).subs(z, x) + except ValueError: + continue + # NOTE these substitutions can in principle introduce oo, + # zoo and other absurdities. It shouldn't matter, + # but better be safe. + if Tuple(*(r1 + (g,))).has(S.Infinity, S.ComplexInfinity, S.NegativeInfinity): + continue + g = meijerg(g.an, g.aother, g.bm, g.bother, + unpolarify(g.argument, exponents_only=True)) + res.append(r1 + (g,)) + if res: + return res, cond + + # try recursive mellin transform + if not recursive: + return None + _debug('Trying recursive Mellin transform method.') + + def my_imt(F, s, x, strip): + """ Calling simplify() all the time is slow and not helpful, since + most of the time it only factors things in a way that has to be + un-done anyway. But sometimes it can remove apparent poles. """ + # XXX should this be in inverse_mellin_transform? + try: + return inverse_mellin_transform(F, s, x, strip, + as_meijerg=True, needeval=True) + except MellinTransformStripError: + from sympy.simplify import simplify + return inverse_mellin_transform( + simplify(cancel(expand(F))), s, x, strip, + as_meijerg=True, needeval=True) + f = f_ + s = _dummy('s', 'rewrite-single', f) + # to avoid infinite recursion, we have to force the two g functions case + + def my_integrator(f, x): + r = _meijerint_definite_4(f, x, only_double=True) + if r is not None: + from sympy.simplify import hyperexpand + res, cond = r + res = _my_unpolarify(hyperexpand(res, rewrite='nonrepsmall')) + return Piecewise((res, cond), + (Integral(f, (x, S.Zero, S.Infinity)), True)) + return Integral(f, (x, S.Zero, S.Infinity)) + try: + F, strip, _ = mellin_transform(f, x, s, integrator=my_integrator, + simplify=False, needeval=True) + g = my_imt(F, s, x, strip) + except IntegralTransformError: + g = None + if g is None: + # We try to find an expression by analytic continuation. 
+ # (also if the dummy is already in the expression, there is no point in + # putting in another one) + a = _dummy_('a', 'rewrite-single') + if a not in f.free_symbols and _is_analytic(f, x): + try: + F, strip, _ = mellin_transform(f.subs(x, a*x), x, s, + integrator=my_integrator, + needeval=True, simplify=False) + g = my_imt(F, s, x, strip).subs(a, 1) + except IntegralTransformError: + g = None + if g is None or g.has(S.Infinity, S.NaN, S.ComplexInfinity): + _debug('Recursive Mellin transform failed.') + return None + args = Add.make_args(g) + res = [] + for f in args: + c, m = f.as_coeff_mul(x) + if len(m) > 1: + raise NotImplementedError('Unexpected form...') + g = m[0] + a, b = _get_coeff_exp(g.argument, x) + res += [(c, 0, meijerg(g.an, g.aother, g.bm, g.bother, + unpolarify(polarify( + a, lift=True), exponents_only=True) + *x**b))] + _debug('Recursive Mellin transform worked:', g) + return res, True + + +def _rewrite1(f, x, recursive=True): + """ + Try to rewrite ``f`` using a (sum of) single G functions with argument a*x**b. + Return fac, po, g such that f = fac*po*g, fac is independent of ``x``. + and po = x**s. + Here g is a result from _rewrite_single. + Return None on failure. + """ + fac, po, g = _split_mul(f, x) + g = _rewrite_single(g, x, recursive) + if g: + return fac, po, g[0], g[1] + + +def _rewrite2(f, x): + """ + Try to rewrite ``f`` as a product of two G functions of arguments a*x**b. + Return fac, po, g1, g2 such that f = fac*po*g1*g2, where fac is + independent of x and po is x**s. + Here g1 and g2 are results of _rewrite_single. + Returns None on failure. + """ + fac, po, g = _split_mul(f, x) + if any(_rewrite_single(expr, x, False) is None for expr in _mul_args(g)): + return None + l = _mul_as_two_parts(g) + if not l: + return None + l = list(ordered(l, [ + lambda p: max(len(_exponents(p[0], x)), len(_exponents(p[1], x))), + lambda p: max(len(_functions(p[0], x)), len(_functions(p[1], x))), + lambda p: max(len(_find_splitting_points(p[0], x)), + len(_find_splitting_points(p[1], x)))])) + + for recursive, (fac1, fac2) in itertools.product((False, True), l): + g1 = _rewrite_single(fac1, x, recursive) + g2 = _rewrite_single(fac2, x, recursive) + if g1 and g2: + cond = And(g1[1], g2[1]) + if cond != False: + return fac, po, g1[0], g2[0], cond + + +def meijerint_indefinite(f, x): + """ + Compute an indefinite integral of ``f`` by rewriting it as a G function. + + Examples + ======== + + >>> from sympy.integrals.meijerint import meijerint_indefinite + >>> from sympy import sin + >>> from sympy.abc import x + >>> meijerint_indefinite(sin(x), x) + -cos(x) + """ + f = sympify(f) + results = [] + for a in sorted(_find_splitting_points(f, x) | {S.Zero}, key=default_sort_key): + res = _meijerint_indefinite_1(f.subs(x, x + a), x) + if not res: + continue + res = res.subs(x, x - a) + if _has(res, hyper, meijerg): + results.append(res) + else: + return res + if f.has(HyperbolicFunction): + _debug('Try rewriting hyperbolics in terms of exp.') + rv = meijerint_indefinite( + _rewrite_hyperbolics_as_exp(f), x) + if rv: + if not isinstance(rv, list): + from sympy.simplify.radsimp import collect + return collect(factor_terms(rv), rv.atoms(exp)) + results.extend(rv) + if results: + return next(ordered(results)) + + +def _meijerint_indefinite_1(f, x): + """ Helper that does not attempt any substitution. 
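+
+    The integrand is rewritten by ``_rewrite1`` as fac*x**s times a sum of
+    G functions with argument a*x**b; each term is integrated using the
+    G-function antiderivative identities from [L] and [R], and the result is
+    wrapped in a Piecewise guarded by the convergence condition.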
""" + _debug('Trying to compute the indefinite integral of', f, 'wrt', x) + from sympy.simplify import hyperexpand, powdenest + + gs = _rewrite1(f, x) + if gs is None: + # Note: the code that calls us will do expand() and try again + return None + + fac, po, gl, cond = gs + _debug(' could rewrite:', gs) + res = S.Zero + for C, s, g in gl: + a, b = _get_coeff_exp(g.argument, x) + _, c = _get_coeff_exp(po, x) + c += s + + # we do a substitution t=a*x**b, get integrand fac*t**rho*g + fac_ = fac * C / (b*a**((1 + c)/b)) + rho = (c + 1)/b - 1 + + # we now use t**rho*G(params, t) = G(params + rho, t) + # [L, page 150, equation (4)] + # and integral G(params, t) dt = G(1, params+1, 0, t) + # (or a similar expression with 1 and 0 exchanged ... pick the one + # which yields a well-defined function) + # [R, section 5] + # (Note that this dummy will immediately go away again, so we + # can safely pass S.One for ``expr``.) + t = _dummy('t', 'meijerint-indefinite', S.One) + + def tr(p): + return [a + rho + 1 for a in p] + if any(b.is_integer and (b <= 0) == True for b in tr(g.bm)): + r = -meijerg( + tr(g.an), tr(g.aother) + [1], tr(g.bm) + [0], tr(g.bother), t) + else: + r = meijerg( + tr(g.an) + [1], tr(g.aother), tr(g.bm), tr(g.bother) + [0], t) + # The antiderivative is most often expected to be defined + # in the neighborhood of x = 0. + if b.is_extended_nonnegative and not f.subs(x, 0).has(S.NaN, S.ComplexInfinity): + place = 0 # Assume we can expand at zero + else: + place = None + r = hyperexpand(r.subs(t, a*x**b), place=place) + + # now substitute back + # Note: we really do want the powers of x to combine. + res += powdenest(fac_*r, polar=True) + + def _clean(res): + """This multiplies out superfluous powers of x we created, and chops off + constants: + + >> _clean(x*(exp(x)/x - 1/x) + 3) + exp(x) + + cancel is used before mul_expand since it is possible for an + expression to have an additive constant that does not become isolated + with simple expansion. Such a situation was identified in issue 6369: + + Examples + ======== + + >>> from sympy import sqrt, cancel + >>> from sympy.abc import x + >>> a = sqrt(2*x + 1) + >>> bad = (3*x*a**5 + 2*x - a**5 + 1)/a**2 + >>> bad.expand().as_independent(x)[0] + 0 + >>> cancel(bad).expand().as_independent(x)[0] + 1 + """ + res = expand_mul(cancel(res), deep=False) + return Add._from_args(res.as_coeff_add(x)[1]) + + res = piecewise_fold(res, evaluate=None) + if res.is_Piecewise: + newargs = [] + for e, c in res.args: + e = _my_unpolarify(_clean(e)) + newargs += [(e, c)] + res = Piecewise(*newargs, evaluate=False) + else: + res = _my_unpolarify(_clean(res)) + return Piecewise((res, _my_unpolarify(cond)), (Integral(f, x), True)) + + +@timeit +def meijerint_definite(f, x, a, b): + """ + Integrate ``f`` over the interval [``a``, ``b``], by rewriting it as a product + of two G functions, or as a single G function. + + Return res, cond, where cond are convergence conditions. + + Examples + ======== + + >>> from sympy.integrals.meijerint import meijerint_definite + >>> from sympy import exp, oo + >>> from sympy.abc import x + >>> meijerint_definite(exp(-x**2), x, -oo, oo) + (sqrt(pi), True) + + This function is implemented as a succession of functions + meijerint_definite, _meijerint_definite_2, _meijerint_definite_3, + _meijerint_definite_4. Each function in the list calls the next one + (presumably) several times. This means that calling meijerint_definite + can be very costly. 
+ """ + # This consists of three steps: + # 1) Change the integration limits to 0, oo + # 2) Rewrite in terms of G functions + # 3) Evaluate the integral + # + # There are usually several ways of doing this, and we want to try all. + # This function does (1), calls _meijerint_definite_2 for step (2). + _debugf('Integrating %s wrt %s from %s to %s.', (f, x, a, b)) + f = sympify(f) + if f.has(DiracDelta): + _debug('Integrand has DiracDelta terms - giving up.') + return None + + if f.has(SingularityFunction): + _debug('Integrand has Singularity Function terms - giving up.') + return None + + f_, x_, a_, b_ = f, x, a, b + + # Let's use a dummy in case any of the boundaries has x. + d = Dummy('x') + f = f.subs(x, d) + x = d + + if a == b: + return (S.Zero, True) + + results = [] + if a is S.NegativeInfinity and b is not S.Infinity: + return meijerint_definite(f.subs(x, -x), x, -b, -a) + + elif a is S.NegativeInfinity: + # Integrating -oo to oo. We need to find a place to split the integral. + _debug(' Integrating -oo to +oo.') + innermost = _find_splitting_points(f, x) + _debug(' Sensible splitting points:', innermost) + for c in sorted(innermost, key=default_sort_key, reverse=True) + [S.Zero]: + _debug(' Trying to split at', c) + if not c.is_extended_real: + _debug(' Non-real splitting point.') + continue + res1 = _meijerint_definite_2(f.subs(x, x + c), x) + if res1 is None: + _debug(' But could not compute first integral.') + continue + res2 = _meijerint_definite_2(f.subs(x, c - x), x) + if res2 is None: + _debug(' But could not compute second integral.') + continue + res1, cond1 = res1 + res2, cond2 = res2 + cond = _condsimp(And(cond1, cond2)) + if cond == False: + _debug(' But combined condition is always false.') + continue + res = res1 + res2 + return res, cond + + elif a is S.Infinity: + res = meijerint_definite(f, x, b, S.Infinity) + return -res[0], res[1] + + elif (a, b) == (S.Zero, S.Infinity): + # This is a common case - try it directly first. + res = _meijerint_definite_2(f, x) + if res: + if _has(res[0], meijerg): + results.append(res) + else: + return res + + else: + if b is S.Infinity: + for split in _find_splitting_points(f, x): + if (a - split >= 0) == True: + _debugf('Trying x -> x + %s', split) + res = _meijerint_definite_2(f.subs(x, x + split) + *Heaviside(x + split - a), x) + if res: + if _has(res[0], meijerg): + results.append(res) + else: + return res + + f = f.subs(x, x + a) + b = b - a + a = 0 + if b is not S.Infinity: + phi = exp(S.ImaginaryUnit*arg(b)) + b = Abs(b) + f = f.subs(x, phi*x) + f *= Heaviside(b - x)*phi + b = S.Infinity + + _debug('Changed limits to', a, b) + _debug('Changed function to', f) + res = _meijerint_definite_2(f, x) + if res: + if _has(res[0], meijerg): + results.append(res) + else: + return res + if f_.has(HyperbolicFunction): + _debug('Try rewriting hyperbolics in terms of exp.') + rv = meijerint_definite( + _rewrite_hyperbolics_as_exp(f_), x_, a_, b_) + if rv: + if not isinstance(rv, list): + from sympy.simplify.radsimp import collect + rv = (collect(factor_terms(rv[0]), rv[0].atoms(exp)),) + rv[1:] + return rv + results.extend(rv) + if results: + return next(ordered(results)) + + +def _guess_expansion(f, x): + """ Try to guess sensible rewritings for integrand f(x). 
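+
+    The rewritings tried are the original integrand, ``expand_mul``,
+    ``expand``, ``expand_trig`` followed by ``expand_mul`` (when trigonometric
+    or hyperbolic functions are present), and sin/cos power reduction via
+    ``sincos_to_sum``.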
""" + res = [(f, 'original integrand')] + + orig = res[-1][0] + saw = {orig} + expanded = expand_mul(orig) + if expanded not in saw: + res += [(expanded, 'expand_mul')] + saw.add(expanded) + + expanded = expand(orig) + if expanded not in saw: + res += [(expanded, 'expand')] + saw.add(expanded) + + if orig.has(TrigonometricFunction, HyperbolicFunction): + expanded = expand_mul(expand_trig(orig)) + if expanded not in saw: + res += [(expanded, 'expand_trig, expand_mul')] + saw.add(expanded) + + if orig.has(cos, sin): + from sympy.simplify.fu import sincos_to_sum + reduced = sincos_to_sum(orig) + if reduced not in saw: + res += [(reduced, 'trig power reduction')] + saw.add(reduced) + + return res + + +def _meijerint_definite_2(f, x): + """ + Try to integrate f dx from zero to infinity. + + The body of this function computes various 'simplifications' + f1, f2, ... of f (e.g. by calling expand_mul(), trigexpand() + - see _guess_expansion) and calls _meijerint_definite_3 with each of + these in succession. + If _meijerint_definite_3 succeeds with any of the simplified functions, + returns this result. + """ + # This function does preparation for (2), calls + # _meijerint_definite_3 for (2) and (3) combined. + + # use a positive dummy - we integrate from 0 to oo + # XXX if a nonnegative symbol is used there will be test failures + dummy = _dummy('x', 'meijerint-definite2', f, positive=True) + f = f.subs(x, dummy) + x = dummy + + if f == 0: + return S.Zero, True + + for g, explanation in _guess_expansion(f, x): + _debug('Trying', explanation) + res = _meijerint_definite_3(g, x) + if res: + return res + + +def _meijerint_definite_3(f, x): + """ + Try to integrate f dx from zero to infinity. + + This function calls _meijerint_definite_4 to try to compute the + integral. If this fails, it tries using linearity. + """ + res = _meijerint_definite_4(f, x) + if res and res[1] != False: + return res + if f.is_Add: + _debug('Expanding and evaluating all terms.') + ress = [_meijerint_definite_4(g, x) for g in f.args] + if all(r is not None for r in ress): + conds = [] + res = S.Zero + for r, c in ress: + res += r + conds += [c] + c = And(*conds) + if c != False: + return res, c + + +def _my_unpolarify(f): + return _eval_cond(unpolarify(f)) + + +@timeit +def _meijerint_definite_4(f, x, only_double=False): + """ + Try to integrate f dx from zero to infinity. + + Explanation + =========== + + This function tries to apply the integration theorems found in literature, + i.e. it tries to rewrite f as either one or a product of two G-functions. + + The parameter ``only_double`` is used internally in the recursive algorithm + to disable trying to rewrite f as a single G-function. + """ + from sympy.simplify import hyperexpand + # This function does (2) and (3) + _debug('Integrating', f) + # Try single G function. + if not only_double: + gs = _rewrite1(f, x, recursive=False) + if gs is not None: + fac, po, g, cond = gs + _debug('Could rewrite as single G function:', fac, po, g) + res = S.Zero + for C, s, f in g: + if C == 0: + continue + C, f = _rewrite_saxena_1(fac*C, po*x**s, f, x) + res += C*_int0oo_1(f, x) + cond = And(cond, _check_antecedents_1(f, x)) + if cond == False: + break + cond = _my_unpolarify(cond) + if cond == False: + _debug('But cond is always False.') + else: + _debug('Result before branch substitutions is:', res) + return _my_unpolarify(hyperexpand(res)), cond + + # Try two G functions. 
+ gs = _rewrite2(f, x) + if gs is not None: + for full_pb in [False, True]: + fac, po, g1, g2, cond = gs + _debug('Could rewrite as two G functions:', fac, po, g1, g2) + res = S.Zero + for C1, s1, f1 in g1: + for C2, s2, f2 in g2: + r = _rewrite_saxena(fac*C1*C2, po*x**(s1 + s2), + f1, f2, x, full_pb) + if r is None: + _debug('Non-rational exponents.') + return + C, f1_, f2_ = r + _debug('Saxena subst for yielded:', C, f1_, f2_) + cond = And(cond, _check_antecedents(f1_, f2_, x)) + if cond == False: + break + res += C*_int0oo(f1_, f2_, x) + else: + continue + break + cond = _my_unpolarify(cond) + if cond == False: + _debugf('But cond is always False (full_pb=%s).', full_pb) + else: + _debugf('Result before branch substitutions is: %s', (res, )) + if only_double: + return res, cond + return _my_unpolarify(hyperexpand(res)), cond + + +def meijerint_inversion(f, x, t): + r""" + Compute the inverse laplace transform + $\int_{c+i\infty}^{c-i\infty} f(x) e^{tx}\, dx$, + for real c larger than the real part of all singularities of ``f``. + + Note that ``t`` is always assumed real and positive. + + Return None if the integral does not exist or could not be evaluated. + + Examples + ======== + + >>> from sympy.abc import x, t + >>> from sympy.integrals.meijerint import meijerint_inversion + >>> meijerint_inversion(1/x, x, t) + Heaviside(t) + """ + f_ = f + t_ = t + t = Dummy('t', polar=True) # We don't want sqrt(t**2) = abs(t) etc + f = f.subs(t_, t) + _debug('Laplace-inverting', f) + if not _is_analytic(f, x): + _debug('But expression is not analytic.') + return None + # Exponentials correspond to shifts; we filter them out and then + # shift the result later. If we are given an Add this will not + # work, but the calling code will take care of that. + shift = S.Zero + + if f.is_Mul: + args = list(f.args) + elif isinstance(f, exp): + args = [f] + else: + args = None + + if args: + newargs = [] + exponentials = [] + while args: + arg = args.pop() + if isinstance(arg, exp): + arg2 = expand(arg) + if arg2.is_Mul: + args += arg2.args + continue + try: + a, b = _get_coeff_exp(arg.args[0], x) + except _CoeffExpValueError: + b = 0 + if b == 1: + exponentials.append(a) + else: + newargs.append(arg) + elif arg.is_Pow: + arg2 = expand(arg) + if arg2.is_Mul: + args += arg2.args + continue + if x not in arg.base.free_symbols: + try: + a, b = _get_coeff_exp(arg.exp, x) + except _CoeffExpValueError: + b = 0 + if b == 1: + exponentials.append(a*log(arg.base)) + newargs.append(arg) + else: + newargs.append(arg) + shift = Add(*exponentials) + f = Mul(*newargs) + + if x not in f.free_symbols: + _debug('Expression consists of constant and exp shift:', f, shift) + cond = Eq(im(shift), 0) + if cond == False: + _debug('but shift is nonreal, cannot be a Laplace transform') + return None + res = f*DiracDelta(t + shift) + _debug('Result is a delta function, possibly conditional:', res, cond) + # cond is True or Eq + return Piecewise((res.subs(t, t_), cond)) + + gs = _rewrite1(f, x) + if gs is not None: + fac, po, g, cond = gs + _debug('Could rewrite as single G function:', fac, po, g) + res = S.Zero + for C, s, f in g: + C, f = _rewrite_inversion(fac*C, po*x**s, f, x) + res += C*_int_inversion(f, x, t) + cond = And(cond, _check_antecedents_inversion(f, x)) + if cond == False: + break + cond = _my_unpolarify(cond) + if cond == False: + _debug('But cond is always False.') + else: + _debug('Result before branch substitution:', res) + from sympy.simplify import hyperexpand + res = _my_unpolarify(hyperexpand(res)) + if not 
res.has(Heaviside): + res *= Heaviside(t) + res = res.subs(t, t + shift) + if not isinstance(cond, bool): + cond = cond.subs(t, t + shift) + from .transforms import InverseLaplaceTransform + return Piecewise((res.subs(t, t_), cond), + (InverseLaplaceTransform(f_.subs(t, t_), x, t_, None), True)) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/meijerint_doc.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/meijerint_doc.py new file mode 100644 index 0000000000000000000000000000000000000000..712a52e183006528b8c50a56207d44742e28f0c0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/meijerint_doc.py @@ -0,0 +1,37 @@ +""" This module cooks up a docstring when imported. Its only purpose is to + be displayed in the sphinx documentation. """ + +from __future__ import annotations +from typing import Any + +from sympy.integrals.meijerint import _create_lookup_table +from sympy.core.add import Add +from sympy.core.basic import Basic +from sympy.core.relational import Eq +from sympy.core.symbol import Symbol +from sympy.printing.latex import latex + +t: dict[tuple[type[Basic], ...], list[Any]] = {} +_create_lookup_table(t) + + +doc = "" +for about, category in t.items(): + if about == (): + doc += 'Elementary functions:\n\n' + else: + doc += 'Functions involving ' + ', '.join('`%s`' % latex( + list(category[0][0].atoms(func))[0]) for func in about) + ':\n\n' + for formula, gs, cond, hint in category: + if not isinstance(gs, list): + g = Symbol('\\text{generated}') + else: + g = Add(*[fac*f for (fac, f) in gs]) + obj = Eq(formula, g) + if cond is True: + cond = "" + else: + cond = ',\\text{ if } %s' % latex(cond) + doc += ".. math::\n %s%s\n\n" % (latex(obj), cond) + +__doc__ = doc diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/prde.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/prde.py new file mode 100644 index 0000000000000000000000000000000000000000..80a8b44e236e858764ac8a761f05f07670e7c86a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/prde.py @@ -0,0 +1,1332 @@ +""" +Algorithms for solving Parametric Risch Differential Equations. + +The methods used for solving Parametric Risch Differential Equations parallel +those for solving Risch Differential Equations. See the outline in the +docstring of rde.py for more information. + +The Parametric Risch Differential Equation problem is, given f, g1, ..., gm in +K(t), to determine if there exist y in K(t) and c1, ..., cm in Const(K) such +that Dy + f*y == Sum(ci*gi, (i, 1, m)), and to find such y and ci if they exist. + +For the algorithms here G is a list of tuples of factions of the terms on the +right hand side of the equation (i.e., gi in k(t)), and Q is a list of terms on +the right hand side of the equation (i.e., qi in k[t]). See the docstring of +each function for more information. 
+""" +import itertools +from functools import reduce + +from sympy.core import Dummy, ilcm, Add, Mul, Pow, S +from sympy.integrals.rde import (order_at, order_at_oo, weak_normalizer, + bound_degree) +from sympy.integrals.risch import (gcdex_diophantine, frac_in, derivation, + residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, + recognize_log_derivative) +from sympy.polys import Poly, lcm, cancel, sqf_list +from sympy.polys.polymatrix import PolyMatrix as Matrix +from sympy.solvers import solve + +zeros = Matrix.zeros +eye = Matrix.eye + + +def prde_normal_denom(fa, fd, G, DE): + """ + Parametric Risch Differential Equation - Normal part of the denominator. + + Explanation + =========== + + Given a derivation D on k[t] and f, g1, ..., gm in k(t) with f weakly + normalized with respect to t, return the tuple (a, b, G, h) such that + a, h in k[t], b in k, G = [g1, ..., gm] in k(t)^m, and for any solution + c1, ..., cm in Const(k) and y in k(t) of Dy + f*y == Sum(ci*gi, (i, 1, m)), + q == y*h in k satisfies a*Dq + b*q == Sum(ci*Gi, (i, 1, m)). + """ + dn, ds = splitfactor(fd, DE) + Gas, Gds = list(zip(*G)) + gd = reduce(lambda i, j: i.lcm(j), Gds, Poly(1, DE.t)) + en, es = splitfactor(gd, DE) + + p = dn.gcd(en) + h = en.gcd(en.diff(DE.t)).quo(p.gcd(p.diff(DE.t))) + + a = dn*h + c = a*h + + ba = a*fa - dn*derivation(h, DE)*fd + ba, bd = ba.cancel(fd, include=True) + + G = [(c*A).cancel(D, include=True) for A, D in G] + + return (a, (ba, bd), G, h) + +def real_imag(ba, bd, gen): + """ + Helper function, to get the real and imaginary part of a rational function + evaluated at sqrt(-1) without actually evaluating it at sqrt(-1). + + Explanation + =========== + + Separates the even and odd power terms by checking the degree of terms wrt + mod 4. Returns a tuple (ba[0], ba[1], bd) where ba[0] is real part + of the numerator ba[1] is the imaginary part and bd is the denominator + of the rational function. + """ + bd = bd.as_poly(gen).as_dict() + ba = ba.as_poly(gen).as_dict() + denom_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2 else 0 for key, value in bd.items()] + denom_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3 else 0 for key, value in bd.items()] + bd_real = sum(r for r in denom_real) + bd_imag = sum(r for r in denom_imag) + num_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2 else 0 for key, value in ba.items()] + num_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3 else 0 for key, value in ba.items()] + ba_real = sum(r for r in num_real) + ba_imag = sum(r for r in num_imag) + ba = ((ba_real*bd_real + ba_imag*bd_imag).as_poly(gen), (ba_imag*bd_real - ba_real*bd_imag).as_poly(gen)) + bd = (bd_real*bd_real + bd_imag*bd_imag).as_poly(gen) + return (ba[0], ba[1], bd) + + +def prde_special_denom(a, ba, bd, G, DE, case='auto'): + """ + Parametric Risch Differential Equation - Special part of the denominator. + + Explanation + =========== + + Case is one of {'exp', 'tan', 'primitive'} for the hyperexponential, + hypertangent, and primitive cases, respectively. For the hyperexponential + (resp. hypertangent) case, given a derivation D on k[t] and a in k[t], + b in k, and g1, ..., gm in k(t) with Dt/t in k (resp. Dt/(t**2 + 1) in + k, sqrt(-1) not in k), a != 0, and gcd(a, t) == 1 (resp. 
+ gcd(a, t**2 + 1) == 1), return the tuple (A, B, GG, h) such that A, B, h in + k[t], GG = [gg1, ..., ggm] in k(t)^m, and for any solution c1, ..., cm in + Const(k) and q in k of a*Dq + b*q == Sum(ci*gi, (i, 1, m)), r == q*h in + k[t] satisfies A*Dr + B*r == Sum(ci*ggi, (i, 1, m)). + + For case == 'primitive', k == k[t], so it returns (a, b, G, 1) in this + case. + """ + # TODO: Merge this with the very similar special_denom() in rde.py + if case == 'auto': + case = DE.case + + if case == 'exp': + p = Poly(DE.t, DE.t) + elif case == 'tan': + p = Poly(DE.t**2 + 1, DE.t) + elif case in ('primitive', 'base'): + B = ba.quo(bd) + return (a, B, G, Poly(1, DE.t)) + else: + raise ValueError("case must be one of {'exp', 'tan', 'primitive', " + "'base'}, not %s." % case) + + nb = order_at(ba, p, DE.t) - order_at(bd, p, DE.t) + nc = min([order_at(Ga, p, DE.t) - order_at(Gd, p, DE.t) for Ga, Gd in G]) + n = min(0, nc - min(0, nb)) + if not nb: + # Possible cancellation. + if case == 'exp': + dcoeff = DE.d.quo(Poly(DE.t, DE.t)) + with DecrementLevel(DE): # We are guaranteed to not have problems, + # because case != 'base'. + alphaa, alphad = frac_in(-ba.eval(0)/bd.eval(0)/a.eval(0), DE.t) + etaa, etad = frac_in(dcoeff, DE.t) + A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE) + if A is not None: + Q, m, z = A + if Q == 1: + n = min(n, m) + + elif case == 'tan': + dcoeff = DE.d.quo(Poly(DE.t**2 + 1, DE.t)) + with DecrementLevel(DE): # We are guaranteed to not have problems, + # because case != 'base'. + betaa, alphaa, alphad = real_imag(ba, bd*a, DE.t) + betad = alphad + etaa, etad = frac_in(dcoeff, DE.t) + if recognize_log_derivative(Poly(2, DE.t)*betaa, betad, DE): + A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE) + B = parametric_log_deriv(betaa, betad, etaa, etad, DE) + if A is not None and B is not None: + Q, s, z = A + # TODO: Add test + if Q == 1: + n = min(n, s/2) + + N = max(0, -nb) + pN = p**N + pn = p**-n # This is 1/h + + A = a*pN + B = ba*pN.quo(bd) + Poly(n, DE.t)*a*derivation(p, DE).quo(p)*pN + G = [(Ga*pN*pn).cancel(Gd, include=True) for Ga, Gd in G] + h = pn + + # (a*p**N, (b + n*a*Dp/p)*p**N, g1*p**(N - n), ..., gm*p**(N - n), p**-n) + return (A, B, G, h) + + +def prde_linear_constraints(a, b, G, DE): + """ + Parametric Risch Differential Equation - Generate linear constraints on the constants. + + Explanation + =========== + + Given a derivation D on k[t], a, b, in k[t] with gcd(a, b) == 1, and + G = [g1, ..., gm] in k(t)^m, return Q = [q1, ..., qm] in k[t]^m and a + matrix M with entries in k(t) such that for any solution c1, ..., cm in + Const(k) and p in k[t] of a*Dp + b*p == Sum(ci*gi, (i, 1, m)), + (c1, ..., cm) is a solution of Mx == 0, and p and the ci satisfy + a*Dp + b*p == Sum(ci*qi, (i, 1, m)). + + Because M has entries in k(t), and because Matrix does not play well with + Poly, M will be a Matrix of Basic expressions. + """ + m = len(G) + + Gns, Gds = list(zip(*G)) + d = reduce(lambda i, j: i.lcm(j), Gds) + d = Poly(d, field=True) + Q = [(ga*(d).quo(gd)).div(d) for ga, gd in G] + + if not all(ri.is_zero for _, ri in Q): + N = max(ri.degree(DE.t) for _, ri in Q) + M = Matrix(N + 1, m, lambda i, j: Q[j][1].nth(i), DE.t) + else: + M = Matrix(0, m, [], DE.t) # No constraints, return the empty matrix. 
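+    # Each entry of Q is a (quotient, remainder) pair from dividing
+    # ga*(d/gd) by d; the remainders fill the constraint matrix M above and
+    # the quotients q1, ..., qm are returned below.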
+ + qs, _ = list(zip(*Q)) + return (qs, M) + +def poly_linear_constraints(p, d): + """ + Given p = [p1, ..., pm] in k[t]^m and d in k[t], return + q = [q1, ..., qm] in k[t]^m and a matrix M with entries in k such + that Sum(ci*pi, (i, 1, m)), for c1, ..., cm in k, is divisible + by d if and only if (c1, ..., cm) is a solution of Mx = 0, in + which case the quotient is Sum(ci*qi, (i, 1, m)). + """ + m = len(p) + q, r = zip(*[pi.div(d) for pi in p]) + + if not all(ri.is_zero for ri in r): + n = max(ri.degree() for ri in r) + M = Matrix(n + 1, m, lambda i, j: r[j].nth(i), d.gens) + else: + M = Matrix(0, m, [], d.gens) # No constraints. + + return q, M + +def constant_system(A, u, DE): + """ + Generate a system for the constant solutions. + + Explanation + =========== + + Given a differential field (K, D) with constant field C = Const(K), a Matrix + A, and a vector (Matrix) u with coefficients in K, returns the tuple + (B, v, s), where B is a Matrix with coefficients in C and v is a vector + (Matrix) such that either v has coefficients in C, in which case s is True + and the solutions in C of Ax == u are exactly all the solutions of Bx == v, + or v has a non-constant coefficient, in which case s is False Ax == u has no + constant solution. + + This algorithm is used both in solving parametric problems and in + determining if an element a of K is a derivative of an element of K or the + logarithmic derivative of a K-radical using the structure theorem approach. + + Because Poly does not play well with Matrix yet, this algorithm assumes that + all matrix entries are Basic expressions. + """ + if not A: + return A, u + Au = A.row_join(u) + Au, _ = Au.rref() + # Warning: This will NOT return correct results if cancel() cannot reduce + # an identically zero expression to 0. The danger is that we might + # incorrectly prove that an integral is nonelementary (such as + # risch_integrate(exp((sin(x)**2 + cos(x)**2 - 1)*x**2), x). + # But this is a limitation in computer algebra in general, and implicit + # in the correctness of the Risch Algorithm is the computability of the + # constant field (actually, this same correctness problem exists in any + # algorithm that uses rref()). + # + # We therefore limit ourselves to constant fields that are computable + # via the cancel() function, in order to prevent a speed bottleneck from + # calling some more complex simplification function (rational function + # coefficients will fall into this class). Furthermore, (I believe) this + # problem will only crop up if the integral explicitly contains an + # expression in the constant field that is identically zero, but cannot + # be reduced to such by cancel(). Therefore, a careful user can avoid this + # problem entirely by being careful with the sorts of expressions that + # appear in his integrand in the variables other than the integration + # variable (the structure theorems should be able to completely decide these + # problems in the integration variable). 
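+    # Below, any entry A[i, j] that still involves the extension variables
+    # DE.T is removed by appending the differentiated row i (scaled by
+    # 1/D(A[i, j])) and eliminating column j against it, so non-constant
+    # entries become additional constraints on the constants.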
+ + A, u = Au[:, :-1], Au[:, -1] + + D = lambda x: derivation(x, DE, basic=True) + + for j, i in itertools.product(range(A.cols), range(A.rows)): + if A[i, j].expr.has(*DE.T): + # This assumes that const(F(t0, ..., tn) == const(K) == F + Ri = A[i, :] + # Rm+1; m = A.rows + DAij = D(A[i, j]) + Rm1 = Ri.applyfunc(lambda x: D(x) / DAij) + um1 = D(u[i]) / DAij + + Aj = A[:, j] + A = A - Aj * Rm1 + u = u - Aj * um1 + + A = A.col_join(Rm1) + u = u.col_join(Matrix([um1], u.gens)) + + return (A, u) + + +def prde_spde(a, b, Q, n, DE): + """ + Special Polynomial Differential Equation algorithm: Parametric Version. + + Explanation + =========== + + Given a derivation D on k[t], an integer n, and a, b, q1, ..., qm in k[t] + with deg(a) > 0 and gcd(a, b) == 1, return (A, B, Q, R, n1), with + Qq = [q1, ..., qm] and R = [r1, ..., rm], such that for any solution + c1, ..., cm in Const(k) and q in k[t] of degree at most n of + a*Dq + b*q == Sum(ci*gi, (i, 1, m)), p = (q - Sum(ci*ri, (i, 1, m)))/a has + degree at most n1 and satisfies A*Dp + B*p == Sum(ci*qi, (i, 1, m)) + """ + R, Z = list(zip(*[gcdex_diophantine(b, a, qi) for qi in Q])) + + A = a + B = b + derivation(a, DE) + Qq = [zi - derivation(ri, DE) for ri, zi in zip(R, Z)] + R = list(R) + n1 = n - a.degree(DE.t) + + return (A, B, Qq, R, n1) + + +def prde_no_cancel_b_large(b, Q, n, DE): + """ + Parametric Poly Risch Differential Equation - No cancellation: deg(b) large enough. + + Explanation + =========== + + Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with + b != 0 and either D == d/dt or deg(b) > max(0, deg(D) - 1), returns + h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that + if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and + Dq + b*q == Sum(ci*qi, (i, 1, m)), then q = Sum(dj*hj, (j, 1, r)), where + d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0. + """ + db = b.degree(DE.t) + m = len(Q) + H = [Poly(0, DE.t)]*m + + for N, i in itertools.product(range(n, -1, -1), range(m)): # [n, ..., 0] + si = Q[i].nth(N + db)/b.LC() + sitn = Poly(si*DE.t**N, DE.t) + H[i] = H[i] + sitn + Q[i] = Q[i] - derivation(sitn, DE) - b*sitn + + if all(qi.is_zero for qi in Q): + dc = -1 + else: + dc = max([qi.degree(DE.t) for qi in Q]) + M = Matrix(dc + 1, m, lambda i, j: Q[j].nth(i), DE.t) + A, u = constant_system(M, zeros(dc + 1, 1, DE.t), DE) + c = eye(m, DE.t) + A = A.row_join(zeros(A.rows, m, DE.t)).col_join(c.row_join(-c)) + + return (H, A) + + +def prde_no_cancel_b_small(b, Q, n, DE): + """ + Parametric Poly Risch Differential Equation - No cancellation: deg(b) small enough. + + Explanation + =========== + + Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with + deg(b) < deg(D) - 1 and either D == d/dt or deg(D) >= 2, returns + h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that + if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and + Dq + b*q == Sum(ci*qi, (i, 1, m)) then q = Sum(dj*hj, (j, 1, r)) where + d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0. 
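+
+    When ``b`` lies in k (its degree in t is zero), the remaining problem in
+    the constant coefficients is reduced recursively to ``param_rischDE``
+    over the field below, or solved directly in the base case.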
+ """ + m = len(Q) + H = [Poly(0, DE.t)]*m + + for N, i in itertools.product(range(n, 0, -1), range(m)): # [n, ..., 1] + si = Q[i].nth(N + DE.d.degree(DE.t) - 1)/(N*DE.d.LC()) + sitn = Poly(si*DE.t**N, DE.t) + H[i] = H[i] + sitn + Q[i] = Q[i] - derivation(sitn, DE) - b*sitn + + if b.degree(DE.t) > 0: + for i in range(m): + si = Poly(Q[i].nth(b.degree(DE.t))/b.LC(), DE.t) + H[i] = H[i] + si + Q[i] = Q[i] - derivation(si, DE) - b*si + if all(qi.is_zero for qi in Q): + dc = -1 + else: + dc = max([qi.degree(DE.t) for qi in Q]) + M = Matrix(dc + 1, m, lambda i, j: Q[j].nth(i), DE.t) + A, u = constant_system(M, zeros(dc + 1, 1, DE.t), DE) + c = eye(m, DE.t) + A = A.row_join(zeros(A.rows, m, DE.t)).col_join(c.row_join(-c)) + return (H, A) + + # else: b is in k, deg(qi) < deg(Dt) + + t = DE.t + if DE.case != 'base': + with DecrementLevel(DE): + t0 = DE.t # k = k0(t0) + ba, bd = frac_in(b, t0, field=True) + Q0 = [frac_in(qi.TC(), t0, field=True) for qi in Q] + f, B = param_rischDE(ba, bd, Q0, DE) + + # f = [f1, ..., fr] in k^r and B is a matrix with + # m + r columns and entries in Const(k) = Const(k0) + # such that Dy0 + b*y0 = Sum(ci*qi, (i, 1, m)) has + # a solution y0 in k with c1, ..., cm in Const(k) + # if and only y0 = Sum(dj*fj, (j, 1, r)) where + # d1, ..., dr ar in Const(k) and + # B*Matrix([c1, ..., cm, d1, ..., dr]) == 0. + + # Transform fractions (fa, fd) in f into constant + # polynomials fa/fd in k[t]. + # (Is there a better way?) + f = [Poly(fa.as_expr()/fd.as_expr(), t, field=True) + for fa, fd in f] + B = Matrix.from_Matrix(B.to_Matrix(), t) + else: + # Base case. Dy == 0 for all y in k and b == 0. + # Dy + b*y = Sum(ci*qi) is solvable if and only if + # Sum(ci*qi) == 0 in which case the solutions are + # y = d1*f1 for f1 = 1 and any d1 in Const(k) = k. + + f = [Poly(1, t, field=True)] # r = 1 + B = Matrix([[qi.TC() for qi in Q] + [S.Zero]], DE.t) + # The condition for solvability is + # B*Matrix([c1, ..., cm, d1]) == 0 + # There are no constraints on d1. + + # Coefficients of t^j (j > 0) in Sum(ci*qi) must be zero. + d = max([qi.degree(DE.t) for qi in Q]) + if d > 0: + M = Matrix(d, m, lambda i, j: Q[j].nth(i + 1), DE.t) + A, _ = constant_system(M, zeros(d, 1, DE.t), DE) + else: + # No constraints on the hj. + A = Matrix(0, m, [], DE.t) + + # Solutions of the original equation are + # y = Sum(dj*fj, (j, 1, r) + Sum(ei*hi, (i, 1, m)), + # where ei == ci (i = 1, ..., m), when + # A*Matrix([c1, ..., cm]) == 0 and + # B*Matrix([c1, ..., cm, d1, ..., dr]) == 0 + + # Build combined constraint matrix with m + r + m columns. + + r = len(f) + I = eye(m, DE.t) + A = A.row_join(zeros(A.rows, r + m, DE.t)) + B = B.row_join(zeros(B.rows, m, DE.t)) + C = I.row_join(zeros(m, r, DE.t)).row_join(-I) + + return f + H, A.col_join(B).col_join(C) + + +def prde_cancel_liouvillian(b, Q, n, DE): + """ + Pg, 237. + """ + H = [] + + # Why use DecrementLevel? 
Below line answers that: + # Assuming that we can solve such problems over 'k' (not k[t]) + if DE.case == 'primitive': + with DecrementLevel(DE): + ba, bd = frac_in(b, DE.t, field=True) + + for i in range(n, -1, -1): + if DE.case == 'exp': # this re-checking can be avoided + with DecrementLevel(DE): + ba, bd = frac_in(b + (i*(derivation(DE.t, DE)/DE.t)).as_poly(b.gens), + DE.t, field=True) + with DecrementLevel(DE): + Qy = [frac_in(q.nth(i), DE.t, field=True) for q in Q] + fi, Ai = param_rischDE(ba, bd, Qy, DE) + fi = [Poly(fa.as_expr()/fd.as_expr(), DE.t, field=True) + for fa, fd in fi] + Ai = Ai.set_gens(DE.t) + + ri = len(fi) + + if i == n: + M = Ai + else: + M = Ai.col_join(M.row_join(zeros(M.rows, ri, DE.t))) + + Fi, hi = [None]*ri, [None]*ri + + # from eq. on top of p.238 (unnumbered) + for j in range(ri): + hji = fi[j] * (DE.t**i).as_poly(fi[j].gens) + hi[j] = hji + # building up Sum(djn*(D(fjn*t^n) - b*fjnt^n)) + Fi[j] = -(derivation(hji, DE) - b*hji) + + H += hi + # in the next loop instead of Q it has + # to be Q + Fi taking its place + Q = Q + Fi + + return (H, M) + + +def param_poly_rischDE(a, b, q, n, DE): + """Polynomial solutions of a parametric Risch differential equation. + + Explanation + =========== + + Given a derivation D in k[t], a, b in k[t] relatively prime, and q + = [q1, ..., qm] in k[t]^m, return h = [h1, ..., hr] in k[t]^r and + a matrix A with m + r columns and entries in Const(k) such that + a*Dp + b*p = Sum(ci*qi, (i, 1, m)) has a solution p of degree <= n + in k[t] with c1, ..., cm in Const(k) if and only if p = Sum(dj*hj, + (j, 1, r)) where d1, ..., dr are in Const(k) and (c1, ..., cm, + d1, ..., dr) is a solution of Ax == 0. + """ + m = len(q) + if n < 0: + # Only the trivial zero solution is possible. + # Find relations between the qi. + if all(qi.is_zero for qi in q): + return [], zeros(1, m, DE.t) # No constraints. + + N = max([qi.degree(DE.t) for qi in q]) + M = Matrix(N + 1, m, lambda i, j: q[j].nth(i), DE.t) + A, _ = constant_system(M, zeros(M.rows, 1, DE.t), DE) + + return [], A + + if a.is_ground: + # Normalization: a = 1. + a = a.LC() + b, q = b.quo_ground(a), [qi.quo_ground(a) for qi in q] + + if not b.is_zero and (DE.case == 'base' or + b.degree() > max(0, DE.d.degree() - 1)): + return prde_no_cancel_b_large(b, q, n, DE) + + elif ((b.is_zero or b.degree() < DE.d.degree() - 1) + and (DE.case == 'base' or DE.d.degree() >= 2)): + return prde_no_cancel_b_small(b, q, n, DE) + + elif (DE.d.degree() >= 2 and + b.degree() == DE.d.degree() - 1 and + n > -b.as_poly().LC()/DE.d.as_poly().LC()): + raise NotImplementedError("prde_no_cancel_b_equal() is " + "not yet implemented.") + + else: + # Liouvillian cases + if DE.case in ('primitive', 'exp'): + return prde_cancel_liouvillian(b, q, n, DE) + else: + raise NotImplementedError("non-linear and hypertangent " + "cases have not yet been implemented") + + # else: deg(a) > 0 + + # Iterate SPDE as long as possible cumulating coefficient + # and terms for the recovery of original solutions. + alpha, beta = a.one, [a.zero]*m + while n >= 0: # and a, b relatively prime + a, b, q, r, n = prde_spde(a, b, q, n, DE) + beta = [betai + alpha*ri for betai, ri in zip(beta, r)] + alpha *= a + # Solutions p of a*Dp + b*p = Sum(ci*qi) correspond to + # solutions alpha*p + Sum(ci*betai) of the initial equation. + d = a.gcd(b) + if not d.is_ground: + break + + # a*Dp + b*p = Sum(ci*qi) may have a polynomial solution + # only if the sum is divisible by d. 
+ + qq, M = poly_linear_constraints(q, d) + # qq = [qq1, ..., qqm] where qqi = qi.quo(d). + # M is a matrix with m columns an entries in k. + # Sum(fi*qi, (i, 1, m)), where f1, ..., fm are elements of k, is + # divisible by d if and only if M*Matrix([f1, ..., fm]) == 0, + # in which case the quotient is Sum(fi*qqi). + + A, _ = constant_system(M, zeros(M.rows, 1, DE.t), DE) + # A is a matrix with m columns and entries in Const(k). + # Sum(ci*qqi) is Sum(ci*qi).quo(d), and the remainder is zero + # for c1, ..., cm in Const(k) if and only if + # A*Matrix([c1, ...,cm]) == 0. + + V = A.nullspace() + # V = [v1, ..., vu] where each vj is a column matrix with + # entries aj1, ..., ajm in Const(k). + # Sum(aji*qi) is divisible by d with exact quotient Sum(aji*qqi). + # Sum(ci*qi) is divisible by d if and only if ci = Sum(dj*aji) + # (i = 1, ..., m) for some d1, ..., du in Const(k). + # In that case, solutions of + # a*Dp + b*p = Sum(ci*qi) = Sum(dj*Sum(aji*qi)) + # are the same as those of + # (a/d)*Dp + (b/d)*p = Sum(dj*rj) + # where rj = Sum(aji*qqi). + + if not V: # No non-trivial solution. + return [], eye(m, DE.t) # Could return A, but this has + # the minimum number of rows. + + Mqq = Matrix([qq]) # A single row. + r = [(Mqq*vj)[0] for vj in V] # [r1, ..., ru] + + # Solutions of (a/d)*Dp + (b/d)*p = Sum(dj*rj) correspond to + # solutions alpha*p + Sum(Sum(dj*aji)*betai) of the initial + # equation. These are equal to alpha*p + Sum(dj*fj) where + # fj = Sum(aji*betai). + Mbeta = Matrix([beta]) + f = [(Mbeta*vj)[0] for vj in V] # [f1, ..., fu] + + # + # Solve the reduced equation recursively. + # + g, B = param_poly_rischDE(a.quo(d), b.quo(d), r, n, DE) + + # g = [g1, ..., gv] in k[t]^v and and B is a matrix with u + v + # columns and entries in Const(k) such that + # (a/d)*Dp + (b/d)*p = Sum(dj*rj) has a solution p of degree <= n + # in k[t] if and only if p = Sum(ek*gk) where e1, ..., ev are in + # Const(k) and B*Matrix([d1, ..., du, e1, ..., ev]) == 0. + # The solutions of the original equation are then + # Sum(dj*fj, (j, 1, u)) + alpha*Sum(ek*gk, (k, 1, v)). + + # Collect solution components. + h = f + [alpha*gk for gk in g] + + # Build combined relation matrix. + A = -eye(m, DE.t) + for vj in V: + A = A.row_join(vj) + A = A.row_join(zeros(m, len(g), DE.t)) + A = A.col_join(zeros(B.rows, m, DE.t).row_join(B)) + + return h, A + + +def param_rischDE(fa, fd, G, DE): + """ + Solve a Parametric Risch Differential Equation: Dy + f*y == Sum(ci*Gi, (i, 1, m)). + + Explanation + =========== + + Given a derivation D in k(t), f in k(t), and G + = [G1, ..., Gm] in k(t)^m, return h = [h1, ..., hr] in k(t)^r and + a matrix A with m + r columns and entries in Const(k) such that + Dy + f*y = Sum(ci*Gi, (i, 1, m)) has a solution y + in k(t) with c1, ..., cm in Const(k) if and only if y = Sum(dj*hj, + (j, 1, r)) where d1, ..., dr are in Const(k) and (c1, ..., cm, + d1, ..., dr) is a solution of Ax == 0. + + Elements of k(t) are tuples (a, d) with a and d in k[t]. + """ + m = len(G) + q, (fa, fd) = weak_normalizer(fa, fd, DE) + # Solutions of the weakly normalized equation Dz + f*z = q*Sum(ci*Gi) + # correspond to solutions y = z/q of the original equation. + gamma = q + G = [(q*ga).cancel(gd, include=True) for ga, gd in G] + + a, (ba, bd), G, hn = prde_normal_denom(fa, fd, G, DE) + # Solutions q in k of a*Dq + b*q = Sum(ci*Gi) correspond + # to solutions z = q/hn of the weakly normalized equation. 
+ gamma *= hn + + A, B, G, hs = prde_special_denom(a, ba, bd, G, DE) + # Solutions p in k[t] of A*Dp + B*p = Sum(ci*Gi) correspond + # to solutions q = p/hs of the previous equation. + gamma *= hs + + g = A.gcd(B) + a, b, g = A.quo(g), B.quo(g), [gia.cancel(gid*g, include=True) for + gia, gid in G] + + # a*Dp + b*p = Sum(ci*gi) may have a polynomial solution + # only if the sum is in k[t]. + + q, M = prde_linear_constraints(a, b, g, DE) + + # q = [q1, ..., qm] where qi in k[t] is the polynomial component + # of the partial fraction expansion of gi. + # M is a matrix with m columns and entries in k. + # Sum(fi*gi, (i, 1, m)), where f1, ..., fm are elements of k, + # is a polynomial if and only if M*Matrix([f1, ..., fm]) == 0, + # in which case the sum is equal to Sum(fi*qi). + + M, _ = constant_system(M, zeros(M.rows, 1, DE.t), DE) + # M is a matrix with m columns and entries in Const(k). + # Sum(ci*gi) is in k[t] for c1, ..., cm in Const(k) + # if and only if M*Matrix([c1, ..., cm]) == 0, + # in which case the sum is Sum(ci*qi). + + ## Reduce number of constants at this point + + V = M.nullspace() + # V = [v1, ..., vu] where each vj is a column matrix with + # entries aj1, ..., ajm in Const(k). + # Sum(aji*gi) is in k[t] and equal to Sum(aji*qi) (j = 1, ..., u). + # Sum(ci*gi) is in k[t] if and only is ci = Sum(dj*aji) + # (i = 1, ..., m) for some d1, ..., du in Const(k). + # In that case, + # Sum(ci*gi) = Sum(ci*qi) = Sum(dj*Sum(aji*qi)) = Sum(dj*rj) + # where rj = Sum(aji*qi) (j = 1, ..., u) in k[t]. + + if not V: # No non-trivial solution + return [], eye(m, DE.t) + + Mq = Matrix([q]) # A single row. + r = [(Mq*vj)[0] for vj in V] # [r1, ..., ru] + + # Solutions of a*Dp + b*p = Sum(dj*rj) correspond to solutions + # y = p/gamma of the initial equation with ci = Sum(dj*aji). + + try: + # We try n=5. At least for prde_spde, it will always + # terminate no matter what n is. + n = bound_degree(a, b, r, DE, parametric=True) + except NotImplementedError: + # A temporary bound is set. Eventually, it will be removed. + # the currently added test case takes large time + # even with n=5, and much longer with large n's. + n = 5 + + h, B = param_poly_rischDE(a, b, r, n, DE) + + # h = [h1, ..., hv] in k[t]^v and and B is a matrix with u + v + # columns and entries in Const(k) such that + # a*Dp + b*p = Sum(dj*rj) has a solution p of degree <= n + # in k[t] if and only if p = Sum(ek*hk) where e1, ..., ev are in + # Const(k) and B*Matrix([d1, ..., du, e1, ..., ev]) == 0. + # The solutions of the original equation for ci = Sum(dj*aji) + # (i = 1, ..., m) are then y = Sum(ek*hk, (k, 1, v))/gamma. + + ## Build combined relation matrix with m + u + v columns. + + A = -eye(m, DE.t) + for vj in V: + A = A.row_join(vj) + A = A.row_join(zeros(m, len(h), DE.t)) + A = A.col_join(zeros(B.rows, m, DE.t).row_join(B)) + + ## Eliminate d1, ..., du. + + W = A.nullspace() + + # W = [w1, ..., wt] where each wl is a column matrix with + # entries blk (k = 1, ..., m + u + v) in Const(k). + # The vectors (bl1, ..., blm) generate the space of those + # constant families (c1, ..., cm) for which a solution of + # the equation Dy + f*y == Sum(ci*Gi) exists. They generate + # the space and form a basis except possibly when Dy + f*y == 0 + # is solvable in k(t}. The corresponding solutions are + # y = Sum(blk'*hk, (k, 1, v))/gamma, where k' = k + m + u. + + v = len(h) + shape = (len(W), m+v) + elements = [wl[:m] + wl[-v:] for wl in W] # excise dj's. 
+ items = [e for row in elements for e in row] + + # Need to set the shape in case W is empty + M = Matrix(*shape, items, DE.t) + N = M.nullspace() + + # N = [n1, ..., ns] where the ni in Const(k)^(m + v) are column + # vectors generating the space of linear relations between + # c1, ..., cm, e1, ..., ev. + + C = Matrix([ni[:] for ni in N], DE.t) # rows n1, ..., ns. + + return [hk.cancel(gamma, include=True) for hk in h], C + + +def limited_integrate_reduce(fa, fd, G, DE): + """ + Simpler version of step 1 & 2 for the limited integration problem. + + Explanation + =========== + + Given a derivation D on k(t) and f, g1, ..., gn in k(t), return + (a, b, h, N, g, V) such that a, b, h in k[t], N is a non-negative integer, + g in k(t), V == [v1, ..., vm] in k(t)^m, and for any solution v in k(t), + c1, ..., cm in C of f == Dv + Sum(ci*wi, (i, 1, m)), p = v*h is in k, and + p and the ci satisfy a*Dp + b*p == g + Sum(ci*vi, (i, 1, m)). Furthermore, + if S1irr == Sirr, then p is in k[t], and if t is nonlinear or Liouvillian + over k, then deg(p) <= N. + + So that the special part is always computed, this function calls the more + general prde_special_denom() automatically if it cannot determine that + S1irr == Sirr. Furthermore, it will automatically call bound_degree() when + t is linear and non-Liouvillian, which for the transcendental case, implies + that Dt == a*t + b with for some a, b in k*. + """ + dn, ds = splitfactor(fd, DE) + E = [splitfactor(gd, DE) for _, gd in G] + En, Es = list(zip(*E)) + c = reduce(lambda i, j: i.lcm(j), (dn,) + En) # lcm(dn, en1, ..., enm) + hn = c.gcd(c.diff(DE.t)) + a = hn + b = -derivation(hn, DE) + N = 0 + + # These are the cases where we know that S1irr = Sirr, but there could be + # others, and this algorithm will need to be extended to handle them. + if DE.case in ('base', 'primitive', 'exp', 'tan'): + hs = reduce(lambda i, j: i.lcm(j), (ds,) + Es) # lcm(ds, es1, ..., esm) + a = hn*hs + b -= (hn*derivation(hs, DE)).quo(hs) + mu = min(order_at_oo(fa, fd, DE.t), min([order_at_oo(ga, gd, DE.t) for + ga, gd in G])) + # So far, all the above are also nonlinear or Liouvillian, but if this + # changes, then this will need to be updated to call bound_degree() + # as per the docstring of this function (DE.case == 'other_linear'). + N = hn.degree(DE.t) + hs.degree(DE.t) + max(0, 1 - DE.d.degree(DE.t) - mu) + else: + # TODO: implement this + raise NotImplementedError + + V = [(-a*hn*ga).cancel(gd, include=True) for ga, gd in G] + return (a, b, a, N, (a*hn*fa).cancel(fd, include=True), V) + + +def limited_integrate(fa, fd, G, DE): + """ + Solves the limited integration problem: f = Dv + Sum(ci*wi, (i, 1, n)) + """ + fa, fd = fa*Poly(1/fd.LC(), DE.t), fd.monic() + # interpreting limited integration problem as a + # parametric Risch DE problem + Fa = Poly(0, DE.t) + Fd = Poly(1, DE.t) + G = [(fa, fd)] + G + h, A = param_rischDE(Fa, Fd, G, DE) + V = A.nullspace() + V = [v for v in V if v[0] != 0] + if not V: + return None + else: + # we can take any vector from V, we take V[0] + c0 = V[0][0] + # v = [-1, c1, ..., cm, d1, ..., dr] + v = V[0]/(-c0) + r = len(h) + m = len(v) - r - 1 + C = list(v[1: m + 1]) + y = -sum([v[m + 1 + i]*h[i][0].as_expr()/h[i][1].as_expr() \ + for i in range(r)]) + y_num, y_den = y.as_numer_denom() + Ya, Yd = Poly(y_num, DE.t), Poly(y_den, DE.t) + Y = Ya*Poly(1/Yd.LC(), DE.t), Yd.monic() + return Y, C + + +def parametric_log_deriv_heu(fa, fd, wa, wd, DE, c1=None): + """ + Parametric logarithmic derivative heuristic. 
+ + Explanation + =========== + + Given a derivation D on k[t], f in k(t), and a hyperexponential monomial + theta over k(t), raises either NotImplementedError, in which case the + heuristic failed, or returns None, in which case it has proven that no + solution exists, or returns a solution (n, m, v) of the equation + n*f == Dv/v + m*Dtheta/theta, with v in k(t)* and n, m in ZZ with n != 0. + + If this heuristic fails, the structure theorem approach will need to be + used. + + The argument w == Dtheta/theta + """ + # TODO: finish writing this and write tests + c1 = c1 or Dummy('c1') + + p, a = fa.div(fd) + q, b = wa.div(wd) + + B = max(0, derivation(DE.t, DE).degree(DE.t) - 1) + C = max(p.degree(DE.t), q.degree(DE.t)) + + if q.degree(DE.t) > B: + eqs = [p.nth(i) - c1*q.nth(i) for i in range(B + 1, C + 1)] + s = solve(eqs, c1) + if not s or not s[c1].is_Rational: + # deg(q) > B, no solution for c. + return None + + M, N = s[c1].as_numer_denom() + M_poly = M.as_poly(q.gens) + N_poly = N.as_poly(q.gens) + + nfmwa = N_poly*fa*wd - M_poly*wa*fd + nfmwd = fd*wd + Qv = is_log_deriv_k_t_radical_in_field(nfmwa, nfmwd, DE, 'auto') + if Qv is None: + # (N*f - M*w) is not the logarithmic derivative of a k(t)-radical. + return None + + Q, v = Qv + + if Q.is_zero or v.is_zero: + return None + + return (Q*N, Q*M, v) + + if p.degree(DE.t) > B: + return None + + c = lcm(fd.as_poly(DE.t).LC(), wd.as_poly(DE.t).LC()) + l = fd.monic().lcm(wd.monic())*Poly(c, DE.t) + ln, ls = splitfactor(l, DE) + z = ls*ln.gcd(ln.diff(DE.t)) + + if not z.has(DE.t): + # TODO: We treat this as 'no solution', until the structure + # theorem version of parametric_log_deriv is implemented. + return None + + u1, r1 = (fa*l.quo(fd)).div(z) # (l*f).div(z) + u2, r2 = (wa*l.quo(wd)).div(z) # (l*w).div(z) + + eqs = [r1.nth(i) - c1*r2.nth(i) for i in range(z.degree(DE.t))] + s = solve(eqs, c1) + if not s or not s[c1].is_Rational: + # deg(q) <= B, no solution for c. + return None + + M, N = s[c1].as_numer_denom() + + nfmwa = N.as_poly(DE.t)*fa*wd - M.as_poly(DE.t)*wa*fd + nfmwd = fd*wd + Qv = is_log_deriv_k_t_radical_in_field(nfmwa, nfmwd, DE) + if Qv is None: + # (N*f - M*w) is not the logarithmic derivative of a k(t)-radical. + return None + + Q, v = Qv + + if Q.is_zero or v.is_zero: + return None + + return (Q*N, Q*M, v) + + +def parametric_log_deriv(fa, fd, wa, wd, DE): + # TODO: Write the full algorithm using the structure theorems. +# try: + A = parametric_log_deriv_heu(fa, fd, wa, wd, DE) +# except NotImplementedError: + # Heuristic failed, we have to use the full method. + # TODO: This could be implemented more efficiently. + # It isn't too worrisome, because the heuristic handles most difficult + # cases. + return A + + +def is_deriv_k(fa, fd, DE): + r""" + Checks if Df/f is the derivative of an element of k(t). + + Explanation + =========== + + a in k(t) is the derivative of an element of k(t) if there exists b in k(t) + such that a = Db. Either returns (ans, u), such that Df/f == Du, or None, + which means that Df/f is not the derivative of an element of k(t). ans is + a list of tuples such that Add(*[i*j for i, j in ans]) == u. This is useful + for seeing exactly which elements of k(t) produce u. + + This function uses the structure theorem approach, which says that for any + f in K, Df/f is the derivative of a element of K if and only if there are ri + in QQ such that:: + + --- --- Dt + \ r * Dt + \ r * i Df + / i i / i --- = --. 
+ --- --- t f + i in L i in E i + K/C(x) K/C(x) + + + Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is + transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some a_i + in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of logarithmic + monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n} such that t_i + is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for some + a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of + hyperexponential monomials of K over C(x)). If K is an elementary extension + over C(x), then the cardinality of L_K/C(x) U E_K/C(x) is exactly the + transcendence degree of K over C(x). Furthermore, because Const_D(K) == + Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x) and + deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that E_K/C(x) + and L_K/C(x) are disjoint. + + The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed + recursively using this same function. Therefore, it is required to pass + them as indices to D (or T). E_args are the arguments of the + hyperexponentials indexed by E_K (i.e., if i is in E_K, then T[i] == + exp(E_args[i])). This is needed to compute the final answer u such that + Df/f == Du. + + log(f) will be the same as u up to a additive constant. This is because + they will both behave the same as monomials. For example, both log(x) and + log(2*x) == log(x) + log(2) satisfy Dt == 1/x, because log(2) is constant. + Therefore, the term const is returned. const is such that + log(const) + f == u. This is calculated by dividing the arguments of one + logarithm from the other. Therefore, it is necessary to pass the arguments + of the logarithmic terms in L_args. + + To handle the case where we are given Df/f, not f, use is_deriv_k_in_field(). + + See also + ======== + is_log_deriv_k_t_radical_in_field, is_log_deriv_k_t_radical + + """ + # Compute Df/f + dfa, dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)), fd*fa + dfa, dfd = dfa.cancel(dfd, include=True) + + # Our assumption here is that each monomial is recursively transcendental + if len(DE.exts) != len(DE.D): + if [i for i in DE.cases if i == 'tan'] or \ + ({i for i in DE.cases if i == 'primitive'} - + set(DE.indices('log'))): + raise NotImplementedError("Real version of the structure " + "theorems with hypertangent support is not yet implemented.") + + # TODO: What should really be done in this case? + raise NotImplementedError("Nonelementary extensions not supported " + "in the structure theorems.") + + E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr() for i in DE.indices('exp')] + L_part = [DE.D[i].as_expr() for i in DE.indices('log')] + + # The expression dfa/dfd might not be polynomial in any of its symbols so we + # use a Dummy as the generator for PolyMatrix. 
+ dum = Dummy() + lhs = Matrix([E_part + L_part], dum) + rhs = Matrix([dfa.as_expr()/dfd.as_expr()], dum) + + A, u = constant_system(lhs, rhs, DE) + + u = u.to_Matrix() # Poly to Expr + + if not A or not all(derivation(i, DE, basic=True).is_zero for i in u): + # If the elements of u are not all constant + # Note: See comment in constant_system + + # Also note: derivation(basic=True) calls cancel() + return None + else: + if not all(i.is_Rational for i in u): + raise NotImplementedError("Cannot work with non-rational " + "coefficients in this case.") + else: + terms = ([DE.extargs[i] for i in DE.indices('exp')] + + [DE.T[i] for i in DE.indices('log')]) + ans = list(zip(terms, u)) + result = Add(*[Mul(i, j) for i, j in ans]) + argterms = ([DE.T[i] for i in DE.indices('exp')] + + [DE.extargs[i] for i in DE.indices('log')]) + l = [] + ld = [] + for i, j in zip(argterms, u): + # We need to get around things like sqrt(x**2) != x + # and also sqrt(x**2 + 2*x + 1) != x + 1 + # Issue 10798: i need not be a polynomial + i, d = i.as_numer_denom() + icoeff, iterms = sqf_list(i) + l.append(Mul(*([Pow(icoeff, j)] + [Pow(b, e*j) for b, e in iterms]))) + dcoeff, dterms = sqf_list(d) + ld.append(Mul(*([Pow(dcoeff, j)] + [Pow(b, e*j) for b, e in dterms]))) + const = cancel(fa.as_expr()/fd.as_expr()/Mul(*l)*Mul(*ld)) + + return (ans, result, const) + + +def is_log_deriv_k_t_radical(fa, fd, DE, Df=True): + r""" + Checks if Df is the logarithmic derivative of a k(t)-radical. + + Explanation + =========== + + b in k(t) can be written as the logarithmic derivative of a k(t) radical if + there exist n in ZZ and u in k(t) with n, u != 0 such that n*b == Du/u. + Either returns (ans, u, n, const) or None, which means that Df cannot be + written as the logarithmic derivative of a k(t)-radical. ans is a list of + tuples such that Mul(*[i**j for i, j in ans]) == u. This is useful for + seeing exactly what elements of k(t) produce u. + + This function uses the structure theorem approach, which says that for any + f in K, Df is the logarithmic derivative of a K-radical if and only if there + are ri in QQ such that:: + + --- --- Dt + \ r * Dt + \ r * i + / i i / i --- = Df. + --- --- t + i in L i in E i + K/C(x) K/C(x) + + + Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is + transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some a_i + in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of logarithmic + monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n} such that t_i + is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for some + a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of + hyperexponential monomials of K over C(x)). If K is an elementary extension + over C(x), then the cardinality of L_K/C(x) U E_K/C(x) is exactly the + transcendence degree of K over C(x). Furthermore, because Const_D(K) == + Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x) and + deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that E_K/C(x) + and L_K/C(x) are disjoint. + + The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed + recursively using this same function. Therefore, it is required to pass + them as indices to D (or T). L_args are the arguments of the logarithms + indexed by L_K (i.e., if i is in L_K, then T[i] == log(L_args[i])). This is + needed to compute the final answer u such that n*f == Du/u. + + exp(f) will be the same as u up to a multiplicative constant. 
This is + because they will both behave the same as monomials. For example, both + exp(x) and exp(x + 1) == E*exp(x) satisfy Dt == t. Therefore, the term const + is returned. const is such that exp(const)*f == u. This is calculated by + subtracting the arguments of one exponential from the other. Therefore, it + is necessary to pass the arguments of the exponential terms in E_args. + + To handle the case where we are given Df, not f, use + is_log_deriv_k_t_radical_in_field(). + + See also + ======== + + is_log_deriv_k_t_radical_in_field, is_deriv_k + + """ + if Df: + dfa, dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)).cancel(fd**2, + include=True) + else: + dfa, dfd = fa, fd + + # Our assumption here is that each monomial is recursively transcendental + if len(DE.exts) != len(DE.D): + if [i for i in DE.cases if i == 'tan'] or \ + ({i for i in DE.cases if i == 'primitive'} - + set(DE.indices('log'))): + raise NotImplementedError("Real version of the structure " + "theorems with hypertangent support is not yet implemented.") + + # TODO: What should really be done in this case? + raise NotImplementedError("Nonelementary extensions not supported " + "in the structure theorems.") + + E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr() for i in DE.indices('exp')] + L_part = [DE.D[i].as_expr() for i in DE.indices('log')] + + # The expression dfa/dfd might not be polynomial in any of its symbols so we + # use a Dummy as the generator for PolyMatrix. + dum = Dummy() + lhs = Matrix([E_part + L_part], dum) + rhs = Matrix([dfa.as_expr()/dfd.as_expr()], dum) + + A, u = constant_system(lhs, rhs, DE) + + u = u.to_Matrix() # Poly to Expr + + if not A or not all(derivation(i, DE, basic=True).is_zero for i in u): + # If the elements of u are not all constant + # Note: See comment in constant_system + + # Also note: derivation(basic=True) calls cancel() + return None + else: + if not all(i.is_Rational for i in u): + # TODO: But maybe we can tell if they're not rational, like + # log(2)/log(3). Also, there should be an option to continue + # anyway, even if the result might potentially be wrong. + raise NotImplementedError("Cannot work with non-rational " + "coefficients in this case.") + else: + n = reduce(ilcm, [i.as_numer_denom()[1] for i in u]) + u *= n + terms = ([DE.T[i] for i in DE.indices('exp')] + + [DE.extargs[i] for i in DE.indices('log')]) + ans = list(zip(terms, u)) + result = Mul(*[Pow(i, j) for i, j in ans]) + + # exp(f) will be the same as result up to a multiplicative + # constant. We now find the log of that constant. + argterms = ([DE.extargs[i] for i in DE.indices('exp')] + + [DE.T[i] for i in DE.indices('log')]) + const = cancel(fa.as_expr()/fd.as_expr() - + Add(*[Mul(i, j/n) for i, j in zip(argterms, u)])) + + return (ans, result, n, const) + + +def is_log_deriv_k_t_radical_in_field(fa, fd, DE, case='auto', z=None): + """ + Checks if f can be written as the logarithmic derivative of a k(t)-radical. + + Explanation + =========== + + It differs from is_log_deriv_k_t_radical(fa, fd, DE, Df=False) + for any given fa, fd, DE in that it finds the solution in the + given field not in some (possibly unspecified extension) and + "in_field" with the function name is used to indicate that. + + f in k(t) can be written as the logarithmic derivative of a k(t) radical if + there exist n in ZZ and u in k(t) with n, u != 0 such that n*f == Du/u. + Either returns (n, u) or None, which means that f cannot be written as the + logarithmic derivative of a k(t)-radical. 
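+
+ As a small illustration of the defining relation (not necessarily the
+ normalization this implementation returns): in the base case (t == x,
+ D == d/dx), f == 1/(2*x) qualifies, since u == x and n == 2 give
+ Du/u == 1/x == n*f, so (2, x) is a valid answer.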
+ + case is one of {'primitive', 'exp', 'tan', 'auto'} for the primitive, + hyperexponential, and hypertangent cases, respectively. If case is 'auto', + it will attempt to determine the type of the derivation automatically. + + See also + ======== + is_log_deriv_k_t_radical, is_deriv_k + + """ + fa, fd = fa.cancel(fd, include=True) + + # f must be simple + n, s = splitfactor(fd, DE) + if not s.is_one: + pass + + z = z or Dummy('z') + H, b = residue_reduce(fa, fd, DE, z=z) + if not b: + # I will have to verify, but I believe that the answer should be + # None in this case. This should never happen for the + # functions given when solving the parametric logarithmic + # derivative problem when integration elementary functions (see + # Bronstein's book, page 255), so most likely this indicates a bug. + return None + + roots = [(i, i.real_roots()) for i, _ in H] + if not all(len(j) == i.degree() and all(k.is_Rational for k in j) for + i, j in roots): + # If f is the logarithmic derivative of a k(t)-radical, then all the + # roots of the resultant must be rational numbers. + return None + + # [(a, i), ...], where i*log(a) is a term in the log-part of the integral + # of f + respolys, residues = list(zip(*roots)) or [[], []] + # Note: this might be empty, but everything below should work find in that + # case (it should be the same as if it were [[1, 1]]) + residueterms = [(H[j][1].subs(z, i), i) for j in range(len(H)) for + i in residues[j]] + + # TODO: finish writing this and write tests + + p = cancel(fa.as_expr()/fd.as_expr() - residue_reduce_derivation(H, DE, z)) + + p = p.as_poly(DE.t) + if p is None: + # f - Dg will be in k[t] if f is the logarithmic derivative of a k(t)-radical + return None + + if p.degree(DE.t) >= max(1, DE.d.degree(DE.t)): + return None + + if case == 'auto': + case = DE.case + + if case == 'exp': + wa, wd = derivation(DE.t, DE).cancel(Poly(DE.t, DE.t), include=True) + with DecrementLevel(DE): + pa, pd = frac_in(p, DE.t, cancel=True) + wa, wd = frac_in((wa, wd), DE.t) + A = parametric_log_deriv(pa, pd, wa, wd, DE) + if A is None: + return None + n, e, u = A + u *= DE.t**e + + elif case == 'primitive': + with DecrementLevel(DE): + pa, pd = frac_in(p, DE.t) + A = is_log_deriv_k_t_radical_in_field(pa, pd, DE, case='auto') + if A is None: + return None + n, u = A + + elif case == 'base': + # TODO: we can use more efficient residue reduction from ratint() + if not fd.is_sqf or fa.degree() >= fd.degree(): + # f is the logarithmic derivative in the base case if and only if + # f = fa/fd, fd is square-free, deg(fa) < deg(fd), and + # gcd(fa, fd) == 1. The last condition is handled by cancel() above. + return None + # Note: if residueterms = [], returns (1, 1) + # f had better be 0 in that case. + n = reduce(ilcm, [i.as_numer_denom()[1] for _, i in residueterms], S.One) + u = Mul(*[Pow(i, j*n) for i, j in residueterms]) + return (n, u) + + elif case == 'tan': + raise NotImplementedError("The hypertangent case is " + "not yet implemented for is_log_deriv_k_t_radical_in_field()") + + elif case in ('other_linear', 'other_nonlinear'): + # XXX: If these are supported by the structure theorems, change to NotImplementedError. + raise ValueError("The %s case is not supported in this function." 
% case) + + else: + raise ValueError("case must be one of {'primitive', 'exp', 'tan', " + "'base', 'auto'}, not %s" % case) + + common_denom = reduce(ilcm, [i.as_numer_denom()[1] for i in [j for _, j in + residueterms]] + [n], S.One) + residueterms = [(i, j*common_denom) for i, j in residueterms] + m = common_denom//n + if common_denom != n*m: # Verify exact division + raise ValueError("Inexact division") + u = cancel(u**m*Mul(*[Pow(i, j) for i, j in residueterms])) + + return (common_denom, u) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/quadrature.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/quadrature.py new file mode 100644 index 0000000000000000000000000000000000000000..b518bd427dc9980d6a941d2e1ef4d139c5f0f5f9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/quadrature.py @@ -0,0 +1,617 @@ +from sympy.core import S, Dummy, pi +from sympy.functions.combinatorial.factorials import factorial +from sympy.functions.elementary.trigonometric import sin, cos +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.functions.special.gamma_functions import gamma +from sympy.polys.orthopolys import (legendre_poly, laguerre_poly, + hermite_poly, jacobi_poly) +from sympy.polys.rootoftools import RootOf + + +def gauss_legendre(n, n_digits): + r""" + Computes the Gauss-Legendre quadrature [1]_ points and weights. + + Explanation + =========== + + The Gauss-Legendre quadrature approximates the integral: + + .. math:: + \int_{-1}^1 f(x)\,dx \approx \sum_{i=1}^n w_i f(x_i) + + The nodes `x_i` of an order `n` quadrature rule are the roots of `P_n` + and the weights `w_i` are given by: + + .. math:: + w_i = \frac{2}{\left(1-x_i^2\right) \left(P'_n(x_i)\right)^2} + + Parameters + ========== + + n : + The order of quadrature. + n_digits : + Number of significant digits of the points and weights to return. + + Returns + ======= + + (x, w) : the ``x`` and ``w`` are lists of points and weights as Floats. + The points `x_i` and weights `w_i` are returned as ``(x, w)`` + tuple of lists. + + Examples + ======== + + >>> from sympy.integrals.quadrature import gauss_legendre + >>> x, w = gauss_legendre(3, 5) + >>> x + [-0.7746, 0, 0.7746] + >>> w + [0.55556, 0.88889, 0.55556] + >>> x, w = gauss_legendre(4, 5) + >>> x + [-0.86114, -0.33998, 0.33998, 0.86114] + >>> w + [0.34785, 0.65215, 0.65215, 0.34785] + + See Also + ======== + + gauss_laguerre, gauss_gen_laguerre, gauss_hermite, gauss_chebyshev_t, gauss_chebyshev_u, gauss_jacobi, gauss_lobatto + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Gaussian_quadrature + .. [2] https://people.sc.fsu.edu/~jburkardt/cpp_src/legendre_rule/legendre_rule.html + """ + x = Dummy("x") + p = legendre_poly(n, x, polys=True) + pd = p.diff(x) + xi = [] + w = [] + for r in p.real_roots(): + if isinstance(r, RootOf): + r = r.eval_rational(S.One/10**(n_digits+2)) + xi.append(r.n(n_digits)) + w.append((2/((1-r**2) * pd.subs(x, r)**2)).n(n_digits)) + return xi, w + + +def gauss_laguerre(n, n_digits): + r""" + Computes the Gauss-Laguerre quadrature [1]_ points and weights. + + Explanation + =========== + + The Gauss-Laguerre quadrature approximates the integral: + + .. math:: + \int_0^{\infty} e^{-x} f(x)\,dx \approx \sum_{i=1}^n w_i f(x_i) + + + The nodes `x_i` of an order `n` quadrature rule are the roots of `L_n` + and the weights `w_i` are given by: + + .. math:: + w_i = \frac{x_i}{(n+1)^2 \left(L_{n+1}(x_i)\right)^2} + + Parameters + ========== + + n : + The order of quadrature. 
+ n_digits : + Number of significant digits of the points and weights to return. + + Returns + ======= + + (x, w) : The ``x`` and ``w`` are lists of points and weights as Floats. + The points `x_i` and weights `w_i` are returned as ``(x, w)`` + tuple of lists. + + Examples + ======== + + >>> from sympy.integrals.quadrature import gauss_laguerre + >>> x, w = gauss_laguerre(3, 5) + >>> x + [0.41577, 2.2943, 6.2899] + >>> w + [0.71109, 0.27852, 0.010389] + >>> x, w = gauss_laguerre(6, 5) + >>> x + [0.22285, 1.1889, 2.9927, 5.7751, 9.8375, 15.983] + >>> w + [0.45896, 0.417, 0.11337, 0.010399, 0.00026102, 8.9855e-7] + + See Also + ======== + + gauss_legendre, gauss_gen_laguerre, gauss_hermite, gauss_chebyshev_t, gauss_chebyshev_u, gauss_jacobi, gauss_lobatto + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Gauss%E2%80%93Laguerre_quadrature + .. [2] https://people.sc.fsu.edu/~jburkardt/cpp_src/laguerre_rule/laguerre_rule.html + """ + x = Dummy("x") + p = laguerre_poly(n, x, polys=True) + p1 = laguerre_poly(n+1, x, polys=True) + xi = [] + w = [] + for r in p.real_roots(): + if isinstance(r, RootOf): + r = r.eval_rational(S.One/10**(n_digits+2)) + xi.append(r.n(n_digits)) + w.append((r/((n+1)**2 * p1.subs(x, r)**2)).n(n_digits)) + return xi, w + + +def gauss_hermite(n, n_digits): + r""" + Computes the Gauss-Hermite quadrature [1]_ points and weights. + + Explanation + =========== + + The Gauss-Hermite quadrature approximates the integral: + + .. math:: + \int_{-\infty}^{\infty} e^{-x^2} f(x)\,dx \approx + \sum_{i=1}^n w_i f(x_i) + + The nodes `x_i` of an order `n` quadrature rule are the roots of `H_n` + and the weights `w_i` are given by: + + .. math:: + w_i = \frac{2^{n-1} n! \sqrt{\pi}}{n^2 \left(H_{n-1}(x_i)\right)^2} + + Parameters + ========== + + n : + The order of quadrature. + n_digits : + Number of significant digits of the points and weights to return. + + Returns + ======= + + (x, w) : The ``x`` and ``w`` are lists of points and weights as Floats. + The points `x_i` and weights `w_i` are returned as ``(x, w)`` + tuple of lists. + + Examples + ======== + + >>> from sympy.integrals.quadrature import gauss_hermite + >>> x, w = gauss_hermite(3, 5) + >>> x + [-1.2247, 0, 1.2247] + >>> w + [0.29541, 1.1816, 0.29541] + + >>> x, w = gauss_hermite(6, 5) + >>> x + [-2.3506, -1.3358, -0.43608, 0.43608, 1.3358, 2.3506] + >>> w + [0.00453, 0.15707, 0.72463, 0.72463, 0.15707, 0.00453] + + See Also + ======== + + gauss_legendre, gauss_laguerre, gauss_gen_laguerre, gauss_chebyshev_t, gauss_chebyshev_u, gauss_jacobi, gauss_lobatto + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Gauss-Hermite_Quadrature + .. [2] https://people.sc.fsu.edu/~jburkardt/cpp_src/hermite_rule/hermite_rule.html + .. [3] https://people.sc.fsu.edu/~jburkardt/cpp_src/gen_hermite_rule/gen_hermite_rule.html + """ + x = Dummy("x") + p = hermite_poly(n, x, polys=True) + p1 = hermite_poly(n-1, x, polys=True) + xi = [] + w = [] + for r in p.real_roots(): + if isinstance(r, RootOf): + r = r.eval_rational(S.One/10**(n_digits+2)) + xi.append(r.n(n_digits)) + w.append(((2**(n-1) * factorial(n) * sqrt(pi)) / + (n**2 * p1.subs(x, r)**2)).n(n_digits)) + return xi, w + + +def gauss_gen_laguerre(n, alpha, n_digits): + r""" + Computes the generalized Gauss-Laguerre quadrature [1]_ points and weights. + + Explanation + =========== + + The generalized Gauss-Laguerre quadrature approximates the integral: + + .. 
math:: + \int_{0}^\infty x^{\alpha} e^{-x} f(x)\,dx \approx + \sum_{i=1}^n w_i f(x_i) + + The nodes `x_i` of an order `n` quadrature rule are the roots of + `L^{\alpha}_n` and the weights `w_i` are given by: + + .. math:: + w_i = \frac{\Gamma(\alpha+n)} + {n \Gamma(n) L^{\alpha}_{n-1}(x_i) L^{\alpha+1}_{n-1}(x_i)} + + Parameters + ========== + + n : + The order of quadrature. + + alpha : + The exponent of the singularity, `\alpha > -1`. + + n_digits : + Number of significant digits of the points and weights to return. + + Returns + ======= + + (x, w) : the ``x`` and ``w`` are lists of points and weights as Floats. + The points `x_i` and weights `w_i` are returned as ``(x, w)`` + tuple of lists. + + Examples + ======== + + >>> from sympy import S + >>> from sympy.integrals.quadrature import gauss_gen_laguerre + >>> x, w = gauss_gen_laguerre(3, -S.Half, 5) + >>> x + [0.19016, 1.7845, 5.5253] + >>> w + [1.4493, 0.31413, 0.00906] + + >>> x, w = gauss_gen_laguerre(4, 3*S.Half, 5) + >>> x + [0.97851, 2.9904, 6.3193, 11.712] + >>> w + [0.53087, 0.67721, 0.11895, 0.0023152] + + See Also + ======== + + gauss_legendre, gauss_laguerre, gauss_hermite, gauss_chebyshev_t, gauss_chebyshev_u, gauss_jacobi, gauss_lobatto + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Gauss%E2%80%93Laguerre_quadrature + .. [2] https://people.sc.fsu.edu/~jburkardt/cpp_src/gen_laguerre_rule/gen_laguerre_rule.html + """ + x = Dummy("x") + p = laguerre_poly(n, x, alpha=alpha, polys=True) + p1 = laguerre_poly(n-1, x, alpha=alpha, polys=True) + p2 = laguerre_poly(n-1, x, alpha=alpha+1, polys=True) + xi = [] + w = [] + for r in p.real_roots(): + if isinstance(r, RootOf): + r = r.eval_rational(S.One/10**(n_digits+2)) + xi.append(r.n(n_digits)) + w.append((gamma(alpha+n) / + (n*gamma(n)*p1.subs(x, r)*p2.subs(x, r))).n(n_digits)) + return xi, w + + +def gauss_chebyshev_t(n, n_digits): + r""" + Computes the Gauss-Chebyshev quadrature [1]_ points and weights of + the first kind. + + Explanation + =========== + + The Gauss-Chebyshev quadrature of the first kind approximates the integral: + + .. math:: + \int_{-1}^{1} \frac{1}{\sqrt{1-x^2}} f(x)\,dx \approx + \sum_{i=1}^n w_i f(x_i) + + The nodes `x_i` of an order `n` quadrature rule are the roots of `T_n` + and the weights `w_i` are given by: + + .. math:: + w_i = \frac{\pi}{n} + + Parameters + ========== + + n : + The order of quadrature. + + n_digits : + Number of significant digits of the points and weights to return. + + Returns + ======= + + (x, w) : the ``x`` and ``w`` are lists of points and weights as Floats. + The points `x_i` and weights `w_i` are returned as ``(x, w)`` + tuple of lists. + + Examples + ======== + + >>> from sympy.integrals.quadrature import gauss_chebyshev_t + >>> x, w = gauss_chebyshev_t(3, 5) + >>> x + [0.86602, 0, -0.86602] + >>> w + [1.0472, 1.0472, 1.0472] + + >>> x, w = gauss_chebyshev_t(6, 5) + >>> x + [0.96593, 0.70711, 0.25882, -0.25882, -0.70711, -0.96593] + >>> w + [0.5236, 0.5236, 0.5236, 0.5236, 0.5236, 0.5236] + + See Also + ======== + + gauss_legendre, gauss_laguerre, gauss_hermite, gauss_gen_laguerre, gauss_chebyshev_u, gauss_jacobi, gauss_lobatto + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Chebyshev%E2%80%93Gauss_quadrature + .. 
[2] https://people.sc.fsu.edu/~jburkardt/cpp_src/chebyshev1_rule/chebyshev1_rule.html + """ + xi = [] + w = [] + for i in range(1, n+1): + xi.append((cos((2*i-S.One)/(2*n)*S.Pi)).n(n_digits)) + w.append((S.Pi/n).n(n_digits)) + return xi, w + + +def gauss_chebyshev_u(n, n_digits): + r""" + Computes the Gauss-Chebyshev quadrature [1]_ points and weights of + the second kind. + + Explanation + =========== + + The Gauss-Chebyshev quadrature of the second kind approximates the + integral: + + .. math:: + \int_{-1}^{1} \sqrt{1-x^2} f(x)\,dx \approx \sum_{i=1}^n w_i f(x_i) + + The nodes `x_i` of an order `n` quadrature rule are the roots of `U_n` + and the weights `w_i` are given by: + + .. math:: + w_i = \frac{\pi}{n+1} \sin^2 \left(\frac{i}{n+1}\pi\right) + + Parameters + ========== + + n : the order of quadrature + + n_digits : number of significant digits of the points and weights to return + + Returns + ======= + + (x, w) : the ``x`` and ``w`` are lists of points and weights as Floats. + The points `x_i` and weights `w_i` are returned as ``(x, w)`` + tuple of lists. + + Examples + ======== + + >>> from sympy.integrals.quadrature import gauss_chebyshev_u + >>> x, w = gauss_chebyshev_u(3, 5) + >>> x + [0.70711, 0, -0.70711] + >>> w + [0.3927, 0.7854, 0.3927] + + >>> x, w = gauss_chebyshev_u(6, 5) + >>> x + [0.90097, 0.62349, 0.22252, -0.22252, -0.62349, -0.90097] + >>> w + [0.084489, 0.27433, 0.42658, 0.42658, 0.27433, 0.084489] + + See Also + ======== + + gauss_legendre, gauss_laguerre, gauss_hermite, gauss_gen_laguerre, gauss_chebyshev_t, gauss_jacobi, gauss_lobatto + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Chebyshev%E2%80%93Gauss_quadrature + .. [2] https://people.sc.fsu.edu/~jburkardt/cpp_src/chebyshev2_rule/chebyshev2_rule.html + """ + xi = [] + w = [] + for i in range(1, n+1): + xi.append((cos(i/(n+S.One)*S.Pi)).n(n_digits)) + w.append((S.Pi/(n+S.One)*sin(i*S.Pi/(n+S.One))**2).n(n_digits)) + return xi, w + + +def gauss_jacobi(n, alpha, beta, n_digits): + r""" + Computes the Gauss-Jacobi quadrature [1]_ points and weights. + + Explanation + =========== + + The Gauss-Jacobi quadrature of the first kind approximates the integral: + + .. math:: + \int_{-1}^1 (1-x)^\alpha (1+x)^\beta f(x)\,dx \approx + \sum_{i=1}^n w_i f(x_i) + + The nodes `x_i` of an order `n` quadrature rule are the roots of + `P^{(\alpha,\beta)}_n` and the weights `w_i` are given by: + + .. math:: + w_i = -\frac{2n+\alpha+\beta+2}{n+\alpha+\beta+1} + \frac{\Gamma(n+\alpha+1)\Gamma(n+\beta+1)} + {\Gamma(n+\alpha+\beta+1)(n+1)!} + \frac{2^{\alpha+\beta}}{P'_n(x_i) + P^{(\alpha,\beta)}_{n+1}(x_i)} + + Parameters + ========== + + n : the order of quadrature + + alpha : the first parameter of the Jacobi Polynomial, `\alpha > -1` + + beta : the second parameter of the Jacobi Polynomial, `\beta > -1` + + n_digits : number of significant digits of the points and weights to return + + Returns + ======= + + (x, w) : the ``x`` and ``w`` are lists of points and weights as Floats. + The points `x_i` and weights `w_i` are returned as ``(x, w)`` + tuple of lists. 
+ + Examples + ======== + + >>> from sympy import S + >>> from sympy.integrals.quadrature import gauss_jacobi + >>> x, w = gauss_jacobi(3, S.Half, -S.Half, 5) + >>> x + [-0.90097, -0.22252, 0.62349] + >>> w + [1.7063, 1.0973, 0.33795] + + >>> x, w = gauss_jacobi(6, 1, 1, 5) + >>> x + [-0.87174, -0.5917, -0.2093, 0.2093, 0.5917, 0.87174] + >>> w + [0.050584, 0.22169, 0.39439, 0.39439, 0.22169, 0.050584] + + See Also + ======== + + gauss_legendre, gauss_laguerre, gauss_hermite, gauss_gen_laguerre, + gauss_chebyshev_t, gauss_chebyshev_u, gauss_lobatto + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Gauss%E2%80%93Jacobi_quadrature + .. [2] https://people.sc.fsu.edu/~jburkardt/cpp_src/jacobi_rule/jacobi_rule.html + .. [3] https://people.sc.fsu.edu/~jburkardt/cpp_src/gegenbauer_rule/gegenbauer_rule.html + """ + x = Dummy("x") + p = jacobi_poly(n, alpha, beta, x, polys=True) + pd = p.diff(x) + pn = jacobi_poly(n+1, alpha, beta, x, polys=True) + xi = [] + w = [] + for r in p.real_roots(): + if isinstance(r, RootOf): + r = r.eval_rational(S.One/10**(n_digits+2)) + xi.append(r.n(n_digits)) + w.append(( + - (2*n+alpha+beta+2) / (n+alpha+beta+S.One) * + (gamma(n+alpha+1)*gamma(n+beta+1)) / + (gamma(n+alpha+beta+S.One)*gamma(n+2)) * + 2**(alpha+beta) / (pd.subs(x, r) * pn.subs(x, r))).n(n_digits)) + return xi, w + + +def gauss_lobatto(n, n_digits): + r""" + Computes the Gauss-Lobatto quadrature [1]_ points and weights. + + Explanation + =========== + + The Gauss-Lobatto quadrature approximates the integral: + + .. math:: + \int_{-1}^1 f(x)\,dx \approx \sum_{i=1}^n w_i f(x_i) + + The nodes `x_i` of an order `n` quadrature rule are the roots of `P'_(n-1)` + and the weights `w_i` are given by: + + .. math:: + &w_i = \frac{2}{n(n-1) \left[P_{n-1}(x_i)\right]^2},\quad x\neq\pm 1\\ + &w_i = \frac{2}{n(n-1)},\quad x=\pm 1 + + Parameters + ========== + + n : the order of quadrature + + n_digits : number of significant digits of the points and weights to return + + Returns + ======= + + (x, w) : the ``x`` and ``w`` are lists of points and weights as Floats. + The points `x_i` and weights `w_i` are returned as ``(x, w)`` + tuple of lists. + + Examples + ======== + + >>> from sympy.integrals.quadrature import gauss_lobatto + >>> x, w = gauss_lobatto(3, 5) + >>> x + [-1, 0, 1] + >>> w + [0.33333, 1.3333, 0.33333] + >>> x, w = gauss_lobatto(4, 5) + >>> x + [-1, -0.44721, 0.44721, 1] + >>> w + [0.16667, 0.83333, 0.83333, 0.16667] + + See Also + ======== + + gauss_legendre,gauss_laguerre, gauss_gen_laguerre, gauss_hermite, gauss_chebyshev_t, gauss_chebyshev_u, gauss_jacobi + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Gaussian_quadrature#Gauss.E2.80.93Lobatto_rules + .. 
[2] https://web.archive.org/web/20200118141346/http://people.math.sfu.ca/~cbm/aands/page_888.htm + """ + x = Dummy("x") + p = legendre_poly(n-1, x, polys=True) + pd = p.diff(x) + xi = [] + w = [] + for r in pd.real_roots(): + if isinstance(r, RootOf): + r = r.eval_rational(S.One/10**(n_digits+2)) + xi.append(r.n(n_digits)) + w.append((2/(n*(n-1) * p.subs(x, r)**2)).n(n_digits)) + + xi.insert(0, -1) + xi.append(1) + w.insert(0, (S(2)/(n*(n-1))).n(n_digits)) + w.append((S(2)/(n*(n-1))).n(n_digits)) + return xi, w diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/rationaltools.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/rationaltools.py new file mode 100644 index 0000000000000000000000000000000000000000..844dcc5febef7556150a87bfef199d07ca4a9dcf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/rationaltools.py @@ -0,0 +1,418 @@ +"""This module implements tools for integrating rational functions. """ + +from sympy.core.function import Lambda +from sympy.core.numbers import I +from sympy.core.singleton import S +from sympy.core.symbol import (Dummy, Symbol, symbols) +from sympy.functions.elementary.exponential import log +from sympy.functions.elementary.trigonometric import atan +from sympy.polys.polyroots import roots +from sympy.polys.polytools import cancel +from sympy.polys.rootoftools import RootSum +from sympy.polys import Poly, resultant, ZZ + + +def ratint(f, x, **flags): + """ + Performs indefinite integration of rational functions. + + Explanation + =========== + + Given a field :math:`K` and a rational function :math:`f = p/q`, + where :math:`p` and :math:`q` are polynomials in :math:`K[x]`, + returns a function :math:`g` such that :math:`f = g'`. + + Examples + ======== + + >>> from sympy.integrals.rationaltools import ratint + >>> from sympy.abc import x + + >>> ratint(36/(x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2), x) + (12*x + 6)/(x**2 - 1) + 4*log(x - 2) - 4*log(x + 1) + + References + ========== + + .. [1] M. Bronstein, Symbolic Integration I: Transcendental + Functions, Second Edition, Springer-Verlag, 2005, pp. 
35-70 + + See Also + ======== + + sympy.integrals.integrals.Integral.doit + sympy.integrals.rationaltools.ratint_logpart + sympy.integrals.rationaltools.ratint_ratpart + + """ + if isinstance(f, tuple): + p, q = f + else: + p, q = f.as_numer_denom() + + p, q = Poly(p, x, composite=False, field=True), Poly(q, x, composite=False, field=True) + + coeff, p, q = p.cancel(q) + poly, p = p.div(q) + + result = poly.integrate(x).as_expr() + + if p.is_zero: + return coeff*result + + g, h = ratint_ratpart(p, q, x) + + P, Q = h.as_numer_denom() + + P = Poly(P, x) + Q = Poly(Q, x) + + q, r = P.div(Q) + + result += g + q.integrate(x).as_expr() + + if not r.is_zero: + symbol = flags.get('symbol', 't') + + if not isinstance(symbol, Symbol): + t = Dummy(symbol) + else: + t = symbol.as_dummy() + + L = ratint_logpart(r, Q, x, t) + + real = flags.get('real') + + if real is None: + if isinstance(f, tuple): + p, q = f + atoms = p.atoms() | q.atoms() + else: + atoms = f.atoms() + + for elt in atoms - {x}: + if not elt.is_extended_real: + real = False + break + else: + real = True + + eps = S.Zero + + if not real: + for h, q in L: + _, h = h.primitive() + eps += RootSum( + q, Lambda(t, t*log(h.as_expr())), quadratic=True) + else: + for h, q in L: + _, h = h.primitive() + R = log_to_real(h, q, x, t) + + if R is not None: + eps += R + else: + eps += RootSum( + q, Lambda(t, t*log(h.as_expr())), quadratic=True) + + result += eps + + return coeff*result + + +def ratint_ratpart(f, g, x): + """ + Horowitz-Ostrogradsky algorithm. + + Explanation + =========== + + Given a field K and polynomials f and g in K[x], such that f and g + are coprime and deg(f) < deg(g), returns fractions A and B in K(x), + such that f/g = A' + B and B has square-free denominator. + + Examples + ======== + + >>> from sympy.integrals.rationaltools import ratint_ratpart + >>> from sympy.abc import x, y + >>> from sympy import Poly + >>> ratint_ratpart(Poly(1, x, domain='ZZ'), + ... Poly(x + 1, x, domain='ZZ'), x) + (0, 1/(x + 1)) + >>> ratint_ratpart(Poly(1, x, domain='EX'), + ... Poly(x**2 + y**2, x, domain='EX'), x) + (0, 1/(x**2 + y**2)) + >>> ratint_ratpart(Poly(36, x, domain='ZZ'), + ... Poly(x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2, x, domain='ZZ'), x) + ((12*x + 6)/(x**2 - 1), 12/(x**2 - x - 2)) + + See Also + ======== + + ratint, ratint_logpart + """ + from sympy.solvers.solvers import solve + + f = Poly(f, x) + g = Poly(g, x) + + u, v, _ = g.cofactors(g.diff()) + + n = u.degree() + m = v.degree() + + A_coeffs = [ Dummy('a' + str(n - i)) for i in range(0, n) ] + B_coeffs = [ Dummy('b' + str(m - i)) for i in range(0, m) ] + + C_coeffs = A_coeffs + B_coeffs + + A = Poly(A_coeffs, x, domain=ZZ[C_coeffs]) + B = Poly(B_coeffs, x, domain=ZZ[C_coeffs]) + + H = f - A.diff()*v + A*(u.diff()*v).quo(u) - B*u + + result = solve(H.coeffs(), C_coeffs) + + A = A.as_expr().subs(result) + B = B.as_expr().subs(result) + + rat_part = cancel(A/u.as_expr(), x) + log_part = cancel(B/v.as_expr(), x) + + return rat_part, log_part + + +def ratint_logpart(f, g, x, t=None): + r""" + Lazard-Rioboo-Trager algorithm. 
+ + Explanation + =========== + + Given a field K and polynomials f and g in K[x], such that f and g + are coprime, deg(f) < deg(g) and g is square-free, returns a list + of tuples (s_i, q_i) of polynomials, for i = 1..n, such that s_i + in K[t, x] and q_i in K[t], and:: + + ___ ___ + d f d \ ` \ ` + -- - = -- ) ) a log(s_i(a, x)) + dx g dx /__, /__, + i=1..n a | q_i(a) = 0 + + Examples + ======== + + >>> from sympy.integrals.rationaltools import ratint_logpart + >>> from sympy.abc import x + >>> from sympy import Poly + >>> ratint_logpart(Poly(1, x, domain='ZZ'), + ... Poly(x**2 + x + 1, x, domain='ZZ'), x) + [(Poly(x + 3*_t/2 + 1/2, x, domain='QQ[_t]'), + ...Poly(3*_t**2 + 1, _t, domain='ZZ'))] + >>> ratint_logpart(Poly(12, x, domain='ZZ'), + ... Poly(x**2 - x - 2, x, domain='ZZ'), x) + [(Poly(x - 3*_t/8 - 1/2, x, domain='QQ[_t]'), + ...Poly(-_t**2 + 16, _t, domain='ZZ'))] + + See Also + ======== + + ratint, ratint_ratpart + """ + f, g = Poly(f, x), Poly(g, x) + + t = t or Dummy('t') + a, b = g, f - g.diff()*Poly(t, x) + + res, R = resultant(a, b, includePRS=True) + res = Poly(res, t, composite=False) + + assert res, "BUG: resultant(%s, %s) cannot be zero" % (a, b) + + R_map, H = {}, [] + + for r in R: + R_map[r.degree()] = r + + def _include_sign(c, sqf): + if c.is_extended_real and (c < 0) == True: + h, k = sqf[0] + c_poly = c.as_poly(h.gens) + sqf[0] = h*c_poly, k + + C, res_sqf = res.sqf_list() + _include_sign(C, res_sqf) + + for q, i in res_sqf: + _, q = q.primitive() + + if g.degree() == i: + H.append((g, q)) + else: + h = R_map[i] + h_lc = Poly(h.LC(), t, field=True) + + c, h_lc_sqf = h_lc.sqf_list(all=True) + _include_sign(c, h_lc_sqf) + + for a, j in h_lc_sqf: + h = h.quo(Poly(a.gcd(q)**j, x)) + + inv, coeffs = h_lc.invert(q), [S.One] + + for coeff in h.coeffs()[1:]: + coeff = coeff.as_poly(inv.gens) + T = (inv*coeff).rem(q) + coeffs.append(T.as_expr()) + + h = Poly(dict(list(zip(h.monoms(), coeffs))), x) + + H.append((h, q)) + + return H + + +def log_to_atan(f, g): + """ + Convert complex logarithms to real arctangents. + + Explanation + =========== + + Given a real field K and polynomials f and g in K[x], with g != 0, + returns a sum h of arctangents of polynomials in K[x], such that: + + dh d f + I g + -- = -- I log( ------- ) + dx dx f - I g + + Examples + ======== + + >>> from sympy.integrals.rationaltools import log_to_atan + >>> from sympy.abc import x + >>> from sympy import Poly, sqrt, S + >>> log_to_atan(Poly(x, x, domain='ZZ'), Poly(1, x, domain='ZZ')) + 2*atan(x) + >>> log_to_atan(Poly(x + S(1)/2, x, domain='QQ'), + ... Poly(sqrt(3)/2, x, domain='EX')) + 2*atan(2*sqrt(3)*x/3 + sqrt(3)/3) + + See Also + ======== + + log_to_real + """ + if f.degree() < g.degree(): + f, g = -g, f + + f = f.to_field() + g = g.to_field() + + p, q = f.div(g) + + if q.is_zero: + return 2*atan(p.as_expr()) + else: + s, t, h = g.gcdex(-f) + u = (f*s + g*t).quo(h) + A = 2*atan(u.as_expr()) + + return A + log_to_atan(s, t) + + +def log_to_real(h, q, x, t): + r""" + Convert complex logarithms to real functions. + + Explanation + =========== + + Given real field K and polynomials h in K[t,x] and q in K[t], + returns real function f such that: + ___ + df d \ ` + -- = -- ) a log(h(a, x)) + dx dx /__, + a | q(a) = 0 + + Examples + ======== + + >>> from sympy.integrals.rationaltools import log_to_real + >>> from sympy.abc import x, y + >>> from sympy import Poly, S + >>> log_to_real(Poly(x + 3*y/2 + S(1)/2, x, domain='QQ[y]'), + ... 
Poly(3*y**2 + 1, y, domain='ZZ'), x, y) + 2*sqrt(3)*atan(2*sqrt(3)*x/3 + sqrt(3)/3)/3 + >>> log_to_real(Poly(x**2 - 1, x, domain='ZZ'), + ... Poly(-2*y + 1, y, domain='ZZ'), x, y) + log(x**2 - 1)/2 + + See Also + ======== + + log_to_atan + """ + from sympy.simplify.radsimp import collect + u, v = symbols('u,v', cls=Dummy) + + H = h.as_expr().subs({t: u + I*v}).expand() + Q = q.as_expr().subs({t: u + I*v}).expand() + + H_map = collect(H, I, evaluate=False) + Q_map = collect(Q, I, evaluate=False) + + a, b = H_map.get(S.One, S.Zero), H_map.get(I, S.Zero) + c, d = Q_map.get(S.One, S.Zero), Q_map.get(I, S.Zero) + + R = Poly(resultant(c, d, v), u) + + R_u = roots(R, filter='R') + + if len(R_u) != R.count_roots(): + return None + + result = S.Zero + + for r_u in R_u.keys(): + C = Poly(c.subs({u: r_u}), v) + R_v = roots(C, filter='R') + + if len(R_v) != C.count_roots(): + return None + + R_v_paired = [] # take one from each pair of conjugate roots + for r_v in R_v: + if r_v not in R_v_paired and -r_v not in R_v_paired: + if r_v.is_negative or r_v.could_extract_minus_sign(): + R_v_paired.append(-r_v) + elif not r_v.is_zero: + R_v_paired.append(r_v) + + for r_v in R_v_paired: + + D = d.subs({u: r_u, v: r_v}) + + if D.evalf(chop=True) != 0: + continue + + A = Poly(a.subs({u: r_u, v: r_v}), x) + B = Poly(b.subs({u: r_u, v: r_v}), x) + + AB = (A**2 + B**2).as_expr() + + result += r_u*log(AB) + r_v*log_to_atan(A, B) + + R_q = roots(q, filter='R') + + if len(R_q) != q.count_roots(): + return None + + for r in R_q.keys(): + result += r*log(h.as_expr().subs(t, r)) + + return result diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/rde.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/rde.py new file mode 100644 index 0000000000000000000000000000000000000000..975b8db328976aa0c22659d6643138e34ef5b623 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/rde.py @@ -0,0 +1,800 @@ +""" +Algorithms for solving the Risch differential equation. + +Given a differential field K of characteristic 0 that is a simple +monomial extension of a base field k and f, g in K, the Risch +Differential Equation problem is to decide if there exist y in K such +that Dy + f*y == g and to find one if there are some. If t is a +monomial over k and the coefficients of f and g are in k(t), then y is +in k(t), and the outline of the algorithm here is given as: + +1. Compute the normal part n of the denominator of y. The problem is +then reduced to finding y' in k, where y == y'/n. +2. Compute the special part s of the denominator of y. The problem is +then reduced to finding y'' in k[t], where y == y''/(n*s) +3. Bound the degree of y''. +4. Reduce the equation Dy + f*y == g to a similar equation with f, g in +k[t]. +5. Find the solutions in k[t] of bounded degree of the reduced equation. + +See Chapter 6 of "Symbolic Integration I: Transcendental Functions" by +Manuel Bronstein. See also the docstring of risch.py. 
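+
+ As a condensed sketch, the driver rischDE() defined at the bottom of this
+ file chains these steps roughly as follows (error handling and the shortcut
+ for C == 0 are omitted; the DifferentialExtension DE is assumed to have been
+ built elsewhere, e.g. by risch.py):
+
+     _, (fa, fd) = weak_normalizer(fa, fd, DE)                     # preparation
+     a, (ba, bd), (ca, cd), hn = normal_denom(fa, fd, ga, gd, DE)  # step 1
+     A, B, C, hs = special_denom(a, ba, bd, ca, cd, DE)            # step 2
+     n = bound_degree(A, B, C, DE)                                 # step 3
+     B, C, m, alpha, beta = spde(A, B, C, n, DE)                   # step 4
+     y = solve_poly_rde(B, C, m, DE)                               # step 5
+     # a solution of Dy + f*y == g is then (alpha*y + beta)/(hn*hs)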
+""" + +from operator import mul +from functools import reduce + +from sympy.core import oo +from sympy.core.symbol import Dummy + +from sympy.polys import Poly, gcd, ZZ, cancel + +from sympy.functions.elementary.complexes import (im, re) +from sympy.functions.elementary.miscellaneous import sqrt + +from sympy.integrals.risch import (gcdex_diophantine, frac_in, derivation, + splitfactor, NonElementaryIntegralException, DecrementLevel, recognize_log_derivative) + +# TODO: Add messages to NonElementaryIntegralException errors + + +def order_at(a, p, t): + """ + Computes the order of a at p, with respect to t. + + Explanation + =========== + + For a, p in k[t], the order of a at p is defined as nu_p(a) = max({n + in Z+ such that p**n|a}), where a != 0. If a == 0, nu_p(a) = +oo. + + To compute the order at a rational function, a/b, use the fact that + nu_p(a/b) == nu_p(a) - nu_p(b). + """ + if a.is_zero: + return oo + if p == Poly(t, t): + return a.as_poly(t).ET()[0][0] + + # Uses binary search for calculating the power. power_list collects the tuples + # (p^k,k) where each k is some power of 2. After deciding the largest k + # such that k is power of 2 and p^k|a the loop iteratively calculates + # the actual power. + power_list = [] + p1 = p + r = a.rem(p1) + tracks_power = 1 + while r.is_zero: + power_list.append((p1,tracks_power)) + p1 = p1*p1 + tracks_power *= 2 + r = a.rem(p1) + n = 0 + product = Poly(1, t) + while len(power_list) != 0: + final = power_list.pop() + productf = product*final[0] + r = a.rem(productf) + if r.is_zero: + n += final[1] + product = productf + return n + + +def order_at_oo(a, d, t): + """ + Computes the order of a/d at oo (infinity), with respect to t. + + For f in k(t), the order or f at oo is defined as deg(d) - deg(a), where + f == a/d. + """ + if a.is_zero: + return oo + return d.degree(t) - a.degree(t) + + +def weak_normalizer(a, d, DE, z=None): + """ + Weak normalization. + + Explanation + =========== + + Given a derivation D on k[t] and f == a/d in k(t), return q in k[t] + such that f - Dq/q is weakly normalized with respect to t. + + f in k(t) is said to be "weakly normalized" with respect to t if + residue_p(f) is not a positive integer for any normal irreducible p + in k[t] such that f is in R_p (Definition 6.1.1). If f has an + elementary integral, this is equivalent to no logarithm of + integral(f) whose argument depends on t has a positive integer + coefficient, where the arguments of the logarithms not in k(t) are + in k[t]. + + Returns (q, f - Dq/q) + """ + z = z or Dummy('z') + dn, ds = splitfactor(d, DE) + + # Compute d1, where dn == d1*d2**2*...*dn**n is a square-free + # factorization of d. + g = gcd(dn, dn.diff(DE.t)) + d_sqf_part = dn.quo(g) + d1 = d_sqf_part.quo(gcd(d_sqf_part, g)) + + a1, b = gcdex_diophantine(d.quo(d1).as_poly(DE.t), d1.as_poly(DE.t), + a.as_poly(DE.t)) + r = (a - Poly(z, DE.t)*derivation(d1, DE)).as_poly(DE.t).resultant( + d1.as_poly(DE.t)) + r = Poly(r, z) + + if not r.expr.has(z): + return (Poly(1, DE.t), (a, d)) + + N = [i for i in r.real_roots() if i in ZZ and i > 0] + + q = reduce(mul, [gcd(a - Poly(n, DE.t)*derivation(d1, DE), d1) for n in N], + Poly(1, DE.t)) + + dq = derivation(q, DE) + sn = q*a - d*dq + sd = q*d + sn, sd = sn.cancel(sd, include=True) + + return (q, (sn, sd)) + + +def normal_denom(fa, fd, ga, gd, DE): + """ + Normal part of the denominator. 
+ + Explanation + =========== + + Given a derivation D on k[t] and f, g in k(t) with f weakly + normalized with respect to t, either raise NonElementaryIntegralException, + in which case the equation Dy + f*y == g has no solution in k(t), or the + quadruplet (a, b, c, h) such that a, h in k[t], b, c in k, and for any + solution y in k(t) of Dy + f*y == g, q = y*h in k satisfies + a*Dq + b*q == c. + + This constitutes step 1 in the outline given in the rde.py docstring. + """ + dn, ds = splitfactor(fd, DE) + en, es = splitfactor(gd, DE) + + p = dn.gcd(en) + h = en.gcd(en.diff(DE.t)).quo(p.gcd(p.diff(DE.t))) + + a = dn*h + c = a*h + if c.div(en)[1]: + # en does not divide dn*h**2 + raise NonElementaryIntegralException + ca = c*ga + ca, cd = ca.cancel(gd, include=True) + + ba = a*fa - dn*derivation(h, DE)*fd + ba, bd = ba.cancel(fd, include=True) + + # (dn*h, dn*h*f - dn*Dh, dn*h**2*g, h) + return (a, (ba, bd), (ca, cd), h) + + +def special_denom(a, ba, bd, ca, cd, DE, case='auto'): + """ + Special part of the denominator. + + Explanation + =========== + + case is one of {'exp', 'tan', 'primitive'} for the hyperexponential, + hypertangent, and primitive cases, respectively. For the + hyperexponential (resp. hypertangent) case, given a derivation D on + k[t] and a in k[t], b, c, in k with Dt/t in k (resp. Dt/(t**2 + 1) in + k, sqrt(-1) not in k), a != 0, and gcd(a, t) == 1 (resp. + gcd(a, t**2 + 1) == 1), return the quadruplet (A, B, C, 1/h) such that + A, B, C, h in k[t] and for any solution q in k of a*Dq + b*q == c, + r = qh in k[t] satisfies A*Dr + B*r == C. + + For ``case == 'primitive'``, k == k[t], so it returns (a, b, c, 1) in + this case. + + This constitutes step 2 of the outline given in the rde.py docstring. + """ + # TODO: finish writing this and write tests + + if case == 'auto': + case = DE.case + + if case == 'exp': + p = Poly(DE.t, DE.t) + elif case == 'tan': + p = Poly(DE.t**2 + 1, DE.t) + elif case in ('primitive', 'base'): + B = ba.to_field().quo(bd) + C = ca.to_field().quo(cd) + return (a, B, C, Poly(1, DE.t)) + else: + raise ValueError("case must be one of {'exp', 'tan', 'primitive', " + "'base'}, not %s." % case) + + nb = order_at(ba, p, DE.t) - order_at(bd, p, DE.t) + nc = order_at(ca, p, DE.t) - order_at(cd, p, DE.t) + + n = min(0, nc - min(0, nb)) + if not nb: + # Possible cancellation. + from .prde import parametric_log_deriv + if case == 'exp': + dcoeff = DE.d.quo(Poly(DE.t, DE.t)) + with DecrementLevel(DE): # We are guaranteed to not have problems, + # because case != 'base'. + alphaa, alphad = frac_in(-ba.eval(0)/bd.eval(0)/a.eval(0), DE.t) + etaa, etad = frac_in(dcoeff, DE.t) + A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE) + if A is not None: + Q, m, z = A + if Q == 1: + n = min(n, m) + + elif case == 'tan': + dcoeff = DE.d.quo(Poly(DE.t**2+1, DE.t)) + with DecrementLevel(DE): # We are guaranteed to not have problems, + # because case != 'base'. 
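+ # Evaluate -b/a at t == sqrt(-1) and split the result into its
+ # imaginary part (alpha) and real part (beta); the hypertangent
+ # case assumes sqrt(-1) is not in k, so both parts are handled
+ # separately below.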
+ alphaa, alphad = frac_in(im(-ba.eval(sqrt(-1))/bd.eval(sqrt(-1))/a.eval(sqrt(-1))), DE.t) + betaa, betad = frac_in(re(-ba.eval(sqrt(-1))/bd.eval(sqrt(-1))/a.eval(sqrt(-1))), DE.t) + etaa, etad = frac_in(dcoeff, DE.t) + + if recognize_log_derivative(Poly(2, DE.t)*betaa, betad, DE): + A = parametric_log_deriv(alphaa*Poly(sqrt(-1), DE.t)*betad+alphad*betaa, alphad*betad, etaa, etad, DE) + if A is not None: + Q, m, z = A + if Q == 1: + n = min(n, m) + N = max(0, -nb, n - nc) + pN = p**N + pn = p**-n + + A = a*pN + B = ba*pN.quo(bd) + Poly(n, DE.t)*a*derivation(p, DE).quo(p)*pN + C = (ca*pN*pn).quo(cd) + h = pn + + # (a*p**N, (b + n*a*Dp/p)*p**N, c*p**(N - n), p**-n) + return (A, B, C, h) + + +def bound_degree(a, b, cQ, DE, case='auto', parametric=False): + """ + Bound on polynomial solutions. + + Explanation + =========== + + Given a derivation D on k[t] and ``a``, ``b``, ``c`` in k[t] with ``a != 0``, return + n in ZZ such that deg(q) <= n for any solution q in k[t] of + a*Dq + b*q == c, when parametric=False, or deg(q) <= n for any solution + c1, ..., cm in Const(k) and q in k[t] of a*Dq + b*q == Sum(ci*gi, (i, 1, m)) + when parametric=True. + + For ``parametric=False``, ``cQ`` is ``c``, a ``Poly``; for ``parametric=True``, ``cQ`` is Q == + [q1, ..., qm], a list of Polys. + + This constitutes step 3 of the outline given in the rde.py docstring. + """ + # TODO: finish writing this and write tests + + if case == 'auto': + case = DE.case + + da = a.degree(DE.t) + db = b.degree(DE.t) + + # The parametric and regular cases are identical, except for this part + if parametric: + dc = max([i.degree(DE.t) for i in cQ]) + else: + dc = cQ.degree(DE.t) + + alpha = cancel(-b.as_poly(DE.t).LC().as_expr()/ + a.as_poly(DE.t).LC().as_expr()) + + if case == 'base': + n = max(0, dc - max(db, da - 1)) + if db == da - 1 and alpha.is_Integer: + n = max(0, alpha, dc - db) + + elif case == 'primitive': + if db > da: + n = max(0, dc - db) + else: + n = max(0, dc - da + 1) + + etaa, etad = frac_in(DE.d, DE.T[DE.level - 1]) + + t1 = DE.t + with DecrementLevel(DE): + alphaa, alphad = frac_in(alpha, DE.t) + if db == da - 1: + from .prde import limited_integrate + # if alpha == m*Dt + Dz for z in k and m in ZZ: + try: + (za, zd), m = limited_integrate(alphaa, alphad, [(etaa, etad)], + DE) + except NonElementaryIntegralException: + pass + else: + if len(m) != 1: + raise ValueError("Length of m should be 1") + n = max(n, m[0]) + + elif db == da: + # if alpha == Dz/z for z in k*: + # beta = -lc(a*Dz + b*z)/(z*lc(a)) + # if beta == m*Dt + Dw for w in k and m in ZZ: + # n = max(n, m) + from .prde import is_log_deriv_k_t_radical_in_field + A = is_log_deriv_k_t_radical_in_field(alphaa, alphad, DE) + if A is not None: + aa, z = A + if aa == 1: + beta = -(a*derivation(z, DE).as_poly(t1) + + b*z.as_poly(t1)).LC()/(z.as_expr()*a.LC()) + betaa, betad = frac_in(beta, DE.t) + from .prde import limited_integrate + try: + (za, zd), m = limited_integrate(betaa, betad, + [(etaa, etad)], DE) + except NonElementaryIntegralException: + pass + else: + if len(m) != 1: + raise ValueError("Length of m should be 1") + n = max(n, m[0].as_expr()) + + elif case == 'exp': + from .prde import parametric_log_deriv + + n = max(0, dc - max(db, da)) + if da == db: + etaa, etad = frac_in(DE.d.quo(Poly(DE.t, DE.t)), DE.T[DE.level - 1]) + with DecrementLevel(DE): + alphaa, alphad = frac_in(alpha, DE.t) + A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE) + if A is not None: + # if alpha == m*Dt/t + Dz/z for z in k* and m in ZZ: + # n = max(n, m) + a, 
m, z = A + if a == 1: + n = max(n, m) + + elif case in ('tan', 'other_nonlinear'): + delta = DE.d.degree(DE.t) + lam = DE.d.LC() + alpha = cancel(alpha/lam) + n = max(0, dc - max(da + delta - 1, db)) + if db == da + delta - 1 and alpha.is_Integer: + n = max(0, alpha, dc - db) + + else: + raise ValueError("case must be one of {'exp', 'tan', 'primitive', " + "'other_nonlinear', 'base'}, not %s." % case) + + return n + + +def spde(a, b, c, n, DE): + """ + Rothstein's Special Polynomial Differential Equation algorithm. + + Explanation + =========== + + Given a derivation D on k[t], an integer n and ``a``,``b``,``c`` in k[t] with + ``a != 0``, either raise NonElementaryIntegralException, in which case the + equation a*Dq + b*q == c has no solution of degree at most ``n`` in + k[t], or return the tuple (B, C, m, alpha, beta) such that B, C, + alpha, beta in k[t], m in ZZ, and any solution q in k[t] of degree + at most n of a*Dq + b*q == c must be of the form + q == alpha*h + beta, where h in k[t], deg(h) <= m, and Dh + B*h == C. + + This constitutes step 4 of the outline given in the rde.py docstring. + """ + zero = Poly(0, DE.t) + + alpha = Poly(1, DE.t) + beta = Poly(0, DE.t) + + while True: + if c.is_zero: + return (zero, zero, 0, zero, beta) # -1 is more to the point + if (n < 0) is True: + raise NonElementaryIntegralException + + g = a.gcd(b) + if not c.rem(g).is_zero: # g does not divide c + raise NonElementaryIntegralException + + a, b, c = a.quo(g), b.quo(g), c.quo(g) + + if a.degree(DE.t) == 0: + b = b.to_field().quo(a) + c = c.to_field().quo(a) + return (b, c, n, alpha, beta) + + r, z = gcdex_diophantine(b, a, c) + b += derivation(a, DE) + c = z - derivation(r, DE) + n -= a.degree(DE.t) + + beta += alpha * r + alpha *= a + +def no_cancel_b_large(b, c, n, DE): + """ + Poly Risch Differential Equation - No cancellation: deg(b) large enough. + + Explanation + =========== + + Given a derivation D on k[t], ``n`` either an integer or +oo, and ``b``,``c`` + in k[t] with ``b != 0`` and either D == d/dt or + deg(b) > max(0, deg(D) - 1), either raise NonElementaryIntegralException, in + which case the equation ``Dq + b*q == c`` has no solution of degree at + most n in k[t], or a solution q in k[t] of this equation with + ``deg(q) < n``. + """ + q = Poly(0, DE.t) + + while not c.is_zero: + m = c.degree(DE.t) - b.degree(DE.t) + if not 0 <= m <= n: # n < 0 or m < 0 or m > n + raise NonElementaryIntegralException + + p = Poly(c.as_poly(DE.t).LC()/b.as_poly(DE.t).LC()*DE.t**m, DE.t, + expand=False) + q = q + p + n = m - 1 + c = c - derivation(p, DE) - b*p + + return q + + +def no_cancel_b_small(b, c, n, DE): + """ + Poly Risch Differential Equation - No cancellation: deg(b) small enough. + + Explanation + =========== + + Given a derivation D on k[t], ``n`` either an integer or +oo, and ``b``,``c`` + in k[t] with deg(b) < deg(D) - 1 and either D == d/dt or + deg(D) >= 2, either raise NonElementaryIntegralException, in which case the + equation Dq + b*q == c has no solution of degree at most n in k[t], + or a solution q in k[t] of this equation with deg(q) <= n, or the + tuple (h, b0, c0) such that h in k[t], b0, c0, in k, and for any + solution q in k[t] of degree at most n of Dq + bq == c, y == q - h + is a solution in k of Dy + b0*y == c0. 
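+
+ For instance, if Dt == t**2 + 1 (so deg(D) == 2) and b is in k, then for
+ c == t**3 the loop below picks m == deg(c) - deg(D) + 1 == 2 and the leading
+ term p == t**2/2, since D(t**2/2) == t*(t**2 + 1) == t**3 + t matches the
+ leading term of c.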
+ """ + q = Poly(0, DE.t) + + while not c.is_zero: + if n == 0: + m = 0 + else: + m = c.degree(DE.t) - DE.d.degree(DE.t) + 1 + + if not 0 <= m <= n: # n < 0 or m < 0 or m > n + raise NonElementaryIntegralException + + if m > 0: + p = Poly(c.as_poly(DE.t).LC()/(m*DE.d.as_poly(DE.t).LC())*DE.t**m, + DE.t, expand=False) + else: + if b.degree(DE.t) != c.degree(DE.t): + raise NonElementaryIntegralException + if b.degree(DE.t) == 0: + return (q, b.as_poly(DE.T[DE.level - 1]), + c.as_poly(DE.T[DE.level - 1])) + p = Poly(c.as_poly(DE.t).LC()/b.as_poly(DE.t).LC(), DE.t, + expand=False) + + q = q + p + n = m - 1 + c = c - derivation(p, DE) - b*p + + return q + + +# TODO: better name for this function +def no_cancel_equal(b, c, n, DE): + """ + Poly Risch Differential Equation - No cancellation: deg(b) == deg(D) - 1 + + Explanation + =========== + + Given a derivation D on k[t] with deg(D) >= 2, n either an integer + or +oo, and b, c in k[t] with deg(b) == deg(D) - 1, either raise + NonElementaryIntegralException, in which case the equation Dq + b*q == c has + no solution of degree at most n in k[t], or a solution q in k[t] of + this equation with deg(q) <= n, or the tuple (h, m, C) such that h + in k[t], m in ZZ, and C in k[t], and for any solution q in k[t] of + degree at most n of Dq + b*q == c, y == q - h is a solution in k[t] + of degree at most m of Dy + b*y == C. + """ + q = Poly(0, DE.t) + lc = cancel(-b.as_poly(DE.t).LC()/DE.d.as_poly(DE.t).LC()) + if lc.is_Integer and lc.is_positive: + M = lc + else: + M = -1 + + while not c.is_zero: + m = max(M, c.degree(DE.t) - DE.d.degree(DE.t) + 1) + + if not 0 <= m <= n: # n < 0 or m < 0 or m > n + raise NonElementaryIntegralException + + u = cancel(m*DE.d.as_poly(DE.t).LC() + b.as_poly(DE.t).LC()) + if u.is_zero: + return (q, m, c) + if m > 0: + p = Poly(c.as_poly(DE.t).LC()/u*DE.t**m, DE.t, expand=False) + else: + if c.degree(DE.t) != DE.d.degree(DE.t) - 1: + raise NonElementaryIntegralException + else: + p = c.as_poly(DE.t).LC()/b.as_poly(DE.t).LC() + + q = q + p + n = m - 1 + c = c - derivation(p, DE) - b*p + + return q + + +def cancel_primitive(b, c, n, DE): + """ + Poly Risch Differential Equation - Cancellation: Primitive case. + + Explanation + =========== + + Given a derivation D on k[t], n either an integer or +oo, ``b`` in k, and + ``c`` in k[t] with Dt in k and ``b != 0``, either raise + NonElementaryIntegralException, in which case the equation Dq + b*q == c + has no solution of degree at most n in k[t], or a solution q in k[t] of + this equation with deg(q) <= n. 
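+
+ Each pass of the loop below removes the current leading term of c: its
+ coefficient is obtained by solving a Risch differential equation one level
+ down (via DecrementLevel and rischDE), so deg(c) drops on every iteration.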
+ """ + # Delayed imports + from .prde import is_log_deriv_k_t_radical_in_field + with DecrementLevel(DE): + ba, bd = frac_in(b, DE.t) + A = is_log_deriv_k_t_radical_in_field(ba, bd, DE) + if A is not None: + n, z = A + if n == 1: # b == Dz/z + raise NotImplementedError("is_deriv_in_field() is required to " + " solve this problem.") + # if z*c == Dp for p in k[t] and deg(p) <= n: + # return p/z + # else: + # raise NonElementaryIntegralException + + if c.is_zero: + return c # return 0 + + if n < c.degree(DE.t): + raise NonElementaryIntegralException + + q = Poly(0, DE.t) + while not c.is_zero: + m = c.degree(DE.t) + if n < m: + raise NonElementaryIntegralException + with DecrementLevel(DE): + a2a, a2d = frac_in(c.LC(), DE.t) + sa, sd = rischDE(ba, bd, a2a, a2d, DE) + stm = Poly(sa.as_expr()/sd.as_expr()*DE.t**m, DE.t, expand=False) + q += stm + n = m - 1 + c -= b*stm + derivation(stm, DE) + + return q + + +def cancel_exp(b, c, n, DE): + """ + Poly Risch Differential Equation - Cancellation: Hyperexponential case. + + Explanation + =========== + + Given a derivation D on k[t], n either an integer or +oo, ``b`` in k, and + ``c`` in k[t] with Dt/t in k and ``b != 0``, either raise + NonElementaryIntegralException, in which case the equation Dq + b*q == c + has no solution of degree at most n in k[t], or a solution q in k[t] of + this equation with deg(q) <= n. + """ + from .prde import parametric_log_deriv + eta = DE.d.quo(Poly(DE.t, DE.t)).as_expr() + + with DecrementLevel(DE): + etaa, etad = frac_in(eta, DE.t) + ba, bd = frac_in(b, DE.t) + A = parametric_log_deriv(ba, bd, etaa, etad, DE) + if A is not None: + a, m, z = A + if a == 1: + raise NotImplementedError("is_deriv_in_field() is required to " + "solve this problem.") + # if c*z*t**m == Dp for p in k and q = p/(z*t**m) in k[t] and + # deg(q) <= n: + # return q + # else: + # raise NonElementaryIntegralException + + if c.is_zero: + return c # return 0 + + if n < c.degree(DE.t): + raise NonElementaryIntegralException + + q = Poly(0, DE.t) + while not c.is_zero: + m = c.degree(DE.t) + if n < m: + raise NonElementaryIntegralException + # a1 = b + m*Dt/t + a1 = b.as_expr() + with DecrementLevel(DE): + # TODO: Write a dummy function that does this idiom + a1a, a1d = frac_in(a1, DE.t) + a1a = a1a*etad + etaa*a1d*Poly(m, DE.t) + a1d = a1d*etad + + a2a, a2d = frac_in(c.LC(), DE.t) + + sa, sd = rischDE(a1a, a1d, a2a, a2d, DE) + stm = Poly(sa.as_expr()/sd.as_expr()*DE.t**m, DE.t, expand=False) + q += stm + n = m - 1 + c -= b*stm + derivation(stm, DE) # deg(c) becomes smaller + return q + + +def solve_poly_rde(b, cQ, n, DE, parametric=False): + """ + Solve a Polynomial Risch Differential Equation with degree bound ``n``. + + This constitutes step 4 of the outline given in the rde.py docstring. + + For parametric=False, cQ is c, a Poly; for parametric=True, cQ is Q == + [q1, ..., qm], a list of Polys. + """ + # No cancellation + if not b.is_zero and (DE.case == 'base' or + b.degree(DE.t) > max(0, DE.d.degree(DE.t) - 1)): + + if parametric: + # Delayed imports + from .prde import prde_no_cancel_b_large + return prde_no_cancel_b_large(b, cQ, n, DE) + return no_cancel_b_large(b, cQ, n, DE) + + elif (b.is_zero or b.degree(DE.t) < DE.d.degree(DE.t) - 1) and \ + (DE.case == 'base' or DE.d.degree(DE.t) >= 2): + + if parametric: + from .prde import prde_no_cancel_b_small + return prde_no_cancel_b_small(b, cQ, n, DE) + + R = no_cancel_b_small(b, cQ, n, DE) + + if isinstance(R, Poly): + return R + else: + # XXX: Might k be a field? (pg. 
209) + h, b0, c0 = R + with DecrementLevel(DE): + b0, c0 = b0.as_poly(DE.t), c0.as_poly(DE.t) + if b0 is None: # See above comment + raise ValueError("b0 should be a non-Null value") + if c0 is None: + raise ValueError("c0 should be a non-Null value") + y = solve_poly_rde(b0, c0, n, DE).as_poly(DE.t) + return h + y + + elif DE.d.degree(DE.t) >= 2 and b.degree(DE.t) == DE.d.degree(DE.t) - 1 and \ + n > -b.as_poly(DE.t).LC()/DE.d.as_poly(DE.t).LC(): + + # TODO: Is this check necessary, and if so, what should it do if it fails? + # b comes from the first element returned from spde() + if not b.as_poly(DE.t).LC().is_number: + raise TypeError("Result should be a number") + + if parametric: + raise NotImplementedError("prde_no_cancel_b_equal() is not yet " + "implemented.") + + R = no_cancel_equal(b, cQ, n, DE) + + if isinstance(R, Poly): + return R + else: + h, m, C = R + # XXX: Or should it be rischDE()? + y = solve_poly_rde(b, C, m, DE) + return h + y + + else: + # Cancellation + if b.is_zero: + raise NotImplementedError("Remaining cases for Poly (P)RDE are " + "not yet implemented (is_deriv_in_field() required).") + else: + if DE.case == 'exp': + if parametric: + raise NotImplementedError("Parametric RDE cancellation " + "hyperexponential case is not yet implemented.") + return cancel_exp(b, cQ, n, DE) + + elif DE.case == 'primitive': + if parametric: + raise NotImplementedError("Parametric RDE cancellation " + "primitive case is not yet implemented.") + return cancel_primitive(b, cQ, n, DE) + + else: + raise NotImplementedError("Other Poly (P)RDE cancellation " + "cases are not yet implemented (%s)." % DE.case) + + if parametric: + raise NotImplementedError("Remaining cases for Poly PRDE not yet " + "implemented.") + raise NotImplementedError("Remaining cases for Poly RDE not yet " + "implemented.") + + +def rischDE(fa, fd, ga, gd, DE): + """ + Solve a Risch Differential Equation: Dy + f*y == g. + + Explanation + =========== + + See the outline in the docstring of rde.py for more information + about the procedure used. Either raise NonElementaryIntegralException, in + which case there is no solution y in the given differential field, + or return y in k(t) satisfying Dy + f*y == g, or raise + NotImplementedError, in which case, the algorithms necessary to + solve the given Risch Differential Equation have not yet been + implemented. + """ + _, (fa, fd) = weak_normalizer(fa, fd, DE) + a, (ba, bd), (ca, cd), hn = normal_denom(fa, fd, ga, gd, DE) + A, B, C, hs = special_denom(a, ba, bd, ca, cd, DE) + try: + # Until this is fully implemented, use oo. Note that this will almost + # certainly cause non-termination in spde() (unless A == 1), and + # *might* lead to non-termination in the next step for a nonelementary + # integral (I don't know for certain yet). Fortunately, spde() is + # currently written recursively, so this will just give + # RuntimeError: maximum recursion depth exceeded. 
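Although several cancellation subcases above are still unimplemented, the driver rischDE() below already handles the common no-cancellation situations. A minimal, hand-checkable sketch in the base case (a hypothetical one-element tower, so D == d/dx), solving Dy + y == x:

>>> from sympy import Poly, symbols
>>> from sympy.integrals.rde import rischDE
>>> from sympy.integrals.risch import DifferentialExtension
>>> x = symbols('x')
>>> DE = DifferentialExtension(extension={'D': [Poly(1, x)]})
>>> num, den = rischDE(Poly(1, x), Poly(1, x), Poly(x, x), Poly(1, x), DE)
>>> # num/den should simplify to x - 1, the unique solution in QQ(x).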
+ n = bound_degree(A, B, C, DE) + except NotImplementedError: + # Useful for debugging: + # import warnings + # warnings.warn("rischDE: Proceeding with n = oo; may cause " + # "non-termination.") + n = oo + + B, C, m, alpha, beta = spde(A, B, C, n, DE) + if C.is_zero: + y = C + else: + y = solve_poly_rde(B, C, m, DE) + + return (alpha*y + beta, hn*hs) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/risch.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/risch.py new file mode 100644 index 0000000000000000000000000000000000000000..f6f7a2d583a0ea0a64b77cf7df9de2e3b0ae8ab9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/risch.py @@ -0,0 +1,1858 @@ +""" +The Risch Algorithm for transcendental function integration. + +The core algorithms for the Risch algorithm are here. The subproblem +algorithms are in the rde.py and prde.py files for the Risch +Differential Equation solver and the parametric problems solvers, +respectively. All important information concerning the differential extension +for an integrand is stored in a DifferentialExtension object, which in the code +is usually called DE. Throughout the code and Inside the DifferentialExtension +object, the conventions/attribute names are that the base domain is QQ and each +differential extension is x, t0, t1, ..., tn-1 = DE.t. DE.x is the variable of +integration (Dx == 1), DE.D is a list of the derivatives of +x, t1, t2, ..., tn-1 = t, DE.T is the list [x, t1, t2, ..., tn-1], DE.t is the +outer-most variable of the differential extension at the given level (the level +can be adjusted using DE.increment_level() and DE.decrement_level()), +k is the field C(x, t0, ..., tn-2), where C is the constant field. The +numerator of a fraction is denoted by a and the denominator by +d. If the fraction is named f, fa == numer(f) and fd == denom(f). +Fractions are returned as tuples (fa, fd). DE.d and DE.t are used to +represent the topmost derivation and extension variable, respectively. +The docstring of a function signifies whether an argument is in k[t], in +which case it will just return a Poly in t, or in k(t), in which case it +will return the fraction (fa, fd). Other variable names probably come +from the names used in Bronstein's book. +""" +from types import GeneratorType +from functools import reduce + +from sympy.core.function import Lambda +from sympy.core.mul import Mul +from sympy.core.numbers import ilcm, I, oo +from sympy.core.power import Pow +from sympy.core.relational import Ne +from sympy.core.singleton import S +from sympy.core.sorting import ordered, default_sort_key +from sympy.core.symbol import Dummy, Symbol +from sympy.functions.elementary.exponential import log, exp +from sympy.functions.elementary.hyperbolic import (cosh, coth, sinh, + tanh) +from sympy.functions.elementary.piecewise import Piecewise +from sympy.functions.elementary.trigonometric import (atan, sin, cos, + tan, acot, cot, asin, acos) +from .integrals import integrate, Integral +from .heurisch import _symbols +from sympy.polys.polyerrors import DomainError, PolynomialError +from sympy.polys.polytools import (real_roots, cancel, Poly, gcd, + reduced) +from sympy.polys.rootoftools import RootSum +from sympy.utilities.iterables import numbered_symbols + + +def integer_powers(exprs): + """ + Rewrites a list of expressions as integer multiples of each other. 
+ + Explanation + =========== + + For example, if you have [x, x/2, x**2 + 1, 2*x/3], then you can rewrite + this as [(x/6) * 6, (x/6) * 3, (x**2 + 1) * 1, (x/6) * 4]. This is useful + in the Risch integration algorithm, where we must write exp(x) + exp(x/2) + as (exp(x/2))**2 + exp(x/2), but not as exp(x) + sqrt(exp(x)) (this is + because only the transcendental case is implemented and we therefore cannot + integrate algebraic extensions). The integer multiples returned by this + function for each term are the smallest possible (their content equals 1). + + Returns a list of tuples where the first element is the base term and the + second element is a list of `(item, factor)` terms, where `factor` is the + integer multiplicative factor that must multiply the base term to obtain + the original item. + + The easiest way to understand this is to look at an example: + + >>> from sympy.abc import x + >>> from sympy.integrals.risch import integer_powers + >>> integer_powers([x, x/2, x**2 + 1, 2*x/3]) + [(x/6, [(x, 6), (x/2, 3), (2*x/3, 4)]), (x**2 + 1, [(x**2 + 1, 1)])] + + We can see how this relates to the example at the beginning of the + docstring. It chose x/6 as the first base term. Then, x can be written as + (x/2) * 2, so we get (0, 2), and so on. Now only element (x**2 + 1) + remains, and there are no other terms that can be written as a rational + multiple of that, so we get that it can be written as (x**2 + 1) * 1. + + """ + # Here is the strategy: + + # First, go through each term and determine if it can be rewritten as a + # rational multiple of any of the terms gathered so far. + # cancel(a/b).is_Rational is sufficient for this. If it is a multiple, we + # add its multiple to the dictionary. + + terms = {} + for term in exprs: + for trm, trm_list in terms.items(): + a = cancel(term/trm) + if a.is_Rational: + trm_list.append((term, a)) + break + else: + terms[term] = [(term, S.One)] + + # After we have done this, we have all the like terms together, so we just + # need to find a common denominator so that we can get the base term and + # integer multiples such that each term can be written as an integer + # multiple of the base term, and the content of the integers is 1. + + newterms = {} + for term, term_list in terms.items(): + common_denom = reduce(ilcm, [i.as_numer_denom()[1] for _, i in + term_list]) + newterm = term/common_denom + newmults = [(i, j*common_denom) for i, j in term_list] + newterms[newterm] = newmults + + return sorted(iter(newterms.items()), key=lambda item: item[0].sort_key()) + + +class DifferentialExtension: + """ + A container for all the information relating to a differential extension. + + Explanation + =========== + + The attributes of this object are (see also the docstring of __init__): + + - f: The original (Expr) integrand. + - x: The variable of integration. + - T: List of variables in the extension. + - D: List of derivations in the extension; corresponds to the elements of T. + - fa: Poly of the numerator of the integrand. + - fd: Poly of the denominator of the integrand. + - Tfuncs: Lambda() representations of each element of T (except for x). + For back-substitution after integration. + - backsubs: A (possibly empty) list of further substitutions to be made on + the final integral to make it look more like the integrand. + - exts: + - extargs: + - cases: List of string representations of the cases of T. + - t: The top level extension variable, as defined by the current level + (see level below). 
+ - d: The top level extension derivation, as defined by the current + derivation (see level below). + - case: The string representation of the case of self.d. + (Note that self.T and self.D will always contain the complete extension, + regardless of the level. Therefore, you should ALWAYS use DE.t and DE.d + instead of DE.T[-1] and DE.D[-1]. If you want to have a list of the + derivations or variables only up to the current level, use + DE.D[:len(DE.D) + DE.level + 1] and DE.T[:len(DE.T) + DE.level + 1]. Note + that, in particular, the derivation() function does this.) + + The following are also attributes, but will probably not be useful other + than in internal use: + - newf: Expr form of fa/fd. + - level: The number (between -1 and -len(self.T)) such that + self.T[self.level] == self.t and self.D[self.level] == self.d. + Use the methods self.increment_level() and self.decrement_level() to change + the current level. + """ + # __slots__ is defined mainly so we can iterate over all the attributes + # of the class easily (the memory use doesn't matter too much, since we + # only create one DifferentialExtension per integration). Also, it's nice + # to have a safeguard when debugging. + __slots__ = ('f', 'x', 'T', 'D', 'fa', 'fd', 'Tfuncs', 'backsubs', + 'exts', 'extargs', 'cases', 'case', 't', 'd', 'newf', 'level', + 'ts', 'dummy') + + def __init__(self, f=None, x=None, handle_first='log', dummy=False, extension=None, rewrite_complex=None): + """ + Tries to build a transcendental extension tower from ``f`` with respect to ``x``. + + Explanation + =========== + + If it is successful, creates a DifferentialExtension object with, among + others, the attributes fa, fd, D, T, Tfuncs, and backsubs such that + fa and fd are Polys in T[-1] with rational coefficients in T[:-1], + fa/fd == f, and D[i] is a Poly in T[i] with rational coefficients in + T[:i] representing the derivative of T[i] for each i from 1 to len(T). + Tfuncs is a list of Lambda objects for back replacing the functions + after integrating. Lambda() is only used (instead of lambda) to make + them easier to test and debug. Note that Tfuncs corresponds to the + elements of T, except for T[0] == x, but they should be back-substituted + in reverse order. backsubs is a (possibly empty) back-substitution list + that should be applied on the completed integral to make it look more + like the original integrand. + + If it is unsuccessful, it raises NotImplementedError. + + You can also create an object by manually setting the attributes as a + dictionary to the extension keyword argument. You must include at least + D. Warning, any attribute that is not given will be set to None. The + attributes T, t, d, cases, case, x, and level are set automatically and + do not need to be given. The functions in the Risch Algorithm will NOT + check to see if an attribute is None before using it. This also does not + check to see if the extension is valid (non-algebraic) or even if it is + self-consistent. Therefore, this should only be used for + testing/debugging purposes. 
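A brief sketch of both construction modes (the tower contents in the first call follow the indices() example later in this class; the manual form is the testing/debugging idiom described above):

>>> from sympy import Poly, exp, log, symbols
>>> from sympy.integrals.risch import DifferentialExtension
>>> x, t = symbols('x t')
>>> DE = DifferentialExtension(log(x) + exp(x), x, handle_first='exp')
>>> DE.exts
[None, 'exp', 'log']
>>> DE.case    # case of the outermost extension, here the logarithm
'primitive'
>>> DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]})
>>> DE.cases
['base', 'exp']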
+ """ + # XXX: If you need to debug this function, set the break point here + + if extension: + if 'D' not in extension: + raise ValueError("At least the key D must be included with " + "the extension flag to DifferentialExtension.") + for attr in extension: + setattr(self, attr, extension[attr]) + + self._auto_attrs() + + return + elif f is None or x is None: + raise ValueError("Either both f and x or a manual extension must " + "be given.") + + if handle_first not in ('log', 'exp'): + raise ValueError("handle_first must be 'log' or 'exp', not %s." % + str(handle_first)) + + # f will be the original function, self.f might change if we reset + # (e.g., we pull out a constant from an exponential) + self.f = f + self.x = x + # setting the default value 'dummy' + self.dummy = dummy + self.reset() + exp_new_extension, log_new_extension = True, True + + # case of 'automatic' choosing + if rewrite_complex is None: + rewrite_complex = I in self.f.atoms() + + if rewrite_complex: + rewritables = { + (sin, cos, cot, tan, sinh, cosh, coth, tanh): exp, + (asin, acos, acot, atan): log, + } + # rewrite the trigonometric components + for candidates, rule in rewritables.items(): + self.newf = self.newf.rewrite(candidates, rule) + self.newf = cancel(self.newf) + else: + if any(i.has(x) for i in self.f.atoms(sin, cos, tan, atan, asin, acos)): + raise NotImplementedError("Trigonometric extensions are not " + "supported (yet!)") + + exps = set() + pows = set() + numpows = set() + sympows = set() + logs = set() + symlogs = set() + + while True: + if self.newf.is_rational_function(*self.T): + break + + if not exp_new_extension and not log_new_extension: + # We couldn't find a new extension on the last pass, so I guess + # we can't do it. + raise NotImplementedError("Couldn't find an elementary " + "transcendental extension for %s. Try using a " % str(f) + + "manual extension with the extension flag.") + + exps, pows, numpows, sympows, log_new_extension = \ + self._rewrite_exps_pows(exps, pows, numpows, sympows, log_new_extension) + + logs, symlogs = self._rewrite_logs(logs, symlogs) + + if handle_first == 'exp' or not log_new_extension: + exp_new_extension = self._exp_part(exps) + if exp_new_extension is None: + # reset and restart + self.f = self.newf + self.reset() + exp_new_extension = True + continue + + if handle_first == 'log' or not exp_new_extension: + log_new_extension = self._log_part(logs) + + self.fa, self.fd = frac_in(self.newf, self.t) + self._auto_attrs() + + return + + def __getattr__(self, attr): + # Avoid AttributeErrors when debugging + if attr not in self.__slots__: + raise AttributeError("%s has no attribute %s" % (repr(self), repr(attr))) + return None + + def _rewrite_exps_pows(self, exps, pows, numpows, + sympows, log_new_extension): + """ + Rewrite exps/pows for better processing. + """ + from .prde import is_deriv_k + + # Pre-preparsing. + ################# + # Get all exp arguments, so we can avoid ahead of time doing + # something like t1 = exp(x), t2 = exp(x/2) == sqrt(t1). + + # Things like sqrt(exp(x)) do not automatically simplify to + # exp(x/2), so they will be viewed as algebraic. The easiest way + # to handle this is to convert all instances of exp(a)**Rational + # to exp(Rational*a) before doing anything else. Note that the + # _exp_part code can generate terms of this form, so we do need to + # do this at each pass (or else modify it to not do that). 
+ + ratpows = [i for i in self.newf.atoms(Pow) + if (isinstance(i.base, exp) and i.exp.is_Rational)] + + ratpows_repl = [ + (i, i.base.base**(i.exp*i.base.exp)) for i in ratpows] + self.backsubs += [(j, i) for i, j in ratpows_repl] + self.newf = self.newf.xreplace(dict(ratpows_repl)) + + # To make the process deterministic, the args are sorted + # so that functions with smaller op-counts are processed first. + # Ties are broken with the default_sort_key. + + # XXX Although the method is deterministic no additional work + # has been done to guarantee that the simplest solution is + # returned and that it would be affected be using different + # variables. Though it is possible that this is the case + # one should know that it has not been done intentionally, so + # further improvements may be possible. + + # TODO: This probably doesn't need to be completely recomputed at + # each pass. + exps = update_sets(exps, self.newf.atoms(exp), + lambda i: i.exp.is_rational_function(*self.T) and + i.exp.has(*self.T)) + pows = update_sets(pows, self.newf.atoms(Pow), + lambda i: i.exp.is_rational_function(*self.T) and + i.exp.has(*self.T)) + numpows = update_sets(numpows, set(pows), + lambda i: not i.base.has(*self.T)) + sympows = update_sets(sympows, set(pows) - set(numpows), + lambda i: i.base.is_rational_function(*self.T) and + not i.exp.is_Integer) + + # The easiest way to deal with non-base E powers is to convert them + # into base E, integrate, and then convert back. + for i in ordered(pows): + old = i + new = exp(i.exp*log(i.base)) + # If exp is ever changed to automatically reduce exp(x*log(2)) + # to 2**x, then this will break. The solution is to not change + # exp to do that :) + if i in sympows: + if i.exp.is_Rational: + raise NotImplementedError("Algebraic extensions are " + "not supported (%s)." % str(i)) + # We can add a**b only if log(a) in the extension, because + # a**b == exp(b*log(a)). + basea, based = frac_in(i.base, self.t) + A = is_deriv_k(basea, based, self) + if A is None: + # Nonelementary monomial (so far) + + # TODO: Would there ever be any benefit from just + # adding log(base) as a new monomial? + # ANSWER: Yes, otherwise we can't integrate x**x (or + # rather prove that it has no elementary integral) + # without first manually rewriting it as exp(x*log(x)) + self.newf = self.newf.xreplace({old: new}) + self.backsubs += [(new, old)] + log_new_extension = self._log_part([log(i.base)]) + exps = update_sets(exps, self.newf.atoms(exp), lambda i: + i.exp.is_rational_function(*self.T) and i.exp.has(*self.T)) + continue + ans, u, const = A + newterm = exp(i.exp*(log(const) + u)) + # Under the current implementation, exp kills terms + # only if they are of the form a*log(x), where a is a + # Number. This case should have already been killed by the + # above tests. Again, if this changes to kill more than + # that, this will break, which maybe is a sign that you + # shouldn't be changing that. Actually, if anything, this + # auto-simplification should be removed. See + # https://groups.google.com/group/sympy/browse_thread/thread/a61d48235f16867f + + self.newf = self.newf.xreplace({i: newterm}) + + elif i not in numpows: + continue + else: + # i in numpows + newterm = new + # TODO: Just put it in self.Tfuncs + self.backsubs.append((new, old)) + self.newf = self.newf.xreplace({old: newterm}) + exps.append(newterm) + + return exps, pows, numpows, sympows, log_new_extension + + def _rewrite_logs(self, logs, symlogs): + """ + Rewrite logs for better processing. 
+ """ + atoms = self.newf.atoms(log) + logs = update_sets(logs, atoms, + lambda i: i.args[0].is_rational_function(*self.T) and + i.args[0].has(*self.T)) + symlogs = update_sets(symlogs, atoms, + lambda i: i.has(*self.T) and i.args[0].is_Pow and + i.args[0].base.is_rational_function(*self.T) and + not i.args[0].exp.is_Integer) + + # We can handle things like log(x**y) by converting it to y*log(x) + # This will fix not only symbolic exponents of the argument, but any + # non-Integer exponent, like log(sqrt(x)). The exponent can also + # depend on x, like log(x**x). + for i in ordered(symlogs): + # Unlike in the exponential case above, we do not ever + # potentially add new monomials (above we had to add log(a)). + # Therefore, there is no need to run any is_deriv functions + # here. Just convert log(a**b) to b*log(a) and let + # log_new_extension() handle it from there. + lbase = log(i.args[0].base) + logs.append(lbase) + new = i.args[0].exp*lbase + self.newf = self.newf.xreplace({i: new}) + self.backsubs.append((new, i)) + + # remove any duplicates + logs = sorted(set(logs), key=default_sort_key) + + return logs, symlogs + + def _auto_attrs(self): + """ + Set attributes that are generated automatically. + """ + if not self.T: + # i.e., when using the extension flag and T isn't given + self.T = [i.gen for i in self.D] + if not self.x: + self.x = self.T[0] + self.cases = [get_case(d, t) for d, t in zip(self.D, self.T)] + self.level = -1 + self.t = self.T[self.level] + self.d = self.D[self.level] + self.case = self.cases[self.level] + + def _exp_part(self, exps): + """ + Try to build an exponential extension. + + Returns + ======= + + Returns True if there was a new extension, False if there was no new + extension but it was able to rewrite the given exponentials in terms + of the existing extension, and None if the entire extension building + process should be restarted. If the process fails because there is no + way around an algebraic extension (e.g., exp(log(x)/2)), it will raise + NotImplementedError. + """ + from .prde import is_log_deriv_k_t_radical + new_extension = False + restart = False + expargs = [i.exp for i in exps] + ip = integer_powers(expargs) + for arg, others in ip: + # Minimize potential problems with algebraic substitution + others.sort(key=lambda i: i[1]) + + arga, argd = frac_in(arg, self.t) + A = is_log_deriv_k_t_radical(arga, argd, self) + + if A is not None: + ans, u, n, const = A + # if n is 1 or -1, it's algebraic, but we can handle it + if n == -1: + # This probably will never happen, because + # Rational.as_numer_denom() returns the negative term in + # the numerator. But in case that changes, reduce it to + # n == 1. + n = 1 + u **= -1 + const *= -1 + ans = [(i, -j) for i, j in ans] + + if n == 1: + # Example: exp(x + x**2) over QQ(x, exp(x), exp(x**2)) + self.newf = self.newf.xreplace({exp(arg): exp(const)*Mul(*[ + u**power for u, power in ans])}) + self.newf = self.newf.xreplace({exp(p*exparg): + exp(const*p) * Mul(*[u**power for u, power in ans]) + for exparg, p in others}) + # TODO: Add something to backsubs to put exp(const*p) + # back together. + + continue + + else: + # Bad news: we have an algebraic radical. But maybe we + # could still avoid it by choosing a different extension. + # For example, integer_powers() won't handle exp(x/2 + 1) + # over QQ(x, exp(x)), but if we pull out the exp(1), it + # will. 
Or maybe we have exp(x + x**2/2), over + # QQ(x, exp(x), exp(x**2)), which is exp(x)*sqrt(exp(x**2)), + # but if we use QQ(x, exp(x), exp(x**2/2)), then they will + # all work. + # + # So here is what we do: If there is a non-zero const, pull + # it out and retry. Also, if len(ans) > 1, then rewrite + # exp(arg) as the product of exponentials from ans, and + # retry that. If const == 0 and len(ans) == 1, then we + # assume that it would have been handled by either + # integer_powers() or n == 1 above if it could be handled, + # so we give up at that point. For example, you can never + # handle exp(log(x)/2) because it equals sqrt(x). + + if const or len(ans) > 1: + rad = Mul(*[term**(power/n) for term, power in ans]) + self.newf = self.newf.xreplace({exp(p*exparg): + exp(const*p)*rad for exparg, p in others}) + self.newf = self.newf.xreplace(dict(list(zip(reversed(self.T), + reversed([f(self.x) for f in self.Tfuncs]))))) + restart = True + break + else: + # TODO: give algebraic dependence in error string + raise NotImplementedError("Cannot integrate over " + "algebraic extensions.") + + else: + arga, argd = frac_in(arg, self.t) + darga = (argd*derivation(Poly(arga, self.t), self) - + arga*derivation(Poly(argd, self.t), self)) + dargd = argd**2 + darga, dargd = darga.cancel(dargd, include=True) + darg = darga.as_expr()/dargd.as_expr() + self.t = next(self.ts) + self.T.append(self.t) + self.extargs.append(arg) + self.exts.append('exp') + self.D.append(darg.as_poly(self.t, expand=False)*Poly(self.t, + self.t, expand=False)) + if self.dummy: + i = Dummy("i") + else: + i = Symbol('i') + self.Tfuncs += [Lambda(i, exp(arg.subs(self.x, i)))] + self.newf = self.newf.xreplace( + {exp(exparg): self.t**p for exparg, p in others}) + new_extension = True + + if restart: + return None + return new_extension + + def _log_part(self, logs): + """ + Try to build a logarithmic extension. + + Returns + ======= + + Returns True if there was a new extension and False if there was no new + extension but it was able to rewrite the given logarithms in terms + of the existing extension. Unlike with exponential extensions, there + is no way that a logarithm is not transcendental over and cannot be + rewritten in terms of an already existing extension in a non-algebraic + way, so this function does not ever return None or raise + NotImplementedError. + """ + from .prde import is_deriv_k + new_extension = False + logargs = [i.args[0] for i in logs] + for arg in ordered(logargs): + # The log case is easier, because whenever a logarithm is algebraic + # over the base field, it is of the form a1*t1 + ... an*tn + c, + # which is a polynomial, so we can just replace it with that. + # In other words, we don't have to worry about radicals. 
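As a sketch of the rewriting performed by _exp_part() above (this is the exp(x + x**2) situation mentioned in the comments), the third exponential should be expressed as a product of the first two rather than becoming a new monomial:

>>> from sympy import exp
>>> from sympy.abc import x
>>> from sympy.integrals.risch import DifferentialExtension
>>> DE = DifferentialExtension(exp(x) + exp(x**2) + exp(x + x**2), x)
>>> DE.indices('exp')   # only two exponential monomials are introduced
[1, 2]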
+ arga, argd = frac_in(arg, self.t) + A = is_deriv_k(arga, argd, self) + if A is not None: + ans, u, const = A + newterm = log(const) + u + self.newf = self.newf.xreplace({log(arg): newterm}) + continue + + else: + arga, argd = frac_in(arg, self.t) + darga = (argd*derivation(Poly(arga, self.t), self) - + arga*derivation(Poly(argd, self.t), self)) + dargd = argd**2 + darg = darga.as_expr()/dargd.as_expr() + self.t = next(self.ts) + self.T.append(self.t) + self.extargs.append(arg) + self.exts.append('log') + self.D.append(cancel(darg.as_expr()/arg).as_poly(self.t, + expand=False)) + if self.dummy: + i = Dummy("i") + else: + i = Symbol('i') + self.Tfuncs += [Lambda(i, log(arg.subs(self.x, i)))] + self.newf = self.newf.xreplace({log(arg): self.t}) + new_extension = True + + return new_extension + + @property + def _important_attrs(self): + """ + Returns some of the more important attributes of self. + + Explanation + =========== + + Used for testing and debugging purposes. + + The attributes are (fa, fd, D, T, Tfuncs, backsubs, + exts, extargs). + """ + return (self.fa, self.fd, self.D, self.T, self.Tfuncs, + self.backsubs, self.exts, self.extargs) + + # NOTE: this printing doesn't follow the Python's standard + # eval(repr(DE)) == DE, where DE is the DifferentialExtension object, + # also this printing is supposed to contain all the important + # attributes of a DifferentialExtension object + def __repr__(self): + # no need to have GeneratorType object printed in it + r = [(attr, getattr(self, attr)) for attr in self.__slots__ + if not isinstance(getattr(self, attr), GeneratorType)] + return self.__class__.__name__ + '(dict(%r))' % (r) + + # fancy printing of DifferentialExtension object + def __str__(self): + return (self.__class__.__name__ + '({fa=%s, fd=%s, D=%s})' % + (self.fa, self.fd, self.D)) + + # should only be used for debugging purposes, internally + # f1 = f2 = log(x) at different places in code execution + # may return D1 != D2 as True, since 'level' or other attribute + # may differ + def __eq__(self, other): + for attr in self.__class__.__slots__: + d1, d2 = getattr(self, attr), getattr(other, attr) + if not (isinstance(d1, GeneratorType) or d1 == d2): + return False + return True + + def reset(self): + """ + Reset self to an initial state. Used by __init__. + """ + self.t = self.x + self.T = [self.x] + self.D = [Poly(1, self.x)] + self.level = -1 + self.exts = [None] + self.extargs = [None] + if self.dummy: + self.ts = numbered_symbols('t', cls=Dummy) + else: + # For testing + self.ts = numbered_symbols('t') + # For various things that we change to make things work that we need to + # change back when we are done. + self.backsubs = [] + self.Tfuncs = [] + self.newf = self.f + + def indices(self, extension): + """ + Parameters + ========== + + extension : str + Represents a valid extension type. + + Returns + ======= + + list: A list of indices of 'exts' where extension of + type 'extension' is present. + + Examples + ======== + + >>> from sympy.integrals.risch import DifferentialExtension + >>> from sympy import log, exp + >>> from sympy.abc import x + >>> DE = DifferentialExtension(log(x) + exp(x), x, handle_first='exp') + >>> DE.indices('log') + [2] + >>> DE.indices('exp') + [1] + + """ + return [i for i, ext in enumerate(self.exts) if ext == extension] + + def increment_level(self): + """ + Increment the level of self. + + Explanation + =========== + + This makes the working differential extension larger. 
self.level is + given relative to the end of the list (-1, -2, etc.), so we do not need + do worry about it when building the extension. + """ + if self.level >= -1: + raise ValueError("The level of the differential extension cannot " + "be incremented any further.") + + self.level += 1 + self.t = self.T[self.level] + self.d = self.D[self.level] + self.case = self.cases[self.level] + return None + + def decrement_level(self): + """ + Decrease the level of self. + + Explanation + =========== + + This makes the working differential extension smaller. self.level is + given relative to the end of the list (-1, -2, etc.), so we do not need + do worry about it when building the extension. + """ + if self.level <= -len(self.T): + raise ValueError("The level of the differential extension cannot " + "be decremented any further.") + + self.level -= 1 + self.t = self.T[self.level] + self.d = self.D[self.level] + self.case = self.cases[self.level] + return None + + +def update_sets(seq, atoms, func): + s = set(seq) + s = atoms.intersection(s) + new = atoms - s + s.update(list(filter(func, new))) + return list(s) + + +class DecrementLevel: + """ + A context manager for decrementing the level of a DifferentialExtension. + """ + __slots__ = ('DE',) + + def __init__(self, DE): + self.DE = DE + return + + def __enter__(self): + self.DE.decrement_level() + + def __exit__(self, exc_type, exc_value, traceback): + self.DE.increment_level() + + +class NonElementaryIntegralException(Exception): + """ + Exception used by subroutines within the Risch algorithm to indicate to one + another that the function being integrated does not have an elementary + integral in the given differential field. + """ + # TODO: Rewrite algorithms below to use this (?) + + # TODO: Pass through information about why the integral was nonelementary, + # and store that in the resulting NonElementaryIntegral somehow. + pass + + +def gcdex_diophantine(a, b, c): + """ + Extended Euclidean Algorithm, Diophantine version. + + Explanation + =========== + + Given ``a``, ``b`` in K[x] and ``c`` in (a, b), the ideal generated by ``a`` and + ``b``, return (s, t) such that s*a + t*b == c and either s == 0 or s.degree() + < b.degree(). + """ + # Extended Euclidean Algorithm (Diophantine Version) pg. 13 + # TODO: This should go in densetools.py. + # XXX: Bettter name? + + s, g = a.half_gcdex(b) + s *= c.exquo(g) # Inexact division means c is not in (a, b) + if s and s.degree() >= b.degree(): + _, s = s.div(b) + t = (c - s*a).exquo(b) + return (s, t) + + +def frac_in(f, t, *, cancel=False, **kwargs): + """ + Returns the tuple (fa, fd), where fa and fd are Polys in t. + + Explanation + =========== + + This is a common idiom in the Risch Algorithm functions, so we abstract + it out here. ``f`` should be a basic expression, a Poly, or a tuple (fa, fd), + where fa and fd are either basic expressions or Polys, and f == fa/fd. + **kwargs are applied to Poly. + """ + if isinstance(f, tuple): + fa, fd = f + f = fa.as_expr()/fd.as_expr() + fa, fd = f.as_expr().as_numer_denom() + fa, fd = fa.as_poly(t, **kwargs), fd.as_poly(t, **kwargs) + if cancel: + fa, fd = fa.cancel(fd, include=True) + if fa is None or fd is None: + raise ValueError("Could not turn %s into a fraction in %s." % (f, t)) + return (fa, fd) + + +def as_poly_1t(p, t, z): + """ + (Hackish) way to convert an element ``p`` of K[t, 1/t] to K[t, z]. + + In other words, ``z == 1/t`` will be a dummy variable that Poly can handle + better. + + See issue 5131. 
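For gcdex_diophantine() and frac_in() defined above, a small hand-checkable sketch (the inputs are hypothetical; for gcdex_diophantine the polynomial c must lie in the ideal generated by a and b):

>>> from sympy import Poly, symbols
>>> from sympy.integrals.risch import frac_in, gcdex_diophantine
>>> x, t = symbols('x t')
>>> fa, fd = frac_in((t + 1)/(x*t), t)   # == (Poly(t + 1, t), Poly(x*t, t))
>>> a, b = Poly(x, x, domain='QQ'), Poly(x + 1, x, domain='QQ')
>>> s, u = gcdex_diophantine(a, b, Poly(1, x, domain='QQ'))
>>> (s*a + u*b).as_expr()   # s*a + u*b == c with deg(s) < deg(b)
1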
+ + Examples + ======== + + >>> from sympy import random_poly + >>> from sympy.integrals.risch import as_poly_1t + >>> from sympy.abc import x, z + + >>> p1 = random_poly(x, 10, -10, 10) + >>> p2 = random_poly(x, 10, -10, 10) + >>> p = p1 + p2.subs(x, 1/x) + >>> as_poly_1t(p, x, z).as_expr().subs(z, 1/x) == p + True + """ + # TODO: Use this on the final result. That way, we can avoid answers like + # (...)*exp(-x). + pa, pd = frac_in(p, t, cancel=True) + if not pd.is_monomial: + # XXX: Is there a better Poly exception that we could raise here? + # Either way, if you see this (from the Risch Algorithm) it indicates + # a bug. + raise PolynomialError("%s is not an element of K[%s, 1/%s]." % (p, t, t)) + d = pd.degree(t) + one_t_part = pa.slice(0, d + 1) + r = pd.degree() - pa.degree() + t_part = pa - one_t_part + try: + t_part = t_part.to_field().exquo(pd) + except DomainError as e: + # issue 4950 + raise NotImplementedError(e) + # Compute the negative degree parts. + one_t_part = Poly.from_list(reversed(one_t_part.rep.rep), *one_t_part.gens, + domain=one_t_part.domain) + if 0 < r < oo: + one_t_part *= Poly(t**r, t) + + one_t_part = one_t_part.replace(t, z) # z will be 1/t + if pd.nth(d): + one_t_part *= Poly(1/pd.nth(d), z, expand=False) + ans = t_part.as_poly(t, z, expand=False) + one_t_part.as_poly(t, z, + expand=False) + + return ans + + +def derivation(p, DE, coefficientD=False, basic=False): + """ + Computes Dp. + + Explanation + =========== + + Given the derivation D with D = d/dx and p is a polynomial in t over + K(x), return Dp. + + If coefficientD is True, it computes the derivation kD + (kappaD), which is defined as kD(sum(ai*Xi**i, (i, 0, n))) == + sum(Dai*Xi**i, (i, 1, n)) (Definition 3.2.2, page 80). X in this case is + T[-1], so coefficientD computes the derivative just with respect to T[:-1], + with T[-1] treated as a constant. + + If ``basic=True``, the returns a Basic expression. Elements of D can still be + instances of Poly. + """ + if basic: + r = 0 + else: + r = Poly(0, DE.t) + + t = DE.t + if coefficientD: + if DE.level <= -len(DE.T): + # 'base' case, the answer is 0. + return r + DE.decrement_level() + + D = DE.D[:len(DE.D) + DE.level + 1] + T = DE.T[:len(DE.T) + DE.level + 1] + + for d, v in zip(D, T): + pv = p.as_poly(v) + if pv is None or basic: + pv = p.as_expr() + + if basic: + r += d.as_expr()*pv.diff(v) + else: + r += (d.as_expr()*pv.diff(v).as_expr()).as_poly(t) + + if basic: + r = cancel(r) + if coefficientD: + DE.increment_level() + + return r + + +def get_case(d, t): + """ + Returns the type of the derivation d. + + Returns one of {'exp', 'tan', 'base', 'primitive', 'other_linear', + 'other_nonlinear'}. + """ + if not d.expr.has(t): + if d.is_one: + return 'base' + return 'primitive' + if d.rem(Poly(t, t)).is_zero: + return 'exp' + if d.rem(Poly(1 + t**2, t)).is_zero: + return 'tan' + if d.degree(t) > 1: + return 'other_nonlinear' + return 'other_linear' + + +def splitfactor(p, DE, coefficientD=False, z=None): + """ + Splitting factorization. + + Explanation + =========== + + Given a derivation D on k[t] and ``p`` in k[t], return (p_n, p_s) in + k[t] x k[t] such that p = p_n*p_s, p_s is special, and each square + factor of p_n is normal. + + Page. 100 + """ + kinv = [1/x for x in DE.T[:DE.level]] + if z: + kinv.append(z) + + One = Poly(1, DE.t, domain=p.get_domain()) + Dp = derivation(p, DE, coefficientD=coefficientD) + # XXX: Is this right? 
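To make the derivation and case conventions concrete, here is a sketch on a hypothetical two-element tower where t plays the role of exp(x), so Dt == t and t itself is special:

>>> from sympy import Poly, symbols
>>> from sympy.integrals.risch import (DifferentialExtension, derivation,
...     get_case, splitfactor)
>>> x, t = symbols('x t')
>>> DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]})
>>> Dp = derivation(Poly(x*t**2, t), DE)   # == Poly(2*x*t**2 + t**2, t)
>>> get_case(Poly(1, x), x), get_case(Poly(t, t), t), get_case(Poly(1 + t**2, t), t)
('base', 'exp', 'tan')
>>> pn, ps = splitfactor(Poly(t**3 + t**2, t), DE)  # normal part t + 1, special part t**2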
+ if p.is_zero: + return (p, One) + + if not p.expr.has(DE.t): + s = p.as_poly(*kinv).gcd(Dp.as_poly(*kinv)).as_poly(DE.t) + n = p.exquo(s) + return (n, s) + + if not Dp.is_zero: + h = p.gcd(Dp).to_field() + g = p.gcd(p.diff(DE.t)).to_field() + s = h.exquo(g) + + if s.degree(DE.t) == 0: + return (p, One) + + q_split = splitfactor(p.exquo(s), DE, coefficientD=coefficientD) + + return (q_split[0], q_split[1]*s) + else: + return (p, One) + + +def splitfactor_sqf(p, DE, coefficientD=False, z=None, basic=False): + """ + Splitting Square-free Factorization. + + Explanation + =========== + + Given a derivation D on k[t] and ``p`` in k[t], returns (N1, ..., Nm) + and (S1, ..., Sm) in k[t]^m such that p = + (N1*N2**2*...*Nm**m)*(S1*S2**2*...*Sm**m) is a splitting + factorization of ``p`` and the Ni and Si are square-free and coprime. + """ + # TODO: This algorithm appears to be faster in every case + # TODO: Verify this and splitfactor() for multiple extensions + kkinv = [1/x for x in DE.T[:DE.level]] + DE.T[:DE.level] + if z: + kkinv = [z] + + S = [] + N = [] + p_sqf = p.sqf_list_include() + if p.is_zero: + return (((p, 1),), ()) + + for pi, i in p_sqf: + Si = pi.as_poly(*kkinv).gcd(derivation(pi, DE, + coefficientD=coefficientD,basic=basic).as_poly(*kkinv)).as_poly(DE.t) + pi = Poly(pi, DE.t) + Si = Poly(Si, DE.t) + Ni = pi.exquo(Si) + if not Si.is_one: + S.append((Si, i)) + if not Ni.is_one: + N.append((Ni, i)) + + return (tuple(N), tuple(S)) + + +def canonical_representation(a, d, DE): + """ + Canonical Representation. + + Explanation + =========== + + Given a derivation D on k[t] and f = a/d in k(t), return (f_p, f_s, + f_n) in k[t] x k(t) x k(t) such that f = f_p + f_s + f_n is the + canonical representation of f (f_p is a polynomial, f_s is reduced + (has a special denominator), and f_n is simple (has a normal + denominator). + """ + # Make d monic + l = Poly(1/d.LC(), DE.t) + a, d = a.mul(l), d.mul(l) + + q, r = a.div(d) + dn, ds = splitfactor(d, DE) + + b, c = gcdex_diophantine(dn.as_poly(DE.t), ds.as_poly(DE.t), r.as_poly(DE.t)) + b, c = b.as_poly(DE.t), c.as_poly(DE.t) + + return (q, (b, ds), (c, dn)) + + +def hermite_reduce(a, d, DE): + """ + Hermite Reduction - Mack's Linear Version. + + Given a derivation D on k(t) and f = a/d in k(t), returns g, h, r in + k(t) such that f = Dg + h + r, h is simple, and r is reduced. + + """ + # Make d monic + l = Poly(1/d.LC(), DE.t) + a, d = a.mul(l), d.mul(l) + + fp, fs, fn = canonical_representation(a, d, DE) + a, d = fn + l = Poly(1/d.LC(), DE.t) + a, d = a.mul(l), d.mul(l) + + ga = Poly(0, DE.t) + gd = Poly(1, DE.t) + + dd = derivation(d, DE) + dm = gcd(d.to_field(), dd.to_field()).as_poly(DE.t) + ds, _ = d.div(dm) + + while dm.degree(DE.t) > 0: + + ddm = derivation(dm, DE) + dm2 = gcd(dm.to_field(), ddm.to_field()) + dms, _ = dm.div(dm2) + ds_ddm = ds.mul(ddm) + ds_ddm_dm, _ = ds_ddm.div(dm) + + b, c = gcdex_diophantine(-ds_ddm_dm.as_poly(DE.t), + dms.as_poly(DE.t), a.as_poly(DE.t)) + b, c = b.as_poly(DE.t), c.as_poly(DE.t) + + db = derivation(b, DE).as_poly(DE.t) + ds_dms, _ = ds.div(dms) + a = c.as_poly(DE.t) - db.mul(ds_dms).as_poly(DE.t) + + ga = ga*dm + b*gd + gd = gd*dm + ga, gd = ga.cancel(gd, include=True) + dm = dm2 + + q, r = a.div(ds) + ga, gd = ga.cancel(gd, include=True) + + r, d = r.cancel(ds, include=True) + rra = q*fs[1] + fp*fs[1] + fs[0] + rrd = fs[1] + rra, rrd = rra.cancel(rrd, include=True) + + return ((ga, gd), (r, d), (rra, rrd)) + + +def polynomial_reduce(p, DE): + """ + Polynomial Reduction. 
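A hand-checked sketch of hermite_reduce() above, on a hypothetical tower where t plays the role of log(x) (Dt == 1/x): reducing 1/log(x)**2 should reproduce the classical identity Integral(1/log(x)**2, x) == -x/log(x) + Integral(1/log(x), x).

>>> from sympy import Poly, symbols
>>> from sympy.integrals.risch import DifferentialExtension, hermite_reduce
>>> x, t = symbols('x t')
>>> DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)]})
>>> g, h, r = hermite_reduce(Poly(1, t), Poly(t**2, t), DE)
>>> # Expected (up to normalization): g == (-x, t), h == (1, t), r == (0, 1),
>>> # since 1/t**2 == D(-x/t) + 1/t.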
+ + Explanation + =========== + + Given a derivation D on k(t) and p in k[t] where t is a nonlinear + monomial over k, return q, r in k[t] such that p = Dq + r, and + deg(r) < deg_t(Dt). + """ + q = Poly(0, DE.t) + while p.degree(DE.t) >= DE.d.degree(DE.t): + m = p.degree(DE.t) - DE.d.degree(DE.t) + 1 + q0 = Poly(DE.t**m, DE.t).mul(Poly(p.as_poly(DE.t).LC()/ + (m*DE.d.LC()), DE.t)) + q += q0 + p = p - derivation(q0, DE) + + return (q, p) + + +def laurent_series(a, d, F, n, DE): + """ + Contribution of ``F`` to the full partial fraction decomposition of A/D. + + Explanation + =========== + + Given a field K of characteristic 0 and ``A``,``D``,``F`` in K[x] with D monic, + nonzero, coprime with A, and ``F`` the factor of multiplicity n in the square- + free factorization of D, return the principal parts of the Laurent series of + A/D at all the zeros of ``F``. + """ + if F.degree()==0: + return 0 + Z = _symbols('z', n) + z = Symbol('z') + Z.insert(0, z) + delta_a = Poly(0, DE.t) + delta_d = Poly(1, DE.t) + + E = d.quo(F**n) + ha, hd = (a, E*Poly(z**n, DE.t)) + dF = derivation(F,DE) + B, _ = gcdex_diophantine(E, F, Poly(1,DE.t)) + C, _ = gcdex_diophantine(dF, F, Poly(1,DE.t)) + + # initialization + F_store = F + V, DE_D_list, H_list= [], [], [] + + for j in range(0, n): + # jth derivative of z would be substituted with dfnth/(j+1) where dfnth =(d^n)f/(dx)^n + F_store = derivation(F_store, DE) + v = (F_store.as_expr())/(j + 1) + V.append(v) + DE_D_list.append(Poly(Z[j + 1],Z[j])) + + DE_new = DifferentialExtension(extension = {'D': DE_D_list}) #a differential indeterminate + for j in range(0, n): + zEha = Poly(z**(n + j), DE.t)*E**(j + 1)*ha + zEhd = hd + Pa, Pd = cancel((zEha, zEhd))[1], cancel((zEha, zEhd))[2] + Q = Pa.quo(Pd) + for i in range(0, j + 1): + Q = Q.subs(Z[i], V[i]) + Dha = (hd*derivation(ha, DE, basic=True).as_poly(DE.t) + + ha*derivation(hd, DE, basic=True).as_poly(DE.t) + + hd*derivation(ha, DE_new, basic=True).as_poly(DE.t) + + ha*derivation(hd, DE_new, basic=True).as_poly(DE.t)) + Dhd = Poly(j + 1, DE.t)*hd**2 + ha, hd = Dha, Dhd + + Ff, _ = F.div(gcd(F, Q)) + F_stara, F_stard = frac_in(Ff, DE.t) + if F_stara.degree(DE.t) - F_stard.degree(DE.t) > 0: + QBC = Poly(Q, DE.t)*B**(1 + j)*C**(n + j) + H = QBC + H_list.append(H) + H = (QBC*F_stard).rem(F_stara) + alphas = real_roots(F_stara) + for alpha in list(alphas): + delta_a = delta_a*Poly((DE.t - alpha)**(n - j), DE.t) + Poly(H.eval(alpha), DE.t) + delta_d = delta_d*Poly((DE.t - alpha)**(n - j), DE.t) + return (delta_a, delta_d, H_list) + + +def recognize_derivative(a, d, DE, z=None): + """ + Compute the squarefree factorization of the denominator of f + and for each Di the polynomial H in K[x] (see Theorem 2.7.1), using the + LaurentSeries algorithm. Write Di = GiEi where Gj = gcd(Hn, Di) and + gcd(Ei,Hn) = 1. Since the residues of f at the roots of Gj are all 0, and + the residue of f at a root alpha of Ei is Hi(a) != 0, f is the derivative of a + rational function if and only if Ei = 1 for each i, which is equivalent to + Di | H[-1] for each i. 
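The polynomial_reduce() routine above can be checked by hand in the hypertangent case (a hypothetical tower with t in the role of tan(x), so Dt == 1 + t**2); this is exactly the reduction behind Integral(tan(x)**2, x) == tan(x) - x:

>>> from sympy import Poly, symbols
>>> from sympy.integrals.risch import DifferentialExtension, polynomial_reduce
>>> x, t = symbols('x t')
>>> DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1 + t**2, t)]})
>>> q, r = polynomial_reduce(Poly(t**2, t), DE)
>>> q.as_expr(), r.as_expr()
(t, -1)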
+ """ + flag =True + a, d = a.cancel(d, include=True) + _, r = a.div(d) + Np, Sp = splitfactor_sqf(d, DE, coefficientD=True, z=z) + + j = 1 + for s, _ in Sp: + delta_a, delta_d, H = laurent_series(r, d, s, j, DE) + g = gcd(d, H[-1]).as_poly() + if g is not d: + flag = False + break + j = j + 1 + return flag + + +def recognize_log_derivative(a, d, DE, z=None): + """ + There exists a v in K(x)* such that f = dv/v + where f a rational function if and only if f can be written as f = A/D + where D is squarefree,deg(A) < deg(D), gcd(A, D) = 1, + and all the roots of the Rothstein-Trager resultant are integers. In that case, + any of the Rothstein-Trager, Lazard-Rioboo-Trager or Czichowski algorithm + produces u in K(x) such that du/dx = uf. + """ + + z = z or Dummy('z') + a, d = a.cancel(d, include=True) + _, a = a.div(d) + + pz = Poly(z, DE.t) + Dd = derivation(d, DE) + q = a - pz*Dd + r, _ = d.resultant(q, includePRS=True) + r = Poly(r, z) + Np, Sp = splitfactor_sqf(r, DE, coefficientD=True, z=z) + + for s, _ in Sp: + # TODO also consider the complex roots which should + # turn the flag false + a = real_roots(s.as_poly(z)) + + if not all(j.is_Integer for j in a): + return False + return True + +def residue_reduce(a, d, DE, z=None, invert=True): + """ + Lazard-Rioboo-Rothstein-Trager resultant reduction. + + Explanation + =========== + + Given a derivation ``D`` on k(t) and f in k(t) simple, return g + elementary over k(t) and a Boolean b in {True, False} such that f - + Dg in k[t] if b == True or f + h and f + h - Dg do not have an + elementary integral over k(t) for any h in k (reduced) if b == + False. + + Returns (G, b), where G is a tuple of tuples of the form (s_i, S_i), + such that g = Add(*[RootSum(s_i, lambda z: z*log(S_i(z, t))) for + S_i, s_i in G]). f - Dg is the remaining integral, which is elementary + only if b == True, and hence the integral of f is elementary only if + b == True. + + f - Dg is not calculated in this function because that would require + explicitly calculating the RootSum. Use residue_reduce_derivation(). 
+ """ + # TODO: Use log_to_atan() from rationaltools.py + # If r = residue_reduce(...), then the logarithmic part is given by: + # sum([RootSum(a[0].as_poly(z), lambda i: i*log(a[1].as_expr()).subs(z, + # i)).subs(t, log(x)) for a in r[0]]) + + z = z or Dummy('z') + a, d = a.cancel(d, include=True) + a, d = a.to_field().mul_ground(1/d.LC()), d.to_field().mul_ground(1/d.LC()) + kkinv = [1/x for x in DE.T[:DE.level]] + DE.T[:DE.level] + + if a.is_zero: + return ([], True) + _, a = a.div(d) + + pz = Poly(z, DE.t) + + Dd = derivation(d, DE) + q = a - pz*Dd + + if Dd.degree(DE.t) <= d.degree(DE.t): + r, R = d.resultant(q, includePRS=True) + else: + r, R = q.resultant(d, includePRS=True) + + R_map, H = {}, [] + for i in R: + R_map[i.degree()] = i + + r = Poly(r, z) + Np, Sp = splitfactor_sqf(r, DE, coefficientD=True, z=z) + + for s, i in Sp: + if i == d.degree(DE.t): + s = Poly(s, z).monic() + H.append((s, d)) + else: + h = R_map.get(i) + if h is None: + continue + h_lc = Poly(h.as_poly(DE.t).LC(), DE.t, field=True) + + h_lc_sqf = h_lc.sqf_list_include(all=True) + + for a, j in h_lc_sqf: + h = Poly(h, DE.t, field=True).exquo(Poly(gcd(a, s**j, *kkinv), + DE.t)) + + s = Poly(s, z).monic() + + if invert: + h_lc = Poly(h.as_poly(DE.t).LC(), DE.t, field=True, expand=False) + inv, coeffs = h_lc.as_poly(z, field=True).invert(s), [S.One] + + for coeff in h.coeffs()[1:]: + L = reduced(inv*coeff.as_poly(inv.gens), [s])[1] + coeffs.append(L.as_expr()) + + h = Poly(dict(list(zip(h.monoms(), coeffs))), DE.t) + + H.append((s, h)) + + b = not any(cancel(i.as_expr()).has(DE.t, z) for i, _ in Np) + + return (H, b) + + +def residue_reduce_to_basic(H, DE, z): + """ + Converts the tuple returned by residue_reduce() into a Basic expression. + """ + # TODO: check what Lambda does with RootOf + i = Dummy('i') + s = list(zip(reversed(DE.T), reversed([f(DE.x) for f in DE.Tfuncs]))) + + return sum(RootSum(a[0].as_poly(z), Lambda(i, i*log(a[1].as_expr()).subs( + {z: i}).subs(s))) for a in H) + + +def residue_reduce_derivation(H, DE, z): + """ + Computes the derivation of an expression returned by residue_reduce(). + + In general, this is a rational function in t, so this returns an + as_expr() result. + """ + # TODO: verify that this is correct for multiple extensions + i = Dummy('i') + return S(sum(RootSum(a[0].as_poly(z), Lambda(i, i*derivation(a[1], + DE).as_expr().subs(z, i)/a[1].as_expr().subs(z, i))) for a in H)) + + +def integrate_primitive_polynomial(p, DE): + """ + Integration of primitive polynomials. + + Explanation + =========== + + Given a primitive monomial t over k, and ``p`` in k[t], return q in k[t], + r in k, and a bool b in {True, False} such that r = p - Dq is in k if b is + True, or r = p - Dq does not have an elementary integral over k(t) if b is + False. + """ + Zero = Poly(0, DE.t) + q = Poly(0, DE.t) + + if not p.expr.has(DE.t): + return (Zero, p, True) + + from .prde import limited_integrate + while True: + if not p.expr.has(DE.t): + return (q, p, True) + + Dta, Dtb = frac_in(DE.d, DE.T[DE.level - 1]) + + with DecrementLevel(DE): # We had better be integrating the lowest extension (x) + # with ratint(). 
+ a = p.LC() + aa, ad = frac_in(a, DE.t) + + try: + rv = limited_integrate(aa, ad, [(Dta, Dtb)], DE) + if rv is None: + raise NonElementaryIntegralException + (ba, bd), c = rv + except NonElementaryIntegralException: + return (q, p, False) + + m = p.degree(DE.t) + q0 = c[0].as_poly(DE.t)*Poly(DE.t**(m + 1)/(m + 1), DE.t) + \ + (ba.as_expr()/bd.as_expr()).as_poly(DE.t)*Poly(DE.t**m, DE.t) + + p = p - derivation(q0, DE) + q = q + q0 + + +def integrate_primitive(a, d, DE, z=None): + """ + Integration of primitive functions. + + Explanation + =========== + + Given a primitive monomial t over k and f in k(t), return g elementary over + k(t), i in k(t), and b in {True, False} such that i = f - Dg is in k if b + is True or i = f - Dg does not have an elementary integral over k(t) if b + is False. + + This function returns a Basic expression for the first argument. If b is + True, the second argument is Basic expression in k to recursively integrate. + If b is False, the second argument is an unevaluated Integral, which has + been proven to be nonelementary. + """ + # XXX: a and d must be canceled, or this might return incorrect results + z = z or Dummy("z") + s = list(zip(reversed(DE.T), reversed([f(DE.x) for f in DE.Tfuncs]))) + + g1, h, r = hermite_reduce(a, d, DE) + g2, b = residue_reduce(h[0], h[1], DE, z=z) + if not b: + i = cancel(a.as_expr()/d.as_expr() - (g1[1]*derivation(g1[0], DE) - + g1[0]*derivation(g1[1], DE)).as_expr()/(g1[1]**2).as_expr() - + residue_reduce_derivation(g2, DE, z)) + i = NonElementaryIntegral(cancel(i).subs(s), DE.x) + return ((g1[0].as_expr()/g1[1].as_expr()).subs(s) + + residue_reduce_to_basic(g2, DE, z), i, b) + + # h - Dg2 + r + p = cancel(h[0].as_expr()/h[1].as_expr() - residue_reduce_derivation(g2, + DE, z) + r[0].as_expr()/r[1].as_expr()) + p = p.as_poly(DE.t) + + q, i, b = integrate_primitive_polynomial(p, DE) + + ret = ((g1[0].as_expr()/g1[1].as_expr() + q.as_expr()).subs(s) + + residue_reduce_to_basic(g2, DE, z)) + if not b: + # TODO: This does not do the right thing when b is False + i = NonElementaryIntegral(cancel(i.as_expr()).subs(s), DE.x) + else: + i = cancel(i.as_expr()) + + return (ret, i, b) + + +def integrate_hyperexponential_polynomial(p, DE, z): + """ + Integration of hyperexponential polynomials. + + Explanation + =========== + + Given a hyperexponential monomial t over k and ``p`` in k[t, 1/t], return q in + k[t, 1/t] and a bool b in {True, False} such that p - Dq in k if b is True, + or p - Dq does not have an elementary integral over k(t) if b is False. 
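A small end-to-end sketch of integrate_primitive() above: for f = log(x) the elementary part is x*log(x) and the remaining constant -1 is handed back for integration in k, matching Integral(log(x), x) == x*log(x) - x.

>>> from sympy import log
>>> from sympy.abc import x
>>> from sympy.integrals.risch import DifferentialExtension, integrate_primitive
>>> DE = DifferentialExtension(log(x), x)
>>> integrate_primitive(DE.fa, DE.fd, DE)
(x*log(x), -1, True)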
+ """ + t1 = DE.t + dtt = DE.d.exquo(Poly(DE.t, DE.t)) + qa = Poly(0, DE.t) + qd = Poly(1, DE.t) + b = True + + if p.is_zero: + return(qa, qd, b) + + from sympy.integrals.rde import rischDE + + with DecrementLevel(DE): + for i in range(-p.degree(z), p.degree(t1) + 1): + if not i: + continue + elif i < 0: + # If you get AttributeError: 'NoneType' object has no attribute 'nth' + # then this should really not have expand=False + # But it shouldn't happen because p is already a Poly in t and z + a = p.as_poly(z, expand=False).nth(-i) + else: + # If you get AttributeError: 'NoneType' object has no attribute 'nth' + # then this should really not have expand=False + a = p.as_poly(t1, expand=False).nth(i) + + aa, ad = frac_in(a, DE.t, field=True) + aa, ad = aa.cancel(ad, include=True) + iDt = Poly(i, t1)*dtt + iDta, iDtd = frac_in(iDt, DE.t, field=True) + try: + va, vd = rischDE(iDta, iDtd, Poly(aa, DE.t), Poly(ad, DE.t), DE) + va, vd = frac_in((va, vd), t1, cancel=True) + except NonElementaryIntegralException: + b = False + else: + qa = qa*vd + va*Poly(t1**i)*qd + qd *= vd + + return (qa, qd, b) + + +def integrate_hyperexponential(a, d, DE, z=None, conds='piecewise'): + """ + Integration of hyperexponential functions. + + Explanation + =========== + + Given a hyperexponential monomial t over k and f in k(t), return g + elementary over k(t), i in k(t), and a bool b in {True, False} such that + i = f - Dg is in k if b is True or i = f - Dg does not have an elementary + integral over k(t) if b is False. + + This function returns a Basic expression for the first argument. If b is + True, the second argument is Basic expression in k to recursively integrate. + If b is False, the second argument is an unevaluated Integral, which has + been proven to be nonelementary. + """ + # XXX: a and d must be canceled, or this might return incorrect results + z = z or Dummy("z") + s = list(zip(reversed(DE.T), reversed([f(DE.x) for f in DE.Tfuncs]))) + + g1, h, r = hermite_reduce(a, d, DE) + g2, b = residue_reduce(h[0], h[1], DE, z=z) + if not b: + i = cancel(a.as_expr()/d.as_expr() - (g1[1]*derivation(g1[0], DE) - + g1[0]*derivation(g1[1], DE)).as_expr()/(g1[1]**2).as_expr() - + residue_reduce_derivation(g2, DE, z)) + i = NonElementaryIntegral(cancel(i.subs(s)), DE.x) + return ((g1[0].as_expr()/g1[1].as_expr()).subs(s) + + residue_reduce_to_basic(g2, DE, z), i, b) + + # p should be a polynomial in t and 1/t, because Sirr == k[t, 1/t] + # h - Dg2 + r + p = cancel(h[0].as_expr()/h[1].as_expr() - residue_reduce_derivation(g2, + DE, z) + r[0].as_expr()/r[1].as_expr()) + pp = as_poly_1t(p, DE.t, z) + + qa, qd, b = integrate_hyperexponential_polynomial(pp, DE, z) + + i = pp.nth(0, 0) + + ret = ((g1[0].as_expr()/g1[1].as_expr()).subs(s) \ + + residue_reduce_to_basic(g2, DE, z)) + + qas = qa.as_expr().subs(s) + qds = qd.as_expr().subs(s) + if conds == 'piecewise' and DE.x not in qds.free_symbols: + # We have to be careful if the exponent is S.Zero! + + # XXX: Does qd = 0 always necessarily correspond to the exponential + # equaling 1? + ret += Piecewise( + (qas/qds, Ne(qds, 0)), + (integrate((p - i).subs(DE.t, 1).subs(s), DE.x), True) + ) + else: + ret += qas/qds + + if not b: + i = p - (qd*derivation(qa, DE) - qa*derivation(qd, DE)).as_expr()/\ + (qd**2).as_expr() + i = NonElementaryIntegral(cancel(i).subs(s), DE.x) + return (ret, i, b) + + +def integrate_hypertangent_polynomial(p, DE): + """ + Integration of hypertangent polynomials. 
+ + Explanation + =========== + + Given a differential field k such that sqrt(-1) is not in k, a + hypertangent monomial t over k, and p in k[t], return q in k[t] and + c in k such that p - Dq - c*D(t**2 + 1)/(t**1 + 1) is in k and p - + Dq does not have an elementary integral over k(t) if Dc != 0. + """ + # XXX: Make sure that sqrt(-1) is not in k. + q, r = polynomial_reduce(p, DE) + a = DE.d.exquo(Poly(DE.t**2 + 1, DE.t)) + c = Poly(r.nth(1)/(2*a.as_expr()), DE.t) + return (q, c) + + +def integrate_nonlinear_no_specials(a, d, DE, z=None): + """ + Integration of nonlinear monomials with no specials. + + Explanation + =========== + + Given a nonlinear monomial t over k such that Sirr ({p in k[t] | p is + special, monic, and irreducible}) is empty, and f in k(t), returns g + elementary over k(t) and a Boolean b in {True, False} such that f - Dg is + in k if b == True, or f - Dg does not have an elementary integral over k(t) + if b == False. + + This function is applicable to all nonlinear extensions, but in the case + where it returns b == False, it will only have proven that the integral of + f - Dg is nonelementary if Sirr is empty. + + This function returns a Basic expression. + """ + # TODO: Integral from k? + # TODO: split out nonelementary integral + # XXX: a and d must be canceled, or this might not return correct results + z = z or Dummy("z") + s = list(zip(reversed(DE.T), reversed([f(DE.x) for f in DE.Tfuncs]))) + + g1, h, r = hermite_reduce(a, d, DE) + g2, b = residue_reduce(h[0], h[1], DE, z=z) + if not b: + return ((g1[0].as_expr()/g1[1].as_expr()).subs(s) + + residue_reduce_to_basic(g2, DE, z), b) + + # Because f has no specials, this should be a polynomial in t, or else + # there is a bug. + p = cancel(h[0].as_expr()/h[1].as_expr() - residue_reduce_derivation(g2, + DE, z).as_expr() + r[0].as_expr()/r[1].as_expr()).as_poly(DE.t) + q1, q2 = polynomial_reduce(p, DE) + + if q2.expr.has(DE.t): + b = False + else: + b = True + + ret = (cancel(g1[0].as_expr()/g1[1].as_expr() + q1.as_expr()).subs(s) + + residue_reduce_to_basic(g2, DE, z)) + return (ret, b) + + +class NonElementaryIntegral(Integral): + """ + Represents a nonelementary Integral. + + Explanation + =========== + + If the result of integrate() is an instance of this class, it is + guaranteed to be nonelementary. Note that integrate() by default will try + to find any closed-form solution, even in terms of special functions which + may themselves not be elementary. To make integrate() only give + elementary solutions, or, in the cases where it can prove the integral to + be nonelementary, instances of this class, use integrate(risch=True). + In this case, integrate() may raise NotImplementedError if it cannot make + such a determination. + + integrate() uses the deterministic Risch algorithm to integrate elementary + functions or prove that they have no elementary integral. In some cases, + this algorithm can split an integral into an elementary and nonelementary + part, so that the result of integrate will be the sum of an elementary + expression and a NonElementaryIntegral. 
+ + Examples + ======== + + >>> from sympy import integrate, exp, log, Integral + >>> from sympy.abc import x + + >>> a = integrate(exp(-x**2), x, risch=True) + >>> print(a) + Integral(exp(-x**2), x) + >>> type(a) + <class 'sympy.integrals.risch.NonElementaryIntegral'> + + >>> expr = (2*log(x)**2 - log(x) - x**2)/(log(x)**3 - x**2*log(x)) + >>> b = integrate(expr, x, risch=True) + >>> print(b) + -log(-x + log(x))/2 + log(x + log(x))/2 + Integral(1/log(x), x) + >>> type(b.atoms(Integral).pop()) + <class 'sympy.integrals.risch.NonElementaryIntegral'> + + """ + # TODO: This is useful in and of itself, because isinstance(result, + # NonElementaryIntegral) will tell if the integral has been proven to be + # elementary. But should we do more? Perhaps a no-op .doit() if + # elementary=True? Or maybe some information on why the integral is + # nonelementary. + pass + + +def risch_integrate(f, x, extension=None, handle_first='log', + separate_integral=False, rewrite_complex=None, + conds='piecewise'): + r""" + The Risch Integration Algorithm. + + Explanation + =========== + + Only transcendental functions are supported. Currently, only exponentials + and logarithms are supported, but support for trigonometric functions is + forthcoming. + + If this function returns an unevaluated Integral in the result, it means + that it has proven that integral to be nonelementary. Any errors will + result in raising NotImplementedError. The unevaluated Integral will be + an instance of NonElementaryIntegral, a subclass of Integral. + + handle_first may be either 'exp' or 'log'. This changes the order in + which the extension is built, and may result in a different (but + equivalent) solution (for an example of this, see issue 5109). It is also + possible that the integral may be computed with one but not the other, + because not all cases have been implemented yet. It defaults to 'log' so + that the outer extension is exponential when possible, because more of the + exponential case has been implemented. + + If ``separate_integral`` is ``True``, the result is returned as a tuple (ans, i), + where the integral is ans + i, ans is elementary, and i is either a + NonElementaryIntegral or 0. This is useful if you want to try further + integrating the NonElementaryIntegral part using other algorithms to + possibly get a solution in terms of special functions. It is False by + default. + + Examples + ======== + + >>> from sympy.integrals.risch import risch_integrate + >>> from sympy import exp, log, pprint + >>> from sympy.abc import x + + First, we try integrating exp(-x**2). Except for a constant factor of + 2/sqrt(pi), this is the famous error function. + + >>> pprint(risch_integrate(exp(-x**2), x)) + / + | + | 2 + | -x + | e dx + | + / + + The unevaluated Integral in the result means that risch_integrate() has + proven that exp(-x**2) does not have an elementary anti-derivative. + + In many cases, risch_integrate() can split out the elementary + anti-derivative part from the nonelementary anti-derivative part. + For example, + + >>> pprint(risch_integrate((2*log(x)**2 - log(x) - x**2)/(log(x)**3 - + ... x**2*log(x)), x)) + / + | + log(-x + log(x)) log(x + log(x)) | 1 + - ---------------- + --------------- + | ------ dx + 2 2 | log(x) + | + / + + This means that it has proven that the integral of 1/log(x) is + nonelementary. This function is also known as the logarithmic integral, + and is often denoted as Li(x).
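+ + The ``separate_integral`` option described above keeps the elementary and + nonelementary pieces apart. As an illustrative sketch (not an upstream + doctest; the call is marked as skipped), one would expect + + >>> risch_integrate(exp(-x**2), x, separate_integral=True) # doctest: +SKIP + (0, Integral(exp(-x**2), x)) + + with the elementary part first and the proven-nonelementary remainder + second.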
+ + risch_integrate() currently only accepts purely transcendental functions + with exponentials and logarithms, though note that this can include + nested exponentials and logarithms, as well as exponentials with bases + other than E. + + >>> pprint(risch_integrate(exp(x)*exp(exp(x)), x)) + / x\ + \e / + e + >>> pprint(risch_integrate(exp(exp(x)), x)) + / + | + | / x\ + | \e / + | e dx + | + / + + >>> pprint(risch_integrate(x*x**x*log(x) + x**x + x*x**x, x)) + x + x*x + >>> pprint(risch_integrate(x**x, x)) + / + | + | x + | x dx + | + / + + >>> pprint(risch_integrate(-1/(x*log(x)*log(log(x))**2), x)) + 1 + ----------- + log(log(x)) + + """ + f = S(f) + + DE = extension or DifferentialExtension(f, x, handle_first=handle_first, + dummy=True, rewrite_complex=rewrite_complex) + fa, fd = DE.fa, DE.fd + + result = S.Zero + for case in reversed(DE.cases): + if not fa.expr.has(DE.t) and not fd.expr.has(DE.t) and not case == 'base': + DE.decrement_level() + fa, fd = frac_in((fa, fd), DE.t) + continue + + fa, fd = fa.cancel(fd, include=True) + if case == 'exp': + ans, i, b = integrate_hyperexponential(fa, fd, DE, conds=conds) + elif case == 'primitive': + ans, i, b = integrate_primitive(fa, fd, DE) + elif case == 'base': + # XXX: We can't call ratint() directly here because it doesn't + # handle polynomials correctly. + ans = integrate(fa.as_expr()/fd.as_expr(), DE.x, risch=False) + b = False + i = S.Zero + else: + raise NotImplementedError("Only exponential and logarithmic " + "extensions are currently supported.") + + result += ans + if b: + DE.decrement_level() + fa, fd = frac_in(i, DE.t) + else: + result = result.subs(DE.backsubs) + if not i.is_zero: + i = NonElementaryIntegral(i.function.subs(DE.backsubs),i.limits) + if not separate_integral: + result += i + return result + else: + + if isinstance(i, NonElementaryIntegral): + return (result, i) + else: + return (result, 0) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/singularityfunctions.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/singularityfunctions.py new file mode 100644 index 0000000000000000000000000000000000000000..6fc13af3dc9a66dfb286612118bedfa7718becaa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/singularityfunctions.py @@ -0,0 +1,63 @@ +from sympy.functions import SingularityFunction, DiracDelta +from sympy.integrals import integrate + + +def singularityintegrate(f, x): + """ + This function handles the indefinite integrations of Singularity functions. + The ``integrate`` function calls this function internally whenever an + instance of SingularityFunction is passed as argument. + + Explanation + =========== + + The idea for integration is the following: + + - If we are dealing with a SingularityFunction expression, + i.e. ``SingularityFunction(x, a, n)``, we just return + ``SingularityFunction(x, a, n + 1)/(n + 1)`` if ``n >= 0`` and + ``SingularityFunction(x, a, n + 1)`` if ``n < 0``. + + - If the node is a multiplication or power node having a + SingularityFunction term we rewrite the whole expression in terms of + Heaviside and DiracDelta and then integrate the output. Lastly, we + rewrite the output of integration back in terms of SingularityFunction. + + - If none of the above case arises, we return None. 
+ + Examples + ======== + + >>> from sympy.integrals.singularityfunctions import singularityintegrate + >>> from sympy import SingularityFunction, symbols, Function + >>> x, a, n, y = symbols('x a n y') + >>> f = Function('f') + >>> singularityintegrate(SingularityFunction(x, a, 3), x) + SingularityFunction(x, a, 4)/4 + >>> singularityintegrate(5*SingularityFunction(x, 5, -2), x) + 5*SingularityFunction(x, 5, -1) + >>> singularityintegrate(6*SingularityFunction(x, 5, -1), x) + 6*SingularityFunction(x, 5, 0) + >>> singularityintegrate(x*SingularityFunction(x, 0, -1), x) + 0 + >>> singularityintegrate(SingularityFunction(x, 1, -1) * f(x), x) + f(1)*SingularityFunction(x, 1, 0) + + """ + + if not f.has(SingularityFunction): + return None + + if isinstance(f, SingularityFunction): + x, a, n = f.args + if n.is_positive or n.is_zero: + return SingularityFunction(x, a, n + 1)/(n + 1) + elif n in (-1, -2): + return SingularityFunction(x, a, n + 1) + + if f.is_Mul or f.is_Pow: + + expr = f.rewrite(DiracDelta) + expr = integrate(expr, x) + return expr.rewrite(SingularityFunction) + return None diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3044fae4183d1db1bacbcfc206f74d596ae6bd4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_deltafunctions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_deltafunctions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ea62961ad0e97a4d9eebc2129f8f8faca819b03 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_deltafunctions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_failing_integrals.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_failing_integrals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d908ee433b38533032d6306cb7aca63b06a0a64 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_failing_integrals.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_heurisch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_heurisch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e111ed8af95d7bb8e0b45391644f27951a2e2fcd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_heurisch.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_lineintegrals.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_lineintegrals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9801eaac057bc832bfbaa478ed358ee6f10b22f3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_lineintegrals.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_manual.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_manual.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa13b464ebfbc9017bb0912f6f53a04794e50216 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_manual.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_prde.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_prde.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31423abbc35a50ca34236191b830502a029eae1c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_prde.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_quadrature.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_quadrature.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6fc4528c901f958686d90fc06f8fda118f8f1f1e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_quadrature.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_rationaltools.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_rationaltools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..846052271c9c1ef680ee2b2867bf1919b7d48522 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_rationaltools.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_rde.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_rde.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ad65a78fd3dbc5b5a21cd021af1855e3707b2d5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_rde.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_risch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_risch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e31937ad2761c733ae2fe3f60aa3fedead35e18 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_risch.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_singularityfunctions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_singularityfunctions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd4dd12ebb04bbe553e89774d99975a6b1104360 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_singularityfunctions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_trigonometry.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_trigonometry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8596392c2671a3e5e84c89c4b4a9d30ee8a1048 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/__pycache__/test_trigonometry.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_deltafunctions.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_deltafunctions.py new file mode 100644 index 0000000000000000000000000000000000000000..d4fd567349b50f795e08d583fd08db67b1596577 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_deltafunctions.py @@ -0,0 +1,79 @@ +from sympy.core.function import Function +from sympy.core.numbers import (Rational, pi) +from sympy.core.singleton import S +from sympy.core.symbol import symbols +from sympy.functions.elementary.trigonometric import (cos, sin) +from sympy.functions.special.delta_functions import (DiracDelta, Heaviside) +from sympy.integrals.deltafunctions import change_mul, deltaintegrate + +f = Function("f") +x_1, x_2, x, y, z = symbols("x_1 x_2 x y z") + + +def test_change_mul(): + assert change_mul(x, x) == (None, None) + assert change_mul(x*y, x) == (None, None) + assert change_mul(x*y*DiracDelta(x), x) == (DiracDelta(x), x*y) + assert change_mul(x*y*DiracDelta(x)*DiracDelta(y), x) == \ + (DiracDelta(x), x*y*DiracDelta(y)) + assert change_mul(DiracDelta(x)**2, x) == \ + (DiracDelta(x), DiracDelta(x)) + assert change_mul(y*DiracDelta(x)**2, x) == \ + (DiracDelta(x), y*DiracDelta(x)) + + +def test_deltaintegrate(): + assert deltaintegrate(x, x) is None + assert deltaintegrate(x + DiracDelta(x), x) is None + assert deltaintegrate(DiracDelta(x, 0), x) == Heaviside(x) + for n in range(10): + assert deltaintegrate(DiracDelta(x, n + 1), x) == DiracDelta(x, n) + assert deltaintegrate(DiracDelta(x), x) == Heaviside(x) + assert deltaintegrate(DiracDelta(-x), x) == Heaviside(x) + assert deltaintegrate(DiracDelta(x - y), x) == Heaviside(x - y) + assert deltaintegrate(DiracDelta(y - x), x) == Heaviside(x - y) + + assert deltaintegrate(x*DiracDelta(x), x) == 0 + assert deltaintegrate((x - y)*DiracDelta(x - y), x) == 0 + + assert deltaintegrate(DiracDelta(x)**2, x) == DiracDelta(0)*Heaviside(x) + assert deltaintegrate(y*DiracDelta(x)**2, x) == \ + y*DiracDelta(0)*Heaviside(x) + assert deltaintegrate(DiracDelta(x, 1), x) == DiracDelta(x, 0) + assert deltaintegrate(y*DiracDelta(x, 1), x) == y*DiracDelta(x, 0) + assert deltaintegrate(DiracDelta(x, 1)**2, x) == -DiracDelta(0, 2)*Heaviside(x) + assert deltaintegrate(y*DiracDelta(x, 1)**2, x) == -y*DiracDelta(0, 2)*Heaviside(x) + + + assert deltaintegrate(DiracDelta(x) * f(x), x) == f(0) * Heaviside(x) + assert deltaintegrate(DiracDelta(-x) * f(x), x) == f(0) * Heaviside(x) + assert deltaintegrate(DiracDelta(x - 1) * f(x), x) == f(1) * Heaviside(x - 1) + assert deltaintegrate(DiracDelta(1 - x) * f(x), x) == f(1) * Heaviside(x - 1) + assert deltaintegrate(DiracDelta(x**2 + x - 2), x) == \ + Heaviside(x - 1)/3 + Heaviside(x + 2)/3 + + p = cos(x)*(DiracDelta(x) + DiracDelta(x**2 - 1))*sin(x)*(x - pi) + assert deltaintegrate(p, x) - (-pi*(cos(1)*Heaviside(-1 + x)*sin(1)/2 - \ + cos(1)*Heaviside(1 + x)*sin(1)/2) + \ + cos(1)*Heaviside(1 + x)*sin(1)/2 + \ + cos(1)*Heaviside(-1 + x)*sin(1)/2) == 0 + + p = x_2*DiracDelta(x - x_2)*DiracDelta(x_2 - x_1) + assert deltaintegrate(p, x_2) == x*DiracDelta(x - 
x_1)*Heaviside(x_2 - x) + + p = x*y**2*z*DiracDelta(y - x)*DiracDelta(y - z)*DiracDelta(x - z) + assert deltaintegrate(p, y) == x**3*z*DiracDelta(x - z)**2*Heaviside(y - x) + assert deltaintegrate((x + 1)*DiracDelta(2*x), x) == S.Half * Heaviside(x) + assert deltaintegrate((x + 1)*DiracDelta(x*Rational(2, 3) + Rational(4, 9)), x) == \ + S.Half * Heaviside(x + Rational(2, 3)) + + a, b, c = symbols('a b c', commutative=False) + assert deltaintegrate(DiracDelta(x - y)*f(x - b)*f(x - a), x) == \ + f(y - b)*f(y - a)*Heaviside(x - y) + + p = f(x - a)*DiracDelta(x - y)*f(x - c)*f(x - b) + assert deltaintegrate(p, x) == f(y - a)*f(y - c)*f(y - b)*Heaviside(x - y) + + p = DiracDelta(x - z)*f(x - b)*f(x - a)*DiracDelta(x - y) + assert deltaintegrate(p, x) == DiracDelta(y - z)*f(y - b)*f(y - a) * \ + Heaviside(x - y) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_failing_integrals.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_failing_integrals.py new file mode 100644 index 0000000000000000000000000000000000000000..5ce72e14a512ffd30bc8ed5b8341ba4444d4c49f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_failing_integrals.py @@ -0,0 +1,272 @@ +# A collection of failing integrals from the issues. + +from sympy.core.numbers import (I, Rational, oo, pi) +from sympy.core.singleton import S +from sympy.core.symbol import symbols +from sympy.functions.elementary.complexes import sign +from sympy.functions.elementary.exponential import (exp, log) +from sympy.functions.elementary.hyperbolic import (sech, sinh) +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.functions.elementary.piecewise import Piecewise +from sympy.functions.elementary.trigonometric import (acos, atan, cos, sin, tan) +from sympy.functions.special.delta_functions import DiracDelta +from sympy.functions.special.gamma_functions import gamma +from sympy.integrals.integrals import (Integral, integrate) + + +from sympy.testing.pytest import XFAIL, SKIP, slow, skip, ON_CI + +from sympy.abc import x, k, c, y, b, h, a, m, z, n, t + + +@SKIP("Too slow for @slow") +@XFAIL +def test_issue_3880(): + # integrate_hyperexponential(Poly(t*2*(1 - t0**2)*t0*(x**3 + x**2), t), Poly((1 + t0**2)**2*2*(x**2 + x + 1), t), [Poly(1, x), Poly(1 + t0**2, t0), Poly(t, t)], [x, t0, t], [exp, tan]) + assert not integrate(exp(x)*cos(2*x)*sin(2*x) * (x**3 + x**2)/(2*(x**2 + x + 1)), x).has(Integral) + + +@XFAIL +def test_issue_4212(): + assert not integrate(sign(x), x).has(Integral) + + +@XFAIL +def test_issue_4511(): + # This works, but gives a complicated answer. The correct answer is x - cos(x). + # If current answer is simplified, 1 - cos(x) + x is obtained. + # The last one is what Maple gives. It is also quite slow. 
+ assert integrate(cos(x)**2 / (1 - sin(x))) in [x - cos(x), 1 - cos(x) + x, + -2/(tan((S.Half)*x)**2 + 1) + x] + + +@XFAIL +def test_integrate_DiracDelta_fails(): + # issue 6427 + assert integrate(integrate(integrate( + DiracDelta(x - y - z), (z, 0, oo)), (y, 0, 1)), (x, 0, 1)) == S.Half + + +@XFAIL +@slow +def test_issue_4525(): + # Warning: takes a long time + assert not integrate((x**m * (1 - x)**n * (a + b*x + c*x**2))/(1 + x**2), (x, 0, 1)).has(Integral) + + +@XFAIL +@slow +def test_issue_4540(): + if ON_CI: + skip("Too slow for CI.") + # Note, this integral is probably nonelementary + assert not integrate( + (sin(1/x) - x*exp(x)) / + ((-sin(1/x) + x*exp(x))*x + x*sin(1/x)), x).has(Integral) + + +@XFAIL +@slow +def test_issue_4891(): + # Requires the hypergeometric function. + assert not integrate(cos(x)**y, x).has(Integral) + + +@XFAIL +@slow +def test_issue_1796a(): + assert not integrate(exp(2*b*x)*exp(-a*x**2), x).has(Integral) + + +@XFAIL +def test_issue_4895b(): + assert not integrate(exp(2*b*x)*exp(-a*x**2), (x, -oo, 0)).has(Integral) + + +@XFAIL +def test_issue_4895c(): + assert not integrate(exp(2*b*x)*exp(-a*x**2), (x, -oo, oo)).has(Integral) + + +@XFAIL +def test_issue_4895d(): + assert not integrate(exp(2*b*x)*exp(-a*x**2), (x, 0, oo)).has(Integral) + + +@XFAIL +@slow +def test_issue_4941(): + if ON_CI: + skip("Too slow for CI.") + assert not integrate(sqrt(1 + sinh(x/20)**2), (x, -25, 25)).has(Integral) + + +@XFAIL +def test_issue_4992(): + # Nonelementary integral. Requires hypergeometric/Meijer-G handling. + assert not integrate(log(x) * x**(k - 1) * exp(-x) / gamma(k), (x, 0, oo)).has(Integral) + + +@XFAIL +def test_issue_16396a(): + i = integrate(1/(1+sqrt(tan(x))), (x, pi/3, pi/6)) + assert not i.has(Integral) + + +@XFAIL +def test_issue_16396b(): + i = integrate(x*sin(x)/(1+cos(x)**2), (x, 0, pi)) + assert not i.has(Integral) + + +@XFAIL +def test_issue_16046(): + assert integrate(exp(exp(I*x)), [x, 0, 2*pi]) == 2*pi + + +@XFAIL +def test_issue_15925a(): + assert not integrate(sqrt((1+sin(x))**2+(cos(x))**2), (x, -pi/2, pi/2)).has(Integral) + + +@XFAIL +@slow +def test_issue_15925b(): + if ON_CI: + skip("Too slow for CI.") + assert not integrate(sqrt((-12*cos(x)**2*sin(x))**2+(12*cos(x)*sin(x)**2)**2), + (x, 0, pi/6)).has(Integral) + + +@XFAIL +def test_issue_15925b_manual(): + assert not integrate(sqrt((-12*cos(x)**2*sin(x))**2+(12*cos(x)*sin(x)**2)**2), + (x, 0, pi/6), manual=True).has(Integral) + + +@XFAIL +@slow +def test_issue_15227(): + if ON_CI: + skip("Too slow for CI.") + i = integrate(log(1-x)*log((1+x)**2)/x, (x, 0, 1)) + assert not i.has(Integral) + # assert i == -5*zeta(3)/4 + + +@XFAIL +@slow +def test_issue_14716(): + i = integrate(log(x + 5)*cos(pi*x),(x, S.Half, 1)) + assert not i.has(Integral) + # Mathematica can not solve it either, but + # integrate(log(x + 5)*cos(pi*x),(x, S.Half, 1)).transform(x, y - 5).doit() + # works + # assert i == -log(Rational(11, 2))/pi - Si(pi*Rational(11, 2))/pi + Si(6*pi)/pi + + +@XFAIL +def test_issue_14709a(): + i = integrate(x*acos(1 - 2*x/h), (x, 0, h)) + assert not i.has(Integral) + # assert i == 5*h**2*pi/16 + + +@slow +@XFAIL +def test_issue_14398(): + assert not integrate(exp(x**2)*cos(x), x).has(Integral) + + +@XFAIL +def test_issue_14074(): + i = integrate(log(sin(x)), (x, 0, pi/2)) + assert not i.has(Integral) + # assert i == -pi*log(2)/2 + + +@XFAIL +@slow +def test_issue_14078b(): + i = integrate((atan(4*x)-atan(2*x))/x, (x, 0, oo)) + assert not i.has(Integral) + # assert i == pi*log(2)/2 + + +@XFAIL 
+def test_issue_13792(): + i = integrate(log(1/x) / (1 - x), (x, 0, 1)) + assert not i.has(Integral) + # assert i in [polylog(2, -exp_polar(I*pi)), pi**2/6] + + +@XFAIL +def test_issue_11845a(): + assert not integrate(exp(y - x**3), (x, 0, 1)).has(Integral) + + +@XFAIL +def test_issue_11845b(): + assert not integrate(exp(-y - x**3), (x, 0, 1)).has(Integral) + + +@XFAIL +def test_issue_11813(): + assert not integrate((a - x)**Rational(-1, 2)*x, (x, 0, a)).has(Integral) + + +@XFAIL +def test_issue_11254c(): + assert not integrate(sech(x)**2, (x, 0, 1)).has(Integral) + + +@XFAIL +def test_issue_10584(): + assert not integrate(sqrt(x**2 + 1/x**2), x).has(Integral) + + +@XFAIL +def test_issue_9101(): + assert not integrate(log(x + sqrt(x**2 + y**2 + z**2)), z).has(Integral) + + +@XFAIL +def test_issue_7147(): + assert not integrate(x/sqrt(a*x**2 + b*x + c)**3, x).has(Integral) + + +@XFAIL +def test_issue_7109(): + assert not integrate(sqrt(a**2/(a**2 - x**2)), x).has(Integral) + + +@XFAIL +def test_integrate_Piecewise_rational_over_reals(): + f = Piecewise( + (0, t - 478.515625*pi < 0), + (13.2075145209219*pi/(0.000871222*t + 0.995)**2, t - 478.515625*pi >= 0)) + + assert abs((integrate(f, (t, 0, oo)) - 15235.9375*pi).evalf()) <= 1e-7 + + +@XFAIL +def test_issue_4311_slow(): + # Not slow when bypassing heurish + assert not integrate(x*abs(9-x**2), x).has(Integral) + +@XFAIL +def test_issue_20370(): + a = symbols('a', positive=True) + assert integrate((1 + a * cos(x))**-1, (x, 0, 2 * pi)) == (2 * pi / sqrt(1 - a**2)) + + +@XFAIL +def test_polylog(): + # log(1/x)*log(x+1)-polylog(2, -x) + assert not integrate(log(1/x)/(x + 1), x).has(Integral) + + +@XFAIL +def test_polylog_manual(): + # Make sure _parts_rule does not go into an infinite loop here + assert not integrate(log(1/x)/(x + 1), x, manual=True).has(Integral) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_intpoly.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_intpoly.py new file mode 100644 index 0000000000000000000000000000000000000000..ddbaad1fbdeca53ccab8e8b22758a6ad2d89836e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_intpoly.py @@ -0,0 +1,627 @@ +from sympy.functions.elementary.complexes import Abs +from sympy.functions.elementary.miscellaneous import sqrt + +from sympy.core import S, Rational + +from sympy.integrals.intpoly import (decompose, best_origin, distance_to_side, + polytope_integrate, point_sort, + hyperplane_parameters, main_integrate3d, + main_integrate, polygon_integrate, + lineseg_integrate, integration_reduction, + integration_reduction_dynamic, is_vertex) + +from sympy.geometry.line import Segment2D +from sympy.geometry.polygon import Polygon +from sympy.geometry.point import Point, Point2D +from sympy.abc import x, y, z + +from sympy.testing.pytest import slow + + +def test_decompose(): + assert decompose(x) == {1: x} + assert decompose(x**2) == {2: x**2} + assert decompose(x*y) == {2: x*y} + assert decompose(x + y) == {1: x + y} + assert decompose(x**2 + y) == {1: y, 2: x**2} + assert decompose(8*x**2 + 4*y + 7) == {0: 7, 1: 4*y, 2: 8*x**2} + assert decompose(x**2 + 3*y*x) == {2: x**2 + 3*x*y} + assert decompose(9*x**2 + y + 4*x + x**3 + y**2*x + 3) ==\ + {0: 3, 1: 4*x + y, 2: 9*x**2, 3: x**3 + x*y**2} + + assert decompose(x, True) == {x} + assert decompose(x ** 2, True) == {x**2} + assert decompose(x * y, True) == {x * y} + assert decompose(x + y, True) == {x, y} + assert decompose(x ** 2 + y, True) == {y, x ** 2} 
+ assert decompose(8 * x ** 2 + 4 * y + 7, True) == {7, 4*y, 8*x**2} + assert decompose(x ** 2 + 3 * y * x, True) == {x ** 2, 3 * x * y} + assert decompose(9 * x ** 2 + y + 4 * x + x ** 3 + y ** 2 * x + 3, True) == \ + {3, y, 4*x, 9*x**2, x*y**2, x**3} + + +def test_best_origin(): + expr1 = y ** 2 * x ** 5 + y ** 5 * x ** 7 + 7 * x + x ** 12 + y ** 7 * x + + l1 = Segment2D(Point(0, 3), Point(1, 1)) + l2 = Segment2D(Point(S(3) / 2, 0), Point(S(3) / 2, 3)) + l3 = Segment2D(Point(0, S(3) / 2), Point(3, S(3) / 2)) + l4 = Segment2D(Point(0, 2), Point(2, 0)) + l5 = Segment2D(Point(0, 2), Point(1, 1)) + l6 = Segment2D(Point(2, 0), Point(1, 1)) + + assert best_origin((2, 1), 3, l1, expr1) == (0, 3) + # XXX: Should these return exact Rational output? Maybe best_origin should + # sympify its arguments... + assert best_origin((2, 0), 3, l2, x ** 7) == (1.5, 0) + assert best_origin((0, 2), 3, l3, x ** 7) == (0, 1.5) + assert best_origin((1, 1), 2, l4, x ** 7 * y ** 3) == (0, 2) + assert best_origin((1, 1), 2, l4, x ** 3 * y ** 7) == (2, 0) + assert best_origin((1, 1), 2, l5, x ** 2 * y ** 9) == (0, 2) + assert best_origin((1, 1), 2, l6, x ** 9 * y ** 2) == (2, 0) + + +@slow +def test_polytope_integrate(): + # Convex 2-Polytopes + # Vertex representation + assert polytope_integrate(Polygon(Point(0, 0), Point(0, 2), + Point(4, 0)), 1) == 4 + assert polytope_integrate(Polygon(Point(0, 0), Point(0, 1), + Point(1, 1), Point(1, 0)), x * y) ==\ + Rational(1, 4) + assert polytope_integrate(Polygon(Point(0, 3), Point(5, 3), Point(1, 1)), + 6*x**2 - 40*y) == Rational(-935, 3) + + assert polytope_integrate(Polygon(Point(0, 0), Point(0, sqrt(3)), + Point(sqrt(3), sqrt(3)), + Point(sqrt(3), 0)), 1) == 3 + + hexagon = Polygon(Point(0, 0), Point(-sqrt(3) / 2, S.Half), + Point(-sqrt(3) / 2, S(3) / 2), Point(0, 2), + Point(sqrt(3) / 2, S(3) / 2), Point(sqrt(3) / 2, S.Half)) + + assert polytope_integrate(hexagon, 1) == S(3*sqrt(3)) / 2 + + # Hyperplane representation + assert polytope_integrate([((-1, 0), 0), ((1, 2), 4), + ((0, -1), 0)], 1) == 4 + assert polytope_integrate([((-1, 0), 0), ((0, 1), 1), + ((1, 0), 1), ((0, -1), 0)], x * y) == Rational(1, 4) + assert polytope_integrate([((0, 1), 3), ((1, -2), -1), + ((-2, -1), -3)], 6*x**2 - 40*y) == Rational(-935, 3) + assert polytope_integrate([((-1, 0), 0), ((0, sqrt(3)), 3), + ((sqrt(3), 0), 3), ((0, -1), 0)], 1) == 3 + + hexagon = [((Rational(-1, 2), -sqrt(3) / 2), 0), + ((-1, 0), sqrt(3) / 2), + ((Rational(-1, 2), sqrt(3) / 2), sqrt(3)), + ((S.Half, sqrt(3) / 2), sqrt(3)), + ((1, 0), sqrt(3) / 2), + ((S.Half, -sqrt(3) / 2), 0)] + assert polytope_integrate(hexagon, 1) == S(3*sqrt(3)) / 2 + + # Non-convex polytopes + # Vertex representation + assert polytope_integrate(Polygon(Point(-1, -1), Point(-1, 1), + Point(1, 1), Point(0, 0), + Point(1, -1)), 1) == 3 + assert polytope_integrate(Polygon(Point(-1, -1), Point(-1, 1), + Point(0, 0), Point(1, 1), + Point(1, -1), Point(0, 0)), 1) == 2 + # Hyperplane representation + assert polytope_integrate([((-1, 0), 1), ((0, 1), 1), ((1, -1), 0), + ((1, 1), 0), ((0, -1), 1)], 1) == 3 + assert polytope_integrate([((-1, 0), 1), ((1, 1), 0), ((-1, 1), 0), + ((1, 0), 1), ((-1, -1), 0), + ((1, -1), 0)], 1) == 2 + + # Tests for 2D polytopes mentioned in Chin et al(Page 10): + # http://dilbert.engr.ucdavis.edu/~suku/quadrature/cls-integration.pdf + fig1 = Polygon(Point(1.220, -0.827), Point(-1.490, -4.503), + Point(-3.766, -1.622), Point(-4.240, -0.091), + Point(-3.160, 4), Point(-0.981, 4.447), + Point(0.132, 4.027)) + assert 
polytope_integrate(fig1, x**2 + x*y + y**2) ==\ + S(2031627344735367)/(8*10**12) + + fig2 = Polygon(Point(4.561, 2.317), Point(1.491, -1.315), + Point(-3.310, -3.164), Point(-4.845, -3.110), + Point(-4.569, 1.867)) + assert polytope_integrate(fig2, x**2 + x*y + y**2) ==\ + S(517091313866043)/(16*10**11) + + fig3 = Polygon(Point(-2.740, -1.888), Point(-3.292, 4.233), + Point(-2.723, -0.697), Point(-0.643, -3.151)) + assert polytope_integrate(fig3, x**2 + x*y + y**2) ==\ + S(147449361647041)/(8*10**12) + + fig4 = Polygon(Point(0.211, -4.622), Point(-2.684, 3.851), + Point(0.468, 4.879), Point(4.630, -1.325), + Point(-0.411, -1.044)) + assert polytope_integrate(fig4, x**2 + x*y + y**2) ==\ + S(180742845225803)/(10**12) + + # Tests for many polynomials with maximum degree given(2D case). + tri = Polygon(Point(0, 3), Point(5, 3), Point(1, 1)) + polys = [] + expr1 = x**9*y + x**7*y**3 + 2*x**2*y**8 + expr2 = x**6*y**4 + x**5*y**5 + 2*y**10 + expr3 = x**10 + x**9*y + x**8*y**2 + x**5*y**5 + polys.extend((expr1, expr2, expr3)) + result_dict = polytope_integrate(tri, polys, max_degree=10) + assert result_dict[expr1] == Rational(615780107, 594) + assert result_dict[expr2] == Rational(13062161, 27) + assert result_dict[expr3] == Rational(1946257153, 924) + + tri = Polygon(Point(0, 3), Point(5, 3), Point(1, 1)) + expr1 = x**7*y**1 + 2*x**2*y**6 + expr2 = x**6*y**4 + x**5*y**5 + 2*y**10 + expr3 = x**10 + x**9*y + x**8*y**2 + x**5*y**5 + polys.extend((expr1, expr2, expr3)) + assert polytope_integrate(tri, polys, max_degree=9) == \ + {x**7*y + 2*x**2*y**6: Rational(489262, 9)} + + # Tests when all integral of all monomials up to a max_degree is to be + # calculated. + assert polytope_integrate(Polygon(Point(0, 0), Point(0, 1), + Point(1, 1), Point(1, 0)), + max_degree=4) == {0: 0, 1: 1, x: S.Half, + x ** 2 * y ** 2: S.One / 9, + x ** 4: S.One / 5, + y ** 4: S.One / 5, + y: S.Half, + x * y ** 2: S.One / 6, + y ** 2: S.One / 3, + x ** 3: S.One / 4, + x ** 2 * y: S.One / 6, + x ** 3 * y: S.One / 8, + x * y: S.One / 4, + y ** 3: S.One / 4, + x ** 2: S.One / 3, + x * y ** 3: S.One / 8} + + # Tests for 3D polytopes + cube1 = [[(0, 0, 0), (0, 6, 6), (6, 6, 6), (3, 6, 0), + (0, 6, 0), (6, 0, 6), (3, 0, 0), (0, 0, 6)], + [1, 2, 3, 4], [3, 2, 5, 6], [1, 7, 5, 2], [0, 6, 5, 7], + [1, 4, 0, 7], [0, 4, 3, 6]] + assert polytope_integrate(cube1, 1) == S(162) + + # 3D Test cases in Chin et al(2015) + cube2 = [[(0, 0, 0), (0, 0, 5), (0, 5, 0), (0, 5, 5), (5, 0, 0), + (5, 0, 5), (5, 5, 0), (5, 5, 5)], + [3, 7, 6, 2], [1, 5, 7, 3], [5, 4, 6, 7], [0, 4, 5, 1], + [2, 0, 1, 3], [2, 6, 4, 0]] + + cube3 = [[(0, 0, 0), (5, 0, 0), (5, 4, 0), (3, 2, 0), (3, 5, 0), + (0, 5, 0), (0, 0, 5), (5, 0, 5), (5, 4, 5), (3, 2, 5), + (3, 5, 5), (0, 5, 5)], + [6, 11, 5, 0], [1, 7, 6, 0], [5, 4, 3, 2, 1, 0], [11, 10, 4, 5], + [10, 9, 3, 4], [9, 8, 2, 3], [8, 7, 1, 2], [7, 8, 9, 10, 11, 6]] + + cube4 = [[(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1), + (S.One / 4, S.One / 4, S.One / 4)], + [0, 2, 1], [1, 3, 0], [4, 2, 3], [4, 3, 1], + [0, 1, 2], [2, 4, 1], [0, 3, 2]] + + assert polytope_integrate(cube2, x ** 2 + y ** 2 + x * y + z ** 2) ==\ + Rational(15625, 4) + assert polytope_integrate(cube3, x ** 2 + y ** 2 + x * y + z ** 2) ==\ + S(33835) / 12 + assert polytope_integrate(cube4, x ** 2 + y ** 2 + x * y + z ** 2) ==\ + S(37) / 960 + + # Test cases from Mathematica's PolyhedronData library + octahedron = [[(S.NegativeOne / sqrt(2), 0, 0), (0, S.One / sqrt(2), 0), + (0, 0, S.NegativeOne / sqrt(2)), (0, 0, S.One / sqrt(2)), + (0, S.NegativeOne / 
sqrt(2), 0), (S.One / sqrt(2), 0, 0)], + [3, 4, 5], [3, 5, 1], [3, 1, 0], [3, 0, 4], [4, 0, 2], + [4, 2, 5], [2, 0, 1], [5, 2, 1]] + + assert polytope_integrate(octahedron, 1) == sqrt(2) / 3 + + great_stellated_dodecahedron =\ + [[(-0.32491969623290634095, 0, 0.42532540417601993887), + (0.32491969623290634095, 0, -0.42532540417601993887), + (-0.52573111211913359231, 0, 0.10040570794311363956), + (0.52573111211913359231, 0, -0.10040570794311363956), + (-0.10040570794311363956, -0.3090169943749474241, 0.42532540417601993887), + (-0.10040570794311363956, 0.30901699437494742410, 0.42532540417601993887), + (0.10040570794311363956, -0.3090169943749474241, -0.42532540417601993887), + (0.10040570794311363956, 0.30901699437494742410, -0.42532540417601993887), + (-0.16245984811645317047, -0.5, 0.10040570794311363956), + (-0.16245984811645317047, 0.5, 0.10040570794311363956), + (0.16245984811645317047, -0.5, -0.10040570794311363956), + (0.16245984811645317047, 0.5, -0.10040570794311363956), + (-0.42532540417601993887, -0.3090169943749474241, -0.10040570794311363956), + (-0.42532540417601993887, 0.30901699437494742410, -0.10040570794311363956), + (-0.26286555605956679615, 0.1909830056250525759, -0.42532540417601993887), + (-0.26286555605956679615, -0.1909830056250525759, -0.42532540417601993887), + (0.26286555605956679615, 0.1909830056250525759, 0.42532540417601993887), + (0.26286555605956679615, -0.1909830056250525759, 0.42532540417601993887), + (0.42532540417601993887, -0.3090169943749474241, 0.10040570794311363956), + (0.42532540417601993887, 0.30901699437494742410, 0.10040570794311363956)], + [12, 3, 0, 6, 16], [17, 7, 0, 3, 13], + [9, 6, 0, 7, 8], [18, 2, 1, 4, 14], + [15, 5, 1, 2, 19], [11, 4, 1, 5, 10], + [8, 19, 2, 18, 9], [10, 13, 3, 12, 11], + [16, 14, 4, 11, 12], [13, 10, 5, 15, 17], + [14, 16, 6, 9, 18], [19, 8, 7, 17, 15]] + # Actual volume is : 0.163118960624632 + assert Abs(polytope_integrate(great_stellated_dodecahedron, 1) -\ + 0.163118960624632) < 1e-12 + + expr = x **2 + y ** 2 + z ** 2 + octahedron_five_compound = [[(0, -0.7071067811865475244, 0), + (0, 0.70710678118654752440, 0), + (0.1148764602736805918, + -0.35355339059327376220, -0.60150095500754567366), + (0.1148764602736805918, 0.35355339059327376220, + -0.60150095500754567366), + (0.18587401723009224507, + -0.57206140281768429760, 0.37174803446018449013), + (0.18587401723009224507, 0.57206140281768429760, + 0.37174803446018449013), + (0.30075047750377283683, -0.21850801222441053540, + 0.60150095500754567366), + (0.30075047750377283683, 0.21850801222441053540, + 0.60150095500754567366), + (0.48662449473386508189, -0.35355339059327376220, + -0.37174803446018449013), + (0.48662449473386508189, 0.35355339059327376220, + -0.37174803446018449013), + (-0.60150095500754567366, 0, -0.37174803446018449013), + (-0.30075047750377283683, -0.21850801222441053540, + -0.60150095500754567366), + (-0.30075047750377283683, 0.21850801222441053540, + -0.60150095500754567366), + (0.60150095500754567366, 0, 0.37174803446018449013), + (0.4156269377774534286, -0.57206140281768429760, 0), + (0.4156269377774534286, 0.57206140281768429760, 0), + (0.37174803446018449013, 0, -0.60150095500754567366), + (-0.4156269377774534286, -0.57206140281768429760, 0), + (-0.4156269377774534286, 0.57206140281768429760, 0), + (-0.67249851196395732696, -0.21850801222441053540, 0), + (-0.67249851196395732696, 0.21850801222441053540, 0), + (0.67249851196395732696, -0.21850801222441053540, 0), + (0.67249851196395732696, 0.21850801222441053540, 0), + 
(-0.37174803446018449013, 0, 0.60150095500754567366), + (-0.48662449473386508189, -0.35355339059327376220, + 0.37174803446018449013), + (-0.48662449473386508189, 0.35355339059327376220, + 0.37174803446018449013), + (-0.18587401723009224507, -0.57206140281768429760, + -0.37174803446018449013), + (-0.18587401723009224507, 0.57206140281768429760, + -0.37174803446018449013), + (-0.11487646027368059176, -0.35355339059327376220, + 0.60150095500754567366), + (-0.11487646027368059176, 0.35355339059327376220, + 0.60150095500754567366)], + [0, 10, 16], [23, 10, 0], [16, 13, 0], + [0, 13, 23], [16, 10, 1], [1, 10, 23], + [1, 13, 16], [23, 13, 1], [2, 4, 19], + [22, 4, 2], [2, 19, 27], [27, 22, 2], + [20, 5, 3], [3, 5, 21], [26, 20, 3], + [3, 21, 26], [29, 19, 4], [4, 22, 29], + [5, 20, 28], [28, 21, 5], [6, 8, 15], + [17, 8, 6], [6, 15, 25], [25, 17, 6], + [14, 9, 7], [7, 9, 18], [24, 14, 7], + [7, 18, 24], [8, 12, 15], [17, 12, 8], + [14, 11, 9], [9, 11, 18], [11, 14, 24], + [24, 18, 11], [25, 15, 12], [12, 17, 25], + [29, 27, 19], [20, 26, 28], [28, 26, 21], + [22, 27, 29]] + assert Abs(polytope_integrate(octahedron_five_compound, expr)) - 0.353553\ + < 1e-6 + + cube_five_compound = [[(-0.1624598481164531631, -0.5, -0.6881909602355867691), + (-0.1624598481164531631, 0.5, -0.6881909602355867691), + (0.1624598481164531631, -0.5, 0.68819096023558676910), + (0.1624598481164531631, 0.5, 0.68819096023558676910), + (-0.52573111211913359231, 0, -0.6881909602355867691), + (0.52573111211913359231, 0, 0.68819096023558676910), + (-0.26286555605956679615, -0.8090169943749474241, + -0.1624598481164531631), + (-0.26286555605956679615, 0.8090169943749474241, + -0.1624598481164531631), + (0.26286555605956680301, -0.8090169943749474241, + 0.1624598481164531631), + (0.26286555605956680301, 0.8090169943749474241, + 0.1624598481164531631), + (-0.42532540417601993887, -0.3090169943749474241, + 0.68819096023558676910), + (-0.42532540417601993887, 0.30901699437494742410, + 0.68819096023558676910), + (0.42532540417601996609, -0.3090169943749474241, + -0.6881909602355867691), + (0.42532540417601996609, 0.30901699437494742410, + -0.6881909602355867691), + (-0.6881909602355867691, -0.5, 0.1624598481164531631), + (-0.6881909602355867691, 0.5, 0.1624598481164531631), + (0.68819096023558676910, -0.5, -0.1624598481164531631), + (0.68819096023558676910, 0.5, -0.1624598481164531631), + (-0.85065080835203998877, 0, -0.1624598481164531631), + (0.85065080835203993218, 0, 0.1624598481164531631)], + [18, 10, 3, 7], [13, 19, 8, 0], [18, 0, 8, 10], + [3, 19, 13, 7], [18, 7, 13, 0], [8, 19, 3, 10], + [6, 2, 11, 18], [1, 9, 19, 12], [11, 9, 1, 18], + [6, 12, 19, 2], [1, 12, 6, 18], [11, 2, 19, 9], + [4, 14, 11, 7], [17, 5, 8, 12], [4, 12, 8, 14], + [11, 5, 17, 7], [4, 7, 17, 12], [8, 5, 11, 14], + [6, 10, 15, 4], [13, 9, 5, 16], [15, 9, 13, 4], + [6, 16, 5, 10], [13, 16, 6, 4], [15, 10, 5, 9], + [14, 15, 1, 0], [16, 17, 3, 2], [14, 2, 3, 15], + [1, 17, 16, 0], [14, 0, 16, 2], [3, 17, 1, 15]] + assert Abs(polytope_integrate(cube_five_compound, expr) - 1.25) < 1e-12 + + echidnahedron = [[(0, 0, -2.4898982848827801995), + (0, 0, 2.4898982848827802734), + (0, -4.2360679774997896964, -2.4898982848827801995), + (0, -4.2360679774997896964, 2.4898982848827802734), + (0, 4.2360679774997896964, -2.4898982848827801995), + (0, 4.2360679774997896964, 2.4898982848827802734), + (-4.0287400534704067567, -1.3090169943749474241, -2.4898982848827801995), + (-4.0287400534704067567, -1.3090169943749474241, 2.4898982848827802734), + (-4.0287400534704067567, 
1.3090169943749474241, -2.4898982848827801995), + (-4.0287400534704067567, 1.3090169943749474241, 2.4898982848827802734), + (4.0287400534704069747, -1.3090169943749474241, -2.4898982848827801995), + (4.0287400534704069747, -1.3090169943749474241, 2.4898982848827802734), + (4.0287400534704069747, 1.3090169943749474241, -2.4898982848827801995), + (4.0287400534704069747, 1.3090169943749474241, 2.4898982848827802734), + (-2.4898982848827801995, -3.4270509831248422723, -2.4898982848827801995), + (-2.4898982848827801995, -3.4270509831248422723, 2.4898982848827802734), + (-2.4898982848827801995, 3.4270509831248422723, -2.4898982848827801995), + (-2.4898982848827801995, 3.4270509831248422723, 2.4898982848827802734), + (2.4898982848827802734, -3.4270509831248422723, -2.4898982848827801995), + (2.4898982848827802734, -3.4270509831248422723, 2.4898982848827802734), + (2.4898982848827802734, 3.4270509831248422723, -2.4898982848827801995), + (2.4898982848827802734, 3.4270509831248422723, 2.4898982848827802734), + (-4.7169310137059934362, -0.8090169943749474241, -1.1135163644116066184), + (-4.7169310137059934362, 0.8090169943749474241, -1.1135163644116066184), + (4.7169310137059937438, -0.8090169943749474241, 1.11351636441160673519), + (4.7169310137059937438, 0.8090169943749474241, 1.11351636441160673519), + (-4.2916056095299737777, -2.1180339887498948482, 1.11351636441160673519), + (-4.2916056095299737777, 2.1180339887498948482, 1.11351636441160673519), + (4.2916056095299737777, -2.1180339887498948482, -1.1135163644116066184), + (4.2916056095299737777, 2.1180339887498948482, -1.1135163644116066184), + (-3.6034146492943870399, 0, -3.3405490932348205213), + (3.6034146492943870399, 0, 3.3405490932348202056), + (-3.3405490932348205213, -3.4270509831248422723, 1.11351636441160673519), + (-3.3405490932348205213, 3.4270509831248422723, 1.11351636441160673519), + (3.3405490932348202056, -3.4270509831248422723, -1.1135163644116066184), + (3.3405490932348202056, 3.4270509831248422723, -1.1135163644116066184), + (-2.9152236890588002395, -2.1180339887498948482, 3.3405490932348202056), + (-2.9152236890588002395, 2.1180339887498948482, 3.3405490932348202056), + (2.9152236890588002395, -2.1180339887498948482, -3.3405490932348205213), + (2.9152236890588002395, 2.1180339887498948482, -3.3405490932348205213), + (-2.2270327288232132368, 0, -1.1135163644116066184), + (-2.2270327288232132368, -4.2360679774997896964, -1.1135163644116066184), + (-2.2270327288232132368, 4.2360679774997896964, -1.1135163644116066184), + (2.2270327288232134704, 0, 1.11351636441160673519), + (2.2270327288232134704, -4.2360679774997896964, 1.11351636441160673519), + (2.2270327288232134704, 4.2360679774997896964, 1.11351636441160673519), + (-1.8017073246471935200, -1.3090169943749474241, 1.11351636441160673519), + (-1.8017073246471935200, 1.3090169943749474241, 1.11351636441160673519), + (1.8017073246471935043, -1.3090169943749474241, -1.1135163644116066184), + (1.8017073246471935043, 1.3090169943749474241, -1.1135163644116066184), + (-1.3763819204711735382, 0, -4.7169310137059934362), + (-1.3763819204711735382, 0, 0.26286555605956679615), + (1.37638192047117353821, 0, 4.7169310137059937438), + (1.37638192047117353821, 0, -0.26286555605956679615), + (-1.1135163644116066184, -3.4270509831248422723, -3.3405490932348205213), + (-1.1135163644116066184, -0.8090169943749474241, 4.7169310137059937438), + (-1.1135163644116066184, -0.8090169943749474241, -0.26286555605956679615), + (-1.1135163644116066184, 0.8090169943749474241, 4.7169310137059937438), + 
(-1.1135163644116066184, 0.8090169943749474241, -0.26286555605956679615), + (-1.1135163644116066184, 3.4270509831248422723, -3.3405490932348205213), + (1.11351636441160673519, -3.4270509831248422723, 3.3405490932348202056), + (1.11351636441160673519, -0.8090169943749474241, -4.7169310137059934362), + (1.11351636441160673519, -0.8090169943749474241, 0.26286555605956679615), + (1.11351636441160673519, 0.8090169943749474241, -4.7169310137059934362), + (1.11351636441160673519, 0.8090169943749474241, 0.26286555605956679615), + (1.11351636441160673519, 3.4270509831248422723, 3.3405490932348202056), + (-0.85065080835203998877, 0, 1.11351636441160673519), + (0.85065080835203993218, 0, -1.1135163644116066184), + (-0.6881909602355867691, -0.5, -1.1135163644116066184), + (-0.6881909602355867691, 0.5, -1.1135163644116066184), + (-0.6881909602355867691, -4.7360679774997896964, -1.1135163644116066184), + (-0.6881909602355867691, -2.1180339887498948482, -1.1135163644116066184), + (-0.6881909602355867691, 2.1180339887498948482, -1.1135163644116066184), + (-0.6881909602355867691, 4.7360679774997896964, -1.1135163644116066184), + (0.68819096023558676910, -0.5, 1.11351636441160673519), + (0.68819096023558676910, 0.5, 1.11351636441160673519), + (0.68819096023558676910, -4.7360679774997896964, 1.11351636441160673519), + (0.68819096023558676910, -2.1180339887498948482, 1.11351636441160673519), + (0.68819096023558676910, 2.1180339887498948482, 1.11351636441160673519), + (0.68819096023558676910, 4.7360679774997896964, 1.11351636441160673519), + (-0.42532540417601993887, -1.3090169943749474241, -4.7169310137059934362), + (-0.42532540417601993887, -1.3090169943749474241, 0.26286555605956679615), + (-0.42532540417601993887, 1.3090169943749474241, -4.7169310137059934362), + (-0.42532540417601993887, 1.3090169943749474241, 0.26286555605956679615), + (-0.26286555605956679615, -0.8090169943749474241, 1.11351636441160673519), + (-0.26286555605956679615, 0.8090169943749474241, 1.11351636441160673519), + (0.26286555605956679615, -0.8090169943749474241, -1.1135163644116066184), + (0.26286555605956679615, 0.8090169943749474241, -1.1135163644116066184), + (0.42532540417601996609, -1.3090169943749474241, 4.7169310137059937438), + (0.42532540417601996609, -1.3090169943749474241, -0.26286555605956679615), + (0.42532540417601996609, 1.3090169943749474241, 4.7169310137059937438), + (0.42532540417601996609, 1.3090169943749474241, -0.26286555605956679615)], + [9, 66, 47], [44, 62, 77], [20, 91, 49], [33, 47, 83], + [3, 77, 84], [12, 49, 53], [36, 84, 66], [28, 53, 62], + [73, 83, 91], [15, 84, 46], [25, 64, 43], [16, 58, 72], + [26, 46, 51], [11, 43, 74], [4, 72, 91], [60, 74, 84], + [35, 91, 64], [23, 51, 58], [19, 74, 77], [79, 83, 78], + [6, 56, 40], [76, 77, 81], [21, 78, 75], [8, 40, 58], + [31, 75, 74], [42, 58, 83], [41, 81, 56], [13, 75, 43], + [27, 51, 47], [2, 89, 71], [24, 43, 62], [17, 47, 85], + [14, 71, 56], [65, 85, 75], [22, 56, 51], [34, 62, 89], + [5, 85, 78], [32, 81, 46], [10, 53, 48], [45, 78, 64], + [7, 46, 66], [18, 48, 89], [37, 66, 85], [70, 89, 81], + [29, 64, 53], [88, 74, 1], [38, 67, 48], [42, 83, 72], + [57, 1, 85], [34, 48, 62], [59, 72, 87], [19, 62, 74], + [63, 87, 67], [17, 85, 83], [52, 75, 1], [39, 87, 49], + [22, 51, 40], [55, 1, 66], [29, 49, 64], [30, 40, 69], + [13, 64, 75], [82, 69, 87], [7, 66, 51], [90, 85, 1], + [59, 69, 72], [70, 81, 71], [88, 1, 84], [73, 72, 83], + [54, 71, 68], [5, 83, 85], [50, 68, 69], [3, 84, 81], + [57, 66, 1], [30, 68, 40], [28, 62, 48], [52, 1, 74], + [23, 40, 
51], [38, 48, 86], [9, 51, 66], [80, 86, 68], + [11, 74, 62], [55, 84, 1], [54, 86, 71], [35, 64, 49], + [90, 1, 75], [41, 71, 81], [39, 49, 67], [15, 81, 84], + [61, 67, 86], [21, 75, 64], [24, 53, 43], [50, 69, 0], + [37, 85, 47], [31, 43, 75], [61, 0, 67], [27, 47, 58], + [10, 67, 53], [8, 58, 69], [90, 75, 85], [45, 91, 78], + [80, 68, 0], [36, 66, 46], [65, 78, 85], [63, 0, 87], + [32, 46, 56], [20, 87, 91], [14, 56, 68], [57, 85, 66], + [33, 58, 47], [61, 86, 0], [60, 84, 77], [37, 47, 66], + [82, 0, 69], [44, 77, 89], [16, 69, 58], [18, 89, 86], + [55, 66, 84], [26, 56, 46], [63, 67, 0], [31, 74, 43], + [36, 46, 84], [50, 0, 68], [25, 43, 53], [6, 68, 56], + [12, 53, 67], [88, 84, 74], [76, 89, 77], [82, 87, 0], + [65, 75, 78], [60, 77, 74], [80, 0, 86], [79, 78, 91], + [2, 86, 89], [4, 91, 87], [52, 74, 75], [21, 64, 78], + [18, 86, 48], [23, 58, 40], [5, 78, 83], [28, 48, 53], + [6, 40, 68], [25, 53, 64], [54, 68, 86], [33, 83, 58], + [17, 83, 47], [12, 67, 49], [41, 56, 71], [9, 47, 51], + [35, 49, 91], [2, 71, 86], [79, 91, 83], [38, 86, 67], + [26, 51, 56], [7, 51, 46], [4, 87, 72], [34, 89, 48], + [15, 46, 81], [42, 72, 58], [10, 48, 67], [27, 58, 51], + [39, 67, 87], [76, 81, 89], [3, 81, 77], [8, 69, 40], + [29, 53, 49], [19, 77, 62], [22, 40, 56], [20, 49, 87], + [32, 56, 81], [59, 87, 69], [24, 62, 53], [11, 62, 43], + [14, 68, 71], [73, 91, 72], [13, 43, 64], [70, 71, 89], + [16, 72, 69], [44, 89, 62], [30, 69, 68], [45, 64, 91]] + # Actual volume is : 51.405764746872634 + assert Abs(polytope_integrate(echidnahedron, 1) - 51.4057647468726) < 1e-12 + assert Abs(polytope_integrate(echidnahedron, expr) - 253.569603474519) <\ + 1e-12 + + # Tests for many polynomials with maximum degree given(2D case). + assert polytope_integrate(cube2, [x**2, y*z], max_degree=2) == \ + {y * z: 3125 / S(4), x ** 2: 3125 / S(3)} + + assert polytope_integrate(cube2, max_degree=2) == \ + {1: 125, x: 625 / S(2), x * z: 3125 / S(4), y: 625 / S(2), + y * z: 3125 / S(4), z ** 2: 3125 / S(3), y ** 2: 3125 / S(3), + z: 625 / S(2), x * y: 3125 / S(4), x ** 2: 3125 / S(3)} + +def test_point_sort(): + assert point_sort([Point(0, 0), Point(1, 0), Point(1, 1)]) == \ + [Point2D(1, 1), Point2D(1, 0), Point2D(0, 0)] + + fig6 = Polygon((0, 0), (1, 0), (1, 1)) + assert polytope_integrate(fig6, x*y) == Rational(-1, 8) + assert polytope_integrate(fig6, x*y, clockwise = True) == Rational(1, 8) + + +def test_polytopes_intersecting_sides(): + fig5 = Polygon(Point(-4.165, -0.832), Point(-3.668, 1.568), + Point(-3.266, 1.279), Point(-1.090, -2.080), + Point(3.313, -0.683), Point(3.033, -4.845), + Point(-4.395, 4.840), Point(-1.007, -3.328)) + assert polytope_integrate(fig5, x**2 + x*y + y**2) ==\ + S(1633405224899363)/(24*10**12) + + fig6 = Polygon(Point(-3.018, -4.473), Point(-0.103, 2.378), + Point(-1.605, -2.308), Point(4.516, -0.771), + Point(4.203, 0.478)) + assert polytope_integrate(fig6, x**2 + x*y + y**2) ==\ + S(88161333955921)/(3*10**12) + + +def test_max_degree(): + polygon = Polygon((0, 0), (0, 1), (1, 1), (1, 0)) + polys = [1, x, y, x*y, x**2*y, x*y**2] + assert polytope_integrate(polygon, polys, max_degree=3) == \ + {1: 1, x: S.Half, y: S.Half, x*y: Rational(1, 4), x**2*y: Rational(1, 6), x*y**2: Rational(1, 6)} + assert polytope_integrate(polygon, polys, max_degree=2) == \ + {1: 1, x: S.Half, y: S.Half, x*y: Rational(1, 4)} + assert polytope_integrate(polygon, polys, max_degree=1) == \ + {1: 1, x: S.Half, y: S.Half} + + +def test_main_integrate3d(): + cube = [[(0, 0, 0), (0, 0, 5), (0, 5, 0), (0, 5, 5), 
(5, 0, 0),\ + (5, 0, 5), (5, 5, 0), (5, 5, 5)],\ + [2, 6, 7, 3], [3, 7, 5, 1], [7, 6, 4, 5], [1, 5, 4, 0],\ + [3, 1, 0, 2], [0, 4, 6, 2]] + vertices = cube[0] + faces = cube[1:] + hp_params = hyperplane_parameters(faces, vertices) + assert main_integrate3d(1, faces, vertices, hp_params) == -125 + assert main_integrate3d(1, faces, vertices, hp_params, max_degree=1) == \ + {1: -125, y: Rational(-625, 2), z: Rational(-625, 2), x: Rational(-625, 2)} + + +def test_main_integrate(): + triangle = Polygon((0, 3), (5, 3), (1, 1)) + facets = triangle.sides + hp_params = hyperplane_parameters(triangle) + assert main_integrate(x**2 + y**2, facets, hp_params) == Rational(325, 6) + assert main_integrate(x**2 + y**2, facets, hp_params, max_degree=1) == \ + {0: 0, 1: 5, y: Rational(35, 3), x: 10} + + +def test_polygon_integrate(): + cube = [[(0, 0, 0), (0, 0, 5), (0, 5, 0), (0, 5, 5), (5, 0, 0),\ + (5, 0, 5), (5, 5, 0), (5, 5, 5)],\ + [2, 6, 7, 3], [3, 7, 5, 1], [7, 6, 4, 5], [1, 5, 4, 0],\ + [3, 1, 0, 2], [0, 4, 6, 2]] + facet = cube[1] + facets = cube[1:] + vertices = cube[0] + assert polygon_integrate(facet, [(0, 1, 0), 5], 0, facets, vertices, 1, 0) == -25 + + +def test_distance_to_side(): + point = (0, 0, 0) + assert distance_to_side(point, [(0, 0, 1), (0, 1, 0)], (1, 0, 0)) == -sqrt(2)/2 + + +def test_lineseg_integrate(): + polygon = [(0, 5, 0), (5, 5, 0), (5, 5, 5), (0, 5, 5)] + line_seg = [(0, 5, 0), (5, 5, 0)] + assert lineseg_integrate(polygon, 0, line_seg, 1, 0) == 5 + assert lineseg_integrate(polygon, 0, line_seg, 0, 0) == 0 + + +def test_integration_reduction(): + triangle = Polygon(Point(0, 3), Point(5, 3), Point(1, 1)) + facets = triangle.sides + a, b = hyperplane_parameters(triangle)[0] + assert integration_reduction(facets, 0, a, b, 1, (x, y), 0) == 5 + assert integration_reduction(facets, 0, a, b, 0, (x, y), 0) == 0 + + +def test_integration_reduction_dynamic(): + triangle = Polygon(Point(0, 3), Point(5, 3), Point(1, 1)) + facets = triangle.sides + a, b = hyperplane_parameters(triangle)[0] + x0 = facets[0].points[0] + monomial_values = [[0, 0, 0, 0], [1, 0, 0, 5],\ + [y, 0, 1, 15], [x, 1, 0, None]] + + assert integration_reduction_dynamic(facets, 0, a, b, x, 1, (x, y), 1,\ + 0, 1, x0, monomial_values, 3) == Rational(25, 2) + assert integration_reduction_dynamic(facets, 0, a, b, 0, 1, (x, y), 1,\ + 0, 1, x0, monomial_values, 3) == 0 + + +def test_is_vertex(): + assert is_vertex(2) is False + assert is_vertex((2, 3)) is True + assert is_vertex(Point(2, 3)) is True + assert is_vertex((2, 3, 4)) is True + assert is_vertex((2, 3, 4, 5)) is False + + +def test_issue_19234(): + polygon = Polygon(Point(0, 0), Point(0, 1), Point(1, 1), Point(1, 0)) + polys = [ 1, x, y, x*y, x**2*y, x*y**2] + assert polytope_integrate(polygon, polys) == \ + {1: 1, x: S.Half, y: S.Half, x*y: Rational(1, 4), x**2*y: Rational(1, 6), x*y**2: Rational(1, 6)} + polys = [ 1, x, y, x*y, 3 + x**2*y, x + x*y**2] + assert polytope_integrate(polygon, polys) == \ + {1: 1, x: S.Half, y: S.Half, x*y: Rational(1, 4), x**2*y + 3: Rational(19, 6), x*y**2 + x: Rational(2, 3)} diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_meijerint.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_meijerint.py new file mode 100644 index 0000000000000000000000000000000000000000..f23975e65c03dd2c0c34b80a048ca58cac46ed7b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_meijerint.py @@ -0,0 +1,764 @@ +from sympy.core.function import expand_func +from 
sympy.core.numbers import (I, Rational, oo, pi) +from sympy.core.singleton import S +from sympy.core.sorting import default_sort_key +from sympy.functions.elementary.complexes import Abs, arg, re, unpolarify +from sympy.functions.elementary.exponential import (exp, exp_polar, log) +from sympy.functions.elementary.hyperbolic import cosh, acosh +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.functions.elementary.piecewise import Piecewise, piecewise_fold +from sympy.functions.elementary.trigonometric import (cos, sin, sinc, asin) +from sympy.functions.special.error_functions import (erf, erfc) +from sympy.functions.special.gamma_functions import (gamma, polygamma) +from sympy.functions.special.hyper import (hyper, meijerg) +from sympy.integrals.integrals import (Integral, integrate) +from sympy.simplify.hyperexpand import hyperexpand +from sympy.simplify.simplify import simplify +from sympy.integrals.meijerint import (_rewrite_single, _rewrite1, + meijerint_indefinite, _inflate_g, _create_lookup_table, + meijerint_definite, meijerint_inversion) +from sympy.testing.pytest import slow +from sympy.core.random import (verify_numerically, + random_complex_number as randcplx) +from sympy.abc import x, y, a, b, c, d, s, t, z + + +def test_rewrite_single(): + def t(expr, c, m): + e = _rewrite_single(meijerg([a], [b], [c], [d], expr), x) + assert e is not None + assert isinstance(e[0][0][2], meijerg) + assert e[0][0][2].argument.as_coeff_mul(x) == (c, (m,)) + + def tn(expr): + assert _rewrite_single(meijerg([a], [b], [c], [d], expr), x) is None + + t(x, 1, x) + t(x**2, 1, x**2) + t(x**2 + y*x**2, y + 1, x**2) + tn(x**2 + x) + tn(x**y) + + def u(expr, x): + from sympy.core.add import Add + r = _rewrite_single(expr, x) + e = Add(*[res[0]*res[2] for res in r[0]]).replace( + exp_polar, exp) # XXX Hack? + assert verify_numerically(e, expr, x) + + u(exp(-x)*sin(x), x) + + # The following has stopped working because hyperexpand changed slightly. + # It is probably not worth fixing + #u(exp(-x)*sin(x)*cos(x), x) + + # This one cannot be done numerically, since it comes out as a g-function + # of argument 4*pi + # NOTE This also tests a bug in inverse mellin transform (which used to + # turn exp(4*pi*I*t) into a factor of exp(4*pi*I)**t instead of + # exp_polar). 
+ #u(exp(x)*sin(x), x) + assert _rewrite_single(exp(x)*sin(x), x) == \ + ([(-sqrt(2)/(2*sqrt(pi)), 0, + meijerg(((Rational(-1, 2), 0, Rational(1, 4), S.Half, Rational(3, 4)), (1,)), + ((), (Rational(-1, 2), 0)), 64*exp_polar(-4*I*pi)/x**4))], True) + + +def test_rewrite1(): + assert _rewrite1(x**3*meijerg([a], [b], [c], [d], x**2 + y*x**2)*5, x) == \ + (5, x**3, [(1, 0, meijerg([a], [b], [c], [d], x**2*(y + 1)))], True) + + +def test_meijerint_indefinite_numerically(): + def t(fac, arg): + g = meijerg([a], [b], [c], [d], arg)*fac + subs = {a: randcplx()/10, b: randcplx()/10 + I, + c: randcplx(), d: randcplx()} + integral = meijerint_indefinite(g, x) + assert integral is not None + assert verify_numerically(g.subs(subs), integral.diff(x).subs(subs), x) + t(1, x) + t(2, x) + t(1, 2*x) + t(1, x**2) + t(5, x**S('3/2')) + t(x**3, x) + t(3*x**S('3/2'), 4*x**S('7/3')) + + +def test_meijerint_definite(): + v, b = meijerint_definite(x, x, 0, 0) + assert v.is_zero and b is True + v, b = meijerint_definite(x, x, oo, oo) + assert v.is_zero and b is True + + +def test_inflate(): + subs = {a: randcplx()/10, b: randcplx()/10 + I, c: randcplx(), + d: randcplx(), y: randcplx()/10} + + def t(a, b, arg, n): + from sympy.core.mul import Mul + m1 = meijerg(a, b, arg) + m2 = Mul(*_inflate_g(m1, n)) + # NOTE: (the random number)**9 must still be on the principal sheet. + # Thus make b&d small to create random numbers of small imaginary part. + return verify_numerically(m1.subs(subs), m2.subs(subs), x, b=0.1, d=-0.1) + assert t([[a], [b]], [[c], [d]], x, 3) + assert t([[a, y], [b]], [[c], [d]], x, 3) + assert t([[a], [b]], [[c, y], [d]], 2*x**3, 3) + + +def test_recursive(): + from sympy.core.symbol import symbols + a, b, c = symbols('a b c', positive=True) + r = exp(-(x - a)**2)*exp(-(x - b)**2) + e = integrate(r, (x, 0, oo), meijerg=True) + assert simplify(e.expand()) == ( + sqrt(2)*sqrt(pi)*( + (erf(sqrt(2)*(a + b)/2) + 1)*exp(-a**2/2 + a*b - b**2/2))/4) + e = integrate(exp(-(x - a)**2)*exp(-(x - b)**2)*exp(c*x), (x, 0, oo), meijerg=True) + assert simplify(e) == ( + sqrt(2)*sqrt(pi)*(erf(sqrt(2)*(2*a + 2*b + c)/4) + 1)*exp(-a**2 - b**2 + + (2*a + 2*b + c)**2/8)/4) + assert simplify(integrate(exp(-(x - a - b - c)**2), (x, 0, oo), meijerg=True)) == \ + sqrt(pi)/2*(1 + erf(a + b + c)) + assert simplify(integrate(exp(-(x + a + b + c)**2), (x, 0, oo), meijerg=True)) == \ + sqrt(pi)/2*(1 - erf(a + b + c)) + + +@slow +def test_meijerint(): + from sympy.core.function import expand + from sympy.core.symbol import symbols + s, t, mu = symbols('s t mu', real=True) + assert integrate(meijerg([], [], [0], [], s*t) + *meijerg([], [], [mu/2], [-mu/2], t**2/4), + (t, 0, oo)).is_Piecewise + s = symbols('s', positive=True) + assert integrate(x**s*meijerg([[], []], [[0], []], x), (x, 0, oo)) == \ + gamma(s + 1) + assert integrate(x**s*meijerg([[], []], [[0], []], x), (x, 0, oo), + meijerg=True) == gamma(s + 1) + assert isinstance(integrate(x**s*meijerg([[], []], [[0], []], x), + (x, 0, oo), meijerg=False), + Integral) + + assert meijerint_indefinite(exp(x), x) == exp(x) + + # TODO what simplifications should be done automatically? + # This tests "extra case" for antecedents_1. + a, b = symbols('a b', positive=True) + assert simplify(meijerint_definite(x**a, x, 0, b)[0]) == \ + b**(a + 1)/(a + 1) + + # This tests various conditions and expansions: + assert meijerint_definite((x + 1)**3*exp(-x), x, 0, oo) == (16, True) + + # Again, how about simplifications? 
+ sigma, mu = symbols('sigma mu', positive=True) + i, c = meijerint_definite(exp(-((x - mu)/(2*sigma))**2), x, 0, oo) + assert simplify(i) == sqrt(pi)*sigma*(2 - erfc(mu/(2*sigma))) + assert c == True + + i, _ = meijerint_definite(exp(-mu*x)*exp(sigma*x), x, 0, oo) + # TODO it would be nice to test the condition + assert simplify(i) == 1/(mu - sigma) + + # Test substitutions to change limits + assert meijerint_definite(exp(x), x, -oo, 2) == (exp(2), True) + # Note: causes a NaN in _check_antecedents + assert expand(meijerint_definite(exp(x), x, 0, I)[0]) == exp(I) - 1 + assert expand(meijerint_definite(exp(-x), x, 0, x)[0]) == \ + 1 - exp(-exp(I*arg(x))*abs(x)) + + # Test -oo to oo + assert meijerint_definite(exp(-x**2), x, -oo, oo) == (sqrt(pi), True) + assert meijerint_definite(exp(-abs(x)), x, -oo, oo) == (2, True) + assert meijerint_definite(exp(-(2*x - 3)**2), x, -oo, oo) == \ + (sqrt(pi)/2, True) + assert meijerint_definite(exp(-abs(2*x - 3)), x, -oo, oo) == (1, True) + assert meijerint_definite(exp(-((x - mu)/sigma)**2/2)/sqrt(2*pi*sigma**2), + x, -oo, oo) == (1, True) + assert meijerint_definite(sinc(x)**2, x, -oo, oo) == (pi, True) + + # Test one of the extra conditions for 2 g-functinos + assert meijerint_definite(exp(-x)*sin(x), x, 0, oo) == (S.Half, True) + + # Test a bug + def res(n): + return (1/(1 + x**2)).diff(x, n).subs(x, 1)*(-1)**n + for n in range(6): + assert integrate(exp(-x)*sin(x)*x**n, (x, 0, oo), meijerg=True) == \ + res(n) + + # This used to test trigexpand... now it is done by linear substitution + assert simplify(integrate(exp(-x)*sin(x + a), (x, 0, oo), meijerg=True) + ) == sqrt(2)*sin(a + pi/4)/2 + + # Test the condition 14 from prudnikov. + # (This is besselj*besselj in disguise, to stop the product from being + # recognised in the tables.) + a, b, s = symbols('a b s') + assert meijerint_definite(meijerg([], [], [a/2], [-a/2], x/4) + *meijerg([], [], [b/2], [-b/2], x/4)*x**(s - 1), x, 0, oo + ) == ( + (4*2**(2*s - 2)*gamma(-2*s + 1)*gamma(a/2 + b/2 + s) + /(gamma(-a/2 + b/2 - s + 1)*gamma(a/2 - b/2 - s + 1) + *gamma(a/2 + b/2 - s + 1)), + (re(s) < 1) & (re(s) < S(1)/2) & (re(a)/2 + re(b)/2 + re(s) > 0))) + + # test a bug + assert integrate(sin(x**a)*sin(x**b), (x, 0, oo), meijerg=True) == \ + Integral(sin(x**a)*sin(x**b), (x, 0, oo)) + + # test better hyperexpand + assert integrate(exp(-x**2)*log(x), (x, 0, oo), meijerg=True) == \ + (sqrt(pi)*polygamma(0, S.Half)/4).expand() + + # Test hyperexpand bug. 
+ from sympy.functions.special.gamma_functions import lowergamma + n = symbols('n', integer=True) + assert simplify(integrate(exp(-x)*x**n, x, meijerg=True)) == \ + lowergamma(n + 1, x) + + # Test a bug with argument 1/x + alpha = symbols('alpha', positive=True) + assert meijerint_definite((2 - x)**alpha*sin(alpha/x), x, 0, 2) == \ + (sqrt(pi)*alpha*gamma(alpha + 1)*meijerg(((), (alpha/2 + S.Half, + alpha/2 + 1)), ((0, 0, S.Half), (Rational(-1, 2),)), alpha**2/16)/4, True) + + # test a bug related to 3016 + a, s = symbols('a s', positive=True) + assert simplify(integrate(x**s*exp(-a*x**2), (x, -oo, oo))) == \ + a**(-s/2 - S.Half)*((-1)**s + 1)*gamma(s/2 + S.Half)/2 + + +def test_bessel(): + from sympy.functions.special.bessel import (besseli, besselj) + assert simplify(integrate(besselj(a, z)*besselj(b, z)/z, (z, 0, oo), + meijerg=True, conds='none')) == \ + 2*sin(pi*(a/2 - b/2))/(pi*(a - b)*(a + b)) + assert simplify(integrate(besselj(a, z)*besselj(a, z)/z, (z, 0, oo), + meijerg=True, conds='none')) == 1/(2*a) + + # TODO more orthogonality integrals + + assert simplify(integrate(sin(z*x)*(x**2 - 1)**(-(y + S.Half)), + (x, 1, oo), meijerg=True, conds='none') + *2/((z/2)**y*sqrt(pi)*gamma(S.Half - y))) == \ + besselj(y, z) + + # Werner Rosenheinrich + # SOME INDEFINITE INTEGRALS OF BESSEL FUNCTIONS + + assert integrate(x*besselj(0, x), x, meijerg=True) == x*besselj(1, x) + assert integrate(x*besseli(0, x), x, meijerg=True) == x*besseli(1, x) + # TODO can do higher powers, but come out as high order ... should they be + # reduced to order 0, 1? + assert integrate(besselj(1, x), x, meijerg=True) == -besselj(0, x) + assert integrate(besselj(1, x)**2/x, x, meijerg=True) == \ + -(besselj(0, x)**2 + besselj(1, x)**2)/2 + # TODO more besseli when tables are extended or recursive mellin works + assert integrate(besselj(0, x)**2/x**2, x, meijerg=True) == \ + -2*x*besselj(0, x)**2 - 2*x*besselj(1, x)**2 \ + + 2*besselj(0, x)*besselj(1, x) - besselj(0, x)**2/x + assert integrate(besselj(0, x)*besselj(1, x), x, meijerg=True) == \ + -besselj(0, x)**2/2 + assert integrate(x**2*besselj(0, x)*besselj(1, x), x, meijerg=True) == \ + x**2*besselj(1, x)**2/2 + assert integrate(besselj(0, x)*besselj(1, x)/x, x, meijerg=True) == \ + (x*besselj(0, x)**2 + x*besselj(1, x)**2 - + besselj(0, x)*besselj(1, x)) + # TODO how does besselj(0, a*x)*besselj(0, b*x) work? + # TODO how does besselj(0, x)**2*besselj(1, x)**2 work? + # TODO sin(x)*besselj(0, x) etc come out a mess + # TODO can x*log(x)*besselj(0, x) be done? + # TODO how does besselj(1, x)*besselj(0, x+a) work? + # TODO more indefinite integrals when struve functions etc are implemented + + # test a substitution + assert integrate(besselj(1, x**2)*x, x, meijerg=True) == \ + -besselj(0, x**2)/2 + + +def test_inversion(): + from sympy.functions.special.bessel import besselj + from sympy.functions.special.delta_functions import Heaviside + + def inv(f): + return piecewise_fold(meijerint_inversion(f, s, t)) + assert inv(1/(s**2 + 1)) == sin(t)*Heaviside(t) + assert inv(s/(s**2 + 1)) == cos(t)*Heaviside(t) + assert inv(exp(-s)/s) == Heaviside(t - 1) + assert inv(1/sqrt(1 + s**2)) == besselj(0, t)*Heaviside(t) + + # Test some antcedents checking. 
+ assert meijerint_inversion(sqrt(s)/sqrt(1 + s**2), s, t) is None + assert inv(exp(s**2)) is None + assert meijerint_inversion(exp(-s**2), s, t) is None + + +def test_inversion_conditional_output(): + from sympy.core.symbol import Symbol + from sympy.integrals.transforms import InverseLaplaceTransform + + a = Symbol('a', positive=True) + F = sqrt(pi/a)*exp(-2*sqrt(a)*sqrt(s)) + f = meijerint_inversion(F, s, t) + assert not f.is_Piecewise + + b = Symbol('b', real=True) + F = F.subs(a, b) + f2 = meijerint_inversion(F, s, t) + assert f2.is_Piecewise + # first piece is same as f + assert f2.args[0][0] == f.subs(a, b) + # last piece is an unevaluated transform + assert f2.args[-1][1] + ILT = InverseLaplaceTransform(F, s, t, None) + assert f2.args[-1][0] == ILT or f2.args[-1][0] == ILT.as_integral + + +def test_inversion_exp_real_nonreal_shift(): + from sympy.core.symbol import Symbol + from sympy.functions.special.delta_functions import DiracDelta + r = Symbol('r', real=True) + c = Symbol('c', extended_real=False) + a = 1 + 2*I + z = Symbol('z') + assert not meijerint_inversion(exp(r*s), s, t).is_Piecewise + assert meijerint_inversion(exp(a*s), s, t) is None + assert meijerint_inversion(exp(c*s), s, t) is None + f = meijerint_inversion(exp(z*s), s, t) + assert f.is_Piecewise + assert isinstance(f.args[0][0], DiracDelta) + + +@slow +def test_lookup_table(): + from sympy.core.random import uniform, randrange + from sympy.core.add import Add + from sympy.integrals.meijerint import z as z_dummy + table = {} + _create_lookup_table(table) + for _, l in table.items(): + for formula, terms, cond, hint in sorted(l, key=default_sort_key): + subs = {} + for ai in list(formula.free_symbols) + [z_dummy]: + if hasattr(ai, 'properties') and ai.properties: + # these Wilds match positive integers + subs[ai] = randrange(1, 10) + else: + subs[ai] = uniform(1.5, 2.0) + if not isinstance(terms, list): + terms = terms(subs) + + # First test that hyperexpand can do this. + expanded = [hyperexpand(g) for (_, g) in terms] + assert all(x.is_Piecewise or not x.has(meijerg) for x in expanded) + + # Now test that the meijer g-function is indeed as advertised. 
+ expanded = Add(*[f*x for (f, x) in terms]) + a, b = formula.n(subs=subs), expanded.n(subs=subs) + r = min(abs(a), abs(b)) + if r < 1: + assert abs(a - b).n() <= 1e-10 + else: + assert (abs(a - b)/r).n() <= 1e-10 + + +def test_branch_bug(): + from sympy.functions.special.gamma_functions import lowergamma + from sympy.simplify.powsimp import powdenest + # TODO gammasimp cannot prove that the factor is unity + assert powdenest(integrate(erf(x**3), x, meijerg=True).diff(x), + polar=True) == 2*erf(x**3)*gamma(Rational(2, 3))/3/gamma(Rational(5, 3)) + assert integrate(erf(x**3), x, meijerg=True) == \ + 2*x*erf(x**3)*gamma(Rational(2, 3))/(3*gamma(Rational(5, 3))) \ + - 2*gamma(Rational(2, 3))*lowergamma(Rational(2, 3), x**6)/(3*sqrt(pi)*gamma(Rational(5, 3))) + + +def test_linear_subs(): + from sympy.functions.special.bessel import besselj + assert integrate(sin(x - 1), x, meijerg=True) == -cos(1 - x) + assert integrate(besselj(1, x - 1), x, meijerg=True) == -besselj(0, 1 - x) + + +@slow +def test_probability(): + # various integrals from probability theory + from sympy.core.function import expand_mul + from sympy.core.symbol import (Symbol, symbols) + from sympy.simplify.gammasimp import gammasimp + from sympy.simplify.powsimp import powsimp + mu1, mu2 = symbols('mu1 mu2', nonzero=True) + sigma1, sigma2 = symbols('sigma1 sigma2', positive=True) + rate = Symbol('lambda', positive=True) + + def normal(x, mu, sigma): + return 1/sqrt(2*pi*sigma**2)*exp(-(x - mu)**2/2/sigma**2) + + def exponential(x, rate): + return rate*exp(-rate*x) + + assert integrate(normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) == 1 + assert integrate(x*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) == \ + mu1 + assert integrate(x**2*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) \ + == mu1**2 + sigma1**2 + assert integrate(x**3*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) \ + == mu1**3 + 3*mu1*sigma1**2 + assert integrate(normal(x, mu1, sigma1)*normal(y, mu2, sigma2), + (x, -oo, oo), (y, -oo, oo), meijerg=True) == 1 + assert integrate(x*normal(x, mu1, sigma1)*normal(y, mu2, sigma2), + (x, -oo, oo), (y, -oo, oo), meijerg=True) == mu1 + assert integrate(y*normal(x, mu1, sigma1)*normal(y, mu2, sigma2), + (x, -oo, oo), (y, -oo, oo), meijerg=True) == mu2 + assert integrate(x*y*normal(x, mu1, sigma1)*normal(y, mu2, sigma2), + (x, -oo, oo), (y, -oo, oo), meijerg=True) == mu1*mu2 + assert integrate((x + y + 1)*normal(x, mu1, sigma1)*normal(y, mu2, sigma2), + (x, -oo, oo), (y, -oo, oo), meijerg=True) == 1 + mu1 + mu2 + assert integrate((x + y - 1)*normal(x, mu1, sigma1)*normal(y, mu2, sigma2), + (x, -oo, oo), (y, -oo, oo), meijerg=True) == \ + -1 + mu1 + mu2 + + i = integrate(x**2*normal(x, mu1, sigma1)*normal(y, mu2, sigma2), + (x, -oo, oo), (y, -oo, oo), meijerg=True) + assert not i.has(Abs) + assert simplify(i) == mu1**2 + sigma1**2 + assert integrate(y**2*normal(x, mu1, sigma1)*normal(y, mu2, sigma2), + (x, -oo, oo), (y, -oo, oo), meijerg=True) == \ + sigma2**2 + mu2**2 + + assert integrate(exponential(x, rate), (x, 0, oo), meijerg=True) == 1 + assert integrate(x*exponential(x, rate), (x, 0, oo), meijerg=True) == \ + 1/rate + assert integrate(x**2*exponential(x, rate), (x, 0, oo), meijerg=True) == \ + 2/rate**2 + + def E(expr): + res1 = integrate(expr*exponential(x, rate)*normal(y, mu1, sigma1), + (x, 0, oo), (y, -oo, oo), meijerg=True) + res2 = integrate(expr*exponential(x, rate)*normal(y, mu1, sigma1), + (y, -oo, oo), (x, 0, oo), meijerg=True) + assert expand_mul(res1) == expand_mul(res2) + return res1 + 
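+ # Note on the helper below/above: E(expr) returns the expectation of expr for
+ # independent X ~ Exponential(rate) and Y ~ Normal(mu1, sigma1); it evaluates the
+ # double integral in both integration orders and asserts the two results agree
+ # (a Fubini-style consistency check) before returning the value.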
+ assert E(1) == 1 + assert E(x*y) == mu1/rate + assert E(x*y**2) == mu1**2/rate + sigma1**2/rate + ans = sigma1**2 + 1/rate**2 + assert simplify(E((x + y + 1)**2) - E(x + y + 1)**2) == ans + assert simplify(E((x + y - 1)**2) - E(x + y - 1)**2) == ans + assert simplify(E((x + y)**2) - E(x + y)**2) == ans + + # Beta' distribution + alpha, beta = symbols('alpha beta', positive=True) + betadist = x**(alpha - 1)*(1 + x)**(-alpha - beta)*gamma(alpha + beta) \ + /gamma(alpha)/gamma(beta) + assert integrate(betadist, (x, 0, oo), meijerg=True) == 1 + i = integrate(x*betadist, (x, 0, oo), meijerg=True, conds='separate') + assert (gammasimp(i[0]), i[1]) == (alpha/(beta - 1), 1 < beta) + j = integrate(x**2*betadist, (x, 0, oo), meijerg=True, conds='separate') + assert j[1] == (beta > 2) + assert gammasimp(j[0] - i[0]**2) == (alpha + beta - 1)*alpha \ + /(beta - 2)/(beta - 1)**2 + + # Beta distribution + # NOTE: this is evaluated using antiderivatives. It also tests that + # meijerint_indefinite returns the simplest possible answer. + a, b = symbols('a b', positive=True) + betadist = x**(a - 1)*(-x + 1)**(b - 1)*gamma(a + b)/(gamma(a)*gamma(b)) + assert simplify(integrate(betadist, (x, 0, 1), meijerg=True)) == 1 + assert simplify(integrate(x*betadist, (x, 0, 1), meijerg=True)) == \ + a/(a + b) + assert simplify(integrate(x**2*betadist, (x, 0, 1), meijerg=True)) == \ + a*(a + 1)/(a + b)/(a + b + 1) + assert simplify(integrate(x**y*betadist, (x, 0, 1), meijerg=True)) == \ + gamma(a + b)*gamma(a + y)/gamma(a)/gamma(a + b + y) + + # Chi distribution + k = Symbol('k', integer=True, positive=True) + chi = 2**(1 - k/2)*x**(k - 1)*exp(-x**2/2)/gamma(k/2) + assert powsimp(integrate(chi, (x, 0, oo), meijerg=True)) == 1 + assert simplify(integrate(x*chi, (x, 0, oo), meijerg=True)) == \ + sqrt(2)*gamma((k + 1)/2)/gamma(k/2) + assert simplify(integrate(x**2*chi, (x, 0, oo), meijerg=True)) == k + + # Chi^2 distribution + chisquared = 2**(-k/2)/gamma(k/2)*x**(k/2 - 1)*exp(-x/2) + assert powsimp(integrate(chisquared, (x, 0, oo), meijerg=True)) == 1 + assert simplify(integrate(x*chisquared, (x, 0, oo), meijerg=True)) == k + assert simplify(integrate(x**2*chisquared, (x, 0, oo), meijerg=True)) == \ + k*(k + 2) + assert gammasimp(integrate(((x - k)/sqrt(2*k))**3*chisquared, (x, 0, oo), + meijerg=True)) == 2*sqrt(2)/sqrt(k) + + # Dagum distribution + a, b, p = symbols('a b p', positive=True) + # XXX (x/b)**a does not work + dagum = a*p/x*(x/b)**(a*p)/(1 + x**a/b**a)**(p + 1) + assert simplify(integrate(dagum, (x, 0, oo), meijerg=True)) == 1 + # XXX conditions are a mess + arg = x*dagum + assert simplify(integrate(arg, (x, 0, oo), meijerg=True, conds='none') + ) == a*b*gamma(1 - 1/a)*gamma(p + 1 + 1/a)/( + (a*p + 1)*gamma(p)) + assert simplify(integrate(x*arg, (x, 0, oo), meijerg=True, conds='none') + ) == a*b**2*gamma(1 - 2/a)*gamma(p + 1 + 2/a)/( + (a*p + 2)*gamma(p)) + + # F-distribution + d1, d2 = symbols('d1 d2', positive=True) + f = sqrt(((d1*x)**d1 * d2**d2)/(d1*x + d2)**(d1 + d2))/x \ + /gamma(d1/2)/gamma(d2/2)*gamma((d1 + d2)/2) + assert simplify(integrate(f, (x, 0, oo), meijerg=True)) == 1 + # TODO conditions are a mess + assert simplify(integrate(x*f, (x, 0, oo), meijerg=True, conds='none') + ) == d2/(d2 - 2) + assert simplify(integrate(x**2*f, (x, 0, oo), meijerg=True, conds='none') + ) == d2**2*(d1 + 2)/d1/(d2 - 4)/(d2 - 2) + + # TODO gamma, rayleigh + + # inverse gaussian + lamda, mu = symbols('lamda mu', positive=True) + dist = sqrt(lamda/2/pi)*x**(Rational(-3, 2))*exp(-lamda*(x - mu)**2/x/2/mu**2) + mysimp 
= lambda expr: simplify(expr.rewrite(exp)) + assert mysimp(integrate(dist, (x, 0, oo))) == 1 + assert mysimp(integrate(x*dist, (x, 0, oo))) == mu + assert mysimp(integrate((x - mu)**2*dist, (x, 0, oo))) == mu**3/lamda + assert mysimp(integrate((x - mu)**3*dist, (x, 0, oo))) == 3*mu**5/lamda**2 + + # Levi + c = Symbol('c', positive=True) + assert integrate(sqrt(c/2/pi)*exp(-c/2/(x - mu))/(x - mu)**S('3/2'), + (x, mu, oo)) == 1 + # higher moments oo + + # log-logistic + alpha, beta = symbols('alpha beta', positive=True) + distn = (beta/alpha)*x**(beta - 1)/alpha**(beta - 1)/ \ + (1 + x**beta/alpha**beta)**2 + # FIXME: If alpha, beta are not declared as finite the line below hangs + # after the changes in: + # https://github.com/sympy/sympy/pull/16603 + assert simplify(integrate(distn, (x, 0, oo))) == 1 + # NOTE the conditions are a mess, but correctly state beta > 1 + assert simplify(integrate(x*distn, (x, 0, oo), conds='none')) == \ + pi*alpha/beta/sin(pi/beta) + # (similar comment for conditions applies) + assert simplify(integrate(x**y*distn, (x, 0, oo), conds='none')) == \ + pi*alpha**y*y/beta/sin(pi*y/beta) + + # weibull + k = Symbol('k', positive=True) + n = Symbol('n', positive=True) + distn = k/lamda*(x/lamda)**(k - 1)*exp(-(x/lamda)**k) + assert simplify(integrate(distn, (x, 0, oo))) == 1 + assert simplify(integrate(x**n*distn, (x, 0, oo))) == \ + lamda**n*gamma(1 + n/k) + + # rice distribution + from sympy.functions.special.bessel import besseli + nu, sigma = symbols('nu sigma', positive=True) + rice = x/sigma**2*exp(-(x**2 + nu**2)/2/sigma**2)*besseli(0, x*nu/sigma**2) + assert integrate(rice, (x, 0, oo), meijerg=True) == 1 + # can someone verify higher moments? + + # Laplace distribution + mu = Symbol('mu', real=True) + b = Symbol('b', positive=True) + laplace = exp(-abs(x - mu)/b)/2/b + assert integrate(laplace, (x, -oo, oo), meijerg=True) == 1 + assert integrate(x*laplace, (x, -oo, oo), meijerg=True) == mu + assert integrate(x**2*laplace, (x, -oo, oo), meijerg=True) == \ + 2*b**2 + mu**2 + + # TODO are there other distributions supported on (-oo, oo) that we can do? + + # misc tests + k = Symbol('k', positive=True) + assert gammasimp(expand_mul(integrate(log(x)*x**(k - 1)*exp(-x)/gamma(k), + (x, 0, oo)))) == polygamma(0, k) + + +@slow +def test_expint(): + """ Test various exponential integrals. 
""" + from sympy.core.symbol import Symbol + from sympy.functions.elementary.hyperbolic import sinh + from sympy.functions.special.error_functions import (Chi, Ci, Ei, Shi, Si, expint) + assert simplify(unpolarify(integrate(exp(-z*x)/x**y, (x, 1, oo), + meijerg=True, conds='none' + ).rewrite(expint).expand(func=True))) == expint(y, z) + + assert integrate(exp(-z*x)/x, (x, 1, oo), meijerg=True, + conds='none').rewrite(expint).expand() == \ + expint(1, z) + assert integrate(exp(-z*x)/x**2, (x, 1, oo), meijerg=True, + conds='none').rewrite(expint).expand() == \ + expint(2, z).rewrite(Ei).rewrite(expint) + assert integrate(exp(-z*x)/x**3, (x, 1, oo), meijerg=True, + conds='none').rewrite(expint).expand() == \ + expint(3, z).rewrite(Ei).rewrite(expint).expand() + + t = Symbol('t', positive=True) + assert integrate(-cos(x)/x, (x, t, oo), meijerg=True).expand() == Ci(t) + assert integrate(-sin(x)/x, (x, t, oo), meijerg=True).expand() == \ + Si(t) - pi/2 + assert integrate(sin(x)/x, (x, 0, z), meijerg=True) == Si(z) + assert integrate(sinh(x)/x, (x, 0, z), meijerg=True) == Shi(z) + assert integrate(exp(-x)/x, x, meijerg=True).expand().rewrite(expint) == \ + I*pi - expint(1, x) + assert integrate(exp(-x)/x**2, x, meijerg=True).rewrite(expint).expand() \ + == expint(1, x) - exp(-x)/x - I*pi + + u = Symbol('u', polar=True) + assert integrate(cos(u)/u, u, meijerg=True).expand().as_independent(u)[1] \ + == Ci(u) + assert integrate(cosh(u)/u, u, meijerg=True).expand().as_independent(u)[1] \ + == Chi(u) + + assert integrate(expint(1, x), x, meijerg=True + ).rewrite(expint).expand() == x*expint(1, x) - exp(-x) + assert integrate(expint(2, x), x, meijerg=True + ).rewrite(expint).expand() == \ + -x**2*expint(1, x)/2 + x*exp(-x)/2 - exp(-x)/2 + assert simplify(unpolarify(integrate(expint(y, x), x, + meijerg=True).rewrite(expint).expand(func=True))) == \ + -expint(y + 1, x) + + assert integrate(Si(x), x, meijerg=True) == x*Si(x) + cos(x) + assert integrate(Ci(u), u, meijerg=True).expand() == u*Ci(u) - sin(u) + assert integrate(Shi(x), x, meijerg=True) == x*Shi(x) - cosh(x) + assert integrate(Chi(u), u, meijerg=True).expand() == u*Chi(u) - sinh(u) + + assert integrate(Si(x)*exp(-x), (x, 0, oo), meijerg=True) == pi/4 + assert integrate(expint(1, x)*sin(x), (x, 0, oo), meijerg=True) == log(2)/2 + + +def test_messy(): + from sympy.functions.elementary.hyperbolic import (acosh, acoth) + from sympy.functions.elementary.trigonometric import (asin, atan) + from sympy.functions.special.bessel import besselj + from sympy.functions.special.error_functions import (Chi, E1, Shi, Si) + from sympy.integrals.transforms import (fourier_transform, laplace_transform) + assert (laplace_transform(Si(x), x, s, simplify=True) == + ((-atan(s) + pi/2)/s, 0, True)) + + assert laplace_transform(Shi(x), x, s, simplify=True) == ( + acoth(s)/s, -oo, s**2 > 1) + + # where should the logs be simplified? + assert laplace_transform(Chi(x), x, s, simplify=True) == ( + (log(s**(-2)) - log(1 - 1/s**2))/(2*s), -oo, s**2 > 1) + + # TODO maybe simplify the inequalities? 
when the simplification + # allows for generators instead of symbols this will work + assert laplace_transform(besselj(a, x), x, s)[1:] == \ + (0, (re(a) > -2) & (re(a) > -1)) + + # NOTE s < 0 can be done, but argument reduction is not good enough yet + ans = fourier_transform(besselj(1, x)/x, x, s, noconds=False) + assert (ans[0].factor(deep=True).expand(), ans[1]) == \ + (Piecewise((0, (s > 1/(2*pi)) | (s < -1/(2*pi))), + (2*sqrt(-4*pi**2*s**2 + 1), True)), s > 0) + # TODO FT(besselj(0,x)) - conditions are messy (but for acceptable reasons) + # - folding could be better + + assert integrate(E1(x)*besselj(0, x), (x, 0, oo), meijerg=True) == \ + log(1 + sqrt(2)) + assert integrate(E1(x)*besselj(1, x), (x, 0, oo), meijerg=True) == \ + log(S.Half + sqrt(2)/2) + + assert integrate(1/x/sqrt(1 - x**2), x, meijerg=True) == \ + Piecewise((-acosh(1/x), abs(x**(-2)) > 1), (I*asin(1/x), True)) + + +def test_issue_6122(): + assert integrate(exp(-I*x**2), (x, -oo, oo), meijerg=True) == \ + -I*sqrt(pi)*exp(I*pi/4) + + +def test_issue_6252(): + expr = 1/x/(a + b*x)**Rational(1, 3) + anti = integrate(expr, x, meijerg=True) + assert not anti.has(hyper) + # XXX the expression is a mess, but actually upon differentiation and + # putting in numerical values seems to work... + + +def test_issue_6348(): + assert integrate(exp(I*x)/(1 + x**2), (x, -oo, oo)).simplify().rewrite(exp) \ + == pi*exp(-1) + + +def test_fresnel(): + from sympy.functions.special.error_functions import (fresnelc, fresnels) + + assert expand_func(integrate(sin(pi*x**2/2), x)) == fresnels(x) + assert expand_func(integrate(cos(pi*x**2/2), x)) == fresnelc(x) + + +def test_issue_6860(): + assert meijerint_indefinite(x**x**x, x) is None + + +def test_issue_7337(): + f = meijerint_indefinite(x*sqrt(2*x + 3), x).together() + assert f == sqrt(2*x + 3)*(2*x**2 + x - 3)/5 + assert f._eval_interval(x, S.NegativeOne, S.One) == Rational(2, 5) + + +def test_issue_8368(): + assert meijerint_indefinite(cosh(x)*exp(-x*t), x) == ( + (-t - 1)*exp(x) + (-t + 1)*exp(-x))*exp(-t*x)/2/(t**2 - 1) + + +def test_issue_10211(): + from sympy.abc import h, w + assert integrate((1/sqrt((y-x)**2 + h**2)**3), (x,0,w), (y,0,w)) == \ + 2*sqrt(1 + w**2/h**2)/h - 2/h + + +def test_issue_11806(): + from sympy.core.symbol import symbols + y, L = symbols('y L', positive=True) + assert integrate(1/sqrt(x**2 + y**2)**3, (x, -L, L)) == \ + 2*L/(y**2*sqrt(L**2 + y**2)) + +def test_issue_10681(): + from sympy.polys.domains.realfield import RR + from sympy.abc import R, r + f = integrate(r**2*(R**2-r**2)**0.5, r, meijerg=True) + g = (1.0/3)*R**1.0*r**3*hyper((-0.5, Rational(3, 2)), (Rational(5, 2),), + r**2*exp_polar(2*I*pi)/R**2) + assert RR.almosteq((f/g).n(), 1.0, 1e-12) + +def test_issue_13536(): + from sympy.core.symbol import Symbol + a = Symbol('a', positive=True) + assert integrate(1/x**2, (x, oo, a)) == -1/a + + +def test_issue_6462(): + from sympy.core.symbol import Symbol + x = Symbol('x') + n = Symbol('n') + # Not the actual issue, still wrong answer for n = 1, but that there is no + # exception + assert integrate(cos(x**n)/x**n, x, meijerg=True).subs(n, 2).equals( + integrate(cos(x**2)/x**2, x, meijerg=True)) + + +def test_indefinite_1_bug(): + assert integrate((b + t)**(-a), t, meijerg=True + ) == -b**(1 - a)*(1 + t/b)**(1 - a)/(a - 1) + + +def test_pr_23583(): + # This result is wrong. Check whether new result is correct when this test fail. 
+ assert integrate(1/sqrt((x - I)**2-1), meijerg=True) == \ + Piecewise((acosh(x - I), Abs((x - I)**2) > 1), (-I*asin(x - I), True)) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_prde.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_prde.py new file mode 100644 index 0000000000000000000000000000000000000000..a7429ea8634c742eb77cdb26f99b2cb15853cd42 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_prde.py @@ -0,0 +1,322 @@ +"""Most of these tests come from the examples in Bronstein's book.""" +from sympy.integrals.risch import DifferentialExtension, derivation +from sympy.integrals.prde import (prde_normal_denom, prde_special_denom, + prde_linear_constraints, constant_system, prde_spde, prde_no_cancel_b_large, + prde_no_cancel_b_small, limited_integrate_reduce, limited_integrate, + is_deriv_k, is_log_deriv_k_t_radical, parametric_log_deriv_heu, + is_log_deriv_k_t_radical_in_field, param_poly_rischDE, param_rischDE, + prde_cancel_liouvillian) + +from sympy.polys.polymatrix import PolyMatrix as Matrix + +from sympy.core.numbers import Rational +from sympy.core.singleton import S +from sympy.core.symbol import symbols +from sympy.polys.domains.rationalfield import QQ +from sympy.polys.polytools import Poly +from sympy.abc import x, t, n + +t0, t1, t2, t3, k = symbols('t:4 k') + + +def test_prde_normal_denom(): + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1 + t**2, t)]}) + fa = Poly(1, t) + fd = Poly(x, t) + G = [(Poly(t, t), Poly(1 + t**2, t)), (Poly(1, t), Poly(x + x*t**2, t))] + assert prde_normal_denom(fa, fd, G, DE) == \ + (Poly(x, t, domain='ZZ(x)'), (Poly(1, t, domain='ZZ(x)'), Poly(1, t, + domain='ZZ(x)')), [(Poly(x*t, t, domain='ZZ(x)'), + Poly(t**2 + 1, t, domain='ZZ(x)')), (Poly(1, t, domain='ZZ(x)'), + Poly(t**2 + 1, t, domain='ZZ(x)'))], Poly(1, t, domain='ZZ(x)')) + G = [(Poly(t, t), Poly(t**2 + 2*t + 1, t)), (Poly(x*t, t), + Poly(t**2 + 2*t + 1, t)), (Poly(x*t**2, t), Poly(t**2 + 2*t + 1, t))] + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]}) + assert prde_normal_denom(Poly(x, t), Poly(1, t), G, DE) == \ + (Poly(t + 1, t), (Poly((-1 + x)*t + x, t), Poly(1, t, domain='ZZ[x]')), [(Poly(t, t), + Poly(1, t)), (Poly(x*t, t), Poly(1, t, domain='ZZ[x]')), (Poly(x*t**2, t), + Poly(1, t, domain='ZZ[x]'))], Poly(t + 1, t)) + + +def test_prde_special_denom(): + a = Poly(t + 1, t) + ba = Poly(t**2, t) + bd = Poly(1, t) + G = [(Poly(t, t), Poly(1, t)), (Poly(t**2, t), Poly(1, t)), (Poly(t**3, t), Poly(1, t))] + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]}) + assert prde_special_denom(a, ba, bd, G, DE) == \ + (Poly(t + 1, t), Poly(t**2, t), [(Poly(t, t), Poly(1, t)), + (Poly(t**2, t), Poly(1, t)), (Poly(t**3, t), Poly(1, t))], Poly(1, t)) + G = [(Poly(t, t), Poly(1, t)), (Poly(1, t), Poly(t, t))] + assert prde_special_denom(Poly(1, t), Poly(t**2, t), Poly(1, t), G, DE) == \ + (Poly(1, t), Poly(t**2 - 1, t), [(Poly(t**2, t), Poly(1, t)), + (Poly(1, t), Poly(1, t))], Poly(t, t)) + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(-2*x*t0, t0)]}) + DE.decrement_level() + G = [(Poly(t, t), Poly(t**2, t)), (Poly(2*t, t), Poly(t, t))] + assert prde_special_denom(Poly(5*x*t + 1, t), Poly(t**2 + 2*x**3*t, t), Poly(t**3 + 2, t), G, DE) == \ + (Poly(5*x*t + 1, t), Poly(0, t, domain='ZZ[x]'), [(Poly(t, t), Poly(t**2, t)), + (Poly(2*t, t), Poly(t, t))], Poly(1, x)) + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly((t**2 + 
1)*2*x, t)]}) + G = [(Poly(t + x, t), Poly(t*x, t)), (Poly(2*t, t), Poly(x**2, x))] + assert prde_special_denom(Poly(5*x*t + 1, t), Poly(t**2 + 2*x**3*t, t), Poly(t**3, t), G, DE) == \ + (Poly(5*x*t + 1, t), Poly(0, t, domain='ZZ[x]'), [(Poly(t + x, t), Poly(x*t, t)), + (Poly(2*t, t, x), Poly(x**2, t, x))], Poly(1, t)) + assert prde_special_denom(Poly(t + 1, t), Poly(t**2, t), Poly(t**3, t), G, DE) == \ + (Poly(t + 1, t), Poly(0, t, domain='ZZ[x]'), [(Poly(t + x, t), Poly(x*t, t)), (Poly(2*t, t, x), + Poly(x**2, t, x))], Poly(1, t)) + + +def test_prde_linear_constraints(): + DE = DifferentialExtension(extension={'D': [Poly(1, x)]}) + G = [(Poly(2*x**3 + 3*x + 1, x), Poly(x**2 - 1, x)), (Poly(1, x), Poly(x - 1, x)), + (Poly(1, x), Poly(x + 1, x))] + assert prde_linear_constraints(Poly(1, x), Poly(0, x), G, DE) == \ + ((Poly(2*x, x, domain='QQ'), Poly(0, x, domain='QQ'), Poly(0, x, domain='QQ')), + Matrix([[1, 1, -1], [5, 1, 1]], x)) + G = [(Poly(t, t), Poly(1, t)), (Poly(t**2, t), Poly(1, t)), (Poly(t**3, t), Poly(1, t))] + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]}) + assert prde_linear_constraints(Poly(t + 1, t), Poly(t**2, t), G, DE) == \ + ((Poly(t, t, domain='QQ'), Poly(t**2, t, domain='QQ'), Poly(t**3, t, domain='QQ')), + Matrix(0, 3, [], t)) + G = [(Poly(2*x, t), Poly(t, t)), (Poly(-x, t), Poly(t, t))] + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)]}) + assert prde_linear_constraints(Poly(1, t), Poly(0, t), G, DE) == \ + ((Poly(0, t, domain='QQ[x]'), Poly(0, t, domain='QQ[x]')), Matrix([[2*x, -x]], t)) + + +def test_constant_system(): + A = Matrix([[-(x + 3)/(x - 1), (x + 1)/(x - 1), 1], + [-x - 3, x + 1, x - 1], + [2*(x + 3)/(x - 1), 0, 0]], t) + u = Matrix([[(x + 1)/(x - 1)], [x + 1], [0]], t) + DE = DifferentialExtension(extension={'D': [Poly(1, x)]}) + R = QQ.frac_field(x)[t] + assert constant_system(A, u, DE) == \ + (Matrix([[1, 0, 0], + [0, 1, 0], + [0, 0, 0], + [0, 0, 1]], ring=R), Matrix([0, 1, 0, 0], ring=R)) + + +def test_prde_spde(): + D = [Poly(x, t), Poly(-x*t, t)] + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)]}) + # TODO: when bound_degree() can handle this, test degree bound from that too + assert prde_spde(Poly(t, t), Poly(-1/x, t), D, n, DE) == \ + (Poly(t, t), Poly(0, t, domain='ZZ(x)'), + [Poly(2*x, t, domain='ZZ(x)'), Poly(-x, t, domain='ZZ(x)')], + [Poly(-x**2, t, domain='ZZ(x)'), Poly(0, t, domain='ZZ(x)')], n - 1) + + +def test_prde_no_cancel(): + # b large + DE = DifferentialExtension(extension={'D': [Poly(1, x)]}) + assert prde_no_cancel_b_large(Poly(1, x), [Poly(x**2, x), Poly(1, x)], 2, DE) == \ + ([Poly(x**2 - 2*x + 2, x), Poly(1, x)], Matrix([[1, 0, -1, 0], + [0, 1, 0, -1]], x)) + assert prde_no_cancel_b_large(Poly(1, x), [Poly(x**3, x), Poly(1, x)], 3, DE) == \ + ([Poly(x**3 - 3*x**2 + 6*x - 6, x), Poly(1, x)], Matrix([[1, 0, -1, 0], + [0, 1, 0, -1]], x)) + assert prde_no_cancel_b_large(Poly(x, x), [Poly(x**2, x), Poly(1, x)], 1, DE) == \ + ([Poly(x, x, domain='ZZ'), Poly(0, x, domain='ZZ')], Matrix([[1, -1, 0, 0], + [1, 0, -1, 0], + [0, 1, 0, -1]], x)) + # b small + # XXX: Is there a better example of a monomial with D.degree() > 2? + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**3 + 1, t)]}) + + # My original q was t**4 + t + 1, but this solution implies q == t**4 + # (c1 = 4), with some of the ci for the original q equal to 0. 
+ G = [Poly(t**6, t), Poly(x*t**5, t), Poly(t**3, t), Poly(x*t**2, t), Poly(1 + x, t)] + R = QQ.frac_field(x)[t] + assert prde_no_cancel_b_small(Poly(x*t, t), G, 4, DE) == \ + ([Poly(t**4/4 - x/12*t**3 + x**2/24*t**2 + (Rational(-11, 12) - x**3/24)*t + x/24, t), + Poly(x/3*t**3 - x**2/6*t**2 + (Rational(-1, 3) + x**3/6)*t - x/6, t), Poly(t, t), + Poly(0, t), Poly(0, t)], Matrix([[1, 0, -1, 0, 0, 0, 0, 0, 0, 0], + [0, 1, Rational(-1, 4), 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, -1, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, -1, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, -1, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0, -1, 0], + [0, 0, 0, 0, 1, 0, 0, 0, 0, -1]], ring=R)) + + # TODO: Add test for deg(b) <= 0 with b small + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1 + t**2, t)]}) + b = Poly(-1/x**2, t, field=True) # deg(b) == 0 + q = [Poly(x**i*t**j, t, field=True) for i in range(2) for j in range(3)] + h, A = prde_no_cancel_b_small(b, q, 3, DE) + V = A.nullspace() + R = QQ.frac_field(x)[t] + assert len(V) == 1 + assert V[0] == Matrix([Rational(-1, 2), 0, 0, 1, 0, 0]*3, ring=R) + assert (Matrix([h])*V[0][6:, :])[0] == Poly(x**2/2, t, domain='QQ(x)') + assert (Matrix([q])*V[0][:6, :])[0] == Poly(x - S.Half, t, domain='QQ(x)') + + +def test_prde_cancel_liouvillian(): + ### 1. case == 'primitive' + # used when integrating f = log(x) - log(x - 1) + # Not taken from 'the' book + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)]}) + p0 = Poly(0, t, field=True) + p1 = Poly((x - 1)*t, t, domain='ZZ(x)') + p2 = Poly(x - 1, t, domain='ZZ(x)') + p3 = Poly(-x**2 + x, t, domain='ZZ(x)') + h, A = prde_cancel_liouvillian(Poly(-1/(x - 1), t), [Poly(-x + 1, t), Poly(1, t)], 1, DE) + V = A.nullspace() + assert h == [p0, p0, p1, p0, p0, p0, p0, p0, p0, p0, p2, p3, p0, p0, p0, p0] + assert A.rank() == 16 + assert (Matrix([h])*V[0][:16, :]) == Matrix([[Poly(0, t, domain='QQ(x)')]]) + + ### 2. case == 'exp' + # used when integrating log(x/exp(x) + 1) + # Not taken from book + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(-t, t)]}) + assert prde_cancel_liouvillian(Poly(0, t, domain='QQ[x]'), [Poly(1, t, domain='QQ(x)')], 0, DE) == \ + ([Poly(1, t, domain='QQ'), Poly(x, t, domain='ZZ(x)')], Matrix([[-1, 0, 1]], DE.t)) + + +def test_param_poly_rischDE(): + DE = DifferentialExtension(extension={'D': [Poly(1, x)]}) + a = Poly(x**2 - x, x, field=True) + b = Poly(1, x, field=True) + q = [Poly(x, x, field=True), Poly(x**2, x, field=True)] + h, A = param_poly_rischDE(a, b, q, 3, DE) + + assert A.nullspace() == [Matrix([0, 1, 1, 1], DE.t)] # c1, c2, d1, d2 + # Solution of a*Dp + b*p = c1*q1 + c2*q2 = q2 = x**2 + # is d1*h1 + d2*h2 = h1 + h2 = x. + assert h[0] + h[1] == Poly(x, x, domain='QQ') + # a*Dp + b*p = q1 = x has no solution. 
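+ # Second parametric case below: the single nullspace vector packs the constants
+ # (c1, c2, c3, d1, d2, d3) = (3, -5, 1, -5, 1, 1), i.e. a*Dp + b*p =
+ # 3*q1 - 5*q2 + q3 = x**2 - 5*x + 3 is solved by p = -5*h1 + h2 + h3 (= 1).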
+ + a = Poly(x**2 - x, x, field=True) + b = Poly(x**2 - 5*x + 3, x, field=True) + q = [Poly(1, x, field=True), Poly(x, x, field=True), + Poly(x**2, x, field=True)] + h, A = param_poly_rischDE(a, b, q, 3, DE) + + assert A.nullspace() == [Matrix([3, -5, 1, -5, 1, 1], DE.t)] + p = -Poly(5, DE.t)*h[0] + h[1] + h[2] # Poly(1, x) + assert a*derivation(p, DE) + b*p == Poly(x**2 - 5*x + 3, x, domain='QQ') + + +def test_param_rischDE(): + DE = DifferentialExtension(extension={'D': [Poly(1, x)]}) + p1, px = Poly(1, x, field=True), Poly(x, x, field=True) + G = [(p1, px), (p1, p1), (px, p1)] # [1/x, 1, x] + h, A = param_rischDE(-p1, Poly(x**2, x, field=True), G, DE) + assert len(h) == 3 + p = [hi[0].as_expr()/hi[1].as_expr() for hi in h] + V = A.nullspace() + assert len(V) == 2 + assert V[0] == Matrix([-1, 1, 0, -1, 1, 0], DE.t) + y = -p[0] + p[1] + 0*p[2] # x + assert y.diff(x) - y/x**2 == 1 - 1/x # Dy + f*y == -G0 + G1 + 0*G2 + + # the below test computation takes place while computing the integral + # of 'f = log(log(x + exp(x)))' + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]}) + G = [(Poly(t + x, t, domain='ZZ(x)'), Poly(1, t, domain='QQ')), (Poly(0, t, domain='QQ'), Poly(1, t, domain='QQ'))] + h, A = param_rischDE(Poly(-t - 1, t, field=True), Poly(t + x, t, field=True), G, DE) + assert len(h) == 5 + p = [hi[0].as_expr()/hi[1].as_expr() for hi in h] + V = A.nullspace() + assert len(V) == 3 + assert V[0] == Matrix([0, 0, 0, 0, 1, 0, 0], DE.t) + y = 0*p[0] + 0*p[1] + 1*p[2] + 0*p[3] + 0*p[4] + assert y.diff(t) - y/(t + x) == 0 # Dy + f*y = 0*G0 + 0*G1 + + +def test_limited_integrate_reduce(): + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)]}) + assert limited_integrate_reduce(Poly(x, t), Poly(t**2, t), [(Poly(x, t), + Poly(t, t))], DE) == \ + (Poly(t, t), Poly(-1/x, t), Poly(t, t), 1, (Poly(x, t), Poly(1, t, domain='ZZ[x]')), + [(Poly(-x*t, t), Poly(1, t, domain='ZZ[x]'))]) + + +def test_limited_integrate(): + DE = DifferentialExtension(extension={'D': [Poly(1, x)]}) + G = [(Poly(x, x), Poly(x + 1, x))] + assert limited_integrate(Poly(-(1 + x + 5*x**2 - 3*x**3), x), + Poly(1 - x - x**2 + x**3, x), G, DE) == \ + ((Poly(x**2 - x + 2, x), Poly(x - 1, x, domain='QQ')), [2]) + G = [(Poly(1, x), Poly(x, x))] + assert limited_integrate(Poly(5*x**2, x), Poly(3, x), G, DE) == \ + ((Poly(5*x**3/9, x), Poly(1, x, domain='QQ')), [0]) + + +def test_is_log_deriv_k_t_radical(): + DE = DifferentialExtension(extension={'D': [Poly(1, x)], 'exts': [None], + 'extargs': [None]}) + assert is_log_deriv_k_t_radical(Poly(2*x, x), Poly(1, x), DE) is None + + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(2*t1, t1), Poly(1/x, t2)], + 'exts': [None, 'exp', 'log'], 'extargs': [None, 2*x, x]}) + assert is_log_deriv_k_t_radical(Poly(x + t2/2, t2), Poly(1, t2), DE) == \ + ([(t1, 1), (x, 1)], t1*x, 2, 0) + # TODO: Add more tests + + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t0, t0), Poly(1/x, t)], + 'exts': [None, 'exp', 'log'], 'extargs': [None, x, x]}) + assert is_log_deriv_k_t_radical(Poly(x + t/2 + 3, t), Poly(1, t), DE) == \ + ([(t0, 2), (x, 1)], x*t0**2, 2, 3) + + +def test_is_deriv_k(): + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t1), Poly(1/(x + 1), t2)], + 'exts': [None, 'log', 'log'], 'extargs': [None, x, x + 1]}) + assert is_deriv_k(Poly(2*x**2 + 2*x, t2), Poly(1, t2), DE) == \ + ([(t1, 1), (t2, 1)], t1 + t2, 2) + + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t1), Poly(t2, t2)], + 'exts': [None, 
'log', 'exp'], 'extargs': [None, x, x]}) + assert is_deriv_k(Poly(x**2*t2**3, t2), Poly(1, t2), DE) == \ + ([(x, 3), (t1, 2)], 2*t1 + 3*x, 1) + # TODO: Add more tests, including ones with exponentials + + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(2/x, t1)], + 'exts': [None, 'log'], 'extargs': [None, x**2]}) + assert is_deriv_k(Poly(x, t1), Poly(1, t1), DE) == \ + ([(t1, S.Half)], t1/2, 1) + + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(2/(1 + x), t0)], + 'exts': [None, 'log'], 'extargs': [None, x**2 + 2*x + 1]}) + assert is_deriv_k(Poly(1 + x, t0), Poly(1, t0), DE) == \ + ([(t0, S.Half)], t0/2, 1) + + # Issue 10798 + # DE = DifferentialExtension(log(1/x), x) + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(-1/x, t)], + 'exts': [None, 'log'], 'extargs': [None, 1/x]}) + assert is_deriv_k(Poly(1, t), Poly(x, t), DE) == ([(t, 1)], t, 1) + + +def test_is_log_deriv_k_t_radical_in_field(): + # NOTE: any potential constant factor in the second element of the result + # doesn't matter, because it cancels in Da/a. + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)]}) + assert is_log_deriv_k_t_radical_in_field(Poly(5*t + 1, t), Poly(2*t*x, t), DE) == \ + (2, t*x**5) + assert is_log_deriv_k_t_radical_in_field(Poly(2 + 3*t, t), Poly(5*x*t, t), DE) == \ + (5, x**3*t**2) + + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(-t/x**2, t)]}) + assert is_log_deriv_k_t_radical_in_field(Poly(-(1 + 2*t), t), + Poly(2*x**2 + 2*x**2*t, t), DE) == \ + (2, t + t**2) + assert is_log_deriv_k_t_radical_in_field(Poly(-1, t), Poly(x**2, t), DE) == \ + (1, t) + assert is_log_deriv_k_t_radical_in_field(Poly(1, t), Poly(2*x**2, t), DE) == \ + (2, 1/t) + + +def test_parametric_log_deriv(): + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)]}) + assert parametric_log_deriv_heu(Poly(5*t**2 + t - 6, t), Poly(2*x*t**2, t), + Poly(-1, t), Poly(x*t**2, t), DE) == \ + (2, 6, t*x**5) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_quadrature.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_quadrature.py new file mode 100644 index 0000000000000000000000000000000000000000..97471dbdbc13fda0bce7a8823ff2cefac4ab8802 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_quadrature.py @@ -0,0 +1,601 @@ +from sympy.core import S, Rational +from sympy.integrals.quadrature import (gauss_legendre, gauss_laguerre, + gauss_hermite, gauss_gen_laguerre, + gauss_chebyshev_t, gauss_chebyshev_u, + gauss_jacobi, gauss_lobatto) + + +def test_legendre(): + x, w = gauss_legendre(1, 17) + assert [str(r) for r in x] == ['0'] + assert [str(r) for r in w] == ['2.0000000000000000'] + + x, w = gauss_legendre(2, 17) + assert [str(r) for r in x] == [ + '-0.57735026918962576', + '0.57735026918962576'] + assert [str(r) for r in w] == [ + '1.0000000000000000', + '1.0000000000000000'] + + x, w = gauss_legendre(3, 17) + assert [str(r) for r in x] == [ + '-0.77459666924148338', + '0', + '0.77459666924148338'] + assert [str(r) for r in w] == [ + '0.55555555555555556', + '0.88888888888888889', + '0.55555555555555556'] + + x, w = gauss_legendre(4, 17) + assert [str(r) for r in x] == [ + '-0.86113631159405258', + '-0.33998104358485626', + '0.33998104358485626', + '0.86113631159405258'] + assert [str(r) for r in w] == [ + '0.34785484513745386', + '0.65214515486254614', + '0.65214515486254614', + '0.34785484513745386'] + + +def test_legendre_precise(): + x, w = gauss_legendre(3, 40) + 
assert [str(r) for r in x] == [ + '-0.7745966692414833770358530799564799221666', + '0', + '0.7745966692414833770358530799564799221666'] + assert [str(r) for r in w] == [ + '0.5555555555555555555555555555555555555556', + '0.8888888888888888888888888888888888888889', + '0.5555555555555555555555555555555555555556'] + + +def test_laguerre(): + x, w = gauss_laguerre(1, 17) + assert [str(r) for r in x] == ['1.0000000000000000'] + assert [str(r) for r in w] == ['1.0000000000000000'] + + x, w = gauss_laguerre(2, 17) + assert [str(r) for r in x] == [ + '0.58578643762690495', + '3.4142135623730950'] + assert [str(r) for r in w] == [ + '0.85355339059327376', + '0.14644660940672624'] + + x, w = gauss_laguerre(3, 17) + assert [str(r) for r in x] == [ + '0.41577455678347908', + '2.2942803602790417', + '6.2899450829374792', + ] + assert [str(r) for r in w] == [ + '0.71109300992917302', + '0.27851773356924085', + '0.010389256501586136', + ] + + x, w = gauss_laguerre(4, 17) + assert [str(r) for r in x] == [ + '0.32254768961939231', + '1.7457611011583466', + '4.5366202969211280', + '9.3950709123011331'] + assert [str(r) for r in w] == [ + '0.60315410434163360', + '0.35741869243779969', + '0.038887908515005384', + '0.00053929470556132745'] + + x, w = gauss_laguerre(5, 17) + assert [str(r) for r in x] == [ + '0.26356031971814091', + '1.4134030591065168', + '3.5964257710407221', + '7.0858100058588376', + '12.640800844275783'] + assert [str(r) for r in w] == [ + '0.52175561058280865', + '0.39866681108317593', + '0.075942449681707595', + '0.0036117586799220485', + '2.3369972385776228e-5'] + + +def test_laguerre_precise(): + x, w = gauss_laguerre(3, 40) + assert [str(r) for r in x] == [ + '0.4157745567834790833115338731282744735466', + '2.294280360279041719822050361359593868960', + '6.289945082937479196866415765512131657493'] + assert [str(r) for r in w] == [ + '0.7110930099291730154495901911425944313094', + '0.2785177335692408488014448884567264810349', + '0.01038925650158613574896492040067908765572'] + + +def test_hermite(): + x, w = gauss_hermite(1, 17) + assert [str(r) for r in x] == ['0'] + assert [str(r) for r in w] == ['1.7724538509055160'] + + x, w = gauss_hermite(2, 17) + assert [str(r) for r in x] == [ + '-0.70710678118654752', + '0.70710678118654752'] + assert [str(r) for r in w] == [ + '0.88622692545275801', + '0.88622692545275801'] + + x, w = gauss_hermite(3, 17) + assert [str(r) for r in x] == [ + '-1.2247448713915890', + '0', + '1.2247448713915890'] + assert [str(r) for r in w] == [ + '0.29540897515091934', + '1.1816359006036774', + '0.29540897515091934'] + + x, w = gauss_hermite(4, 17) + assert [str(r) for r in x] == [ + '-1.6506801238857846', + '-0.52464762327529032', + '0.52464762327529032', + '1.6506801238857846'] + assert [str(r) for r in w] == [ + '0.081312835447245177', + '0.80491409000551284', + '0.80491409000551284', + '0.081312835447245177'] + + x, w = gauss_hermite(5, 17) + assert [str(r) for r in x] == [ + '-2.0201828704560856', + '-0.95857246461381851', + '0', + '0.95857246461381851', + '2.0201828704560856'] + assert [str(r) for r in w] == [ + '0.019953242059045913', + '0.39361932315224116', + '0.94530872048294188', + '0.39361932315224116', + '0.019953242059045913'] + + +def test_hermite_precise(): + x, w = gauss_hermite(3, 40) + assert [str(r) for r in x] == [ + '-1.224744871391589049098642037352945695983', + '0', + '1.224744871391589049098642037352945695983'] + assert [str(r) for r in w] == [ + '0.2954089751509193378830279138901908637996', + 
'1.181635900603677351532111655560763455198', + '0.2954089751509193378830279138901908637996'] + + +def test_gen_laguerre(): + x, w = gauss_gen_laguerre(1, Rational(-1, 2), 17) + assert [str(r) for r in x] == ['0.50000000000000000'] + assert [str(r) for r in w] == ['1.7724538509055160'] + + x, w = gauss_gen_laguerre(2, Rational(-1, 2), 17) + assert [str(r) for r in x] == [ + '0.27525512860841095', + '2.7247448713915890'] + assert [str(r) for r in w] == [ + '1.6098281800110257', + '0.16262567089449035'] + + x, w = gauss_gen_laguerre(3, Rational(-1, 2), 17) + assert [str(r) for r in x] == [ + '0.19016350919348813', + '1.7844927485432516', + '5.5253437422632603'] + assert [str(r) for r in w] == [ + '1.4492591904487850', + '0.31413464064571329', + '0.0090600198110176913'] + + x, w = gauss_gen_laguerre(4, Rational(-1, 2), 17) + assert [str(r) for r in x] == [ + '0.14530352150331709', + '1.3390972881263614', + '3.9269635013582872', + '8.5886356890120343'] + assert [str(r) for r in w] == [ + '1.3222940251164826', + '0.41560465162978376', + '0.034155966014826951', + '0.00039920814442273524'] + + x, w = gauss_gen_laguerre(5, Rational(-1, 2), 17) + assert [str(r) for r in x] == [ + '0.11758132021177814', + '1.0745620124369040', + '3.0859374437175500', + '6.4147297336620305', + '11.807189489971737'] + assert [str(r) for r in w] == [ + '1.2217252674706516', + '0.48027722216462937', + '0.067748788910962126', + '0.0026872914935624654', + '1.5280865710465241e-5'] + + x, w = gauss_gen_laguerre(1, 2, 17) + assert [str(r) for r in x] == ['3.0000000000000000'] + assert [str(r) for r in w] == ['2.0000000000000000'] + + x, w = gauss_gen_laguerre(2, 2, 17) + assert [str(r) for r in x] == [ + '2.0000000000000000', + '6.0000000000000000'] + assert [str(r) for r in w] == [ + '1.5000000000000000', + '0.50000000000000000'] + + x, w = gauss_gen_laguerre(3, 2, 17) + assert [str(r) for r in x] == [ + '1.5173870806774125', + '4.3115831337195203', + '9.1710297856030672'] + assert [str(r) for r in w] == [ + '1.0374949614904253', + '0.90575000470306537', + '0.056755033806509347'] + + x, w = gauss_gen_laguerre(4, 2, 17) + assert [str(r) for r in x] == [ + '1.2267632635003021', + '3.4125073586969460', + '6.9026926058516134', + '12.458036771951139'] + assert [str(r) for r in w] == [ + '0.72552499769865438', + '1.0634242919791946', + '0.20669613102835355', + '0.0043545792937974889'] + + x, w = gauss_gen_laguerre(5, 2, 17) + assert [str(r) for r in x] == [ + '1.0311091440933816', + '2.8372128239538217', + '5.6202942725987079', + '9.6829098376640271', + '15.828473921690062'] + assert [str(r) for r in w] == [ + '0.52091739683509184', + '1.0667059331592211', + '0.38354972366693113', + '0.028564233532974658', + '0.00026271280578124935'] + + +def test_gen_laguerre_precise(): + x, w = gauss_gen_laguerre(3, Rational(-1, 2), 40) + assert [str(r) for r in x] == [ + '0.1901635091934881328718554276203028970878', + '1.784492748543251591186722461957367638500', + '5.525343742263260275941422110422329464413'] + assert [str(r) for r in w] == [ + '1.449259190448785048183829411195134343108', + '0.3141346406457132878326231270167565378246', + '0.009060019811017691281714945129254301865020'] + + x, w = gauss_gen_laguerre(3, 2, 40) + assert [str(r) for r in x] == [ + '1.517387080677412495020323111016672547482', + '4.311583133719520302881184669723530562299', + '9.171029785603067202098492219259796890218'] + assert [str(r) for r in w] == [ + '1.037494961490425285817554606541269153041', + '0.9057500047030653669269785048806009945254', + 
'0.05675503380650934725546688857812985243312'] + + +def test_chebyshev_t(): + x, w = gauss_chebyshev_t(1, 17) + assert [str(r) for r in x] == ['0'] + assert [str(r) for r in w] == ['3.1415926535897932'] + + x, w = gauss_chebyshev_t(2, 17) + assert [str(r) for r in x] == [ + '0.70710678118654752', + '-0.70710678118654752'] + assert [str(r) for r in w] == [ + '1.5707963267948966', + '1.5707963267948966'] + + x, w = gauss_chebyshev_t(3, 17) + assert [str(r) for r in x] == [ + '0.86602540378443865', + '0', + '-0.86602540378443865'] + assert [str(r) for r in w] == [ + '1.0471975511965977', + '1.0471975511965977', + '1.0471975511965977'] + + x, w = gauss_chebyshev_t(4, 17) + assert [str(r) for r in x] == [ + '0.92387953251128676', + '0.38268343236508977', + '-0.38268343236508977', + '-0.92387953251128676'] + assert [str(r) for r in w] == [ + '0.78539816339744831', + '0.78539816339744831', + '0.78539816339744831', + '0.78539816339744831'] + + x, w = gauss_chebyshev_t(5, 17) + assert [str(r) for r in x] == [ + '0.95105651629515357', + '0.58778525229247313', + '0', + '-0.58778525229247313', + '-0.95105651629515357'] + assert [str(r) for r in w] == [ + '0.62831853071795865', + '0.62831853071795865', + '0.62831853071795865', + '0.62831853071795865', + '0.62831853071795865'] + + +def test_chebyshev_t_precise(): + x, w = gauss_chebyshev_t(3, 40) + assert [str(r) for r in x] == [ + '0.8660254037844386467637231707529361834714', + '0', + '-0.8660254037844386467637231707529361834714'] + assert [str(r) for r in w] == [ + '1.047197551196597746154214461093167628066', + '1.047197551196597746154214461093167628066', + '1.047197551196597746154214461093167628066'] + + +def test_chebyshev_u(): + x, w = gauss_chebyshev_u(1, 17) + assert [str(r) for r in x] == ['0'] + assert [str(r) for r in w] == ['1.5707963267948966'] + + x, w = gauss_chebyshev_u(2, 17) + assert [str(r) for r in x] == [ + '0.50000000000000000', + '-0.50000000000000000'] + assert [str(r) for r in w] == [ + '0.78539816339744831', + '0.78539816339744831'] + + x, w = gauss_chebyshev_u(3, 17) + assert [str(r) for r in x] == [ + '0.70710678118654752', + '0', + '-0.70710678118654752'] + assert [str(r) for r in w] == [ + '0.39269908169872415', + '0.78539816339744831', + '0.39269908169872415'] + + x, w = gauss_chebyshev_u(4, 17) + assert [str(r) for r in x] == [ + '0.80901699437494742', + '0.30901699437494742', + '-0.30901699437494742', + '-0.80901699437494742'] + assert [str(r) for r in w] == [ + '0.21707871342270599', + '0.56831944997474231', + '0.56831944997474231', + '0.21707871342270599'] + + x, w = gauss_chebyshev_u(5, 17) + assert [str(r) for r in x] == [ + '0.86602540378443865', + '0.50000000000000000', + '0', + '-0.50000000000000000', + '-0.86602540378443865'] + assert [str(r) for r in w] == [ + '0.13089969389957472', + '0.39269908169872415', + '0.52359877559829887', + '0.39269908169872415', + '0.13089969389957472'] + + +def test_chebyshev_u_precise(): + x, w = gauss_chebyshev_u(3, 40) + assert [str(r) for r in x] == [ + '0.7071067811865475244008443621048490392848', + '0', + '-0.7071067811865475244008443621048490392848'] + assert [str(r) for r in w] == [ + '0.3926990816987241548078304229099378605246', + '0.7853981633974483096156608458198757210493', + '0.3926990816987241548078304229099378605246'] + + +def test_jacobi(): + x, w = gauss_jacobi(1, Rational(-1, 2), S.Half, 17) + assert [str(r) for r in x] == ['0.50000000000000000'] + assert [str(r) for r in w] == ['3.1415926535897932'] + + x, w = gauss_jacobi(2, Rational(-1, 2), S.Half, 17) + assert 
[str(r) for r in x] == [ + '-0.30901699437494742', + '0.80901699437494742'] + assert [str(r) for r in w] == [ + '0.86831485369082398', + '2.2732777998989693'] + + x, w = gauss_jacobi(3, Rational(-1, 2), S.Half, 17) + assert [str(r) for r in x] == [ + '-0.62348980185873353', + '0.22252093395631440', + '0.90096886790241913'] + assert [str(r) for r in w] == [ + '0.33795476356635433', + '1.0973322242791115', + '1.7063056657443274'] + + x, w = gauss_jacobi(4, Rational(-1, 2), S.Half, 17) + assert [str(r) for r in x] == [ + '-0.76604444311897804', + '-0.17364817766693035', + '0.50000000000000000', + '0.93969262078590838'] + assert [str(r) for r in w] == [ + '0.16333179083642836', + '0.57690240318269103', + '1.0471975511965977', + '1.3541609083740761'] + + x, w = gauss_jacobi(5, Rational(-1, 2), S.Half, 17) + assert [str(r) for r in x] == [ + '-0.84125353283118117', + '-0.41541501300188643', + '0.14231483827328514', + '0.65486073394528506', + '0.95949297361449739'] + assert [str(r) for r in w] == [ + '0.090675770007435372', + '0.33391416373675607', + '0.65248870981926643', + '0.94525424081394926', + '1.1192597692123861'] + + x, w = gauss_jacobi(1, 2, 3, 17) + assert [str(r) for r in x] == ['0.14285714285714286'] + assert [str(r) for r in w] == ['1.0666666666666667'] + + x, w = gauss_jacobi(2, 2, 3, 17) + assert [str(r) for r in x] == [ + '-0.24025307335204215', + '0.46247529557426437'] + assert [str(r) for r in w] == [ + '0.48514624517838660', + '0.58152042148828007'] + + x, w = gauss_jacobi(3, 2, 3, 17) + assert [str(r) for r in x] == [ + '-0.46115870378089762', + '0.10438533038323902', + '0.62950064612493132'] + assert [str(r) for r in w] == [ + '0.17937613502213266', + '0.61595640991147154', + '0.27133412173306246'] + + x, w = gauss_jacobi(4, 2, 3, 17) + assert [str(r) for r in x] == [ + '-0.59903470850824782', + '-0.14761105199952565', + '0.32554377081188859', + '0.72879429738819258'] + assert [str(r) for r in w] == [ + '0.067809641836772187', + '0.38956404952032481', + '0.47995970868024150', + '0.12933326662932816'] + + x, w = gauss_jacobi(5, 2, 3, 17) + assert [str(r) for r in x] == [ + '-0.69045775012676106', + '-0.32651993134900065', + '0.082337849552034905', + '0.47517887061283164', + '0.79279429464422850'] + assert [str(r) for r in w] == [ + '0.027410178066337099', + '0.21291786060364828', + '0.43908437944395081', + '0.32220656547221822', + '0.065047683080512268'] + + +def test_jacobi_precise(): + x, w = gauss_jacobi(3, Rational(-1, 2), S.Half, 40) + assert [str(r) for r in x] == [ + '-0.6234898018587335305250048840042398106323', + '0.2225209339563144042889025644967947594664', + '0.9009688679024191262361023195074450511659'] + assert [str(r) for r in w] == [ + '0.3379547635663543330553835737094171534907', + '1.097332224279111467485302294320899710461', + '1.706305665744327437921957515249186020246'] + + x, w = gauss_jacobi(3, 2, 3, 40) + assert [str(r) for r in x] == [ + '-0.4611587037808976179121958105554375981274', + '0.1043853303832390210914918407615869143233', + '0.6295006461249313240934312425211234110769'] + assert [str(r) for r in w] == [ + '0.1793761350221326596137764371503859752628', + '0.6159564099114715430909548532229749439714', + '0.2713341217330624639619353762933057474325'] + + +def test_lobatto(): + x, w = gauss_lobatto(2, 17) + assert [str(r) for r in x] == [ + '-1', + '1'] + assert [str(r) for r in w] == [ + '1.0000000000000000', + '1.0000000000000000'] + + x, w = gauss_lobatto(3, 17) + assert [str(r) for r in x] == [ + '-1', + '0', + '1'] + assert [str(r) for r in w] == [ + 
'0.33333333333333333', + '1.3333333333333333', + '0.33333333333333333'] + + x, w = gauss_lobatto(4, 17) + assert [str(r) for r in x] == [ + '-1', + '-0.44721359549995794', + '0.44721359549995794', + '1'] + assert [str(r) for r in w] == [ + '0.16666666666666667', + '0.83333333333333333', + '0.83333333333333333', + '0.16666666666666667'] + + x, w = gauss_lobatto(5, 17) + assert [str(r) for r in x] == [ + '-1', + '-0.65465367070797714', + '0', + '0.65465367070797714', + '1'] + assert [str(r) for r in w] == [ + '0.10000000000000000', + '0.54444444444444444', + '0.71111111111111111', + '0.54444444444444444', + '0.10000000000000000'] + + +def test_lobatto_precise(): + x, w = gauss_lobatto(3, 40) + assert [str(r) for r in x] == [ + '-1', + '0', + '1'] + assert [str(r) for r in w] == [ + '0.3333333333333333333333333333333333333333', + '1.333333333333333333333333333333333333333', + '0.3333333333333333333333333333333333333333'] diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_rde.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_rde.py new file mode 100644 index 0000000000000000000000000000000000000000..3c7df5ce05846dc270756cd878870bbff78ff976 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_rde.py @@ -0,0 +1,202 @@ +"""Most of these tests come from the examples in Bronstein's book.""" +from sympy.core.numbers import (I, Rational, oo) +from sympy.core.symbol import symbols +from sympy.polys.polytools import Poly +from sympy.integrals.risch import (DifferentialExtension, + NonElementaryIntegralException) +from sympy.integrals.rde import (order_at, order_at_oo, weak_normalizer, + normal_denom, special_denom, bound_degree, spde, solve_poly_rde, + no_cancel_equal, cancel_primitive, cancel_exp, rischDE) + +from sympy.testing.pytest import raises +from sympy.abc import x, t, z, n + +t0, t1, t2, k = symbols('t:3 k') + + +def test_order_at(): + a = Poly(t**4, t) + b = Poly((t**2 + 1)**3*t, t) + c = Poly((t**2 + 1)**6*t, t) + d = Poly((t**2 + 1)**10*t**10, t) + e = Poly((t**2 + 1)**100*t**37, t) + p1 = Poly(t, t) + p2 = Poly(1 + t**2, t) + assert order_at(a, p1, t) == 4 + assert order_at(b, p1, t) == 1 + assert order_at(c, p1, t) == 1 + assert order_at(d, p1, t) == 10 + assert order_at(e, p1, t) == 37 + assert order_at(a, p2, t) == 0 + assert order_at(b, p2, t) == 3 + assert order_at(c, p2, t) == 6 + assert order_at(d, p1, t) == 10 + assert order_at(e, p2, t) == 100 + assert order_at(Poly(0, t), Poly(t, t), t) is oo + assert order_at_oo(Poly(t**2 - 1, t), Poly(t + 1), t) == \ + order_at_oo(Poly(t - 1, t), Poly(1, t), t) == -1 + assert order_at_oo(Poly(0, t), Poly(1, t), t) is oo + +def test_weak_normalizer(): + a = Poly((1 + x)*t**5 + 4*t**4 + (-1 - 3*x)*t**3 - 4*t**2 + (-2 + 2*x)*t, t) + d = Poly(t**4 - 3*t**2 + 2, t) + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]}) + r = weak_normalizer(a, d, DE, z) + assert r == (Poly(t**5 - t**4 - 4*t**3 + 4*t**2 + 4*t - 4, t, domain='ZZ[x]'), + (Poly((1 + x)*t**2 + x*t, t, domain='ZZ[x]'), + Poly(t + 1, t, domain='ZZ[x]'))) + assert weak_normalizer(r[1][0], r[1][1], DE) == (Poly(1, t), r[1]) + r = weak_normalizer(Poly(1 + t**2), Poly(t**2 - 1, t), DE, z) + assert r == (Poly(t**4 - 2*t**2 + 1, t), (Poly(-3*t**2 + 1, t), Poly(t**2 - 1, t))) + assert weak_normalizer(r[1][0], r[1][1], DE, z) == (Poly(1, t), r[1]) + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1 + t**2)]}) + r = weak_normalizer(Poly(1 + t**2), Poly(t, t), DE, z) + assert r == 
(Poly(t, t), (Poly(0, t), Poly(1, t))) + assert weak_normalizer(r[1][0], r[1][1], DE, z) == (Poly(1, t), r[1]) + + +def test_normal_denom(): + DE = DifferentialExtension(extension={'D': [Poly(1, x)]}) + raises(NonElementaryIntegralException, lambda: normal_denom(Poly(1, x), Poly(1, x), + Poly(1, x), Poly(x, x), DE)) + fa, fd = Poly(t**2 + 1, t), Poly(1, t) + ga, gd = Poly(1, t), Poly(t**2, t) + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]}) + assert normal_denom(fa, fd, ga, gd, DE) == \ + (Poly(t, t), (Poly(t**3 - t**2 + t - 1, t), Poly(1, t)), (Poly(1, t), + Poly(1, t)), Poly(t, t)) + + +def test_special_denom(): + # TODO: add more tests here + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]}) + assert special_denom(Poly(1, t), Poly(t**2, t), Poly(1, t), Poly(t**2 - 1, t), + Poly(t, t), DE) == \ + (Poly(1, t), Poly(t**2 - 1, t), Poly(t**2 - 1, t), Poly(t, t)) +# assert special_denom(Poly(1, t), Poly(2*x, t), Poly((1 + 2*x)*t, t), DE) == 1 + + # issue 3940 + # Note, this isn't a very good test, because the denominator is just 1, + # but at least it tests the exp cancellation case + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(-2*x*t0, t0), + Poly(I*k*t1, t1)]}) + DE.decrement_level() + assert special_denom(Poly(1, t0), Poly(I*k, t0), Poly(1, t0), Poly(t0, t0), + Poly(1, t0), DE) == \ + (Poly(1, t0, domain='ZZ'), Poly(I*k, t0, domain='ZZ_I[k,x]'), + Poly(t0, t0, domain='ZZ'), Poly(1, t0, domain='ZZ')) + + + assert special_denom(Poly(1, t), Poly(t**2, t), Poly(1, t), Poly(t**2 - 1, t), + Poly(t, t), DE, case='tan') == \ + (Poly(1, t, t0, domain='ZZ'), Poly(t**2, t0, t, domain='ZZ[x]'), + Poly(t, t, t0, domain='ZZ'), Poly(1, t0, domain='ZZ')) + + raises(ValueError, lambda: special_denom(Poly(1, t), Poly(t**2, t), Poly(1, t), Poly(t**2 - 1, t), + Poly(t, t), DE, case='unrecognized_case')) + + +def test_bound_degree_fail(): + # Primitive + DE = DifferentialExtension(extension={'D': [Poly(1, x), + Poly(t0/x**2, t0), Poly(1/x, t)]}) + assert bound_degree(Poly(t**2, t), Poly(-(1/x**2*t**2 + 1/x), t), + Poly((2*x - 1)*t**4 + (t0 + x)/x*t**3 - (t0 + 4*x**2)/2*x*t**2 + x*t, + t), DE) == 3 + + +def test_bound_degree(): + # Base + DE = DifferentialExtension(extension={'D': [Poly(1, x)]}) + assert bound_degree(Poly(1, x), Poly(-2*x, x), Poly(1, x), DE) == 0 + + # Primitive (see above test_bound_degree_fail) + # TODO: Add test for when the degree bound becomes larger after limited_integrate + # TODO: Add test for db == da - 1 case + + # Exp + # TODO: Add tests + # TODO: Add test for when the degree becomes larger after parametric_log_deriv() + + # Nonlinear + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]}) + assert bound_degree(Poly(t, t), Poly((t - 1)*(t**2 + 1), t), Poly(1, t), DE) == 0 + + +def test_spde(): + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]}) + raises(NonElementaryIntegralException, lambda: spde(Poly(t, t), Poly((t - 1)*(t**2 + 1), t), Poly(1, t), 0, DE)) + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]}) + assert spde(Poly(t**2 + x*t*2 + x**2, t), Poly(t**2/x**2 + (2/x - 1)*t, t), + Poly(t**2/x**2 + (2/x - 1)*t, t), 0, DE) == \ + (Poly(0, t), Poly(0, t), 0, Poly(0, t), Poly(1, t, domain='ZZ(x)')) + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t0/x**2, t0), Poly(1/x, t)]}) + assert spde(Poly(t**2, t), Poly(-t**2/x**2 - 1/x, t), + Poly((2*x - 1)*t**4 + (t0 + x)/x*t**3 - (t0 + 4*x**2)/(2*x)*t**2 + x*t, t), 3, DE) == \ + (Poly(0, 
t), Poly(0, t), 0, Poly(0, t), + Poly(t0*t**2/2 + x**2*t**2 - x**2*t, t, domain='ZZ(x,t0)')) + DE = DifferentialExtension(extension={'D': [Poly(1, x)]}) + assert spde(Poly(x**2 + x + 1, x), Poly(-2*x - 1, x), Poly(x**5/2 + + 3*x**4/4 + x**3 - x**2 + 1, x), 4, DE) == \ + (Poly(0, x, domain='QQ'), Poly(x/2 - Rational(1, 4), x), 2, Poly(x**2 + x + 1, x), Poly(x*Rational(5, 4), x)) + assert spde(Poly(x**2 + x + 1, x), Poly(-2*x - 1, x), Poly(x**5/2 + + 3*x**4/4 + x**3 - x**2 + 1, x), n, DE) == \ + (Poly(0, x, domain='QQ'), Poly(x/2 - Rational(1, 4), x), -2 + n, Poly(x**2 + x + 1, x), Poly(x*Rational(5, 4), x)) + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1, t)]}) + raises(NonElementaryIntegralException, lambda: spde(Poly((t - 1)*(t**2 + 1)**2, t), Poly((t - 1)*(t**2 + 1), t), Poly(1, t), 0, DE)) + DE = DifferentialExtension(extension={'D': [Poly(1, x)]}) + assert spde(Poly(x**2 - x, x), Poly(1, x), Poly(9*x**4 - 10*x**3 + 2*x**2, x), 4, DE) == \ + (Poly(0, x, domain='ZZ'), Poly(0, x), 0, Poly(0, x), Poly(3*x**3 - 2*x**2, x, domain='QQ')) + assert spde(Poly(x**2 - x, x), Poly(x**2 - 5*x + 3, x), Poly(x**7 - x**6 - 2*x**4 + 3*x**3 - x**2, x), 5, DE) == \ + (Poly(1, x, domain='QQ'), Poly(x + 1, x, domain='QQ'), 1, Poly(x**4 - x**3, x), Poly(x**3 - x**2, x, domain='QQ')) + +def test_solve_poly_rde_no_cancel(): + # deg(b) large + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1 + t**2, t)]}) + assert solve_poly_rde(Poly(t**2 + 1, t), Poly(t**3 + (x + 1)*t**2 + t + x + 2, t), + oo, DE) == Poly(t + x, t) + # deg(b) small + DE = DifferentialExtension(extension={'D': [Poly(1, x)]}) + assert solve_poly_rde(Poly(0, x), Poly(x/2 - Rational(1, 4), x), oo, DE) == \ + Poly(x**2/4 - x/4, x) + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]}) + assert solve_poly_rde(Poly(2, t), Poly(t**2 + 2*t + 3, t), 1, DE) == \ + Poly(t + 1, t, x) + # deg(b) == deg(D) - 1 + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]}) + assert no_cancel_equal(Poly(1 - t, t), + Poly(t**3 + t**2 - 2*x*t - 2*x, t), oo, DE) == \ + (Poly(t**2, t), 1, Poly((-2 - 2*x)*t - 2*x, t)) + + +def test_solve_poly_rde_cancel(): + # exp + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]}) + assert cancel_exp(Poly(2*x, t), Poly(2*x, t), 0, DE) == \ + Poly(1, t) + assert cancel_exp(Poly(2*x, t), Poly((1 + 2*x)*t, t), 1, DE) == \ + Poly(t, t) + # TODO: Add more exp tests, including tests that require is_deriv_in_field() + + # primitive + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)]}) + + # If the DecrementLevel context manager is working correctly, this shouldn't + # cause any problems with the further tests. 
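    # (Editor's illustrative aside, not part of the upstream test file.)
    # In this extension t plays the role of log(x), since Dt = 1/x, and
    # cancel_primitive solves the polynomial Risch differential equation
    # Dp + b*p = q.  In the first cancel_primitive assert below, b = 1 and
    # q = t + 1/x, and the returned p = t checks out because Dp = D(log x) = 1/x.
    # A minimal stand-alone sanity check of that arithmetic in plain calculus
    # terms (a sketch, not used by the tests themselves):
    from sympy import diff, log
    from sympy.abc import x
    assert diff(log(x), x) + log(x) == log(x) + 1/x  # Dp + b*p == q for p = log(x), b = 1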
+ raises(NonElementaryIntegralException, lambda: cancel_primitive(Poly(1, t), Poly(t, t), oo, DE)) + + assert cancel_primitive(Poly(1, t), Poly(t + 1/x, t), 2, DE) == \ + Poly(t, t) + assert cancel_primitive(Poly(4*x, t), Poly(4*x*t**2 + 2*t/x, t), 3, DE) == \ + Poly(t**2, t) + + # TODO: Add more primitive tests, including tests that require is_deriv_in_field() + + +def test_rischDE(): + # TODO: Add more tests for rischDE, including ones from the text + DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]}) + DE.decrement_level() + assert rischDE(Poly(-2*x, x), Poly(1, x), Poly(1 - 2*x - 2*x**2, x), + Poly(1, x), DE) == \ + (Poly(x + 1, x), Poly(1, x)) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_trigonometry.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_trigonometry.py new file mode 100644 index 0000000000000000000000000000000000000000..857c8503c5aa690d66e9cdab49730b4ea655a52c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/tests/test_trigonometry.py @@ -0,0 +1,98 @@ +from sympy.core import Ne, Rational, Symbol +from sympy.functions import sin, cos, tan, csc, sec, cot, log, Piecewise +from sympy.integrals.trigonometry import trigintegrate + +x = Symbol('x') + + +def test_trigintegrate_odd(): + assert trigintegrate(Rational(1), x) == x + assert trigintegrate(x, x) is None + assert trigintegrate(x**2, x) is None + + assert trigintegrate(sin(x), x) == -cos(x) + assert trigintegrate(cos(x), x) == sin(x) + + assert trigintegrate(sin(3*x), x) == -cos(3*x)/3 + assert trigintegrate(cos(3*x), x) == sin(3*x)/3 + + y = Symbol('y') + assert trigintegrate(sin(y*x), x) == Piecewise( + (-cos(y*x)/y, Ne(y, 0)), (0, True)) + assert trigintegrate(cos(y*x), x) == Piecewise( + (sin(y*x)/y, Ne(y, 0)), (x, True)) + assert trigintegrate(sin(y*x)**2, x) == Piecewise( + ((x*y/2 - sin(x*y)*cos(x*y)/2)/y, Ne(y, 0)), (0, True)) + assert trigintegrate(sin(y*x)*cos(y*x), x) == Piecewise( + (sin(x*y)**2/(2*y), Ne(y, 0)), (0, True)) + assert trigintegrate(cos(y*x)**2, x) == Piecewise( + ((x*y/2 + sin(x*y)*cos(x*y)/2)/y, Ne(y, 0)), (x, True)) + + y = Symbol('y', positive=True) + # TODO: remove conds='none' below. For this to work we would have to rule + # out (e.g. by trying solve) the condition y = 0, incompatible with + # y.is_positive being True. 
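    # (Editor's illustrative aside, not part of the upstream test file.)
    # The conds keyword mirrors the one accepted by integrate(): the default
    # 'piecewise' keeps validity guards such as the Ne(y, 0) branches seen in
    # the asserts above, while 'none' drops them and returns only the main
    # branch.  That main branch is indeed an antiderivative whenever y != 0,
    # which the following stand-alone sketch (hypothetical names) checks:
    from sympy import Symbol, cos, diff, simplify, sin
    from sympy.abc import x
    yy = Symbol('y', nonzero=True)
    assert simplify(diff(-cos(yy*x)/yy, x) - sin(yy*x)) == 0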
+ assert trigintegrate(sin(y*x), x, conds='none') == -cos(y*x)/y + assert trigintegrate(cos(y*x), x, conds='none') == sin(y*x)/y + + assert trigintegrate(sin(x)*cos(x), x) == sin(x)**2/2 + assert trigintegrate(sin(x)*cos(x)**2, x) == -cos(x)**3/3 + assert trigintegrate(sin(x)**2*cos(x), x) == sin(x)**3/3 + + # check if it selects right function to substitute, + # so the result is kept simple + assert trigintegrate(sin(x)**7 * cos(x), x) == sin(x)**8/8 + assert trigintegrate(sin(x) * cos(x)**7, x) == -cos(x)**8/8 + + assert trigintegrate(sin(x)**7 * cos(x)**3, x) == \ + -sin(x)**10/10 + sin(x)**8/8 + assert trigintegrate(sin(x)**3 * cos(x)**7, x) == \ + cos(x)**10/10 - cos(x)**8/8 + + # both n, m are odd and -ve, and not necessarily equal + assert trigintegrate(sin(x)**-1*cos(x)**-1, x) == \ + -log(sin(x)**2 - 1)/2 + log(sin(x)) + + +def test_trigintegrate_even(): + assert trigintegrate(sin(x)**2, x) == x/2 - cos(x)*sin(x)/2 + assert trigintegrate(cos(x)**2, x) == x/2 + cos(x)*sin(x)/2 + + assert trigintegrate(sin(3*x)**2, x) == x/2 - cos(3*x)*sin(3*x)/6 + assert trigintegrate(cos(3*x)**2, x) == x/2 + cos(3*x)*sin(3*x)/6 + assert trigintegrate(sin(x)**2 * cos(x)**2, x) == \ + x/8 - sin(2*x)*cos(2*x)/16 + + assert trigintegrate(sin(x)**4 * cos(x)**2, x) == \ + x/16 - sin(x) *cos(x)/16 - sin(x)**3*cos(x)/24 + \ + sin(x)**5*cos(x)/6 + + assert trigintegrate(sin(x)**2 * cos(x)**4, x) == \ + x/16 + cos(x) *sin(x)/16 + cos(x)**3*sin(x)/24 - \ + cos(x)**5*sin(x)/6 + + assert trigintegrate(sin(x)**(-4), x) == -2*cos(x)/(3*sin(x)) \ + - cos(x)/(3*sin(x)**3) + + assert trigintegrate(cos(x)**(-6), x) == sin(x)/(5*cos(x)**5) \ + + 4*sin(x)/(15*cos(x)**3) + 8*sin(x)/(15*cos(x)) + + +def test_trigintegrate_mixed(): + assert trigintegrate(sin(x)*sec(x), x) == -log(cos(x)) + assert trigintegrate(sin(x)*csc(x), x) == x + assert trigintegrate(sin(x)*cot(x), x) == sin(x) + + assert trigintegrate(cos(x)*sec(x), x) == x + assert trigintegrate(cos(x)*csc(x), x) == log(sin(x)) + assert trigintegrate(cos(x)*tan(x), x) == -cos(x) + assert trigintegrate(cos(x)*cot(x), x) == log(cos(x) - 1)/2 \ + - log(cos(x) + 1)/2 + cos(x) + assert trigintegrate(cot(x)*cos(x)**2, x) == log(sin(x)) - sin(x)**2/2 + + +def test_trigintegrate_symbolic(): + n = Symbol('n', integer=True) + assert trigintegrate(cos(x)**n, x) is None + assert trigintegrate(sin(x)**n, x) is None + assert trigintegrate(cot(x)**n, x) is None diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/transforms.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..25335b8172c6865ced5a15b492ec6d38bdf2da9d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/transforms.py @@ -0,0 +1,1588 @@ +""" Integral Transforms """ +from functools import reduce, wraps +from itertools import repeat +from sympy.core import S, pi +from sympy.core.add import Add +from sympy.core.function import ( + AppliedUndef, count_ops, expand, expand_mul, Function) +from sympy.core.mul import Mul +from sympy.core.numbers import igcd, ilcm +from sympy.core.sorting import default_sort_key +from sympy.core.symbol import Dummy +from sympy.core.traversal import postorder_traversal +from sympy.functions.combinatorial.factorials import factorial, rf +from sympy.functions.elementary.complexes import re, arg, Abs +from sympy.functions.elementary.exponential import exp, exp_polar +from sympy.functions.elementary.hyperbolic import cosh, coth, sinh, tanh +from 
sympy.functions.elementary.integers import ceiling +from sympy.functions.elementary.miscellaneous import Max, Min, sqrt +from sympy.functions.elementary.piecewise import piecewise_fold +from sympy.functions.elementary.trigonometric import cos, cot, sin, tan +from sympy.functions.special.bessel import besselj +from sympy.functions.special.delta_functions import Heaviside +from sympy.functions.special.gamma_functions import gamma +from sympy.functions.special.hyper import meijerg +from sympy.integrals import integrate, Integral +from sympy.integrals.meijerint import _dummy +from sympy.logic.boolalg import to_cnf, conjuncts, disjuncts, Or, And +from sympy.polys.polyroots import roots +from sympy.polys.polytools import factor, Poly +from sympy.polys.rootoftools import CRootOf +from sympy.utilities.iterables import iterable +from sympy.utilities.misc import debug + + +########################################################################## +# Helpers / Utilities +########################################################################## + + +class IntegralTransformError(NotImplementedError): + """ + Exception raised in relation to problems computing transforms. + + Explanation + =========== + + This class is mostly used internally; if integrals cannot be computed + objects representing unevaluated transforms are usually returned. + + The hint ``needeval=True`` can be used to disable returning transform + objects, and instead raise this exception if an integral cannot be + computed. + """ + def __init__(self, transform, function, msg): + super().__init__( + "%s Transform could not be computed: %s." % (transform, msg)) + self.function = function + + +class IntegralTransform(Function): + """ + Base class for integral transforms. + + Explanation + =========== + + This class represents unevaluated transforms. + + To implement a concrete transform, derive from this class and implement + the ``_compute_transform(f, x, s, **hints)`` and ``_as_integral(f, x, s)`` + functions. If the transform cannot be computed, raise :obj:`IntegralTransformError`. + + Also set ``cls._name``. For instance, + + >>> from sympy import LaplaceTransform + >>> LaplaceTransform._name + 'Laplace' + + Implement ``self._collapse_extra`` if your function returns more than just a + number and possibly a convergence condition. + """ + + @property + def function(self): + """ The function to be transformed. """ + return self.args[0] + + @property + def function_variable(self): + """ The dependent variable of the function to be transformed. """ + return self.args[1] + + @property + def transform_variable(self): + """ The independent transform variable. """ + return self.args[2] + + @property + def free_symbols(self): + """ + This method returns the symbols that will exist when the transform + is evaluated. 
+ """ + return self.function.free_symbols.union({self.transform_variable}) \ + - {self.function_variable} + + def _compute_transform(self, f, x, s, **hints): + raise NotImplementedError + + def _as_integral(self, f, x, s): + raise NotImplementedError + + def _collapse_extra(self, extra): + cond = And(*extra) + if cond == False: + raise IntegralTransformError(self.__class__.name, None, '') + return cond + + def _try_directly(self, **hints): + T = None + try_directly = not any(func.has(self.function_variable) + for func in self.function.atoms(AppliedUndef)) + if try_directly: + try: + T = self._compute_transform(self.function, + self.function_variable, self.transform_variable, **hints) + except IntegralTransformError: + debug('[IT _try ] Caught IntegralTransformError, returns None') + T = None + + fn = self.function + if not fn.is_Add: + fn = expand_mul(fn) + return fn, T + + def doit(self, **hints): + """ + Try to evaluate the transform in closed form. + + Explanation + =========== + + This general function handles linearity, but apart from that leaves + pretty much everything to _compute_transform. + + Standard hints are the following: + + - ``simplify``: whether or not to simplify the result + - ``noconds``: if True, do not return convergence conditions + - ``needeval``: if True, raise IntegralTransformError instead of + returning IntegralTransform objects + + The default values of these hints depend on the concrete transform, + usually the default is + ``(simplify, noconds, needeval) = (True, False, False)``. + """ + needeval = hints.pop('needeval', False) + simplify = hints.pop('simplify', True) + hints['simplify'] = simplify + + fn, T = self._try_directly(**hints) + + if T is not None: + return T + + if fn.is_Add: + hints['needeval'] = needeval + res = [self.__class__(*([x] + list(self.args[1:]))).doit(**hints) + for x in fn.args] + extra = [] + ress = [] + for x in res: + if not isinstance(x, tuple): + x = [x] + ress.append(x[0]) + if len(x) == 2: + # only a condition + extra.append(x[1]) + elif len(x) > 2: + # some region parameters and a condition (Mellin, Laplace) + extra += [x[1:]] + if simplify==True: + res = Add(*ress).simplify() + else: + res = Add(*ress) + if not extra: + return res + try: + extra = self._collapse_extra(extra) + if iterable(extra): + return (res,) + tuple(extra) + else: + return (res, extra) + except IntegralTransformError: + pass + + if needeval: + raise IntegralTransformError( + self.__class__._name, self.function, 'needeval') + + # TODO handle derivatives etc + + # pull out constant coefficients + coeff, rest = fn.as_coeff_mul(self.function_variable) + return coeff*self.__class__(*([Mul(*rest)] + list(self.args[1:]))) + + @property + def as_integral(self): + return self._as_integral(self.function, self.function_variable, + self.transform_variable) + + def _eval_rewrite_as_Integral(self, *args, **kwargs): + return self.as_integral + + +def _simplify(expr, doit): + if doit: + from sympy.simplify import simplify + from sympy.simplify.powsimp import powdenest + return simplify(powdenest(piecewise_fold(expr), polar=True)) + return expr + + +def _noconds_(default): + """ + This is a decorator generator for dropping convergence conditions. + + Explanation + =========== + + Suppose you define a function ``transform(*args)`` which returns a tuple of + the form ``(result, cond1, cond2, ...)``. + + Decorating it ``@_noconds_(default)`` will add a new keyword argument + ``noconds`` to it. 
If ``noconds=True``, the return value will be altered to + be only ``result``, whereas if ``noconds=False`` the return value will not + be altered. + + The default value of the ``noconds`` keyword will be ``default`` (i.e. the + argument of this function). + """ + def make_wrapper(func): + @wraps(func) + def wrapper(*args, noconds=default, **kwargs): + res = func(*args, **kwargs) + if noconds: + return res[0] + return res + return wrapper + return make_wrapper +_noconds = _noconds_(False) + + +########################################################################## +# Mellin Transform +########################################################################## + +def _default_integrator(f, x): + return integrate(f, (x, S.Zero, S.Infinity)) + + +@_noconds +def _mellin_transform(f, x, s_, integrator=_default_integrator, simplify=True): + """ Backend function to compute Mellin transforms. """ + # We use a fresh dummy, because assumptions on s might drop conditions on + # convergence of the integral. + s = _dummy('s', 'mellin-transform', f) + F = integrator(x**(s - 1) * f, x) + + if not F.has(Integral): + return _simplify(F.subs(s, s_), simplify), (S.NegativeInfinity, S.Infinity), S.true + + if not F.is_Piecewise: # XXX can this work if integration gives continuous result now? + raise IntegralTransformError('Mellin', f, 'could not compute integral') + + F, cond = F.args[0] + if F.has(Integral): + raise IntegralTransformError( + 'Mellin', f, 'integral in unexpected form') + + def process_conds(cond): + """ + Turn ``cond`` into a strip (a, b), and auxiliary conditions. + """ + from sympy.solvers.inequalities import _solve_inequality + a = S.NegativeInfinity + b = S.Infinity + aux = S.true + conds = conjuncts(to_cnf(cond)) + t = Dummy('t', real=True) + for c in conds: + a_ = S.Infinity + b_ = S.NegativeInfinity + aux_ = [] + for d in disjuncts(c): + d_ = d.replace( + re, lambda x: x.as_real_imag()[0]).subs(re(s), t) + if not d.is_Relational or \ + d.rel_op in ('==', '!=') \ + or d_.has(s) or not d_.has(t): + aux_ += [d] + continue + soln = _solve_inequality(d_, t) + if not soln.is_Relational or \ + soln.rel_op in ('==', '!='): + aux_ += [d] + continue + if soln.lts == t: + b_ = Max(soln.gts, b_) + else: + a_ = Min(soln.lts, a_) + if a_ is not S.Infinity and a_ != b: + a = Max(a_, a) + elif b_ is not S.NegativeInfinity and b_ != a: + b = Min(b_, b) + else: + aux = And(aux, Or(*aux_)) + return a, b, aux + + conds = [process_conds(c) for c in disjuncts(cond)] + conds = [x for x in conds if x[2] != False] + conds.sort(key=lambda x: (x[0] - x[1], count_ops(x[2]))) + + if not conds: + raise IntegralTransformError('Mellin', f, 'no convergence found') + + a, b, aux = conds[0] + return _simplify(F.subs(s, s_), simplify), (a, b), aux + + +class MellinTransform(IntegralTransform): + """ + Class representing unevaluated Mellin transforms. + + For usage of this class, see the :class:`IntegralTransform` docstring. + + For how to compute Mellin transforms, see the :func:`mellin_transform` + docstring. 
+ """ + + _name = 'Mellin' + + def _compute_transform(self, f, x, s, **hints): + return _mellin_transform(f, x, s, **hints) + + def _as_integral(self, f, x, s): + return Integral(f*x**(s - 1), (x, S.Zero, S.Infinity)) + + def _collapse_extra(self, extra): + a = [] + b = [] + cond = [] + for (sa, sb), c in extra: + a += [sa] + b += [sb] + cond += [c] + res = (Max(*a), Min(*b)), And(*cond) + if (res[0][0] >= res[0][1]) == True or res[1] == False: + raise IntegralTransformError( + 'Mellin', None, 'no combined convergence.') + return res + + +def mellin_transform(f, x, s, **hints): + r""" + Compute the Mellin transform `F(s)` of `f(x)`, + + .. math :: F(s) = \int_0^\infty x^{s-1} f(x) \mathrm{d}x. + + For all "sensible" functions, this converges absolutely in a strip + `a < \operatorname{Re}(s) < b`. + + Explanation + =========== + + The Mellin transform is related via change of variables to the Fourier + transform, and also to the (bilateral) Laplace transform. + + This function returns ``(F, (a, b), cond)`` + where ``F`` is the Mellin transform of ``f``, ``(a, b)`` is the fundamental strip + (as above), and ``cond`` are auxiliary convergence conditions. + + If the integral cannot be computed in closed form, this function returns + an unevaluated :class:`MellinTransform` object. + + For a description of possible hints, refer to the docstring of + :func:`sympy.integrals.transforms.IntegralTransform.doit`. If ``noconds=False``, + then only `F` will be returned (i.e. not ``cond``, and also not the strip + ``(a, b)``). + + Examples + ======== + + >>> from sympy import mellin_transform, exp + >>> from sympy.abc import x, s + >>> mellin_transform(exp(-x), x, s) + (gamma(s), (0, oo), True) + + See Also + ======== + + inverse_mellin_transform, laplace_transform, fourier_transform + hankel_transform, inverse_hankel_transform + """ + return MellinTransform(f, x, s).doit(**hints) + + +def _rewrite_sin(m_n, s, a, b): + """ + Re-write the sine function ``sin(m*s + n)`` as gamma functions, compatible + with the strip (a, b). + + Return ``(gamma1, gamma2, fac)`` so that ``f == fac/(gamma1 * gamma2)``. + + Examples + ======== + + >>> from sympy.integrals.transforms import _rewrite_sin + >>> from sympy import pi, S + >>> from sympy.abc import s + >>> _rewrite_sin((pi, 0), s, 0, 1) + (gamma(s), gamma(1 - s), pi) + >>> _rewrite_sin((pi, 0), s, 1, 0) + (gamma(s - 1), gamma(2 - s), -pi) + >>> _rewrite_sin((pi, 0), s, -1, 0) + (gamma(s + 1), gamma(-s), -pi) + >>> _rewrite_sin((pi, pi/2), s, S(1)/2, S(3)/2) + (gamma(s - 1/2), gamma(3/2 - s), -pi) + >>> _rewrite_sin((pi, pi), s, 0, 1) + (gamma(s), gamma(1 - s), -pi) + >>> _rewrite_sin((2*pi, 0), s, 0, S(1)/2) + (gamma(2*s), gamma(1 - 2*s), pi) + >>> _rewrite_sin((2*pi, 0), s, S(1)/2, 1) + (gamma(2*s - 1), gamma(2 - 2*s), -pi) + """ + # (This is a separate function because it is moderately complicated, + # and I want to doctest it.) + # We want to use pi/sin(pi*x) = gamma(x)*gamma(1-x). + # But there is one comlication: the gamma functions determine the + # inegration contour in the definition of the G-function. Usually + # it would not matter if this is slightly shifted, unless this way + # we create an undefined function! + # So we try to write this in such a way that the gammas are + # eminently on the right side of the strip. 
+ m, n = m_n + + m = expand_mul(m/pi) + n = expand_mul(n/pi) + r = ceiling(-m*a - n.as_real_imag()[0]) # Don't use re(n), does not expand + return gamma(m*s + n + r), gamma(1 - n - r - m*s), (-1)**r*pi + + +class MellinTransformStripError(ValueError): + """ + Exception raised by _rewrite_gamma. Mainly for internal use. + """ + pass + + +def _rewrite_gamma(f, s, a, b): + """ + Try to rewrite the product f(s) as a product of gamma functions, + so that the inverse Mellin transform of f can be expressed as a meijer + G function. + + Explanation + =========== + + Return (an, ap), (bm, bq), arg, exp, fac such that + G((an, ap), (bm, bq), arg/z**exp)*fac is the inverse Mellin transform of f(s). + + Raises IntegralTransformError or MellinTransformStripError on failure. + + It is asserted that f has no poles in the fundamental strip designated by + (a, b). One of a and b is allowed to be None. The fundamental strip is + important, because it determines the inversion contour. + + This function can handle exponentials, linear factors, trigonometric + functions. + + This is a helper function for inverse_mellin_transform that will not + attempt any transformations on f. + + Examples + ======== + + >>> from sympy.integrals.transforms import _rewrite_gamma + >>> from sympy.abc import s + >>> from sympy import oo + >>> _rewrite_gamma(s*(s+3)*(s-1), s, -oo, oo) + (([], [-3, 0, 1]), ([-2, 1, 2], []), 1, 1, -1) + >>> _rewrite_gamma((s-1)**2, s, -oo, oo) + (([], [1, 1]), ([2, 2], []), 1, 1, 1) + + Importance of the fundamental strip: + + >>> _rewrite_gamma(1/s, s, 0, oo) + (([1], []), ([], [0]), 1, 1, 1) + >>> _rewrite_gamma(1/s, s, None, oo) + (([1], []), ([], [0]), 1, 1, 1) + >>> _rewrite_gamma(1/s, s, 0, None) + (([1], []), ([], [0]), 1, 1, 1) + >>> _rewrite_gamma(1/s, s, -oo, 0) + (([], [1]), ([0], []), 1, 1, -1) + >>> _rewrite_gamma(1/s, s, None, 0) + (([], [1]), ([0], []), 1, 1, -1) + >>> _rewrite_gamma(1/s, s, -oo, None) + (([], [1]), ([0], []), 1, 1, -1) + + >>> _rewrite_gamma(2**(-s+3), s, -oo, oo) + (([], []), ([], []), 1/2, 1, 8) + """ + # Our strategy will be as follows: + # 1) Guess a constant c such that the inversion integral should be + # performed wrt s'=c*s (instead of plain s). Write s for s'. + # 2) Process all factors, rewrite them independently as gamma functions in + # argument s, or exponentials of s. + # 3) Try to transform all gamma functions s.t. they have argument + # a+s or a-s. + # 4) Check that the resulting G function parameters are valid. + # 5) Combine all the exponentials. + + a_, b_ = S([a, b]) + + def left(c, is_numer): + """ + Decide whether pole at c lies to the left of the fundamental strip. 
+ """ + # heuristically, this is the best chance for us to solve the inequalities + c = expand(re(c)) + if a_ is None and b_ is S.Infinity: + return True + if a_ is None: + return c < b_ + if b_ is None: + return c <= a_ + if (c >= b_) == True: + return False + if (c <= a_) == True: + return True + if is_numer: + return None + if a_.free_symbols or b_.free_symbols or c.free_symbols: + return None # XXX + #raise IntegralTransformError('Inverse Mellin', f, + # 'Could not determine position of singularity %s' + # ' relative to fundamental strip' % c) + raise MellinTransformStripError('Pole inside critical strip?') + + # 1) + s_multipliers = [] + for g in f.atoms(gamma): + if not g.has(s): + continue + arg = g.args[0] + if arg.is_Add: + arg = arg.as_independent(s)[1] + coeff, _ = arg.as_coeff_mul(s) + s_multipliers += [coeff] + for g in f.atoms(sin, cos, tan, cot): + if not g.has(s): + continue + arg = g.args[0] + if arg.is_Add: + arg = arg.as_independent(s)[1] + coeff, _ = arg.as_coeff_mul(s) + s_multipliers += [coeff/pi] + s_multipliers = [Abs(x) if x.is_extended_real else x for x in s_multipliers] + common_coefficient = S.One + for x in s_multipliers: + if not x.is_Rational: + common_coefficient = x + break + s_multipliers = [x/common_coefficient for x in s_multipliers] + if not (all(x.is_Rational for x in s_multipliers) and + common_coefficient.is_extended_real): + raise IntegralTransformError("Gamma", None, "Nonrational multiplier") + s_multiplier = common_coefficient/reduce(ilcm, [S(x.q) + for x in s_multipliers], S.One) + if s_multiplier == common_coefficient: + if len(s_multipliers) == 0: + s_multiplier = common_coefficient + else: + s_multiplier = common_coefficient \ + *reduce(igcd, [S(x.p) for x in s_multipliers]) + + f = f.subs(s, s/s_multiplier) + fac = S.One/s_multiplier + exponent = S.One/s_multiplier + if a_ is not None: + a_ *= s_multiplier + if b_ is not None: + b_ *= s_multiplier + + # 2) + numer, denom = f.as_numer_denom() + numer = Mul.make_args(numer) + denom = Mul.make_args(denom) + args = list(zip(numer, repeat(True))) + list(zip(denom, repeat(False))) + + facs = [] + dfacs = [] + # *_gammas will contain pairs (a, c) representing Gamma(a*s + c) + numer_gammas = [] + denom_gammas = [] + # exponentials will contain bases for exponentials of s + exponentials = [] + + def exception(fact): + return IntegralTransformError("Inverse Mellin", f, "Unrecognised form '%s'." % fact) + while args: + fact, is_numer = args.pop() + if is_numer: + ugammas, lgammas = numer_gammas, denom_gammas + ufacs = facs + else: + ugammas, lgammas = denom_gammas, numer_gammas + ufacs = dfacs + + def linear_arg(arg): + """ Test if arg is of form a*s+b, raise exception if not. """ + if not arg.is_polynomial(s): + raise exception(fact) + p = Poly(arg, s) + if p.degree() != 1: + raise exception(fact) + return p.all_coeffs() + + # constants + if not fact.has(s): + ufacs += [fact] + # exponentials + elif fact.is_Pow or isinstance(fact, exp): + if fact.is_Pow: + base = fact.base + exp_ = fact.exp + else: + base = exp_polar(1) + exp_ = fact.exp + if exp_.is_Integer: + cond = is_numer + if exp_ < 0: + cond = not cond + args += [(base, cond)]*Abs(exp_) + continue + elif not base.has(s): + a, b = linear_arg(exp_) + if not is_numer: + base = 1/base + exponentials += [base**a] + facs += [base**b] + else: + raise exception(fact) + # linear factors + elif fact.is_polynomial(s): + p = Poly(fact, s) + if p.degree() != 1: + # We completely factor the poly. For this we need the roots. 
+ # Now roots() only works in some cases (low degree), and CRootOf + # only works without parameters. So try both... + coeff = p.LT()[1] + rs = roots(p, s) + if len(rs) != p.degree(): + rs = CRootOf.all_roots(p) + ufacs += [coeff] + args += [(s - c, is_numer) for c in rs] + continue + a, c = p.all_coeffs() + ufacs += [a] + c /= -a + # Now need to convert s - c + if left(c, is_numer): + ugammas += [(S.One, -c + 1)] + lgammas += [(S.One, -c)] + else: + ufacs += [-1] + ugammas += [(S.NegativeOne, c + 1)] + lgammas += [(S.NegativeOne, c)] + elif isinstance(fact, gamma): + a, b = linear_arg(fact.args[0]) + if is_numer: + if (a > 0 and (left(-b/a, is_numer) == False)) or \ + (a < 0 and (left(-b/a, is_numer) == True)): + raise NotImplementedError( + 'Gammas partially over the strip.') + ugammas += [(a, b)] + elif isinstance(fact, sin): + # We try to re-write all trigs as gammas. This is not in + # general the best strategy, since sometimes this is impossible, + # but rewriting as exponentials would work. However trig functions + # in inverse mellin transforms usually all come from simplifying + # gamma terms, so this should work. + a = fact.args[0] + if is_numer: + # No problem with the poles. + gamma1, gamma2, fac_ = gamma(a/pi), gamma(1 - a/pi), pi + else: + gamma1, gamma2, fac_ = _rewrite_sin(linear_arg(a), s, a_, b_) + args += [(gamma1, not is_numer), (gamma2, not is_numer)] + ufacs += [fac_] + elif isinstance(fact, tan): + a = fact.args[0] + args += [(sin(a, evaluate=False), is_numer), + (sin(pi/2 - a, evaluate=False), not is_numer)] + elif isinstance(fact, cos): + a = fact.args[0] + args += [(sin(pi/2 - a, evaluate=False), is_numer)] + elif isinstance(fact, cot): + a = fact.args[0] + args += [(sin(pi/2 - a, evaluate=False), is_numer), + (sin(a, evaluate=False), not is_numer)] + else: + raise exception(fact) + + fac *= Mul(*facs)/Mul(*dfacs) + + # 3) + an, ap, bm, bq = [], [], [], [] + for gammas, plus, minus, is_numer in [(numer_gammas, an, bm, True), + (denom_gammas, bq, ap, False)]: + while gammas: + a, c = gammas.pop() + if a != -1 and a != +1: + # We use the gamma function multiplication theorem. + p = Abs(S(a)) + newa = a/p + newc = c/p + if not a.is_Integer: + raise TypeError("a is not an integer") + for k in range(p): + gammas += [(newa, newc + k/p)] + if is_numer: + fac *= (2*pi)**((1 - p)/2) * p**(c - S.Half) + exponentials += [p**a] + else: + fac /= (2*pi)**((1 - p)/2) * p**(c - S.Half) + exponentials += [p**(-a)] + continue + if a == +1: + plus.append(1 - c) + else: + minus.append(c) + + # 4) + # TODO + + # 5) + arg = Mul(*exponentials) + + # for testability, sort the arguments + an.sort(key=default_sort_key) + ap.sort(key=default_sort_key) + bm.sort(key=default_sort_key) + bq.sort(key=default_sort_key) + + return (an, ap), (bm, bq), arg, exponent, fac + + +@_noconds_(True) +def _inverse_mellin_transform(F, s, x_, strip, as_meijerg=False): + """ A helper for the real inverse_mellin_transform function, this one here + assumes x to be real and positive. """ + x = _dummy('t', 'inverse-mellin-transform', F, positive=True) + # Actually, we won't try integration at all. Instead we use the definition + # of the Meijer G function as a fairly general inverse mellin transform. 
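    # (Editor's illustrative aside, not part of the upstream code.)
    # Once F is rewritten as a product of gammas, the inverse Mellin transform
    # is read off as a Meijer G-function and then expanded by hyperexpand.
    # The simplest instance of that machinery is the classical transform pair
    # gamma(s) <-> exp(-x), which can be reproduced by hand (sketch):
    #
    #   >>> from sympy import hyperexpand, meijerg
    #   >>> from sympy.abc import z
    #   >>> hyperexpand(meijerg([[], []], [[0], []], z))
    #   exp(-z)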
+ F = F.rewrite(gamma) + for g in [factor(F), expand_mul(F), expand(F)]: + if g.is_Add: + # do all terms separately + ress = [_inverse_mellin_transform(G, s, x, strip, as_meijerg, + noconds=False) + for G in g.args] + conds = [p[1] for p in ress] + ress = [p[0] for p in ress] + res = Add(*ress) + if not as_meijerg: + res = factor(res, gens=res.atoms(Heaviside)) + return res.subs(x, x_), And(*conds) + + try: + a, b, C, e, fac = _rewrite_gamma(g, s, strip[0], strip[1]) + except IntegralTransformError: + continue + try: + G = meijerg(a, b, C/x**e) + except ValueError: + continue + if as_meijerg: + h = G + else: + try: + from sympy.simplify import hyperexpand + h = hyperexpand(G) + except NotImplementedError: + raise IntegralTransformError( + 'Inverse Mellin', F, 'Could not calculate integral') + + if h.is_Piecewise and len(h.args) == 3: + # XXX we break modularity here! + h = Heaviside(x - Abs(C))*h.args[0].args[0] \ + + Heaviside(Abs(C) - x)*h.args[1].args[0] + # We must ensure that the integral along the line we want converges, + # and return that value. + # See [L], 5.2 + cond = [Abs(arg(G.argument)) < G.delta*pi] + # Note: we allow ">=" here, this corresponds to convergence if we let + # limits go to oo symmetrically. ">" corresponds to absolute convergence. + cond += [And(Or(len(G.ap) != len(G.bq), 0 >= re(G.nu) + 1), + Abs(arg(G.argument)) == G.delta*pi)] + cond = Or(*cond) + if cond == False: + raise IntegralTransformError( + 'Inverse Mellin', F, 'does not converge') + return (h*fac).subs(x, x_), cond + + raise IntegralTransformError('Inverse Mellin', F, '') + +_allowed = None + + +class InverseMellinTransform(IntegralTransform): + """ + Class representing unevaluated inverse Mellin transforms. + + For usage of this class, see the :class:`IntegralTransform` docstring. + + For how to compute inverse Mellin transforms, see the + :func:`inverse_mellin_transform` docstring. + """ + + _name = 'Inverse Mellin' + _none_sentinel = Dummy('None') + _c = Dummy('c') + + def __new__(cls, F, s, x, a, b, **opts): + if a is None: + a = InverseMellinTransform._none_sentinel + if b is None: + b = InverseMellinTransform._none_sentinel + return IntegralTransform.__new__(cls, F, s, x, a, b, **opts) + + @property + def fundamental_strip(self): + a, b = self.args[3], self.args[4] + if a is InverseMellinTransform._none_sentinel: + a = None + if b is InverseMellinTransform._none_sentinel: + b = None + return a, b + + def _compute_transform(self, F, s, x, **hints): + # IntegralTransform's doit will cause this hint to exist, but + # InverseMellinTransform should ignore it + hints.pop('simplify', True) + global _allowed + if _allowed is None: + _allowed = { + exp, gamma, sin, cos, tan, cot, cosh, sinh, tanh, coth, + factorial, rf} + for f in postorder_traversal(F): + if f.is_Function and f.has(s) and f.func not in _allowed: + raise IntegralTransformError('Inverse Mellin', F, + 'Component %s not recognised.' % f) + strip = self.fundamental_strip + return _inverse_mellin_transform(F, s, x, strip, **hints) + + def _as_integral(self, F, s, x): + c = self.__class__._c + return Integral(F*x**(-s), (s, c - S.ImaginaryUnit*S.Infinity, c + + S.ImaginaryUnit*S.Infinity))/(2*S.Pi*S.ImaginaryUnit) + + +def inverse_mellin_transform(F, s, x, strip, **hints): + r""" + Compute the inverse Mellin transform of `F(s)` over the fundamental + strip given by ``strip=(a, b)``. + + Explanation + =========== + + This can be defined as + + .. 
math:: f(x) = \frac{1}{2\pi i} \int_{c - i\infty}^{c + i\infty} x^{-s} F(s) \mathrm{d}s, + + for any `c` in the fundamental strip. Under certain regularity + conditions on `F` and/or `f`, + this recovers `f` from its Mellin transform `F` + (and vice versa), for positive real `x`. + + One of `a` or `b` may be passed as ``None``; a suitable `c` will be + inferred. + + If the integral cannot be computed in closed form, this function returns + an unevaluated :class:`InverseMellinTransform` object. + + Note that this function will assume x to be positive and real, regardless + of the SymPy assumptions! + + For a description of possible hints, refer to the docstring of + :func:`sympy.integrals.transforms.IntegralTransform.doit`. + + Examples + ======== + + >>> from sympy import inverse_mellin_transform, oo, gamma + >>> from sympy.abc import x, s + >>> inverse_mellin_transform(gamma(s), s, x, (0, oo)) + exp(-x) + + The fundamental strip matters: + + >>> f = 1/(s**2 - 1) + >>> inverse_mellin_transform(f, s, x, (-oo, -1)) + x*(1 - 1/x**2)*Heaviside(x - 1)/2 + >>> inverse_mellin_transform(f, s, x, (-1, 1)) + -x*Heaviside(1 - x)/2 - Heaviside(x - 1)/(2*x) + >>> inverse_mellin_transform(f, s, x, (1, oo)) + (1/2 - x**2/2)*Heaviside(1 - x)/x + + See Also + ======== + + mellin_transform + hankel_transform, inverse_hankel_transform + """ + return InverseMellinTransform(F, s, x, strip[0], strip[1]).doit(**hints) + + +########################################################################## +# Fourier Transform +########################################################################## + +@_noconds_(True) +def _fourier_transform(f, x, k, a, b, name, simplify=True): + r""" + Compute a general Fourier-type transform + + .. math:: + + F(k) = a \int_{-\infty}^{\infty} e^{bixk} f(x)\, dx. + + For suitable choice of *a* and *b*, this reduces to the standard Fourier + and inverse Fourier transforms. + """ + F = integrate(a*f*exp(b*S.ImaginaryUnit*x*k), (x, S.NegativeInfinity, S.Infinity)) + + if not F.has(Integral): + return _simplify(F, simplify), S.true + + integral_f = integrate(f, (x, S.NegativeInfinity, S.Infinity)) + if integral_f in (S.NegativeInfinity, S.Infinity, S.NaN) or integral_f.has(Integral): + raise IntegralTransformError(name, f, 'function not integrable on real axis') + + if not F.is_Piecewise: + raise IntegralTransformError(name, f, 'could not compute integral') + + F, cond = F.args[0] + if F.has(Integral): + raise IntegralTransformError(name, f, 'integral in unexpected form') + + return _simplify(F, simplify), cond + + +class FourierTypeTransform(IntegralTransform): + """ Base class for Fourier transforms.""" + + def a(self): + raise NotImplementedError( + "Class %s must implement a(self) but does not" % self.__class__) + + def b(self): + raise NotImplementedError( + "Class %s must implement b(self) but does not" % self.__class__) + + def _compute_transform(self, f, x, k, **hints): + return _fourier_transform(f, x, k, + self.a(), self.b(), + self.__class__._name, **hints) + + def _as_integral(self, f, x, k): + a = self.a() + b = self.b() + return Integral(a*f*exp(b*S.ImaginaryUnit*x*k), (x, S.NegativeInfinity, S.Infinity)) + + +class FourierTransform(FourierTypeTransform): + """ + Class representing unevaluated Fourier transforms. + + For usage of this class, see the :class:`IntegralTransform` docstring. + + For how to compute Fourier transforms, see the :func:`fourier_transform` + docstring. 
+ """ + + _name = 'Fourier' + + def a(self): + return 1 + + def b(self): + return -2*S.Pi + + +def fourier_transform(f, x, k, **hints): + r""" + Compute the unitary, ordinary-frequency Fourier transform of ``f``, defined + as + + .. math:: F(k) = \int_{-\infty}^\infty f(x) e^{-2\pi i x k} \mathrm{d} x. + + Explanation + =========== + + If the transform cannot be computed in closed form, this + function returns an unevaluated :class:`FourierTransform` object. + + For other Fourier transform conventions, see the function + :func:`sympy.integrals.transforms._fourier_transform`. + + For a description of possible hints, refer to the docstring of + :func:`sympy.integrals.transforms.IntegralTransform.doit`. + Note that for this transform, by default ``noconds=True``. + + Examples + ======== + + >>> from sympy import fourier_transform, exp + >>> from sympy.abc import x, k + >>> fourier_transform(exp(-x**2), x, k) + sqrt(pi)*exp(-pi**2*k**2) + >>> fourier_transform(exp(-x**2), x, k, noconds=False) + (sqrt(pi)*exp(-pi**2*k**2), True) + + See Also + ======== + + inverse_fourier_transform + sine_transform, inverse_sine_transform + cosine_transform, inverse_cosine_transform + hankel_transform, inverse_hankel_transform + mellin_transform, laplace_transform + """ + return FourierTransform(f, x, k).doit(**hints) + + +class InverseFourierTransform(FourierTypeTransform): + """ + Class representing unevaluated inverse Fourier transforms. + + For usage of this class, see the :class:`IntegralTransform` docstring. + + For how to compute inverse Fourier transforms, see the + :func:`inverse_fourier_transform` docstring. + """ + + _name = 'Inverse Fourier' + + def a(self): + return 1 + + def b(self): + return 2*S.Pi + + +def inverse_fourier_transform(F, k, x, **hints): + r""" + Compute the unitary, ordinary-frequency inverse Fourier transform of `F`, + defined as + + .. math:: f(x) = \int_{-\infty}^\infty F(k) e^{2\pi i x k} \mathrm{d} k. + + Explanation + =========== + + If the transform cannot be computed in closed form, this + function returns an unevaluated :class:`InverseFourierTransform` object. + + For other Fourier transform conventions, see the function + :func:`sympy.integrals.transforms._fourier_transform`. + + For a description of possible hints, refer to the docstring of + :func:`sympy.integrals.transforms.IntegralTransform.doit`. + Note that for this transform, by default ``noconds=True``. + + Examples + ======== + + >>> from sympy import inverse_fourier_transform, exp, sqrt, pi + >>> from sympy.abc import x, k + >>> inverse_fourier_transform(sqrt(pi)*exp(-(pi*k)**2), k, x) + exp(-x**2) + >>> inverse_fourier_transform(sqrt(pi)*exp(-(pi*k)**2), k, x, noconds=False) + (exp(-x**2), True) + + See Also + ======== + + fourier_transform + sine_transform, inverse_sine_transform + cosine_transform, inverse_cosine_transform + hankel_transform, inverse_hankel_transform + mellin_transform, laplace_transform + """ + return InverseFourierTransform(F, k, x).doit(**hints) + + +########################################################################## +# Fourier Sine and Cosine Transform +########################################################################## + +@_noconds_(True) +def _sine_cosine_transform(f, x, k, a, b, K, name, simplify=True): + """ + Compute a general sine or cosine-type transform + F(k) = a int_0^oo b*sin(x*k) f(x) dx. + F(k) = a int_0^oo b*cos(x*k) f(x) dx. + + For suitable choice of a and b, this reduces to the standard sine/cosine + and inverse sine/cosine transforms. 
+ """ + F = integrate(a*f*K(b*x*k), (x, S.Zero, S.Infinity)) + + if not F.has(Integral): + return _simplify(F, simplify), S.true + + if not F.is_Piecewise: + raise IntegralTransformError(name, f, 'could not compute integral') + + F, cond = F.args[0] + if F.has(Integral): + raise IntegralTransformError(name, f, 'integral in unexpected form') + + return _simplify(F, simplify), cond + + +class SineCosineTypeTransform(IntegralTransform): + """ + Base class for sine and cosine transforms. + Specify cls._kern. + """ + + def a(self): + raise NotImplementedError( + "Class %s must implement a(self) but does not" % self.__class__) + + def b(self): + raise NotImplementedError( + "Class %s must implement b(self) but does not" % self.__class__) + + + def _compute_transform(self, f, x, k, **hints): + return _sine_cosine_transform(f, x, k, + self.a(), self.b(), + self.__class__._kern, + self.__class__._name, **hints) + + def _as_integral(self, f, x, k): + a = self.a() + b = self.b() + K = self.__class__._kern + return Integral(a*f*K(b*x*k), (x, S.Zero, S.Infinity)) + + +class SineTransform(SineCosineTypeTransform): + """ + Class representing unevaluated sine transforms. + + For usage of this class, see the :class:`IntegralTransform` docstring. + + For how to compute sine transforms, see the :func:`sine_transform` + docstring. + """ + + _name = 'Sine' + _kern = sin + + def a(self): + return sqrt(2)/sqrt(pi) + + def b(self): + return S.One + + +def sine_transform(f, x, k, **hints): + r""" + Compute the unitary, ordinary-frequency sine transform of `f`, defined + as + + .. math:: F(k) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty f(x) \sin(2\pi x k) \mathrm{d} x. + + Explanation + =========== + + If the transform cannot be computed in closed form, this + function returns an unevaluated :class:`SineTransform` object. + + For a description of possible hints, refer to the docstring of + :func:`sympy.integrals.transforms.IntegralTransform.doit`. + Note that for this transform, by default ``noconds=True``. + + Examples + ======== + + >>> from sympy import sine_transform, exp + >>> from sympy.abc import x, k, a + >>> sine_transform(x*exp(-a*x**2), x, k) + sqrt(2)*k*exp(-k**2/(4*a))/(4*a**(3/2)) + >>> sine_transform(x**(-a), x, k) + 2**(1/2 - a)*k**(a - 1)*gamma(1 - a/2)/gamma(a/2 + 1/2) + + See Also + ======== + + fourier_transform, inverse_fourier_transform + inverse_sine_transform + cosine_transform, inverse_cosine_transform + hankel_transform, inverse_hankel_transform + mellin_transform, laplace_transform + """ + return SineTransform(f, x, k).doit(**hints) + + +class InverseSineTransform(SineCosineTypeTransform): + """ + Class representing unevaluated inverse sine transforms. + + For usage of this class, see the :class:`IntegralTransform` docstring. + + For how to compute inverse sine transforms, see the + :func:`inverse_sine_transform` docstring. + """ + + _name = 'Inverse Sine' + _kern = sin + + def a(self): + return sqrt(2)/sqrt(pi) + + def b(self): + return S.One + + +def inverse_sine_transform(F, k, x, **hints): + r""" + Compute the unitary, ordinary-frequency inverse sine transform of `F`, + defined as + + .. math:: f(x) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty F(k) \sin(2\pi x k) \mathrm{d} k. + + Explanation + =========== + + If the transform cannot be computed in closed form, this + function returns an unevaluated :class:`InverseSineTransform` object. + + For a description of possible hints, refer to the docstring of + :func:`sympy.integrals.transforms.IntegralTransform.doit`. 
+ Note that for this transform, by default ``noconds=True``. + + Examples + ======== + + >>> from sympy import inverse_sine_transform, exp, sqrt, gamma + >>> from sympy.abc import x, k, a + >>> inverse_sine_transform(2**((1-2*a)/2)*k**(a - 1)* + ... gamma(-a/2 + 1)/gamma((a+1)/2), k, x) + x**(-a) + >>> inverse_sine_transform(sqrt(2)*k*exp(-k**2/(4*a))/(4*sqrt(a)**3), k, x) + x*exp(-a*x**2) + + See Also + ======== + + fourier_transform, inverse_fourier_transform + sine_transform + cosine_transform, inverse_cosine_transform + hankel_transform, inverse_hankel_transform + mellin_transform, laplace_transform + """ + return InverseSineTransform(F, k, x).doit(**hints) + + +class CosineTransform(SineCosineTypeTransform): + """ + Class representing unevaluated cosine transforms. + + For usage of this class, see the :class:`IntegralTransform` docstring. + + For how to compute cosine transforms, see the :func:`cosine_transform` + docstring. + """ + + _name = 'Cosine' + _kern = cos + + def a(self): + return sqrt(2)/sqrt(pi) + + def b(self): + return S.One + + +def cosine_transform(f, x, k, **hints): + r""" + Compute the unitary, ordinary-frequency cosine transform of `f`, defined + as + + .. math:: F(k) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty f(x) \cos(2\pi x k) \mathrm{d} x. + + Explanation + =========== + + If the transform cannot be computed in closed form, this + function returns an unevaluated :class:`CosineTransform` object. + + For a description of possible hints, refer to the docstring of + :func:`sympy.integrals.transforms.IntegralTransform.doit`. + Note that for this transform, by default ``noconds=True``. + + Examples + ======== + + >>> from sympy import cosine_transform, exp, sqrt, cos + >>> from sympy.abc import x, k, a + >>> cosine_transform(exp(-a*x), x, k) + sqrt(2)*a/(sqrt(pi)*(a**2 + k**2)) + >>> cosine_transform(exp(-a*sqrt(x))*cos(a*sqrt(x)), x, k) + a*exp(-a**2/(2*k))/(2*k**(3/2)) + + See Also + ======== + + fourier_transform, inverse_fourier_transform, + sine_transform, inverse_sine_transform + inverse_cosine_transform + hankel_transform, inverse_hankel_transform + mellin_transform, laplace_transform + """ + return CosineTransform(f, x, k).doit(**hints) + + +class InverseCosineTransform(SineCosineTypeTransform): + """ + Class representing unevaluated inverse cosine transforms. + + For usage of this class, see the :class:`IntegralTransform` docstring. + + For how to compute inverse cosine transforms, see the + :func:`inverse_cosine_transform` docstring. + """ + + _name = 'Inverse Cosine' + _kern = cos + + def a(self): + return sqrt(2)/sqrt(pi) + + def b(self): + return S.One + + +def inverse_cosine_transform(F, k, x, **hints): + r""" + Compute the unitary, ordinary-frequency inverse cosine transform of `F`, + defined as + + .. math:: f(x) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty F(k) \cos(2\pi x k) \mathrm{d} k. + + Explanation + =========== + + If the transform cannot be computed in closed form, this + function returns an unevaluated :class:`InverseCosineTransform` object. + + For a description of possible hints, refer to the docstring of + :func:`sympy.integrals.transforms.IntegralTransform.doit`. + Note that for this transform, by default ``noconds=True``. 
+ + Examples + ======== + + >>> from sympy import inverse_cosine_transform, sqrt, pi + >>> from sympy.abc import x, k, a + >>> inverse_cosine_transform(sqrt(2)*a/(sqrt(pi)*(a**2 + k**2)), k, x) + exp(-a*x) + >>> inverse_cosine_transform(1/sqrt(k), k, x) + 1/sqrt(x) + + See Also + ======== + + fourier_transform, inverse_fourier_transform, + sine_transform, inverse_sine_transform + cosine_transform + hankel_transform, inverse_hankel_transform + mellin_transform, laplace_transform + """ + return InverseCosineTransform(F, k, x).doit(**hints) + + +########################################################################## +# Hankel Transform +########################################################################## + +@_noconds_(True) +def _hankel_transform(f, r, k, nu, name, simplify=True): + r""" + Compute a general Hankel transform + + .. math:: F_\nu(k) = \int_{0}^\infty f(r) J_\nu(k r) r \mathrm{d} r. + """ + F = integrate(f*besselj(nu, k*r)*r, (r, S.Zero, S.Infinity)) + + if not F.has(Integral): + return _simplify(F, simplify), S.true + + if not F.is_Piecewise: + raise IntegralTransformError(name, f, 'could not compute integral') + + F, cond = F.args[0] + if F.has(Integral): + raise IntegralTransformError(name, f, 'integral in unexpected form') + + return _simplify(F, simplify), cond + + +class HankelTypeTransform(IntegralTransform): + """ + Base class for Hankel transforms. + """ + + def doit(self, **hints): + return self._compute_transform(self.function, + self.function_variable, + self.transform_variable, + self.args[3], + **hints) + + def _compute_transform(self, f, r, k, nu, **hints): + return _hankel_transform(f, r, k, nu, self._name, **hints) + + def _as_integral(self, f, r, k, nu): + return Integral(f*besselj(nu, k*r)*r, (r, S.Zero, S.Infinity)) + + @property + def as_integral(self): + return self._as_integral(self.function, + self.function_variable, + self.transform_variable, + self.args[3]) + + +class HankelTransform(HankelTypeTransform): + """ + Class representing unevaluated Hankel transforms. + + For usage of this class, see the :class:`IntegralTransform` docstring. + + For how to compute Hankel transforms, see the :func:`hankel_transform` + docstring. + """ + + _name = 'Hankel' + + +def hankel_transform(f, r, k, nu, **hints): + r""" + Compute the Hankel transform of `f`, defined as + + .. math:: F_\nu(k) = \int_{0}^\infty f(r) J_\nu(k r) r \mathrm{d} r. + + Explanation + =========== + + If the transform cannot be computed in closed form, this + function returns an unevaluated :class:`HankelTransform` object. + + For a description of possible hints, refer to the docstring of + :func:`sympy.integrals.transforms.IntegralTransform.doit`. + Note that for this transform, by default ``noconds=True``. 
+ + Examples + ======== + + >>> from sympy import hankel_transform, inverse_hankel_transform + >>> from sympy import exp + >>> from sympy.abc import r, k, m, nu, a + + >>> ht = hankel_transform(1/r**m, r, k, nu) + >>> ht + 2*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/(2**m*gamma(m/2 + nu/2)) + + >>> inverse_hankel_transform(ht, k, r, nu) + r**(-m) + + >>> ht = hankel_transform(exp(-a*r), r, k, 0) + >>> ht + a/(k**3*(a**2/k**2 + 1)**(3/2)) + + >>> inverse_hankel_transform(ht, k, r, 0) + exp(-a*r) + + See Also + ======== + + fourier_transform, inverse_fourier_transform + sine_transform, inverse_sine_transform + cosine_transform, inverse_cosine_transform + inverse_hankel_transform + mellin_transform, laplace_transform + """ + return HankelTransform(f, r, k, nu).doit(**hints) + + +class InverseHankelTransform(HankelTypeTransform): + """ + Class representing unevaluated inverse Hankel transforms. + + For usage of this class, see the :class:`IntegralTransform` docstring. + + For how to compute inverse Hankel transforms, see the + :func:`inverse_hankel_transform` docstring. + """ + + _name = 'Inverse Hankel' + + +def inverse_hankel_transform(F, k, r, nu, **hints): + r""" + Compute the inverse Hankel transform of `F` defined as + + .. math:: f(r) = \int_{0}^\infty F_\nu(k) J_\nu(k r) k \mathrm{d} k. + + Explanation + =========== + + If the transform cannot be computed in closed form, this + function returns an unevaluated :class:`InverseHankelTransform` object. + + For a description of possible hints, refer to the docstring of + :func:`sympy.integrals.transforms.IntegralTransform.doit`. + Note that for this transform, by default ``noconds=True``. + + Examples + ======== + + >>> from sympy import hankel_transform, inverse_hankel_transform + >>> from sympy import exp + >>> from sympy.abc import r, k, m, nu, a + + >>> ht = hankel_transform(1/r**m, r, k, nu) + >>> ht + 2*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/(2**m*gamma(m/2 + nu/2)) + + >>> inverse_hankel_transform(ht, k, r, nu) + r**(-m) + + >>> ht = hankel_transform(exp(-a*r), r, k, 0) + >>> ht + a/(k**3*(a**2/k**2 + 1)**(3/2)) + + >>> inverse_hankel_transform(ht, k, r, 0) + exp(-a*r) + + See Also + ======== + + fourier_transform, inverse_fourier_transform + sine_transform, inverse_sine_transform + cosine_transform, inverse_cosine_transform + hankel_transform + mellin_transform, laplace_transform + """ + return InverseHankelTransform(F, k, r, nu).doit(**hints) + + +########################################################################## +# Laplace Transform +########################################################################## + +# Stub classes and functions that used to be here +import sympy.integrals.laplace as _laplace + +LaplaceTransform = _laplace.LaplaceTransform +laplace_transform = _laplace.laplace_transform +InverseLaplaceTransform = _laplace.InverseLaplaceTransform +inverse_laplace_transform = _laplace.inverse_laplace_transform diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/integrals/trigonometry.py b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/trigonometry.py new file mode 100644 index 0000000000000000000000000000000000000000..dd6389bcc79f28ed6c255546685da1a0e061c327 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/integrals/trigonometry.py @@ -0,0 +1,335 @@ +from sympy.core import cacheit, Dummy, Ne, Integer, Rational, S, Wild +from sympy.functions import binomial, sin, cos, Piecewise, Abs +from .integrals import integrate + +# TODO sin(a*x)*cos(b*x) -> sin((a+b)x) + sin((a-b)x) ? 
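+
+# Illustrative aside (not used by the code below): the rewrite hinted at in
+# the TODO above is the product-to-sum identity
+#     sin(a*x)*cos(b*x) = (sin((a + b)*x) + sin((a - b)*x))/2,
+# which SymPy already exposes through the Fu transformation TR8, e.g.
+#     >>> from sympy import sin, cos, symbols
+#     >>> from sympy.simplify.fu import TR8
+#     >>> a, b, x = symbols('a b x')
+#     >>> TR8(sin(a*x)*cos(b*x))   # doctest: +SKIP
+#     sin(a*x - b*x)/2 + sin(a*x + b*x)/2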
+ +# creating, each time, Wild's and sin/cos/Mul is expensive. Also, our match & +# subs are very slow when not cached, and if we create Wild each time, we +# effectively block caching. +# +# so we cache the pattern + +# need to use a function instead of lamda since hash of lambda changes on +# each call to _pat_sincos +def _integer_instance(n): + return isinstance(n, Integer) + +@cacheit +def _pat_sincos(x): + a = Wild('a', exclude=[x]) + n, m = [Wild(s, exclude=[x], properties=[_integer_instance]) + for s in 'nm'] + pat = sin(a*x)**n * cos(a*x)**m + return pat, a, n, m + +_u = Dummy('u') + + +def trigintegrate(f, x, conds='piecewise'): + """ + Integrate f = Mul(trig) over x. + + Examples + ======== + + >>> from sympy import sin, cos, tan, sec + >>> from sympy.integrals.trigonometry import trigintegrate + >>> from sympy.abc import x + + >>> trigintegrate(sin(x)*cos(x), x) + sin(x)**2/2 + + >>> trigintegrate(sin(x)**2, x) + x/2 - sin(x)*cos(x)/2 + + >>> trigintegrate(tan(x)*sec(x), x) + 1/cos(x) + + >>> trigintegrate(sin(x)*tan(x), x) + -log(sin(x) - 1)/2 + log(sin(x) + 1)/2 - sin(x) + + References + ========== + + .. [1] https://en.wikibooks.org/wiki/Calculus/Integration_techniques + + See Also + ======== + + sympy.integrals.integrals.Integral.doit + sympy.integrals.integrals.Integral + """ + pat, a, n, m = _pat_sincos(x) + + f = f.rewrite('sincos') + M = f.match(pat) + + if M is None: + return + + n, m = M[n], M[m] + if n.is_zero and m.is_zero: + return x + zz = x if n.is_zero else S.Zero + + a = M[a] + + if n.is_odd or m.is_odd: + u = _u + n_, m_ = n.is_odd, m.is_odd + + # take smallest n or m -- to choose simplest substitution + if n_ and m_: + + # Make sure to choose the positive one + # otherwise an incorrect integral can occur. + if n < 0 and m > 0: + m_ = True + n_ = False + elif m < 0 and n > 0: + n_ = True + m_ = False + # Both are negative so choose the smallest n or m + # in absolute value for simplest substitution. 
+ elif (n < 0 and m < 0): + n_ = n > m + m_ = not (n > m) + + # Both n and m are odd and positive + else: + n_ = (n < m) # NB: careful here, one of the + m_ = not (n < m) # conditions *must* be true + + # n m u=C (n-1)/2 m + # S(x) * C(x) dx --> -(1-u^2) * u du + if n_: + ff = -(1 - u**2)**((n - 1)/2) * u**m + uu = cos(a*x) + + # n m u=S n (m-1)/2 + # S(x) * C(x) dx --> u * (1-u^2) du + elif m_: + ff = u**n * (1 - u**2)**((m - 1)/2) + uu = sin(a*x) + + fi = integrate(ff, u) # XXX cyclic deps + fx = fi.subs(u, uu) + if conds == 'piecewise': + return Piecewise((fx / a, Ne(a, 0)), (zz, True)) + return fx / a + + # n & m are both even + # + # 2k 2m 2l 2l + # we transform S (x) * C (x) into terms with only S (x) or C (x) + # + # example: + # 100 4 100 2 2 100 4 2 + # S (x) * C (x) = S (x) * (1-S (x)) = S (x) * (1 + S (x) - 2*S (x)) + # + # 104 102 100 + # = S (x) - 2*S (x) + S (x) + # 2k + # then S is integrated with recursive formula + + # take largest n or m -- to choose simplest substitution + n_ = (Abs(n) > Abs(m)) + m_ = (Abs(m) > Abs(n)) + res = S.Zero + + if n_: + # 2k 2 k i 2i + # C = (1 - S ) = sum(i, (-) * B(k, i) * S ) + if m > 0: + for i in range(0, m//2 + 1): + res += (S.NegativeOne**i * binomial(m//2, i) * + _sin_pow_integrate(n + 2*i, x)) + + elif m == 0: + res = _sin_pow_integrate(n, x) + else: + + # m < 0 , |n| > |m| + # / + # | + # | m n + # | cos (x) sin (x) dx = + # | + # | + #/ + # / + # | + # -1 m+1 n-1 n - 1 | m+2 n-2 + # ________ cos (x) sin (x) + _______ | cos (x) sin (x) dx + # | + # m + 1 m + 1 | + # / + + res = (Rational(-1, m + 1) * cos(x)**(m + 1) * sin(x)**(n - 1) + + Rational(n - 1, m + 1) * + trigintegrate(cos(x)**(m + 2)*sin(x)**(n - 2), x)) + + elif m_: + # 2k 2 k i 2i + # S = (1 - C ) = sum(i, (-) * B(k, i) * C ) + if n > 0: + + # / / + # | | + # | m n | -m n + # | cos (x)*sin (x) dx or | cos (x) * sin (x) dx + # | | + # / / + # + # |m| > |n| ; m, n >0 ; m, n belong to Z - {0} + # n 2 + # sin (x) term is expanded here in terms of cos (x), + # and then integrated. + # + + for i in range(0, n//2 + 1): + res += (S.NegativeOne**i * binomial(n//2, i) * + _cos_pow_integrate(m + 2*i, x)) + + elif n == 0: + + # / + # | + # | 1 + # | _ _ _ + # | m + # | cos (x) + # / + # + + res = _cos_pow_integrate(m, x) + else: + + # n < 0 , |m| > |n| + # / + # | + # | m n + # | cos (x) sin (x) dx = + # | + # | + #/ + # / + # | + # 1 m-1 n+1 m - 1 | m-2 n+2 + # _______ cos (x) sin (x) + _______ | cos (x) sin (x) dx + # | + # n + 1 n + 1 | + # / + + res = (Rational(1, n + 1) * cos(x)**(m - 1)*sin(x)**(n + 1) + + Rational(m - 1, n + 1) * + trigintegrate(cos(x)**(m - 2)*sin(x)**(n + 2), x)) + + else: + if m == n: + ##Substitute sin(2x)/2 for sin(x)cos(x) and then Integrate. + res = integrate((sin(2*x)*S.Half)**m, x) + elif (m == -n): + if n < 0: + # Same as the scheme described above. + # the function argument to integrate in the end will + # be 1, this cannot be integrated by trigintegrate. + # Hence use sympy.integrals.integrate. 
+ res = (Rational(1, n + 1) * cos(x)**(m - 1) * sin(x)**(n + 1) + + Rational(m - 1, n + 1) * + integrate(cos(x)**(m - 2) * sin(x)**(n + 2), x)) + else: + res = (Rational(-1, m + 1) * cos(x)**(m + 1) * sin(x)**(n - 1) + + Rational(n - 1, m + 1) * + integrate(cos(x)**(m + 2)*sin(x)**(n - 2), x)) + if conds == 'piecewise': + return Piecewise((res.subs(x, a*x) / a, Ne(a, 0)), (zz, True)) + return res.subs(x, a*x) / a + + +def _sin_pow_integrate(n, x): + if n > 0: + if n == 1: + #Recursion break + return -cos(x) + + # n > 0 + # / / + # | | + # | n -1 n-1 n - 1 | n-2 + # | sin (x) dx = ______ cos (x) sin (x) + _______ | sin (x) dx + # | | + # | n n | + #/ / + # + # + + return (Rational(-1, n) * cos(x) * sin(x)**(n - 1) + + Rational(n - 1, n) * _sin_pow_integrate(n - 2, x)) + + if n < 0: + if n == -1: + ##Make sure this does not come back here again. + ##Recursion breaks here or at n==0. + return trigintegrate(1/sin(x), x) + + # n < 0 + # / / + # | | + # | n 1 n+1 n + 2 | n+2 + # | sin (x) dx = _______ cos (x) sin (x) + _______ | sin (x) dx + # | | + # | n + 1 n + 1 | + #/ / + # + + return (Rational(1, n + 1) * cos(x) * sin(x)**(n + 1) + + Rational(n + 2, n + 1) * _sin_pow_integrate(n + 2, x)) + + else: + #n == 0 + #Recursion break. + return x + + +def _cos_pow_integrate(n, x): + if n > 0: + if n == 1: + #Recursion break. + return sin(x) + + # n > 0 + # / / + # | | + # | n 1 n-1 n - 1 | n-2 + # | sin (x) dx = ______ sin (x) cos (x) + _______ | cos (x) dx + # | | + # | n n | + #/ / + # + + return (Rational(1, n) * sin(x) * cos(x)**(n - 1) + + Rational(n - 1, n) * _cos_pow_integrate(n - 2, x)) + + if n < 0: + if n == -1: + ##Recursion break + return trigintegrate(1/cos(x), x) + + # n < 0 + # / / + # | | + # | n -1 n+1 n + 2 | n+2 + # | cos (x) dx = _______ sin (x) cos (x) + _______ | cos (x) dx + # | | + # | n + 1 n + 1 | + #/ / + # + + return (Rational(-1, n + 1) * sin(x) * cos(x)**(n + 1) + + Rational(n + 2, n + 1) * _cos_pow_integrate(n + 2, x)) + else: + # n == 0 + #Recursion Break. 
+ return x diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c90f8a5b26327a2eaaf268cb242a819ee3383ff Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_continued_fraction.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_continued_fraction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b0f15b23e4ee1d714106f45aba74defd7cb0ff0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_continued_fraction.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_qs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_qs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27d851425da297d3867596065832e666d970a7cc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_qs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_residue.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_residue.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5046e9c3944115a986db311e49b753fb3b9ba037 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_residue.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/plotting/__init__.py b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..074bcf93b7375eb3dc96d16b5450b539074d8f7d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/__init__.py @@ -0,0 +1,22 @@ +from .plot import plot_backends +from .plot_implicit import plot_implicit +from .textplot import textplot +from .pygletplot import PygletPlot +from .plot import PlotGrid +from .plot import (plot, plot_parametric, plot3d, plot3d_parametric_surface, + plot3d_parametric_line, plot_contour) + +__all__ = [ + 'plot_backends', + + 'plot_implicit', + + 'textplot', + + 'PygletPlot', + + 'PlotGrid', + + 'plot', 'plot_parametric', 'plot3d', 'plot3d_parametric_surface', + 'plot3d_parametric_line', 'plot_contour' +] diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/plotting/experimental_lambdify.py b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/experimental_lambdify.py new file mode 100644 index 0000000000000000000000000000000000000000..f4c5938a139363fa801928f316338fa4a9d4e77c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/experimental_lambdify.py @@ -0,0 +1,643 @@ +""" rewrite of lambdify - This stuff is not stable at all. + +It is for internal use in the new plotting module. +It may (will! see the Q'n'A in the source) be rewritten. + +It's completely self contained. Especially it does not use lambdarepr. + +It does not aim to replace the current lambdify. 
Most importantly it will never +ever support anything else than SymPy expressions (no Matrices, dictionaries +and so on). +""" + + +import re +from sympy.core.numbers import (I, NumberSymbol, oo, zoo) +from sympy.core.symbol import Symbol +from sympy.utilities.iterables import numbered_symbols + +# We parse the expression string into a tree that identifies functions. Then +# we translate the names of the functions and we translate also some strings +# that are not names of functions (all this according to translation +# dictionaries). +# If the translation goes to another module (like numpy) the +# module is imported and 'func' is translated to 'module.func'. +# If a function can not be translated, the inner nodes of that part of the +# tree are not translated. So if we have Integral(sqrt(x)), sqrt is not +# translated to np.sqrt and the Integral does not crash. +# A namespace for all this is generated by crawling the (func, args) tree of +# the expression. The creation of this namespace involves many ugly +# workarounds. +# The namespace consists of all the names needed for the SymPy expression and +# all the name of modules used for translation. Those modules are imported only +# as a name (import numpy as np) in order to keep the namespace small and +# manageable. + +# Please, if there is a bug, do not try to fix it here! Rewrite this by using +# the method proposed in the last Q'n'A below. That way the new function will +# work just as well, be just as simple, but it wont need any new workarounds. +# If you insist on fixing it here, look at the workarounds in the function +# sympy_expression_namespace and in lambdify. + +# Q: Why are you not using Python abstract syntax tree? +# A: Because it is more complicated and not much more powerful in this case. + +# Q: What if I have Symbol('sin') or g=Function('f')? +# A: You will break the algorithm. We should use srepr to defend against this? +# The problem with Symbol('sin') is that it will be printed as 'sin'. The +# parser will distinguish it from the function 'sin' because functions are +# detected thanks to the opening parenthesis, but the lambda expression won't +# understand the difference if we have also the sin function. +# The solution (complicated) is to use srepr and maybe ast. +# The problem with the g=Function('f') is that it will be printed as 'f' but in +# the global namespace we have only 'g'. But as the same printer is used in the +# constructor of the namespace there will be no problem. + +# Q: What if some of the printers are not printing as expected? +# A: The algorithm wont work. You must use srepr for those cases. But even +# srepr may not print well. All problems with printers should be considered +# bugs. + +# Q: What about _imp_ functions? +# A: Those are taken care for by evalf. A special case treatment will work +# faster but it's not worth the code complexity. + +# Q: Will ast fix all possible problems? +# A: No. You will always have to use some printer. Even srepr may not work in +# some cases. But if the printer does not work, that should be considered a +# bug. + +# Q: Is there same way to fix all possible problems? +# A: Probably by constructing our strings ourself by traversing the (func, +# args) tree and creating the namespace at the same time. That actually sounds +# good. + +from sympy.external import import_module +import warnings + +#TODO debugging output + + +class vectorized_lambdify: + """ Return a sufficiently smart, vectorized and lambdified function. + + Returns only reals. 
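+
+    A minimal illustrative sketch, assuming NumPy is installed (the printed
+    form of the returned masked array depends on the NumPy version, so only
+    plain floats are shown):
+
+    >>> from sympy.abc import x
+    >>> from sympy.plotting.experimental_lambdify import vectorized_lambdify
+    >>> f = vectorized_lambdify([x], x**2)
+    >>> [float(v) for v in f([0.0, 1.0, 2.0])]   # doctest: +SKIP
+    [0.0, 1.0, 4.0]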
+ + Explanation + =========== + + This function uses experimental_lambdify to created a lambdified + expression ready to be used with numpy. Many of the functions in SymPy + are not implemented in numpy so in some cases we resort to Python cmath or + even to evalf. + + The following translations are tried: + only numpy complex + - on errors raised by SymPy trying to work with ndarray: + only Python cmath and then vectorize complex128 + + When using Python cmath there is no need for evalf or float/complex + because Python cmath calls those. + + This function never tries to mix numpy directly with evalf because numpy + does not understand SymPy Float. If this is needed one can use the + float_wrap_evalf/complex_wrap_evalf options of experimental_lambdify or + better one can be explicit about the dtypes that numpy works with. + Check numpy bug http://projects.scipy.org/numpy/ticket/1013 to know what + types of errors to expect. + """ + def __init__(self, args, expr): + self.args = args + self.expr = expr + self.np = import_module('numpy') + + self.lambda_func_1 = experimental_lambdify( + args, expr, use_np=True) + self.vector_func_1 = self.lambda_func_1 + + self.lambda_func_2 = experimental_lambdify( + args, expr, use_python_cmath=True) + self.vector_func_2 = self.np.vectorize( + self.lambda_func_2, otypes=[complex]) + + self.vector_func = self.vector_func_1 + self.failure = False + + def __call__(self, *args): + np = self.np + + try: + temp_args = (np.array(a, dtype=complex) for a in args) + results = self.vector_func(*temp_args) + results = np.ma.masked_where( + np.abs(results.imag) > 1e-7 * np.abs(results), + results.real, copy=False) + return results + except ValueError: + if self.failure: + raise + + self.failure = True + self.vector_func = self.vector_func_2 + warnings.warn( + 'The evaluation of the expression is problematic. ' + 'We are trying a failback method that may still work. ' + 'Please report this as a bug.') + return self.__call__(*args) + + +class lambdify: + """Returns the lambdified function. + + Explanation + =========== + + This function uses experimental_lambdify to create a lambdified + expression. It uses cmath to lambdify the expression. If the function + is not implemented in Python cmath, Python cmath calls evalf on those + functions. + """ + + def __init__(self, args, expr): + self.args = args + self.expr = expr + self.lambda_func_1 = experimental_lambdify( + args, expr, use_python_cmath=True, use_evalf=True) + self.lambda_func_2 = experimental_lambdify( + args, expr, use_python_math=True, use_evalf=True) + self.lambda_func_3 = experimental_lambdify( + args, expr, use_evalf=True, complex_wrap_evalf=True) + self.lambda_func = self.lambda_func_1 + self.failure = False + + def __call__(self, args): + try: + #The result can be sympy.Float. Hence wrap it with complex type. + result = complex(self.lambda_func(args)) + if abs(result.imag) > 1e-7 * abs(result): + return None + return result.real + except (ZeroDivisionError, OverflowError): + return None + except TypeError as e: + if self.failure: + raise e + + if self.lambda_func == self.lambda_func_1: + self.lambda_func = self.lambda_func_2 + return self.__call__(args) + + self.failure = True + self.lambda_func = self.lambda_func_3 + warnings.warn( + 'The evaluation of the expression is problematic. ' + 'We are trying a failback method that may still work. 
' + 'Please report this as a bug.', stacklevel=2) + return self.__call__(args) + + +def experimental_lambdify(*args, **kwargs): + l = Lambdifier(*args, **kwargs) + return l + + +class Lambdifier: + def __init__(self, args, expr, print_lambda=False, use_evalf=False, + float_wrap_evalf=False, complex_wrap_evalf=False, + use_np=False, use_python_math=False, use_python_cmath=False, + use_interval=False): + + self.print_lambda = print_lambda + self.use_evalf = use_evalf + self.float_wrap_evalf = float_wrap_evalf + self.complex_wrap_evalf = complex_wrap_evalf + self.use_np = use_np + self.use_python_math = use_python_math + self.use_python_cmath = use_python_cmath + self.use_interval = use_interval + + # Constructing the argument string + # - check + if not all(isinstance(a, Symbol) for a in args): + raise ValueError('The arguments must be Symbols.') + # - use numbered symbols + syms = numbered_symbols(exclude=expr.free_symbols) + newargs = [next(syms) for _ in args] + expr = expr.xreplace(dict(zip(args, newargs))) + argstr = ', '.join([str(a) for a in newargs]) + del syms, newargs, args + + # Constructing the translation dictionaries and making the translation + self.dict_str = self.get_dict_str() + self.dict_fun = self.get_dict_fun() + exprstr = str(expr) + newexpr = self.tree2str_translate(self.str2tree(exprstr)) + + # Constructing the namespaces + namespace = {} + namespace.update(self.sympy_atoms_namespace(expr)) + namespace.update(self.sympy_expression_namespace(expr)) + # XXX Workaround + # Ugly workaround because Pow(a,Half) prints as sqrt(a) + # and sympy_expression_namespace can not catch it. + from sympy.functions.elementary.miscellaneous import sqrt + namespace.update({'sqrt': sqrt}) + namespace.update({'Eq': lambda x, y: x == y}) + namespace.update({'Ne': lambda x, y: x != y}) + # End workaround. 
+ if use_python_math: + namespace.update({'math': __import__('math')}) + if use_python_cmath: + namespace.update({'cmath': __import__('cmath')}) + if use_np: + try: + namespace.update({'np': __import__('numpy')}) + except ImportError: + raise ImportError( + 'experimental_lambdify failed to import numpy.') + if use_interval: + namespace.update({'imath': __import__( + 'sympy.plotting.intervalmath', fromlist=['intervalmath'])}) + namespace.update({'math': __import__('math')}) + + # Construct the lambda + if self.print_lambda: + print(newexpr) + eval_str = 'lambda %s : ( %s )' % (argstr, newexpr) + self.eval_str = eval_str + exec("MYNEWLAMBDA = %s" % eval_str, namespace) + self.lambda_func = namespace['MYNEWLAMBDA'] + + def __call__(self, *args, **kwargs): + return self.lambda_func(*args, **kwargs) + + + ############################################################################## + # Dicts for translating from SymPy to other modules + ############################################################################## + ### + # builtins + ### + # Functions with different names in builtins + builtin_functions_different = { + 'Min': 'min', + 'Max': 'max', + 'Abs': 'abs', + } + + # Strings that should be translated + builtin_not_functions = { + 'I': '1j', +# 'oo': '1e400', + } + + ### + # numpy + ### + + # Functions that are the same in numpy + numpy_functions_same = [ + 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'exp', 'log', + 'sqrt', 'floor', 'conjugate', + ] + + # Functions with different names in numpy + numpy_functions_different = { + "acos": "arccos", + "acosh": "arccosh", + "arg": "angle", + "asin": "arcsin", + "asinh": "arcsinh", + "atan": "arctan", + "atan2": "arctan2", + "atanh": "arctanh", + "ceiling": "ceil", + "im": "imag", + "ln": "log", + "Max": "amax", + "Min": "amin", + "re": "real", + "Abs": "abs", + } + + # Strings that should be translated + numpy_not_functions = { + 'pi': 'np.pi', + 'oo': 'np.inf', + 'E': 'np.e', + } + + ### + # Python math + ### + + # Functions that are the same in math + math_functions_same = [ + 'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'atan2', + 'sinh', 'cosh', 'tanh', 'asinh', 'acosh', 'atanh', + 'exp', 'log', 'erf', 'sqrt', 'floor', 'factorial', 'gamma', + ] + + # Functions with different names in math + math_functions_different = { + 'ceiling': 'ceil', + 'ln': 'log', + 'loggamma': 'lgamma' + } + + # Strings that should be translated + math_not_functions = { + 'pi': 'math.pi', + 'E': 'math.e', + } + + ### + # Python cmath + ### + + # Functions that are the same in cmath + cmath_functions_same = [ + 'sin', 'cos', 'tan', 'asin', 'acos', 'atan', + 'sinh', 'cosh', 'tanh', 'asinh', 'acosh', 'atanh', + 'exp', 'log', 'sqrt', + ] + + # Functions with different names in cmath + cmath_functions_different = { + 'ln': 'log', + 'arg': 'phase', + } + + # Strings that should be translated + cmath_not_functions = { + 'pi': 'cmath.pi', + 'E': 'cmath.e', + } + + ### + # intervalmath + ### + + interval_not_functions = { + 'pi': 'math.pi', + 'E': 'math.e' + } + + interval_functions_same = [ + 'sin', 'cos', 'exp', 'tan', 'atan', 'log', + 'sqrt', 'cosh', 'sinh', 'tanh', 'floor', + 'acos', 'asin', 'acosh', 'asinh', 'atanh', + 'Abs', 'And', 'Or' + ] + + interval_functions_different = { + 'Min': 'imin', + 'Max': 'imax', + 'ceiling': 'ceil', + + } + + ### + # mpmath, etc + ### + #TODO + + ### + # Create the final ordered tuples of dictionaries + ### + + # For strings + def get_dict_str(self): + dict_str = dict(self.builtin_not_functions) + if self.use_np: + 
dict_str.update(self.numpy_not_functions) + if self.use_python_math: + dict_str.update(self.math_not_functions) + if self.use_python_cmath: + dict_str.update(self.cmath_not_functions) + if self.use_interval: + dict_str.update(self.interval_not_functions) + return dict_str + + # For functions + def get_dict_fun(self): + dict_fun = dict(self.builtin_functions_different) + if self.use_np: + for s in self.numpy_functions_same: + dict_fun[s] = 'np.' + s + for k, v in self.numpy_functions_different.items(): + dict_fun[k] = 'np.' + v + if self.use_python_math: + for s in self.math_functions_same: + dict_fun[s] = 'math.' + s + for k, v in self.math_functions_different.items(): + dict_fun[k] = 'math.' + v + if self.use_python_cmath: + for s in self.cmath_functions_same: + dict_fun[s] = 'cmath.' + s + for k, v in self.cmath_functions_different.items(): + dict_fun[k] = 'cmath.' + v + if self.use_interval: + for s in self.interval_functions_same: + dict_fun[s] = 'imath.' + s + for k, v in self.interval_functions_different.items(): + dict_fun[k] = 'imath.' + v + return dict_fun + + ############################################################################## + # The translator functions, tree parsers, etc. + ############################################################################## + + def str2tree(self, exprstr): + """Converts an expression string to a tree. + + Explanation + =========== + + Functions are represented by ('func_name(', tree_of_arguments). + Other expressions are (head_string, mid_tree, tail_str). + Expressions that do not contain functions are directly returned. + + Examples + ======== + + >>> from sympy.abc import x, y, z + >>> from sympy import Integral, sin + >>> from sympy.plotting.experimental_lambdify import Lambdifier + >>> str2tree = Lambdifier([x], x).str2tree + + >>> str2tree(str(Integral(x, (x, 1, y)))) + ('', ('Integral(', 'x, (x, 1, y)'), ')') + >>> str2tree(str(x+y)) + 'x + y' + >>> str2tree(str(x+y*sin(z)+1)) + ('x + y*', ('sin(', 'z'), ') + 1') + >>> str2tree('sin(y*(y + 1.1) + (sin(y)))') + ('', ('sin(', ('y*(y + 1.1) + (', ('sin(', 'y'), '))')), ')') + """ + #matches the first 'function_name(' + first_par = re.search(r'(\w+\()', exprstr) + if first_par is None: + return exprstr + else: + start = first_par.start() + end = first_par.end() + head = exprstr[:start] + func = exprstr[start:end] + tail = exprstr[end:] + count = 0 + for i, c in enumerate(tail): + if c == '(': + count += 1 + elif c == ')': + count -= 1 + if count == -1: + break + func_tail = self.str2tree(tail[:i]) + tail = self.str2tree(tail[i:]) + return (head, (func, func_tail), tail) + + @classmethod + def tree2str(cls, tree): + """Converts a tree to string without translations. + + Examples + ======== + + >>> from sympy.abc import x, y, z + >>> from sympy import sin + >>> from sympy.plotting.experimental_lambdify import Lambdifier + >>> str2tree = Lambdifier([x], x).str2tree + >>> tree2str = Lambdifier([x], x).tree2str + + >>> tree2str(str2tree(str(x+y*sin(z)+1))) + 'x + y*sin(z) + 1' + """ + if isinstance(tree, str): + return tree + else: + return ''.join(map(cls.tree2str, tree)) + + def tree2str_translate(self, tree): + """Converts a tree to string with translations. + + Explanation + =========== + + Function names are translated by translate_func. + Other strings are translated by translate_str. 
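+
+        A small illustrative example (``y`` and the input string are
+        arbitrary):
+
+        >>> from sympy.abc import y
+        >>> from sympy import sin
+        >>> from sympy.plotting.experimental_lambdify import Lambdifier
+        >>> lam = Lambdifier([y], sin(y), use_python_math=True)
+        >>> lam.tree2str_translate(lam.str2tree('sin(y)'))
+        'math.sin(y)'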
+ """ + if isinstance(tree, str): + return self.translate_str(tree) + elif isinstance(tree, tuple) and len(tree) == 2: + return self.translate_func(tree[0][:-1], tree[1]) + else: + return ''.join([self.tree2str_translate(t) for t in tree]) + + def translate_str(self, estr): + """Translate substrings of estr using in order the dictionaries in + dict_tuple_str.""" + for pattern, repl in self.dict_str.items(): + estr = re.sub(pattern, repl, estr) + return estr + + def translate_func(self, func_name, argtree): + """Translate function names and the tree of arguments. + + Explanation + =========== + + If the function name is not in the dictionaries of dict_tuple_fun then the + function is surrounded by a float((...).evalf()). + + The use of float is necessary as np.(sympy.Float(..)) raises an + error.""" + if func_name in self.dict_fun: + new_name = self.dict_fun[func_name] + argstr = self.tree2str_translate(argtree) + return new_name + '(' + argstr + elif func_name in ['Eq', 'Ne']: + op = {'Eq': '==', 'Ne': '!='} + return "(lambda x, y: x {} y)({}".format(op[func_name], self.tree2str_translate(argtree)) + else: + template = '(%s(%s)).evalf(' if self.use_evalf else '%s(%s' + if self.float_wrap_evalf: + template = 'float(%s)' % template + elif self.complex_wrap_evalf: + template = 'complex(%s)' % template + + # Wrapping should only happen on the outermost expression, which + # is the only thing we know will be a number. + float_wrap_evalf = self.float_wrap_evalf + complex_wrap_evalf = self.complex_wrap_evalf + self.float_wrap_evalf = False + self.complex_wrap_evalf = False + ret = template % (func_name, self.tree2str_translate(argtree)) + self.float_wrap_evalf = float_wrap_evalf + self.complex_wrap_evalf = complex_wrap_evalf + return ret + + ############################################################################## + # The namespace constructors + ############################################################################## + + @classmethod + def sympy_expression_namespace(cls, expr): + """Traverses the (func, args) tree of an expression and creates a SymPy + namespace. All other modules are imported only as a module name. That way + the namespace is not polluted and rests quite small. It probably causes much + more variable lookups and so it takes more time, but there are no tests on + that for the moment.""" + if expr is None: + return {} + else: + funcname = str(expr.func) + # XXX Workaround + # Here we add an ugly workaround because str(func(x)) + # is not always the same as str(func). Eg + # >>> str(Integral(x)) + # "Integral(x)" + # >>> str(Integral) + # "" + # >>> str(sqrt(x)) + # "sqrt(x)" + # >>> str(sqrt) + # "" + # >>> str(sin(x)) + # "sin(x)" + # >>> str(sin) + # "sin" + # Either one of those can be used but not all at the same time. + # The code considers the sin example as the right one. + regexlist = [ + r'$', + # the example Integral + r'$', # the example sqrt + ] + for r in regexlist: + m = re.match(r, funcname) + if m is not None: + funcname = m.groups()[0] + # End of the workaround + # XXX debug: print funcname + args_dict = {} + for a in expr.args: + if (isinstance(a, Symbol) or + isinstance(a, NumberSymbol) or + a in [I, zoo, oo]): + continue + else: + args_dict.update(cls.sympy_expression_namespace(a)) + args_dict.update({funcname: expr.func}) + return args_dict + + @staticmethod + def sympy_atoms_namespace(expr): + """For no real reason this function is separated from + sympy_expression_namespace. 
It can be moved to it.""" + atoms = expr.atoms(Symbol, NumberSymbol, I, zoo, oo) + d = {} + for a in atoms: + # XXX debug: print 'atom:' + str(a) + d[str(a)] = a + return d diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/plotting/intervalmath/__init__.py b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/intervalmath/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fb9a6a57f94e931f0c5f5b3dda7b0b6fd31841f4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/intervalmath/__init__.py @@ -0,0 +1,12 @@ +from .interval_arithmetic import interval +from .lib_interval import (Abs, exp, log, log10, sin, cos, tan, sqrt, + imin, imax, sinh, cosh, tanh, acosh, asinh, atanh, + asin, acos, atan, ceil, floor, And, Or) + +__all__ = [ + 'interval', + + 'Abs', 'exp', 'log', 'log10', 'sin', 'cos', 'tan', 'sqrt', 'imin', 'imax', + 'sinh', 'cosh', 'tanh', 'acosh', 'asinh', 'atanh', 'asin', 'acos', 'atan', + 'ceil', 'floor', 'And', 'Or', +] diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/plotting/plot.py b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/plot.py new file mode 100644 index 0000000000000000000000000000000000000000..1fbcb2c506e4423406199549ee0f76b90d403fc2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/plot.py @@ -0,0 +1,2637 @@ +"""Plotting module for SymPy. + +A plot is represented by the ``Plot`` class that contains a reference to the +backend and a list of the data series to be plotted. The data series are +instances of classes meant to simplify getting points and meshes from SymPy +expressions. ``plot_backends`` is a dictionary with all the backends. + +This module gives only the essential. For all the fancy stuff use directly +the backend. You can get the backend wrapper for every plot from the +``_backend`` attribute. Moreover the data series classes have various useful +methods like ``get_points``, ``get_meshes``, etc, that may +be useful if you wish to use another plotting library. + +Especially if you need publication ready graphs and this module is not enough +for you - just get the ``_backend`` attribute and add whatever you want +directly to it. In the case of matplotlib (the common way to graph data in +python) just copy ``_backend.fig`` which is the figure and ``_backend.ax`` +which is the axis and work on them as you would on any other matplotlib object. + +Simplicity of code takes much greater importance than performance. Do not use it +if you care at all about performance. A new backend instance is initialized +every time you call ``show()`` and the old one is left to the garbage collector. +""" + + +from collections.abc import Callable + + +from sympy.core.basic import Basic +from sympy.core.containers import Tuple +from sympy.core.expr import Expr +from sympy.core.function import arity, Function +from sympy.core.symbol import (Dummy, Symbol) +from sympy.core.sympify import sympify +from sympy.external import import_module +from sympy.printing.latex import latex +from sympy.utilities.exceptions import sympy_deprecation_warning +from sympy.utilities.iterables import is_sequence +from .experimental_lambdify import (vectorized_lambdify, lambdify) + +# N.B. 
+# When changing the minimum module version for matplotlib, please change +# the same in the `SymPyDocTestFinder`` in `sympy/testing/runtests.py` + +# Backend specific imports - textplot +from sympy.plotting.textplot import textplot + +# Global variable +# Set to False when running tests / doctests so that the plots don't show. +_show = True + + +def unset_show(): + """ + Disable show(). For use in the tests. + """ + global _show + _show = False + +def _str_or_latex(label): + if isinstance(label, Basic): + return latex(label, mode='inline') + return str(label) + +############################################################################## +# The public interface +############################################################################## + + +class Plot: + """The central class of the plotting module. + + Explanation + =========== + + For interactive work the function :func:`plot()` is better suited. + + This class permits the plotting of SymPy expressions using numerous + backends (:external:mod:`matplotlib`, textplot, the old pyglet module for SymPy, Google + charts api, etc). + + The figure can contain an arbitrary number of plots of SymPy expressions, + lists of coordinates of points, etc. Plot has a private attribute _series that + contains all data series to be plotted (expressions for lines or surfaces, + lists of points, etc (all subclasses of BaseSeries)). Those data series are + instances of classes not imported by ``from sympy import *``. + + The customization of the figure is on two levels. Global options that + concern the figure as a whole (e.g. title, xlabel, scale, etc) and + per-data series options (e.g. name) and aesthetics (e.g. color, point shape, + line type, etc.). + + The difference between options and aesthetics is that an aesthetic can be + a function of the coordinates (or parameters in a parametric plot). The + supported values for an aesthetic are: + + - None (the backend uses default values) + - a constant + - a function of one variable (the first coordinate or parameter) + - a function of two variables (the first and second coordinate or parameters) + - a function of three variables (only in nonparametric 3D plots) + + Their implementation depends on the backend so they may not work in some + backends. + + If the plot is parametric and the arity of the aesthetic function permits + it the aesthetic is calculated over parameters and not over coordinates. + If the arity does not permit calculation over parameters the calculation is + done over coordinates. + + Only cartesian coordinates are supported for the moment, but you can use + the parametric plots to plot in polar, spherical and cylindrical + coordinates. + + The arguments for the constructor Plot must be subclasses of BaseSeries. + + Any global option can be specified as a keyword argument. + + The global options for a figure are: + + - title : str + - xlabel : str or Symbol + - ylabel : str or Symbol + - zlabel : str or Symbol + - legend : bool + - xscale : {'linear', 'log'} + - yscale : {'linear', 'log'} + - axis : bool + - axis_center : tuple of two floats or {'center', 'auto'} + - xlim : tuple of two floats + - ylim : tuple of two floats + - aspect_ratio : tuple of two floats or {'auto'} + - autoscale : bool + - margin : float in [0, 1] + - backend : {'default', 'matplotlib', 'text'} or a subclass of BaseBackend + - size : optional tuple of two floats, (width, height); default: None + + The per data series options and aesthetics are: + There are none in the base series. 
See below for options for subclasses. + + Some data series support additional aesthetics or options: + + :class:`~.LineOver1DRangeSeries`, :class:`~.Parametric2DLineSeries`, and + :class:`~.Parametric3DLineSeries` support the following: + + Aesthetics: + + - line_color : string, or float, or function, optional + Specifies the color for the plot, which depends on the backend being + used. + + For example, if ``MatplotlibBackend`` is being used, then + Matplotlib string colors are acceptable (``"red"``, ``"r"``, + ``"cyan"``, ``"c"``, ...). + Alternatively, we can use a float number, 0 < color < 1, wrapped in a + string (for example, ``line_color="0.5"``) to specify grayscale colors. + Alternatively, We can specify a function returning a single + float value: this will be used to apply a color-loop (for example, + ``line_color=lambda x: math.cos(x)``). + + Note that by setting line_color, it would be applied simultaneously + to all the series. + + Options: + + - label : str + - steps : bool + - integers_only : bool + + :class:`~.SurfaceOver2DRangeSeries` and :class:`~.ParametricSurfaceSeries` + support the following: + + Aesthetics: + + - surface_color : function which returns a float. + """ + + def __init__(self, *args, + title=None, xlabel=None, ylabel=None, zlabel=None, aspect_ratio='auto', + xlim=None, ylim=None, axis_center='auto', axis=True, + xscale='linear', yscale='linear', legend=False, autoscale=True, + margin=0, annotations=None, markers=None, rectangles=None, + fill=None, backend='default', size=None, **kwargs): + super().__init__() + + # Options for the graph as a whole. + # The possible values for each option are described in the docstring of + # Plot. They are based purely on convention, no checking is done. + self.title = title + self.xlabel = xlabel + self.ylabel = ylabel + self.zlabel = zlabel + self.aspect_ratio = aspect_ratio + self.axis_center = axis_center + self.axis = axis + self.xscale = xscale + self.yscale = yscale + self.legend = legend + self.autoscale = autoscale + self.margin = margin + self.annotations = annotations + self.markers = markers + self.rectangles = rectangles + self.fill = fill + + # Contains the data objects to be plotted. The backend should be smart + # enough to iterate over this list. + self._series = [] + self._series.extend(args) + + # The backend type. On every show() a new backend instance is created + # in self._backend which is tightly coupled to the Plot instance + # (thanks to the parent attribute of the backend). 
+ if isinstance(backend, str): + self.backend = plot_backends[backend] + elif (type(backend) == type) and issubclass(backend, BaseBackend): + self.backend = backend + else: + raise TypeError( + "backend must be either a string or a subclass of BaseBackend") + + is_real = \ + lambda lim: all(getattr(i, 'is_real', True) for i in lim) + is_finite = \ + lambda lim: all(getattr(i, 'is_finite', True) for i in lim) + + # reduce code repetition + def check_and_set(t_name, t): + if t: + if not is_real(t): + raise ValueError( + "All numbers from {}={} must be real".format(t_name, t)) + if not is_finite(t): + raise ValueError( + "All numbers from {}={} must be finite".format(t_name, t)) + setattr(self, t_name, (float(t[0]), float(t[1]))) + + self.xlim = None + check_and_set("xlim", xlim) + self.ylim = None + check_and_set("ylim", ylim) + self.size = None + check_and_set("size", size) + + + def show(self): + # TODO move this to the backend (also for save) + if hasattr(self, '_backend'): + self._backend.close() + self._backend = self.backend(self) + self._backend.show() + + def save(self, path): + if hasattr(self, '_backend'): + self._backend.close() + self._backend = self.backend(self) + self._backend.save(path) + + def __str__(self): + series_strs = [('[%d]: ' % i) + str(s) + for i, s in enumerate(self._series)] + return 'Plot object containing:\n' + '\n'.join(series_strs) + + def __getitem__(self, index): + return self._series[index] + + def __setitem__(self, index, *args): + if len(args) == 1 and isinstance(args[0], BaseSeries): + self._series[index] = args + + def __delitem__(self, index): + del self._series[index] + + def append(self, arg): + """Adds an element from a plot's series to an existing plot. + + Examples + ======== + + Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the + second plot's first series object to the first, use the + ``append`` method, like so: + + .. plot:: + :format: doctest + :include-source: True + + >>> from sympy import symbols + >>> from sympy.plotting import plot + >>> x = symbols('x') + >>> p1 = plot(x*x, show=False) + >>> p2 = plot(x, show=False) + >>> p1.append(p2[0]) + >>> p1 + Plot object containing: + [0]: cartesian line: x**2 for x over (-10.0, 10.0) + [1]: cartesian line: x for x over (-10.0, 10.0) + >>> p1.show() + + See Also + ======== + + extend + + """ + if isinstance(arg, BaseSeries): + self._series.append(arg) + else: + raise TypeError('Must specify element of plot to append.') + + def extend(self, arg): + """Adds all series from another plot. + + Examples + ======== + + Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the + second plot to the first, use the ``extend`` method, like so: + + .. plot:: + :format: doctest + :include-source: True + + >>> from sympy import symbols + >>> from sympy.plotting import plot + >>> x = symbols('x') + >>> p1 = plot(x**2, show=False) + >>> p2 = plot(x, -x, show=False) + >>> p1.extend(p2) + >>> p1 + Plot object containing: + [0]: cartesian line: x**2 for x over (-10.0, 10.0) + [1]: cartesian line: x for x over (-10.0, 10.0) + [2]: cartesian line: -x for x over (-10.0, 10.0) + >>> p1.show() + + """ + if isinstance(arg, Plot): + self._series.extend(arg._series) + elif is_sequence(arg): + self._series.extend(arg) + else: + raise TypeError('Expecting Plot or sequence of BaseSeries') + + +class PlotGrid: + """This class helps to plot subplots from already created SymPy plots + in a single figure. + + Examples + ======== + + .. 
plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> from sympy import symbols + >>> from sympy.plotting import plot, plot3d, PlotGrid + >>> x, y = symbols('x, y') + >>> p1 = plot(x, x**2, x**3, (x, -5, 5)) + >>> p2 = plot((x**2, (x, -6, 6)), (x, (x, -5, 5))) + >>> p3 = plot(x**3, (x, -5, 5)) + >>> p4 = plot3d(x*y, (x, -5, 5), (y, -5, 5)) + + Plotting vertically in a single line: + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> PlotGrid(2, 1, p1, p2) + PlotGrid object containing: + Plot[0]:Plot object containing: + [0]: cartesian line: x for x over (-5.0, 5.0) + [1]: cartesian line: x**2 for x over (-5.0, 5.0) + [2]: cartesian line: x**3 for x over (-5.0, 5.0) + Plot[1]:Plot object containing: + [0]: cartesian line: x**2 for x over (-6.0, 6.0) + [1]: cartesian line: x for x over (-5.0, 5.0) + + Plotting horizontally in a single line: + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> PlotGrid(1, 3, p2, p3, p4) + PlotGrid object containing: + Plot[0]:Plot object containing: + [0]: cartesian line: x**2 for x over (-6.0, 6.0) + [1]: cartesian line: x for x over (-5.0, 5.0) + Plot[1]:Plot object containing: + [0]: cartesian line: x**3 for x over (-5.0, 5.0) + Plot[2]:Plot object containing: + [0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0) + + Plotting in a grid form: + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> PlotGrid(2, 2, p1, p2, p3, p4) + PlotGrid object containing: + Plot[0]:Plot object containing: + [0]: cartesian line: x for x over (-5.0, 5.0) + [1]: cartesian line: x**2 for x over (-5.0, 5.0) + [2]: cartesian line: x**3 for x over (-5.0, 5.0) + Plot[1]:Plot object containing: + [0]: cartesian line: x**2 for x over (-6.0, 6.0) + [1]: cartesian line: x for x over (-5.0, 5.0) + Plot[2]:Plot object containing: + [0]: cartesian line: x**3 for x over (-5.0, 5.0) + Plot[3]:Plot object containing: + [0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0) + + """ + def __init__(self, nrows, ncolumns, *args, show=True, size=None, **kwargs): + """ + Parameters + ========== + + nrows : + The number of rows that should be in the grid of the + required subplot. + ncolumns : + The number of columns that should be in the grid + of the required subplot. + + nrows and ncolumns together define the required grid. + + Arguments + ========= + + A list of predefined plot objects entered in a row-wise sequence + i.e. plot objects which are to be in the top row of the required + grid are written first, then the second row objects and so on + + Keyword arguments + ================= + + show : Boolean + The default value is set to ``True``. Set show to ``False`` and + the function will not display the subplot. The returned instance + of the ``PlotGrid`` class can then be used to save or display the + plot by calling the ``save()`` and ``show()`` methods + respectively. + size : (float, float), optional + A tuple in the form (width, height) in inches to specify the size of + the overall figure. The default value is set to ``None``, meaning + the size will be set by the default backend. 
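+
+        A minimal illustrative sketch of the ``show=False`` workflow described
+        above (the file name ``'grid.png'`` is arbitrary, and saving requires
+        matplotlib):
+
+        >>> from sympy import symbols
+        >>> from sympy.plotting import plot, PlotGrid
+        >>> x = symbols('x')
+        >>> p1 = plot(x, show=False)
+        >>> p2 = plot(x**2, show=False)
+        >>> grid = PlotGrid(1, 2, p1, p2, show=False)
+        >>> grid.save('grid.png')   # doctest: +SKIP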
+ """ + self.nrows = nrows + self.ncolumns = ncolumns + self._series = [] + self.args = args + for arg in args: + self._series.append(arg._series) + self.backend = DefaultBackend + self.size = size + if show: + self.show() + + def show(self): + if hasattr(self, '_backend'): + self._backend.close() + self._backend = self.backend(self) + self._backend.show() + + def save(self, path): + if hasattr(self, '_backend'): + self._backend.close() + self._backend = self.backend(self) + self._backend.save(path) + + def __str__(self): + plot_strs = [('Plot[%d]:' % i) + str(plot) + for i, plot in enumerate(self.args)] + + return 'PlotGrid object containing:\n' + '\n'.join(plot_strs) + + +############################################################################## +# Data Series +############################################################################## +#TODO more general way to calculate aesthetics (see get_color_array) + +### The base class for all series +class BaseSeries: + """Base class for the data objects containing stuff to be plotted. + + Explanation + =========== + + The backend should check if it supports the data series that is given. + (e.g. TextBackend supports only LineOver1DRangeSeries). + It is the backend responsibility to know how to use the class of + data series that is given. + + Some data series classes are grouped (using a class attribute like is_2Dline) + according to the api they present (based only on convention). The backend is + not obliged to use that api (e.g. LineOver1DRangeSeries belongs to the + is_2Dline group and presents the get_points method, but the + TextBackend does not use the get_points method). + """ + + # Some flags follow. The rationale for using flags instead of checking base + # classes is that setting multiple flags is simpler than multiple + # inheritance. + + is_2Dline = False + # Some of the backends expect: + # - get_points returning 1D np.arrays list_x, list_y + # - get_color_array returning 1D np.array (done in Line2DBaseSeries) + # with the colors calculated at the points from get_points + + is_3Dline = False + # Some of the backends expect: + # - get_points returning 1D np.arrays list_x, list_y, list_y + # - get_color_array returning 1D np.array (done in Line2DBaseSeries) + # with the colors calculated at the points from get_points + + is_3Dsurface = False + # Some of the backends expect: + # - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays) + # - get_points an alias for get_meshes + + is_contour = False + # Some of the backends expect: + # - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays) + # - get_points an alias for get_meshes + + is_implicit = False + # Some of the backends expect: + # - get_meshes returning mesh_x (1D array), mesh_y(1D array, + # mesh_z (2D np.arrays) + # - get_points an alias for get_meshes + # Different from is_contour as the colormap in backend will be + # different + + is_parametric = False + # The calculation of aesthetics expects: + # - get_parameter_points returning one or two np.arrays (1D or 2D) + # used for calculation aesthetics + + def __init__(self): + super().__init__() + + @property + def is_3D(self): + flags3D = [ + self.is_3Dline, + self.is_3Dsurface + ] + return any(flags3D) + + @property + def is_line(self): + flagslines = [ + self.is_2Dline, + self.is_3Dline + ] + return any(flagslines) + + +### 2D lines +class Line2DBaseSeries(BaseSeries): + """A base class for 2D lines. 
+ + - adding the label, steps and only_integers options + - making is_2Dline true + - defining get_segments and get_color_array + """ + + is_2Dline = True + + _dim = 2 + + def __init__(self): + super().__init__() + self.label = None + self.steps = False + self.only_integers = False + self.line_color = None + + def get_data(self): + """ Return lists of coordinates for plotting the line. + + Returns + ======= + x : list + List of x-coordinates + + y : list + List of y-coordinates + + z : list + List of z-coordinates in case of Parametric3DLineSeries + """ + np = import_module('numpy') + points = self.get_points() + if self.steps is True: + if len(points) == 2: + x = np.array((points[0], points[0])).T.flatten()[1:] + y = np.array((points[1], points[1])).T.flatten()[:-1] + points = (x, y) + else: + x = np.repeat(points[0], 3)[2:] + y = np.repeat(points[1], 3)[:-2] + z = np.repeat(points[2], 3)[1:-1] + points = (x, y, z) + return points + + def get_segments(self): + sympy_deprecation_warning( + """ + The Line2DBaseSeries.get_segments() method is deprecated. + + Instead, use the MatplotlibBackend.get_segments() method, or use + The get_points() or get_data() methods. + """, + deprecated_since_version="1.9", + active_deprecations_target="deprecated-get-segments") + + np = import_module('numpy') + points = type(self).get_data(self) + points = np.ma.array(points).T.reshape(-1, 1, self._dim) + return np.ma.concatenate([points[:-1], points[1:]], axis=1) + + def get_color_array(self): + np = import_module('numpy') + c = self.line_color + if hasattr(c, '__call__'): + f = np.vectorize(c) + nargs = arity(c) + if nargs == 1 and self.is_parametric: + x = self.get_parameter_points() + return f(centers_of_segments(x)) + else: + variables = list(map(centers_of_segments, self.get_points())) + if nargs == 1: + return f(variables[0]) + elif nargs == 2: + return f(*variables[:2]) + else: # only if the line is 3D (otherwise raises an error) + return f(*variables) + else: + return c*np.ones(self.nb_of_points) + + +class List2DSeries(Line2DBaseSeries): + """Representation for a line consisting of list of points.""" + + def __init__(self, list_x, list_y): + np = import_module('numpy') + super().__init__() + self.list_x = np.array(list_x) + self.list_y = np.array(list_y) + self.label = 'list' + + def __str__(self): + return 'list plot' + + def get_points(self): + return (self.list_x, self.list_y) + + +class LineOver1DRangeSeries(Line2DBaseSeries): + """Representation for a line consisting of a SymPy expression over a range.""" + + def __init__(self, expr, var_start_end, **kwargs): + super().__init__() + self.expr = sympify(expr) + self.label = kwargs.get('label', None) or self.expr + self.var = sympify(var_start_end[0]) + self.start = float(var_start_end[1]) + self.end = float(var_start_end[2]) + self.nb_of_points = kwargs.get('nb_of_points', 300) + self.adaptive = kwargs.get('adaptive', True) + self.depth = kwargs.get('depth', 12) + self.line_color = kwargs.get('line_color', None) + self.xscale = kwargs.get('xscale', 'linear') + + def __str__(self): + return 'cartesian line: %s for %s over %s' % ( + str(self.expr), str(self.var), str((self.start, self.end))) + + def get_points(self): + """ Return lists of coordinates for plotting. Depending on the + ``adaptive`` option, this function will either use an adaptive algorithm + or it will uniformly sample the expression over the provided range. 
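+
+        A small illustrative sketch of the uniform sampler (the numeric output
+        is indicative and assumes NumPy is available):
+
+        >>> from sympy.abc import x
+        >>> from sympy.plotting.plot import LineOver1DRangeSeries
+        >>> s = LineOver1DRangeSeries(x**2, (x, -2, 2), adaptive=False,
+        ...     nb_of_points=5)
+        >>> xs, ys = s.get_points()
+        >>> [float(v) for v in xs]   # doctest: +SKIP
+        [-2.0, -1.0, 0.0, 1.0, 2.0]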
+ + Returns + ======= + x : list + List of x-coordinates + + y : list + List of y-coordinates + + + Explanation + =========== + + The adaptive sampling is done by recursively checking if three + points are almost collinear. If they are not collinear, then more + points are added between those points. + + References + ========== + + .. [1] Adaptive polygonal approximation of parametric curves, + Luiz Henrique de Figueiredo. + + """ + if self.only_integers or not self.adaptive: + return self._uniform_sampling() + else: + f = lambdify([self.var], self.expr) + x_coords = [] + y_coords = [] + np = import_module('numpy') + def sample(p, q, depth): + """ Samples recursively if three points are almost collinear. + For depth < 6, points are added irrespective of whether they + satisfy the collinearity condition or not. The maximum depth + allowed is 12. + """ + # Randomly sample to avoid aliasing. + random = 0.45 + np.random.rand() * 0.1 + if self.xscale == 'log': + xnew = 10**(np.log10(p[0]) + random * (np.log10(q[0]) - + np.log10(p[0]))) + else: + xnew = p[0] + random * (q[0] - p[0]) + ynew = f(xnew) + new_point = np.array([xnew, ynew]) + + # Maximum depth + if depth > self.depth: + x_coords.append(q[0]) + y_coords.append(q[1]) + + # Sample irrespective of whether the line is flat till the + # depth of 6. We are not using linspace to avoid aliasing. + elif depth < 6: + sample(p, new_point, depth + 1) + sample(new_point, q, depth + 1) + + # Sample ten points if complex values are encountered + # at both ends. If there is a real value in between, then + # sample those points further. + elif p[1] is None and q[1] is None: + if self.xscale == 'log': + xarray = np.logspace(p[0], q[0], 10) + else: + xarray = np.linspace(p[0], q[0], 10) + yarray = list(map(f, xarray)) + if not all(y is None for y in yarray): + for i in range(len(yarray) - 1): + if not (yarray[i] is None and yarray[i + 1] is None): + sample([xarray[i], yarray[i]], + [xarray[i + 1], yarray[i + 1]], depth + 1) + + # Sample further if one of the end points in None (i.e. a + # complex value) or the three points are not almost collinear. 
+ elif (p[1] is None or q[1] is None or new_point[1] is None + or not flat(p, new_point, q)): + sample(p, new_point, depth + 1) + sample(new_point, q, depth + 1) + else: + x_coords.append(q[0]) + y_coords.append(q[1]) + + f_start = f(self.start) + f_end = f(self.end) + x_coords.append(self.start) + y_coords.append(f_start) + sample(np.array([self.start, f_start]), + np.array([self.end, f_end]), 0) + + return (x_coords, y_coords) + + def _uniform_sampling(self): + np = import_module('numpy') + if self.only_integers is True: + if self.xscale == 'log': + list_x = np.logspace(int(self.start), int(self.end), + num=int(self.end) - int(self.start) + 1) + else: + list_x = np.linspace(int(self.start), int(self.end), + num=int(self.end) - int(self.start) + 1) + else: + if self.xscale == 'log': + list_x = np.logspace(self.start, self.end, num=self.nb_of_points) + else: + list_x = np.linspace(self.start, self.end, num=self.nb_of_points) + f = vectorized_lambdify([self.var], self.expr) + list_y = f(list_x) + return (list_x, list_y) + + +class Parametric2DLineSeries(Line2DBaseSeries): + """Representation for a line consisting of two parametric SymPy expressions + over a range.""" + + is_parametric = True + + def __init__(self, expr_x, expr_y, var_start_end, **kwargs): + super().__init__() + self.expr_x = sympify(expr_x) + self.expr_y = sympify(expr_y) + self.label = kwargs.get('label', None) or \ + Tuple(self.expr_x, self.expr_y) + self.var = sympify(var_start_end[0]) + self.start = float(var_start_end[1]) + self.end = float(var_start_end[2]) + self.nb_of_points = kwargs.get('nb_of_points', 300) + self.adaptive = kwargs.get('adaptive', True) + self.depth = kwargs.get('depth', 12) + self.line_color = kwargs.get('line_color', None) + + def __str__(self): + return 'parametric cartesian line: (%s, %s) for %s over %s' % ( + str(self.expr_x), str(self.expr_y), str(self.var), + str((self.start, self.end))) + + def get_parameter_points(self): + np = import_module('numpy') + return np.linspace(self.start, self.end, num=self.nb_of_points) + + def _uniform_sampling(self): + param = self.get_parameter_points() + fx = vectorized_lambdify([self.var], self.expr_x) + fy = vectorized_lambdify([self.var], self.expr_y) + list_x = fx(param) + list_y = fy(param) + return (list_x, list_y) + + def get_points(self): + """ Return lists of coordinates for plotting. Depending on the + ``adaptive`` option, this function will either use an adaptive algorithm + or it will uniformly sample the expression over the provided range. + + Returns + ======= + x : list + List of x-coordinates + + y : list + List of y-coordinates + + + Explanation + =========== + + The adaptive sampling is done by recursively checking if three + points are almost collinear. If they are not collinear, then more + points are added between those points. + + References + ========== + + .. [1] Adaptive polygonal approximation of parametric curves, + Luiz Henrique de Figueiredo. + + """ + if not self.adaptive: + return self._uniform_sampling() + + f_x = lambdify([self.var], self.expr_x) + f_y = lambdify([self.var], self.expr_y) + x_coords = [] + y_coords = [] + + def sample(param_p, param_q, p, q, depth): + """ Samples recursively if three points are almost collinear. + For depth < 6, points are added irrespective of whether they + satisfy the collinearity condition or not. The maximum depth + allowed is 12. + """ + # Randomly sample to avoid aliasing. 
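When ``adaptive`` is ``False`` (or ``only_integers`` is set), ``_uniform_sampling`` above simply evaluates the lambdified expression on a ``linspace``/``logspace`` grid. A usage sketch, assuming this module is importable as ``sympy.plotting.plot``:

    from sympy import symbols, sin
    from sympy.plotting.plot import LineOver1DRangeSeries

    x = symbols('x')
    # 11 evenly spaced samples on [0, 10] instead of adaptive subdivision.
    series = LineOver1DRangeSeries(sin(x), (x, 0, 10),
                                   adaptive=False, nb_of_points=11)
    xs, ys = series.get_points()
    print(len(xs), len(ys))  # 11 11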
+ np = import_module('numpy') + random = 0.45 + np.random.rand() * 0.1 + param_new = param_p + random * (param_q - param_p) + xnew = f_x(param_new) + ynew = f_y(param_new) + new_point = np.array([xnew, ynew]) + + # Maximum depth + if depth > self.depth: + x_coords.append(q[0]) + y_coords.append(q[1]) + + # Sample irrespective of whether the line is flat till the + # depth of 6. We are not using linspace to avoid aliasing. + elif depth < 6: + sample(param_p, param_new, p, new_point, depth + 1) + sample(param_new, param_q, new_point, q, depth + 1) + + # Sample ten points if complex values are encountered + # at both ends. If there is a real value in between, then + # sample those points further. + elif ((p[0] is None and q[1] is None) or + (p[1] is None and q[1] is None)): + param_array = np.linspace(param_p, param_q, 10) + x_array = list(map(f_x, param_array)) + y_array = list(map(f_y, param_array)) + if not all(x is None and y is None + for x, y in zip(x_array, y_array)): + for i in range(len(y_array) - 1): + if ((x_array[i] is not None and y_array[i] is not None) or + (x_array[i + 1] is not None and y_array[i + 1] is not None)): + point_a = [x_array[i], y_array[i]] + point_b = [x_array[i + 1], y_array[i + 1]] + sample(param_array[i], param_array[i], point_a, + point_b, depth + 1) + + # Sample further if one of the end points in None (i.e. a complex + # value) or the three points are not almost collinear. + elif (p[0] is None or p[1] is None + or q[1] is None or q[0] is None + or not flat(p, new_point, q)): + sample(param_p, param_new, p, new_point, depth + 1) + sample(param_new, param_q, new_point, q, depth + 1) + else: + x_coords.append(q[0]) + y_coords.append(q[1]) + + f_start_x = f_x(self.start) + f_start_y = f_y(self.start) + start = [f_start_x, f_start_y] + f_end_x = f_x(self.end) + f_end_y = f_y(self.end) + end = [f_end_x, f_end_y] + x_coords.append(f_start_x) + y_coords.append(f_start_y) + sample(self.start, self.end, start, end, 0) + + return x_coords, y_coords + + +### 3D lines +class Line3DBaseSeries(Line2DBaseSeries): + """A base class for 3D lines. 
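``Parametric2DLineSeries`` above applies the same adaptive/uniform machinery to a pair of expressions sharing one parameter. A short usage sketch (again assuming the ``sympy.plotting.plot`` import path; the range end 6.29 is just an approximation of 2*pi):

    from sympy import symbols, cos, sin
    from sympy.plotting.plot import Parametric2DLineSeries

    u = symbols('u')
    circle = Parametric2DLineSeries(cos(u), sin(u), (u, 0, 6.29),
                                    adaptive=False, nb_of_points=100)
    xs, ys = circle.get_points()
    print(circle)  # parametric cartesian line: (cos(u), sin(u)) for u over (0.0, 6.29)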
+ + Most of the stuff is derived from Line2DBaseSeries.""" + + is_2Dline = False + is_3Dline = True + _dim = 3 + + def __init__(self): + super().__init__() + + +class Parametric3DLineSeries(Line3DBaseSeries): + """Representation for a 3D line consisting of three parametric SymPy + expressions and a range.""" + + is_parametric = True + + def __init__(self, expr_x, expr_y, expr_z, var_start_end, **kwargs): + super().__init__() + self.expr_x = sympify(expr_x) + self.expr_y = sympify(expr_y) + self.expr_z = sympify(expr_z) + self.label = kwargs.get('label', None) or \ + Tuple(self.expr_x, self.expr_y) + self.var = sympify(var_start_end[0]) + self.start = float(var_start_end[1]) + self.end = float(var_start_end[2]) + self.nb_of_points = kwargs.get('nb_of_points', 300) + self.line_color = kwargs.get('line_color', None) + self._xlim = None + self._ylim = None + self._zlim = None + + def __str__(self): + return '3D parametric cartesian line: (%s, %s, %s) for %s over %s' % ( + str(self.expr_x), str(self.expr_y), str(self.expr_z), + str(self.var), str((self.start, self.end))) + + def get_parameter_points(self): + np = import_module('numpy') + return np.linspace(self.start, self.end, num=self.nb_of_points) + + def get_points(self): + np = import_module('numpy') + param = self.get_parameter_points() + fx = vectorized_lambdify([self.var], self.expr_x) + fy = vectorized_lambdify([self.var], self.expr_y) + fz = vectorized_lambdify([self.var], self.expr_z) + + list_x = fx(param) + list_y = fy(param) + list_z = fz(param) + + list_x = np.array(list_x, dtype=np.float64) + list_y = np.array(list_y, dtype=np.float64) + list_z = np.array(list_z, dtype=np.float64) + + list_x = np.ma.masked_invalid(list_x) + list_y = np.ma.masked_invalid(list_y) + list_z = np.ma.masked_invalid(list_z) + + self._xlim = (np.amin(list_x), np.amax(list_x)) + self._ylim = (np.amin(list_y), np.amax(list_y)) + self._zlim = (np.amin(list_z), np.amax(list_z)) + return list_x, list_y, list_z + + +### Surfaces +class SurfaceBaseSeries(BaseSeries): + """A base class for 3D surfaces.""" + + is_3Dsurface = True + + def __init__(self): + super().__init__() + self.surface_color = None + + def get_color_array(self): + np = import_module('numpy') + c = self.surface_color + if isinstance(c, Callable): + f = np.vectorize(c) + nargs = arity(c) + if self.is_parametric: + variables = list(map(centers_of_faces, self.get_parameter_meshes())) + if nargs == 1: + return f(variables[0]) + elif nargs == 2: + return f(*variables) + variables = list(map(centers_of_faces, self.get_meshes())) + if nargs == 1: + return f(variables[0]) + elif nargs == 2: + return f(*variables[:2]) + else: + return f(*variables) + else: + if isinstance(self, SurfaceOver2DRangeSeries): + return c*np.ones(min(self.nb_of_points_x, self.nb_of_points_y)) + else: + return c*np.ones(min(self.nb_of_points_u, self.nb_of_points_v)) + + +class SurfaceOver2DRangeSeries(SurfaceBaseSeries): + """Representation for a 3D surface consisting of a SymPy expression and 2D + range.""" + def __init__(self, expr, var_start_end_x, var_start_end_y, **kwargs): + super().__init__() + self.expr = sympify(expr) + self.var_x = sympify(var_start_end_x[0]) + self.start_x = float(var_start_end_x[1]) + self.end_x = float(var_start_end_x[2]) + self.var_y = sympify(var_start_end_y[0]) + self.start_y = float(var_start_end_y[1]) + self.end_y = float(var_start_end_y[2]) + self.nb_of_points_x = kwargs.get('nb_of_points_x', 50) + self.nb_of_points_y = kwargs.get('nb_of_points_y', 50) + self.surface_color = 
kwargs.get('surface_color', None) + + self._xlim = (self.start_x, self.end_x) + self._ylim = (self.start_y, self.end_y) + + def __str__(self): + return ('cartesian surface: %s for' + ' %s over %s and %s over %s') % ( + str(self.expr), + str(self.var_x), + str((self.start_x, self.end_x)), + str(self.var_y), + str((self.start_y, self.end_y))) + + def get_meshes(self): + np = import_module('numpy') + mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x, + num=self.nb_of_points_x), + np.linspace(self.start_y, self.end_y, + num=self.nb_of_points_y)) + f = vectorized_lambdify((self.var_x, self.var_y), self.expr) + mesh_z = f(mesh_x, mesh_y) + mesh_z = np.array(mesh_z, dtype=np.float64) + mesh_z = np.ma.masked_invalid(mesh_z) + self._zlim = (np.amin(mesh_z), np.amax(mesh_z)) + return mesh_x, mesh_y, mesh_z + + +class ParametricSurfaceSeries(SurfaceBaseSeries): + """Representation for a 3D surface consisting of three parametric SymPy + expressions and a range.""" + + is_parametric = True + + def __init__( + self, expr_x, expr_y, expr_z, var_start_end_u, var_start_end_v, + **kwargs): + super().__init__() + self.expr_x = sympify(expr_x) + self.expr_y = sympify(expr_y) + self.expr_z = sympify(expr_z) + self.var_u = sympify(var_start_end_u[0]) + self.start_u = float(var_start_end_u[1]) + self.end_u = float(var_start_end_u[2]) + self.var_v = sympify(var_start_end_v[0]) + self.start_v = float(var_start_end_v[1]) + self.end_v = float(var_start_end_v[2]) + self.nb_of_points_u = kwargs.get('nb_of_points_u', 50) + self.nb_of_points_v = kwargs.get('nb_of_points_v', 50) + self.surface_color = kwargs.get('surface_color', None) + + def __str__(self): + return ('parametric cartesian surface: (%s, %s, %s) for' + ' %s over %s and %s over %s') % ( + str(self.expr_x), + str(self.expr_y), + str(self.expr_z), + str(self.var_u), + str((self.start_u, self.end_u)), + str(self.var_v), + str((self.start_v, self.end_v))) + + def get_parameter_meshes(self): + np = import_module('numpy') + return np.meshgrid(np.linspace(self.start_u, self.end_u, + num=self.nb_of_points_u), + np.linspace(self.start_v, self.end_v, + num=self.nb_of_points_v)) + + def get_meshes(self): + np = import_module('numpy') + + mesh_u, mesh_v = self.get_parameter_meshes() + fx = vectorized_lambdify((self.var_u, self.var_v), self.expr_x) + fy = vectorized_lambdify((self.var_u, self.var_v), self.expr_y) + fz = vectorized_lambdify((self.var_u, self.var_v), self.expr_z) + + mesh_x = fx(mesh_u, mesh_v) + mesh_y = fy(mesh_u, mesh_v) + mesh_z = fz(mesh_u, mesh_v) + + mesh_x = np.array(mesh_x, dtype=np.float64) + mesh_y = np.array(mesh_y, dtype=np.float64) + mesh_z = np.array(mesh_z, dtype=np.float64) + + mesh_x = np.ma.masked_invalid(mesh_x) + mesh_y = np.ma.masked_invalid(mesh_y) + mesh_z = np.ma.masked_invalid(mesh_z) + + self._xlim = (np.amin(mesh_x), np.amax(mesh_x)) + self._ylim = (np.amin(mesh_y), np.amax(mesh_y)) + self._zlim = (np.amin(mesh_z), np.amax(mesh_z)) + + return mesh_x, mesh_y, mesh_z + + +### Contours +class ContourSeries(BaseSeries): + """Representation for a contour plot.""" + # The code is mostly repetition of SurfaceOver2DRange. 
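``get_meshes`` above builds a meshgrid over the two ranges, evaluates the lambdified expression on it, and masks non-finite values. A usage sketch showing the resulting array shapes (NumPy's default meshgrid indexing puts y along the rows and x along the columns):

    from sympy import symbols
    from sympy.plotting.plot import SurfaceOver2DRangeSeries

    x, y = symbols('x y')
    surf = SurfaceOver2DRangeSeries(x*y, (x, -2, 2), (y, -2, 2),
                                    nb_of_points_x=40, nb_of_points_y=30)
    mesh_x, mesh_y, mesh_z = surf.get_meshes()
    print(mesh_x.shape, mesh_y.shape, mesh_z.shape)  # (30, 40) (30, 40) (30, 40)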
+ # Presently used in contour_plot function + + is_contour = True + + def __init__(self, expr, var_start_end_x, var_start_end_y): + super().__init__() + self.nb_of_points_x = 50 + self.nb_of_points_y = 50 + self.expr = sympify(expr) + self.var_x = sympify(var_start_end_x[0]) + self.start_x = float(var_start_end_x[1]) + self.end_x = float(var_start_end_x[2]) + self.var_y = sympify(var_start_end_y[0]) + self.start_y = float(var_start_end_y[1]) + self.end_y = float(var_start_end_y[2]) + + self.get_points = self.get_meshes + + self._xlim = (self.start_x, self.end_x) + self._ylim = (self.start_y, self.end_y) + + def __str__(self): + return ('contour: %s for ' + '%s over %s and %s over %s') % ( + str(self.expr), + str(self.var_x), + str((self.start_x, self.end_x)), + str(self.var_y), + str((self.start_y, self.end_y))) + + def get_meshes(self): + np = import_module('numpy') + mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x, + num=self.nb_of_points_x), + np.linspace(self.start_y, self.end_y, + num=self.nb_of_points_y)) + f = vectorized_lambdify((self.var_x, self.var_y), self.expr) + return (mesh_x, mesh_y, f(mesh_x, mesh_y)) + + +############################################################################## +# Backends +############################################################################## + +class BaseBackend: + """Base class for all backends. A backend represents the plotting library, + which implements the necessary functionalities in order to use SymPy + plotting functions. + + How the plotting module works: + + 1. Whenever a plotting function is called, the provided expressions are + processed and a list of instances of the :class:`BaseSeries` class is + created, containing the necessary information to plot the expressions + (e.g. the expression, ranges, series name, ...). Eventually, these + objects will generate the numerical data to be plotted. + 2. A :class:`~.Plot` object is instantiated, which stores the list of + series and the main attributes of the plot (e.g. axis labels, title, ...). + 3. When the ``show`` command is executed, a new backend is instantiated, + which loops through each series object to generate and plot the + numerical data. The backend is also going to set the axis labels, title, + ..., according to the values stored in the Plot instance. + + The backend should check if it supports the data series that it is given + (e.g. :class:`TextBackend` supports only :class:`LineOver1DRangeSeries`). + + It is the backend responsibility to know how to use the class of data series + that it's given. Note that the current implementation of the ``*Series`` + classes is "matplotlib-centric": the numerical data returned by the + ``get_points`` and ``get_meshes`` methods is meant to be used directly by + Matplotlib. Therefore, the new backend will have to pre-process the + numerical data to make it compatible with the chosen plotting library. + Keep in mind that future SymPy versions may improve the ``*Series`` classes + in order to return numerical data "non-matplotlib-centric", hence if you code + a new backend you have the responsibility to check if its working on each + SymPy release. + + Please explore the :class:`MatplotlibBackend` source code to understand how a + backend should be coded. + + Methods + ======= + + In order to be used by SymPy plotting functions, a backend must implement + the following methods: + + * show(self): used to loop over the data series, generate the numerical + data, plot it and set the axis labels, title, ... 
+ * save(self, path): used to save the current plot to the specified file + path. + * close(self): used to close the current plot backend (note: some plotting + library does not support this functionality. In that case, just raise a + warning). + + See also + ======== + + MatplotlibBackend + """ + def __init__(self, parent): + super().__init__() + self.parent = parent + + def show(self): + raise NotImplementedError + + def save(self, path): + raise NotImplementedError + + def close(self): + raise NotImplementedError + + +# Don't have to check for the success of importing matplotlib in each case; +# we will only be using this backend if we can successfully import matploblib +class MatplotlibBackend(BaseBackend): + """ This class implements the functionalities to use Matplotlib with SymPy + plotting functions. + """ + def __init__(self, parent): + super().__init__(parent) + self.matplotlib = import_module('matplotlib', + import_kwargs={'fromlist': ['pyplot', 'cm', 'collections']}, + min_module_version='1.1.0', catch=(RuntimeError,)) + self.plt = self.matplotlib.pyplot + self.cm = self.matplotlib.cm + self.LineCollection = self.matplotlib.collections.LineCollection + aspect = getattr(self.parent, 'aspect_ratio', 'auto') + if aspect != 'auto': + aspect = float(aspect[1]) / aspect[0] + + if isinstance(self.parent, Plot): + nrows, ncolumns = 1, 1 + series_list = [self.parent._series] + elif isinstance(self.parent, PlotGrid): + nrows, ncolumns = self.parent.nrows, self.parent.ncolumns + series_list = self.parent._series + + self.ax = [] + self.fig = self.plt.figure(figsize=parent.size) + + for i, series in enumerate(series_list): + are_3D = [s.is_3D for s in series] + + if any(are_3D) and not all(are_3D): + raise ValueError('The matplotlib backend cannot mix 2D and 3D.') + elif all(are_3D): + # mpl_toolkits.mplot3d is necessary for + # projection='3d' + mpl_toolkits = import_module('mpl_toolkits', # noqa + import_kwargs={'fromlist': ['mplot3d']}) + self.ax.append(self.fig.add_subplot(nrows, ncolumns, i + 1, projection='3d', aspect=aspect)) + + elif not any(are_3D): + self.ax.append(self.fig.add_subplot(nrows, ncolumns, i + 1, aspect=aspect)) + self.ax[i].spines['left'].set_position('zero') + self.ax[i].spines['right'].set_color('none') + self.ax[i].spines['bottom'].set_position('zero') + self.ax[i].spines['top'].set_color('none') + self.ax[i].xaxis.set_ticks_position('bottom') + self.ax[i].yaxis.set_ticks_position('left') + + @staticmethod + def get_segments(x, y, z=None): + """ Convert two list of coordinates to a list of segments to be used + with Matplotlib's :external:class:`~matplotlib.collections.LineCollection`. + + Parameters + ========== + x : list + List of x-coordinates + + y : list + List of y-coordinates + + z : list + List of z-coordinates for a 3D line. 
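Per the notes above, a third-party backend only needs to subclass ``BaseBackend``, implement ``show``, ``save`` and ``close``, and pull the numerical data out of each series itself. A minimal, hypothetical sketch (``CSVBackend`` is illustrative, handles only 2D line series, and is not part of SymPy; ``plot_backends`` is the name-to-class registry defined further down in this module):

    from sympy.plotting.plot import BaseBackend, plot_backends

    class CSVBackend(BaseBackend):
        # Illustrative backend: dump 2D line data as CSV text instead of drawing.
        def show(self):
            for series in self.parent._series:
                xs, ys = series.get_data()
                for px, py in zip(xs, ys):
                    print('%s,%s' % (px, py))

        def save(self, path):
            raise NotImplementedError

        def close(self):
            pass

    plot_backends['csv'] = CSVBackend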
+ """ + np = import_module('numpy') + if z is not None: + dim = 3 + points = (x, y, z) + else: + dim = 2 + points = (x, y) + points = np.ma.array(points).T.reshape(-1, 1, dim) + return np.ma.concatenate([points[:-1], points[1:]], axis=1) + + def _process_series(self, series, ax, parent): + np = import_module('numpy') + mpl_toolkits = import_module( + 'mpl_toolkits', import_kwargs={'fromlist': ['mplot3d']}) + + # XXX Workaround for matplotlib issue + # https://github.com/matplotlib/matplotlib/issues/17130 + xlims, ylims, zlims = [], [], [] + + for s in series: + # Create the collections + if s.is_2Dline: + x, y = s.get_data() + if (isinstance(s.line_color, (int, float)) or + callable(s.line_color)): + segments = self.get_segments(x, y) + collection = self.LineCollection(segments) + collection.set_array(s.get_color_array()) + ax.add_collection(collection) + else: + lbl = _str_or_latex(s.label) + line, = ax.plot(x, y, label=lbl, color=s.line_color) + elif s.is_contour: + ax.contour(*s.get_meshes()) + elif s.is_3Dline: + x, y, z = s.get_data() + if (isinstance(s.line_color, (int, float)) or + callable(s.line_color)): + art3d = mpl_toolkits.mplot3d.art3d + segments = self.get_segments(x, y, z) + collection = art3d.Line3DCollection(segments) + collection.set_array(s.get_color_array()) + ax.add_collection(collection) + else: + lbl = _str_or_latex(s.label) + ax.plot(x, y, z, label=lbl, color=s.line_color) + + xlims.append(s._xlim) + ylims.append(s._ylim) + zlims.append(s._zlim) + elif s.is_3Dsurface: + x, y, z = s.get_meshes() + collection = ax.plot_surface(x, y, z, + cmap=getattr(self.cm, 'viridis', self.cm.jet), + rstride=1, cstride=1, linewidth=0.1) + if isinstance(s.surface_color, (float, int, Callable)): + color_array = s.get_color_array() + color_array = color_array.reshape(color_array.size) + collection.set_array(color_array) + else: + collection.set_color(s.surface_color) + + xlims.append(s._xlim) + ylims.append(s._ylim) + zlims.append(s._zlim) + elif s.is_implicit: + points = s.get_raster() + if len(points) == 2: + # interval math plotting + x, y = _matplotlib_list(points[0]) + ax.fill(x, y, facecolor=s.line_color, edgecolor='None') + else: + # use contourf or contour depending on whether it is + # an inequality or equality. + # XXX: ``contour`` plots multiple lines. Should be fixed. + ListedColormap = self.matplotlib.colors.ListedColormap + colormap = ListedColormap(["white", s.line_color]) + xarray, yarray, zarray, plot_type = points + if plot_type == 'contour': + ax.contour(xarray, yarray, zarray, cmap=colormap) + else: + ax.contourf(xarray, yarray, zarray, cmap=colormap) + else: + raise NotImplementedError( + '{} is not supported in the SymPy plotting module ' + 'with matplotlib backend. Please report this issue.' + .format(ax)) + + Axes3D = mpl_toolkits.mplot3d.Axes3D + if not isinstance(ax, Axes3D): + ax.autoscale_view( + scalex=ax.get_autoscalex_on(), + scaley=ax.get_autoscaley_on()) + else: + # XXX Workaround for matplotlib issue + # https://github.com/matplotlib/matplotlib/issues/17130 + if xlims: + xlims = np.array(xlims) + xlim = (np.amin(xlims[:, 0]), np.amax(xlims[:, 1])) + ax.set_xlim(xlim) + else: + ax.set_xlim([0, 1]) + + if ylims: + ylims = np.array(ylims) + ylim = (np.amin(ylims[:, 0]), np.amax(ylims[:, 1])) + ax.set_ylim(ylim) + else: + ax.set_ylim([0, 1]) + + if zlims: + zlims = np.array(zlims) + zlim = (np.amin(zlims[:, 0]), np.amax(zlims[:, 1])) + ax.set_zlim(zlim) + else: + ax.set_zlim([0, 1]) + + # Set global options. 
+ # TODO The 3D stuff + # XXX The order of those is important. + if parent.xscale and not isinstance(ax, Axes3D): + ax.set_xscale(parent.xscale) + if parent.yscale and not isinstance(ax, Axes3D): + ax.set_yscale(parent.yscale) + if not isinstance(ax, Axes3D) or self.matplotlib.__version__ >= '1.2.0': # XXX in the distant future remove this check + ax.set_autoscale_on(parent.autoscale) + if parent.axis_center: + val = parent.axis_center + if isinstance(ax, Axes3D): + pass + elif val == 'center': + ax.spines['left'].set_position('center') + ax.spines['bottom'].set_position('center') + elif val == 'auto': + xl, xh = ax.get_xlim() + yl, yh = ax.get_ylim() + pos_left = ('data', 0) if xl*xh <= 0 else 'center' + pos_bottom = ('data', 0) if yl*yh <= 0 else 'center' + ax.spines['left'].set_position(pos_left) + ax.spines['bottom'].set_position(pos_bottom) + else: + ax.spines['left'].set_position(('data', val[0])) + ax.spines['bottom'].set_position(('data', val[1])) + if not parent.axis: + ax.set_axis_off() + if parent.legend: + if ax.legend(): + ax.legend_.set_visible(parent.legend) + if parent.margin: + ax.set_xmargin(parent.margin) + ax.set_ymargin(parent.margin) + if parent.title: + ax.set_title(parent.title) + if parent.xlabel: + xlbl = _str_or_latex(parent.xlabel) + ax.set_xlabel(xlbl, position=(1, 0)) + if parent.ylabel: + ylbl = _str_or_latex(parent.ylabel) + ax.set_ylabel(ylbl, position=(0, 1)) + if isinstance(ax, Axes3D) and parent.zlabel: + zlbl = _str_or_latex(parent.zlabel) + ax.set_zlabel(zlbl, position=(0, 1)) + if parent.annotations: + for a in parent.annotations: + ax.annotate(**a) + if parent.markers: + for marker in parent.markers: + # make a copy of the marker dictionary + # so that it doesn't get altered + m = marker.copy() + args = m.pop('args') + ax.plot(*args, **m) + if parent.rectangles: + for r in parent.rectangles: + rect = self.matplotlib.patches.Rectangle(**r) + ax.add_patch(rect) + if parent.fill: + ax.fill_between(**parent.fill) + + # xlim and ylim should always be set at last so that plot limits + # doesn't get altered during the process. 
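The option handling above means extra matplotlib artists can be passed straight through ``plot()`` as lists of keyword-argument dictionaries, as described later in this file for ``markers``, ``annotations``, ``rectangles`` and ``fill``. A sketch (the specific marker and annotation values are illustrative, and rendering requires the matplotlib backend):

    from sympy import symbols
    from sympy.plotting import plot

    x = symbols('x')
    p = plot(
        x**2, (x, -2, 2),
        markers=[{'args': [[0], [0], 'ro']}],            # forwarded to Axes.plot
        annotations=[{'text': 'origin', 'xy': (0, 0)}],  # forwarded to Axes.annotate
        show=False)
    p.show()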
+ if parent.xlim: + ax.set_xlim(parent.xlim) + if parent.ylim: + ax.set_ylim(parent.ylim) + + + def process_series(self): + """ + Iterates over every ``Plot`` object and further calls + _process_series() + """ + parent = self.parent + if isinstance(parent, Plot): + series_list = [parent._series] + else: + series_list = parent._series + + for i, (series, ax) in enumerate(zip(series_list, self.ax)): + if isinstance(self.parent, PlotGrid): + parent = self.parent.args[i] + self._process_series(series, ax, parent) + + def show(self): + self.process_series() + #TODO after fixing https://github.com/ipython/ipython/issues/1255 + # you can uncomment the next line and remove the pyplot.show() call + #self.fig.show() + if _show: + self.fig.tight_layout() + self.plt.show() + else: + self.close() + + def save(self, path): + self.process_series() + self.fig.savefig(path) + + def close(self): + self.plt.close(self.fig) + + +class TextBackend(BaseBackend): + def __init__(self, parent): + super().__init__(parent) + + def show(self): + if not _show: + return + if len(self.parent._series) != 1: + raise ValueError( + 'The TextBackend supports only one graph per Plot.') + elif not isinstance(self.parent._series[0], LineOver1DRangeSeries): + raise ValueError( + 'The TextBackend supports only expressions over a 1D range') + else: + ser = self.parent._series[0] + textplot(ser.expr, ser.start, ser.end) + + def close(self): + pass + + +class DefaultBackend(BaseBackend): + def __new__(cls, parent): + matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,)) + if matplotlib: + return MatplotlibBackend(parent) + else: + return TextBackend(parent) + + +plot_backends = { + 'matplotlib': MatplotlibBackend, + 'text': TextBackend, + 'default': DefaultBackend +} + + +############################################################################## +# Finding the centers of line segments or mesh faces +############################################################################## + +def centers_of_segments(array): + np = import_module('numpy') + return np.mean(np.vstack((array[:-1], array[1:])), 0) + + +def centers_of_faces(array): + np = import_module('numpy') + return np.mean(np.dstack((array[:-1, :-1], + array[1:, :-1], + array[:-1, 1:], + array[:-1, :-1], + )), 2) + + +def flat(x, y, z, eps=1e-3): + """Checks whether three points are almost collinear""" + np = import_module('numpy') + # Workaround plotting piecewise (#8577): + # workaround for `lambdify` in `.experimental_lambdify` fails + # to return numerical values in some cases. Lower-level fix + # in `lambdify` is possible. + vector_a = (x - y).astype(np.float64) + vector_b = (z - y).astype(np.float64) + dot_product = np.dot(vector_a, vector_b) + vector_a_norm = np.linalg.norm(vector_a) + vector_b_norm = np.linalg.norm(vector_b) + cos_theta = dot_product / (vector_a_norm * vector_b_norm) + return abs(cos_theta + 1) < eps + + +def _matplotlib_list(interval_list): + """ + Returns lists for matplotlib ``fill`` command from a list of bounding + rectangular intervals + """ + xlist = [] + ylist = [] + if len(interval_list): + for intervals in interval_list: + intervalx = intervals[0] + intervaly = intervals[1] + xlist.extend([intervalx.start, intervalx.start, + intervalx.end, intervalx.end, None]) + ylist.extend([intervaly.start, intervaly.end, + intervaly.end, intervaly.start, None]) + else: + #XXX Ugly hack. 
Matplotlib does not accept empty lists for ``fill`` + xlist.extend((None, None, None, None)) + ylist.extend((None, None, None, None)) + return xlist, ylist + + +####New API for plotting module #### + +# TODO: Add color arrays for plots. +# TODO: Add more plotting options for 3d plots. +# TODO: Adaptive sampling for 3D plots. + +def plot(*args, show=True, **kwargs): + """Plots a function of a single variable as a curve. + + Parameters + ========== + + args : + The first argument is the expression representing the function + of single variable to be plotted. + + The last argument is a 3-tuple denoting the range of the free + variable. e.g. ``(x, 0, 5)`` + + Typical usage examples are in the following: + + - Plotting a single expression with a single range. + ``plot(expr, range, **kwargs)`` + - Plotting a single expression with the default range (-10, 10). + ``plot(expr, **kwargs)`` + - Plotting multiple expressions with a single range. + ``plot(expr1, expr2, ..., range, **kwargs)`` + - Plotting multiple expressions with multiple ranges. + ``plot((expr1, range1), (expr2, range2), ..., **kwargs)`` + + It is best practice to specify range explicitly because default + range may change in the future if a more advanced default range + detection algorithm is implemented. + + show : bool, optional + The default value is set to ``True``. Set show to ``False`` and + the function will not display the plot. The returned instance of + the ``Plot`` class can then be used to save or display the plot + by calling the ``save()`` and ``show()`` methods respectively. + + line_color : string, or float, or function, optional + Specifies the color for the plot. + See ``Plot`` to see how to set color for the plots. + Note that by setting ``line_color``, it would be applied simultaneously + to all the series. + + title : str, optional + Title of the plot. It is set to the latex representation of + the expression, if the plot has only one expression. + + label : str, optional + The label of the expression in the plot. It will be used when + called with ``legend``. Default is the name of the expression. + e.g. ``sin(x)`` + + xlabel : str or expression, optional + Label for the x-axis. + + ylabel : str or expression, optional + Label for the y-axis. + + xscale : 'linear' or 'log', optional + Sets the scaling of the x-axis. + + yscale : 'linear' or 'log', optional + Sets the scaling of the y-axis. + + axis_center : (float, float), optional + Tuple of two floats denoting the coordinates of the center or + {'center', 'auto'} + + xlim : (float, float), optional + Denotes the x-axis limits, ``(min, max)```. + + ylim : (float, float), optional + Denotes the y-axis limits, ``(min, max)```. + + annotations : list, optional + A list of dictionaries specifying the type of annotation + required. The keys in the dictionary should be equivalent + to the arguments of the :external:mod:`matplotlib`'s + :external:meth:`~matplotlib.axes.Axes.annotate` method. + + markers : list, optional + A list of dictionaries specifying the type the markers required. + The keys in the dictionary should be equivalent to the arguments + of the :external:mod:`matplotlib`'s :external:func:`~matplotlib.pyplot.plot()` function + along with the marker related keyworded arguments. + + rectangles : list, optional + A list of dictionaries specifying the dimensions of the + rectangles to be plotted. The keys in the dictionary should be + equivalent to the arguments of the :external:mod:`matplotlib`'s + :external:class:`~matplotlib.patches.Rectangle` class. 
+ + fill : dict, optional + A dictionary specifying the type of color filling required in + the plot. The keys in the dictionary should be equivalent to the + arguments of the :external:mod:`matplotlib`'s + :external:meth:`~matplotlib.axes.Axes.fill_between` method. + + adaptive : bool, optional + The default value is set to ``True``. Set adaptive to ``False`` + and specify ``nb_of_points`` if uniform sampling is required. + + The plotting uses an adaptive algorithm which samples + recursively to accurately plot. The adaptive algorithm uses a + random point near the midpoint of two points that has to be + further sampled. Hence the same plots can appear slightly + different. + + depth : int, optional + Recursion depth of the adaptive algorithm. A depth of value + `n` samples a maximum of `2^{n}` points. + + If the ``adaptive`` flag is set to ``False``, this will be + ignored. + + nb_of_points : int, optional + Used when the ``adaptive`` is set to ``False``. The function + is uniformly sampled at ``nb_of_points`` number of points. + + If the ``adaptive`` flag is set to ``True``, this will be + ignored. + + size : (float, float), optional + A tuple in the form (width, height) in inches to specify the size of + the overall figure. The default value is set to ``None``, meaning + the size will be set by the default backend. + + Examples + ======== + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> from sympy import symbols + >>> from sympy.plotting import plot + >>> x = symbols('x') + + Single Plot + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> plot(x**2, (x, -5, 5)) + Plot object containing: + [0]: cartesian line: x**2 for x over (-5.0, 5.0) + + Multiple plots with single range. + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> plot(x, x**2, x**3, (x, -5, 5)) + Plot object containing: + [0]: cartesian line: x for x over (-5.0, 5.0) + [1]: cartesian line: x**2 for x over (-5.0, 5.0) + [2]: cartesian line: x**3 for x over (-5.0, 5.0) + + Multiple plots with different ranges. + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5))) + Plot object containing: + [0]: cartesian line: x**2 for x over (-6.0, 6.0) + [1]: cartesian line: x for x over (-5.0, 5.0) + + No adaptive sampling. + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> plot(x**2, adaptive=False, nb_of_points=400) + Plot object containing: + [0]: cartesian line: x**2 for x over (-10.0, 10.0) + + See Also + ======== + + Plot, LineOver1DRangeSeries + + """ + args = list(map(sympify, args)) + free = set() + for a in args: + if isinstance(a, Expr): + free |= a.free_symbols + if len(free) > 1: + raise ValueError( + 'The same variable should be used in all ' + 'univariate expressions being plotted.') + x = free.pop() if free else Symbol('x') + kwargs.setdefault('xlabel', x) + kwargs.setdefault('ylabel', Function('f')(x)) + series = [] + plot_expr = check_arguments(args, 1, 1) + series = [LineOver1DRangeSeries(*arg, **kwargs) for arg in plot_expr] + + plots = Plot(*series, **kwargs) + if show: + plots.show() + return plots + + +def plot_parametric(*args, show=True, **kwargs): + """ + Plots a 2D parametric curve. 
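As the ``show`` parameter described above, rendering can be deferred: ``plot()`` then returns a ``Plot`` object whose ``save()`` and ``show()`` methods drive the backend later. A short sketch (the output filename is illustrative):

    from sympy import symbols, sin
    from sympy.plotting import plot

    x = symbols('x')
    p = plot(sin(x), (x, 0, 10), title='sin(x)', show=False)
    p.save('sin_plot.png')  # render to disk via the matplotlib backend
    p.show()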
+ + Parameters + ========== + + args + Common specifications are: + + - Plotting a single parametric curve with a range + ``plot_parametric((expr_x, expr_y), range)`` + - Plotting multiple parametric curves with the same range + ``plot_parametric((expr_x, expr_y), ..., range)`` + - Plotting multiple parametric curves with different ranges + ``plot_parametric((expr_x, expr_y, range), ...)`` + + ``expr_x`` is the expression representing $x$ component of the + parametric function. + + ``expr_y`` is the expression representing $y$ component of the + parametric function. + + ``range`` is a 3-tuple denoting the parameter symbol, start and + stop. For example, ``(u, 0, 5)``. + + If the range is not specified, then a default range of (-10, 10) + is used. + + However, if the arguments are specified as + ``(expr_x, expr_y, range), ...``, you must specify the ranges + for each expressions manually. + + Default range may change in the future if a more advanced + algorithm is implemented. + + adaptive : bool, optional + Specifies whether to use the adaptive sampling or not. + + The default value is set to ``True``. Set adaptive to ``False`` + and specify ``nb_of_points`` if uniform sampling is required. + + depth : int, optional + The recursion depth of the adaptive algorithm. A depth of + value $n$ samples a maximum of $2^n$ points. + + nb_of_points : int, optional + Used when the ``adaptive`` flag is set to ``False``. + + Specifies the number of the points used for the uniform + sampling. + + line_color : string, or float, or function, optional + Specifies the color for the plot. + See ``Plot`` to see how to set color for the plots. + Note that by setting ``line_color``, it would be applied simultaneously + to all the series. + + label : str, optional + The label of the expression in the plot. It will be used when + called with ``legend``. Default is the name of the expression. + e.g. ``sin(x)`` + + xlabel : str, optional + Label for the x-axis. + + ylabel : str, optional + Label for the y-axis. + + xscale : 'linear' or 'log', optional + Sets the scaling of the x-axis. + + yscale : 'linear' or 'log', optional + Sets the scaling of the y-axis. + + axis_center : (float, float), optional + Tuple of two floats denoting the coordinates of the center or + {'center', 'auto'} + + xlim : (float, float), optional + Denotes the x-axis limits, ``(min, max)```. + + ylim : (float, float), optional + Denotes the y-axis limits, ``(min, max)```. + + size : (float, float), optional + A tuple in the form (width, height) in inches to specify the size of + the overall figure. The default value is set to ``None``, meaning + the size will be set by the default backend. + + Examples + ======== + + .. plot:: + :context: reset + :format: doctest + :include-source: True + + >>> from sympy import plot_parametric, symbols, cos, sin + >>> u = symbols('u') + + A parametric plot with a single expression: + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> plot_parametric((cos(u), sin(u)), (u, -5, 5)) + Plot object containing: + [0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0) + + A parametric plot with multiple expressions with the same range: + + .. 
plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> plot_parametric((cos(u), sin(u)), (u, cos(u)), (u, -10, 10)) + Plot object containing: + [0]: parametric cartesian line: (cos(u), sin(u)) for u over (-10.0, 10.0) + [1]: parametric cartesian line: (u, cos(u)) for u over (-10.0, 10.0) + + A parametric plot with multiple expressions with different ranges + for each curve: + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> plot_parametric((cos(u), sin(u), (u, -5, 5)), + ... (cos(u), u, (u, -5, 5))) + Plot object containing: + [0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0) + [1]: parametric cartesian line: (cos(u), u) for u over (-5.0, 5.0) + + Notes + ===== + + The plotting uses an adaptive algorithm which samples recursively to + accurately plot the curve. The adaptive algorithm uses a random point + near the midpoint of two points that has to be further sampled. + Hence, repeating the same plot command can give slightly different + results because of the random sampling. + + If there are multiple plots, then the same optional arguments are + applied to all the plots drawn in the same canvas. If you want to + set these options separately, you can index the returned ``Plot`` + object and set it. + + For example, when you specify ``line_color`` once, it would be + applied simultaneously to both series. + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> from sympy import pi + >>> expr1 = (u, cos(2*pi*u)/2 + 1/2) + >>> expr2 = (u, sin(2*pi*u)/2 + 1/2) + >>> p = plot_parametric(expr1, expr2, (u, 0, 1), line_color='blue') + + If you want to specify the line color for the specific series, you + should index each item and apply the property manually. + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> p[0].line_color = 'red' + >>> p.show() + + See Also + ======== + + Plot, Parametric2DLineSeries + """ + args = list(map(sympify, args)) + series = [] + plot_expr = check_arguments(args, 2, 1) + series = [Parametric2DLineSeries(*arg, **kwargs) for arg in plot_expr] + plots = Plot(*series, **kwargs) + if show: + plots.show() + return plots + + +def plot3d_parametric_line(*args, show=True, **kwargs): + """ + Plots a 3D parametric line plot. + + Usage + ===== + + Single plot: + + ``plot3d_parametric_line(expr_x, expr_y, expr_z, range, **kwargs)`` + + If the range is not specified, then a default range of (-10, 10) is used. + + Multiple plots. + + ``plot3d_parametric_line((expr_x, expr_y, expr_z, range), ..., **kwargs)`` + + Ranges have to be specified for every expression. + + Default range may change in the future if a more advanced default range + detection algorithm is implemented. + + Arguments + ========= + + expr_x : Expression representing the function along x. + + expr_y : Expression representing the function along y. + + expr_z : Expression representing the function along z. + + range : (:class:`~.Symbol`, float, float) + A 3-tuple denoting the range of the parameter variable, e.g., (u, 0, 5). + + Keyword Arguments + ================= + + Arguments for ``Parametric3DLineSeries`` class. + + nb_of_points : The range is uniformly sampled at ``nb_of_points`` + number of points. + + Aesthetics: + + line_color : string, or float, or function, optional + Specifies the color for the plot. + See ``Plot`` to see how to set color for the plots. + Note that by setting ``line_color``, it would be applied simultaneously + to all the series. 
+ + label : str + The label to the plot. It will be used when called with ``legend=True`` + to denote the function with the given label in the plot. + + If there are multiple plots, then the same series arguments are applied to + all the plots. If you want to set these options separately, you can index + the returned ``Plot`` object and set it. + + Arguments for ``Plot`` class. + + title : str + Title of the plot. + + size : (float, float), optional + A tuple in the form (width, height) in inches to specify the size of + the overall figure. The default value is set to ``None``, meaning + the size will be set by the default backend. + + Examples + ======== + + .. plot:: + :context: reset + :format: doctest + :include-source: True + + >>> from sympy import symbols, cos, sin + >>> from sympy.plotting import plot3d_parametric_line + >>> u = symbols('u') + + Single plot. + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> plot3d_parametric_line(cos(u), sin(u), u, (u, -5, 5)) + Plot object containing: + [0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0) + + + Multiple plots. + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> plot3d_parametric_line((cos(u), sin(u), u, (u, -5, 5)), + ... (sin(u), u**2, u, (u, -5, 5))) + Plot object containing: + [0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0) + [1]: 3D parametric cartesian line: (sin(u), u**2, u) for u over (-5.0, 5.0) + + + See Also + ======== + + Plot, Parametric3DLineSeries + + """ + args = list(map(sympify, args)) + series = [] + plot_expr = check_arguments(args, 3, 1) + series = [Parametric3DLineSeries(*arg, **kwargs) for arg in plot_expr] + kwargs.setdefault("xlabel", "x") + kwargs.setdefault("ylabel", "y") + kwargs.setdefault("zlabel", "z") + plots = Plot(*series, **kwargs) + if show: + plots.show() + return plots + + +def plot3d(*args, show=True, **kwargs): + """ + Plots a 3D surface plot. + + Usage + ===== + + Single plot + + ``plot3d(expr, range_x, range_y, **kwargs)`` + + If the ranges are not specified, then a default range of (-10, 10) is used. + + Multiple plot with the same range. + + ``plot3d(expr1, expr2, range_x, range_y, **kwargs)`` + + If the ranges are not specified, then a default range of (-10, 10) is used. + + Multiple plots with different ranges. + + ``plot3d((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)`` + + Ranges have to be specified for every expression. + + Default range may change in the future if a more advanced default range + detection algorithm is implemented. + + Arguments + ========= + + expr : Expression representing the function along x. + + range_x : (:class:`~.Symbol`, float, float) + A 3-tuple denoting the range of the x variable, e.g. (x, 0, 5). + + range_y : (:class:`~.Symbol`, float, float) + A 3-tuple denoting the range of the y variable, e.g. (y, 0, 5). + + Keyword Arguments + ================= + + Arguments for ``SurfaceOver2DRangeSeries`` class: + + nb_of_points_x : int + The x range is sampled uniformly at ``nb_of_points_x`` of points. + + nb_of_points_y : int + The y range is sampled uniformly at ``nb_of_points_y`` of points. + + Aesthetics: + + surface_color : Function which returns a float + Specifies the color for the surface of the plot. + See :class:`~.Plot` for more details. + + If there are multiple plots, then the same series arguments are applied to + all the plots. 
If you want to set these options separately, you can index + the returned ``Plot`` object and set it. + + Arguments for ``Plot`` class: + + title : str + Title of the plot. + + size : (float, float), optional + A tuple in the form (width, height) in inches to specify the size of the + overall figure. The default value is set to ``None``, meaning the size will + be set by the default backend. + + Examples + ======== + + .. plot:: + :context: reset + :format: doctest + :include-source: True + + >>> from sympy import symbols + >>> from sympy.plotting import plot3d + >>> x, y = symbols('x y') + + Single plot + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> plot3d(x*y, (x, -5, 5), (y, -5, 5)) + Plot object containing: + [0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0) + + + Multiple plots with same range + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> plot3d(x*y, -x*y, (x, -5, 5), (y, -5, 5)) + Plot object containing: + [0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0) + [1]: cartesian surface: -x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0) + + + Multiple plots with different ranges. + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> plot3d((x**2 + y**2, (x, -5, 5), (y, -5, 5)), + ... (x*y, (x, -3, 3), (y, -3, 3))) + Plot object containing: + [0]: cartesian surface: x**2 + y**2 for x over (-5.0, 5.0) and y over (-5.0, 5.0) + [1]: cartesian surface: x*y for x over (-3.0, 3.0) and y over (-3.0, 3.0) + + + See Also + ======== + + Plot, SurfaceOver2DRangeSeries + + """ + + args = list(map(sympify, args)) + series = [] + plot_expr = check_arguments(args, 1, 2) + series = [SurfaceOver2DRangeSeries(*arg, **kwargs) for arg in plot_expr] + kwargs.setdefault("xlabel", series[0].var_x) + kwargs.setdefault("ylabel", series[0].var_y) + kwargs.setdefault("zlabel", Function('f')(series[0].var_x, series[0].var_y)) + plots = Plot(*series, **kwargs) + if show: + plots.show() + return plots + + +def plot3d_parametric_surface(*args, show=True, **kwargs): + """ + Plots a 3D parametric surface plot. + + Explanation + =========== + + Single plot. + + ``plot3d_parametric_surface(expr_x, expr_y, expr_z, range_u, range_v, **kwargs)`` + + If the ranges is not specified, then a default range of (-10, 10) is used. + + Multiple plots. + + ``plot3d_parametric_surface((expr_x, expr_y, expr_z, range_u, range_v), ..., **kwargs)`` + + Ranges have to be specified for every expression. + + Default range may change in the future if a more advanced default range + detection algorithm is implemented. + + Arguments + ========= + + expr_x : Expression representing the function along ``x``. + + expr_y : Expression representing the function along ``y``. + + expr_z : Expression representing the function along ``z``. + + range_u : (:class:`~.Symbol`, float, float) + A 3-tuple denoting the range of the u variable, e.g. (u, 0, 5). + + range_v : (:class:`~.Symbol`, float, float) + A 3-tuple denoting the range of the v variable, e.g. (v, 0, 5). + + Keyword Arguments + ================= + + Arguments for ``ParametricSurfaceSeries`` class: + + nb_of_points_u : int + The ``u`` range is sampled uniformly at ``nb_of_points_v`` of points + + nb_of_points_y : int + The ``v`` range is sampled uniformly at ``nb_of_points_y`` of points + + Aesthetics: + + surface_color : Function which returns a float + Specifies the color for the surface of the plot. 
See + :class:`~Plot` for more details. + + If there are multiple plots, then the same series arguments are applied for + all the plots. If you want to set these options separately, you can index + the returned ``Plot`` object and set it. + + + Arguments for ``Plot`` class: + + title : str + Title of the plot. + + size : (float, float), optional + A tuple in the form (width, height) in inches to specify the size of the + overall figure. The default value is set to ``None``, meaning the size will + be set by the default backend. + + Examples + ======== + + .. plot:: + :context: reset + :format: doctest + :include-source: True + + >>> from sympy import symbols, cos, sin + >>> from sympy.plotting import plot3d_parametric_surface + >>> u, v = symbols('u v') + + Single plot. + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> plot3d_parametric_surface(cos(u + v), sin(u - v), u - v, + ... (u, -5, 5), (v, -5, 5)) + Plot object containing: + [0]: parametric cartesian surface: (cos(u + v), sin(u - v), u - v) for u over (-5.0, 5.0) and v over (-5.0, 5.0) + + + See Also + ======== + + Plot, ParametricSurfaceSeries + + """ + + args = list(map(sympify, args)) + series = [] + plot_expr = check_arguments(args, 3, 2) + series = [ParametricSurfaceSeries(*arg, **kwargs) for arg in plot_expr] + kwargs.setdefault("xlabel", "x") + kwargs.setdefault("ylabel", "y") + kwargs.setdefault("zlabel", "z") + plots = Plot(*series, **kwargs) + if show: + plots.show() + return plots + +def plot_contour(*args, show=True, **kwargs): + """ + Draws contour plot of a function + + Usage + ===== + + Single plot + + ``plot_contour(expr, range_x, range_y, **kwargs)`` + + If the ranges are not specified, then a default range of (-10, 10) is used. + + Multiple plot with the same range. + + ``plot_contour(expr1, expr2, range_x, range_y, **kwargs)`` + + If the ranges are not specified, then a default range of (-10, 10) is used. + + Multiple plots with different ranges. + + ``plot_contour((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)`` + + Ranges have to be specified for every expression. + + Default range may change in the future if a more advanced default range + detection algorithm is implemented. + + Arguments + ========= + + expr : Expression representing the function along x. + + range_x : (:class:`Symbol`, float, float) + A 3-tuple denoting the range of the x variable, e.g. (x, 0, 5). + + range_y : (:class:`Symbol`, float, float) + A 3-tuple denoting the range of the y variable, e.g. (y, 0, 5). + + Keyword Arguments + ================= + + Arguments for ``ContourSeries`` class: + + nb_of_points_x : int + The x range is sampled uniformly at ``nb_of_points_x`` of points. + + nb_of_points_y : int + The y range is sampled uniformly at ``nb_of_points_y`` of points. + + Aesthetics: + + surface_color : Function which returns a float + Specifies the color for the surface of the plot. See + :class:`sympy.plotting.Plot` for more details. + + If there are multiple plots, then the same series arguments are applied to + all the plots. If you want to set these options separately, you can index + the returned ``Plot`` object and set it. + + Arguments for ``Plot`` class: + + title : str + Title of the plot. + + size : (float, float), optional + A tuple in the form (width, height) in inches to specify the size of + the overall figure. The default value is set to ``None``, meaning + the size will be set by the default backend. 
+ + See Also + ======== + + Plot, ContourSeries + + """ + + args = list(map(sympify, args)) + plot_expr = check_arguments(args, 1, 2) + series = [ContourSeries(*arg) for arg in plot_expr] + plot_contours = Plot(*series, **kwargs) + if len(plot_expr[0].free_symbols) > 2: + raise ValueError('Contour Plot cannot Plot for more than two variables.') + if show: + plot_contours.show() + return plot_contours + +def check_arguments(args, expr_len, nb_of_free_symbols): + """ + Checks the arguments and converts into tuples of the + form (exprs, ranges). + + Examples + ======== + + .. plot:: + :context: reset + :format: doctest + :include-source: True + + >>> from sympy import cos, sin, symbols + >>> from sympy.plotting.plot import check_arguments + >>> x = symbols('x') + >>> check_arguments([cos(x), sin(x)], 2, 1) + [(cos(x), sin(x), (x, -10, 10))] + + >>> check_arguments([x, x**2], 1, 1) + [(x, (x, -10, 10)), (x**2, (x, -10, 10))] + """ + if not args: + return [] + if expr_len > 1 and isinstance(args[0], Expr): + # Multiple expressions same range. + # The arguments are tuples when the expression length is + # greater than 1. + if len(args) < expr_len: + raise ValueError("len(args) should not be less than expr_len") + for i in range(len(args)): + if isinstance(args[i], Tuple): + break + else: + i = len(args) + 1 + + exprs = Tuple(*args[:i]) + free_symbols = list(set().union(*[e.free_symbols for e in exprs])) + if len(args) == expr_len + nb_of_free_symbols: + #Ranges given + plots = [exprs + Tuple(*args[expr_len:])] + else: + default_range = Tuple(-10, 10) + ranges = [] + for symbol in free_symbols: + ranges.append(Tuple(symbol) + default_range) + + for i in range(len(free_symbols) - nb_of_free_symbols): + ranges.append(Tuple(Dummy()) + default_range) + plots = [exprs + Tuple(*ranges)] + return plots + + if isinstance(args[0], Expr) or (isinstance(args[0], Tuple) and + len(args[0]) == expr_len and + expr_len != 3): + # Cannot handle expressions with number of expression = 3. It is + # not possible to differentiate between expressions and ranges. + #Series of plots with same range + for i in range(len(args)): + if isinstance(args[i], Tuple) and len(args[i]) != expr_len: + break + if not isinstance(args[i], Tuple): + args[i] = Tuple(args[i]) + else: + i = len(args) + 1 + + exprs = args[:i] + assert all(isinstance(e, Expr) for expr in exprs for e in expr) + free_symbols = list(set().union(*[e.free_symbols for expr in exprs + for e in expr])) + + if len(free_symbols) > nb_of_free_symbols: + raise ValueError("The number of free_symbols in the expression " + "is greater than %d" % nb_of_free_symbols) + if len(args) == i + nb_of_free_symbols and isinstance(args[i], Tuple): + ranges = Tuple(*list(args[ + i:i + nb_of_free_symbols])) + plots = [expr + ranges for expr in exprs] + return plots + else: + # Use default ranges. + default_range = Tuple(-10, 10) + ranges = [] + for symbol in free_symbols: + ranges.append(Tuple(symbol) + default_range) + + for i in range(nb_of_free_symbols - len(free_symbols)): + ranges.append(Tuple(Dummy()) + default_range) + ranges = Tuple(*ranges) + plots = [expr + ranges for expr in exprs] + return plots + + elif isinstance(args[0], Tuple) and len(args[0]) == expr_len + nb_of_free_symbols: + # Multiple plots with different ranges. 
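``check_arguments`` above normalizes the positional arguments into ``(expressions, ranges)`` tuples; when an explicit range is supplied it is attached to the expressions instead of the default (-10, 10). A small sketch calling it directly with an already-sympified range:

    from sympy import symbols, Tuple
    from sympy.plotting.plot import check_arguments

    x = symbols('x')
    # One expression, one free symbol, explicit range:
    print(check_arguments([x**2, Tuple(x, -2, 2)], 1, 1))
    # [(x**2, (x, -2, 2))]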
+ for arg in args: + for i in range(expr_len): + if not isinstance(arg[i], Expr): + raise ValueError("Expected an expression, given %s" % + str(arg[i])) + for i in range(nb_of_free_symbols): + if not len(arg[i + expr_len]) == 3: + raise ValueError("The ranges should be a tuple of " + "length 3, got %s" % str(arg[i + expr_len])) + return args diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/plotting/plot_implicit.py b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/plot_implicit.py new file mode 100644 index 0000000000000000000000000000000000000000..3a2763a3a9715cdaf182cd80099f96e47f76dd3f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/plot_implicit.py @@ -0,0 +1,432 @@ +"""Implicit plotting module for SymPy. + +Explanation +=========== + +The module implements a data series called ImplicitSeries which is used by +``Plot`` class to plot implicit plots for different backends. The module, +by default, implements plotting using interval arithmetic. It switches to a +fall back algorithm if the expression cannot be plotted using interval arithmetic. +It is also possible to specify to use the fall back algorithm for all plots. + +Boolean combinations of expressions cannot be plotted by the fall back +algorithm. + +See Also +======== + +sympy.plotting.plot + +References +========== + +.. [1] Jeffrey Allen Tupper. Reliable Two-Dimensional Graphing Methods for +Mathematical Formulae with Two Free Variables. + +.. [2] Jeffrey Allen Tupper. Graphing Equations with Generalized Interval +Arithmetic. Master's thesis. University of Toronto, 1996 + +""" + + +from .plot import BaseSeries, Plot +from .experimental_lambdify import experimental_lambdify, vectorized_lambdify +from .intervalmath import interval +from sympy.core.relational import (Equality, GreaterThan, LessThan, + Relational, StrictLessThan, StrictGreaterThan) +from sympy.core.containers import Tuple +from sympy.core.relational import Eq +from sympy.core.symbol import (Dummy, Symbol) +from sympy.core.sympify import sympify +from sympy.external import import_module +from sympy.logic.boolalg import BooleanFunction +from sympy.polys.polyutils import _sort_gens +from sympy.utilities.decorator import doctest_depends_on +from sympy.utilities.iterables import flatten +import warnings + + +class ImplicitSeries(BaseSeries): + """ Representation for Implicit plot """ + is_implicit = True + + def __init__(self, expr, var_start_end_x, var_start_end_y, + has_equality, use_interval_math, depth, nb_of_points, + line_color): + super().__init__() + self.expr = sympify(expr) + self.label = self.expr + self.var_x = sympify(var_start_end_x[0]) + self.start_x = float(var_start_end_x[1]) + self.end_x = float(var_start_end_x[2]) + self.var_y = sympify(var_start_end_y[0]) + self.start_y = float(var_start_end_y[1]) + self.end_y = float(var_start_end_y[2]) + self.get_points = self.get_raster + self.has_equality = has_equality # If the expression has equality, i.e. + #Eq, Greaterthan, LessThan. 
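``ImplicitSeries`` above is what ``plot_implicit`` (defined later in this file) builds for each relation; a typical call supplies the relation plus explicit ranges for both variables. A short usage sketch (``show=False`` only defers rendering):

    from sympy import symbols, Eq
    from sympy.plotting import plot_implicit

    x, y = symbols('x y')
    # Circle of radius 2, using the default interval-arithmetic path.
    p = plot_implicit(Eq(x**2 + y**2, 4), (x, -3, 3), (y, -3, 3), show=False)
    p.show()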
+ self.nb_of_points = nb_of_points + self.use_interval_math = use_interval_math + self.depth = 4 + depth + self.line_color = line_color + + def __str__(self): + return ('Implicit equation: %s for ' + '%s over %s and %s over %s') % ( + str(self.expr), + str(self.var_x), + str((self.start_x, self.end_x)), + str(self.var_y), + str((self.start_y, self.end_y))) + + def get_raster(self): + func = experimental_lambdify((self.var_x, self.var_y), self.expr, + use_interval=True) + xinterval = interval(self.start_x, self.end_x) + yinterval = interval(self.start_y, self.end_y) + try: + func(xinterval, yinterval) + except AttributeError: + # XXX: AttributeError("'list' object has no attribute 'is_real'") + # That needs fixing somehow - we shouldn't be catching + # AttributeError here. + if self.use_interval_math: + warnings.warn("Adaptive meshing could not be applied to the" + " expression. Using uniform meshing.", stacklevel=7) + self.use_interval_math = False + + if self.use_interval_math: + return self._get_raster_interval(func) + else: + return self._get_meshes_grid() + + def _get_raster_interval(self, func): + """ Uses interval math to adaptively mesh and obtain the plot""" + k = self.depth + interval_list = [] + #Create initial 32 divisions + np = import_module('numpy') + xsample = np.linspace(self.start_x, self.end_x, 33) + ysample = np.linspace(self.start_y, self.end_y, 33) + + #Add a small jitter so that there are no false positives for equality. + # Ex: y==x becomes True for x interval(1, 2) and y interval(1, 2) + #which will draw a rectangle. + jitterx = (np.random.rand( + len(xsample)) * 2 - 1) * (self.end_x - self.start_x) / 2**20 + jittery = (np.random.rand( + len(ysample)) * 2 - 1) * (self.end_y - self.start_y) / 2**20 + xsample += jitterx + ysample += jittery + + xinter = [interval(x1, x2) for x1, x2 in zip(xsample[:-1], + xsample[1:])] + yinter = [interval(y1, y2) for y1, y2 in zip(ysample[:-1], + ysample[1:])] + interval_list = [[x, y] for x in xinter for y in yinter] + plot_list = [] + + #recursive call refinepixels which subdivides the intervals which are + #neither True nor False according to the expression. + def refine_pixels(interval_list): + """ Evaluates the intervals and subdivides the interval if the + expression is partially satisfied.""" + temp_interval_list = [] + plot_list = [] + for intervals in interval_list: + + #Convert the array indices to x and y values + intervalx = intervals[0] + intervaly = intervals[1] + func_eval = func(intervalx, intervaly) + #The expression is valid in the interval. Change the contour + #array values to 1. + if func_eval[1] is False or func_eval[0] is False: + pass + elif func_eval == (True, True): + plot_list.append([intervalx, intervaly]) + elif func_eval[1] is None or func_eval[0] is None: + #Subdivide + avgx = intervalx.mid + avgy = intervaly.mid + a = interval(intervalx.start, avgx) + b = interval(avgx, intervalx.end) + c = interval(intervaly.start, avgy) + d = interval(avgy, intervaly.end) + temp_interval_list.append([a, c]) + temp_interval_list.append([a, d]) + temp_interval_list.append([b, c]) + temp_interval_list.append([b, d]) + return temp_interval_list, plot_list + + while k >= 0 and len(interval_list): + interval_list, plot_list_temp = refine_pixels(interval_list) + plot_list.extend(plot_list_temp) + k = k - 1 + #Check whether the expression represents an equality + #If it represents an equality, then none of the intervals + #would have satisfied the expression due to floating point + #differences. 
Add all the undecided values to the plot. + if self.has_equality: + for intervals in interval_list: + intervalx = intervals[0] + intervaly = intervals[1] + func_eval = func(intervalx, intervaly) + if func_eval[1] and func_eval[0] is not False: + plot_list.append([intervalx, intervaly]) + return plot_list, 'fill' + + def _get_meshes_grid(self): + """Generates the mesh for generating a contour. + + In the case of equality, ``contour`` function of matplotlib can + be used. In other cases, matplotlib's ``contourf`` is used. + """ + equal = False + if isinstance(self.expr, Equality): + expr = self.expr.lhs - self.expr.rhs + equal = True + + elif isinstance(self.expr, (GreaterThan, StrictGreaterThan)): + expr = self.expr.lhs - self.expr.rhs + + elif isinstance(self.expr, (LessThan, StrictLessThan)): + expr = self.expr.rhs - self.expr.lhs + else: + raise NotImplementedError("The expression is not supported for " + "plotting in uniform meshed plot.") + np = import_module('numpy') + xarray = np.linspace(self.start_x, self.end_x, self.nb_of_points) + yarray = np.linspace(self.start_y, self.end_y, self.nb_of_points) + x_grid, y_grid = np.meshgrid(xarray, yarray) + + func = vectorized_lambdify((self.var_x, self.var_y), expr) + z_grid = func(x_grid, y_grid) + z_grid[np.ma.where(z_grid < 0)] = -1 + z_grid[np.ma.where(z_grid > 0)] = 1 + if equal: + return xarray, yarray, z_grid, 'contour' + else: + return xarray, yarray, z_grid, 'contourf' + + +@doctest_depends_on(modules=('matplotlib',)) +def plot_implicit(expr, x_var=None, y_var=None, adaptive=True, depth=0, + points=300, line_color="blue", show=True, **kwargs): + """A plot function to plot implicit equations / inequalities. + + Arguments + ========= + + - expr : The equation / inequality that is to be plotted. + - x_var (optional) : symbol to plot on x-axis or tuple giving symbol + and range as ``(symbol, xmin, xmax)`` + - y_var (optional) : symbol to plot on y-axis or tuple giving symbol + and range as ``(symbol, ymin, ymax)`` + + If neither ``x_var`` nor ``y_var`` are given then the free symbols in the + expression will be assigned in the order they are sorted. + + The following keyword arguments can also be used: + + - ``adaptive`` Boolean. The default value is set to True. It has to be + set to False if you want to use a mesh grid. + + - ``depth`` integer. The depth of recursion for adaptive mesh grid. + Default value is 0. Takes value in the range (0, 4). + + - ``points`` integer. The number of points if adaptive mesh grid is not + used. Default value is 300. + + - ``show`` Boolean. Default value is True. If set to False, the plot will + not be shown. See ``Plot`` for further information. + + - ``title`` string. The title for the plot. + + - ``xlabel`` string. The label for the x-axis + + - ``ylabel`` string. The label for the y-axis + + Aesthetics options: + + - ``line_color``: float or string. Specifies the color for the plot. + See ``Plot`` to see how to set color for the plots. + Default value is "Blue" + + plot_implicit, by default, uses interval arithmetic to plot functions. If + the expression cannot be plotted using interval arithmetic, it defaults to + a generating a contour using a mesh grid of fixed number of points. By + setting adaptive to False, you can force plot_implicit to use the mesh + grid. The mesh grid method can be effective when adaptive plotting using + interval arithmetic, fails to plot with small line width. + + Examples + ======== + + Plot expressions: + + .. 
plot:: + :context: reset + :format: doctest + :include-source: True + + >>> from sympy import plot_implicit, symbols, Eq, And + >>> x, y = symbols('x y') + + Without any ranges for the symbols in the expression: + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> p1 = plot_implicit(Eq(x**2 + y**2, 5)) + + With the range for the symbols: + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> p2 = plot_implicit( + ... Eq(x**2 + y**2, 3), (x, -3, 3), (y, -3, 3)) + + With depth of recursion as argument: + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> p3 = plot_implicit( + ... Eq(x**2 + y**2, 5), (x, -4, 4), (y, -4, 4), depth = 2) + + Using mesh grid and not using adaptive meshing: + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> p4 = plot_implicit( + ... Eq(x**2 + y**2, 5), (x, -5, 5), (y, -2, 2), + ... adaptive=False) + + Using mesh grid without using adaptive meshing with number of points + specified: + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> p5 = plot_implicit( + ... Eq(x**2 + y**2, 5), (x, -5, 5), (y, -2, 2), + ... adaptive=False, points=400) + + Plotting regions: + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> p6 = plot_implicit(y > x**2) + + Plotting Using boolean conjunctions: + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> p7 = plot_implicit(And(y > x, y > -x)) + + When plotting an expression with a single variable (y - 1, for example), + specify the x or the y variable explicitly: + + .. plot:: + :context: close-figs + :format: doctest + :include-source: True + + >>> p8 = plot_implicit(y - 1, y_var=y) + >>> p9 = plot_implicit(x - 1, x_var=x) + """ + has_equality = False # Represents whether the expression contains an Equality, + #GreaterThan or LessThan + + def arg_expand(bool_expr): + """ + Recursively expands the arguments of an Boolean Function + """ + for arg in bool_expr.args: + if isinstance(arg, BooleanFunction): + arg_expand(arg) + elif isinstance(arg, Relational): + arg_list.append(arg) + + arg_list = [] + if isinstance(expr, BooleanFunction): + arg_expand(expr) + + #Check whether there is an equality in the expression provided. + if any(isinstance(e, (Equality, GreaterThan, LessThan)) + for e in arg_list): + has_equality = True + + elif not isinstance(expr, Relational): + expr = Eq(expr, 0) + has_equality = True + elif isinstance(expr, (Equality, GreaterThan, LessThan)): + has_equality = True + + xyvar = [i for i in (x_var, y_var) if i is not None] + free_symbols = expr.free_symbols + range_symbols = Tuple(*flatten(xyvar)).free_symbols + undeclared = free_symbols - range_symbols + if len(free_symbols & range_symbols) > 2: + raise NotImplementedError("Implicit plotting is not implemented for " + "more than 2 variables") + + #Create default ranges if the range is not provided. 
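    # For example, plot_implicit(Eq(x**2 + y**2, 5)) supplies no ranges, so both
    # x and y fall back to the (-5, 5) interval defined just below.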
+ default_range = Tuple(-5, 5) + def _range_tuple(s): + if isinstance(s, Symbol): + return Tuple(s) + default_range + if len(s) == 3: + return Tuple(*s) + raise ValueError('symbol or `(symbol, min, max)` expected but got %s' % s) + + if len(xyvar) == 0: + xyvar = list(_sort_gens(free_symbols)) + var_start_end_x = _range_tuple(xyvar[0]) + x = var_start_end_x[0] + if len(xyvar) != 2: + if x in undeclared or not undeclared: + xyvar.append(Dummy('f(%s)' % x.name)) + else: + xyvar.append(undeclared.pop()) + var_start_end_y = _range_tuple(xyvar[1]) + + #Check whether the depth is greater than 4 or less than 0. + if depth > 4: + depth = 4 + elif depth < 0: + depth = 0 + + series_argument = ImplicitSeries(expr, var_start_end_x, var_start_end_y, + has_equality, adaptive, depth, + points, line_color) + + #set the x and y limits + kwargs['xlim'] = tuple(float(x) for x in var_start_end_x[1:]) + kwargs['ylim'] = tuple(float(y) for y in var_start_end_y[1:]) + # set the x and y labels + kwargs.setdefault('xlabel', var_start_end_x[0]) + kwargs.setdefault('ylabel', var_start_end_y[0]) + p = Plot(series_argument, **kwargs) + if show: + p.show() + return p diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/__init__.py b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cd86a505d8c4b8026bd91cde27d441e00223a8bc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/__init__.py @@ -0,0 +1,138 @@ +"""Plotting module that can plot 2D and 3D functions +""" + +from sympy.utilities.decorator import doctest_depends_on + +@doctest_depends_on(modules=('pyglet',)) +def PygletPlot(*args, **kwargs): + """ + + Plot Examples + ============= + + See examples/advanced/pyglet_plotting.py for many more examples. + + >>> from sympy.plotting.pygletplot import PygletPlot as Plot + >>> from sympy.abc import x, y, z + + >>> Plot(x*y**3-y*x**3) + [0]: -x**3*y + x*y**3, 'mode=cartesian' + + >>> p = Plot() + >>> p[1] = x*y + >>> p[1].color = z, (0.4,0.4,0.9), (0.9,0.4,0.4) + + >>> p = Plot() + >>> p[1] = x**2+y**2 + >>> p[2] = -x**2-y**2 + + + Variable Intervals + ================== + + The basic format is [var, min, max, steps], but the + syntax is flexible and arguments left out are taken + from the defaults for the current coordinate mode: + + >>> Plot(x**2) # implies [x,-5,5,100] + [0]: x**2, 'mode=cartesian' + + >>> Plot(x**2, [], []) # [x,-1,1,40], [y,-1,1,40] + [0]: x**2, 'mode=cartesian' + >>> Plot(x**2-y**2, [100], [100]) # [x,-1,1,100], [y,-1,1,100] + [0]: x**2 - y**2, 'mode=cartesian' + >>> Plot(x**2, [x,-13,13,100]) + [0]: x**2, 'mode=cartesian' + >>> Plot(x**2, [-13,13]) # [x,-13,13,100] + [0]: x**2, 'mode=cartesian' + >>> Plot(x**2, [x,-13,13]) # [x,-13,13,100] + [0]: x**2, 'mode=cartesian' + >>> Plot(1*x, [], [x], mode='cylindrical') + ... # [unbound_theta,0,2*Pi,40], [x,-1,1,20] + [0]: x, 'mode=cartesian' + + + Coordinate Modes + ================ + + Plot supports several curvilinear coordinate modes, and + they independent for each plotted function. You can specify + a coordinate mode explicitly with the 'mode' named argument, + but it can be automatically determined for Cartesian or + parametric plots, and therefore must only be specified for + polar, cylindrical, and spherical modes. 
+ + Specifically, Plot(function arguments) and Plot[n] = + (function arguments) will interpret your arguments as a + Cartesian plot if you provide one function and a parametric + plot if you provide two or three functions. Similarly, the + arguments will be interpreted as a curve if one variable is + used, and a surface if two are used. + + Supported mode names by number of variables: + + 1: parametric, cartesian, polar + 2: parametric, cartesian, cylindrical = polar, spherical + + >>> Plot(1, mode='spherical') + + + Calculator-like Interface + ========================= + + >>> p = Plot(visible=False) + >>> f = x**2 + >>> p[1] = f + >>> p[2] = f.diff(x) + >>> p[3] = f.diff(x).diff(x) + >>> p + [1]: x**2, 'mode=cartesian' + [2]: 2*x, 'mode=cartesian' + [3]: 2, 'mode=cartesian' + >>> p.show() + >>> p.clear() + >>> p + + >>> p[1] = x**2+y**2 + >>> p[1].style = 'solid' + >>> p[2] = -x**2-y**2 + >>> p[2].style = 'wireframe' + >>> p[1].color = z, (0.4,0.4,0.9), (0.9,0.4,0.4) + >>> p[1].style = 'both' + >>> p[2].style = 'both' + >>> p.close() + + + Plot Window Keyboard Controls + ============================= + + Screen Rotation: + X,Y axis Arrow Keys, A,S,D,W, Numpad 4,6,8,2 + Z axis Q,E, Numpad 7,9 + + Model Rotation: + Z axis Z,C, Numpad 1,3 + + Zoom: R,F, PgUp,PgDn, Numpad +,- + + Reset Camera: X, Numpad 5 + + Camera Presets: + XY F1 + XZ F2 + YZ F3 + Perspective F4 + + Sensitivity Modifier: SHIFT + + Axes Toggle: + Visible F5 + Colors F6 + + Close Window: ESCAPE + + ============================= + """ + + from sympy.plotting.pygletplot.plot import PygletPlot + return PygletPlot(*args, **kwargs) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/color_scheme.py b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/color_scheme.py new file mode 100644 index 0000000000000000000000000000000000000000..613e777a7f45f54349c47d272aa6d1c157bcd117 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/color_scheme.py @@ -0,0 +1,336 @@ +from sympy.core.basic import Basic +from sympy.core.symbol import (Symbol, symbols) +from sympy.utilities.lambdify import lambdify +from .util import interpolate, rinterpolate, create_bounds, update_bounds +from sympy.utilities.iterables import sift + + +class ColorGradient: + colors = [0.4, 0.4, 0.4], [0.9, 0.9, 0.9] + intervals = 0.0, 1.0 + + def __init__(self, *args): + if len(args) == 2: + self.colors = list(args) + self.intervals = [0.0, 1.0] + elif len(args) > 0: + if len(args) % 2 != 0: + raise ValueError("len(args) should be even") + self.colors = [args[i] for i in range(1, len(args), 2)] + self.intervals = [args[i] for i in range(0, len(args), 2)] + assert len(self.colors) == len(self.intervals) + + def copy(self): + c = ColorGradient() + c.colors = [e[::] for e in self.colors] + c.intervals = self.intervals[::] + return c + + def _find_interval(self, v): + m = len(self.intervals) + i = 0 + while i < m - 1 and self.intervals[i] <= v: + i += 1 + return i + + def _interpolate_axis(self, axis, v): + i = self._find_interval(v) + v = rinterpolate(self.intervals[i - 1], self.intervals[i], v) + return interpolate(self.colors[i - 1][axis], self.colors[i][axis], v) + + def __call__(self, r, g, b): + c = self._interpolate_axis + return c(0, r), c(1, g), c(2, b) + +default_color_schemes = {} # defined at the bottom of this file + + +class ColorScheme: + + def __init__(self, *args, **kwargs): + self.args = args + self.f, self.gradient = None, ColorGradient() + + if len(args) == 1 and not 
isinstance(args[0], Basic) and callable(args[0]): + self.f = args[0] + elif len(args) == 1 and isinstance(args[0], str): + if args[0] in default_color_schemes: + cs = default_color_schemes[args[0]] + self.f, self.gradient = cs.f, cs.gradient.copy() + else: + self.f = lambdify('x,y,z,u,v', args[0]) + else: + self.f, self.gradient = self._interpret_args(args) + self._test_color_function() + if not isinstance(self.gradient, ColorGradient): + raise ValueError("Color gradient not properly initialized. " + "(Not a ColorGradient instance.)") + + def _interpret_args(self, args): + f, gradient = None, self.gradient + atoms, lists = self._sort_args(args) + s = self._pop_symbol_list(lists) + s = self._fill_in_vars(s) + + # prepare the error message for lambdification failure + f_str = ', '.join(str(fa) for fa in atoms) + s_str = (str(sa) for sa in s) + s_str = ', '.join(sa for sa in s_str if sa.find('unbound') < 0) + f_error = ValueError("Could not interpret arguments " + "%s as functions of %s." % (f_str, s_str)) + + # try to lambdify args + if len(atoms) == 1: + fv = atoms[0] + try: + f = lambdify(s, [fv, fv, fv]) + except TypeError: + raise f_error + + elif len(atoms) == 3: + fr, fg, fb = atoms + try: + f = lambdify(s, [fr, fg, fb]) + except TypeError: + raise f_error + + else: + raise ValueError("A ColorScheme must provide 1 or 3 " + "functions in x, y, z, u, and/or v.") + + # try to intrepret any given color information + if len(lists) == 0: + gargs = [] + + elif len(lists) == 1: + gargs = lists[0] + + elif len(lists) == 2: + try: + (r1, g1, b1), (r2, g2, b2) = lists + except TypeError: + raise ValueError("If two color arguments are given, " + "they must be given in the format " + "(r1, g1, b1), (r2, g2, b2).") + gargs = lists + + elif len(lists) == 3: + try: + (r1, r2), (g1, g2), (b1, b2) = lists + except Exception: + raise ValueError("If three color arguments are given, " + "they must be given in the format " + "(r1, r2), (g1, g2), (b1, b2). To create " + "a multi-step gradient, use the syntax " + "[0, colorStart, step1, color1, ..., 1, " + "colorEnd].") + gargs = [[r1, g1, b1], [r2, g2, b2]] + + else: + raise ValueError("Don't know what to do with collection " + "arguments %s." % (', '.join(str(l) for l in lists))) + + if gargs: + try: + gradient = ColorGradient(*gargs) + except Exception as ex: + raise ValueError(("Could not initialize a gradient " + "with arguments %s. 
Inner " + "exception: %s") % (gargs, str(ex))) + + return f, gradient + + def _pop_symbol_list(self, lists): + symbol_lists = [] + for l in lists: + mark = True + for s in l: + if s is not None and not isinstance(s, Symbol): + mark = False + break + if mark: + lists.remove(l) + symbol_lists.append(l) + if len(symbol_lists) == 1: + return symbol_lists[0] + elif len(symbol_lists) == 0: + return [] + else: + raise ValueError("Only one list of Symbols " + "can be given for a color scheme.") + + def _fill_in_vars(self, args): + defaults = symbols('x,y,z,u,v') + v_error = ValueError("Could not find what to plot.") + if len(args) == 0: + return defaults + if not isinstance(args, (tuple, list)): + raise v_error + if len(args) == 0: + return defaults + for s in args: + if s is not None and not isinstance(s, Symbol): + raise v_error + # when vars are given explicitly, any vars + # not given are marked 'unbound' as to not + # be accidentally used in an expression + vars = [Symbol('unbound%i' % (i)) for i in range(1, 6)] + # interpret as t + if len(args) == 1: + vars[3] = args[0] + # interpret as u,v + elif len(args) == 2: + if args[0] is not None: + vars[3] = args[0] + if args[1] is not None: + vars[4] = args[1] + # interpret as x,y,z + elif len(args) >= 3: + # allow some of x,y,z to be + # left unbound if not given + if args[0] is not None: + vars[0] = args[0] + if args[1] is not None: + vars[1] = args[1] + if args[2] is not None: + vars[2] = args[2] + # interpret the rest as t + if len(args) >= 4: + vars[3] = args[3] + # ...or u,v + if len(args) >= 5: + vars[4] = args[4] + return vars + + def _sort_args(self, args): + lists, atoms = sift(args, + lambda a: isinstance(a, (tuple, list)), binary=True) + return atoms, lists + + def _test_color_function(self): + if not callable(self.f): + raise ValueError("Color function is not callable.") + try: + result = self.f(0, 0, 0, 0, 0) + if len(result) != 3: + raise ValueError("length should be equal to 3") + except TypeError: + raise ValueError("Color function needs to accept x,y,z,u,v, " + "as arguments even if it doesn't use all of them.") + except AssertionError: + raise ValueError("Color function needs to return 3-tuple r,g,b.") + except Exception: + pass # color function probably not valid at 0,0,0,0,0 + + def __call__(self, x, y, z, u, v): + try: + return self.f(x, y, z, u, v) + except Exception: + return None + + def apply_to_curve(self, verts, u_set, set_len=None, inc_pos=None): + """ + Apply this color scheme to a + set of vertices over a single + independent variable u. + """ + bounds = create_bounds() + cverts = [] + if callable(set_len): + set_len(len(u_set)*2) + # calculate f() = r,g,b for each vert + # and find the min and max for r,g,b + for _u in range(len(u_set)): + if verts[_u] is None: + cverts.append(None) + else: + x, y, z = verts[_u] + u, v = u_set[_u], None + c = self(x, y, z, u, v) + if c is not None: + c = list(c) + update_bounds(bounds, c) + cverts.append(c) + if callable(inc_pos): + inc_pos() + # scale and apply gradient + for _u in range(len(u_set)): + if cverts[_u] is not None: + for _c in range(3): + # scale from [f_min, f_max] to [0,1] + cverts[_u][_c] = rinterpolate(bounds[_c][0], bounds[_c][1], + cverts[_u][_c]) + # apply gradient + cverts[_u] = self.gradient(*cverts[_u]) + if callable(inc_pos): + inc_pos() + return cverts + + def apply_to_surface(self, verts, u_set, v_set, set_len=None, inc_pos=None): + """ + Apply this color scheme to a + set of vertices over two + independent variables u and v. 
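        ``verts`` is a 2D grid of ``(x, y, z)`` vertices indexed as
        ``verts[u][v]`` (``None`` where evaluation failed); the returned grid
        has the same shape and holds ``(r, g, b)`` triples, or ``None``.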
+ """ + bounds = create_bounds() + cverts = [] + if callable(set_len): + set_len(len(u_set)*len(v_set)*2) + # calculate f() = r,g,b for each vert + # and find the min and max for r,g,b + for _u in range(len(u_set)): + column = [] + for _v in range(len(v_set)): + if verts[_u][_v] is None: + column.append(None) + else: + x, y, z = verts[_u][_v] + u, v = u_set[_u], v_set[_v] + c = self(x, y, z, u, v) + if c is not None: + c = list(c) + update_bounds(bounds, c) + column.append(c) + if callable(inc_pos): + inc_pos() + cverts.append(column) + # scale and apply gradient + for _u in range(len(u_set)): + for _v in range(len(v_set)): + if cverts[_u][_v] is not None: + # scale from [f_min, f_max] to [0,1] + for _c in range(3): + cverts[_u][_v][_c] = rinterpolate(bounds[_c][0], + bounds[_c][1], cverts[_u][_v][_c]) + # apply gradient + cverts[_u][_v] = self.gradient(*cverts[_u][_v]) + if callable(inc_pos): + inc_pos() + return cverts + + def str_base(self): + return ", ".join(str(a) for a in self.args) + + def __repr__(self): + return "%s" % (self.str_base()) + + +x, y, z, t, u, v = symbols('x,y,z,t,u,v') + +default_color_schemes['rainbow'] = ColorScheme(z, y, x) +default_color_schemes['zfade'] = ColorScheme(z, (0.4, 0.4, 0.97), + (0.97, 0.4, 0.4), (None, None, z)) +default_color_schemes['zfade3'] = ColorScheme(z, (None, None, z), + [0.00, (0.2, 0.2, 1.0), + 0.35, (0.2, 0.8, 0.4), + 0.50, (0.3, 0.9, 0.3), + 0.65, (0.4, 0.8, 0.2), + 1.00, (1.0, 0.2, 0.2)]) + +default_color_schemes['zfade4'] = ColorScheme(z, (None, None, z), + [0.0, (0.3, 0.3, 1.0), + 0.30, (0.3, 1.0, 0.3), + 0.55, (0.95, 1.0, 0.2), + 0.65, (1.0, 0.95, 0.2), + 0.85, (1.0, 0.7, 0.2), + 1.0, (1.0, 0.3, 0.2)]) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_axes.py b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_axes.py new file mode 100644 index 0000000000000000000000000000000000000000..ae26fb0b2fa64e7f7318c51ce3fe5afaa276b48e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_axes.py @@ -0,0 +1,251 @@ +import pyglet.gl as pgl +from pyglet import font + +from sympy.core import S +from sympy.plotting.pygletplot.plot_object import PlotObject +from sympy.plotting.pygletplot.util import billboard_matrix, dot_product, \ + get_direction_vectors, strided_range, vec_mag, vec_sub +from sympy.utilities.iterables import is_sequence + + +class PlotAxes(PlotObject): + + def __init__(self, *args, + style='', none=None, frame=None, box=None, ordinate=None, + stride=0.25, + visible='', overlay='', colored='', label_axes='', label_ticks='', + tick_length=0.1, + font_face='Arial', font_size=28, + **kwargs): + # initialize style parameter + style = style.lower() + + # allow alias kwargs to override style kwarg + if none is not None: + style = 'none' + if frame is not None: + style = 'frame' + if box is not None: + style = 'box' + if ordinate is not None: + style = 'ordinate' + + if style in ['', 'ordinate']: + self._render_object = PlotAxesOrdinate(self) + elif style in ['frame', 'box']: + self._render_object = PlotAxesFrame(self) + elif style in ['none']: + self._render_object = None + else: + raise ValueError(("Unrecognized axes style %s.") % (style)) + + # initialize stride parameter + try: + stride = eval(stride) + except TypeError: + pass + if is_sequence(stride): + if len(stride) != 3: + raise ValueError("length should be equal to 3") + self._stride = stride + else: + self._stride = [stride, stride, stride] + self._tick_length = float(tick_length) + 
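        # For example, the default stride=0.25 becomes [0.25, 0.25, 0.25], while a
        # string such as '[0.5, 0.5, 1.0]' is eval'd above into a per-axis list;
        # sequences must have exactly three entries.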
+ # setup bounding box and ticks + self._origin = [0, 0, 0] + self.reset_bounding_box() + + def flexible_boolean(input, default): + if input in [True, False]: + return input + if input in ('f', 'F', 'false', 'False'): + return False + if input in ('t', 'T', 'true', 'True'): + return True + return default + + # initialize remaining parameters + self.visible = flexible_boolean(kwargs, True) + self._overlay = flexible_boolean(overlay, True) + self._colored = flexible_boolean(colored, False) + self._label_axes = flexible_boolean(label_axes, False) + self._label_ticks = flexible_boolean(label_ticks, True) + + # setup label font + self.font_face = font_face + self.font_size = font_size + + # this is also used to reinit the + # font on window close/reopen + self.reset_resources() + + def reset_resources(self): + self.label_font = None + + def reset_bounding_box(self): + self._bounding_box = [[None, None], [None, None], [None, None]] + self._axis_ticks = [[], [], []] + + def draw(self): + if self._render_object: + pgl.glPushAttrib(pgl.GL_ENABLE_BIT | pgl.GL_POLYGON_BIT | pgl.GL_DEPTH_BUFFER_BIT) + if self._overlay: + pgl.glDisable(pgl.GL_DEPTH_TEST) + self._render_object.draw() + pgl.glPopAttrib() + + def adjust_bounds(self, child_bounds): + b = self._bounding_box + c = child_bounds + for i in range(3): + if abs(c[i][0]) is S.Infinity or abs(c[i][1]) is S.Infinity: + continue + b[i][0] = c[i][0] if b[i][0] is None else min([b[i][0], c[i][0]]) + b[i][1] = c[i][1] if b[i][1] is None else max([b[i][1], c[i][1]]) + self._bounding_box = b + self._recalculate_axis_ticks(i) + + def _recalculate_axis_ticks(self, axis): + b = self._bounding_box + if b[axis][0] is None or b[axis][1] is None: + self._axis_ticks[axis] = [] + else: + self._axis_ticks[axis] = strided_range(b[axis][0], b[axis][1], + self._stride[axis]) + + def toggle_visible(self): + self.visible = not self.visible + + def toggle_colors(self): + self._colored = not self._colored + + +class PlotAxesBase(PlotObject): + + def __init__(self, parent_axes): + self._p = parent_axes + + def draw(self): + color = [([0.2, 0.1, 0.3], [0.2, 0.1, 0.3], [0.2, 0.1, 0.3]), + ([0.9, 0.3, 0.5], [0.5, 1.0, 0.5], [0.3, 0.3, 0.9])][self._p._colored] + self.draw_background(color) + self.draw_axis(2, color[2]) + self.draw_axis(1, color[1]) + self.draw_axis(0, color[0]) + + def draw_background(self, color): + pass # optional + + def draw_axis(self, axis, color): + raise NotImplementedError() + + def draw_text(self, text, position, color, scale=1.0): + if len(color) == 3: + color = (color[0], color[1], color[2], 1.0) + + if self._p.label_font is None: + self._p.label_font = font.load(self._p.font_face, + self._p.font_size, + bold=True, italic=False) + + label = font.Text(self._p.label_font, text, + color=color, + valign=font.Text.BASELINE, + halign=font.Text.CENTER) + + pgl.glPushMatrix() + pgl.glTranslatef(*position) + billboard_matrix() + scale_factor = 0.005 * scale + pgl.glScalef(scale_factor, scale_factor, scale_factor) + pgl.glColor4f(0, 0, 0, 0) + label.draw() + pgl.glPopMatrix() + + def draw_line(self, v, color): + o = self._p._origin + pgl.glBegin(pgl.GL_LINES) + pgl.glColor3f(*color) + pgl.glVertex3f(v[0][0] + o[0], v[0][1] + o[1], v[0][2] + o[2]) + pgl.glVertex3f(v[1][0] + o[0], v[1][1] + o[1], v[1][2] + o[2]) + pgl.glEnd() + + +class PlotAxesOrdinate(PlotAxesBase): + + def __init__(self, parent_axes): + super().__init__(parent_axes) + + def draw_axis(self, axis, color): + ticks = self._p._axis_ticks[axis] + radius = self._p._tick_length / 2.0 + if len(ticks) 
< 2: + return + + # calculate the vector for this axis + axis_lines = [[0, 0, 0], [0, 0, 0]] + axis_lines[0][axis], axis_lines[1][axis] = ticks[0], ticks[-1] + axis_vector = vec_sub(axis_lines[1], axis_lines[0]) + + # calculate angle to the z direction vector + pos_z = get_direction_vectors()[2] + d = abs(dot_product(axis_vector, pos_z)) + d = d / vec_mag(axis_vector) + + # don't draw labels if we're looking down the axis + labels_visible = abs(d - 1.0) > 0.02 + + # draw the ticks and labels + for tick in ticks: + self.draw_tick_line(axis, color, radius, tick, labels_visible) + + # draw the axis line and labels + self.draw_axis_line(axis, color, ticks[0], ticks[-1], labels_visible) + + def draw_axis_line(self, axis, color, a_min, a_max, labels_visible): + axis_line = [[0, 0, 0], [0, 0, 0]] + axis_line[0][axis], axis_line[1][axis] = a_min, a_max + self.draw_line(axis_line, color) + if labels_visible: + self.draw_axis_line_labels(axis, color, axis_line) + + def draw_axis_line_labels(self, axis, color, axis_line): + if not self._p._label_axes: + return + axis_labels = [axis_line[0][::], axis_line[1][::]] + axis_labels[0][axis] -= 0.3 + axis_labels[1][axis] += 0.3 + a_str = ['X', 'Y', 'Z'][axis] + self.draw_text("-" + a_str, axis_labels[0], color) + self.draw_text("+" + a_str, axis_labels[1], color) + + def draw_tick_line(self, axis, color, radius, tick, labels_visible): + tick_axis = {0: 1, 1: 0, 2: 1}[axis] + tick_line = [[0, 0, 0], [0, 0, 0]] + tick_line[0][axis] = tick_line[1][axis] = tick + tick_line[0][tick_axis], tick_line[1][tick_axis] = -radius, radius + self.draw_line(tick_line, color) + if labels_visible: + self.draw_tick_line_label(axis, color, radius, tick) + + def draw_tick_line_label(self, axis, color, radius, tick): + if not self._p._label_axes: + return + tick_label_vector = [0, 0, 0] + tick_label_vector[axis] = tick + tick_label_vector[{0: 1, 1: 0, 2: 1}[axis]] = [-1, 1, 1][ + axis] * radius * 3.5 + self.draw_text(str(tick), tick_label_vector, color, scale=0.5) + + +class PlotAxesFrame(PlotAxesBase): + + def __init__(self, parent_axes): + super().__init__(parent_axes) + + def draw_background(self, color): + pass + + def draw_axis(self, axis, color): + raise NotImplementedError() diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_camera.py b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_camera.py new file mode 100644 index 0000000000000000000000000000000000000000..7f4adb9bbe5376dcb8c04e5f7dd006a8e19ab91e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_camera.py @@ -0,0 +1,128 @@ +import pyglet.gl as pgl +from sympy.plotting.pygletplot.plot_rotation import get_spherical_rotatation +from sympy.plotting.pygletplot.util import get_model_matrix, model_to_screen, \ + screen_to_model, vec_subs + + +class PlotCamera: + + min_dist = 0.05 + max_dist = 500.0 + + min_ortho_dist = 100.0 + max_ortho_dist = 10000.0 + + _default_dist = 6.0 + _default_ortho_dist = 600.0 + + rot_presets = { + 'xy': (0, 0, 0), + 'xz': (-90, 0, 0), + 'yz': (0, 90, 0), + 'perspective': (-45, 0, -45) + } + + def __init__(self, window, ortho=False): + self.window = window + self.axes = self.window.plot.axes + self.ortho = ortho + self.reset() + + def init_rot_matrix(self): + pgl.glPushMatrix() + pgl.glLoadIdentity() + self._rot = get_model_matrix() + pgl.glPopMatrix() + + def set_rot_preset(self, preset_name): + self.init_rot_matrix() + try: + r = self.rot_presets[preset_name] + except AttributeError: + raise 
ValueError( + "%s is not a valid rotation preset." % preset_name) + try: + self.euler_rotate(r[0], 1, 0, 0) + self.euler_rotate(r[1], 0, 1, 0) + self.euler_rotate(r[2], 0, 0, 1) + except AttributeError: + pass + + def reset(self): + self._dist = 0.0 + self._x, self._y = 0.0, 0.0 + self._rot = None + if self.ortho: + self._dist = self._default_ortho_dist + else: + self._dist = self._default_dist + self.init_rot_matrix() + + def mult_rot_matrix(self, rot): + pgl.glPushMatrix() + pgl.glLoadMatrixf(rot) + pgl.glMultMatrixf(self._rot) + self._rot = get_model_matrix() + pgl.glPopMatrix() + + def setup_projection(self): + pgl.glMatrixMode(pgl.GL_PROJECTION) + pgl.glLoadIdentity() + if self.ortho: + # yep, this is pseudo ortho (don't tell anyone) + pgl.gluPerspective( + 0.3, float(self.window.width)/float(self.window.height), + self.min_ortho_dist - 0.01, self.max_ortho_dist + 0.01) + else: + pgl.gluPerspective( + 30.0, float(self.window.width)/float(self.window.height), + self.min_dist - 0.01, self.max_dist + 0.01) + pgl.glMatrixMode(pgl.GL_MODELVIEW) + + def _get_scale(self): + return 1.0, 1.0, 1.0 + + def apply_transformation(self): + pgl.glLoadIdentity() + pgl.glTranslatef(self._x, self._y, -self._dist) + if self._rot is not None: + pgl.glMultMatrixf(self._rot) + pgl.glScalef(*self._get_scale()) + + def spherical_rotate(self, p1, p2, sensitivity=1.0): + mat = get_spherical_rotatation(p1, p2, self.window.width, + self.window.height, sensitivity) + if mat is not None: + self.mult_rot_matrix(mat) + + def euler_rotate(self, angle, x, y, z): + pgl.glPushMatrix() + pgl.glLoadMatrixf(self._rot) + pgl.glRotatef(angle, x, y, z) + self._rot = get_model_matrix() + pgl.glPopMatrix() + + def zoom_relative(self, clicks, sensitivity): + + if self.ortho: + dist_d = clicks * sensitivity * 50.0 + min_dist = self.min_ortho_dist + max_dist = self.max_ortho_dist + else: + dist_d = clicks * sensitivity + min_dist = self.min_dist + max_dist = self.max_dist + + new_dist = (self._dist - dist_d) + if (clicks < 0 and new_dist < max_dist) or new_dist > min_dist: + self._dist = new_dist + + def mouse_translate(self, x, y, dx, dy): + pgl.glPushMatrix() + pgl.glLoadIdentity() + pgl.glTranslatef(0, 0, -self._dist) + z = model_to_screen(0, 0, 0)[2] + d = vec_subs(screen_to_model(x, y, z), screen_to_model(x - dx, y - dy, z)) + pgl.glPopMatrix() + self._x += d[0] + self._y += d[1] diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_controller.py b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_controller.py new file mode 100644 index 0000000000000000000000000000000000000000..aa7e01e6fd17fddf07b733442208a0a4c9d87d5b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_controller.py @@ -0,0 +1,218 @@ +from pyglet.window import key +from pyglet.window.mouse import LEFT, RIGHT, MIDDLE +from sympy.plotting.pygletplot.util import get_direction_vectors, get_basis_vectors + + +class PlotController: + + normal_mouse_sensitivity = 4.0 + modified_mouse_sensitivity = 1.0 + + normal_key_sensitivity = 160.0 + modified_key_sensitivity = 40.0 + + keymap = { + key.LEFT: 'left', + key.A: 'left', + key.NUM_4: 'left', + + key.RIGHT: 'right', + key.D: 'right', + key.NUM_6: 'right', + + key.UP: 'up', + key.W: 'up', + key.NUM_8: 'up', + + key.DOWN: 'down', + key.S: 'down', + key.NUM_2: 'down', + + key.Z: 'rotate_z_neg', + key.NUM_1: 'rotate_z_neg', + + key.C: 'rotate_z_pos', + key.NUM_3: 'rotate_z_pos', + + key.Q: 'spin_left', + key.NUM_7: 'spin_left', + 
key.E: 'spin_right', + key.NUM_9: 'spin_right', + + key.X: 'reset_camera', + key.NUM_5: 'reset_camera', + + key.NUM_ADD: 'zoom_in', + key.PAGEUP: 'zoom_in', + key.R: 'zoom_in', + + key.NUM_SUBTRACT: 'zoom_out', + key.PAGEDOWN: 'zoom_out', + key.F: 'zoom_out', + + key.RSHIFT: 'modify_sensitivity', + key.LSHIFT: 'modify_sensitivity', + + key.F1: 'rot_preset_xy', + key.F2: 'rot_preset_xz', + key.F3: 'rot_preset_yz', + key.F4: 'rot_preset_perspective', + + key.F5: 'toggle_axes', + key.F6: 'toggle_axe_colors', + + key.F8: 'save_image' + } + + def __init__(self, window, *, invert_mouse_zoom=False, **kwargs): + self.invert_mouse_zoom = invert_mouse_zoom + self.window = window + self.camera = window.camera + self.action = { + # Rotation around the view Y (up) vector + 'left': False, + 'right': False, + # Rotation around the view X vector + 'up': False, + 'down': False, + # Rotation around the view Z vector + 'spin_left': False, + 'spin_right': False, + # Rotation around the model Z vector + 'rotate_z_neg': False, + 'rotate_z_pos': False, + # Reset to the default rotation + 'reset_camera': False, + # Performs camera z-translation + 'zoom_in': False, + 'zoom_out': False, + # Use alternative sensitivity (speed) + 'modify_sensitivity': False, + # Rotation presets + 'rot_preset_xy': False, + 'rot_preset_xz': False, + 'rot_preset_yz': False, + 'rot_preset_perspective': False, + # axes + 'toggle_axes': False, + 'toggle_axe_colors': False, + # screenshot + 'save_image': False + } + + def update(self, dt): + z = 0 + if self.action['zoom_out']: + z -= 1 + if self.action['zoom_in']: + z += 1 + if z != 0: + self.camera.zoom_relative(z/10.0, self.get_key_sensitivity()/10.0) + + dx, dy, dz = 0, 0, 0 + if self.action['left']: + dx -= 1 + if self.action['right']: + dx += 1 + if self.action['up']: + dy -= 1 + if self.action['down']: + dy += 1 + if self.action['spin_left']: + dz += 1 + if self.action['spin_right']: + dz -= 1 + + if not self.is_2D(): + if dx != 0: + self.camera.euler_rotate(dx*dt*self.get_key_sensitivity(), + *(get_direction_vectors()[1])) + if dy != 0: + self.camera.euler_rotate(dy*dt*self.get_key_sensitivity(), + *(get_direction_vectors()[0])) + if dz != 0: + self.camera.euler_rotate(dz*dt*self.get_key_sensitivity(), + *(get_direction_vectors()[2])) + else: + self.camera.mouse_translate(0, 0, dx*dt*self.get_key_sensitivity(), + -dy*dt*self.get_key_sensitivity()) + + rz = 0 + if self.action['rotate_z_neg'] and not self.is_2D(): + rz -= 1 + if self.action['rotate_z_pos'] and not self.is_2D(): + rz += 1 + + if rz != 0: + self.camera.euler_rotate(rz*dt*self.get_key_sensitivity(), + *(get_basis_vectors()[2])) + + if self.action['reset_camera']: + self.camera.reset() + + if self.action['rot_preset_xy']: + self.camera.set_rot_preset('xy') + if self.action['rot_preset_xz']: + self.camera.set_rot_preset('xz') + if self.action['rot_preset_yz']: + self.camera.set_rot_preset('yz') + if self.action['rot_preset_perspective']: + self.camera.set_rot_preset('perspective') + + if self.action['toggle_axes']: + self.action['toggle_axes'] = False + self.camera.axes.toggle_visible() + + if self.action['toggle_axe_colors']: + self.action['toggle_axe_colors'] = False + self.camera.axes.toggle_colors() + + if self.action['save_image']: + self.action['save_image'] = False + self.window.plot.saveimage() + + return True + + def get_mouse_sensitivity(self): + if self.action['modify_sensitivity']: + return self.modified_mouse_sensitivity + else: + return self.normal_mouse_sensitivity + + def get_key_sensitivity(self): + if 
self.action['modify_sensitivity']: + return self.modified_key_sensitivity + else: + return self.normal_key_sensitivity + + def on_key_press(self, symbol, modifiers): + if symbol in self.keymap: + self.action[self.keymap[symbol]] = True + + def on_key_release(self, symbol, modifiers): + if symbol in self.keymap: + self.action[self.keymap[symbol]] = False + + def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers): + if buttons & LEFT: + if self.is_2D(): + self.camera.mouse_translate(x, y, dx, dy) + else: + self.camera.spherical_rotate((x - dx, y - dy), (x, y), + self.get_mouse_sensitivity()) + if buttons & MIDDLE: + self.camera.zoom_relative([1, -1][self.invert_mouse_zoom]*dy, + self.get_mouse_sensitivity()/20.0) + if buttons & RIGHT: + self.camera.mouse_translate(x, y, dx, dy) + + def on_mouse_scroll(self, x, y, dx, dy): + self.camera.zoom_relative([1, -1][self.invert_mouse_zoom]*dy, + self.get_mouse_sensitivity()) + + def is_2D(self): + functions = self.window.plot._functions + for i in functions: + if len(functions[i].i_vars) > 1 or len(functions[i].d_vars) > 2: + return False + return True diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_interval.py b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_interval.py new file mode 100644 index 0000000000000000000000000000000000000000..085ab096915bbc4a3761b71736b4dd14f1ff779f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_interval.py @@ -0,0 +1,181 @@ +from sympy.core.singleton import S +from sympy.core.symbol import Symbol +from sympy.core.sympify import sympify +from sympy.core.numbers import Integer + + +class PlotInterval: + """ + """ + _v, _v_min, _v_max, _v_steps = None, None, None, None + + def require_all_args(f): + def check(self, *args, **kwargs): + for g in [self._v, self._v_min, self._v_max, self._v_steps]: + if g is None: + raise ValueError("PlotInterval is incomplete.") + return f(self, *args, **kwargs) + return check + + def __init__(self, *args): + if len(args) == 1: + if isinstance(args[0], PlotInterval): + self.fill_from(args[0]) + return + elif isinstance(args[0], str): + try: + args = eval(args[0]) + except TypeError: + s_eval_error = "Could not interpret string %s." + raise ValueError(s_eval_error % (args[0])) + elif isinstance(args[0], (tuple, list)): + args = args[0] + else: + raise ValueError("Not an interval.") + if not isinstance(args, (tuple, list)) or len(args) > 4: + f_error = "PlotInterval must be a tuple or list of length 4 or less." 
+ raise ValueError(f_error) + + args = list(args) + if len(args) > 0 and (args[0] is None or isinstance(args[0], Symbol)): + self.v = args.pop(0) + if len(args) in [2, 3]: + self.v_min = args.pop(0) + self.v_max = args.pop(0) + if len(args) == 1: + self.v_steps = args.pop(0) + elif len(args) == 1: + self.v_steps = args.pop(0) + + def get_v(self): + return self._v + + def set_v(self, v): + if v is None: + self._v = None + return + if not isinstance(v, Symbol): + raise ValueError("v must be a SymPy Symbol.") + self._v = v + + def get_v_min(self): + return self._v_min + + def set_v_min(self, v_min): + if v_min is None: + self._v_min = None + return + try: + self._v_min = sympify(v_min) + float(self._v_min.evalf()) + except TypeError: + raise ValueError("v_min could not be interpreted as a number.") + + def get_v_max(self): + return self._v_max + + def set_v_max(self, v_max): + if v_max is None: + self._v_max = None + return + try: + self._v_max = sympify(v_max) + float(self._v_max.evalf()) + except TypeError: + raise ValueError("v_max could not be interpreted as a number.") + + def get_v_steps(self): + return self._v_steps + + def set_v_steps(self, v_steps): + if v_steps is None: + self._v_steps = None + return + if isinstance(v_steps, int): + v_steps = Integer(v_steps) + elif not isinstance(v_steps, Integer): + raise ValueError("v_steps must be an int or SymPy Integer.") + if v_steps <= S.Zero: + raise ValueError("v_steps must be positive.") + self._v_steps = v_steps + + @require_all_args + def get_v_len(self): + return self.v_steps + 1 + + v = property(get_v, set_v) + v_min = property(get_v_min, set_v_min) + v_max = property(get_v_max, set_v_max) + v_steps = property(get_v_steps, set_v_steps) + v_len = property(get_v_len) + + def fill_from(self, b): + if b.v is not None: + self.v = b.v + if b.v_min is not None: + self.v_min = b.v_min + if b.v_max is not None: + self.v_max = b.v_max + if b.v_steps is not None: + self.v_steps = b.v_steps + + @staticmethod + def try_parse(*args): + """ + Returns a PlotInterval if args can be interpreted + as such, otherwise None. + """ + if len(args) == 1 and isinstance(args[0], PlotInterval): + return args[0] + try: + return PlotInterval(*args) + except ValueError: + return None + + def _str_base(self): + return ",".join([str(self.v), str(self.v_min), + str(self.v_max), str(self.v_steps)]) + + def __repr__(self): + """ + A string representing the interval in class constructor form. + """ + return "PlotInterval(%s)" % (self._str_base()) + + def __str__(self): + """ + A string representing the interval in list form. + """ + return "[%s]" % (self._str_base()) + + @require_all_args + def assert_complete(self): + pass + + @require_all_args + def vrange(self): + """ + Yields v_steps+1 SymPy numbers ranging from + v_min to v_max. + """ + d = (self.v_max - self.v_min) / self.v_steps + for i in range(self.v_steps + 1): + a = self.v_min + (d * Integer(i)) + yield a + + @require_all_args + def vrange2(self): + """ + Yields v_steps pairs of SymPy numbers ranging from + (v_min, v_min + step) to (v_max - step, v_max). 
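        For example, PlotInterval(x, 0, 1, 4).vrange2() yields
        (0, 1/4), (1/4, 1/2), (1/2, 3/4), (3/4, 1).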
+ """ + d = (self.v_max - self.v_min) / self.v_steps + a = self.v_min + (d * S.Zero) + for i in range(self.v_steps): + b = self.v_min + (d * Integer(i + 1)) + yield a, b + a = b + + def frange(self): + for i in self.vrange(): + yield float(i.evalf()) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_mode.py b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..f4ee00db9177b98b3259438949836fe5b69416c2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_mode.py @@ -0,0 +1,400 @@ +from .plot_interval import PlotInterval +from .plot_object import PlotObject +from .util import parse_option_string +from sympy.core.symbol import Symbol +from sympy.core.sympify import sympify +from sympy.geometry.entity import GeometryEntity +from sympy.utilities.iterables import is_sequence + + +class PlotMode(PlotObject): + """ + Grandparent class for plotting + modes. Serves as interface for + registration, lookup, and init + of modes. + + To create a new plot mode, + inherit from PlotModeBase + or one of its children, such + as PlotSurface or PlotCurve. + """ + + ## Class-level attributes + ## used to register and lookup + ## plot modes. See PlotModeBase + ## for descriptions and usage. + + i_vars, d_vars = '', '' + intervals = [] + aliases = [] + is_default = False + + ## Draw is the only method here which + ## is meant to be overridden in child + ## classes, and PlotModeBase provides + ## a base implementation. + def draw(self): + raise NotImplementedError() + + ## Everything else in this file has to + ## do with registration and retrieval + ## of plot modes. This is where I've + ## hidden much of the ugliness of automatic + ## plot mode divination... + + ## Plot mode registry data structures + _mode_alias_list = [] + _mode_map = { + 1: {1: {}, 2: {}}, + 2: {1: {}, 2: {}}, + 3: {1: {}, 2: {}}, + } # [d][i][alias_str]: class + _mode_default_map = { + 1: {}, + 2: {}, + 3: {}, + } # [d][i]: class + _i_var_max, _d_var_max = 2, 3 + + def __new__(cls, *args, **kwargs): + """ + This is the function which interprets + arguments given to Plot.__init__ and + Plot.__setattr__. Returns an initialized + instance of the appropriate child class. + """ + + newargs, newkwargs = PlotMode._extract_options(args, kwargs) + mode_arg = newkwargs.get('mode', '') + + # Interpret the arguments + d_vars, intervals = PlotMode._interpret_args(newargs) + i_vars = PlotMode._find_i_vars(d_vars, intervals) + i, d = max([len(i_vars), len(intervals)]), len(d_vars) + + # Find the appropriate mode + subcls = PlotMode._get_mode(mode_arg, i, d) + + # Create the object + o = object.__new__(subcls) + + # Do some setup for the mode instance + o.d_vars = d_vars + o._fill_i_vars(i_vars) + o._fill_intervals(intervals) + o.options = newkwargs + + return o + + @staticmethod + def _get_mode(mode_arg, i_var_count, d_var_count): + """ + Tries to return an appropriate mode class. + Intended to be called only by __new__. + + mode_arg + Can be a string or a class. If it is a + PlotMode subclass, it is simply returned. + If it is a string, it can an alias for + a mode or an empty string. In the latter + case, we try to find a default mode for + the i_var_count and d_var_count. + + i_var_count + The number of independent variables + needed to evaluate the d_vars. + + d_var_count + The number of dependent variables; + usually the number of functions to + be evaluated in plotting. 
+ + For example, a Cartesian function y = f(x) has + one i_var (x) and one d_var (y). A parametric + form x,y,z = f(u,v), f(u,v), f(u,v) has two + two i_vars (u,v) and three d_vars (x,y,z). + """ + # if the mode_arg is simply a PlotMode class, + # check that the mode supports the numbers + # of independent and dependent vars, then + # return it + try: + m = None + if issubclass(mode_arg, PlotMode): + m = mode_arg + except TypeError: + pass + if m: + if not m._was_initialized: + raise ValueError(("To use unregistered plot mode %s " + "you must first call %s._init_mode().") + % (m.__name__, m.__name__)) + if d_var_count != m.d_var_count: + raise ValueError(("%s can only plot functions " + "with %i dependent variables.") + % (m.__name__, + m.d_var_count)) + if i_var_count > m.i_var_count: + raise ValueError(("%s cannot plot functions " + "with more than %i independent " + "variables.") + % (m.__name__, + m.i_var_count)) + return m + # If it is a string, there are two possibilities. + if isinstance(mode_arg, str): + i, d = i_var_count, d_var_count + if i > PlotMode._i_var_max: + raise ValueError(var_count_error(True, True)) + if d > PlotMode._d_var_max: + raise ValueError(var_count_error(False, True)) + # If the string is '', try to find a suitable + # default mode + if not mode_arg: + return PlotMode._get_default_mode(i, d) + # Otherwise, interpret the string as a mode + # alias (e.g. 'cartesian', 'parametric', etc) + else: + return PlotMode._get_aliased_mode(mode_arg, i, d) + else: + raise ValueError("PlotMode argument must be " + "a class or a string") + + @staticmethod + def _get_default_mode(i, d, i_vars=-1): + if i_vars == -1: + i_vars = i + try: + return PlotMode._mode_default_map[d][i] + except KeyError: + # Keep looking for modes in higher i var counts + # which support the given d var count until we + # reach the max i_var count. + if i < PlotMode._i_var_max: + return PlotMode._get_default_mode(i + 1, d, i_vars) + else: + raise ValueError(("Couldn't find a default mode " + "for %i independent and %i " + "dependent variables.") % (i_vars, d)) + + @staticmethod + def _get_aliased_mode(alias, i, d, i_vars=-1): + if i_vars == -1: + i_vars = i + if alias not in PlotMode._mode_alias_list: + raise ValueError(("Couldn't find a mode called" + " %s. Known modes: %s.") + % (alias, ", ".join(PlotMode._mode_alias_list))) + try: + return PlotMode._mode_map[d][i][alias] + except TypeError: + # Keep looking for modes in higher i var counts + # which support the given d var count and alias + # until we reach the max i_var count. + if i < PlotMode._i_var_max: + return PlotMode._get_aliased_mode(alias, i + 1, d, i_vars) + else: + raise ValueError(("Couldn't find a %s mode " + "for %i independent and %i " + "dependent variables.") + % (alias, i_vars, d)) + + @classmethod + def _register(cls): + """ + Called once for each user-usable plot mode. + For Cartesian2D, it is invoked after the + class definition: Cartesian2D._register() + """ + name = cls.__name__ + cls._init_mode() + + try: + i, d = cls.i_var_count, cls.d_var_count + # Add the mode to _mode_map under all + # given aliases + for a in cls.aliases: + if a not in PlotMode._mode_alias_list: + # Also track valid aliases, so + # we can quickly know when given + # an invalid one in _get_mode. + PlotMode._mode_alias_list.append(a) + PlotMode._mode_map[d][i][a] = cls + if cls.is_default: + # If this mode was marked as the + # default for this d,i combination, + # also set that. 
+ PlotMode._mode_default_map[d][i] = cls + + except Exception as e: + raise RuntimeError(("Failed to register " + "plot mode %s. Reason: %s") + % (name, (str(e)))) + + @classmethod + def _init_mode(cls): + """ + Initializes the plot mode based on + the 'mode-specific parameters' above. + Only intended to be called by + PlotMode._register(). To use a mode without + registering it, you can directly call + ModeSubclass._init_mode(). + """ + def symbols_list(symbol_str): + return [Symbol(s) for s in symbol_str] + + # Convert the vars strs into + # lists of symbols. + cls.i_vars = symbols_list(cls.i_vars) + cls.d_vars = symbols_list(cls.d_vars) + + # Var count is used often, calculate + # it once here + cls.i_var_count = len(cls.i_vars) + cls.d_var_count = len(cls.d_vars) + + if cls.i_var_count > PlotMode._i_var_max: + raise ValueError(var_count_error(True, False)) + if cls.d_var_count > PlotMode._d_var_max: + raise ValueError(var_count_error(False, False)) + + # Try to use first alias as primary_alias + if len(cls.aliases) > 0: + cls.primary_alias = cls.aliases[0] + else: + cls.primary_alias = cls.__name__ + + di = cls.intervals + if len(di) != cls.i_var_count: + raise ValueError("Plot mode must provide a " + "default interval for each i_var.") + for i in range(cls.i_var_count): + # default intervals must be given [min,max,steps] + # (no var, but they must be in the same order as i_vars) + if len(di[i]) != 3: + raise ValueError("length should be equal to 3") + + # Initialize an incomplete interval, + # to later be filled with a var when + # the mode is instantiated. + di[i] = PlotInterval(None, *di[i]) + + # To prevent people from using modes + # without these required fields set up. + cls._was_initialized = True + + _was_initialized = False + + ## Initializer Helper Methods + + @staticmethod + def _find_i_vars(functions, intervals): + i_vars = [] + + # First, collect i_vars in the + # order they are given in any + # intervals. + for i in intervals: + if i.v is None: + continue + elif i.v in i_vars: + raise ValueError(("Multiple intervals given " + "for %s.") % (str(i.v))) + i_vars.append(i.v) + + # Then, find any remaining + # i_vars in given functions + # (aka d_vars) + for f in functions: + for a in f.free_symbols: + if a not in i_vars: + i_vars.append(a) + + return i_vars + + def _fill_i_vars(self, i_vars): + # copy default i_vars + self.i_vars = [Symbol(str(i)) for i in self.i_vars] + # replace with given i_vars + for i in range(len(i_vars)): + self.i_vars[i] = i_vars[i] + + def _fill_intervals(self, intervals): + # copy default intervals + self.intervals = [PlotInterval(i) for i in self.intervals] + # track i_vars used so far + v_used = [] + # fill copy of default + # intervals with given info + for i in range(len(intervals)): + self.intervals[i].fill_from(intervals[i]) + if self.intervals[i].v is not None: + v_used.append(self.intervals[i].v) + # Find any orphan intervals and + # assign them i_vars + for i in range(len(self.intervals)): + if self.intervals[i].v is None: + u = [v for v in self.i_vars if v not in v_used] + if len(u) == 0: + raise ValueError("length should not be equal to 0") + self.intervals[i].v = u[0] + v_used.append(u[0]) + + @staticmethod + def _interpret_args(args): + interval_wrong_order = "PlotInterval %s was given before any function(s)." + interpret_error = "Could not interpret %s as a function or interval." 
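        # For example, args like (x**2, [x, -5, 5, 100]) split into
        # functions=[x**2] and intervals=[PlotInterval(x, -5, 5, 100)]; an
        # interval given before any function raises interval_wrong_order, and
        # an argument that parses neither as an interval nor as a SymPy
        # expression raises interpret_error.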
+ + functions, intervals = [], [] + if isinstance(args[0], GeometryEntity): + for coords in list(args[0].arbitrary_point()): + functions.append(coords) + intervals.append(PlotInterval.try_parse(args[0].plot_interval())) + else: + for a in args: + i = PlotInterval.try_parse(a) + if i is not None: + if len(functions) == 0: + raise ValueError(interval_wrong_order % (str(i))) + else: + intervals.append(i) + else: + if is_sequence(a, include=str): + raise ValueError(interpret_error % (str(a))) + try: + f = sympify(a) + functions.append(f) + except TypeError: + raise ValueError(interpret_error % str(a)) + + return functions, intervals + + @staticmethod + def _extract_options(args, kwargs): + newkwargs, newargs = {}, [] + for a in args: + if isinstance(a, str): + newkwargs = dict(newkwargs, **parse_option_string(a)) + else: + newargs.append(a) + newkwargs = dict(newkwargs, **kwargs) + return newargs, newkwargs + + +def var_count_error(is_independent, is_plotting): + """ + Used to format an error message which differs + slightly in 4 places. + """ + if is_plotting: + v = "Plotting" + else: + v = "Registering plot modes" + if is_independent: + n, s = PlotMode._i_var_max, "independent" + else: + n, s = PlotMode._d_var_max, "dependent" + return ("%s with more than %i %s variables " + "is not supported.") % (v, n, s) diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_modes.py b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_modes.py new file mode 100644 index 0000000000000000000000000000000000000000..e78e0b4ce291b071f684fa3ffc02f456dffe0023 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_modes.py @@ -0,0 +1,209 @@ +from sympy.utilities.lambdify import lambdify +from sympy.core.numbers import pi +from sympy.functions import sin, cos +from sympy.plotting.pygletplot.plot_curve import PlotCurve +from sympy.plotting.pygletplot.plot_surface import PlotSurface + +from math import sin as p_sin +from math import cos as p_cos + + +def float_vec3(f): + def inner(*args): + v = f(*args) + return float(v[0]), float(v[1]), float(v[2]) + return inner + + +class Cartesian2D(PlotCurve): + i_vars, d_vars = 'x', 'y' + intervals = [[-5, 5, 100]] + aliases = ['cartesian'] + is_default = True + + def _get_sympy_evaluator(self): + fy = self.d_vars[0] + x = self.t_interval.v + + @float_vec3 + def e(_x): + return (_x, fy.subs(x, _x), 0.0) + return e + + def _get_lambda_evaluator(self): + fy = self.d_vars[0] + x = self.t_interval.v + return lambdify([x], [x, fy, 0.0]) + + +class Cartesian3D(PlotSurface): + i_vars, d_vars = 'xy', 'z' + intervals = [[-1, 1, 40], [-1, 1, 40]] + aliases = ['cartesian', 'monge'] + is_default = True + + def _get_sympy_evaluator(self): + fz = self.d_vars[0] + x = self.u_interval.v + y = self.v_interval.v + + @float_vec3 + def e(_x, _y): + return (_x, _y, fz.subs(x, _x).subs(y, _y)) + return e + + def _get_lambda_evaluator(self): + fz = self.d_vars[0] + x = self.u_interval.v + y = self.v_interval.v + return lambdify([x, y], [x, y, fz]) + + +class ParametricCurve2D(PlotCurve): + i_vars, d_vars = 't', 'xy' + intervals = [[0, 2*pi, 100]] + aliases = ['parametric'] + is_default = True + + def _get_sympy_evaluator(self): + fx, fy = self.d_vars + t = self.t_interval.v + + @float_vec3 + def e(_t): + return (fx.subs(t, _t), fy.subs(t, _t), 0.0) + return e + + def _get_lambda_evaluator(self): + fx, fy = self.d_vars + t = self.t_interval.v + return lambdify([t], [fx, fy, 0.0]) + + +class 
ParametricCurve3D(PlotCurve): + i_vars, d_vars = 't', 'xyz' + intervals = [[0, 2*pi, 100]] + aliases = ['parametric'] + is_default = True + + def _get_sympy_evaluator(self): + fx, fy, fz = self.d_vars + t = self.t_interval.v + + @float_vec3 + def e(_t): + return (fx.subs(t, _t), fy.subs(t, _t), fz.subs(t, _t)) + return e + + def _get_lambda_evaluator(self): + fx, fy, fz = self.d_vars + t = self.t_interval.v + return lambdify([t], [fx, fy, fz]) + + +class ParametricSurface(PlotSurface): + i_vars, d_vars = 'uv', 'xyz' + intervals = [[-1, 1, 40], [-1, 1, 40]] + aliases = ['parametric'] + is_default = True + + def _get_sympy_evaluator(self): + fx, fy, fz = self.d_vars + u = self.u_interval.v + v = self.v_interval.v + + @float_vec3 + def e(_u, _v): + return (fx.subs(u, _u).subs(v, _v), + fy.subs(u, _u).subs(v, _v), + fz.subs(u, _u).subs(v, _v)) + return e + + def _get_lambda_evaluator(self): + fx, fy, fz = self.d_vars + u = self.u_interval.v + v = self.v_interval.v + return lambdify([u, v], [fx, fy, fz]) + + +class Polar(PlotCurve): + i_vars, d_vars = 't', 'r' + intervals = [[0, 2*pi, 100]] + aliases = ['polar'] + is_default = False + + def _get_sympy_evaluator(self): + fr = self.d_vars[0] + t = self.t_interval.v + + def e(_t): + _r = float(fr.subs(t, _t)) + return (_r*p_cos(_t), _r*p_sin(_t), 0.0) + return e + + def _get_lambda_evaluator(self): + fr = self.d_vars[0] + t = self.t_interval.v + fx, fy = fr*cos(t), fr*sin(t) + return lambdify([t], [fx, fy, 0.0]) + + +class Cylindrical(PlotSurface): + i_vars, d_vars = 'th', 'r' + intervals = [[0, 2*pi, 40], [-1, 1, 20]] + aliases = ['cylindrical', 'polar'] + is_default = False + + def _get_sympy_evaluator(self): + fr = self.d_vars[0] + t = self.u_interval.v + h = self.v_interval.v + + def e(_t, _h): + _r = float(fr.subs(t, _t).subs(h, _h)) + return (_r*p_cos(_t), _r*p_sin(_t), _h) + return e + + def _get_lambda_evaluator(self): + fr = self.d_vars[0] + t = self.u_interval.v + h = self.v_interval.v + fx, fy = fr*cos(t), fr*sin(t) + return lambdify([t, h], [fx, fy, h]) + + +class Spherical(PlotSurface): + i_vars, d_vars = 'tp', 'r' + intervals = [[0, 2*pi, 40], [0, pi, 20]] + aliases = ['spherical'] + is_default = False + + def _get_sympy_evaluator(self): + fr = self.d_vars[0] + t = self.u_interval.v + p = self.v_interval.v + + def e(_t, _p): + _r = float(fr.subs(t, _t).subs(p, _p)) + return (_r*p_cos(_t)*p_sin(_p), + _r*p_sin(_t)*p_sin(_p), + _r*p_cos(_p)) + return e + + def _get_lambda_evaluator(self): + fr = self.d_vars[0] + t = self.u_interval.v + p = self.v_interval.v + fx = fr * cos(t) * sin(p) + fy = fr * sin(t) * sin(p) + fz = fr * cos(p) + return lambdify([t, p], [fx, fy, fz]) + +Cartesian2D._register() +Cartesian3D._register() +ParametricCurve2D._register() +ParametricCurve3D._register() +ParametricSurface._register() +Polar._register() +Cylindrical._register() +Spherical._register() diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_object.py b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_object.py new file mode 100644 index 0000000000000000000000000000000000000000..e51040fb8b1a52c49d849b96692f6c0dba329d75 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_object.py @@ -0,0 +1,17 @@ +class PlotObject: + """ + Base class for objects which can be displayed in + a Plot. + """ + visible = True + + def _draw(self): + if self.visible: + self.draw() + + def draw(self): + """ + OpenGL rendering code for the plot object. + Override in base class. 
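+ (Subclasses are expected to override this method; the base implementation is a no-op.)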
+ """ + pass diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_surface.py b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_surface.py new file mode 100644 index 0000000000000000000000000000000000000000..ed421eebb441d193f4d9b763f56e146c11e5a42c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/pygletplot/plot_surface.py @@ -0,0 +1,102 @@ +import pyglet.gl as pgl + +from sympy.core import S +from sympy.plotting.pygletplot.plot_mode_base import PlotModeBase + + +class PlotSurface(PlotModeBase): + + default_rot_preset = 'perspective' + + def _on_calculate_verts(self): + self.u_interval = self.intervals[0] + self.u_set = list(self.u_interval.frange()) + self.v_interval = self.intervals[1] + self.v_set = list(self.v_interval.frange()) + self.bounds = [[S.Infinity, S.NegativeInfinity, 0], + [S.Infinity, S.NegativeInfinity, 0], + [S.Infinity, S.NegativeInfinity, 0]] + evaluate = self._get_evaluator() + + self._calculating_verts_pos = 0.0 + self._calculating_verts_len = float( + self.u_interval.v_len*self.v_interval.v_len) + + verts = [] + b = self.bounds + for u in self.u_set: + column = [] + for v in self.v_set: + try: + _e = evaluate(u, v) # calculate vertex + except ZeroDivisionError: + _e = None + if _e is not None: # update bounding box + for axis in range(3): + b[axis][0] = min([b[axis][0], _e[axis]]) + b[axis][1] = max([b[axis][1], _e[axis]]) + column.append(_e) + self._calculating_verts_pos += 1.0 + + verts.append(column) + for axis in range(3): + b[axis][2] = b[axis][1] - b[axis][0] + if b[axis][2] == 0.0: + b[axis][2] = 1.0 + + self.verts = verts + self.push_wireframe(self.draw_verts(False, False)) + self.push_solid(self.draw_verts(False, True)) + + def _on_calculate_cverts(self): + if not self.verts or not self.color: + return + + def set_work_len(n): + self._calculating_cverts_len = float(n) + + def inc_work_pos(): + self._calculating_cverts_pos += 1.0 + set_work_len(1) + self._calculating_cverts_pos = 0 + self.cverts = self.color.apply_to_surface(self.verts, + self.u_set, + self.v_set, + set_len=set_work_len, + inc_pos=inc_work_pos) + self.push_solid(self.draw_verts(True, True)) + + def calculate_one_cvert(self, u, v): + vert = self.verts[u][v] + return self.color(vert[0], vert[1], vert[2], + self.u_set[u], self.v_set[v]) + + def draw_verts(self, use_cverts, use_solid_color): + def f(): + for u in range(1, len(self.u_set)): + pgl.glBegin(pgl.GL_QUAD_STRIP) + for v in range(len(self.v_set)): + pa = self.verts[u - 1][v] + pb = self.verts[u][v] + if pa is None or pb is None: + pgl.glEnd() + pgl.glBegin(pgl.GL_QUAD_STRIP) + continue + if use_cverts: + ca = self.cverts[u - 1][v] + cb = self.cverts[u][v] + if ca is None: + ca = (0, 0, 0) + if cb is None: + cb = (0, 0, 0) + else: + if use_solid_color: + ca = cb = self.default_solid_color + else: + ca = cb = self.default_wireframe_color + pgl.glColor3f(*ca) + pgl.glVertex3f(*pa) + pgl.glColor3f(*cb) + pgl.glVertex3f(*pb) + pgl.glEnd() + return f diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/plotting/tests/test_region_and.png b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/tests/test_region_and.png new file mode 100644 index 0000000000000000000000000000000000000000..07cac5b54f8a39774c151fc70a00552ba83fe5fc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/tests/test_region_and.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:115d0b9b81ed40f93fe9e216b4f6384cf71093e3bbb64a5d648b8b9858c645a0 +size 6864 diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/plotting/tests/test_region_not.png b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/tests/test_region_not.png new file mode 100644 index 0000000000000000000000000000000000000000..51c241e9825c28047cdd31fd64020ecb956a19e4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/tests/test_region_not.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dceffdfe73d6d78f453142c4713e51a88dbe9361f79c710b6df2400edd9c3bc9 +size 7939 diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/plotting/tests/test_region_xor.png b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/tests/test_region_xor.png new file mode 100644 index 0000000000000000000000000000000000000000..cafdc56f650a8c4d7af38fdfd8206891aa9d6cc2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/tests/test_region_xor.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92e71558103d03df0ea5c47876277968b5d4ca8ab8cf43b80b73cce9d962052c +size 10002 diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/plotting/textplot.py b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/textplot.py new file mode 100644 index 0000000000000000000000000000000000000000..cdd2bb42cd9ce2bc25c4d0df395f30e1740afdaa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sympy/plotting/textplot.py @@ -0,0 +1,162 @@ +from sympy.core.numbers import Float +from sympy.core.symbol import Dummy +from sympy.utilities.lambdify import lambdify + +import math + + +def is_valid(x): + """Check if a floating point number is valid""" + if x is None: + return False + if isinstance(x, complex): + return False + return not math.isinf(x) and not math.isnan(x) + + +def rescale(y, W, H, mi, ma): + """Rescale the given array `y` to fit into the integer values + between `0` and `H-1` for the values between ``mi`` and ``ma``. + """ + y_new = [] + + norm = ma - mi + offset = (ma + mi) / 2 + + for x in range(W): + if is_valid(y[x]): + normalized = (y[x] - offset) / norm + if not is_valid(normalized): + y_new.append(None) + else: + rescaled = Float((normalized*H + H/2) * (H-1)/H).round() + rescaled = int(rescaled) + y_new.append(rescaled) + else: + y_new.append(None) + return y_new + + +def linspace(start, stop, num): + return [start + (stop - start) * x / (num-1) for x in range(num)] + + +def textplot_str(expr, a, b, W=55, H=21): + """Generator for the lines of the plot""" + free = expr.free_symbols + if len(free) > 1: + raise ValueError( + "The expression must have a single variable. (Got {})" + .format(free)) + x = free.pop() if free else Dummy() + f = lambdify([x], expr) + a = float(a) + b = float(b) + + # Calculate function values + x = linspace(a, b, W) + y = [] + for val in x: + try: + y.append(f(val)) + # Not sure what exceptions to catch here or why... 
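+ # These typically arise from domain errors (ValueError), non-numeric
+ # operands (TypeError) and poles (ZeroDivisionError) in the lambdified
+ # function; the sample is recorded as None and skipped when drawing.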
+ except (ValueError, TypeError, ZeroDivisionError): + y.append(None) + + # Normalize height to screen space + y_valid = list(filter(is_valid, y)) + if y_valid: + ma = max(y_valid) + mi = min(y_valid) + if ma == mi: + if ma: + mi, ma = sorted([0, 2*ma]) + else: + mi, ma = -1, 1 + else: + mi, ma = -1, 1 + y_range = ma - mi + precision = math.floor(math.log(y_range, 10)) - 1 + precision *= -1 + mi = round(mi, precision) + ma = round(ma, precision) + y = rescale(y, W, H, mi, ma) + + y_bins = linspace(mi, ma, H) + + # Draw plot + margin = 7 + for h in range(H - 1, -1, -1): + s = [' '] * W + for i in range(W): + if y[i] == h: + if (i == 0 or y[i - 1] == h - 1) and (i == W - 1 or y[i + 1] == h + 1): + s[i] = '/' + elif (i == 0 or y[i - 1] == h + 1) and (i == W - 1 or y[i + 1] == h - 1): + s[i] = '\\' + else: + s[i] = '.' + + if h == 0: + for i in range(W): + s[i] = '_' + + # Print y values + if h in (0, H//2, H - 1): + prefix = ("%g" % y_bins[h]).rjust(margin)[:margin] + else: + prefix = " "*margin + s = "".join(s) + if h == H//2: + s = s.replace(" ", "-") + yield prefix + " |" + s + + # Print x values + bottom = " " * (margin + 2) + bottom += ("%g" % x[0]).ljust(W//2) + if W % 2 == 1: + bottom += ("%g" % x[W//2]).ljust(W//2) + else: + bottom += ("%g" % x[W//2]).ljust(W//2-1) + bottom += "%g" % x[-1] + yield bottom + + +def textplot(expr, a, b, W=55, H=21): + r""" + Print a crude ASCII art plot of the SymPy expression 'expr' (which + should contain a single symbol, e.g. x or something else) over the + interval [a, b]. + + Examples + ======== + + >>> from sympy import Symbol, sin + >>> from sympy.plotting import textplot + >>> t = Symbol('t') + >>> textplot(sin(t)*t, 0, 15) + 14 | ... + | . + | . + | . + | . + | ... + | / . . + | / + | / . + | . . . + 1.5 |----.......-------------------------------------------- + |.... \ . . + | \ / . + | .. / . + | \ / . + | .... + | . + | . . + | + | . . + -11 |_______________________________________________________ + 0 7.5 15 + """ + for line in textplot_str(expr, a, b, W, H): + print(line) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so b/llmeval-env/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so new file mode 100644 index 0000000000000000000000000000000000000000..99ce8333bd1993edd96c7e44ad512c8f9b7f9416 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:231c30dc7c168ea6f07a1075a9678ef91e48fd7133441d29c1fda56d3c3e3ad3 +size 482044169