Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- env-llmeval/lib/python3.10/site-packages/mpmath/calculus/__init__.py +6 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/calculus/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/calculus/__pycache__/approximation.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/calculus/__pycache__/calculus.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/calculus/__pycache__/differentiation.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/calculus/__pycache__/extrapolation.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/calculus/__pycache__/inverselaplace.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/calculus/__pycache__/odes.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/calculus/__pycache__/optimization.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/calculus/approximation.py +246 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/calculus/calculus.py +6 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/calculus/differentiation.py +647 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/calculus/extrapolation.py +2115 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/calculus/inverselaplace.py +973 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/calculus/odes.py +288 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/calculus/optimization.py +1102 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/calculus/polynomials.py +213 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/calculus/quadrature.py +1115 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/__init__.py +14 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/bessel.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/elliptic.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/expintegrals.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/factorials.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/functions.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/hypergeometric.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/orthogonal.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/qfunctions.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/rszeta.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/signals.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/theta.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/zeta.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/zetazeros.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/bessel.py +1108 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/elliptic.py +1431 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/expintegrals.py +425 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/factorials.py +187 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/functions.py +645 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/hypergeometric.py +1413 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/orthogonal.py +493 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/qfunctions.py +280 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/rszeta.py +1403 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/signals.py +32 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/theta.py +1049 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/zeta.py +1154 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/functions/zetazeros.py +1018 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/libmp/__init__.py +77 -0
- env-llmeval/lib/python3.10/site-packages/mpmath/libmp/__pycache__/__init__.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/mpmath/calculus/__init__.py
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from . import calculus
|
2 |
+
# XXX: hack to set methods
|
3 |
+
from . import approximation
|
4 |
+
from . import differentiation
|
5 |
+
from . import extrapolation
|
6 |
+
from . import polynomials
|
env-llmeval/lib/python3.10/site-packages/mpmath/calculus/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (357 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/mpmath/calculus/__pycache__/approximation.cpython-310.pyc
ADDED
Binary file (9.13 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/mpmath/calculus/__pycache__/calculus.cpython-310.pyc
ADDED
Binary file (481 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/mpmath/calculus/__pycache__/differentiation.cpython-310.pyc
ADDED
Binary file (20.4 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/mpmath/calculus/__pycache__/extrapolation.cpython-310.pyc
ADDED
Binary file (69.6 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/mpmath/calculus/__pycache__/inverselaplace.cpython-310.pyc
ADDED
Binary file (30.7 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/mpmath/calculus/__pycache__/odes.cpython-310.pyc
ADDED
Binary file (10.2 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/mpmath/calculus/__pycache__/optimization.cpython-310.pyc
ADDED
Binary file (29.9 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-310.pyc
ADDED
Binary file (7.74 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-310.pyc
ADDED
Binary file (39.6 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/mpmath/calculus/approximation.py
ADDED
@@ -0,0 +1,246 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from ..libmp.backend import xrange
|
2 |
+
from .calculus import defun
|
3 |
+
|
4 |
+
#----------------------------------------------------------------------------#
|
5 |
+
# Approximation methods #
|
6 |
+
#----------------------------------------------------------------------------#
|
7 |
+
|
8 |
+
# The Chebyshev approximation formula is given at:
|
9 |
+
# http://mathworld.wolfram.com/ChebyshevApproximationFormula.html
|
10 |
+
|
11 |
+
# The only major changes in the following code is that we return the
|
12 |
+
# expanded polynomial coefficients instead of Chebyshev coefficients,
|
13 |
+
# and that we automatically transform [a,b] -> [-1,1] and back
|
14 |
+
# for convenience.
|
15 |
+
|
16 |
+
# Coefficient in Chebyshev approximation
|
17 |
+
def chebcoeff(ctx,f,a,b,j,N):
|
18 |
+
s = ctx.mpf(0)
|
19 |
+
h = ctx.mpf(0.5)
|
20 |
+
for k in range(1, N+1):
|
21 |
+
t = ctx.cospi((k-h)/N)
|
22 |
+
s += f(t*(b-a)*h + (b+a)*h) * ctx.cospi(j*(k-h)/N)
|
23 |
+
return 2*s/N
|
24 |
+
|
25 |
+
# Generate Chebyshev polynomials T_n(ax+b) in expanded form
|
26 |
+
def chebT(ctx, a=1, b=0):
|
27 |
+
Tb = [1]
|
28 |
+
yield Tb
|
29 |
+
Ta = [b, a]
|
30 |
+
while 1:
|
31 |
+
yield Ta
|
32 |
+
# Recurrence: T[n+1](ax+b) = 2*(ax+b)*T[n](ax+b) - T[n-1](ax+b)
|
33 |
+
Tmp = [0] + [2*a*t for t in Ta]
|
34 |
+
for i, c in enumerate(Ta): Tmp[i] += 2*b*c
|
35 |
+
for i, c in enumerate(Tb): Tmp[i] -= c
|
36 |
+
Ta, Tb = Tmp, Ta
|
37 |
+
|
38 |
+
@defun
|
39 |
+
def chebyfit(ctx, f, interval, N, error=False):
|
40 |
+
r"""
|
41 |
+
Computes a polynomial of degree `N-1` that approximates the
|
42 |
+
given function `f` on the interval `[a, b]`. With ``error=True``,
|
43 |
+
:func:`~mpmath.chebyfit` also returns an accurate estimate of the
|
44 |
+
maximum absolute error; that is, the maximum value of
|
45 |
+
`|f(x) - P(x)|` for `x \in [a, b]`.
|
46 |
+
|
47 |
+
:func:`~mpmath.chebyfit` uses the Chebyshev approximation formula,
|
48 |
+
which gives a nearly optimal solution: that is, the maximum
|
49 |
+
error of the approximating polynomial is very close to
|
50 |
+
the smallest possible for any polynomial of the same degree.
|
51 |
+
|
52 |
+
Chebyshev approximation is very useful if one needs repeated
|
53 |
+
evaluation of an expensive function, such as function defined
|
54 |
+
implicitly by an integral or a differential equation. (For
|
55 |
+
example, it could be used to turn a slow mpmath function
|
56 |
+
into a fast machine-precision version of the same.)
|
57 |
+
|
58 |
+
**Examples**
|
59 |
+
|
60 |
+
Here we use :func:`~mpmath.chebyfit` to generate a low-degree approximation
|
61 |
+
of `f(x) = \cos(x)`, valid on the interval `[1, 2]`::
|
62 |
+
|
63 |
+
>>> from mpmath import *
|
64 |
+
>>> mp.dps = 15; mp.pretty = True
|
65 |
+
>>> poly, err = chebyfit(cos, [1, 2], 5, error=True)
|
66 |
+
>>> nprint(poly)
|
67 |
+
[0.00291682, 0.146166, -0.732491, 0.174141, 0.949553]
|
68 |
+
>>> nprint(err, 12)
|
69 |
+
1.61351758081e-5
|
70 |
+
|
71 |
+
The polynomial can be evaluated using ``polyval``::
|
72 |
+
|
73 |
+
>>> nprint(polyval(poly, 1.6), 12)
|
74 |
+
-0.0291858904138
|
75 |
+
>>> nprint(cos(1.6), 12)
|
76 |
+
-0.0291995223013
|
77 |
+
|
78 |
+
Sampling the true error at 1000 points shows that the error
|
79 |
+
estimate generated by ``chebyfit`` is remarkably good::
|
80 |
+
|
81 |
+
>>> error = lambda x: abs(cos(x) - polyval(poly, x))
|
82 |
+
>>> nprint(max([error(1+n/1000.) for n in range(1000)]), 12)
|
83 |
+
1.61349954245e-5
|
84 |
+
|
85 |
+
**Choice of degree**
|
86 |
+
|
87 |
+
The degree `N` can be set arbitrarily high, to obtain an
|
88 |
+
arbitrarily good approximation. As a rule of thumb, an
|
89 |
+
`N`-term Chebyshev approximation is good to `N/(b-a)` decimal
|
90 |
+
places on a unit interval (although this depends on how
|
91 |
+
well-behaved `f` is). The cost grows accordingly: ``chebyfit``
|
92 |
+
evaluates the function `(N^2)/2` times to compute the
|
93 |
+
coefficients and an additional `N` times to estimate the error.
|
94 |
+
|
95 |
+
**Possible issues**
|
96 |
+
|
97 |
+
One should be careful to use a sufficiently high working
|
98 |
+
precision both when calling ``chebyfit`` and when evaluating
|
99 |
+
the resulting polynomial, as the polynomial is sometimes
|
100 |
+
ill-conditioned. It is for example difficult to reach
|
101 |
+
15-digit accuracy when evaluating the polynomial using
|
102 |
+
machine precision floats, no matter the theoretical
|
103 |
+
accuracy of the polynomial. (The option to return the
|
104 |
+
coefficients in Chebyshev form should be made available
|
105 |
+
in the future.)
|
106 |
+
|
107 |
+
It is important to note the Chebyshev approximation works
|
108 |
+
poorly if `f` is not smooth. A function containing singularities,
|
109 |
+
rapid oscillation, etc can be approximated more effectively by
|
110 |
+
multiplying it by a weight function that cancels out the
|
111 |
+
nonsmooth features, or by dividing the interval into several
|
112 |
+
segments.
|
113 |
+
"""
|
114 |
+
a, b = ctx._as_points(interval)
|
115 |
+
orig = ctx.prec
|
116 |
+
try:
|
117 |
+
ctx.prec = orig + int(N**0.5) + 20
|
118 |
+
c = [chebcoeff(ctx,f,a,b,k,N) for k in range(N)]
|
119 |
+
d = [ctx.zero] * N
|
120 |
+
d[0] = -c[0]/2
|
121 |
+
h = ctx.mpf(0.5)
|
122 |
+
T = chebT(ctx, ctx.mpf(2)/(b-a), ctx.mpf(-1)*(b+a)/(b-a))
|
123 |
+
for (k, Tk) in zip(range(N), T):
|
124 |
+
for i in range(len(Tk)):
|
125 |
+
d[i] += c[k]*Tk[i]
|
126 |
+
d = d[::-1]
|
127 |
+
# Estimate maximum error
|
128 |
+
err = ctx.zero
|
129 |
+
for k in range(N):
|
130 |
+
x = ctx.cos(ctx.pi*k/N) * (b-a)*h + (b+a)*h
|
131 |
+
err = max(err, abs(f(x) - ctx.polyval(d, x)))
|
132 |
+
finally:
|
133 |
+
ctx.prec = orig
|
134 |
+
if error:
|
135 |
+
return d, +err
|
136 |
+
else:
|
137 |
+
return d
|
138 |
+
|
139 |
+
@defun
|
140 |
+
def fourier(ctx, f, interval, N):
|
141 |
+
r"""
|
142 |
+
Computes the Fourier series of degree `N` of the given function
|
143 |
+
on the interval `[a, b]`. More precisely, :func:`~mpmath.fourier` returns
|
144 |
+
two lists `(c, s)` of coefficients (the cosine series and sine
|
145 |
+
series, respectively), such that
|
146 |
+
|
147 |
+
.. math ::
|
148 |
+
|
149 |
+
f(x) \sim \sum_{k=0}^N
|
150 |
+
c_k \cos(k m x) + s_k \sin(k m x)
|
151 |
+
|
152 |
+
where `m = 2 \pi / (b-a)`.
|
153 |
+
|
154 |
+
Note that many texts define the first coefficient as `2 c_0` instead
|
155 |
+
of `c_0`. The easiest way to evaluate the computed series correctly
|
156 |
+
is to pass it to :func:`~mpmath.fourierval`.
|
157 |
+
|
158 |
+
**Examples**
|
159 |
+
|
160 |
+
The function `f(x) = x` has a simple Fourier series on the standard
|
161 |
+
interval `[-\pi, \pi]`. The cosine coefficients are all zero (because
|
162 |
+
the function has odd symmetry), and the sine coefficients are
|
163 |
+
rational numbers::
|
164 |
+
|
165 |
+
>>> from mpmath import *
|
166 |
+
>>> mp.dps = 15; mp.pretty = True
|
167 |
+
>>> c, s = fourier(lambda x: x, [-pi, pi], 5)
|
168 |
+
>>> nprint(c)
|
169 |
+
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
|
170 |
+
>>> nprint(s)
|
171 |
+
[0.0, 2.0, -1.0, 0.666667, -0.5, 0.4]
|
172 |
+
|
173 |
+
This computes a Fourier series of a nonsymmetric function on
|
174 |
+
a nonstandard interval::
|
175 |
+
|
176 |
+
>>> I = [-1, 1.5]
|
177 |
+
>>> f = lambda x: x**2 - 4*x + 1
|
178 |
+
>>> cs = fourier(f, I, 4)
|
179 |
+
>>> nprint(cs[0])
|
180 |
+
[0.583333, 1.12479, -1.27552, 0.904708, -0.441296]
|
181 |
+
>>> nprint(cs[1])
|
182 |
+
[0.0, -2.6255, 0.580905, 0.219974, -0.540057]
|
183 |
+
|
184 |
+
It is instructive to plot a function along with its truncated
|
185 |
+
Fourier series::
|
186 |
+
|
187 |
+
>>> plot([f, lambda x: fourierval(cs, I, x)], I) #doctest: +SKIP
|
188 |
+
|
189 |
+
Fourier series generally converge slowly (and may not converge
|
190 |
+
pointwise). For example, if `f(x) = \cosh(x)`, a 10-term Fourier
|
191 |
+
series gives an `L^2` error corresponding to 2-digit accuracy::
|
192 |
+
|
193 |
+
>>> I = [-1, 1]
|
194 |
+
>>> cs = fourier(cosh, I, 9)
|
195 |
+
>>> g = lambda x: (cosh(x) - fourierval(cs, I, x))**2
|
196 |
+
>>> nprint(sqrt(quad(g, I)))
|
197 |
+
0.00467963
|
198 |
+
|
199 |
+
:func:`~mpmath.fourier` uses numerical quadrature. For nonsmooth functions,
|
200 |
+
the accuracy (and speed) can be improved by including all singular
|
201 |
+
points in the interval specification::
|
202 |
+
|
203 |
+
>>> nprint(fourier(abs, [-1, 1], 0), 10)
|
204 |
+
([0.5000441648], [0.0])
|
205 |
+
>>> nprint(fourier(abs, [-1, 0, 1], 0), 10)
|
206 |
+
([0.5], [0.0])
|
207 |
+
|
208 |
+
"""
|
209 |
+
interval = ctx._as_points(interval)
|
210 |
+
a = interval[0]
|
211 |
+
b = interval[-1]
|
212 |
+
L = b-a
|
213 |
+
cos_series = []
|
214 |
+
sin_series = []
|
215 |
+
cutoff = ctx.eps*10
|
216 |
+
for n in xrange(N+1):
|
217 |
+
m = 2*n*ctx.pi/L
|
218 |
+
an = 2*ctx.quadgl(lambda t: f(t)*ctx.cos(m*t), interval)/L
|
219 |
+
bn = 2*ctx.quadgl(lambda t: f(t)*ctx.sin(m*t), interval)/L
|
220 |
+
if n == 0:
|
221 |
+
an /= 2
|
222 |
+
if abs(an) < cutoff: an = ctx.zero
|
223 |
+
if abs(bn) < cutoff: bn = ctx.zero
|
224 |
+
cos_series.append(an)
|
225 |
+
sin_series.append(bn)
|
226 |
+
return cos_series, sin_series
|
227 |
+
|
228 |
+
@defun
|
229 |
+
def fourierval(ctx, series, interval, x):
|
230 |
+
"""
|
231 |
+
Evaluates a Fourier series (in the format computed by
|
232 |
+
by :func:`~mpmath.fourier` for the given interval) at the point `x`.
|
233 |
+
|
234 |
+
The series should be a pair `(c, s)` where `c` is the
|
235 |
+
cosine series and `s` is the sine series. The two lists
|
236 |
+
need not have the same length.
|
237 |
+
"""
|
238 |
+
cs, ss = series
|
239 |
+
ab = ctx._as_points(interval)
|
240 |
+
a = interval[0]
|
241 |
+
b = interval[-1]
|
242 |
+
m = 2*ctx.pi/(ab[-1]-ab[0])
|
243 |
+
s = ctx.zero
|
244 |
+
s += ctx.fsum(cs[n]*ctx.cos(m*n*x) for n in xrange(len(cs)) if cs[n])
|
245 |
+
s += ctx.fsum(ss[n]*ctx.sin(m*n*x) for n in xrange(len(ss)) if ss[n])
|
246 |
+
return s
|
env-llmeval/lib/python3.10/site-packages/mpmath/calculus/calculus.py
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
class CalculusMethods(object):
|
2 |
+
pass
|
3 |
+
|
4 |
+
def defun(f):
|
5 |
+
setattr(CalculusMethods, f.__name__, f)
|
6 |
+
return f
|
env-llmeval/lib/python3.10/site-packages/mpmath/calculus/differentiation.py
ADDED
@@ -0,0 +1,647 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from ..libmp.backend import xrange
|
2 |
+
from .calculus import defun
|
3 |
+
|
4 |
+
try:
|
5 |
+
iteritems = dict.iteritems
|
6 |
+
except AttributeError:
|
7 |
+
iteritems = dict.items
|
8 |
+
|
9 |
+
#----------------------------------------------------------------------------#
|
10 |
+
# Differentiation #
|
11 |
+
#----------------------------------------------------------------------------#
|
12 |
+
|
13 |
+
@defun
|
14 |
+
def difference(ctx, s, n):
|
15 |
+
r"""
|
16 |
+
Given a sequence `(s_k)` containing at least `n+1` items, returns the
|
17 |
+
`n`-th forward difference,
|
18 |
+
|
19 |
+
.. math ::
|
20 |
+
|
21 |
+
\Delta^n = \sum_{k=0}^{\infty} (-1)^{k+n} {n \choose k} s_k.
|
22 |
+
"""
|
23 |
+
n = int(n)
|
24 |
+
d = ctx.zero
|
25 |
+
b = (-1) ** (n & 1)
|
26 |
+
for k in xrange(n+1):
|
27 |
+
d += b * s[k]
|
28 |
+
b = (b * (k-n)) // (k+1)
|
29 |
+
return d
|
30 |
+
|
31 |
+
def hsteps(ctx, f, x, n, prec, **options):
|
32 |
+
singular = options.get('singular')
|
33 |
+
addprec = options.get('addprec', 10)
|
34 |
+
direction = options.get('direction', 0)
|
35 |
+
workprec = (prec+2*addprec) * (n+1)
|
36 |
+
orig = ctx.prec
|
37 |
+
try:
|
38 |
+
ctx.prec = workprec
|
39 |
+
h = options.get('h')
|
40 |
+
if h is None:
|
41 |
+
if options.get('relative'):
|
42 |
+
hextramag = int(ctx.mag(x))
|
43 |
+
else:
|
44 |
+
hextramag = 0
|
45 |
+
h = ctx.ldexp(1, -prec-addprec-hextramag)
|
46 |
+
else:
|
47 |
+
h = ctx.convert(h)
|
48 |
+
# Directed: steps x, x+h, ... x+n*h
|
49 |
+
direction = options.get('direction', 0)
|
50 |
+
if direction:
|
51 |
+
h *= ctx.sign(direction)
|
52 |
+
steps = xrange(n+1)
|
53 |
+
norm = h
|
54 |
+
# Central: steps x-n*h, x-(n-2)*h ..., x, ..., x+(n-2)*h, x+n*h
|
55 |
+
else:
|
56 |
+
steps = xrange(-n, n+1, 2)
|
57 |
+
norm = (2*h)
|
58 |
+
# Perturb
|
59 |
+
if singular:
|
60 |
+
x += 0.5*h
|
61 |
+
values = [f(x+k*h) for k in steps]
|
62 |
+
return values, norm, workprec
|
63 |
+
finally:
|
64 |
+
ctx.prec = orig
|
65 |
+
|
66 |
+
|
67 |
+
@defun
|
68 |
+
def diff(ctx, f, x, n=1, **options):
|
69 |
+
r"""
|
70 |
+
Numerically computes the derivative of `f`, `f'(x)`, or generally for
|
71 |
+
an integer `n \ge 0`, the `n`-th derivative `f^{(n)}(x)`.
|
72 |
+
A few basic examples are::
|
73 |
+
|
74 |
+
>>> from mpmath import *
|
75 |
+
>>> mp.dps = 15; mp.pretty = True
|
76 |
+
>>> diff(lambda x: x**2 + x, 1.0)
|
77 |
+
3.0
|
78 |
+
>>> diff(lambda x: x**2 + x, 1.0, 2)
|
79 |
+
2.0
|
80 |
+
>>> diff(lambda x: x**2 + x, 1.0, 3)
|
81 |
+
0.0
|
82 |
+
>>> nprint([diff(exp, 3, n) for n in range(5)]) # exp'(x) = exp(x)
|
83 |
+
[20.0855, 20.0855, 20.0855, 20.0855, 20.0855]
|
84 |
+
|
85 |
+
Even more generally, given a tuple of arguments `(x_1, \ldots, x_k)`
|
86 |
+
and order `(n_1, \ldots, n_k)`, the partial derivative
|
87 |
+
`f^{(n_1,\ldots,n_k)}(x_1,\ldots,x_k)` is evaluated. For example::
|
88 |
+
|
89 |
+
>>> diff(lambda x,y: 3*x*y + 2*y - x, (0.25, 0.5), (0,1))
|
90 |
+
2.75
|
91 |
+
>>> diff(lambda x,y: 3*x*y + 2*y - x, (0.25, 0.5), (1,1))
|
92 |
+
3.0
|
93 |
+
|
94 |
+
**Options**
|
95 |
+
|
96 |
+
The following optional keyword arguments are recognized:
|
97 |
+
|
98 |
+
``method``
|
99 |
+
Supported methods are ``'step'`` or ``'quad'``: derivatives may be
|
100 |
+
computed using either a finite difference with a small step
|
101 |
+
size `h` (default), or numerical quadrature.
|
102 |
+
``direction``
|
103 |
+
Direction of finite difference: can be -1 for a left
|
104 |
+
difference, 0 for a central difference (default), or +1
|
105 |
+
for a right difference; more generally can be any complex number.
|
106 |
+
``addprec``
|
107 |
+
Extra precision for `h` used to account for the function's
|
108 |
+
sensitivity to perturbations (default = 10).
|
109 |
+
``relative``
|
110 |
+
Choose `h` relative to the magnitude of `x`, rather than an
|
111 |
+
absolute value; useful for large or tiny `x` (default = False).
|
112 |
+
``h``
|
113 |
+
As an alternative to ``addprec`` and ``relative``, manually
|
114 |
+
select the step size `h`.
|
115 |
+
``singular``
|
116 |
+
If True, evaluation exactly at the point `x` is avoided; this is
|
117 |
+
useful for differentiating functions with removable singularities.
|
118 |
+
Default = False.
|
119 |
+
``radius``
|
120 |
+
Radius of integration contour (with ``method = 'quad'``).
|
121 |
+
Default = 0.25. A larger radius typically is faster and more
|
122 |
+
accurate, but it must be chosen so that `f` has no
|
123 |
+
singularities within the radius from the evaluation point.
|
124 |
+
|
125 |
+
A finite difference requires `n+1` function evaluations and must be
|
126 |
+
performed at `(n+1)` times the target precision. Accordingly, `f` must
|
127 |
+
support fast evaluation at high precision.
|
128 |
+
|
129 |
+
With integration, a larger number of function evaluations is
|
130 |
+
required, but not much extra precision is required. For high order
|
131 |
+
derivatives, this method may thus be faster if f is very expensive to
|
132 |
+
evaluate at high precision.
|
133 |
+
|
134 |
+
**Further examples**
|
135 |
+
|
136 |
+
The direction option is useful for computing left- or right-sided
|
137 |
+
derivatives of nonsmooth functions::
|
138 |
+
|
139 |
+
>>> diff(abs, 0, direction=0)
|
140 |
+
0.0
|
141 |
+
>>> diff(abs, 0, direction=1)
|
142 |
+
1.0
|
143 |
+
>>> diff(abs, 0, direction=-1)
|
144 |
+
-1.0
|
145 |
+
|
146 |
+
More generally, if the direction is nonzero, a right difference
|
147 |
+
is computed where the step size is multiplied by sign(direction).
|
148 |
+
For example, with direction=+j, the derivative from the positive
|
149 |
+
imaginary direction will be computed::
|
150 |
+
|
151 |
+
>>> diff(abs, 0, direction=j)
|
152 |
+
(0.0 - 1.0j)
|
153 |
+
|
154 |
+
With integration, the result may have a small imaginary part
|
155 |
+
even even if the result is purely real::
|
156 |
+
|
157 |
+
>>> diff(sqrt, 1, method='quad') # doctest:+ELLIPSIS
|
158 |
+
(0.5 - 4.59...e-26j)
|
159 |
+
>>> chop(_)
|
160 |
+
0.5
|
161 |
+
|
162 |
+
Adding precision to obtain an accurate value::
|
163 |
+
|
164 |
+
>>> diff(cos, 1e-30)
|
165 |
+
0.0
|
166 |
+
>>> diff(cos, 1e-30, h=0.0001)
|
167 |
+
-9.99999998328279e-31
|
168 |
+
>>> diff(cos, 1e-30, addprec=100)
|
169 |
+
-1.0e-30
|
170 |
+
|
171 |
+
"""
|
172 |
+
partial = False
|
173 |
+
try:
|
174 |
+
orders = list(n)
|
175 |
+
x = list(x)
|
176 |
+
partial = True
|
177 |
+
except TypeError:
|
178 |
+
pass
|
179 |
+
if partial:
|
180 |
+
x = [ctx.convert(_) for _ in x]
|
181 |
+
return _partial_diff(ctx, f, x, orders, options)
|
182 |
+
method = options.get('method', 'step')
|
183 |
+
if n == 0 and method != 'quad' and not options.get('singular'):
|
184 |
+
return f(ctx.convert(x))
|
185 |
+
prec = ctx.prec
|
186 |
+
try:
|
187 |
+
if method == 'step':
|
188 |
+
values, norm, workprec = hsteps(ctx, f, x, n, prec, **options)
|
189 |
+
ctx.prec = workprec
|
190 |
+
v = ctx.difference(values, n) / norm**n
|
191 |
+
elif method == 'quad':
|
192 |
+
ctx.prec += 10
|
193 |
+
radius = ctx.convert(options.get('radius', 0.25))
|
194 |
+
def g(t):
|
195 |
+
rei = radius*ctx.expj(t)
|
196 |
+
z = x + rei
|
197 |
+
return f(z) / rei**n
|
198 |
+
d = ctx.quadts(g, [0, 2*ctx.pi])
|
199 |
+
v = d * ctx.factorial(n) / (2*ctx.pi)
|
200 |
+
else:
|
201 |
+
raise ValueError("unknown method: %r" % method)
|
202 |
+
finally:
|
203 |
+
ctx.prec = prec
|
204 |
+
return +v
|
205 |
+
|
206 |
+
def _partial_diff(ctx, f, xs, orders, options):
|
207 |
+
if not orders:
|
208 |
+
return f()
|
209 |
+
if not sum(orders):
|
210 |
+
return f(*xs)
|
211 |
+
i = 0
|
212 |
+
for i in range(len(orders)):
|
213 |
+
if orders[i]:
|
214 |
+
break
|
215 |
+
order = orders[i]
|
216 |
+
def fdiff_inner(*f_args):
|
217 |
+
def inner(t):
|
218 |
+
return f(*(f_args[:i] + (t,) + f_args[i+1:]))
|
219 |
+
return ctx.diff(inner, f_args[i], order, **options)
|
220 |
+
orders[i] = 0
|
221 |
+
return _partial_diff(ctx, fdiff_inner, xs, orders, options)
|
222 |
+
|
223 |
+
@defun
|
224 |
+
def diffs(ctx, f, x, n=None, **options):
|
225 |
+
r"""
|
226 |
+
Returns a generator that yields the sequence of derivatives
|
227 |
+
|
228 |
+
.. math ::
|
229 |
+
|
230 |
+
f(x), f'(x), f''(x), \ldots, f^{(k)}(x), \ldots
|
231 |
+
|
232 |
+
With ``method='step'``, :func:`~mpmath.diffs` uses only `O(k)`
|
233 |
+
function evaluations to generate the first `k` derivatives,
|
234 |
+
rather than the roughly `O(k^2)` evaluations
|
235 |
+
required if one calls :func:`~mpmath.diff` `k` separate times.
|
236 |
+
|
237 |
+
With `n < \infty`, the generator stops as soon as the
|
238 |
+
`n`-th derivative has been generated. If the exact number of
|
239 |
+
needed derivatives is known in advance, this is further
|
240 |
+
slightly more efficient.
|
241 |
+
|
242 |
+
Options are the same as for :func:`~mpmath.diff`.
|
243 |
+
|
244 |
+
**Examples**
|
245 |
+
|
246 |
+
>>> from mpmath import *
|
247 |
+
>>> mp.dps = 15
|
248 |
+
>>> nprint(list(diffs(cos, 1, 5)))
|
249 |
+
[0.540302, -0.841471, -0.540302, 0.841471, 0.540302, -0.841471]
|
250 |
+
>>> for i, d in zip(range(6), diffs(cos, 1)):
|
251 |
+
... print("%s %s" % (i, d))
|
252 |
+
...
|
253 |
+
0 0.54030230586814
|
254 |
+
1 -0.841470984807897
|
255 |
+
2 -0.54030230586814
|
256 |
+
3 0.841470984807897
|
257 |
+
4 0.54030230586814
|
258 |
+
5 -0.841470984807897
|
259 |
+
|
260 |
+
"""
|
261 |
+
if n is None:
|
262 |
+
n = ctx.inf
|
263 |
+
else:
|
264 |
+
n = int(n)
|
265 |
+
if options.get('method', 'step') != 'step':
|
266 |
+
k = 0
|
267 |
+
while k < n + 1:
|
268 |
+
yield ctx.diff(f, x, k, **options)
|
269 |
+
k += 1
|
270 |
+
return
|
271 |
+
singular = options.get('singular')
|
272 |
+
if singular:
|
273 |
+
yield ctx.diff(f, x, 0, singular=True)
|
274 |
+
else:
|
275 |
+
yield f(ctx.convert(x))
|
276 |
+
if n < 1:
|
277 |
+
return
|
278 |
+
if n == ctx.inf:
|
279 |
+
A, B = 1, 2
|
280 |
+
else:
|
281 |
+
A, B = 1, n+1
|
282 |
+
while 1:
|
283 |
+
callprec = ctx.prec
|
284 |
+
y, norm, workprec = hsteps(ctx, f, x, B, callprec, **options)
|
285 |
+
for k in xrange(A, B):
|
286 |
+
try:
|
287 |
+
ctx.prec = workprec
|
288 |
+
d = ctx.difference(y, k) / norm**k
|
289 |
+
finally:
|
290 |
+
ctx.prec = callprec
|
291 |
+
yield +d
|
292 |
+
if k >= n:
|
293 |
+
return
|
294 |
+
A, B = B, int(A*1.4+1)
|
295 |
+
B = min(B, n)
|
296 |
+
|
297 |
+
def iterable_to_function(gen):
|
298 |
+
gen = iter(gen)
|
299 |
+
data = []
|
300 |
+
def f(k):
|
301 |
+
for i in xrange(len(data), k+1):
|
302 |
+
data.append(next(gen))
|
303 |
+
return data[k]
|
304 |
+
return f
|
305 |
+
|
306 |
+
@defun
|
307 |
+
def diffs_prod(ctx, factors):
|
308 |
+
r"""
|
309 |
+
Given a list of `N` iterables or generators yielding
|
310 |
+
`f_k(x), f'_k(x), f''_k(x), \ldots` for `k = 1, \ldots, N`,
|
311 |
+
generate `g(x), g'(x), g''(x), \ldots` where
|
312 |
+
`g(x) = f_1(x) f_2(x) \cdots f_N(x)`.
|
313 |
+
|
314 |
+
At high precision and for large orders, this is typically more efficient
|
315 |
+
than numerical differentiation if the derivatives of each `f_k(x)`
|
316 |
+
admit direct computation.
|
317 |
+
|
318 |
+
Note: This function does not increase the working precision internally,
|
319 |
+
so guard digits may have to be added externally for full accuracy.
|
320 |
+
|
321 |
+
**Examples**
|
322 |
+
|
323 |
+
>>> from mpmath import *
|
324 |
+
>>> mp.dps = 15; mp.pretty = True
|
325 |
+
>>> f = lambda x: exp(x)*cos(x)*sin(x)
|
326 |
+
>>> u = diffs(f, 1)
|
327 |
+
>>> v = mp.diffs_prod([diffs(exp,1), diffs(cos,1), diffs(sin,1)])
|
328 |
+
>>> next(u); next(v)
|
329 |
+
1.23586333600241
|
330 |
+
1.23586333600241
|
331 |
+
>>> next(u); next(v)
|
332 |
+
0.104658952245596
|
333 |
+
0.104658952245596
|
334 |
+
>>> next(u); next(v)
|
335 |
+
-5.96999877552086
|
336 |
+
-5.96999877552086
|
337 |
+
>>> next(u); next(v)
|
338 |
+
-12.4632923122697
|
339 |
+
-12.4632923122697
|
340 |
+
|
341 |
+
"""
|
342 |
+
N = len(factors)
|
343 |
+
if N == 1:
|
344 |
+
for c in factors[0]:
|
345 |
+
yield c
|
346 |
+
else:
|
347 |
+
u = iterable_to_function(ctx.diffs_prod(factors[:N//2]))
|
348 |
+
v = iterable_to_function(ctx.diffs_prod(factors[N//2:]))
|
349 |
+
n = 0
|
350 |
+
while 1:
|
351 |
+
#yield sum(binomial(n,k)*u(n-k)*v(k) for k in xrange(n+1))
|
352 |
+
s = u(n) * v(0)
|
353 |
+
a = 1
|
354 |
+
for k in xrange(1,n+1):
|
355 |
+
a = a * (n-k+1) // k
|
356 |
+
s += a * u(n-k) * v(k)
|
357 |
+
yield s
|
358 |
+
n += 1
|
359 |
+
|
360 |
+
def dpoly(n, _cache={}):
|
361 |
+
"""
|
362 |
+
nth differentiation polynomial for exp (Faa di Bruno's formula).
|
363 |
+
|
364 |
+
TODO: most exponents are zero, so maybe a sparse representation
|
365 |
+
would be better.
|
366 |
+
"""
|
367 |
+
if n in _cache:
|
368 |
+
return _cache[n]
|
369 |
+
if not _cache:
|
370 |
+
_cache[0] = {(0,):1}
|
371 |
+
R = dpoly(n-1)
|
372 |
+
R = dict((c+(0,),v) for (c,v) in iteritems(R))
|
373 |
+
Ra = {}
|
374 |
+
for powers, count in iteritems(R):
|
375 |
+
powers1 = (powers[0]+1,) + powers[1:]
|
376 |
+
if powers1 in Ra:
|
377 |
+
Ra[powers1] += count
|
378 |
+
else:
|
379 |
+
Ra[powers1] = count
|
380 |
+
for powers, count in iteritems(R):
|
381 |
+
if not sum(powers):
|
382 |
+
continue
|
383 |
+
for k,p in enumerate(powers):
|
384 |
+
if p:
|
385 |
+
powers2 = powers[:k] + (p-1,powers[k+1]+1) + powers[k+2:]
|
386 |
+
if powers2 in Ra:
|
387 |
+
Ra[powers2] += p*count
|
388 |
+
else:
|
389 |
+
Ra[powers2] = p*count
|
390 |
+
_cache[n] = Ra
|
391 |
+
return _cache[n]
|
392 |
+
|
393 |
+
@defun
|
394 |
+
def diffs_exp(ctx, fdiffs):
|
395 |
+
r"""
|
396 |
+
Given an iterable or generator yielding `f(x), f'(x), f''(x), \ldots`
|
397 |
+
generate `g(x), g'(x), g''(x), \ldots` where `g(x) = \exp(f(x))`.
|
398 |
+
|
399 |
+
At high precision and for large orders, this is typically more efficient
|
400 |
+
than numerical differentiation if the derivatives of `f(x)`
|
401 |
+
admit direct computation.
|
402 |
+
|
403 |
+
Note: This function does not increase the working precision internally,
|
404 |
+
so guard digits may have to be added externally for full accuracy.
|
405 |
+
|
406 |
+
**Examples**
|
407 |
+
|
408 |
+
The derivatives of the gamma function can be computed using
|
409 |
+
logarithmic differentiation::
|
410 |
+
|
411 |
+
>>> from mpmath import *
|
412 |
+
>>> mp.dps = 15; mp.pretty = True
|
413 |
+
>>>
|
414 |
+
>>> def diffs_loggamma(x):
|
415 |
+
... yield loggamma(x)
|
416 |
+
... i = 0
|
417 |
+
... while 1:
|
418 |
+
... yield psi(i,x)
|
419 |
+
... i += 1
|
420 |
+
...
|
421 |
+
>>> u = diffs_exp(diffs_loggamma(3))
|
422 |
+
>>> v = diffs(gamma, 3)
|
423 |
+
>>> next(u); next(v)
|
424 |
+
2.0
|
425 |
+
2.0
|
426 |
+
>>> next(u); next(v)
|
427 |
+
1.84556867019693
|
428 |
+
1.84556867019693
|
429 |
+
>>> next(u); next(v)
|
430 |
+
2.49292999190269
|
431 |
+
2.49292999190269
|
432 |
+
>>> next(u); next(v)
|
433 |
+
3.44996501352367
|
434 |
+
3.44996501352367
|
435 |
+
|
436 |
+
"""
|
437 |
+
fn = iterable_to_function(fdiffs)
|
438 |
+
f0 = ctx.exp(fn(0))
|
439 |
+
yield f0
|
440 |
+
i = 1
|
441 |
+
while 1:
|
442 |
+
s = ctx.mpf(0)
|
443 |
+
for powers, c in iteritems(dpoly(i)):
|
444 |
+
s += c*ctx.fprod(fn(k+1)**p for (k,p) in enumerate(powers) if p)
|
445 |
+
yield s * f0
|
446 |
+
i += 1
|
447 |
+
|
448 |
+
@defun
|
449 |
+
def differint(ctx, f, x, n=1, x0=0):
|
450 |
+
r"""
|
451 |
+
Calculates the Riemann-Liouville differintegral, or fractional
|
452 |
+
derivative, defined by
|
453 |
+
|
454 |
+
.. math ::
|
455 |
+
|
456 |
+
\,_{x_0}{\mathbb{D}}^n_xf(x) = \frac{1}{\Gamma(m-n)} \frac{d^m}{dx^m}
|
457 |
+
\int_{x_0}^{x}(x-t)^{m-n-1}f(t)dt
|
458 |
+
|
459 |
+
where `f` is a given (presumably well-behaved) function,
|
460 |
+
`x` is the evaluation point, `n` is the order, and `x_0` is
|
461 |
+
the reference point of integration (`m` is an arbitrary
|
462 |
+
parameter selected automatically).
|
463 |
+
|
464 |
+
With `n = 1`, this is just the standard derivative `f'(x)`; with `n = 2`,
|
465 |
+
the second derivative `f''(x)`, etc. With `n = -1`, it gives
|
466 |
+
`\int_{x_0}^x f(t) dt`, with `n = -2`
|
467 |
+
it gives `\int_{x_0}^x \left( \int_{x_0}^t f(u) du \right) dt`, etc.
|
468 |
+
|
469 |
+
As `n` is permitted to be any number, this operator generalizes
|
470 |
+
iterated differentiation and iterated integration to a single
|
471 |
+
operator with a continuous order parameter.
|
472 |
+
|
473 |
+
**Examples**
|
474 |
+
|
475 |
+
There is an exact formula for the fractional derivative of a
|
476 |
+
monomial `x^p`, which may be used as a reference. For example,
|
477 |
+
the following gives a half-derivative (order 0.5)::
|
478 |
+
|
479 |
+
>>> from mpmath import *
|
480 |
+
>>> mp.dps = 15; mp.pretty = True
|
481 |
+
>>> x = mpf(3); p = 2; n = 0.5
|
482 |
+
>>> differint(lambda t: t**p, x, n)
|
483 |
+
7.81764019044672
|
484 |
+
>>> gamma(p+1)/gamma(p-n+1) * x**(p-n)
|
485 |
+
7.81764019044672
|
486 |
+
|
487 |
+
Another useful test function is the exponential function, whose
|
488 |
+
integration / differentiation formula easy generalizes
|
489 |
+
to arbitrary order. Here we first compute a third derivative,
|
490 |
+
and then a triply nested integral. (The reference point `x_0`
|
491 |
+
is set to `-\infty` to avoid nonzero endpoint terms.)::
|
492 |
+
|
493 |
+
>>> differint(lambda x: exp(pi*x), -1.5, 3)
|
494 |
+
0.278538406900792
|
495 |
+
>>> exp(pi*-1.5) * pi**3
|
496 |
+
0.278538406900792
|
497 |
+
>>> differint(lambda x: exp(pi*x), 3.5, -3, -inf)
|
498 |
+
1922.50563031149
|
499 |
+
>>> exp(pi*3.5) / pi**3
|
500 |
+
1922.50563031149
|
501 |
+
|
502 |
+
However, for noninteger `n`, the differentiation formula for the
|
503 |
+
exponential function must be modified to give the same result as the
|
504 |
+
Riemann-Liouville differintegral::
|
505 |
+
|
506 |
+
>>> x = mpf(3.5)
|
507 |
+
>>> c = pi
|
508 |
+
>>> n = 1+2*j
|
509 |
+
>>> differint(lambda x: exp(c*x), x, n)
|
510 |
+
(-123295.005390743 + 140955.117867654j)
|
511 |
+
>>> x**(-n) * exp(c)**x * (x*c)**n * gammainc(-n, 0, x*c) / gamma(-n)
|
512 |
+
(-123295.005390743 + 140955.117867654j)
|
513 |
+
|
514 |
+
|
515 |
+
"""
|
516 |
+
m = max(int(ctx.ceil(ctx.re(n)))+1, 1)
|
517 |
+
r = m-n-1
|
518 |
+
g = lambda x: ctx.quad(lambda t: (x-t)**r * f(t), [x0, x])
|
519 |
+
return ctx.diff(g, x, m) / ctx.gamma(m-n)
|
520 |
+
|
521 |
+
@defun
|
522 |
+
def diffun(ctx, f, n=1, **options):
|
523 |
+
r"""
|
524 |
+
Given a function `f`, returns a function `g(x)` that evaluates the nth
|
525 |
+
derivative `f^{(n)}(x)`::
|
526 |
+
|
527 |
+
>>> from mpmath import *
|
528 |
+
>>> mp.dps = 15; mp.pretty = True
|
529 |
+
>>> cos2 = diffun(sin)
|
530 |
+
>>> sin2 = diffun(sin, 4)
|
531 |
+
>>> cos(1.3), cos2(1.3)
|
532 |
+
(0.267498828624587, 0.267498828624587)
|
533 |
+
>>> sin(1.3), sin2(1.3)
|
534 |
+
(0.963558185417193, 0.963558185417193)
|
535 |
+
|
536 |
+
The function `f` must support arbitrary precision evaluation.
|
537 |
+
See :func:`~mpmath.diff` for additional details and supported
|
538 |
+
keyword options.
|
539 |
+
"""
|
540 |
+
if n == 0:
|
541 |
+
return f
|
542 |
+
def g(x):
|
543 |
+
return ctx.diff(f, x, n, **options)
|
544 |
+
return g
|
545 |
+
|
546 |
+
@defun
|
547 |
+
def taylor(ctx, f, x, n, **options):
|
548 |
+
r"""
|
549 |
+
Produces a degree-`n` Taylor polynomial around the point `x` of the
|
550 |
+
given function `f`. The coefficients are returned as a list.
|
551 |
+
|
552 |
+
>>> from mpmath import *
|
553 |
+
>>> mp.dps = 15; mp.pretty = True
|
554 |
+
>>> nprint(chop(taylor(sin, 0, 5)))
|
555 |
+
[0.0, 1.0, 0.0, -0.166667, 0.0, 0.00833333]
|
556 |
+
|
557 |
+
The coefficients are computed using high-order numerical
|
558 |
+
differentiation. The function must be possible to evaluate
|
559 |
+
to arbitrary precision. See :func:`~mpmath.diff` for additional details
|
560 |
+
and supported keyword options.
|
561 |
+
|
562 |
+
Note that to evaluate the Taylor polynomial as an approximation
|
563 |
+
of `f`, e.g. with :func:`~mpmath.polyval`, the coefficients must be reversed,
|
564 |
+
and the point of the Taylor expansion must be subtracted from
|
565 |
+
the argument:
|
566 |
+
|
567 |
+
>>> p = taylor(exp, 2.0, 10)
|
568 |
+
>>> polyval(p[::-1], 2.5 - 2.0)
|
569 |
+
12.1824939606092
|
570 |
+
>>> exp(2.5)
|
571 |
+
12.1824939607035
|
572 |
+
|
573 |
+
"""
|
574 |
+
gen = enumerate(ctx.diffs(f, x, n, **options))
|
575 |
+
if options.get("chop", True):
|
576 |
+
return [ctx.chop(d)/ctx.factorial(i) for i, d in gen]
|
577 |
+
else:
|
578 |
+
return [d/ctx.factorial(i) for i, d in gen]
|
579 |
+
|
580 |
+
@defun
|
581 |
+
def pade(ctx, a, L, M):
|
582 |
+
r"""
|
583 |
+
Computes a Pade approximation of degree `(L, M)` to a function.
|
584 |
+
Given at least `L+M+1` Taylor coefficients `a` approximating
|
585 |
+
a function `A(x)`, :func:`~mpmath.pade` returns coefficients of
|
586 |
+
polynomials `P, Q` satisfying
|
587 |
+
|
588 |
+
.. math ::
|
589 |
+
|
590 |
+
P = \sum_{k=0}^L p_k x^k
|
591 |
+
|
592 |
+
Q = \sum_{k=0}^M q_k x^k
|
593 |
+
|
594 |
+
Q_0 = 1
|
595 |
+
|
596 |
+
A(x) Q(x) = P(x) + O(x^{L+M+1})
|
597 |
+
|
598 |
+
`P(x)/Q(x)` can provide a good approximation to an analytic function
|
599 |
+
beyond the radius of convergence of its Taylor series (example
|
600 |
+
from G.A. Baker 'Essentials of Pade Approximants' Academic Press,
|
601 |
+
Ch.1A)::
|
602 |
+
|
603 |
+
>>> from mpmath import *
|
604 |
+
>>> mp.dps = 15; mp.pretty = True
|
605 |
+
>>> one = mpf(1)
|
606 |
+
>>> def f(x):
|
607 |
+
... return sqrt((one + 2*x)/(one + x))
|
608 |
+
...
|
609 |
+
>>> a = taylor(f, 0, 6)
|
610 |
+
>>> p, q = pade(a, 3, 3)
|
611 |
+
>>> x = 10
|
612 |
+
>>> polyval(p[::-1], x)/polyval(q[::-1], x)
|
613 |
+
1.38169105566806
|
614 |
+
>>> f(x)
|
615 |
+
1.38169855941551
|
616 |
+
|
617 |
+
"""
|
618 |
+
# To determine L+1 coefficients of P and M coefficients of Q
|
619 |
+
# L+M+1 coefficients of A must be provided
|
620 |
+
if len(a) < L+M+1:
|
621 |
+
raise ValueError("L+M+1 Coefficients should be provided")
|
622 |
+
|
623 |
+
if M == 0:
|
624 |
+
if L == 0:
|
625 |
+
return [ctx.one], [ctx.one]
|
626 |
+
else:
|
627 |
+
return a[:L+1], [ctx.one]
|
628 |
+
|
629 |
+
# Solve first
|
630 |
+
# a[L]*q[1] + ... + a[L-M+1]*q[M] = -a[L+1]
|
631 |
+
# ...
|
632 |
+
# a[L+M-1]*q[1] + ... + a[L]*q[M] = -a[L+M]
|
633 |
+
A = ctx.matrix(M)
|
634 |
+
for j in range(M):
|
635 |
+
for i in range(min(M, L+j+1)):
|
636 |
+
A[j, i] = a[L+j-i]
|
637 |
+
v = -ctx.matrix(a[(L+1):(L+M+1)])
|
638 |
+
x = ctx.lu_solve(A, v)
|
639 |
+
q = [ctx.one] + list(x)
|
640 |
+
# compute p
|
641 |
+
p = [0]*(L+1)
|
642 |
+
for i in range(L+1):
|
643 |
+
s = a[i]
|
644 |
+
for j in range(1, min(M,i) + 1):
|
645 |
+
s += q[j]*a[i-j]
|
646 |
+
p[i] = s
|
647 |
+
return p, q
|
env-llmeval/lib/python3.10/site-packages/mpmath/calculus/extrapolation.py
ADDED
@@ -0,0 +1,2115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
try:
|
2 |
+
from itertools import izip
|
3 |
+
except ImportError:
|
4 |
+
izip = zip
|
5 |
+
|
6 |
+
from ..libmp.backend import xrange
|
7 |
+
from .calculus import defun
|
8 |
+
|
9 |
+
try:
|
10 |
+
next = next
|
11 |
+
except NameError:
|
12 |
+
next = lambda _: _.next()
|
13 |
+
|
14 |
+
@defun
|
15 |
+
def richardson(ctx, seq):
|
16 |
+
r"""
|
17 |
+
Given a list ``seq`` of the first `N` elements of a slowly convergent
|
18 |
+
infinite sequence, :func:`~mpmath.richardson` computes the `N`-term
|
19 |
+
Richardson extrapolate for the limit.
|
20 |
+
|
21 |
+
:func:`~mpmath.richardson` returns `(v, c)` where `v` is the estimated
|
22 |
+
limit and `c` is the magnitude of the largest weight used during the
|
23 |
+
computation. The weight provides an estimate of the precision
|
24 |
+
lost to cancellation. Due to cancellation effects, the sequence must
|
25 |
+
be typically be computed at a much higher precision than the target
|
26 |
+
accuracy of the extrapolation.
|
27 |
+
|
28 |
+
**Applicability and issues**
|
29 |
+
|
30 |
+
The `N`-step Richardson extrapolation algorithm used by
|
31 |
+
:func:`~mpmath.richardson` is described in [1].
|
32 |
+
|
33 |
+
Richardson extrapolation only works for a specific type of sequence,
|
34 |
+
namely one converging like partial sums of
|
35 |
+
`P(1)/Q(1) + P(2)/Q(2) + \ldots` where `P` and `Q` are polynomials.
|
36 |
+
When the sequence does not convergence at such a rate
|
37 |
+
:func:`~mpmath.richardson` generally produces garbage.
|
38 |
+
|
39 |
+
Richardson extrapolation has the advantage of being fast: the `N`-term
|
40 |
+
extrapolate requires only `O(N)` arithmetic operations, and usually
|
41 |
+
produces an estimate that is accurate to `O(N)` digits. Contrast with
|
42 |
+
the Shanks transformation (see :func:`~mpmath.shanks`), which requires
|
43 |
+
`O(N^2)` operations.
|
44 |
+
|
45 |
+
:func:`~mpmath.richardson` is unable to produce an estimate for the
|
46 |
+
approximation error. One way to estimate the error is to perform
|
47 |
+
two extrapolations with slightly different `N` and comparing the
|
48 |
+
results.
|
49 |
+
|
50 |
+
Richardson extrapolation does not work for oscillating sequences.
|
51 |
+
As a simple workaround, :func:`~mpmath.richardson` detects if the last
|
52 |
+
three elements do not differ monotonically, and in that case
|
53 |
+
applies extrapolation only to the even-index elements.
|
54 |
+
|
55 |
+
**Example**
|
56 |
+
|
57 |
+
Applying Richardson extrapolation to the Leibniz series for `\pi`::
|
58 |
+
|
59 |
+
>>> from mpmath import *
|
60 |
+
>>> mp.dps = 30; mp.pretty = True
|
61 |
+
>>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m))
|
62 |
+
... for m in range(1,30)]
|
63 |
+
>>> v, c = richardson(S[:10])
|
64 |
+
>>> v
|
65 |
+
3.2126984126984126984126984127
|
66 |
+
>>> nprint([v-pi, c])
|
67 |
+
[0.0711058, 2.0]
|
68 |
+
|
69 |
+
>>> v, c = richardson(S[:30])
|
70 |
+
>>> v
|
71 |
+
3.14159265468624052829954206226
|
72 |
+
>>> nprint([v-pi, c])
|
73 |
+
[1.09645e-9, 20833.3]
|
74 |
+
|
75 |
+
**References**
|
76 |
+
|
77 |
+
1. [BenderOrszag]_ pp. 375-376
|
78 |
+
|
79 |
+
"""
|
80 |
+
if len(seq) < 3:
|
81 |
+
raise ValueError("seq should be of minimum length 3")
|
82 |
+
if ctx.sign(seq[-1]-seq[-2]) != ctx.sign(seq[-2]-seq[-3]):
|
83 |
+
seq = seq[::2]
|
84 |
+
N = len(seq)//2-1
|
85 |
+
s = ctx.zero
|
86 |
+
# The general weight is c[k] = (N+k)**N * (-1)**(k+N) / k! / (N-k)!
|
87 |
+
# To avoid repeated factorials, we simplify the quotient
|
88 |
+
# of successive weights to obtain a recurrence relation
|
89 |
+
c = (-1)**N * N**N / ctx.mpf(ctx._ifac(N))
|
90 |
+
maxc = 1
|
91 |
+
for k in xrange(N+1):
|
92 |
+
s += c * seq[N+k]
|
93 |
+
maxc = max(abs(c), maxc)
|
94 |
+
c *= (k-N)*ctx.mpf(k+N+1)**N
|
95 |
+
c /= ((1+k)*ctx.mpf(k+N)**N)
|
96 |
+
return s, maxc
|
97 |
+
|
98 |
+
@defun
|
99 |
+
def shanks(ctx, seq, table=None, randomized=False):
|
100 |
+
r"""
|
101 |
+
Given a list ``seq`` of the first `N` elements of a slowly
|
102 |
+
convergent infinite sequence `(A_k)`, :func:`~mpmath.shanks` computes the iterated
|
103 |
+
Shanks transformation `S(A), S(S(A)), \ldots, S^{N/2}(A)`. The Shanks
|
104 |
+
transformation often provides strong convergence acceleration,
|
105 |
+
especially if the sequence is oscillating.
|
106 |
+
|
107 |
+
The iterated Shanks transformation is computed using the Wynn
|
108 |
+
epsilon algorithm (see [1]). :func:`~mpmath.shanks` returns the full
|
109 |
+
epsilon table generated by Wynn's algorithm, which can be read
|
110 |
+
off as follows:
|
111 |
+
|
112 |
+
* The table is a list of lists forming a lower triangular matrix,
|
113 |
+
where higher row and column indices correspond to more accurate
|
114 |
+
values.
|
115 |
+
* The columns with even index hold dummy entries (required for the
|
116 |
+
computation) and the columns with odd index hold the actual
|
117 |
+
extrapolates.
|
118 |
+
* The last element in the last row is typically the most
|
119 |
+
accurate estimate of the limit.
|
120 |
+
* The difference to the third last element in the last row
|
121 |
+
provides an estimate of the approximation error.
|
122 |
+
* The magnitude of the second last element provides an estimate
|
123 |
+
of the numerical accuracy lost to cancellation.
|
124 |
+
|
125 |
+
For convenience, the extrapolation is stopped at an odd index
|
126 |
+
so that ``shanks(seq)[-1][-1]`` always gives an estimate of the
|
127 |
+
limit.
|
128 |
+
|
129 |
+
Optionally, an existing table can be passed to :func:`~mpmath.shanks`.
|
130 |
+
This can be used to efficiently extend a previous computation after
|
131 |
+
new elements have been appended to the sequence. The table will
|
132 |
+
then be updated in-place.
|
133 |
+
|
134 |
+
**The Shanks transformation**
|
135 |
+
|
136 |
+
The Shanks transformation is defined as follows (see [2]): given
|
137 |
+
the input sequence `(A_0, A_1, \ldots)`, the transformed sequence is
|
138 |
+
given by
|
139 |
+
|
140 |
+
.. math ::
|
141 |
+
|
142 |
+
S(A_k) = \frac{A_{k+1}A_{k-1}-A_k^2}{A_{k+1}+A_{k-1}-2 A_k}
|
143 |
+
|
144 |
+
The Shanks transformation gives the exact limit `A_{\infty}` in a
|
145 |
+
single step if `A_k = A + a q^k`. Note in particular that it
|
146 |
+
extrapolates the exact sum of a geometric series in a single step.
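As a small worked check, one application of the formula to three partial sums
of `1/2 + 1/4 + 1/8 + \ldots` already produces the exact limit::

    A0, A1, A2 = 0.5, 0.75, 0.875
    S1 = (A2*A0 - A1**2) / (A2 + A0 - 2*A1)    # = 1.0 exactly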
|
147 |
+
|
148 |
+
Applying the Shanks transformation once often improves convergence
|
149 |
+
substantially for an arbitrary sequence, but the optimal effect is
|
150 |
+
obtained by applying it iteratively:
|
151 |
+
`S(S(A_k)), S(S(S(A_k))), \ldots`.
|
152 |
+
|
153 |
+
Wynn's epsilon algorithm provides an efficient way to generate
|
154 |
+
the table of iterated Shanks transformations. It reduces the
|
155 |
+
computation of each element to essentially a single division, at
|
156 |
+
the cost of requiring dummy elements in the table. See [1] for
|
157 |
+
details.
|
158 |
+
|
159 |
+
**Precision issues**
|
160 |
+
|
161 |
+
Due to cancellation effects, the sequence must typically be
|
162 |
+
computed at a much higher precision than the target accuracy
|
163 |
+
of the extrapolation.
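A minimal sketch of that idea; the factor of two extra precision here is only
a plausible guard, not a general rule::

    from mpmath import mp, mpf, shanks
    mp.dps = 15
    with mp.extraprec(2 * mp.prec):    # guard digits against cancellation
        S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m)) for m in range(1, 16)]
        T = shanks(S)
    estimate = +T[-1][-1]              # rounded back to the working precision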
|
164 |
+
|
165 |
+
If the Shanks transformation converges to the exact limit (such
|
166 |
+
as if the sequence is a geometric series), then a division by
|
167 |
+
zero occurs. By default, :func:`~mpmath.shanks` handles this case by
|
168 |
+
terminating the iteration and returning the table it has
|
169 |
+
generated so far. With *randomized=True*, it will instead
|
170 |
+
replace the zero by a pseudorandom number close to zero.
|
171 |
+
(TODO: find a better solution to this problem.)
|
172 |
+
|
173 |
+
**Examples**
|
174 |
+
|
175 |
+
We illustrate by applying the Shanks transformation to the Leibniz
|
176 |
+
series for `\pi`::
|
177 |
+
|
178 |
+
>>> from mpmath import *
|
179 |
+
>>> mp.dps = 50
|
180 |
+
>>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m))
|
181 |
+
... for m in range(1,30)]
|
182 |
+
>>>
|
183 |
+
>>> T = shanks(S[:7])
|
184 |
+
>>> for row in T:
|
185 |
+
... nprint(row)
|
186 |
+
...
|
187 |
+
[-0.75]
|
188 |
+
[1.25, 3.16667]
|
189 |
+
[-1.75, 3.13333, -28.75]
|
190 |
+
[2.25, 3.14524, 82.25, 3.14234]
|
191 |
+
[-2.75, 3.13968, -177.75, 3.14139, -969.937]
|
192 |
+
[3.25, 3.14271, 327.25, 3.14166, 3515.06, 3.14161]
|
193 |
+
|
194 |
+
The extrapolated accuracy is about 4 digits, and about 4 digits
|
195 |
+
may have been lost due to cancellation::
|
196 |
+
|
197 |
+
>>> L = T[-1]
|
198 |
+
>>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])])
|
199 |
+
[2.22532e-5, 4.78309e-5, 3515.06]
|
200 |
+
|
201 |
+
Now we extend the computation::
|
202 |
+
|
203 |
+
>>> T = shanks(S[:25], T)
|
204 |
+
>>> L = T[-1]
|
205 |
+
>>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])])
|
206 |
+
[3.75527e-19, 1.48478e-19, 2.96014e+17]
|
207 |
+
|
208 |
+
The value for pi is now accurate to 18 digits. About 18 digits may
|
209 |
+
also have been lost to cancellation.
|
210 |
+
|
211 |
+
Here is an example with a geometric series, where the convergence
|
212 |
+
is immediate (the sum is exactly 1)::
|
213 |
+
|
214 |
+
>>> mp.dps = 15
|
215 |
+
>>> for row in shanks([0.5, 0.75, 0.875, 0.9375, 0.96875]):
|
216 |
+
... nprint(row)
|
217 |
+
[4.0]
|
218 |
+
[8.0, 1.0]
|
219 |
+
|
220 |
+
**References**
|
221 |
+
|
222 |
+
1. [GravesMorris]_
|
223 |
+
|
224 |
+
2. [BenderOrszag]_ pp. 368-375
|
225 |
+
|
226 |
+
"""
|
227 |
+
if len(seq) < 2:
|
228 |
+
raise ValueError("seq should be of minimum length 2")
|
229 |
+
if table:
|
230 |
+
START = len(table)
|
231 |
+
else:
|
232 |
+
START = 0
|
233 |
+
table = []
|
234 |
+
STOP = len(seq) - 1
|
235 |
+
if STOP & 1:
|
236 |
+
STOP -= 1
|
237 |
+
one = ctx.one
|
238 |
+
eps = +ctx.eps
|
239 |
+
if randomized:
|
240 |
+
from random import Random
|
241 |
+
rnd = Random()
|
242 |
+
rnd.seed(START)
|
243 |
+
for i in xrange(START, STOP):
|
244 |
+
row = []
|
245 |
+
for j in xrange(i+1):
|
246 |
+
if j == 0:
|
247 |
+
a, b = 0, seq[i+1]-seq[i]
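# Wynn's recursion with the dummy column eps_{-1} = 0, so this entry
# becomes 1/(A[i+1]-A[i])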
|
248 |
+
else:
|
249 |
+
if j == 1:
|
250 |
+
a = seq[i]
|
251 |
+
else:
|
252 |
+
a = table[i-1][j-2]
|
253 |
+
b = row[j-1] - table[i-1][j-1]
|
254 |
+
if not b:
|
255 |
+
if randomized:
|
256 |
+
b = (1 + rnd.getrandbits(10))*eps
|
257 |
+
elif i & 1:
|
258 |
+
return table[:-1]
|
259 |
+
else:
|
260 |
+
return table
|
261 |
+
row.append(a + one/b)
|
262 |
+
table.append(row)
|
263 |
+
return table
|
264 |
+
|
265 |
+
|
266 |
+
class levin_class:
|
267 |
+
# levin: Copyright 2013 Timo Hartmann (thartmann15 at gmail.com)
|
268 |
+
r"""
|
269 |
+
This interface implements Levin's (nonlinear) sequence transformation for
|
270 |
+
convergence acceleration and summation of divergent series. It performs
|
271 |
+
better than the Shanks/Wynn-epsilon algorithm for logarithmically convergent
|
272 |
+
or alternating divergent series.
|
273 |
+
|
274 |
+
Let *A* be the series we want to sum:
|
275 |
+
|
276 |
+
.. math ::
|
277 |
+
|
278 |
+
A = \sum_{k=0}^{\infty} a_k
|
279 |
+
|
280 |
+
Attention: all `a_k` must be non-zero!
|
281 |
+
|
282 |
+
Let `s_n` be the partial sums of this series:
|
283 |
+
|
284 |
+
.. math ::
|
285 |
+
|
286 |
+
s_n = \sum_{k=0}^n a_k.
|
287 |
+
|
288 |
+
**Methods**
|
289 |
+
|
290 |
+
Calling ``levin`` returns an object with the following methods.
|
291 |
+
|
292 |
+
``update(...)`` works with the list of individual terms `a_k` of *A*, and
|
293 |
+
``update_psum(...)`` works with the list of partial sums `s_k` of *A*:
|
294 |
+
|
295 |
+
.. code ::
|
296 |
+
|
297 |
+
v, e = ...update([a_0, a_1,..., a_k])
|
298 |
+
v, e = ...update_psum([s_0, s_1,..., s_k])
|
299 |
+
|
300 |
+
``step(...)`` works with the individual terms `a_k` and ``step_psum(...)``
|
301 |
+
works with the partial sums `s_k`:
|
302 |
+
|
303 |
+
.. code ::
|
304 |
+
|
305 |
+
v, e = ...step(a_k)
|
306 |
+
v, e = ...step_psum(s_k)
|
307 |
+
|
308 |
+
*v* is the current estimate for *A*, and *e* is an error estimate which is
|
309 |
+
simply the difference between the current estimate and the last estimate.
|
310 |
+
One should not mix ``update``, ``update_psum``, ``step`` and ``step_psum``.
|
311 |
+
|
312 |
+
**A word of caution**
|
313 |
+
|
314 |
+
One can only hope for good results (i.e. convergence acceleration or
|
315 |
+
resummation) if the `s_n` have some well-defined asymptotic behavior for
|
316 |
+
large `n` and are not erratic or random. Furthermore one usually needs very
|
317 |
+
high working precision because of the numerical cancellation. If the working
|
318 |
+
precision is insufficient, levin may silently produce numerical garbage.
|
319 |
+
Furthermore even if the Levin-transformation converges, in the general case
|
320 |
+
there is no proof that the result is mathematically sound. Only for very
|
321 |
+
special classes of problems can one prove that the Levin-transformation
|
322 |
+
converges to the expected result (for example Stieltjes-type integrals).
|
323 |
+
Furthermore the Levin-transform is quite expensive (i.e. slow) in comparison
|
324 |
+
to Shanks/Wynn-epsilon, Richardson & co.
|
325 |
+
In summary one can say that the Levin-transformation is powerful but
|
326 |
+
unreliable and that it may need a copious amount of working precision.
|
327 |
+
|
328 |
+
The Levin transform has several variants differing in the choice of weights.
|
329 |
+
Some variants are better suited for the possible flavours of convergence
|
330 |
+
behaviour of *A* than other variants:
|
331 |
+
|
332 |
+
.. code ::
|
333 |
+
|
334 |
+
convergence behaviour   levin-u    levin-t    levin-v  shanks/wynn-epsilon
|
335 |
+
|
336 |
+
logarithmic                +          -          +              -
|
337 |
+
linear                     +          +          +              +
|
338 |
+
alternating divergent      +          +          +              +
|
339 |
+
|
340 |
+
"+" means the variant is suitable,"-" means the variant is not suitable;
|
341 |
+
for comparison the Shanks/Wynn-epsilon transform is listed, too.
|
342 |
+
|
343 |
+
The variant is controlled through the variant keyword (i.e. ``variant="u"``,
|
344 |
+
``variant="t"`` or ``variant="v"``). Overall "u" is probably the best choice.
|
345 |
+
|
346 |
+
Finally it is possible to use the Sidi-S transform instead of the Levin transform
|
347 |
+
by using the keyword ``method='sidi'``. The Sidi-S transform works better than the
|
348 |
+
Levin transformation for some divergent series (see the examples).
|
349 |
+
|
350 |
+
Parameters:
|
351 |
+
|
352 |
+
.. code ::
|
353 |
+
|
354 |
+
method "levin" or "sidi" chooses either the Levin or the Sidi-S transformation
|
355 |
+
variant "u","t" or "v" chooses the weight variant.
|
356 |
+
|
357 |
+
The Levin transform is also accessible through the nsum interface.
|
358 |
+
``method="l"`` or ``method="levin"`` select the normal Levin transform while
|
359 |
+
``method="sidi"``
|
360 |
+
selects the Sidi-S transform. The variant is in both cases selected through the
|
361 |
+
levin_variant keyword. The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise
|
362 |
+
it will miss the point where the Levin transform converges, resulting in numerical
|
363 |
+
overflow/garbage. For highly divergent series a copious amount of working precision
|
364 |
+
must be chosen.
|
365 |
+
|
366 |
+
**Examples**
|
367 |
+
|
368 |
+
First we sum the zeta function::
|
369 |
+
|
370 |
+
>>> from mpmath import mp
|
371 |
+
>>> mp.prec = 53
|
372 |
+
>>> eps = mp.mpf(mp.eps)
|
373 |
+
>>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision
|
374 |
+
... L = mp.levin(method = "levin", variant = "u")
|
375 |
+
... S, s, n = [], 0, 1
|
376 |
+
... while 1:
|
377 |
+
... s += mp.one / (n * n)
|
378 |
+
... n += 1
|
379 |
+
... S.append(s)
|
380 |
+
... v, e = L.update_psum(S)
|
381 |
+
... if e < eps:
|
382 |
+
... break
|
383 |
+
... if n > 1000: raise RuntimeError("iteration limit exceeded")
|
384 |
+
>>> print(mp.chop(v - mp.pi ** 2 / 6))
|
385 |
+
0.0
|
386 |
+
>>> w = mp.nsum(lambda n: 1 / (n*n), [1, mp.inf], method = "levin", levin_variant = "u")
|
387 |
+
>>> print(mp.chop(v - w))
|
388 |
+
0.0
|
389 |
+
|
390 |
+
Now we sum the zeta function outside its range of convergence
|
391 |
+
(attention: This does not work at the negative integers!)::
|
392 |
+
|
393 |
+
>>> eps = mp.mpf(mp.eps)
|
394 |
+
>>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision
|
395 |
+
... L = mp.levin(method = "levin", variant = "v")
|
396 |
+
... A, n = [], 1
|
397 |
+
... while 1:
|
398 |
+
... s = mp.mpf(n) ** (2 + 3j)
|
399 |
+
... n += 1
|
400 |
+
... A.append(s)
|
401 |
+
... v, e = L.update(A)
|
402 |
+
... if e < eps:
|
403 |
+
... break
|
404 |
+
... if n > 1000: raise RuntimeError("iteration limit exceeded")
|
405 |
+
>>> print(mp.chop(v - mp.zeta(-2-3j)))
|
406 |
+
0.0
|
407 |
+
>>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v")
|
408 |
+
>>> print(mp.chop(v - w))
|
409 |
+
0.0
|
410 |
+
|
411 |
+
Now we sum the divergent asymptotic expansion of an integral related to the
|
412 |
+
exponential integral (see also [2] p.373). The Sidi-S transform works best here::
|
413 |
+
|
414 |
+
>>> z = mp.mpf(10)
|
415 |
+
>>> exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf])
|
416 |
+
>>> # exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral
|
417 |
+
>>> eps = mp.mpf(mp.eps)
|
418 |
+
>>> with mp.extraprec(2 * mp.prec): # high working precisions are mandatory for divergent resummation
|
419 |
+
... L = mp.levin(method = "sidi", variant = "t")
|
420 |
+
... n = 0
|
421 |
+
... while 1:
|
422 |
+
... s = (-1)**n * mp.fac(n) * z ** (-n)
|
423 |
+
... v, e = L.step(s)
|
424 |
+
... n += 1
|
425 |
+
... if e < eps:
|
426 |
+
... break
|
427 |
+
... if n > 1000: raise RuntimeError("iteration limit exceeded")
|
428 |
+
>>> print(mp.chop(v - exact))
|
429 |
+
0.0
|
430 |
+
>>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t")
|
431 |
+
>>> print(mp.chop(v - w))
|
432 |
+
0.0
|
433 |
+
|
434 |
+
Another highly divergent integral is also summable::
|
435 |
+
|
436 |
+
>>> z = mp.mpf(2)
|
437 |
+
>>> eps = mp.mpf(mp.eps)
|
438 |
+
>>> exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi)
|
439 |
+
>>> # exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral
|
440 |
+
>>> with mp.extraprec(7 * mp.prec): # we need copious amount of precision to sum this highly divergent series
|
441 |
+
... L = mp.levin(method = "levin", variant = "t")
|
442 |
+
... n, s = 0, 0
|
443 |
+
... while 1:
|
444 |
+
... s += (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n))
|
445 |
+
... n += 1
|
446 |
+
... v, e = L.step_psum(s)
|
447 |
+
... if e < eps:
|
448 |
+
... break
|
449 |
+
... if n > 1000: raise RuntimeError("iteration limit exceeded")
|
450 |
+
>>> print(mp.chop(v - exact))
|
451 |
+
0.0
|
452 |
+
>>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)),
|
453 |
+
... [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)])
|
454 |
+
>>> print(mp.chop(v - w))
|
455 |
+
0.0
|
456 |
+
|
457 |
+
These examples run with 15-20 decimal digits precision. For higher precision the
|
458 |
+
working precision must be raised.
|
459 |
+
|
460 |
+
**Examples for nsum**
|
461 |
+
|
462 |
+
Here we calculate Euler's constant as the constant term in the Laurent
|
463 |
+
expansion of `\zeta(s)` at `s=1`. This sum converges extremely slowly because of
|
464 |
+
the logarithmic convergence behaviour of the Dirichlet series for zeta::
|
465 |
+
|
466 |
+
>>> mp.dps = 30
|
467 |
+
>>> z = mp.mpf(10) ** (-10)
|
468 |
+
>>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "l") - 1 / z
|
469 |
+
>>> print(mp.chop(a - mp.euler, tol = 1e-10))
|
470 |
+
0.0
|
471 |
+
|
472 |
+
The Sidi-S transform performs excellently for the alternating series of `\log(2)`::
|
473 |
+
|
474 |
+
>>> a = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "sidi")
|
475 |
+
>>> print(mp.chop(a - mp.log(2)))
|
476 |
+
0.0
|
477 |
+
|
478 |
+
Hypergeometric series can also be summed outside their range of convergence.
|
479 |
+
The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise it will miss the
|
480 |
+
point where the Levin transform converges, resulting in numerical overflow/garbage::
|
481 |
+
|
482 |
+
>>> z = 2 + 1j
|
483 |
+
>>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z)
|
484 |
+
>>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n))
|
485 |
+
>>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)])
|
486 |
+
>>> print(mp.chop(exact-v))
|
487 |
+
0.0
|
488 |
+
|
489 |
+
References:
|
490 |
+
|
491 |
+
[1] E.J. Weniger - "Nonlinear Sequence Transformations for the Acceleration of
|
492 |
+
Convergence and the Summation of Divergent Series" arXiv:math/0306302
|
493 |
+
|
494 |
+
[2] A. Sidi - "Practical Extrapolation Methods"
|
495 |
+
|
496 |
+
[3] H.H.H. Homeier - "Scalar Levin-Type Sequence Transformations" arXiv:math/0005209
|
497 |
+
|
498 |
+
"""
|
499 |
+
|
500 |
+
def __init__(self, method = "levin", variant = "u"):
|
501 |
+
self.variant = variant
|
502 |
+
self.n = 0
|
503 |
+
self.a0 = 0
|
504 |
+
self.theta = 1
|
505 |
+
self.A = []
|
506 |
+
self.B = []
|
507 |
+
self.last = 0
|
508 |
+
self.last_s = False
|
509 |
+
|
510 |
+
if method == "levin":
|
511 |
+
self.factor = self.factor_levin
|
512 |
+
elif method == "sidi":
|
513 |
+
self.factor = self.factor_sidi
|
514 |
+
else:
|
515 |
+
raise ValueError("levin: unknown method \"%s\"" % method)
|
516 |
+
|
517 |
+
def factor_levin(self, i):
|
518 |
+
# original levin
|
519 |
+
# [1] p.50,e.7.5-7 (with n-j replaced by i)
|
520 |
+
return (self.theta + i) * (self.theta + self.n - 1) ** (self.n - i - 2) / self.ctx.mpf(self.theta + self.n) ** (self.n - i - 1)
|
521 |
+
|
522 |
+
def factor_sidi(self, i):
|
523 |
+
# sidi analogon to levin (factorial series)
|
524 |
+
# [1] p.59,e.8.3-16 (with n-j replaced by i)
|
525 |
+
return (self.theta + self.n - 1) * (self.theta + self.n - 2) / self.ctx.mpf((self.theta + 2 * self.n - i - 2) * (self.theta + 2 * self.n - i - 3))
|
526 |
+
|
527 |
+
def run(self, s, a0, a1 = 0):
|
528 |
+
if self.variant=="t":
|
529 |
+
# levin t
|
530 |
+
w=a0
|
531 |
+
elif self.variant=="u":
|
532 |
+
# levin u
|
533 |
+
w=a0*(self.theta+self.n)
|
534 |
+
elif self.variant=="v":
|
535 |
+
# levin v
|
536 |
+
w=a0*a1/(a0-a1)
|
537 |
+
else:
|
538 |
+
assert False, "unknown variant"
|
539 |
+
|
540 |
+
if w==0:
|
541 |
+
raise ValueError("levin: zero weight")
|
542 |
+
|
543 |
+
self.A.append(s/w)
|
544 |
+
self.B.append(1/w)
|
545 |
+
|
546 |
+
for i in range(self.n-1,-1,-1):
|
547 |
+
if i==self.n-1:
|
548 |
+
f=1
|
549 |
+
else:
|
550 |
+
f=self.factor(i)
|
551 |
+
|
552 |
+
self.A[i]=self.A[i+1]-f*self.A[i]
|
553 |
+
self.B[i]=self.B[i+1]-f*self.B[i]
|
554 |
+
|
555 |
+
self.n+=1
|
556 |
+
|
557 |
+
###########################################################################
|
558 |
+
|
559 |
+
def update_psum(self,S):
|
560 |
+
"""
|
561 |
+
This routine applies the convergence acceleration to the list of partial sums.
|
562 |
+
|
563 |
+
A = sum(a_k, k = 0..infinity)
|
564 |
+
s_n = sum(a_k, k = 0..n)
|
565 |
+
|
566 |
+
v, e = ...update_psum([s_0, s_1,..., s_k])
|
567 |
+
|
568 |
+
output:
|
569 |
+
v current estimate of the series A
|
570 |
+
e an error estimate which is simply the difference between the current
|
571 |
+
estimate and the last estimate.
|
572 |
+
"""
|
573 |
+
|
574 |
+
if self.variant!="v":
|
575 |
+
if self.n==0:
|
576 |
+
self.run(S[0],S[0])
|
577 |
+
while self.n<len(S):
|
578 |
+
self.run(S[self.n],S[self.n]-S[self.n-1])
|
579 |
+
else:
|
580 |
+
if len(S)==1:
|
581 |
+
self.last=0
|
582 |
+
return S[0],abs(S[0])
|
583 |
+
|
584 |
+
if self.n==0:
|
585 |
+
self.a1=S[1]-S[0]
|
586 |
+
self.run(S[0],S[0],self.a1)
|
587 |
+
|
588 |
+
while self.n<len(S)-1:
|
589 |
+
na1=S[self.n+1]-S[self.n]
|
590 |
+
self.run(S[self.n],self.a1,na1)
|
591 |
+
self.a1=na1
|
592 |
+
|
593 |
+
value=self.A[0]/self.B[0]
|
594 |
+
err=abs(value-self.last)
|
595 |
+
self.last=value
|
596 |
+
|
597 |
+
return value,err
|
598 |
+
|
599 |
+
def update(self,X):
|
600 |
+
"""
|
601 |
+
This routine applies the convergence acceleration to the list of individual terms.
|
602 |
+
|
603 |
+
A = sum(a_k, k = 0..infinity)
|
604 |
+
|
605 |
+
v, e = ...update([a_0, a_1,..., a_k])
|
606 |
+
|
607 |
+
output:
|
608 |
+
v current estimate of the series A
|
609 |
+
e an error estimate which is simply the difference between the current
|
610 |
+
estimate and the last estimate.
|
611 |
+
"""
|
612 |
+
|
613 |
+
if self.variant!="v":
|
614 |
+
if self.n==0:
|
615 |
+
self.s=X[0]
|
616 |
+
self.run(self.s,X[0])
|
617 |
+
while self.n<len(X):
|
618 |
+
self.s+=X[self.n]
|
619 |
+
self.run(self.s,X[self.n])
|
620 |
+
else:
|
621 |
+
if len(X)==1:
|
622 |
+
self.last=0
|
623 |
+
return X[0],abs(X[0])
|
624 |
+
|
625 |
+
if self.n==0:
|
626 |
+
self.s=X[0]
|
627 |
+
self.run(self.s,X[0],X[1])
|
628 |
+
|
629 |
+
while self.n<len(X)-1:
|
630 |
+
self.s+=X[self.n]
|
631 |
+
self.run(self.s,X[self.n],X[self.n+1])
|
632 |
+
|
633 |
+
value=self.A[0]/self.B[0]
|
634 |
+
err=abs(value-self.last)
|
635 |
+
self.last=value
|
636 |
+
|
637 |
+
return value,err
|
638 |
+
|
639 |
+
###########################################################################
|
640 |
+
|
641 |
+
def step_psum(self,s):
|
642 |
+
"""
|
643 |
+
This routine applies the convergence acceleration to the partial sums.
|
644 |
+
|
645 |
+
A = sum(a_k, k = 0..infinity)
|
646 |
+
s_n = sum(a_k, k = 0..n)
|
647 |
+
|
648 |
+
v, e = ...step_psum(s_k)
|
649 |
+
|
650 |
+
output:
|
651 |
+
v current estimate of the series A
|
652 |
+
e an error estimate which is simply the difference between the current
|
653 |
+
estimate and the last estimate.
|
654 |
+
"""
|
655 |
+
|
656 |
+
if self.variant!="v":
|
657 |
+
if self.n==0:
|
658 |
+
self.last_s=s
|
659 |
+
self.run(s,s)
|
660 |
+
else:
|
661 |
+
self.run(s,s-self.last_s)
|
662 |
+
self.last_s=s
|
663 |
+
else:
|
664 |
+
if isinstance(self.last_s,bool):
|
665 |
+
self.last_s=s
|
666 |
+
self.last_w=s
|
667 |
+
self.last=0
|
668 |
+
return s,abs(s)
|
669 |
+
|
670 |
+
na1=s-self.last_s
|
671 |
+
self.run(self.last_s,self.last_w,na1)
|
672 |
+
self.last_w=na1
|
673 |
+
self.last_s=s
|
674 |
+
|
675 |
+
value=self.A[0]/self.B[0]
|
676 |
+
err=abs(value-self.last)
|
677 |
+
self.last=value
|
678 |
+
|
679 |
+
return value,err
|
680 |
+
|
681 |
+
def step(self,x):
|
682 |
+
"""
|
683 |
+
This routine applies the convergence acceleration to the individual terms.
|
684 |
+
|
685 |
+
A = sum(a_k, k = 0..infinity)
|
686 |
+
|
687 |
+
v, e = ...step(a_k)
|
688 |
+
|
689 |
+
output:
|
690 |
+
v current estimate of the series A
|
691 |
+
e an error estimate which is simply the difference between the current
|
692 |
+
estimate and the last estimate.
|
693 |
+
"""
|
694 |
+
|
695 |
+
if self.variant!="v":
|
696 |
+
if self.n==0:
|
697 |
+
self.s=x
|
698 |
+
self.run(self.s,x)
|
699 |
+
else:
|
700 |
+
self.s+=x
|
701 |
+
self.run(self.s,x)
|
702 |
+
else:
|
703 |
+
if isinstance(self.last_s,bool):
|
704 |
+
self.last_s=x
|
705 |
+
self.s=0
|
706 |
+
self.last=0
|
707 |
+
return x,abs(x)
|
708 |
+
|
709 |
+
self.s+=self.last_s
|
710 |
+
self.run(self.s,self.last_s,x)
|
711 |
+
self.last_s=x
|
712 |
+
|
713 |
+
value=self.A[0]/self.B[0]
|
714 |
+
err=abs(value-self.last)
|
715 |
+
self.last=value
|
716 |
+
|
717 |
+
return value,err
|
718 |
+
|
719 |
+
def levin(ctx, method = "levin", variant = "u"):
|
720 |
+
L = levin_class(method = method, variant = variant)
|
721 |
+
L.ctx = ctx
|
722 |
+
return L
|
723 |
+
|
724 |
+
levin.__doc__ = levin_class.__doc__
|
725 |
+
defun(levin)
|
726 |
+
|
727 |
+
|
728 |
+
class cohen_alt_class:
|
729 |
+
# cohen_alt: Copyright 2013 Timo Hartmann (thartmann15 at gmail.com)
|
730 |
+
r"""
|
731 |
+
This interface implements the convergence acceleration of alternating series
|
732 |
+
as described in H. Cohen, F.R. Villegas, D. Zagier - "Convergence Acceleration
|
733 |
+
of Alternating Series". This series transformation works only well if the
|
734 |
+
individual terms of the series have an alternating sign. It belongs to the
|
735 |
+
class of linear series transformations (in contrast to the Shanks/Wynn-epsilon
|
736 |
+
or Levin transform). This series transformation is also able to sum some types
|
737 |
+
of divergent series. See the paper for the conditions under which this resummation is
|
738 |
+
mathematically sound.
|
739 |
+
|
740 |
+
Let *A* be the series we want to sum:
|
741 |
+
|
742 |
+
.. math ::
|
743 |
+
|
744 |
+
A = \sum_{k=0}^{\infty} a_k
|
745 |
+
|
746 |
+
Let `s_n` be the partial sums of this series:
|
747 |
+
|
748 |
+
.. math ::
|
749 |
+
|
750 |
+
s_n = \sum_{k=0}^n a_k.
|
751 |
+
|
752 |
+
|
753 |
+
**Interface**
|
754 |
+
|
755 |
+
Calling ``cohen_alt`` returns an object with the following methods.
|
756 |
+
|
757 |
+
Then ``update(...)`` works with the list of individual terms `a_k` and
|
758 |
+
``update_psum(...)`` works with the list of partial sums `s_k`:
|
759 |
+
|
760 |
+
.. code ::
|
761 |
+
|
762 |
+
v, e = ...update([a_0, a_1,..., a_k])
|
763 |
+
v, e = ...update_psum([s_0, s_1,..., s_k])
|
764 |
+
|
765 |
+
*v* is the current estimate for *A*, and *e* is an error estimate which is
|
766 |
+
simply the difference between the current estimate and the last estimate.
|
767 |
+
|
768 |
+
**Examples**
|
769 |
+
|
770 |
+
Here we compute the alternating zeta function using ``update_psum``::
|
771 |
+
|
772 |
+
>>> from mpmath import mp
|
773 |
+
>>> AC = mp.cohen_alt()
|
774 |
+
>>> S, s, n = [], 0, 1
|
775 |
+
>>> while 1:
|
776 |
+
... s += -((-1) ** n) * mp.one / (n * n)
|
777 |
+
... n += 1
|
778 |
+
... S.append(s)
|
779 |
+
... v, e = AC.update_psum(S)
|
780 |
+
... if e < mp.eps:
|
781 |
+
... break
|
782 |
+
... if n > 1000: raise RuntimeError("iteration limit exceeded")
|
783 |
+
>>> print(mp.chop(v - mp.pi ** 2 / 12))
|
784 |
+
0.0
|
785 |
+
|
786 |
+
Here we compute the product `\prod_{n=1}^{\infty} \Gamma(1+1/(2n-1)) / \Gamma(1+1/(2n))`::
|
787 |
+
|
788 |
+
>>> A = []
|
789 |
+
>>> AC = mp.cohen_alt()
|
790 |
+
>>> n = 1
|
791 |
+
>>> while 1:
|
792 |
+
... A.append( mp.loggamma(1 + mp.one / (2 * n - 1)))
|
793 |
+
... A.append(-mp.loggamma(1 + mp.one / (2 * n)))
|
794 |
+
... n += 1
|
795 |
+
... v, e = AC.update(A)
|
796 |
+
... if e < mp.eps:
|
797 |
+
... break
|
798 |
+
... if n > 1000: raise RuntimeError("iteration limit exceeded")
|
799 |
+
>>> v = mp.exp(v)
|
800 |
+
>>> print(mp.chop(v - 1.06215090557106, tol = 1e-12))
|
801 |
+
0.0
|
802 |
+
|
803 |
+
``cohen_alt`` is also accessible through the :func:`~mpmath.nsum` interface::
|
804 |
+
|
805 |
+
>>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a")
|
806 |
+
>>> print(mp.chop(v - mp.log(2)))
|
807 |
+
0.0
|
808 |
+
>>> v = mp.nsum(lambda n: (-1)**n / (2 * n + 1), [0, mp.inf], method = "a")
|
809 |
+
>>> print(mp.chop(v - mp.pi / 4))
|
810 |
+
0.0
|
811 |
+
>>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a")
|
812 |
+
>>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1)))
|
813 |
+
0.0
|
814 |
+
|
815 |
+
"""
|
816 |
+
|
817 |
+
def __init__(self):
|
818 |
+
self.last=0
|
819 |
+
|
820 |
+
def update(self, A):
|
821 |
+
"""
|
822 |
+
This routine applies the convergence acceleration to the list of individual terms.
|
823 |
+
|
824 |
+
A = sum(a_k, k = 0..infinity)
|
825 |
+
|
826 |
+
v, e = ...update([a_0, a_1,..., a_k])
|
827 |
+
|
828 |
+
output:
|
829 |
+
v current estimate of the series A
|
830 |
+
e an error estimate which is simply the difference between the current
|
831 |
+
estimate and the last estimate.
|
832 |
+
"""
|
833 |
+
|
834 |
+
n = len(A)
|
835 |
+
d = (3 + self.ctx.sqrt(8)) ** n
|
836 |
+
d = (d + 1 / d) / 2
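# since 3 - sqrt(8) = 1/(3 + sqrt(8)), this gives d = ((3+sqrt(8))**n + (3-sqrt(8))**n)/2,
# matching the quantity d used in the Cohen-Villegas-Zagier algorithm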
|
837 |
+
b = -self.ctx.one
|
838 |
+
c = -d
|
839 |
+
s = 0
|
840 |
+
|
841 |
+
for k in xrange(n):
|
842 |
+
c = b - c
|
843 |
+
if k % 2 == 0:
|
844 |
+
s = s + c * A[k]
|
845 |
+
else:
|
846 |
+
s = s - c * A[k]
|
847 |
+
b = 2 * (k + n) * (k - n) * b / ((2 * k + 1) * (k + self.ctx.one))
|
848 |
+
|
849 |
+
value = s / d
|
850 |
+
|
851 |
+
err = abs(value - self.last)
|
852 |
+
self.last = value
|
853 |
+
|
854 |
+
return value, err
|
855 |
+
|
856 |
+
def update_psum(self, S):
|
857 |
+
"""
|
858 |
+
This routine applies the convergence acceleration to the list of partial sums.
|
859 |
+
|
860 |
+
A = sum(a_k, k = 0..infinity)
|
861 |
+
s_n = sum(a_k ,k = 0..n)
|
862 |
+
|
863 |
+
v, e = ...update_psum([s_0, s_1,..., s_k])
|
864 |
+
|
865 |
+
output:
|
866 |
+
v current estimate of the series A
|
867 |
+
e an error estimate which is simply the difference between the current
|
868 |
+
estimate and the last estimate.
|
869 |
+
"""
|
870 |
+
|
871 |
+
n = len(S)
|
872 |
+
d = (3 + self.ctx.sqrt(8)) ** n
|
873 |
+
d = (d + 1 / d) / 2
|
874 |
+
b = self.ctx.one
|
875 |
+
s = 0
|
876 |
+
|
877 |
+
for k in xrange(n):
|
878 |
+
b = 2 * (n + k) * (n - k) * b / ((2 * k + 1) * (k + self.ctx.one))
|
879 |
+
s += b * S[k]
|
880 |
+
|
881 |
+
value = s / d
|
882 |
+
|
883 |
+
err = abs(value - self.last)
|
884 |
+
self.last = value
|
885 |
+
|
886 |
+
return value, err
|
887 |
+
|
888 |
+
def cohen_alt(ctx):
|
889 |
+
L = cohen_alt_class()
|
890 |
+
L.ctx = ctx
|
891 |
+
return L
|
892 |
+
|
893 |
+
cohen_alt.__doc__ = cohen_alt_class.__doc__
|
894 |
+
defun(cohen_alt)
|
895 |
+
|
896 |
+
|
897 |
+
@defun
|
898 |
+
def sumap(ctx, f, interval, integral=None, error=False):
|
899 |
+
r"""
|
900 |
+
Evaluates an infinite series of an analytic summand *f* using the
|
901 |
+
Abel-Plana formula
|
902 |
+
|
903 |
+
.. math ::
|
904 |
+
|
905 |
+
\sum_{k=0}^{\infty} f(k) = \int_0^{\infty} f(t) dt + \frac{1}{2} f(0) +
|
906 |
+
i \int_0^{\infty} \frac{f(it)-f(-it)}{e^{2\pi t}-1} dt.
|
907 |
+
|
908 |
+
Unlike the Euler-Maclaurin formula (see :func:`~mpmath.sumem`),
|
909 |
+
the Abel-Plana formula does not require derivatives. However,
|
910 |
+
it only works when `|f(it)-f(-it)|` does not
|
911 |
+
increase too rapidly with `t`.
|
912 |
+
|
913 |
+
**Examples**
|
914 |
+
|
915 |
+
The Abel-Plana formula is particularly useful when the summand
|
916 |
+
decreases like a power of `k`; for example when the sum is a pure
|
917 |
+
zeta function::
|
918 |
+
|
919 |
+
>>> from mpmath import *
|
920 |
+
>>> mp.dps = 25; mp.pretty = True
|
921 |
+
>>> sumap(lambda k: 1/k**2.5, [1,inf])
|
922 |
+
1.34148725725091717975677
|
923 |
+
>>> zeta(2.5)
|
924 |
+
1.34148725725091717975677
|
925 |
+
>>> sumap(lambda k: 1/(k+1j)**(2.5+2.5j), [1,inf])
|
926 |
+
(-3.385361068546473342286084 - 0.7432082105196321803869551j)
|
927 |
+
>>> zeta(2.5+2.5j, 1+1j)
|
928 |
+
(-3.385361068546473342286084 - 0.7432082105196321803869551j)
|
929 |
+
|
930 |
+
If the series is alternating, numerical quadrature along the real
|
931 |
+
line is likely to give poor results, so it is better to evaluate
|
932 |
+
the first term symbolically whenever possible:
|
933 |
+
|
934 |
+
>>> n=3; z=-0.75
|
935 |
+
>>> I = expint(n,-log(z))
|
936 |
+
>>> chop(sumap(lambda k: z**k / k**n, [1,inf], integral=I))
|
937 |
+
-0.6917036036904594510141448
|
938 |
+
>>> polylog(n,z)
|
939 |
+
-0.6917036036904594510141448
|
940 |
+
|
941 |
+
"""
|
942 |
+
prec = ctx.prec
|
943 |
+
try:
|
944 |
+
ctx.prec += 10
|
945 |
+
a, b = interval
|
946 |
+
if b != ctx.inf:
|
947 |
+
raise ValueError("b should be equal to ctx.inf")
|
948 |
+
g = lambda x: f(x+a)
|
949 |
+
if integral is None:
|
950 |
+
i1, err1 = ctx.quad(g, [0,ctx.inf], error=True)
|
951 |
+
else:
|
952 |
+
i1, err1 = integral, 0
|
953 |
+
j = ctx.j
|
954 |
+
p = ctx.pi * 2
|
955 |
+
if ctx._is_real_type(i1):
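# for real-valued f, Schwarz reflection gives g(-it) = conj(g(it)),
# so i*(g(it)-g(-it)) simplifies to -2*im(g(it))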
|
956 |
+
h = lambda t: -2 * ctx.im(g(j*t)) / ctx.expm1(p*t)
|
957 |
+
else:
|
958 |
+
h = lambda t: j*(g(j*t)-g(-j*t)) / ctx.expm1(p*t)
|
959 |
+
i2, err2 = ctx.quad(h, [0,ctx.inf], error=True)
|
960 |
+
err = err1+err2
|
961 |
+
v = i1+i2+0.5*g(ctx.mpf(0))
|
962 |
+
finally:
|
963 |
+
ctx.prec = prec
|
964 |
+
if error:
|
965 |
+
return +v, err
|
966 |
+
return +v
|
967 |
+
|
968 |
+
|
969 |
+
@defun
|
970 |
+
def sumem(ctx, f, interval, tol=None, reject=10, integral=None,
|
971 |
+
adiffs=None, bdiffs=None, verbose=False, error=False,
|
972 |
+
_fast_abort=False):
|
973 |
+
r"""
|
974 |
+
Uses the Euler-Maclaurin formula to compute an approximation accurate
|
975 |
+
to within ``tol`` (which defaults to the present epsilon) of the sum
|
976 |
+
|
977 |
+
.. math ::
|
978 |
+
|
979 |
+
S = \sum_{k=a}^b f(k)
|
980 |
+
|
981 |
+
where `(a,b)` are given by ``interval`` and `a` or `b` may be
|
982 |
+
infinite. The approximation is
|
983 |
+
|
984 |
+
.. math ::
|
985 |
+
|
986 |
+
S \sim \int_a^b f(x) \,dx + \frac{f(a)+f(b)}{2} +
|
987 |
+
\sum_{k=1}^{\infty} \frac{B_{2k}}{(2k)!}
|
988 |
+
\left(f^{(2k-1)}(b)-f^{(2k-1)}(a)\right).
|
989 |
+
|
990 |
+
The last sum in the Euler-Maclaurin formula is not generally
|
991 |
+
convergent (a notable exception is if `f` is a polynomial, in
|
992 |
+
which case Euler-Maclaurin actually gives an exact result).
|
993 |
+
|
994 |
+
The summation is stopped as soon as the quotient between two
|
995 |
+
consecutive terms falls below *reject*. That is, by default
|
996 |
+
(*reject* = 10), the summation is continued as long as each
|
997 |
+
term adds at least one decimal.
|
998 |
+
|
999 |
+
Although not convergent, convergence to a given tolerance can
|
1000 |
+
often be "forced" if `b = \infty` by summing up to `a+N` and then
|
1001 |
+
applying the Euler-Maclaurin formula to the sum over the range
|
1002 |
+
`(a+N+1, \ldots, \infty)`. This procedure is implemented by
|
1003 |
+
:func:`~mpmath.nsum`.
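A rough sketch of that splitting, done by hand for `\sum_{k \ge 1} 1/k^2`
(the cutoff 31 is an arbitrary illustrative choice)::

    from mpmath import mp, fsum, sumem, inf
    mp.dps = 15
    f = lambda k: 1/mp.mpf(k)**2
    head = fsum(f(k) for k in range(1, 32))    # direct sum of the first 31 terms
    tail = sumem(f, [32, inf])                 # Euler-Maclaurin estimate of the remainder
    total = head + tail                        # close to pi**2/6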
|
1004 |
+
|
1005 |
+
By default, numerical quadrature and differentiation are used.
|
1006 |
+
If the symbolic values of the integral and endpoint derivatives
|
1007 |
+
are known, it is more efficient to pass the value of the
|
1008 |
+
integral explicitly as ``integral`` and the derivatives
|
1009 |
+
explicitly as ``adiffs`` and ``bdiffs``. The derivatives
|
1010 |
+
should be given as iterables that yield
|
1011 |
+
`f(a), f'(a), f''(a), \ldots` (and the equivalent for `b`).
|
1012 |
+
|
1013 |
+
**Examples**
|
1014 |
+
|
1015 |
+
Summation of an infinite series, with automatic and symbolic
|
1016 |
+
integral and derivative values (the second should be much faster)::
|
1017 |
+
|
1018 |
+
>>> from mpmath import *
|
1019 |
+
>>> mp.dps = 50; mp.pretty = True
|
1020 |
+
>>> sumem(lambda n: 1/n**2, [32, inf])
|
1021 |
+
0.03174336652030209012658168043874142714132886413417
|
1022 |
+
>>> I = mpf(1)/32
|
1023 |
+
>>> D = ((-1)**n*fac(n+1)*32**(-2-n) for n in range(999))
|
1024 |
+
>>> sumem(lambda n: 1/n**2, [32, inf], integral=I, adiffs=D)
|
1025 |
+
0.03174336652030209012658168043874142714132886413417
|
1026 |
+
|
1027 |
+
An exact evaluation of a finite polynomial sum::
|
1028 |
+
|
1029 |
+
>>> sumem(lambda n: n**5-12*n**2+3*n, [-100000, 200000])
|
1030 |
+
10500155000624963999742499550000.0
|
1031 |
+
>>> print(sum(n**5-12*n**2+3*n for n in range(-100000, 200001)))
|
1032 |
+
10500155000624963999742499550000
|
1033 |
+
|
1034 |
+
"""
|
1035 |
+
tol = tol or +ctx.eps
|
1036 |
+
interval = ctx._as_points(interval)
|
1037 |
+
a = ctx.convert(interval[0])
|
1038 |
+
b = ctx.convert(interval[-1])
|
1039 |
+
err = ctx.zero
|
1040 |
+
prev = 0
|
1041 |
+
M = 10000
|
1042 |
+
if a == ctx.ninf: adiffs = (0 for n in xrange(M))
|
1043 |
+
else: adiffs = adiffs or ctx.diffs(f, a)
|
1044 |
+
if b == ctx.inf: bdiffs = (0 for n in xrange(M))
|
1045 |
+
else: bdiffs = bdiffs or ctx.diffs(f, b)
|
1046 |
+
orig = ctx.prec
|
1047 |
+
#verbose = 1
|
1048 |
+
try:
|
1049 |
+
ctx.prec += 10
|
1050 |
+
s = ctx.zero
|
1051 |
+
for k, (da, db) in enumerate(izip(adiffs, bdiffs)):
|
1052 |
+
if k & 1:
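# only odd k contribute: then k+1 is even, matching the nonzero
# Bernoulli numbers B_2, B_4, ... in the formula above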
|
1053 |
+
term = (db-da) * ctx.bernoulli(k+1) / ctx.factorial(k+1)
|
1054 |
+
mag = abs(term)
|
1055 |
+
if verbose:
|
1056 |
+
print("term", k, "magnitude =", ctx.nstr(mag))
|
1057 |
+
if k > 4 and mag < tol:
|
1058 |
+
s += term
|
1059 |
+
break
|
1060 |
+
elif k > 4 and abs(prev) / mag < reject:
|
1061 |
+
err += mag
|
1062 |
+
if _fast_abort:
|
1063 |
+
return [s, (s, err)][error]
|
1064 |
+
if verbose:
|
1065 |
+
print("Failed to converge")
|
1066 |
+
break
|
1067 |
+
else:
|
1068 |
+
s += term
|
1069 |
+
prev = term
|
1070 |
+
# Endpoint correction
|
1071 |
+
if a != ctx.ninf: s += f(a)/2
|
1072 |
+
if b != ctx.inf: s += f(b)/2
|
1073 |
+
# Tail integral
|
1074 |
+
if verbose:
|
1075 |
+
print("Integrating f(x) from x = %s to %s" % (ctx.nstr(a), ctx.nstr(b)))
|
1076 |
+
if integral:
|
1077 |
+
s += integral
|
1078 |
+
else:
|
1079 |
+
integral, ierr = ctx.quad(f, interval, error=True)
|
1080 |
+
if verbose:
|
1081 |
+
print("Integration error:", ierr)
|
1082 |
+
s += integral
|
1083 |
+
err += ierr
|
1084 |
+
finally:
|
1085 |
+
ctx.prec = orig
|
1086 |
+
if error:
|
1087 |
+
return s, err
|
1088 |
+
else:
|
1089 |
+
return s
|
1090 |
+
|
1091 |
+
@defun
|
1092 |
+
def adaptive_extrapolation(ctx, update, emfun, kwargs):
|
1093 |
+
option = kwargs.get
|
1094 |
+
if ctx._fixed_precision:
|
1095 |
+
tol = option('tol', ctx.eps*2**10)
|
1096 |
+
else:
|
1097 |
+
tol = option('tol', ctx.eps/2**10)
|
1098 |
+
verbose = option('verbose', False)
|
1099 |
+
maxterms = option('maxterms', ctx.dps*10)
|
1100 |
+
method = set(option('method', 'r+s').split('+'))
|
1101 |
+
skip = option('skip', 0)
|
1102 |
+
steps = iter(option('steps', xrange(10, 10**9, 10)))
|
1103 |
+
strict = option('strict')
|
1104 |
+
#steps = (10 for i in xrange(1000))
|
1105 |
+
summer=[]
|
1106 |
+
if 'd' in method or 'direct' in method:
|
1107 |
+
TRY_RICHARDSON = TRY_SHANKS = TRY_EULER_MACLAURIN = False
|
1108 |
+
else:
|
1109 |
+
TRY_RICHARDSON = ('r' in method) or ('richardson' in method)
|
1110 |
+
TRY_SHANKS = ('s' in method) or ('shanks' in method)
|
1111 |
+
TRY_EULER_MACLAURIN = ('e' in method) or \
|
1112 |
+
('euler-maclaurin' in method)
|
1113 |
+
|
1114 |
+
def init_levin(m):
|
1115 |
+
variant = kwargs.get("levin_variant", "u")
|
1116 |
+
if isinstance(variant, str):
|
1117 |
+
if variant == "all":
|
1118 |
+
variant = ["u", "v", "t"]
|
1119 |
+
else:
|
1120 |
+
variant = [variant]
|
1121 |
+
for s in variant:
|
1122 |
+
L = levin_class(method = m, variant = s)
|
1123 |
+
L.ctx = ctx
|
1124 |
+
L.name = m + "(" + s + ")"
|
1125 |
+
summer.append(L)
|
1126 |
+
|
1127 |
+
if ('l' in method) or ('levin' in method):
|
1128 |
+
init_levin("levin")
|
1129 |
+
|
1130 |
+
if ('sidi' in method):
|
1131 |
+
init_levin("sidi")
|
1132 |
+
|
1133 |
+
if ('a' in method) or ('alternating' in method):
|
1134 |
+
L = cohen_alt_class()
|
1135 |
+
L.ctx = ctx
|
1136 |
+
L.name = "alternating"
|
1137 |
+
summer.append(L)
|
1138 |
+
|
1139 |
+
last_richardson_value = 0
|
1140 |
+
shanks_table = []
|
1141 |
+
index = 0
|
1142 |
+
step = 10
|
1143 |
+
partial = []
|
1144 |
+
best = ctx.zero
|
1145 |
+
orig = ctx.prec
|
1146 |
+
try:
|
1147 |
+
if 'workprec' in kwargs:
|
1148 |
+
ctx.prec = kwargs['workprec']
|
1149 |
+
elif TRY_RICHARDSON or TRY_SHANKS or len(summer)!=0:
|
1150 |
+
ctx.prec = (ctx.prec+10) * 4
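# the acceleration methods lose digits to cancellation, so work at
# roughly four times the requested precision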
|
1151 |
+
else:
|
1152 |
+
ctx.prec += 30
|
1153 |
+
while 1:
|
1154 |
+
if index >= maxterms:
|
1155 |
+
break
|
1156 |
+
|
1157 |
+
# Get new batch of terms
|
1158 |
+
try:
|
1159 |
+
step = next(steps)
|
1160 |
+
except StopIteration:
|
1161 |
+
pass
|
1162 |
+
if verbose:
|
1163 |
+
print("-"*70)
|
1164 |
+
print("Adding terms #%i-#%i" % (index, index+step))
|
1165 |
+
update(partial, xrange(index, index+step))
|
1166 |
+
index += step
|
1167 |
+
|
1168 |
+
# Check direct error
|
1169 |
+
best = partial[-1]
|
1170 |
+
error = abs(best - partial[-2])
|
1171 |
+
if verbose:
|
1172 |
+
print("Direct error: %s" % ctx.nstr(error))
|
1173 |
+
if error <= tol:
|
1174 |
+
return best
|
1175 |
+
|
1176 |
+
# Check each extrapolation method
|
1177 |
+
if TRY_RICHARDSON:
|
1178 |
+
value, maxc = ctx.richardson(partial)
|
1179 |
+
# Convergence
|
1180 |
+
richardson_error = abs(value - last_richardson_value)
|
1181 |
+
if verbose:
|
1182 |
+
print("Richardson error: %s" % ctx.nstr(richardson_error))
|
1183 |
+
# Convergence
|
1184 |
+
if richardson_error <= tol:
|
1185 |
+
return value
|
1186 |
+
last_richardson_value = value
|
1187 |
+
# Unreliable due to cancellation
|
1188 |
+
if ctx.eps*maxc > tol:
|
1189 |
+
if verbose:
|
1190 |
+
print("Ran out of precision for Richardson")
|
1191 |
+
TRY_RICHARDSON = False
|
1192 |
+
if richardson_error < error:
|
1193 |
+
error = richardson_error
|
1194 |
+
best = value
|
1195 |
+
if TRY_SHANKS:
|
1196 |
+
shanks_table = ctx.shanks(partial, shanks_table, randomized=True)
|
1197 |
+
row = shanks_table[-1]
|
1198 |
+
if len(row) == 2:
|
1199 |
+
est1 = row[-1]
|
1200 |
+
shanks_error = 0
|
1201 |
+
else:
|
1202 |
+
est1, maxc, est2 = row[-1], abs(row[-2]), row[-3]
|
1203 |
+
shanks_error = abs(est1-est2)
|
1204 |
+
if verbose:
|
1205 |
+
print("Shanks error: %s" % ctx.nstr(shanks_error))
|
1206 |
+
if shanks_error <= tol:
|
1207 |
+
return est1
|
1208 |
+
if ctx.eps*maxc > tol:
|
1209 |
+
if verbose:
|
1210 |
+
print("Ran out of precision for Shanks")
|
1211 |
+
TRY_SHANKS = False
|
1212 |
+
if shanks_error < error:
|
1213 |
+
error = shanks_error
|
1214 |
+
best = est1
|
1215 |
+
for L in summer:
|
1216 |
+
est, lerror = L.update_psum(partial)
|
1217 |
+
if verbose:
|
1218 |
+
print("%s error: %s" % (L.name, ctx.nstr(lerror)))
|
1219 |
+
if lerror <= tol:
|
1220 |
+
return est
|
1221 |
+
if lerror < error:
|
1222 |
+
error = lerror
|
1223 |
+
best = est
|
1224 |
+
if TRY_EULER_MACLAURIN:
|
1225 |
+
if ctx.almosteq(ctx.mpc(ctx.sign(partial[-1]) / ctx.sign(partial[-2])), -1):
|
1226 |
+
if verbose:
|
1227 |
+
print ("NOT using Euler-Maclaurin: the series appears"
|
1228 |
+
" to be alternating, so numerical\n quadrature"
|
1229 |
+
" will most likely fail")
|
1230 |
+
TRY_EULER_MACLAURIN = False
|
1231 |
+
else:
|
1232 |
+
value, em_error = emfun(index, tol)
|
1233 |
+
value += partial[-1]
|
1234 |
+
if verbose:
|
1235 |
+
print("Euler-Maclaurin error: %s" % ctx.nstr(em_error))
|
1236 |
+
if em_error <= tol:
|
1237 |
+
return value
|
1238 |
+
if em_error < error:
|
1239 |
+
best = value
|
1240 |
+
finally:
|
1241 |
+
ctx.prec = orig
|
1242 |
+
if strict:
|
1243 |
+
raise ctx.NoConvergence
|
1244 |
+
if verbose:
|
1245 |
+
print("Warning: failed to converge to target accuracy")
|
1246 |
+
return best
|
1247 |
+
|
1248 |
+
@defun
|
1249 |
+
def nsum(ctx, f, *intervals, **options):
|
1250 |
+
r"""
|
1251 |
+
Computes the sum
|
1252 |
+
|
1253 |
+
.. math :: S = \sum_{k=a}^b f(k)
|
1254 |
+
|
1255 |
+
where `(a, b)` = *interval*, and where `a = -\infty` and/or
|
1256 |
+
`b = \infty` are allowed, or more generally
|
1257 |
+
|
1258 |
+
.. math :: S = \sum_{k_1=a_1}^{b_1} \cdots
|
1259 |
+
\sum_{k_n=a_n}^{b_n} f(k_1,\ldots,k_n)
|
1260 |
+
|
1261 |
+
if multiple intervals are given.
|
1262 |
+
|
1263 |
+
Two examples of infinite series that can be summed by :func:`~mpmath.nsum`,
|
1264 |
+
where the first converges rapidly and the second converges slowly,
|
1265 |
+
are::
|
1266 |
+
|
1267 |
+
>>> from mpmath import *
|
1268 |
+
>>> mp.dps = 15; mp.pretty = True
|
1269 |
+
>>> nsum(lambda n: 1/fac(n), [0, inf])
|
1270 |
+
2.71828182845905
|
1271 |
+
>>> nsum(lambda n: 1/n**2, [1, inf])
|
1272 |
+
1.64493406684823
|
1273 |
+
|
1274 |
+
When appropriate, :func:`~mpmath.nsum` applies convergence acceleration to
|
1275 |
+
accurately estimate the sums of slowly convergent series. If the series is
|
1276 |
+
finite, :func:`~mpmath.nsum` currently does not attempt to perform any
|
1277 |
+
extrapolation, and simply calls :func:`~mpmath.fsum`.
|
1278 |
+
|
1279 |
+
Multidimensional infinite series are reduced to a single-dimensional
|
1280 |
+
series over expanding hypercubes; if both infinite and finite dimensions
|
1281 |
+
are present, the finite ranges are moved innermost. For more advanced
|
1282 |
+
control over the summation order, use nested calls to :func:`~mpmath.nsum`,
|
1283 |
+
or manually rewrite the sum as a single-dimensional series.
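For example, a nested form that makes the summation order explicit (a sketch;
the inner sum is re-evaluated for every value of the outer index, which can be
slow)::

    nsum(lambda m: nsum(lambda n: 1/(m**2 * 2**n), [1, inf]), [1, inf])    # equals zeta(2)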
|
1284 |
+
|
1285 |
+
**Options**
|
1286 |
+
|
1287 |
+
*tol*
|
1288 |
+
Desired maximum final error. Defaults roughly to the
|
1289 |
+
epsilon of the working precision.
|
1290 |
+
|
1291 |
+
*method*
|
1292 |
+
Which summation algorithm to use (described below).
|
1293 |
+
Default: ``'richardson+shanks'``.
|
1294 |
+
|
1295 |
+
*maxterms*
|
1296 |
+
Cancel after at most this many terms. Default: 10*dps.
|
1297 |
+
|
1298 |
+
*steps*
|
1299 |
+
An iterable giving the number of terms to add between
|
1300 |
+
each extrapolation attempt. The default sequence is
|
1301 |
+
[10, 20, 30, 40, ...]. For example, if you know that
|
1302 |
+
approximately 100 terms will be required, efficiency might be
|
1303 |
+
improved by setting this to [100, 10]. Then the first
|
1304 |
+
extrapolation will be performed after 100 terms, the second
|
1305 |
+
after 110, etc.
|
1306 |
+
|
1307 |
+
*verbose*
|
1308 |
+
Print details about progress.
|
1309 |
+
|
1310 |
+
*ignore*
|
1311 |
+
If enabled, any term that raises ``ArithmeticError``
|
1312 |
+
or ``ValueError`` (e.g. through division by zero) is replaced
|
1313 |
+
by a zero. This is convenient for lattice sums with
|
1314 |
+
a singular term near the origin.
|
1315 |
+
|
1316 |
+
**Methods**
|
1317 |
+
|
1318 |
+
Unfortunately, an algorithm that can efficiently sum any infinite
|
1319 |
+
series does not exist. :func:`~mpmath.nsum` implements several different
|
1320 |
+
algorithms that each work well in different cases. The *method*
|
1321 |
+
keyword argument selects a method.
|
1322 |
+
|
1323 |
+
The default method is ``'r+s'``, i.e. both Richardson extrapolation
|
1324 |
+
and the Shanks transformation are attempted. A slower method that
|
1325 |
+
handles more cases is ``'r+s+e'``. For very high precision
|
1326 |
+
summation, or if the summation needs to be fast (for example if
|
1327 |
+
multiple sums need to be evaluated), it is a good idea to
|
1328 |
+
investigate which method works best and use only that one.
|
1329 |
+
|
1330 |
+
``'richardson'`` / ``'r'``:
|
1331 |
+
Uses Richardson extrapolation. Provides useful extrapolation
|
1332 |
+
when `f(k) \sim P(k)/Q(k)` or when `f(k) \sim (-1)^k P(k)/Q(k)`
|
1333 |
+
for polynomials `P` and `Q`. See :func:`~mpmath.richardson` for
|
1334 |
+
additional information.
|
1335 |
+
|
1336 |
+
``'shanks'`` / ``'s'``:
|
1337 |
+
Uses Shanks transformation. Typically provides useful
|
1338 |
+
extrapolation when `f(k) \sim c^k` or when successive terms
|
1339 |
+
alternate signs. Is able to sum some divergent series.
|
1340 |
+
See :func:`~mpmath.shanks` for additional information.
|
1341 |
+
|
1342 |
+
``'levin'`` / ``'l'``:
|
1343 |
+
Uses the Levin transformation. It performs better than the Shanks
|
1344 |
+
transformation for logarithmically convergent or alternating divergent
|
1345 |
+
series. The ``'levin_variant'``-keyword selects the variant. Valid
|
1346 |
+
choices are "u", "t", "v" and "all" whereby "all" uses all three
|
1347 |
+
u, t and v simultaneously (this is good for performance comparison in
|
1348 |
+
conjunction with "verbose=True"). Instead of the Levin transform one can
|
1349 |
+
also use the Sidi-S transform by selecting the method ``'sidi'``.
|
1350 |
+
See :func:`~mpmath.levin` for additional details.
|
1351 |
+
|
1352 |
+
``'alternating'`` / ``'a'``:
|
1353 |
+
This is the convergence acceleration of alternating series developed
|
1354 |
+
by Cohen, Villegas and Zagier.
|
1355 |
+
See :func:`~mpmath.cohen_alt` for additional details.
|
1356 |
+
|
1357 |
+
``'euler-maclaurin'`` / ``'e'``:
|
1358 |
+
Uses the Euler-Maclaurin summation formula to approximate
|
1359 |
+
the remainder sum by an integral. This requires high-order
|
1360 |
+
numerical derivatives and numerical integration. The advantage
|
1361 |
+
of this algorithm is that it works regardless of the
|
1362 |
+
decay rate of `f`, as long as `f` is sufficiently smooth.
|
1363 |
+
See :func:`~mpmath.sumem` for additional information.
|
1364 |
+
|
1365 |
+
``'direct'`` / ``'d'``:
|
1366 |
+
Does not perform any extrapolation. This can be used for
|
1367 |
+
(and should only be used for) rapidly convergent series.
|
1368 |
+
The summation automatically stops when the terms
|
1369 |
+
decrease below the target tolerance.
|
1370 |
+
|
1371 |
+
**Basic examples**
|
1372 |
+
|
1373 |
+
A finite sum::
|
1374 |
+
|
1375 |
+
>>> nsum(lambda k: 1/k, [1, 6])
|
1376 |
+
2.45
|
1377 |
+
|
1378 |
+
Summation of a series going to negative infinity and a doubly
|
1379 |
+
infinite series::
|
1380 |
+
|
1381 |
+
>>> nsum(lambda k: 1/k**2, [-inf, -1])
|
1382 |
+
1.64493406684823
|
1383 |
+
>>> nsum(lambda k: 1/(1+k**2), [-inf, inf])
|
1384 |
+
3.15334809493716
|
1385 |
+
|
1386 |
+
:func:`~mpmath.nsum` handles sums of complex numbers::
|
1387 |
+
|
1388 |
+
>>> nsum(lambda k: (0.5+0.25j)**k, [0, inf])
|
1389 |
+
(1.6 + 0.8j)
|
1390 |
+
|
1391 |
+
The following sum converges very rapidly, so it is most
|
1392 |
+
efficient to sum it by disabling convergence acceleration::
|
1393 |
+
|
1394 |
+
>>> mp.dps = 1000
|
1395 |
+
>>> a = nsum(lambda k: -(-1)**k * k**2 / fac(2*k), [1, inf],
|
1396 |
+
... method='direct')
|
1397 |
+
>>> b = (cos(1)+sin(1))/4
|
1398 |
+
>>> abs(a-b) < mpf('1e-998')
|
1399 |
+
True
|
1400 |
+
|
1401 |
+
**Examples with Richardson extrapolation**
|
1402 |
+
|
1403 |
+
Richardson extrapolation works well for sums over rational
|
1404 |
+
functions, as well as their alternating counterparts::
|
1405 |
+
|
1406 |
+
>>> mp.dps = 50
|
1407 |
+
>>> nsum(lambda k: 1 / k**3, [1, inf],
|
1408 |
+
... method='richardson')
|
1409 |
+
1.2020569031595942853997381615114499907649862923405
|
1410 |
+
>>> zeta(3)
|
1411 |
+
1.2020569031595942853997381615114499907649862923405
|
1412 |
+
|
1413 |
+
>>> nsum(lambda n: (n + 3)/(n**3 + n**2), [1, inf],
|
1414 |
+
... method='richardson')
|
1415 |
+
2.9348022005446793094172454999380755676568497036204
|
1416 |
+
>>> pi**2/2-2
|
1417 |
+
2.9348022005446793094172454999380755676568497036204
|
1418 |
+
|
1419 |
+
>>> nsum(lambda k: (-1)**k / k**3, [1, inf],
|
1420 |
+
... method='richardson')
|
1421 |
+
-0.90154267736969571404980362113358749307373971925537
|
1422 |
+
>>> -3*zeta(3)/4
|
1423 |
+
-0.90154267736969571404980362113358749307373971925538
|
1424 |
+
|
1425 |
+
**Examples with Shanks transformation**
|
1426 |
+
|
1427 |
+
The Shanks transformation works well for geometric series
|
1428 |
+
and typically provides excellent acceleration for Taylor
|
1429 |
+
series near the border of their disk of convergence.
|
1430 |
+
Here we apply it to a series for `\log(2)`, which can be
|
1431 |
+
seen as the Taylor series for `\log(1+x)` with `x = 1`::
|
1432 |
+
|
1433 |
+
>>> nsum(lambda k: -(-1)**k/k, [1, inf],
|
1434 |
+
... method='shanks')
|
1435 |
+
0.69314718055994530941723212145817656807550013436025
|
1436 |
+
>>> log(2)
|
1437 |
+
0.69314718055994530941723212145817656807550013436025
|
1438 |
+
|
1439 |
+
Here we apply it to a slowly convergent geometric series::
|
1440 |
+
|
1441 |
+
>>> nsum(lambda k: mpf('0.995')**k, [0, inf],
|
1442 |
+
... method='shanks')
|
1443 |
+
200.0
|
1444 |
+
|
1445 |
+
Finally, Shanks' method works very well for alternating series
|
1446 |
+
where `f(k) = (-1)^k g(k)`, and often does so regardless of
|
1447 |
+
the exact decay rate of `g(k)`::
|
1448 |
+
|
1449 |
+
>>> mp.dps = 15
|
1450 |
+
>>> nsum(lambda k: (-1)**(k+1) / k**1.5, [1, inf],
|
1451 |
+
... method='shanks')
|
1452 |
+
0.765147024625408
|
1453 |
+
>>> (2-sqrt(2))*zeta(1.5)/2
|
1454 |
+
0.765147024625408
|
1455 |
+
|
1456 |
+
The following slowly convergent alternating series has no known
|
1457 |
+
closed-form value. Evaluating the sum a second time at higher
|
1458 |
+
precision indicates that the value is probably correct::
|
1459 |
+
|
1460 |
+
>>> nsum(lambda k: (-1)**k / log(k), [2, inf],
|
1461 |
+
... method='shanks')
|
1462 |
+
0.924299897222939
|
1463 |
+
>>> mp.dps = 30
|
1464 |
+
>>> nsum(lambda k: (-1)**k / log(k), [2, inf],
|
1465 |
+
... method='shanks')
|
1466 |
+
0.92429989722293885595957018136
|
1467 |
+
|
1468 |
+
**Examples with Levin transformation**
|
1469 |
+
|
1470 |
+
The following example calculates Euler's constant as the constant term in
|
1471 |
+
the Laurent expansion of zeta(s) at s=1. This sum converges extremely slowly
|
1472 |
+
because of the logarithmic convergence behaviour of the Dirichlet series
|
1473 |
+
for zeta.
|
1474 |
+
|
1475 |
+
>>> mp.dps = 30
|
1476 |
+
>>> z = mp.mpf(10) ** (-10)
|
1477 |
+
>>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "levin") - 1 / z
|
1478 |
+
>>> print(mp.chop(a - mp.euler, tol = 1e-10))
|
1479 |
+
0.0
|
1480 |
+
|
1481 |
+
Now we sum the zeta function outside its range of convergence
|
1482 |
+
(attention: This does not work at the negative integers!):
|
1483 |
+
|
1484 |
+
>>> mp.dps = 15
|
1485 |
+
>>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v")
|
1486 |
+
>>> print(mp.chop(w - mp.zeta(-2-3j)))
|
1487 |
+
0.0
|
1488 |
+
|
1489 |
+
The next example resummates an asymptotic series expansion of an integral
|
1490 |
+
related to the exponential integral.
|
1491 |
+
|
1492 |
+
>>> mp.dps = 15
|
1493 |
+
>>> z = mp.mpf(10)
|
1494 |
+
>>> # exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf])
|
1495 |
+
>>> exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral
|
1496 |
+
>>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t")
|
1497 |
+
>>> print(mp.chop(w - exact))
|
1498 |
+
0.0
|
1499 |
+
|
1500 |
+
The following highly divergent asymptotic expansion needs some care. First, we
|
1501 |
+
need a copious amount of working precision. Second, the stepsize must not be
|
1502 |
+
chosen too large, otherwise nsum may miss the point where the Levin transform
|
1503 |
+
converges and reach the point where only numerical garbage is produced due to
|
1504 |
+
numerical cancellation.
|
1505 |
+
|
1506 |
+
>>> mp.dps = 15
|
1507 |
+
>>> z = mp.mpf(2)
|
1508 |
+
>>> # exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi)
|
1509 |
+
>>> exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral
|
1510 |
+
>>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)),
|
1511 |
+
... [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)])
|
1512 |
+
>>> print(mp.chop(w - exact))
|
1513 |
+
0.0
|
1514 |
+
|
1515 |
+
The hypergeometric function can also be summed outside its range of convergence:
|
1516 |
+
|
1517 |
+
>>> mp.dps = 15
|
1518 |
+
>>> z = 2 + 1j
|
1519 |
+
>>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z)
|
1520 |
+
>>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n))
|
1521 |
+
>>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)])
|
1522 |
+
>>> print(mp.chop(exact-v))
|
1523 |
+
0.0
|
1524 |
+
|
1525 |
+
**Examples with Cohen's alternating series resummation**
|
1526 |
+
|
1527 |
+
The next example sums the alternating zeta function:
|
1528 |
+
|
1529 |
+
>>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a")
|
1530 |
+
>>> print(mp.chop(v - mp.log(2)))
|
1531 |
+
0.0
|
1532 |
+
|
1533 |
+
The derivative of the alternating zeta function outside its range of
|
1534 |
+
convergence:
|
1535 |
+
|
1536 |
+
>>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a")
|
1537 |
+
>>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1)))
|
1538 |
+
0.0
|
1539 |
+
|
1540 |
+
**Examples with Euler-Maclaurin summation**
|
1541 |
+
|
1542 |
+
The sum in the following example has the wrong rate of convergence
|
1543 |
+
for either Richardson or Shanks to be effective.
|
1544 |
+
|
1545 |
+
>>> f = lambda k: log(k)/k**2.5
|
1546 |
+
>>> mp.dps = 15
|
1547 |
+
>>> nsum(f, [1, inf], method='euler-maclaurin')
|
1548 |
+
0.38734195032621
|
1549 |
+
>>> -diff(zeta, 2.5)
|
1550 |
+
0.38734195032621
|
1551 |
+
|
1552 |
+
Increasing ``steps`` improves speed at higher precision::
|
1553 |
+
|
1554 |
+
>>> mp.dps = 50
|
1555 |
+
>>> nsum(f, [1, inf], method='euler-maclaurin', steps=[250])
|
1556 |
+
0.38734195032620997271199237593105101319948228874688
|
1557 |
+
>>> -diff(zeta, 2.5)
|
1558 |
+
0.38734195032620997271199237593105101319948228874688
|
1559 |
+
|
1560 |
+
**Divergent series**
|
1561 |
+
|
1562 |
+
The Shanks transformation is able to sum some *divergent*
|
1563 |
+
series. In particular, it is often able to sum Taylor series
|
1564 |
+
beyond their radius of convergence (this is due to a relation
|
1565 |
+
between the Shanks transformation and Pade approximations;
|
1566 |
+
see :func:`~mpmath.pade` for an alternative way to evaluate divergent
|
1567 |
+
Taylor series). Furthermore the Levin-transform examples above
|
1568 |
+
contain some divergent series resummation.
|
1569 |
+
|
1570 |
+
Here we apply it to `\log(1+x)` far outside the region of
|
1571 |
+
convergence::
|
1572 |
+
|
1573 |
+
>>> mp.dps = 50
|
1574 |
+
>>> nsum(lambda k: -(-9)**k/k, [1, inf],
|
1575 |
+
... method='shanks')
|
1576 |
+
2.3025850929940456840179914546843642076011014886288
|
1577 |
+
>>> log(10)
|
1578 |
+
2.3025850929940456840179914546843642076011014886288
|
1579 |
+
|
1580 |
+
A particular type of divergent series that can be summed
|
1581 |
+
using the Shanks transformation is the geometric series.
|
1582 |
+
The result is the same as using the closed-form formula
|
1583 |
+
for an infinite geometric series::
|
1584 |
+
|
1585 |
+
>>> mp.dps = 15
|
1586 |
+
>>> for n in range(-8, 8):
|
1587 |
+
... if n == 1:
|
1588 |
+
... continue
|
1589 |
+
... print("%s %s %s" % (mpf(n), mpf(1)/(1-n),
|
1590 |
+
... nsum(lambda k: n**k, [0, inf], method='shanks')))
|
1591 |
+
...
|
1592 |
+
-8.0 0.111111111111111 0.111111111111111
|
1593 |
+
-7.0 0.125 0.125
|
1594 |
+
-6.0 0.142857142857143 0.142857142857143
|
1595 |
+
-5.0 0.166666666666667 0.166666666666667
|
1596 |
+
-4.0 0.2 0.2
|
1597 |
+
-3.0 0.25 0.25
|
1598 |
+
-2.0 0.333333333333333 0.333333333333333
|
1599 |
+
-1.0 0.5 0.5
|
1600 |
+
0.0 1.0 1.0
|
1601 |
+
2.0 -1.0 -1.0
|
1602 |
+
3.0 -0.5 -0.5
|
1603 |
+
4.0 -0.333333333333333 -0.333333333333333
|
1604 |
+
5.0 -0.25 -0.25
|
1605 |
+
6.0 -0.2 -0.2
|
1606 |
+
7.0 -0.166666666666667 -0.166666666666667
|
1607 |
+
|
1608 |
+
**Multidimensional sums**
|
1609 |
+
|
1610 |
+
Any combination of finite and infinite ranges is allowed for the
|
1611 |
+
summation indices::
|
1612 |
+
|
1613 |
+
>>> mp.dps = 15
|
1614 |
+
>>> nsum(lambda x,y: x+y, [2,3], [4,5])
|
1615 |
+
28.0
|
1616 |
+
>>> nsum(lambda x,y: x/2**y, [1,3], [1,inf])
|
1617 |
+
6.0
|
1618 |
+
>>> nsum(lambda x,y: y/2**x, [1,inf], [1,3])
|
1619 |
+
6.0
|
1620 |
+
>>> nsum(lambda x,y,z: z/(2**x*2**y), [1,inf], [1,inf], [3,4])
|
1621 |
+
7.0
|
1622 |
+
>>> nsum(lambda x,y,z: y/(2**x*2**z), [1,inf], [3,4], [1,inf])
|
1623 |
+
7.0
|
1624 |
+
>>> nsum(lambda x,y,z: x/(2**z*2**y), [3,4], [1,inf], [1,inf])
|
1625 |
+
7.0
|
1626 |
+
|
1627 |
+
Some nice examples of double series with analytic solutions or
|
1628 |
+
reductions to single-dimensional series (see [1])::
|
1629 |
+
|
1630 |
+
>>> nsum(lambda m, n: 1/2**(m*n), [1,inf], [1,inf])
|
1631 |
+
1.60669515241529
|
1632 |
+
>>> nsum(lambda n: 1/(2**n-1), [1,inf])
|
1633 |
+
1.60669515241529
|
1634 |
+
|
1635 |
+
>>> nsum(lambda i,j: (-1)**(i+j)/(i**2+j**2), [1,inf], [1,inf])
|
1636 |
+
0.278070510848213
|
1637 |
+
>>> pi*(pi-3*ln2)/12
|
1638 |
+
0.278070510848213
|
1639 |
+
|
1640 |
+
>>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**2, [1,inf], [1,inf])
|
1641 |
+
0.129319852864168
|
1642 |
+
>>> altzeta(2) - altzeta(1)
|
1643 |
+
0.129319852864168
|
1644 |
+
|
1645 |
+
>>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**3, [1,inf], [1,inf])
|
1646 |
+
0.0790756439455825
|
1647 |
+
>>> altzeta(3) - altzeta(2)
|
1648 |
+
0.0790756439455825
|
1649 |
+
|
1650 |
+
>>> nsum(lambda m,n: m**2*n/(3**m*(n*3**m+m*3**n)),
|
1651 |
+
... [1,inf], [1,inf])
|
1652 |
+
0.28125
|
1653 |
+
>>> mpf(9)/32
|
1654 |
+
0.28125
|
1655 |
+
|
1656 |
+
>>> nsum(lambda i,j: fac(i-1)*fac(j-1)/fac(i+j),
|
1657 |
+
... [1,inf], [1,inf], workprec=400)
|
1658 |
+
1.64493406684823
|
1659 |
+
>>> zeta(2)
|
1660 |
+
1.64493406684823
|
1661 |
+
|
1662 |
+
A hard example of a multidimensional sum is the Madelung constant
|
1663 |
+
in three dimensions (see [2]). The defining sum converges very
|
1664 |
+
slowly and only conditionally, so :func:`~mpmath.nsum` is lucky to
|
1665 |
+
obtain an accurate value through convergence acceleration. The
|
1666 |
+
second evaluation below uses a much more efficient, rapidly
|
1667 |
+
convergent 2D sum::
|
1668 |
+
|
1669 |
+
>>> nsum(lambda x,y,z: (-1)**(x+y+z)/(x*x+y*y+z*z)**0.5,
|
1670 |
+
... [-inf,inf], [-inf,inf], [-inf,inf], ignore=True)
|
1671 |
+
-1.74756459463318
|
1672 |
+
>>> nsum(lambda x,y: -12*pi*sech(0.5*pi * \
|
1673 |
+
... sqrt((2*x+1)**2+(2*y+1)**2))**2, [0,inf], [0,inf])
|
1674 |
+
-1.74756459463318
|
1675 |
+
|
1676 |
+
Another example of a lattice sum in 2D::
|
1677 |
+
|
1678 |
+
>>> nsum(lambda x,y: (-1)**(x+y) / (x**2+y**2), [-inf,inf],
|
1679 |
+
... [-inf,inf], ignore=True)
|
1680 |
+
-2.1775860903036
|
1681 |
+
>>> -pi*ln2
|
1682 |
+
-2.1775860903036
|
1683 |
+
|
1684 |
+
An example of an Eisenstein series::
|
1685 |
+
|
1686 |
+
>>> nsum(lambda m,n: (m+n*1j)**(-4), [-inf,inf], [-inf,inf],
|
1687 |
+
... ignore=True)
|
1688 |
+
(3.1512120021539 + 0.0j)
|
1689 |
+
|
1690 |
+
**References**
|
1691 |
+
|
1692 |
+
1. [Weisstein]_ http://mathworld.wolfram.com/DoubleSeries.html,
|
1693 |
+
2. [Weisstein]_ http://mathworld.wolfram.com/MadelungConstants.html
|
1694 |
+
|
1695 |
+
"""
|
1696 |
+
infinite, g = standardize(ctx, f, intervals, options)
|
1697 |
+
if not infinite:
|
1698 |
+
return +g()
|
1699 |
+
|
1700 |
+
def update(partial_sums, indices):
|
1701 |
+
if partial_sums:
|
1702 |
+
psum = partial_sums[-1]
|
1703 |
+
else:
|
1704 |
+
psum = ctx.zero
|
1705 |
+
for k in indices:
|
1706 |
+
psum = psum + g(ctx.mpf(k))
|
1707 |
+
partial_sums.append(psum)
|
1708 |
+
|
1709 |
+
prec = ctx.prec
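# emfun (below) is handed to adaptive_extrapolation for Euler-Maclaurin
# summation: it evaluates the tail of the series from `point` onwards with
# ctx.sumem at slightly raised working precision and restores ctx.prec
# afterwards.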
|
1710 |
+
|
1711 |
+
def emfun(point, tol):
|
1712 |
+
workprec = ctx.prec
|
1713 |
+
ctx.prec = prec + 10
|
1714 |
+
v = ctx.sumem(g, [point, ctx.inf], tol, error=1)
|
1715 |
+
ctx.prec = workprec
|
1716 |
+
return v
|
1717 |
+
|
1718 |
+
return +ctx.adaptive_extrapolation(update, emfun, options)
|
1719 |
+
|
1720 |
+
|
1721 |
+
def wrapsafe(f):
|
1722 |
+
def g(*args):
|
1723 |
+
try:
|
1724 |
+
return f(*args)
|
1725 |
+
except (ArithmeticError, ValueError):
|
1726 |
+
return 0
|
1727 |
+
return g
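# standardize() below rewrites an n-dimensional summand so that a single
# summation index over [0, inf] remains: finite dimensions are summed out
# eagerly (fold_finite), intervals reaching -inf are mapped onto [0, inf]
# (standardize_infinite), and multiple infinite dimensions are collapsed
# pairwise by summing over L-shaped shells (fold_infinite).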
|
1728 |
+
|
1729 |
+
def standardize(ctx, f, intervals, options):
|
1730 |
+
if options.get("ignore"):
|
1731 |
+
f = wrapsafe(f)
|
1732 |
+
finite = []
|
1733 |
+
infinite = []
|
1734 |
+
for k, points in enumerate(intervals):
|
1735 |
+
a, b = ctx._as_points(points)
|
1736 |
+
if b < a:
|
1737 |
+
return False, (lambda: ctx.zero)
|
1738 |
+
if a == ctx.ninf or b == ctx.inf:
|
1739 |
+
infinite.append((k, (a,b)))
|
1740 |
+
else:
|
1741 |
+
finite.append((k, (int(a), int(b))))
|
1742 |
+
if finite:
|
1743 |
+
f = fold_finite(ctx, f, finite)
|
1744 |
+
if not infinite:
|
1745 |
+
return False, lambda: f(*([0]*len(intervals)))
|
1746 |
+
if infinite:
|
1747 |
+
f = standardize_infinite(ctx, f, infinite)
|
1748 |
+
f = fold_infinite(ctx, f, infinite)
|
1749 |
+
args = [0] * len(intervals)
|
1750 |
+
d = infinite[0][0]
|
1751 |
+
def g(k):
|
1752 |
+
args[d] = k
|
1753 |
+
return f(*args)
|
1754 |
+
return True, g
|
1755 |
+
|
1756 |
+
# backwards compatible itertools.product
|
1757 |
+
def cartesian_product(args):
|
1758 |
+
pools = map(tuple, args)
|
1759 |
+
result = [[]]
|
1760 |
+
for pool in pools:
|
1761 |
+
result = [x+[y] for x in result for y in pool]
|
1762 |
+
for prod in result:
|
1763 |
+
yield tuple(prod)
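# For the list-of-range inputs built by fold_finite, this generator yields
# the same tuples as itertools.product(*args); it is kept as a plain-Python
# equivalent for backwards compatibility.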
|
1764 |
+
|
1765 |
+
def fold_finite(ctx, f, intervals):
|
1766 |
+
if not intervals:
|
1767 |
+
return f
|
1768 |
+
indices = [v[0] for v in intervals]
|
1769 |
+
points = [v[1] for v in intervals]
|
1770 |
+
ranges = [xrange(a, b+1) for (a,b) in points]
|
1771 |
+
def g(*args):
|
1772 |
+
args = list(args)
|
1773 |
+
s = ctx.zero
|
1774 |
+
for xs in cartesian_product(ranges):
|
1775 |
+
for dim, x in zip(indices, xs):
|
1776 |
+
args[dim] = ctx.mpf(x)
|
1777 |
+
s += f(*args)
|
1778 |
+
return s
|
1779 |
+
#print "Folded finite", indices
|
1780 |
+
return g
|
1781 |
+
|
1782 |
+
# Standardize each interval to [0,inf]
|
1783 |
+
def standardize_infinite(ctx, f, intervals):
|
1784 |
+
if not intervals:
|
1785 |
+
return f
|
1786 |
+
dim, [a,b] = intervals[-1]
|
1787 |
+
if a == ctx.ninf:
|
1788 |
+
if b == ctx.inf:
|
1789 |
+
def g(*args):
|
1790 |
+
args = list(args)
|
1791 |
+
k = args[dim]
|
1792 |
+
if k:
|
1793 |
+
s = f(*args)
|
1794 |
+
args[dim] = -k
|
1795 |
+
s += f(*args)
|
1796 |
+
return s
|
1797 |
+
else:
|
1798 |
+
return f(*args)
|
1799 |
+
else:
|
1800 |
+
def g(*args):
|
1801 |
+
args = list(args)
|
1802 |
+
args[dim] = b - args[dim]
|
1803 |
+
return f(*args)
|
1804 |
+
else:
|
1805 |
+
def g(*args):
|
1806 |
+
args = list(args)
|
1807 |
+
args[dim] += a
|
1808 |
+
return f(*args)
|
1809 |
+
#print "Standardized infinity along dimension", dim, a, b
|
1810 |
+
return standardize_infinite(ctx, g, intervals[:-1])
|
1811 |
+
|
1812 |
+
def fold_infinite(ctx, f, intervals):
|
1813 |
+
if len(intervals) < 2:
|
1814 |
+
return f
|
1815 |
+
dim1 = intervals[-2][0]
|
1816 |
+
dim2 = intervals[-1][0]
|
1817 |
+
# Assume intervals are [0,inf] x [0,inf] x ...
|
1818 |
+
def g(*args):
|
1819 |
+
args = list(args)
|
1820 |
+
#args.insert(dim2, None)
|
1821 |
+
n = int(args[dim1])
|
1822 |
+
s = ctx.zero
|
1823 |
+
#y = ctx.mpf(n)
|
1824 |
+
args[dim2] = ctx.mpf(n) #y
|
1825 |
+
for x in xrange(n+1):
|
1826 |
+
args[dim1] = ctx.mpf(x)
|
1827 |
+
s += f(*args)
|
1828 |
+
args[dim1] = ctx.mpf(n) #ctx.mpf(n)
|
1829 |
+
for y in xrange(n):
|
1830 |
+
args[dim2] = ctx.mpf(y)
|
1831 |
+
s += f(*args)
|
1832 |
+
return s
|
1833 |
+
#print "Folded infinite from", len(intervals), "to", (len(intervals)-1)
|
1834 |
+
return fold_infinite(ctx, g, intervals[:-1])
|
1835 |
+
|
1836 |
+
@defun
|
1837 |
+
def nprod(ctx, f, interval, nsum=False, **kwargs):
|
1838 |
+
r"""
|
1839 |
+
Computes the product
|
1840 |
+
|
1841 |
+
.. math ::
|
1842 |
+
|
1843 |
+
P = \prod_{k=a}^b f(k)
|
1844 |
+
|
1845 |
+
where `(a, b)` = *interval*, and where `a = -\infty` and/or
|
1846 |
+
`b = \infty` are allowed.
|
1847 |
+
|
1848 |
+
By default, :func:`~mpmath.nprod` uses the same extrapolation methods as
|
1849 |
+
:func:`~mpmath.nsum`, except applied to the partial products rather than
|
1850 |
+
partial sums, and the same keyword options as for :func:`~mpmath.nsum` are
|
1851 |
+
supported. If ``nsum=True``, the product is instead computed via
|
1852 |
+
:func:`~mpmath.nsum` as
|
1853 |
+
|
1854 |
+
.. math ::
|
1855 |
+
|
1856 |
+
P = \exp\left( \sum_{k=a}^b \log(f(k)) \right).
|
1857 |
+
|
1858 |
+
This is slower, but can sometimes yield better results. It is
|
1859 |
+
also required (and used automatically) when Euler-Maclaurin
|
1860 |
+
summation is requested.
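As a quick illustrative sketch, the identity behind ``nsum=True`` can be
checked directly, here with the convergent product
`\prod_{k \ge 2} (1-1/k^2) = 1/2` used purely as an example::

    from mpmath import mp, nprod, nsum, exp, log, inf
    mp.dps = 25
    f = lambda k: 1 - 1/k**2
    direct   = nprod(f, [2, inf])                         # product evaluated directly
    via_logs = exp(nsum(lambda k: log(f(k)), [2, inf]))   # same product via the log-sum identity
    # both values should agree with the closed form 0.5 to working precision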
|
1861 |
+
|
1862 |
+
**Examples**
|
1863 |
+
|
1864 |
+
A simple finite product::
|
1865 |
+
|
1866 |
+
>>> from mpmath import *
|
1867 |
+
>>> mp.dps = 25; mp.pretty = True
|
1868 |
+
>>> nprod(lambda k: k, [1, 4])
|
1869 |
+
24.0
|
1870 |
+
|
1871 |
+
A large number of infinite products have known exact values,
|
1872 |
+
and can therefore be used as a reference. Most of the following
|
1873 |
+
examples are taken from MathWorld [1].
|
1874 |
+
|
1875 |
+
A few infinite products with simple values are::
|
1876 |
+
|
1877 |
+
>>> 2*nprod(lambda k: (4*k**2)/(4*k**2-1), [1, inf])
|
1878 |
+
3.141592653589793238462643
|
1879 |
+
>>> nprod(lambda k: (1+1/k)**2/(1+2/k), [1, inf])
|
1880 |
+
2.0
|
1881 |
+
>>> nprod(lambda k: (k**3-1)/(k**3+1), [2, inf])
|
1882 |
+
0.6666666666666666666666667
|
1883 |
+
>>> nprod(lambda k: (1-1/k**2), [2, inf])
|
1884 |
+
0.5
|
1885 |
+
|
1886 |
+
Next, several more infinite products with more complicated
|
1887 |
+
values::
|
1888 |
+
|
1889 |
+
>>> nprod(lambda k: exp(1/k**2), [1, inf]); exp(pi**2/6)
|
1890 |
+
5.180668317897115748416626
|
1891 |
+
5.180668317897115748416626
|
1892 |
+
|
1893 |
+
>>> nprod(lambda k: (k**2-1)/(k**2+1), [2, inf]); pi*csch(pi)
|
1894 |
+
0.2720290549821331629502366
|
1895 |
+
0.2720290549821331629502366
|
1896 |
+
|
1897 |
+
>>> nprod(lambda k: (k**4-1)/(k**4+1), [2, inf])
|
1898 |
+
0.8480540493529003921296502
|
1899 |
+
>>> pi*sinh(pi)/(cosh(sqrt(2)*pi)-cos(sqrt(2)*pi))
|
1900 |
+
0.8480540493529003921296502
|
1901 |
+
|
1902 |
+
>>> nprod(lambda k: (1+1/k+1/k**2)**2/(1+2/k+3/k**2), [1, inf])
|
1903 |
+
1.848936182858244485224927
|
1904 |
+
>>> 3*sqrt(2)*cosh(pi*sqrt(3)/2)**2*csch(pi*sqrt(2))/pi
|
1905 |
+
1.848936182858244485224927
|
1906 |
+
|
1907 |
+
>>> nprod(lambda k: (1-1/k**4), [2, inf]); sinh(pi)/(4*pi)
|
1908 |
+
0.9190194775937444301739244
|
1909 |
+
0.9190194775937444301739244
|
1910 |
+
|
1911 |
+
>>> nprod(lambda k: (1-1/k**6), [2, inf])
|
1912 |
+
0.9826842777421925183244759
|
1913 |
+
>>> (1+cosh(pi*sqrt(3)))/(12*pi**2)
|
1914 |
+
0.9826842777421925183244759
|
1915 |
+
|
1916 |
+
>>> nprod(lambda k: (1+1/k**2), [2, inf]); sinh(pi)/(2*pi)
|
1917 |
+
1.838038955187488860347849
|
1918 |
+
1.838038955187488860347849
|
1919 |
+
|
1920 |
+
>>> nprod(lambda n: (1+1/n)**n * exp(1/(2*n)-1), [1, inf])
|
1921 |
+
1.447255926890365298959138
|
1922 |
+
>>> exp(1+euler/2)/sqrt(2*pi)
|
1923 |
+
1.447255926890365298959138
|
1924 |
+
|
1925 |
+
The following two products are equivalent and can be evaluated in
|
1926 |
+
terms of a Jacobi theta function. Pi can be replaced by any value
|
1927 |
+
(as long as convergence is preserved)::
|
1928 |
+
|
1929 |
+
>>> nprod(lambda k: (1-pi**-k)/(1+pi**-k), [1, inf])
|
1930 |
+
0.3838451207481672404778686
|
1931 |
+
>>> nprod(lambda k: tanh(k*log(pi)/2), [1, inf])
|
1932 |
+
0.3838451207481672404778686
|
1933 |
+
>>> jtheta(4,0,1/pi)
|
1934 |
+
0.3838451207481672404778686
|
1935 |
+
|
1936 |
+
This product does not have a known closed form value::
|
1937 |
+
|
1938 |
+
>>> nprod(lambda k: (1-1/2**k), [1, inf])
|
1939 |
+
0.2887880950866024212788997
|
1940 |
+
|
1941 |
+
A product taken from `-\infty`::
|
1942 |
+
|
1943 |
+
>>> nprod(lambda k: 1-k**(-3), [-inf,-2])
|
1944 |
+
0.8093965973662901095786805
|
1945 |
+
>>> cosh(pi*sqrt(3)/2)/(3*pi)
|
1946 |
+
0.8093965973662901095786805
|
1947 |
+
|
1948 |
+
A doubly infinite product::
|
1949 |
+
|
1950 |
+
>>> nprod(lambda k: exp(1/(1+k**2)), [-inf, inf])
|
1951 |
+
23.41432688231864337420035
|
1952 |
+
>>> exp(pi/tanh(pi))
|
1953 |
+
23.41432688231864337420035
|
1954 |
+
|
1955 |
+
A product requiring the use of Euler-Maclaurin summation to compute
|
1956 |
+
an accurate value::
|
1957 |
+
|
1958 |
+
>>> nprod(lambda k: (1-1/k**2.5), [2, inf], method='e')
|
1959 |
+
0.696155111336231052898125
|
1960 |
+
|
1961 |
+
**References**
|
1962 |
+
|
1963 |
+
1. [Weisstein]_ http://mathworld.wolfram.com/InfiniteProduct.html
|
1964 |
+
|
1965 |
+
"""
|
1966 |
+
if nsum or ('e' in kwargs.get('method', '')):
|
1967 |
+
orig = ctx.prec
|
1968 |
+
try:
|
1969 |
+
# TODO: we are evaluating log(1+eps) -> eps, which is
|
1970 |
+
# inaccurate. This currently works because nsum greatly
|
1971 |
+
# increases the working precision. But we should be
|
1972 |
+
# more intelligent and handle the precision here.
|
1973 |
+
ctx.prec += 10
|
1974 |
+
v = ctx.nsum(lambda n: ctx.ln(f(n)), interval, **kwargs)
|
1975 |
+
finally:
|
1976 |
+
ctx.prec = orig
|
1977 |
+
return +ctx.exp(v)
|
1978 |
+
|
1979 |
+
a, b = ctx._as_points(interval)
|
1980 |
+
if a == ctx.ninf:
|
1981 |
+
if b == ctx.inf:
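# doubly infinite product: split into the k=0 factor times the paired
# factors f(-k)*f(k)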
|
1982 |
+
return f(0) * ctx.nprod(lambda k: f(-k) * f(k), [1, ctx.inf], **kwargs)
|
1983 |
+
return ctx.nprod(f, [-b, ctx.inf], **kwargs)
|
1984 |
+
elif b != ctx.inf:
|
1985 |
+
return ctx.fprod(f(ctx.mpf(k)) for k in xrange(int(a), int(b)+1))
|
1986 |
+
|
1987 |
+
a = int(a)
|
1988 |
+
|
1989 |
+
def update(partial_products, indices):
|
1990 |
+
if partial_products:
|
1991 |
+
pprod = partial_products[-1]
|
1992 |
+
else:
|
1993 |
+
pprod = ctx.one
|
1994 |
+
for k in indices:
|
1995 |
+
pprod = pprod * f(a + ctx.mpf(k))
|
1996 |
+
partial_products.append(pprod)
|
1997 |
+
|
1998 |
+
return +ctx.adaptive_extrapolation(update, None, kwargs)
|
1999 |
+
|
2000 |
+
|
2001 |
+
@defun
|
2002 |
+
def limit(ctx, f, x, direction=1, exp=False, **kwargs):
|
2003 |
+
r"""
|
2004 |
+
Computes an estimate of the limit
|
2005 |
+
|
2006 |
+
.. math ::
|
2007 |
+
|
2008 |
+
\lim_{t \to x} f(t)
|
2009 |
+
|
2010 |
+
where `x` may be finite or infinite.
|
2011 |
+
|
2012 |
+
For finite `x`, :func:`~mpmath.limit` evaluates `f(x + d/n)` for
|
2013 |
+
consecutive integer values of `n`, where the approach direction
|
2014 |
+
`d` may be specified using the *direction* keyword argument.
|
2015 |
+
For infinite `x`, :func:`~mpmath.limit` evaluates values of
|
2016 |
+
`f(\mathrm{sign}(x) \cdot n)`.
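As a small illustrative sketch (the function here is just an assumed
example), one-sided limits can be taken by flipping *direction*::

    from mpmath import mp, limit, fabs
    mp.dps = 15
    f = lambda x: x/fabs(x)             # sign function, undefined at 0
    right = limit(f, 0)                 # approach 0 from above: expected +1
    left  = limit(f, 0, direction=-1)   # approach 0 from below: expected -1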
|
2017 |
+
|
2018 |
+
If the approach to the limit is not sufficiently fast to give
|
2019 |
+
an accurate estimate directly, :func:`~mpmath.limit` attempts to find
|
2020 |
+
the limit using Richardson extrapolation or the Shanks
|
2021 |
+
transformation. You can select between these methods using
|
2022 |
+
the *method* keyword (see documentation of :func:`~mpmath.nsum` for
|
2023 |
+
more information).
|
2024 |
+
|
2025 |
+
**Options**
|
2026 |
+
|
2027 |
+
The following options are available with essentially the
|
2028 |
+
same meaning as for :func:`~mpmath.nsum`: *tol*, *method*, *maxterms*,
|
2029 |
+
*steps*, *verbose*.
|
2030 |
+
|
2031 |
+
If the option *exp=True* is set, `f` will be
|
2032 |
+
sampled at exponentially spaced points `n = 2^1, 2^2, 2^3, \ldots`
|
2033 |
+
instead of the linearly spaced points `n = 1, 2, 3, \ldots`.
|
2034 |
+
This can sometimes improve the rate of convergence so that
|
2035 |
+
:func:`~mpmath.limit` may return a more accurate answer (and faster).
|
2036 |
+
However, do note that this can only be used if `f`
|
2037 |
+
supports fast and accurate evaluation for arguments that
|
2038 |
+
are extremely close to the limit point (or if infinite,
|
2039 |
+
very large arguments).
|
2040 |
+
|
2041 |
+
**Examples**
|
2042 |
+
|
2043 |
+
A basic evaluation of a removable singularity::
|
2044 |
+
|
2045 |
+
>>> from mpmath import *
|
2046 |
+
>>> mp.dps = 30; mp.pretty = True
|
2047 |
+
>>> limit(lambda x: (x-sin(x))/x**3, 0)
|
2048 |
+
0.166666666666666666666666666667
|
2049 |
+
|
2050 |
+
Computing the exponential function using its limit definition::
|
2051 |
+
|
2052 |
+
>>> limit(lambda n: (1+3/n)**n, inf)
|
2053 |
+
20.0855369231876677409285296546
|
2054 |
+
>>> exp(3)
|
2055 |
+
20.0855369231876677409285296546
|
2056 |
+
|
2057 |
+
A limit for `\pi`::
|
2058 |
+
|
2059 |
+
>>> f = lambda n: 2**(4*n+1)*fac(n)**4/(2*n+1)/fac(2*n)**2
|
2060 |
+
>>> limit(f, inf)
|
2061 |
+
3.14159265358979323846264338328
|
2062 |
+
|
2063 |
+
Calculating the coefficient in Stirling's formula::
|
2064 |
+
|
2065 |
+
>>> limit(lambda n: fac(n) / (sqrt(n)*(n/e)**n), inf)
|
2066 |
+
2.50662827463100050241576528481
|
2067 |
+
>>> sqrt(2*pi)
|
2068 |
+
2.50662827463100050241576528481
|
2069 |
+
|
2070 |
+
Evaluating Euler's constant `\gamma` using the limit representation
|
2071 |
+
|
2072 |
+
.. math ::
|
2073 |
+
|
2074 |
+
\gamma = \lim_{n \rightarrow \infty } \left[ \left(
|
2075 |
+
\sum_{k=1}^n \frac{1}{k} \right) - \log(n) \right]
|
2076 |
+
|
2077 |
+
(which converges notoriously slowly)::
|
2078 |
+
|
2079 |
+
>>> f = lambda n: sum([mpf(1)/k for k in range(1,int(n)+1)]) - log(n)
|
2080 |
+
>>> limit(f, inf)
|
2081 |
+
0.577215664901532860606512090082
|
2082 |
+
>>> +euler
|
2083 |
+
0.577215664901532860606512090082
|
2084 |
+
|
2085 |
+
With default settings, the following limit converges too slowly
|
2086 |
+
to be evaluated accurately. Changing to exponential sampling
|
2087 |
+
however gives a perfect result::
|
2088 |
+
|
2089 |
+
>>> f = lambda x: sqrt(x**3+x**2)/(sqrt(x**3)+x)
|
2090 |
+
>>> limit(f, inf)
|
2091 |
+
0.992831158558330281129249686491
|
2092 |
+
>>> limit(f, inf, exp=True)
|
2093 |
+
1.0
|
2094 |
+
|
2095 |
+
"""
|
2096 |
+
|
2097 |
+
if ctx.isinf(x):
|
2098 |
+
direction = ctx.sign(x)
|
2099 |
+
g = lambda k: f(ctx.mpf(k+1)*direction)
|
2100 |
+
else:
|
2101 |
+
direction *= ctx.one
|
2102 |
+
g = lambda k: f(x + direction/(k+1))
|
2103 |
+
if exp:
|
2104 |
+
h = g
|
2105 |
+
g = lambda k: h(2**k)
|
2106 |
+
|
2107 |
+
def update(values, indices):
|
2108 |
+
for k in indices:
|
2109 |
+
values.append(g(k+1))
|
2110 |
+
|
2111 |
+
# XXX: steps used by nsum don't work well
|
2112 |
+
if not 'steps' in kwargs:
|
2113 |
+
kwargs['steps'] = [10]
|
2114 |
+
|
2115 |
+
return +ctx.adaptive_extrapolation(update, None, kwargs)
|
env-llmeval/lib/python3.10/site-packages/mpmath/calculus/inverselaplace.py
ADDED
@@ -0,0 +1,973 @@
1 |
+
# contributed to mpmath by Kristopher L. Kuhlman, February 2017
|
2 |
+
# contributed to mpmath by Guillermo Navas-Palencia, February 2022
|
3 |
+
|
4 |
+
class InverseLaplaceTransform(object):
|
5 |
+
r"""
|
6 |
+
Inverse Laplace transform methods are implemented using this
|
7 |
+
class, in order to simplify the code and provide a common
|
8 |
+
infrastructure.
|
9 |
+
|
10 |
+
Implement a custom inverse Laplace transform algorithm by
|
11 |
+
subclassing :class:`InverseLaplaceTransform` and implementing the
|
12 |
+
appropriate methods. The subclass can then be used by
|
13 |
+
:func:`~mpmath.invertlaplace` by passing it as the *method*
|
14 |
+
argument.
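A minimal sketch of such a subclass (the class name and bodies below are
illustrative placeholders only)::

    class MyInversionMethod(InverseLaplaceTransform):

        def calc_laplace_parameter(self, t, **kwargs):
            # choose and store the vector of abscissa self.p for time t
            self.t = self.ctx.convert(t)
            self.p = ...

        def calc_time_domain_solution(self, fp, t):
            # combine the samples fp[k] = fbar(self.p[k]) into an
            # estimate of f(t)
            return ...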
|
15 |
+
"""
|
16 |
+
|
17 |
+
def __init__(self, ctx):
|
18 |
+
self.ctx = ctx
|
19 |
+
|
20 |
+
def calc_laplace_parameter(self, t, **kwargs):
|
21 |
+
r"""
|
22 |
+
Determine the vector of Laplace parameter values needed for an
|
23 |
+
algorithm; this will depend on the choice of algorithm (de
|
24 |
+
Hoog is default), the algorithm-specific parameters passed (or
|
25 |
+
default ones), and desired time.
|
26 |
+
"""
|
27 |
+
raise NotImplementedError
|
28 |
+
|
29 |
+
def calc_time_domain_solution(self, fp):
|
30 |
+
r"""
|
31 |
+
Compute the time domain solution, after computing the
|
32 |
+
Laplace-space function evaluations at the abscissa required
|
33 |
+
for the algorithm. Abscissa computed for one algorithm are
|
34 |
+
typically not useful for another algorithm.
|
35 |
+
"""
|
36 |
+
raise NotImplementedError
|
37 |
+
|
38 |
+
|
39 |
+
class FixedTalbot(InverseLaplaceTransform):
|
40 |
+
|
41 |
+
def calc_laplace_parameter(self, t, **kwargs):
|
42 |
+
r"""The "fixed" Talbot method deforms the Bromwich contour towards
|
43 |
+
`-\infty` in the shape of a parabola. Traditionally the Talbot
|
44 |
+
algorithm has adjustable parameters, but the "fixed" version
|
45 |
+
does not. The `r` parameter can be passed in explicitly
|
46 |
+
if you want to override the default given by (Abate & Valko,
|
47 |
+
2004).
|
48 |
+
|
49 |
+
The Laplace parameter is sampled along a parabola opening
|
50 |
+
along the negative imaginary axis, with the base of the
|
51 |
+
parabola along the real axis at
|
52 |
+
`p=\frac{r}{t_\mathrm{max}}`. As the number of terms used in
|
53 |
+
the approximation (degree) grows, the abscissa required for
|
54 |
+
function evaluation tend towards `-\infty`, requiring high
|
55 |
+
precision to prevent overflow. If any poles, branch cuts or
|
56 |
+
other singularities exist such that the deformed Bromwich
|
57 |
+
contour lies to the left of the singularity, the method will
|
58 |
+
fail.
|
59 |
+
|
60 |
+
**Optional arguments**
|
61 |
+
|
62 |
+
:class:`~mpmath.calculus.inverselaplace.FixedTalbot.calc_laplace_parameter`
|
63 |
+
recognizes the following keywords
|
64 |
+
|
65 |
+
*tmax*
|
66 |
+
maximum time associated with vector of times
|
67 |
+
(typically just the time requested)
|
68 |
+
*degree*
|
69 |
+
integer order of approximation (M = number of terms)
|
70 |
+
*r*
|
71 |
+
abscissa for `p_0` (otherwise computed using rule
|
72 |
+
of thumb `2M/5`)
|
73 |
+
|
74 |
+
The working precision will be increased according to a rule of
|
75 |
+
thumb. If 'degree' is not specified, the working precision and
|
76 |
+
degree are chosen to hopefully achieve the dps of the calling
|
77 |
+
context. If 'degree' is specified, the working precision is
|
78 |
+
chosen to achieve maximum resulting precision for the
|
79 |
+
specified degree.
|
80 |
+
|
81 |
+
.. math ::
|
82 |
+
|
83 |
+
p_0=\frac{r}{t}
|
84 |
+
|
85 |
+
.. math ::
|
86 |
+
|
87 |
+
p_i=\frac{i r \pi}{Mt_\mathrm{max}}\left[\cot\left(
|
88 |
+
\frac{i\pi}{M}\right) + j \right] \qquad 1\le i <M
|
89 |
+
|
90 |
+
where `j=\sqrt{-1}`, `r=2M/5`, and `t_\mathrm{max}` is the
|
91 |
+
maximum specified time.
|
92 |
+
|
93 |
+
"""
|
94 |
+
|
95 |
+
# required
|
96 |
+
# ------------------------------
|
97 |
+
# time of desired approximation
|
98 |
+
self.t = self.ctx.convert(t)
|
99 |
+
|
100 |
+
# optional
|
101 |
+
# ------------------------------
|
102 |
+
# maximum time desired (used for scaling) default is requested
|
103 |
+
# time.
|
104 |
+
self.tmax = self.ctx.convert(kwargs.get('tmax', self.t))
|
105 |
+
|
106 |
+
# empirical relationships used here based on a linear fit of
|
107 |
+
# requested and delivered dps for exponentially decaying time
|
108 |
+
# functions for requested dps up to 512.
|
109 |
+
|
110 |
+
if 'degree' in kwargs:
|
111 |
+
self.degree = kwargs['degree']
|
112 |
+
self.dps_goal = self.degree
|
113 |
+
else:
|
114 |
+
self.dps_goal = int(1.72*self.ctx.dps)
|
115 |
+
self.degree = max(12, int(1.38*self.dps_goal))
|
116 |
+
|
117 |
+
M = self.degree
|
118 |
+
|
119 |
+
# this is adjusting the dps of the calling context hopefully
|
120 |
+
# the caller doesn't monkey around with it between calling
|
121 |
+
# this routine and calc_time_domain_solution()
|
122 |
+
self.dps_orig = self.ctx.dps
|
123 |
+
self.ctx.dps = self.dps_goal
|
124 |
+
|
125 |
+
# Abate & Valko rule of thumb for r parameter
|
126 |
+
self.r = kwargs.get('r', self.ctx.fraction(2, 5)*M)
|
127 |
+
|
128 |
+
self.theta = self.ctx.linspace(0.0, self.ctx.pi, M+1)
|
129 |
+
|
130 |
+
self.cot_theta = self.ctx.matrix(M, 1)
|
131 |
+
self.cot_theta[0] = 0 # not used
|
132 |
+
|
133 |
+
# all but time-dependent part of p
|
134 |
+
self.delta = self.ctx.matrix(M, 1)
|
135 |
+
self.delta[0] = self.r
|
136 |
+
|
137 |
+
for i in range(1, M):
|
138 |
+
self.cot_theta[i] = self.ctx.cot(self.theta[i])
|
139 |
+
self.delta[i] = self.r*self.theta[i]*(self.cot_theta[i] + 1j)
|
140 |
+
|
141 |
+
self.p = self.ctx.matrix(M, 1)
|
142 |
+
self.p = self.delta/self.tmax
|
143 |
+
|
144 |
+
# NB: p is complex (mpc)
|
145 |
+
|
146 |
+
def calc_time_domain_solution(self, fp, t, manual_prec=False):
|
147 |
+
r"""The fixed Talbot time-domain solution is computed from the
|
148 |
+
Laplace-space function evaluations using
|
149 |
+
|
150 |
+
.. math ::
|
151 |
+
|
152 |
+
f(t,M)=\frac{2}{5t}\sum_{k=0}^{M-1}\Re \left[
|
153 |
+
\gamma_k \bar{f}(p_k)\right]
|
154 |
+
|
155 |
+
where
|
156 |
+
|
157 |
+
.. math ::
|
158 |
+
|
159 |
+
\gamma_0 = \frac{1}{2}e^{r}\bar{f}(p_0)
|
160 |
+
|
161 |
+
.. math ::
|
162 |
+
|
163 |
+
\gamma_k = e^{tp_k}\left\lbrace 1 + \frac{jk\pi}{M}\left[1 +
|
164 |
+
\cot \left( \frac{k \pi}{M} \right)^2 \right] - j\cot\left(
|
165 |
+
\frac{k \pi}{M}\right)\right \rbrace \qquad 1\le k<M.
|
166 |
+
|
167 |
+
Again, `j=\sqrt{-1}`.
|
168 |
+
|
169 |
+
Before calling this function, call
|
170 |
+
:class:`~mpmath.calculus.inverselaplace.FixedTalbot.calc_laplace_parameter`
|
171 |
+
to set the parameters and compute the required coefficients.
|
172 |
+
|
173 |
+
**References**
|
174 |
+
|
175 |
+
1. Abate, J., P. Valko (2004). Multi-precision Laplace
|
176 |
+
transform inversion. *International Journal for Numerical
|
177 |
+
Methods in Engineering* 60:979-993,
|
178 |
+
http://dx.doi.org/10.1002/nme.995
|
179 |
+
2. Talbot, A. (1979). The accurate numerical inversion of
|
180 |
+
Laplace transforms. *IMA Journal of Applied Mathematics*
|
181 |
+
23(1):97, http://dx.doi.org/10.1093/imamat/23.1.97
|
182 |
+
"""
|
183 |
+
|
184 |
+
# required
|
185 |
+
# ------------------------------
|
186 |
+
self.t = self.ctx.convert(t)
|
187 |
+
|
188 |
+
# assume fp was computed from p matrix returned from
|
189 |
+
# calc_laplace_parameter(), so is already a list or matrix of
|
190 |
+
# mpmath 'mpc' types
|
191 |
+
|
192 |
+
# these were computed in previous call to
|
193 |
+
# calc_laplace_parameter()
|
194 |
+
theta = self.theta
|
195 |
+
delta = self.delta
|
196 |
+
M = self.degree
|
197 |
+
p = self.p
|
198 |
+
r = self.r
|
199 |
+
|
200 |
+
ans = self.ctx.matrix(M, 1)
|
201 |
+
ans[0] = self.ctx.exp(delta[0])*fp[0]/2
|
202 |
+
|
203 |
+
for i in range(1, M):
|
204 |
+
ans[i] = self.ctx.exp(delta[i])*fp[i]*(
|
205 |
+
1 + 1j*theta[i]*(1 + self.cot_theta[i]**2) -
|
206 |
+
1j*self.cot_theta[i])
|
207 |
+
|
208 |
+
result = self.ctx.fraction(2, 5)*self.ctx.fsum(ans)/self.t
|
209 |
+
|
210 |
+
# setting dps back to value when calc_laplace_parameter was
|
211 |
+
# called, unless flag is set.
|
212 |
+
if not manual_prec:
|
213 |
+
self.ctx.dps = self.dps_orig
|
214 |
+
|
215 |
+
return result.real
|
216 |
+
|
217 |
+
|
218 |
+
# ****************************************
|
219 |
+
|
220 |
+
class Stehfest(InverseLaplaceTransform):
|
221 |
+
|
222 |
+
def calc_laplace_parameter(self, t, **kwargs):
|
223 |
+
r"""
|
224 |
+
The Gaver-Stehfest method is a discrete approximation of the
|
225 |
+
Widder-Post inversion algorithm, rather than a direct
|
226 |
+
approximation of the Bromwich contour integral.
|
227 |
+
|
228 |
+
The method uses abscissa along the real axis only, and therefore has
|
229 |
+
issues inverting oscillatory functions (which have poles in
|
230 |
+
pairs away from the real axis).
|
231 |
+
|
232 |
+
The working precision will be increased according to a rule of
|
233 |
+
thumb. If 'degree' is not specified, the working precision and
|
234 |
+
degree are chosen to hopefully achieve the dps of the calling
|
235 |
+
context. If 'degree' is specified, the working precision is
|
236 |
+
chosen to achieve maximum resulting precision for the
|
237 |
+
specified degree.
|
238 |
+
|
239 |
+
.. math ::
|
240 |
+
|
241 |
+
p_k = \frac{k \log 2}{t} \qquad 1 \le k \le M
|
242 |
+
"""
|
243 |
+
|
244 |
+
# required
|
245 |
+
# ------------------------------
|
246 |
+
# time of desired approximation
|
247 |
+
self.t = self.ctx.convert(t)
|
248 |
+
|
249 |
+
# optional
|
250 |
+
# ------------------------------
|
251 |
+
|
252 |
+
# empirical relationships used here based on a linear fit of
|
253 |
+
# requested and delivered dps for exponentially decaying time
|
254 |
+
# functions for requested dps up to 512.
|
255 |
+
|
256 |
+
if 'degree' in kwargs:
|
257 |
+
self.degree = kwargs['degree']
|
258 |
+
self.dps_goal = int(1.38*self.degree)
|
259 |
+
else:
|
260 |
+
self.dps_goal = int(2.93*self.ctx.dps)
|
261 |
+
self.degree = max(16, self.dps_goal)
|
262 |
+
|
263 |
+
# _coeff routine requires even degree
|
264 |
+
if self.degree % 2 > 0:
|
265 |
+
self.degree += 1
|
266 |
+
|
267 |
+
M = self.degree
|
268 |
+
|
269 |
+
# this is adjusting the dps of the calling context
|
270 |
+
# hopefully the caller doesn't monkey around with it
|
271 |
+
# between calling this routine and calc_time_domain_solution()
|
272 |
+
self.dps_orig = self.ctx.dps
|
273 |
+
self.ctx.dps = self.dps_goal
|
274 |
+
|
275 |
+
self.V = self._coeff()
|
276 |
+
self.p = self.ctx.matrix(self.ctx.arange(1, M+1))*self.ctx.ln2/self.t
|
277 |
+
|
278 |
+
# NB: p is real (mpf)
|
279 |
+
|
280 |
+
def _coeff(self):
|
281 |
+
r"""Salzer summation weights (aka, "Stehfest coefficients")
|
282 |
+
only depend on the approximation order (M) and the precision"""
|
283 |
+
|
284 |
+
M = self.degree
|
285 |
+
M2 = int(M/2) # checked earlier that M is even
|
286 |
+
|
287 |
+
V = self.ctx.matrix(M, 1)
|
288 |
+
|
289 |
+
# Salzer summation weights
|
290 |
+
# get very large in magnitude and oscillate in sign,
|
291 |
+
# if the precision is not high enough, there will be
|
292 |
+
# catastrophic cancellation
|
293 |
+
for k in range(1, M+1):
|
294 |
+
z = self.ctx.matrix(min(k, M2)+1, 1)
|
295 |
+
for j in range(int((k+1)/2), min(k, M2)+1):
|
296 |
+
z[j] = (self.ctx.power(j, M2)*self.ctx.fac(2*j)/
|
297 |
+
(self.ctx.fac(M2-j)*self.ctx.fac(j)*
|
298 |
+
self.ctx.fac(j-1)*self.ctx.fac(k-j)*
|
299 |
+
self.ctx.fac(2*j-k)))
|
300 |
+
V[k-1] = self.ctx.power(-1, k+M2)*self.ctx.fsum(z)
|
301 |
+
|
302 |
+
return V
|
303 |
+
|
304 |
+
def calc_time_domain_solution(self, fp, t, manual_prec=False):
|
305 |
+
r"""Compute time-domain Stehfest algorithm solution.
|
306 |
+
|
307 |
+
.. math ::
|
308 |
+
|
309 |
+
f(t,M) = \frac{\log 2}{t} \sum_{k=1}^{M} V_k \bar{f}\left(
|
310 |
+
p_k \right)
|
311 |
+
|
312 |
+
where
|
313 |
+
|
314 |
+
.. math ::
|
315 |
+
|
316 |
+
V_k = (-1)^{k + N/2} \sum^{\min(k,N/2)}_{i=\lfloor(k+1)/2 \rfloor}
|
317 |
+
\frac{i^{\frac{N}{2}}(2i)!}{\left(\frac{N}{2}-i \right)! \, i! \,
|
318 |
+
\left(i-1 \right)! \, \left(k-i\right)! \, \left(2i-k \right)!}
|
319 |
+
|
320 |
+
As the degree increases, the abscissa (`p_k`) only increase
|
321 |
+
linearly towards `\infty`, but the Stehfest coefficients
|
322 |
+
(`V_k`) alternate in sign and grow rapidly in magnitude,
|
323 |
+
requiring high precision to prevent overflow or loss of
|
324 |
+
significance when evaluating the sum.
|
325 |
+
|
326 |
+
**References**
|
327 |
+
|
328 |
+
1. Widder, D. (1941). *The Laplace Transform*. Princeton.
|
329 |
+
2. Stehfest, H. (1970). Algorithm 368: numerical inversion of
|
330 |
+
Laplace transforms. *Communications of the ACM* 13(1):47-49,
|
331 |
+
http://dx.doi.org/10.1145/361953.361969
|
332 |
+
|
333 |
+
"""
|
334 |
+
|
335 |
+
# required
|
336 |
+
self.t = self.ctx.convert(t)
|
337 |
+
|
338 |
+
# assume fp was computed from p matrix returned from
|
339 |
+
# calc_laplace_parameter(), so is already
|
340 |
+
# a list or matrix of mpmath 'mpf' types
|
341 |
+
|
342 |
+
result = self.ctx.fdot(self.V, fp)*self.ctx.ln2/self.t
|
343 |
+
|
344 |
+
# setting dps back to value when calc_laplace_parameter was called
|
345 |
+
if not manual_prec:
|
346 |
+
self.ctx.dps = self.dps_orig
|
347 |
+
|
348 |
+
# ignore any small imaginary part
|
349 |
+
return result.real
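# Illustrative usage sketch: the weights above are normally exercised
# through the top-level driver, e.g.
#     from mpmath import mp, invertlaplace
#     mp.dps = 30
#     invertlaplace(lambda p: 1/p, 2.0, method='stehfest')
# which inverts the transform of f(t) = 1 and should return a value close
# to 1 at the working precision.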
|
350 |
+
|
351 |
+
|
352 |
+
# ****************************************
|
353 |
+
|
354 |
+
class deHoog(InverseLaplaceTransform):
|
355 |
+
|
356 |
+
def calc_laplace_parameter(self, t, **kwargs):
|
357 |
+
r"""the de Hoog, Knight & Stokes algorithm is an
|
358 |
+
accelerated form of the Fourier series numerical
|
359 |
+
inverse Laplace transform algorithms.
|
360 |
+
|
361 |
+
.. math ::
|
362 |
+
|
363 |
+
p_k = \gamma + \frac{j k \pi}{T} \qquad 0 \le k < 2M+1
|
364 |
+
|
365 |
+
where
|
366 |
+
|
367 |
+
.. math ::
|
368 |
+
|
369 |
+
\gamma = \alpha - \frac{\log \mathrm{tol}}{2T},
|
370 |
+
|
371 |
+
`j=\sqrt{-1}`, `T = 2t_\mathrm{max}` is a scaled time,
|
372 |
+
`\alpha=10^{-\mathrm{dps\_goal}}` is the real part of the
|
373 |
+
rightmost pole or singularity, which is chosen based on the
|
374 |
+
desired accuracy (assuming the rightmost singularity is 0),
|
375 |
+
and `\mathrm{tol}=10\alpha` is the desired tolerance, which is
|
376 |
+
chosen in relation to `\alpha`.
|
377 |
+
|
378 |
+
When increasing the degree, the abscissa increase towards
|
379 |
+
`j\infty`, but more slowly than the fixed Talbot
|
380 |
+
algorithm. The de Hoog et al. algorithm typically does better
|
381 |
+
with oscillatory functions of time, and less well-behaved
|
382 |
+
functions. The method tends to be slower than the Talbot and
|
383 |
+
Stehfest algorithms, especially so at very high precision
|
384 |
+
(e.g., `>500` digits precision).
|
385 |
+
|
386 |
+
"""
|
387 |
+
|
388 |
+
# required
|
389 |
+
# ------------------------------
|
390 |
+
self.t = self.ctx.convert(t)
|
391 |
+
|
392 |
+
# optional
|
393 |
+
# ------------------------------
|
394 |
+
self.tmax = kwargs.get('tmax', self.t)
|
395 |
+
|
396 |
+
# empirical relationships used here based on a linear fit of
|
397 |
+
# requested and delivered dps for exponentially decaying time
|
398 |
+
# functions for requested dps up to 512.
|
399 |
+
|
400 |
+
if 'degree' in kwargs:
|
401 |
+
self.degree = kwargs['degree']
|
402 |
+
self.dps_goal = int(1.38*self.degree)
|
403 |
+
else:
|
404 |
+
self.dps_goal = int(self.ctx.dps*1.36)
|
405 |
+
self.degree = max(10, self.dps_goal)
|
406 |
+
|
407 |
+
# 2*M+1 terms in approximation
|
408 |
+
M = self.degree
|
409 |
+
|
410 |
+
# adjust alpha component of abscissa of convergence for higher
|
411 |
+
# precision
|
412 |
+
tmp = self.ctx.power(10.0, -self.dps_goal)
|
413 |
+
self.alpha = self.ctx.convert(kwargs.get('alpha', tmp))
|
414 |
+
|
415 |
+
# desired tolerance (here simply related to alpha)
|
416 |
+
self.tol = self.ctx.convert(kwargs.get('tol', self.alpha*10.0))
|
417 |
+
self.np = 2*self.degree+1 # number of terms in approximation
|
418 |
+
|
419 |
+
# this is adjusting the dps of the calling context
|
420 |
+
# hopefully the caller doesn't monkey around with it
|
421 |
+
# between calling this routine and calc_time_domain_solution()
|
422 |
+
self.dps_orig = self.ctx.dps
|
423 |
+
self.ctx.dps = self.dps_goal
|
424 |
+
|
425 |
+
# scaling factor (likely tunable, but 2 is typical)
|
426 |
+
self.scale = kwargs.get('scale', 2)
|
427 |
+
self.T = self.ctx.convert(kwargs.get('T', self.scale*self.tmax))
|
428 |
+
|
429 |
+
self.p = self.ctx.matrix(2*M+1, 1)
|
430 |
+
self.gamma = self.alpha - self.ctx.log(self.tol)/(self.scale*self.T)
|
431 |
+
self.p = (self.gamma + self.ctx.pi*
|
432 |
+
self.ctx.matrix(self.ctx.arange(self.np))/self.T*1j)
|
433 |
+
|
434 |
+
# NB: p is complex (mpc)
|
435 |
+
|
436 |
+
def calc_time_domain_solution(self, fp, t, manual_prec=False):
|
437 |
+
r"""Calculate time-domain solution for
|
438 |
+
de Hoog, Knight & Stokes algorithm.
|
439 |
+
|
440 |
+
The un-accelerated Fourier series approach is:
|
441 |
+
|
442 |
+
.. math ::
|
443 |
+
|
444 |
+
f(t,2M+1) = \frac{e^{\gamma t}}{T} \sum_{k=0}^{2M}{}^{'}
|
445 |
+
\Re\left[\bar{f}\left( p_k \right)
|
446 |
+
e^{ik\pi t/T} \right],
|
447 |
+
|
448 |
+
where the prime on the summation indicates the first term is halved.
|
449 |
+
|
450 |
+
This simplistic approach requires so many function evaluations
|
451 |
+
that it is not practical. Non-linear acceleration is
|
452 |
+
accomplished via Pade-approximation and an analytic expression
|
453 |
+
for the remainder of the continued fraction. See the original
|
454 |
+
paper (reference 2 below) for a detailed description of the
|
455 |
+
numerical approach.
|
456 |
+
|
457 |
+
**References**
|
458 |
+
|
459 |
+
1. Davies, B. (2005). *Integral Transforms and their
|
460 |
+
Applications*, Third Edition. Springer.
|
461 |
+
2. de Hoog, F., J. Knight, A. Stokes (1982). An improved
|
462 |
+
method for numerical inversion of Laplace transforms. *SIAM
|
463 |
+
Journal of Scientific and Statistical Computing* 3:357-366,
|
464 |
+
http://dx.doi.org/10.1137/0903022
|
465 |
+
|
466 |
+
"""
|
467 |
+
|
468 |
+
M = self.degree
|
469 |
+
np = self.np
|
470 |
+
T = self.T
|
471 |
+
|
472 |
+
self.t = self.ctx.convert(t)
|
473 |
+
|
474 |
+
# would it be useful to try re-using
|
475 |
+
# space between e&q and A&B?
|
476 |
+
e = self.ctx.zeros(np, M+1)
|
477 |
+
q = self.ctx.matrix(2*M, M)
|
478 |
+
d = self.ctx.matrix(np, 1)
|
479 |
+
A = self.ctx.zeros(np+1, 1)
|
480 |
+
B = self.ctx.ones(np+1, 1)
|
481 |
+
|
482 |
+
# initialize Q-D table
|
483 |
+
e[:, 0] = 0.0 + 0j
|
484 |
+
q[0, 0] = fp[1]/(fp[0]/2)
|
485 |
+
for i in range(1, 2*M):
|
486 |
+
q[i, 0] = fp[i+1]/fp[i]
|
487 |
+
|
488 |
+
# rhombus rule for filling triangular Q-D table (e & q)
|
489 |
+
for r in range(1, M+1):
|
490 |
+
# start with e, column 1, 0:2*M-2
|
491 |
+
mr = 2*(M-r) + 1
|
492 |
+
e[0:mr, r] = q[1:mr+1, r-1] - q[0:mr, r-1] + e[1:mr+1, r-1]
|
493 |
+
if not r == M:
|
494 |
+
rq = r+1
|
495 |
+
mr = 2*(M-rq)+1 + 2
|
496 |
+
for i in range(mr):
|
497 |
+
q[i, rq-1] = q[i+1, rq-2]*e[i+1, rq-1]/e[i, rq-1]
|
498 |
+
|
499 |
+
# build up continued fraction coefficients (d)
|
500 |
+
d[0] = fp[0]/2
|
501 |
+
for r in range(1, M+1):
|
502 |
+
d[2*r-1] = -q[0, r-1] # even terms
|
503 |
+
d[2*r] = -e[0, r] # odd terms
|
504 |
+
|
505 |
+
# seed A and B for recurrence
|
506 |
+
A[0] = 0.0 + 0.0j
|
507 |
+
A[1] = d[0]
|
508 |
+
B[0:2] = 1.0 + 0.0j
|
509 |
+
|
510 |
+
# base of the power series
|
511 |
+
z = self.ctx.expjpi(self.t/T) # i*pi is already in fcn
|
512 |
+
|
513 |
+
# coefficients of Pade approximation (A & B)
|
514 |
+
# using recurrence for all but last term
|
515 |
+
for i in range(1, 2*M):
|
516 |
+
A[i+1] = A[i] + d[i]*A[i-1]*z
|
517 |
+
B[i+1] = B[i] + d[i]*B[i-1]*z
|
518 |
+
|
519 |
+
# "improved remainder" to continued fraction
|
520 |
+
brem = (1 + (d[2*M-1] - d[2*M])*z)/2
|
521 |
+
# powm1(x,y) computes x^y - 1 more accurately near zero
|
522 |
+
rem = brem*self.ctx.powm1(1 + d[2*M]*z/brem,
|
523 |
+
self.ctx.fraction(1, 2))
|
524 |
+
|
525 |
+
# last term of recurrence using new remainder
|
526 |
+
A[np] = A[2*M] + rem*A[2*M-1]
|
527 |
+
B[np] = B[2*M] + rem*B[2*M-1]
|
528 |
+
|
529 |
+
# diagonal Pade approximation
|
530 |
+
# F=A/B represents accelerated trapezoid rule
|
531 |
+
result = self.ctx.exp(self.gamma*self.t)/T*(A[np]/B[np]).real
|
532 |
+
|
533 |
+
# setting dps back to value when calc_laplace_parameter was called
|
534 |
+
if not manual_prec:
|
535 |
+
self.ctx.dps = self.dps_orig
|
536 |
+
|
537 |
+
return result
|
538 |
+
|
539 |
+
|
540 |
+
# ****************************************
|
541 |
+
|
542 |
+
class Cohen(InverseLaplaceTransform):
|
543 |
+
|
544 |
+
def calc_laplace_parameter(self, t, **kwargs):
|
545 |
+
r"""The Cohen algorithm accelerates the convergence of the nearly
|
546 |
+
alternating series resulting from the application of the trapezoidal
|
547 |
+
rule to the Bromwich contour inversion integral.
|
548 |
+
|
549 |
+
.. math ::
|
550 |
+
|
551 |
+
p_k = \frac{\gamma}{2 t} + \frac{\pi i k}{t} \qquad 0 \le k < M
|
552 |
+
|
553 |
+
where
|
554 |
+
|
555 |
+
.. math ::
|
556 |
+
|
557 |
+
\gamma = \frac{2}{3} (d + \log(10) + \log(2 t)),
|
558 |
+
|
559 |
+
`d = \mathrm{dps\_goal}`, which is chosen based on the desired
|
560 |
+
accuracy using the method developed in [1] to improve numerical
|
561 |
+
stability. The Cohen algorithm shows robustness similar to the de Hoog
|
562 |
+
et al. algorithm, but it is faster than the fixed Talbot algorithm.
|
563 |
+
|
564 |
+
**Optional arguments**
|
565 |
+
|
566 |
+
*degree*
|
567 |
+
integer order of the approximation (M = number of terms)
|
568 |
+
*alpha*
|
569 |
+
abscissa for `p_0` (controls the discretization error)
|
570 |
+
|
571 |
+
The working precision will be increased according to a rule of
|
572 |
+
thumb. If 'degree' is not specified, the working precision and
|
573 |
+
degree are chosen to hopefully achieve the dps of the calling
|
574 |
+
context. If 'degree' is specified, the working precision is
|
575 |
+
chosen to achieve maximum resulting precision for the
|
576 |
+
specified degree.
|
577 |
+
|
578 |
+
**References**
|
579 |
+
|
580 |
+
1. P. Glasserman, J. Ruiz-Mata (2006). Computing the credit loss
|
581 |
+
distribution in the Gaussian copula model: a comparison of methods.
|
582 |
+
*Journal of Credit Risk* 2(4):33-66, 10.21314/JCR.2006.057
|
583 |
+
|
584 |
+
"""
|
585 |
+
self.t = self.ctx.convert(t)
|
586 |
+
|
587 |
+
if 'degree' in kwargs:
|
588 |
+
self.degree = kwargs['degree']
|
589 |
+
self.dps_goal = int(1.5 * self.degree)
|
590 |
+
else:
|
591 |
+
self.dps_goal = int(self.ctx.dps * 1.74)
|
592 |
+
self.degree = max(22, int(1.31 * self.dps_goal))
|
593 |
+
|
594 |
+
M = self.degree + 1
|
595 |
+
|
596 |
+
# this is adjusting the dps of the calling context hopefully
|
597 |
+
# the caller doesn't monkey around with it between calling
|
598 |
+
# this routine and calc_time_domain_solution()
|
599 |
+
self.dps_orig = self.ctx.dps
|
600 |
+
self.ctx.dps = self.dps_goal
|
601 |
+
|
602 |
+
ttwo = 2 * self.t
|
603 |
+
tmp = self.ctx.dps * self.ctx.log(10) + self.ctx.log(ttwo)
|
604 |
+
tmp = self.ctx.fraction(2, 3) * tmp
|
605 |
+
self.alpha = self.ctx.convert(kwargs.get('alpha', tmp))
|
606 |
+
|
607 |
+
# all but time-dependent part of p
|
608 |
+
a_t = self.alpha / ttwo
|
609 |
+
p_t = self.ctx.pi * 1j / self.t
|
610 |
+
|
611 |
+
self.p = self.ctx.matrix(M, 1)
|
612 |
+
self.p[0] = a_t
|
613 |
+
|
614 |
+
for i in range(1, M):
|
615 |
+
self.p[i] = a_t + i * p_t
|
616 |
+
|
617 |
+
def calc_time_domain_solution(self, fp, t, manual_prec=False):
|
618 |
+
r"""Calculate time-domain solution for Cohen algorithm.
|
619 |
+
|
620 |
+
The accelerated nearly alternating series is:
|
621 |
+
|
622 |
+
.. math ::
|
623 |
+
|
624 |
+
f(t, M) = \frac{e^{\gamma / 2}}{t} \left[\frac{1}{2}
|
625 |
+
\Re\left(\bar{f}\left(\frac{\gamma}{2t}\right) \right) -
|
626 |
+
\sum_{k=0}^{M-1}\frac{c_{M,k}}{d_M}\Re\left(\bar{f}
|
627 |
+
\left(\frac{\gamma + 2(k+1) \pi i}{2t}\right)\right)\right],
|
628 |
+
|
629 |
+
where coefficients `\frac{c_{M, k}}{d_M}` are described in [1].
|
630 |
+
|
631 |
+
1. H. Cohen, F. Rodriguez Villegas, D. Zagier (2000). Convergence
|
632 |
+
acceleration of alternating series. *Experiment. Math* 9(1):3-12
|
633 |
+
|
634 |
+
"""
|
635 |
+
self.t = self.ctx.convert(t)
|
636 |
+
|
637 |
+
n = self.degree
|
638 |
+
M = n + 1
|
639 |
+
|
640 |
+
A = self.ctx.matrix(M, 1)
|
641 |
+
for i in range(M):
|
642 |
+
A[i] = fp[i].real
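# Cohen/Rodriguez Villegas/Zagier alternating-series acceleration
# (reference [1] in the docstring above): the quantities d, b and c below
# generate the weights c_{n,k}/d_n on the fly while the weighted sum s is
# accumulated.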
|
643 |
+
|
644 |
+
d = (3 + self.ctx.sqrt(8)) ** n
|
645 |
+
d = (d + 1 / d) / 2
|
646 |
+
b = -self.ctx.one
|
647 |
+
c = -d
|
648 |
+
s = 0
|
649 |
+
|
650 |
+
for k in range(n):
|
651 |
+
c = b - c
|
652 |
+
s = s + c * A[k + 1]
|
653 |
+
b = 2 * (k + n) * (k - n) * b / ((2 * k + 1) * (k + self.ctx.one))
|
654 |
+
|
655 |
+
result = self.ctx.exp(self.alpha / 2) / self.t * (A[0] / 2 - s / d)
|
656 |
+
|
657 |
+
# setting dps back to value when calc_laplace_parameter was
|
658 |
+
# called, unless flag is set.
|
659 |
+
if not manual_prec:
|
660 |
+
self.ctx.dps = self.dps_orig
|
661 |
+
|
662 |
+
return result
|
663 |
+
|
664 |
+
|
665 |
+
# ****************************************
|
666 |
+
|
667 |
+
class LaplaceTransformInversionMethods(object):
|
668 |
+
def __init__(ctx, *args, **kwargs):
|
669 |
+
ctx._fixed_talbot = FixedTalbot(ctx)
|
670 |
+
ctx._stehfest = Stehfest(ctx)
|
671 |
+
ctx._de_hoog = deHoog(ctx)
|
672 |
+
ctx._cohen = Cohen(ctx)
|
673 |
+
|
674 |
+
def invertlaplace(ctx, f, t, **kwargs):
|
675 |
+
r"""Computes the numerical inverse Laplace transform for a
|
676 |
+
Laplace-space function at a given time. The function being
|
677 |
+
evaluated is assumed to be a real-valued function of time.
|
678 |
+
|
679 |
+
The user must supply a Laplace-space function `\bar{f}(p)`,
|
680 |
+
and a desired time at which to estimate the time-domain
|
681 |
+
solution `f(t)`.
|
682 |
+
|
683 |
+
A few basic examples of Laplace-space functions with known
|
684 |
+
inverses (see references [1,2]) :
|
685 |
+
|
686 |
+
.. math ::
|
687 |
+
|
688 |
+
\mathcal{L}\left\lbrace f(t) \right\rbrace=\bar{f}(p)
|
689 |
+
|
690 |
+
.. math ::
|
691 |
+
|
692 |
+
\mathcal{L}^{-1}\left\lbrace \bar{f}(p) \right\rbrace = f(t)
|
693 |
+
|
694 |
+
.. math ::
|
695 |
+
|
696 |
+
\bar{f}(p) = \frac{1}{(p+1)^2}
|
697 |
+
|
698 |
+
.. math ::
|
699 |
+
|
700 |
+
f(t) = t e^{-t}
|
701 |
+
|
702 |
+
>>> from mpmath import *
|
703 |
+
>>> mp.dps = 15; mp.pretty = True
|
704 |
+
>>> tt = [0.001, 0.01, 0.1, 1, 10]
|
705 |
+
>>> fp = lambda p: 1/(p+1)**2
|
706 |
+
>>> ft = lambda t: t*exp(-t)
|
707 |
+
>>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='talbot')
|
708 |
+
(0.000999000499833375, 8.57923043561212e-20)
|
709 |
+
>>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='talbot')
|
710 |
+
(0.00990049833749168, 3.27007646698047e-19)
|
711 |
+
>>> ft(tt[2]),ft(tt[2])-invertlaplace(fp,tt[2],method='talbot')
|
712 |
+
(0.090483741803596, -1.75215800052168e-18)
|
713 |
+
>>> ft(tt[3]),ft(tt[3])-invertlaplace(fp,tt[3],method='talbot')
|
714 |
+
(0.367879441171442, 1.2428864009344e-17)
|
715 |
+
>>> ft(tt[4]),ft(tt[4])-invertlaplace(fp,tt[4],method='talbot')
|
716 |
+
(0.000453999297624849, 4.04513489306658e-20)
|
717 |
+
|
718 |
+
The methods also work for higher precision:
|
719 |
+
|
720 |
+
>>> mp.dps = 100; mp.pretty = True
|
721 |
+
>>> nstr(ft(tt[0]),15),nstr(ft(tt[0])-invertlaplace(fp,tt[0],method='talbot'),15)
|
722 |
+
('0.000999000499833375', '-4.96868310693356e-105')
|
723 |
+
>>> nstr(ft(tt[1]),15),nstr(ft(tt[1])-invertlaplace(fp,tt[1],method='talbot'),15)
|
724 |
+
('0.00990049833749168', '1.23032291513122e-104')
|
725 |
+
|
726 |
+
.. math ::
|
727 |
+
|
728 |
+
\bar{f}(p) = \frac{1}{p^2+1}
|
729 |
+
|
730 |
+
.. math ::
|
731 |
+
|
732 |
+
f(t) = \mathrm{J}_0(t)
|
733 |
+
|
734 |
+
>>> mp.dps = 15; mp.pretty = True
|
735 |
+
>>> fp = lambda p: 1/sqrt(p*p + 1)
|
736 |
+
>>> ft = lambda t: besselj(0,t)
|
737 |
+
>>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='dehoog')
|
738 |
+
(0.999999750000016, -6.09717765032273e-18)
|
739 |
+
>>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='dehoog')
|
740 |
+
(0.99997500015625, -5.61756281076169e-17)
|
741 |
+
|
742 |
+
.. math ::
|
743 |
+
|
744 |
+
\bar{f}(p) = \frac{\log p}{p}
|
745 |
+
|
746 |
+
.. math ::
|
747 |
+
|
748 |
+
f(t) = -\gamma -\log t
|
749 |
+
|
750 |
+
>>> mp.dps = 15; mp.pretty = True
|
751 |
+
>>> fp = lambda p: log(p)/p
|
752 |
+
>>> ft = lambda t: -euler-log(t)
|
753 |
+
>>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='stehfest')
|
754 |
+
(6.3305396140806, -1.92126634837863e-16)
|
755 |
+
>>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='stehfest')
|
756 |
+
(4.02795452108656, -4.81486093200704e-16)
|
757 |
+
|
758 |
+
**Options**
|
759 |
+
|
760 |
+
:func:`~mpmath.invertlaplace` recognizes the following optional
|
761 |
+
keywords valid for all methods:
|
762 |
+
|
763 |
+
*method*
|
764 |
+
Chooses numerical inverse Laplace transform algorithm
|
765 |
+
(described below).
|
766 |
+
*degree*
|
767 |
+
Number of terms used in the approximation
|
768 |
+
|
769 |
+
**Algorithms**
|
770 |
+
|
771 |
+
Mpmath implements four numerical inverse Laplace transform
|
772 |
+
algorithms, attributed to: Talbot, Stehfest, de Hoog, Knight and
|
773 |
+
Stokes, and Cohen. These can be selected by using
|
774 |
+
*method='talbot'*, *method='stehfest'*, *method='dehoog'* or
|
775 |
+
*method='cohen'* or by passing the classes *method=FixedTalbot*,
|
776 |
+
*method=Stehfest*, *method=deHoog*, or *method=Cohen*. The functions
|
777 |
+
:func:`~mpmath.invlaptalbot`, :func:`~mpmath.invlapstehfest`,
|
778 |
+
:func:`~mpmath.invlapdehoog`, and :func:`~mpmath.invlapcohen`
|
779 |
+
are also available as shortcuts.
|
780 |
+
|
781 |
+
All four algorithms implement a heuristic balance between the
|
782 |
+
requested precision and the precision used internally for the
|
783 |
+
calculations. This has been tuned for a typical exponentially
|
784 |
+
decaying function and precision up to few hundred decimal
|
785 |
+
digits.
|
786 |
+
|
787 |
+
The Laplace transform converts the variable time (i.e., along
|
788 |
+
a line) into a parameter given by the right half of the
|
789 |
+
complex `p`-plane. Singularities, poles, and branch cuts in
|
790 |
+
the complex `p`-plane contain all the information regarding
|
791 |
+
the time behavior of the corresponding function. Any numerical
|
792 |
+
method must therefore sample `p`-plane "close enough" to the
|
793 |
+
singularities to accurately characterize them, while not
|
794 |
+
getting too close to have catastrophic cancellation, overflow,
|
795 |
+
or underflow issues. Most significantly, if one or more of the
|
796 |
+
singularities in the `p`-plane is not on the left side of the
|
797 |
+
Bromwich contour, its effects will be left out of the computed
|
798 |
+
solution, and the answer will be completely wrong.
|
799 |
+
|
800 |
+
*Talbot*
|
801 |
+
|
802 |
+
The fixed Talbot method is high accuracy and fast, but the
|
803 |
+
method can catastrophically fail for certain classes of time-domain
|
804 |
+
behavior, including a Heaviside step function for positive
|
805 |
+
time (e.g., `H(t-2)`), or some oscillatory behaviors. The
|
806 |
+
Talbot method usually has adjustable parameters, but the
|
807 |
+
"fixed" variety implemented here does not. This method
|
808 |
+
deforms the Bromwich integral contour in the shape of a
|
809 |
+
parabola towards `-\infty`, which leads to problems
|
810 |
+
when the solution has a decaying exponential in it (e.g., a
|
811 |
+
Heaviside step function is equivalent to multiplying by a
|
812 |
+
decaying exponential in Laplace space).
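A hedged sketch of this failure mode, using the shifted Heaviside
transform `\bar{f}(p)=e^{-2p}/p` (so that `f(t)=H(t-2)`) purely as an
assumed example::

    from mpmath import mp, exp, invertlaplace
    mp.dps = 15
    fbar = lambda p: exp(-2*p)/p        # Laplace transform of H(t-2)
    # the deformed Talbot contour tends to handle the decaying exponential
    # factor poorly here, whereas 'cohen' or 'dehoog' are usually better behaved
    talbot = invertlaplace(fbar, 3.0, method='talbot')
    cohen  = invertlaplace(fbar, 3.0, method='cohen')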
|
813 |
+
|
814 |
+
*Stehfest*
|
815 |
+
|
816 |
+
The Stehfest algorithm only uses abscissa along the real axis
|
817 |
+
of the complex `p`-plane to estimate the time-domain
|
818 |
+
function. Oscillatory time-domain functions have poles away
|
819 |
+
from the real axis, so this method does not work well with
|
820 |
+
oscillatory functions, especially high-frequency ones. This
|
821 |
+
method also depends on summation of terms in a series that
|
822 |
+
grows very large, and will have catastrophic cancellation
|
823 |
+
during summation if the working precision is too low.
|
824 |
+
|
825 |
+
*de Hoog et al.*
|
826 |
+
|
827 |
+
The de Hoog, Knight, and Stokes method is essentially a
|
828 |
+
Fourier-series quadrature-type approximation to the Bromwich
|
829 |
+
contour integral, with non-linear series acceleration and an
|
830 |
+
analytical expression for the remainder term. This method is
|
831 |
+
typically one of the most robust. This method also involves the
|
832 |
+
greatest amount of overhead, so it is typically the slowest of the
|
833 |
+
four methods at high precision.
|
834 |
+
|
835 |
+
*Cohen*
|
836 |
+
|
837 |
+
The Cohen method is a trapezoidal rule approximation to the Bromwich
|
838 |
+
contour integral, with linear acceleration for alternating
|
839 |
+
series. This method is as robust as the de Hoog et al method and the
|
840 |
+
fastest of the four methods at high precision, and is therefore the
|
841 |
+
default method.
|
842 |
+
|
843 |
+
**Singularities**
|
844 |
+
|
845 |
+
All numerical inverse Laplace transform methods have problems
|
846 |
+
at large time when the Laplace-space function has poles,
|
847 |
+
singularities, or branch cuts to the right of the origin in
|
848 |
+
the complex plane. For simple poles in `\bar{f}(p)` at the
|
849 |
+
`p`-plane origin, the time function is constant in time (e.g.,
|
850 |
+
`\mathcal{L}\left\lbrace 1 \right\rbrace=1/p` has a pole at
|
851 |
+
`p=0`). A pole in `\bar{f}(p)` to the left of the origin is a
|
852 |
+
decreasing function of time (e.g., `\mathcal{L}\left\lbrace
|
853 |
+
e^{-t/2} \right\rbrace=1/(p+1/2)` has a pole at `p=-1/2`), and
|
854 |
+
a pole to the right of the origin leads to an increasing
|
855 |
+
function in time (e.g., `\mathcal{L}\left\lbrace t e^{t/4}
|
856 |
+
\right\rbrace = 1/(p-1/4)^2` has a pole at `p=1/4`). When
|
857 |
+
singularities occur off the real `p` axis, the time-domain
|
858 |
+
function is oscillatory. For example `\mathcal{L}\left\lbrace
|
859 |
+
\mathrm{J}_0(t) \right\rbrace=1/\sqrt{p^2+1}` has a branch cut
|
860 |
+
starting at `p=j=\sqrt{-1}` and is a decaying oscillatory
|
861 |
+
function. This range of behaviors is illustrated in Duffy [3]
|
862 |
+
Figure 4.10.4, p. 228.
|
863 |
+
|
864 |
+
In general, as `p \rightarrow \infty`, `t \rightarrow 0` and
|
865 |
+
vice-versa. All numerical inverse Laplace transform methods
|
866 |
+
require their abscissas to shift closer to the origin for
|
867 |
+
larger times. If the abscissas shift left of the rightmost
|
868 |
+
singularity in the Laplace domain, the answer will be
|
869 |
+
completely wrong (the effects of singularities to the right of
|
870 |
+
the Bromwich contour are not included in the results).
|
871 |
+
|
872 |
+
For example, the following exponentially growing function has
|
873 |
+
a pole at `p=3`:
|
874 |
+
|
875 |
+
.. math ::
|
876 |
+
|
877 |
+
\bar{f}(p)=\frac{1}{p^2-9}
|
878 |
+
|
879 |
+
.. math ::
|
880 |
+
|
881 |
+
f(t)=\frac{1}{3}\sinh 3t
|
882 |
+
|
883 |
+
>>> mp.dps = 15; mp.pretty = True
|
884 |
+
>>> fp = lambda p: 1/(p*p-9)
|
885 |
+
>>> ft = lambda t: sinh(3*t)/3
|
886 |
+
>>> tt = [0.01,0.1,1.0,10.0]
|
887 |
+
>>> ft(tt[0]),invertlaplace(fp,tt[0],method='talbot')
|
888 |
+
(0.0100015000675014, 0.0100015000675014)
|
889 |
+
>>> ft(tt[1]),invertlaplace(fp,tt[1],method='talbot')
|
890 |
+
(0.101506764482381, 0.101506764482381)
|
891 |
+
>>> ft(tt[2]),invertlaplace(fp,tt[2],method='talbot')
|
892 |
+
(3.33929164246997, 3.33929164246997)
|
893 |
+
>>> ft(tt[3]),invertlaplace(fp,tt[3],method='talbot')
|
894 |
+
(1781079096920.74, -1.61331069624091e-14)
|
895 |
+
|
896 |
+
**References**
|
897 |
+
|
898 |
+
1. [DLMF]_ section 1.14 (http://dlmf.nist.gov/1.14T4)
|
899 |
+
2. Cohen, A.M. (2007). Numerical Methods for Laplace Transform
|
900 |
+
Inversion, Springer.
|
901 |
+
3. Duffy, D.G. (1998). Advanced Engineering Mathematics, CRC Press.
|
902 |
+
|
903 |
+
**Numerical Inverse Laplace Transform Reviews**
|
904 |
+
|
905 |
+
1. Bellman, R., R.E. Kalaba, J.A. Lockett (1966). *Numerical
|
906 |
+
inversion of the Laplace transform: Applications to Biology,
|
907 |
+
Economics, Engineering, and Physics*. Elsevier.
|
908 |
+
2. Davies, B., B. Martin (1979). Numerical inversion of the
|
909 |
+
Laplace transform: a survey and comparison of methods. *Journal
|
910 |
+
of Computational Physics* 33:1-32,
|
911 |
+
http://dx.doi.org/10.1016/0021-9991(79)90025-1
|
912 |
+
3. Duffy, D.G. (1993). On the numerical inversion of Laplace
|
913 |
+
transforms: Comparison of three new methods on characteristic
|
914 |
+
problems from applications. *ACM Transactions on Mathematical
|
915 |
+
Software* 19(3):333-359, http://dx.doi.org/10.1145/155743.155788
|
916 |
+
4. Kuhlman, K.L., (2013). Review of Inverse Laplace Transform
|
917 |
+
Algorithms for Laplace-Space Numerical Approaches, *Numerical
|
918 |
+
Algorithms*, 63(2):339-355.
|
919 |
+
http://dx.doi.org/10.1007/s11075-012-9625-3
|
920 |
+
|
921 |
+
"""
|
922 |
+
|
923 |
+
rule = kwargs.get('method', 'cohen')
|
924 |
+
if type(rule) is str:
|
925 |
+
lrule = rule.lower()
|
926 |
+
if lrule == 'talbot':
|
927 |
+
rule = ctx._fixed_talbot
|
928 |
+
elif lrule == 'stehfest':
|
929 |
+
rule = ctx._stehfest
|
930 |
+
elif lrule == 'dehoog':
|
931 |
+
rule = ctx._de_hoog
|
932 |
+
elif lrule == 'cohen':
|
933 |
+
rule = ctx._cohen
|
934 |
+
else:
|
935 |
+
raise ValueError("unknown invlap algorithm: %s" % rule)
|
936 |
+
else:
|
937 |
+
rule = rule(ctx)
|
938 |
+
|
939 |
+
# determine the vector of Laplace-space parameters
|
940 |
+
# needed for the requested method and desired time
|
941 |
+
rule.calc_laplace_parameter(t, **kwargs)
|
942 |
+
|
943 |
+
# compute the Laplace-space function evaluations
|
944 |
+
# at the required abscissa.
|
945 |
+
fp = [f(p) for p in rule.p]
|
946 |
+
|
947 |
+
# compute the time-domain solution from the
|
948 |
+
# Laplace-space function evaluations
|
949 |
+
return rule.calc_time_domain_solution(fp, t)
|
950 |
+
|
951 |
+
# shortcuts for the above function for specific methods
|
952 |
+
def invlaptalbot(ctx, *args, **kwargs):
|
953 |
+
kwargs['method'] = 'talbot'
|
954 |
+
return ctx.invertlaplace(*args, **kwargs)
|
955 |
+
|
956 |
+
def invlapstehfest(ctx, *args, **kwargs):
|
957 |
+
kwargs['method'] = 'stehfest'
|
958 |
+
return ctx.invertlaplace(*args, **kwargs)
|
959 |
+
|
960 |
+
def invlapdehoog(ctx, *args, **kwargs):
|
961 |
+
kwargs['method'] = 'dehoog'
|
962 |
+
return ctx.invertlaplace(*args, **kwargs)
|
963 |
+
|
964 |
+
def invlapcohen(ctx, *args, **kwargs):
|
965 |
+
kwargs['method'] = 'cohen'
|
966 |
+
return ctx.invertlaplace(*args, **kwargs)
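# Illustrative usage sketch (assuming, as with invertlaplace itself, that
# these shortcuts are exposed as methods on the usual context `mp`): each
# one just fixes the method, e.g.
#   mp.invlaptalbot(lambda p: 1/p**2, 5.0)
# is equivalent to
#   mp.invertlaplace(lambda p: 1/p**2, 5.0, method='talbot')   # L{t} = 1/p^2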
|
967 |
+
|
968 |
+
|
969 |
+
# ****************************************
|
970 |
+
|
971 |
+
if __name__ == '__main__':
|
972 |
+
import doctest
|
973 |
+
doctest.testmod()
|
env-llmeval/lib/python3.10/site-packages/mpmath/calculus/odes.py
ADDED
@@ -0,0 +1,288 @@
1 |
+
from bisect import bisect
|
2 |
+
from ..libmp.backend import xrange
|
3 |
+
|
4 |
+
class ODEMethods(object):
|
5 |
+
pass
|
6 |
+
|
7 |
+
def ode_taylor(ctx, derivs, x0, y0, tol_prec, n):
|
8 |
+
h = tol = ctx.ldexp(1, -tol_prec)
|
9 |
+
dim = len(y0)
|
10 |
+
xs = [x0]
|
11 |
+
ys = [y0]
|
12 |
+
x = x0
|
13 |
+
y = y0
|
14 |
+
orig = ctx.prec
|
15 |
+
try:
|
16 |
+
ctx.prec = orig*(1+n)
|
17 |
+
# Use n steps with Euler's method to get
|
18 |
+
# evaluation points for derivatives
|
19 |
+
for i in range(n):
|
20 |
+
fxy = derivs(x, y)
|
21 |
+
y = [y[i]+h*fxy[i] for i in xrange(len(y))]
|
22 |
+
x += h
|
23 |
+
xs.append(x)
|
24 |
+
ys.append(y)
|
25 |
+
# Compute derivatives
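# The j-th Taylor coefficient of each component is estimated from the Euler
# samples by a j-th forward difference scaled by h**(-j)/j!:
#   y_d^(j)(x0)/j! ~ sum_i (-1)**(j-i) * C(j, i) * ys[i][d] * h**(-j) / j!
# The variable b below steps through the signed binomial coefficients.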
|
26 |
+
ser = [[] for d in range(dim)]
|
27 |
+
for j in range(n+1):
|
28 |
+
s = [0]*dim
|
29 |
+
b = (-1) ** (j & 1)
|
30 |
+
k = 1
|
31 |
+
for i in range(j+1):
|
32 |
+
for d in range(dim):
|
33 |
+
s[d] += b * ys[i][d]
|
34 |
+
b = (b * (j-k+1)) // (-k)
|
35 |
+
k += 1
|
36 |
+
scale = h**(-j) / ctx.fac(j)
|
37 |
+
for d in range(dim):
|
38 |
+
s[d] = s[d] * scale
|
39 |
+
ser[d].append(s[d])
|
40 |
+
finally:
|
41 |
+
ctx.prec = orig
|
42 |
+
# Estimate radius for which we can get full accuracy.
|
43 |
+
# XXX: do this right for zeros
|
44 |
+
radius = ctx.one
|
45 |
+
for ts in ser:
|
46 |
+
if ts[-1]:
|
47 |
+
radius = min(radius, ctx.nthroot(tol/abs(ts[-1]), n))
|
48 |
+
radius /= 2 # XXX
|
49 |
+
return ser, x0+radius
|
50 |
+
|
51 |
+
def odefun(ctx, F, x0, y0, tol=None, degree=None, method='taylor', verbose=False):
|
52 |
+
r"""
|
53 |
+
Returns a function `y(x) = [y_0(x), y_1(x), \ldots, y_n(x)]`
|
54 |
+
that is a numerical solution of the `n+1`-dimensional first-order
|
55 |
+
ordinary differential equation (ODE) system
|
56 |
+
|
57 |
+
.. math ::
|
58 |
+
|
59 |
+
y_0'(x) = F_0(x, [y_0(x), y_1(x), \ldots, y_n(x)])
|
60 |
+
|
61 |
+
y_1'(x) = F_1(x, [y_0(x), y_1(x), \ldots, y_n(x)])
|
62 |
+
|
63 |
+
\vdots
|
64 |
+
|
65 |
+
y_n'(x) = F_n(x, [y_0(x), y_1(x), \ldots, y_n(x)])
|
66 |
+
|
67 |
+
The derivatives are specified by the vector-valued function
|
68 |
+
*F* that evaluates
|
69 |
+
`[y_0', \ldots, y_n'] = F(x, [y_0, \ldots, y_n])`.
|
70 |
+
The initial point `x_0` is specified by the scalar argument *x0*,
|
71 |
+
and the initial value `y(x_0) = [y_0(x_0), \ldots, y_n(x_0)]` is
|
72 |
+
specified by the vector argument *y0*.
|
73 |
+
|
74 |
+
For convenience, if the system is one-dimensional, you may optionally
|
75 |
+
provide just a scalar value for *y0*. In this case, *F* should accept
|
76 |
+
a scalar *y* argument and return a scalar. The solution function
|
77 |
+
*y* will return scalar values instead of length-1 vectors.
|
78 |
+
|
79 |
+
Evaluation of the solution function `y(x)` is permitted
|
80 |
+
for any `x \ge x_0`.
|
81 |
+
|
82 |
+
A high-order ODE can be solved by transforming it into first-order
|
83 |
+
vector form. This transformation is described in standard texts
|
84 |
+
on ODEs. Examples will also be given below.
|
85 |
+
|
86 |
+
**Options, speed and accuracy**
|
87 |
+
|
88 |
+
By default, :func:`~mpmath.odefun` uses a high-order Taylor series
|
89 |
+
method. For reasonably well-behaved problems, the solution will
|
90 |
+
be fully accurate to within the working precision. Note that
|
91 |
+
it must be possible to evaluate *F* to very high precision
|
92 |
+
for the generation of Taylor series to work.
|
93 |
+
|
94 |
+
To get a faster but less accurate solution, you can set a large
|
95 |
+
value for *tol* (which defaults roughly to *eps*). If you just
|
96 |
+
want to plot the solution or perform a basic simulation,
|
97 |
+
*tol = 0.01* is likely sufficient.
|
98 |
+
|
99 |
+
The *degree* argument controls the degree of the solver (with
|
100 |
+
*method='taylor'*, this is the degree of the Taylor series
|
101 |
+
expansion). A higher degree means that a longer step can be taken
|
102 |
+
before a new local solution must be generated from *F*,
|
103 |
+
meaning that fewer steps are required to get from `x_0` to a given
|
104 |
+
`x_1`. On the other hand, a higher degree also means that each
|
105 |
+
local solution becomes more expensive (i.e., more evaluations of
|
106 |
+
*F* are required per step, and at higher precision).
|
107 |
+
|
108 |
+
The optimal setting therefore involves a tradeoff. Generally,
|
109 |
+
decreasing the *degree* for Taylor series is likely to give faster
|
110 |
+
solution at low precision, while increasing is likely to be better
|
111 |
+
at higher precision.
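A rough sketch of how these options are passed (not a doctest; good
values depend on the problem)::

    f_cheap = odefun(lambda x, y: y, 0, 1, tol=0.01)   # fast, low accuracy
    f_long  = odefun(lambda x, y: y, 0, 1, degree=40)  # longer, costlier steps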
|
112 |
+
|
113 |
+
The function
|
114 |
+
object returned by :func:`~mpmath.odefun` caches the solutions at all step
|
115 |
+
points and uses polynomial interpolation between step points.
|
116 |
+
Therefore, once `y(x_1)` has been evaluated for some `x_1`,
|
117 |
+
`y(x)` can be evaluated very quickly for any `x_0 \le x \le x_1`,
|
118 |
+
and continuing the evaluation up to `x_2 > x_1` is also fast.
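For instance (sketch, not a doctest)::

    f = odefun(lambda x, y: y, 0, 1)
    f(10)    # steps from 0 to 10, caching the local Taylor series
    f(3.7)   # answered from the cache, no new steps
    f(12)    # only extends the solution from 10 to 12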
|
119 |
+
|
120 |
+
**Examples of first-order ODEs**
|
121 |
+
|
122 |
+
We will solve the standard test problem `y'(x) = y(x), y(0) = 1`
|
123 |
+
which has explicit solution `y(x) = \exp(x)`::
|
124 |
+
|
125 |
+
>>> from mpmath import *
|
126 |
+
>>> mp.dps = 15; mp.pretty = True
|
127 |
+
>>> f = odefun(lambda x, y: y, 0, 1)
|
128 |
+
>>> for x in [0, 1, 2.5]:
|
129 |
+
... print((f(x), exp(x)))
|
130 |
+
...
|
131 |
+
(1.0, 1.0)
|
132 |
+
(2.71828182845905, 2.71828182845905)
|
133 |
+
(12.1824939607035, 12.1824939607035)
|
134 |
+
|
135 |
+
The solution with high precision::
|
136 |
+
|
137 |
+
>>> mp.dps = 50
|
138 |
+
>>> f = odefun(lambda x, y: y, 0, 1)
|
139 |
+
>>> f(1)
|
140 |
+
2.7182818284590452353602874713526624977572470937
|
141 |
+
>>> exp(1)
|
142 |
+
2.7182818284590452353602874713526624977572470937
|
143 |
+
|
144 |
+
Using the more general vectorized form, the test problem
|
145 |
+
can be input as (note that *f* returns a 1-element vector)::
|
146 |
+
|
147 |
+
>>> mp.dps = 15
|
148 |
+
>>> f = odefun(lambda x, y: [y[0]], 0, [1])
|
149 |
+
>>> f(1)
|
150 |
+
[2.71828182845905]
|
151 |
+
|
152 |
+
:func:`~mpmath.odefun` can solve nonlinear ODEs, which are generally
|
153 |
+
impossible (and at best difficult) to solve analytically. As
|
154 |
+
an example of a nonlinear ODE, we will solve `y'(x) = x \sin(y(x))`
|
155 |
+
for `y(0) = \pi/2`. An exact solution happens to be known
|
156 |
+
for this problem, and is given by
|
157 |
+
`y(x) = 2 \tan^{-1}\left(\exp\left(x^2/2\right)\right)`::
|
158 |
+
|
159 |
+
>>> f = odefun(lambda x, y: x*sin(y), 0, pi/2)
|
160 |
+
>>> for x in [2, 5, 10]:
|
161 |
+
... print((f(x), 2*atan(exp(mpf(x)**2/2))))
|
162 |
+
...
|
163 |
+
(2.87255666284091, 2.87255666284091)
|
164 |
+
(3.14158520028345, 3.14158520028345)
|
165 |
+
(3.14159265358979, 3.14159265358979)
|
166 |
+
|
167 |
+
If `F` is independent of `y`, an ODE can be solved using direct
|
168 |
+
integration. We can therefore obtain a reference solution with
|
169 |
+
:func:`~mpmath.quad`::
|
170 |
+
|
171 |
+
>>> f = lambda x: (1+x**2)/(1+x**3)
|
172 |
+
>>> g = odefun(lambda x, y: f(x), pi, 0)
|
173 |
+
>>> g(2*pi)
|
174 |
+
0.72128263801696
|
175 |
+
>>> quad(f, [pi, 2*pi])
|
176 |
+
0.72128263801696
|
177 |
+
|
178 |
+
**Examples of second-order ODEs**
|
179 |
+
|
180 |
+
We will solve the harmonic oscillator equation `y''(x) + y(x) = 0`.
|
181 |
+
To do this, we introduce the helper functions `y_0 = y, y_1 = y_0'`
|
182 |
+
whereby the original equation can be written as `y_1' + y_0 = 0`. Put
|
183 |
+
together, we get the first-order, two-dimensional vector ODE
|
184 |
+
|
185 |
+
.. math ::
|
186 |
+
|
187 |
+
\begin{cases}
|
188 |
+
y_0' = y_1 \\
|
189 |
+
y_1' = -y_0
|
190 |
+
\end{cases}
|
191 |
+
|
192 |
+
To get a well-defined IVP, we need two initial values. With
|
193 |
+
`y(0) = y_0(0) = 1` and `-y'(0) = y_1(0) = 0`, the problem will of
|
194 |
+
course be solved by `y(x) = y_0(x) = \cos(x)` and
|
195 |
+
`-y'(x) = y_1(x) = \sin(x)`. We check this::
|
196 |
+
|
197 |
+
>>> f = odefun(lambda x, y: [-y[1], y[0]], 0, [1, 0])
|
198 |
+
>>> for x in [0, 1, 2.5, 10]:
|
199 |
+
... nprint(f(x), 15)
|
200 |
+
... nprint([cos(x), sin(x)], 15)
|
201 |
+
... print("---")
|
202 |
+
...
|
203 |
+
[1.0, 0.0]
|
204 |
+
[1.0, 0.0]
|
205 |
+
---
|
206 |
+
[0.54030230586814, 0.841470984807897]
|
207 |
+
[0.54030230586814, 0.841470984807897]
|
208 |
+
---
|
209 |
+
[-0.801143615546934, 0.598472144103957]
|
210 |
+
[-0.801143615546934, 0.598472144103957]
|
211 |
+
---
|
212 |
+
[-0.839071529076452, -0.54402111088937]
|
213 |
+
[-0.839071529076452, -0.54402111088937]
|
214 |
+
---
|
215 |
+
|
216 |
+
Note that we get both the sine and the cosine solutions
|
217 |
+
simultaneously.
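The same reduction applies to any higher-order equation. As a sketch (not a
doctest), the damped oscillator `y'' + y' + y = 0` with `y(0)=1, y'(0)=0`,
written with `y_0 = y` and `y_1 = y'`, becomes::

    f = odefun(lambda x, y: [y[1], -y[1] - y[0]], 0, [1, 0])
    # f(x)[0] approximates y(x), f(x)[1] approximates y'(x)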
|
218 |
+
|
219 |
+
**TODO**
|
220 |
+
|
221 |
+
* Better automatic choice of degree and step size
|
222 |
+
* Make determination of Taylor series convergence radius
|
223 |
+
more robust
|
224 |
+
* Allow solution for `x < x_0`
|
225 |
+
* Allow solution for complex `x`
|
226 |
+
* Test for difficult (ill-conditioned) problems
|
227 |
+
* Implement Runge-Kutta and other algorithms
|
228 |
+
|
229 |
+
"""
|
230 |
+
if tol:
|
231 |
+
tol_prec = int(-ctx.log(tol, 2))+10
|
232 |
+
else:
|
233 |
+
tol_prec = ctx.prec+10
|
234 |
+
degree = degree or (3 + int(3*ctx.dps/2.))
|
235 |
+
workprec = ctx.prec + 40
|
236 |
+
try:
|
237 |
+
len(y0)
|
238 |
+
return_vector = True
|
239 |
+
except TypeError:
|
240 |
+
F_ = F
|
241 |
+
F = lambda x, y: [F_(x, y[0])]
|
242 |
+
y0 = [y0]
|
243 |
+
return_vector = False
|
244 |
+
ser, xb = ode_taylor(ctx, F, x0, y0, tol_prec, degree)
|
245 |
+
series_boundaries = [x0, xb]
|
246 |
+
series_data = [(ser, x0, xb)]
|
247 |
+
# We will be working with vectors of Taylor series
|
248 |
+
def mpolyval(ser, a):
|
249 |
+
return [ctx.polyval(s[::-1], a) for s in ser]
|
250 |
+
# Find nearest expansion point; compute if necessary
|
251 |
+
def get_series(x):
|
252 |
+
if x < x0:
|
253 |
+
raise ValueError
|
254 |
+
n = bisect(series_boundaries, x)
|
255 |
+
if n < len(series_boundaries):
|
256 |
+
return series_data[n-1]
|
257 |
+
while 1:
|
258 |
+
ser, xa, xb = series_data[-1]
|
259 |
+
if verbose:
|
260 |
+
print("Computing Taylor series for [%f, %f]" % (xa, xb))
|
261 |
+
y = mpolyval(ser, xb-xa)
|
262 |
+
xa = xb
|
263 |
+
ser, xb = ode_taylor(ctx, F, xb, y, tol_prec, degree)
|
264 |
+
series_boundaries.append(xb)
|
265 |
+
series_data.append((ser, xa, xb))
|
266 |
+
if x <= xb:
|
267 |
+
return series_data[-1]
|
268 |
+
# Evaluation function
|
269 |
+
def interpolant(x):
|
270 |
+
x = ctx.convert(x)
|
271 |
+
orig = ctx.prec
|
272 |
+
try:
|
273 |
+
ctx.prec = workprec
|
274 |
+
ser, xa, xb = get_series(x)
|
275 |
+
y = mpolyval(ser, x-xa)
|
276 |
+
finally:
|
277 |
+
ctx.prec = orig
|
278 |
+
if return_vector:
|
279 |
+
return [+yk for yk in y]
|
280 |
+
else:
|
281 |
+
return +y[0]
|
282 |
+
return interpolant
|
283 |
+
|
284 |
+
ODEMethods.odefun = odefun
|
285 |
+
|
286 |
+
if __name__ == "__main__":
|
287 |
+
import doctest
|
288 |
+
doctest.testmod()
|
env-llmeval/lib/python3.10/site-packages/mpmath/calculus/optimization.py
ADDED
@@ -0,0 +1,1102 @@
1 |
+
from __future__ import print_function
|
2 |
+
|
3 |
+
from copy import copy
|
4 |
+
|
5 |
+
from ..libmp.backend import xrange
|
6 |
+
|
7 |
+
class OptimizationMethods(object):
|
8 |
+
def __init__(ctx):
|
9 |
+
pass
|
10 |
+
|
11 |
+
##############
|
12 |
+
# 1D-SOLVERS #
|
13 |
+
##############
|
14 |
+
|
15 |
+
class Newton:
|
16 |
+
"""
|
17 |
+
1d-solver generating pairs of approximative root and error.
|
18 |
+
|
19 |
+
Needs a starting point x0 close to the root.
|
20 |
+
|
21 |
+
Pro:
|
22 |
+
|
23 |
+
* converges fast
|
24 |
+
* sometimes more robust than secant with a bad second starting point
|
25 |
+
|
26 |
+
Contra:
|
27 |
+
|
28 |
+
* converges slowly for multiple roots
|
29 |
+
* needs first derivative
|
30 |
+
* 2 function evaluations per iteration
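A minimal usage sketch (not a doctest), via findroot with the 'newton'
alias and an explicit derivative::

    findroot(lambda x: x**2 - 2, 1.5, solver='newton', df=lambda x: 2*x)
    # converges quadratically to sqrt(2)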
|
31 |
+
"""
|
32 |
+
maxsteps = 20
|
33 |
+
|
34 |
+
def __init__(self, ctx, f, x0, **kwargs):
|
35 |
+
self.ctx = ctx
|
36 |
+
if len(x0) == 1:
|
37 |
+
self.x0 = x0[0]
|
38 |
+
else:
|
39 |
+
raise ValueError('expected 1 starting point, got %i' % len(x0))
|
40 |
+
self.f = f
|
41 |
+
if not 'df' in kwargs:
|
42 |
+
def df(x):
|
43 |
+
return self.ctx.diff(f, x)
|
44 |
+
else:
|
45 |
+
df = kwargs['df']
|
46 |
+
self.df = df
|
47 |
+
|
48 |
+
def __iter__(self):
|
49 |
+
f = self.f
|
50 |
+
df = self.df
|
51 |
+
x0 = self.x0
|
52 |
+
while True:
|
53 |
+
x1 = x0 - f(x0) / df(x0)
|
54 |
+
error = abs(x1 - x0)
|
55 |
+
x0 = x1
|
56 |
+
yield (x1, error)
|
57 |
+
|
58 |
+
class Secant:
|
59 |
+
"""
|
60 |
+
1d-solver generating pairs of approximative root and error.
|
61 |
+
|
62 |
+
Needs starting points x0 and x1 close to the root.
|
63 |
+
x1 defaults to x0 + 0.25.
|
64 |
+
|
65 |
+
Pro:
|
66 |
+
|
67 |
+
* converges fast
|
68 |
+
|
69 |
+
Contra:
|
70 |
+
|
71 |
+
* converges slowly for multiple roots
|
72 |
+
"""
|
73 |
+
maxsteps = 30
|
74 |
+
|
75 |
+
def __init__(self, ctx, f, x0, **kwargs):
|
76 |
+
self.ctx = ctx
|
77 |
+
if len(x0) == 1:
|
78 |
+
self.x0 = x0[0]
|
79 |
+
self.x1 = self.x0 + 0.25
|
80 |
+
elif len(x0) == 2:
|
81 |
+
self.x0 = x0[0]
|
82 |
+
self.x1 = x0[1]
|
83 |
+
else:
|
84 |
+
raise ValueError('expected 1 or 2 starting points, got %i' % len(x0))
|
85 |
+
self.f = f
|
86 |
+
|
87 |
+
def __iter__(self):
|
88 |
+
f = self.f
|
89 |
+
x0 = self.x0
|
90 |
+
x1 = self.x1
|
91 |
+
f0 = f(x0)
|
92 |
+
while True:
|
93 |
+
f1 = f(x1)
|
94 |
+
l = x1 - x0
|
95 |
+
if not l:
|
96 |
+
break
|
97 |
+
s = (f1 - f0) / l
|
98 |
+
if not s:
|
99 |
+
break
|
100 |
+
x0, x1 = x1, x1 - f1/s
|
101 |
+
f0 = f1
|
102 |
+
yield x1, abs(l)
|
103 |
+
|
104 |
+
class MNewton:
|
105 |
+
"""
|
106 |
+
1d-solver generating pairs of approximative root and error.
|
107 |
+
|
108 |
+
Needs starting point x0 close to the root.
|
109 |
+
Uses modified Newton's method that converges fast regardless of the
|
110 |
+
multiplicity of the root.
|
111 |
+
|
112 |
+
Pro:
|
113 |
+
|
114 |
+
* converges fast for multiple roots
|
115 |
+
|
116 |
+
Contra:
|
117 |
+
|
118 |
+
* needs first and second derivative of f
|
119 |
+
* 3 function evaluations per iteration
|
120 |
+
"""
|
121 |
+
maxsteps = 20
|
122 |
+
|
123 |
+
def __init__(self, ctx, f, x0, **kwargs):
|
124 |
+
self.ctx = ctx
|
125 |
+
if not len(x0) == 1:
|
126 |
+
raise ValueError('expected 1 starting point, got %i' % len(x0))
|
127 |
+
self.x0 = x0[0]
|
128 |
+
self.f = f
|
129 |
+
if not 'df' in kwargs:
|
130 |
+
def df(x):
|
131 |
+
return self.ctx.diff(f, x)
|
132 |
+
else:
|
133 |
+
df = kwargs['df']
|
134 |
+
self.df = df
|
135 |
+
if not 'd2f' in kwargs:
|
136 |
+
def d2f(x):
|
137 |
+
return self.ctx.diff(df, x)
|
138 |
+
else:
|
139 |
+
d2f = kwargs['d2f']
|
140 |
+
self.d2f = d2f
|
141 |
+
|
142 |
+
def __iter__(self):
|
143 |
+
x = self.x0
|
144 |
+
f = self.f
|
145 |
+
df = self.df
|
146 |
+
d2f = self.d2f
|
147 |
+
while True:
|
148 |
+
prevx = x
|
149 |
+
fx = f(x)
|
150 |
+
if fx == 0:
|
151 |
+
break
|
152 |
+
dfx = df(x)
|
153 |
+
d2fx = d2f(x)
|
154 |
+
# x = x - F(x)/F'(x) with F(x) = f(x)/f'(x)
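# F'(x) = 1 - f(x)*f''(x)/f'(x)**2, so the Newton step for F is
# x - f/(f' - f*f''/f'), which is exactly the update below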
|
155 |
+
x -= fx / (dfx - fx * d2fx / dfx)
|
156 |
+
error = abs(x - prevx)
|
157 |
+
yield x, error
|
158 |
+
|
159 |
+
class Halley:
|
160 |
+
"""
|
161 |
+
1d-solver generating pairs of approximative root and error.
|
162 |
+
|
163 |
+
Needs a starting point x0 close to the root.
|
164 |
+
Uses Halley's method with cubic convergence rate.
|
165 |
+
|
166 |
+
Pro:
|
167 |
+
|
168 |
+
* converges even faster than Newton's method
|
169 |
+
* useful when computing with *many* digits
|
170 |
+
|
171 |
+
Contra:
|
172 |
+
|
173 |
+
* needs first and second derivative of f
|
174 |
+
* 3 function evaluations per iteration
|
175 |
+
* converges slowly for multiple roots
|
176 |
+
"""
|
177 |
+
|
178 |
+
maxsteps = 20
|
179 |
+
|
180 |
+
def __init__(self, ctx, f, x0, **kwargs):
|
181 |
+
self.ctx = ctx
|
182 |
+
if not len(x0) == 1:
|
183 |
+
raise ValueError('expected 1 starting point, got %i' % len(x0))
|
184 |
+
self.x0 = x0[0]
|
185 |
+
self.f = f
|
186 |
+
if not 'df' in kwargs:
|
187 |
+
def df(x):
|
188 |
+
return self.ctx.diff(f, x)
|
189 |
+
else:
|
190 |
+
df = kwargs['df']
|
191 |
+
self.df = df
|
192 |
+
if not 'd2f' in kwargs:
|
193 |
+
def d2f(x):
|
194 |
+
return self.ctx.diff(df, x)
|
195 |
+
else:
|
196 |
+
d2f = kwargs['d2f']
|
197 |
+
self.d2f = d2f
|
198 |
+
|
199 |
+
def __iter__(self):
|
200 |
+
x = self.x0
|
201 |
+
f = self.f
|
202 |
+
df = self.df
|
203 |
+
d2f = self.d2f
|
204 |
+
while True:
|
205 |
+
prevx = x
|
206 |
+
fx = f(x)
|
207 |
+
dfx = df(x)
|
208 |
+
d2fx = d2f(x)
|
209 |
+
x -= 2*fx*dfx / (2*dfx**2 - fx*d2fx)
|
210 |
+
error = abs(x - prevx)
|
211 |
+
yield x, error
|
212 |
+
|
213 |
+
class Muller:
|
214 |
+
"""
|
215 |
+
1d-solver generating pairs of approximative root and error.
|
216 |
+
|
217 |
+
Needs starting points x0, x1 and x2 close to the root.
|
218 |
+
x1 defaults to x0 + 0.25; x2 to x1 + 0.25.
|
219 |
+
Uses Muller's method that converges towards complex roots.
|
220 |
+
|
221 |
+
Pro:
|
222 |
+
|
223 |
+
* converges fast (somewhat faster than secant)
|
224 |
+
* can find complex roots
|
225 |
+
|
226 |
+
Contra:
|
227 |
+
|
228 |
+
* converges slowly for multiple roots
|
229 |
+
* may have complex values for real starting points and real roots
|
230 |
+
|
231 |
+
http://en.wikipedia.org/wiki/Muller's_method
|
232 |
+
"""
|
233 |
+
maxsteps = 30
|
234 |
+
|
235 |
+
def __init__(self, ctx, f, x0, **kwargs):
|
236 |
+
self.ctx = ctx
|
237 |
+
if len(x0) == 1:
|
238 |
+
self.x0 = x0[0]
|
239 |
+
self.x1 = self.x0 + 0.25
|
240 |
+
self.x2 = self.x1 + 0.25
|
241 |
+
elif len(x0) == 2:
|
242 |
+
self.x0 = x0[0]
|
243 |
+
self.x1 = x0[1]
|
244 |
+
self.x2 = self.x1 + 0.25
|
245 |
+
elif len(x0) == 3:
|
246 |
+
self.x0 = x0[0]
|
247 |
+
self.x1 = x0[1]
|
248 |
+
self.x2 = x0[2]
|
249 |
+
else:
|
250 |
+
raise ValueError('expected 1, 2 or 3 starting points, got %i'
|
251 |
+
% len(x0))
|
252 |
+
self.f = f
|
253 |
+
self.verbose = kwargs['verbose']
|
254 |
+
|
255 |
+
def __iter__(self):
|
256 |
+
f = self.f
|
257 |
+
x0 = self.x0
|
258 |
+
x1 = self.x1
|
259 |
+
x2 = self.x2
|
260 |
+
fx0 = f(x0)
|
261 |
+
fx1 = f(x1)
|
262 |
+
fx2 = f(x2)
|
263 |
+
while True:
|
264 |
+
# TODO: maybe refactoring with function for divided differences
|
265 |
+
# calculate divided differences
|
266 |
+
fx2x1 = (fx1 - fx2) / (x1 - x2)
|
267 |
+
fx2x0 = (fx0 - fx2) / (x0 - x2)
|
268 |
+
fx1x0 = (fx0 - fx1) / (x0 - x1)
|
269 |
+
w = fx2x1 + fx2x0 - fx1x0
|
270 |
+
fx2x1x0 = (fx1x0 - fx2x1) / (x0 - x2)
|
271 |
+
if w == 0 and fx2x1x0 == 0:
|
272 |
+
if self.verbose:
|
273 |
+
print('canceled with')
|
274 |
+
print('x0 =', x0, ', x1 =', x1, 'and x2 =', x2)
|
275 |
+
break
|
276 |
+
x0 = x1
|
277 |
+
fx0 = fx1
|
278 |
+
x1 = x2
|
279 |
+
fx1 = fx2
|
280 |
+
# denominator should be as large as possible => choose sign
|
281 |
+
r = self.ctx.sqrt(w**2 - 4*fx2*fx2x1x0)
|
282 |
+
if abs(w - r) > abs(w + r):
|
283 |
+
r = -r
|
284 |
+
x2 -= 2*fx2 / (w + r)
|
285 |
+
fx2 = f(x2)
|
286 |
+
error = abs(x2 - x1)
|
287 |
+
yield x2, error
|
288 |
+
|
289 |
+
# TODO: consider raising a ValueError when there's no sign change in a and b
|
290 |
+
class Bisection:
|
291 |
+
"""
|
292 |
+
1d-solver generating pairs of approximative root and error.
|
293 |
+
|
294 |
+
Uses bisection method to find a root of f in [a, b].
|
295 |
+
Might fail for multiple roots (needs sign change).
|
296 |
+
|
297 |
+
Pro:
|
298 |
+
|
299 |
+
* robust and reliable
|
300 |
+
|
301 |
+
Contra:
|
302 |
+
|
303 |
+
* converges slowly
|
304 |
+
* needs sign change
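A minimal usage sketch (not a doctest), bracketing the root of `x^3 - 2`::

    findroot(lambda x: x**3 - 2, (1, 2), solver='bisect')
    # the interval [1, 2] is halved each step until the tolerance is met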
|
305 |
+
"""
|
306 |
+
maxsteps = 100
|
307 |
+
|
308 |
+
def __init__(self, ctx, f, x0, **kwargs):
|
309 |
+
self.ctx = ctx
|
310 |
+
if len(x0) != 2:
|
311 |
+
raise ValueError('expected interval of 2 points, got %i' % len(x0))
|
312 |
+
self.f = f
|
313 |
+
self.a = x0[0]
|
314 |
+
self.b = x0[1]
|
315 |
+
|
316 |
+
def __iter__(self):
|
317 |
+
f = self.f
|
318 |
+
a = self.a
|
319 |
+
b = self.b
|
320 |
+
l = b - a
|
321 |
+
fb = f(b)
|
322 |
+
while True:
|
323 |
+
m = self.ctx.ldexp(a + b, -1)
|
324 |
+
fm = f(m)
|
325 |
+
sign = fm * fb
|
326 |
+
if sign < 0:
|
327 |
+
a = m
|
328 |
+
elif sign > 0:
|
329 |
+
b = m
|
330 |
+
fb = fm
|
331 |
+
else:
|
332 |
+
yield m, self.ctx.zero
|
333 |
+
l /= 2
|
334 |
+
yield (a + b)/2, abs(l)
|
335 |
+
|
336 |
+
def _getm(method):
|
337 |
+
"""
|
338 |
+
Return a function to calculate m for Illinois-like methods.
|
339 |
+
"""
|
340 |
+
if method == 'illinois':
|
341 |
+
def getm(fz, fb):
|
342 |
+
return 0.5
|
343 |
+
elif method == 'pegasus':
|
344 |
+
def getm(fz, fb):
|
345 |
+
return fb/(fb + fz)
|
346 |
+
elif method == 'anderson':
|
347 |
+
def getm(fz, fb):
|
348 |
+
m = 1 - fz/fb
|
349 |
+
if m > 0:
|
350 |
+
return m
|
351 |
+
else:
|
352 |
+
return 0.5
|
353 |
+
else:
|
354 |
+
raise ValueError("method '%s' not recognized" % method)
|
355 |
+
return getm
|
356 |
+
|
357 |
+
class Illinois:
|
358 |
+
"""
|
359 |
+
1d-solver generating pairs of approximative root and error.
|
360 |
+
|
361 |
+
Uses Illinois method or similar to find a root of f in [a, b].
|
362 |
+
Might fail for multiple roots (needs sign change).
|
363 |
+
Combines bisect with secant (improved regula falsi).
|
364 |
+
|
365 |
+
The only difference between the methods is the scaling factor m, which is
|
366 |
+
used to ensure convergence (you can choose one using the 'method' keyword):
|
367 |
+
|
368 |
+
Illinois method ('illinois'):
|
369 |
+
m = 0.5
|
370 |
+
|
371 |
+
Pegasus method ('pegasus'):
|
372 |
+
m = fb/(fb + fz)
|
373 |
+
|
374 |
+
Anderson-Bjoerk method ('anderson'):
|
375 |
+
m = 1 - fz/fb if positive else 0.5
|
376 |
+
|
377 |
+
Pro:
|
378 |
+
|
379 |
+
* converges very fast
|
380 |
+
|
381 |
+
Contra:
|
382 |
+
|
383 |
+
* has problems with multiple roots
|
384 |
+
* needs sign change
|
385 |
+
"""
|
386 |
+
maxsteps = 30
|
387 |
+
|
388 |
+
def __init__(self, ctx, f, x0, **kwargs):
|
389 |
+
self.ctx = ctx
|
390 |
+
if len(x0) != 2:
|
391 |
+
raise ValueError('expected interval of 2 points, got %i' % len(x0))
|
392 |
+
self.a = x0[0]
|
393 |
+
self.b = x0[1]
|
394 |
+
self.f = f
|
395 |
+
self.tol = kwargs['tol']
|
396 |
+
self.verbose = kwargs['verbose']
|
397 |
+
self.method = kwargs.get('method', 'illinois')
|
398 |
+
self.getm = _getm(self.method)
|
399 |
+
if self.verbose:
|
400 |
+
print('using %s method' % self.method)
|
401 |
+
|
402 |
+
def __iter__(self):
|
403 |
+
method = self.method
|
404 |
+
f = self.f
|
405 |
+
a = self.a
|
406 |
+
b = self.b
|
407 |
+
fa = f(a)
|
408 |
+
fb = f(b)
|
409 |
+
m = None
|
410 |
+
while True:
|
411 |
+
l = b - a
|
412 |
+
if l == 0:
|
413 |
+
break
|
414 |
+
s = (fb - fa) / l
|
415 |
+
z = a - fa/s
|
416 |
+
fz = f(z)
|
417 |
+
if abs(fz) < self.tol:
|
418 |
+
# TODO: better condition (when f is very flat)
|
419 |
+
if self.verbose:
|
420 |
+
print('canceled with z =', z)
|
421 |
+
yield z, l
|
422 |
+
break
|
423 |
+
if fz * fb < 0: # root in [z, b]
|
424 |
+
a = b
|
425 |
+
fa = fb
|
426 |
+
b = z
|
427 |
+
fb = fz
|
428 |
+
else: # root in [a, z]
|
429 |
+
m = self.getm(fz, fb)
|
430 |
+
b = z
|
431 |
+
fb = fz
|
432 |
+
fa = m*fa # scale down to ensure convergence
|
433 |
+
if self.verbose and m and not method == 'illinois':
|
434 |
+
print('m:', m)
|
435 |
+
yield (a + b)/2, abs(l)
|
436 |
+
|
437 |
+
def Pegasus(*args, **kwargs):
|
438 |
+
"""
|
439 |
+
1d-solver generating pairs of approximative root and error.
|
440 |
+
|
441 |
+
Uses Pegasus method to find a root of f in [a, b].
|
442 |
+
Wrapper for illinois to use method='pegasus'.
|
443 |
+
"""
|
444 |
+
kwargs['method'] = 'pegasus'
|
445 |
+
return Illinois(*args, **kwargs)
|
446 |
+
|
447 |
+
def Anderson(*args, **kwargs):
|
448 |
+
"""
|
449 |
+
1d-solver generating pairs of approximative root and error.
|
450 |
+
|
451 |
+
Uses Anderson-Bjoerk method to find a root of f in [a, b].
|
452 |
+
Wrapper for illinois to use method='anderson'.
|
453 |
+
"""
|
454 |
+
kwargs['method'] = 'anderson'
|
455 |
+
return Illinois(*args, **kwargs)
|
456 |
+
|
457 |
+
# TODO: check whether it's possible to combine it with Illinois stuff
|
458 |
+
class Ridder:
|
459 |
+
"""
|
460 |
+
1d-solver generating pairs of approximative root and error.
|
461 |
+
|
462 |
+
Ridders' method to find a root of f in [a, b].
|
463 |
+
Is told to perform as well as Brent's method while being simpler.
|
464 |
+
|
465 |
+
Pro:
|
466 |
+
|
467 |
+
* very fast
|
468 |
+
* simpler than Brent's method
|
469 |
+
|
470 |
+
Contra:
|
471 |
+
|
472 |
+
* two function evaluations per step
|
473 |
+
* has problems with multiple roots
|
474 |
+
* needs sign change
|
475 |
+
|
476 |
+
http://en.wikipedia.org/wiki/Ridders'_method
|
477 |
+
"""
|
478 |
+
maxsteps = 30
|
479 |
+
|
480 |
+
def __init__(self, ctx, f, x0, **kwargs):
|
481 |
+
self.ctx = ctx
|
482 |
+
self.f = f
|
483 |
+
if len(x0) != 2:
|
484 |
+
raise ValueError('expected interval of 2 points, got %i' % len(x0))
|
485 |
+
self.x1 = x0[0]
|
486 |
+
self.x2 = x0[1]
|
487 |
+
self.verbose = kwargs['verbose']
|
488 |
+
self.tol = kwargs['tol']
|
489 |
+
|
490 |
+
def __iter__(self):
|
491 |
+
ctx = self.ctx
|
492 |
+
f = self.f
|
493 |
+
x1 = self.x1
|
494 |
+
fx1 = f(x1)
|
495 |
+
x2 = self.x2
|
496 |
+
fx2 = f(x2)
|
497 |
+
while True:
|
498 |
+
x3 = 0.5*(x1 + x2)
|
499 |
+
fx3 = f(x3)
|
500 |
+
x4 = x3 + (x3 - x1) * ctx.sign(fx1 - fx2) * fx3 / ctx.sqrt(fx3**2 - fx1*fx2)
|
501 |
+
fx4 = f(x4)
|
502 |
+
if abs(fx4) < self.tol:
|
503 |
+
# TODO: better condition (when f is very flat)
|
504 |
+
if self.verbose:
|
505 |
+
print('canceled with f(x4) =', fx4)
|
506 |
+
yield x4, abs(x1 - x2)
|
507 |
+
break
|
508 |
+
if fx4 * fx2 < 0: # root in [x4, x2]
|
509 |
+
x1 = x4
|
510 |
+
fx1 = fx4
|
511 |
+
else: # root in [x1, x4]
|
512 |
+
x2 = x4
|
513 |
+
fx2 = fx4
|
514 |
+
error = abs(x1 - x2)
|
515 |
+
yield (x1 + x2)/2, error
|
516 |
+
|
517 |
+
class ANewton:
|
518 |
+
"""
|
519 |
+
EXPERIMENTAL 1d-solver generating pairs of approximative root and error.
|
520 |
+
|
521 |
+
Uses Newton's method, modified to use Steffensen's method when convergence is
|
522 |
+
slow (i.e., for multiple roots).
|
523 |
+
"""
|
524 |
+
maxsteps = 20
|
525 |
+
|
526 |
+
def __init__(self, ctx, f, x0, **kwargs):
|
527 |
+
self.ctx = ctx
|
528 |
+
if not len(x0) == 1:
|
529 |
+
raise ValueError('expected 1 starting point, got %i' % len(x0))
|
530 |
+
self.x0 = x0[0]
|
531 |
+
self.f = f
|
532 |
+
if not 'df' in kwargs:
|
533 |
+
def df(x):
|
534 |
+
return self.ctx.diff(f, x)
|
535 |
+
else:
|
536 |
+
df = kwargs['df']
|
537 |
+
self.df = df
|
538 |
+
def phi(x):
|
539 |
+
return x - f(x) / df(x)
|
540 |
+
self.phi = phi
|
541 |
+
self.verbose = kwargs['verbose']
|
542 |
+
|
543 |
+
def __iter__(self):
|
544 |
+
x0 = self.x0
|
545 |
+
f = self.f
|
546 |
+
df = self.df
|
547 |
+
phi = self.phi
|
548 |
+
error = 0
|
549 |
+
counter = 0
|
550 |
+
while True:
|
551 |
+
prevx = x0
|
552 |
+
try:
|
553 |
+
x0 = phi(x0)
|
554 |
+
except ZeroDivisionError:
|
555 |
+
if self.verbose:
|
556 |
+
print('ZeroDivisionError: canceled with x =', x0)
|
557 |
+
break
|
558 |
+
preverror = error
|
559 |
+
error = abs(prevx - x0)
|
560 |
+
# TODO: decide not to use convergence acceleration
|
561 |
+
if error and abs(error - preverror) / error < 1:
|
562 |
+
if self.verbose:
|
563 |
+
print('converging slowly')
|
564 |
+
counter += 1
|
565 |
+
if counter >= 3:
|
566 |
+
# accelerate convergence
|
567 |
+
phi = steffensen(phi)
|
568 |
+
counter = 0
|
569 |
+
if self.verbose:
|
570 |
+
print('accelerating convergence')
|
571 |
+
yield x0, error
|
572 |
+
|
573 |
+
# TODO: add Brent
|
574 |
+
|
575 |
+
############################
|
576 |
+
# MULTIDIMENSIONAL SOLVERS #
|
577 |
+
############################
|
578 |
+
|
579 |
+
def jacobian(ctx, f, x):
|
580 |
+
"""
|
581 |
+
Calculate the Jacobian matrix of a function at the point x0.
|
582 |
+
|
583 |
+
This is the first derivative of a vectorial function:
|
584 |
+
|
585 |
+
f : R^n -> R^m with m >= n
|
586 |
+
"""
|
587 |
+
x = ctx.matrix(x)
|
588 |
+
h = ctx.sqrt(ctx.eps)
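# A step of sqrt(eps) is the standard heuristic for one-sided differences:
# it roughly balances truncation error (~h) against rounding error (~eps/h).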
|
589 |
+
fx = ctx.matrix(f(*x))
|
590 |
+
m = len(fx)
|
591 |
+
n = len(x)
|
592 |
+
J = ctx.matrix(m, n)
|
593 |
+
for j in xrange(n):
|
594 |
+
xj = x.copy()
|
595 |
+
xj[j] += h
|
596 |
+
Jj = (ctx.matrix(f(*xj)) - fx) / h
|
597 |
+
for i in xrange(m):
|
598 |
+
J[i,j] = Jj[i]
|
599 |
+
return J
|
600 |
+
|
601 |
+
# TODO: test with user-specified jacobian matrix
|
602 |
+
class MDNewton:
|
603 |
+
"""
|
604 |
+
Find the root of a vector function numerically using Newton's method.
|
605 |
+
|
606 |
+
f is a vector function representing a nonlinear equation system.
|
607 |
+
|
608 |
+
x0 is the starting point close to the root.
|
609 |
+
|
610 |
+
J is a function returning the Jacobian matrix for a point.
|
611 |
+
|
612 |
+
Supports overdetermined systems.
|
613 |
+
|
614 |
+
Use the 'norm' keyword to specify which norm to use. Defaults to max-norm.
|
615 |
+
The function to calculate the Jacobian matrix can be given using the
|
616 |
+
keyword 'J'. Otherwise it will be calculated numerically.
|
617 |
+
|
618 |
+
Please note that this method converges only locally. Especially for high-
|
619 |
+
dimensional systems it is not trivial to find a good starting point that is
|
620 |
+
close enough to the root.
|
621 |
+
|
622 |
+
It is recommended to use a faster, low-precision solver from SciPy [1] or
|
623 |
+
OpenOpt [2] to get an initial guess. Afterwards you can use this method for
|
624 |
+
root-polishing to any precision.
|
625 |
+
|
626 |
+
[1] http://scipy.org
|
627 |
+
|
628 |
+
[2] http://openopt.org/Welcome
|
629 |
+
"""
|
630 |
+
maxsteps = 10
|
631 |
+
|
632 |
+
def __init__(self, ctx, f, x0, **kwargs):
|
633 |
+
self.ctx = ctx
|
634 |
+
self.f = f
|
635 |
+
if isinstance(x0, (tuple, list)):
|
636 |
+
x0 = ctx.matrix(x0)
|
637 |
+
assert x0.cols == 1, 'need a vector'
|
638 |
+
self.x0 = x0
|
639 |
+
if 'J' in kwargs:
|
640 |
+
self.J = kwargs['J']
|
641 |
+
else:
|
642 |
+
def J(*x):
|
643 |
+
return ctx.jacobian(f, x)
|
644 |
+
self.J = J
|
645 |
+
self.norm = kwargs['norm']
|
646 |
+
self.verbose = kwargs['verbose']
|
647 |
+
|
648 |
+
def __iter__(self):
|
649 |
+
f = self.f
|
650 |
+
x0 = self.x0
|
651 |
+
norm = self.norm
|
652 |
+
J = self.J
|
653 |
+
fx = self.ctx.matrix(f(*x0))
|
654 |
+
fxnorm = norm(fx)
|
655 |
+
cancel = False
|
656 |
+
while not cancel:
|
657 |
+
# get direction of descent
|
658 |
+
fxn = -fx
|
659 |
+
Jx = J(*x0)
|
660 |
+
s = self.ctx.lu_solve(Jx, fxn)
|
661 |
+
if self.verbose:
|
662 |
+
print('Jx:')
|
663 |
+
print(Jx)
|
664 |
+
print('s:', s)
|
665 |
+
# damping step size TODO: better strategy (hard task)
|
666 |
+
l = self.ctx.one
|
667 |
+
x1 = x0 + s
|
668 |
+
while True:
|
669 |
+
if x1 == x0:
|
670 |
+
if self.verbose:
|
671 |
+
print("canceled, won't get more excact")
|
672 |
+
cancel = True
|
673 |
+
break
|
674 |
+
fx = self.ctx.matrix(f(*x1))
|
675 |
+
newnorm = norm(fx)
|
676 |
+
if newnorm < fxnorm:
|
677 |
+
# new x accepted
|
678 |
+
fxnorm = newnorm
|
679 |
+
x0 = x1
|
680 |
+
break
|
681 |
+
l /= 2
|
682 |
+
x1 = x0 + l*s
|
683 |
+
yield (x0, fxnorm)
|
684 |
+
|
685 |
+
#############
|
686 |
+
# UTILITIES #
|
687 |
+
#############
|
688 |
+
|
689 |
+
str2solver = {'newton':Newton, 'secant':Secant, 'mnewton':MNewton,
|
690 |
+
'halley':Halley, 'muller':Muller, 'bisect':Bisection,
|
691 |
+
'illinois':Illinois, 'pegasus':Pegasus, 'anderson':Anderson,
|
692 |
+
'ridder':Ridder, 'anewton':ANewton, 'mdnewton':MDNewton}
|
693 |
+
|
694 |
+
def findroot(ctx, f, x0, solver='secant', tol=None, verbose=False, verify=True, **kwargs):
|
695 |
+
r"""
|
696 |
+
Find an approximate solution to `f(x) = 0`, using *x0* as starting point or
|
697 |
+
interval for *x*.
|
698 |
+
|
699 |
+
Multidimensional overdetermined systems are supported.
|
700 |
+
You can specify them using a function or a list of functions.
|
701 |
+
|
702 |
+
Mathematically speaking, this function returns `x` such that
|
703 |
+
`|f(x)|^2 \leq \mathrm{tol}` is true within the current working precision.
|
704 |
+
If the computed value does not meet this criterion, an exception is raised.
|
705 |
+
This exception can be disabled with *verify=False*.
|
706 |
+
|
707 |
+
For interval arithmetic (``iv.findroot()``), please note that
|
708 |
+
the returned interval ``x`` is not guaranteed to contain `f(x)=0`!
|
709 |
+
It is only some `x` for which `|f(x)|^2 \leq \mathrm{tol}` certainly holds
|
710 |
+
regardless of numerical error. This may be improved in the future.
|
711 |
+
|
712 |
+
**Arguments**
|
713 |
+
|
714 |
+
*f*
|
715 |
+
one dimensional function
|
716 |
+
*x0*
|
717 |
+
starting point, several starting points or interval (depends on solver)
|
718 |
+
*tol*
|
719 |
+
the returned solution has an error smaller than this
|
720 |
+
*verbose*
|
721 |
+
print additional information for each iteration if true
|
722 |
+
*verify*
|
723 |
+
verify the solution and raise a ValueError if `|f(x)|^2 > \mathrm{tol}`
|
724 |
+
*solver*
|
725 |
+
a generator for *f* and *x0* returning approximative solution and error
|
726 |
+
*maxsteps*
|
727 |
+
after how many steps the solver will cancel
|
728 |
+
*df*
|
729 |
+
first derivative of *f* (used by some solvers)
|
730 |
+
*d2f*
|
731 |
+
second derivative of *f* (used by some solvers)
|
732 |
+
*multidimensional*
|
733 |
+
force multidimensional solving
|
734 |
+
*J*
|
735 |
+
Jacobian matrix of *f* (used by multidimensional solvers)
|
736 |
+
*norm*
|
737 |
+
used vector norm (used by multidimensional solvers)
|
738 |
+
|
739 |
+
solver has to be callable with ``(f, x0, **kwargs)`` and return a generator
|
740 |
+
yielding pairs of approximative solution and estimated error (which is
|
741 |
+
expected to be positive).
|
742 |
+
You can use the following string aliases:
|
743 |
+
'secant', 'mnewton', 'halley', 'muller', 'illinois', 'pegasus', 'anderson',
|
744 |
+
'ridder', 'anewton', 'bisect'
|
745 |
+
|
746 |
+
See mpmath.calculus.optimization for their documentation.
|
747 |
+
|
748 |
+
**Examples**
|
749 |
+
|
750 |
+
The function :func:`~mpmath.findroot` locates a root of a given function using the
|
751 |
+
secant method by default. A simple example use of the secant method is to
|
752 |
+
compute `\pi` as the root of `\sin x` closest to `x_0 = 3`::
|
753 |
+
|
754 |
+
>>> from mpmath import *
|
755 |
+
>>> mp.dps = 30; mp.pretty = True
|
756 |
+
>>> findroot(sin, 3)
|
757 |
+
3.14159265358979323846264338328
|
758 |
+
|
759 |
+
The secant method can be used to find complex roots of analytic functions,
|
760 |
+
although it must in that case generally be given a nonreal starting value
|
761 |
+
(or else it will never leave the real line)::
|
762 |
+
|
763 |
+
>>> mp.dps = 15
|
764 |
+
>>> findroot(lambda x: x**3 + 2*x + 1, j)
|
765 |
+
(0.226698825758202 + 1.46771150871022j)
|
766 |
+
|
767 |
+
A nice application is to compute nontrivial roots of the Riemann zeta
|
768 |
+
function with many digits (good initial values are needed for convergence)::
|
769 |
+
|
770 |
+
>>> mp.dps = 30
|
771 |
+
>>> findroot(zeta, 0.5+14j)
|
772 |
+
(0.5 + 14.1347251417346937904572519836j)
|
773 |
+
|
774 |
+
The secant method can also be used as an optimization algorithm, by passing
|
775 |
+
it a derivative of a function. The following example locates the positive
|
776 |
+
minimum of the gamma function::
|
777 |
+
|
778 |
+
>>> mp.dps = 20
|
779 |
+
>>> findroot(lambda x: diff(gamma, x), 1)
|
780 |
+
1.4616321449683623413
|
781 |
+
|
782 |
+
Finally, a useful application is to compute inverse functions, such as the
|
783 |
+
Lambert W function which is the inverse of `w e^w`, given the first
|
784 |
+
term of the solution's asymptotic expansion as the initial value. In basic
|
785 |
+
cases, this gives identical results to mpmath's built-in ``lambertw``
|
786 |
+
function::
|
787 |
+
|
788 |
+
>>> def lambert(x):
|
789 |
+
... return findroot(lambda w: w*exp(w) - x, log(1+x))
|
790 |
+
...
|
791 |
+
>>> mp.dps = 15
|
792 |
+
>>> lambert(1); lambertw(1)
|
793 |
+
0.567143290409784
|
794 |
+
0.567143290409784
|
795 |
+
>>> lambert(1000); lambertw(1000)
|
796 |
+
5.2496028524016
|
797 |
+
5.2496028524016
|
798 |
+
|
799 |
+
Multidimensional functions are also supported::
|
800 |
+
|
801 |
+
>>> f = [lambda x1, x2: x1**2 + x2,
|
802 |
+
... lambda x1, x2: 5*x1**2 - 3*x1 + 2*x2 - 3]
|
803 |
+
>>> findroot(f, (0, 0))
|
804 |
+
[-0.618033988749895]
|
805 |
+
[-0.381966011250105]
|
806 |
+
>>> findroot(f, (10, 10))
|
807 |
+
[ 1.61803398874989]
|
808 |
+
[-2.61803398874989]
|
809 |
+
|
810 |
+
You can verify this by solving the system manually.
|
811 |
+
|
812 |
+
Please note that the following (more general) syntax also works::
|
813 |
+
|
814 |
+
>>> def f(x1, x2):
|
815 |
+
... return x1**2 + x2, 5*x1**2 - 3*x1 + 2*x2 - 3
|
816 |
+
...
|
817 |
+
>>> findroot(f, (0, 0))
|
818 |
+
[-0.618033988749895]
|
819 |
+
[-0.381966011250105]
|
820 |
+
|
821 |
+
|
822 |
+
**Multiple roots**
|
823 |
+
|
824 |
+
For multiple roots all methods of the Newtonian family (including secant)
|
825 |
+
converge slowly. Consider this example::
|
826 |
+
|
827 |
+
>>> f = lambda x: (x - 1)**99
|
828 |
+
>>> findroot(f, 0.9, verify=False)
|
829 |
+
0.918073542444929
|
830 |
+
|
831 |
+
Even for a very close starting point the secant method converges very
|
832 |
+
slowly. Use ``verbose=True`` to illustrate this.
|
833 |
+
|
834 |
+
It is possible to modify Newton's method to make it converge regardless of
|
835 |
+
the root's multiplicity::
|
836 |
+
|
837 |
+
>>> findroot(f, -10, solver='mnewton')
|
838 |
+
1.0
|
839 |
+
|
840 |
+
This variant uses the first and second derivative of the function, which is
|
841 |
+
not very efficient.
|
842 |
+
|
843 |
+
Alternatively you can use an experimental Newtonian solver that keeps track
|
844 |
+
of the speed of convergence and accelerates it using Steffensen's method if
|
845 |
+
necessary::
|
846 |
+
|
847 |
+
>>> findroot(f, -10, solver='anewton', verbose=True)
|
848 |
+
x: -9.88888888888888888889
|
849 |
+
error: 0.111111111111111111111
|
850 |
+
converging slowly
|
851 |
+
x: -9.77890011223344556678
|
852 |
+
error: 0.10998877665544332211
|
853 |
+
converging slowly
|
854 |
+
x: -9.67002233332199662166
|
855 |
+
error: 0.108877778911448945119
|
856 |
+
converging slowly
|
857 |
+
accelerating convergence
|
858 |
+
x: -9.5622443299551077669
|
859 |
+
error: 0.107778003366888854764
|
860 |
+
converging slowly
|
861 |
+
x: 0.99999999999999999214
|
862 |
+
error: 10.562244329955107759
|
863 |
+
x: 1.0
|
864 |
+
error: 7.8598304758094664213e-18
|
865 |
+
ZeroDivisionError: canceled with x = 1.0
|
866 |
+
1.0
|
867 |
+
|
868 |
+
**Complex roots**
|
869 |
+
|
870 |
+
For complex roots it's recommended to use Muller's method as it converges
|
871 |
+
even for real starting points very fast::
|
872 |
+
|
873 |
+
>>> findroot(lambda x: x**4 + x + 1, (0, 1, 2), solver='muller')
|
874 |
+
(0.727136084491197 + 0.934099289460529j)
|
875 |
+
|
876 |
+
|
877 |
+
**Intersection methods**
|
878 |
+
|
879 |
+
When you need to find a root in a known interval, it's highly recommended to
|
880 |
+
use an intersection-based solver like ``'anderson'`` or ``'ridder'``.
|
881 |
+
Usually they converge faster and more reliably. They do, however, have problems
|
882 |
+
with multiple roots and usually need a sign change to find a root::
|
883 |
+
|
884 |
+
>>> findroot(lambda x: x**3, (-1, 1), solver='anderson')
|
885 |
+
0.0
|
886 |
+
|
887 |
+
Be careful with symmetric functions::
|
888 |
+
|
889 |
+
>>> findroot(lambda x: x**2, (-1, 1), solver='anderson') #doctest:+ELLIPSIS
|
890 |
+
Traceback (most recent call last):
|
891 |
+
...
|
892 |
+
ZeroDivisionError
|
893 |
+
|
894 |
+
It fails even for better starting points, because there is no sign change::
|
895 |
+
|
896 |
+
>>> findroot(lambda x: x**2, (-1, .5), solver='anderson')
|
897 |
+
Traceback (most recent call last):
|
898 |
+
...
|
899 |
+
ValueError: Could not find root within given tolerance. (1.0 > 2.16840434497100886801e-19)
|
900 |
+
Try another starting point or tweak arguments.
|
901 |
+
|
902 |
+
"""
|
903 |
+
prec = ctx.prec
|
904 |
+
try:
|
905 |
+
ctx.prec += 20
|
906 |
+
|
907 |
+
# initialize arguments
|
908 |
+
if tol is None:
|
909 |
+
tol = ctx.eps * 2**10
|
910 |
+
|
911 |
+
kwargs['verbose'] = kwargs.get('verbose', verbose)
|
912 |
+
|
913 |
+
if 'd1f' in kwargs:
|
914 |
+
kwargs['df'] = kwargs['d1f']
|
915 |
+
|
916 |
+
kwargs['tol'] = tol
|
917 |
+
if isinstance(x0, (list, tuple)):
|
918 |
+
x0 = [ctx.convert(x) for x in x0]
|
919 |
+
else:
|
920 |
+
x0 = [ctx.convert(x0)]
|
921 |
+
|
922 |
+
if isinstance(solver, str):
|
923 |
+
try:
|
924 |
+
solver = str2solver[solver]
|
925 |
+
except KeyError:
|
926 |
+
raise ValueError('could not recognize solver')
|
927 |
+
|
928 |
+
# accept list of functions
|
929 |
+
if isinstance(f, (list, tuple)):
|
930 |
+
f2 = copy(f)
|
931 |
+
def tmp(*args):
|
932 |
+
return [fn(*args) for fn in f2]
|
933 |
+
f = tmp
|
934 |
+
|
935 |
+
# detect multidimensional functions
|
936 |
+
try:
|
937 |
+
fx = f(*x0)
|
938 |
+
multidimensional = isinstance(fx, (list, tuple, ctx.matrix))
|
939 |
+
except TypeError:
|
940 |
+
fx = f(x0[0])
|
941 |
+
multidimensional = False
|
942 |
+
if 'multidimensional' in kwargs:
|
943 |
+
multidimensional = kwargs['multidimensional']
|
944 |
+
if multidimensional:
|
945 |
+
# only one multidimensional solver available at the moment
|
946 |
+
solver = MDNewton
|
947 |
+
if not 'norm' in kwargs:
|
948 |
+
norm = lambda x: ctx.norm(x, 'inf')
|
949 |
+
kwargs['norm'] = norm
|
950 |
+
else:
|
951 |
+
norm = kwargs['norm']
|
952 |
+
else:
|
953 |
+
norm = abs
|
954 |
+
|
955 |
+
# happily return starting point if it's a root
|
956 |
+
if norm(fx) == 0:
|
957 |
+
if multidimensional:
|
958 |
+
return ctx.matrix(x0)
|
959 |
+
else:
|
960 |
+
return x0[0]
|
961 |
+
|
962 |
+
# use solver
|
963 |
+
iterations = solver(ctx, f, x0, **kwargs)
|
964 |
+
if 'maxsteps' in kwargs:
|
965 |
+
maxsteps = kwargs['maxsteps']
|
966 |
+
else:
|
967 |
+
maxsteps = iterations.maxsteps
|
968 |
+
i = 0
|
969 |
+
for x, error in iterations:
|
970 |
+
if verbose:
|
971 |
+
print('x: ', x)
|
972 |
+
print('error:', error)
|
973 |
+
i += 1
|
974 |
+
if error < tol * max(1, norm(x)) or i >= maxsteps:
|
975 |
+
break
|
976 |
+
else:
|
977 |
+
if not i:
|
978 |
+
raise ValueError('Could not find root using the given solver.\n'
|
979 |
+
'Try another starting point or tweak arguments.')
|
980 |
+
if not isinstance(x, (list, tuple, ctx.matrix)):
|
981 |
+
xl = [x]
|
982 |
+
else:
|
983 |
+
xl = x
|
984 |
+
if verify and norm(f(*xl))**2 > tol: # TODO: better condition?
|
985 |
+
raise ValueError('Could not find root within given tolerance. '
|
986 |
+
'(%s > %s)\n'
|
987 |
+
'Try another starting point or tweak arguments.'
|
988 |
+
% (norm(f(*xl))**2, tol))
|
989 |
+
return x
|
990 |
+
finally:
|
991 |
+
ctx.prec = prec
|
992 |
+
|
993 |
+
|
994 |
+
def multiplicity(ctx, f, root, tol=None, maxsteps=10, **kwargs):
|
995 |
+
"""
|
996 |
+
Return the multiplicity of a given root of f.
|
997 |
+
|
998 |
+
Internally, numerical derivatives are used. This might be inefficient for
|
999 |
+
higher order derivatives. Due to this, ``multiplicity`` cancels after
|
1000 |
+
evaluating 10 derivatives by default. You can specify the n-th derivative
|
1001 |
+
using the dnf keyword.
|
1002 |
+
|
1003 |
+
>>> from mpmath import *
|
1004 |
+
>>> multiplicity(lambda x: sin(x) - 1, pi/2)
|
1005 |
+
2
|
1006 |
+
|
1007 |
+
"""
|
1008 |
+
if tol is None:
|
1009 |
+
tol = ctx.eps ** 0.8
|
1010 |
+
kwargs['d0f'] = f
|
1011 |
+
for i in xrange(maxsteps):
|
1012 |
+
dfstr = 'd' + str(i) + 'f'
|
1013 |
+
if dfstr in kwargs:
|
1014 |
+
df = kwargs[dfstr]
|
1015 |
+
else:
|
1016 |
+
df = lambda x: ctx.diff(f, x, i)
|
1017 |
+
if not abs(df(root)) < tol:
|
1018 |
+
break
|
1019 |
+
return i
|
1020 |
+
|
1021 |
+
def steffensen(f):
|
1022 |
+
"""
|
1023 |
+
linear convergent function -> quadratic convergent function
|
1024 |
+
|
1025 |
+
Steffensen's method for quadratic convergence of a linearly converging
|
1026 |
+
sequence.
|
1027 |
+
Do not use it for higher rates of convergence.
|
1028 |
+
It may even work for divergent sequences.
|
1029 |
+
|
1030 |
+
Definition:
|
1031 |
+
F(x) = (x*f(f(x)) - f(x)**2) / (f(f(x)) - 2*f(x) + x)
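This is Aitken's delta-squared extrapolation applied to the fixed-point map;
an equivalent form is F(x) = x - (f(x) - x)**2 / (f(f(x)) - 2*f(x) + x).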
|
1032 |
+
|
1033 |
+
Example
|
1034 |
+
.......
|
1035 |
+
|
1036 |
+
You can use Steffensen's method to accelerate a fixpoint iteration of linear
|
1037 |
+
(or less) convergence.
|
1038 |
+
|
1039 |
+
x* is a fixpoint of the iteration x_{k+1} = phi(x_k) if x* = phi(x*). For
|
1040 |
+
phi(x) = x**2 there are two fixpoints: 0 and 1.
|
1041 |
+
|
1042 |
+
Let's try Steffensen's method:
|
1043 |
+
|
1044 |
+
>>> f = lambda x: x**2
|
1045 |
+
>>> from mpmath.calculus.optimization import steffensen
|
1046 |
+
>>> F = steffensen(f)
|
1047 |
+
>>> for x in [0.5, 0.9, 2.0]:
|
1048 |
+
... fx = Fx = x
|
1049 |
+
... for i in xrange(9):
|
1050 |
+
... try:
|
1051 |
+
... fx = f(fx)
|
1052 |
+
... except OverflowError:
|
1053 |
+
... pass
|
1054 |
+
... try:
|
1055 |
+
... Fx = F(Fx)
|
1056 |
+
... except ZeroDivisionError:
|
1057 |
+
... pass
|
1058 |
+
... print('%20g %20g' % (fx, Fx))
|
1059 |
+
0.25 -0.5
|
1060 |
+
0.0625 0.1
|
1061 |
+
0.00390625 -0.0011236
|
1062 |
+
1.52588e-05 1.41691e-09
|
1063 |
+
2.32831e-10 -2.84465e-27
|
1064 |
+
5.42101e-20 2.30189e-80
|
1065 |
+
2.93874e-39 -1.2197e-239
|
1066 |
+
8.63617e-78 0
|
1067 |
+
7.45834e-155 0
|
1068 |
+
0.81 1.02676
|
1069 |
+
0.6561 1.00134
|
1070 |
+
0.430467 1
|
1071 |
+
0.185302 1
|
1072 |
+
0.0343368 1
|
1073 |
+
0.00117902 1
|
1074 |
+
1.39008e-06 1
|
1075 |
+
1.93233e-12 1
|
1076 |
+
3.73392e-24 1
|
1077 |
+
4 1.6
|
1078 |
+
16 1.2962
|
1079 |
+
256 1.10194
|
1080 |
+
65536 1.01659
|
1081 |
+
4.29497e+09 1.00053
|
1082 |
+
1.84467e+19 1
|
1083 |
+
3.40282e+38 1
|
1084 |
+
1.15792e+77 1
|
1085 |
+
1.34078e+154 1
|
1086 |
+
|
1087 |
+
Unmodified, the iteration converges only towards 0. Modified it converges
|
1088 |
+
not only much faster, it converges even to the repelling fixpoint 1.
|
1089 |
+
"""
|
1090 |
+
def F(x):
|
1091 |
+
fx = f(x)
|
1092 |
+
ffx = f(fx)
|
1093 |
+
return (x*ffx - fx**2) / (ffx - 2*fx + x)
|
1094 |
+
return F
|
1095 |
+
|
1096 |
+
OptimizationMethods.jacobian = jacobian
|
1097 |
+
OptimizationMethods.findroot = findroot
|
1098 |
+
OptimizationMethods.multiplicity = multiplicity
|
1099 |
+
|
1100 |
+
if __name__ == '__main__':
|
1101 |
+
import doctest
|
1102 |
+
doctest.testmod()
|
env-llmeval/lib/python3.10/site-packages/mpmath/calculus/polynomials.py
ADDED
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from ..libmp.backend import xrange
|
2 |
+
from .calculus import defun
|
3 |
+
|
4 |
+
#----------------------------------------------------------------------------#
|
5 |
+
# Polynomials #
|
6 |
+
#----------------------------------------------------------------------------#
|
7 |
+
|
8 |
+
# XXX: extra precision
|
9 |
+
@defun
|
10 |
+
def polyval(ctx, coeffs, x, derivative=False):
|
11 |
+
r"""
|
12 |
+
Given coefficients `[c_n, \ldots, c_2, c_1, c_0]` and a number `x`,
|
13 |
+
:func:`~mpmath.polyval` evaluates the polynomial
|
14 |
+
|
15 |
+
.. math ::
|
16 |
+
|
17 |
+
P(x) = c_n x^n + \ldots + c_2 x^2 + c_1 x + c_0.
|
18 |
+
|
19 |
+
If *derivative=True* is set, :func:`~mpmath.polyval` simultaneously
|
20 |
+
evaluates `P(x)` with the derivative, `P'(x)`, and returns the
|
21 |
+
tuple `(P(x), P'(x))`.
|
22 |
+
|
23 |
+
>>> from mpmath import *
|
24 |
+
>>> mp.pretty = True
|
25 |
+
>>> polyval([3, 0, 2], 0.5)
|
26 |
+
2.75
|
27 |
+
>>> polyval([3, 0, 2], 0.5, derivative=True)
|
28 |
+
(2.75, 3.0)
|
29 |
+
|
30 |
+
The coefficients and the evaluation point may be any combination
|
31 |
+
of real or complex numbers.
|
32 |
+
"""
|
33 |
+
if not coeffs:
|
34 |
+
return ctx.zero
|
35 |
+
p = ctx.convert(coeffs[0])
|
36 |
+
q = ctx.zero
|
37 |
+
for c in coeffs[1:]:
|
38 |
+
if derivative:
|
39 |
+
q = p + x*q
|
40 |
+
p = c + x*p
|
41 |
+
if derivative:
|
42 |
+
return p, q
|
43 |
+
else:
|
44 |
+
return p
|
45 |
+
|
46 |
+
@defun
|
47 |
+
def polyroots(ctx, coeffs, maxsteps=50, cleanup=True, extraprec=10,
|
48 |
+
error=False, roots_init=None):
|
49 |
+
"""
|
50 |
+
Computes all roots (real or complex) of a given polynomial.
|
51 |
+
|
52 |
+
The roots are returned as a sorted list, where real roots appear first
|
53 |
+
followed by complex conjugate roots as adjacent elements. The polynomial
|
54 |
+
should be given as a list of coefficients, in the format used by
|
55 |
+
:func:`~mpmath.polyval`. The leading coefficient must be nonzero.
|
56 |
+
|
57 |
+
With *error=True*, :func:`~mpmath.polyroots` returns a tuple *(roots, err)*
|
58 |
+
where *err* is an estimate of the maximum error among the computed roots.
|
59 |
+
|
60 |
+
**Examples**
|
61 |
+
|
62 |
+
Finding the three real roots of `x^3 - x^2 - 14x + 24`::
|
63 |
+
|
64 |
+
>>> from mpmath import *
|
65 |
+
>>> mp.dps = 15; mp.pretty = True
|
66 |
+
>>> nprint(polyroots([1,-1,-14,24]), 4)
|
67 |
+
[-4.0, 2.0, 3.0]
|
68 |
+
|
69 |
+
Finding the two complex conjugate roots of `4x^2 + 3x + 2`, with an
|
70 |
+
error estimate::
|
71 |
+
|
72 |
+
>>> roots, err = polyroots([4,3,2], error=True)
|
73 |
+
>>> for r in roots:
|
74 |
+
... print(r)
|
75 |
+
...
|
76 |
+
(-0.375 + 0.59947894041409j)
|
77 |
+
(-0.375 - 0.59947894041409j)
|
78 |
+
>>>
|
79 |
+
>>> err
|
80 |
+
2.22044604925031e-16
|
81 |
+
>>>
|
82 |
+
>>> polyval([4,3,2], roots[0])
|
83 |
+
(2.22044604925031e-16 + 0.0j)
|
84 |
+
>>> polyval([4,3,2], roots[1])
|
85 |
+
(2.22044604925031e-16 + 0.0j)
|
86 |
+
|
87 |
+
The following example computes all the 5th roots of unity; that is,
|
88 |
+
the roots of `x^5 - 1`::
|
89 |
+
|
90 |
+
>>> mp.dps = 20
|
91 |
+
>>> for r in polyroots([1, 0, 0, 0, 0, -1]):
|
92 |
+
... print(r)
|
93 |
+
...
|
94 |
+
1.0
|
95 |
+
(-0.8090169943749474241 + 0.58778525229247312917j)
|
96 |
+
(-0.8090169943749474241 - 0.58778525229247312917j)
|
97 |
+
(0.3090169943749474241 + 0.95105651629515357212j)
|
98 |
+
(0.3090169943749474241 - 0.95105651629515357212j)
|
99 |
+
|
100 |
+
**Precision and conditioning**
|
101 |
+
|
102 |
+
The roots are computed to the current working precision accuracy. If this
|
103 |
+
accuracy cannot be achieved in ``maxsteps`` steps, then a
|
104 |
+
``NoConvergence`` exception is raised. The algorithm internally is using
|
105 |
+
the current working precision extended by ``extraprec``. If
|
106 |
+
``NoConvergence`` was raised, that is caused either by not having enough
|
107 |
+
extra precision to achieve convergence (in which case increasing
|
108 |
+
``extraprec`` should fix the problem) or too low ``maxsteps`` (in which
|
109 |
+
case increasing ``maxsteps`` should fix the problem), or a combination of
|
110 |
+
both.
|
111 |
+
|
112 |
+
The user should always do a convergence study with regards to
|
113 |
+
``extraprec`` to ensure accurate results. It is possible to get
|
114 |
+
convergence to a wrong answer with too low ``extraprec``.
|
115 |
+
|
116 |
+
Provided there are no repeated roots, :func:`~mpmath.polyroots` can
|
117 |
+
typically compute all roots of an arbitrary polynomial to high precision::
|
118 |
+
|
119 |
+
>>> mp.dps = 60
|
120 |
+
>>> for r in polyroots([1, 0, -10, 0, 1]):
|
121 |
+
... print(r)
|
122 |
+
...
|
123 |
+
-3.14626436994197234232913506571557044551247712918732870123249
|
124 |
+
-0.317837245195782244725757617296174288373133378433432554879127
|
125 |
+
0.317837245195782244725757617296174288373133378433432554879127
|
126 |
+
3.14626436994197234232913506571557044551247712918732870123249
|
127 |
+
>>>
|
128 |
+
>>> sqrt(3) + sqrt(2)
|
129 |
+
3.14626436994197234232913506571557044551247712918732870123249
|
130 |
+
>>> sqrt(3) - sqrt(2)
|
131 |
+
0.317837245195782244725757617296174288373133378433432554879127
|
132 |
+
|
133 |
+
**Algorithm**
|
134 |
+
|
135 |
+
:func:`~mpmath.polyroots` implements the Durand-Kerner method [1], which
|
136 |
+
uses complex arithmetic to locate all roots simultaneously.
|
137 |
+
The Durand-Kerner method can be viewed as approximately performing
|
138 |
+
simultaneous Newton iteration for all the roots. In particular,
|
139 |
+
the convergence to simple roots is quadratic, just like Newton's
|
140 |
+
method.
|
141 |
+
|
142 |
+
Although all roots are internally calculated using complex arithmetic, any
|
143 |
+
root found to have an imaginary part smaller than the estimated numerical
|
144 |
+
error is truncated to a real number (small real parts are also chopped).
|
145 |
+
Real roots are placed first in the returned list, sorted by value. The
|
146 |
+
remaining complex roots are sorted by their real parts so that conjugate
|
147 |
+
roots end up next to each other.
|
148 |
+
|
149 |
+
**References**
|
150 |
+
|
151 |
+
1. http://en.wikipedia.org/wiki/Durand-Kerner_method
|
152 |
+
|
153 |
+
"""
|
154 |
+
if len(coeffs) <= 1:
|
155 |
+
if not coeffs or not coeffs[0]:
|
156 |
+
raise ValueError("Input to polyroots must not be the zero polynomial")
|
157 |
+
# Constant polynomial with no roots
|
158 |
+
return []
|
159 |
+
|
160 |
+
orig = ctx.prec
|
161 |
+
tol = +ctx.eps
|
162 |
+
with ctx.extraprec(extraprec):
|
163 |
+
deg = len(coeffs) - 1
|
164 |
+
# Must be monic
|
165 |
+
lead = ctx.convert(coeffs[0])
|
166 |
+
if lead == 1:
|
167 |
+
coeffs = [ctx.convert(c) for c in coeffs]
|
168 |
+
else:
|
169 |
+
coeffs = [c/lead for c in coeffs]
|
170 |
+
f = lambda x: ctx.polyval(coeffs, x)
|
171 |
+
if roots_init is None:
|
172 |
+
roots = [ctx.mpc((0.4+0.9j)**n) for n in xrange(deg)]
|
173 |
+
else:
|
174 |
+
roots = [None]*deg;
|
175 |
+
deg_init = min(deg, len(roots_init))
|
176 |
+
roots[:deg_init] = list(roots_init[:deg_init])
|
177 |
+
roots[deg_init:] = [ctx.mpc((0.4+0.9j)**n) for n
|
178 |
+
in xrange(deg_init,deg)]
|
179 |
+
err = [ctx.one for n in xrange(deg)]
|
180 |
+
# Durand-Kerner iteration until convergence
|
181 |
+
for step in xrange(maxsteps):
|
182 |
+
if abs(max(err)) < tol:
|
183 |
+
break
|
184 |
+
for i in xrange(deg):
|
185 |
+
p = roots[i]
|
186 |
+
x = f(p)
|
187 |
+
for j in range(deg):
|
188 |
+
if i != j:
|
189 |
+
try:
|
190 |
+
x /= (p-roots[j])
|
191 |
+
except ZeroDivisionError:
|
192 |
+
continue
|
193 |
+
roots[i] = p - x
|
194 |
+
err[i] = abs(x)
|
195 |
+
if abs(max(err)) >= tol:
|
196 |
+
raise ctx.NoConvergence("Didn't converge in maxsteps=%d steps." \
|
197 |
+
% maxsteps)
|
198 |
+
# Remove small real or imaginary parts
|
199 |
+
if cleanup:
|
200 |
+
for i in xrange(deg):
|
201 |
+
if abs(roots[i]) < tol:
|
202 |
+
roots[i] = ctx.zero
|
203 |
+
elif abs(ctx._im(roots[i])) < tol:
|
204 |
+
roots[i] = roots[i].real
|
205 |
+
elif abs(ctx._re(roots[i])) < tol:
|
206 |
+
roots[i] = roots[i].imag * 1j
|
207 |
+
roots.sort(key=lambda x: (abs(ctx._im(x)), ctx._re(x)))
|
208 |
+
if error:
|
209 |
+
err = max(err)
|
210 |
+
err = max(err, ctx.ldexp(1, -orig+1))
|
211 |
+
return [+r for r in roots], +err
|
212 |
+
else:
|
213 |
+
return [+r for r in roots]
|
env-llmeval/lib/python3.10/site-packages/mpmath/calculus/quadrature.py
ADDED
@@ -0,0 +1,1115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import math
|
2 |
+
|
3 |
+
from ..libmp.backend import xrange
|
4 |
+
|
5 |
+
class QuadratureRule(object):
|
6 |
+
"""
|
7 |
+
Quadrature rules are implemented using this class, in order to
|
8 |
+
simplify the code and provide a common infrastructure
|
9 |
+
for tasks such as error estimation and node caching.
|
10 |
+
|
11 |
+
You can implement a custom quadrature rule by subclassing
|
12 |
+
:class:`QuadratureRule` and implementing the appropriate
|
13 |
+
methods. The subclass can then be used by :func:`~mpmath.quad` by
|
14 |
+
passing it as the *method* argument.
|
15 |
+
|
16 |
+
:class:`QuadratureRule` instances are supposed to be singletons.
|
17 |
+
:class:`QuadratureRule` therefore implements instance caching
|
18 |
+
in :func:`~mpmath.__new__`.
|
19 |
+
"""
|
20 |
+
|
21 |
+
def __init__(self, ctx):
|
22 |
+
self.ctx = ctx
|
23 |
+
self.standard_cache = {}
|
24 |
+
self.transformed_cache = {}
|
25 |
+
self.interval_count = {}
|
26 |
+
|
27 |
+
def clear(self):
|
28 |
+
"""
|
29 |
+
Delete cached node data.
|
30 |
+
"""
|
31 |
+
self.standard_cache = {}
|
32 |
+
self.transformed_cache = {}
|
33 |
+
self.interval_count = {}
|
34 |
+
|
35 |
+
def calc_nodes(self, degree, prec, verbose=False):
|
36 |
+
r"""
|
37 |
+
Compute nodes for the standard interval `[-1, 1]`. Subclasses
|
38 |
+
should probably implement only this method, and use
|
39 |
+
:func:`~mpmath.get_nodes` method to retrieve the nodes.
|
40 |
+
"""
|
41 |
+
raise NotImplementedError
|
42 |
+
|
43 |
+
def get_nodes(self, a, b, degree, prec, verbose=False):
|
44 |
+
"""
|
45 |
+
Return nodes for given interval, degree and precision. The
|
46 |
+
nodes are retrieved from a cache if already computed;
|
47 |
+
otherwise they are computed by calling :func:`~mpmath.calc_nodes`
|
48 |
+
and are then cached.
|
49 |
+
|
50 |
+
Subclasses should probably not implement this method,
|
51 |
+
but just implement :func:`~mpmath.calc_nodes` for the actual
|
52 |
+
node computation.
|
53 |
+
"""
|
54 |
+
key = (a, b, degree, prec)
|
55 |
+
if key in self.transformed_cache:
|
56 |
+
return self.transformed_cache[key]
|
57 |
+
orig = self.ctx.prec
|
58 |
+
try:
|
59 |
+
self.ctx.prec = prec+20
|
60 |
+
# Get nodes on standard interval
|
61 |
+
if (degree, prec) in self.standard_cache:
|
62 |
+
nodes = self.standard_cache[degree, prec]
|
63 |
+
else:
|
64 |
+
nodes = self.calc_nodes(degree, prec, verbose)
|
65 |
+
self.standard_cache[degree, prec] = nodes
|
66 |
+
# Transform to general interval
|
67 |
+
nodes = self.transform_nodes(nodes, a, b, verbose)
|
68 |
+
if key in self.interval_count:
|
69 |
+
self.transformed_cache[key] = nodes
|
70 |
+
else:
|
71 |
+
self.interval_count[key] = True
|
72 |
+
finally:
|
73 |
+
self.ctx.prec = orig
|
74 |
+
return nodes
|
75 |
+
|
76 |
+
def transform_nodes(self, nodes, a, b, verbose=False):
|
77 |
+
r"""
|
78 |
+
Rescale standardized nodes (for `[-1, 1]`) to a general
|
79 |
+
interval `[a, b]`. For a finite interval, a simple linear
|
80 |
+
change of variables is used. Otherwise, the following
|
81 |
+
transformations are used:
|
82 |
+
|
83 |
+
.. math ::
|
84 |
+
|
85 |
+
\lbrack a, \infty \rbrack : t = \frac{1}{x} + (a-1)
|
86 |
+
|
87 |
+
\lbrack -\infty, b \rbrack : t = (b+1) - \frac{1}{x}
|
88 |
+
|
89 |
+
\lbrack -\infty, \infty \rbrack : t = \frac{x}{\sqrt{1-x^2}}
|
90 |
+
|
91 |
+
"""
|
92 |
+
ctx = self.ctx
|
93 |
+
a = ctx.convert(a)
|
94 |
+
b = ctx.convert(b)
|
95 |
+
one = ctx.one
|
96 |
+
if (a, b) == (-one, one):
|
97 |
+
return nodes
|
98 |
+
half = ctx.mpf(0.5)
|
99 |
+
new_nodes = []
|
100 |
+
if ctx.isinf(a) or ctx.isinf(b):
|
101 |
+
if (a, b) == (ctx.ninf, ctx.inf):
|
102 |
+
p05 = -half
|
103 |
+
for x, w in nodes:
|
104 |
+
x2 = x*x
|
105 |
+
px1 = one-x2
|
106 |
+
spx1 = px1**p05
|
107 |
+
x = x*spx1
|
108 |
+
w *= spx1/px1
|
109 |
+
new_nodes.append((x, w))
|
110 |
+
elif a == ctx.ninf:
|
111 |
+
b1 = b+1
|
112 |
+
for x, w in nodes:
|
113 |
+
u = 2/(x+one)
|
114 |
+
x = b1-u
|
115 |
+
w *= half*u**2
|
116 |
+
new_nodes.append((x, w))
|
117 |
+
elif b == ctx.inf:
|
118 |
+
a1 = a-1
|
119 |
+
for x, w in nodes:
|
120 |
+
u = 2/(x+one)
|
121 |
+
x = a1+u
|
122 |
+
w *= half*u**2
|
123 |
+
new_nodes.append((x, w))
|
124 |
+
elif a == ctx.inf or b == ctx.ninf:
|
125 |
+
return [(x,-w) for (x,w) in self.transform_nodes(nodes, b, a, verbose)]
|
126 |
+
else:
|
127 |
+
raise NotImplementedError
|
128 |
+
else:
|
129 |
+
# Simple linear change of variables
|
130 |
+
C = (b-a)/2
|
131 |
+
D = (b+a)/2
|
132 |
+
for x, w in nodes:
|
133 |
+
new_nodes.append((D+C*x, C*w))
|
134 |
+
return new_nodes
|
135 |
+
|
136 |
+
def guess_degree(self, prec):
|
137 |
+
"""
|
138 |
+
Given a desired precision `p` in bits, estimate the degree `m`
|
139 |
+
of the quadrature required to accomplish full accuracy for
|
140 |
+
typical integrals. By default, :func:`~mpmath.quad` will perform up
|
141 |
+
to `m` iterations. The value of `m` should be a slight
|
142 |
+
overestimate, so that "slightly bad" integrals can be dealt
|
143 |
+
with automatically using a few extra iterations. On the
|
144 |
+
other hand, it should not be too big, so :func:`~mpmath.quad` can
|
145 |
+
quit within a reasonable amount of time when it is given
|
146 |
+
an "unsolvable" integral.
|
147 |
+
|
148 |
+
The default formula used by :func:`~mpmath.guess_degree` is tuned
|
149 |
+
for both :class:`TanhSinh` and :class:`GaussLegendre`.
|
150 |
+
The output is roughly as follows:
|
151 |
+
|
152 |
+
+---------+---------+
|
153 |
+
| `p` | `m` |
|
154 |
+
+=========+=========+
|
155 |
+
| 50 | 6 |
|
156 |
+
+---------+---------+
|
157 |
+
| 100 | 7 |
|
158 |
+
+---------+---------+
|
159 |
+
| 500 | 10 |
|
160 |
+
+---------+---------+
|
161 |
+
| 3000 | 12 |
|
162 |
+
+---------+---------+
|
163 |
+
|
164 |
+
This formula is based purely on a limited amount of
|
165 |
+
experimentation and will sometimes be wrong.
|
166 |
+
"""
|
167 |
+
# Expected degree
|
168 |
+
# XXX: use mag
|
169 |
+
g = int(4 + max(0, self.ctx.log(prec/30.0, 2)))
|
170 |
+
# Reasonable "worst case"
|
171 |
+
g += 2
|
172 |
+
return g
|
173 |
+
|
174 |
+
def estimate_error(self, results, prec, epsilon):
|
175 |
+
r"""
|
176 |
+
Given results from integrations `[I_1, I_2, \ldots, I_k]` done
|
177 |
+
with a quadrature of rule of degree `1, 2, \ldots, k`, estimate
|
178 |
+
the error of `I_k`.
|
179 |
+
|
180 |
+
For `k = 2`, we estimate `|I_{\infty}-I_2|` as `|I_2-I_1|`.
|
181 |
+
|
182 |
+
For `k > 2`, we extrapolate `|I_{\infty}-I_k| \approx |I_{k+1}-I_k|`
|
183 |
+
from `|I_k-I_{k-1}|` and `|I_k-I_{k-2}|` under the assumption
|
184 |
+
that each degree increment roughly doubles the accuracy of
|
185 |
+
the quadrature rule (this is true for both :class:`TanhSinh`
|
186 |
+
and :class:`GaussLegendre`). The extrapolation formula is given
|
187 |
+
by Borwein, Bailey & Girgensohn. Although not very conservative,
|
188 |
+
this method seems to be very robust in practice.
|
189 |
+
"""
|
190 |
+
if len(results) == 2:
|
191 |
+
return abs(results[0]-results[1])
|
192 |
+
try:
|
193 |
+
if results[-1] == results[-2] == results[-3]:
|
194 |
+
return self.ctx.zero
|
195 |
+
D1 = self.ctx.log(abs(results[-1]-results[-2]), 10)
|
196 |
+
D2 = self.ctx.log(abs(results[-1]-results[-3]), 10)
|
197 |
+
except ValueError:
|
198 |
+
return epsilon
|
199 |
+
D3 = -prec
|
200 |
+
D4 = min(0, max(D1**2/D2, 2*D1, D3))
|
201 |
+
return self.ctx.mpf(10) ** int(D4)
|
202 |
+
|
203 |
+
def summation(self, f, points, prec, epsilon, max_degree, verbose=False):
|
204 |
+
"""
|
205 |
+
Main integration function. Computes the 1D integral over
|
206 |
+
the interval specified by *points*. For each subinterval,
|
207 |
+
performs quadrature of degree from 1 up to *max_degree*
|
208 |
+
until :func:`~mpmath.estimate_error` signals convergence.
|
209 |
+
|
210 |
+
:func:`~mpmath.summation` transforms each subintegration to
|
211 |
+
the standard interval and then calls :func:`~mpmath.sum_next`.
|
212 |
+
"""
|
213 |
+
ctx = self.ctx
|
214 |
+
I = total_err = ctx.zero
|
215 |
+
for i in xrange(len(points)-1):
|
216 |
+
a, b = points[i], points[i+1]
|
217 |
+
if a == b:
|
218 |
+
continue
|
219 |
+
# XXX: we could use a single variable transformation,
|
220 |
+
# but this is not good in practice. We get better accuracy
|
221 |
+
# by having 0 as an endpoint.
|
222 |
+
if (a, b) == (ctx.ninf, ctx.inf):
|
223 |
+
_f = f
|
224 |
+
f = lambda x: _f(-x) + _f(x)
|
225 |
+
a, b = (ctx.zero, ctx.inf)
|
226 |
+
results = []
|
227 |
+
err = ctx.zero
|
228 |
+
for degree in xrange(1, max_degree+1):
|
229 |
+
nodes = self.get_nodes(a, b, degree, prec, verbose)
|
230 |
+
if verbose:
|
231 |
+
print("Integrating from %s to %s (degree %s of %s)" % \
|
232 |
+
(ctx.nstr(a), ctx.nstr(b), degree, max_degree))
|
233 |
+
result = self.sum_next(f, nodes, degree, prec, results, verbose)
|
234 |
+
results.append(result)
|
235 |
+
if degree > 1:
|
236 |
+
err = self.estimate_error(results, prec, epsilon)
|
237 |
+
if verbose:
|
238 |
+
print("Estimated error:", ctx.nstr(err), " epsilon:", ctx.nstr(epsilon), " result: ", ctx.nstr(result))
|
239 |
+
if err <= epsilon:
|
240 |
+
break
|
241 |
+
I += results[-1]
|
242 |
+
total_err += err
|
243 |
+
if total_err > epsilon:
|
244 |
+
if verbose:
|
245 |
+
print("Failed to reach full accuracy. Estimated error:", ctx.nstr(total_err))
|
246 |
+
return I, total_err
|
247 |
+
|
248 |
+
def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
|
249 |
+
r"""
|
250 |
+
Evaluates the step sum `\sum w_k f(x_k)` where the *nodes* list
|
251 |
+
contains the `(w_k, x_k)` pairs.
|
252 |
+
|
253 |
+
:func:`~mpmath.summation` will supply the list *results* of
|
254 |
+
values computed by :func:`~mpmath.sum_next` at previous degrees, in
|
255 |
+
case the quadrature rule is able to reuse them.
|
256 |
+
"""
|
257 |
+
return self.ctx.fdot((w, f(x)) for (x,w) in nodes)
|
258 |
+
|
259 |
+
|
260 |
+
class TanhSinh(QuadratureRule):
|
261 |
+
r"""
|
262 |
+
This class implements "tanh-sinh" or "doubly exponential"
|
263 |
+
quadrature. This quadrature rule is based on the Euler-Maclaurin
|
264 |
+
integral formula. By performing a change of variables involving
|
265 |
+
nested exponentials / hyperbolic functions (hence the name), the
|
266 |
+
derivatives at the endpoints vanish rapidly. Since the error term
|
267 |
+
in the Euler-Maclaurin formula depends on the derivatives at the
|
268 |
+
endpoints, a simple step sum becomes extremely accurate. In
|
269 |
+
practice, this means that doubling the number of evaluation
|
270 |
+
points roughly doubles the number of accurate digits.
|
271 |
+
|
272 |
+
Comparison to Gauss-Legendre:
|
273 |
+
* Initial computation of nodes is usually faster
|
274 |
+
* Handles endpoint singularities better
|
275 |
+
* Handles infinite integration intervals better
|
276 |
+
* Is slower for smooth integrands once nodes have been computed
|
277 |
+
|
278 |
+
The implementation of the tanh-sinh algorithm is based on the
|
279 |
+
description given in Borwein, Bailey & Girgensohn, "Experimentation
|
280 |
+
in Mathematics - Computational Paths to Discovery", A K Peters,
|
281 |
+
2003, pages 312-313. In the present implementation, a few
|
282 |
+
improvements have been made:
|
283 |
+
|
284 |
+
* A more efficient scheme is used to compute nodes (exploiting
|
285 |
+
recurrence for the exponential function)
|
286 |
+
* The nodes are computed successively instead of all at once
|
287 |
+
|
288 |
+
**References**
|
289 |
+
|
290 |
+
* [Bailey]_
|
291 |
+
* http://users.cs.dal.ca/~jborwein/tanh-sinh.pdf
|
292 |
+
|
293 |
+
"""
|
294 |
+
|
295 |
+
def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
|
296 |
+
"""
|
297 |
+
Step sum for tanh-sinh quadrature of degree `m`. We exploit the
|
298 |
+
fact that half of the abscissas at degree `m` are precisely the
|
299 |
+
abscissas from degree `m-1`. Thus reusing the result from
|
300 |
+
the previous level allows a 2x speedup.
|
301 |
+
"""
|
302 |
+
h = self.ctx.mpf(2)**(-degree)
|
303 |
+
# Abscissas overlap, so reusing saves half of the time
|
304 |
+
if previous:
|
305 |
+
S = previous[-1]/(h*2)
|
306 |
+
else:
|
307 |
+
S = self.ctx.zero
|
308 |
+
S += self.ctx.fdot((w,f(x)) for (x,w) in nodes)
|
309 |
+
return h*S
|
310 |
+
|
311 |
+
def calc_nodes(self, degree, prec, verbose=False):
|
312 |
+
r"""
|
313 |
+
The abscissas and weights for tanh-sinh quadrature of degree
|
314 |
+
`m` are given by
|
315 |
+
|
316 |
+
.. math::
|
317 |
+
|
318 |
+
x_k = \tanh(\pi/2 \sinh(t_k))
|
319 |
+
|
320 |
+
w_k = \pi/2 \cosh(t_k) / \cosh(\pi/2 \sinh(t_k))^2
|
321 |
+
|
322 |
+
where `t_k = t_0 + hk` for a step length `h \sim 2^{-m}`. The
|
323 |
+
list of nodes is actually infinite, but the weights die off so
|
324 |
+
rapidly that only a few are needed.
|
325 |
+
"""
|
326 |
+
ctx = self.ctx
|
327 |
+
nodes = []
|
328 |
+
|
329 |
+
extra = 20
|
330 |
+
ctx.prec += extra
|
331 |
+
tol = ctx.ldexp(1, -prec-10)
|
332 |
+
pi4 = ctx.pi/4
|
333 |
+
|
334 |
+
# For simplicity, we work in steps h = 1/2^n, with the first point
|
335 |
+
# offset so that we can reuse the sum from the previous degree
|
336 |
+
|
337 |
+
# We define degree 1 to include the "degree 0" steps, including
|
338 |
+
# the point x = 0. (It doesn't work well otherwise; not sure why.)
|
339 |
+
t0 = ctx.ldexp(1, -degree)
|
340 |
+
if degree == 1:
|
341 |
+
#nodes.append((mpf(0), pi4))
|
342 |
+
#nodes.append((-mpf(0), pi4))
|
343 |
+
nodes.append((ctx.zero, ctx.pi/2))
|
344 |
+
h = t0
|
345 |
+
else:
|
346 |
+
h = t0*2
|
347 |
+
|
348 |
+
# Since h is fixed, we can compute the next exponential
|
349 |
+
# by simply multiplying by exp(h)
|
350 |
+
expt0 = ctx.exp(t0)
|
351 |
+
a = pi4 * expt0
|
352 |
+
b = pi4 / expt0
|
353 |
+
udelta = ctx.exp(h)
|
354 |
+
urdelta = 1/udelta
|
355 |
+
|
356 |
+
for k in xrange(0, 20*2**degree+1):
|
357 |
+
# Reference implementation:
|
358 |
+
# t = t0 + k*h
|
359 |
+
# x = tanh(pi/2 * sinh(t))
|
360 |
+
# w = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2
|
361 |
+
|
362 |
+
# Fast implementation. Note that c = exp(pi/2 * sinh(t))
|
363 |
+
c = ctx.exp(a-b)
|
364 |
+
d = 1/c
|
365 |
+
co = (c+d)/2
|
366 |
+
si = (c-d)/2
|
367 |
+
x = si / co
|
368 |
+
w = (a+b) / co**2
|
369 |
+
diff = abs(x-1)
|
370 |
+
if diff <= tol:
|
371 |
+
break
|
372 |
+
|
373 |
+
nodes.append((x, w))
|
374 |
+
nodes.append((-x, w))
|
375 |
+
|
376 |
+
a *= udelta
|
377 |
+
b *= urdelta
|
378 |
+
|
379 |
+
if verbose and k % 300 == 150:
|
380 |
+
# Note: the number displayed is rather arbitrary. Should
|
381 |
+
# figure out how to print something that looks more like a
|
382 |
+
# percentage
|
383 |
+
print("Calculating nodes:", ctx.nstr(-ctx.log(diff, 10) / prec))
|
384 |
+
|
385 |
+
ctx.prec -= extra
|
386 |
+
return nodes
|
387 |
+
|
388 |
+
|
389 |
+
class GaussLegendre(QuadratureRule):
|
390 |
+
r"""
|
391 |
+
This class implements Gauss-Legendre quadrature, which is
|
392 |
+
exceptionally efficient for polynomials and polynomial-like (i.e.
|
393 |
+
very smooth) integrands.
|
394 |
+
|
395 |
+
The abscissas and weights are given by roots and values of
|
396 |
+
Legendre polynomials, which are the orthogonal polynomials
|
397 |
+
on `[-1, 1]` with respect to the unit weight
|
398 |
+
(see :func:`~mpmath.legendre`).
|
399 |
+
|
400 |
+
In this implementation, we take the "degree" `m` of the quadrature
|
401 |
+
to denote a Gauss-Legendre rule of degree `3 \cdot 2^m` (following
|
402 |
+
Borwein, Bailey & Girgensohn). This way we get quadratic, rather
|
403 |
+
than linear, convergence as the degree is incremented.
|
404 |
+
|
405 |
+
Comparison to tanh-sinh quadrature:
|
406 |
+
* Is faster for smooth integrands once nodes have been computed
|
407 |
+
* Initial computation of nodes is usually slower
|
408 |
+
* Handles endpoint singularities worse
|
409 |
+
* Handles infinite integration intervals worse
|
410 |
+
|
411 |
+
"""
|
412 |
+
|
413 |
+
def calc_nodes(self, degree, prec, verbose=False):
|
414 |
+
r"""
|
415 |
+
Calculates the abscissas and weights for Gauss-Legendre
|
416 |
+
quadrature of degree of given degree (actually `3 \cdot 2^m`).
|
417 |
+
"""
|
418 |
+
ctx = self.ctx
|
419 |
+
# It is important that the epsilon is set lower than the
|
420 |
+
# "real" epsilon
|
421 |
+
epsilon = ctx.ldexp(1, -prec-8)
|
422 |
+
# Fairly high precision might be required for accurate
|
423 |
+
# evaluation of the roots
|
424 |
+
orig = ctx.prec
|
425 |
+
ctx.prec = int(prec*1.5)
|
426 |
+
if degree == 1:
|
427 |
+
x = ctx.sqrt(ctx.mpf(3)/5)
|
428 |
+
w = ctx.mpf(5)/9
|
429 |
+
nodes = [(-x,w),(ctx.zero,ctx.mpf(8)/9),(x,w)]
|
430 |
+
ctx.prec = orig
|
431 |
+
return nodes
|
432 |
+
nodes = []
|
433 |
+
n = 3*2**(degree-1)
|
434 |
+
upto = n//2 + 1
|
435 |
+
for j in xrange(1, upto):
|
436 |
+
# Asymptotic formula for the roots
|
437 |
+
r = ctx.mpf(math.cos(math.pi*(j-0.25)/(n+0.5)))
|
438 |
+
# Newton iteration
|
439 |
+
while 1:
|
440 |
+
t1, t2 = 1, 0
|
441 |
+
# Evaluates the Legendre polynomial using its defining
|
442 |
+
# recurrence relation
|
443 |
+
for j1 in xrange(1,n+1):
|
444 |
+
t3, t2, t1 = t2, t1, ((2*j1-1)*r*t1 - (j1-1)*t2)/j1
|
445 |
+
t4 = n*(r*t1-t2)/(r**2-1)
|
446 |
+
a = t1/t4
|
447 |
+
r = r - a
|
448 |
+
if abs(a) < epsilon:
|
449 |
+
break
|
450 |
+
x = r
|
451 |
+
w = 2/((1-r**2)*t4**2)
|
452 |
+
if verbose and j % 30 == 15:
|
453 |
+
print("Computing nodes (%i of %i)" % (j, upto))
|
454 |
+
nodes.append((x, w))
|
455 |
+
nodes.append((-x, w))
|
456 |
+
ctx.prec = orig
|
457 |
+
return nodes
|
458 |
+
|
459 |
+
class QuadratureMethods(object):
|
460 |
+
|
461 |
+
def __init__(ctx, *args, **kwargs):
|
462 |
+
ctx._gauss_legendre = GaussLegendre(ctx)
|
463 |
+
ctx._tanh_sinh = TanhSinh(ctx)
|
464 |
+
|
465 |
+
def quad(ctx, f, *points, **kwargs):
|
466 |
+
r"""
|
467 |
+
Computes a single, double or triple integral over a given
|
468 |
+
1D interval, 2D rectangle, or 3D cuboid. A basic example::
|
469 |
+
|
470 |
+
>>> from mpmath import *
|
471 |
+
>>> mp.dps = 15; mp.pretty = True
|
472 |
+
>>> quad(sin, [0, pi])
|
473 |
+
2.0
|
474 |
+
|
475 |
+
A basic 2D integral::
|
476 |
+
|
477 |
+
>>> f = lambda x, y: cos(x+y/2)
|
478 |
+
>>> quad(f, [-pi/2, pi/2], [0, pi])
|
479 |
+
4.0
|
480 |
+
|
481 |
+
**Interval format**
|
482 |
+
|
483 |
+
The integration range for each dimension may be specified
|
484 |
+
using a list or tuple. Arguments are interpreted as follows:
|
485 |
+
|
486 |
+
``quad(f, [x1, x2])`` -- calculates
|
487 |
+
`\int_{x_1}^{x_2} f(x) \, dx`
|
488 |
+
|
489 |
+
``quad(f, [x1, x2], [y1, y2])`` -- calculates
|
490 |
+
`\int_{x_1}^{x_2} \int_{y_1}^{y_2} f(x,y) \, dy \, dx`
|
491 |
+
|
492 |
+
``quad(f, [x1, x2], [y1, y2], [z1, z2])`` -- calculates
|
493 |
+
`\int_{x_1}^{x_2} \int_{y_1}^{y_2} \int_{z_1}^{z_2} f(x,y,z)
|
494 |
+
\, dz \, dy \, dx`
|
495 |
+
|
496 |
+
Endpoints may be finite or infinite. An interval descriptor
|
497 |
+
may also contain more than two points. In this
|
498 |
+
case, the integration is split into subintervals, between
|
499 |
+
each pair of consecutive points. This is useful for
|
500 |
+
dealing with mid-interval discontinuities, or integrating
|
501 |
+
over large intervals where the function is irregular or
|
502 |
+
oscillates.
|
503 |
+
|
504 |
+
**Options**
|
505 |
+
|
506 |
+
:func:`~mpmath.quad` recognizes the following keyword arguments:
|
507 |
+
|
508 |
+
*method*
|
509 |
+
Chooses integration algorithm (described below).
|
510 |
+
*error*
|
511 |
+
If set to true, :func:`~mpmath.quad` returns `(v, e)` where `v` is the
|
512 |
+
integral and `e` is the estimated error.
|
513 |
+
*maxdegree*
|
514 |
+
Maximum degree of the quadrature rule to try before
|
515 |
+
quitting.
|
516 |
+
*verbose*
|
517 |
+
Print details about progress.
|
518 |
+
|
519 |
+
**Algorithms**
|
520 |
+
|
521 |
+
Mpmath presently implements two integration algorithms: tanh-sinh
|
522 |
+
quadrature and Gauss-Legendre quadrature. These can be selected
|
523 |
+
using *method='tanh-sinh'* or *method='gauss-legendre'* or by
|
524 |
+
passing the classes *method=TanhSinh*, *method=GaussLegendre*.
|
525 |
+
The functions :func:`~mpmath.quadts` and :func:`~mpmath.quadgl` are also available
|
526 |
+
as shortcuts.
|
527 |
+
|
528 |
+
Both algorithms have the property that doubling the number of
|
529 |
+
evaluation points roughly doubles the accuracy, so both are ideal
|
530 |
+
for high precision quadrature (hundreds or thousands of digits).
|
531 |
+
|
532 |
+
At high precision, computing the nodes and weights for the
|
533 |
+
integration can be expensive (more expensive than computing the
|
534 |
+
function values). To make repeated integrations fast, nodes
|
535 |
+
are automatically cached.
|
536 |
+
|
537 |
+
The advantages of the tanh-sinh algorithm are that it tends to
|
538 |
+
handle endpoint singularities well, and that the nodes are cheap
|
539 |
+
to compute on the first run. For these reasons, it is used by
|
540 |
+
:func:`~mpmath.quad` as the default algorithm.
|
541 |
+
|
542 |
+
Gauss-Legendre quadrature often requires fewer function
|
543 |
+
evaluations, and is therefore often faster for repeated use, but
|
544 |
+
the algorithm does not handle endpoint singularities as well and
|
545 |
+
the nodes are more expensive to compute. Gauss-Legendre quadrature
|
546 |
+
can be a better choice if the integrand is smooth and repeated
|
547 |
+
integrations are required (e.g. for multiple integrals).
|
548 |
+
|
549 |
+
See the documentation for :class:`TanhSinh` and
|
550 |
+
:class:`GaussLegendre` for additional details.
|
551 |
+
|
552 |
+
**Examples of 1D integrals**
|
553 |
+
|
554 |
+
Intervals may be infinite or half-infinite. The following two
|
555 |
+
examples evaluate the limits of the inverse tangent function
|
556 |
+
(`\int 1/(1+x^2) = \tan^{-1} x`), and the Gaussian integral
|
557 |
+
`\int_{\infty}^{\infty} \exp(-x^2)\,dx = \sqrt{\pi}`::
|
558 |
+
|
559 |
+
>>> mp.dps = 15
|
560 |
+
>>> quad(lambda x: 2/(x**2+1), [0, inf])
|
561 |
+
3.14159265358979
|
562 |
+
>>> quad(lambda x: exp(-x**2), [-inf, inf])**2
|
563 |
+
3.14159265358979
|
564 |
+
|
565 |
+
Integrals can typically be resolved to high precision.
|
566 |
+
The following computes 50 digits of `\pi` by integrating the
|
567 |
+
area of the half-circle defined by `x^2 + y^2 \le 1`,
|
568 |
+
`-1 \le x \le 1`, `y \ge 0`::
|
569 |
+
|
570 |
+
>>> mp.dps = 50
|
571 |
+
>>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1])
|
572 |
+
3.1415926535897932384626433832795028841971693993751
|
573 |
+
|
574 |
+
One can just as well compute 1000 digits (output truncated)::
|
575 |
+
|
576 |
+
>>> mp.dps = 1000
|
577 |
+
>>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1]) #doctest:+ELLIPSIS
|
578 |
+
3.141592653589793238462643383279502884...216420199
|
579 |
+
|
580 |
+
Complex integrals are supported. The following computes
|
581 |
+
a residue at `z = 0` by integrating counterclockwise along the
|
582 |
+
diamond-shaped path from `1` to `+i` to `-1` to `-i` to `1`::
|
583 |
+
|
584 |
+
>>> mp.dps = 15
|
585 |
+
>>> chop(quad(lambda z: 1/z, [1,j,-1,-j,1]))
|
586 |
+
(0.0 + 6.28318530717959j)
|
587 |
+
|
588 |
+
**Examples of 2D and 3D integrals**
|
589 |
+
|
590 |
+
Here are several nice examples of analytically solvable
|
591 |
+
2D integrals (taken from MathWorld [1]) that can be evaluated
|
592 |
+
to high precision fairly rapidly by :func:`~mpmath.quad`::
|
593 |
+
|
594 |
+
>>> mp.dps = 30
|
595 |
+
>>> f = lambda x, y: (x-1)/((1-x*y)*log(x*y))
|
596 |
+
>>> quad(f, [0, 1], [0, 1])
|
597 |
+
0.577215664901532860606512090082
|
598 |
+
>>> +euler
|
599 |
+
0.577215664901532860606512090082
|
600 |
+
|
601 |
+
>>> f = lambda x, y: 1/sqrt(1+x**2+y**2)
|
602 |
+
>>> quad(f, [-1, 1], [-1, 1])
|
603 |
+
3.17343648530607134219175646705
|
604 |
+
>>> 4*log(2+sqrt(3))-2*pi/3
|
605 |
+
3.17343648530607134219175646705
|
606 |
+
|
607 |
+
>>> f = lambda x, y: 1/(1-x**2 * y**2)
|
608 |
+
>>> quad(f, [0, 1], [0, 1])
|
609 |
+
1.23370055013616982735431137498
|
610 |
+
>>> pi**2 / 8
|
611 |
+
1.23370055013616982735431137498
|
612 |
+
|
613 |
+
>>> quad(lambda x, y: 1/(1-x*y), [0, 1], [0, 1])
|
614 |
+
1.64493406684822643647241516665
|
615 |
+
>>> pi**2 / 6
|
616 |
+
1.64493406684822643647241516665
|
617 |
+
|
618 |
+
Multiple integrals may be done over infinite ranges::
|
619 |
+
|
620 |
+
>>> mp.dps = 15
|
621 |
+
>>> print(quad(lambda x,y: exp(-x-y), [0, inf], [1, inf]))
|
622 |
+
0.367879441171442
|
623 |
+
>>> print(1/e)
|
624 |
+
0.367879441171442
|
625 |
+
|
626 |
+
For nonrectangular areas, one can call :func:`~mpmath.quad` recursively.
|
627 |
+
For example, we can replicate the earlier example of calculating
|
628 |
+
`\pi` by integrating over the unit-circle, and actually use double
|
629 |
+
quadrature to actually measure the area circle::
|
630 |
+
|
631 |
+
>>> f = lambda x: quad(lambda y: 1, [-sqrt(1-x**2), sqrt(1-x**2)])
|
632 |
+
>>> quad(f, [-1, 1])
|
633 |
+
3.14159265358979
|
634 |
+
|
635 |
+
Here is a simple triple integral::
|
636 |
+
|
637 |
+
>>> mp.dps = 15
|
638 |
+
>>> f = lambda x,y,z: x*y/(1+z)
|
639 |
+
>>> quad(f, [0,1], [0,1], [1,2], method='gauss-legendre')
|
640 |
+
0.101366277027041
|
641 |
+
>>> (log(3)-log(2))/4
|
642 |
+
0.101366277027041
|
643 |
+
|
644 |
+
**Singularities**
|
645 |
+
|
646 |
+
Both tanh-sinh and Gauss-Legendre quadrature are designed to
|
647 |
+
integrate smooth (infinitely differentiable) functions. Neither
|
648 |
+
algorithm copes well with mid-interval singularities (such as
|
649 |
+
mid-interval discontinuities in `f(x)` or `f'(x)`).
|
650 |
+
The best solution is to split the integral into parts::
|
651 |
+
|
652 |
+
>>> mp.dps = 15
|
653 |
+
>>> quad(lambda x: abs(sin(x)), [0, 2*pi]) # Bad
|
654 |
+
3.99900894176779
|
655 |
+
>>> quad(lambda x: abs(sin(x)), [0, pi, 2*pi]) # Good
|
656 |
+
4.0
|
657 |
+
|
658 |
+
The tanh-sinh rule often works well for integrands having a
|
659 |
+
singularity at one or both endpoints::
|
660 |
+
|
661 |
+
>>> mp.dps = 15
|
662 |
+
>>> quad(log, [0, 1], method='tanh-sinh') # Good
|
663 |
+
-1.0
|
664 |
+
>>> quad(log, [0, 1], method='gauss-legendre') # Bad
|
665 |
+
-0.999932197413801
|
666 |
+
|
667 |
+
However, the result may still be inaccurate for some functions::
|
668 |
+
|
669 |
+
>>> quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
|
670 |
+
1.99999999946942
|
671 |
+
|
672 |
+
This problem is not due to the quadrature rule per se, but to
|
673 |
+
numerical amplification of errors in the nodes. The problem can be
|
674 |
+
circumvented by temporarily increasing the precision::
|
675 |
+
|
676 |
+
>>> mp.dps = 30
|
677 |
+
>>> a = quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
|
678 |
+
>>> mp.dps = 15
|
679 |
+
>>> +a
|
680 |
+
2.0
|
681 |
+
|
682 |
+
**Highly variable functions**
|
683 |
+
|
684 |
+
For functions that are smooth (in the sense of being infinitely
|
685 |
+
differentiable) but contain sharp mid-interval peaks or many
|
686 |
+
"bumps", :func:`~mpmath.quad` may fail to provide full accuracy. For
|
687 |
+
example, with default settings, :func:`~mpmath.quad` is able to integrate
|
688 |
+
`\sin(x)` accurately over an interval of length 100 but not over
|
689 |
+
length 1000::
|
690 |
+
|
691 |
+
>>> quad(sin, [0, 100]); 1-cos(100) # Good
|
692 |
+
0.137681127712316
|
693 |
+
0.137681127712316
|
694 |
+
>>> quad(sin, [0, 1000]); 1-cos(1000) # Bad
|
695 |
+
-37.8587612408485
|
696 |
+
0.437620923709297
|
697 |
+
|
698 |
+
One solution is to break the integration into 10 intervals of
|
699 |
+
length 100::
|
700 |
+
|
701 |
+
>>> quad(sin, linspace(0, 1000, 10)) # Good
|
702 |
+
0.437620923709297
|
703 |
+
|
704 |
+
Another is to increase the degree of the quadrature::
|
705 |
+
|
706 |
+
>>> quad(sin, [0, 1000], maxdegree=10) # Also good
|
707 |
+
0.437620923709297
|
708 |
+
|
709 |
+
Whether splitting the interval or increasing the degree is
|
710 |
+
more efficient differs from case to case. Another example is the
|
711 |
+
function `1/(1+x^2)`, which has a sharp peak centered around
|
712 |
+
`x = 0`::
|
713 |
+
|
714 |
+
>>> f = lambda x: 1/(1+x**2)
|
715 |
+
>>> quad(f, [-100, 100]) # Bad
|
716 |
+
3.64804647105268
|
717 |
+
>>> quad(f, [-100, 100], maxdegree=10) # Good
|
718 |
+
3.12159332021646
|
719 |
+
>>> quad(f, [-100, 0, 100]) # Also good
|
720 |
+
3.12159332021646
|
721 |
+
|
722 |
+
**References**
|
723 |
+
|
724 |
+
1. http://mathworld.wolfram.com/DoubleIntegral.html
|
725 |
+
|
726 |
+
"""
|
727 |
+
rule = kwargs.get('method', 'tanh-sinh')
|
728 |
+
if type(rule) is str:
|
729 |
+
if rule == 'tanh-sinh':
|
730 |
+
rule = ctx._tanh_sinh
|
731 |
+
elif rule == 'gauss-legendre':
|
732 |
+
rule = ctx._gauss_legendre
|
733 |
+
else:
|
734 |
+
raise ValueError("unknown quadrature rule: %s" % rule)
|
735 |
+
else:
|
736 |
+
rule = rule(ctx)
|
737 |
+
verbose = kwargs.get('verbose')
|
738 |
+
dim = len(points)
|
739 |
+
orig = prec = ctx.prec
|
740 |
+
epsilon = ctx.eps/8
|
741 |
+
m = kwargs.get('maxdegree') or rule.guess_degree(prec)
|
742 |
+
points = [ctx._as_points(p) for p in points]
|
743 |
+
try:
|
744 |
+
ctx.prec += 20
|
745 |
+
if dim == 1:
|
746 |
+
v, err = rule.summation(f, points[0], prec, epsilon, m, verbose)
|
747 |
+
elif dim == 2:
|
748 |
+
v, err = rule.summation(lambda x: \
|
749 |
+
rule.summation(lambda y: f(x,y), \
|
750 |
+
points[1], prec, epsilon, m)[0],
|
751 |
+
points[0], prec, epsilon, m, verbose)
|
752 |
+
elif dim == 3:
|
753 |
+
v, err = rule.summation(lambda x: \
|
754 |
+
rule.summation(lambda y: \
|
755 |
+
rule.summation(lambda z: f(x,y,z), \
|
756 |
+
points[2], prec, epsilon, m)[0],
|
757 |
+
points[1], prec, epsilon, m)[0],
|
758 |
+
points[0], prec, epsilon, m, verbose)
|
759 |
+
else:
|
760 |
+
raise NotImplementedError("quadrature must have dim 1, 2 or 3")
|
761 |
+
finally:
|
762 |
+
ctx.prec = orig
|
763 |
+
if kwargs.get("error"):
|
764 |
+
return +v, err
|
765 |
+
return +v
|
766 |
+
|
767 |
+
def quadts(ctx, *args, **kwargs):
|
768 |
+
"""
|
769 |
+
Performs tanh-sinh quadrature. The call
|
770 |
+
|
771 |
+
quadts(func, *points, ...)
|
772 |
+
|
773 |
+
is simply a shortcut for:
|
774 |
+
|
775 |
+
quad(func, *points, ..., method=TanhSinh)
|
776 |
+
|
777 |
+
For example, a single integral and a double integral:
|
778 |
+
|
779 |
+
quadts(lambda x: exp(cos(x)), [0, 1])
|
780 |
+
quadts(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
|
781 |
+
|
782 |
+
See the documentation for quad for information about how points
|
783 |
+
arguments and keyword arguments are parsed.
|
784 |
+
|
785 |
+
See documentation for TanhSinh for algorithmic information about
|
786 |
+
tanh-sinh quadrature.
|
787 |
+
"""
|
788 |
+
kwargs['method'] = 'tanh-sinh'
|
789 |
+
return ctx.quad(*args, **kwargs)
|
790 |
+
|
791 |
+
def quadgl(ctx, *args, **kwargs):
|
792 |
+
"""
|
793 |
+
Performs Gauss-Legendre quadrature. The call
|
794 |
+
|
795 |
+
quadgl(func, *points, ...)
|
796 |
+
|
797 |
+
is simply a shortcut for:
|
798 |
+
|
799 |
+
quad(func, *points, ..., method=GaussLegendre)
|
800 |
+
|
801 |
+
For example, a single integral and a double integral:
|
802 |
+
|
803 |
+
quadgl(lambda x: exp(cos(x)), [0, 1])
|
804 |
+
quadgl(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
|
805 |
+
|
806 |
+
See the documentation for quad for information about how points
|
807 |
+
arguments and keyword arguments are parsed.
|
808 |
+
|
809 |
+
See documentation for TanhSinh for algorithmic information about
|
810 |
+
tanh-sinh quadrature.
|
811 |
+
"""
|
812 |
+
kwargs['method'] = 'gauss-legendre'
|
813 |
+
return ctx.quad(*args, **kwargs)
|
814 |
+
|
815 |
+
def quadosc(ctx, f, interval, omega=None, period=None, zeros=None):
|
816 |
+
r"""
|
817 |
+
Calculates
|
818 |
+
|
819 |
+
.. math ::
|
820 |
+
|
821 |
+
I = \int_a^b f(x) dx
|
822 |
+
|
823 |
+
where at least one of `a` and `b` is infinite and where
|
824 |
+
`f(x) = g(x) \cos(\omega x + \phi)` for some slowly
|
825 |
+
decreasing function `g(x)`. With proper input, :func:`~mpmath.quadosc`
|
826 |
+
can also handle oscillatory integrals where the oscillation
|
827 |
+
rate is different from a pure sine or cosine wave.
|
828 |
+
|
829 |
+
In the standard case when `|a| < \infty, b = \infty`,
|
830 |
+
:func:`~mpmath.quadosc` works by evaluating the infinite series
|
831 |
+
|
832 |
+
.. math ::
|
833 |
+
|
834 |
+
I = \int_a^{x_1} f(x) dx +
|
835 |
+
\sum_{k=1}^{\infty} \int_{x_k}^{x_{k+1}} f(x) dx
|
836 |
+
|
837 |
+
where `x_k` are consecutive zeros (alternatively
|
838 |
+
some other periodic reference point) of `f(x)`.
|
839 |
+
Accordingly, :func:`~mpmath.quadosc` requires information about the
|
840 |
+
zeros of `f(x)`. For a periodic function, you can specify
|
841 |
+
the zeros by either providing the angular frequency `\omega`
|
842 |
+
(*omega*) or the *period* `2 \pi/\omega`. In general, you can
|
843 |
+
specify the `n`-th zero by providing the *zeros* arguments.
|
844 |
+
Below is an example of each::
|
845 |
+
|
846 |
+
>>> from mpmath import *
|
847 |
+
>>> mp.dps = 15; mp.pretty = True
|
848 |
+
>>> f = lambda x: sin(3*x)/(x**2+1)
|
849 |
+
>>> quadosc(f, [0,inf], omega=3)
|
850 |
+
0.37833007080198
|
851 |
+
>>> quadosc(f, [0,inf], period=2*pi/3)
|
852 |
+
0.37833007080198
|
853 |
+
>>> quadosc(f, [0,inf], zeros=lambda n: pi*n/3)
|
854 |
+
0.37833007080198
|
855 |
+
>>> (ei(3)*exp(-3)-exp(3)*ei(-3))/2 # Computed by Mathematica
|
856 |
+
0.37833007080198
|
857 |
+
|
858 |
+
Note that *zeros* was specified to multiply `n` by the
|
859 |
+
*half-period*, not the full period. In theory, it does not matter
|
860 |
+
whether each partial integral is done over a half period or a full
|
861 |
+
period. However, if done over half-periods, the infinite series
|
862 |
+
passed to :func:`~mpmath.nsum` becomes an *alternating series* and this
|
863 |
+
typically makes the extrapolation much more efficient.
|
864 |
+
|
865 |
+
Here is an example of an integration over the entire real line,
|
866 |
+
and a half-infinite integration starting at `-\infty`::
|
867 |
+
|
868 |
+
>>> quadosc(lambda x: cos(x)/(1+x**2), [-inf, inf], omega=1)
|
869 |
+
1.15572734979092
|
870 |
+
>>> pi/e
|
871 |
+
1.15572734979092
|
872 |
+
>>> quadosc(lambda x: cos(x)/x**2, [-inf, -1], period=2*pi)
|
873 |
+
-0.0844109505595739
|
874 |
+
>>> cos(1)+si(1)-pi/2
|
875 |
+
-0.0844109505595738
|
876 |
+
|
877 |
+
Of course, the integrand may contain a complex exponential just as
|
878 |
+
well as a real sine or cosine::
|
879 |
+
|
880 |
+
>>> quadosc(lambda x: exp(3*j*x)/(1+x**2), [-inf,inf], omega=3)
|
881 |
+
(0.156410688228254 + 0.0j)
|
882 |
+
>>> pi/e**3
|
883 |
+
0.156410688228254
|
884 |
+
>>> quadosc(lambda x: exp(3*j*x)/(2+x+x**2), [-inf,inf], omega=3)
|
885 |
+
(0.00317486988463794 - 0.0447701735209082j)
|
886 |
+
>>> 2*pi/sqrt(7)/exp(3*(j+sqrt(7))/2)
|
887 |
+
(0.00317486988463794 - 0.0447701735209082j)
|
888 |
+
|
889 |
+
**Non-periodic functions**
|
890 |
+
|
891 |
+
If `f(x) = g(x) h(x)` for some function `h(x)` that is not
|
892 |
+
strictly periodic, *omega* or *period* might not work, and it might
|
893 |
+
be necessary to use *zeros*.
|
894 |
+
|
895 |
+
A notable exception can be made for Bessel functions which, though not
|
896 |
+
periodic, are "asymptotically periodic" in a sufficiently strong sense
|
897 |
+
that the sum extrapolation will work out::
|
898 |
+
|
899 |
+
>>> quadosc(j0, [0, inf], period=2*pi)
|
900 |
+
1.0
|
901 |
+
>>> quadosc(j1, [0, inf], period=2*pi)
|
902 |
+
1.0
|
903 |
+
|
904 |
+
More properly, one should provide the exact Bessel function zeros::
|
905 |
+
|
906 |
+
>>> j0zero = lambda n: findroot(j0, pi*(n-0.25))
|
907 |
+
>>> quadosc(j0, [0, inf], zeros=j0zero)
|
908 |
+
1.0
|
909 |
+
|
910 |
+
For an example where *zeros* becomes necessary, consider the
|
911 |
+
complete Fresnel integrals
|
912 |
+
|
913 |
+
.. math ::
|
914 |
+
|
915 |
+
\int_0^{\infty} \cos x^2\,dx = \int_0^{\infty} \sin x^2\,dx
|
916 |
+
= \sqrt{\frac{\pi}{8}}.
|
917 |
+
|
918 |
+
Although the integrands do not decrease in magnitude as
|
919 |
+
`x \to \infty`, the integrals are convergent since the oscillation
|
920 |
+
rate increases (causing consecutive periods to asymptotically
|
921 |
+
cancel out). These integrals are virtually impossible to calculate
|
922 |
+
to any kind of accuracy using standard quadrature rules. However,
|
923 |
+
if one provides the correct asymptotic distribution of zeros
|
924 |
+
(`x_n \sim \sqrt{n}`), :func:`~mpmath.quadosc` works::
|
925 |
+
|
926 |
+
>>> mp.dps = 30
|
927 |
+
>>> f = lambda x: cos(x**2)
|
928 |
+
>>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
|
929 |
+
0.626657068657750125603941321203
|
930 |
+
>>> f = lambda x: sin(x**2)
|
931 |
+
>>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
|
932 |
+
0.626657068657750125603941321203
|
933 |
+
>>> sqrt(pi/8)
|
934 |
+
0.626657068657750125603941321203
|
935 |
+
|
936 |
+
(Interestingly, these integrals can still be evaluated if one
|
937 |
+
places some other constant than `\pi` in the square root sign.)
|
938 |
+
|
939 |
+
In general, if `f(x) \sim g(x) \cos(h(x))`, the zeros follow
|
940 |
+
the inverse-function distribution `h^{-1}(x)`::
|
941 |
+
|
942 |
+
>>> mp.dps = 15
|
943 |
+
>>> f = lambda x: sin(exp(x))
|
944 |
+
>>> quadosc(f, [1,inf], zeros=lambda n: log(n))
|
945 |
+
-0.25024394235267
|
946 |
+
>>> pi/2-si(e)
|
947 |
+
-0.250243942352671
|
948 |
+
|
949 |
+
**Non-alternating functions**
|
950 |
+
|
951 |
+
If the integrand oscillates around a positive value, without
|
952 |
+
alternating signs, the extrapolation might fail. A simple trick
|
953 |
+
that sometimes works is to multiply or divide the frequency by 2::
|
954 |
+
|
955 |
+
>>> f = lambda x: 1/x**2+sin(x)/x**4
|
956 |
+
>>> quadosc(f, [1,inf], omega=1) # Bad
|
957 |
+
1.28642190869861
|
958 |
+
>>> quadosc(f, [1,inf], omega=0.5) # Perfect
|
959 |
+
1.28652953559617
|
960 |
+
>>> 1+(cos(1)+ci(1)+sin(1))/6
|
961 |
+
1.28652953559617
|
962 |
+
|
963 |
+
**Fast decay**
|
964 |
+
|
965 |
+
:func:`~mpmath.quadosc` is primarily useful for slowly decaying
|
966 |
+
integrands. If the integrand decreases exponentially or faster,
|
967 |
+
:func:`~mpmath.quad` will likely handle it without trouble (and generally be
|
968 |
+
much faster than :func:`~mpmath.quadosc`)::
|
969 |
+
|
970 |
+
>>> quadosc(lambda x: cos(x)/exp(x), [0, inf], omega=1)
|
971 |
+
0.5
|
972 |
+
>>> quad(lambda x: cos(x)/exp(x), [0, inf])
|
973 |
+
0.5
|
974 |
+
|
975 |
+
"""
|
976 |
+
a, b = ctx._as_points(interval)
|
977 |
+
a = ctx.convert(a)
|
978 |
+
        b = ctx.convert(b)
        if [omega, period, zeros].count(None) != 2:
            raise ValueError("must specify exactly one of omega, period, zeros")
        if a == ctx.ninf and b == ctx.inf:
            s1 = ctx.quadosc(f, [a, 0], omega=omega, zeros=zeros, period=period)
            s2 = ctx.quadosc(f, [0, b], omega=omega, zeros=zeros, period=period)
            return s1 + s2
        if a == ctx.ninf:
            if zeros:
                return ctx.quadosc(lambda x: f(-x), [-b, -a], lambda n: zeros(-n))
            else:
                return ctx.quadosc(lambda x: f(-x), [-b, -a], omega=omega, period=period)
        if b != ctx.inf:
            raise ValueError("quadosc requires an infinite integration interval")
        if not zeros:
            if omega:
                period = 2*ctx.pi/omega
            zeros = lambda n: n*period/2
        #for n in range(1,10):
        #    p = zeros(n)
        #    if p > a:
        #        break
        #if n >= 9:
        #    raise ValueError("zeros do not appear to be correctly indexed")
        n = 1
        s = ctx.quadgl(f, [a, zeros(n)])
        def term(k):
            return ctx.quadgl(f, [zeros(k), zeros(k+1)])
        s += ctx.nsum(term, [n, ctx.inf])
        return s

    def quadsubdiv(ctx, f, interval, tol=None, maxintervals=None, **kwargs):
        """
        Computes the integral of *f* over the interval or path specified
        by *interval*, using :func:`~mpmath.quad` together with adaptive
        subdivision of the interval.

        This function gives an accurate answer for some integrals where
        :func:`~mpmath.quad` fails::

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = True
            >>> quad(lambda x: abs(sin(x)), [0, 2*pi])
            3.99900894176779
            >>> quadsubdiv(lambda x: abs(sin(x)), [0, 2*pi])
            4.0
            >>> quadsubdiv(sin, [0, 1000])
            0.437620923709297
            >>> quadsubdiv(lambda x: 1/(1+x**2), [-100, 100])
            3.12159332021646
            >>> quadsubdiv(lambda x: ceil(x), [0, 100])
            5050.0
            >>> quadsubdiv(lambda x: sin(x+exp(x)), [0,8])
            0.347400172657248

        The argument *maxintervals* can be set to limit the number of
        permitted subdivisions::

            >>> quadsubdiv(lambda x: sin(x**2), [0,100], maxintervals=5, error=True)
            (-5.40487904307774, 5.011)
            >>> quadsubdiv(lambda x: sin(x**2), [0,100], maxintervals=100, error=True)
            (0.631417921866934, 1.10101120134116e-17)

        Subdivision does not guarantee a correct answer, since the error
        estimate on subintervals may be inaccurate::

            >>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, [0,1], error=True)
            (0.210802735500549, 1.0001111101e-17)
            >>> mp.dps = 20
            >>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, [0,1], error=True)
            (0.21080273550054927738, 2.200000001e-24)

        The second answer is correct. We can get an accurate result at lower
        precision by forcing a finer initial subdivision::

            >>> mp.dps = 15
            >>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, linspace(0,1,5))
            0.210802735500549

        The following integral is too oscillatory for convergence, but we can
        get a reasonable estimate::

            >>> v, err = fp.quadsubdiv(lambda x: fp.sin(1/x), [0,1], error=True)
            >>> round(v, 6), round(err, 6)
            (0.504067, 1e-06)
            >>> sin(1) - ci(1)
            0.504067061906928

        """
        queue = []
        for i in range(len(interval)-1):
            queue.append((interval[i], interval[i+1]))
        total = ctx.zero
        total_error = ctx.zero
        if maxintervals is None:
            maxintervals = 10 * ctx.prec
        count = 0
        quad_args = kwargs.copy()
        quad_args["verbose"] = False
        quad_args["error"] = True
        if tol is None:
            tol = +ctx.eps
        orig = ctx.prec
        try:
            ctx.prec += 5
            while queue:
                a, b = queue.pop()
                s, err = ctx.quad(f, [a, b], **quad_args)
                if kwargs.get("verbose"):
                    print("subinterval", count, a, b, err)
                if err < tol or count > maxintervals:
                    total += s
                    total_error += err
                else:
                    count += 1
                    if count == maxintervals and kwargs.get("verbose"):
                        print("warning: number of intervals exceeded maxintervals")
                    if a == -ctx.inf and b == ctx.inf:
                        m = 0
                    elif a == -ctx.inf:
                        m = min(b-1, 2*b)
                    elif b == ctx.inf:
                        m = max(a+1, 2*a)
                    else:
                        m = a + (b - a) / 2
                    queue.append((a, m))
                    queue.append((m, b))
        finally:
            ctx.prec = orig
        if kwargs.get("error"):
            return +total, +total_error
        else:
            return +total

if __name__ == '__main__':
    import doctest
    doctest.testmod()
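An aside on the subdivision loop above: the only non-obvious step is how a subinterval with an infinite endpoint is split. Below is a minimal standalone sketch of that rule; the helper name `split` is hypothetical and purely illustrative, only mpmath's public `inf` is assumed.

from mpmath import inf

def split(a, b):
    # Mirror of the midpoint rule in quadsubdiv (illustration only):
    # a doubly infinite interval is first cut at 0, a semi-infinite one is
    # cut at a point that moves geometrically toward the infinite end,
    # and a finite interval is plainly bisected.
    if a == -inf and b == inf:
        return 0
    elif a == -inf:
        return min(b - 1, 2*b)
    elif b == inf:
        return max(a + 1, 2*a)
    else:
        return a + (b - a) / 2

print(split(-inf, inf))   # 0
print(split(2, inf))      # 4: doubles away from the finite endpoint
print(split(-inf, -3))    # -6
print(split(0, 1))        # 0.5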
env-llmeval/lib/python3.10/site-packages/mpmath/functions/__init__.py
ADDED
@@ -0,0 +1,14 @@
from . import functions
# Hack to update methods
from . import factorials
from . import hypergeometric
from . import expintegrals
from . import bessel
from . import orthogonal
from . import theta
from . import elliptic
from . import signals
from . import zeta
from . import rszeta
from . import zetazeros
from . import qfunctions
env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (592 Bytes)
env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/bessel.cpython-310.pyc
ADDED
Binary file (34.1 kB)
env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/elliptic.cpython-310.pyc
ADDED
Binary file (40.3 kB)
env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/expintegrals.cpython-310.pyc
ADDED
Binary file (10.2 kB)
env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/factorials.cpython-310.pyc
ADDED
Binary file (4.49 kB)
env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/functions.cpython-310.pyc
ADDED
Binary file (17.7 kB)
env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/hypergeometric.cpython-310.pyc
ADDED
Binary file (39.7 kB)
env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/orthogonal.cpython-310.pyc
ADDED
Binary file (14.4 kB)
env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/qfunctions.cpython-310.pyc
ADDED
Binary file (7.57 kB)
env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/rszeta.cpython-310.pyc
ADDED
Binary file (29.4 kB)
env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/signals.cpython-310.pyc
ADDED
Binary file (1.14 kB)
env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/theta.cpython-310.pyc
ADDED
Binary file (21 kB)
env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/zeta.cpython-310.pyc
ADDED
Binary file (32.2 kB)
env-llmeval/lib/python3.10/site-packages/mpmath/functions/__pycache__/zetazeros.cpython-310.pyc
ADDED
Binary file (29.9 kB)
env-llmeval/lib/python3.10/site-packages/mpmath/functions/bessel.py
ADDED
@@ -0,0 +1,1108 @@
1 |
+
from .functions import defun, defun_wrapped
|
2 |
+
|
3 |
+
@defun
|
4 |
+
def j0(ctx, x):
|
5 |
+
"""Computes the Bessel function `J_0(x)`. See :func:`~mpmath.besselj`."""
|
6 |
+
return ctx.besselj(0, x)
|
7 |
+
|
8 |
+
@defun
|
9 |
+
def j1(ctx, x):
|
10 |
+
"""Computes the Bessel function `J_1(x)`. See :func:`~mpmath.besselj`."""
|
11 |
+
return ctx.besselj(1, x)
|
12 |
+
|
13 |
+
@defun
|
14 |
+
def besselj(ctx, n, z, derivative=0, **kwargs):
|
15 |
+
if type(n) is int:
|
16 |
+
n_isint = True
|
17 |
+
else:
|
18 |
+
n = ctx.convert(n)
|
19 |
+
n_isint = ctx.isint(n)
|
20 |
+
if n_isint:
|
21 |
+
n = int(ctx._re(n))
|
22 |
+
if n_isint and n < 0:
|
23 |
+
return (-1)**n * ctx.besselj(-n, z, derivative, **kwargs)
|
24 |
+
z = ctx.convert(z)
|
25 |
+
M = ctx.mag(z)
|
26 |
+
if derivative:
|
27 |
+
d = ctx.convert(derivative)
|
28 |
+
# TODO: the integer special-casing shouldn't be necessary.
|
29 |
+
# However, the hypergeometric series gets inaccurate for large d
|
30 |
+
# because of inaccurate pole cancellation at a pole far from
|
31 |
+
# zero (needs to be fixed in hypercomb or hypsum)
|
32 |
+
if ctx.isint(d) and d >= 0:
|
33 |
+
d = int(d)
|
34 |
+
orig = ctx.prec
|
35 |
+
try:
|
36 |
+
ctx.prec += 15
|
37 |
+
v = ctx.fsum((-1)**k * ctx.binomial(d,k) * ctx.besselj(2*k+n-d,z)
|
38 |
+
for k in range(d+1))
|
39 |
+
finally:
|
40 |
+
ctx.prec = orig
|
41 |
+
v *= ctx.mpf(2)**(-d)
|
42 |
+
else:
|
43 |
+
def h(n,d):
|
44 |
+
r = ctx.fmul(ctx.fmul(z, z, prec=ctx.prec+M), -0.25, exact=True)
|
45 |
+
B = [0.5*(n-d+1), 0.5*(n-d+2)]
|
46 |
+
T = [([2,ctx.pi,z],[d-2*n,0.5,n-d],[],B,[(n+1)*0.5,(n+2)*0.5],B+[n+1],r)]
|
47 |
+
return T
|
48 |
+
v = ctx.hypercomb(h, [n,d], **kwargs)
|
49 |
+
else:
|
50 |
+
# Fast case: J_n(x), n int, appropriate magnitude for fixed-point calculation
|
51 |
+
if (not derivative) and n_isint and abs(M) < 10 and abs(n) < 20:
|
52 |
+
try:
|
53 |
+
return ctx._besselj(n, z)
|
54 |
+
except NotImplementedError:
|
55 |
+
pass
|
56 |
+
if not z:
|
57 |
+
if not n:
|
58 |
+
v = ctx.one + n+z
|
59 |
+
elif ctx.re(n) > 0:
|
60 |
+
v = n*z
|
61 |
+
else:
|
62 |
+
v = ctx.inf + z + n
|
63 |
+
else:
|
64 |
+
#v = 0
|
65 |
+
orig = ctx.prec
|
66 |
+
try:
|
67 |
+
# XXX: workaround for accuracy in low level hypergeometric series
|
68 |
+
# when alternating, large arguments
|
69 |
+
ctx.prec += min(3*abs(M), ctx.prec)
|
70 |
+
w = ctx.fmul(z, 0.5, exact=True)
|
71 |
+
def h(n):
|
72 |
+
r = ctx.fneg(ctx.fmul(w, w, prec=max(0,ctx.prec+M)), exact=True)
|
73 |
+
return [([w], [n], [], [n+1], [], [n+1], r)]
|
74 |
+
v = ctx.hypercomb(h, [n], **kwargs)
|
75 |
+
finally:
|
76 |
+
ctx.prec = orig
|
77 |
+
v = +v
|
78 |
+
return v
|
79 |
+
|
80 |
+
@defun
|
81 |
+
def besseli(ctx, n, z, derivative=0, **kwargs):
|
82 |
+
n = ctx.convert(n)
|
83 |
+
z = ctx.convert(z)
|
84 |
+
if not z:
|
85 |
+
if derivative:
|
86 |
+
raise ValueError
|
87 |
+
if not n:
|
88 |
+
# I(0,0) = 1
|
89 |
+
return 1+n+z
|
90 |
+
if ctx.isint(n):
|
91 |
+
return 0*(n+z)
|
92 |
+
r = ctx.re(n)
|
93 |
+
if r == 0:
|
94 |
+
return ctx.nan*(n+z)
|
95 |
+
elif r > 0:
|
96 |
+
return 0*(n+z)
|
97 |
+
else:
|
98 |
+
return ctx.inf+(n+z)
|
99 |
+
M = ctx.mag(z)
|
100 |
+
if derivative:
|
101 |
+
d = ctx.convert(derivative)
|
102 |
+
def h(n,d):
|
103 |
+
r = ctx.fmul(ctx.fmul(z, z, prec=ctx.prec+M), 0.25, exact=True)
|
104 |
+
B = [0.5*(n-d+1), 0.5*(n-d+2), n+1]
|
105 |
+
T = [([2,ctx.pi,z],[d-2*n,0.5,n-d],[n+1],B,[(n+1)*0.5,(n+2)*0.5],B,r)]
|
106 |
+
return T
|
107 |
+
v = ctx.hypercomb(h, [n,d], **kwargs)
|
108 |
+
else:
|
109 |
+
def h(n):
|
110 |
+
w = ctx.fmul(z, 0.5, exact=True)
|
111 |
+
r = ctx.fmul(w, w, prec=max(0,ctx.prec+M))
|
112 |
+
return [([w], [n], [], [n+1], [], [n+1], r)]
|
113 |
+
v = ctx.hypercomb(h, [n], **kwargs)
|
114 |
+
return v
|
115 |
+
|
116 |
+
@defun_wrapped
|
117 |
+
def bessely(ctx, n, z, derivative=0, **kwargs):
|
118 |
+
if not z:
|
119 |
+
if derivative:
|
120 |
+
# Not implemented
|
121 |
+
raise ValueError
|
122 |
+
if not n:
|
123 |
+
# ~ log(z/2)
|
124 |
+
return -ctx.inf + (n+z)
|
125 |
+
if ctx.im(n):
|
126 |
+
return ctx.nan * (n+z)
|
127 |
+
r = ctx.re(n)
|
128 |
+
q = n+0.5
|
129 |
+
if ctx.isint(q):
|
130 |
+
if n > 0:
|
131 |
+
return -ctx.inf + (n+z)
|
132 |
+
else:
|
133 |
+
return 0 * (n+z)
|
134 |
+
if r < 0 and int(ctx.floor(q)) % 2:
|
135 |
+
return ctx.inf + (n+z)
|
136 |
+
else:
|
137 |
+
return ctx.ninf + (n+z)
|
138 |
+
# XXX: use hypercomb
|
139 |
+
ctx.prec += 10
|
140 |
+
m, d = ctx.nint_distance(n)
|
141 |
+
if d < -ctx.prec:
|
142 |
+
h = +ctx.eps
|
143 |
+
ctx.prec *= 2
|
144 |
+
n += h
|
145 |
+
elif d < 0:
|
146 |
+
ctx.prec -= d
|
147 |
+
# TODO: avoid cancellation for imaginary arguments
|
148 |
+
cos, sin = ctx.cospi_sinpi(n)
|
149 |
+
return (ctx.besselj(n,z,derivative,**kwargs)*cos - \
|
150 |
+
ctx.besselj(-n,z,derivative,**kwargs))/sin
|
151 |
+
|
152 |
+
@defun_wrapped
|
153 |
+
def besselk(ctx, n, z, **kwargs):
|
154 |
+
if not z:
|
155 |
+
return ctx.inf
|
156 |
+
M = ctx.mag(z)
|
157 |
+
if M < 1:
|
158 |
+
# Represent as limit definition
|
159 |
+
def h(n):
|
160 |
+
r = (z/2)**2
|
161 |
+
T1 = [z, 2], [-n, n-1], [n], [], [], [1-n], r
|
162 |
+
T2 = [z, 2], [n, -n-1], [-n], [], [], [1+n], r
|
163 |
+
return T1, T2
|
164 |
+
# We could use the limit definition always, but it leads
|
165 |
+
# to very bad cancellation (of exponentially large terms)
|
166 |
+
# for large real z
|
167 |
+
# Instead represent in terms of 2F0
|
168 |
+
else:
|
169 |
+
ctx.prec += M
|
170 |
+
def h(n):
|
171 |
+
return [([ctx.pi/2, z, ctx.exp(-z)], [0.5,-0.5,1], [], [], \
|
172 |
+
[n+0.5, 0.5-n], [], -1/(2*z))]
|
173 |
+
return ctx.hypercomb(h, [n], **kwargs)
|
174 |
+
|
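An illustrative aside (not part of bessel.py): the cancellation mentioned in the comments above can be seen by evaluating the standard reflection formula K_v(z) = (pi/2)*(I_{-v}(z) - I_v(z))/sin(pi*v) directly at fixed precision for a moderately large real z; the two I terms are exponentially large and nearly equal, so the naive difference loses essentially all accuracy, while besselk itself stays accurate.

from mpmath import mp, besseli, besselk, pi, sin, mpf

mp.dps = 15
v, z = mpf('0.25'), mpf(50)
# Naive reflection formula: both besseli values are ~1e20 and agree to far
# more digits than the working precision, so the subtraction is meaningless.
naive = pi/2*(besseli(-v, z) - besseli(v, z))/sin(pi*v)
print(naive)
# The library routine switches to the 2F0-based representation for large z
# and returns the (tiny) value accurately.
print(besselk(v, z))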
175 |
+
@defun_wrapped
|
176 |
+
def hankel1(ctx,n,x,**kwargs):
|
177 |
+
return ctx.besselj(n,x,**kwargs) + ctx.j*ctx.bessely(n,x,**kwargs)
|
178 |
+
|
179 |
+
@defun_wrapped
|
180 |
+
def hankel2(ctx,n,x,**kwargs):
|
181 |
+
return ctx.besselj(n,x,**kwargs) - ctx.j*ctx.bessely(n,x,**kwargs)
|
182 |
+
|
183 |
+
@defun_wrapped
|
184 |
+
def whitm(ctx,k,m,z,**kwargs):
|
185 |
+
if z == 0:
|
186 |
+
# M(k,m,z) = 0^(1/2+m)
|
187 |
+
if ctx.re(m) > -0.5:
|
188 |
+
return z
|
189 |
+
elif ctx.re(m) < -0.5:
|
190 |
+
return ctx.inf + z
|
191 |
+
else:
|
192 |
+
return ctx.nan * z
|
193 |
+
x = ctx.fmul(-0.5, z, exact=True)
|
194 |
+
y = 0.5+m
|
195 |
+
return ctx.exp(x) * z**y * ctx.hyp1f1(y-k, 1+2*m, z, **kwargs)
|
196 |
+
|
197 |
+
@defun_wrapped
|
198 |
+
def whitw(ctx,k,m,z,**kwargs):
|
199 |
+
if z == 0:
|
200 |
+
g = abs(ctx.re(m))
|
201 |
+
if g < 0.5:
|
202 |
+
return z
|
203 |
+
elif g > 0.5:
|
204 |
+
return ctx.inf + z
|
205 |
+
else:
|
206 |
+
return ctx.nan * z
|
207 |
+
x = ctx.fmul(-0.5, z, exact=True)
|
208 |
+
y = 0.5+m
|
209 |
+
return ctx.exp(x) * z**y * ctx.hyperu(y-k, 1+2*m, z, **kwargs)
|
210 |
+
|
211 |
+
@defun
|
212 |
+
def hyperu(ctx, a, b, z, **kwargs):
|
213 |
+
a, atype = ctx._convert_param(a)
|
214 |
+
b, btype = ctx._convert_param(b)
|
215 |
+
z = ctx.convert(z)
|
216 |
+
if not z:
|
217 |
+
if ctx.re(b) <= 1:
|
218 |
+
return ctx.gammaprod([1-b],[a-b+1])
|
219 |
+
else:
|
220 |
+
return ctx.inf + z
|
221 |
+
bb = 1+a-b
|
222 |
+
bb, bbtype = ctx._convert_param(bb)
|
223 |
+
try:
|
224 |
+
orig = ctx.prec
|
225 |
+
try:
|
226 |
+
ctx.prec += 10
|
227 |
+
v = ctx.hypsum(2, 0, (atype, bbtype), [a, bb], -1/z, maxterms=ctx.prec)
|
228 |
+
return v / z**a
|
229 |
+
finally:
|
230 |
+
ctx.prec = orig
|
231 |
+
except ctx.NoConvergence:
|
232 |
+
pass
|
233 |
+
def h(a,b):
|
234 |
+
w = ctx.sinpi(b)
|
235 |
+
T1 = ([ctx.pi,w],[1,-1],[],[a-b+1,b],[a],[b],z)
|
236 |
+
T2 = ([-ctx.pi,w,z],[1,-1,1-b],[],[a,2-b],[a-b+1],[2-b],z)
|
237 |
+
return T1, T2
|
238 |
+
return ctx.hypercomb(h, [a,b], **kwargs)
|
239 |
+
|
240 |
+
@defun
|
241 |
+
def struveh(ctx,n,z, **kwargs):
|
242 |
+
n = ctx.convert(n)
|
243 |
+
z = ctx.convert(z)
|
244 |
+
# http://functions.wolfram.com/Bessel-TypeFunctions/StruveH/26/01/02/
|
245 |
+
def h(n):
|
246 |
+
return [([z/2, 0.5*ctx.sqrt(ctx.pi)], [n+1, -1], [], [n+1.5], [1], [1.5, n+1.5], -(z/2)**2)]
|
247 |
+
return ctx.hypercomb(h, [n], **kwargs)
|
248 |
+
|
249 |
+
@defun
|
250 |
+
def struvel(ctx,n,z, **kwargs):
|
251 |
+
n = ctx.convert(n)
|
252 |
+
z = ctx.convert(z)
|
253 |
+
# http://functions.wolfram.com/Bessel-TypeFunctions/StruveL/26/01/02/
|
254 |
+
def h(n):
|
255 |
+
return [([z/2, 0.5*ctx.sqrt(ctx.pi)], [n+1, -1], [], [n+1.5], [1], [1.5, n+1.5], (z/2)**2)]
|
256 |
+
return ctx.hypercomb(h, [n], **kwargs)
|
257 |
+
|
258 |
+
def _anger(ctx,which,v,z,**kwargs):
|
259 |
+
v = ctx._convert_param(v)[0]
|
260 |
+
z = ctx.convert(z)
|
261 |
+
def h(v):
|
262 |
+
b = ctx.mpq_1_2
|
263 |
+
u = v*b
|
264 |
+
m = b*3
|
265 |
+
a1,a2,b1,b2 = m-u, m+u, 1-u, 1+u
|
266 |
+
c, s = ctx.cospi_sinpi(u)
|
267 |
+
if which == 0:
|
268 |
+
A, B = [b*z, s], [c]
|
269 |
+
if which == 1:
|
270 |
+
A, B = [b*z, -c], [s]
|
271 |
+
w = ctx.square_exp_arg(z, mult=-0.25)
|
272 |
+
T1 = A, [1, 1], [], [a1,a2], [1], [a1,a2], w
|
273 |
+
T2 = B, [1], [], [b1,b2], [1], [b1,b2], w
|
274 |
+
return T1, T2
|
275 |
+
return ctx.hypercomb(h, [v], **kwargs)
|
276 |
+
|
277 |
+
@defun
|
278 |
+
def angerj(ctx, v, z, **kwargs):
|
279 |
+
return _anger(ctx, 0, v, z, **kwargs)
|
280 |
+
|
281 |
+
@defun
|
282 |
+
def webere(ctx, v, z, **kwargs):
|
283 |
+
return _anger(ctx, 1, v, z, **kwargs)
|
284 |
+
|
285 |
+
@defun
|
286 |
+
def lommels1(ctx, u, v, z, **kwargs):
|
287 |
+
u = ctx._convert_param(u)[0]
|
288 |
+
v = ctx._convert_param(v)[0]
|
289 |
+
z = ctx.convert(z)
|
290 |
+
def h(u,v):
|
291 |
+
b = ctx.mpq_1_2
|
292 |
+
w = ctx.square_exp_arg(z, mult=-0.25)
|
293 |
+
return ([u-v+1, u+v+1, z], [-1, -1, u+1], [], [], [1], \
|
294 |
+
[b*(u-v+3),b*(u+v+3)], w),
|
295 |
+
return ctx.hypercomb(h, [u,v], **kwargs)
|
296 |
+
|
297 |
+
@defun
|
298 |
+
def lommels2(ctx, u, v, z, **kwargs):
|
299 |
+
u = ctx._convert_param(u)[0]
|
300 |
+
v = ctx._convert_param(v)[0]
|
301 |
+
z = ctx.convert(z)
|
302 |
+
# Asymptotic expansion (GR p. 947) -- need to be careful
|
303 |
+
# not to use for small arguments
|
304 |
+
# def h(u,v):
|
305 |
+
# b = ctx.mpq_1_2
|
306 |
+
# w = -(z/2)**(-2)
|
307 |
+
# return ([z], [u-1], [], [], [b*(1-u+v)], [b*(1-u-v)], w),
|
308 |
+
def h(u,v):
|
309 |
+
b = ctx.mpq_1_2
|
310 |
+
w = ctx.square_exp_arg(z, mult=-0.25)
|
311 |
+
T1 = [u-v+1, u+v+1, z], [-1, -1, u+1], [], [], [1], [b*(u-v+3),b*(u+v+3)], w
|
312 |
+
T2 = [2, z], [u+v-1, -v], [v, b*(u+v+1)], [b*(v-u+1)], [], [1-v], w
|
313 |
+
T3 = [2, z], [u-v-1, v], [-v, b*(u-v+1)], [b*(1-u-v)], [], [1+v], w
|
314 |
+
#c1 = ctx.cospi((u-v)*b)
|
315 |
+
#c2 = ctx.cospi((u+v)*b)
|
316 |
+
#s = ctx.sinpi(v)
|
317 |
+
#r1 = (u-v+1)*b
|
318 |
+
#r2 = (u+v+1)*b
|
319 |
+
#T2 = [c1, s, z, 2], [1, -1, -v, v], [], [-v+1], [], [-v+1], w
|
320 |
+
#T3 = [-c2, s, z, 2], [1, -1, v, -v], [], [v+1], [], [v+1], w
|
321 |
+
#T2 = [c1, s, z, 2], [1, -1, -v, v+u-1], [r1, r2], [-v+1], [], [-v+1], w
|
322 |
+
#T3 = [-c2, s, z, 2], [1, -1, v, -v+u-1], [r1, r2], [v+1], [], [v+1], w
|
323 |
+
return T1, T2, T3
|
324 |
+
return ctx.hypercomb(h, [u,v], **kwargs)
|
325 |
+
|
326 |
+
@defun
|
327 |
+
def ber(ctx, n, z, **kwargs):
|
328 |
+
n = ctx.convert(n)
|
329 |
+
z = ctx.convert(z)
|
330 |
+
# http://functions.wolfram.com/Bessel-TypeFunctions/KelvinBer2/26/01/02/0001/
|
331 |
+
def h(n):
|
332 |
+
r = -(z/4)**4
|
333 |
+
cos, sin = ctx.cospi_sinpi(-0.75*n)
|
334 |
+
T1 = [cos, z/2], [1, n], [], [n+1], [], [0.5, 0.5*(n+1), 0.5*n+1], r
|
335 |
+
T2 = [sin, z/2], [1, n+2], [], [n+2], [], [1.5, 0.5*(n+3), 0.5*n+1], r
|
336 |
+
return T1, T2
|
337 |
+
return ctx.hypercomb(h, [n], **kwargs)
|
338 |
+
|
339 |
+
@defun
|
340 |
+
def bei(ctx, n, z, **kwargs):
|
341 |
+
n = ctx.convert(n)
|
342 |
+
z = ctx.convert(z)
|
343 |
+
# http://functions.wolfram.com/Bessel-TypeFunctions/KelvinBei2/26/01/02/0001/
|
344 |
+
def h(n):
|
345 |
+
r = -(z/4)**4
|
346 |
+
cos, sin = ctx.cospi_sinpi(0.75*n)
|
347 |
+
T1 = [cos, z/2], [1, n+2], [], [n+2], [], [1.5, 0.5*(n+3), 0.5*n+1], r
|
348 |
+
T2 = [sin, z/2], [1, n], [], [n+1], [], [0.5, 0.5*(n+1), 0.5*n+1], r
|
349 |
+
return T1, T2
|
350 |
+
return ctx.hypercomb(h, [n], **kwargs)
|
351 |
+
|
352 |
+
@defun
|
353 |
+
def ker(ctx, n, z, **kwargs):
|
354 |
+
n = ctx.convert(n)
|
355 |
+
z = ctx.convert(z)
|
356 |
+
# http://functions.wolfram.com/Bessel-TypeFunctions/KelvinKer2/26/01/02/0001/
|
357 |
+
def h(n):
|
358 |
+
r = -(z/4)**4
|
359 |
+
cos1, sin1 = ctx.cospi_sinpi(0.25*n)
|
360 |
+
cos2, sin2 = ctx.cospi_sinpi(0.75*n)
|
361 |
+
T1 = [2, z, 4*cos1], [-n-3, n, 1], [-n], [], [], [0.5, 0.5*(1+n), 0.5*(n+2)], r
|
362 |
+
T2 = [2, z, -sin1], [-n-3, 2+n, 1], [-n-1], [], [], [1.5, 0.5*(3+n), 0.5*(n+2)], r
|
363 |
+
T3 = [2, z, 4*cos2], [n-3, -n, 1], [n], [], [], [0.5, 0.5*(1-n), 1-0.5*n], r
|
364 |
+
T4 = [2, z, -sin2], [n-3, 2-n, 1], [n-1], [], [], [1.5, 0.5*(3-n), 1-0.5*n], r
|
365 |
+
return T1, T2, T3, T4
|
366 |
+
return ctx.hypercomb(h, [n], **kwargs)
|
367 |
+
|
368 |
+
@defun
|
369 |
+
def kei(ctx, n, z, **kwargs):
|
370 |
+
n = ctx.convert(n)
|
371 |
+
z = ctx.convert(z)
|
372 |
+
# http://functions.wolfram.com/Bessel-TypeFunctions/KelvinKei2/26/01/02/0001/
|
373 |
+
def h(n):
|
374 |
+
r = -(z/4)**4
|
375 |
+
cos1, sin1 = ctx.cospi_sinpi(0.75*n)
|
376 |
+
cos2, sin2 = ctx.cospi_sinpi(0.25*n)
|
377 |
+
T1 = [-cos1, 2, z], [1, n-3, 2-n], [n-1], [], [], [1.5, 0.5*(3-n), 1-0.5*n], r
|
378 |
+
T2 = [-sin1, 2, z], [1, n-1, -n], [n], [], [], [0.5, 0.5*(1-n), 1-0.5*n], r
|
379 |
+
T3 = [-sin2, 2, z], [1, -n-1, n], [-n], [], [], [0.5, 0.5*(n+1), 0.5*(n+2)], r
|
380 |
+
T4 = [-cos2, 2, z], [1, -n-3, n+2], [-n-1], [], [], [1.5, 0.5*(n+3), 0.5*(n+2)], r
|
381 |
+
return T1, T2, T3, T4
|
382 |
+
return ctx.hypercomb(h, [n], **kwargs)
|
383 |
+
|
384 |
+
# TODO: do this more generically?
|
385 |
+
def c_memo(f):
|
386 |
+
name = f.__name__
|
387 |
+
def f_wrapped(ctx):
|
388 |
+
cache = ctx._misc_const_cache
|
389 |
+
prec = ctx.prec
|
390 |
+
p,v = cache.get(name, (-1,0))
|
391 |
+
if p >= prec:
|
392 |
+
return +v
|
393 |
+
else:
|
394 |
+
cache[name] = (prec, f(ctx))
|
395 |
+
return cache[name][1]
|
396 |
+
return f_wrapped
|
397 |
+
|
398 |
+
@c_memo
|
399 |
+
def _airyai_C1(ctx):
|
400 |
+
return 1 / (ctx.cbrt(9) * ctx.gamma(ctx.mpf(2)/3))
|
401 |
+
|
402 |
+
@c_memo
|
403 |
+
def _airyai_C2(ctx):
|
404 |
+
return -1 / (ctx.cbrt(3) * ctx.gamma(ctx.mpf(1)/3))
|
405 |
+
|
406 |
+
@c_memo
|
407 |
+
def _airybi_C1(ctx):
|
408 |
+
return 1 / (ctx.nthroot(3,6) * ctx.gamma(ctx.mpf(2)/3))
|
409 |
+
|
410 |
+
@c_memo
|
411 |
+
def _airybi_C2(ctx):
|
412 |
+
return ctx.nthroot(3,6) / ctx.gamma(ctx.mpf(1)/3)
|
413 |
+
|
414 |
+
def _airybi_n2_inf(ctx):
|
415 |
+
prec = ctx.prec
|
416 |
+
try:
|
417 |
+
v = ctx.power(3,'2/3')*ctx.gamma('2/3')/(2*ctx.pi)
|
418 |
+
finally:
|
419 |
+
ctx.prec = prec
|
420 |
+
return +v
|
421 |
+
|
422 |
+
# Derivatives at z = 0
|
423 |
+
# TODO: could be expressed more elegantly using triple factorials
|
424 |
+
def _airyderiv_0(ctx, z, n, ntype, which):
|
425 |
+
if ntype == 'Z':
|
426 |
+
if n < 0:
|
427 |
+
return z
|
428 |
+
r = ctx.mpq_1_3
|
429 |
+
prec = ctx.prec
|
430 |
+
try:
|
431 |
+
ctx.prec += 10
|
432 |
+
v = ctx.gamma((n+1)*r) * ctx.power(3,n*r) / ctx.pi
|
433 |
+
if which == 0:
|
434 |
+
v *= ctx.sinpi(2*(n+1)*r)
|
435 |
+
v /= ctx.power(3,'2/3')
|
436 |
+
else:
|
437 |
+
v *= abs(ctx.sinpi(2*(n+1)*r))
|
438 |
+
v /= ctx.power(3,'1/6')
|
439 |
+
finally:
|
440 |
+
ctx.prec = prec
|
441 |
+
return +v + z
|
442 |
+
else:
|
443 |
+
# singular (does the limit exist?)
|
444 |
+
raise NotImplementedError
|
445 |
+
|
446 |
+
@defun
|
447 |
+
def airyai(ctx, z, derivative=0, **kwargs):
|
448 |
+
z = ctx.convert(z)
|
449 |
+
if derivative:
|
450 |
+
n, ntype = ctx._convert_param(derivative)
|
451 |
+
else:
|
452 |
+
n = 0
|
453 |
+
# Values at infinities
|
454 |
+
if not ctx.isnormal(z) and z:
|
455 |
+
if n and ntype == 'Z':
|
456 |
+
if n == -1:
|
457 |
+
if z == ctx.inf:
|
458 |
+
return ctx.mpf(1)/3 + 1/z
|
459 |
+
if z == ctx.ninf:
|
460 |
+
return ctx.mpf(-2)/3 + 1/z
|
461 |
+
if n < -1:
|
462 |
+
if z == ctx.inf:
|
463 |
+
return z
|
464 |
+
if z == ctx.ninf:
|
465 |
+
return (-1)**n * (-z)
|
466 |
+
if (not n) and z == ctx.inf or z == ctx.ninf:
|
467 |
+
return 1/z
|
468 |
+
# TODO: limits
|
469 |
+
raise ValueError("essential singularity of Ai(z)")
|
470 |
+
# Account for exponential scaling
|
471 |
+
if z:
|
472 |
+
extraprec = max(0, int(1.5*ctx.mag(z)))
|
473 |
+
else:
|
474 |
+
extraprec = 0
|
475 |
+
if n:
|
476 |
+
if n == 1:
|
477 |
+
def h():
|
478 |
+
# http://functions.wolfram.com/03.07.06.0005.01
|
479 |
+
if ctx._re(z) > 4:
|
480 |
+
ctx.prec += extraprec
|
481 |
+
w = z**1.5; r = -0.75/w; u = -2*w/3
|
482 |
+
ctx.prec -= extraprec
|
483 |
+
C = -ctx.exp(u)/(2*ctx.sqrt(ctx.pi))*ctx.nthroot(z,4)
|
484 |
+
return ([C],[1],[],[],[(-1,6),(7,6)],[],r),
|
485 |
+
# http://functions.wolfram.com/03.07.26.0001.01
|
486 |
+
else:
|
487 |
+
ctx.prec += extraprec
|
488 |
+
w = z**3 / 9
|
489 |
+
ctx.prec -= extraprec
|
490 |
+
C1 = _airyai_C1(ctx) * 0.5
|
491 |
+
C2 = _airyai_C2(ctx)
|
492 |
+
T1 = [C1,z],[1,2],[],[],[],[ctx.mpq_5_3],w
|
493 |
+
T2 = [C2],[1],[],[],[],[ctx.mpq_1_3],w
|
494 |
+
return T1, T2
|
495 |
+
return ctx.hypercomb(h, [], **kwargs)
|
496 |
+
else:
|
497 |
+
if z == 0:
|
498 |
+
return _airyderiv_0(ctx, z, n, ntype, 0)
|
499 |
+
# http://functions.wolfram.com/03.05.20.0004.01
|
500 |
+
def h(n):
|
501 |
+
ctx.prec += extraprec
|
502 |
+
w = z**3/9
|
503 |
+
ctx.prec -= extraprec
|
504 |
+
q13,q23,q43 = ctx.mpq_1_3, ctx.mpq_2_3, ctx.mpq_4_3
|
505 |
+
a1=q13; a2=1; b1=(1-n)*q13; b2=(2-n)*q13; b3=1-n*q13
|
506 |
+
T1 = [3, z], [n-q23, -n], [a1], [b1,b2,b3], \
|
507 |
+
[a1,a2], [b1,b2,b3], w
|
508 |
+
a1=q23; b1=(2-n)*q13; b2=1-n*q13; b3=(4-n)*q13
|
509 |
+
T2 = [3, z, -z], [n-q43, -n, 1], [a1], [b1,b2,b3], \
|
510 |
+
[a1,a2], [b1,b2,b3], w
|
511 |
+
return T1, T2
|
512 |
+
v = ctx.hypercomb(h, [n], **kwargs)
|
513 |
+
if ctx._is_real_type(z) and ctx.isint(n):
|
514 |
+
v = ctx._re(v)
|
515 |
+
return v
|
516 |
+
else:
|
517 |
+
def h():
|
518 |
+
if ctx._re(z) > 4:
|
519 |
+
# We could use 1F1, but it results in huge cancellation;
|
520 |
+
# the following expansion is better.
|
521 |
+
# TODO: asymptotic series for derivatives
|
522 |
+
ctx.prec += extraprec
|
523 |
+
w = z**1.5; r = -0.75/w; u = -2*w/3
|
524 |
+
ctx.prec -= extraprec
|
525 |
+
C = ctx.exp(u)/(2*ctx.sqrt(ctx.pi)*ctx.nthroot(z,4))
|
526 |
+
return ([C],[1],[],[],[(1,6),(5,6)],[],r),
|
527 |
+
else:
|
528 |
+
ctx.prec += extraprec
|
529 |
+
w = z**3 / 9
|
530 |
+
ctx.prec -= extraprec
|
531 |
+
C1 = _airyai_C1(ctx)
|
532 |
+
C2 = _airyai_C2(ctx)
|
533 |
+
T1 = [C1],[1],[],[],[],[ctx.mpq_2_3],w
|
534 |
+
T2 = [z*C2],[1],[],[],[],[ctx.mpq_4_3],w
|
535 |
+
return T1, T2
|
536 |
+
return ctx.hypercomb(h, [], **kwargs)
|
537 |
+
|
538 |
+
@defun
|
539 |
+
def airybi(ctx, z, derivative=0, **kwargs):
|
540 |
+
z = ctx.convert(z)
|
541 |
+
if derivative:
|
542 |
+
n, ntype = ctx._convert_param(derivative)
|
543 |
+
else:
|
544 |
+
n = 0
|
545 |
+
# Values at infinities
|
546 |
+
if not ctx.isnormal(z) and z:
|
547 |
+
if n and ntype == 'Z':
|
548 |
+
if z == ctx.inf:
|
549 |
+
return z
|
550 |
+
if z == ctx.ninf:
|
551 |
+
if n == -1:
|
552 |
+
return 1/z
|
553 |
+
if n == -2:
|
554 |
+
return _airybi_n2_inf(ctx)
|
555 |
+
if n < -2:
|
556 |
+
return (-1)**n * (-z)
|
557 |
+
if not n:
|
558 |
+
if z == ctx.inf:
|
559 |
+
return z
|
560 |
+
if z == ctx.ninf:
|
561 |
+
return 1/z
|
562 |
+
# TODO: limits
|
563 |
+
raise ValueError("essential singularity of Bi(z)")
|
564 |
+
if z:
|
565 |
+
extraprec = max(0, int(1.5*ctx.mag(z)))
|
566 |
+
else:
|
567 |
+
extraprec = 0
|
568 |
+
if n:
|
569 |
+
if n == 1:
|
570 |
+
# http://functions.wolfram.com/03.08.26.0001.01
|
571 |
+
def h():
|
572 |
+
ctx.prec += extraprec
|
573 |
+
w = z**3 / 9
|
574 |
+
ctx.prec -= extraprec
|
575 |
+
C1 = _airybi_C1(ctx)*0.5
|
576 |
+
C2 = _airybi_C2(ctx)
|
577 |
+
T1 = [C1,z],[1,2],[],[],[],[ctx.mpq_5_3],w
|
578 |
+
T2 = [C2],[1],[],[],[],[ctx.mpq_1_3],w
|
579 |
+
return T1, T2
|
580 |
+
return ctx.hypercomb(h, [], **kwargs)
|
581 |
+
else:
|
582 |
+
if z == 0:
|
583 |
+
return _airyderiv_0(ctx, z, n, ntype, 1)
|
584 |
+
def h(n):
|
585 |
+
ctx.prec += extraprec
|
586 |
+
w = z**3/9
|
587 |
+
ctx.prec -= extraprec
|
588 |
+
q13,q23,q43 = ctx.mpq_1_3, ctx.mpq_2_3, ctx.mpq_4_3
|
589 |
+
q16 = ctx.mpq_1_6
|
590 |
+
q56 = ctx.mpq_5_6
|
591 |
+
a1=q13; a2=1; b1=(1-n)*q13; b2=(2-n)*q13; b3=1-n*q13
|
592 |
+
T1 = [3, z], [n-q16, -n], [a1], [b1,b2,b3], \
|
593 |
+
[a1,a2], [b1,b2,b3], w
|
594 |
+
a1=q23; b1=(2-n)*q13; b2=1-n*q13; b3=(4-n)*q13
|
595 |
+
T2 = [3, z], [n-q56, 1-n], [a1], [b1,b2,b3], \
|
596 |
+
[a1,a2], [b1,b2,b3], w
|
597 |
+
return T1, T2
|
598 |
+
v = ctx.hypercomb(h, [n], **kwargs)
|
599 |
+
if ctx._is_real_type(z) and ctx.isint(n):
|
600 |
+
v = ctx._re(v)
|
601 |
+
return v
|
602 |
+
else:
|
603 |
+
def h():
|
604 |
+
ctx.prec += extraprec
|
605 |
+
w = z**3 / 9
|
606 |
+
ctx.prec -= extraprec
|
607 |
+
C1 = _airybi_C1(ctx)
|
608 |
+
C2 = _airybi_C2(ctx)
|
609 |
+
T1 = [C1],[1],[],[],[],[ctx.mpq_2_3],w
|
610 |
+
T2 = [z*C2],[1],[],[],[],[ctx.mpq_4_3],w
|
611 |
+
return T1, T2
|
612 |
+
return ctx.hypercomb(h, [], **kwargs)
|
613 |
+
|
614 |
+
def _airy_zero(ctx, which, k, derivative, complex=False):
|
615 |
+
# Asymptotic formulas are given in DLMF section 9.9
|
616 |
+
def U(t): return t**(2/3.)*(1-7/(t**2*48))
|
617 |
+
def T(t): return t**(2/3.)*(1+5/(t**2*48))
|
618 |
+
k = int(k)
|
619 |
+
if k < 1:
|
620 |
+
raise ValueError("k cannot be less than 1")
|
621 |
+
if not derivative in (0,1):
|
622 |
+
raise ValueError("Derivative should lie between 0 and 1")
|
623 |
+
if which == 0:
|
624 |
+
if derivative:
|
625 |
+
return ctx.findroot(lambda z: ctx.airyai(z,1),
|
626 |
+
-U(3*ctx.pi*(4*k-3)/8))
|
627 |
+
return ctx.findroot(ctx.airyai, -T(3*ctx.pi*(4*k-1)/8))
|
628 |
+
if which == 1 and complex == False:
|
629 |
+
if derivative:
|
630 |
+
return ctx.findroot(lambda z: ctx.airybi(z,1),
|
631 |
+
-U(3*ctx.pi*(4*k-1)/8))
|
632 |
+
return ctx.findroot(ctx.airybi, -T(3*ctx.pi*(4*k-3)/8))
|
633 |
+
if which == 1 and complex == True:
|
634 |
+
if derivative:
|
635 |
+
t = 3*ctx.pi*(4*k-3)/8 + 0.75j*ctx.ln2
|
636 |
+
s = ctx.expjpi(ctx.mpf(1)/3) * T(t)
|
637 |
+
return ctx.findroot(lambda z: ctx.airybi(z,1), s)
|
638 |
+
t = 3*ctx.pi*(4*k-1)/8 + 0.75j*ctx.ln2
|
639 |
+
s = ctx.expjpi(ctx.mpf(1)/3) * U(t)
|
640 |
+
return ctx.findroot(ctx.airybi, s)
|
641 |
+
|
642 |
+
@defun
|
643 |
+
def airyaizero(ctx, k, derivative=0):
|
644 |
+
return _airy_zero(ctx, 0, k, derivative, False)
|
645 |
+
|
646 |
+
@defun
|
647 |
+
def airybizero(ctx, k, derivative=0, complex=False):
|
648 |
+
return _airy_zero(ctx, 1, k, derivative, complex)
|
649 |
+
|
650 |
+
def _scorer(ctx, z, which, kwargs):
|
651 |
+
z = ctx.convert(z)
|
652 |
+
if ctx.isinf(z):
|
653 |
+
if z == ctx.inf:
|
654 |
+
if which == 0: return 1/z
|
655 |
+
if which == 1: return z
|
656 |
+
if z == ctx.ninf:
|
657 |
+
return 1/z
|
658 |
+
raise ValueError("essential singularity")
|
659 |
+
if z:
|
660 |
+
extraprec = max(0, int(1.5*ctx.mag(z)))
|
661 |
+
else:
|
662 |
+
extraprec = 0
|
663 |
+
if kwargs.get('derivative'):
|
664 |
+
raise NotImplementedError
|
665 |
+
# Direct asymptotic expansions, to avoid
|
666 |
+
# exponentially large cancellation
|
667 |
+
try:
|
668 |
+
if ctx.mag(z) > 3:
|
669 |
+
if which == 0 and abs(ctx.arg(z)) < ctx.pi/3 * 0.999:
|
670 |
+
def h():
|
671 |
+
return (([ctx.pi,z],[-1,-1],[],[],[(1,3),(2,3),1],[],9/z**3),)
|
672 |
+
return ctx.hypercomb(h, [], maxterms=ctx.prec, force_series=True)
|
673 |
+
if which == 1 and abs(ctx.arg(-z)) < 2*ctx.pi/3 * 0.999:
|
674 |
+
def h():
|
675 |
+
return (([-ctx.pi,z],[-1,-1],[],[],[(1,3),(2,3),1],[],9/z**3),)
|
676 |
+
return ctx.hypercomb(h, [], maxterms=ctx.prec, force_series=True)
|
677 |
+
except ctx.NoConvergence:
|
678 |
+
pass
|
679 |
+
def h():
|
680 |
+
A = ctx.airybi(z, **kwargs)/3
|
681 |
+
B = -2*ctx.pi
|
682 |
+
if which == 1:
|
683 |
+
A *= 2
|
684 |
+
B *= -1
|
685 |
+
ctx.prec += extraprec
|
686 |
+
w = z**3/9
|
687 |
+
ctx.prec -= extraprec
|
688 |
+
T1 = [A], [1], [], [], [], [], 0
|
689 |
+
T2 = [B,z], [-1,2], [], [], [1], [ctx.mpq_4_3,ctx.mpq_5_3], w
|
690 |
+
return T1, T2
|
691 |
+
return ctx.hypercomb(h, [], **kwargs)
|
692 |
+
|
693 |
+
@defun
|
694 |
+
def scorergi(ctx, z, **kwargs):
|
695 |
+
return _scorer(ctx, z, 0, kwargs)
|
696 |
+
|
697 |
+
@defun
|
698 |
+
def scorerhi(ctx, z, **kwargs):
|
699 |
+
return _scorer(ctx, z, 1, kwargs)
|
700 |
+
|
701 |
+
@defun_wrapped
|
702 |
+
def coulombc(ctx, l, eta, _cache={}):
|
703 |
+
if (l, eta) in _cache and _cache[l,eta][0] >= ctx.prec:
|
704 |
+
return +_cache[l,eta][1]
|
705 |
+
G3 = ctx.loggamma(2*l+2)
|
706 |
+
G1 = ctx.loggamma(1+l+ctx.j*eta)
|
707 |
+
G2 = ctx.loggamma(1+l-ctx.j*eta)
|
708 |
+
v = 2**l * ctx.exp((-ctx.pi*eta+G1+G2)/2 - G3)
|
709 |
+
if not (ctx.im(l) or ctx.im(eta)):
|
710 |
+
v = ctx.re(v)
|
711 |
+
_cache[l,eta] = (ctx.prec, v)
|
712 |
+
return v
|
713 |
+
|
714 |
+
@defun_wrapped
|
715 |
+
def coulombf(ctx, l, eta, z, w=1, chop=True, **kwargs):
|
716 |
+
# Regular Coulomb wave function
|
717 |
+
# Note: w can be either 1 or -1; the other may be better in some cases
|
718 |
+
# TODO: check that chop=True chops when and only when it should
|
719 |
+
#ctx.prec += 10
|
720 |
+
def h(l, eta):
|
721 |
+
try:
|
722 |
+
jw = ctx.j*w
|
723 |
+
jwz = ctx.fmul(jw, z, exact=True)
|
724 |
+
jwz2 = ctx.fmul(jwz, -2, exact=True)
|
725 |
+
C = ctx.coulombc(l, eta)
|
726 |
+
T1 = [C, z, ctx.exp(jwz)], [1, l+1, 1], [], [], [1+l+jw*eta], \
|
727 |
+
[2*l+2], jwz2
|
728 |
+
except ValueError:
|
729 |
+
T1 = [0], [-1], [], [], [], [], 0
|
730 |
+
return (T1,)
|
731 |
+
v = ctx.hypercomb(h, [l,eta], **kwargs)
|
732 |
+
if chop and (not ctx.im(l)) and (not ctx.im(eta)) and (not ctx.im(z)) and \
|
733 |
+
(ctx.re(z) >= 0):
|
734 |
+
v = ctx.re(v)
|
735 |
+
return v
|
736 |
+
|
737 |
+
@defun_wrapped
|
738 |
+
def _coulomb_chi(ctx, l, eta, _cache={}):
|
739 |
+
if (l, eta) in _cache and _cache[l,eta][0] >= ctx.prec:
|
740 |
+
return _cache[l,eta][1]
|
741 |
+
def terms():
|
742 |
+
l2 = -l-1
|
743 |
+
jeta = ctx.j*eta
|
744 |
+
return [ctx.loggamma(1+l+jeta) * (-0.5j),
|
745 |
+
ctx.loggamma(1+l-jeta) * (0.5j),
|
746 |
+
ctx.loggamma(1+l2+jeta) * (0.5j),
|
747 |
+
ctx.loggamma(1+l2-jeta) * (-0.5j),
|
748 |
+
-(l+0.5)*ctx.pi]
|
749 |
+
v = ctx.sum_accurately(terms, 1)
|
750 |
+
_cache[l,eta] = (ctx.prec, v)
|
751 |
+
return v
|
752 |
+
|
753 |
+
@defun_wrapped
|
754 |
+
def coulombg(ctx, l, eta, z, w=1, chop=True, **kwargs):
|
755 |
+
# Irregular Coulomb wave function
|
756 |
+
# Note: w can be either 1 or -1; the other may be better in some cases
|
757 |
+
# TODO: check that chop=True chops when and only when it should
|
758 |
+
if not ctx._im(l):
|
759 |
+
l = ctx._re(l) # XXX: for isint
|
760 |
+
def h(l, eta):
|
761 |
+
# Force perturbation for integers and half-integers
|
762 |
+
if ctx.isint(l*2):
|
763 |
+
T1 = [0], [-1], [], [], [], [], 0
|
764 |
+
return (T1,)
|
765 |
+
l2 = -l-1
|
766 |
+
try:
|
767 |
+
chi = ctx._coulomb_chi(l, eta)
|
768 |
+
jw = ctx.j*w
|
769 |
+
s = ctx.sin(chi); c = ctx.cos(chi)
|
770 |
+
C1 = ctx.coulombc(l,eta)
|
771 |
+
C2 = ctx.coulombc(l2,eta)
|
772 |
+
u = ctx.exp(jw*z)
|
773 |
+
x = -2*jw*z
|
774 |
+
T1 = [s, C1, z, u, c], [-1, 1, l+1, 1, 1], [], [], \
|
775 |
+
[1+l+jw*eta], [2*l+2], x
|
776 |
+
T2 = [-s, C2, z, u], [-1, 1, l2+1, 1], [], [], \
|
777 |
+
[1+l2+jw*eta], [2*l2+2], x
|
778 |
+
return T1, T2
|
779 |
+
except ValueError:
|
780 |
+
T1 = [0], [-1], [], [], [], [], 0
|
781 |
+
return (T1,)
|
782 |
+
v = ctx.hypercomb(h, [l,eta], **kwargs)
|
783 |
+
if chop and (not ctx._im(l)) and (not ctx._im(eta)) and (not ctx._im(z)) and \
|
784 |
+
(ctx._re(z) >= 0):
|
785 |
+
v = ctx._re(v)
|
786 |
+
return v
|
787 |
+
|
788 |
+
def mcmahon(ctx,kind,prime,v,m):
|
789 |
+
"""
|
790 |
+
Computes an estimate for the location of the Bessel function zero
|
791 |
+
j_{v,m}, y_{v,m}, j'_{v,m} or y'_{v,m} using McMahon's asymptotic
|
792 |
+
expansion (Abramowitz & Stegun 9.5.12-13, DLMF 10.21(vi)).
|
793 |
+
|
794 |
+
Returns (r,err) where r is the estimated location of the root
|
795 |
+
and err is a positive number estimating the error of the
|
796 |
+
asymptotic expansion.
|
797 |
+
"""
|
798 |
+
u = 4*v**2
|
799 |
+
if kind == 1 and not prime: b = (4*m+2*v-1)*ctx.pi/4
|
800 |
+
if kind == 2 and not prime: b = (4*m+2*v-3)*ctx.pi/4
|
801 |
+
if kind == 1 and prime: b = (4*m+2*v-3)*ctx.pi/4
|
802 |
+
if kind == 2 and prime: b = (4*m+2*v-1)*ctx.pi/4
|
803 |
+
if not prime:
|
804 |
+
s1 = b
|
805 |
+
s2 = -(u-1)/(8*b)
|
806 |
+
s3 = -4*(u-1)*(7*u-31)/(3*(8*b)**3)
|
807 |
+
s4 = -32*(u-1)*(83*u**2-982*u+3779)/(15*(8*b)**5)
|
808 |
+
s5 = -64*(u-1)*(6949*u**3-153855*u**2+1585743*u-6277237)/(105*(8*b)**7)
|
809 |
+
if prime:
|
810 |
+
s1 = b
|
811 |
+
s2 = -(u+3)/(8*b)
|
812 |
+
s3 = -4*(7*u**2+82*u-9)/(3*(8*b)**3)
|
813 |
+
s4 = -32*(83*u**3+2075*u**2-3039*u+3537)/(15*(8*b)**5)
|
814 |
+
s5 = -64*(6949*u**4+296492*u**3-1248002*u**2+7414380*u-5853627)/(105*(8*b)**7)
|
815 |
+
terms = [s1,s2,s3,s4,s5]
|
816 |
+
s = s1
|
817 |
+
err = 0.0
|
818 |
+
for i in range(1,len(terms)):
|
819 |
+
if abs(terms[i]) < abs(terms[i-1]):
|
820 |
+
s += terms[i]
|
821 |
+
else:
|
822 |
+
err = abs(terms[i])
|
823 |
+
if i == len(terms)-1:
|
824 |
+
err = abs(terms[-1])
|
825 |
+
return s, err
|
826 |
+
|
827 |
+
def generalized_bisection(ctx,f,a,b,n):
|
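A small usage sketch (an aside, not part of the library file): compare the McMahon estimate for j_{0,10} with the refined zero returned by the public besseljzero. It assumes the module-level mcmahon defined above is in scope and uses mp as the context object.

from mpmath import mp, besseljzero

mp.dps = 15
# kind=1 selects J, prime=0 selects the function rather than its derivative
r_est, err = mcmahon(mp, 1, 0, mp.mpf(0), 10)
r_ref = besseljzero(0, 10)
# estimate, refined zero, actual difference, and the expansion's error estimate
print(r_est, r_ref, abs(r_est - r_ref), err)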
828 |
+
"""
|
829 |
+
Given f known to have exactly n simple roots within [a,b],
|
830 |
+
return a list of n intervals isolating the roots
|
831 |
+
and having opposite signs at the endpoints.
|
832 |
+
|
833 |
+
TODO: this can be optimized, e.g. by reusing evaluation points.
|
834 |
+
"""
|
835 |
+
if n < 1:
|
836 |
+
raise ValueError("n cannot be less than 1")
|
837 |
+
N = n+1
|
838 |
+
points = []
|
839 |
+
signs = []
|
840 |
+
while 1:
|
841 |
+
points = ctx.linspace(a,b,N)
|
842 |
+
signs = [ctx.sign(f(x)) for x in points]
|
843 |
+
ok_intervals = [(points[i],points[i+1]) for i in range(N-1) \
|
844 |
+
if signs[i]*signs[i+1] == -1]
|
845 |
+
if len(ok_intervals) == n:
|
846 |
+
return ok_intervals
|
847 |
+
N = N*2
|
848 |
+
|
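An illustrative use of generalized_bisection (not part of the library file): sin has exactly three simple roots in [1, 10] (pi, 2*pi, 3*pi), so we ask for three sign-change intervals and refine each one with findroot, just as bessel_zero below does for Bessel functions.

from mpmath import mp, sin, findroot

mp.dps = 15
intervals = generalized_bisection(mp, sin, 1, 10, 3)
# Each interval brackets one root; the Illinois solver accepts a bracket.
roots = [findroot(sin, ab, solver='illinois') for ab in intervals]
print(roots)   # approximately [pi, 2*pi, 3*pi]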
849 |
+
def find_in_interval(ctx, f, ab):
|
850 |
+
return ctx.findroot(f, ab, solver='illinois', verify=False)
|
851 |
+
|
852 |
+
def bessel_zero(ctx, kind, prime, v, m, isoltol=0.01, _interval_cache={}):
|
853 |
+
prec = ctx.prec
|
854 |
+
workprec = max(prec, ctx.mag(v), ctx.mag(m))+10
|
855 |
+
try:
|
856 |
+
ctx.prec = workprec
|
857 |
+
v = ctx.mpf(v)
|
858 |
+
m = int(m)
|
859 |
+
prime = int(prime)
|
860 |
+
if v < 0:
|
861 |
+
raise ValueError("v cannot be negative")
|
862 |
+
if m < 1:
|
863 |
+
raise ValueError("m cannot be less than 1")
|
864 |
+
if not prime in (0,1):
|
865 |
+
raise ValueError("prime should lie between 0 and 1")
|
866 |
+
if kind == 1:
|
867 |
+
if prime: f = lambda x: ctx.besselj(v,x,derivative=1)
|
868 |
+
else: f = lambda x: ctx.besselj(v,x)
|
869 |
+
if kind == 2:
|
870 |
+
if prime: f = lambda x: ctx.bessely(v,x,derivative=1)
|
871 |
+
else: f = lambda x: ctx.bessely(v,x)
|
872 |
+
# The first root of J' is very close to 0 for small
|
873 |
+
# orders, and this needs to be special-cased
|
874 |
+
if kind == 1 and prime and m == 1:
|
875 |
+
if v == 0:
|
876 |
+
return ctx.zero
|
877 |
+
if v <= 1:
|
878 |
+
# TODO: use v <= j'_{v,1} < y_{v,1}?
|
879 |
+
r = 2*ctx.sqrt(v*(1+v)/(v+2))
|
880 |
+
return find_in_interval(ctx, f, (r/10, 2*r))
|
881 |
+
if (kind,prime,v,m) in _interval_cache:
|
882 |
+
return find_in_interval(ctx, f, _interval_cache[kind,prime,v,m])
|
883 |
+
r, err = mcmahon(ctx, kind, prime, v, m)
|
884 |
+
if err < isoltol:
|
885 |
+
return find_in_interval(ctx, f, (r-isoltol, r+isoltol))
|
886 |
+
# An x such that 0 < x < r_{v,1}
|
887 |
+
if kind == 1 and not prime: low = 2.4
|
888 |
+
if kind == 1 and prime: low = 1.8
|
889 |
+
if kind == 2 and not prime: low = 0.8
|
890 |
+
if kind == 2 and prime: low = 2.0
|
891 |
+
n = m+1
|
892 |
+
while 1:
|
893 |
+
r1, err = mcmahon(ctx, kind, prime, v, n)
|
894 |
+
if err < isoltol:
|
895 |
+
r2, err2 = mcmahon(ctx, kind, prime, v, n+1)
|
896 |
+
intervals = generalized_bisection(ctx, f, low, 0.5*(r1+r2), n)
|
897 |
+
for k, ab in enumerate(intervals):
|
898 |
+
_interval_cache[kind,prime,v,k+1] = ab
|
899 |
+
return find_in_interval(ctx, f, intervals[m-1])
|
900 |
+
else:
|
901 |
+
n = n*2
|
902 |
+
finally:
|
903 |
+
ctx.prec = prec
|
904 |
+
|
905 |
+
@defun
|
906 |
+
def besseljzero(ctx, v, m, derivative=0):
|
907 |
+
r"""
|
908 |
+
For a real order `\nu \ge 0` and a positive integer `m`, returns
|
909 |
+
`j_{\nu,m}`, the `m`-th positive zero of the Bessel function of the
|
910 |
+
first kind `J_{\nu}(z)` (see :func:`~mpmath.besselj`). Alternatively,
|
911 |
+
with *derivative=1*, gives the `m`-th nonnegative simple zero
|
912 |
+
`j'_{\nu,m}` of `J'_{\nu}(z)`.
|
913 |
+
|
914 |
+
The indexing convention is that used by Abramowitz & Stegun
|
915 |
+
and the DLMF. Note the special case `j'_{0,1} = 0`, while all other
|
916 |
+
zeros are positive. In effect, only simple zeros are counted
|
917 |
+
(all zeros of Bessel functions are simple except possibly `z = 0`)
|
918 |
+
and `j_{\nu,m}` becomes a monotonic function of both `\nu`
|
919 |
+
and `m`.
|
920 |
+
|
921 |
+
The zeros are interlaced according to the inequalities
|
922 |
+
|
923 |
+
.. math ::
|
924 |
+
|
925 |
+
j'_{\nu,k} < j_{\nu,k} < j'_{\nu,k+1}
|
926 |
+
|
927 |
+
j_{\nu,1} < j_{\nu+1,1} < j_{\nu,2} < j_{\nu+1,2} < j_{\nu,3} < \cdots
|
928 |
+
|
929 |
+
**Examples**
|
930 |
+
|
931 |
+
Initial zeros of the Bessel functions `J_0(z), J_1(z), J_2(z)`::
|
932 |
+
|
933 |
+
>>> from mpmath import *
|
934 |
+
>>> mp.dps = 25; mp.pretty = True
|
935 |
+
>>> besseljzero(0,1); besseljzero(0,2); besseljzero(0,3)
|
936 |
+
2.404825557695772768621632
|
937 |
+
5.520078110286310649596604
|
938 |
+
8.653727912911012216954199
|
939 |
+
>>> besseljzero(1,1); besseljzero(1,2); besseljzero(1,3)
|
940 |
+
3.831705970207512315614436
|
941 |
+
7.01558666981561875353705
|
942 |
+
10.17346813506272207718571
|
943 |
+
>>> besseljzero(2,1); besseljzero(2,2); besseljzero(2,3)
|
944 |
+
5.135622301840682556301402
|
945 |
+
8.417244140399864857783614
|
946 |
+
11.61984117214905942709415
|
947 |
+
|
948 |
+
Initial zeros of `J'_0(z), J'_1(z), J'_2(z)`::
|
949 |
+
|
950 |
+
>>> besseljzero(0,1,1); besseljzero(0,2,1); besseljzero(0,3,1)
0.0
|
951 |
+
3.831705970207512315614436
|
952 |
+
7.01558666981561875353705
|
953 |
+
>>> besseljzero(1,1,1); besseljzero(1,2,1); besseljzero(1,3,1)
|
954 |
+
1.84118378134065930264363
|
955 |
+
5.331442773525032636884016
|
956 |
+
8.536316366346285834358961
|
957 |
+
>>> besseljzero(2,1,1); besseljzero(2,2,1); besseljzero(2,3,1)
|
958 |
+
3.054236928227140322755932
|
959 |
+
6.706133194158459146634394
|
960 |
+
9.969467823087595793179143
|
961 |
+
|
962 |
+
Zeros with large index::
|
963 |
+
|
964 |
+
>>> besseljzero(0,100); besseljzero(0,1000); besseljzero(0,10000)
|
965 |
+
313.3742660775278447196902
|
966 |
+
3140.807295225078628895545
|
967 |
+
31415.14114171350798533666
|
968 |
+
>>> besseljzero(5,100); besseljzero(5,1000); besseljzero(5,10000)
|
969 |
+
321.1893195676003157339222
|
970 |
+
3148.657306813047523500494
|
971 |
+
31422.9947255486291798943
|
972 |
+
>>> besseljzero(0,100,1); besseljzero(0,1000,1); besseljzero(0,10000,1)
|
973 |
+
311.8018681873704508125112
|
974 |
+
3139.236339643802482833973
|
975 |
+
31413.57032947022399485808
|
976 |
+
|
977 |
+
Zeros of functions with large order::
|
978 |
+
|
979 |
+
>>> besseljzero(50,1)
|
980 |
+
57.11689916011917411936228
|
981 |
+
>>> besseljzero(50,2)
|
982 |
+
62.80769876483536093435393
|
983 |
+
>>> besseljzero(50,100)
|
984 |
+
388.6936600656058834640981
|
985 |
+
>>> besseljzero(50,1,1)
|
986 |
+
52.99764038731665010944037
|
987 |
+
>>> besseljzero(50,2,1)
|
988 |
+
60.02631933279942589882363
|
989 |
+
>>> besseljzero(50,100,1)
|
990 |
+
387.1083151608726181086283
|
991 |
+
|
992 |
+
Zeros of functions with fractional order::
|
993 |
+
|
994 |
+
>>> besseljzero(0.5,1); besseljzero(1.5,1); besseljzero(2.25,4)
|
995 |
+
3.141592653589793238462643
|
996 |
+
4.493409457909064175307881
|
997 |
+
15.15657692957458622921634
|
998 |
+
|
999 |
+
Both `J_{\nu}(z)` and `J'_{\nu}(z)` can be expressed as infinite
|
1000 |
+
products over their zeros::
|
1001 |
+
|
1002 |
+
>>> v,z = 2, mpf(1)
|
1003 |
+
>>> (z/2)**v/gamma(v+1) * \
|
1004 |
+
... nprod(lambda k: 1-(z/besseljzero(v,k))**2, [1,inf])
|
1005 |
+
...
|
1006 |
+
0.1149034849319004804696469
|
1007 |
+
>>> besselj(v,z)
|
1008 |
+
0.1149034849319004804696469
|
1009 |
+
>>> (z/2)**(v-1)/2/gamma(v) * \
|
1010 |
+
... nprod(lambda k: 1-(z/besseljzero(v,k,1))**2, [1,inf])
|
1011 |
+
...
|
1012 |
+
0.2102436158811325550203884
|
1013 |
+
>>> besselj(v,z,1)
|
1014 |
+
0.2102436158811325550203884
|
1015 |
+
|
1016 |
+
"""
|
1017 |
+
return +bessel_zero(ctx, 1, derivative, v, m)
|
1018 |
+
|
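As an aside (not part of the file), the interlacing inequalities stated in the docstring are easy to spot-check numerically with the public API alone; this uses only besseljzero and asserts both chains for a small range of indices.

from mpmath import mp, besseljzero

mp.dps = 15
v = 2
for k in (1, 2, 3):
    # j'_{v,k} < j_{v,k} < j'_{v,k+1}
    assert besseljzero(v, k, 1) < besseljzero(v, k) < besseljzero(v, k+1, 1)
    # j_{v,k} < j_{v+1,k} < j_{v,k+1}
    assert besseljzero(v, k) < besseljzero(v+1, k) < besseljzero(v, k+1)
print("interlacing holds for v=2, k=1..3")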
1019 |
+
@defun
|
1020 |
+
def besselyzero(ctx, v, m, derivative=0):
|
1021 |
+
r"""
|
1022 |
+
For a real order `\nu \ge 0` and a positive integer `m`, returns
|
1023 |
+
`y_{\nu,m}`, the `m`-th positive zero of the Bessel function of the
|
1024 |
+
second kind `Y_{\nu}(z)` (see :func:`~mpmath.bessely`). Alternatively,
|
1025 |
+
with *derivative=1*, gives the `m`-th positive zero `y'_{\nu,m}` of
|
1026 |
+
`Y'_{\nu}(z)`.
|
1027 |
+
|
1028 |
+
The zeros are interlaced according to the inequalities
|
1029 |
+
|
1030 |
+
.. math ::
|
1031 |
+
|
1032 |
+
y_{\nu,k} < y'_{\nu,k} < y_{\nu,k+1}
|
1033 |
+
|
1034 |
+
y_{\nu,1} < y_{\nu+1,1} < y_{\nu,2} < y_{\nu+1,2} < y_{\nu,3} < \cdots
|
1035 |
+
|
1036 |
+
**Examples**
|
1037 |
+
|
1038 |
+
Initial zeros of the Bessel functions `Y_0(z), Y_1(z), Y_2(z)`::
|
1039 |
+
|
1040 |
+
>>> from mpmath import *
|
1041 |
+
>>> mp.dps = 25; mp.pretty = True
|
1042 |
+
>>> besselyzero(0,1); besselyzero(0,2); besselyzero(0,3)
|
1043 |
+
0.8935769662791675215848871
|
1044 |
+
3.957678419314857868375677
|
1045 |
+
7.086051060301772697623625
|
1046 |
+
>>> besselyzero(1,1); besselyzero(1,2); besselyzero(1,3)
|
1047 |
+
2.197141326031017035149034
|
1048 |
+
5.429681040794135132772005
|
1049 |
+
8.596005868331168926429606
|
1050 |
+
>>> besselyzero(2,1); besselyzero(2,2); besselyzero(2,3)
|
1051 |
+
3.384241767149593472701426
|
1052 |
+
6.793807513268267538291167
|
1053 |
+
10.02347797936003797850539
|
1054 |
+
|
1055 |
+
Initial zeros of `Y'_0(z), Y'_1(z), Y'_2(z)`::
|
1056 |
+
|
1057 |
+
>>> besselyzero(0,1,1); besselyzero(0,2,1); besselyzero(0,3,1)
|
1058 |
+
2.197141326031017035149034
|
1059 |
+
5.429681040794135132772005
|
1060 |
+
8.596005868331168926429606
|
1061 |
+
>>> besselyzero(1,1,1); besselyzero(1,2,1); besselyzero(1,3,1)
|
1062 |
+
3.683022856585177699898967
|
1063 |
+
6.941499953654175655751944
|
1064 |
+
10.12340465543661307978775
|
1065 |
+
>>> besselyzero(2,1,1); besselyzero(2,2,1); besselyzero(2,3,1)
|
1066 |
+
5.002582931446063945200176
|
1067 |
+
8.350724701413079526349714
|
1068 |
+
11.57419546521764654624265
|
1069 |
+
|
1070 |
+
Zeros with large index::
|
1071 |
+
|
1072 |
+
>>> besselyzero(0,100); besselyzero(0,1000); besselyzero(0,10000)
|
1073 |
+
311.8034717601871549333419
|
1074 |
+
3139.236498918198006794026
|
1075 |
+
31413.57034538691205229188
|
1076 |
+
>>> besselyzero(5,100); besselyzero(5,1000); besselyzero(5,10000)
|
1077 |
+
319.6183338562782156235062
|
1078 |
+
3147.086508524556404473186
|
1079 |
+
31421.42392920214673402828
|
1080 |
+
>>> besselyzero(0,100,1); besselyzero(0,1000,1); besselyzero(0,10000,1)
|
1081 |
+
313.3726705426359345050449
|
1082 |
+
3140.807136030340213610065
|
1083 |
+
31415.14112579761578220175
|
1084 |
+
|
1085 |
+
Zeros of functions with large order::
|
1086 |
+
|
1087 |
+
>>> besselyzero(50,1)
|
1088 |
+
53.50285882040036394680237
|
1089 |
+
>>> besselyzero(50,2)
|
1090 |
+
60.11244442774058114686022
|
1091 |
+
>>> besselyzero(50,100)
|
1092 |
+
387.1096509824943957706835
|
1093 |
+
>>> besselyzero(50,1,1)
|
1094 |
+
56.96290427516751320063605
|
1095 |
+
>>> besselyzero(50,2,1)
|
1096 |
+
62.74888166945933944036623
|
1097 |
+
>>> besselyzero(50,100,1)
|
1098 |
+
388.6923300548309258355475
|
1099 |
+
|
1100 |
+
Zeros of functions with fractional order::
|
1101 |
+
|
1102 |
+
>>> besselyzero(0.5,1); besselyzero(1.5,1); besselyzero(2.25,4)
|
1103 |
+
1.570796326794896619231322
|
1104 |
+
2.798386045783887136720249
|
1105 |
+
13.56721208770735123376018
|
1106 |
+
|
1107 |
+
"""
|
1108 |
+
return +bessel_zero(ctx, 2, derivative, v, m)
|
env-llmeval/lib/python3.10/site-packages/mpmath/functions/elliptic.py
ADDED
@@ -0,0 +1,1431 @@
1 |
+
r"""
|
2 |
+
Elliptic functions historically comprise the elliptic integrals
|
3 |
+
and their inverses, and originate from the problem of computing the
|
4 |
+
arc length of an ellipse. From a more modern point of view,
|
5 |
+
an elliptic function is defined as a doubly periodic function, i.e.
|
6 |
+
a function which satisfies
|
7 |
+
|
8 |
+
.. math ::
|
9 |
+
|
10 |
+
f(z + 2 \omega_1) = f(z + 2 \omega_2) = f(z)
|
11 |
+
|
12 |
+
for some half-periods `\omega_1, \omega_2` with
|
13 |
+
`\mathrm{Im}[\omega_1 / \omega_2] > 0`. The canonical elliptic
|
14 |
+
functions are the Jacobi elliptic functions. More broadly, this section
|
15 |
+
includes quasi-doubly periodic functions (such as the Jacobi theta
|
16 |
+
functions) and other functions useful in the study of elliptic functions.
|
17 |
+
|
18 |
+
Many different conventions for the arguments of
|
19 |
+
elliptic functions are in use. It is even standard to use
|
20 |
+
different parameterizations for different functions in the same
|
21 |
+
text or software (and mpmath is no exception).
|
22 |
+
The usual parameters are the elliptic nome `q`, which usually
|
23 |
+
must satisfy `|q| < 1`; the elliptic parameter `m` (an arbitrary
|
24 |
+
complex number); the elliptic modulus `k` (an arbitrary complex
|
25 |
+
number); and the half-period ratio `\tau`, which usually must
|
26 |
+
satisfy `\mathrm{Im}[\tau] > 0`.
|
27 |
+
These quantities can be expressed in terms of each other
|
28 |
+
using the following relations:
|
29 |
+
|
30 |
+
.. math ::
|
31 |
+
|
32 |
+
m = k^2
|
33 |
+
|
34 |
+
.. math ::
|
35 |
+
|
36 |
+
\tau = i \frac{K(1-m)}{K(m)}
|
37 |
+
|
38 |
+
.. math ::
|
39 |
+
|
40 |
+
q = e^{i \pi \tau}
|
41 |
+
|
42 |
+
.. math ::
|
43 |
+
|
44 |
+
k = \frac{\vartheta_2^2(q)}{\vartheta_3^2(q)}
|
45 |
+
|
46 |
+
In addition, an alternative definition is used for the nome in
|
47 |
+
number theory, which we here denote by q-bar:
|
48 |
+
|
49 |
+
.. math ::
|
50 |
+
|
51 |
+
\bar{q} = q^2 = e^{2 i \pi \tau}
|
52 |
+
|
53 |
+
For convenience, mpmath provides functions to convert
|
54 |
+
between the various parameters (:func:`~mpmath.qfrom`, :func:`~mpmath.mfrom`,
|
55 |
+
:func:`~mpmath.kfrom`, :func:`~mpmath.taufrom`, :func:`~mpmath.qbarfrom`).
|
56 |
+
|
57 |
+
**References**
|
58 |
+
|
59 |
+
1. [AbramowitzStegun]_
|
60 |
+
|
61 |
+
2. [WhittakerWatson]_
|
62 |
+
|
63 |
+
"""
|
64 |
+
|
65 |
+
from .functions import defun, defun_wrapped
|
66 |
+
|
67 |
+
@defun_wrapped
|
68 |
+
def eta(ctx, tau):
|
69 |
+
r"""
|
70 |
+
Returns the Dedekind eta function of tau in the upper half-plane.
|
71 |
+
|
72 |
+
>>> from mpmath import *
|
73 |
+
>>> mp.dps = 25; mp.pretty = True
|
74 |
+
>>> eta(1j); gamma(0.25) / (2*pi**0.75)
|
75 |
+
(0.7682254223260566590025942 + 0.0j)
|
76 |
+
0.7682254223260566590025942
|
77 |
+
>>> tau = sqrt(2) + sqrt(5)*1j
|
78 |
+
>>> eta(-1/tau); sqrt(-1j*tau) * eta(tau)
|
79 |
+
(0.9022859908439376463573294 + 0.07985093673948098408048575j)
|
80 |
+
(0.9022859908439376463573295 + 0.07985093673948098408048575j)
|
81 |
+
>>> eta(tau+1); exp(pi*1j/12) * eta(tau)
|
82 |
+
(0.4493066139717553786223114 + 0.3290014793877986663915939j)
|
83 |
+
(0.4493066139717553786223114 + 0.3290014793877986663915939j)
|
84 |
+
>>> f = lambda z: diff(eta, z) / eta(z)
|
85 |
+
>>> chop(36*diff(f,tau)**2 - 24*diff(f,tau,2)*f(tau) + diff(f,tau,3))
|
86 |
+
0.0
|
87 |
+
|
88 |
+
"""
|
89 |
+
if ctx.im(tau) <= 0.0:
|
90 |
+
raise ValueError("eta is only defined in the upper half-plane")
|
91 |
+
q = ctx.expjpi(tau/12)
|
92 |
+
return q * ctx.qp(q**24)
|
93 |
+
|
94 |
+
def nome(ctx, m):
|
95 |
+
m = ctx.convert(m)
|
96 |
+
if not m:
|
97 |
+
return m
|
98 |
+
if m == ctx.one:
|
99 |
+
return m
|
100 |
+
if ctx.isnan(m):
|
101 |
+
return m
|
102 |
+
if ctx.isinf(m):
|
103 |
+
if m == ctx.ninf:
|
104 |
+
return type(m)(-1)
|
105 |
+
else:
|
106 |
+
return ctx.mpc(-1)
|
107 |
+
a = ctx.ellipk(ctx.one-m)
|
108 |
+
b = ctx.ellipk(m)
|
109 |
+
v = ctx.exp(-ctx.pi*a/b)
|
110 |
+
if not ctx._im(m) and ctx._re(m) < 1:
|
111 |
+
if ctx._is_real_type(m):
|
112 |
+
return v.real
|
113 |
+
else:
|
114 |
+
return v.real + 0j
|
115 |
+
elif m == 2:
|
116 |
+
v = ctx.mpc(0, v.imag)
|
117 |
+
return v
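The helper above encodes the nome relation directly: it returns q = exp(-pi*K(1-m)/K(m)), i.e. q = e^{i pi tau} with tau = i*K(1-m)/K(m). A small numerical cross-check (editor's sketch, not part of the file; the value 0.7 is arbitrary):

# Sketch (not part of the uploaded file): nome(m), as exposed through qfrom,
# agrees with the explicit formula q = exp(-pi*K(1-m)/K(m)).
from mpmath import mp, mpf, ellipk, exp, pi, qfrom

mp.dps = 25
m = mpf('0.7')                                   # arbitrary test value
q_direct = exp(-pi*ellipk(1 - m)/ellipk(m))      # definition via complete integrals
assert abs(qfrom(m=m) - q_direct) < mpf('1e-20')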
|
118 |
+
|
119 |
+
@defun_wrapped
|
120 |
+
def qfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
|
121 |
+
r"""
|
122 |
+
Returns the elliptic nome `q`, given any of `q, m, k, \tau, \bar{q}`::
|
123 |
+
|
124 |
+
>>> from mpmath import *
|
125 |
+
>>> mp.dps = 25; mp.pretty = True
|
126 |
+
>>> qfrom(q=0.25)
|
127 |
+
0.25
|
128 |
+
>>> qfrom(m=mfrom(q=0.25))
|
129 |
+
0.25
|
130 |
+
>>> qfrom(k=kfrom(q=0.25))
|
131 |
+
0.25
|
132 |
+
>>> qfrom(tau=taufrom(q=0.25))
|
133 |
+
(0.25 + 0.0j)
|
134 |
+
>>> qfrom(qbar=qbarfrom(q=0.25))
|
135 |
+
0.25
|
136 |
+
|
137 |
+
"""
|
138 |
+
if q is not None:
|
139 |
+
return ctx.convert(q)
|
140 |
+
if m is not None:
|
141 |
+
return nome(ctx, m)
|
142 |
+
if k is not None:
|
143 |
+
return nome(ctx, ctx.convert(k)**2)
|
144 |
+
if tau is not None:
|
145 |
+
return ctx.expjpi(tau)
|
146 |
+
if qbar is not None:
|
147 |
+
return ctx.sqrt(qbar)
|
148 |
+
|
149 |
+
@defun_wrapped
|
150 |
+
def qbarfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
|
151 |
+
r"""
|
152 |
+
Returns the number-theoretic nome `\bar q`, given any of
|
153 |
+
`q, m, k, \tau, \bar{q}`::
|
154 |
+
|
155 |
+
>>> from mpmath import *
|
156 |
+
>>> mp.dps = 25; mp.pretty = True
|
157 |
+
>>> qbarfrom(qbar=0.25)
|
158 |
+
0.25
|
159 |
+
>>> qbarfrom(q=qfrom(qbar=0.25))
|
160 |
+
0.25
|
161 |
+
>>> qbarfrom(m=extraprec(20)(mfrom)(qbar=0.25)) # ill-conditioned
|
162 |
+
0.25
|
163 |
+
>>> qbarfrom(k=extraprec(20)(kfrom)(qbar=0.25)) # ill-conditioned
|
164 |
+
0.25
|
165 |
+
>>> qbarfrom(tau=taufrom(qbar=0.25))
|
166 |
+
(0.25 + 0.0j)
|
167 |
+
|
168 |
+
"""
|
169 |
+
if qbar is not None:
|
170 |
+
return ctx.convert(qbar)
|
171 |
+
if q is not None:
|
172 |
+
return ctx.convert(q) ** 2
|
173 |
+
if m is not None:
|
174 |
+
return nome(ctx, m) ** 2
|
175 |
+
if k is not None:
|
176 |
+
return nome(ctx, ctx.convert(k)**2) ** 2
|
177 |
+
if tau is not None:
|
178 |
+
return ctx.expjpi(2*tau)
|
179 |
+
|
180 |
+
@defun_wrapped
|
181 |
+
def taufrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
|
182 |
+
r"""
|
183 |
+
Returns the elliptic half-period ratio `\tau`, given any of
|
184 |
+
`q, m, k, \tau, \bar{q}`::
|
185 |
+
|
186 |
+
>>> from mpmath import *
|
187 |
+
>>> mp.dps = 25; mp.pretty = True
|
188 |
+
>>> taufrom(tau=0.5j)
|
189 |
+
(0.0 + 0.5j)
|
190 |
+
>>> taufrom(q=qfrom(tau=0.5j))
|
191 |
+
(0.0 + 0.5j)
|
192 |
+
>>> taufrom(m=mfrom(tau=0.5j))
|
193 |
+
(0.0 + 0.5j)
|
194 |
+
>>> taufrom(k=kfrom(tau=0.5j))
|
195 |
+
(0.0 + 0.5j)
|
196 |
+
>>> taufrom(qbar=qbarfrom(tau=0.5j))
|
197 |
+
(0.0 + 0.5j)
|
198 |
+
|
199 |
+
"""
|
200 |
+
if tau is not None:
|
201 |
+
return ctx.convert(tau)
|
202 |
+
if m is not None:
|
203 |
+
m = ctx.convert(m)
|
204 |
+
return ctx.j*ctx.ellipk(1-m)/ctx.ellipk(m)
|
205 |
+
if k is not None:
|
206 |
+
k = ctx.convert(k)
|
207 |
+
return ctx.j*ctx.ellipk(1-k**2)/ctx.ellipk(k**2)
|
208 |
+
if q is not None:
|
209 |
+
return ctx.log(q) / (ctx.pi*ctx.j)
|
210 |
+
if qbar is not None:
|
211 |
+
qbar = ctx.convert(qbar)
|
212 |
+
return ctx.log(qbar) / (2*ctx.pi*ctx.j)
|
213 |
+
|
214 |
+
@defun_wrapped
|
215 |
+
def kfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
|
216 |
+
r"""
|
217 |
+
Returns the elliptic modulus `k`, given any of
|
218 |
+
`q, m, k, \tau, \bar{q}`::
|
219 |
+
|
220 |
+
>>> from mpmath import *
|
221 |
+
>>> mp.dps = 25; mp.pretty = True
|
222 |
+
>>> kfrom(k=0.25)
|
223 |
+
0.25
|
224 |
+
>>> kfrom(m=mfrom(k=0.25))
|
225 |
+
0.25
|
226 |
+
>>> kfrom(q=qfrom(k=0.25))
|
227 |
+
0.25
|
228 |
+
>>> kfrom(tau=taufrom(k=0.25))
|
229 |
+
(0.25 + 0.0j)
|
230 |
+
>>> kfrom(qbar=qbarfrom(k=0.25))
|
231 |
+
0.25
|
232 |
+
|
233 |
+
As `q \to 1` and `q \to -1`, `k` rapidly approaches
|
234 |
+
`1` and `i \infty` respectively::
|
235 |
+
|
236 |
+
>>> kfrom(q=0.75)
|
237 |
+
0.9999999999999899166471767
|
238 |
+
>>> kfrom(q=-0.75)
|
239 |
+
(0.0 + 7041781.096692038332790615j)
|
240 |
+
>>> kfrom(q=1)
|
241 |
+
1
|
242 |
+
>>> kfrom(q=-1)
|
243 |
+
(0.0 + +infj)
|
244 |
+
"""
|
245 |
+
if k is not None:
|
246 |
+
return ctx.convert(k)
|
247 |
+
if m is not None:
|
248 |
+
return ctx.sqrt(m)
|
249 |
+
if tau is not None:
|
250 |
+
q = ctx.expjpi(tau)
|
251 |
+
if qbar is not None:
|
252 |
+
q = ctx.sqrt(qbar)
|
253 |
+
if q == 1:
|
254 |
+
return q
|
255 |
+
if q == -1:
|
256 |
+
return ctx.mpc(0,'inf')
|
257 |
+
return (ctx.jtheta(2,0,q)/ctx.jtheta(3,0,q))**2
|
258 |
+
|
259 |
+
@defun_wrapped
|
260 |
+
def mfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
|
261 |
+
r"""
|
262 |
+
Returns the elliptic parameter `m`, given any of
|
263 |
+
`q, m, k, \tau, \bar{q}`::
|
264 |
+
|
265 |
+
>>> from mpmath import *
|
266 |
+
>>> mp.dps = 25; mp.pretty = True
|
267 |
+
>>> mfrom(m=0.25)
|
268 |
+
0.25
|
269 |
+
>>> mfrom(q=qfrom(m=0.25))
|
270 |
+
0.25
|
271 |
+
>>> mfrom(k=kfrom(m=0.25))
|
272 |
+
0.25
|
273 |
+
>>> mfrom(tau=taufrom(m=0.25))
|
274 |
+
(0.25 + 0.0j)
|
275 |
+
>>> mfrom(qbar=qbarfrom(m=0.25))
|
276 |
+
0.25
|
277 |
+
|
278 |
+
As `q \to 1` and `q \to -1`, `m` rapidly approaches
|
279 |
+
`1` and `-\infty` respectively::
|
280 |
+
|
281 |
+
>>> mfrom(q=0.75)
|
282 |
+
0.9999999999999798332943533
|
283 |
+
>>> mfrom(q=-0.75)
|
284 |
+
-49586681013729.32611558353
|
285 |
+
>>> mfrom(q=1)
|
286 |
+
1.0
|
287 |
+
>>> mfrom(q=-1)
|
288 |
+
-inf
|
289 |
+
|
290 |
+
The inverse nome as a function of `q` has an integer
|
291 |
+
Taylor series expansion::
|
292 |
+
|
293 |
+
>>> taylor(lambda q: mfrom(q), 0, 7)
|
294 |
+
[0.0, 16.0, -128.0, 704.0, -3072.0, 11488.0, -38400.0, 117632.0]
|
295 |
+
|
296 |
+
"""
|
297 |
+
if m is not None:
|
298 |
+
return m
|
299 |
+
if k is not None:
|
300 |
+
return k**2
|
301 |
+
if tau is not None:
|
302 |
+
q = ctx.expjpi(tau)
|
303 |
+
if qbar is not None:
|
304 |
+
q = ctx.sqrt(qbar)
|
305 |
+
if q == 1:
|
306 |
+
return ctx.convert(q)
|
307 |
+
if q == -1:
|
308 |
+
return q*ctx.inf
|
309 |
+
v = (ctx.jtheta(2,0,q)/ctx.jtheta(3,0,q))**4
|
310 |
+
if ctx._is_real_type(q) and q < 0:
|
311 |
+
v = v.real
|
312 |
+
return v
|
313 |
+
|
314 |
+
jacobi_spec = {
|
315 |
+
'sn' : ([3],[2],[1],[4], 'sin', 'tanh'),
|
316 |
+
'cn' : ([4],[2],[2],[4], 'cos', 'sech'),
|
317 |
+
'dn' : ([4],[3],[3],[4], '1', 'sech'),
|
318 |
+
'ns' : ([2],[3],[4],[1], 'csc', 'coth'),
|
319 |
+
'nc' : ([2],[4],[4],[2], 'sec', 'cosh'),
|
320 |
+
'nd' : ([3],[4],[4],[3], '1', 'cosh'),
|
321 |
+
'sc' : ([3],[4],[1],[2], 'tan', 'sinh'),
|
322 |
+
'sd' : ([3,3],[2,4],[1],[3], 'sin', 'sinh'),
|
323 |
+
'cd' : ([3],[2],[2],[3], 'cos', '1'),
|
324 |
+
'cs' : ([4],[3],[2],[1], 'cot', 'csch'),
|
325 |
+
'dc' : ([2],[3],[3],[2], 'sec', '1'),
|
326 |
+
'ds' : ([2,4],[3,3],[3],[1], 'csc', 'csch'),
|
327 |
+
'cc' : None,
|
328 |
+
'ss' : None,
|
329 |
+
'nn' : None,
|
330 |
+
'dd' : None
|
331 |
+
}
|
332 |
+
|
333 |
+
@defun
|
334 |
+
def ellipfun(ctx, kind, u=None, m=None, q=None, k=None, tau=None):
|
335 |
+
try:
|
336 |
+
S = jacobi_spec[kind]
|
337 |
+
except KeyError:
|
338 |
+
raise ValueError("First argument must be a two-character string "
|
339 |
+
"containing 's', 'c', 'd' or 'n', e.g.: 'sn'")
|
340 |
+
if u is None:
|
341 |
+
def f(*args, **kwargs):
|
342 |
+
return ctx.ellipfun(kind, *args, **kwargs)
|
343 |
+
f.__name__ = kind
|
344 |
+
return f
|
345 |
+
prec = ctx.prec
|
346 |
+
try:
|
347 |
+
ctx.prec += 10
|
348 |
+
u = ctx.convert(u)
|
349 |
+
q = ctx.qfrom(m=m, q=q, k=k, tau=tau)
|
350 |
+
if S is None:
|
351 |
+
v = ctx.one + 0*q*u
|
352 |
+
elif q == ctx.zero:
|
353 |
+
if S[4] == '1': v = ctx.one
|
354 |
+
else: v = getattr(ctx, S[4])(u)
|
355 |
+
v += 0*q*u
|
356 |
+
elif q == ctx.one:
|
357 |
+
if S[5] == '1': v = ctx.one
|
358 |
+
else: v = getattr(ctx, S[5])(u)
|
359 |
+
v += 0*q*u
|
360 |
+
else:
|
361 |
+
t = u / ctx.jtheta(3, 0, q)**2
|
362 |
+
v = ctx.one
|
363 |
+
for a in S[0]: v *= ctx.jtheta(a, 0, q)
|
364 |
+
for b in S[1]: v /= ctx.jtheta(b, 0, q)
|
365 |
+
for c in S[2]: v *= ctx.jtheta(c, t, q)
|
366 |
+
for d in S[3]: v /= ctx.jtheta(d, t, q)
|
367 |
+
finally:
|
368 |
+
ctx.prec = prec
|
369 |
+
return +v
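ellipfun assembles each Jacobi function from the theta-function quotient encoded in jacobi_spec, with the elementary trigonometric and hyperbolic limits handled separately at q = 0 and q = 1. A quick sanity check of the construction against the standard Jacobi identities (editor's sketch, not part of the file; the point (u, m) is arbitrary):

# Sketch (not part of the uploaded file): the theta-quotient construction
# should satisfy sn^2 + cn^2 = 1 and dn^2 + m*sn^2 = 1.
from mpmath import mp, mpf, ellipfun

mp.dps = 25
u, m = mpf('0.8'), mpf('0.3')            # arbitrary test point
sn = ellipfun('sn', u, m)
cn = ellipfun('cn', u, m)
dn = ellipfun('dn', u, m)
assert abs(sn**2 + cn**2 - 1) < mpf('1e-20')
assert abs(dn**2 + m*sn**2 - 1) < mpf('1e-20')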
|
370 |
+
|
371 |
+
@defun_wrapped
|
372 |
+
def kleinj(ctx, tau=None, **kwargs):
|
373 |
+
r"""
|
374 |
+
Evaluates the Klein j-invariant, which is a modular function defined for
|
375 |
+
`\tau` in the upper half-plane as
|
376 |
+
|
377 |
+
.. math ::
|
378 |
+
|
379 |
+
J(\tau) = \frac{g_2^3(\tau)}{g_2^3(\tau) - 27 g_3^2(\tau)}
|
380 |
+
|
381 |
+
where `g_2` and `g_3` are the modular invariants of the Weierstrass
|
382 |
+
elliptic function,
|
383 |
+
|
384 |
+
.. math ::
|
385 |
+
|
386 |
+
g_2(\tau) = 60 \sum_{(m,n) \in \mathbb{Z}^2 \setminus (0,0)} (m \tau+n)^{-4}
|
387 |
+
|
388 |
+
g_3(\tau) = 140 \sum_{(m,n) \in \mathbb{Z}^2 \setminus (0,0)} (m \tau+n)^{-6}.
|
389 |
+
|
390 |
+
An alternative, common notation is that of the j-function
|
391 |
+
`j(\tau) = 1728 J(\tau)`.
|
392 |
+
|
393 |
+
**Plots**
|
394 |
+
|
395 |
+
.. literalinclude :: /plots/kleinj.py
|
396 |
+
.. image :: /plots/kleinj.png
|
397 |
+
.. literalinclude :: /plots/kleinj2.py
|
398 |
+
.. image :: /plots/kleinj2.png
|
399 |
+
|
400 |
+
**Examples**
|
401 |
+
|
402 |
+
Verifying the functional equation `J(\tau) = J(\tau+1) = J(-\tau^{-1})`::
|
403 |
+
|
404 |
+
>>> from mpmath import *
|
405 |
+
>>> mp.dps = 25; mp.pretty = True
|
406 |
+
>>> tau = 0.625+0.75*j
|
407 |
+
>>> tau = 0.625+0.75*j
|
408 |
+
>>> kleinj(tau)
|
409 |
+
(-0.1507492166511182267125242 + 0.07595948379084571927228948j)
|
410 |
+
>>> kleinj(tau+1)
|
411 |
+
(-0.1507492166511182267125242 + 0.07595948379084571927228948j)
|
412 |
+
>>> kleinj(-1/tau)
|
413 |
+
(-0.1507492166511182267125242 + 0.07595948379084571927228946j)
|
414 |
+
|
415 |
+
The j-function has a famous Laurent series expansion in terms of the nome
|
416 |
+
`\bar{q}`, `j(\tau) = \bar{q}^{-1} + 744 + 196884\bar{q} + \ldots`::
|
417 |
+
|
418 |
+
>>> mp.dps = 15
|
419 |
+
>>> taylor(lambda q: 1728*q*kleinj(qbar=q), 0, 5, singular=True)
|
420 |
+
[1.0, 744.0, 196884.0, 21493760.0, 864299970.0, 20245856256.0]
|
421 |
+
|
422 |
+
The j-function admits exact evaluation at special algebraic points
|
423 |
+
related to the Heegner numbers 1, 2, 3, 7, 11, 19, 43, 67, 163::
|
424 |
+
|
425 |
+
>>> @extraprec(10)
|
426 |
+
... def h(n):
|
427 |
+
... v = (1+sqrt(n)*j)
|
428 |
+
... if n > 2:
|
429 |
+
... v *= 0.5
|
430 |
+
... return v
|
431 |
+
...
|
432 |
+
>>> mp.dps = 25
|
433 |
+
>>> for n in [1,2,3,7,11,19,43,67,163]:
|
434 |
+
... n, chop(1728*kleinj(h(n)))
|
435 |
+
...
|
436 |
+
(1, 1728.0)
|
437 |
+
(2, 8000.0)
|
438 |
+
(3, 0.0)
|
439 |
+
(7, -3375.0)
|
440 |
+
(11, -32768.0)
|
441 |
+
(19, -884736.0)
|
442 |
+
(43, -884736000.0)
|
443 |
+
(67, -147197952000.0)
|
444 |
+
(163, -262537412640768000.0)
|
445 |
+
|
446 |
+
Also at other special points, the j-function assumes explicit
|
447 |
+
algebraic values, e.g.::
|
448 |
+
|
449 |
+
>>> chop(1728*kleinj(j*sqrt(5)))
|
450 |
+
1264538.909475140509320227
|
451 |
+
>>> identify(cbrt(_)) # note: not simplified
|
452 |
+
'((100+sqrt(13520))/2)'
|
453 |
+
>>> (50+26*sqrt(5))**3
|
454 |
+
1264538.909475140509320227
|
455 |
+
|
456 |
+
"""
|
457 |
+
q = ctx.qfrom(tau=tau, **kwargs)
|
458 |
+
t2 = ctx.jtheta(2,0,q)
|
459 |
+
t3 = ctx.jtheta(3,0,q)
|
460 |
+
t4 = ctx.jtheta(4,0,q)
|
461 |
+
P = (t2**8 + t3**8 + t4**8)**3
|
462 |
+
Q = 54*(t2*t3*t4)**8
|
463 |
+
return P/Q
|
464 |
+
|
465 |
+
|
466 |
+
def RF_calc(ctx, x, y, z, r):
|
467 |
+
if y == z: return RC_calc(ctx, x, y, r)
|
468 |
+
if x == z: return RC_calc(ctx, y, x, r)
|
469 |
+
if x == y: return RC_calc(ctx, z, x, r)
|
470 |
+
if not (ctx.isnormal(x) and ctx.isnormal(y) and ctx.isnormal(z)):
|
471 |
+
if ctx.isnan(x) or ctx.isnan(y) or ctx.isnan(z):
|
472 |
+
return x*y*z
|
473 |
+
if ctx.isinf(x) or ctx.isinf(y) or ctx.isinf(z):
|
474 |
+
return ctx.zero
|
475 |
+
xm,ym,zm = x,y,z
|
476 |
+
A0 = Am = (x+y+z)/3
|
477 |
+
Q = ctx.root(3*r, -6) * max(abs(A0-x),abs(A0-y),abs(A0-z))
|
478 |
+
g = ctx.mpf(0.25)
|
479 |
+
pow4 = ctx.one
|
480 |
+
while 1:
|
481 |
+
xs = ctx.sqrt(xm)
|
482 |
+
ys = ctx.sqrt(ym)
|
483 |
+
zs = ctx.sqrt(zm)
|
484 |
+
lm = xs*ys + xs*zs + ys*zs
|
485 |
+
Am1 = (Am+lm)*g
|
486 |
+
xm, ym, zm = (xm+lm)*g, (ym+lm)*g, (zm+lm)*g
|
487 |
+
if pow4 * Q < abs(Am):
|
488 |
+
break
|
489 |
+
Am = Am1
|
490 |
+
pow4 *= g
|
491 |
+
t = pow4/Am
|
492 |
+
X = (A0-x)*t
|
493 |
+
Y = (A0-y)*t
|
494 |
+
Z = -X-Y
|
495 |
+
E2 = X*Y-Z**2
|
496 |
+
E3 = X*Y*Z
|
497 |
+
return ctx.power(Am,-0.5) * (9240-924*E2+385*E2**2+660*E3-630*E2*E3)/9240
|
498 |
+
|
499 |
+
def RC_calc(ctx, x, y, r, pv=True):
|
500 |
+
if not (ctx.isnormal(x) and ctx.isnormal(y)):
|
501 |
+
if ctx.isinf(x) or ctx.isinf(y):
|
502 |
+
return 1/(x*y)
|
503 |
+
if y == 0:
|
504 |
+
return ctx.inf
|
505 |
+
if x == 0:
|
506 |
+
return ctx.pi / ctx.sqrt(y) / 2
|
507 |
+
raise ValueError
|
508 |
+
# Cauchy principal value
|
509 |
+
if pv and ctx._im(y) == 0 and ctx._re(y) < 0:
|
510 |
+
return ctx.sqrt(x/(x-y)) * RC_calc(ctx, x-y, -y, r)
|
511 |
+
if x == y:
|
512 |
+
return 1/ctx.sqrt(x)
|
513 |
+
extraprec = 2*max(0,-ctx.mag(x-y)+ctx.mag(x))
|
514 |
+
ctx.prec += extraprec
|
515 |
+
if ctx._is_real_type(x) and ctx._is_real_type(y):
|
516 |
+
x = ctx._re(x)
|
517 |
+
y = ctx._re(y)
|
518 |
+
a = ctx.sqrt(x/y)
|
519 |
+
if x < y:
|
520 |
+
b = ctx.sqrt(y-x)
|
521 |
+
v = ctx.acos(a)/b
|
522 |
+
else:
|
523 |
+
b = ctx.sqrt(x-y)
|
524 |
+
v = ctx.acosh(a)/b
|
525 |
+
else:
|
526 |
+
sx = ctx.sqrt(x)
|
527 |
+
sy = ctx.sqrt(y)
|
528 |
+
v = ctx.acos(sx/sy)/(ctx.sqrt((1-x/y))*sy)
|
529 |
+
ctx.prec -= extraprec
|
530 |
+
return v
|
531 |
+
|
532 |
+
def RJ_calc(ctx, x, y, z, p, r, integration):
|
533 |
+
"""
|
534 |
+
With integration == 0, computes RJ only using Carlson's algorithm
|
535 |
+
(may be wrong for some values).
|
536 |
+
With integration == 1, uses an initial integration to make sure
|
537 |
+
Carlson's algorithm is correct.
|
538 |
+
With integration == 2, uses only integration.
|
539 |
+
"""
|
540 |
+
if not (ctx.isnormal(x) and ctx.isnormal(y) and \
|
541 |
+
ctx.isnormal(z) and ctx.isnormal(p)):
|
542 |
+
if ctx.isnan(x) or ctx.isnan(y) or ctx.isnan(z) or ctx.isnan(p):
|
543 |
+
return x*y*z
|
544 |
+
if ctx.isinf(x) or ctx.isinf(y) or ctx.isinf(z) or ctx.isinf(p):
|
545 |
+
return ctx.zero
|
546 |
+
if not p:
|
547 |
+
return ctx.inf
|
548 |
+
if (not x) + (not y) + (not z) > 1:
|
549 |
+
return ctx.inf
|
550 |
+
# Check conditions and fall back on integration for argument
|
551 |
+
# reduction if needed. The following conditions might be needlessly
|
552 |
+
# restrictive.
|
553 |
+
initial_integral = ctx.zero
|
554 |
+
if integration >= 1:
|
555 |
+
ok = (x.real >= 0 and y.real >= 0 and z.real >= 0 and p.real > 0)
|
556 |
+
if not ok:
|
557 |
+
if x == p or y == p or z == p:
|
558 |
+
ok = True
|
559 |
+
if not ok:
|
560 |
+
if p.imag != 0 or p.real >= 0:
|
561 |
+
if (x.imag == 0 and x.real >= 0 and ctx.conj(y) == z):
|
562 |
+
ok = True
|
563 |
+
if (y.imag == 0 and y.real >= 0 and ctx.conj(x) == z):
|
564 |
+
ok = True
|
565 |
+
if (z.imag == 0 and z.real >= 0 and ctx.conj(x) == y):
|
566 |
+
ok = True
|
567 |
+
if not ok or (integration == 2):
|
568 |
+
N = ctx.ceil(-min(x.real, y.real, z.real, p.real)) + 1
|
569 |
+
# Integrate around any singularities
|
570 |
+
if all((t.imag >= 0 or t.real > 0) for t in [x, y, z, p]):
|
571 |
+
margin = ctx.j
|
572 |
+
elif all((t.imag < 0 or t.real > 0) for t in [x, y, z, p]):
|
573 |
+
margin = -ctx.j
|
574 |
+
else:
|
575 |
+
margin = 1
|
576 |
+
# Go through the upper half-plane, but low enough that any
|
577 |
+
# parameter starting in the lower plane doesn't cross the
|
578 |
+
# branch cut
|
579 |
+
for t in [x, y, z, p]:
|
580 |
+
if t.imag >= 0 or t.real > 0:
|
581 |
+
continue
|
582 |
+
margin = min(margin, abs(t.imag) * 0.5)
|
583 |
+
margin *= ctx.j
|
584 |
+
N += margin
|
585 |
+
F = lambda t: 1/(ctx.sqrt(t+x)*ctx.sqrt(t+y)*ctx.sqrt(t+z)*(t+p))
|
586 |
+
if integration == 2:
|
587 |
+
return 1.5 * ctx.quadsubdiv(F, [0, N, ctx.inf])
|
588 |
+
initial_integral = 1.5 * ctx.quadsubdiv(F, [0, N])
|
589 |
+
x += N; y += N; z += N; p += N
|
590 |
+
xm,ym,zm,pm = x,y,z,p
|
591 |
+
A0 = Am = (x + y + z + 2*p)/5
|
592 |
+
delta = (p-x)*(p-y)*(p-z)
|
593 |
+
Q = ctx.root(0.25*r, -6) * max(abs(A0-x),abs(A0-y),abs(A0-z),abs(A0-p))
|
594 |
+
g = ctx.mpf(0.25)
|
595 |
+
pow4 = ctx.one
|
596 |
+
S = 0
|
597 |
+
while 1:
|
598 |
+
sx = ctx.sqrt(xm)
|
599 |
+
sy = ctx.sqrt(ym)
|
600 |
+
sz = ctx.sqrt(zm)
|
601 |
+
sp = ctx.sqrt(pm)
|
602 |
+
lm = sx*sy + sx*sz + sy*sz
|
603 |
+
Am1 = (Am+lm)*g
|
604 |
+
xm = (xm+lm)*g; ym = (ym+lm)*g; zm = (zm+lm)*g; pm = (pm+lm)*g
|
605 |
+
dm = (sp+sx) * (sp+sy) * (sp+sz)
|
606 |
+
em = delta * pow4**3 / dm**2
|
607 |
+
if pow4 * Q < abs(Am):
|
608 |
+
break
|
609 |
+
T = RC_calc(ctx, ctx.one, ctx.one+em, r) * pow4 / dm
|
610 |
+
S += T
|
611 |
+
pow4 *= g
|
612 |
+
Am = Am1
|
613 |
+
t = pow4 / Am
|
614 |
+
X = (A0-x)*t
|
615 |
+
Y = (A0-y)*t
|
616 |
+
Z = (A0-z)*t
|
617 |
+
P = (-X-Y-Z)/2
|
618 |
+
E2 = X*Y + X*Z + Y*Z - 3*P**2
|
619 |
+
E3 = X*Y*Z + 2*E2*P + 4*P**3
|
620 |
+
E4 = (2*X*Y*Z + E2*P + 3*P**3)*P
|
621 |
+
E5 = X*Y*Z*P**2
|
622 |
+
P = 24024 - 5148*E2 + 2457*E2**2 + 4004*E3 - 4158*E2*E3 - 3276*E4 + 2772*E5
|
623 |
+
Q = 24024
|
624 |
+
v1 = pow4 * ctx.power(Am, -1.5) * P/Q
|
625 |
+
v2 = 6*S
|
626 |
+
return initial_integral + v1 + v2
|
627 |
+
|
628 |
+
@defun
|
629 |
+
def elliprf(ctx, x, y, z):
|
630 |
+
r"""
|
631 |
+
Evaluates the Carlson symmetric elliptic integral of the first kind
|
632 |
+
|
633 |
+
.. math ::
|
634 |
+
|
635 |
+
R_F(x,y,z) = \frac{1}{2}
|
636 |
+
\int_0^{\infty} \frac{dt}{\sqrt{(t+x)(t+y)(t+z)}}
|
637 |
+
|
638 |
+
which is defined for `x,y,z \notin (-\infty,0)`, and with
|
639 |
+
at most one of `x,y,z` being zero.
|
640 |
+
|
641 |
+
For real `x,y,z \ge 0`, the principal square root is taken in the integrand.
|
642 |
+
For complex `x,y,z`, the principal square root is taken as `t \to \infty`
|
643 |
+
and as `t \to 0` non-principal branches are chosen as necessary so as to
|
644 |
+
make the integrand continuous.
|
645 |
+
|
646 |
+
**Examples**
|
647 |
+
|
648 |
+
Some basic values and limits::
|
649 |
+
|
650 |
+
>>> from mpmath import *
|
651 |
+
>>> mp.dps = 25; mp.pretty = True
|
652 |
+
>>> elliprf(0,1,1); pi/2
|
653 |
+
1.570796326794896619231322
|
654 |
+
1.570796326794896619231322
|
655 |
+
>>> elliprf(0,1,inf)
|
656 |
+
0.0
|
657 |
+
>>> elliprf(1,1,1)
|
658 |
+
1.0
|
659 |
+
>>> elliprf(2,2,2)**2
|
660 |
+
0.5
|
661 |
+
>>> elliprf(1,0,0); elliprf(0,0,1); elliprf(0,1,0); elliprf(0,0,0)
|
662 |
+
+inf
|
663 |
+
+inf
|
664 |
+
+inf
|
665 |
+
+inf
|
666 |
+
|
667 |
+
Representing complete elliptic integrals in terms of `R_F`::
|
668 |
+
|
669 |
+
>>> m = mpf(0.75)
|
670 |
+
>>> ellipk(m); elliprf(0,1-m,1)
|
671 |
+
2.156515647499643235438675
|
672 |
+
2.156515647499643235438675
|
673 |
+
>>> ellipe(m); elliprf(0,1-m,1)-m*elliprd(0,1-m,1)/3
|
674 |
+
1.211056027568459524803563
|
675 |
+
1.211056027568459524803563
|
676 |
+
|
677 |
+
Some symmetries and argument transformations::
|
678 |
+
|
679 |
+
>>> x,y,z = 2,3,4
|
680 |
+
>>> elliprf(x,y,z); elliprf(y,x,z); elliprf(z,y,x)
|
681 |
+
0.5840828416771517066928492
|
682 |
+
0.5840828416771517066928492
|
683 |
+
0.5840828416771517066928492
|
684 |
+
>>> k = mpf(100000)
|
685 |
+
>>> elliprf(k*x,k*y,k*z); k**(-0.5) * elliprf(x,y,z)
|
686 |
+
0.001847032121923321253219284
|
687 |
+
0.001847032121923321253219284
|
688 |
+
>>> l = sqrt(x*y) + sqrt(y*z) + sqrt(z*x)
|
689 |
+
>>> elliprf(x,y,z); 2*elliprf(x+l,y+l,z+l)
|
690 |
+
0.5840828416771517066928492
|
691 |
+
0.5840828416771517066928492
|
692 |
+
>>> elliprf((x+l)/4,(y+l)/4,(z+l)/4)
|
693 |
+
0.5840828416771517066928492
|
694 |
+
|
695 |
+
Comparing with numerical integration::
|
696 |
+
|
697 |
+
>>> x,y,z = 2,3,4
|
698 |
+
>>> elliprf(x,y,z)
|
699 |
+
0.5840828416771517066928492
|
700 |
+
>>> f = lambda t: 0.5*((t+x)*(t+y)*(t+z))**(-0.5)
|
701 |
+
>>> q = extradps(25)(quad)
|
702 |
+
>>> q(f, [0,inf])
|
703 |
+
0.5840828416771517066928492
|
704 |
+
|
705 |
+
With the following arguments, the square root in the integrand becomes
|
706 |
+
discontinuous at `t = 1/2` if the principal branch is used. To obtain
|
707 |
+
the right value, `-\sqrt{r}` must be taken instead of `\sqrt{r}`
|
708 |
+
on `t \in (0, 1/2)`::
|
709 |
+
|
710 |
+
>>> x,y,z = j-1,j,0
|
711 |
+
>>> elliprf(x,y,z)
|
712 |
+
(0.7961258658423391329305694 - 1.213856669836495986430094j)
|
713 |
+
>>> -q(f, [0,0.5]) + q(f, [0.5,inf])
|
714 |
+
(0.7961258658423391329305694 - 1.213856669836495986430094j)
|
715 |
+
|
716 |
+
The so-called *first lemniscate constant*, a transcendental number::
|
717 |
+
|
718 |
+
>>> elliprf(0,1,2)
|
719 |
+
1.31102877714605990523242
|
720 |
+
>>> extradps(25)(quad)(lambda t: 1/sqrt(1-t**4), [0,1])
|
721 |
+
1.31102877714605990523242
|
722 |
+
>>> gamma('1/4')**2/(4*sqrt(2*pi))
|
723 |
+
1.31102877714605990523242
|
724 |
+
|
725 |
+
**References**
|
726 |
+
|
727 |
+
1. [Carlson]_
|
728 |
+
2. [DLMF]_ Chapter 19. Elliptic Integrals
|
729 |
+
|
730 |
+
"""
|
731 |
+
x = ctx.convert(x)
|
732 |
+
y = ctx.convert(y)
|
733 |
+
z = ctx.convert(z)
|
734 |
+
prec = ctx.prec
|
735 |
+
try:
|
736 |
+
ctx.prec += 20
|
737 |
+
tol = ctx.eps * 2**10
|
738 |
+
v = RF_calc(ctx, x, y, z, tol)
|
739 |
+
finally:
|
740 |
+
ctx.prec = prec
|
741 |
+
return +v
|
742 |
+
|
743 |
+
@defun
|
744 |
+
def elliprc(ctx, x, y, pv=True):
|
745 |
+
r"""
|
746 |
+
Evaluates the degenerate Carlson symmetric elliptic integral
|
747 |
+
of the first kind
|
748 |
+
|
749 |
+
.. math ::
|
750 |
+
|
751 |
+
R_C(x,y) = R_F(x,y,y) =
|
752 |
+
\frac{1}{2} \int_0^{\infty} \frac{dt}{(t+y) \sqrt{(t+x)}}.
|
753 |
+
|
754 |
+
If `y \in (-\infty,0)`, either a value defined by continuity,
|
755 |
+
or with *pv=True* the Cauchy principal value, can be computed.
|
756 |
+
|
757 |
+
If `x \ge 0, y > 0`, the value can be expressed in terms of
|
758 |
+
elementary functions as
|
759 |
+
|
760 |
+
.. math ::
|
761 |
+
|
762 |
+
R_C(x,y) =
|
763 |
+
\begin{cases}
|
764 |
+
\dfrac{1}{\sqrt{y-x}}
|
765 |
+
\cos^{-1}\left(\sqrt{\dfrac{x}{y}}\right), & x < y \\
|
766 |
+
\dfrac{1}{\sqrt{y}}, & x = y \\
|
767 |
+
\dfrac{1}{\sqrt{x-y}}
|
768 |
+
\cosh^{-1}\left(\sqrt{\dfrac{x}{y}}\right), & x > y \\
|
769 |
+
\end{cases}.
|
770 |
+
|
771 |
+
**Examples**
|
772 |
+
|
773 |
+
Some special values and limits::
|
774 |
+
|
775 |
+
>>> from mpmath import *
|
776 |
+
>>> mp.dps = 25; mp.pretty = True
|
777 |
+
>>> elliprc(1,2)*4; elliprc(0,1)*2; +pi
|
778 |
+
3.141592653589793238462643
|
779 |
+
3.141592653589793238462643
|
780 |
+
3.141592653589793238462643
|
781 |
+
>>> elliprc(1,0)
|
782 |
+
+inf
|
783 |
+
>>> elliprc(5,5)**2
|
784 |
+
0.2
|
785 |
+
>>> elliprc(1,inf); elliprc(inf,1); elliprc(inf,inf)
|
786 |
+
0.0
|
787 |
+
0.0
|
788 |
+
0.0
|
789 |
+
|
790 |
+
Comparing with the elementary closed-form solution::
|
791 |
+
|
792 |
+
>>> elliprc('1/3', '1/5'); sqrt(7.5)*acosh(sqrt('5/3'))
|
793 |
+
2.041630778983498390751238
|
794 |
+
2.041630778983498390751238
|
795 |
+
>>> elliprc('1/5', '1/3'); sqrt(7.5)*acos(sqrt('3/5'))
|
796 |
+
1.875180765206547065111085
|
797 |
+
1.875180765206547065111085
|
798 |
+
|
799 |
+
Comparing with numerical integration::
|
800 |
+
|
801 |
+
>>> q = extradps(25)(quad)
|
802 |
+
>>> elliprc(2, -3, pv=True)
|
803 |
+
0.3333969101113672670749334
|
804 |
+
>>> elliprc(2, -3, pv=False)
|
805 |
+
(0.3333969101113672670749334 + 0.7024814731040726393156375j)
|
806 |
+
>>> 0.5*q(lambda t: 1/(sqrt(t+2)*(t-3)), [0,3-j,6,inf])
|
807 |
+
(0.3333969101113672670749334 + 0.7024814731040726393156375j)
|
808 |
+
|
809 |
+
"""
|
810 |
+
x = ctx.convert(x)
|
811 |
+
y = ctx.convert(y)
|
812 |
+
prec = ctx.prec
|
813 |
+
try:
|
814 |
+
ctx.prec += 20
|
815 |
+
tol = ctx.eps * 2**10
|
816 |
+
v = RC_calc(ctx, x, y, tol, pv)
|
817 |
+
finally:
|
818 |
+
ctx.prec = prec
|
819 |
+
return +v
|
820 |
+
|
821 |
+
@defun
|
822 |
+
def elliprj(ctx, x, y, z, p, integration=1):
|
823 |
+
r"""
|
824 |
+
Evaluates the Carlson symmetric elliptic integral of the third kind
|
825 |
+
|
826 |
+
.. math ::
|
827 |
+
|
828 |
+
R_J(x,y,z,p) = \frac{3}{2}
|
829 |
+
\int_0^{\infty} \frac{dt}{(t+p)\sqrt{(t+x)(t+y)(t+z)}}.
|
830 |
+
|
831 |
+
Like :func:`~mpmath.elliprf`, the branch of the square root in the integrand
|
832 |
+
is defined so as to be continuous along the path of integration for
|
833 |
+
complex values of the arguments.
|
834 |
+
|
835 |
+
**Examples**
|
836 |
+
|
837 |
+
Some values and limits::
|
838 |
+
|
839 |
+
>>> from mpmath import *
|
840 |
+
>>> mp.dps = 25; mp.pretty = True
|
841 |
+
>>> elliprj(1,1,1,1)
|
842 |
+
1.0
|
843 |
+
>>> elliprj(2,2,2,2); 1/(2*sqrt(2))
|
844 |
+
0.3535533905932737622004222
|
845 |
+
0.3535533905932737622004222
|
846 |
+
>>> elliprj(0,1,2,2)
|
847 |
+
1.067937989667395702268688
|
848 |
+
>>> 3*(2*gamma('5/4')**2-pi**2/gamma('1/4')**2)/(sqrt(2*pi))
|
849 |
+
1.067937989667395702268688
|
850 |
+
>>> elliprj(0,1,1,2); 3*pi*(2-sqrt(2))/4
|
851 |
+
1.380226776765915172432054
|
852 |
+
1.380226776765915172432054
|
853 |
+
>>> elliprj(1,3,2,0); elliprj(0,1,1,0); elliprj(0,0,0,0)
|
854 |
+
+inf
|
855 |
+
+inf
|
856 |
+
+inf
|
857 |
+
>>> elliprj(1,inf,1,0); elliprj(1,1,1,inf)
|
858 |
+
0.0
|
859 |
+
0.0
|
860 |
+
>>> chop(elliprj(1+j, 1-j, 1, 1))
|
861 |
+
0.8505007163686739432927844
|
862 |
+
|
863 |
+
Scale transformation::
|
864 |
+
|
865 |
+
>>> x,y,z,p = 2,3,4,5
|
866 |
+
>>> k = mpf(100000)
|
867 |
+
>>> elliprj(k*x,k*y,k*z,k*p); k**(-1.5)*elliprj(x,y,z,p)
|
868 |
+
4.521291677592745527851168e-9
|
869 |
+
4.521291677592745527851168e-9
|
870 |
+
|
871 |
+
Comparing with numerical integration::
|
872 |
+
|
873 |
+
>>> elliprj(1,2,3,4)
|
874 |
+
0.2398480997495677621758617
|
875 |
+
>>> f = lambda t: 1/((t+4)*sqrt((t+1)*(t+2)*(t+3)))
|
876 |
+
>>> 1.5*quad(f, [0,inf])
|
877 |
+
0.2398480997495677621758617
|
878 |
+
>>> elliprj(1,2+1j,3,4-2j)
|
879 |
+
(0.216888906014633498739952 + 0.04081912627366673332369512j)
|
880 |
+
>>> f = lambda t: 1/((t+4-2j)*sqrt((t+1)*(t+2+1j)*(t+3)))
|
881 |
+
>>> 1.5*quad(f, [0,inf])
|
882 |
+
(0.216888906014633498739952 + 0.04081912627366673332369511j)
|
883 |
+
|
884 |
+
"""
|
885 |
+
x = ctx.convert(x)
|
886 |
+
y = ctx.convert(y)
|
887 |
+
z = ctx.convert(z)
|
888 |
+
p = ctx.convert(p)
|
889 |
+
prec = ctx.prec
|
890 |
+
try:
|
891 |
+
ctx.prec += 20
|
892 |
+
tol = ctx.eps * 2**10
|
893 |
+
v = RJ_calc(ctx, x, y, z, p, tol, integration)
|
894 |
+
finally:
|
895 |
+
ctx.prec = prec
|
896 |
+
return +v
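The integration keyword of elliprj selects how the guard described in RJ_calc's docstring is applied: the default (1) falls back on an initial integration only when the argument conditions are not met, while 2 evaluates the defining integral entirely by quadrature. For well-behaved arguments the two modes should agree; a short check (editor's sketch, not part of the file; arguments and tolerance are arbitrary):

# Sketch (not part of the uploaded file): integration modes of elliprj agree.
from mpmath import mp, mpf, elliprj

mp.dps = 25
a = elliprj(1, 2, 3, 4)                     # default: Carlson algorithm (guarded)
b = elliprj(1, 2, 3, 4, integration=2)      # defining integral by quadrature
assert abs(a - b) < mpf('1e-10')            # loose tolerance for the quadrature path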
|
897 |
+
|
898 |
+
@defun
|
899 |
+
def elliprd(ctx, x, y, z):
|
900 |
+
r"""
|
901 |
+
Evaluates the degenerate Carlson symmetric elliptic integral
|
902 |
+
of the third kind or Carlson elliptic integral of the
|
903 |
+
second kind `R_D(x,y,z) = R_J(x,y,z,z)`.
|
904 |
+
|
905 |
+
See :func:`~mpmath.elliprj` for additional information.
|
906 |
+
|
907 |
+
**Examples**
|
908 |
+
|
909 |
+
>>> from mpmath import *
|
910 |
+
>>> mp.dps = 25; mp.pretty = True
|
911 |
+
>>> elliprd(1,2,3)
|
912 |
+
0.2904602810289906442326534
|
913 |
+
>>> elliprj(1,2,3,3)
|
914 |
+
0.2904602810289906442326534
|
915 |
+
|
916 |
+
The so-called *second lemniscate constant*, a transcendental number::
|
917 |
+
|
918 |
+
>>> elliprd(0,2,1)/3
|
919 |
+
0.5990701173677961037199612
|
920 |
+
>>> extradps(25)(quad)(lambda t: t**2/sqrt(1-t**4), [0,1])
|
921 |
+
0.5990701173677961037199612
|
922 |
+
>>> gamma('3/4')**2/sqrt(2*pi)
|
923 |
+
0.5990701173677961037199612
|
924 |
+
|
925 |
+
"""
|
926 |
+
return ctx.elliprj(x,y,z,z)
|
927 |
+
|
928 |
+
@defun
|
929 |
+
def elliprg(ctx, x, y, z):
|
930 |
+
r"""
|
931 |
+
Evaluates the Carlson completely symmetric elliptic integral
|
932 |
+
of the second kind
|
933 |
+
|
934 |
+
.. math ::
|
935 |
+
|
936 |
+
R_G(x,y,z) = \frac{1}{4} \int_0^{\infty}
|
937 |
+
\frac{t}{\sqrt{(t+x)(t+y)(t+z)}}
|
938 |
+
\left( \frac{x}{t+x} + \frac{y}{t+y} + \frac{z}{t+z}\right) dt.
|
939 |
+
|
940 |
+
**Examples**
|
941 |
+
|
942 |
+
Evaluation for real and complex arguments::
|
943 |
+
|
944 |
+
>>> from mpmath import *
|
945 |
+
>>> mp.dps = 25; mp.pretty = True
|
946 |
+
>>> elliprg(0,1,1)*4; +pi
|
947 |
+
3.141592653589793238462643
|
948 |
+
3.141592653589793238462643
|
949 |
+
>>> elliprg(0,0.5,1)
|
950 |
+
0.6753219405238377512600874
|
951 |
+
>>> chop(elliprg(1+j, 1-j, 2))
|
952 |
+
1.172431327676416604532822
|
953 |
+
|
954 |
+
A double integral that can be evaluated in terms of `R_G`::
|
955 |
+
|
956 |
+
>>> x,y,z = 2,3,4
|
957 |
+
>>> def f(t,u):
|
958 |
+
... st = fp.sin(t); ct = fp.cos(t)
|
959 |
+
... su = fp.sin(u); cu = fp.cos(u)
|
960 |
+
... return (x*(st*cu)**2 + y*(st*su)**2 + z*ct**2)**0.5 * st
|
961 |
+
...
|
962 |
+
>>> nprint(mpf(fp.quad(f, [0,fp.pi], [0,2*fp.pi])/(4*fp.pi)), 13)
|
963 |
+
1.725503028069
|
964 |
+
>>> nprint(elliprg(x,y,z), 13)
|
965 |
+
1.725503028069
|
966 |
+
|
967 |
+
"""
|
968 |
+
x = ctx.convert(x)
|
969 |
+
y = ctx.convert(y)
|
970 |
+
z = ctx.convert(z)
|
971 |
+
zeros = (not x) + (not y) + (not z)
|
972 |
+
if zeros == 3:
|
973 |
+
return (x+y+z)*0
|
974 |
+
if zeros == 2:
|
975 |
+
if x: return 0.5*ctx.sqrt(x)
|
976 |
+
if y: return 0.5*ctx.sqrt(y)
|
977 |
+
return 0.5*ctx.sqrt(z)
|
978 |
+
if zeros == 1:
|
979 |
+
if not z:
|
980 |
+
x, z = z, x
|
981 |
+
def terms():
|
982 |
+
T1 = 0.5*z*ctx.elliprf(x,y,z)
|
983 |
+
T2 = -0.5*(x-z)*(y-z)*ctx.elliprd(x,y,z)/3
|
984 |
+
T3 = 0.5*ctx.sqrt(x)*ctx.sqrt(y)/ctx.sqrt(z)
|
985 |
+
return T1,T2,T3
|
986 |
+
return ctx.sum_accurately(terms)
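R_G completes the Carlson family used below; for example the complete Legendre integral of the second kind satisfies E(m) = 2 R_G(0, 1-m, 1) (DLMF 19.25.1, restated in terms of the parameter m used by mpmath). A numerical check (editor's sketch, not part of the file; the parameter value is arbitrary):

# Sketch (not part of the uploaded file): E(m) = 2*R_G(0, 1-m, 1).
from mpmath import mp, mpf, ellipe, elliprg

mp.dps = 25
m = mpf('0.6')                               # arbitrary parameter
assert abs(ellipe(m) - 2*elliprg(0, 1 - m, 1)) < mpf('1e-20')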
|
987 |
+
|
988 |
+
|
989 |
+
@defun_wrapped
|
990 |
+
def ellipf(ctx, phi, m):
|
991 |
+
r"""
|
992 |
+
Evaluates the Legendre incomplete elliptic integral of the first kind
|
993 |
+
|
994 |
+
.. math ::
|
995 |
+
|
996 |
+
F(\phi,m) = \int_0^{\phi} \frac{dt}{\sqrt{1-m \sin^2 t}}
|
997 |
+
|
998 |
+
or equivalently
|
999 |
+
|
1000 |
+
.. math ::
|
1001 |
+
|
1002 |
+
F(\phi,m) = \int_0^{\sin \phi}
|
1003 |
+
\frac{dt}{\left(\sqrt{1-t^2}\right)\left(\sqrt{1-mt^2}\right)}.
|
1004 |
+
|
1005 |
+
The function reduces to a complete elliptic integral of the first kind
|
1006 |
+
(see :func:`~mpmath.ellipk`) when `\phi = \frac{\pi}{2}`; that is,
|
1007 |
+
|
1008 |
+
.. math ::
|
1009 |
+
|
1010 |
+
F\left(\frac{\pi}{2}, m\right) = K(m).
|
1011 |
+
|
1012 |
+
In the defining integral, it is assumed that the principal branch
|
1013 |
+
of the square root is taken and that the path of integration avoids
|
1014 |
+
crossing any branch cuts. Outside `-\pi/2 \le \Re(\phi) \le \pi/2`,
|
1015 |
+
the function extends quasi-periodically as
|
1016 |
+
|
1017 |
+
.. math ::
|
1018 |
+
|
1019 |
+
F(\phi + n \pi, m) = 2 n K(m) + F(\phi,m), n \in \mathbb{Z}.
|
1020 |
+
|
1021 |
+
**Plots**
|
1022 |
+
|
1023 |
+
.. literalinclude :: /plots/ellipf.py
|
1024 |
+
.. image :: /plots/ellipf.png
|
1025 |
+
|
1026 |
+
**Examples**
|
1027 |
+
|
1028 |
+
Basic values and limits::
|
1029 |
+
|
1030 |
+
>>> from mpmath import *
|
1031 |
+
>>> mp.dps = 25; mp.pretty = True
|
1032 |
+
>>> ellipf(0,1)
|
1033 |
+
0.0
|
1034 |
+
>>> ellipf(0,0)
|
1035 |
+
0.0
|
1036 |
+
>>> ellipf(1,0); ellipf(2+3j,0)
|
1037 |
+
1.0
|
1038 |
+
(2.0 + 3.0j)
|
1039 |
+
>>> ellipf(1,1); log(sec(1)+tan(1))
|
1040 |
+
1.226191170883517070813061
|
1041 |
+
1.226191170883517070813061
|
1042 |
+
>>> ellipf(pi/2, -0.5); ellipk(-0.5)
|
1043 |
+
1.415737208425956198892166
|
1044 |
+
1.415737208425956198892166
|
1045 |
+
>>> ellipf(pi/2+eps, 1); ellipf(-pi/2-eps, 1)
|
1046 |
+
+inf
|
1047 |
+
+inf
|
1048 |
+
>>> ellipf(1.5, 1)
|
1049 |
+
3.340677542798311003320813
|
1050 |
+
|
1051 |
+
Comparing with numerical integration::
|
1052 |
+
|
1053 |
+
>>> z,m = 0.5, 1.25
|
1054 |
+
>>> ellipf(z,m)
|
1055 |
+
0.5287219202206327872978255
|
1056 |
+
>>> quad(lambda t: (1-m*sin(t)**2)**(-0.5), [0,z])
|
1057 |
+
0.5287219202206327872978255
|
1058 |
+
|
1059 |
+
The arguments may be complex numbers::
|
1060 |
+
|
1061 |
+
>>> ellipf(3j, 0.5)
|
1062 |
+
(0.0 + 1.713602407841590234804143j)
|
1063 |
+
>>> ellipf(3+4j, 5-6j)
|
1064 |
+
(1.269131241950351323305741 - 0.3561052815014558335412538j)
|
1065 |
+
>>> z,m = 2+3j, 1.25
|
1066 |
+
>>> k = 1011
|
1067 |
+
>>> ellipf(z+pi*k,m); ellipf(z,m) + 2*k*ellipk(m)
|
1068 |
+
(4086.184383622179764082821 - 3003.003538923749396546871j)
|
1069 |
+
(4086.184383622179764082821 - 3003.003538923749396546871j)
|
1070 |
+
|
1071 |
+
For `|\Re(z)| < \pi/2`, the function can be expressed as a
|
1072 |
+
hypergeometric series of two variables
|
1073 |
+
(see :func:`~mpmath.appellf1`)::
|
1074 |
+
|
1075 |
+
>>> z,m = 0.5, 0.25
|
1076 |
+
>>> ellipf(z,m)
|
1077 |
+
0.5050887275786480788831083
|
1078 |
+
>>> sin(z)*appellf1(0.5,0.5,0.5,1.5,sin(z)**2,m*sin(z)**2)
|
1079 |
+
0.5050887275786480788831083
|
1080 |
+
|
1081 |
+
"""
|
1082 |
+
z = phi
|
1083 |
+
if not (ctx.isnormal(z) and ctx.isnormal(m)):
|
1084 |
+
if m == 0:
|
1085 |
+
return z + m
|
1086 |
+
if z == 0:
|
1087 |
+
return z * m
|
1088 |
+
if m == ctx.inf or m == ctx.ninf: return z/m
|
1089 |
+
raise ValueError
|
1090 |
+
x = z.real
|
1091 |
+
ctx.prec += max(0, ctx.mag(x))
|
1092 |
+
pi = +ctx.pi
|
1093 |
+
away = abs(x) > pi/2
|
1094 |
+
if m == 1:
|
1095 |
+
if away:
|
1096 |
+
return ctx.inf
|
1097 |
+
if away:
|
1098 |
+
d = ctx.nint(x/pi)
|
1099 |
+
z = z-pi*d
|
1100 |
+
P = 2*d*ctx.ellipk(m)
|
1101 |
+
else:
|
1102 |
+
P = 0
|
1103 |
+
c, s = ctx.cos_sin(z)
|
1104 |
+
return s * ctx.elliprf(c**2, 1-m*s**2, 1) + P
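The final line of ellipf is the Carlson reduction F(phi, m) = sin(phi) * R_F(cos(phi)^2, 1 - m*sin(phi)^2, 1), valid for |Re(phi)| <= pi/2, with the quasi-period term P covering larger real parts. Spelled out directly (editor's sketch, not part of the file; the test point is arbitrary):

# Sketch (not part of the uploaded file): the Carlson reduction used by ellipf.
from mpmath import mp, mpf, sin, cos, ellipf, elliprf

mp.dps = 25
phi, m = mpf('0.7'), mpf('0.4')              # arbitrary point with |Re(phi)| <= pi/2
rhs = sin(phi)*elliprf(cos(phi)**2, 1 - m*sin(phi)**2, 1)
assert abs(ellipf(phi, m) - rhs) < mpf('1e-20')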
|
1105 |
+
|
1106 |
+
@defun_wrapped
|
1107 |
+
def ellipe(ctx, *args):
|
1108 |
+
r"""
|
1109 |
+
Called with a single argument `m`, evaluates the Legendre complete
|
1110 |
+
elliptic integral of the second kind, `E(m)`, defined by
|
1111 |
+
|
1112 |
+
.. math :: E(m) = \int_0^{\pi/2} \sqrt{1-m \sin^2 t} \, dt \,=\,
|
1113 |
+
\frac{\pi}{2}
|
1114 |
+
\,_2F_1\left(\frac{1}{2}, -\frac{1}{2}, 1, m\right).
|
1115 |
+
|
1116 |
+
Called with two arguments `\phi, m`, evaluates the incomplete elliptic
|
1117 |
+
integral of the second kind
|
1118 |
+
|
1119 |
+
.. math ::
|
1120 |
+
|
1121 |
+
E(\phi,m) = \int_0^{\phi} \sqrt{1-m \sin^2 t} \, dt =
|
1122 |
+
\int_0^{\sin \phi}
|
1123 |
+
\frac{\sqrt{1-mt^2}}{\sqrt{1-t^2}} \, dt.
|
1124 |
+
|
1125 |
+
The incomplete integral reduces to a complete integral when
|
1126 |
+
`\phi = \frac{\pi}{2}`; that is,
|
1127 |
+
|
1128 |
+
.. math ::
|
1129 |
+
|
1130 |
+
E\left(\frac{\pi}{2}, m\right) = E(m).
|
1131 |
+
|
1132 |
+
In the defining integral, it is assumed that the principal branch
|
1133 |
+
of the square root is taken and that the path of integration avoids
|
1134 |
+
crossing any branch cuts. Outside `-\pi/2 \le \Re(\phi) \le \pi/2`,
|
1135 |
+
the function extends quasi-periodically as
|
1136 |
+
|
1137 |
+
.. math ::
|
1138 |
+
|
1139 |
+
E(\phi + n \pi, m) = 2 n E(m) + E(\phi,m), n \in \mathbb{Z}.
|
1140 |
+
|
1141 |
+
**Plots**
|
1142 |
+
|
1143 |
+
.. literalinclude :: /plots/ellipe.py
|
1144 |
+
.. image :: /plots/ellipe.png
|
1145 |
+
|
1146 |
+
**Examples for the complete integral**
|
1147 |
+
|
1148 |
+
Basic values and limits::
|
1149 |
+
|
1150 |
+
>>> from mpmath import *
|
1151 |
+
>>> mp.dps = 25; mp.pretty = True
|
1152 |
+
>>> ellipe(0)
|
1153 |
+
1.570796326794896619231322
|
1154 |
+
>>> ellipe(1)
|
1155 |
+
1.0
|
1156 |
+
>>> ellipe(-1)
|
1157 |
+
1.910098894513856008952381
|
1158 |
+
>>> ellipe(2)
|
1159 |
+
(0.5990701173677961037199612 + 0.5990701173677961037199612j)
|
1160 |
+
>>> ellipe(inf)
|
1161 |
+
(0.0 + +infj)
|
1162 |
+
>>> ellipe(-inf)
|
1163 |
+
+inf
|
1164 |
+
|
1165 |
+
Verifying the defining integral and hypergeometric
|
1166 |
+
representation::
|
1167 |
+
|
1168 |
+
>>> ellipe(0.5)
|
1169 |
+
1.350643881047675502520175
|
1170 |
+
>>> quad(lambda t: sqrt(1-0.5*sin(t)**2), [0, pi/2])
|
1171 |
+
1.350643881047675502520175
|
1172 |
+
>>> pi/2*hyp2f1(0.5,-0.5,1,0.5)
|
1173 |
+
1.350643881047675502520175
|
1174 |
+
|
1175 |
+
Evaluation is supported for arbitrary complex `m`::
|
1176 |
+
|
1177 |
+
>>> ellipe(0.5+0.25j)
|
1178 |
+
(1.360868682163129682716687 - 0.1238733442561786843557315j)
|
1179 |
+
>>> ellipe(3+4j)
|
1180 |
+
(1.499553520933346954333612 - 1.577879007912758274533309j)
|
1181 |
+
|
1182 |
+
A definite integral::
|
1183 |
+
|
1184 |
+
>>> quad(ellipe, [0,1])
|
1185 |
+
1.333333333333333333333333
|
1186 |
+
|
1187 |
+
**Examples for the incomplete integral**
|
1188 |
+
|
1189 |
+
Basic values and limits::
|
1190 |
+
|
1191 |
+
>>> ellipe(0,1)
|
1192 |
+
0.0
|
1193 |
+
>>> ellipe(0,0)
|
1194 |
+
0.0
|
1195 |
+
>>> ellipe(1,0)
|
1196 |
+
1.0
|
1197 |
+
>>> ellipe(2+3j,0)
|
1198 |
+
(2.0 + 3.0j)
|
1199 |
+
>>> ellipe(1,1); sin(1)
|
1200 |
+
0.8414709848078965066525023
|
1201 |
+
0.8414709848078965066525023
|
1202 |
+
>>> ellipe(pi/2, -0.5); ellipe(-0.5)
|
1203 |
+
1.751771275694817862026502
|
1204 |
+
1.751771275694817862026502
|
1205 |
+
>>> ellipe(pi/2, 1); ellipe(-pi/2, 1)
|
1206 |
+
1.0
|
1207 |
+
-1.0
|
1208 |
+
>>> ellipe(1.5, 1)
|
1209 |
+
0.9974949866040544309417234
|
1210 |
+
|
1211 |
+
Comparing with numerical integration::
|
1212 |
+
|
1213 |
+
>>> z,m = 0.5, 1.25
|
1214 |
+
>>> ellipe(z,m)
|
1215 |
+
0.4740152182652628394264449
|
1216 |
+
>>> quad(lambda t: sqrt(1-m*sin(t)**2), [0,z])
|
1217 |
+
0.4740152182652628394264449
|
1218 |
+
|
1219 |
+
The arguments may be complex numbers::
|
1220 |
+
|
1221 |
+
>>> ellipe(3j, 0.5)
|
1222 |
+
(0.0 + 7.551991234890371873502105j)
|
1223 |
+
>>> ellipe(3+4j, 5-6j)
|
1224 |
+
(24.15299022574220502424466 + 75.2503670480325997418156j)
|
1225 |
+
>>> k = 35
|
1226 |
+
>>> z,m = 2+3j, 1.25
|
1227 |
+
>>> ellipe(z+pi*k,m); ellipe(z,m) + 2*k*ellipe(m)
|
1228 |
+
(48.30138799412005235090766 + 17.47255216721987688224357j)
|
1229 |
+
(48.30138799412005235090766 + 17.47255216721987688224357j)
|
1230 |
+
|
1231 |
+
For `|\Re(z)| < \pi/2`, the function can be expressed as a
|
1232 |
+
hypergeometric series of two variables
|
1233 |
+
(see :func:`~mpmath.appellf1`)::
|
1234 |
+
|
1235 |
+
>>> z,m = 0.5, 0.25
|
1236 |
+
>>> ellipe(z,m)
|
1237 |
+
0.4950017030164151928870375
|
1238 |
+
>>> sin(z)*appellf1(0.5,0.5,-0.5,1.5,sin(z)**2,m*sin(z)**2)
|
1239 |
+
0.4950017030164151928870376
|
1240 |
+
|
1241 |
+
"""
|
1242 |
+
if len(args) == 1:
|
1243 |
+
return ctx._ellipe(args[0])
|
1244 |
+
else:
|
1245 |
+
phi, m = args
|
1246 |
+
z = phi
|
1247 |
+
if not (ctx.isnormal(z) and ctx.isnormal(m)):
|
1248 |
+
if m == 0:
|
1249 |
+
return z + m
|
1250 |
+
if z == 0:
|
1251 |
+
return z * m
|
1252 |
+
if m == ctx.inf or m == ctx.ninf:
|
1253 |
+
return ctx.inf
|
1254 |
+
raise ValueError
|
1255 |
+
x = z.real
|
1256 |
+
ctx.prec += max(0, ctx.mag(x))
|
1257 |
+
pi = +ctx.pi
|
1258 |
+
away = abs(x) > pi/2
|
1259 |
+
if away:
|
1260 |
+
d = ctx.nint(x/pi)
|
1261 |
+
z = z-pi*d
|
1262 |
+
P = 2*d*ctx.ellipe(m)
|
1263 |
+
else:
|
1264 |
+
P = 0
|
1265 |
+
def terms():
|
1266 |
+
c, s = ctx.cos_sin(z)
|
1267 |
+
x = c**2
|
1268 |
+
y = 1-m*s**2
|
1269 |
+
RF = ctx.elliprf(x, y, 1)
|
1270 |
+
RD = ctx.elliprd(x, y, 1)
|
1271 |
+
return s*RF, -m*s**3*RD/3
|
1272 |
+
return ctx.sum_accurately(terms) + P
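For the incomplete case, terms() implements E(phi, m) = s*R_F(c^2, y, 1) - (m/3)*s^3*R_D(c^2, y, 1) with s = sin(phi), c = cos(phi), y = 1 - m*s^2, summed accurately and shifted by the quasi-period term P. Written out (editor's sketch, not part of the file; the test point is arbitrary):

# Sketch (not part of the uploaded file): the Carlson reduction used by ellipe.
from mpmath import mp, mpf, sin, cos, ellipe, elliprf, elliprd

mp.dps = 25
phi, m = mpf('0.7'), mpf('0.4')              # arbitrary point with |Re(phi)| <= pi/2
s, c = sin(phi), cos(phi)
y = 1 - m*s**2
rhs = s*elliprf(c**2, y, 1) - m*s**3*elliprd(c**2, y, 1)/3
assert abs(ellipe(phi, m) - rhs) < mpf('1e-20')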
|
1273 |
+
|
1274 |
+
@defun_wrapped
|
1275 |
+
def ellippi(ctx, *args):
|
1276 |
+
r"""
|
1277 |
+
Called with three arguments `n, \phi, m`, evaluates the Legendre
|
1278 |
+
incomplete elliptic integral of the third kind
|
1279 |
+
|
1280 |
+
.. math ::
|
1281 |
+
|
1282 |
+
\Pi(n; \phi, m) = \int_0^{\phi}
|
1283 |
+
\frac{dt}{(1-n \sin^2 t) \sqrt{1-m \sin^2 t}} =
|
1284 |
+
\int_0^{\sin \phi}
|
1285 |
+
\frac{dt}{(1-nt^2) \sqrt{1-t^2} \sqrt{1-mt^2}}.
|
1286 |
+
|
1287 |
+
Called with two arguments `n, m`, evaluates the complete
|
1288 |
+
elliptic integral of the third kind
|
1289 |
+
`\Pi(n,m) = \Pi(n; \frac{\pi}{2},m)`.
|
1290 |
+
|
1291 |
+
In the defining integral, it is assumed that the principal branch
|
1292 |
+
of the square root is taken and that the path of integration avoids
|
1293 |
+
crossing any branch cuts. Outside `-\pi/2 \le \Re(\phi) \le \pi/2`,
|
1294 |
+
the function extends quasi-periodically as
|
1295 |
+
|
1296 |
+
.. math ::
|
1297 |
+
|
1298 |
+
\Pi(n,\phi+k\pi,m) = 2k\Pi(n,m) + \Pi(n,\phi,m), k \in \mathbb{Z}.
|
1299 |
+
|
1300 |
+
**Plots**
|
1301 |
+
|
1302 |
+
.. literalinclude :: /plots/ellippi.py
|
1303 |
+
.. image :: /plots/ellippi.png
|
1304 |
+
|
1305 |
+
**Examples for the complete integral**
|
1306 |
+
|
1307 |
+
Some basic values and limits::
|
1308 |
+
|
1309 |
+
>>> from mpmath import *
|
1310 |
+
>>> mp.dps = 25; mp.pretty = True
|
1311 |
+
>>> ellippi(0,-5); ellipk(-5)
|
1312 |
+
0.9555039270640439337379334
|
1313 |
+
0.9555039270640439337379334
|
1314 |
+
>>> ellippi(inf,2)
|
1315 |
+
0.0
|
1316 |
+
>>> ellippi(2,inf)
|
1317 |
+
0.0
|
1318 |
+
>>> abs(ellippi(1,5))
|
1319 |
+
+inf
|
1320 |
+
>>> abs(ellippi(0.25,1))
|
1321 |
+
+inf
|
1322 |
+
|
1323 |
+
Evaluation in terms of simpler functions::
|
1324 |
+
|
1325 |
+
>>> ellippi(0.25,0.25); ellipe(0.25)/(1-0.25)
|
1326 |
+
1.956616279119236207279727
|
1327 |
+
1.956616279119236207279727
|
1328 |
+
>>> ellippi(3,0); pi/(2*sqrt(-2))
|
1329 |
+
(0.0 - 1.11072073453959156175397j)
|
1330 |
+
(0.0 - 1.11072073453959156175397j)
|
1331 |
+
>>> ellippi(-3,0); pi/(2*sqrt(4))
|
1332 |
+
0.7853981633974483096156609
|
1333 |
+
0.7853981633974483096156609
|
1334 |
+
|
1335 |
+
**Examples for the incomplete integral**
|
1336 |
+
|
1337 |
+
Basic values and limits::
|
1338 |
+
|
1339 |
+
>>> ellippi(0.25,-0.5); ellippi(0.25,pi/2,-0.5)
|
1340 |
+
1.622944760954741603710555
|
1341 |
+
1.622944760954741603710555
|
1342 |
+
>>> ellippi(1,0,1)
|
1343 |
+
0.0
|
1344 |
+
>>> ellippi(inf,0,1)
|
1345 |
+
0.0
|
1346 |
+
>>> ellippi(0,0.25,0.5); ellipf(0.25,0.5)
|
1347 |
+
0.2513040086544925794134591
|
1348 |
+
0.2513040086544925794134591
|
1349 |
+
>>> ellippi(1,1,1); (log(sec(1)+tan(1))+sec(1)*tan(1))/2
|
1350 |
+
2.054332933256248668692452
|
1351 |
+
2.054332933256248668692452
|
1352 |
+
>>> ellippi(0.25, 53*pi/2, 0.75); 53*ellippi(0.25,0.75)
|
1353 |
+
135.240868757890840755058
|
1354 |
+
135.240868757890840755058
|
1355 |
+
>>> ellippi(0.5,pi/4,0.5); 2*ellipe(pi/4,0.5)-1/sqrt(3)
|
1356 |
+
0.9190227391656969903987269
|
1357 |
+
0.9190227391656969903987269
|
1358 |
+
|
1359 |
+
Complex arguments are supported::
|
1360 |
+
|
1361 |
+
>>> ellippi(0.5, 5+6j-2*pi, -7-8j)
|
1362 |
+
(-0.3612856620076747660410167 + 0.5217735339984807829755815j)
|
1363 |
+
|
1364 |
+
Some degenerate cases::
|
1365 |
+
|
1366 |
+
>>> ellippi(1,1)
|
1367 |
+
+inf
|
1368 |
+
>>> ellippi(1,0)
|
1369 |
+
+inf
|
1370 |
+
>>> ellippi(1,2,0)
|
1371 |
+
+inf
|
1372 |
+
>>> ellippi(1,2,1)
|
1373 |
+
+inf
|
1374 |
+
>>> ellippi(1,0,1)
|
1375 |
+
0.0
|
1376 |
+
|
1377 |
+
"""
|
1378 |
+
if len(args) == 2:
|
1379 |
+
n, m = args
|
1380 |
+
complete = True
|
1381 |
+
z = phi = ctx.pi/2
|
1382 |
+
else:
|
1383 |
+
n, phi, m = args
|
1384 |
+
complete = False
|
1385 |
+
z = phi
|
1386 |
+
if not (ctx.isnormal(n) and ctx.isnormal(z) and ctx.isnormal(m)):
|
1387 |
+
if ctx.isnan(n) or ctx.isnan(z) or ctx.isnan(m):
|
1388 |
+
raise ValueError
|
1389 |
+
if complete:
|
1390 |
+
if m == 0:
|
1391 |
+
if n == 1:
|
1392 |
+
return ctx.inf
|
1393 |
+
return ctx.pi/(2*ctx.sqrt(1-n))
|
1394 |
+
if n == 0: return ctx.ellipk(m)
|
1395 |
+
if ctx.isinf(n) or ctx.isinf(m): return ctx.zero
|
1396 |
+
else:
|
1397 |
+
if z == 0: return z
|
1398 |
+
if ctx.isinf(n): return ctx.zero
|
1399 |
+
if ctx.isinf(m): return ctx.zero
|
1400 |
+
if ctx.isinf(n) or ctx.isinf(z) or ctx.isinf(m):
|
1401 |
+
raise ValueError
|
1402 |
+
if complete:
|
1403 |
+
if m == 1:
|
1404 |
+
if n == 1:
|
1405 |
+
return ctx.inf
|
1406 |
+
return -ctx.inf/ctx.sign(n-1)
|
1407 |
+
away = False
|
1408 |
+
else:
|
1409 |
+
x = z.real
|
1410 |
+
ctx.prec += max(0, ctx.mag(x))
|
1411 |
+
pi = +ctx.pi
|
1412 |
+
away = abs(x) > pi/2
|
1413 |
+
if away:
|
1414 |
+
d = ctx.nint(x/pi)
|
1415 |
+
z = z-pi*d
|
1416 |
+
P = 2*d*ctx.ellippi(n,m)
|
1417 |
+
if ctx.isinf(P):
|
1418 |
+
return ctx.inf
|
1419 |
+
else:
|
1420 |
+
P = 0
|
1421 |
+
def terms():
|
1422 |
+
if complete:
|
1423 |
+
c, s = ctx.zero, ctx.one
|
1424 |
+
else:
|
1425 |
+
c, s = ctx.cos_sin(z)
|
1426 |
+
x = c**2
|
1427 |
+
y = 1-m*s**2
|
1428 |
+
RF = ctx.elliprf(x, y, 1)
|
1429 |
+
RJ = ctx.elliprj(x, y, 1, 1-n*s**2)
|
1430 |
+
return s*RF, n*s**3*RJ/3
|
1431 |
+
return ctx.sum_accurately(terms) + P
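Analogously, ellippi's terms() encodes Pi(n; phi, m) = s*R_F(c^2, y, 1) + (n/3)*s^3*R_J(c^2, y, 1, 1 - n*s^2) for |Re(phi)| <= pi/2, with the quasi-period correction P. Written out (editor's sketch, not part of the file; the test values are arbitrary):

# Sketch (not part of the uploaded file): the Carlson reduction used by ellippi.
from mpmath import mp, mpf, sin, cos, ellippi, elliprf, elliprj

mp.dps = 25
n, phi, m = mpf('0.25'), mpf('0.7'), mpf('0.4')   # arbitrary test values
s, c = sin(phi), cos(phi)
y = 1 - m*s**2
rhs = s*elliprf(c**2, y, 1) + n*s**3*elliprj(c**2, y, 1, 1 - n*s**2)/3
assert abs(ellippi(n, phi, m) - rhs) < mpf('1e-20')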
|
env-llmeval/lib/python3.10/site-packages/mpmath/functions/expintegrals.py
ADDED
@@ -0,0 +1,425 @@
1 |
+
from .functions import defun, defun_wrapped
|
2 |
+
|
3 |
+
@defun_wrapped
|
4 |
+
def _erf_complex(ctx, z):
|
5 |
+
z2 = ctx.square_exp_arg(z, -1)
|
6 |
+
#z2 = -z**2
|
7 |
+
v = (2/ctx.sqrt(ctx.pi))*z * ctx.hyp1f1((1,2),(3,2), z2)
|
8 |
+
if not ctx._re(z):
|
9 |
+
v = ctx._im(v)*ctx.j
|
10 |
+
return v
|
11 |
+
|
12 |
+
@defun_wrapped
|
13 |
+
def _erfc_complex(ctx, z):
|
14 |
+
if ctx.re(z) > 2:
|
15 |
+
z2 = ctx.square_exp_arg(z)
|
16 |
+
nz2 = ctx.fneg(z2, exact=True)
|
17 |
+
v = ctx.exp(nz2)/ctx.sqrt(ctx.pi) * ctx.hyperu((1,2),(1,2), z2)
|
18 |
+
else:
|
19 |
+
v = 1 - ctx._erf_complex(z)
|
20 |
+
if not ctx._re(z):
|
21 |
+
v = 1+ctx._im(v)*ctx.j
|
22 |
+
return v
|
23 |
+
|
24 |
+
@defun
|
25 |
+
def erf(ctx, z):
|
26 |
+
z = ctx.convert(z)
|
27 |
+
if ctx._is_real_type(z):
|
28 |
+
try:
|
29 |
+
return ctx._erf(z)
|
30 |
+
except NotImplementedError:
|
31 |
+
pass
|
32 |
+
if ctx._is_complex_type(z) and not z.imag:
|
33 |
+
try:
|
34 |
+
return type(z)(ctx._erf(z.real))
|
35 |
+
except NotImplementedError:
|
36 |
+
pass
|
37 |
+
return ctx._erf_complex(z)
|
38 |
+
|
39 |
+
@defun
|
40 |
+
def erfc(ctx, z):
|
41 |
+
z = ctx.convert(z)
|
42 |
+
if ctx._is_real_type(z):
|
43 |
+
try:
|
44 |
+
return ctx._erfc(z)
|
45 |
+
except NotImplementedError:
|
46 |
+
pass
|
47 |
+
if ctx._is_complex_type(z) and not z.imag:
|
48 |
+
try:
|
49 |
+
return type(z)(ctx._erfc(z.real))
|
50 |
+
except NotImplementedError:
|
51 |
+
pass
|
52 |
+
return ctx._erfc_complex(z)
|
53 |
+
|
54 |
+
@defun
|
55 |
+
def square_exp_arg(ctx, z, mult=1, reciprocal=False):
|
56 |
+
prec = ctx.prec*4+20
|
57 |
+
if reciprocal:
|
58 |
+
z2 = ctx.fmul(z, z, prec=prec)
|
59 |
+
z2 = ctx.fdiv(ctx.one, z2, prec=prec)
|
60 |
+
else:
|
61 |
+
z2 = ctx.fmul(z, z, prec=prec)
|
62 |
+
if mult != 1:
|
63 |
+
z2 = ctx.fmul(z2, mult, exact=True)
|
64 |
+
return z2
|
65 |
+
|
66 |
+
@defun_wrapped
|
67 |
+
def erfi(ctx, z):
|
68 |
+
if not z:
|
69 |
+
return z
|
70 |
+
z2 = ctx.square_exp_arg(z)
|
71 |
+
v = (2/ctx.sqrt(ctx.pi)*z) * ctx.hyp1f1((1,2), (3,2), z2)
|
72 |
+
if not ctx._re(z):
|
73 |
+
v = ctx._im(v)*ctx.j
|
74 |
+
return v
|
75 |
+
|
76 |
+
@defun_wrapped
|
77 |
+
def erfinv(ctx, x):
|
78 |
+
xre = ctx._re(x)
|
79 |
+
if (xre != x) or (xre < -1) or (xre > 1):
|
80 |
+
return ctx.bad_domain("erfinv(x) is defined only for -1 <= x <= 1")
|
81 |
+
x = xre
|
82 |
+
#if ctx.isnan(x): return x
|
83 |
+
if not x: return x
|
84 |
+
if x == 1: return ctx.inf
|
85 |
+
if x == -1: return ctx.ninf
|
86 |
+
if abs(x) < 0.9:
|
87 |
+
a = 0.53728*x**3 + 0.813198*x
|
88 |
+
else:
|
89 |
+
# An asymptotic formula
|
90 |
+
u = ctx.ln(2/ctx.pi/(abs(x)-1)**2)
|
91 |
+
a = ctx.sign(x) * ctx.sqrt(u - ctx.ln(u))/ctx.sqrt(2)
|
92 |
+
ctx.prec += 10
|
93 |
+
return ctx.findroot(lambda t: ctx.erf(t)-x, a)
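erfinv works in two stages: a crude starting value (a cubic fit for |x| < 0.9, an asymptotic estimate otherwise) is refined with findroot applied to erf(t) - x, so the round trip erf(erfinv(x)) should close to working precision. A quick check (editor's sketch, not part of the file; the sample points are arbitrary):

# Sketch (not part of the uploaded file): round-trip accuracy of erfinv.
from mpmath import mp, mpf, erf, erfinv

mp.dps = 25
for x in [mpf('0.1'), mpf('0.5'), mpf('0.95')]:   # arbitrary sample points
    assert abs(erf(erfinv(x)) - x) < mpf('1e-20')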
|
94 |
+
|
95 |
+
@defun_wrapped
|
96 |
+
def npdf(ctx, x, mu=0, sigma=1):
|
97 |
+
sigma = ctx.convert(sigma)
|
98 |
+
return ctx.exp(-(x-mu)**2/(2*sigma**2)) / (sigma*ctx.sqrt(2*ctx.pi))
|
99 |
+
|
100 |
+
@defun_wrapped
|
101 |
+
def ncdf(ctx, x, mu=0, sigma=1):
|
102 |
+
a = (x-mu)/(sigma*ctx.sqrt(2))
|
103 |
+
if a < 0:
|
104 |
+
return ctx.erfc(-a)/2
|
105 |
+
else:
|
106 |
+
return (1+ctx.erf(a))/2
|
107 |
+
|
108 |
+
@defun_wrapped
|
109 |
+
def betainc(ctx, a, b, x1=0, x2=1, regularized=False):
|
110 |
+
if x1 == x2:
|
111 |
+
v = 0
|
112 |
+
elif not x1:
|
113 |
+
if x1 == 0 and x2 == 1:
|
114 |
+
v = ctx.beta(a, b)
|
115 |
+
else:
|
116 |
+
v = x2**a * ctx.hyp2f1(a, 1-b, a+1, x2) / a
|
117 |
+
else:
|
118 |
+
m, d = ctx.nint_distance(a)
|
119 |
+
if m <= 0:
|
120 |
+
if d < -ctx.prec:
|
121 |
+
h = +ctx.eps
|
122 |
+
ctx.prec *= 2
|
123 |
+
a += h
|
124 |
+
elif d < -4:
|
125 |
+
ctx.prec -= d
|
126 |
+
s1 = x2**a * ctx.hyp2f1(a,1-b,a+1,x2)
|
127 |
+
s2 = x1**a * ctx.hyp2f1(a,1-b,a+1,x1)
|
128 |
+
v = (s1 - s2) / a
|
129 |
+
if regularized:
|
130 |
+
v /= ctx.beta(a,b)
|
131 |
+
return v
|
132 |
+
|
133 |
+
@defun
|
134 |
+
def gammainc(ctx, z, a=0, b=None, regularized=False):
|
135 |
+
regularized = bool(regularized)
|
136 |
+
z = ctx.convert(z)
|
137 |
+
if a is None:
|
138 |
+
a = ctx.zero
|
139 |
+
lower_modified = False
|
140 |
+
else:
|
141 |
+
a = ctx.convert(a)
|
142 |
+
lower_modified = a != ctx.zero
|
143 |
+
if b is None:
|
144 |
+
b = ctx.inf
|
145 |
+
upper_modified = False
|
146 |
+
else:
|
147 |
+
b = ctx.convert(b)
|
148 |
+
upper_modified = b != ctx.inf
|
149 |
+
# Complete gamma function
|
150 |
+
if not (upper_modified or lower_modified):
|
151 |
        if regularized:
            if ctx.re(z) < 0:
                return ctx.inf
            elif ctx.re(z) > 0:
                return ctx.one
            else:
                return ctx.nan
        return ctx.gamma(z)
    if a == b:
        return ctx.zero
    # Standardize
    if ctx.re(a) > ctx.re(b):
        return -ctx.gammainc(z, b, a, regularized)
    # Generalized gamma
    if upper_modified and lower_modified:
        return +ctx._gamma3(z, a, b, regularized)
    # Upper gamma
    elif lower_modified:
        return ctx._upper_gamma(z, a, regularized)
    # Lower gamma
    elif upper_modified:
        return ctx._lower_gamma(z, b, regularized)

@defun
def _lower_gamma(ctx, z, b, regularized=False):
    # Pole
    if ctx.isnpint(z):
        return type(z)(ctx.inf)
    G = [z] * regularized
    negb = ctx.fneg(b, exact=True)
    def h(z):
        T1 = [ctx.exp(negb), b, z], [1, z, -1], [], G, [1], [1+z], b
        return (T1,)
    return ctx.hypercomb(h, [z])

@defun
def _upper_gamma(ctx, z, a, regularized=False):
    # Fast integer case, when available
    if ctx.isint(z):
        try:
            if regularized:
                # Gamma pole
                if ctx.isnpint(z):
                    return type(z)(ctx.zero)
                orig = ctx.prec
                try:
                    ctx.prec += 10
                    return ctx._gamma_upper_int(z, a) / ctx.gamma(z)
                finally:
                    ctx.prec = orig
            else:
                return ctx._gamma_upper_int(z, a)
        except NotImplementedError:
            pass
    # hypercomb is unable to detect the exact zeros, so handle them here
    if z == 2 and a == -1:
        return (z+a)*0
    if z == 3 and (a == -1-1j or a == -1+1j):
        return (z+a)*0
    nega = ctx.fneg(a, exact=True)
    G = [z] * regularized
    # Use 2F0 series when possible; fall back to lower gamma representation
    try:
        def h(z):
            r = z-1
            return [([ctx.exp(nega), a], [1, r], [], G, [1, -r], [], 1/nega)]
        return ctx.hypercomb(h, [z], force_series=True)
    except ctx.NoConvergence:
        def h(z):
            T1 = [], [1, z-1], [z], G, [], [], 0
            T2 = [-ctx.exp(nega), a, z], [1, z, -1], [], G, [1], [1+z], a
            return T1, T2
        return ctx.hypercomb(h, [z])

@defun
def _gamma3(ctx, z, a, b, regularized=False):
    pole = ctx.isnpint(z)
    if regularized and pole:
        return ctx.zero
    try:
        ctx.prec += 15
        # We don't know in advance whether it's better to write as a difference
        # of lower or upper gamma functions, so try both
        T1 = ctx.gammainc(z, a, regularized=regularized)
        T2 = ctx.gammainc(z, b, regularized=regularized)
        R = T1 - T2
        if ctx.mag(R) - max(ctx.mag(T1), ctx.mag(T2)) > -10:
            return R
        if not pole:
            T1 = ctx.gammainc(z, 0, b, regularized=regularized)
            T2 = ctx.gammainc(z, 0, a, regularized=regularized)
            R = T1 - T2
            # May be ok, but should probably at least print a warning
            # about possible cancellation
            if 1: #ctx.mag(R) - max(ctx.mag(T1), ctx.mag(T2)) > -10:
                return R
    finally:
        ctx.prec -= 15
    raise NotImplementedError

@defun_wrapped
def expint(ctx, n, z):
    if ctx.isint(n) and ctx._is_real_type(z):
        try:
            return ctx._expint_int(n, z)
        except NotImplementedError:
            pass
    if ctx.isnan(n) or ctx.isnan(z):
        return z*n
    if z == ctx.inf:
        return 1/z
    if z == 0:
        # integral from 1 to infinity of t^n
        if ctx.re(n) <= 1:
            # TODO: reasonable sign of infinity
            return type(z)(ctx.inf)
        else:
            return ctx.one/(n-1)
    if n == 0:
        return ctx.exp(-z)/z
    if n == -1:
        return ctx.exp(-z)*(z+1)/z**2
    return z**(n-1) * ctx.gammainc(1-n, z)

@defun_wrapped
def li(ctx, z, offset=False):
    if offset:
        if z == 2:
            return ctx.zero
        return ctx.ei(ctx.ln(z)) - ctx.ei(ctx.ln2)
    if not z:
        return z
    if z == 1:
        return ctx.ninf
    return ctx.ei(ctx.ln(z))

@defun
def ei(ctx, z):
    try:
        return ctx._ei(z)
    except NotImplementedError:
        return ctx._ei_generic(z)

@defun_wrapped
def _ei_generic(ctx, z):
    # Note: the following is currently untested because mp and fp
    # both use special-case ei code
    if z == ctx.inf:
        return z
    if z == ctx.ninf:
        return ctx.zero
    if ctx.mag(z) > 1:
        try:
            r = ctx.one/z
            v = ctx.exp(z)*ctx.hyper([1,1],[],r,
                maxterms=ctx.prec, force_series=True)/z
            im = ctx._im(z)
            if im > 0:
                v += ctx.pi*ctx.j
            if im < 0:
                v -= ctx.pi*ctx.j
            return v
        except ctx.NoConvergence:
            pass
    v = z*ctx.hyp2f2(1,1,2,2,z) + ctx.euler
    if ctx._im(z):
        v += 0.5*(ctx.log(z) - ctx.log(ctx.one/z))
    else:
        v += ctx.log(abs(z))
    return v

@defun
def e1(ctx, z):
    try:
        return ctx._e1(z)
    except NotImplementedError:
        return ctx.expint(1, z)

@defun
def ci(ctx, z):
    try:
        return ctx._ci(z)
    except NotImplementedError:
        return ctx._ci_generic(z)

@defun_wrapped
def _ci_generic(ctx, z):
    if ctx.isinf(z):
        if z == ctx.inf: return ctx.zero
        if z == ctx.ninf: return ctx.pi*1j
    jz = ctx.fmul(ctx.j,z,exact=True)
    njz = ctx.fneg(jz,exact=True)
    v = 0.5*(ctx.ei(jz) + ctx.ei(njz))
    zreal = ctx._re(z)
    zimag = ctx._im(z)
    if zreal == 0:
        if zimag > 0: v += ctx.pi*0.5j
        if zimag < 0: v -= ctx.pi*0.5j
    if zreal < 0:
        if zimag >= 0: v += ctx.pi*1j
        if zimag < 0: v -= ctx.pi*1j
    if ctx._is_real_type(z) and zreal > 0:
        v = ctx._re(v)
    return v

@defun
def si(ctx, z):
    try:
        return ctx._si(z)
    except NotImplementedError:
        return ctx._si_generic(z)

@defun_wrapped
def _si_generic(ctx, z):
    if ctx.isinf(z):
        if z == ctx.inf: return 0.5*ctx.pi
        if z == ctx.ninf: return -0.5*ctx.pi
    # Suffers from cancellation near 0
    if ctx.mag(z) >= -1:
        jz = ctx.fmul(ctx.j,z,exact=True)
        njz = ctx.fneg(jz,exact=True)
        v = (-0.5j)*(ctx.ei(jz) - ctx.ei(njz))
        zreal = ctx._re(z)
        if zreal > 0:
            v -= 0.5*ctx.pi
        if zreal < 0:
            v += 0.5*ctx.pi
        if ctx._is_real_type(z):
            v = ctx._re(v)
        return v
    else:
        return z*ctx.hyp1f2((1,2),(3,2),(3,2),-0.25*z*z)

@defun_wrapped
def chi(ctx, z):
    nz = ctx.fneg(z, exact=True)
    v = 0.5*(ctx.ei(z) + ctx.ei(nz))
    zreal = ctx._re(z)
    zimag = ctx._im(z)
    if zimag > 0:
        v += ctx.pi*0.5j
    elif zimag < 0:
        v -= ctx.pi*0.5j
    elif zreal < 0:
        v += ctx.pi*1j
    return v

@defun_wrapped
def shi(ctx, z):
    # Suffers from cancellation near 0
    if ctx.mag(z) >= -1:
        nz = ctx.fneg(z, exact=True)
        v = 0.5*(ctx.ei(z) - ctx.ei(nz))
        zimag = ctx._im(z)
        if zimag > 0: v -= 0.5j*ctx.pi
        if zimag < 0: v += 0.5j*ctx.pi
        return v
    else:
        return z * ctx.hyp1f2((1,2),(3,2),(3,2),0.25*z*z)

@defun_wrapped
def fresnels(ctx, z):
    if z == ctx.inf:
        return ctx.mpf(0.5)
    if z == ctx.ninf:
        return ctx.mpf(-0.5)
    return ctx.pi*z**3/6*ctx.hyp1f2((3,4),(3,2),(7,4),-ctx.pi**2*z**4/16)

@defun_wrapped
def fresnelc(ctx, z):
    if z == ctx.inf:
        return ctx.mpf(0.5)
    if z == ctx.ninf:
        return ctx.mpf(-0.5)
    return z*ctx.hyp1f2((1,4),(1,2),(5,4),-ctx.pi**2*z**4/16)
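For orientation, the snippet below is a minimal usage sketch (not part of the diff) showing how the incomplete gamma, exponential-integral, trigonometric/hyperbolic-integral and Fresnel routines defined in this file are normally reached through mpmath's public namespace; it assumes a standard mpmath installation and only illustrates the call signatures.

# Minimal usage sketch; assumes `pip install mpmath` and the standard
# top-level names exported by the library.
from mpmath import (mp, gammainc, expint, li, ei, e1, ci, si, chi, shi,
                    fresnels, fresnelc)

mp.dps = 25                                   # working decimal precision
print(gammainc(3, 1))                         # upper incomplete gamma Gamma(3, 1)
print(gammainc(3, 0, 1))                      # lower incomplete gamma gamma(3, 1)
print(gammainc(3, 1, 4, regularized=True))    # generalized, regularized variant
print(expint(2, 1.5))                         # generalized exponential integral E_2
print(li(10), li(10, offset=True))            # logarithmic integral, both conventions
print(ei(1), e1(1))                           # Ei and E_1
print(ci(1), si(1), chi(1), shi(1))           # cosine/sine integrals and hyperbolic analogues
print(fresnels(1), fresnelc(1))               # Fresnel integrals S(1), C(1)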
env-llmeval/lib/python3.10/site-packages/mpmath/functions/factorials.py
ADDED
@@ -0,0 +1,187 @@
from ..libmp.backend import xrange
from .functions import defun, defun_wrapped

@defun
def gammaprod(ctx, a, b, _infsign=False):
    a = [ctx.convert(x) for x in a]
    b = [ctx.convert(x) for x in b]
    poles_num = []
    poles_den = []
    regular_num = []
    regular_den = []
    for x in a: [regular_num, poles_num][ctx.isnpint(x)].append(x)
    for x in b: [regular_den, poles_den][ctx.isnpint(x)].append(x)
    # One more pole in numerator or denominator gives 0 or inf
    if len(poles_num) < len(poles_den): return ctx.zero
    if len(poles_num) > len(poles_den):
        # Get correct sign of infinity for x+h, h -> 0 from above
        # XXX: hack, this should be done properly
        if _infsign:
            a = [x and x*(1+ctx.eps) or x+ctx.eps for x in poles_num]
            b = [x and x*(1+ctx.eps) or x+ctx.eps for x in poles_den]
            return ctx.sign(ctx.gammaprod(a+regular_num,b+regular_den)) * ctx.inf
        else:
            return ctx.inf
    # All poles cancel
    # lim G(i)/G(j) = (-1)**(i+j) * gamma(1-j) / gamma(1-i)
    p = ctx.one
    orig = ctx.prec
    try:
        ctx.prec = orig + 15
        while poles_num:
            i = poles_num.pop()
            j = poles_den.pop()
            p *= (-1)**(i+j) * ctx.gamma(1-j) / ctx.gamma(1-i)
        for x in regular_num: p *= ctx.gamma(x)
        for x in regular_den: p /= ctx.gamma(x)
    finally:
        ctx.prec = orig
    return +p

@defun
def beta(ctx, x, y):
    x = ctx.convert(x)
    y = ctx.convert(y)
    if ctx.isinf(y):
        x, y = y, x
    if ctx.isinf(x):
        if x == ctx.inf and not ctx._im(y):
            if y == ctx.ninf:
                return ctx.nan
            if y > 0:
                return ctx.zero
            if ctx.isint(y):
                return ctx.nan
            if y < 0:
                return ctx.sign(ctx.gamma(y)) * ctx.inf
        return ctx.nan
    xy = ctx.fadd(x, y, prec=2*ctx.prec)
    return ctx.gammaprod([x, y], [xy])

@defun
def binomial(ctx, n, k):
    n1 = ctx.fadd(n, 1, prec=2*ctx.prec)
    k1 = ctx.fadd(k, 1, prec=2*ctx.prec)
    nk1 = ctx.fsub(n1, k, prec=2*ctx.prec)
    return ctx.gammaprod([n1], [k1, nk1])

@defun
def rf(ctx, x, n):
    xn = ctx.fadd(x, n, prec=2*ctx.prec)
    return ctx.gammaprod([xn], [x])

@defun
def ff(ctx, x, n):
    x1 = ctx.fadd(x, 1, prec=2*ctx.prec)
    xn1 = ctx.fadd(ctx.fsub(x, n, prec=2*ctx.prec), 1, prec=2*ctx.prec)
    return ctx.gammaprod([x1], [xn1])

@defun_wrapped
def fac2(ctx, x):
    if ctx.isinf(x):
        if x == ctx.inf:
            return x
        return ctx.nan
    return 2**(x/2)*(ctx.pi/2)**((ctx.cospi(x)-1)/4)*ctx.gamma(x/2+1)

@defun_wrapped
def barnesg(ctx, z):
    if ctx.isinf(z):
        if z == ctx.inf:
            return z
        return ctx.nan
    if ctx.isnan(z):
        return z
    if (not ctx._im(z)) and ctx._re(z) <= 0 and ctx.isint(ctx._re(z)):
        return z*0
    # Account for size (would not be needed if computing log(G))
    if abs(z) > 5:
        ctx.dps += 2*ctx.log(abs(z),2)
    # Reflection formula
    if ctx.re(z) < -ctx.dps:
        w = 1-z
        pi2 = 2*ctx.pi
        u = ctx.expjpi(2*w)
        v = ctx.j*ctx.pi/12 - ctx.j*ctx.pi*w**2/2 + w*ctx.ln(1-u) - \
            ctx.j*ctx.polylog(2, u)/pi2
        v = ctx.barnesg(2-z)*ctx.exp(v)/pi2**w
        if ctx._is_real_type(z):
            v = ctx._re(v)
        return v
    # Estimate terms for asymptotic expansion
    # TODO: fixme, obviously
    N = ctx.dps // 2 + 5
    G = 1
    while abs(z) < N or ctx.re(z) < 1:
        G /= ctx.gamma(z)
        z += 1
    z -= 1
    s = ctx.mpf(1)/12
    s -= ctx.log(ctx.glaisher)
    s += z*ctx.log(2*ctx.pi)/2
    s += (z**2/2-ctx.mpf(1)/12)*ctx.log(z)
    s -= 3*z**2/4
    z2k = z2 = z**2
    for k in xrange(1, N+1):
        t = ctx.bernoulli(2*k+2) / (4*k*(k+1)*z2k)
        if abs(t) < ctx.eps:
            #print k, N      # check how many terms were needed
            break
        z2k *= z2
        s += t
    #if k == N:
    #    print "warning: series for barnesg failed to converge", ctx.dps
    return G*ctx.exp(s)

@defun
def superfac(ctx, z):
    return ctx.barnesg(z+2)

@defun_wrapped
def hyperfac(ctx, z):
    # XXX: estimate needed extra bits accurately
    if z == ctx.inf:
        return z
    if abs(z) > 5:
        extra = 4*int(ctx.log(abs(z),2))
    else:
        extra = 0
    ctx.prec += extra
    if not ctx._im(z) and ctx._re(z) < 0 and ctx.isint(ctx._re(z)):
        n = int(ctx.re(z))
        h = ctx.hyperfac(-n-1)
        if ((n+1)//2) & 1:
            h = -h
        if ctx._is_complex_type(z):
            return h + 0j
        return h
    zp1 = z+1
    # Wrong branch cut
    #v = ctx.gamma(zp1)**z
    #ctx.prec -= extra
    #return v / ctx.barnesg(zp1)
    v = ctx.exp(z*ctx.loggamma(zp1))
    ctx.prec -= extra
    return v / ctx.barnesg(zp1)

'''
@defun
def psi0(ctx, z):
    """Shortcut for psi(0,z) (the digamma function)"""
    return ctx.psi(0, z)

@defun
def psi1(ctx, z):
    """Shortcut for psi(1,z) (the trigamma function)"""
    return ctx.psi(1, z)

@defun
def psi2(ctx, z):
    """Shortcut for psi(2,z) (the tetragamma function)"""
    return ctx.psi(2, z)

@defun
def psi3(ctx, z):
    """Shortcut for psi(3,z) (the pentagamma function)"""
    return ctx.psi(3, z)
'''
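As above, a minimal usage sketch (not part of the diff) for the factorial-type functions added in factorials.py; it assumes a standard mpmath installation and simply exercises the public entry points that wrap the context methods defined here.

# Minimal usage sketch for the factorial-type functions; assumes mpmath
# is installed and exports these names at the top level.
from mpmath import mp, beta, binomial, rf, ff, fac2, barnesg, superfac, hyperfac

mp.dps = 25
print(beta(2.5, 3.5))            # Euler beta function B(2.5, 3.5)
print(binomial(10, 3))           # generalized binomial coefficient
print(rf(3, 4), ff(7, 3))        # rising and falling factorials
print(fac2(7))                   # double factorial 7!! = 105
print(barnesg(4))                # Barnes G-function G(4) = 2
print(superfac(3), hyperfac(3))  # superfactorial and hyperfactorial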
env-llmeval/lib/python3.10/site-packages/mpmath/functions/functions.py
ADDED
@@ -0,0 +1,645 @@
1 |
+
from ..libmp.backend import xrange
|
2 |
+
|
3 |
+
class SpecialFunctions(object):
|
4 |
+
"""
|
5 |
+
This class implements special functions using high-level code.
|
6 |
+
|
7 |
+
Elementary and some other functions (e.g. gamma function, basecase
|
8 |
+
hypergeometric series) are assumed to be predefined by the context as
|
9 |
+
"builtins" or "low-level" functions.
|
10 |
+
"""
|
11 |
+
defined_functions = {}
|
12 |
+
|
13 |
+
# The series for the Jacobi theta functions converge for |q| < 1;
|
14 |
+
# in the current implementation they throw a ValueError for
|
15 |
+
# abs(q) > THETA_Q_LIM
|
16 |
+
THETA_Q_LIM = 1 - 10**-7
|
17 |
+
|
18 |
+
def __init__(self):
|
19 |
+
cls = self.__class__
|
20 |
+
for name in cls.defined_functions:
|
21 |
+
f, wrap = cls.defined_functions[name]
|
22 |
+
cls._wrap_specfun(name, f, wrap)
|
23 |
+
|
24 |
+
self.mpq_1 = self._mpq((1,1))
|
25 |
+
self.mpq_0 = self._mpq((0,1))
|
26 |
+
self.mpq_1_2 = self._mpq((1,2))
|
27 |
+
self.mpq_3_2 = self._mpq((3,2))
|
28 |
+
self.mpq_1_4 = self._mpq((1,4))
|
29 |
+
self.mpq_1_16 = self._mpq((1,16))
|
30 |
+
self.mpq_3_16 = self._mpq((3,16))
|
31 |
+
self.mpq_5_2 = self._mpq((5,2))
|
32 |
+
self.mpq_3_4 = self._mpq((3,4))
|
33 |
+
self.mpq_7_4 = self._mpq((7,4))
|
34 |
+
self.mpq_5_4 = self._mpq((5,4))
|
35 |
+
self.mpq_1_3 = self._mpq((1,3))
|
36 |
+
self.mpq_2_3 = self._mpq((2,3))
|
37 |
+
self.mpq_4_3 = self._mpq((4,3))
|
38 |
+
self.mpq_1_6 = self._mpq((1,6))
|
39 |
+
self.mpq_5_6 = self._mpq((5,6))
|
40 |
+
self.mpq_5_3 = self._mpq((5,3))
|
41 |
+
|
42 |
+
self._misc_const_cache = {}
|
43 |
+
|
44 |
+
self._aliases.update({
|
45 |
+
'phase' : 'arg',
|
46 |
+
'conjugate' : 'conj',
|
47 |
+
'nthroot' : 'root',
|
48 |
+
'polygamma' : 'psi',
|
49 |
+
'hurwitz' : 'zeta',
|
50 |
+
#'digamma' : 'psi0',
|
51 |
+
#'trigamma' : 'psi1',
|
52 |
+
#'tetragamma' : 'psi2',
|
53 |
+
#'pentagamma' : 'psi3',
|
54 |
+
'fibonacci' : 'fib',
|
55 |
+
'factorial' : 'fac',
|
56 |
+
})
|
57 |
+
|
58 |
+
self.zetazero_memoized = self.memoize(self.zetazero)
|
59 |
+
|
60 |
+
# Default -- do nothing
|
61 |
+
@classmethod
|
62 |
+
def _wrap_specfun(cls, name, f, wrap):
|
63 |
+
setattr(cls, name, f)
|
64 |
+
|
65 |
+
# Optional fast versions of common functions in common cases.
|
66 |
+
# If not overridden, default (generic hypergeometric series)
|
67 |
+
# implementations will be used
|
68 |
+
def _besselj(ctx, n, z): raise NotImplementedError
|
69 |
+
def _erf(ctx, z): raise NotImplementedError
|
70 |
+
def _erfc(ctx, z): raise NotImplementedError
|
71 |
+
def _gamma_upper_int(ctx, z, a): raise NotImplementedError
|
72 |
+
def _expint_int(ctx, n, z): raise NotImplementedError
|
73 |
+
def _zeta(ctx, s): raise NotImplementedError
|
74 |
+
def _zetasum_fast(ctx, s, a, n, derivatives, reflect): raise NotImplementedError
|
75 |
+
def _ei(ctx, z): raise NotImplementedError
|
76 |
+
def _e1(ctx, z): raise NotImplementedError
|
77 |
+
def _ci(ctx, z): raise NotImplementedError
|
78 |
+
def _si(ctx, z): raise NotImplementedError
|
79 |
+
def _altzeta(ctx, s): raise NotImplementedError
|
80 |
+
|
81 |
+
def defun_wrapped(f):
|
82 |
+
SpecialFunctions.defined_functions[f.__name__] = f, True
|
83 |
+
return f
|
84 |
+
|
85 |
+
def defun(f):
|
86 |
+
SpecialFunctions.defined_functions[f.__name__] = f, False
|
87 |
+
return f
|
88 |
+
|
89 |
+
def defun_static(f):
|
90 |
+
setattr(SpecialFunctions, f.__name__, f)
|
91 |
+
return f
|
92 |
+
|
93 |
+
@defun_wrapped
|
94 |
+
def cot(ctx, z): return ctx.one / ctx.tan(z)
|
95 |
+
|
96 |
+
@defun_wrapped
|
97 |
+
def sec(ctx, z): return ctx.one / ctx.cos(z)
|
98 |
+
|
99 |
+
@defun_wrapped
|
100 |
+
def csc(ctx, z): return ctx.one / ctx.sin(z)
|
101 |
+
|
102 |
+
@defun_wrapped
|
103 |
+
def coth(ctx, z): return ctx.one / ctx.tanh(z)
|
104 |
+
|
105 |
+
@defun_wrapped
|
106 |
+
def sech(ctx, z): return ctx.one / ctx.cosh(z)
|
107 |
+
|
108 |
+
@defun_wrapped
|
109 |
+
def csch(ctx, z): return ctx.one / ctx.sinh(z)
|
110 |
+
|
111 |
+
@defun_wrapped
|
112 |
+
def acot(ctx, z):
|
113 |
+
if not z:
|
114 |
+
return ctx.pi * 0.5
|
115 |
+
else:
|
116 |
+
return ctx.atan(ctx.one / z)
|
117 |
+
|
118 |
+
@defun_wrapped
|
119 |
+
def asec(ctx, z): return ctx.acos(ctx.one / z)
|
120 |
+
|
121 |
+
@defun_wrapped
|
122 |
+
def acsc(ctx, z): return ctx.asin(ctx.one / z)
|
123 |
+
|
124 |
+
@defun_wrapped
|
125 |
+
def acoth(ctx, z):
|
126 |
+
if not z:
|
127 |
+
return ctx.pi * 0.5j
|
128 |
+
else:
|
129 |
+
return ctx.atanh(ctx.one / z)
|
130 |
+
|
131 |
+
|
132 |
+
@defun_wrapped
|
133 |
+
def asech(ctx, z): return ctx.acosh(ctx.one / z)
|
134 |
+
|
135 |
+
@defun_wrapped
|
136 |
+
def acsch(ctx, z): return ctx.asinh(ctx.one / z)
|
137 |
+
|
138 |
+
@defun
|
139 |
+
def sign(ctx, x):
|
140 |
+
x = ctx.convert(x)
|
141 |
+
if not x or ctx.isnan(x):
|
142 |
+
return x
|
143 |
+
if ctx._is_real_type(x):
|
144 |
+
if x > 0:
|
145 |
+
return ctx.one
|
146 |
+
else:
|
147 |
+
return -ctx.one
|
148 |
+
return x / abs(x)
|
149 |
+
|
150 |
+
@defun
|
151 |
+
def agm(ctx, a, b=1):
|
152 |
+
if b == 1:
|
153 |
+
return ctx.agm1(a)
|
154 |
+
a = ctx.convert(a)
|
155 |
+
b = ctx.convert(b)
|
156 |
+
return ctx._agm(a, b)
|
157 |
+
|
158 |
+
@defun_wrapped
|
159 |
+
def sinc(ctx, x):
|
160 |
+
if ctx.isinf(x):
|
161 |
+
return 1/x
|
162 |
+
if not x:
|
163 |
+
return x+1
|
164 |
+
return ctx.sin(x)/x
|
165 |
+
|
166 |
+
@defun_wrapped
|
167 |
+
def sincpi(ctx, x):
|
168 |
+
if ctx.isinf(x):
|
169 |
+
return 1/x
|
170 |
+
if not x:
|
171 |
+
return x+1
|
172 |
+
return ctx.sinpi(x)/(ctx.pi*x)
|
173 |
+
|
174 |
+
# TODO: tests; improve implementation
|
175 |
+
@defun_wrapped
|
176 |
+
def expm1(ctx, x):
|
177 |
+
if not x:
|
178 |
+
return ctx.zero
|
179 |
+
# exp(x) - 1 ~ x
|
180 |
+
if ctx.mag(x) < -ctx.prec:
|
181 |
+
return x + 0.5*x**2
|
182 |
+
# TODO: accurately eval the smaller of the real/imag parts
|
183 |
+
return ctx.sum_accurately(lambda: iter([ctx.exp(x),-1]),1)
|
184 |
+
|
185 |
+
@defun_wrapped
|
186 |
+
def log1p(ctx, x):
|
187 |
+
if not x:
|
188 |
+
return ctx.zero
|
189 |
+
if ctx.mag(x) < -ctx.prec:
|
190 |
+
return x - 0.5*x**2
|
191 |
+
return ctx.log(ctx.fadd(1, x, prec=2*ctx.prec))
|
192 |
+
|
193 |
+
@defun_wrapped
|
194 |
+
def powm1(ctx, x, y):
|
195 |
+
mag = ctx.mag
|
196 |
+
one = ctx.one
|
197 |
+
w = x**y - one
|
198 |
+
M = mag(w)
|
199 |
+
# Only moderate cancellation
|
200 |
+
if M > -8:
|
201 |
+
return w
|
202 |
+
# Check for the only possible exact cases
|
203 |
+
if not w:
|
204 |
+
if (not y) or (x in (1, -1, 1j, -1j) and ctx.isint(y)):
|
205 |
+
return w
|
206 |
+
x1 = x - one
|
207 |
+
magy = mag(y)
|
208 |
+
lnx = ctx.ln(x)
|
209 |
+
# Small y: x^y - 1 ~ log(x)*y + O(log(x)^2 * y^2)
|
210 |
+
if magy + mag(lnx) < -ctx.prec:
|
211 |
+
return lnx*y + (lnx*y)**2/2
|
212 |
+
# TODO: accurately eval the smaller of the real/imag part
|
213 |
+
return ctx.sum_accurately(lambda: iter([x**y, -1]), 1)
|
214 |
+
|
215 |
+
@defun
|
216 |
+
def _rootof1(ctx, k, n):
|
217 |
+
k = int(k)
|
218 |
+
n = int(n)
|
219 |
+
k %= n
|
220 |
+
if not k:
|
221 |
+
return ctx.one
|
222 |
+
elif 2*k == n:
|
223 |
+
return -ctx.one
|
224 |
+
elif 4*k == n:
|
225 |
+
return ctx.j
|
226 |
+
elif 4*k == 3*n:
|
227 |
+
return -ctx.j
|
228 |
+
return ctx.expjpi(2*ctx.mpf(k)/n)
|
229 |
+
|
230 |
+
@defun
|
231 |
+
def root(ctx, x, n, k=0):
|
232 |
+
n = int(n)
|
233 |
+
x = ctx.convert(x)
|
234 |
+
if k:
|
235 |
+
# Special case: there is an exact real root
|
236 |
+
if (n & 1 and 2*k == n-1) and (not ctx.im(x)) and (ctx.re(x) < 0):
|
237 |
+
return -ctx.root(-x, n)
|
238 |
+
# Multiply by root of unity
|
239 |
+
prec = ctx.prec
|
240 |
+
try:
|
241 |
+
ctx.prec += 10
|
242 |
+
v = ctx.root(x, n, 0) * ctx._rootof1(k, n)
|
243 |
+
finally:
|
244 |
+
ctx.prec = prec
|
245 |
+
return +v
|
246 |
+
return ctx._nthroot(x, n)
|
247 |
+
|
248 |
+
@defun
|
249 |
+
def unitroots(ctx, n, primitive=False):
|
250 |
+
gcd = ctx._gcd
|
251 |
+
prec = ctx.prec
|
252 |
+
try:
|
253 |
+
ctx.prec += 10
|
254 |
+
if primitive:
|
255 |
+
v = [ctx._rootof1(k,n) for k in range(n) if gcd(k,n) == 1]
|
256 |
+
else:
|
257 |
+
# TODO: this can be done *much* faster
|
258 |
+
v = [ctx._rootof1(k,n) for k in range(n)]
|
259 |
+
finally:
|
260 |
+
ctx.prec = prec
|
261 |
+
return [+x for x in v]
|
262 |
+
|
263 |
+
@defun
|
264 |
+
def arg(ctx, x):
|
265 |
+
x = ctx.convert(x)
|
266 |
+
re = ctx._re(x)
|
267 |
+
im = ctx._im(x)
|
268 |
+
return ctx.atan2(im, re)
|
269 |
+
|
270 |
+
@defun
|
271 |
+
def fabs(ctx, x):
|
272 |
+
return abs(ctx.convert(x))
|
273 |
+
|
274 |
+
@defun
|
275 |
+
def re(ctx, x):
|
276 |
+
x = ctx.convert(x)
|
277 |
+
if hasattr(x, "real"): # py2.5 doesn't have .real/.imag for all numbers
|
278 |
+
return x.real
|
279 |
+
return x
|
280 |
+
|
281 |
+
@defun
|
282 |
+
def im(ctx, x):
|
283 |
+
x = ctx.convert(x)
|
284 |
+
if hasattr(x, "imag"): # py2.5 doesn't have .real/.imag for all numbers
|
285 |
+
return x.imag
|
286 |
+
return ctx.zero
|
287 |
+
|
288 |
+
@defun
|
289 |
+
def conj(ctx, x):
|
290 |
+
x = ctx.convert(x)
|
291 |
+
try:
|
292 |
+
return x.conjugate()
|
293 |
+
except AttributeError:
|
294 |
+
return x
|
295 |
+
|
296 |
+
@defun
|
297 |
+
def polar(ctx, z):
|
298 |
+
return (ctx.fabs(z), ctx.arg(z))
|
299 |
+
|
300 |
+
@defun_wrapped
|
301 |
+
def rect(ctx, r, phi):
|
302 |
+
return r * ctx.mpc(*ctx.cos_sin(phi))
|
303 |
+
|
304 |
+
@defun
|
305 |
+
def log(ctx, x, b=None):
|
306 |
+
if b is None:
|
307 |
+
return ctx.ln(x)
|
308 |
+
wp = ctx.prec + 20
|
309 |
+
return ctx.ln(x, prec=wp) / ctx.ln(b, prec=wp)
|
310 |
+
|
311 |
+
@defun
|
312 |
+
def log10(ctx, x):
|
313 |
+
return ctx.log(x, 10)
|
314 |
+
|
315 |
+
@defun
|
316 |
+
def fmod(ctx, x, y):
|
317 |
+
return ctx.convert(x) % ctx.convert(y)
|
318 |
+
|
319 |
+
@defun
|
320 |
+
def degrees(ctx, x):
|
321 |
+
return x / ctx.degree
|
322 |
+
|
323 |
+
@defun
|
324 |
+
def radians(ctx, x):
|
325 |
+
return x * ctx.degree
|
326 |
+
|
327 |
+
def _lambertw_special(ctx, z, k):
|
328 |
+
# W(0,0) = 0; all other branches are singular
|
329 |
+
if not z:
|
330 |
+
if not k:
|
331 |
+
return z
|
332 |
+
return ctx.ninf + z
|
333 |
+
if z == ctx.inf:
|
334 |
+
if k == 0:
|
335 |
+
return z
|
336 |
+
else:
|
337 |
+
return z + 2*k*ctx.pi*ctx.j
|
338 |
+
if z == ctx.ninf:
|
339 |
+
return (-z) + (2*k+1)*ctx.pi*ctx.j
|
340 |
+
# Some kind of nan or complex inf/nan?
|
341 |
+
return ctx.ln(z)
|
342 |
+
|
343 |
+
import math
|
344 |
+
import cmath
|
345 |
+
|
346 |
+
def _lambertw_approx_hybrid(z, k):
|
347 |
+
imag_sign = 0
|
348 |
+
if hasattr(z, "imag"):
|
349 |
+
x = float(z.real)
|
350 |
+
y = z.imag
|
351 |
+
if y:
|
352 |
+
imag_sign = (-1) ** (y < 0)
|
353 |
+
y = float(y)
|
354 |
+
else:
|
355 |
+
x = float(z)
|
356 |
+
y = 0.0
|
357 |
+
imag_sign = 0
|
358 |
+
# hack to work regardless of whether Python supports -0.0
|
359 |
+
if not y:
|
360 |
+
y = 0.0
|
361 |
+
z = complex(x,y)
|
362 |
+
if k == 0:
|
363 |
+
if -4.0 < y < 4.0 and -1.0 < x < 2.5:
|
364 |
+
if imag_sign:
|
365 |
+
# Taylor series in upper/lower half-plane
|
366 |
+
if y > 1.00: return (0.876+0.645j) + (0.118-0.174j)*(z-(0.75+2.5j))
|
367 |
+
if y > 0.25: return (0.505+0.204j) + (0.375-0.132j)*(z-(0.75+0.5j))
|
368 |
+
if y < -1.00: return (0.876-0.645j) + (0.118+0.174j)*(z-(0.75-2.5j))
|
369 |
+
if y < -0.25: return (0.505-0.204j) + (0.375+0.132j)*(z-(0.75-0.5j))
|
370 |
+
# Taylor series near -1
|
371 |
+
if x < -0.5:
|
372 |
+
if imag_sign >= 0:
|
373 |
+
return (-0.318+1.34j) + (-0.697-0.593j)*(z+1)
|
374 |
+
else:
|
375 |
+
return (-0.318-1.34j) + (-0.697+0.593j)*(z+1)
|
376 |
+
# return real type
|
377 |
+
r = -0.367879441171442
|
378 |
+
if (not imag_sign) and x > r:
|
379 |
+
z = x
|
380 |
+
# Singularity near -1/e
|
381 |
+
if x < -0.2:
|
382 |
+
return -1 + 2.33164398159712*(z-r)**0.5 - 1.81218788563936*(z-r)
|
383 |
+
# Taylor series near 0
|
384 |
+
if x < 0.5: return z
|
385 |
+
# Simple linear approximation
|
386 |
+
return 0.2 + 0.3*z
|
387 |
+
if (not imag_sign) and x > 0.0:
|
388 |
+
L1 = math.log(x); L2 = math.log(L1)
|
389 |
+
else:
|
390 |
+
L1 = cmath.log(z); L2 = cmath.log(L1)
|
391 |
+
elif k == -1:
|
392 |
+
# return real type
|
393 |
+
r = -0.367879441171442
|
394 |
+
if (not imag_sign) and r < x < 0.0:
|
395 |
+
z = x
|
396 |
+
if (imag_sign >= 0) and y < 0.1 and -0.6 < x < -0.2:
|
397 |
+
return -1 - 2.33164398159712*(z-r)**0.5 - 1.81218788563936*(z-r)
|
398 |
+
if (not imag_sign) and -0.2 <= x < 0.0:
|
399 |
+
L1 = math.log(-x)
|
400 |
+
return L1 - math.log(-L1)
|
401 |
+
else:
|
402 |
+
if imag_sign == -1 and (not y) and x < 0.0:
|
403 |
+
L1 = cmath.log(z) - 3.1415926535897932j
|
404 |
+
else:
|
405 |
+
L1 = cmath.log(z) - 6.2831853071795865j
|
406 |
+
L2 = cmath.log(L1)
|
407 |
+
return L1 - L2 + L2/L1 + L2*(L2-2)/(2*L1**2)
|
408 |
+
|
409 |
+
def _lambertw_series(ctx, z, k, tol):
|
410 |
+
"""
|
411 |
+
Return rough approximation for W_k(z) from an asymptotic series,
|
412 |
+
sufficiently accurate for the Halley iteration to converge to
|
413 |
+
the correct value.
|
414 |
+
"""
|
415 |
+
magz = ctx.mag(z)
|
416 |
+
if (-10 < magz < 900) and (-1000 < k < 1000):
|
417 |
+
# Near the branch point at -1/e
|
418 |
+
if magz < 1 and abs(z+0.36787944117144) < 0.05:
|
419 |
+
if k == 0 or (k == -1 and ctx._im(z) >= 0) or \
|
420 |
+
(k == 1 and ctx._im(z) < 0):
|
421 |
+
delta = ctx.sum_accurately(lambda: [z, ctx.exp(-1)])
|
422 |
+
cancellation = -ctx.mag(delta)
|
423 |
+
ctx.prec += cancellation
|
424 |
+
# Use series given in Corless et al.
|
425 |
+
p = ctx.sqrt(2*(ctx.e*z+1))
|
426 |
+
ctx.prec -= cancellation
|
427 |
+
u = {0:ctx.mpf(-1), 1:ctx.mpf(1)}
|
428 |
+
a = {0:ctx.mpf(2), 1:ctx.mpf(-1)}
|
429 |
+
if k != 0:
|
430 |
+
p = -p
|
431 |
+
s = ctx.zero
|
432 |
+
# The series converges, so we could use it directly, but unless
|
433 |
+
# *extremely* close, it is better to just use the first few
|
434 |
+
# terms to get a good approximation for the iteration
|
435 |
+
for l in xrange(max(2,cancellation)):
|
436 |
+
if l not in u:
|
437 |
+
a[l] = ctx.fsum(u[j]*u[l+1-j] for j in xrange(2,l))
|
438 |
+
u[l] = (l-1)*(u[l-2]/2+a[l-2]/4)/(l+1)-a[l]/2-u[l-1]/(l+1)
|
439 |
+
term = u[l] * p**l
|
440 |
+
s += term
|
441 |
+
if ctx.mag(term) < -tol:
|
442 |
+
return s, True
|
443 |
+
l += 1
|
444 |
+
ctx.prec += cancellation//2
|
445 |
+
return s, False
|
446 |
+
if k == 0 or k == -1:
|
447 |
+
return _lambertw_approx_hybrid(z, k), False
|
448 |
+
if k == 0:
|
449 |
+
if magz < -1:
|
450 |
+
return z*(1-z), False
|
451 |
+
L1 = ctx.ln(z)
|
452 |
+
L2 = ctx.ln(L1)
|
453 |
+
elif k == -1 and (not ctx._im(z)) and (-0.36787944117144 < ctx._re(z) < 0):
|
454 |
+
L1 = ctx.ln(-z)
|
455 |
+
return L1 - ctx.ln(-L1), False
|
456 |
+
else:
|
457 |
+
# This holds both as z -> 0 and z -> inf.
|
458 |
+
# Relative error is O(1/log(z)).
|
459 |
+
L1 = ctx.ln(z) + 2j*ctx.pi*k
|
460 |
+
L2 = ctx.ln(L1)
|
461 |
+
return L1 - L2 + L2/L1 + L2*(L2-2)/(2*L1**2), False
|
462 |
+
|
463 |
+
@defun
|
464 |
+
def lambertw(ctx, z, k=0):
|
465 |
+
z = ctx.convert(z)
|
466 |
+
k = int(k)
|
467 |
+
if not ctx.isnormal(z):
|
468 |
+
return _lambertw_special(ctx, z, k)
|
469 |
+
prec = ctx.prec
|
470 |
+
ctx.prec += 20 + ctx.mag(k or 1)
|
471 |
+
wp = ctx.prec
|
472 |
+
tol = wp - 5
|
473 |
+
w, done = _lambertw_series(ctx, z, k, tol)
|
474 |
+
if not done:
|
475 |
+
# Use Halley iteration to solve w*exp(w) = z
|
476 |
+
two = ctx.mpf(2)
|
477 |
+
for i in xrange(100):
|
478 |
+
ew = ctx.exp(w)
|
479 |
+
wew = w*ew
|
480 |
+
wewz = wew-z
|
481 |
+
wn = w - wewz/(wew+ew-(w+two)*wewz/(two*w+two))
|
482 |
+
if ctx.mag(wn-w) <= ctx.mag(wn) - tol:
|
483 |
+
w = wn
|
484 |
+
break
|
485 |
+
else:
|
486 |
+
w = wn
|
487 |
+
if i == 100:
|
488 |
+
ctx.warn("Lambert W iteration failed to converge for z = %s" % z)
|
489 |
+
ctx.prec = prec
|
490 |
+
return +w
|
491 |
+
|
492 |
+
@defun_wrapped
|
493 |
+
def bell(ctx, n, x=1):
|
494 |
+
x = ctx.convert(x)
|
495 |
+
if not n:
|
496 |
+
if ctx.isnan(x):
|
497 |
+
return x
|
498 |
+
return type(x)(1)
|
499 |
+
if ctx.isinf(x) or ctx.isinf(n) or ctx.isnan(x) or ctx.isnan(n):
|
500 |
+
return x**n
|
501 |
+
if n == 1: return x
|
502 |
+
if n == 2: return x*(x+1)
|
503 |
+
if x == 0: return ctx.sincpi(n)
|
504 |
+
return _polyexp(ctx, n, x, True) / ctx.exp(x)
|
505 |
+
|
506 |
+
def _polyexp(ctx, n, x, extra=False):
|
507 |
+
def _terms():
|
508 |
+
if extra:
|
509 |
+
yield ctx.sincpi(n)
|
510 |
+
t = x
|
511 |
+
k = 1
|
512 |
+
while 1:
|
513 |
+
yield k**n * t
|
514 |
+
k += 1
|
515 |
+
t = t*x/k
|
516 |
+
return ctx.sum_accurately(_terms, check_step=4)
|
517 |
+
|
518 |
+
@defun_wrapped
|
519 |
+
def polyexp(ctx, s, z):
|
520 |
+
if ctx.isinf(z) or ctx.isinf(s) or ctx.isnan(z) or ctx.isnan(s):
|
521 |
+
return z**s
|
522 |
+
if z == 0: return z*s
|
523 |
+
if s == 0: return ctx.expm1(z)
|
524 |
+
if s == 1: return ctx.exp(z)*z
|
525 |
+
if s == 2: return ctx.exp(z)*z*(z+1)
|
526 |
+
return _polyexp(ctx, s, z)
|
527 |
+
|
528 |
+
@defun_wrapped
|
529 |
+
def cyclotomic(ctx, n, z):
|
530 |
+
n = int(n)
|
531 |
+
if n < 0:
|
532 |
+
raise ValueError("n cannot be negative")
|
533 |
+
p = ctx.one
|
534 |
+
if n == 0:
|
535 |
+
return p
|
536 |
+
if n == 1:
|
537 |
+
return z - p
|
538 |
+
if n == 2:
|
539 |
+
return z + p
|
540 |
+
# Use divisor product representation. Unfortunately, this sometimes
|
541 |
+
# includes singularities for roots of unity, which we have to cancel out.
|
542 |
+
# Matching zeros/poles pairwise, we have (1-z^a)/(1-z^b) ~ a/b + O(z-1).
|
543 |
+
a_prod = 1
|
544 |
+
b_prod = 1
|
545 |
+
num_zeros = 0
|
546 |
+
num_poles = 0
|
547 |
+
for d in range(1,n+1):
|
548 |
+
if not n % d:
|
549 |
+
w = ctx.moebius(n//d)
|
550 |
+
# Use powm1 because it is important that we get 0 only
|
551 |
+
# if it really is exactly 0
|
552 |
+
b = -ctx.powm1(z, d)
|
553 |
+
if b:
|
554 |
+
p *= b**w
|
555 |
+
else:
|
556 |
+
if w == 1:
|
557 |
+
a_prod *= d
|
558 |
+
num_zeros += 1
|
559 |
+
elif w == -1:
|
560 |
+
b_prod *= d
|
561 |
+
num_poles += 1
|
562 |
+
#print n, num_zeros, num_poles
|
563 |
+
if num_zeros:
|
564 |
+
if num_zeros > num_poles:
|
565 |
+
p *= 0
|
566 |
+
else:
|
567 |
+
p *= a_prod
|
568 |
+
p /= b_prod
|
569 |
+
return p
|
570 |
+
|
571 |
+
@defun
|
572 |
+
def mangoldt(ctx, n):
|
573 |
+
r"""
|
574 |
+
Evaluates the von Mangoldt function `\Lambda(n) = \log p`
|
575 |
+
if `n = p^k` a power of a prime, and `\Lambda(n) = 0` otherwise.
|
576 |
+
|
577 |
+
**Examples**
|
578 |
+
|
579 |
+
>>> from mpmath import *
|
580 |
+
>>> mp.dps = 25; mp.pretty = True
|
581 |
+
>>> [mangoldt(n) for n in range(-2,3)]
|
582 |
+
[0.0, 0.0, 0.0, 0.0, 0.6931471805599453094172321]
|
583 |
+
>>> mangoldt(6)
|
584 |
+
0.0
|
585 |
+
>>> mangoldt(7)
|
586 |
+
1.945910149055313305105353
|
587 |
+
>>> mangoldt(8)
|
588 |
+
0.6931471805599453094172321
|
589 |
+
>>> fsum(mangoldt(n) for n in range(101))
|
590 |
+
94.04531122935739224600493
|
591 |
+
>>> fsum(mangoldt(n) for n in range(10001))
|
592 |
+
10013.39669326311478372032
|
593 |
+
|
594 |
+
"""
|
595 |
+
n = int(n)
|
596 |
+
if n < 2:
|
597 |
+
return ctx.zero
|
598 |
+
if n % 2 == 0:
|
599 |
+
# Must be a power of two
|
600 |
+
if n & (n-1) == 0:
|
601 |
+
return +ctx.ln2
|
602 |
+
else:
|
603 |
+
return ctx.zero
|
604 |
+
# TODO: the following could be generalized into a perfect
|
605 |
+
# power testing function
|
606 |
+
# ---
|
607 |
+
# Look for a small factor
|
608 |
+
for p in (3,5,7,11,13,17,19,23,29,31):
|
609 |
+
if not n % p:
|
610 |
+
q, r = n // p, 0
|
611 |
+
while q > 1:
|
612 |
+
q, r = divmod(q, p)
|
613 |
+
if r:
|
614 |
+
return ctx.zero
|
615 |
+
return ctx.ln(p)
|
616 |
+
if ctx.isprime(n):
|
617 |
+
return ctx.ln(n)
|
618 |
+
# Obviously, we could use arbitrary-precision arithmetic for this...
|
619 |
+
if n > 10**30:
|
620 |
+
raise NotImplementedError
|
621 |
+
k = 2
|
622 |
+
while 1:
|
623 |
+
p = int(n**(1./k) + 0.5)
|
624 |
+
if p < 2:
|
625 |
+
return ctx.zero
|
626 |
+
if p ** k == n:
|
627 |
+
if ctx.isprime(p):
|
628 |
+
return ctx.ln(p)
|
629 |
+
k += 1
|
630 |
+
|
631 |
+
@defun
|
632 |
+
def stirling1(ctx, n, k, exact=False):
|
633 |
+
v = ctx._stirling1(int(n), int(k))
|
634 |
+
if exact:
|
635 |
+
return int(v)
|
636 |
+
else:
|
637 |
+
return ctx.mpf(v)
|
638 |
+
|
639 |
+
@defun
|
640 |
+
def stirling2(ctx, n, k, exact=False):
|
641 |
+
v = ctx._stirling2(int(n), int(k))
|
642 |
+
if exact:
|
643 |
+
return int(v)
|
644 |
+
else:
|
645 |
+
return ctx.mpf(v)
|
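The functions.py diff above defines the SpecialFunctions mixin plus high-level wrappers (reciprocal trig functions, Lambert W, cancellation-safe helpers such as expm1/log1p/powm1, roots of unity, cyclotomic polynomials, the von Mangoldt function, and Stirling numbers). The snippet below is a minimal usage sketch (not part of the diff) of the corresponding public mpmath calls, assuming a standard installation.

# Minimal usage sketch; all names are standard mpmath exports.
from mpmath import (mp, lambertw, sinc, sincpi, expm1, log1p, powm1,
                    unitroots, cyclotomic, mangoldt)

mp.dps = 25
print(lambertw(1))                 # principal branch W_0(1)
print(lambertw(-0.25, -1))         # branch k = -1, real for -1/e < z < 0
print(sinc(0.5), sincpi(0.5))      # sin(x)/x and the normalized variant
print(expm1(1e-30), log1p(1e-30))  # accurate near 0, unlike exp(x)-1 / log(1+x)
print(powm1(1.0000001, 3))         # x**y - 1 without catastrophic cancellation
print(unitroots(5))                # the five fifth roots of unity
print(cyclotomic(6, 2))            # Phi_6(2) = 3
print(mangoldt(8))                 # von Mangoldt Lambda(8) = log 2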
env-llmeval/lib/python3.10/site-packages/mpmath/functions/hypergeometric.py
ADDED
@@ -0,0 +1,1413 @@
1 |
+
from ..libmp.backend import xrange
|
2 |
+
from .functions import defun, defun_wrapped
|
3 |
+
|
4 |
+
def _check_need_perturb(ctx, terms, prec, discard_known_zeros):
|
5 |
+
perturb = recompute = False
|
6 |
+
extraprec = 0
|
7 |
+
discard = []
|
8 |
+
for term_index, term in enumerate(terms):
|
9 |
+
w_s, c_s, alpha_s, beta_s, a_s, b_s, z = term
|
10 |
+
have_singular_nongamma_weight = False
|
11 |
+
# Avoid division by zero in leading factors (TODO:
|
12 |
+
# also check for near division by zero?)
|
13 |
+
for k, w in enumerate(w_s):
|
14 |
+
if not w:
|
15 |
+
if ctx.re(c_s[k]) <= 0 and c_s[k]:
|
16 |
+
perturb = recompute = True
|
17 |
+
have_singular_nongamma_weight = True
|
18 |
+
pole_count = [0, 0, 0]
|
19 |
+
# Check for gamma and series poles and near-poles
|
20 |
+
for data_index, data in enumerate([alpha_s, beta_s, b_s]):
|
21 |
+
for i, x in enumerate(data):
|
22 |
+
n, d = ctx.nint_distance(x)
|
23 |
+
# Poles
|
24 |
+
if n > 0:
|
25 |
+
continue
|
26 |
+
if d == ctx.ninf:
|
27 |
+
# OK if we have a polynomial
|
28 |
+
# ------------------------------
|
29 |
+
ok = False
|
30 |
+
if data_index == 2:
|
31 |
+
for u in a_s:
|
32 |
+
if ctx.isnpint(u) and u >= int(n):
|
33 |
+
ok = True
|
34 |
+
break
|
35 |
+
if ok:
|
36 |
+
continue
|
37 |
+
pole_count[data_index] += 1
|
38 |
+
# ------------------------------
|
39 |
+
#perturb = recompute = True
|
40 |
+
#return perturb, recompute, extraprec
|
41 |
+
elif d < -4:
|
42 |
+
extraprec += -d
|
43 |
+
recompute = True
|
44 |
+
if discard_known_zeros and pole_count[1] > pole_count[0] + pole_count[2] \
|
45 |
+
and not have_singular_nongamma_weight:
|
46 |
+
discard.append(term_index)
|
47 |
+
elif sum(pole_count):
|
48 |
+
perturb = recompute = True
|
49 |
+
return perturb, recompute, extraprec, discard
|
50 |
+
|
51 |
+
_hypercomb_msg = """
|
52 |
+
hypercomb() failed to converge to the requested %i bits of accuracy
|
53 |
+
using a working precision of %i bits. The function value may be zero or
|
54 |
+
infinite; try passing zeroprec=N or infprec=M to bound finite values between
|
55 |
+
2^(-N) and 2^M. Otherwise try a higher maxprec or maxterms.
|
56 |
+
"""
|
57 |
+
|
58 |
+
@defun
|
59 |
+
def hypercomb(ctx, function, params=[], discard_known_zeros=True, **kwargs):
|
60 |
+
orig = ctx.prec
|
61 |
+
sumvalue = ctx.zero
|
62 |
+
dist = ctx.nint_distance
|
63 |
+
ninf = ctx.ninf
|
64 |
+
orig_params = params[:]
|
65 |
+
verbose = kwargs.get('verbose', False)
|
66 |
+
maxprec = kwargs.get('maxprec', ctx._default_hyper_maxprec(orig))
|
67 |
+
kwargs['maxprec'] = maxprec # For calls to hypsum
|
68 |
+
zeroprec = kwargs.get('zeroprec')
|
69 |
+
infprec = kwargs.get('infprec')
|
70 |
+
perturbed_reference_value = None
|
71 |
+
hextra = 0
|
72 |
+
try:
|
73 |
+
while 1:
|
74 |
+
ctx.prec += 10
|
75 |
+
if ctx.prec > maxprec:
|
76 |
+
raise ValueError(_hypercomb_msg % (orig, ctx.prec))
|
77 |
+
orig2 = ctx.prec
|
78 |
+
params = orig_params[:]
|
79 |
+
terms = function(*params)
|
80 |
+
if verbose:
|
81 |
+
print()
|
82 |
+
print("ENTERING hypercomb main loop")
|
83 |
+
print("prec =", ctx.prec)
|
84 |
+
print("hextra", hextra)
|
85 |
+
perturb, recompute, extraprec, discard = \
|
86 |
+
_check_need_perturb(ctx, terms, orig, discard_known_zeros)
|
87 |
+
ctx.prec += extraprec
|
88 |
+
if perturb:
|
89 |
+
if "hmag" in kwargs:
|
90 |
+
hmag = kwargs["hmag"]
|
91 |
+
elif ctx._fixed_precision:
|
92 |
+
hmag = int(ctx.prec*0.3)
|
93 |
+
else:
|
94 |
+
hmag = orig + 10 + hextra
|
95 |
+
h = ctx.ldexp(ctx.one, -hmag)
|
96 |
+
ctx.prec = orig2 + 10 + hmag + 10
|
97 |
+
for k in range(len(params)):
|
98 |
+
params[k] += h
|
99 |
+
# Heuristically ensure that the perturbations
|
100 |
+
# are "independent" so that two perturbations
|
101 |
+
# don't accidentally cancel each other out
|
102 |
+
# in a subtraction.
|
103 |
+
h += h/(k+1)
|
104 |
+
if recompute:
|
105 |
+
terms = function(*params)
|
106 |
+
if discard_known_zeros:
|
107 |
+
terms = [term for (i, term) in enumerate(terms) if i not in discard]
|
108 |
+
if not terms:
|
109 |
+
return ctx.zero
|
110 |
+
evaluated_terms = []
|
111 |
+
for term_index, term_data in enumerate(terms):
|
112 |
+
w_s, c_s, alpha_s, beta_s, a_s, b_s, z = term_data
|
113 |
+
if verbose:
|
114 |
+
print()
|
115 |
+
print(" Evaluating term %i/%i : %iF%i" % \
|
116 |
+
(term_index+1, len(terms), len(a_s), len(b_s)))
|
117 |
+
print(" powers", ctx.nstr(w_s), ctx.nstr(c_s))
|
118 |
+
print(" gamma", ctx.nstr(alpha_s), ctx.nstr(beta_s))
|
119 |
+
print(" hyper", ctx.nstr(a_s), ctx.nstr(b_s))
|
120 |
+
print(" z", ctx.nstr(z))
|
121 |
+
#v = ctx.hyper(a_s, b_s, z, **kwargs)
|
122 |
+
#for a in alpha_s: v *= ctx.gamma(a)
|
123 |
+
#for b in beta_s: v *= ctx.rgamma(b)
|
124 |
+
#for w, c in zip(w_s, c_s): v *= ctx.power(w, c)
|
125 |
+
v = ctx.fprod([ctx.hyper(a_s, b_s, z, **kwargs)] + \
|
126 |
+
[ctx.gamma(a) for a in alpha_s] + \
|
127 |
+
[ctx.rgamma(b) for b in beta_s] + \
|
128 |
+
[ctx.power(w,c) for (w,c) in zip(w_s,c_s)])
|
129 |
+
if verbose:
|
130 |
+
print(" Value:", v)
|
131 |
+
evaluated_terms.append(v)
|
132 |
+
|
133 |
+
if len(terms) == 1 and (not perturb):
|
134 |
+
sumvalue = evaluated_terms[0]
|
135 |
+
break
|
136 |
+
|
137 |
+
if ctx._fixed_precision:
|
138 |
+
sumvalue = ctx.fsum(evaluated_terms)
|
139 |
+
break
|
140 |
+
|
141 |
+
sumvalue = ctx.fsum(evaluated_terms)
|
142 |
+
term_magnitudes = [ctx.mag(x) for x in evaluated_terms]
|
143 |
+
max_magnitude = max(term_magnitudes)
|
144 |
+
sum_magnitude = ctx.mag(sumvalue)
|
145 |
+
cancellation = max_magnitude - sum_magnitude
|
146 |
+
if verbose:
|
147 |
+
print()
|
148 |
+
print(" Cancellation:", cancellation, "bits")
|
149 |
+
print(" Increased precision:", ctx.prec - orig, "bits")
|
150 |
+
|
151 |
+
precision_ok = cancellation < ctx.prec - orig
|
152 |
+
|
153 |
+
if zeroprec is None:
|
154 |
+
zero_ok = False
|
155 |
+
else:
|
156 |
+
zero_ok = max_magnitude - ctx.prec < -zeroprec
|
157 |
+
if infprec is None:
|
158 |
+
inf_ok = False
|
159 |
+
else:
|
160 |
+
inf_ok = max_magnitude > infprec
|
161 |
+
|
162 |
+
if precision_ok and (not perturb) or ctx.isnan(cancellation):
|
163 |
+
break
|
164 |
+
elif precision_ok:
|
165 |
+
if perturbed_reference_value is None:
|
166 |
+
hextra += 20
|
167 |
+
perturbed_reference_value = sumvalue
|
168 |
+
continue
|
169 |
+
elif ctx.mag(sumvalue - perturbed_reference_value) <= \
|
170 |
+
ctx.mag(sumvalue) - orig:
|
171 |
+
break
|
172 |
+
elif zero_ok:
|
173 |
+
sumvalue = ctx.zero
|
174 |
+
break
|
175 |
+
elif inf_ok:
|
176 |
+
sumvalue = ctx.inf
|
177 |
+
break
|
178 |
+
elif 'hmag' in kwargs:
|
179 |
+
break
|
180 |
+
else:
|
181 |
+
hextra *= 2
|
182 |
+
perturbed_reference_value = sumvalue
|
183 |
+
# Increase precision
|
184 |
+
else:
|
185 |
+
increment = min(max(cancellation, orig//2), max(extraprec,orig))
|
186 |
+
ctx.prec += increment
|
187 |
+
if verbose:
|
188 |
+
print(" Must start over with increased precision")
|
189 |
+
continue
|
190 |
+
finally:
|
191 |
+
ctx.prec = orig
|
192 |
+
return +sumvalue
|
193 |
+
|
194 |
+
@defun
|
195 |
+
def hyper(ctx, a_s, b_s, z, **kwargs):
|
196 |
+
"""
|
197 |
+
Hypergeometric function, general case.
|
198 |
+
"""
|
199 |
+
z = ctx.convert(z)
|
200 |
+
p = len(a_s)
|
201 |
+
q = len(b_s)
|
202 |
+
a_s = [ctx._convert_param(a) for a in a_s]
|
203 |
+
b_s = [ctx._convert_param(b) for b in b_s]
|
204 |
+
# Reduce degree by eliminating common parameters
|
205 |
+
if kwargs.get('eliminate', True):
|
206 |
+
elim_nonpositive = kwargs.get('eliminate_all', False)
|
207 |
+
i = 0
|
208 |
+
while i < q and a_s:
|
209 |
+
b = b_s[i]
|
210 |
+
if b in a_s and (elim_nonpositive or not ctx.isnpint(b[0])):
|
211 |
+
a_s.remove(b)
|
212 |
+
b_s.remove(b)
|
213 |
+
p -= 1
|
214 |
+
q -= 1
|
215 |
+
else:
|
216 |
+
i += 1
|
217 |
+
# Handle special cases
|
218 |
+
if p == 0:
|
219 |
+
if q == 1: return ctx._hyp0f1(b_s, z, **kwargs)
|
220 |
+
elif q == 0: return ctx.exp(z)
|
221 |
+
elif p == 1:
|
222 |
+
if q == 1: return ctx._hyp1f1(a_s, b_s, z, **kwargs)
|
223 |
+
elif q == 2: return ctx._hyp1f2(a_s, b_s, z, **kwargs)
|
224 |
+
elif q == 0: return ctx._hyp1f0(a_s[0][0], z)
|
225 |
+
elif p == 2:
|
226 |
+
if q == 1: return ctx._hyp2f1(a_s, b_s, z, **kwargs)
|
227 |
+
elif q == 2: return ctx._hyp2f2(a_s, b_s, z, **kwargs)
|
228 |
+
elif q == 3: return ctx._hyp2f3(a_s, b_s, z, **kwargs)
|
229 |
+
elif q == 0: return ctx._hyp2f0(a_s, b_s, z, **kwargs)
|
230 |
+
elif p == q+1:
|
231 |
+
return ctx._hypq1fq(p, q, a_s, b_s, z, **kwargs)
|
232 |
+
elif p > q+1 and not kwargs.get('force_series'):
|
233 |
+
return ctx._hyp_borel(p, q, a_s, b_s, z, **kwargs)
|
234 |
+
coeffs, types = zip(*(a_s+b_s))
|
235 |
+
return ctx.hypsum(p, q, types, coeffs, z, **kwargs)
|
236 |
+
|
237 |
+
@defun
|
238 |
+
def hyp0f1(ctx,b,z,**kwargs):
|
239 |
+
return ctx.hyper([],[b],z,**kwargs)
|
240 |
+
|
241 |
+
@defun
|
242 |
+
def hyp1f1(ctx,a,b,z,**kwargs):
|
243 |
+
return ctx.hyper([a],[b],z,**kwargs)
|
244 |
+
|
245 |
+
@defun
|
246 |
+
def hyp1f2(ctx,a1,b1,b2,z,**kwargs):
|
247 |
+
return ctx.hyper([a1],[b1,b2],z,**kwargs)
|
248 |
+
|
249 |
+
@defun
|
250 |
+
def hyp2f1(ctx,a,b,c,z,**kwargs):
|
251 |
+
return ctx.hyper([a,b],[c],z,**kwargs)
|
252 |
+
|
253 |
+
@defun
|
254 |
+
def hyp2f2(ctx,a1,a2,b1,b2,z,**kwargs):
|
255 |
+
return ctx.hyper([a1,a2],[b1,b2],z,**kwargs)
|
256 |
+
|
257 |
+
@defun
|
258 |
+
def hyp2f3(ctx,a1,a2,b1,b2,b3,z,**kwargs):
|
259 |
+
return ctx.hyper([a1,a2],[b1,b2,b3],z,**kwargs)
|
260 |
+
|
261 |
+
@defun
|
262 |
+
def hyp2f0(ctx,a,b,z,**kwargs):
|
263 |
+
return ctx.hyper([a,b],[],z,**kwargs)
|
264 |
+
|
265 |
+
@defun
|
266 |
+
def hyp3f2(ctx,a1,a2,a3,b1,b2,z,**kwargs):
|
267 |
+
return ctx.hyper([a1,a2,a3],[b1,b2],z,**kwargs)
|
268 |
+
|
269 |
+
@defun_wrapped
|
270 |
+
def _hyp1f0(ctx, a, z):
|
271 |
+
return (1-z) ** (-a)
|
272 |
+
|
273 |
+
@defun
|
274 |
+
def _hyp0f1(ctx, b_s, z, **kwargs):
|
275 |
+
(b, btype), = b_s
|
276 |
+
if z:
|
277 |
+
magz = ctx.mag(z)
|
278 |
+
else:
|
279 |
+
magz = 0
|
280 |
+
if magz >= 8 and not kwargs.get('force_series'):
|
281 |
+
try:
|
282 |
+
# http://functions.wolfram.com/HypergeometricFunctions/
|
283 |
+
# Hypergeometric0F1/06/02/03/0004/
|
284 |
+
# TODO: handle the all-real case more efficiently!
|
285 |
+
# TODO: figure out how much precision is needed (exponential growth)
|
286 |
+
orig = ctx.prec
|
287 |
+
try:
|
288 |
+
ctx.prec += 12 + magz//2
|
289 |
+
def h():
|
290 |
+
w = ctx.sqrt(-z)
|
291 |
+
jw = ctx.j*w
|
292 |
+
u = 1/(4*jw)
|
293 |
+
c = ctx.mpq_1_2 - b
|
294 |
+
E = ctx.exp(2*jw)
|
295 |
+
T1 = ([-jw,E], [c,-1], [], [], [b-ctx.mpq_1_2, ctx.mpq_3_2-b], [], -u)
|
296 |
+
T2 = ([jw,E], [c,1], [], [], [b-ctx.mpq_1_2, ctx.mpq_3_2-b], [], u)
|
297 |
+
return T1, T2
|
298 |
+
v = ctx.hypercomb(h, [], force_series=True)
|
299 |
+
v = ctx.gamma(b)/(2*ctx.sqrt(ctx.pi))*v
|
300 |
+
finally:
|
301 |
+
ctx.prec = orig
|
302 |
+
if ctx._is_real_type(b) and ctx._is_real_type(z):
|
303 |
+
v = ctx._re(v)
|
304 |
+
return +v
|
305 |
+
except ctx.NoConvergence:
|
306 |
+
pass
|
307 |
+
return ctx.hypsum(0, 1, (btype,), [b], z, **kwargs)
|
308 |
+
|
309 |
+
@defun
|
310 |
+
def _hyp1f1(ctx, a_s, b_s, z, **kwargs):
|
311 |
+
(a, atype), = a_s
|
312 |
+
(b, btype), = b_s
|
313 |
+
if not z:
|
314 |
+
return ctx.one+z
|
315 |
+
magz = ctx.mag(z)
|
316 |
+
if magz >= 7 and not (ctx.isint(a) and ctx.re(a) <= 0):
|
317 |
+
if ctx.isinf(z):
|
318 |
+
if ctx.sign(a) == ctx.sign(b) == ctx.sign(z) == 1:
|
319 |
+
return ctx.inf
|
320 |
+
return ctx.nan * z
|
321 |
+
try:
|
322 |
+
try:
|
323 |
+
ctx.prec += magz
|
324 |
+
sector = ctx._im(z) < 0
|
325 |
+
def h(a,b):
|
326 |
+
if sector:
|
327 |
+
E = ctx.expjpi(ctx.fneg(a, exact=True))
|
328 |
+
else:
|
329 |
+
E = ctx.expjpi(a)
|
330 |
+
rz = 1/z
|
331 |
+
T1 = ([E,z], [1,-a], [b], [b-a], [a, 1+a-b], [], -rz)
|
332 |
+
T2 = ([ctx.exp(z),z], [1,a-b], [b], [a], [b-a, 1-a], [], rz)
|
333 |
+
return T1, T2
|
334 |
+
v = ctx.hypercomb(h, [a,b], force_series=True)
|
335 |
+
if ctx._is_real_type(a) and ctx._is_real_type(b) and ctx._is_real_type(z):
|
336 |
+
v = ctx._re(v)
|
337 |
+
return +v
|
338 |
+
except ctx.NoConvergence:
|
339 |
+
pass
|
340 |
+
finally:
|
341 |
+
ctx.prec -= magz
|
342 |
+
v = ctx.hypsum(1, 1, (atype, btype), [a, b], z, **kwargs)
|
343 |
+
return v
|
344 |
+
|
345 |
+
def _hyp2f1_gosper(ctx,a,b,c,z,**kwargs):
|
346 |
+
# Use Gosper's recurrence
|
347 |
+
# See http://www.math.utexas.edu/pipermail/maxima/2006/000126.html
|
348 |
+
_a,_b,_c,_z = a, b, c, z
|
349 |
+
orig = ctx.prec
|
350 |
+
maxprec = kwargs.get('maxprec', 100*orig)
|
351 |
+
extra = 10
|
352 |
+
while 1:
|
353 |
+
ctx.prec = orig + extra
|
354 |
+
#a = ctx.convert(_a)
|
355 |
+
#b = ctx.convert(_b)
|
356 |
+
#c = ctx.convert(_c)
|
357 |
+
z = ctx.convert(_z)
|
358 |
+
d = ctx.mpf(0)
|
359 |
+
e = ctx.mpf(1)
|
360 |
+
f = ctx.mpf(0)
|
361 |
+
k = 0
|
362 |
+
# Common subexpression elimination, unfortunately making
|
363 |
+
# things a bit unreadable. The formula is quite messy to begin
|
364 |
+
# with, though...
|
365 |
+
abz = a*b*z
|
366 |
+
ch = c * ctx.mpq_1_2
|
367 |
+
c1h = (c+1) * ctx.mpq_1_2
|
368 |
+
nz = 1-z
|
369 |
+
g = z/nz
|
370 |
+
abg = a*b*g
|
371 |
+
cba = c-b-a
|
372 |
+
z2 = z-2
|
373 |
+
tol = -ctx.prec - 10
|
374 |
+
nstr = ctx.nstr
|
375 |
+
nprint = ctx.nprint
|
376 |
+
mag = ctx.mag
|
377 |
+
maxmag = ctx.ninf
|
378 |
+
while 1:
|
379 |
+
kch = k+ch
|
380 |
+
kakbz = (k+a)*(k+b)*z / (4*(k+1)*kch*(k+c1h))
|
381 |
+
d1 = kakbz*(e-(k+cba)*d*g)
|
382 |
+
e1 = kakbz*(d*abg+(k+c)*e)
|
383 |
+
ft = d*(k*(cba*z+k*z2-c)-abz)/(2*kch*nz)
|
384 |
+
f1 = f + e - ft
|
385 |
+
maxmag = max(maxmag, mag(f1))
|
386 |
+
if mag(f1-f) < tol:
|
387 |
+
break
|
388 |
+
d, e, f = d1, e1, f1
|
389 |
+
k += 1
|
390 |
+
cancellation = maxmag - mag(f1)
|
391 |
+
if cancellation < extra:
|
392 |
+
break
|
393 |
+
else:
|
394 |
+
extra += cancellation
|
395 |
+
if extra > maxprec:
|
396 |
+
raise ctx.NoConvergence
|
397 |
+
return f1
|
398 |
+
|
399 |
+
@defun
|
400 |
+
def _hyp2f1(ctx, a_s, b_s, z, **kwargs):
|
401 |
+
(a, atype), (b, btype) = a_s
|
402 |
+
(c, ctype), = b_s
|
403 |
+
if z == 1:
|
404 |
+
# TODO: the following logic can be simplified
|
405 |
+
convergent = ctx.re(c-a-b) > 0
|
406 |
+
finite = (ctx.isint(a) and a <= 0) or (ctx.isint(b) and b <= 0)
|
407 |
+
zerodiv = ctx.isint(c) and c <= 0 and not \
|
408 |
+
((ctx.isint(a) and c <= a <= 0) or (ctx.isint(b) and c <= b <= 0))
|
409 |
+
#print "bz", a, b, c, z, convergent, finite, zerodiv
|
410 |
+
# Gauss's theorem gives the value if convergent
|
411 |
+
if (convergent or finite) and not zerodiv:
|
412 |
+
return ctx.gammaprod([c, c-a-b], [c-a, c-b], _infsign=True)
|
413 |
+
# Otherwise, there is a pole and we take the
|
414 |
+
# sign to be that when approaching from below
|
415 |
+
# XXX: this evaluation is not necessarily correct in all cases
|
416 |
+
return ctx.hyp2f1(a,b,c,1-ctx.eps*2) * ctx.inf
|
417 |
+
|
418 |
+
# Equal to 1 (first term), unless there is a subsequent
|
419 |
+
# division by zero
|
420 |
+
if not z:
|
421 |
+
# Division by zero but power of z is higher than
|
422 |
+
# first order so cancels
|
423 |
+
if c or a == 0 or b == 0:
|
424 |
+
return 1+z
|
425 |
+
# Indeterminate
|
426 |
+
return ctx.nan
|
427 |
+
|
428 |
+
# Hit zero denominator unless numerator goes to 0 first
|
429 |
+
if ctx.isint(c) and c <= 0:
|
430 |
+
if (ctx.isint(a) and c <= a <= 0) or \
|
431 |
+
(ctx.isint(b) and c <= b <= 0):
|
432 |
+
pass
|
433 |
+
else:
|
434 |
+
# Pole in series
|
435 |
+
return ctx.inf
|
436 |
+
|
437 |
+
absz = abs(z)
|
438 |
+
|
439 |
+
# Fast case: standard series converges rapidly,
|
440 |
+
# possibly in finitely many terms
|
441 |
+
if absz <= 0.8 or (ctx.isint(a) and a <= 0 and a >= -1000) or \
|
442 |
+
(ctx.isint(b) and b <= 0 and b >= -1000):
|
443 |
+
return ctx.hypsum(2, 1, (atype, btype, ctype), [a, b, c], z, **kwargs)
|
444 |
+
|
445 |
+
orig = ctx.prec
|
446 |
+
try:
|
447 |
+
ctx.prec += 10
|
448 |
+
|
449 |
+
# Use 1/z transformation
|
450 |
+
if absz >= 1.3:
|
451 |
+
def h(a,b):
|
452 |
+
t = ctx.mpq_1-c; ab = a-b; rz = 1/z
|
453 |
+
T1 = ([-z],[-a], [c,-ab],[b,c-a], [a,t+a],[ctx.mpq_1+ab], rz)
|
454 |
+
T2 = ([-z],[-b], [c,ab],[a,c-b], [b,t+b],[ctx.mpq_1-ab], rz)
|
455 |
+
return T1, T2
|
456 |
+
v = ctx.hypercomb(h, [a,b], **kwargs)
|
457 |
+
|
458 |
+
# Use 1-z transformation
|
459 |
+
elif abs(1-z) <= 0.75:
|
460 |
+
def h(a,b):
|
461 |
+
t = c-a-b; ca = c-a; cb = c-b; rz = 1-z
|
462 |
+
T1 = [], [], [c,t], [ca,cb], [a,b], [1-t], rz
|
463 |
+
T2 = [rz], [t], [c,a+b-c], [a,b], [ca,cb], [1+t], rz
|
464 |
+
return T1, T2
|
465 |
+
v = ctx.hypercomb(h, [a,b], **kwargs)
|
466 |
+
|
467 |
+
# Use z/(z-1) transformation
|
468 |
+
elif abs(z/(z-1)) <= 0.75:
|
469 |
+
v = ctx.hyp2f1(a, c-b, c, z/(z-1)) / (1-z)**a
|
470 |
+
|
471 |
+
# Remaining part of unit circle
|
472 |
+
else:
|
473 |
+
v = _hyp2f1_gosper(ctx,a,b,c,z,**kwargs)
|
474 |
+
|
475 |
+
finally:
|
476 |
+
ctx.prec = orig
|
477 |
+
return +v
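The dispatch above (direct series for |z| <= 0.8 or terminating cases, the 1/z transformation for |z| >= 1.3, the 1-z transformation for |1-z| <= 0.75, the z/(z-1) transformation next, and Gosper's recurrence as the last resort) is internal, but its net effect can be spot-checked from the public API with Pfaff's transformation, the exact identity that the z/(z-1) branch is built on. A small sketch with arbitrary parameters:

from mpmath import mp, hyp2f1, chop

mp.dps = 30
a, b, c = mp.mpf('0.75'), mp.mpf('1.5'), mp.mpf('3.25')
for z in [mp.mpf('-0.95'), mp.mpf('0.6'), mp.mpf('-5')]:
    # Pfaff transformation: 2F1(a,b;c;z) = (1-z)^(-a) * 2F1(a, c-b; c; z/(z-1))
    lhs = hyp2f1(a, b, c, z)
    rhs = (1-z)**(-a) * hyp2f1(a, c-b, c, z/(z-1))
    print(chop(lhs - rhs))  # each difference should chop to 0.0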
|
478 |
+
|
479 |
+
@defun
|
480 |
+
def _hypq1fq(ctx, p, q, a_s, b_s, z, **kwargs):
|
481 |
+
r"""
|
482 |
+
Evaluates 3F2, 4F3, 5F4, ...
|
483 |
+
"""
|
484 |
+
a_s, a_types = zip(*a_s)
|
485 |
+
b_s, b_types = zip(*b_s)
|
486 |
+
a_s = list(a_s)
|
487 |
+
b_s = list(b_s)
|
488 |
+
absz = abs(z)
|
489 |
+
ispoly = False
|
490 |
+
for a in a_s:
|
491 |
+
if ctx.isint(a) and a <= 0:
|
492 |
+
ispoly = True
|
493 |
+
break
|
494 |
+
# Direct summation
|
495 |
+
if absz < 1 or ispoly:
|
496 |
+
try:
|
497 |
+
return ctx.hypsum(p, q, a_types+b_types, a_s+b_s, z, **kwargs)
|
498 |
+
except ctx.NoConvergence:
|
499 |
+
if absz > 1.1 or ispoly:
|
500 |
+
raise
|
501 |
+
# Use expansion at |z-1| -> 0.
|
502 |
+
# Reference: Wolfgang Buhring, "Generalized Hypergeometric Functions at
|
503 |
+
# Unit Argument", Proc. Amer. Math. Soc., Vol. 114, No. 1 (Jan. 1992),
|
504 |
+
# pp.145-153
|
505 |
+
# The current implementation has several problems:
|
506 |
+
# 1. We only implement it for 3F2. The expansion coefficients are
|
507 |
+
# given by extremely messy nested sums in the higher degree cases
|
508 |
+
# (see reference). Is efficient sequential generation of the coefficients
|
509 |
+
# possible in the > 3F2 case?
|
510 |
+
# 2. Although the series converges, it may do so slowly, so we need
|
511 |
+
# convergence acceleration. The acceleration implemented by
|
512 |
+
# nsum does not always help, so results returned are sometimes
|
513 |
+
# inaccurate! Can we do better?
|
514 |
+
# 3. We should check conditions for convergence, and possibly
|
515 |
+
# do a better job of cancelling out gamma poles if possible.
|
516 |
+
if z == 1:
|
517 |
+
# XXX: should also check for division by zero in the
|
518 |
+
# denominator of the series (cf. hyp2f1)
|
519 |
+
S = ctx.re(sum(b_s)-sum(a_s))
|
520 |
+
if S <= 0:
|
521 |
+
#return ctx.hyper(a_s, b_s, 1-ctx.eps*2, **kwargs) * ctx.inf
|
522 |
+
return ctx.hyper(a_s, b_s, 0.9, **kwargs) * ctx.inf
|
523 |
+
if (p,q) == (3,2) and abs(z-1) < 0.05: # and kwargs.get('sum1')
|
524 |
+
#print "Using alternate summation (experimental)"
|
525 |
+
a1,a2,a3 = a_s
|
526 |
+
b1,b2 = b_s
|
527 |
+
u = b1+b2-a3
|
528 |
+
initial = ctx.gammaprod([b2-a3,b1-a3,a1,a2],[b2-a3,b1-a3,1,u])
|
529 |
+
def term(k, _cache={0:initial}):
|
530 |
+
u = b1+b2-a3+k
|
531 |
+
if k in _cache:
|
532 |
+
t = _cache[k]
|
533 |
+
else:
|
534 |
+
t = _cache[k-1]
|
535 |
+
t *= (b1+k-a3-1)*(b2+k-a3-1)
|
536 |
+
t /= k*(u-1)
|
537 |
+
_cache[k] = t
|
538 |
+
return t * ctx.hyp2f1(a1,a2,u,z)
|
539 |
+
try:
|
540 |
+
S = ctx.nsum(term, [0,ctx.inf], verbose=kwargs.get('verbose'),
|
541 |
+
strict=kwargs.get('strict', True))
|
542 |
+
return S * ctx.gammaprod([b1,b2],[a1,a2,a3])
|
543 |
+
except ctx.NoConvergence:
|
544 |
+
pass
|
545 |
+
# Try to use convergence acceleration on and close to the unit circle.
|
546 |
+
# Problem: the convergence acceleration degenerates as |z-1| -> 0,
|
547 |
+
# except for special cases. Everywhere else, the Shanks transformation
|
548 |
+
# is very efficient.
|
549 |
+
if absz < 1.1 and ctx._re(z) <= 1:
|
550 |
+
|
551 |
+
def term(kk, _cache={0:ctx.one}):
|
552 |
+
k = int(kk)
|
553 |
+
if k != kk:
|
554 |
+
t = z ** ctx.mpf(kk) / ctx.fac(kk)
|
555 |
+
for a in a_s: t *= ctx.rf(a,kk)
|
556 |
+
for b in b_s: t /= ctx.rf(b,kk)
|
557 |
+
return t
|
558 |
+
if k in _cache:
|
559 |
+
return _cache[k]
|
560 |
+
t = term(k-1)
|
561 |
+
m = k-1
|
562 |
+
for j in xrange(p): t *= (a_s[j]+m)
|
563 |
+
for j in xrange(q): t /= (b_s[j]+m)
|
564 |
+
t *= z
|
565 |
+
t /= k
|
566 |
+
_cache[k] = t
|
567 |
+
return t
|
568 |
+
|
569 |
+
sum_method = kwargs.get('sum_method', 'r+s+e')
|
570 |
+
|
571 |
+
try:
|
572 |
+
return ctx.nsum(term, [0,ctx.inf], verbose=kwargs.get('verbose'),
|
573 |
+
strict=kwargs.get('strict', True),
|
574 |
+
method=sum_method.replace('e',''))
|
575 |
+
except ctx.NoConvergence:
|
576 |
+
if 'e' not in sum_method:
|
577 |
+
raise
|
578 |
+
pass
|
579 |
+
|
580 |
+
if kwargs.get('verbose'):
|
581 |
+
print("Attempting Euler-Maclaurin summation")
|
582 |
+
|
583 |
+
|
584 |
+
"""
|
585 |
+
Somewhat slower version (one diffs_exp for each factor).
|
586 |
+
However, this would be faster with fast direct derivatives
|
587 |
+
of the gamma function.
|
588 |
+
|
589 |
+
def power_diffs(k0):
|
590 |
+
r = 0
|
591 |
+
l = ctx.log(z)
|
592 |
+
while 1:
|
593 |
+
yield z**ctx.mpf(k0) * l**r
|
594 |
+
r += 1
|
595 |
+
|
596 |
+
def loggamma_diffs(x, reciprocal=False):
|
597 |
+
sign = (-1) ** reciprocal
|
598 |
+
yield sign * ctx.loggamma(x)
|
599 |
+
i = 0
|
600 |
+
while 1:
|
601 |
+
yield sign * ctx.psi(i,x)
|
602 |
+
i += 1
|
603 |
+
|
604 |
+
def hyper_diffs(k0):
|
605 |
+
b2 = b_s + [1]
|
606 |
+
A = [ctx.diffs_exp(loggamma_diffs(a+k0)) for a in a_s]
|
607 |
+
B = [ctx.diffs_exp(loggamma_diffs(b+k0,True)) for b in b2]
|
608 |
+
Z = [power_diffs(k0)]
|
609 |
+
C = ctx.gammaprod([b for b in b2], [a for a in a_s])
|
610 |
+
for d in ctx.diffs_prod(A + B + Z):
|
611 |
+
v = C * d
|
612 |
+
yield v
|
613 |
+
"""
|
614 |
+
|
615 |
+
def log_diffs(k0):
|
616 |
+
b2 = b_s + [1]
|
617 |
+
yield sum(ctx.loggamma(a+k0) for a in a_s) - \
|
618 |
+
sum(ctx.loggamma(b+k0) for b in b2) + k0*ctx.log(z)
|
619 |
+
i = 0
|
620 |
+
while 1:
|
621 |
+
v = sum(ctx.psi(i,a+k0) for a in a_s) - \
|
622 |
+
sum(ctx.psi(i,b+k0) for b in b2)
|
623 |
+
if i == 0:
|
624 |
+
v += ctx.log(z)
|
625 |
+
yield v
|
626 |
+
i += 1
|
627 |
+
|
628 |
+
def hyper_diffs(k0):
|
629 |
+
C = ctx.gammaprod([b for b in b_s], [a for a in a_s])
|
630 |
+
for d in ctx.diffs_exp(log_diffs(k0)):
|
631 |
+
v = C * d
|
632 |
+
yield v
|
633 |
+
|
634 |
+
tol = ctx.eps / 1024
|
635 |
+
prec = ctx.prec
|
636 |
+
try:
|
637 |
+
trunc = 50 * ctx.dps
|
638 |
+
ctx.prec += 20
|
639 |
+
for i in xrange(5):
|
640 |
+
head = ctx.fsum(term(k) for k in xrange(trunc))
|
641 |
+
tail, err = ctx.sumem(term, [trunc, ctx.inf], tol=tol,
|
642 |
+
adiffs=hyper_diffs(trunc),
|
643 |
+
verbose=kwargs.get('verbose'),
|
644 |
+
error=True,
|
645 |
+
_fast_abort=True)
|
646 |
+
if err < tol:
|
647 |
+
v = head + tail
|
648 |
+
break
|
649 |
+
trunc *= 2
|
650 |
+
# Need to increase precision because calculation of
|
651 |
+
# derivatives may be inaccurate
|
652 |
+
ctx.prec += ctx.prec//2
|
653 |
+
if i == 4:
|
654 |
+
raise ctx.NoConvergence(\
|
655 |
+
"Euler-Maclaurin summation did not converge")
|
656 |
+
finally:
|
657 |
+
ctx.prec = prec
|
658 |
+
return +v
|
659 |
+
|
660 |
+
# Use 1/z transformation
|
661 |
+
# http://functions.wolfram.com/HypergeometricFunctions/
|
662 |
+
# HypergeometricPFQ/06/01/05/02/0004/
|
663 |
+
def h(*args):
|
664 |
+
a_s = list(args[:p])
|
665 |
+
b_s = list(args[p:])
|
666 |
+
Ts = []
|
667 |
+
recz = ctx.one/z
|
668 |
+
negz = ctx.fneg(z, exact=True)
|
669 |
+
for k in range(q+1):
|
670 |
+
ak = a_s[k]
|
671 |
+
C = [negz]
|
672 |
+
Cp = [-ak]
|
673 |
+
Gn = b_s + [ak] + [a_s[j]-ak for j in range(q+1) if j != k]
|
674 |
+
Gd = a_s + [b_s[j]-ak for j in range(q)]
|
675 |
+
Fn = [ak] + [ak-b_s[j]+1 for j in range(q)]
|
676 |
+
Fd = [1-a_s[j]+ak for j in range(q+1) if j != k]
|
677 |
+
Ts.append((C, Cp, Gn, Gd, Fn, Fd, recz))
|
678 |
+
return Ts
|
679 |
+
return ctx.hypercomb(h, a_s+b_s, **kwargs)
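For a p+1Fp series with a nonpositive-integer upper parameter, the direct-summation branch at the top of this function is always taken because the sum terminates. A self-contained check of a terminating 3F2 against its defining finite sum (all values below are arbitrary; rf is the rising factorial):

from mpmath import mp, hyper, rf, fac, fsum, chop

mp.dps = 25
a1, a2, a3 = -4, mp.mpf('0.7'), mp.mpf('2.3')
b1, b2 = mp.mpf('1.9'), mp.mpf('3.1')
z = mp.mpf('1.75')  # |z| > 1 is fine here since the series terminates
lhs = hyper([a1, a2, a3], [b1, b2], z)
rhs = fsum(rf(a1, k)*rf(a2, k)*rf(a3, k)/(rf(b1, k)*rf(b2, k)*fac(k))*z**k
           for k in range(5))  # (-4)_k = 0 for k > 4, so five terms suffice
print(chop(lhs - rhs))  # expected: 0.0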
|
680 |
+
|
681 |
+
@defun
|
682 |
+
def _hyp_borel(ctx, p, q, a_s, b_s, z, **kwargs):
|
683 |
+
if a_s:
|
684 |
+
a_s, a_types = zip(*a_s)
|
685 |
+
a_s = list(a_s)
|
686 |
+
else:
|
687 |
+
a_s, a_types = [], ()
|
688 |
+
if b_s:
|
689 |
+
b_s, b_types = zip(*b_s)
|
690 |
+
b_s = list(b_s)
|
691 |
+
else:
|
692 |
+
b_s, b_types = [], ()
|
693 |
+
kwargs['maxterms'] = kwargs.get('maxterms', ctx.prec)
|
694 |
+
try:
|
695 |
+
return ctx.hypsum(p, q, a_types+b_types, a_s+b_s, z, **kwargs)
|
696 |
+
except ctx.NoConvergence:
|
697 |
+
pass
|
698 |
+
prec = ctx.prec
|
699 |
+
try:
|
700 |
+
tol = kwargs.get('asymp_tol', ctx.eps/4)
|
701 |
+
ctx.prec += 10
|
702 |
+
# hypsum has a conservative tolerance, so we try again:
|
703 |
+
def term(k, cache={0:ctx.one}):
|
704 |
+
if k in cache:
|
705 |
+
return cache[k]
|
706 |
+
t = term(k-1)
|
707 |
+
for a in a_s: t *= (a+(k-1))
|
708 |
+
for b in b_s: t /= (b+(k-1))
|
709 |
+
t *= z
|
710 |
+
t /= k
|
711 |
+
cache[k] = t
|
712 |
+
return t
|
713 |
+
s = ctx.one
|
714 |
+
for k in xrange(1, ctx.prec):
|
715 |
+
t = term(k)
|
716 |
+
s += t
|
717 |
+
if abs(t) <= tol:
|
718 |
+
return s
|
719 |
+
finally:
|
720 |
+
ctx.prec = prec
|
721 |
+
if p <= q+3:
|
722 |
+
contour = kwargs.get('contour')
|
723 |
+
if not contour:
|
724 |
+
if ctx.arg(z) < 0.25:
|
725 |
+
u = z / max(1, abs(z))
|
726 |
+
if ctx.arg(z) >= 0:
|
727 |
+
contour = [0, 2j, (2j+2)/u, 2/u, ctx.inf]
|
728 |
+
else:
|
729 |
+
contour = [0, -2j, (-2j+2)/u, 2/u, ctx.inf]
|
730 |
+
#contour = [0, 2j/z, 2/z, ctx.inf]
|
731 |
+
#contour = [0, 2j, 2/z, ctx.inf]
|
732 |
+
#contour = [0, 2j, ctx.inf]
|
733 |
+
else:
|
734 |
+
contour = [0, ctx.inf]
|
735 |
+
quad_kwargs = kwargs.get('quad_kwargs', {})
|
736 |
+
def g(t):
|
737 |
+
return ctx.exp(-t)*ctx.hyper(a_s, b_s+[1], t*z)
|
738 |
+
I, err = ctx.quad(g, contour, error=True, **quad_kwargs)
|
739 |
+
if err <= abs(I)*ctx.eps*8:
|
740 |
+
return I
|
741 |
+
raise ctx.NoConvergence
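The quadrature fallback above is a Borel-Laplace summation: the Borel transform of the series sum_k (a)_k (b)_k z^k / k! (the 2F0 case) is 2F1(a,b;1;tz), which is exactly what the integrand exp(-t)*hyper(a_s, b_s+[1], t*z) evaluates. The sketch below reproduces that construction through the public API for a case (a, b > 0, z < 0) where the straight Laplace contour converges; the parameter values are illustrative assumptions, and the regularized value returned by hyp2f0 should match the integral.

from mpmath import mp, hyp2f0, hyp2f1, quad, exp, inf

mp.dps = 20
a, b = mp.mpf('1.25'), mp.mpf('0.75')
z = mp.mpf('-2')
direct = hyp2f0(a, b, z)                       # Borel-regularized 2F0
borel = quad(lambda t: exp(-t)*hyp2f1(a, b, 1, t*z), [0, inf])
print(direct - borel)  # expected: negligible (limited by quadrature error)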
|
742 |
+
|
743 |
+
|
744 |
+
@defun
|
745 |
+
def _hyp2f2(ctx, a_s, b_s, z, **kwargs):
|
746 |
+
(a1, a1type), (a2, a2type) = a_s
|
747 |
+
(b1, b1type), (b2, b2type) = b_s
|
748 |
+
|
749 |
+
absz = abs(z)
|
750 |
+
magz = ctx.mag(z)
|
751 |
+
orig = ctx.prec
|
752 |
+
|
753 |
+
# Asymptotic expansion is ~ exp(z)
|
754 |
+
asymp_extraprec = magz
|
755 |
+
|
756 |
+
# Asymptotic series is in terms of 3F1
|
757 |
+
can_use_asymptotic = (not kwargs.get('force_series')) and \
|
758 |
+
(ctx.mag(absz) > 3)
|
759 |
+
|
760 |
+
# TODO: much of the following could be shared with 2F3 instead of
|
761 |
+
# copypasted
|
762 |
+
if can_use_asymptotic:
|
763 |
+
#print "using asymp"
|
764 |
+
try:
|
765 |
+
try:
|
766 |
+
ctx.prec += asymp_extraprec
|
767 |
+
# http://functions.wolfram.com/HypergeometricFunctions/
|
768 |
+
# Hypergeometric2F2/06/02/02/0002/
|
769 |
+
def h(a1,a2,b1,b2):
|
770 |
+
X = a1+a2-b1-b2
|
771 |
+
A2 = a1+a2
|
772 |
+
B2 = b1+b2
|
773 |
+
c = {}
|
774 |
+
c[0] = ctx.one
|
775 |
+
c[1] = (A2-1)*X+b1*b2-a1*a2
|
776 |
+
s1 = 0
|
777 |
+
k = 0
|
778 |
+
tprev = 0
|
779 |
+
while 1:
|
780 |
+
if k not in c:
|
781 |
+
uu1 = 1-B2+2*a1+a1**2+2*a2+a2**2-A2*B2+a1*a2+b1*b2+(2*B2-3*(A2+1))*k+2*k**2
|
782 |
+
uu2 = (k-A2+b1-1)*(k-A2+b2-1)*(k-X-2)
|
783 |
+
c[k] = ctx.one/k * (uu1*c[k-1]-uu2*c[k-2])
|
784 |
+
t1 = c[k] * z**(-k)
|
785 |
+
if abs(t1) < 0.1*ctx.eps:
|
786 |
+
#print "Convergence :)"
|
787 |
+
break
|
788 |
+
# Quit if the series doesn't converge quickly enough
|
789 |
+
if k > 5 and abs(tprev) / abs(t1) < 1.5:
|
790 |
+
#print "No convergence :("
|
791 |
+
raise ctx.NoConvergence
|
792 |
+
s1 += t1
|
793 |
+
tprev = t1
|
794 |
+
k += 1
|
795 |
+
S = ctx.exp(z)*s1
|
796 |
+
T1 = [z,S], [X,1], [b1,b2],[a1,a2],[],[],0
|
797 |
+
T2 = [-z],[-a1],[b1,b2,a2-a1],[a2,b1-a1,b2-a1],[a1,a1-b1+1,a1-b2+1],[a1-a2+1],-1/z
|
798 |
+
T3 = [-z],[-a2],[b1,b2,a1-a2],[a1,b1-a2,b2-a2],[a2,a2-b1+1,a2-b2+1],[-a1+a2+1],-1/z
|
799 |
+
return T1, T2, T3
|
800 |
+
v = ctx.hypercomb(h, [a1,a2,b1,b2], force_series=True, maxterms=4*ctx.prec)
|
801 |
+
if sum(ctx._is_real_type(u) for u in [a1,a2,b1,b2,z]) == 5:
|
802 |
+
v = ctx.re(v)
|
803 |
+
return v
|
804 |
+
except ctx.NoConvergence:
|
805 |
+
pass
|
806 |
+
finally:
|
807 |
+
ctx.prec = orig
|
808 |
+
|
809 |
+
return ctx.hypsum(2, 2, (a1type, a2type, b1type, b2type), [a1, a2, b1, b2], z, **kwargs)
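A cheap end-to-end check of this 2F2 routine: when an upper parameter coincides with a lower one the terms cancel and 2F2(a,b;a,c;z) reduces to 1F1(b;c;z). Sketch with arbitrary values (kept moderate so no special care about cancellation is needed):

from mpmath import mp, hyp2f2, hyp1f1, chop

mp.dps = 25
a, b, c = mp.mpf('0.3'), mp.mpf('1.8'), mp.mpf('2.6')
for z in [mp.mpf('5'), mp.mpf('-3'), mp.mpf('30')]:
    # 2F2(a, b; a, c; z) = 1F1(b; c; z)
    print(chop(hyp2f2(a, b, a, c, z) - hyp1f1(b, c, z)))  # each: 0.0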
|
810 |
+
|
811 |
+
|
812 |
+
|
813 |
+
@defun
|
814 |
+
def _hyp1f2(ctx, a_s, b_s, z, **kwargs):
|
815 |
+
(a1, a1type), = a_s
|
816 |
+
(b1, b1type), (b2, b2type) = b_s
|
817 |
+
|
818 |
+
absz = abs(z)
|
819 |
+
magz = ctx.mag(z)
|
820 |
+
orig = ctx.prec
|
821 |
+
|
822 |
+
# Asymptotic expansion is ~ exp(sqrt(z))
|
823 |
+
asymp_extraprec = z and magz//2
|
824 |
+
|
825 |
+
# Asymptotic series is in terms of 3F0
|
826 |
+
can_use_asymptotic = (not kwargs.get('force_series')) and \
|
827 |
+
(ctx.mag(absz) > 19) and \
|
828 |
+
(ctx.sqrt(absz) > 1.5*orig) # and \
|
829 |
+
# ctx._hyp_check_convergence([a1, a1-b1+1, a1-b2+1], [],
|
830 |
+
# 1/absz, orig+40+asymp_extraprec)
|
831 |
+
|
832 |
+
# TODO: much of the following could be shared with 2F3 instead of
|
833 |
+
# copypasted
|
834 |
+
if can_use_asymptotic:
|
835 |
+
#print "using asymp"
|
836 |
+
try:
|
837 |
+
try:
|
838 |
+
ctx.prec += asymp_extraprec
|
839 |
+
# http://functions.wolfram.com/HypergeometricFunctions/
|
840 |
+
# Hypergeometric1F2/06/02/03/
|
841 |
+
def h(a1,b1,b2):
|
842 |
+
X = ctx.mpq_1_2*(a1-b1-b2+ctx.mpq_1_2)
|
843 |
+
c = {}
|
844 |
+
c[0] = ctx.one
|
845 |
+
c[1] = 2*(ctx.mpq_1_4*(3*a1+b1+b2-2)*(a1-b1-b2)+b1*b2-ctx.mpq_3_16)
|
846 |
+
c[2] = 2*(b1*b2+ctx.mpq_1_4*(a1-b1-b2)*(3*a1+b1+b2-2)-ctx.mpq_3_16)**2+\
|
847 |
+
ctx.mpq_1_16*(-16*(2*a1-3)*b1*b2 + \
|
848 |
+
4*(a1-b1-b2)*(-8*a1**2+11*a1+b1+b2-2)-3)
|
849 |
+
s1 = 0
|
850 |
+
s2 = 0
|
851 |
+
k = 0
|
852 |
+
tprev = 0
|
853 |
+
while 1:
|
854 |
+
if k not in c:
|
855 |
+
uu1 = (3*k**2+(-6*a1+2*b1+2*b2-4)*k + 3*a1**2 - \
|
856 |
+
(b1-b2)**2 - 2*a1*(b1+b2-2) + ctx.mpq_1_4)
|
857 |
+
uu2 = (k-a1+b1-b2-ctx.mpq_1_2)*(k-a1-b1+b2-ctx.mpq_1_2)*\
|
858 |
+
(k-a1+b1+b2-ctx.mpq_5_2)
|
859 |
+
c[k] = ctx.one/(2*k)*(uu1*c[k-1]-uu2*c[k-2])
|
860 |
+
w = c[k] * (-z)**(-0.5*k)
|
861 |
+
t1 = (-ctx.j)**k * ctx.mpf(2)**(-k) * w
|
862 |
+
t2 = ctx.j**k * ctx.mpf(2)**(-k) * w
|
863 |
+
if abs(t1) < 0.1*ctx.eps:
|
864 |
+
#print "Convergence :)"
|
865 |
+
break
|
866 |
+
# Quit if the series doesn't converge quickly enough
|
867 |
+
if k > 5 and abs(tprev) / abs(t1) < 1.5:
|
868 |
+
#print "No convergence :("
|
869 |
+
raise ctx.NoConvergence
|
870 |
+
s1 += t1
|
871 |
+
s2 += t2
|
872 |
+
tprev = t1
|
873 |
+
k += 1
|
874 |
+
S = ctx.expj(ctx.pi*X+2*ctx.sqrt(-z))*s1 + \
|
875 |
+
ctx.expj(-(ctx.pi*X+2*ctx.sqrt(-z)))*s2
|
876 |
+
T1 = [0.5*S, ctx.pi, -z], [1, -0.5, X], [b1, b2], [a1],\
|
877 |
+
[], [], 0
|
878 |
+
T2 = [-z], [-a1], [b1,b2],[b1-a1,b2-a1], \
|
879 |
+
[a1,a1-b1+1,a1-b2+1], [], 1/z
|
880 |
+
return T1, T2
|
881 |
+
v = ctx.hypercomb(h, [a1,b1,b2], force_series=True, maxterms=4*ctx.prec)
|
882 |
+
if sum(ctx._is_real_type(u) for u in [a1,b1,b2,z]) == 4:
|
883 |
+
v = ctx.re(v)
|
884 |
+
return v
|
885 |
+
except ctx.NoConvergence:
|
886 |
+
pass
|
887 |
+
finally:
|
888 |
+
ctx.prec = orig
|
889 |
+
|
890 |
+
#print "not using asymp"
|
891 |
+
return ctx.hypsum(1, 2, (a1type, b1type, b2type), [a1, b1, b2], z, **kwargs)
|
892 |
+
|
893 |
+
|
894 |
+
|
895 |
+
@defun
|
896 |
+
def _hyp2f3(ctx, a_s, b_s, z, **kwargs):
|
897 |
+
(a1, a1type), (a2, a2type) = a_s
|
898 |
+
(b1, b1type), (b2, b2type), (b3, b3type) = b_s
|
899 |
+
|
900 |
+
absz = abs(z)
|
901 |
+
magz = ctx.mag(z)
|
902 |
+
|
903 |
+
# Asymptotic expansion is ~ exp(sqrt(z))
|
904 |
+
asymp_extraprec = z and magz//2
|
905 |
+
orig = ctx.prec
|
906 |
+
|
907 |
+
# Asymptotic series is in terms of 4F1
|
908 |
+
# The square root below empirically provides a plausible criterion
|
909 |
+
# for the leading series to converge
|
910 |
+
can_use_asymptotic = (not kwargs.get('force_series')) and \
|
911 |
+
(ctx.mag(absz) > 19) and (ctx.sqrt(absz) > 1.5*orig)
|
912 |
+
|
913 |
+
if can_use_asymptotic:
|
914 |
+
#print "using asymp"
|
915 |
+
try:
|
916 |
+
try:
|
917 |
+
ctx.prec += asymp_extraprec
|
918 |
+
# http://functions.wolfram.com/HypergeometricFunctions/
|
919 |
+
# Hypergeometric2F3/06/02/03/01/0002/
|
920 |
+
def h(a1,a2,b1,b2,b3):
|
921 |
+
X = ctx.mpq_1_2*(a1+a2-b1-b2-b3+ctx.mpq_1_2)
|
922 |
+
A2 = a1+a2
|
923 |
+
B3 = b1+b2+b3
|
924 |
+
A = a1*a2
|
925 |
+
B = b1*b2+b3*b2+b1*b3
|
926 |
+
R = b1*b2*b3
|
927 |
+
c = {}
|
928 |
+
c[0] = ctx.one
|
929 |
+
c[1] = 2*(B - A + ctx.mpq_1_4*(3*A2+B3-2)*(A2-B3) - ctx.mpq_3_16)
|
930 |
+
c[2] = ctx.mpq_1_2*c[1]**2 + ctx.mpq_1_16*(-16*(2*A2-3)*(B-A) + 32*R +\
|
931 |
+
4*(-8*A2**2 + 11*A2 + 8*A + B3 - 2)*(A2-B3)-3)
|
932 |
+
s1 = 0
|
933 |
+
s2 = 0
|
934 |
+
k = 0
|
935 |
+
tprev = 0
|
936 |
+
while 1:
|
937 |
+
if k not in c:
|
938 |
+
uu1 = (k-2*X-3)*(k-2*X-2*b1-1)*(k-2*X-2*b2-1)*\
|
939 |
+
(k-2*X-2*b3-1)
|
940 |
+
uu2 = (4*(k-1)**3 - 6*(4*X+B3)*(k-1)**2 + \
|
941 |
+
2*(24*X**2+12*B3*X+4*B+B3-1)*(k-1) - 32*X**3 - \
|
942 |
+
24*B3*X**2 - 4*B - 8*R - 4*(4*B+B3-1)*X + 2*B3-1)
|
943 |
+
uu3 = (5*(k-1)**2+2*(-10*X+A2-3*B3+3)*(k-1)+2*c[1])
|
944 |
+
c[k] = ctx.one/(2*k)*(uu1*c[k-3]-uu2*c[k-2]+uu3*c[k-1])
|
945 |
+
w = c[k] * ctx.power(-z, -0.5*k)
|
946 |
+
t1 = (-ctx.j)**k * ctx.mpf(2)**(-k) * w
|
947 |
+
t2 = ctx.j**k * ctx.mpf(2)**(-k) * w
|
948 |
+
if abs(t1) < 0.1*ctx.eps:
|
949 |
+
break
|
950 |
+
# Quit if the series doesn't converge quickly enough
|
951 |
+
if k > 5 and abs(tprev) / abs(t1) < 1.5:
|
952 |
+
raise ctx.NoConvergence
|
953 |
+
s1 += t1
|
954 |
+
s2 += t2
|
955 |
+
tprev = t1
|
956 |
+
k += 1
|
957 |
+
S = ctx.expj(ctx.pi*X+2*ctx.sqrt(-z))*s1 + \
|
958 |
+
ctx.expj(-(ctx.pi*X+2*ctx.sqrt(-z)))*s2
|
959 |
+
T1 = [0.5*S, ctx.pi, -z], [1, -0.5, X], [b1, b2, b3], [a1, a2],\
|
960 |
+
[], [], 0
|
961 |
+
T2 = [-z], [-a1], [b1,b2,b3,a2-a1],[a2,b1-a1,b2-a1,b3-a1], \
|
962 |
+
[a1,a1-b1+1,a1-b2+1,a1-b3+1], [a1-a2+1], 1/z
|
963 |
+
T3 = [-z], [-a2], [b1,b2,b3,a1-a2],[a1,b1-a2,b2-a2,b3-a2], \
|
964 |
+
[a2,a2-b1+1,a2-b2+1,a2-b3+1],[-a1+a2+1], 1/z
|
965 |
+
return T1, T2, T3
|
966 |
+
v = ctx.hypercomb(h, [a1,a2,b1,b2,b3], force_series=True, maxterms=4*ctx.prec)
|
967 |
+
if sum(ctx._is_real_type(u) for u in [a1,a2,b1,b2,b3,z]) == 6:
|
968 |
+
v = ctx.re(v)
|
969 |
+
return v
|
970 |
+
except ctx.NoConvergence:
|
971 |
+
pass
|
972 |
+
finally:
|
973 |
+
ctx.prec = orig
|
974 |
+
|
975 |
+
return ctx.hypsum(2, 3, (a1type, a2type, b1type, b2type, b3type), [a1, a2, b1, b2, b3], z, **kwargs)
|
976 |
+
|
977 |
+
@defun
def _hyp2f0(ctx, a_s, b_s, z, **kwargs):
    (a, atype), (b, btype) = a_s
    # We want to try aggressively to use the asymptotic expansion,
    # and fall back only when absolutely necessary
    try:
        kwargsb = kwargs.copy()
        kwargsb['maxterms'] = kwargsb.get('maxterms', ctx.prec)
        return ctx.hypsum(2, 0, (atype,btype), [a,b], z, **kwargsb)
    except ctx.NoConvergence:
        if kwargs.get('force_series'):
            raise
        pass
    def h(a, b):
        w = ctx.sinpi(b)
        rz = -1/z
        T1 = ([ctx.pi,w,rz],[1,-1,a],[],[a-b+1,b],[a],[b],rz)
        T2 = ([-ctx.pi,w,rz],[1,-1,1+a-b],[],[a,2-b],[a-b+1],[2-b],rz)
        return T1, T2
    return ctx.hypercomb(h, [a, 1+a-b], **kwargs)
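When a (or b) is a nonpositive integer the 2F0 series terminates, the hypsum branch above succeeds immediately, and no regularization is involved, so the value can be checked against the defining finite sum (arbitrary values):

from mpmath import mp, hyp2f0, rf, fac, fsum, chop

mp.dps = 25
a, b, z = -3, mp.mpf('1.4'), mp.mpf('0.6')
lhs = hyp2f0(a, b, z)
rhs = fsum(rf(a, k)*rf(b, k)/fac(k)*z**k for k in range(4))  # (-3)_k = 0 for k > 3
print(chop(lhs - rhs))  # expected: 0.0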
|
997 |
+
|
998 |
+
@defun
|
999 |
+
def meijerg(ctx, a_s, b_s, z, r=1, series=None, **kwargs):
|
1000 |
+
an, ap = a_s
|
1001 |
+
bm, bq = b_s
|
1002 |
+
n = len(an)
|
1003 |
+
p = n + len(ap)
|
1004 |
+
m = len(bm)
|
1005 |
+
q = m + len(bq)
|
1006 |
+
a = an+ap
|
1007 |
+
b = bm+bq
|
1008 |
+
a = [ctx.convert(_) for _ in a]
|
1009 |
+
b = [ctx.convert(_) for _ in b]
|
1010 |
+
z = ctx.convert(z)
|
1011 |
+
if series is None:
|
1012 |
+
if p < q: series = 1
|
1013 |
+
if p > q: series = 2
|
1014 |
+
if p == q:
|
1015 |
+
if m+n == p and abs(z) > 1:
|
1016 |
+
series = 2
|
1017 |
+
else:
|
1018 |
+
series = 1
|
1019 |
+
if kwargs.get('verbose'):
|
1020 |
+
print("Meijer G m,n,p,q,series =", m,n,p,q,series)
|
1021 |
+
if series == 1:
|
1022 |
+
def h(*args):
|
1023 |
+
a = args[:p]
|
1024 |
+
b = args[p:]
|
1025 |
+
terms = []
|
1026 |
+
for k in range(m):
|
1027 |
+
bases = [z]
|
1028 |
+
expts = [b[k]/r]
|
1029 |
+
gn = [b[j]-b[k] for j in range(m) if j != k]
|
1030 |
+
gn += [1-a[j]+b[k] for j in range(n)]
|
1031 |
+
gd = [a[j]-b[k] for j in range(n,p)]
|
1032 |
+
gd += [1-b[j]+b[k] for j in range(m,q)]
|
1033 |
+
hn = [1-a[j]+b[k] for j in range(p)]
|
1034 |
+
hd = [1-b[j]+b[k] for j in range(q) if j != k]
|
1035 |
+
hz = (-ctx.one)**(p-m-n) * z**(ctx.one/r)
|
1036 |
+
terms.append((bases, expts, gn, gd, hn, hd, hz))
|
1037 |
+
return terms
|
1038 |
+
else:
|
1039 |
+
def h(*args):
|
1040 |
+
a = args[:p]
|
1041 |
+
b = args[p:]
|
1042 |
+
terms = []
|
1043 |
+
for k in range(n):
|
1044 |
+
bases = [z]
|
1045 |
+
if r == 1:
|
1046 |
+
expts = [a[k]-1]
|
1047 |
+
else:
|
1048 |
+
expts = [(a[k]-1)/ctx.convert(r)]
|
1049 |
+
gn = [a[k]-a[j] for j in range(n) if j != k]
|
1050 |
+
gn += [1-a[k]+b[j] for j in range(m)]
|
1051 |
+
gd = [a[k]-b[j] for j in range(m,q)]
|
1052 |
+
gd += [1-a[k]+a[j] for j in range(n,p)]
|
1053 |
+
hn = [1-a[k]+b[j] for j in range(q)]
|
1054 |
+
hd = [1+a[j]-a[k] for j in range(p) if j != k]
|
1055 |
+
hz = (-ctx.one)**(q-m-n) / z**(ctx.one/r)
|
1056 |
+
terms.append((bases, expts, gn, gd, hn, hd, hz))
|
1057 |
+
return terms
|
1058 |
+
return ctx.hypercomb(h, a+b, **kwargs)
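A minimal sketch exercising the series-1 branch of meijerg above, via one of the simplest classical special cases, G^{1,0}_{0,1}(z | -; 0) = exp(-z); the sample points are arbitrary:

from mpmath import mp, meijerg, exp, chop

mp.dps = 25
for z in [mp.mpf('0.25'), mp.mpf('3'), mp.mpc(1, 2)]:
    # m, n = 1, 0;  p, q = 0, 1;  b_1 = 0
    print(chop(meijerg([[], []], [[0], []], z) - exp(-z)))  # each: 0.0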
|
1059 |
+
|
1060 |
+
@defun_wrapped
|
1061 |
+
def appellf1(ctx,a,b1,b2,c,x,y,**kwargs):
|
1062 |
+
# Assume x smaller
|
1063 |
+
# We will use x for the outer loop
|
1064 |
+
if abs(x) > abs(y):
|
1065 |
+
x, y = y, x
|
1066 |
+
b1, b2 = b2, b1
|
1067 |
+
def ok(x):
|
1068 |
+
return abs(x) < 0.99
|
1069 |
+
# Finite cases
|
1070 |
+
if ctx.isnpint(a):
|
1071 |
+
pass
|
1072 |
+
elif ctx.isnpint(b1):
|
1073 |
+
pass
|
1074 |
+
elif ctx.isnpint(b2):
|
1075 |
+
x, y, b1, b2 = y, x, b2, b1
|
1076 |
+
else:
|
1077 |
+
#print x, y
|
1078 |
+
# Note: ok if |y| > 1, because
|
1079 |
+
# 2F1 implements analytic continuation
|
1080 |
+
if not ok(x):
|
1081 |
+
u1 = (x-y)/(x-1)
|
1082 |
+
if not ok(u1):
|
1083 |
+
raise ValueError("Analytic continuation not implemented")
|
1084 |
+
#print "Using analytic continuation"
|
1085 |
+
return (1-x)**(-b1)*(1-y)**(c-a-b2)*\
|
1086 |
+
ctx.appellf1(c-a,b1,c-b1-b2,c,u1,y,**kwargs)
|
1087 |
+
return ctx.hyper2d({'m+n':[a],'m':[b1],'n':[b2]}, {'m+n':[c]}, x,y, **kwargs)
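Because appellf1 is just a particular parameter pattern for hyper2d, an easy consistency check is the reduction at y = 0, where the double series collapses to an ordinary Gauss 2F1 in x (the values below are arbitrary):

from mpmath import mp, appellf1, hyp2f1, chop

mp.dps = 25
a, b1, b2, c = mp.mpf('1.1'), mp.mpf('0.4'), mp.mpf('2.3'), mp.mpf('3.7')
x = mp.mpf('0.35')
# F1(a; b1, b2; c; x, 0) = 2F1(a, b1; c; x)
print(chop(appellf1(a, b1, b2, c, x, 0) - hyp2f1(a, b1, c, x)))  # expected: 0.0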
|
1088 |
+
|
1089 |
+
@defun
|
1090 |
+
def appellf2(ctx,a,b1,b2,c1,c2,x,y,**kwargs):
|
1091 |
+
# TODO: continuation
|
1092 |
+
return ctx.hyper2d({'m+n':[a],'m':[b1],'n':[b2]},
|
1093 |
+
{'m':[c1],'n':[c2]}, x,y, **kwargs)
|
1094 |
+
|
1095 |
+
@defun
|
1096 |
+
def appellf3(ctx,a1,a2,b1,b2,c,x,y,**kwargs):
|
1097 |
+
outer_polynomial = ctx.isnpint(a1) or ctx.isnpint(b1)
|
1098 |
+
inner_polynomial = ctx.isnpint(a2) or ctx.isnpint(b2)
|
1099 |
+
if not outer_polynomial:
|
1100 |
+
if inner_polynomial or abs(x) > abs(y):
|
1101 |
+
x, y = y, x
|
1102 |
+
a1,a2,b1,b2 = a2,a1,b2,b1
|
1103 |
+
return ctx.hyper2d({'m':[a1,b1],'n':[a2,b2]}, {'m+n':[c]},x,y,**kwargs)
|
1104 |
+
|
1105 |
+
@defun
|
1106 |
+
def appellf4(ctx,a,b,c1,c2,x,y,**kwargs):
|
1107 |
+
# TODO: continuation
|
1108 |
+
return ctx.hyper2d({'m+n':[a,b]}, {'m':[c1],'n':[c2]},x,y,**kwargs)
|
1109 |
+
|
1110 |
+
@defun
|
1111 |
+
def hyper2d(ctx, a, b, x, y, **kwargs):
|
1112 |
+
r"""
|
1113 |
+
Sums the generalized 2D hypergeometric series
|
1114 |
+
|
1115 |
+
.. math ::
|
1116 |
+
|
1117 |
+
\sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
|
1118 |
+
\frac{P((a),m,n)}{Q((b),m,n)}
|
1119 |
+
\frac{x^m y^n} {m! n!}
|
1120 |
+
|
1121 |
+
where `(a) = (a_1,\ldots,a_r)`, `(b) = (b_1,\ldots,b_s)` and where
|
1122 |
+
`P` and `Q` are products of rising factorials such as `(a_j)_n` or
|
1123 |
+
`(a_j)_{m+n}`. `P` and `Q` are specified in the form of dicts, with
|
1124 |
+
the `m` and `n` dependence as keys and parameter lists as values.
|
1125 |
+
The supported rising factorials are given in the following table
|
1126 |
+
(note that only a few are supported in `Q`):
|
1127 |
+
|
1128 |
+
+------------+-------------------+--------+
|
1129 |
+
| Key | Rising factorial | `Q` |
|
1130 |
+
+============+===================+========+
|
1131 |
+
| ``'m'`` | `(a_j)_m` | Yes |
|
1132 |
+
+------------+-------------------+--------+
|
1133 |
+
| ``'n'`` | `(a_j)_n` | Yes |
|
1134 |
+
+------------+-------------------+--------+
|
1135 |
+
| ``'m+n'`` | `(a_j)_{m+n}` | Yes |
|
1136 |
+
+------------+-------------------+--------+
|
1137 |
+
| ``'m-n'`` | `(a_j)_{m-n}` | No |
|
1138 |
+
+------------+-------------------+--------+
|
1139 |
+
| ``'n-m'`` | `(a_j)_{n-m}` | No |
|
1140 |
+
+------------+-------------------+--------+
|
1141 |
+
| ``'2m+n'`` | `(a_j)_{2m+n}` | No |
|
1142 |
+
+------------+-------------------+--------+
|
1143 |
+
| ``'2m-n'`` | `(a_j)_{2m-n}` | No |
|
1144 |
+
+------------+-------------------+--------+
|
1145 |
+
| ``'2n-m'`` | `(a_j)_{2n-m}` | No |
|
1146 |
+
+------------+-------------------+--------+
|
1147 |
+
|
1148 |
+
For example, the Appell F1 and F4 functions
|
1149 |
+
|
1150 |
+
.. math ::
|
1151 |
+
|
1152 |
+
F_1 = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
|
1153 |
+
\frac{(a)_{m+n} (b)_m (c)_n}{(d)_{m+n}}
|
1154 |
+
\frac{x^m y^n}{m! n!}
|
1155 |
+
|
1156 |
+
F_4 = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
|
1157 |
+
\frac{(a)_{m+n} (b)_{m+n}}{(c)_m (d)_{n}}
|
1158 |
+
\frac{x^m y^n}{m! n!}
|
1159 |
+
|
1160 |
+
can be represented respectively as
|
1161 |
+
|
1162 |
+
``hyper2d({'m+n':[a], 'm':[b], 'n':[c]}, {'m+n':[d]}, x, y)``
|
1163 |
+
|
1164 |
+
``hyper2d({'m+n':[a,b]}, {'m':[c], 'n':[d]}, x, y)``
|
1165 |
+
|
1166 |
+
More generally, :func:`~mpmath.hyper2d` can evaluate any of the 34 distinct
|
1167 |
+
convergent second-order (generalized Gaussian) hypergeometric
|
1168 |
+
series enumerated by Horn, as well as the Kampe de Feriet
|
1169 |
+
function.
|
1170 |
+
|
1171 |
+
The series is computed by rewriting it so that the inner
|
1172 |
+
series (i.e. the series containing `n` and `y`) has the form of an
|
1173 |
+
ordinary generalized hypergeometric series and thereby can be
|
1174 |
+
evaluated efficiently using :func:`~mpmath.hyper`. If possible,
|
1175 |
+
manually swapping `x` and `y` and the corresponding parameters
|
1176 |
+
can sometimes give better results.
|
1177 |
+
|
1178 |
+
**Examples**
|
1179 |
+
|
1180 |
+
Two separable cases: a product of two geometric series, and a
|
1181 |
+
product of two Gaussian hypergeometric functions::
|
1182 |
+
|
1183 |
+
>>> from mpmath import *
|
1184 |
+
>>> mp.dps = 25; mp.pretty = True
|
1185 |
+
>>> x, y = mpf(0.25), mpf(0.5)
|
1186 |
+
>>> hyper2d({'m':1,'n':1}, {}, x,y)
|
1187 |
+
2.666666666666666666666667
|
1188 |
+
>>> 1/(1-x)/(1-y)
|
1189 |
+
2.666666666666666666666667
|
1190 |
+
>>> hyper2d({'m':[1,2],'n':[3,4]}, {'m':[5],'n':[6]}, x,y)
|
1191 |
+
4.164358531238938319669856
|
1192 |
+
>>> hyp2f1(1,2,5,x)*hyp2f1(3,4,6,y)
|
1193 |
+
4.164358531238938319669856
|
1194 |
+
|
1195 |
+
Some more series that can be done in closed form::
|
1196 |
+
|
1197 |
+
>>> hyper2d({'m':1,'n':1},{'m+n':1},x,y)
|
1198 |
+
2.013417124712514809623881
|
1199 |
+
>>> (exp(x)*x-exp(y)*y)/(x-y)
|
1200 |
+
2.013417124712514809623881
|
1201 |
+
|
1202 |
+
Six of the 34 Horn functions, G1-G3 and H1-H3::
|
1203 |
+
|
1204 |
+
>>> from mpmath import *
|
1205 |
+
>>> mp.dps = 10; mp.pretty = True
|
1206 |
+
>>> x, y = 0.0625, 0.125
|
1207 |
+
>>> a1,a2,b1,b2,c1,c2,d = 1.1,-1.2,-1.3,-1.4,1.5,-1.6,1.7
|
1208 |
+
>>> hyper2d({'m+n':a1,'n-m':b1,'m-n':b2},{},x,y) # G1
|
1209 |
+
1.139090746
|
1210 |
+
>>> nsum(lambda m,n: rf(a1,m+n)*rf(b1,n-m)*rf(b2,m-n)*\
|
1211 |
+
... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf])
|
1212 |
+
1.139090746
|
1213 |
+
>>> hyper2d({'m':a1,'n':a2,'n-m':b1,'m-n':b2},{},x,y) # G2
|
1214 |
+
0.9503682696
|
1215 |
+
>>> nsum(lambda m,n: rf(a1,m)*rf(a2,n)*rf(b1,n-m)*rf(b2,m-n)*\
|
1216 |
+
... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf])
|
1217 |
+
0.9503682696
|
1218 |
+
>>> hyper2d({'2n-m':a1,'2m-n':a2},{},x,y) # G3
|
1219 |
+
1.029372029
|
1220 |
+
>>> nsum(lambda m,n: rf(a1,2*n-m)*rf(a2,2*m-n)*\
|
1221 |
+
... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf])
|
1222 |
+
1.029372029
|
1223 |
+
>>> hyper2d({'m-n':a1,'m+n':b1,'n':c1},{'m':d},x,y) # H1
|
1224 |
+
-1.605331256
|
1225 |
+
>>> nsum(lambda m,n: rf(a1,m-n)*rf(b1,m+n)*rf(c1,n)/rf(d,m)*\
|
1226 |
+
... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf])
|
1227 |
+
-1.605331256
|
1228 |
+
>>> hyper2d({'m-n':a1,'m':b1,'n':[c1,c2]},{'m':d},x,y) # H2
|
1229 |
+
-2.35405404
|
1230 |
+
>>> nsum(lambda m,n: rf(a1,m-n)*rf(b1,m)*rf(c1,n)*rf(c2,n)/rf(d,m)*\
|
1231 |
+
... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf])
|
1232 |
+
-2.35405404
|
1233 |
+
>>> hyper2d({'2m+n':a1,'n':b1},{'m+n':c1},x,y) # H3
|
1234 |
+
0.974479074
|
1235 |
+
>>> nsum(lambda m,n: rf(a1,2*m+n)*rf(b1,n)/rf(c1,m+n)*\
|
1236 |
+
... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf])
|
1237 |
+
0.974479074
|
1238 |
+
|
1239 |
+
**References**
|
1240 |
+
|
1241 |
+
1. [SrivastavaKarlsson]_
|
1242 |
+
2. [Weisstein]_ http://mathworld.wolfram.com/HornFunction.html
|
1243 |
+
3. [Weisstein]_ http://mathworld.wolfram.com/AppellHypergeometricFunction.html
|
1244 |
+
|
1245 |
+
"""
|
1246 |
+
x = ctx.convert(x)
|
1247 |
+
y = ctx.convert(y)
|
1248 |
+
def parse(dct, key):
|
1249 |
+
args = dct.pop(key, [])
|
1250 |
+
try:
|
1251 |
+
args = list(args)
|
1252 |
+
except TypeError:
|
1253 |
+
args = [args]
|
1254 |
+
return [ctx.convert(arg) for arg in args]
|
1255 |
+
a_s = dict(a)
|
1256 |
+
b_s = dict(b)
|
1257 |
+
a_m = parse(a, 'm')
|
1258 |
+
a_n = parse(a, 'n')
|
1259 |
+
a_m_add_n = parse(a, 'm+n')
|
1260 |
+
a_m_sub_n = parse(a, 'm-n')
|
1261 |
+
a_n_sub_m = parse(a, 'n-m')
|
1262 |
+
a_2m_add_n = parse(a, '2m+n')
|
1263 |
+
a_2m_sub_n = parse(a, '2m-n')
|
1264 |
+
a_2n_sub_m = parse(a, '2n-m')
|
1265 |
+
b_m = parse(b, 'm')
|
1266 |
+
b_n = parse(b, 'n')
|
1267 |
+
b_m_add_n = parse(b, 'm+n')
|
1268 |
+
if a: raise ValueError("unsupported key: %r" % list(a.keys())[0])
|
1269 |
+
if b: raise ValueError("unsupported key: %r" % list(b.keys())[0])
|
1270 |
+
s = 0
|
1271 |
+
outer = ctx.one
|
1272 |
+
m = ctx.mpf(0)
|
1273 |
+
ok_count = 0
|
1274 |
+
prec = ctx.prec
|
1275 |
+
maxterms = kwargs.get('maxterms', 20*prec)
|
1276 |
+
try:
|
1277 |
+
ctx.prec += 10
|
1278 |
+
tol = +ctx.eps
|
1279 |
+
while 1:
|
1280 |
+
inner_sign = 1
|
1281 |
+
outer_sign = 1
|
1282 |
+
inner_a = list(a_n)
|
1283 |
+
inner_b = list(b_n)
|
1284 |
+
outer_a = [a+m for a in a_m]
|
1285 |
+
outer_b = [b+m for b in b_m]
|
1286 |
+
# (a)_{m+n} = (a)_m (a+m)_n
|
1287 |
+
for a in a_m_add_n:
|
1288 |
+
a = a+m
|
1289 |
+
inner_a.append(a)
|
1290 |
+
outer_a.append(a)
|
1291 |
+
# (b)_{m+n} = (b)_m (b+m)_n
|
1292 |
+
for b in b_m_add_n:
|
1293 |
+
b = b+m
|
1294 |
+
inner_b.append(b)
|
1295 |
+
outer_b.append(b)
|
1296 |
+
# (a)_{n-m} = (a-m)_n / (a-m)_m
|
1297 |
+
for a in a_n_sub_m:
|
1298 |
+
inner_a.append(a-m)
|
1299 |
+
outer_b.append(a-m-1)
|
1300 |
+
# (a)_{m-n} = (-1)^(m+n) (1-a-m)_m / (1-a-m)_n
|
1301 |
+
for a in a_m_sub_n:
|
1302 |
+
inner_sign *= (-1)
|
1303 |
+
outer_sign *= (-1)**(m)
|
1304 |
+
inner_b.append(1-a-m)
|
1305 |
+
outer_a.append(-a-m)
|
1306 |
+
# (a)_{2m+n} = (a)_{2m} (a+2m)_n
|
1307 |
+
for a in a_2m_add_n:
|
1308 |
+
inner_a.append(a+2*m)
|
1309 |
+
outer_a.append((a+2*m)*(1+a+2*m))
|
1310 |
+
# (a)_{2m-n} = (-1)^(2m+n) (1-a-2m)_{2m} / (1-a-2m)_n
|
1311 |
+
for a in a_2m_sub_n:
|
1312 |
+
inner_sign *= (-1)
|
1313 |
+
inner_b.append(1-a-2*m)
|
1314 |
+
outer_a.append((a+2*m)*(1+a+2*m))
|
1315 |
+
# (a)_{2n-m} = 4^n ((a-m)/2)_n ((a-m+1)/2)_n / (a-m)_m
|
1316 |
+
for a in a_2n_sub_m:
|
1317 |
+
inner_sign *= 4
|
1318 |
+
inner_a.append(0.5*(a-m))
|
1319 |
+
inner_a.append(0.5*(a-m+1))
|
1320 |
+
outer_b.append(a-m-1)
|
1321 |
+
inner = ctx.hyper(inner_a, inner_b, inner_sign*y,
|
1322 |
+
zeroprec=ctx.prec, **kwargs)
|
1323 |
+
term = outer * inner * outer_sign
|
1324 |
+
if abs(term) < tol:
|
1325 |
+
ok_count += 1
|
1326 |
+
else:
|
1327 |
+
ok_count = 0
|
1328 |
+
if ok_count >= 3 or not outer:
|
1329 |
+
break
|
1330 |
+
s += term
|
1331 |
+
for a in outer_a: outer *= a
|
1332 |
+
for b in outer_b: outer /= b
|
1333 |
+
m += 1
|
1334 |
+
outer = outer * x / m
|
1335 |
+
if m > maxterms:
|
1336 |
+
raise ctx.NoConvergence("maxterms exceeded in hyper2d")
|
1337 |
+
finally:
|
1338 |
+
ctx.prec = prec
|
1339 |
+
return +s
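In addition to the doctests in the docstring, the fully separable case gives a one-line sanity check of the summation loop above: with only plain 'm' and 'n' rising factorials the double series factors into two binomial series (arbitrary parameters):

from mpmath import mp, hyper2d, chop

mp.dps = 25
a, b = mp.mpf('1.5'), mp.mpf('2.25')
x, y = mp.mpf('0.3'), mp.mpf('0.4')
# sum_{m,n} (a)_m (b)_n x^m y^n / (m! n!) = (1-x)^(-a) * (1-y)^(-b)
lhs = hyper2d({'m': [a], 'n': [b]}, {}, x, y)
rhs = (1-x)**(-a) * (1-y)**(-b)
print(chop(lhs - rhs))  # expected: 0.0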
|
1340 |
+
|
1341 |
+
"""
|
1342 |
+
@defun
|
1343 |
+
def kampe_de_feriet(ctx,a,b,c,d,e,f,x,y,**kwargs):
|
1344 |
+
return ctx.hyper2d({'m+n':a,'m':b,'n':c},
|
1345 |
+
{'m+n':d,'m':e,'n':f}, x,y, **kwargs)
|
1346 |
+
"""
|
1347 |
+
|
1348 |
+
@defun
|
1349 |
+
def bihyper(ctx, a_s, b_s, z, **kwargs):
|
1350 |
+
r"""
|
1351 |
+
Evaluates the bilateral hypergeometric series
|
1352 |
+
|
1353 |
+
.. math ::
|
1354 |
+
|
1355 |
+
\,_AH_B(a_1, \ldots, a_A; b_1, \ldots, b_B; z) =
|
1356 |
+
\sum_{n=-\infty}^{\infty}
|
1357 |
+
\frac{(a_1)_n \ldots (a_A)_n}
|
1358 |
+
{(b_1)_n \ldots (b_B)_n} \, z^n
|
1359 |
+
|
1360 |
+
where, for direct convergence, `A = B` and `|z| = 1`, although a
|
1361 |
+
regularized sum exists more generally by considering the
|
1362 |
+
bilateral series as a sum of two ordinary hypergeometric
|
1363 |
+
functions. In order for the series to make sense, none of the
|
1364 |
+
parameters may be integers.
|
1365 |
+
|
1366 |
+
**Examples**
|
1367 |
+
|
1368 |
+
The value of `\,_2H_2` at `z = 1` is given by Dougall's formula::
|
1369 |
+
|
1370 |
+
>>> from mpmath import *
|
1371 |
+
>>> mp.dps = 25; mp.pretty = True
|
1372 |
+
>>> a,b,c,d = 0.5, 1.5, 2.25, 3.25
|
1373 |
+
>>> bihyper([a,b],[c,d],1)
|
1374 |
+
-14.49118026212345786148847
|
1375 |
+
>>> gammaprod([c,d,1-a,1-b,c+d-a-b-1],[c-a,d-a,c-b,d-b])
|
1376 |
+
-14.49118026212345786148847
|
1377 |
+
|
1378 |
+
The regularized function `\,_1H_0` can be expressed as the
|
1379 |
+
sum of one `\,_2F_0` function and one `\,_1F_1` function::
|
1380 |
+
|
1381 |
+
>>> a = mpf(0.25)
|
1382 |
+
>>> z = mpf(0.75)
|
1383 |
+
>>> bihyper([a], [], z)
|
1384 |
+
(0.2454393389657273841385582 + 0.2454393389657273841385582j)
|
1385 |
+
>>> hyper([a,1],[],z) + (hyper([1],[1-a],-1/z)-1)
|
1386 |
+
(0.2454393389657273841385582 + 0.2454393389657273841385582j)
|
1387 |
+
>>> hyper([a,1],[],z) + hyper([1],[2-a],-1/z)/z/(a-1)
|
1388 |
+
(0.2454393389657273841385582 + 0.2454393389657273841385582j)
|
1389 |
+
|
1390 |
+
**References**
|
1391 |
+
|
1392 |
+
1. [Slater]_ (chapter 6: "Bilateral Series", pp. 180-189)
|
1393 |
+
2. [Wikipedia]_ http://en.wikipedia.org/wiki/Bilateral_hypergeometric_series
|
1394 |
+
|
1395 |
+
"""
|
1396 |
+
    z = ctx.convert(z)
    c_s = a_s + b_s
    p = len(a_s)
    q = len(b_s)
    if (p, q) == (0,0) or (p, q) == (1,1):
        return ctx.zero * z
    neg = (p-q) % 2
    def h(*c_s):
        a_s = list(c_s[:p])
        b_s = list(c_s[p:])
        aa_s = [2-b for b in b_s]
        bb_s = [2-a for a in a_s]
        rp = [(-1)**neg * z] + [1-b for b in b_s] + [1-a for a in a_s]
        rc = [-1] + [1]*len(b_s) + [-1]*len(a_s)
        T1 = [], [], [], [], a_s + [1], b_s, z
        T2 = rp, rc, [], [], aa_s + [1], bb_s, (-1)**neg / z
        return T1, T2
    return ctx.hypercomb(h, c_s, **kwargs)
|
env-llmeval/lib/python3.10/site-packages/mpmath/functions/orthogonal.py
ADDED
@@ -0,0 +1,493 @@
1 |
+
from .functions import defun, defun_wrapped
|
2 |
+
|
3 |
+
def _hermite_param(ctx, n, z, parabolic_cylinder):
|
4 |
+
"""
|
5 |
+
Combined calculation of the Hermite polynomial H_n(z) (and its
|
6 |
+
generalization to complex n) and the parabolic cylinder
|
7 |
+
function D.
|
8 |
+
"""
|
9 |
+
n, ntyp = ctx._convert_param(n)
|
10 |
+
z = ctx.convert(z)
|
11 |
+
q = -ctx.mpq_1_2
|
12 |
+
# For re(z) > 0, 2F0 -- http://functions.wolfram.com/
|
13 |
+
# HypergeometricFunctions/HermiteHGeneral/06/02/0009/
|
14 |
+
# Otherwise, there is a reflection formula
|
15 |
+
# 2F0 + http://functions.wolfram.com/HypergeometricFunctions/
|
16 |
+
# HermiteHGeneral/16/01/01/0006/
|
17 |
+
#
|
18 |
+
# TODO:
|
19 |
+
# An alternative would be to use
|
20 |
+
# http://functions.wolfram.com/HypergeometricFunctions/
|
21 |
+
# HermiteHGeneral/06/02/0006/
|
22 |
+
#
|
23 |
+
# Also, the 1F1 expansion
|
24 |
+
# http://functions.wolfram.com/HypergeometricFunctions/
|
25 |
+
# HermiteHGeneral/26/01/02/0001/
|
26 |
+
# should probably be used for tiny z
|
27 |
+
if not z:
|
28 |
+
T1 = [2, ctx.pi], [n, 0.5], [], [q*(n-1)], [], [], 0
|
29 |
+
if parabolic_cylinder:
|
30 |
+
T1[1][0] += q*n
|
31 |
+
return T1,
|
32 |
+
can_use_2f0 = ctx.isnpint(-n) or ctx.re(z) > 0 or \
|
33 |
+
(ctx.re(z) == 0 and ctx.im(z) > 0)
|
34 |
+
expprec = ctx.prec*4 + 20
|
35 |
+
if parabolic_cylinder:
|
36 |
+
u = ctx.fmul(ctx.fmul(z,z,prec=expprec), -0.25, exact=True)
|
37 |
+
w = ctx.fmul(z, ctx.sqrt(0.5,prec=expprec), prec=expprec)
|
38 |
+
else:
|
39 |
+
w = z
|
40 |
+
w2 = ctx.fmul(w, w, prec=expprec)
|
41 |
+
rw2 = ctx.fdiv(1, w2, prec=expprec)
|
42 |
+
nrw2 = ctx.fneg(rw2, exact=True)
|
43 |
+
nw = ctx.fneg(w, exact=True)
|
44 |
+
if can_use_2f0:
|
45 |
+
T1 = [2, w], [n, n], [], [], [q*n, q*(n-1)], [], nrw2
|
46 |
+
terms = [T1]
|
47 |
+
else:
|
48 |
+
T1 = [2, nw], [n, n], [], [], [q*n, q*(n-1)], [], nrw2
|
49 |
+
T2 = [2, ctx.pi, nw], [n+2, 0.5, 1], [], [q*n], [q*(n-1)], [1-q], w2
|
50 |
+
terms = [T1,T2]
|
51 |
+
# Multiply by prefactor for D_n
|
52 |
+
if parabolic_cylinder:
|
53 |
+
expu = ctx.exp(u)
|
54 |
+
for i in range(len(terms)):
|
55 |
+
terms[i][1][0] += q*n
|
56 |
+
terms[i][0].append(expu)
|
57 |
+
terms[i][1].append(1)
|
58 |
+
return tuple(terms)
|
59 |
+
|
60 |
+
@defun
|
61 |
+
def hermite(ctx, n, z, **kwargs):
|
62 |
+
return ctx.hypercomb(lambda: _hermite_param(ctx, n, z, 0), [], **kwargs)
|
63 |
+
|
64 |
+
@defun
|
65 |
+
def pcfd(ctx, n, z, **kwargs):
|
66 |
+
r"""
|
67 |
+
Gives the parabolic cylinder function in Whittaker's notation
|
68 |
+
`D_n(z) = U(-n-1/2, z)` (see :func:`~mpmath.pcfu`).
|
69 |
+
It solves the differential equation
|
70 |
+
|
71 |
+
.. math ::
|
72 |
+
|
73 |
+
y'' + \left(n + \frac{1}{2} - \frac{1}{4} z^2\right) y = 0.
|
74 |
+
|
75 |
+
and can be represented in terms of Hermite polynomials
|
76 |
+
(see :func:`~mpmath.hermite`) as
|
77 |
+
|
78 |
+
.. math ::
|
79 |
+
|
80 |
+
D_n(z) = 2^{-n/2} e^{-z^2/4} H_n\left(\frac{z}{\sqrt{2}}\right).
|
81 |
+
|
82 |
+
**Plots**
|
83 |
+
|
84 |
+
.. literalinclude :: /plots/pcfd.py
|
85 |
+
.. image :: /plots/pcfd.png
|
86 |
+
|
87 |
+
**Examples**
|
88 |
+
|
89 |
+
>>> from mpmath import *
|
90 |
+
>>> mp.dps = 25; mp.pretty = True
|
91 |
+
>>> pcfd(0,0); pcfd(1,0); pcfd(2,0); pcfd(3,0)
|
92 |
+
1.0
|
93 |
+
0.0
|
94 |
+
-1.0
|
95 |
+
0.0
|
96 |
+
>>> pcfd(4,0); pcfd(-3,0)
|
97 |
+
3.0
|
98 |
+
0.6266570686577501256039413
|
99 |
+
>>> pcfd('1/2', 2+3j)
|
100 |
+
(-5.363331161232920734849056 - 3.858877821790010714163487j)
|
101 |
+
>>> pcfd(2, -10)
|
102 |
+
1.374906442631438038871515e-9
|
103 |
+
|
104 |
+
Verifying the differential equation::
|
105 |
+
|
106 |
+
>>> n = mpf(2.5)
|
107 |
+
>>> y = lambda z: pcfd(n,z)
|
108 |
+
>>> z = 1.75
|
109 |
+
>>> chop(diff(y,z,2) + (n+0.5-0.25*z**2)*y(z))
|
110 |
+
0.0
|
111 |
+
|
112 |
+
Rational Taylor series expansion when `n` is an integer::
|
113 |
+
|
114 |
+
>>> taylor(lambda z: pcfd(5,z), 0, 7)
|
115 |
+
[0.0, 15.0, 0.0, -13.75, 0.0, 3.96875, 0.0, -0.6015625]
|
116 |
+
|
117 |
+
"""
|
118 |
+
return ctx.hypercomb(lambda: _hermite_param(ctx, n, z, 1), [], **kwargs)
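Both wrappers funnel into _hermite_param above, so the classical special values and the Hermite connection quoted in the pcfd docstring give a direct check (values arbitrary; the relation also holds for non-integer n):

from mpmath import mp, hermite, pcfd, exp, sqrt, chop

mp.dps = 25
x = mp.mpf('0.8')
# H_3(x) = 8 x^3 - 12 x
print(chop(hermite(3, x) - (8*x**3 - 12*x)))  # expected: 0.0
# D_n(z) = 2^(-n/2) exp(-z^2/4) H_n(z / sqrt(2))
n, z = mp.mpf('2.5'), mp.mpf('1.3')
print(chop(pcfd(n, z) - 2**(-n/2)*exp(-z**2/4)*hermite(n, z/sqrt(2))))  # expected: 0.0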
|
119 |
+
|
120 |
+
@defun
|
121 |
+
def pcfu(ctx, a, z, **kwargs):
|
122 |
+
r"""
|
123 |
+
Gives the parabolic cylinder function `U(a,z)`, which may be
|
124 |
+
defined for `\Re(z) > 0` in terms of the confluent
|
125 |
+
U-function (see :func:`~mpmath.hyperu`) by
|
126 |
+
|
127 |
+
.. math ::
|
128 |
+
|
129 |
+
U(a,z) = 2^{-\frac{1}{4}-\frac{a}{2}} e^{-\frac{1}{4} z^2}
|
130 |
+
U\left(\frac{a}{2}+\frac{1}{4},
|
131 |
+
\frac{1}{2}, \frac{1}{2}z^2\right)
|
132 |
+
|
133 |
+
or, for arbitrary `z`,
|
134 |
+
|
135 |
+
.. math ::
|
136 |
+
|
137 |
+
e^{-\frac{1}{4}z^2} U(a,z) =
|
138 |
+
U(a,0) \,_1F_1\left(-\tfrac{a}{2}+\tfrac{1}{4};
|
139 |
+
\tfrac{1}{2}; -\tfrac{1}{2}z^2\right) +
|
140 |
+
U'(a,0) z \,_1F_1\left(-\tfrac{a}{2}+\tfrac{3}{4};
|
141 |
+
\tfrac{3}{2}; -\tfrac{1}{2}z^2\right).
|
142 |
+
|
143 |
+
**Examples**
|
144 |
+
|
145 |
+
Connection to other functions::
|
146 |
+
|
147 |
+
>>> from mpmath import *
|
148 |
+
>>> mp.dps = 25; mp.pretty = True
|
149 |
+
>>> z = mpf(3)
|
150 |
+
>>> pcfu(0.5,z)
|
151 |
+
0.03210358129311151450551963
|
152 |
+
>>> sqrt(pi/2)*exp(z**2/4)*erfc(z/sqrt(2))
|
153 |
+
0.03210358129311151450551963
|
154 |
+
>>> pcfu(0.5,-z)
|
155 |
+
23.75012332835297233711255
|
156 |
+
>>> sqrt(pi/2)*exp(z**2/4)*erfc(-z/sqrt(2))
|
157 |
+
23.75012332835297233711255
|
158 |
+
>>> pcfu(0.5,-z)
|
159 |
+
23.75012332835297233711255
|
160 |
+
>>> sqrt(pi/2)*exp(z**2/4)*erfc(-z/sqrt(2))
|
161 |
+
23.75012332835297233711255
|
162 |
+
|
163 |
+
"""
|
164 |
+
n, _ = ctx._convert_param(a)
|
165 |
+
return ctx.pcfd(-n-ctx.mpq_1_2, z)
|
166 |
+
|
167 |
+
@defun
|
168 |
+
def pcfv(ctx, a, z, **kwargs):
|
169 |
+
r"""
|
170 |
+
Gives the parabolic cylinder function `V(a,z)`, which can be
|
171 |
+
represented in terms of :func:`~mpmath.pcfu` as
|
172 |
+
|
173 |
+
.. math ::
|
174 |
+
|
175 |
+
V(a,z) = \frac{\Gamma(a+\tfrac{1}{2}) \left(U(a,-z)-\sin(\pi a) U(a,z)\right)}{\pi}.
|
176 |
+
|
177 |
+
**Examples**
|
178 |
+
|
179 |
+
Wronskian relation between `U` and `V`::
|
180 |
+
|
181 |
+
>>> from mpmath import *
|
182 |
+
>>> mp.dps = 25; mp.pretty = True
|
183 |
+
>>> a, z = 2, 3
|
184 |
+
>>> pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z)
|
185 |
+
0.7978845608028653558798921
|
186 |
+
>>> sqrt(2/pi)
|
187 |
+
0.7978845608028653558798921
|
188 |
+
>>> a, z = 2.5, 3
|
189 |
+
>>> pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z)
|
190 |
+
0.7978845608028653558798921
|
191 |
+
>>> a, z = 0.25, -1
|
192 |
+
>>> pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z)
|
193 |
+
0.7978845608028653558798921
|
194 |
+
>>> a, z = 2+1j, 2+3j
|
195 |
+
>>> chop(pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z))
|
196 |
+
0.7978845608028653558798921
|
197 |
+
|
198 |
+
"""
|
199 |
+
n, ntype = ctx._convert_param(a)
|
200 |
+
z = ctx.convert(z)
|
201 |
+
q = ctx.mpq_1_2
|
202 |
+
r = ctx.mpq_1_4
|
203 |
+
if ntype == 'Q' and ctx.isint(n*2):
|
204 |
+
# Faster for half-integers
|
205 |
+
def h():
|
206 |
+
jz = ctx.fmul(z, -1j, exact=True)
|
207 |
+
T1terms = _hermite_param(ctx, -n-q, z, 1)
|
208 |
+
T2terms = _hermite_param(ctx, n-q, jz, 1)
|
209 |
+
for T in T1terms:
|
210 |
+
T[0].append(1j)
|
211 |
+
T[1].append(1)
|
212 |
+
T[3].append(q-n)
|
213 |
+
u = ctx.expjpi((q*n-r)) * ctx.sqrt(2/ctx.pi)
|
214 |
+
for T in T2terms:
|
215 |
+
T[0].append(u)
|
216 |
+
T[1].append(1)
|
217 |
+
return T1terms + T2terms
|
218 |
+
v = ctx.hypercomb(h, [], **kwargs)
|
219 |
+
if ctx._is_real_type(n) and ctx._is_real_type(z):
|
220 |
+
v = ctx._re(v)
|
221 |
+
return v
|
222 |
+
else:
|
223 |
+
def h(n):
|
224 |
+
w = ctx.square_exp_arg(z, -0.25)
|
225 |
+
u = ctx.square_exp_arg(z, 0.5)
|
226 |
+
e = ctx.exp(w)
|
227 |
+
l = [ctx.pi, q, ctx.exp(w)]
|
228 |
+
Y1 = l, [-q, n*q+r, 1], [r-q*n], [], [q*n+r], [q], u
|
229 |
+
Y2 = l + [z], [-q, n*q-r, 1, 1], [1-r-q*n], [], [q*n+1-r], [1+q], u
|
230 |
+
c, s = ctx.cospi_sinpi(r+q*n)
|
231 |
+
Y1[0].append(s)
|
232 |
+
Y2[0].append(c)
|
233 |
+
for Y in (Y1, Y2):
|
234 |
+
Y[1].append(1)
|
235 |
+
Y[3].append(q-n)
|
236 |
+
return Y1, Y2
|
237 |
+
return ctx.hypercomb(h, [n], **kwargs)
|
238 |
+
|
239 |
+
|
240 |
+
@defun
|
241 |
+
def pcfw(ctx, a, z, **kwargs):
|
242 |
+
r"""
|
243 |
+
Gives the parabolic cylinder function `W(a,z)` defined in (DLMF 12.14).
|
244 |
+
|
245 |
+
**Examples**
|
246 |
+
|
247 |
+
Value at the origin::
|
248 |
+
|
249 |
+
>>> from mpmath import *
|
250 |
+
>>> mp.dps = 25; mp.pretty = True
|
251 |
+
>>> a = mpf(0.25)
|
252 |
+
>>> pcfw(a,0)
|
253 |
+
0.9722833245718180765617104
|
254 |
+
>>> power(2,-0.75)*sqrt(abs(gamma(0.25+0.5j*a)/gamma(0.75+0.5j*a)))
|
255 |
+
0.9722833245718180765617104
|
256 |
+
>>> diff(pcfw,(a,0),(0,1))
|
257 |
+
-0.5142533944210078966003624
|
258 |
+
>>> -power(2,-0.25)*sqrt(abs(gamma(0.75+0.5j*a)/gamma(0.25+0.5j*a)))
|
259 |
+
-0.5142533944210078966003624
|
260 |
+
|
261 |
+
"""
|
262 |
+
n, _ = ctx._convert_param(a)
|
263 |
+
z = ctx.convert(z)
|
264 |
+
def terms():
|
265 |
+
phi2 = ctx.arg(ctx.gamma(0.5 + ctx.j*n))
|
266 |
+
phi2 = (ctx.loggamma(0.5+ctx.j*n) - ctx.loggamma(0.5-ctx.j*n))/2j
|
267 |
+
rho = ctx.pi/8 + 0.5*phi2
|
268 |
+
# XXX: cancellation computing k
|
269 |
+
k = ctx.sqrt(1 + ctx.exp(2*ctx.pi*n)) - ctx.exp(ctx.pi*n)
|
270 |
+
C = ctx.sqrt(k/2) * ctx.exp(0.25*ctx.pi*n)
|
271 |
+
yield C * ctx.expj(rho) * ctx.pcfu(ctx.j*n, z*ctx.expjpi(-0.25))
|
272 |
+
yield C * ctx.expj(-rho) * ctx.pcfu(-ctx.j*n, z*ctx.expjpi(0.25))
|
273 |
+
v = ctx.sum_accurately(terms)
|
274 |
+
if ctx._is_real_type(n) and ctx._is_real_type(z):
|
275 |
+
v = ctx._re(v)
|
276 |
+
return v
|
277 |
+
|
278 |
+
"""
|
279 |
+
Even/odd PCFs. Useful?
|
280 |
+
|
281 |
+
@defun
|
282 |
+
def pcfy1(ctx, a, z, **kwargs):
|
283 |
+
a, _ = ctx._convert_param(n)
|
284 |
+
z = ctx.convert(z)
|
285 |
+
def h():
|
286 |
+
w = ctx.square_exp_arg(z)
|
287 |
+
w1 = ctx.fmul(w, -0.25, exact=True)
|
288 |
+
w2 = ctx.fmul(w, 0.5, exact=True)
|
289 |
+
e = ctx.exp(w1)
|
290 |
+
return [e], [1], [], [], [ctx.mpq_1_2*a+ctx.mpq_1_4], [ctx.mpq_1_2], w2
|
291 |
+
return ctx.hypercomb(h, [], **kwargs)
|
292 |
+
|
293 |
+
@defun
|
294 |
+
def pcfy2(ctx, a, z, **kwargs):
|
295 |
+
a, _ = ctx._convert_param(n)
|
296 |
+
z = ctx.convert(z)
|
297 |
+
def h():
|
298 |
+
w = ctx.square_exp_arg(z)
|
299 |
+
w1 = ctx.fmul(w, -0.25, exact=True)
|
300 |
+
w2 = ctx.fmul(w, 0.5, exact=True)
|
301 |
+
e = ctx.exp(w1)
|
302 |
+
return [e, z], [1, 1], [], [], [ctx.mpq_1_2*a+ctx.mpq_3_4], \
|
303 |
+
[ctx.mpq_3_2], w2
|
304 |
+
return ctx.hypercomb(h, [], **kwargs)
|
305 |
+
"""
|
306 |
+
|
307 |
+
@defun_wrapped
|
308 |
+
def gegenbauer(ctx, n, a, z, **kwargs):
|
309 |
+
# Special cases: a+0.5, a*2 poles
|
310 |
+
if ctx.isnpint(a):
|
311 |
+
return 0*(z+n)
|
312 |
+
if ctx.isnpint(a+0.5):
|
313 |
+
# TODO: something else is required here
|
314 |
+
# E.g.: gegenbauer(-2, -0.5, 3) == -12
|
315 |
+
if ctx.isnpint(n+1):
|
316 |
+
raise NotImplementedError("Gegenbauer function with two limits")
|
317 |
+
def h(a):
|
318 |
+
a2 = 2*a
|
319 |
+
T = [], [], [n+a2], [n+1, a2], [-n, n+a2], [a+0.5], 0.5*(1-z)
|
320 |
+
return [T]
|
321 |
+
return ctx.hypercomb(h, [a], **kwargs)
|
322 |
+
def h(n):
|
323 |
+
a2 = 2*a
|
324 |
+
T = [], [], [n+a2], [n+1, a2], [-n, n+a2], [a+0.5], 0.5*(1-z)
|
325 |
+
return [T]
|
326 |
+
return ctx.hypercomb(h, [n], **kwargs)
|
327 |
+
|
328 |
+
@defun_wrapped
|
329 |
+
def jacobi(ctx, n, a, b, x, **kwargs):
|
330 |
+
if not ctx.isnpint(a):
|
331 |
+
def h(n):
|
332 |
+
return (([], [], [a+n+1], [n+1, a+1], [-n, a+b+n+1], [a+1], (1-x)*0.5),)
|
333 |
+
return ctx.hypercomb(h, [n], **kwargs)
|
334 |
+
if not ctx.isint(b):
|
335 |
+
def h(n, a):
|
336 |
+
return (([], [], [-b], [n+1, -b-n], [-n, a+b+n+1], [b+1], (x+1)*0.5),)
|
337 |
+
return ctx.hypercomb(h, [n, a], **kwargs)
|
338 |
+
# XXX: determine appropriate limit
|
339 |
+
return ctx.binomial(n+a,n) * ctx.hyp2f1(-n,1+n+a+b,a+1,(1-x)/2, **kwargs)
|
340 |
+
|
341 |
+
@defun_wrapped
|
342 |
+
def laguerre(ctx, n, a, z, **kwargs):
|
343 |
+
# XXX: limits, poles
|
344 |
+
#if ctx.isnpint(n):
|
345 |
+
# return 0*(a+z)
|
346 |
+
def h(a):
|
347 |
+
return (([], [], [a+n+1], [a+1, n+1], [-n], [a+1], z),)
|
348 |
+
return ctx.hypercomb(h, [a], **kwargs)
|
349 |
+
|
350 |
+
@defun_wrapped
|
351 |
+
def legendre(ctx, n, x, **kwargs):
|
352 |
+
if ctx.isint(n):
|
353 |
+
n = int(n)
|
354 |
+
# Accuracy near zeros
|
355 |
+
if (n + (n < 0)) & 1:
|
356 |
+
if not x:
|
357 |
+
return x
|
358 |
+
mag = ctx.mag(x)
|
359 |
+
if mag < -2*ctx.prec-10:
|
360 |
+
return x
|
361 |
+
if mag < -5:
|
362 |
+
ctx.prec += -mag
|
363 |
+
return ctx.hyp2f1(-n,n+1,1,(1-x)/2, **kwargs)
|
364 |
+
|
365 |
+
@defun
|
366 |
+
def legenp(ctx, n, m, z, type=2, **kwargs):
|
367 |
+
# Legendre function, 1st kind
|
368 |
+
n = ctx.convert(n)
|
369 |
+
m = ctx.convert(m)
|
370 |
+
# Faster
|
371 |
+
if not m:
|
372 |
+
return ctx.legendre(n, z, **kwargs)
|
373 |
+
# TODO: correct evaluation at singularities
|
374 |
+
if type == 2:
|
375 |
+
def h(n,m):
|
376 |
+
g = m*0.5
|
377 |
+
T = [1+z, 1-z], [g, -g], [], [1-m], [-n, n+1], [1-m], 0.5*(1-z)
|
378 |
+
return (T,)
|
379 |
+
return ctx.hypercomb(h, [n,m], **kwargs)
|
380 |
+
if type == 3:
|
381 |
+
def h(n,m):
|
382 |
+
g = m*0.5
|
383 |
+
T = [z+1, z-1], [g, -g], [], [1-m], [-n, n+1], [1-m], 0.5*(1-z)
|
384 |
+
return (T,)
|
385 |
+
return ctx.hypercomb(h, [n,m], **kwargs)
|
386 |
+
raise ValueError("requires type=2 or type=3")
|
387 |
+
|
388 |
+
@defun
|
389 |
+
def legenq(ctx, n, m, z, type=2, **kwargs):
|
390 |
+
# Legendre function, 2nd kind
|
391 |
+
n = ctx.convert(n)
|
392 |
+
m = ctx.convert(m)
|
393 |
+
z = ctx.convert(z)
|
394 |
+
if z in (1, -1):
|
395 |
+
#if ctx.isint(m):
|
396 |
+
# return ctx.nan
|
397 |
+
#return ctx.inf # unsigned
|
398 |
+
return ctx.nan
|
399 |
+
if type == 2:
|
400 |
+
def h(n, m):
|
401 |
+
cos, sin = ctx.cospi_sinpi(m)
|
402 |
+
s = 2 * sin / ctx.pi
|
403 |
+
c = cos
|
404 |
+
a = 1+z
|
405 |
+
b = 1-z
|
406 |
+
u = m/2
|
407 |
+
w = (1-z)/2
|
408 |
+
T1 = [s, c, a, b], [-1, 1, u, -u], [], [1-m], \
|
409 |
+
[-n, n+1], [1-m], w
|
410 |
+
T2 = [-s, a, b], [-1, -u, u], [n+m+1], [n-m+1, m+1], \
|
411 |
+
[-n, n+1], [m+1], w
|
412 |
+
return T1, T2
|
413 |
+
return ctx.hypercomb(h, [n, m], **kwargs)
|
414 |
+
if type == 3:
|
415 |
+
# The following is faster when there only is a single series
|
416 |
+
# Note: not valid for -1 < z < 0 (?)
|
417 |
+
if abs(z) > 1:
|
418 |
+
def h(n, m):
|
419 |
+
T1 = [ctx.expjpi(m), 2, ctx.pi, z, z-1, z+1], \
|
420 |
+
[1, -n-1, 0.5, -n-m-1, 0.5*m, 0.5*m], \
|
421 |
+
[n+m+1], [n+1.5], \
|
422 |
+
[0.5*(2+n+m), 0.5*(1+n+m)], [n+1.5], z**(-2)
|
423 |
+
return [T1]
|
424 |
+
return ctx.hypercomb(h, [n, m], **kwargs)
|
425 |
+
else:
|
426 |
+
# not valid for 1 < z < inf ?
|
427 |
+
def h(n, m):
|
428 |
+
s = 2 * ctx.sinpi(m) / ctx.pi
|
429 |
+
c = ctx.expjpi(m)
|
430 |
+
a = 1+z
|
431 |
+
b = z-1
|
432 |
+
u = m/2
|
433 |
+
w = (1-z)/2
|
434 |
+
T1 = [s, c, a, b], [-1, 1, u, -u], [], [1-m], \
|
435 |
+
[-n, n+1], [1-m], w
|
436 |
+
T2 = [-s, c, a, b], [-1, 1, -u, u], [n+m+1], [n-m+1, m+1], \
|
437 |
+
[-n, n+1], [m+1], w
|
438 |
+
return T1, T2
|
439 |
+
return ctx.hypercomb(h, [n, m], **kwargs)
|
440 |
+
raise ValueError("requires type=2 or type=3")
|
441 |
+
|
442 |
+
@defun_wrapped
|
443 |
+
def chebyt(ctx, n, x, **kwargs):
|
444 |
+
if (not x) and ctx.isint(n) and int(ctx._re(n)) % 2 == 1:
|
445 |
+
return x * 0
|
446 |
+
return ctx.hyp2f1(-n,n,(1,2),(1-x)/2, **kwargs)
|
447 |
+
|
448 |
+
@defun_wrapped
|
449 |
+
def chebyu(ctx, n, x, **kwargs):
|
450 |
+
if (not x) and ctx.isint(n) and int(ctx._re(n)) % 2 == 1:
|
451 |
+
return x * 0
|
452 |
+
return (n+1) * ctx.hyp2f1(-n, n+2, (3,2), (1-x)/2, **kwargs)
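The Chebyshev wrappers above reduce to hyp2f1; their defining trigonometric identities give a quick sanity check (arbitrary t):

from mpmath import mp, chebyt, chebyu, cos, sin, chop

mp.dps = 25
t = mp.mpf('0.7')
n = 5
# T_n(cos t) = cos(n t)   and   U_n(cos t) = sin((n+1) t) / sin(t)
print(chop(chebyt(n, cos(t)) - cos(n*t)))             # expected: 0.0
print(chop(chebyu(n, cos(t)) - sin((n+1)*t)/sin(t)))  # expected: 0.0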
|
453 |
+
|
454 |
+
@defun
|
455 |
+
def spherharm(ctx, l, m, theta, phi, **kwargs):
|
456 |
+
l = ctx.convert(l)
|
457 |
+
m = ctx.convert(m)
|
458 |
+
theta = ctx.convert(theta)
|
459 |
+
phi = ctx.convert(phi)
|
460 |
+
l_isint = ctx.isint(l)
|
461 |
+
l_natural = l_isint and l >= 0
|
462 |
+
m_isint = ctx.isint(m)
|
463 |
+
if l_isint and l < 0 and m_isint:
|
464 |
+
return ctx.spherharm(-(l+1), m, theta, phi, **kwargs)
|
465 |
+
if theta == 0 and m_isint and m < 0:
|
466 |
+
return ctx.zero * 1j
|
467 |
+
if l_natural and m_isint:
|
468 |
+
if abs(m) > l:
|
469 |
+
return ctx.zero * 1j
|
470 |
+
# http://functions.wolfram.com/Polynomials/
|
471 |
+
# SphericalHarmonicY/26/01/02/0004/
|
472 |
+
def h(l,m):
|
473 |
+
absm = abs(m)
|
474 |
+
C = [-1, ctx.expj(m*phi),
|
475 |
+
(2*l+1)*ctx.fac(l+absm)/ctx.pi/ctx.fac(l-absm),
|
476 |
+
ctx.sin(theta)**2,
|
477 |
+
ctx.fac(absm), 2]
|
478 |
+
P = [0.5*m*(ctx.sign(m)+1), 1, 0.5, 0.5*absm, -1, -absm-1]
|
479 |
+
return ((C, P, [], [], [absm-l, l+absm+1], [absm+1],
|
480 |
+
ctx.sin(0.5*theta)**2),)
|
481 |
+
else:
|
482 |
+
# http://functions.wolfram.com/HypergeometricFunctions/
|
483 |
+
# SphericalHarmonicYGeneral/26/01/02/0001/
|
484 |
+
def h(l,m):
|
485 |
+
if ctx.isnpint(l-m+1) or ctx.isnpint(l+m+1) or ctx.isnpint(1-m):
|
486 |
+
return (([0], [-1], [], [], [], [], 0),)
|
487 |
+
cos, sin = ctx.cos_sin(0.5*theta)
|
488 |
+
C = [0.5*ctx.expj(m*phi), (2*l+1)/ctx.pi,
|
489 |
+
ctx.gamma(l-m+1), ctx.gamma(l+m+1),
|
490 |
+
cos**2, sin**2]
|
491 |
+
P = [1, 0.5, 0.5, -0.5, 0.5*m, -0.5*m]
|
492 |
+
return ((C, P, [], [1-m], [-l,l+1], [1-m], sin**2),)
|
493 |
+
return ctx.hypercomb(h, [l,m], **kwargs)
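A couple of low-order values provide a quick check of the natural-l, integer-m branch above; assuming the orthonormal (Condon-Shortley) convention that mpmath documents, Y_0^0 = 1/(2 sqrt(pi)) and Y_1^0 = sqrt(3/(4 pi)) cos(theta):

from mpmath import mp, spherharm, sqrt, pi, cos, chop

mp.dps = 25
theta, phi = mp.mpf('0.9'), mp.mpf('2.1')
print(chop(spherharm(0, 0, theta, phi) - 1/(2*sqrt(pi))))             # expected: 0.0
print(chop(spherharm(1, 0, theta, phi) - sqrt(3/(4*pi))*cos(theta)))  # expected: 0.0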
|
env-llmeval/lib/python3.10/site-packages/mpmath/functions/qfunctions.py
ADDED
@@ -0,0 +1,280 @@
1 |
+
from .functions import defun, defun_wrapped
|
2 |
+
|
3 |
+
@defun
|
4 |
+
def qp(ctx, a, q=None, n=None, **kwargs):
|
5 |
+
r"""
|
6 |
+
Evaluates the q-Pochhammer symbol (or q-rising factorial)
|
7 |
+
|
8 |
+
.. math ::
|
9 |
+
|
10 |
+
(a; q)_n = \prod_{k=0}^{n-1} (1-a q^k)
|
11 |
+
|
12 |
+
where `n = \infty` is permitted if `|q| < 1`. Called with two arguments,
|
13 |
+
``qp(a,q)`` computes `(a;q)_{\infty}`; with a single argument, ``qp(q)``
|
14 |
+
computes `(q;q)_{\infty}`. The special case
|
15 |
+
|
16 |
+
.. math ::
|
17 |
+
|
18 |
+
\phi(q) = (q; q)_{\infty} = \prod_{k=1}^{\infty} (1-q^k) =
|
19 |
+
\sum_{k=-\infty}^{\infty} (-1)^k q^{(3k^2-k)/2}
|
20 |
+
|
21 |
+
is also known as the Euler function, or (up to a factor `q^{-1/24}`)
|
22 |
+
the Dedekind eta function.
|
23 |
+
|
24 |
+
**Examples**
|
25 |
+
|
26 |
+
If `n` is a positive integer, the function amounts to a finite product::
|
27 |
+
|
28 |
+
>>> from mpmath import *
|
29 |
+
>>> mp.dps = 25; mp.pretty = True
|
30 |
+
>>> qp(2,3,5)
|
31 |
+
-725305.0
|
32 |
+
>>> fprod(1-2*3**k for k in range(5))
|
33 |
+
-725305.0
|
34 |
+
>>> qp(2,3,0)
|
35 |
+
1.0
|
36 |
+
|
37 |
+
Complex arguments are allowed::
|
38 |
+
|
39 |
+
>>> qp(2-1j, 0.75j)
|
40 |
+
(0.4628842231660149089976379 + 4.481821753552703090628793j)
|
41 |
+
|
42 |
+
The regular Pochhammer symbol `(a)_n` is obtained in the
|
43 |
+
following limit as `q \to 1`::
|
44 |
+
|
45 |
+
>>> a, n = 4, 7
|
46 |
+
>>> limit(lambda q: qp(q**a,q,n) / (1-q)**n, 1)
|
47 |
+
604800.0
|
48 |
+
>>> rf(a,n)
|
49 |
+
604800.0
|
50 |
+
|
51 |
+
The Taylor series of the reciprocal Euler function gives
|
52 |
+
the partition function `P(n)`, i.e. the number of ways of writing
|
53 |
+
`n` as a sum of positive integers::
|
54 |
+
|
55 |
+
>>> taylor(lambda q: 1/qp(q), 0, 10)
|
56 |
+
[1.0, 1.0, 2.0, 3.0, 5.0, 7.0, 11.0, 15.0, 22.0, 30.0, 42.0]
|
57 |
+
|
58 |
+
Special values include::
|
59 |
+
|
60 |
+
>>> qp(0)
|
61 |
+
1.0
|
62 |
+
>>> findroot(diffun(qp), -0.4) # location of maximum
|
63 |
+
-0.4112484791779547734440257
|
64 |
+
>>> qp(_)
|
65 |
+
1.228348867038575112586878
|
66 |
+
|
67 |
+
The q-Pochhammer symbol is related to the Jacobi theta functions.
|
68 |
+
For example, the following identity holds::
|
69 |
+
|
70 |
+
>>> q = mpf(0.5) # arbitrary
|
71 |
+
>>> qp(q)
|
72 |
+
0.2887880950866024212788997
|
73 |
+
>>> root(3,-2)*root(q,-24)*jtheta(2,pi/6,root(q,6))
|
74 |
+
0.2887880950866024212788997
|
75 |
+
|
76 |
+
"""
|
77 |
+
a = ctx.convert(a)
|
78 |
+
if n is None:
|
79 |
+
n = ctx.inf
|
80 |
+
else:
|
81 |
+
n = ctx.convert(n)
|
82 |
+
if n < 0:
|
83 |
+
raise ValueError("n cannot be negative")
|
84 |
+
if q is None:
|
85 |
+
q = a
|
86 |
+
else:
|
87 |
+
q = ctx.convert(q)
|
88 |
+
if n == 0:
|
89 |
+
return ctx.one + 0*(a+q)
|
90 |
+
infinite = (n == ctx.inf)
|
91 |
+
same = (a == q)
|
92 |
+
if infinite:
|
93 |
+
if abs(q) >= 1:
|
94 |
+
if same and (q == -1 or q == 1):
|
95 |
+
return ctx.zero * q
|
96 |
+
raise ValueError("q-function only defined for |q| < 1")
|
97 |
+
elif q == 0:
|
98 |
+
return ctx.one - a
|
99 |
+
maxterms = kwargs.get('maxterms', 50*ctx.prec)
|
100 |
+
if infinite and same:
|
101 |
+
# Euler's pentagonal theorem
|
102 |
+
def terms():
|
103 |
+
t = 1
|
104 |
+
yield t
|
105 |
+
k = 1
|
106 |
+
x1 = q
|
107 |
+
x2 = q**2
|
108 |
+
while 1:
|
109 |
+
yield (-1)**k * x1
|
110 |
+
yield (-1)**k * x2
|
111 |
+
x1 *= q**(3*k+1)
|
112 |
+
x2 *= q**(3*k+2)
|
113 |
+
k += 1
|
114 |
+
if k > maxterms:
|
115 |
+
raise ctx.NoConvergence
|
116 |
+
return ctx.sum_accurately(terms)
|
117 |
+
# return ctx.nprod(lambda k: 1-a*q**k, [0,n-1])
|
118 |
+
def factors():
|
119 |
+
k = 0
|
120 |
+
r = ctx.one
|
121 |
+
while 1:
|
122 |
+
yield 1 - a*r
|
123 |
+
r *= q
|
124 |
+
k += 1
|
125 |
+
if k >= n:
|
126 |
+
return
|
127 |
+
if k > maxterms:
|
128 |
+
raise ctx.NoConvergence
|
129 |
+
return ctx.mul_accurately(factors)
|
130 |
+
|
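For the infinite case with a == q, the terms() generator above implements Euler's pentagonal number theorem stated in the docstring. A minimal sketch comparing qp(q) against a directly truncated pentagonal sum (values are illustrative; 40 terms is far more than needed at this precision):

from mpmath import mp, qp

mp.dps = 25
q = mp.mpf('0.5')
# (q; q)_oo = sum over k of (-1)**k * q**(k*(3*k-1)/2), truncated to |k| <= K
K = 40
pent = sum((-1)**k * q**(k*(3*k - 1)//2) for k in range(-K, K + 1))
print(qp(q))
print(pent)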
131 |
+
@defun_wrapped
|
132 |
+
def qgamma(ctx, z, q, **kwargs):
|
133 |
+
r"""
|
134 |
+
Evaluates the q-gamma function
|
135 |
+
|
136 |
+
.. math ::
|
137 |
+
|
138 |
+
\Gamma_q(z) = \frac{(q; q)_{\infty}}{(q^z; q)_{\infty}} (1-q)^{1-z}.
|
139 |
+
|
140 |
+
|
141 |
+
**Examples**
|
142 |
+
|
143 |
+
Evaluation for real and complex arguments::
|
144 |
+
|
145 |
+
>>> from mpmath import *
|
146 |
+
>>> mp.dps = 25; mp.pretty = True
|
147 |
+
>>> qgamma(4,0.75)
|
148 |
+
4.046875
|
149 |
+
>>> qgamma(6,6)
|
150 |
+
121226245.0
|
151 |
+
>>> qgamma(3+4j, 0.5j)
|
152 |
+
(0.1663082382255199834630088 + 0.01952474576025952984418217j)
|
153 |
+
|
154 |
+
The q-gamma function satisfies a functional equation similar
|
155 |
+
to that of the ordinary gamma function::
|
156 |
+
|
157 |
+
>>> q = mpf(0.25)
|
158 |
+
>>> z = mpf(2.5)
|
159 |
+
>>> qgamma(z+1,q)
|
160 |
+
1.428277424823760954685912
|
161 |
+
>>> (1-q**z)/(1-q)*qgamma(z,q)
|
162 |
+
1.428277424823760954685912
|
163 |
+
|
164 |
+
"""
|
165 |
+
if abs(q) > 1:
|
166 |
+
return ctx.qgamma(z,1/q)*q**((z-2)*(z-1)*0.5)
|
167 |
+
return ctx.qp(q, q, None, **kwargs) / \
|
168 |
+
ctx.qp(q**z, q, None, **kwargs) * (1-q)**(1-z)
|
169 |
+
|
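The functional equation quoted in the docstring, Gamma_q(z+1) = (1-q**z)/(1-q) * Gamma_q(z), follows directly from the qp-based formula used above. A minimal sketch with illustrative values; both lines should print the same number:

from mpmath import mp, qgamma

mp.dps = 25
q, z = mp.mpf('0.6'), mp.mpf('3.25')
# functional equation of the q-gamma function
print(qgamma(z + 1, q))
print((1 - q**z)/(1 - q) * qgamma(z, q))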
170 |
+
@defun_wrapped
|
171 |
+
def qfac(ctx, z, q, **kwargs):
|
172 |
+
r"""
|
173 |
+
Evaluates the q-factorial,
|
174 |
+
|
175 |
+
.. math ::
|
176 |
+
|
177 |
+
[n]_q! = (1+q)(1+q+q^2)\cdots(1+q+\cdots+q^{n-1})
|
178 |
+
|
179 |
+
or more generally
|
180 |
+
|
181 |
+
.. math ::
|
182 |
+
|
183 |
+
[z]_q! = \frac{(q;q)_z}{(1-q)^z}.
|
184 |
+
|
185 |
+
**Examples**
|
186 |
+
|
187 |
+
>>> from mpmath import *
|
188 |
+
>>> mp.dps = 25; mp.pretty = True
|
189 |
+
>>> qfac(0,0)
|
190 |
+
1.0
|
191 |
+
>>> qfac(4,3)
|
192 |
+
2080.0
|
193 |
+
>>> qfac(5,6)
|
194 |
+
121226245.0
|
195 |
+
>>> qfac(1+1j, 2+1j)
|
196 |
+
(0.4370556551322672478613695 + 0.2609739839216039203708921j)
|
197 |
+
|
198 |
+
"""
|
199 |
+
if ctx.isint(z) and ctx._re(z) > 0:
|
200 |
+
n = int(ctx._re(z))
|
201 |
+
return ctx.qp(q, q, n, **kwargs) / (1-q)**n
|
202 |
+
return ctx.qgamma(z+1, q, **kwargs)
|
203 |
+
|
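For positive integer z, the branch above reduces the q-factorial to qp(q, q, n)/(1-q)**n, which equals the elementary product (1+q)(1+q+q^2)...(1+...+q^(n-1)) from the docstring. A minimal sketch with illustrative values; all three lines should agree:

from mpmath import mp, qfac, qp, fprod

mp.dps = 25
q, n = mp.mpf(3), 4
print(qfac(n, q))
print(qp(q, q, n) / (1 - q)**n)
# elementary product form of [n]_q!
print(fprod(sum(q**i for i in range(k)) for k in range(1, n + 1)))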
204 |
+
@defun
|
205 |
+
def qhyper(ctx, a_s, b_s, q, z, **kwargs):
|
206 |
+
r"""
|
207 |
+
Evaluates the basic hypergeometric series or hypergeometric q-series
|
208 |
+
|
209 |
+
.. math ::
|
210 |
+
|
211 |
+
\,_r\phi_s \left[\begin{matrix}
|
212 |
+
a_1 & a_2 & \ldots & a_r \\
|
213 |
+
b_1 & b_2 & \ldots & b_s
|
214 |
+
\end{matrix} ; q,z \right] =
|
215 |
+
\sum_{n=0}^\infty
|
216 |
+
\frac{(a_1;q)_n, \ldots, (a_r;q)_n}
|
217 |
+
{(b_1;q)_n, \ldots, (b_s;q)_n}
|
218 |
+
\left((-1)^n q^{n\choose 2}\right)^{1+s-r}
|
219 |
+
\frac{z^n}{(q;q)_n}
|
220 |
+
|
221 |
+
where `(a;q)_n` denotes the q-Pochhammer symbol (see :func:`~mpmath.qp`).
|
222 |
+
|
223 |
+
**Examples**
|
224 |
+
|
225 |
+
Evaluation works for real and complex arguments::
|
226 |
+
|
227 |
+
>>> from mpmath import *
|
228 |
+
>>> mp.dps = 25; mp.pretty = True
|
229 |
+
>>> qhyper([0.5], [2.25], 0.25, 4)
|
230 |
+
-0.1975849091263356009534385
|
231 |
+
>>> qhyper([0.5], [2.25], 0.25-0.25j, 4)
|
232 |
+
(2.806330244925716649839237 + 3.568997623337943121769938j)
|
233 |
+
>>> qhyper([1+j], [2,3+0.5j], 0.25, 3+4j)
|
234 |
+
(9.112885171773400017270226 - 1.272756997166375050700388j)
|
235 |
+
|
236 |
+
Comparing with a summation of the defining series, using
|
237 |
+
:func:`~mpmath.nsum`::
|
238 |
+
|
239 |
+
>>> b, q, z = 3, 0.25, 0.5
|
240 |
+
>>> qhyper([], [b], q, z)
|
241 |
+
0.6221136748254495583228324
|
242 |
+
>>> nsum(lambda n: z**n / qp(q,q,n)/qp(b,q,n) * q**(n*(n-1)), [0,inf])
|
243 |
+
0.6221136748254495583228324
|
244 |
+
|
245 |
+
"""
|
246 |
+
#a_s = [ctx._convert_param(a)[0] for a in a_s]
|
247 |
+
#b_s = [ctx._convert_param(b)[0] for b in b_s]
|
248 |
+
#q = ctx._convert_param(q)[0]
|
249 |
+
a_s = [ctx.convert(a) for a in a_s]
|
250 |
+
b_s = [ctx.convert(b) for b in b_s]
|
251 |
+
q = ctx.convert(q)
|
252 |
+
z = ctx.convert(z)
|
253 |
+
r = len(a_s)
|
254 |
+
s = len(b_s)
|
255 |
+
d = 1+s-r
|
256 |
+
maxterms = kwargs.get('maxterms', 50*ctx.prec)
|
257 |
+
def terms():
|
258 |
+
t = ctx.one
|
259 |
+
yield t
|
260 |
+
qk = 1
|
261 |
+
k = 0
|
262 |
+
x = 1
|
263 |
+
while 1:
|
264 |
+
for a in a_s:
|
265 |
+
p = 1 - a*qk
|
266 |
+
t *= p
|
267 |
+
for b in b_s:
|
268 |
+
p = 1 - b*qk
|
269 |
+
if not p:
|
270 |
+
raise ValueError
|
271 |
+
t /= p
|
272 |
+
t *= z
|
273 |
+
x *= (-1)**d * qk ** d
|
274 |
+
qk *= q
|
275 |
+
t /= (1 - qk)
|
276 |
+
k += 1
|
277 |
+
yield t * x
|
278 |
+
if k > maxterms:
|
279 |
+
raise ctx.NoConvergence
|
280 |
+
return ctx.sum_accurately(terms)
|
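The terms() generator above accumulates exactly the series displayed in the docstring. A minimal sketch summing a truncated version of that series directly with qp and comparing against qhyper (parameters taken from the docstring example; the truncation length is chosen generously):

from mpmath import mp, mpf, qhyper, qp

mp.dps = 25
a_s, b_s, q, z = [mpf('0.5')], [mpf('2.25')], mpf('0.25'), mpf(4)
r, s = len(a_s), len(b_s)
total = mp.zero
for n in range(60):
    num = mp.one
    for a in a_s:
        num *= qp(a, q, n)          # (a; q)_n factors in the numerator
    den = qp(q, q, n)               # (q; q)_n
    for b in b_s:
        den *= qp(b, q, n)          # (b; q)_n factors in the denominator
    factor = ((-1)**n * q**(n*(n - 1)//2))**(1 + s - r)
    total += num/den * factor * z**n
print(qhyper(a_s, b_s, q, z))
print(total)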
env-llmeval/lib/python3.10/site-packages/mpmath/functions/rszeta.py
ADDED
@@ -0,0 +1,1403 @@
1 |
+
"""
|
2 |
+
---------------------------------------------------------------------
|
3 |
+
.. sectionauthor:: Juan Arias de Reyna <[email protected]>
|
4 |
+
|
5 |
+
This module implements zeta-related functions using the Riemann-Siegel
|
6 |
+
expansion: zeta_offline(s,k=0)
|
7 |
+
|
8 |
+
* coef(J, eps): Needed in the computation of Rzeta(s,k)
|
9 |
+
|
10 |
+
* Rzeta_simul(s, der=0) computes Rzeta^(k)(s) and Rzeta^(k)(1-s) simultaneously
|
11 |
+
for 0 <= k <= der. Used by zeta_offline and z_offline
|
12 |
+
|
13 |
+
* Rzeta_set(s, derivatives) computes Rzeta^(k)(s) for given derivatives, used by
|
14 |
+
z_half(t,k) and zeta_half
|
15 |
+
|
16 |
+
* z_offline(w,k): Z(w) and its derivatives of order k <= 4
|
17 |
+
* z_half(t,k): Z(t) (Riemann Siegel function) and its derivatives of order k <= 4
|
18 |
+
* zeta_offline(s): zeta(s) and its derivatives of order k<= 4
|
19 |
+
* zeta_half(1/2+it,k): zeta(s) and its derivatives of order k<= 4
|
20 |
+
|
21 |
+
* rs_zeta(s,k=0) Computes zeta^(k)(s) Unifies zeta_half and zeta_offline
|
22 |
+
* rs_z(w,k=0) Computes Z^(k)(w) Unifies z_offline and z_half
|
23 |
+
----------------------------------------------------------------------
|
24 |
+
|
25 |
+
This program uses Riemann-Siegel expansion even to compute
|
26 |
+
zeta(s) on points s = sigma + i t with sigma arbitrary not
|
27 |
+
necessarily equal to 1/2.
|
28 |
+
|
29 |
+
It is based on a new derivation of the formula, with rigorous
|
30 |
+
and sharp bounds for the terms and the remainder of this expansion.
|
31 |
+
|
32 |
+
More information in the papers:
|
33 |
+
|
34 |
+
J. Arias de Reyna, High Precision Computation of Riemann's
|
35 |
+
Zeta Function by the Riemann-Siegel Formula I, II
|
36 |
+
|
37 |
+
We refer to them as I, II.
|
38 |
+
|
39 |
+
They contain a detailed explanation of the whole
|
40 |
+
procedure.
|
41 |
+
|
42 |
+
The program uses Riemann-Siegel expansion.
|
43 |
+
This is useful when t is large (say t > 10000).
|
44 |
+
The precision is limited; roughly, it can compute zeta(sigma+it)
|
45 |
+
with an error less than exp(-c t) for some constant c depending
|
46 |
+
on sigma. The program raises an error when the Riemann-Siegel
|
47 |
+
formula cannot reach the requested precision.
|
48 |
+
|
49 |
+
"""
|
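As background for the function list above: the Riemann-Siegel Z function satisfies Z(t) = exp(i*theta(t)) * zeta(1/2 + i*t), which is the quantity z_half/rs_z compute. A minimal sketch checking this relation through mpmath's public siegelz, siegeltheta and zeta (the value of t is illustrative and does not need to be large here):

from mpmath import mp, zeta, siegelz, siegeltheta, expj, mpf

mp.dps = 25
t = mpf(100)
# Z(t) is real for real t; the imaginary part of the right-hand side is ~0
print(siegelz(t))
print(expj(siegeltheta(t)) * zeta(mpf('0.5') + mp.j*t))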
50 |
+
|
51 |
+
import math
|
52 |
+
|
53 |
+
class RSCache(object):
|
54 |
+
def __init__(ctx):
|
55 |
+
ctx._rs_cache = [0, 10, {}, {}]
|
56 |
+
|
57 |
+
from .functions import defun
|
58 |
+
|
59 |
+
#-------------------------------------------------------------------------------#
|
60 |
+
# #
|
61 |
+
# coef(ctx, J, eps, _cache=[0, 10, {} ] ) #
|
62 |
+
# #
|
63 |
+
#-------------------------------------------------------------------------------#
|
64 |
+
|
65 |
+
# This function computes the coefficients c[n] defined on (I, equation (47))
|
66 |
+
# but see also (II, section 3.14).
|
67 |
+
#
|
68 |
+
# Since these coefficients are very difficult to compute we save the values
|
69 |
+
# in a cache. So if we compute several values of the functions Rzeta(s) for
|
70 |
+
# near values of s, we do not recompute these coefficients.
|
71 |
+
#
|
72 |
+
# c[n] are the Taylor coefficients of the function:
|
73 |
+
#
|
74 |
+
# F(z):= (exp(pi*j*(z*z/2+3/8))-j* sqrt(2) cos(pi*z/2))/(2*cos(pi *z))
|
75 |
+
#
|
76 |
+
#
|
77 |
+
|
78 |
+
def _coef(ctx, J, eps):
|
79 |
+
r"""
|
80 |
+
Computes the coefficients `c_n` for `0\le n\le 2J` with error less than eps
|
81 |
+
|
82 |
+
**Definition**
|
83 |
+
|
84 |
+
The coefficients c_n are defined by
|
85 |
+
|
86 |
+
.. math ::
|
87 |
+
|
88 |
+
\begin{equation}
|
89 |
+
F(z)=\frac{e^{\pi i
|
90 |
+
\bigl(\frac{z^2}{2}+\frac38\bigr)}-i\sqrt{2}\cos\frac{\pi}{2}z}{2\cos\pi
|
91 |
+
z}=\sum_{n=0}^\infty c_{2n} z^{2n}
|
92 |
+
\end{equation}
|
93 |
+
|
94 |
+
they are computed applying the relation
|
95 |
+
|
96 |
+
.. math ::
|
97 |
+
|
98 |
+
\begin{multline}
|
99 |
+
c_{2n}=-\frac{i}{\sqrt{2}}\Bigl(\frac{\pi}{2}\Bigr)^{2n}
|
100 |
+
\sum_{k=0}^n\frac{(-1)^k}{(2k)!}
|
101 |
+
2^{2n-2k}\frac{(-1)^{n-k}E_{2n-2k}}{(2n-2k)!}+\\
|
102 |
+
+e^{3\pi i/8}\sum_{j=0}^n(-1)^j\frac{
|
103 |
+
E_{2j}}{(2j)!}\frac{i^{n-j}\pi^{n+j}}{(n-j)!2^{n-j+1}}.
|
104 |
+
\end{multline}
|
105 |
+
"""
|
106 |
+
|
107 |
+
newJ = J+2 # compute more coefficients than are needed
|
108 |
+
neweps6 = eps/2. # compute with slightly more precision than needed
|
109 |
+
|
110 |
+
# PREPARATION FOR THE COMPUTATION OF V(N) AND W(N)
|
111 |
+
# See II Section 3.16
|
112 |
+
#
|
113 |
+
# Computing the exponent wpvw of the error II equation (81)
|
114 |
+
wpvw = max(ctx.mag(10*(newJ+3)), 4*newJ+5-ctx.mag(neweps6))
|
115 |
+
|
116 |
+
# Preparation of Euler numbers (we need until the 2*RS_NEWJ)
|
117 |
+
E = ctx._eulernum(2*newJ)
|
118 |
+
|
119 |
+
# Now we have in the cache all the needed Euler numbers.
|
120 |
+
#
|
121 |
+
# Computing the powers of pi
|
122 |
+
#
|
123 |
+
# We need to compute the powers pi**n for 1<= n <= 2*J
|
124 |
+
# with relative error less than 2**(-wpvw)
|
125 |
+
# it is easy to show that this is obtained
|
126 |
+
# taking wppi as the least d with
|
127 |
+
# 2**d>40*J and 2**d> 4.24 *newJ + 2**wpvw
|
128 |
+
# In II Section 3.9 we need also that
|
129 |
+
# wppi > wptcoef[0], and that the powers
|
130 |
+
# here computed 0<= k <= 2*newJ are more
|
131 |
+
# than those needed there that are 2*L-2.
|
132 |
+
# so we need J >= L this will be checked
|
133 |
+
# before computing tcoef[]
|
134 |
+
wppi = max(ctx.mag(40*newJ), ctx.mag(newJ)+3 +wpvw)
|
135 |
+
ctx.prec = wppi
|
136 |
+
pipower = {}
|
137 |
+
pipower[0] = ctx.one
|
138 |
+
pipower[1] = ctx.pi
|
139 |
+
for n in range(2,2*newJ+1):
|
140 |
+
pipower[n] = pipower[n-1]*ctx.pi
|
141 |
+
|
142 |
+
# COMPUTING THE COEFFICIENTS v(n) AND w(n)
|
143 |
+
# see II equation (61) and equations (81) and (82)
|
144 |
+
ctx.prec = wpvw+2
|
145 |
+
v={}
|
146 |
+
w={}
|
147 |
+
for n in range(0,newJ+1):
|
148 |
+
va = (-1)**n * ctx._eulernum(2*n)
|
149 |
+
va = ctx.mpf(va)/ctx.fac(2*n)
|
150 |
+
v[n]=va*pipower[2*n]
|
151 |
+
for n in range(0,2*newJ+1):
|
152 |
+
wa = ctx.one/ctx.fac(n)
|
153 |
+
wa=wa/(2**n)
|
154 |
+
w[n]=wa*pipower[n]
|
155 |
+
|
156 |
+
# COMPUTATION OF THE CONVOLUTIONS RS_P1 AND RS_P2
|
157 |
+
# See II Section 3.16
|
158 |
+
ctx.prec = 15
|
159 |
+
wpp1a = 9 - ctx.mag(neweps6)
|
160 |
+
P1 = {}
|
161 |
+
for n in range(0,newJ+1):
|
162 |
+
ctx.prec = 15
|
163 |
+
wpp1 = max(ctx.mag(10*(n+4)),4*n+wpp1a)
|
164 |
+
ctx.prec = wpp1
|
165 |
+
sump = 0
|
166 |
+
for k in range(0,n+1):
|
167 |
+
sump += ((-1)**k) * v[k]*w[2*n-2*k]
|
168 |
+
P1[n]=((-1)**(n+1))*ctx.j*sump
|
169 |
+
P2={}
|
170 |
+
for n in range(0,newJ+1):
|
171 |
+
ctx.prec = 15
|
172 |
+
wpp2 = max(ctx.mag(10*(n+4)),4*n+wpp1a)
|
173 |
+
ctx.prec = wpp2
|
174 |
+
sump = 0
|
175 |
+
for k in range(0,n+1):
|
176 |
+
sump += (ctx.j**(n-k)) * v[k]*w[n-k]
|
177 |
+
P2[n]=sump
|
178 |
+
# COMPUTING THE COEFFICIENTS c[2n]
|
179 |
+
# See II Section 3.14
|
180 |
+
ctx.prec = 15
|
181 |
+
wpc0 = 5 - ctx.mag(neweps6)
|
182 |
+
wpc = max(6,4*newJ+wpc0)
|
183 |
+
ctx.prec = wpc
|
184 |
+
mu = ctx.sqrt(ctx.mpf('2'))/2
|
185 |
+
nu = ctx.expjpi(3./8)/2
|
186 |
+
c={}
|
187 |
+
for n in range(0,newJ):
|
188 |
+
ctx.prec = 15
|
189 |
+
wpc = max(6,4*n+wpc0)
|
190 |
+
ctx.prec = wpc
|
191 |
+
c[2*n] = mu*P1[n]+nu*P2[n]
|
192 |
+
for n in range(1,2*newJ,2):
|
193 |
+
c[n] = 0
|
194 |
+
return [newJ, neweps6, c, pipower]
|
195 |
+
|
196 |
+
def coef(ctx, J, eps):
|
197 |
+
_cache = ctx._rs_cache
|
198 |
+
if J <= _cache[0] and eps >= _cache[1]:
|
199 |
+
return _cache[2], _cache[3]
|
200 |
+
orig = ctx._mp.prec
|
201 |
+
try:
|
202 |
+
data = _coef(ctx._mp, J, eps)
|
203 |
+
finally:
|
204 |
+
ctx._mp.prec = orig
|
205 |
+
if ctx is not ctx._mp:
|
206 |
+
data[2] = dict((k,ctx.convert(v)) for (k,v) in data[2].items())
|
207 |
+
data[3] = dict((k,ctx.convert(v)) for (k,v) in data[3].items())
|
208 |
+
ctx._rs_cache[:] = data
|
209 |
+
return ctx._rs_cache[2], ctx._rs_cache[3]
|
210 |
+
|
211 |
+
#-------------------------------------------------------------------------------#
|
212 |
+
# #
|
213 |
+
# Rzeta_simul(s,k=0) #
|
214 |
+
# #
|
215 |
+
#-------------------------------------------------------------------------------#
|
216 |
+
# This function returns a list with the values:
|
217 |
+
# Rzeta(sigma+it), conj(Rzeta(1-sigma+it)),Rzeta'(sigma+it), conj(Rzeta'(1-sigma+it)),
|
218 |
+
# .... , Rzeta^{(k)}(sigma+it), conj(Rzeta^{(k)}(1-sigma+it))
|
219 |
+
#
|
220 |
+
# Useful to compute the function zeta(s) and Z(w) or its derivatives.
|
221 |
+
#
|
222 |
+
|
223 |
+
def aux_M_Fp(ctx, xA, xeps4, a, xB1, xL):
|
224 |
+
# COMPUTING M NUMBER OF DERIVATIVES Fp[m] TO COMPUTE
|
225 |
+
# See II Section 3.11 equations (47) and (48)
|
226 |
+
aux1 = 126.0657606*xA/xeps4 # 126.06.. = 316/sqrt(2*pi)
|
227 |
+
aux1 = ctx.ln(aux1)
|
228 |
+
aux2 = (2*ctx.ln(ctx.pi)+ctx.ln(xB1)+ctx.ln(a))/3 -ctx.ln(2*ctx.pi)/2
|
229 |
+
m = 3*xL-3
|
230 |
+
aux3= (ctx.loggamma(m+1)-ctx.loggamma(m/3.0+2))/2 -ctx.loggamma((m+1)/2.)
|
231 |
+
while((aux1 < m*aux2+ aux3)and (m>1)):
|
232 |
+
m = m - 1
|
233 |
+
aux3 = (ctx.loggamma(m+1)-ctx.loggamma(m/3.0+2))/2 -ctx.loggamma((m+1)/2.)
|
234 |
+
xM = m
|
235 |
+
return xM
|
236 |
+
|
237 |
+
def aux_J_needed(ctx, xA, xeps4, a, xB1, xM):
|
238 |
+
# DETERMINATION OF J THE NUMBER OF TERMS NEEDED
|
239 |
+
# IN THE TAYLOR SERIES OF F.
|
240 |
+
# See II Section 3.11 equation (49))
|
241 |
+
# Only determine one
|
242 |
+
h1 = xeps4/(632*xA)
|
243 |
+
h2 = xB1*a * 126.31337419529260248 # = pi^2*e^2*sqrt(3)
|
244 |
+
h2 = h1 * ctx.power((h2/xM**2),(xM-1)/3) / xM
|
245 |
+
h3 = min(h1,h2)
|
246 |
+
return h3
|
247 |
+
|
248 |
+
def Rzeta_simul(ctx, s, der=0):
|
249 |
+
# First we take the value of ctx.prec
|
250 |
+
wpinitial = ctx.prec
|
251 |
+
|
252 |
+
# INITIALIZATION
|
253 |
+
# Take the real and imaginary part of s
|
254 |
+
t = ctx._im(s)
|
255 |
+
xsigma = ctx._re(s)
|
256 |
+
ysigma = 1 - xsigma
|
257 |
+
|
258 |
+
# Now compute several parameters that appear in the program
|
259 |
+
ctx.prec = 15
|
260 |
+
a = ctx.sqrt(t/(2*ctx.pi))
|
261 |
+
xasigma = a ** xsigma
|
262 |
+
yasigma = a ** ysigma
|
263 |
+
|
264 |
+
# We need a simple bound A1 < asigma (see II Section 3.1 and 3.3)
|
265 |
+
xA1=ctx.power(2, ctx.mag(xasigma)-1)
|
266 |
+
yA1=ctx.power(2, ctx.mag(yasigma)-1)
|
267 |
+
|
268 |
+
# We compute various epsilon's (see II end of Section 3.1)
|
269 |
+
eps = ctx.power(2, -wpinitial)
|
270 |
+
eps1 = eps/6.
|
271 |
+
xeps2 = eps * xA1/3.
|
272 |
+
yeps2 = eps * yA1/3.
|
273 |
+
|
274 |
+
# COMPUTING SOME COEFFICIENTS THAT DEPENDS
|
275 |
+
# ON sigma
|
276 |
+
# constant b and c (see I Theorem 2 formula (26) )
|
277 |
+
# coefficients A and B1 (see I Section 6.1 equation (50))
|
278 |
+
#
|
279 |
+
# here we do not need high precision
|
280 |
+
ctx.prec = 15
|
281 |
+
if xsigma > 0:
|
282 |
+
xb = 2.
|
283 |
+
xc = math.pow(9,xsigma)/4.44288
|
284 |
+
# 4.44288 =(math.sqrt(2)*math.pi)
|
285 |
+
xA = math.pow(9,xsigma)
|
286 |
+
xB1 = 1
|
287 |
+
else:
|
288 |
+
xb = 2.25158 # math.sqrt( (3-2* math.log(2))*math.pi )
|
289 |
+
xc = math.pow(2,-xsigma)/4.44288
|
290 |
+
xA = math.pow(2,-xsigma)
|
291 |
+
xB1 = 1.10789 # = 2*sqrt(1-log(2))
|
292 |
+
|
293 |
+
if(ysigma > 0):
|
294 |
+
yb = 2.
|
295 |
+
yc = math.pow(9,ysigma)/4.44288
|
296 |
+
# 4.44288 =(math.sqrt(2)*math.pi)
|
297 |
+
yA = math.pow(9,ysigma)
|
298 |
+
yB1 = 1
|
299 |
+
else:
|
300 |
+
yb = 2.25158 # math.sqrt( (3-2* math.log(2))*math.pi )
|
301 |
+
yc = math.pow(2,-ysigma)/4.44288
|
302 |
+
yA = math.pow(2,-ysigma)
|
303 |
+
yB1 = 1.10789 # = 2*sqrt(1-log(2))
|
304 |
+
|
305 |
+
# COMPUTING L THE NUMBER OF TERMS NEEDED IN THE RIEMANN-SIEGEL
|
306 |
+
# CORRECTION
|
307 |
+
# See II Section 3.2
|
308 |
+
ctx.prec = 15
|
309 |
+
xL = 1
|
310 |
+
while 3*xc*ctx.gamma(xL*0.5) * ctx.power(xb*a,-xL) >= xeps2:
|
311 |
+
xL = xL+1
|
312 |
+
xL = max(2,xL)
|
313 |
+
yL = 1
|
314 |
+
while 3*yc*ctx.gamma(yL*0.5) * ctx.power(yb*a,-yL) >= yeps2:
|
315 |
+
yL = yL+1
|
316 |
+
yL = max(2,yL)
|
317 |
+
|
318 |
+
# The number L has to satisfy some conditions.
|
319 |
+
# If not, RS cannot compute Rzeta(s) with the prescribed precision
|
320 |
+
# (see II, Section 3.2 condition (20) ) and
|
321 |
+
# (II, Section 3.3 condition (22) ). Also we have added
|
322 |
+
# an additional technical condition in Section 3.17 Proposition 17
|
323 |
+
if ((3*xL >= 2*a*a/25.) or (3*xL+2+xsigma<0) or (abs(xsigma) > a/2.) or \
|
324 |
+
(3*yL >= 2*a*a/25.) or (3*yL+2+ysigma<0) or (abs(ysigma) > a/2.)):
|
325 |
+
ctx.prec = wpinitial
|
326 |
+
raise NotImplementedError("Riemann-Siegel can not compute with such precision")
|
327 |
+
|
328 |
+
# We take the maximum of the two values
|
329 |
+
L = max(xL, yL)
|
330 |
+
|
331 |
+
# INITIALIZATION (CONTINUATION)
|
332 |
+
#
|
333 |
+
# eps3 is the constant defined on (II, Section 3.5 equation (27) )
|
334 |
+
# each term of the RS correction must be computed with error <= eps3
|
335 |
+
xeps3 = xeps2/(4*xL)
|
336 |
+
yeps3 = yeps2/(4*yL)
|
337 |
+
|
338 |
+
# eps4 is defined on (II Section 3.6 equation (30) )
|
339 |
+
# each component of the formula (II Section 3.6 equation (29) )
|
340 |
+
# must be computed with error <= eps4
|
341 |
+
xeps4 = xeps3/(3*xL)
|
342 |
+
yeps4 = yeps3/(3*yL)
|
343 |
+
|
344 |
+
# COMPUTING M NUMBER OF DERIVATIVES Fp[m] TO COMPUTE
|
345 |
+
xM = aux_M_Fp(ctx, xA, xeps4, a, xB1, xL)
|
346 |
+
yM = aux_M_Fp(ctx, yA, yeps4, a, yB1, yL)
|
347 |
+
M = max(xM, yM)
|
348 |
+
|
349 |
+
# COMPUTING NUMBER OF TERMS J NEEDED
|
350 |
+
h3 = aux_J_needed(ctx, xA, xeps4, a, xB1, xM)
|
351 |
+
h4 = aux_J_needed(ctx, yA, yeps4, a, yB1, yM)
|
352 |
+
h3 = min(h3,h4)
|
353 |
+
J = 12
|
354 |
+
jvalue = (2*ctx.pi)**J / ctx.gamma(J+1)
|
355 |
+
while jvalue > h3:
|
356 |
+
J = J+1
|
357 |
+
jvalue = (2*ctx.pi)*jvalue/J
|
358 |
+
|
359 |
+
# COMPUTING eps5[m] for 1 <= m <= 21
|
360 |
+
# See II Section 10 equation (43)
|
361 |
+
# We choose the minimum of the two possibilities
|
362 |
+
eps5={}
|
363 |
+
xforeps5 = math.pi*math.pi*xB1*a
|
364 |
+
yforeps5 = math.pi*math.pi*yB1*a
|
365 |
+
for m in range(0,22):
|
366 |
+
xaux1 = math.pow(xforeps5, m/3)/(316.*xA)
|
367 |
+
yaux1 = math.pow(yforeps5, m/3)/(316.*yA)
|
368 |
+
aux1 = min(xaux1, yaux1)
|
369 |
+
aux2 = ctx.gamma(m+1)/ctx.gamma(m/3.0+0.5)
|
370 |
+
aux2 = math.sqrt(aux2)
|
371 |
+
eps5[m] = (aux1*aux2*min(xeps4,yeps4))
|
372 |
+
|
373 |
+
# COMPUTING wpfp
|
374 |
+
# See II Section 3.13 equation (59)
|
375 |
+
twenty = min(3*L-3, 21)+1
|
376 |
+
aux = 6812*J
|
377 |
+
wpfp = ctx.mag(44*J)
|
378 |
+
for m in range(0,twenty):
|
379 |
+
wpfp = max(wpfp, ctx.mag(aux*ctx.gamma(m+1)/eps5[m]))
|
380 |
+
|
381 |
+
# COMPUTING N AND p
|
382 |
+
# See II Section
|
383 |
+
ctx.prec = wpfp + ctx.mag(t)+20
|
384 |
+
a = ctx.sqrt(t/(2*ctx.pi))
|
385 |
+
N = ctx.floor(a)
|
386 |
+
p = 1-2*(a-N)
|
387 |
+
|
388 |
+
# now we get a rounded version of p
|
389 |
+
# to the precision wpfp
|
390 |
+
# this possibly is not necessary
|
391 |
+
num=ctx.floor(p*(ctx.mpf('2')**wpfp))
|
392 |
+
difference = p * (ctx.mpf('2')**wpfp)-num
|
393 |
+
if (difference < 0.5):
|
394 |
+
num = num
|
395 |
+
else:
|
396 |
+
num = num+1
|
397 |
+
p = ctx.convert(num * (ctx.mpf('2')**(-wpfp)))
|
398 |
+
|
399 |
+
# COMPUTING THE COEFFICIENTS c[n] = cc[n]
|
400 |
+
# We shall use the notation cc[n], since there is
|
401 |
+
# a constant that is called c
|
402 |
+
# See II Section 3.14
|
403 |
+
# We compute the coefficients and also save them in a
|
404 |
+
# cache. The bulk of the computation is passed to
|
405 |
+
# the function coef()
|
406 |
+
#
|
407 |
+
# eps6 is defined in II Section 3.13 equation (58)
|
408 |
+
eps6 = ctx.power(ctx.convert(2*ctx.pi), J)/(ctx.gamma(J+1)*3*J)
|
409 |
+
|
410 |
+
# Now we compute the coefficients
|
411 |
+
cc = {}
|
412 |
+
cont = {}
|
413 |
+
cont, pipowers = coef(ctx, J, eps6)
|
414 |
+
cc=cont.copy() # we need a copy since we have to change its values.
|
415 |
+
Fp={} # this is the appropriate place to initialize this
|
416 |
+
for n in range(M, 3*L-2):
|
417 |
+
Fp[n] = 0
|
418 |
+
Fp={}
|
419 |
+
ctx.prec = wpfp
|
420 |
+
for m in range(0,M+1):
|
421 |
+
sumP = 0
|
422 |
+
for k in range(2*J-m-1,-1,-1):
|
423 |
+
sumP = (sumP * p)+ cc[k]
|
424 |
+
Fp[m] = sumP
|
425 |
+
# preparation of the new coefficients
|
426 |
+
for k in range(0,2*J-m-1):
|
427 |
+
cc[k] = (k+1)* cc[k+1]
|
428 |
+
|
429 |
+
# COMPUTING THE NUMBERS xd[u,n,k], yd[u,n,k]
|
430 |
+
# See II Section 3.17
|
431 |
+
#
|
432 |
+
# First we compute the working precisions xwpd[k]
|
433 |
+
# See II equation (92)
|
434 |
+
xwpd={}
|
435 |
+
d1 = max(6,ctx.mag(40*L*L))
|
436 |
+
xd2 = 13+ctx.mag((1+abs(xsigma))*xA)-ctx.mag(xeps4)-1
|
437 |
+
xconst = ctx.ln(8/(ctx.pi*ctx.pi*a*a*xB1*xB1)) /2
|
438 |
+
for n in range(0,L):
|
439 |
+
xd3 = ctx.mag(ctx.sqrt(ctx.gamma(n-0.5)))-ctx.floor(n*xconst)+xd2
|
440 |
+
xwpd[n]=max(xd3,d1)
|
441 |
+
|
442 |
+
# procedure of II Section 3.17
|
443 |
+
ctx.prec = xwpd[1]+10
|
444 |
+
xpsigma = 1-(2*xsigma)
|
445 |
+
xd = {}
|
446 |
+
xd[0,0,-2]=0; xd[0,0,-1]=0; xd[0,0,0]=1; xd[0,0,1]=0
|
447 |
+
xd[0,-1,-2]=0; xd[0,-1,-1]=0; xd[0,-1,0]=1; xd[0,-1,1]=0
|
448 |
+
for n in range(1,L):
|
449 |
+
ctx.prec = xwpd[n]+10
|
450 |
+
for k in range(0,3*n//2+1):
|
451 |
+
m = 3*n-2*k
|
452 |
+
if(m!=0):
|
453 |
+
m1 = ctx.one/m
|
454 |
+
c1= m1/4
|
455 |
+
c2=(xpsigma*m1)/2
|
456 |
+
c3=-(m+1)
|
457 |
+
xd[0,n,k]=c3*xd[0,n-1,k-2]+c1*xd[0,n-1,k]+c2*xd[0,n-1,k-1]
|
458 |
+
else:
|
459 |
+
xd[0,n,k]=0
|
460 |
+
for r in range(0,k):
|
461 |
+
add=xd[0,n,r]*(ctx.mpf('1.0')*ctx.fac(2*k-2*r)/ctx.fac(k-r))
|
462 |
+
xd[0,n,k] -= ((-1)**(k-r))*add
|
463 |
+
xd[0,n,-2]=0; xd[0,n,-1]=0; xd[0,n,3*n//2+1]=0
|
464 |
+
for mu in range(-2,der+1):
|
465 |
+
for n in range(-2,L):
|
466 |
+
for k in range(-3,max(1,3*n//2+2)):
|
467 |
+
if( (mu<0)or (n<0) or(k<0)or (k>3*n//2)):
|
468 |
+
xd[mu,n,k] = 0
|
469 |
+
for mu in range(1,der+1):
|
470 |
+
for n in range(0,L):
|
471 |
+
ctx.prec = xwpd[n]+10
|
472 |
+
for k in range(0,3*n//2+1):
|
473 |
+
aux=(2*mu-2)*xd[mu-2,n-2,k-3]+2*(xsigma+n-2)*xd[mu-1,n-2,k-3]
|
474 |
+
xd[mu,n,k] = aux - xd[mu-1,n-1,k-1]
|
475 |
+
|
476 |
+
# Now we compute the working precisions ywpd[k]
|
477 |
+
# See II equation (92)
|
478 |
+
ywpd={}
|
479 |
+
d1 = max(6,ctx.mag(40*L*L))
|
480 |
+
yd2 = 13+ctx.mag((1+abs(ysigma))*yA)-ctx.mag(yeps4)-1
|
481 |
+
yconst = ctx.ln(8/(ctx.pi*ctx.pi*a*a*yB1*yB1)) /2
|
482 |
+
for n in range(0,L):
|
483 |
+
yd3 = ctx.mag(ctx.sqrt(ctx.gamma(n-0.5)))-ctx.floor(n*yconst)+yd2
|
484 |
+
ywpd[n]=max(yd3,d1)
|
485 |
+
|
486 |
+
# procedure of II Section 3.17
|
487 |
+
ctx.prec = ywpd[1]+10
|
488 |
+
ypsigma = 1-(2*ysigma)
|
489 |
+
yd = {}
|
490 |
+
yd[0,0,-2]=0; yd[0,0,-1]=0; yd[0,0,0]=1; yd[0,0,1]=0
|
491 |
+
yd[0,-1,-2]=0; yd[0,-1,-1]=0; yd[0,-1,0]=1; yd[0,-1,1]=0
|
492 |
+
for n in range(1,L):
|
493 |
+
ctx.prec = ywpd[n]+10
|
494 |
+
for k in range(0,3*n//2+1):
|
495 |
+
m = 3*n-2*k
|
496 |
+
if(m!=0):
|
497 |
+
m1 = ctx.one/m
|
498 |
+
c1= m1/4
|
499 |
+
c2=(ypsigma*m1)/2
|
500 |
+
c3=-(m+1)
|
501 |
+
yd[0,n,k]=c3*yd[0,n-1,k-2]+c1*yd[0,n-1,k]+c2*yd[0,n-1,k-1]
|
502 |
+
else:
|
503 |
+
yd[0,n,k]=0
|
504 |
+
for r in range(0,k):
|
505 |
+
add=yd[0,n,r]*(ctx.mpf('1.0')*ctx.fac(2*k-2*r)/ctx.fac(k-r))
|
506 |
+
yd[0,n,k] -= ((-1)**(k-r))*add
|
507 |
+
yd[0,n,-2]=0; yd[0,n,-1]=0; yd[0,n,3*n//2+1]=0
|
508 |
+
|
509 |
+
for mu in range(-2,der+1):
|
510 |
+
for n in range(-2,L):
|
511 |
+
for k in range(-3,max(1,3*n//2+2)):
|
512 |
+
if( (mu<0)or (n<0) or(k<0)or (k>3*n//2)):
|
513 |
+
yd[mu,n,k] = 0
|
514 |
+
for mu in range(1,der+1):
|
515 |
+
for n in range(0,L):
|
516 |
+
ctx.prec = ywpd[n]+10
|
517 |
+
for k in range(0,3*n//2+1):
|
518 |
+
aux=(2*mu-2)*yd[mu-2,n-2,k-3]+2*(ysigma+n-2)*yd[mu-1,n-2,k-3]
|
519 |
+
yd[mu,n,k] = aux - yd[mu-1,n-1,k-1]
|
520 |
+
|
521 |
+
# COMPUTING THE COEFFICIENTS xtcoef[k,l]
|
522 |
+
# See II Section 3.9
|
523 |
+
#
|
524 |
+
# computing the needed wp
|
525 |
+
xwptcoef={}
|
526 |
+
xwpterm={}
|
527 |
+
ctx.prec = 15
|
528 |
+
c1 = ctx.mag(40*(L+2))
|
529 |
+
xc2 = ctx.mag(68*(L+2)*xA)
|
530 |
+
xc4 = ctx.mag(xB1*a*math.sqrt(ctx.pi))-1
|
531 |
+
for k in range(0,L):
|
532 |
+
xc3 = xc2 - k*xc4+ctx.mag(ctx.fac(k+0.5))/2.
|
533 |
+
xwptcoef[k] = (max(c1,xc3-ctx.mag(xeps4)+1)+1 +20)*1.5
|
534 |
+
xwpterm[k] = (max(c1,ctx.mag(L+2)+xc3-ctx.mag(xeps3)+1)+1 +20)
|
535 |
+
ywptcoef={}
|
536 |
+
ywpterm={}
|
537 |
+
ctx.prec = 15
|
538 |
+
c1 = ctx.mag(40*(L+2))
|
539 |
+
yc2 = ctx.mag(68*(L+2)*yA)
|
540 |
+
yc4 = ctx.mag(yB1*a*math.sqrt(ctx.pi))-1
|
541 |
+
for k in range(0,L):
|
542 |
+
yc3 = yc2 - k*yc4+ctx.mag(ctx.fac(k+0.5))/2.
|
543 |
+
ywptcoef[k] = ((max(c1,yc3-ctx.mag(yeps4)+1))+10)*1.5
|
544 |
+
ywpterm[k] = (max(c1,ctx.mag(L+2)+yc3-ctx.mag(yeps3)+1)+1)+10
|
545 |
+
|
546 |
+
# check of power of pi
|
547 |
+
# computing the fortcoef[mu,k,ell]
|
548 |
+
xfortcoef={}
|
549 |
+
for mu in range(0,der+1):
|
550 |
+
for k in range(0,L):
|
551 |
+
for ell in range(-2,3*k//2+1):
|
552 |
+
xfortcoef[mu,k,ell]=0
|
553 |
+
for mu in range(0,der+1):
|
554 |
+
for k in range(0,L):
|
555 |
+
ctx.prec = xwptcoef[k]
|
556 |
+
for ell in range(0,3*k//2+1):
|
557 |
+
xfortcoef[mu,k,ell]=xd[mu,k,ell]*Fp[3*k-2*ell]/pipowers[2*k-ell]
|
558 |
+
xfortcoef[mu,k,ell]=xfortcoef[mu,k,ell]/((2*ctx.j)**ell)
|
559 |
+
|
560 |
+
def trunc_a(t):
|
561 |
+
wp = ctx.prec
|
562 |
+
ctx.prec = wp + 2
|
563 |
+
aa = ctx.sqrt(t/(2*ctx.pi))
|
564 |
+
ctx.prec = wp
|
565 |
+
return aa
|
566 |
+
|
567 |
+
# computing the tcoef[k,ell]
|
568 |
+
xtcoef={}
|
569 |
+
for mu in range(0,der+1):
|
570 |
+
for k in range(0,L):
|
571 |
+
for ell in range(-2,3*k//2+1):
|
572 |
+
xtcoef[mu,k,ell]=0
|
573 |
+
ctx.prec = max(xwptcoef[0],ywptcoef[0])+3
|
574 |
+
aa= trunc_a(t)
|
575 |
+
la = -ctx.ln(aa)
|
576 |
+
|
577 |
+
for chi in range(0,der+1):
|
578 |
+
for k in range(0,L):
|
579 |
+
ctx.prec = xwptcoef[k]
|
580 |
+
for ell in range(0,3*k//2+1):
|
581 |
+
xtcoef[chi,k,ell] =0
|
582 |
+
for mu in range(0, chi+1):
|
583 |
+
tcoefter=ctx.binomial(chi,mu)*ctx.power(la,mu)*xfortcoef[chi-mu,k,ell]
|
584 |
+
xtcoef[chi,k,ell] += tcoefter
|
585 |
+
|
586 |
+
# COMPUTING THE COEFFICIENTS ytcoef[k,l]
|
587 |
+
# See II Section 3.9
|
588 |
+
#
|
589 |
+
# computing the needed wp
|
590 |
+
# check of power of pi
|
591 |
+
# computing the fortcoef[mu,k,ell]
|
592 |
+
yfortcoef={}
|
593 |
+
for mu in range(0,der+1):
|
594 |
+
for k in range(0,L):
|
595 |
+
for ell in range(-2,3*k//2+1):
|
596 |
+
yfortcoef[mu,k,ell]=0
|
597 |
+
for mu in range(0,der+1):
|
598 |
+
for k in range(0,L):
|
599 |
+
ctx.prec = ywptcoef[k]
|
600 |
+
for ell in range(0,3*k//2+1):
|
601 |
+
yfortcoef[mu,k,ell]=yd[mu,k,ell]*Fp[3*k-2*ell]/pipowers[2*k-ell]
|
602 |
+
yfortcoef[mu,k,ell]=yfortcoef[mu,k,ell]/((2*ctx.j)**ell)
|
603 |
+
# computing the tcoef[k,ell]
|
604 |
+
ytcoef={}
|
605 |
+
for chi in range(0,der+1):
|
606 |
+
for k in range(0,L):
|
607 |
+
for ell in range(-2,3*k//2+1):
|
608 |
+
ytcoef[chi,k,ell]=0
|
609 |
+
for chi in range(0,der+1):
|
610 |
+
for k in range(0,L):
|
611 |
+
ctx.prec = ywptcoef[k]
|
612 |
+
for ell in range(0,3*k//2+1):
|
613 |
+
ytcoef[chi,k,ell] =0
|
614 |
+
for mu in range(0, chi+1):
|
615 |
+
tcoefter=ctx.binomial(chi,mu)*ctx.power(la,mu)*yfortcoef[chi-mu,k,ell]
|
616 |
+
ytcoef[chi,k,ell] += tcoefter
|
617 |
+
|
618 |
+
# COMPUTING tv[k,ell]
|
619 |
+
# See II Section 3.8
|
620 |
+
#
|
621 |
+
# a has a good value
|
622 |
+
ctx.prec = max(xwptcoef[0], ywptcoef[0])+2
|
623 |
+
av = {}
|
624 |
+
av[0] = 1
|
625 |
+
av[1] = av[0]/a
|
626 |
+
|
627 |
+
ctx.prec = max(xwptcoef[0],ywptcoef[0])
|
628 |
+
for k in range(2,L):
|
629 |
+
av[k] = av[k-1] * av[1]
|
630 |
+
|
631 |
+
# Computing the quotients
|
632 |
+
xtv = {}
|
633 |
+
for chi in range(0,der+1):
|
634 |
+
for k in range(0,L):
|
635 |
+
ctx.prec = xwptcoef[k]
|
636 |
+
for ell in range(0,3*k//2+1):
|
637 |
+
xtv[chi,k,ell] = xtcoef[chi,k,ell]* av[k]
|
638 |
+
# Computing the quotients
|
639 |
+
ytv = {}
|
640 |
+
for chi in range(0,der+1):
|
641 |
+
for k in range(0,L):
|
642 |
+
ctx.prec = ywptcoef[k]
|
643 |
+
for ell in range(0,3*k//2+1):
|
644 |
+
ytv[chi,k,ell] = ytcoef[chi,k,ell]* av[k]
|
645 |
+
|
646 |
+
# COMPUTING THE TERMS xterm[k]
|
647 |
+
# See II Section 3.6
|
648 |
+
xterm = {}
|
649 |
+
for chi in range(0,der+1):
|
650 |
+
for n in range(0,L):
|
651 |
+
ctx.prec = xwpterm[n]
|
652 |
+
te = 0
|
653 |
+
for k in range(0, 3*n//2+1):
|
654 |
+
te += xtv[chi,n,k]
|
655 |
+
xterm[chi,n] = te
|
656 |
+
|
657 |
+
# COMPUTING THE TERMS yterm[k]
|
658 |
+
# See II Section 3.6
|
659 |
+
yterm = {}
|
660 |
+
for chi in range(0,der+1):
|
661 |
+
for n in range(0,L):
|
662 |
+
ctx.prec = ywpterm[n]
|
663 |
+
te = 0
|
664 |
+
for k in range(0, 3*n//2+1):
|
665 |
+
te += ytv[chi,n,k]
|
666 |
+
yterm[chi,n] = te
|
667 |
+
|
668 |
+
# COMPUTING rssum
|
669 |
+
# See II Section 3.5
|
670 |
+
xrssum={}
|
671 |
+
ctx.prec=15
|
672 |
+
xrsbound = math.sqrt(ctx.pi) * xc /(xb*a)
|
673 |
+
ctx.prec=15
|
674 |
+
xwprssum = ctx.mag(4.4*((L+3)**2)*xrsbound / xeps2)
|
675 |
+
xwprssum = max(xwprssum, ctx.mag(10*(L+1)))
|
676 |
+
ctx.prec = xwprssum
|
677 |
+
for chi in range(0,der+1):
|
678 |
+
xrssum[chi] = 0
|
679 |
+
for k in range(1,L+1):
|
680 |
+
xrssum[chi] += xterm[chi,L-k]
|
681 |
+
yrssum={}
|
682 |
+
ctx.prec=15
|
683 |
+
yrsbound = math.sqrt(ctx.pi) * yc /(yb*a)
|
684 |
+
ctx.prec=15
|
685 |
+
ywprssum = ctx.mag(4.4*((L+3)**2)*yrsbound / yeps2)
|
686 |
+
ywprssum = max(ywprssum, ctx.mag(10*(L+1)))
|
687 |
+
ctx.prec = ywprssum
|
688 |
+
for chi in range(0,der+1):
|
689 |
+
yrssum[chi] = 0
|
690 |
+
for k in range(1,L+1):
|
691 |
+
yrssum[chi] += yterm[chi,L-k]
|
692 |
+
|
693 |
+
# COMPUTING S3
|
694 |
+
# See II Section 3.19
|
695 |
+
ctx.prec = 15
|
696 |
+
A2 = 2**(max(ctx.mag(abs(xrssum[0])), ctx.mag(abs(yrssum[0]))))
|
697 |
+
eps8 = eps/(3*A2)
|
698 |
+
T = t *ctx.ln(t/(2*ctx.pi))
|
699 |
+
xwps3 = 5 + ctx.mag((1+(2/eps8)*ctx.power(a,-xsigma))*T)
|
700 |
+
ywps3 = 5 + ctx.mag((1+(2/eps8)*ctx.power(a,-ysigma))*T)
|
701 |
+
|
702 |
+
ctx.prec = max(xwps3, ywps3)
|
703 |
+
|
704 |
+
tpi = t/(2*ctx.pi)
|
705 |
+
arg = (t/2)*ctx.ln(tpi)-(t/2)-ctx.pi/8
|
706 |
+
U = ctx.expj(-arg)
|
707 |
+
a = trunc_a(t)
|
708 |
+
xasigma = ctx.power(a, -xsigma)
|
709 |
+
yasigma = ctx.power(a, -ysigma)
|
710 |
+
xS3 = ((-1)**(N-1)) * xasigma * U
|
711 |
+
yS3 = ((-1)**(N-1)) * yasigma * U
|
712 |
+
|
713 |
+
# COMPUTING S1 the zetasum
|
714 |
+
# See II Section 3.18
|
715 |
+
ctx.prec = 15
|
716 |
+
xwpsum = 4+ ctx.mag((N+ctx.power(N,1-xsigma))*ctx.ln(N) /eps1)
|
717 |
+
ywpsum = 4+ ctx.mag((N+ctx.power(N,1-ysigma))*ctx.ln(N) /eps1)
|
718 |
+
wpsum = max(xwpsum, ywpsum)
|
719 |
+
|
720 |
+
ctx.prec = wpsum +10
|
721 |
+
'''
|
722 |
+
# This can be improved
|
723 |
+
xS1={}
|
724 |
+
yS1={}
|
725 |
+
for chi in range(0,der+1):
|
726 |
+
xS1[chi] = 0
|
727 |
+
yS1[chi] = 0
|
728 |
+
for n in range(1,int(N)+1):
|
729 |
+
ln = ctx.ln(n)
|
730 |
+
xexpn = ctx.exp(-ln*(xsigma+ctx.j*t))
|
731 |
+
yexpn = ctx.conj(1/(n*xexpn))
|
732 |
+
for chi in range(0,der+1):
|
733 |
+
pown = ctx.power(-ln, chi)
|
734 |
+
xterm = pown*xexpn
|
735 |
+
yterm = pown*yexpn
|
736 |
+
xS1[chi] += xterm
|
737 |
+
yS1[chi] += yterm
|
738 |
+
'''
|
739 |
+
xS1, yS1 = ctx._zetasum(s, 1, int(N)-1, range(0,der+1), True)
|
740 |
+
|
741 |
+
# END OF COMPUTATION of xrz, yrz
|
742 |
+
# See II Section 3.1
|
743 |
+
ctx.prec = 15
|
744 |
+
xabsS1 = abs(xS1[der])
|
745 |
+
xabsS2 = abs(xrssum[der] * xS3)
|
746 |
+
xwpend = max(6, wpinitial+ctx.mag(6*(3*xabsS1+7*xabsS2) ) )
|
747 |
+
|
748 |
+
ctx.prec = xwpend
|
749 |
+
xrz={}
|
750 |
+
for chi in range(0,der+1):
|
751 |
+
xrz[chi] = xS1[chi]+xrssum[chi]*xS3
|
752 |
+
|
753 |
+
ctx.prec = 15
|
754 |
+
yabsS1 = abs(yS1[der])
|
755 |
+
yabsS2 = abs(yrssum[der] * yS3)
|
756 |
+
ywpend = max(6, wpinitial+ctx.mag(6*(3*yabsS1+7*yabsS2) ) )
|
757 |
+
|
758 |
+
ctx.prec = ywpend
|
759 |
+
yrz={}
|
760 |
+
for chi in range(0,der+1):
|
761 |
+
yrz[chi] = yS1[chi]+yrssum[chi]*yS3
|
762 |
+
yrz[chi] = ctx.conj(yrz[chi])
|
763 |
+
ctx.prec = wpinitial
|
764 |
+
return xrz, yrz
|
765 |
+
|
766 |
+
def Rzeta_set(ctx, s, derivatives=[0]):
|
767 |
+
r"""
|
768 |
+
Computes several derivatives of the auxiliary function of Riemann `R(s)`.
|
769 |
+
|
770 |
+
**Definition**
|
771 |
+
|
772 |
+
The function is defined by
|
773 |
+
|
774 |
+
.. math ::
|
775 |
+
|
776 |
+
\begin{equation}
|
777 |
+
{\mathop{\mathcal R }\nolimits}(s)=
|
778 |
+
\int_{0\swarrow1}\frac{x^{-s} e^{\pi i x^2}}{e^{\pi i x}-
|
779 |
+
e^{-\pi i x}}\,dx
|
780 |
+
\end{equation}
|
781 |
+
|
782 |
+
To this function we apply the Riemann-Siegel expansion.
|
783 |
+
"""
|
784 |
+
der = max(derivatives)
|
785 |
+
# First we take the value of ctx.prec
|
786 |
+
# During the computation we will change ctx.prec, and finally we will
|
787 |
+
# restore the initial value
|
788 |
+
wpinitial = ctx.prec
|
789 |
+
# Take the real and imaginary part of s
|
790 |
+
t = ctx._im(s)
|
791 |
+
sigma = ctx._re(s)
|
792 |
+
# Now compute several parameters that appear in the program
|
793 |
+
ctx.prec = 15
|
794 |
+
a = ctx.sqrt(t/(2*ctx.pi)) # Careful
|
795 |
+
asigma = ctx.power(a, sigma) # Careful
|
796 |
+
# We need a simple bound A1 < asigma (see II Section 3.1 and 3.3)
|
797 |
+
A1 = ctx.power(2, ctx.mag(asigma)-1)
|
798 |
+
# We compute various epsilon's (see II end of Section 3.1)
|
799 |
+
eps = ctx.power(2, -wpinitial)
|
800 |
+
eps1 = eps/6.
|
801 |
+
eps2 = eps * A1/3.
|
802 |
+
# COMPUTING SOME COEFFICIENTS THAT DEPENDS
|
803 |
+
# ON sigma
|
804 |
+
# constant b and c (see I Theorem 2 formula (26) )
|
805 |
+
# coefficients A and B1 (see I Section 6.1 equation (50))
|
806 |
+
# here we do not need high precision
|
807 |
+
ctx.prec = 15
|
808 |
+
if sigma > 0:
|
809 |
+
b = 2.
|
810 |
+
c = math.pow(9,sigma)/4.44288
|
811 |
+
# 4.44288 =(math.sqrt(2)*math.pi)
|
812 |
+
A = math.pow(9,sigma)
|
813 |
+
B1 = 1
|
814 |
+
else:
|
815 |
+
b = 2.25158 # math.sqrt( (3-2* math.log(2))*math.pi )
|
816 |
+
c = math.pow(2,-sigma)/4.44288
|
817 |
+
A = math.pow(2,-sigma)
|
818 |
+
B1 = 1.10789 # = 2*sqrt(1-log(2))
|
819 |
+
# COMPUTING L THE NUMBER OF TERMS NEEDED IN THE RIEMANN-SIEGEL
|
820 |
+
# CORRECTION
|
821 |
+
# See II Section 3.2
|
822 |
+
ctx.prec = 15
|
823 |
+
L = 1
|
824 |
+
while 3*c*ctx.gamma(L*0.5) * ctx.power(b*a,-L) >= eps2:
|
825 |
+
L = L+1
|
826 |
+
L = max(2,L)
|
827 |
+
# The number L has to satisfy some conditions.
|
828 |
+
# If not, RS cannot compute Rzeta(s) with the prescribed precision
|
829 |
+
# (see II, Section 3.2 condition (20) ) and
|
830 |
+
# (II, Section 3.3 condition (22) ). Also we have added
|
831 |
+
# an additional technical condition in Section 3.17 Proposition 17
|
832 |
+
if ((3*L >= 2*a*a/25.) or (3*L+2+sigma<0) or (abs(sigma)> a/2.)):
|
833 |
+
#print 'Error Riemann-Siegel can not compute with such precision'
|
834 |
+
ctx.prec = wpinitial
|
835 |
+
raise NotImplementedError("Riemann-Siegel can not compute with such precision")
|
836 |
+
|
837 |
+
# INITIALIZATION (CONTINUATION)
|
838 |
+
#
|
839 |
+
# eps3 is the constant defined on (II, Section 3.5 equation (27) )
|
840 |
+
# each term of the RS correction must be computed with error <= eps3
|
841 |
+
eps3 = eps2/(4*L)
|
842 |
+
|
843 |
+
# eps4 is defined on (II Section 3.6 equation (30) )
|
844 |
+
# each component of the formula (II Section 3.6 equation (29) )
|
845 |
+
# must be computed with error <= eps4
|
846 |
+
eps4 = eps3/(3*L)
|
847 |
+
|
848 |
+
# COMPUTING M. NUMBER OF DERIVATIVES Fp[m] TO COMPUTE
|
849 |
+
M = aux_M_Fp(ctx, A, eps4, a, B1, L)
|
850 |
+
Fp = {}
|
851 |
+
for n in range(M, 3*L-2):
|
852 |
+
Fp[n] = 0
|
853 |
+
|
854 |
+
# But I have not seen an instance of M != 3*L-3
|
855 |
+
#
|
856 |
+
# DETERMINATION OF J THE NUMBER OF TERMS NEEDED
|
857 |
+
# IN THE TAYLOR SERIES OF F.
|
858 |
+
# See II Section 3.11 equation (49))
|
859 |
+
h1 = eps4/(632*A)
|
860 |
+
h2 = ctx.pi*ctx.pi*B1*a *ctx.sqrt(3)*math.e*math.e
|
861 |
+
h2 = h1 * ctx.power((h2/M**2),(M-1)/3) / M
|
862 |
+
h3 = min(h1,h2)
|
863 |
+
J=12
|
864 |
+
jvalue = (2*ctx.pi)**J / ctx.gamma(J+1)
|
865 |
+
while jvalue > h3:
|
866 |
+
J = J+1
|
867 |
+
jvalue = (2*ctx.pi)*jvalue/J
|
868 |
+
|
869 |
+
# COMPUTING eps5[m] for 1 <= m <= 21
|
870 |
+
# See II Section 10 equation (43)
|
871 |
+
eps5={}
|
872 |
+
foreps5 = math.pi*math.pi*B1*a
|
873 |
+
for m in range(0,22):
|
874 |
+
aux1 = math.pow(foreps5, m/3)/(316.*A)
|
875 |
+
aux2 = ctx.gamma(m+1)/ctx.gamma(m/3.0+0.5)
|
876 |
+
aux2 = math.sqrt(aux2)
|
877 |
+
eps5[m] = aux1*aux2*eps4
|
878 |
+
|
879 |
+
# COMPUTING wpfp
|
880 |
+
# See II Section 3.13 equation (59)
|
881 |
+
twenty = min(3*L-3, 21)+1
|
882 |
+
aux = 6812*J
|
883 |
+
wpfp = ctx.mag(44*J)
|
884 |
+
for m in range(0, twenty):
|
885 |
+
wpfp = max(wpfp, ctx.mag(aux*ctx.gamma(m+1)/eps5[m]))
|
886 |
+
# COMPUTING N AND p
|
887 |
+
# See II Section
|
888 |
+
ctx.prec = wpfp + ctx.mag(t) + 20
|
889 |
+
a = ctx.sqrt(t/(2*ctx.pi))
|
890 |
+
N = ctx.floor(a)
|
891 |
+
p = 1-2*(a-N)
|
892 |
+
|
893 |
+
# now we get a rounded version of p to the precision wpfp
|
894 |
+
# this possibly is not necessary
|
895 |
+
num = ctx.floor(p*(ctx.mpf(2)**wpfp))
|
896 |
+
difference = p * (ctx.mpf(2)**wpfp)-num
|
897 |
+
if difference < 0.5:
|
898 |
+
num = num
|
899 |
+
else:
|
900 |
+
num = num+1
|
901 |
+
p = ctx.convert(num * (ctx.mpf(2)**(-wpfp)))
|
902 |
+
|
903 |
+
# COMPUTING THE COEFFICIENTS c[n] = cc[n]
|
904 |
+
# We shall use the notation cc[n], since there is
|
905 |
+
# a constant that is called c
|
906 |
+
# See II Section 3.14
|
907 |
+
# We compute the coefficients and also save them in a
|
908 |
+
# cache. The bulk of the computation is passed to
|
909 |
+
# the function coef()
|
910 |
+
#
|
911 |
+
# eps6 is defined in II Section 3.13 equation (58)
|
912 |
+
eps6 = ctx.power(2*ctx.pi, J)/(ctx.gamma(J+1)*3*J)
|
913 |
+
|
914 |
+
# Now we compute the coefficients
|
915 |
+
cc={}
|
916 |
+
cont={}
|
917 |
+
cont, pipowers = coef(ctx, J, eps6)
|
918 |
+
cc = cont.copy() # we need a copy since we have to change its values
|
919 |
+
Fp={}
|
920 |
+
for n in range(M, 3*L-2):
|
921 |
+
Fp[n] = 0
|
922 |
+
ctx.prec = wpfp
|
923 |
+
for m in range(0,M+1):
|
924 |
+
sumP = 0
|
925 |
+
for k in range(2*J-m-1,-1,-1):
|
926 |
+
sumP = (sumP * p) + cc[k]
|
927 |
+
Fp[m] = sumP
|
928 |
+
# preparation of the new coefficients
|
929 |
+
for k in range(0, 2*J-m-1):
|
930 |
+
cc[k] = (k+1) * cc[k+1]
|
931 |
+
|
932 |
+
# COMPUTING THE NUMBERS d[n,k]
|
933 |
+
# See II Section 3.17
|
934 |
+
|
935 |
+
# First we compute the working precisions wpd[k]
|
936 |
+
# See II equation (92)
|
937 |
+
wpd = {}
|
938 |
+
d1 = max(6, ctx.mag(40*L*L))
|
939 |
+
d2 = 13+ctx.mag((1+abs(sigma))*A)-ctx.mag(eps4)-1
|
940 |
+
const = ctx.ln(8/(ctx.pi*ctx.pi*a*a*B1*B1)) /2
|
941 |
+
for n in range(0,L):
|
942 |
+
d3 = ctx.mag(ctx.sqrt(ctx.gamma(n-0.5)))-ctx.floor(n*const)+d2
|
943 |
+
wpd[n] = max(d3,d1)
|
944 |
+
|
945 |
+
# procedure of II Section 3.17
|
946 |
+
ctx.prec = wpd[1]+10
|
947 |
+
psigma = 1-(2*sigma)
|
948 |
+
d = {}
|
949 |
+
d[0,0,-2]=0; d[0,0,-1]=0; d[0,0,0]=1; d[0,0,1]=0
|
950 |
+
d[0,-1,-2]=0; d[0,-1,-1]=0; d[0,-1,0]=1; d[0,-1,1]=0
|
951 |
+
for n in range(1,L):
|
952 |
+
ctx.prec = wpd[n]+10
|
953 |
+
for k in range(0,3*n//2+1):
|
954 |
+
m = 3*n-2*k
|
955 |
+
if (m!=0):
|
956 |
+
m1 = ctx.one/m
|
957 |
+
c1 = m1/4
|
958 |
+
c2 = (psigma*m1)/2
|
959 |
+
c3 = -(m+1)
|
960 |
+
d[0,n,k] = c3*d[0,n-1,k-2]+c1*d[0,n-1,k]+c2*d[0,n-1,k-1]
|
961 |
+
else:
|
962 |
+
d[0,n,k]=0
|
963 |
+
for r in range(0,k):
|
964 |
+
add = d[0,n,r]*(ctx.one*ctx.fac(2*k-2*r)/ctx.fac(k-r))
|
965 |
+
d[0,n,k] -= ((-1)**(k-r))*add
|
966 |
+
d[0,n,-2]=0; d[0,n,-1]=0; d[0,n,3*n//2+1]=0
|
967 |
+
|
968 |
+
for mu in range(-2,der+1):
|
969 |
+
for n in range(-2,L):
|
970 |
+
for k in range(-3,max(1,3*n//2+2)):
|
971 |
+
if ((mu<0)or (n<0) or(k<0)or (k>3*n//2)):
|
972 |
+
d[mu,n,k] = 0
|
973 |
+
|
974 |
+
for mu in range(1,der+1):
|
975 |
+
for n in range(0,L):
|
976 |
+
ctx.prec = wpd[n]+10
|
977 |
+
for k in range(0,3*n//2+1):
|
978 |
+
aux=(2*mu-2)*d[mu-2,n-2,k-3]+2*(sigma+n-2)*d[mu-1,n-2,k-3]
|
979 |
+
d[mu,n,k] = aux - d[mu-1,n-1,k-1]
|
980 |
+
|
981 |
+
# COMPUTING THE COEFFICIENTS t[k,l]
|
982 |
+
# See II Section 3.9
|
983 |
+
#
|
984 |
+
# computing the needed wp
|
985 |
+
wptcoef = {}
|
986 |
+
wpterm = {}
|
987 |
+
ctx.prec = 15
|
988 |
+
c1 = ctx.mag(40*(L+2))
|
989 |
+
c2 = ctx.mag(68*(L+2)*A)
|
990 |
+
c4 = ctx.mag(B1*a*math.sqrt(ctx.pi))-1
|
991 |
+
for k in range(0,L):
|
992 |
+
c3 = c2 - k*c4+ctx.mag(ctx.fac(k+0.5))/2.
|
993 |
+
wptcoef[k] = max(c1,c3-ctx.mag(eps4)+1)+1 +10
|
994 |
+
wpterm[k] = max(c1,ctx.mag(L+2)+c3-ctx.mag(eps3)+1)+1 +10
|
995 |
+
|
996 |
+
# check of power of pi
|
997 |
+
|
998 |
+
# computing the fortcoef[mu,k,ell]
|
999 |
+
fortcoef={}
|
1000 |
+
for mu in derivatives:
|
1001 |
+
for k in range(0,L):
|
1002 |
+
for ell in range(-2,3*k//2+1):
|
1003 |
+
fortcoef[mu,k,ell]=0
|
1004 |
+
|
1005 |
+
for mu in derivatives:
|
1006 |
+
for k in range(0,L):
|
1007 |
+
ctx.prec = wptcoef[k]
|
1008 |
+
for ell in range(0,3*k//2+1):
|
1009 |
+
fortcoef[mu,k,ell]=d[mu,k,ell]*Fp[3*k-2*ell]/pipowers[2*k-ell]
|
1010 |
+
fortcoef[mu,k,ell]=fortcoef[mu,k,ell]/((2*ctx.j)**ell)
|
1011 |
+
|
1012 |
+
def trunc_a(t):
|
1013 |
+
wp = ctx.prec
|
1014 |
+
ctx.prec = wp + 2
|
1015 |
+
aa = ctx.sqrt(t/(2*ctx.pi))
|
1016 |
+
ctx.prec = wp
|
1017 |
+
return aa
|
1018 |
+
|
1019 |
+
# computing the tcoef[chi,k,ell]
|
1020 |
+
tcoef={}
|
1021 |
+
for chi in derivatives:
|
1022 |
+
for k in range(0,L):
|
1023 |
+
for ell in range(-2,3*k//2+1):
|
1024 |
+
tcoef[chi,k,ell]=0
|
1025 |
+
ctx.prec = wptcoef[0]+3
|
1026 |
+
aa = trunc_a(t)
|
1027 |
+
la = -ctx.ln(aa)
|
1028 |
+
|
1029 |
+
for chi in derivatives:
|
1030 |
+
for k in range(0,L):
|
1031 |
+
ctx.prec = wptcoef[k]
|
1032 |
+
for ell in range(0,3*k//2+1):
|
1033 |
+
tcoef[chi,k,ell] = 0
|
1034 |
+
for mu in range(0, chi+1):
|
1035 |
+
tcoefter = ctx.binomial(chi,mu) * la**mu * \
|
1036 |
+
fortcoef[chi-mu,k,ell]
|
1037 |
+
tcoef[chi,k,ell] += tcoefter
|
1038 |
+
|
1039 |
+
# COMPUTING tv[k,ell]
|
1040 |
+
# See II Section 3.8
|
1041 |
+
|
1042 |
+
# Computing the powers av[k] = a**(-k)
|
1043 |
+
ctx.prec = wptcoef[0] + 2
|
1044 |
+
|
1045 |
+
# a has a good value of a.
|
1046 |
+
# See II Section 3.6
|
1047 |
+
av = {}
|
1048 |
+
av[0] = 1
|
1049 |
+
av[1] = av[0]/a
|
1050 |
+
|
1051 |
+
ctx.prec = wptcoef[0]
|
1052 |
+
for k in range(2,L):
|
1053 |
+
av[k] = av[k-1] * av[1]
|
1054 |
+
|
1055 |
+
# Computing the quotients
|
1056 |
+
tv = {}
|
1057 |
+
for chi in derivatives:
|
1058 |
+
for k in range(0,L):
|
1059 |
+
ctx.prec = wptcoef[k]
|
1060 |
+
for ell in range(0,3*k//2+1):
|
1061 |
+
tv[chi,k,ell] = tcoef[chi,k,ell]* av[k]
|
1062 |
+
|
1063 |
+
# COMPUTING THE TERMS term[k]
|
1064 |
+
# See II Section 3.6
|
1065 |
+
term = {}
|
1066 |
+
for chi in derivatives:
|
1067 |
+
for n in range(0,L):
|
1068 |
+
ctx.prec = wpterm[n]
|
1069 |
+
te = 0
|
1070 |
+
for k in range(0, 3*n//2+1):
|
1071 |
+
te += tv[chi,n,k]
|
1072 |
+
term[chi,n] = te
|
1073 |
+
|
1074 |
+
# COMPUTING rssum
|
1075 |
+
# See II Section 3.5
|
1076 |
+
rssum={}
|
1077 |
+
ctx.prec=15
|
1078 |
+
rsbound = math.sqrt(ctx.pi) * c /(b*a)
|
1079 |
+
ctx.prec=15
|
1080 |
+
wprssum = ctx.mag(4.4*((L+3)**2)*rsbound / eps2)
|
1081 |
+
wprssum = max(wprssum, ctx.mag(10*(L+1)))
|
1082 |
+
ctx.prec = wprssum
|
1083 |
+
for chi in derivatives:
|
1084 |
+
rssum[chi] = 0
|
1085 |
+
for k in range(1,L+1):
|
1086 |
+
rssum[chi] += term[chi,L-k]
|
1087 |
+
|
1088 |
+
# COMPUTING S3
|
1089 |
+
# See II Section 3.19
|
1090 |
+
ctx.prec = 15
|
1091 |
+
A2 = 2**(ctx.mag(rssum[0]))
|
1092 |
+
eps8 = eps/(3* A2)
|
1093 |
+
T = t * ctx.ln(t/(2*ctx.pi))
|
1094 |
+
wps3 = 5 + ctx.mag((1+(2/eps8)*ctx.power(a,-sigma))*T)
|
1095 |
+
|
1096 |
+
ctx.prec = wps3
|
1097 |
+
tpi = t/(2*ctx.pi)
|
1098 |
+
arg = (t/2)*ctx.ln(tpi)-(t/2)-ctx.pi/8
|
1099 |
+
U = ctx.expj(-arg)
|
1100 |
+
a = trunc_a(t)
|
1101 |
+
asigma = ctx.power(a, -sigma)
|
1102 |
+
S3 = ((-1)**(N-1)) * asigma * U
|
1103 |
+
|
1104 |
+
# COMPUTING S1 the zetasum
|
1105 |
+
# See II Section 3.18
|
1106 |
+
ctx.prec = 15
|
1107 |
+
wpsum = 4 + ctx.mag((N+ctx.power(N,1-sigma))*ctx.ln(N)/eps1)
|
1108 |
+
|
1109 |
+
ctx.prec = wpsum + 10
|
1110 |
+
'''
|
1111 |
+
# This can be improved
|
1112 |
+
S1 = {}
|
1113 |
+
for chi in derivatives:
|
1114 |
+
S1[chi] = 0
|
1115 |
+
for n in range(1,int(N)+1):
|
1116 |
+
ln = ctx.ln(n)
|
1117 |
+
expn = ctx.exp(-ln*(sigma+ctx.j*t))
|
1118 |
+
for chi in derivatives:
|
1119 |
+
term = ctx.power(-ln, chi)*expn
|
1120 |
+
S1[chi] += term
|
1121 |
+
'''
|
1122 |
+
S1 = ctx._zetasum(s, 1, int(N)-1, derivatives)[0]
|
1123 |
+
|
1124 |
+
# END OF COMPUTATION
|
1125 |
+
# See II Section 3.1
|
1126 |
+
ctx.prec = 15
|
1127 |
+
absS1 = abs(S1[der])
|
1128 |
+
absS2 = abs(rssum[der] * S3)
|
1129 |
+
wpend = max(6, wpinitial + ctx.mag(6*(3*absS1+7*absS2)))
|
1130 |
+
ctx.prec = wpend
|
1131 |
+
rz = {}
|
1132 |
+
for chi in derivatives:
|
1133 |
+
rz[chi] = S1[chi]+rssum[chi]*S3
|
1134 |
+
ctx.prec = wpinitial
|
1135 |
+
return rz
|
1136 |
+
|
1137 |
+
|
1138 |
+
def z_half(ctx,t,der=0):
|
1139 |
+
r"""
|
1140 |
+
z_half(t,der=0) Computes Z^(der)(t)
|
1141 |
+
"""
|
1142 |
+
s=ctx.mpf('0.5')+ctx.j*t
|
1143 |
+
wpinitial = ctx.prec
|
1144 |
+
ctx.prec = 15
|
1145 |
+
tt = t/(2*ctx.pi)
|
1146 |
+
wptheta = wpinitial +1 + ctx.mag(3*(tt**1.5)*ctx.ln(tt))
|
1147 |
+
wpz = wpinitial + 1 + ctx.mag(12*tt*ctx.ln(tt))
|
1148 |
+
ctx.prec = wptheta
|
1149 |
+
theta = ctx.siegeltheta(t)
|
1150 |
+
ctx.prec = wpz
|
1151 |
+
rz = Rzeta_set(ctx,s, range(der+1))
|
1152 |
+
if der > 0: ps1 = ctx._re(ctx.psi(0,s/2)/2 - ctx.ln(ctx.pi)/2)
|
1153 |
+
if der > 1: ps2 = ctx._re(ctx.j*ctx.psi(1,s/2)/4)
|
1154 |
+
if der > 2: ps3 = ctx._re(-ctx.psi(2,s/2)/8)
|
1155 |
+
if der > 3: ps4 = ctx._re(-ctx.j*ctx.psi(3,s/2)/16)
|
1156 |
+
exptheta = ctx.expj(theta)
|
1157 |
+
if der == 0:
|
1158 |
+
z = 2*exptheta*rz[0]
|
1159 |
+
if der == 1:
|
1160 |
+
zf = 2j*exptheta
|
1161 |
+
z = zf*(ps1*rz[0]+rz[1])
|
1162 |
+
if der == 2:
|
1163 |
+
zf = 2 * exptheta
|
1164 |
+
z = -zf*(2*rz[1]*ps1+rz[0]*ps1**2+rz[2]-ctx.j*rz[0]*ps2)
|
1165 |
+
if der == 3:
|
1166 |
+
zf = -2j*exptheta
|
1167 |
+
z = 3*rz[1]*ps1**2+rz[0]*ps1**3+3*ps1*rz[2]
|
1168 |
+
z = zf*(z-3j*rz[1]*ps2-3j*rz[0]*ps1*ps2+rz[3]-rz[0]*ps3)
|
1169 |
+
if der == 4:
|
1170 |
+
zf = 2*exptheta
|
1171 |
+
z = 4*rz[1]*ps1**3+rz[0]*ps1**4+6*ps1**2*rz[2]
|
1172 |
+
z = z-12j*rz[1]*ps1*ps2-6j*rz[0]*ps1**2*ps2-6j*rz[2]*ps2-3*rz[0]*ps2*ps2
|
1173 |
+
z = z + 4*ps1*rz[3]-4*rz[1]*ps3-4*rz[0]*ps1*ps3+rz[4]+ctx.j*rz[0]*ps4
|
1174 |
+
z = zf*z
|
1175 |
+
ctx.prec = wpinitial
|
1176 |
+
return ctx._re(z)
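
z_half above implements the identity Z(t) = exp(i*theta(t)) * zeta(1/2 + i*t) and its t-derivatives. As an illustrative cross-check of that identity through mpmath's public functions (a sketch only, assuming a standard mpmath install; t = 30 is an arbitrary sample point):

from mpmath import mp

mp.dps = 25
t = mp.mpf(30)
lhs = mp.siegelz(t)                                   # Z(t)
rhs = mp.expj(mp.siegeltheta(t)) * mp.zeta(mp.mpc(0.5, t))
print(lhs)           # real value of Z(t)
print(mp.chop(rhs))  # should agree with lhs; imaginary part ~ 0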
|
1177 |
+
|
1178 |
+
def zeta_half(ctx, s, k=0):
|
1179 |
+
"""
|
1180 |
+
zeta_half(s,k=0) Computes zeta^(k)(s) when Re s = 0.5
|
1181 |
+
"""
|
1182 |
+
wpinitial = ctx.prec
|
1183 |
+
sigma = ctx._re(s)
|
1184 |
+
t = ctx._im(s)
|
1185 |
+
#--- compute wptheta, wpR, wpbasic ---
|
1186 |
+
ctx.prec = 53
|
1187 |
+
# X see II Section 3.21 (109) and (110)
|
1188 |
+
if sigma > 0:
|
1189 |
+
X = ctx.sqrt(abs(s))
|
1190 |
+
else:
|
1191 |
+
X = (2*ctx.pi)**(sigma-1) * abs(1-s)**(0.5-sigma)
|
1192 |
+
# M1 see II Section 3.21 (111) and (112)
|
1193 |
+
if sigma > 0:
|
1194 |
+
M1 = 2*ctx.sqrt(t/(2*ctx.pi))
|
1195 |
+
else:
|
1196 |
+
M1 = 4 * t * X
|
1197 |
+
# T see II Section 3.21 (113)
|
1198 |
+
abst = abs(0.5-s)
|
1199 |
+
T = 2* abst*math.log(abst)
|
1200 |
+
# computing wpbasic, wptheta, wpR see II Section 3.21
|
1201 |
+
wpbasic = max(6,3+ctx.mag(t))
|
1202 |
+
wpbasic2 = 2+ctx.mag(2.12*M1+21.2*M1*X+1.3*M1*X*T)+wpinitial+1
|
1203 |
+
wpbasic = max(wpbasic, wpbasic2)
|
1204 |
+
wptheta = max(4, 3+ctx.mag(2.7*M1*X)+wpinitial+1)
|
1205 |
+
wpR = 3+ctx.mag(1.1+2*X)+wpinitial+1
|
1206 |
+
ctx.prec = wptheta
|
1207 |
+
theta = ctx.siegeltheta(t-ctx.j*(sigma-ctx.mpf('0.5')))
|
1208 |
+
if k > 0: ps1 = (ctx._re(ctx.psi(0,s/2)))/2 - ctx.ln(ctx.pi)/2
|
1209 |
+
if k > 1: ps2 = -(ctx._im(ctx.psi(1,s/2)))/4
|
1210 |
+
if k > 2: ps3 = -(ctx._re(ctx.psi(2,s/2)))/8
|
1211 |
+
if k > 3: ps4 = (ctx._im(ctx.psi(3,s/2)))/16
|
1212 |
+
ctx.prec = wpR
|
1213 |
+
xrz = Rzeta_set(ctx,s,range(k+1))
|
1214 |
+
yrz={}
|
1215 |
+
for chi in range(0,k+1):
|
1216 |
+
yrz[chi] = ctx.conj(xrz[chi])
|
1217 |
+
ctx.prec = wpbasic
|
1218 |
+
exptheta = ctx.expj(-2*theta)
|
1219 |
+
if k==0:
|
1220 |
+
zv = xrz[0]+exptheta*yrz[0]
|
1221 |
+
if k==1:
|
1222 |
+
zv1 = -yrz[1] - 2*yrz[0]*ps1
|
1223 |
+
zv = xrz[1] + exptheta*zv1
|
1224 |
+
if k==2:
|
1225 |
+
zv1 = 4*yrz[1]*ps1+4*yrz[0]*(ps1**2)+yrz[2]+2j*yrz[0]*ps2
|
1226 |
+
zv = xrz[2]+exptheta*zv1
|
1227 |
+
if k==3:
|
1228 |
+
zv1 = -12*yrz[1]*ps1**2-8*yrz[0]*ps1**3-6*yrz[2]*ps1-6j*yrz[1]*ps2
|
1229 |
+
zv1 = zv1 - 12j*yrz[0]*ps1*ps2-yrz[3]+2*yrz[0]*ps3
|
1230 |
+
zv = xrz[3]+exptheta*zv1
|
1231 |
+
if k == 4:
|
1232 |
+
zv1 = 32*yrz[1]*ps1**3 +16*yrz[0]*ps1**4+24*yrz[2]*ps1**2
|
1233 |
+
zv1 = zv1 +48j*yrz[1]*ps1*ps2+48j*yrz[0]*(ps1**2)*ps2
|
1234 |
+
zv1 = zv1+12j*yrz[2]*ps2-12*yrz[0]*ps2**2+8*yrz[3]*ps1-8*yrz[1]*ps3
|
1235 |
+
zv1 = zv1-16*yrz[0]*ps1*ps3+yrz[4]-2j*yrz[0]*ps4
|
1236 |
+
zv = xrz[4]+exptheta*zv1
|
1237 |
+
ctx.prec = wpinitial
|
1238 |
+
return zv
|
1239 |
+
|
1240 |
+
def zeta_offline(ctx, s, k=0):
|
1241 |
+
"""
|
1242 |
+
Computes zeta^(k)(s) off the line
|
1243 |
+
"""
|
1244 |
+
wpinitial = ctx.prec
|
1245 |
+
sigma = ctx._re(s)
|
1246 |
+
t = ctx._im(s)
|
1247 |
+
#--- compute wptheta, wpR, wpbasic ---
|
1248 |
+
ctx.prec = 53
|
1249 |
+
# X see II Section 3.21 (109) and (110)
|
1250 |
+
if sigma > 0:
|
1251 |
+
X = ctx.power(abs(s), 0.5)
|
1252 |
+
else:
|
1253 |
+
X = ctx.power(2*ctx.pi, sigma-1)*ctx.power(abs(1-s),0.5-sigma)
|
1254 |
+
# M1 see II Section 3.21 (111) and (112)
|
1255 |
+
if (sigma > 0):
|
1256 |
+
M1 = 2*ctx.sqrt(t/(2*ctx.pi))
|
1257 |
+
else:
|
1258 |
+
M1 = 4 * t * X
|
1259 |
+
# M2 see II Section 3.21 (111) and (112)
|
1260 |
+
if (1-sigma > 0):
|
1261 |
+
M2 = 2*ctx.sqrt(t/(2*ctx.pi))
|
1262 |
+
else:
|
1263 |
+
M2 = 4*t*ctx.power(2*ctx.pi, -sigma)*ctx.power(abs(s),sigma-0.5)
|
1264 |
+
# T see II Section 3.21 (113)
|
1265 |
+
abst = abs(0.5-s)
|
1266 |
+
T = 2* abst*math.log(abst)
|
1267 |
+
# computing wpbasic, wptheta, wpR see II Section 3.21
|
1268 |
+
wpbasic = max(6,3+ctx.mag(t))
|
1269 |
+
wpbasic2 = 2+ctx.mag(2.12*M1+21.2*M2*X+1.3*M2*X*T)+wpinitial+1
|
1270 |
+
wpbasic = max(wpbasic, wpbasic2)
|
1271 |
+
wptheta = max(4, 3+ctx.mag(2.7*M2*X)+wpinitial+1)
|
1272 |
+
wpR = 3+ctx.mag(1.1+2*X)+wpinitial+1
|
1273 |
+
ctx.prec = wptheta
|
1274 |
+
theta = ctx.siegeltheta(t-ctx.j*(sigma-ctx.mpf('0.5')))
|
1275 |
+
s1 = s
|
1276 |
+
s2 = ctx.conj(1-s1)
|
1277 |
+
ctx.prec = wpR
|
1278 |
+
xrz, yrz = Rzeta_simul(ctx, s, k)
|
1279 |
+
if k > 0: ps1 = (ctx.psi(0,s1/2)+ctx.psi(0,(1-s1)/2))/4 - ctx.ln(ctx.pi)/2
|
1280 |
+
if k > 1: ps2 = ctx.j*(ctx.psi(1,s1/2)-ctx.psi(1,(1-s1)/2))/8
|
1281 |
+
if k > 2: ps3 = -(ctx.psi(2,s1/2)+ctx.psi(2,(1-s1)/2))/16
|
1282 |
+
if k > 3: ps4 = -ctx.j*(ctx.psi(3,s1/2)-ctx.psi(3,(1-s1)/2))/32
|
1283 |
+
ctx.prec = wpbasic
|
1284 |
+
exptheta = ctx.expj(-2*theta)
|
1285 |
+
if k == 0:
|
1286 |
+
zv = xrz[0]+exptheta*yrz[0]
|
1287 |
+
if k == 1:
|
1288 |
+
zv1 = -yrz[1]-2*yrz[0]*ps1
|
1289 |
+
zv = xrz[1]+exptheta*zv1
|
1290 |
+
if k == 2:
|
1291 |
+
zv1 = 4*yrz[1]*ps1+4*yrz[0]*(ps1**2) +yrz[2]+2j*yrz[0]*ps2
|
1292 |
+
zv = xrz[2]+exptheta*zv1
|
1293 |
+
if k == 3:
|
1294 |
+
zv1 = -12*yrz[1]*ps1**2 -8*yrz[0]*ps1**3-6*yrz[2]*ps1-6j*yrz[1]*ps2
|
1295 |
+
zv1 = zv1 - 12j*yrz[0]*ps1*ps2-yrz[3]+2*yrz[0]*ps3
|
1296 |
+
zv = xrz[3]+exptheta*zv1
|
1297 |
+
if k == 4:
|
1298 |
+
zv1 = 32*yrz[1]*ps1**3 +16*yrz[0]*ps1**4+24*yrz[2]*ps1**2
|
1299 |
+
zv1 = zv1 +48j*yrz[1]*ps1*ps2+48j*yrz[0]*(ps1**2)*ps2
|
1300 |
+
zv1 = zv1+12j*yrz[2]*ps2-12*yrz[0]*ps2**2+8*yrz[3]*ps1-8*yrz[1]*ps3
|
1301 |
+
zv1 = zv1-16*yrz[0]*ps1*ps3+yrz[4]-2j*yrz[0]*ps4
|
1302 |
+
zv = xrz[4]+exptheta*zv1
|
1303 |
+
ctx.prec = wpinitial
|
1304 |
+
return zv
|
1305 |
+
|
1306 |
+
def z_offline(ctx, w, k=0):
|
1307 |
+
r"""
|
1308 |
+
Computes Z(w) and its derivatives off the line
|
1309 |
+
"""
|
1310 |
+
s = ctx.mpf('0.5')+ctx.j*w
|
1311 |
+
s1 = s
|
1312 |
+
s2 = ctx.conj(1-s1)
|
1313 |
+
wpinitial = ctx.prec
|
1314 |
+
ctx.prec = 35
|
1315 |
+
# X see II Section 3.21 (109) and (110)
|
1316 |
+
# M1 see II Section 3.21 (111) and (112)
|
1317 |
+
if (ctx._re(s1) >= 0):
|
1318 |
+
M1 = 2*ctx.sqrt(ctx._im(s1)/(2 * ctx.pi))
|
1319 |
+
X = ctx.sqrt(abs(s1))
|
1320 |
+
else:
|
1321 |
+
X = (2*ctx.pi)**(ctx._re(s1)-1) * abs(1-s1)**(0.5-ctx._re(s1))
|
1322 |
+
M1 = 4 * ctx._im(s1)*X
|
1323 |
+
# M2 see II Section 3.21 (111) and (112)
|
1324 |
+
if (ctx._re(s2) >= 0):
|
1325 |
+
M2 = 2*ctx.sqrt(ctx._im(s2)/(2 * ctx.pi))
|
1326 |
+
else:
|
1327 |
+
M2 = 4 * ctx._im(s2)*(2*ctx.pi)**(ctx._re(s2)-1)*abs(1-s2)**(0.5-ctx._re(s2))
|
1328 |
+
# T see II Section 3.21 Prop. 27
|
1329 |
+
T = 2*abs(ctx.siegeltheta(w))
|
1330 |
+
# defining some precisions
|
1331 |
+
# see II Section 3.22 (115), (116), (117)
|
1332 |
+
aux1 = ctx.sqrt(X)
|
1333 |
+
aux2 = aux1*(M1+M2)
|
1334 |
+
aux3 = 3 +wpinitial
|
1335 |
+
wpbasic = max(6, 3+ctx.mag(T), ctx.mag(aux2*(26+2*T))+aux3)
|
1336 |
+
wptheta = max(4,ctx.mag(2.04*aux2)+aux3)
|
1337 |
+
wpR = ctx.mag(4*aux1)+aux3
|
1338 |
+
# now the computations
|
1339 |
+
ctx.prec = wptheta
|
1340 |
+
theta = ctx.siegeltheta(w)
|
1341 |
+
ctx.prec = wpR
|
1342 |
+
xrz, yrz = Rzeta_simul(ctx,s,k)
|
1343 |
+
pta = 0.25 + 0.5j*w
|
1344 |
+
ptb = 0.25 - 0.5j*w
|
1345 |
+
if k > 0: ps1 = 0.25*(ctx.psi(0,pta)+ctx.psi(0,ptb)) - ctx.ln(ctx.pi)/2
|
1346 |
+
if k > 1: ps2 = (1j/8)*(ctx.psi(1,pta)-ctx.psi(1,ptb))
|
1347 |
+
if k > 2: ps3 = (-1./16)*(ctx.psi(2,pta)+ctx.psi(2,ptb))
|
1348 |
+
if k > 3: ps4 = (-1j/32)*(ctx.psi(3,pta)-ctx.psi(3,ptb))
|
1349 |
+
ctx.prec = wpbasic
|
1350 |
+
exptheta = ctx.expj(theta)
|
1351 |
+
if k == 0:
|
1352 |
+
zv = exptheta*xrz[0]+yrz[0]/exptheta
|
1353 |
+
j = ctx.j
|
1354 |
+
if k == 1:
|
1355 |
+
zv = j*exptheta*(xrz[1]+xrz[0]*ps1)-j*(yrz[1]+yrz[0]*ps1)/exptheta
|
1356 |
+
if k == 2:
|
1357 |
+
zv = exptheta*(-2*xrz[1]*ps1-xrz[0]*ps1**2-xrz[2]+j*xrz[0]*ps2)
|
1358 |
+
zv =zv + (-2*yrz[1]*ps1-yrz[0]*ps1**2-yrz[2]-j*yrz[0]*ps2)/exptheta
|
1359 |
+
if k == 3:
|
1360 |
+
zv1 = -3*xrz[1]*ps1**2-xrz[0]*ps1**3-3*xrz[2]*ps1+j*3*xrz[1]*ps2
|
1361 |
+
zv1 = (zv1+ 3j*xrz[0]*ps1*ps2-xrz[3]+xrz[0]*ps3)*j*exptheta
|
1362 |
+
zv2 = 3*yrz[1]*ps1**2+yrz[0]*ps1**3+3*yrz[2]*ps1+j*3*yrz[1]*ps2
|
1363 |
+
zv2 = j*(zv2 + 3j*yrz[0]*ps1*ps2+ yrz[3]-yrz[0]*ps3)/exptheta
|
1364 |
+
zv = zv1+zv2
|
1365 |
+
if k == 4:
|
1366 |
+
zv1 = 4*xrz[1]*ps1**3+xrz[0]*ps1**4 + 6*xrz[2]*ps1**2
|
1367 |
+
zv1 = zv1-12j*xrz[1]*ps1*ps2-6j*xrz[0]*ps1**2*ps2-6j*xrz[2]*ps2
|
1368 |
+
zv1 = zv1-3*xrz[0]*ps2*ps2+4*xrz[3]*ps1-4*xrz[1]*ps3-4*xrz[0]*ps1*ps3
|
1369 |
+
zv1 = zv1+xrz[4]+j*xrz[0]*ps4
|
1370 |
+
zv2 = 4*yrz[1]*ps1**3+yrz[0]*ps1**4 + 6*yrz[2]*ps1**2
|
1371 |
+
zv2 = zv2+12j*yrz[1]*ps1*ps2+6j*yrz[0]*ps1**2*ps2+6j*yrz[2]*ps2
|
1372 |
+
zv2 = zv2-3*yrz[0]*ps2*ps2+4*yrz[3]*ps1-4*yrz[1]*ps3-4*yrz[0]*ps1*ps3
|
1373 |
+
zv2 = zv2+yrz[4]-j*yrz[0]*ps4
|
1374 |
+
zv = exptheta*zv1+zv2/exptheta
|
1375 |
+
ctx.prec = wpinitial
|
1376 |
+
return zv
|
1377 |
+
|
@defun
def rs_zeta(ctx, s, derivative=0, **kwargs):
    if derivative > 4:
        raise NotImplementedError
    s = ctx.convert(s)
    re = ctx._re(s); im = ctx._im(s)
    if im < 0:
        z = ctx.conj(ctx.rs_zeta(ctx.conj(s), derivative))
        return z
    critical_line = (re == 0.5)
    if critical_line:
        return zeta_half(ctx, s, derivative)
    else:
        return zeta_offline(ctx, s, derivative)

@defun
def rs_z(ctx, w, derivative=0):
    w = ctx.convert(w)
    re = ctx._re(w); im = ctx._im(w)
    if re < 0:
        return rs_z(ctx, -w, derivative)
    critical_line = (im == 0)
    if critical_line:
        return z_half(ctx, w, derivative)
    else:
        return z_offline(ctx, w, derivative)
|
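An illustrative usage sketch (not part of the file above): since rs_zeta and rs_z are registered with @defun, they become methods of the mpmath context, so under a standard mpmath install they could be exercised roughly as follows; the sample points are arbitrary.

from mpmath import mp

mp.dps = 30
print(mp.rs_z(1000))                   # Riemann-Siegel Z(t) on the critical line
print(mp.rs_zeta(mp.mpc(0.5, 1000)))   # zeta(1/2 + 1000j) via the Riemann-Siegel expansion
print(mp.rs_z(1000, derivative=1))     # first derivative of Z at the same point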
env-llmeval/lib/python3.10/site-packages/mpmath/functions/signals.py
ADDED
@@ -0,0 +1,32 @@
from .functions import defun_wrapped

@defun_wrapped
def squarew(ctx, t, amplitude=1, period=1):
    P = period
    A = amplitude
    return A*((-1)**ctx.floor(2*t/P))

@defun_wrapped
def trianglew(ctx, t, amplitude=1, period=1):
    A = amplitude
    P = period

    return 2*A*(0.5 - ctx.fabs(1 - 2*ctx.frac(t/P + 0.25)))

@defun_wrapped
def sawtoothw(ctx, t, amplitude=1, period=1):
    A = amplitude
    P = period
    return A*ctx.frac(t/P)

@defun_wrapped
def unit_triangle(ctx, t, amplitude=1):
    A = amplitude
    if t <= -1 or t >= 1:
        return ctx.zero
    return A*(-ctx.fabs(t) + 1)

@defun_wrapped
def sigmoid(ctx, t, amplitude=1):
    A = amplitude
    return A / (1 + ctx.exp(-t))
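For orientation, an illustrative sketch only: these wrappers are exposed as context methods via @defun_wrapped, so with a recent mpmath release that ships this module they could be called roughly as below (sample arguments are arbitrary).

from mpmath import mp

mp.dps = 15
print([mp.squarew(k/4, amplitude=2, period=1) for k in range(4)])  # one period: 2, 2, -2, -2
print(mp.trianglew(0.25))      # peak of the default triangle wave -> 1
print(mp.sawtoothw(1.75))      # 0.75 of the way into the second period
print(mp.unit_triangle(0.5))   # 0.5
print(mp.sigmoid(0))           # 0.5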
env-llmeval/lib/python3.10/site-packages/mpmath/functions/theta.py
ADDED
@@ -0,0 +1,1049 @@
1 |
+
from .functions import defun, defun_wrapped
|
2 |
+
|
3 |
+
@defun
|
4 |
+
def _jacobi_theta2(ctx, z, q):
|
5 |
+
extra1 = 10
|
6 |
+
extra2 = 20
|
7 |
+
# the loops below break when the fixed precision quantities
|
8 |
+
# a and b go to zero;
|
9 |
+
# right shifting small negative numbers by wp one obtains -1, not zero,
|
10 |
+
# so the condition a**2 + b**2 > MIN is used to break the loops.
|
11 |
+
MIN = 2
|
12 |
+
if z == ctx.zero:
|
13 |
+
if (not ctx._im(q)):
|
14 |
+
wp = ctx.prec + extra1
|
15 |
+
x = ctx.to_fixed(ctx._re(q), wp)
|
16 |
+
x2 = (x*x) >> wp
|
17 |
+
a = b = x2
|
18 |
+
s = x2
|
19 |
+
while abs(a) > MIN:
|
20 |
+
b = (b*x2) >> wp
|
21 |
+
a = (a*b) >> wp
|
22 |
+
s += a
|
23 |
+
s = (1 << (wp+1)) + (s << 1)
|
24 |
+
s = ctx.ldexp(s, -wp)
|
25 |
+
else:
|
26 |
+
wp = ctx.prec + extra1
|
27 |
+
xre = ctx.to_fixed(ctx._re(q), wp)
|
28 |
+
xim = ctx.to_fixed(ctx._im(q), wp)
|
29 |
+
x2re = (xre*xre - xim*xim) >> wp
|
30 |
+
x2im = (xre*xim) >> (wp-1)
|
31 |
+
are = bre = x2re
|
32 |
+
aim = bim = x2im
|
33 |
+
sre = (1<<wp) + are
|
34 |
+
sim = aim
|
35 |
+
while are**2 + aim**2 > MIN:
|
36 |
+
bre, bim = (bre * x2re - bim * x2im) >> wp, \
|
37 |
+
(bre * x2im + bim * x2re) >> wp
|
38 |
+
are, aim = (are * bre - aim * bim) >> wp, \
|
39 |
+
(are * bim + aim * bre) >> wp
|
40 |
+
sre += are
|
41 |
+
sim += aim
|
42 |
+
sre = (sre << 1)
|
43 |
+
sim = (sim << 1)
|
44 |
+
sre = ctx.ldexp(sre, -wp)
|
45 |
+
sim = ctx.ldexp(sim, -wp)
|
46 |
+
s = ctx.mpc(sre, sim)
|
47 |
+
else:
|
48 |
+
if (not ctx._im(q)) and (not ctx._im(z)):
|
49 |
+
wp = ctx.prec + extra1
|
50 |
+
x = ctx.to_fixed(ctx._re(q), wp)
|
51 |
+
x2 = (x*x) >> wp
|
52 |
+
a = b = x2
|
53 |
+
c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp)
|
54 |
+
cn = c1 = ctx.to_fixed(c1, wp)
|
55 |
+
sn = s1 = ctx.to_fixed(s1, wp)
|
56 |
+
c2 = (c1*c1 - s1*s1) >> wp
|
57 |
+
s2 = (c1 * s1) >> (wp - 1)
|
58 |
+
cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
|
59 |
+
s = c1 + ((a * cn) >> wp)
|
60 |
+
while abs(a) > MIN:
|
61 |
+
b = (b*x2) >> wp
|
62 |
+
a = (a*b) >> wp
|
63 |
+
cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
|
64 |
+
s += (a * cn) >> wp
|
65 |
+
s = (s << 1)
|
66 |
+
s = ctx.ldexp(s, -wp)
|
67 |
+
s *= ctx.nthroot(q, 4)
|
68 |
+
return s
|
69 |
+
# case z real, q complex
|
70 |
+
elif not ctx._im(z):
|
71 |
+
wp = ctx.prec + extra2
|
72 |
+
xre = ctx.to_fixed(ctx._re(q), wp)
|
73 |
+
xim = ctx.to_fixed(ctx._im(q), wp)
|
74 |
+
x2re = (xre*xre - xim*xim) >> wp
|
75 |
+
x2im = (xre*xim) >> (wp - 1)
|
76 |
+
are = bre = x2re
|
77 |
+
aim = bim = x2im
|
78 |
+
c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp)
|
79 |
+
cn = c1 = ctx.to_fixed(c1, wp)
|
80 |
+
sn = s1 = ctx.to_fixed(s1, wp)
|
81 |
+
c2 = (c1*c1 - s1*s1) >> wp
|
82 |
+
s2 = (c1 * s1) >> (wp - 1)
|
83 |
+
cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
|
84 |
+
sre = c1 + ((are * cn) >> wp)
|
85 |
+
sim = ((aim * cn) >> wp)
|
86 |
+
while are**2 + aim**2 > MIN:
|
87 |
+
bre, bim = (bre * x2re - bim * x2im) >> wp, \
|
88 |
+
(bre * x2im + bim * x2re) >> wp
|
89 |
+
are, aim = (are * bre - aim * bim) >> wp, \
|
90 |
+
(are * bim + aim * bre) >> wp
|
91 |
+
cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
|
92 |
+
sre += ((are * cn) >> wp)
|
93 |
+
sim += ((aim * cn) >> wp)
|
94 |
+
sre = (sre << 1)
|
95 |
+
sim = (sim << 1)
|
96 |
+
sre = ctx.ldexp(sre, -wp)
|
97 |
+
sim = ctx.ldexp(sim, -wp)
|
98 |
+
s = ctx.mpc(sre, sim)
|
99 |
+
#case z complex, q real
|
100 |
+
elif not ctx._im(q):
|
101 |
+
wp = ctx.prec + extra2
|
102 |
+
x = ctx.to_fixed(ctx._re(q), wp)
|
103 |
+
x2 = (x*x) >> wp
|
104 |
+
a = b = x2
|
105 |
+
prec0 = ctx.prec
|
106 |
+
ctx.prec = wp
|
107 |
+
c1, s1 = ctx.cos_sin(z)
|
108 |
+
ctx.prec = prec0
|
109 |
+
cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
|
110 |
+
cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
|
111 |
+
snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
|
112 |
+
snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
|
113 |
+
#c2 = (c1*c1 - s1*s1) >> wp
|
114 |
+
c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp
|
115 |
+
c2im = (c1re*c1im - s1re*s1im) >> (wp - 1)
|
116 |
+
#s2 = (c1 * s1) >> (wp - 1)
|
117 |
+
s2re = (c1re*s1re - c1im*s1im) >> (wp - 1)
|
118 |
+
s2im = (c1re*s1im + c1im*s1re) >> (wp - 1)
|
119 |
+
#cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
|
120 |
+
t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
|
121 |
+
t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
|
122 |
+
t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
|
123 |
+
t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
|
124 |
+
cnre = t1
|
125 |
+
cnim = t2
|
126 |
+
snre = t3
|
127 |
+
snim = t4
|
128 |
+
sre = c1re + ((a * cnre) >> wp)
|
129 |
+
sim = c1im + ((a * cnim) >> wp)
|
130 |
+
while abs(a) > MIN:
|
131 |
+
b = (b*x2) >> wp
|
132 |
+
a = (a*b) >> wp
|
133 |
+
t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
|
134 |
+
t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
|
135 |
+
t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
|
136 |
+
t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
|
137 |
+
cnre = t1
|
138 |
+
cnim = t2
|
139 |
+
snre = t3
|
140 |
+
snim = t4
|
141 |
+
sre += ((a * cnre) >> wp)
|
142 |
+
sim += ((a * cnim) >> wp)
|
143 |
+
sre = (sre << 1)
|
144 |
+
sim = (sim << 1)
|
145 |
+
sre = ctx.ldexp(sre, -wp)
|
146 |
+
sim = ctx.ldexp(sim, -wp)
|
147 |
+
s = ctx.mpc(sre, sim)
|
148 |
+
# case z and q complex
|
149 |
+
else:
|
150 |
+
wp = ctx.prec + extra2
|
151 |
+
xre = ctx.to_fixed(ctx._re(q), wp)
|
152 |
+
xim = ctx.to_fixed(ctx._im(q), wp)
|
153 |
+
x2re = (xre*xre - xim*xim) >> wp
|
154 |
+
x2im = (xre*xim) >> (wp - 1)
|
155 |
+
are = bre = x2re
|
156 |
+
aim = bim = x2im
|
157 |
+
prec0 = ctx.prec
|
158 |
+
ctx.prec = wp
|
159 |
+
# cos(z), sin(z) with z complex
|
160 |
+
c1, s1 = ctx.cos_sin(z)
|
161 |
+
ctx.prec = prec0
|
162 |
+
cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
|
163 |
+
cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
|
164 |
+
snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
|
165 |
+
snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
|
166 |
+
c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp
|
167 |
+
c2im = (c1re*c1im - s1re*s1im) >> (wp - 1)
|
168 |
+
s2re = (c1re*s1re - c1im*s1im) >> (wp - 1)
|
169 |
+
s2im = (c1re*s1im + c1im*s1re) >> (wp - 1)
|
170 |
+
t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
|
171 |
+
t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
|
172 |
+
t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
|
173 |
+
t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
|
174 |
+
cnre = t1
|
175 |
+
cnim = t2
|
176 |
+
snre = t3
|
177 |
+
snim = t4
|
178 |
+
n = 1
|
179 |
+
termre = c1re
|
180 |
+
termim = c1im
|
181 |
+
sre = c1re + ((are * cnre - aim * cnim) >> wp)
|
182 |
+
sim = c1im + ((are * cnim + aim * cnre) >> wp)
|
183 |
+
n = 3
|
184 |
+
termre = ((are * cnre - aim * cnim) >> wp)
|
185 |
+
termim = ((are * cnim + aim * cnre) >> wp)
|
186 |
+
sre = c1re + ((are * cnre - aim * cnim) >> wp)
|
187 |
+
sim = c1im + ((are * cnim + aim * cnre) >> wp)
|
188 |
+
n = 5
|
189 |
+
while are**2 + aim**2 > MIN:
|
190 |
+
bre, bim = (bre * x2re - bim * x2im) >> wp, \
|
191 |
+
(bre * x2im + bim * x2re) >> wp
|
192 |
+
are, aim = (are * bre - aim * bim) >> wp, \
|
193 |
+
(are * bim + aim * bre) >> wp
|
194 |
+
#cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp
|
195 |
+
t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
|
196 |
+
t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
|
197 |
+
t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
|
198 |
+
t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
|
199 |
+
cnre = t1
|
200 |
+
cnim = t2
|
201 |
+
snre = t3
|
202 |
+
snim = t4
|
203 |
+
termre = ((are * cnre - aim * cnim) >> wp)
|
204 |
+
termim = ((aim * cnre + are * cnim) >> wp)
|
205 |
+
sre += ((are * cnre - aim * cnim) >> wp)
|
206 |
+
sim += ((aim * cnre + are * cnim) >> wp)
|
207 |
+
n += 2
|
208 |
+
sre = (sre << 1)
|
209 |
+
sim = (sim << 1)
|
210 |
+
sre = ctx.ldexp(sre, -wp)
|
211 |
+
sim = ctx.ldexp(sim, -wp)
|
212 |
+
s = ctx.mpc(sre, sim)
|
213 |
+
s *= ctx.nthroot(q, 4)
|
214 |
+
return s
|
215 |
+
|
216 |
+
@defun
|
217 |
+
def _djacobi_theta2(ctx, z, q, nd):
|
218 |
+
MIN = 2
|
219 |
+
extra1 = 10
|
220 |
+
extra2 = 20
|
221 |
+
if (not ctx._im(q)) and (not ctx._im(z)):
|
222 |
+
wp = ctx.prec + extra1
|
223 |
+
x = ctx.to_fixed(ctx._re(q), wp)
|
224 |
+
x2 = (x*x) >> wp
|
225 |
+
a = b = x2
|
226 |
+
c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp)
|
227 |
+
cn = c1 = ctx.to_fixed(c1, wp)
|
228 |
+
sn = s1 = ctx.to_fixed(s1, wp)
|
229 |
+
c2 = (c1*c1 - s1*s1) >> wp
|
230 |
+
s2 = (c1 * s1) >> (wp - 1)
|
231 |
+
cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
|
232 |
+
if (nd&1):
|
233 |
+
s = s1 + ((a * sn * 3**nd) >> wp)
|
234 |
+
else:
|
235 |
+
s = c1 + ((a * cn * 3**nd) >> wp)
|
236 |
+
n = 2
|
237 |
+
while abs(a) > MIN:
|
238 |
+
b = (b*x2) >> wp
|
239 |
+
a = (a*b) >> wp
|
240 |
+
cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
|
241 |
+
if nd&1:
|
242 |
+
s += (a * sn * (2*n+1)**nd) >> wp
|
243 |
+
else:
|
244 |
+
s += (a * cn * (2*n+1)**nd) >> wp
|
245 |
+
n += 1
|
246 |
+
s = -(s << 1)
|
247 |
+
s = ctx.ldexp(s, -wp)
|
248 |
+
# case z real, q complex
|
249 |
+
elif not ctx._im(z):
|
250 |
+
wp = ctx.prec + extra2
|
251 |
+
xre = ctx.to_fixed(ctx._re(q), wp)
|
252 |
+
xim = ctx.to_fixed(ctx._im(q), wp)
|
253 |
+
x2re = (xre*xre - xim*xim) >> wp
|
254 |
+
x2im = (xre*xim) >> (wp - 1)
|
255 |
+
are = bre = x2re
|
256 |
+
aim = bim = x2im
|
257 |
+
c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp)
|
258 |
+
cn = c1 = ctx.to_fixed(c1, wp)
|
259 |
+
sn = s1 = ctx.to_fixed(s1, wp)
|
260 |
+
c2 = (c1*c1 - s1*s1) >> wp
|
261 |
+
s2 = (c1 * s1) >> (wp - 1)
|
262 |
+
cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
|
263 |
+
if (nd&1):
|
264 |
+
sre = s1 + ((are * sn * 3**nd) >> wp)
|
265 |
+
sim = ((aim * sn * 3**nd) >> wp)
|
266 |
+
else:
|
267 |
+
sre = c1 + ((are * cn * 3**nd) >> wp)
|
268 |
+
sim = ((aim * cn * 3**nd) >> wp)
|
269 |
+
n = 5
|
270 |
+
while are**2 + aim**2 > MIN:
|
271 |
+
bre, bim = (bre * x2re - bim * x2im) >> wp, \
|
272 |
+
(bre * x2im + bim * x2re) >> wp
|
273 |
+
are, aim = (are * bre - aim * bim) >> wp, \
|
274 |
+
(are * bim + aim * bre) >> wp
|
275 |
+
cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
|
276 |
+
|
277 |
+
if (nd&1):
|
278 |
+
sre += ((are * sn * n**nd) >> wp)
|
279 |
+
sim += ((aim * sn * n**nd) >> wp)
|
280 |
+
else:
|
281 |
+
sre += ((are * cn * n**nd) >> wp)
|
282 |
+
sim += ((aim * cn * n**nd) >> wp)
|
283 |
+
n += 2
|
284 |
+
sre = -(sre << 1)
|
285 |
+
sim = -(sim << 1)
|
286 |
+
sre = ctx.ldexp(sre, -wp)
|
287 |
+
sim = ctx.ldexp(sim, -wp)
|
288 |
+
s = ctx.mpc(sre, sim)
|
289 |
+
#case z complex, q real
|
290 |
+
elif not ctx._im(q):
|
291 |
+
wp = ctx.prec + extra2
|
292 |
+
x = ctx.to_fixed(ctx._re(q), wp)
|
293 |
+
x2 = (x*x) >> wp
|
294 |
+
a = b = x2
|
295 |
+
prec0 = ctx.prec
|
296 |
+
ctx.prec = wp
|
297 |
+
c1, s1 = ctx.cos_sin(z)
|
298 |
+
ctx.prec = prec0
|
299 |
+
cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
|
300 |
+
cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
|
301 |
+
snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
|
302 |
+
snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
|
303 |
+
#c2 = (c1*c1 - s1*s1) >> wp
|
304 |
+
c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp
|
305 |
+
c2im = (c1re*c1im - s1re*s1im) >> (wp - 1)
|
306 |
+
#s2 = (c1 * s1) >> (wp - 1)
|
307 |
+
s2re = (c1re*s1re - c1im*s1im) >> (wp - 1)
|
308 |
+
s2im = (c1re*s1im + c1im*s1re) >> (wp - 1)
|
309 |
+
#cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
|
310 |
+
t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
|
311 |
+
t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
|
312 |
+
t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
|
313 |
+
t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
|
314 |
+
cnre = t1
|
315 |
+
cnim = t2
|
316 |
+
snre = t3
|
317 |
+
snim = t4
|
318 |
+
if (nd&1):
|
319 |
+
sre = s1re + ((a * snre * 3**nd) >> wp)
|
320 |
+
sim = s1im + ((a * snim * 3**nd) >> wp)
|
321 |
+
else:
|
322 |
+
sre = c1re + ((a * cnre * 3**nd) >> wp)
|
323 |
+
sim = c1im + ((a * cnim * 3**nd) >> wp)
|
324 |
+
n = 5
|
325 |
+
while abs(a) > MIN:
|
326 |
+
b = (b*x2) >> wp
|
327 |
+
a = (a*b) >> wp
|
328 |
+
t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
|
329 |
+
t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
|
330 |
+
t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
|
331 |
+
t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
|
332 |
+
cnre = t1
|
333 |
+
cnim = t2
|
334 |
+
snre = t3
|
335 |
+
snim = t4
|
336 |
+
if (nd&1):
|
337 |
+
sre += ((a * snre * n**nd) >> wp)
|
338 |
+
sim += ((a * snim * n**nd) >> wp)
|
339 |
+
else:
|
340 |
+
sre += ((a * cnre * n**nd) >> wp)
|
341 |
+
sim += ((a * cnim * n**nd) >> wp)
|
342 |
+
n += 2
|
343 |
+
sre = -(sre << 1)
|
344 |
+
sim = -(sim << 1)
|
345 |
+
sre = ctx.ldexp(sre, -wp)
|
346 |
+
sim = ctx.ldexp(sim, -wp)
|
347 |
+
s = ctx.mpc(sre, sim)
|
348 |
+
# case z and q complex
|
349 |
+
else:
|
350 |
+
wp = ctx.prec + extra2
|
351 |
+
xre = ctx.to_fixed(ctx._re(q), wp)
|
352 |
+
xim = ctx.to_fixed(ctx._im(q), wp)
|
353 |
+
x2re = (xre*xre - xim*xim) >> wp
|
354 |
+
x2im = (xre*xim) >> (wp - 1)
|
355 |
+
are = bre = x2re
|
356 |
+
aim = bim = x2im
|
357 |
+
prec0 = ctx.prec
|
358 |
+
ctx.prec = wp
|
359 |
+
# cos(2*z), sin(2*z) with z complex
|
360 |
+
c1, s1 = ctx.cos_sin(z)
|
361 |
+
ctx.prec = prec0
|
362 |
+
cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
|
363 |
+
cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
|
364 |
+
snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
|
365 |
+
snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
|
366 |
+
c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp
|
367 |
+
c2im = (c1re*c1im - s1re*s1im) >> (wp - 1)
|
368 |
+
s2re = (c1re*s1re - c1im*s1im) >> (wp - 1)
|
369 |
+
s2im = (c1re*s1im + c1im*s1re) >> (wp - 1)
|
370 |
+
t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
|
371 |
+
t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
|
372 |
+
t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
|
373 |
+
t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
|
374 |
+
cnre = t1
|
375 |
+
cnim = t2
|
376 |
+
snre = t3
|
377 |
+
snim = t4
|
378 |
+
if (nd&1):
|
379 |
+
sre = s1re + (((are * snre - aim * snim) * 3**nd) >> wp)
|
380 |
+
sim = s1im + (((are * snim + aim * snre)* 3**nd) >> wp)
|
381 |
+
else:
|
382 |
+
sre = c1re + (((are * cnre - aim * cnim) * 3**nd) >> wp)
|
383 |
+
sim = c1im + (((are * cnim + aim * cnre)* 3**nd) >> wp)
|
384 |
+
n = 5
|
385 |
+
while are**2 + aim**2 > MIN:
|
386 |
+
bre, bim = (bre * x2re - bim * x2im) >> wp, \
|
387 |
+
(bre * x2im + bim * x2re) >> wp
|
388 |
+
are, aim = (are * bre - aim * bim) >> wp, \
|
389 |
+
(are * bim + aim * bre) >> wp
|
390 |
+
#cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp
|
391 |
+
t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
|
392 |
+
t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
|
393 |
+
t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
|
394 |
+
t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
|
395 |
+
cnre = t1
|
396 |
+
cnim = t2
|
397 |
+
snre = t3
|
398 |
+
snim = t4
|
399 |
+
if (nd&1):
|
400 |
+
sre += (((are * snre - aim * snim) * n**nd) >> wp)
|
401 |
+
sim += (((aim * snre + are * snim) * n**nd) >> wp)
|
402 |
+
else:
|
403 |
+
sre += (((are * cnre - aim * cnim) * n**nd) >> wp)
|
404 |
+
sim += (((aim * cnre + are * cnim) * n**nd) >> wp)
|
405 |
+
n += 2
|
406 |
+
sre = -(sre << 1)
|
407 |
+
sim = -(sim << 1)
|
408 |
+
sre = ctx.ldexp(sre, -wp)
|
409 |
+
sim = ctx.ldexp(sim, -wp)
|
410 |
+
s = ctx.mpc(sre, sim)
|
411 |
+
s *= ctx.nthroot(q, 4)
|
412 |
+
if (nd&1):
|
413 |
+
return (-1)**(nd//2) * s
|
414 |
+
else:
|
415 |
+
return (-1)**(1 + nd//2) * s
|
416 |
+
|
417 |
+
@defun
|
418 |
+
def _jacobi_theta3(ctx, z, q):
|
419 |
+
extra1 = 10
|
420 |
+
extra2 = 20
|
421 |
+
MIN = 2
|
422 |
+
if z == ctx.zero:
|
423 |
+
if not ctx._im(q):
|
424 |
+
wp = ctx.prec + extra1
|
425 |
+
x = ctx.to_fixed(ctx._re(q), wp)
|
426 |
+
s = x
|
427 |
+
a = b = x
|
428 |
+
x2 = (x*x) >> wp
|
429 |
+
while abs(a) > MIN:
|
430 |
+
b = (b*x2) >> wp
|
431 |
+
a = (a*b) >> wp
|
432 |
+
s += a
|
433 |
+
s = (1 << wp) + (s << 1)
|
434 |
+
s = ctx.ldexp(s, -wp)
|
435 |
+
return s
|
436 |
+
else:
|
437 |
+
wp = ctx.prec + extra1
|
438 |
+
xre = ctx.to_fixed(ctx._re(q), wp)
|
439 |
+
xim = ctx.to_fixed(ctx._im(q), wp)
|
440 |
+
x2re = (xre*xre - xim*xim) >> wp
|
441 |
+
x2im = (xre*xim) >> (wp - 1)
|
442 |
+
sre = are = bre = xre
|
443 |
+
sim = aim = bim = xim
|
444 |
+
while are**2 + aim**2 > MIN:
|
445 |
+
bre, bim = (bre * x2re - bim * x2im) >> wp, \
|
446 |
+
(bre * x2im + bim * x2re) >> wp
|
447 |
+
are, aim = (are * bre - aim * bim) >> wp, \
|
448 |
+
(are * bim + aim * bre) >> wp
|
449 |
+
sre += are
|
450 |
+
sim += aim
|
451 |
+
sre = (1 << wp) + (sre << 1)
|
452 |
+
sim = (sim << 1)
|
453 |
+
sre = ctx.ldexp(sre, -wp)
|
454 |
+
sim = ctx.ldexp(sim, -wp)
|
455 |
+
s = ctx.mpc(sre, sim)
|
456 |
+
return s
|
457 |
+
else:
|
458 |
+
if (not ctx._im(q)) and (not ctx._im(z)):
|
459 |
+
s = 0
|
460 |
+
wp = ctx.prec + extra1
|
461 |
+
x = ctx.to_fixed(ctx._re(q), wp)
|
462 |
+
a = b = x
|
463 |
+
x2 = (x*x) >> wp
|
464 |
+
c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp)
|
465 |
+
c1 = ctx.to_fixed(c1, wp)
|
466 |
+
s1 = ctx.to_fixed(s1, wp)
|
467 |
+
cn = c1
|
468 |
+
sn = s1
|
469 |
+
s += (a * cn) >> wp
|
470 |
+
while abs(a) > MIN:
|
471 |
+
b = (b*x2) >> wp
|
472 |
+
a = (a*b) >> wp
|
473 |
+
cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp
|
474 |
+
s += (a * cn) >> wp
|
475 |
+
s = (1 << wp) + (s << 1)
|
476 |
+
s = ctx.ldexp(s, -wp)
|
477 |
+
return s
|
478 |
+
# case z real, q complex
|
479 |
+
elif not ctx._im(z):
|
480 |
+
wp = ctx.prec + extra2
|
481 |
+
xre = ctx.to_fixed(ctx._re(q), wp)
|
482 |
+
xim = ctx.to_fixed(ctx._im(q), wp)
|
483 |
+
x2re = (xre*xre - xim*xim) >> wp
|
484 |
+
x2im = (xre*xim) >> (wp - 1)
|
485 |
+
are = bre = xre
|
486 |
+
aim = bim = xim
|
487 |
+
c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp)
|
488 |
+
c1 = ctx.to_fixed(c1, wp)
|
489 |
+
s1 = ctx.to_fixed(s1, wp)
|
490 |
+
cn = c1
|
491 |
+
sn = s1
|
492 |
+
sre = (are * cn) >> wp
|
493 |
+
sim = (aim * cn) >> wp
|
494 |
+
while are**2 + aim**2 > MIN:
|
495 |
+
bre, bim = (bre * x2re - bim * x2im) >> wp, \
|
496 |
+
(bre * x2im + bim * x2re) >> wp
|
497 |
+
are, aim = (are * bre - aim * bim) >> wp, \
|
498 |
+
(are * bim + aim * bre) >> wp
|
499 |
+
cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp
|
500 |
+
sre += (are * cn) >> wp
|
501 |
+
sim += (aim * cn) >> wp
|
502 |
+
sre = (1 << wp) + (sre << 1)
|
503 |
+
sim = (sim << 1)
|
504 |
+
sre = ctx.ldexp(sre, -wp)
|
505 |
+
sim = ctx.ldexp(sim, -wp)
|
506 |
+
s = ctx.mpc(sre, sim)
|
507 |
+
return s
|
508 |
+
#case z complex, q real
|
509 |
+
elif not ctx._im(q):
|
510 |
+
wp = ctx.prec + extra2
|
511 |
+
x = ctx.to_fixed(ctx._re(q), wp)
|
512 |
+
a = b = x
|
513 |
+
x2 = (x*x) >> wp
|
514 |
+
prec0 = ctx.prec
|
515 |
+
ctx.prec = wp
|
516 |
+
c1, s1 = ctx.cos_sin(2*z)
|
517 |
+
ctx.prec = prec0
|
518 |
+
cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
|
519 |
+
cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
|
520 |
+
snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
|
521 |
+
snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
|
522 |
+
sre = (a * cnre) >> wp
|
523 |
+
sim = (a * cnim) >> wp
|
524 |
+
while abs(a) > MIN:
|
525 |
+
b = (b*x2) >> wp
|
526 |
+
a = (a*b) >> wp
|
527 |
+
t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp
|
528 |
+
t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp
|
529 |
+
t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp
|
530 |
+
t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp
|
531 |
+
cnre = t1
|
532 |
+
cnim = t2
|
533 |
+
snre = t3
|
534 |
+
snim = t4
|
535 |
+
sre += (a * cnre) >> wp
|
536 |
+
sim += (a * cnim) >> wp
|
537 |
+
sre = (1 << wp) + (sre << 1)
|
538 |
+
sim = (sim << 1)
|
539 |
+
sre = ctx.ldexp(sre, -wp)
|
540 |
+
sim = ctx.ldexp(sim, -wp)
|
541 |
+
s = ctx.mpc(sre, sim)
|
542 |
+
return s
|
543 |
+
# case z and q complex
|
544 |
+
else:
|
545 |
+
wp = ctx.prec + extra2
|
546 |
+
xre = ctx.to_fixed(ctx._re(q), wp)
|
547 |
+
xim = ctx.to_fixed(ctx._im(q), wp)
|
548 |
+
x2re = (xre*xre - xim*xim) >> wp
|
549 |
+
x2im = (xre*xim) >> (wp - 1)
|
550 |
+
are = bre = xre
|
551 |
+
aim = bim = xim
|
552 |
+
prec0 = ctx.prec
|
553 |
+
ctx.prec = wp
|
554 |
+
# cos(2*z), sin(2*z) with z complex
|
555 |
+
c1, s1 = ctx.cos_sin(2*z)
|
556 |
+
ctx.prec = prec0
|
557 |
+
cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
|
558 |
+
cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
|
559 |
+
snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
|
560 |
+
snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
|
561 |
+
sre = (are * cnre - aim * cnim) >> wp
|
562 |
+
sim = (aim * cnre + are * cnim) >> wp
|
563 |
+
while are**2 + aim**2 > MIN:
|
564 |
+
bre, bim = (bre * x2re - bim * x2im) >> wp, \
|
565 |
+
(bre * x2im + bim * x2re) >> wp
|
566 |
+
are, aim = (are * bre - aim * bim) >> wp, \
|
567 |
+
(are * bim + aim * bre) >> wp
|
568 |
+
t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp
|
569 |
+
t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp
|
570 |
+
t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp
|
571 |
+
t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp
|
572 |
+
cnre = t1
|
573 |
+
cnim = t2
|
574 |
+
snre = t3
|
575 |
+
snim = t4
|
576 |
+
sre += (are * cnre - aim * cnim) >> wp
|
577 |
+
sim += (aim * cnre + are * cnim) >> wp
|
578 |
+
sre = (1 << wp) + (sre << 1)
|
579 |
+
sim = (sim << 1)
|
580 |
+
sre = ctx.ldexp(sre, -wp)
|
581 |
+
sim = ctx.ldexp(sim, -wp)
|
582 |
+
s = ctx.mpc(sre, sim)
|
583 |
+
return s
|
584 |
+
|
585 |
+
@defun
|
586 |
+
def _djacobi_theta3(ctx, z, q, nd):
|
587 |
+
"""nd=1,2,3 order of the derivative with respect to z"""
|
588 |
+
MIN = 2
|
589 |
+
extra1 = 10
|
590 |
+
extra2 = 20
|
591 |
+
if (not ctx._im(q)) and (not ctx._im(z)):
|
592 |
+
s = 0
|
593 |
+
wp = ctx.prec + extra1
|
594 |
+
x = ctx.to_fixed(ctx._re(q), wp)
|
595 |
+
a = b = x
|
596 |
+
x2 = (x*x) >> wp
|
597 |
+
c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp)
|
598 |
+
c1 = ctx.to_fixed(c1, wp)
|
599 |
+
s1 = ctx.to_fixed(s1, wp)
|
600 |
+
cn = c1
|
601 |
+
sn = s1
|
602 |
+
if (nd&1):
|
603 |
+
s += (a * sn) >> wp
|
604 |
+
else:
|
605 |
+
s += (a * cn) >> wp
|
606 |
+
n = 2
|
607 |
+
while abs(a) > MIN:
|
608 |
+
b = (b*x2) >> wp
|
609 |
+
a = (a*b) >> wp
|
610 |
+
cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp
|
611 |
+
if nd&1:
|
612 |
+
s += (a * sn * n**nd) >> wp
|
613 |
+
else:
|
614 |
+
s += (a * cn * n**nd) >> wp
|
615 |
+
n += 1
|
616 |
+
s = -(s << (nd+1))
|
617 |
+
s = ctx.ldexp(s, -wp)
|
618 |
+
# case z real, q complex
|
619 |
+
elif not ctx._im(z):
|
620 |
+
wp = ctx.prec + extra2
|
621 |
+
xre = ctx.to_fixed(ctx._re(q), wp)
|
622 |
+
xim = ctx.to_fixed(ctx._im(q), wp)
|
623 |
+
x2re = (xre*xre - xim*xim) >> wp
|
624 |
+
x2im = (xre*xim) >> (wp - 1)
|
625 |
+
are = bre = xre
|
626 |
+
aim = bim = xim
|
627 |
+
c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp)
|
628 |
+
c1 = ctx.to_fixed(c1, wp)
|
629 |
+
s1 = ctx.to_fixed(s1, wp)
|
630 |
+
cn = c1
|
631 |
+
sn = s1
|
632 |
+
if (nd&1):
|
633 |
+
sre = (are * sn) >> wp
|
634 |
+
sim = (aim * sn) >> wp
|
635 |
+
else:
|
636 |
+
sre = (are * cn) >> wp
|
637 |
+
sim = (aim * cn) >> wp
|
638 |
+
n = 2
|
639 |
+
while are**2 + aim**2 > MIN:
|
640 |
+
bre, bim = (bre * x2re - bim * x2im) >> wp, \
|
641 |
+
(bre * x2im + bim * x2re) >> wp
|
642 |
+
are, aim = (are * bre - aim * bim) >> wp, \
|
643 |
+
(are * bim + aim * bre) >> wp
|
644 |
+
cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp
|
645 |
+
if nd&1:
|
646 |
+
sre += (are * sn * n**nd) >> wp
|
647 |
+
sim += (aim * sn * n**nd) >> wp
|
648 |
+
else:
|
649 |
+
sre += (are * cn * n**nd) >> wp
|
650 |
+
sim += (aim * cn * n**nd) >> wp
|
651 |
+
n += 1
|
652 |
+
sre = -(sre << (nd+1))
|
653 |
+
sim = -(sim << (nd+1))
|
654 |
+
sre = ctx.ldexp(sre, -wp)
|
655 |
+
sim = ctx.ldexp(sim, -wp)
|
656 |
+
s = ctx.mpc(sre, sim)
|
657 |
+
#case z complex, q real
|
658 |
+
elif not ctx._im(q):
|
659 |
+
wp = ctx.prec + extra2
|
660 |
+
x = ctx.to_fixed(ctx._re(q), wp)
|
661 |
+
a = b = x
|
662 |
+
x2 = (x*x) >> wp
|
663 |
+
prec0 = ctx.prec
|
664 |
+
ctx.prec = wp
|
665 |
+
c1, s1 = ctx.cos_sin(2*z)
|
666 |
+
ctx.prec = prec0
|
667 |
+
cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
|
668 |
+
cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
|
669 |
+
snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
|
670 |
+
snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
|
671 |
+
if (nd&1):
|
672 |
+
sre = (a * snre) >> wp
|
673 |
+
sim = (a * snim) >> wp
|
674 |
+
else:
|
675 |
+
sre = (a * cnre) >> wp
|
676 |
+
sim = (a * cnim) >> wp
|
677 |
+
n = 2
|
678 |
+
while abs(a) > MIN:
|
679 |
+
b = (b*x2) >> wp
|
680 |
+
a = (a*b) >> wp
|
681 |
+
t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp
|
682 |
+
t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp
|
683 |
+
t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp
|
684 |
+
t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp
|
685 |
+
cnre = t1
|
686 |
+
cnim = t2
|
687 |
+
snre = t3
|
688 |
+
snim = t4
|
689 |
+
if (nd&1):
|
690 |
+
sre += (a * snre * n**nd) >> wp
|
691 |
+
sim += (a * snim * n**nd) >> wp
|
692 |
+
else:
|
693 |
+
sre += (a * cnre * n**nd) >> wp
|
694 |
+
sim += (a * cnim * n**nd) >> wp
|
695 |
+
n += 1
|
696 |
+
sre = -(sre << (nd+1))
|
697 |
+
sim = -(sim << (nd+1))
|
698 |
+
sre = ctx.ldexp(sre, -wp)
|
699 |
+
sim = ctx.ldexp(sim, -wp)
|
700 |
+
s = ctx.mpc(sre, sim)
|
701 |
+
# case z and q complex
|
702 |
+
else:
|
703 |
+
wp = ctx.prec + extra2
|
704 |
+
xre = ctx.to_fixed(ctx._re(q), wp)
|
705 |
+
xim = ctx.to_fixed(ctx._im(q), wp)
|
706 |
+
x2re = (xre*xre - xim*xim) >> wp
|
707 |
+
x2im = (xre*xim) >> (wp - 1)
|
708 |
+
are = bre = xre
|
709 |
+
aim = bim = xim
|
710 |
+
prec0 = ctx.prec
|
711 |
+
ctx.prec = wp
|
712 |
+
# cos(2*z), sin(2*z) with z complex
|
713 |
+
c1, s1 = ctx.cos_sin(2*z)
|
714 |
+
ctx.prec = prec0
|
715 |
+
cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
|
716 |
+
cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
|
717 |
+
snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
|
718 |
+
snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
|
719 |
+
if (nd&1):
|
720 |
+
sre = (are * snre - aim * snim) >> wp
|
721 |
+
sim = (aim * snre + are * snim) >> wp
|
722 |
+
else:
|
723 |
+
sre = (are * cnre - aim * cnim) >> wp
|
724 |
+
sim = (aim * cnre + are * cnim) >> wp
|
725 |
+
n = 2
|
726 |
+
while are**2 + aim**2 > MIN:
|
727 |
+
bre, bim = (bre * x2re - bim * x2im) >> wp, \
|
728 |
+
(bre * x2im + bim * x2re) >> wp
|
729 |
+
are, aim = (are * bre - aim * bim) >> wp, \
|
730 |
+
(are * bim + aim * bre) >> wp
|
731 |
+
t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp
|
732 |
+
t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp
|
733 |
+
t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp
|
734 |
+
t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp
|
735 |
+
cnre = t1
|
736 |
+
cnim = t2
|
737 |
+
snre = t3
|
738 |
+
snim = t4
|
739 |
+
if(nd&1):
|
740 |
+
sre += ((are * snre - aim * snim) * n**nd) >> wp
|
741 |
+
sim += ((aim * snre + are * snim) * n**nd) >> wp
|
742 |
+
else:
|
743 |
+
sre += ((are * cnre - aim * cnim) * n**nd) >> wp
|
744 |
+
sim += ((aim * cnre + are * cnim) * n**nd) >> wp
|
745 |
+
n += 1
|
746 |
+
sre = -(sre << (nd+1))
|
747 |
+
sim = -(sim << (nd+1))
|
748 |
+
sre = ctx.ldexp(sre, -wp)
|
749 |
+
sim = ctx.ldexp(sim, -wp)
|
750 |
+
s = ctx.mpc(sre, sim)
|
751 |
+
if (nd&1):
|
752 |
+
return (-1)**(nd//2) * s
|
753 |
+
else:
|
754 |
+
return (-1)**(1 + nd//2) * s
|
755 |
+
|
756 |
+
@defun
|
757 |
+
def _jacobi_theta2a(ctx, z, q):
|
758 |
+
"""
|
759 |
+
case ctx._im(z) != 0
|
760 |
+
theta(2, z, q) =
|
761 |
+
q**1/4 * Sum(q**(n*n + n) * exp(j*(2*n + 1)*z), n=-inf, inf)
|
762 |
+
max term for minimum (2*n+1)*log(q).real - 2* ctx._im(z)
|
763 |
+
n0 = int(ctx._im(z)/log(q).real - 1/2)
|
764 |
+
theta(2, z, q) =
|
765 |
+
q**1/4 * Sum(q**(n*n + n) * exp(j*(2*n + 1)*z), n=n0, inf) +
|
766 |
+
q**1/4 * Sum(q**(n*n + n) * exp(j*(2*n + 1)*z), n, n0-1, -inf)
|
767 |
+
"""
|
768 |
+
n = n0 = int(ctx._im(z)/ctx._re(ctx.log(q)) - 1/2)
|
769 |
+
e2 = ctx.expj(2*z)
|
770 |
+
e = e0 = ctx.expj((2*n+1)*z)
|
771 |
+
a = q**(n*n + n)
|
772 |
+
# leading term
|
773 |
+
term = a * e
|
774 |
+
s = term
|
775 |
+
eps1 = ctx.eps*abs(term)
|
776 |
+
while 1:
|
777 |
+
n += 1
|
778 |
+
e = e * e2
|
779 |
+
term = q**(n*n + n) * e
|
780 |
+
if abs(term) < eps1:
|
781 |
+
break
|
782 |
+
s += term
|
783 |
+
e = e0
|
784 |
+
e2 = ctx.expj(-2*z)
|
785 |
+
n = n0
|
786 |
+
while 1:
|
787 |
+
n -= 1
|
788 |
+
e = e * e2
|
789 |
+
term = q**(n*n + n) * e
|
790 |
+
if abs(term) < eps1:
|
791 |
+
break
|
792 |
+
s += term
|
793 |
+
s = s * ctx.nthroot(q, 4)
|
794 |
+
return s
|
795 |
+
|
796 |
+
@defun
|
797 |
+
def _jacobi_theta3a(ctx, z, q):
|
798 |
+
"""
|
799 |
+
case ctx._im(z) != 0
|
800 |
+
theta3(z, q) = Sum(q**(n*n) * exp(j*2*n*z), n, -inf, inf)
|
801 |
+
max term for n*abs(log(q).real) + ctx._im(z) ~= 0
|
802 |
+
n0 = int(- ctx._im(z)/abs(log(q).real))
|
803 |
+
"""
|
804 |
+
n = n0 = int(-ctx._im(z)/abs(ctx._re(ctx.log(q))))
|
805 |
+
e2 = ctx.expj(2*z)
|
806 |
+
e = e0 = ctx.expj(2*n*z)
|
807 |
+
s = term = q**(n*n) * e
|
808 |
+
eps1 = ctx.eps*abs(term)
|
809 |
+
while 1:
|
810 |
+
n += 1
|
811 |
+
e = e * e2
|
812 |
+
term = q**(n*n) * e
|
813 |
+
if abs(term) < eps1:
|
814 |
+
break
|
815 |
+
s += term
|
816 |
+
e = e0
|
817 |
+
e2 = ctx.expj(-2*z)
|
818 |
+
n = n0
|
819 |
+
while 1:
|
820 |
+
n -= 1
|
821 |
+
e = e * e2
|
822 |
+
term = q**(n*n) * e
|
823 |
+
if abs(term) < eps1:
|
824 |
+
break
|
825 |
+
s += term
|
826 |
+
return s
|
827 |
+
|
828 |
+
@defun
|
829 |
+
def _djacobi_theta2a(ctx, z, q, nd):
|
830 |
+
"""
|
831 |
+
case ctx._im(z) != 0
|
832 |
+
dtheta(2, z, q, nd) =
|
833 |
+
j* q**1/4 * Sum(q**(n*n + n) * (2*n+1)*exp(j*(2*n + 1)*z), n=-inf, inf)
|
834 |
+
max term for (2*n0+1)*log(q).real - 2* ctx._im(z) ~= 0
|
835 |
+
n0 = int(ctx._im(z)/log(q).real - 1/2)
|
836 |
+
"""
|
837 |
+
n = n0 = int(ctx._im(z)/ctx._re(ctx.log(q)) - 1/2)
|
838 |
+
e2 = ctx.expj(2*z)
|
839 |
+
e = e0 = ctx.expj((2*n + 1)*z)
|
840 |
+
a = q**(n*n + n)
|
841 |
+
# leading term
|
842 |
+
term = (2*n+1)**nd * a * e
|
843 |
+
s = term
|
844 |
+
eps1 = ctx.eps*abs(term)
|
845 |
+
while 1:
|
846 |
+
n += 1
|
847 |
+
e = e * e2
|
848 |
+
term = (2*n+1)**nd * q**(n*n + n) * e
|
849 |
+
if abs(term) < eps1:
|
850 |
+
break
|
851 |
+
s += term
|
852 |
+
e = e0
|
853 |
+
e2 = ctx.expj(-2*z)
|
854 |
+
n = n0
|
855 |
+
while 1:
|
856 |
+
n -= 1
|
857 |
+
e = e * e2
|
858 |
+
term = (2*n+1)**nd * q**(n*n + n) * e
|
859 |
+
if abs(term) < eps1:
|
860 |
+
break
|
861 |
+
s += term
|
862 |
+
return ctx.j**nd * s * ctx.nthroot(q, 4)
|
863 |
+
|
864 |
+
@defun
|
865 |
+
def _djacobi_theta3a(ctx, z, q, nd):
|
866 |
+
"""
|
867 |
+
case ctx._im(z) != 0
|
868 |
+
djtheta3(z, q, nd) = (2*j)**nd *
|
869 |
+
Sum(q**(n*n) * n**nd * exp(j*2*n*z), n, -inf, inf)
|
870 |
+
max term for minimum n*abs(log(q).real) + ctx._im(z)
|
871 |
+
"""
|
872 |
+
n = n0 = int(-ctx._im(z)/abs(ctx._re(ctx.log(q))))
|
873 |
+
e2 = ctx.expj(2*z)
|
874 |
+
e = e0 = ctx.expj(2*n*z)
|
875 |
+
a = q**(n*n) * e
|
876 |
+
s = term = n**nd * a
|
877 |
+
if n != 0:
|
878 |
+
eps1 = ctx.eps*abs(term)
|
879 |
+
else:
|
880 |
+
eps1 = ctx.eps*abs(a)
|
881 |
+
while 1:
|
882 |
+
n += 1
|
883 |
+
e = e * e2
|
884 |
+
a = q**(n*n) * e
|
885 |
+
term = n**nd * a
|
886 |
+
if n != 0:
|
887 |
+
aterm = abs(term)
|
888 |
+
else:
|
889 |
+
aterm = abs(a)
|
890 |
+
if aterm < eps1:
|
891 |
+
break
|
892 |
+
s += term
|
893 |
+
e = e0
|
894 |
+
e2 = ctx.expj(-2*z)
|
895 |
+
n = n0
|
896 |
+
while 1:
|
897 |
+
n -= 1
|
898 |
+
e = e * e2
|
899 |
+
a = q**(n*n) * e
|
900 |
+
term = n**nd * a
|
901 |
+
if n != 0:
|
902 |
+
aterm = abs(term)
|
903 |
+
else:
|
904 |
+
aterm = abs(a)
|
905 |
+
if aterm < eps1:
|
906 |
+
break
|
907 |
+
s += term
|
908 |
+
return (2*ctx.j)**nd * s
|
909 |
+
|
910 |
+
@defun
|
911 |
+
def jtheta(ctx, n, z, q, derivative=0):
|
912 |
+
if derivative:
|
913 |
+
return ctx._djtheta(n, z, q, derivative)
|
914 |
+
|
915 |
+
z = ctx.convert(z)
|
916 |
+
q = ctx.convert(q)
|
917 |
+
|
918 |
+
# Implementation note
|
919 |
+
# If ctx._im(z) is close to zero, _jacobi_theta2 and _jacobi_theta3
|
920 |
+
# are used,
|
921 |
+
# which compute the series starting from n=0 using fixed precision
|
922 |
+
# numbers;
|
923 |
+
# otherwise _jacobi_theta2a and _jacobi_theta3a are used, which compute
|
924 |
+
# the series starting from n=n0, which is the largest term.
|
925 |
+
|
926 |
+
# TODO: write _jacobi_theta2a and _jacobi_theta3a using fixed-point
|
927 |
+
|
928 |
+
if abs(q) > ctx.THETA_Q_LIM:
|
929 |
+
raise ValueError('abs(q) > THETA_Q_LIM = %f' % ctx.THETA_Q_LIM)
|
930 |
+
|
931 |
+
extra = 10
|
932 |
+
if z:
|
933 |
+
M = ctx.mag(z)
|
934 |
+
if M > 5 or (n == 1 and M < -5):
|
935 |
+
extra += 2*abs(M)
|
936 |
+
cz = 0.5
|
937 |
+
extra2 = 50
|
938 |
+
prec0 = ctx.prec
|
939 |
+
try:
|
940 |
+
ctx.prec += extra
|
941 |
+
if n == 1:
|
942 |
+
if ctx._im(z):
|
943 |
+
if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))):
|
944 |
+
ctx.dps += extra2
|
945 |
+
res = ctx._jacobi_theta2(z - ctx.pi/2, q)
|
946 |
+
else:
|
947 |
+
ctx.dps += 10
|
948 |
+
res = ctx._jacobi_theta2a(z - ctx.pi/2, q)
|
949 |
+
else:
|
950 |
+
res = ctx._jacobi_theta2(z - ctx.pi/2, q)
|
951 |
+
elif n == 2:
|
952 |
+
if ctx._im(z):
|
953 |
+
if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))):
|
954 |
+
ctx.dps += extra2
|
955 |
+
res = ctx._jacobi_theta2(z, q)
|
956 |
+
else:
|
957 |
+
ctx.dps += 10
|
958 |
+
res = ctx._jacobi_theta2a(z, q)
|
959 |
+
else:
|
960 |
+
res = ctx._jacobi_theta2(z, q)
|
961 |
+
elif n == 3:
|
962 |
+
if ctx._im(z):
|
963 |
+
if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))):
|
964 |
+
ctx.dps += extra2
|
965 |
+
res = ctx._jacobi_theta3(z, q)
|
966 |
+
else:
|
967 |
+
ctx.dps += 10
|
968 |
+
res = ctx._jacobi_theta3a(z, q)
|
969 |
+
else:
|
970 |
+
res = ctx._jacobi_theta3(z, q)
|
971 |
+
elif n == 4:
|
972 |
+
if ctx._im(z):
|
973 |
+
if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))):
|
974 |
+
ctx.dps += extra2
|
975 |
+
res = ctx._jacobi_theta3(z, -q)
|
976 |
+
else:
|
977 |
+
ctx.dps += 10
|
978 |
+
res = ctx._jacobi_theta3a(z, -q)
|
979 |
+
else:
|
980 |
+
res = ctx._jacobi_theta3(z, -q)
|
981 |
+
else:
|
982 |
+
raise ValueError
|
983 |
+
finally:
|
984 |
+
ctx.prec = prec0
|
985 |
+
return res
|
986 |
+
|
987 |
+
@defun
|
988 |
+
def _djtheta(ctx, n, z, q, derivative=1):
|
989 |
+
z = ctx.convert(z)
|
990 |
+
q = ctx.convert(q)
|
991 |
+
nd = int(derivative)
|
992 |
+
|
993 |
+
if abs(q) > ctx.THETA_Q_LIM:
|
994 |
+
raise ValueError('abs(q) > THETA_Q_LIM = %f' % ctx.THETA_Q_LIM)
|
995 |
+
extra = 10 + ctx.prec * nd // 10
|
996 |
+
if z:
|
997 |
+
M = ctx.mag(z)
|
998 |
+
if M > 5 or (n != 1 and M < -5):
|
999 |
+
extra += 2*abs(M)
|
1000 |
+
cz = 0.5
|
1001 |
+
extra2 = 50
|
1002 |
+
prec0 = ctx.prec
|
1003 |
+
try:
|
1004 |
+
ctx.prec += extra
|
1005 |
+
if n == 1:
|
1006 |
+
if ctx._im(z):
|
1007 |
+
if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))):
|
1008 |
+
ctx.dps += extra2
|
1009 |
+
res = ctx._djacobi_theta2(z - ctx.pi/2, q, nd)
|
1010 |
+
else:
|
1011 |
+
ctx.dps += 10
|
1012 |
+
res = ctx._djacobi_theta2a(z - ctx.pi/2, q, nd)
|
1013 |
+
else:
|
1014 |
+
res = ctx._djacobi_theta2(z - ctx.pi/2, q, nd)
|
1015 |
+
elif n == 2:
|
1016 |
+
if ctx._im(z):
|
1017 |
+
if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))):
|
1018 |
+
ctx.dps += extra2
|
1019 |
+
res = ctx._djacobi_theta2(z, q, nd)
|
1020 |
+
else:
|
1021 |
+
ctx.dps += 10
|
1022 |
+
res = ctx._djacobi_theta2a(z, q, nd)
|
1023 |
+
else:
|
1024 |
+
res = ctx._djacobi_theta2(z, q, nd)
|
1025 |
+
elif n == 3:
|
1026 |
+
if ctx._im(z):
|
1027 |
+
if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))):
|
1028 |
+
ctx.dps += extra2
|
1029 |
+
res = ctx._djacobi_theta3(z, q, nd)
|
1030 |
+
else:
|
1031 |
+
ctx.dps += 10
|
1032 |
+
res = ctx._djacobi_theta3a(z, q, nd)
|
1033 |
+
else:
|
1034 |
+
res = ctx._djacobi_theta3(z, q, nd)
|
1035 |
+
elif n == 4:
|
1036 |
+
if ctx._im(z):
|
1037 |
+
if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))):
|
1038 |
+
ctx.dps += extra2
|
1039 |
+
res = ctx._djacobi_theta3(z, -q, nd)
|
1040 |
+
else:
|
1041 |
+
ctx.dps += 10
|
1042 |
+
res = ctx._djacobi_theta3a(z, -q, nd)
|
1043 |
+
else:
|
1044 |
+
res = ctx._djacobi_theta3(z, -q, nd)
|
1045 |
+
else:
|
1046 |
+
raise ValueError
|
1047 |
+
finally:
|
1048 |
+
ctx.prec = prec0
|
1049 |
+
return +res
|
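A brief usage note, illustrative only: jtheta(n, z, q, derivative=0), defined above, is the public entry point for the four Jacobi theta functions and their z-derivatives. A minimal sketch with arbitrary arguments, assuming a standard mpmath install:

from mpmath import mp

mp.dps = 25
q = mp.mpf('0.1')
z = mp.mpf('0.3')
vals = [mp.jtheta(n, z, q) for n in (1, 2, 3, 4)]   # theta_1(z,q) .. theta_4(z,q)
d1 = mp.jtheta(3, z, q, derivative=1)               # d/dz theta_3(z, q)
print(vals)
print(d1)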
env-llmeval/lib/python3.10/site-packages/mpmath/functions/zeta.py
ADDED
@@ -0,0 +1,1154 @@
from __future__ import print_function

from ..libmp.backend import xrange
from .functions import defun, defun_wrapped, defun_static

@defun
def stieltjes(ctx, n, a=1):
    n = ctx.convert(n)
    a = ctx.convert(a)
    if n < 0:
        return ctx.bad_domain("Stieltjes constants defined for n >= 0")
    if hasattr(ctx, "stieltjes_cache"):
        stieltjes_cache = ctx.stieltjes_cache
    else:
        stieltjes_cache = ctx.stieltjes_cache = {}
    if a == 1:
        if n == 0:
            return +ctx.euler
        if n in stieltjes_cache:
            prec, s = stieltjes_cache[n]
            if prec >= ctx.prec:
                return +s
    mag = 1
    def f(x):
        xa = x/a
        v = (xa-ctx.j)*ctx.ln(a-ctx.j*x)**n/(1+xa**2)/(ctx.exp(2*ctx.pi*x)-1)
        return ctx._re(v) / mag
    orig = ctx.prec
    try:
        # Normalize integrand by approx. magnitude to
        # speed up quadrature (which uses absolute error)
        if n > 50:
            ctx.prec = 20
            mag = ctx.quad(f, [0,ctx.inf], maxdegree=3)
        ctx.prec = orig + 10 + int(n**0.5)
        s = ctx.quad(f, [0,ctx.inf], maxdegree=20)
        v = ctx.ln(a)**n/(2*a) - ctx.ln(a)**(n+1)/(n+1) + 2*s/a*mag
    finally:
        ctx.prec = orig
    if a == 1 and ctx.isint(n):
        stieltjes_cache[n] = (ctx.prec, v)
    return +v
|
43 |
+
|
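# Cross-check sketch (hypothetical helper, not part of the original module;
# it relies only on the ctx.zeta/ctx.stieltjes/ctx.mpf API used above): the
# Stieltjes constants are the Laurent coefficients of zeta(s) about s = 1,
#
#     zeta(s) = 1/(s-1) + sum_{n>=0} (-1)**n/n! * gamma_n * (s-1)**n,
#
# so gamma_0 = stieltjes(0) should roughly agree with zeta(1+h) - 1/h
# (to a few digits at default precision) for a small step h.
def _stieltjes_laurent_check(ctx, h='1e-8'):
    h = ctx.mpf(h)
    return ctx.zeta(1+h) - 1/h, ctx.stieltjes(0)
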
@defun_wrapped
def siegeltheta(ctx, t, derivative=0):
    d = int(derivative)
    if (t == ctx.inf or t == ctx.ninf):
        if d < 2:
            if t == ctx.ninf and d == 0:
                return ctx.ninf
            return ctx.inf
        else:
            return ctx.zero
    if d == 0:
        if ctx._im(t):
            # XXX: cancellation occurs
            a = ctx.loggamma(0.25+0.5j*t)
            b = ctx.loggamma(0.25-0.5j*t)
            return -ctx.ln(ctx.pi)/2*t - 0.5j*(a-b)
        else:
            if ctx.isinf(t):
                return t
            return ctx._im(ctx.loggamma(0.25+0.5j*t)) - ctx.ln(ctx.pi)/2*t
    if d > 0:
        a = (-0.5j)**(d-1)*ctx.polygamma(d-1, 0.25-0.5j*t)
        b = (0.5j)**(d-1)*ctx.polygamma(d-1, 0.25+0.5j*t)
        if ctx._im(t):
            if d == 1:
                return -0.5*ctx.log(ctx.pi)+0.25*(a+b)
            else:
                return 0.25*(a+b)
        else:
            if d == 1:
                return ctx._re(-0.5*ctx.log(ctx.pi)+0.25*(a+b))
            else:
                return ctx._re(0.25*(a+b))

@defun_wrapped
def grampoint(ctx, n):
    # asymptotic expansion, from
    # http://mathworld.wolfram.com/GramPoint.html
    g = 2*ctx.pi*ctx.exp(1+ctx.lambertw((8*n+1)/(8*ctx.e)))
    return ctx.findroot(lambda t: ctx.siegeltheta(t)-ctx.pi*n, g)


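# Consistency sketch (hypothetical helper; it uses only the grampoint and
# siegeltheta functions defined above): the n-th Gram point g_n is defined by
# siegeltheta(g_n) = n*pi, which is exactly the equation findroot solves,
# starting from the Lambert-W based asymptotic guess.
def _grampoint_residual(ctx, n=100):
    g = ctx.grampoint(n)
    return ctx.siegeltheta(g) - n*ctx.pi   # should be negligibly small
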
86 |
+
@defun_wrapped
|
87 |
+
def siegelz(ctx, t, **kwargs):
|
88 |
+
d = int(kwargs.get("derivative", 0))
|
89 |
+
t = ctx.convert(t)
|
90 |
+
t1 = ctx._re(t)
|
91 |
+
t2 = ctx._im(t)
|
92 |
+
prec = ctx.prec
|
93 |
+
try:
|
94 |
+
if abs(t1) > 500*prec and t2**2 < t1:
|
95 |
+
v = ctx.rs_z(t, d)
|
96 |
+
if ctx._is_real_type(t):
|
97 |
+
return ctx._re(v)
|
98 |
+
return v
|
99 |
+
except NotImplementedError:
|
100 |
+
pass
|
101 |
+
ctx.prec += 21
|
102 |
+
e1 = ctx.expj(ctx.siegeltheta(t))
|
103 |
+
z = ctx.zeta(0.5+ctx.j*t)
|
104 |
+
if d == 0:
|
105 |
+
v = e1*z
|
106 |
+
ctx.prec=prec
|
107 |
+
if ctx._is_real_type(t):
|
108 |
+
return ctx._re(v)
|
109 |
+
return +v
|
110 |
+
z1 = ctx.zeta(0.5+ctx.j*t, derivative=1)
|
111 |
+
theta1 = ctx.siegeltheta(t, derivative=1)
|
112 |
+
if d == 1:
|
113 |
+
v = ctx.j*e1*(z1+z*theta1)
|
114 |
+
ctx.prec=prec
|
115 |
+
if ctx._is_real_type(t):
|
116 |
+
return ctx._re(v)
|
117 |
+
return +v
|
118 |
+
z2 = ctx.zeta(0.5+ctx.j*t, derivative=2)
|
119 |
+
theta2 = ctx.siegeltheta(t, derivative=2)
|
120 |
+
comb1 = theta1**2-ctx.j*theta2
|
121 |
+
if d == 2:
|
122 |
+
def terms():
|
123 |
+
return [2*z1*theta1, z2, z*comb1]
|
124 |
+
v = ctx.sum_accurately(terms, 1)
|
125 |
+
v = -e1*v
|
126 |
+
ctx.prec = prec
|
127 |
+
if ctx._is_real_type(t):
|
128 |
+
return ctx._re(v)
|
129 |
+
return +v
|
130 |
+
ctx.prec += 10
|
131 |
+
z3 = ctx.zeta(0.5+ctx.j*t, derivative=3)
|
132 |
+
theta3 = ctx.siegeltheta(t, derivative=3)
|
133 |
+
comb2 = theta1**3-3*ctx.j*theta1*theta2-theta3
|
134 |
+
if d == 3:
|
135 |
+
def terms():
|
136 |
+
return [3*theta1*z2, 3*z1*comb1, z3+z*comb2]
|
137 |
+
v = ctx.sum_accurately(terms, 1)
|
138 |
+
v = -ctx.j*e1*v
|
139 |
+
ctx.prec = prec
|
140 |
+
if ctx._is_real_type(t):
|
141 |
+
return ctx._re(v)
|
142 |
+
return +v
|
143 |
+
z4 = ctx.zeta(0.5+ctx.j*t, derivative=4)
|
144 |
+
theta4 = ctx.siegeltheta(t, derivative=4)
|
145 |
+
def terms():
|
146 |
+
return [theta1**4, -6*ctx.j*theta1**2*theta2, -3*theta2**2,
|
147 |
+
-4*theta1*theta3, ctx.j*theta4]
|
148 |
+
comb3 = ctx.sum_accurately(terms, 1)
|
149 |
+
if d == 4:
|
150 |
+
def terms():
|
151 |
+
return [6*theta1**2*z2, -6*ctx.j*z2*theta2, 4*theta1*z3,
|
152 |
+
4*z1*comb2, z4, z*comb3]
|
153 |
+
v = ctx.sum_accurately(terms, 1)
|
154 |
+
v = e1*v
|
155 |
+
ctx.prec = prec
|
156 |
+
if ctx._is_real_type(t):
|
157 |
+
return ctx._re(v)
|
158 |
+
return +v
|
159 |
+
if d > 4:
|
160 |
+
h = lambda x: ctx.siegelz(x, derivative=4)
|
161 |
+
return ctx.diff(h, t, n=d-4)
|
162 |
+
|
163 |
+
|
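# Descriptive note: the table below hard-codes the imaginary parts of the
# first 100 nontrivial zeros (to about nine decimal places); oldzetazero
# further down uses entry n-1 as the starting point for findroot applied
# to the Z-function (siegelz).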
164 |
+
_zeta_zeros = [
|
165 |
+
14.134725142,21.022039639,25.010857580,30.424876126,32.935061588,
|
166 |
+
37.586178159,40.918719012,43.327073281,48.005150881,49.773832478,
|
167 |
+
52.970321478,56.446247697,59.347044003,60.831778525,65.112544048,
|
168 |
+
67.079810529,69.546401711,72.067157674,75.704690699,77.144840069,
|
169 |
+
79.337375020,82.910380854,84.735492981,87.425274613,88.809111208,
|
170 |
+
92.491899271,94.651344041,95.870634228,98.831194218,101.317851006,
|
171 |
+
103.725538040,105.446623052,107.168611184,111.029535543,111.874659177,
|
172 |
+
114.320220915,116.226680321,118.790782866,121.370125002,122.946829294,
|
173 |
+
124.256818554,127.516683880,129.578704200,131.087688531,133.497737203,
|
174 |
+
134.756509753,138.116042055,139.736208952,141.123707404,143.111845808,
|
175 |
+
146.000982487,147.422765343,150.053520421,150.925257612,153.024693811,
|
176 |
+
156.112909294,157.597591818,158.849988171,161.188964138,163.030709687,
|
177 |
+
165.537069188,167.184439978,169.094515416,169.911976479,173.411536520,
|
178 |
+
174.754191523,176.441434298,178.377407776,179.916484020,182.207078484,
|
179 |
+
184.874467848,185.598783678,187.228922584,189.416158656,192.026656361,
|
180 |
+
193.079726604,195.265396680,196.876481841,198.015309676,201.264751944,
|
181 |
+
202.493594514,204.189671803,205.394697202,207.906258888,209.576509717,
|
182 |
+
211.690862595,213.347919360,214.547044783,216.169538508,219.067596349,
|
183 |
+
220.714918839,221.430705555,224.007000255,224.983324670,227.421444280,
|
184 |
+
229.337413306,231.250188700,231.987235253,233.693404179,236.524229666,
|
185 |
+
]
|
186 |
+
|
def _load_zeta_zeros(url):
    # Python 3 note: urllib.urlopen no longer exists and readlines() yields
    # bytes, so fetch via urllib.request and decode before converting.
    from urllib.request import urlopen
    d = urlopen(url)
    L = [float(x.decode('ascii')) for x in d.readlines()]
    # Sanity check
    assert round(L[0]) == 14
    _zeta_zeros[:] = L

195 |
+
@defun
|
196 |
+
def oldzetazero(ctx, n, url='http://www.dtc.umn.edu/~odlyzko/zeta_tables/zeros1'):
|
197 |
+
n = int(n)
|
198 |
+
if n < 0:
|
199 |
+
return ctx.zetazero(-n).conjugate()
|
200 |
+
if n == 0:
|
201 |
+
raise ValueError("n must be nonzero")
|
202 |
+
if n > len(_zeta_zeros) and n <= 100000:
|
203 |
+
_load_zeta_zeros(url)
|
204 |
+
if n > len(_zeta_zeros):
|
205 |
+
raise NotImplementedError("n too large for zetazeros")
|
206 |
+
return ctx.mpc(0.5, ctx.findroot(ctx.siegelz, _zeta_zeros[n-1]))
|
207 |
+
|
208 |
+
@defun_wrapped
|
209 |
+
def riemannr(ctx, x):
|
210 |
+
if x == 0:
|
211 |
+
return ctx.zero
|
212 |
+
# Check if a simple asymptotic estimate is accurate enough
|
213 |
+
if abs(x) > 1000:
|
214 |
+
a = ctx.li(x)
|
215 |
+
b = 0.5*ctx.li(ctx.sqrt(x))
|
216 |
+
if abs(b) < abs(a)*ctx.eps:
|
217 |
+
return a
|
218 |
+
if abs(x) < 0.01:
|
219 |
+
# XXX
|
220 |
+
ctx.prec += int(-ctx.log(abs(x),2))
|
221 |
+
# Sum Gram's series
|
222 |
+
s = t = ctx.one
|
223 |
+
u = ctx.ln(x)
|
224 |
+
k = 1
|
225 |
+
while abs(t) > abs(s)*ctx.eps:
|
226 |
+
t = t * u / k
|
227 |
+
s += t / (k * ctx._zeta_int(k+1))
|
228 |
+
k += 1
|
229 |
+
return s
|
230 |
+
|
231 |
+
@defun_static
|
232 |
+
def primepi(ctx, x):
|
233 |
+
x = int(x)
|
234 |
+
if x < 2:
|
235 |
+
return 0
|
236 |
+
return len(ctx.list_primes(x))
|
237 |
+
|
238 |
+
# TODO: fix the interface wrt contexts
|
239 |
+
@defun_wrapped
|
240 |
+
def primepi2(ctx, x):
|
241 |
+
x = int(x)
|
242 |
+
if x < 2:
|
243 |
+
return ctx._iv.zero
|
244 |
+
if x < 2657:
|
245 |
+
return ctx._iv.mpf(ctx.primepi(x))
|
246 |
+
mid = ctx.li(x)
|
247 |
+
# Schoenfeld's estimate for x >= 2657, assuming RH
|
248 |
+
err = ctx.sqrt(x,rounding='u')*ctx.ln(x,rounding='u')/8/ctx.pi(rounding='d')
|
249 |
+
a = ctx.floor((ctx._iv.mpf(mid)-err).a, rounding='d')
|
250 |
+
b = ctx.ceil((ctx._iv.mpf(mid)+err).b, rounding='u')
|
251 |
+
return ctx._iv.mpf([a,b])
|
252 |
+
|
253 |
+
@defun_wrapped
|
254 |
+
def primezeta(ctx, s):
|
255 |
+
if ctx.isnan(s):
|
256 |
+
return s
|
257 |
+
if ctx.re(s) <= 0:
|
258 |
+
raise ValueError("prime zeta function defined only for re(s) > 0")
|
259 |
+
if s == 1:
|
260 |
+
return ctx.inf
|
261 |
+
if s == 0.5:
|
262 |
+
return ctx.mpc(ctx.ninf, ctx.pi)
|
263 |
+
r = ctx.re(s)
|
264 |
+
if r > ctx.prec:
|
265 |
+
return 0.5**s
|
266 |
+
else:
|
267 |
+
wp = ctx.prec + int(r)
|
268 |
+
def terms():
|
269 |
+
orig = ctx.prec
|
270 |
+
# zeta ~ 1+eps; need to set precision
|
271 |
+
# to get logarithm accurately
|
272 |
+
k = 0
|
273 |
+
while 1:
|
274 |
+
k += 1
|
275 |
+
u = ctx.moebius(k)
|
276 |
+
if not u:
|
277 |
+
continue
|
278 |
+
ctx.prec = wp
|
279 |
+
t = u*ctx.ln(ctx.zeta(k*s))/k
|
280 |
+
if not t:
|
281 |
+
return
|
282 |
+
#print ctx.prec, ctx.nstr(t)
|
283 |
+
ctx.prec = orig
|
284 |
+
yield t
|
285 |
+
return ctx.sum_accurately(terms)
|
286 |
+
|
287 |
+
# TODO: for bernpoly and eulerpoly, ensure that all exact zeros are covered
|
288 |
+
|
289 |
+
@defun_wrapped
|
290 |
+
def bernpoly(ctx, n, z):
|
291 |
+
# Slow implementation:
|
292 |
+
#return sum(ctx.binomial(n,k)*ctx.bernoulli(k)*z**(n-k) for k in xrange(0,n+1))
|
293 |
+
n = int(n)
|
294 |
+
if n < 0:
|
295 |
+
raise ValueError("Bernoulli polynomials only defined for n >= 0")
|
296 |
+
if z == 0 or (z == 1 and n > 1):
|
297 |
+
return ctx.bernoulli(n)
|
298 |
+
if z == 0.5:
|
299 |
+
return (ctx.ldexp(1,1-n)-1)*ctx.bernoulli(n)
|
300 |
+
if n <= 3:
|
301 |
+
if n == 0: return z ** 0
|
302 |
+
if n == 1: return z - 0.5
|
303 |
+
if n == 2: return (6*z*(z-1)+1)/6
|
304 |
+
if n == 3: return z*(z*(z-1.5)+0.5)
|
305 |
+
if ctx.isinf(z):
|
306 |
+
return z ** n
|
307 |
+
if ctx.isnan(z):
|
308 |
+
return z
|
309 |
+
if abs(z) > 2:
|
310 |
+
def terms():
|
311 |
+
t = ctx.one
|
312 |
+
yield t
|
313 |
+
r = ctx.one/z
|
314 |
+
k = 1
|
315 |
+
while k <= n:
|
316 |
+
t = t*(n+1-k)/k*r
|
317 |
+
if not (k > 2 and k & 1):
|
318 |
+
yield t*ctx.bernoulli(k)
|
319 |
+
k += 1
|
320 |
+
return ctx.sum_accurately(terms) * z**n
|
321 |
+
else:
|
322 |
+
def terms():
|
323 |
+
yield ctx.bernoulli(n)
|
324 |
+
t = ctx.one
|
325 |
+
k = 1
|
326 |
+
while k <= n:
|
327 |
+
t = t*(n+1-k)/k * z
|
328 |
+
m = n-k
|
329 |
+
if not (m > 2 and m & 1):
|
330 |
+
yield t*ctx.bernoulli(m)
|
331 |
+
k += 1
|
332 |
+
return ctx.sum_accurately(terms)
|
333 |
+
|
334 |
+
@defun_wrapped
|
335 |
+
def eulerpoly(ctx, n, z):
|
336 |
+
n = int(n)
|
337 |
+
if n < 0:
|
338 |
+
raise ValueError("Euler polynomials only defined for n >= 0")
|
339 |
+
if n <= 2:
|
340 |
+
if n == 0: return z ** 0
|
341 |
+
if n == 1: return z - 0.5
|
342 |
+
if n == 2: return z*(z-1)
|
343 |
+
if ctx.isinf(z):
|
344 |
+
return z**n
|
345 |
+
if ctx.isnan(z):
|
346 |
+
return z
|
347 |
+
m = n+1
|
348 |
+
if z == 0:
|
349 |
+
return -2*(ctx.ldexp(1,m)-1)*ctx.bernoulli(m)/m * z**0
|
350 |
+
if z == 1:
|
351 |
+
return 2*(ctx.ldexp(1,m)-1)*ctx.bernoulli(m)/m * z**0
|
352 |
+
if z == 0.5:
|
353 |
+
if n % 2:
|
354 |
+
return ctx.zero
|
355 |
+
# Use exact code for Euler numbers
|
356 |
+
if n < 100 or n*ctx.mag(0.46839865*n) < ctx.prec*0.25:
|
357 |
+
return ctx.ldexp(ctx._eulernum(n), -n)
|
358 |
+
# http://functions.wolfram.com/Polynomials/EulerE2/06/01/02/01/0002/
|
359 |
+
def terms():
|
360 |
+
t = ctx.one
|
361 |
+
k = 0
|
362 |
+
w = ctx.ldexp(1,n+2)
|
363 |
+
while 1:
|
364 |
+
v = n-k+1
|
365 |
+
if not (v > 2 and v & 1):
|
366 |
+
yield (2-w)*ctx.bernoulli(v)*t
|
367 |
+
k += 1
|
368 |
+
if k > n:
|
369 |
+
break
|
370 |
+
t = t*z*(n-k+2)/k
|
371 |
+
w *= 0.5
|
372 |
+
return ctx.sum_accurately(terms) / m
|
373 |
+
|
374 |
+
@defun
|
375 |
+
def eulernum(ctx, n, exact=False):
|
376 |
+
n = int(n)
|
377 |
+
if exact:
|
378 |
+
return int(ctx._eulernum(n))
|
379 |
+
if n < 100:
|
380 |
+
return ctx.mpf(ctx._eulernum(n))
|
381 |
+
if n % 2:
|
382 |
+
return ctx.zero
|
383 |
+
return ctx.ldexp(ctx.eulerpoly(n,0.5), n)
|
384 |
+
|
385 |
+
# TODO: this should be implemented low-level
|
386 |
+
def polylog_series(ctx, s, z):
|
387 |
+
tol = +ctx.eps
|
388 |
+
l = ctx.zero
|
389 |
+
k = 1
|
390 |
+
zk = z
|
391 |
+
while 1:
|
392 |
+
term = zk / k**s
|
393 |
+
l += term
|
394 |
+
if abs(term) < tol:
|
395 |
+
break
|
396 |
+
zk *= z
|
397 |
+
k += 1
|
398 |
+
return l
|
399 |
+
|
400 |
+
def polylog_continuation(ctx, n, z):
|
401 |
+
if n < 0:
|
402 |
+
return z*0
|
403 |
+
twopij = 2j * ctx.pi
|
404 |
+
a = -twopij**n/ctx.fac(n) * ctx.bernpoly(n, ctx.ln(z)/twopij)
|
405 |
+
if ctx._is_real_type(z) and z < 0:
|
406 |
+
a = ctx._re(a)
|
407 |
+
if ctx._im(z) < 0 or (ctx._im(z) == 0 and ctx._re(z) >= 1):
|
408 |
+
a -= twopij*ctx.ln(z)**(n-1)/ctx.fac(n-1)
|
409 |
+
return a
|
410 |
+
|
411 |
+
def polylog_unitcircle(ctx, n, z):
|
412 |
+
tol = +ctx.eps
|
413 |
+
if n > 1:
|
414 |
+
l = ctx.zero
|
415 |
+
logz = ctx.ln(z)
|
416 |
+
logmz = ctx.one
|
417 |
+
m = 0
|
418 |
+
while 1:
|
419 |
+
if (n-m) != 1:
|
420 |
+
term = ctx.zeta(n-m) * logmz / ctx.fac(m)
|
421 |
+
if term and abs(term) < tol:
|
422 |
+
break
|
423 |
+
l += term
|
424 |
+
logmz *= logz
|
425 |
+
m += 1
|
426 |
+
l += ctx.ln(z)**(n-1)/ctx.fac(n-1)*(ctx.harmonic(n-1)-ctx.ln(-ctx.ln(z)))
|
427 |
+
elif n < 1: # else
|
428 |
+
l = ctx.fac(-n)*(-ctx.ln(z))**(n-1)
|
429 |
+
logz = ctx.ln(z)
|
430 |
+
logkz = ctx.one
|
431 |
+
k = 0
|
432 |
+
while 1:
|
433 |
+
b = ctx.bernoulli(k-n+1)
|
434 |
+
if b:
|
435 |
+
term = b*logkz/(ctx.fac(k)*(k-n+1))
|
436 |
+
if abs(term) < tol:
|
437 |
+
break
|
438 |
+
l -= term
|
439 |
+
logkz *= logz
|
440 |
+
k += 1
|
441 |
+
else:
|
442 |
+
raise ValueError
|
443 |
+
if ctx._is_real_type(z) and z < 0:
|
444 |
+
l = ctx._re(l)
|
445 |
+
return l
|
446 |
+
|
447 |
+
def polylog_general(ctx, s, z):
|
448 |
+
v = ctx.zero
|
449 |
+
u = ctx.ln(z)
|
450 |
+
if not abs(u) < 5: # theoretically |u| < 2*pi
|
451 |
+
j = ctx.j
|
452 |
+
v = 1-s
|
453 |
+
y = ctx.ln(-z)/(2*ctx.pi*j)
|
454 |
+
return ctx.gamma(v)*(j**v*ctx.zeta(v,0.5+y) + j**-v*ctx.zeta(v,0.5-y))/(2*ctx.pi)**v
|
455 |
+
t = 1
|
456 |
+
k = 0
|
457 |
+
while 1:
|
458 |
+
term = ctx.zeta(s-k) * t
|
459 |
+
if abs(term) < ctx.eps:
|
460 |
+
break
|
461 |
+
v += term
|
462 |
+
k += 1
|
463 |
+
t *= u
|
464 |
+
t /= k
|
465 |
+
return ctx.gamma(1-s)*(-u)**(s-1) + v
|
466 |
+
|
467 |
+
@defun_wrapped
|
468 |
+
def polylog(ctx, s, z):
|
469 |
+
s = ctx.convert(s)
|
470 |
+
z = ctx.convert(z)
|
471 |
+
if z == 1:
|
472 |
+
return ctx.zeta(s)
|
473 |
+
if z == -1:
|
474 |
+
return -ctx.altzeta(s)
|
475 |
+
if s == 0:
|
476 |
+
return z/(1-z)
|
477 |
+
if s == 1:
|
478 |
+
return -ctx.ln(1-z)
|
479 |
+
if s == -1:
|
480 |
+
return z/(1-z)**2
|
481 |
+
if abs(z) <= 0.75 or (not ctx.isint(s) and abs(z) < 0.9):
|
482 |
+
return polylog_series(ctx, s, z)
|
483 |
+
if abs(z) >= 1.4 and ctx.isint(s):
|
484 |
+
return (-1)**(s+1)*polylog_series(ctx, s, 1/z) + polylog_continuation(ctx, int(ctx.re(s)), z)
|
485 |
+
if ctx.isint(s):
|
486 |
+
return polylog_unitcircle(ctx, int(ctx.re(s)), z)
|
487 |
+
return polylog_general(ctx, s, z)
|
488 |
+
|
489 |
+
@defun_wrapped
|
490 |
+
def clsin(ctx, s, z, pi=False):
|
491 |
+
if ctx.isint(s) and s < 0 and int(s) % 2 == 1:
|
492 |
+
return z*0
|
493 |
+
if pi:
|
494 |
+
a = ctx.expjpi(z)
|
495 |
+
else:
|
496 |
+
a = ctx.expj(z)
|
497 |
+
if ctx._is_real_type(z) and ctx._is_real_type(s):
|
498 |
+
return ctx.im(ctx.polylog(s,a))
|
499 |
+
b = 1/a
|
500 |
+
return (-0.5j)*(ctx.polylog(s,a) - ctx.polylog(s,b))
|
501 |
+
|
502 |
+
@defun_wrapped
|
503 |
+
def clcos(ctx, s, z, pi=False):
|
504 |
+
if ctx.isint(s) and s < 0 and int(s) % 2 == 0:
|
505 |
+
return z*0
|
506 |
+
if pi:
|
507 |
+
a = ctx.expjpi(z)
|
508 |
+
else:
|
509 |
+
a = ctx.expj(z)
|
510 |
+
if ctx._is_real_type(z) and ctx._is_real_type(s):
|
511 |
+
return ctx.re(ctx.polylog(s,a))
|
512 |
+
b = 1/a
|
513 |
+
return 0.5*(ctx.polylog(s,a) + ctx.polylog(s,b))
|
514 |
+
|
515 |
+
@defun
|
516 |
+
def altzeta(ctx, s, **kwargs):
|
517 |
+
try:
|
518 |
+
return ctx._altzeta(s, **kwargs)
|
519 |
+
except NotImplementedError:
|
520 |
+
return ctx._altzeta_generic(s)
|
521 |
+
|
522 |
+
@defun_wrapped
|
523 |
+
def _altzeta_generic(ctx, s):
|
524 |
+
if s == 1:
|
525 |
+
return ctx.ln2 + 0*s
|
526 |
+
return -ctx.powm1(2, 1-s) * ctx.zeta(s)
|
527 |
+
|
528 |
+
@defun
|
529 |
+
def zeta(ctx, s, a=1, derivative=0, method=None, **kwargs):
|
530 |
+
d = int(derivative)
|
531 |
+
if a == 1 and not (d or method):
|
532 |
+
try:
|
533 |
+
return ctx._zeta(s, **kwargs)
|
534 |
+
except NotImplementedError:
|
535 |
+
pass
|
536 |
+
s = ctx.convert(s)
|
537 |
+
prec = ctx.prec
|
538 |
+
method = kwargs.get('method')
|
539 |
+
verbose = kwargs.get('verbose')
|
540 |
+
if (not s) and (not derivative):
|
541 |
+
return ctx.mpf(0.5) - ctx._convert_param(a)[0]
|
542 |
+
if a == 1 and method != 'euler-maclaurin':
|
543 |
+
im = abs(ctx._im(s))
|
544 |
+
re = abs(ctx._re(s))
|
545 |
+
#if (im < prec or method == 'borwein') and not derivative:
|
546 |
+
# try:
|
547 |
+
# if verbose:
|
548 |
+
# print "zeta: Attempting to use the Borwein algorithm"
|
549 |
+
# return ctx._zeta(s, **kwargs)
|
550 |
+
# except NotImplementedError:
|
551 |
+
# if verbose:
|
552 |
+
# print "zeta: Could not use the Borwein algorithm"
|
553 |
+
# pass
|
554 |
+
if abs(im) > 500*prec and 10*re < prec and derivative <= 4 or \
|
555 |
+
method == 'riemann-siegel':
|
556 |
+
try: # py2.4 compatible try block
|
557 |
+
try:
|
558 |
+
if verbose:
|
559 |
+
print("zeta: Attempting to use the Riemann-Siegel algorithm")
|
560 |
+
return ctx.rs_zeta(s, derivative, **kwargs)
|
561 |
+
except NotImplementedError:
|
562 |
+
if verbose:
|
563 |
+
print("zeta: Could not use the Riemann-Siegel algorithm")
|
564 |
+
pass
|
565 |
+
finally:
|
566 |
+
ctx.prec = prec
|
567 |
+
if s == 1:
|
568 |
+
return ctx.inf
|
569 |
+
abss = abs(s)
|
570 |
+
if abss == ctx.inf:
|
571 |
+
if ctx.re(s) == ctx.inf:
|
572 |
+
if d == 0:
|
573 |
+
return ctx.one
|
574 |
+
return ctx.zero
|
575 |
+
return s*0
|
576 |
+
elif ctx.isnan(abss):
|
577 |
+
return 1/s
|
578 |
+
if ctx.re(s) > 2*ctx.prec and a == 1 and not derivative:
|
579 |
+
return ctx.one + ctx.power(2, -s)
|
580 |
+
return +ctx._hurwitz(s, a, d, **kwargs)
|
581 |
+
|
582 |
+
@defun
|
583 |
+
def _hurwitz(ctx, s, a=1, d=0, **kwargs):
|
584 |
+
prec = ctx.prec
|
585 |
+
verbose = kwargs.get('verbose')
|
586 |
+
try:
|
587 |
+
extraprec = 10
|
588 |
+
ctx.prec += extraprec
|
589 |
+
# We strongly want to special-case rational a
|
590 |
+
a, atype = ctx._convert_param(a)
|
591 |
+
if ctx.re(s) < 0:
|
592 |
+
if verbose:
|
593 |
+
print("zeta: Attempting reflection formula")
|
594 |
+
try:
|
595 |
+
return _hurwitz_reflection(ctx, s, a, d, atype)
|
596 |
+
except NotImplementedError:
|
597 |
+
pass
|
598 |
+
if verbose:
|
599 |
+
print("zeta: Reflection formula failed")
|
600 |
+
if verbose:
|
601 |
+
print("zeta: Using the Euler-Maclaurin algorithm")
|
602 |
+
while 1:
|
603 |
+
ctx.prec = prec + extraprec
|
604 |
+
T1, T2 = _hurwitz_em(ctx, s, a, d, prec+10, verbose)
|
605 |
+
cancellation = ctx.mag(T1) - ctx.mag(T1+T2)
|
606 |
+
if verbose:
|
607 |
+
print("Term 1:", T1)
|
608 |
+
print("Term 2:", T2)
|
609 |
+
print("Cancellation:", cancellation, "bits")
|
610 |
+
if cancellation < extraprec:
|
611 |
+
return T1 + T2
|
612 |
+
else:
|
613 |
+
extraprec = max(2*extraprec, min(cancellation + 5, 100*prec))
|
614 |
+
if extraprec > kwargs.get('maxprec', 100*prec):
|
615 |
+
raise ctx.NoConvergence("zeta: too much cancellation")
|
616 |
+
finally:
|
617 |
+
ctx.prec = prec
|
618 |
+
|
619 |
+
def _hurwitz_reflection(ctx, s, a, d, atype):
|
620 |
+
# TODO: implement for derivatives
|
621 |
+
if d != 0:
|
622 |
+
raise NotImplementedError
|
623 |
+
res = ctx.re(s)
|
624 |
+
negs = -s
|
625 |
+
# Integer reflection formula
|
626 |
+
if ctx.isnpint(s):
|
627 |
+
n = int(res)
|
628 |
+
if n <= 0:
|
629 |
+
return ctx.bernpoly(1-n, a) / (n-1)
|
630 |
+
if not (atype == 'Q' or atype == 'Z'):
|
631 |
+
raise NotImplementedError
|
632 |
+
t = 1-s
|
633 |
+
# We now require a to be standardized
|
634 |
+
v = 0
|
635 |
+
shift = 0
|
636 |
+
b = a
|
637 |
+
while ctx.re(b) > 1:
|
638 |
+
b -= 1
|
639 |
+
v -= b**negs
|
640 |
+
shift -= 1
|
641 |
+
while ctx.re(b) <= 0:
|
642 |
+
v += b**negs
|
643 |
+
b += 1
|
644 |
+
shift += 1
|
645 |
+
# Rational reflection formula
|
646 |
+
try:
|
647 |
+
p, q = a._mpq_
|
648 |
+
except:
|
649 |
+
assert a == int(a)
|
650 |
+
p = int(a)
|
651 |
+
q = 1
|
652 |
+
p += shift*q
|
653 |
+
assert 1 <= p <= q
|
654 |
+
g = ctx.fsum(ctx.cospi(t/2-2*k*b)*ctx._hurwitz(t,(k,q)) \
|
655 |
+
for k in range(1,q+1))
|
656 |
+
g *= 2*ctx.gamma(t)/(2*ctx.pi*q)**t
|
657 |
+
v += g
|
658 |
+
return v
|
659 |
+
|
660 |
+
def _hurwitz_em(ctx, s, a, d, prec, verbose):
|
661 |
+
# May not be converted at this point
|
662 |
+
a = ctx.convert(a)
|
663 |
+
tol = -prec
|
664 |
+
# Estimate number of terms for Euler-Maclaurin summation; could be improved
|
665 |
+
M1 = 0
|
666 |
+
M2 = prec // 3
|
667 |
+
N = M2
|
668 |
+
lsum = 0
|
669 |
+
# This speeds up the recurrence for derivatives
|
670 |
+
if ctx.isint(s):
|
671 |
+
s = int(ctx._re(s))
|
672 |
+
s1 = s-1
|
673 |
+
while 1:
|
674 |
+
# Truncated L-series
|
675 |
+
l = ctx._zetasum(s, M1+a, M2-M1-1, [d])[0][0]
|
676 |
+
#if d:
|
677 |
+
# l = ctx.fsum((-ctx.ln(n+a))**d * (n+a)**negs for n in range(M1,M2))
|
678 |
+
#else:
|
679 |
+
# l = ctx.fsum((n+a)**negs for n in range(M1,M2))
|
680 |
+
lsum += l
|
681 |
+
M2a = M2+a
|
682 |
+
logM2a = ctx.ln(M2a)
|
683 |
+
logM2ad = logM2a**d
|
684 |
+
logs = [logM2ad]
|
685 |
+
logr = 1/logM2a
|
686 |
+
rM2a = 1/M2a
|
687 |
+
M2as = M2a**(-s)
|
688 |
+
if d:
|
689 |
+
tailsum = ctx.gammainc(d+1, s1*logM2a) / s1**(d+1)
|
690 |
+
else:
|
691 |
+
tailsum = 1/((s1)*(M2a)**s1)
|
692 |
+
tailsum += 0.5 * logM2ad * M2as
|
693 |
+
U = [1]
|
694 |
+
r = M2as
|
695 |
+
fact = 2
|
696 |
+
for j in range(1, N+1):
|
697 |
+
# TODO: the following could perhaps be tidied a bit
|
698 |
+
j2 = 2*j
|
699 |
+
if j == 1:
|
700 |
+
upds = [1]
|
701 |
+
else:
|
702 |
+
upds = [j2-2, j2-1]
|
703 |
+
for m in upds:
|
704 |
+
D = min(m,d+1)
|
705 |
+
if m <= d:
|
706 |
+
logs.append(logs[-1] * logr)
|
707 |
+
Un = [0]*(D+1)
|
708 |
+
for i in xrange(D): Un[i] = (1-m-s)*U[i]
|
709 |
+
for i in xrange(1,D+1): Un[i] += (d-(i-1))*U[i-1]
|
710 |
+
U = Un
|
711 |
+
r *= rM2a
|
712 |
+
t = ctx.fdot(U, logs) * r * ctx.bernoulli(j2)/(-fact)
|
713 |
+
tailsum += t
|
714 |
+
if ctx.mag(t) < tol:
|
715 |
+
return lsum, (-1)**d * tailsum
|
716 |
+
fact *= (j2+1)*(j2+2)
|
717 |
+
if verbose:
|
718 |
+
print("Sum range:", M1, M2, "term magnitude", ctx.mag(t), "tolerance", tol)
|
719 |
+
M1, M2 = M2, M2*2
|
720 |
+
if ctx.re(s) < 0:
|
721 |
+
N += N//2
|
722 |
+
|
723 |
+
|
724 |
+
|
725 |
+
@defun
|
726 |
+
def _zetasum(ctx, s, a, n, derivatives=[0], reflect=False):
|
727 |
+
"""
|
728 |
+
Returns [xd0,xd1,...,xdr], [yd0,yd1,...ydr] where
|
729 |
+
|
730 |
+
xdk = D^k ( 1/a^s + 1/(a+1)^s + ... + 1/(a+n)^s )
|
731 |
+
ydk = D^k conj( 1/a^(1-s) + 1/(a+1)^(1-s) + ... + 1/(a+n)^(1-s) )
|
732 |
+
|
733 |
+
D^k = kth derivative with respect to s, k ranges over the given list of
|
734 |
+
derivatives (which should consist of either a single element
|
735 |
+
or a range 0,1,...r). If reflect=False, the ydks are not computed.
|
736 |
+
"""
|
737 |
+
#print "zetasum", s, a, n
|
738 |
+
# don't use the fixed-point code if there are large exponentials
|
739 |
+
if abs(ctx.re(s)) < 0.5 * ctx.prec:
|
740 |
+
try:
|
741 |
+
return ctx._zetasum_fast(s, a, n, derivatives, reflect)
|
742 |
+
except NotImplementedError:
|
743 |
+
pass
|
744 |
+
negs = ctx.fneg(s, exact=True)
|
745 |
+
have_derivatives = derivatives != [0]
|
746 |
+
have_one_derivative = len(derivatives) == 1
|
747 |
+
if not reflect:
|
748 |
+
if not have_derivatives:
|
749 |
+
return [ctx.fsum((a+k)**negs for k in xrange(n+1))], []
|
750 |
+
if have_one_derivative:
|
751 |
+
d = derivatives[0]
|
752 |
+
x = ctx.fsum(ctx.ln(a+k)**d * (a+k)**negs for k in xrange(n+1))
|
753 |
+
return [(-1)**d * x], []
|
754 |
+
maxd = max(derivatives)
|
755 |
+
if not have_one_derivative:
|
756 |
+
derivatives = range(maxd+1)
|
757 |
+
xs = [ctx.zero for d in derivatives]
|
758 |
+
if reflect:
|
759 |
+
ys = [ctx.zero for d in derivatives]
|
760 |
+
else:
|
761 |
+
ys = []
|
762 |
+
for k in xrange(n+1):
|
763 |
+
w = a + k
|
764 |
+
xterm = w ** negs
|
765 |
+
if reflect:
|
766 |
+
yterm = ctx.conj(ctx.one / (w * xterm))
|
767 |
+
if have_derivatives:
|
768 |
+
logw = -ctx.ln(w)
|
769 |
+
if have_one_derivative:
|
770 |
+
logw = logw ** maxd
|
771 |
+
xs[0] += xterm * logw
|
772 |
+
if reflect:
|
773 |
+
ys[0] += yterm * logw
|
774 |
+
else:
|
775 |
+
t = ctx.one
|
776 |
+
for d in derivatives:
|
777 |
+
xs[d] += xterm * t
|
778 |
+
if reflect:
|
779 |
+
ys[d] += yterm * t
|
780 |
+
t *= logw
|
781 |
+
else:
|
782 |
+
xs[0] += xterm
|
783 |
+
if reflect:
|
784 |
+
ys[0] += yterm
|
785 |
+
return xs, ys
|
786 |
+
|
787 |
+
@defun
|
788 |
+
def dirichlet(ctx, s, chi=[1], derivative=0):
|
789 |
+
s = ctx.convert(s)
|
790 |
+
q = len(chi)
|
791 |
+
d = int(derivative)
|
792 |
+
if d > 2:
|
793 |
+
raise NotImplementedError("arbitrary order derivatives")
|
794 |
+
prec = ctx.prec
|
795 |
+
try:
|
796 |
+
ctx.prec += 10
|
797 |
+
if s == 1:
|
798 |
+
have_pole = True
|
799 |
+
for x in chi:
|
800 |
+
if x and x != 1:
|
801 |
+
have_pole = False
|
802 |
+
h = +ctx.eps
|
803 |
+
ctx.prec *= 2*(d+1)
|
804 |
+
s += h
|
805 |
+
if have_pole:
|
806 |
+
return +ctx.inf
|
807 |
+
z = ctx.zero
|
808 |
+
for p in range(1,q+1):
|
809 |
+
if chi[p%q]:
|
810 |
+
if d == 1:
|
811 |
+
z += chi[p%q] * (ctx.zeta(s, (p,q), 1) - \
|
812 |
+
ctx.zeta(s, (p,q))*ctx.log(q))
|
813 |
+
else:
|
814 |
+
z += chi[p%q] * ctx.zeta(s, (p,q))
|
815 |
+
z /= q**s
|
816 |
+
finally:
|
817 |
+
ctx.prec = prec
|
818 |
+
return +z
|
819 |
+
|
820 |
+
|
821 |
+
def secondzeta_main_term(ctx, s, a, **kwargs):
|
822 |
+
tol = ctx.eps
|
823 |
+
f = lambda n: ctx.gammainc(0.5*s, a*gamm**2, regularized=True)*gamm**(-s)
|
824 |
+
totsum = term = ctx.zero
|
825 |
+
mg = ctx.inf
|
826 |
+
n = 0
|
827 |
+
while mg > tol:
|
828 |
+
totsum += term
|
829 |
+
n += 1
|
830 |
+
gamm = ctx.im(ctx.zetazero_memoized(n))
|
831 |
+
term = f(n)
|
832 |
+
mg = abs(term)
|
833 |
+
err = 0
|
834 |
+
if kwargs.get("error"):
|
835 |
+
sg = ctx.re(s)
|
836 |
+
err = 0.5*ctx.pi**(-1)*max(1,sg)*a**(sg-0.5)*ctx.log(gamm/(2*ctx.pi))*\
|
837 |
+
ctx.gammainc(-0.5, a*gamm**2)/abs(ctx.gamma(s/2))
|
838 |
+
err = abs(err)
|
839 |
+
return +totsum, err, n
|
840 |
+
|
841 |
+
def secondzeta_prime_term(ctx, s, a, **kwargs):
|
842 |
+
tol = ctx.eps
|
843 |
+
f = lambda n: ctx.gammainc(0.5*(1-s),0.25*ctx.log(n)**2 * a**(-1))*\
|
844 |
+
((0.5*ctx.log(n))**(s-1))*ctx.mangoldt(n)/ctx.sqrt(n)/\
|
845 |
+
(2*ctx.gamma(0.5*s)*ctx.sqrt(ctx.pi))
|
846 |
+
totsum = term = ctx.zero
|
847 |
+
mg = ctx.inf
|
848 |
+
n = 1
|
849 |
+
while mg > tol or n < 9:
|
850 |
+
totsum += term
|
851 |
+
n += 1
|
852 |
+
term = f(n)
|
853 |
+
if term == 0:
|
854 |
+
mg = ctx.inf
|
855 |
+
else:
|
856 |
+
mg = abs(term)
|
857 |
+
if kwargs.get("error"):
|
858 |
+
err = mg
|
859 |
+
return +totsum, err, n
|
860 |
+
|
861 |
+
def secondzeta_exp_term(ctx, s, a):
|
862 |
+
if ctx.isint(s) and ctx.re(s) <= 0:
|
863 |
+
m = int(round(ctx.re(s)))
|
864 |
+
if not m & 1:
|
865 |
+
return ctx.mpf('-0.25')**(-m//2)
|
866 |
+
tol = ctx.eps
|
867 |
+
f = lambda n: (0.25*a)**n/((n+0.5*s)*ctx.fac(n))
|
868 |
+
totsum = ctx.zero
|
869 |
+
term = f(0)
|
870 |
+
mg = ctx.inf
|
871 |
+
n = 0
|
872 |
+
while mg > tol:
|
873 |
+
totsum += term
|
874 |
+
n += 1
|
875 |
+
term = f(n)
|
876 |
+
mg = abs(term)
|
877 |
+
v = a**(0.5*s)*totsum/ctx.gamma(0.5*s)
|
878 |
+
return v
|
879 |
+
|
880 |
+
def secondzeta_singular_term(ctx, s, a, **kwargs):
|
881 |
+
factor = a**(0.5*(s-1))/(4*ctx.sqrt(ctx.pi)*ctx.gamma(0.5*s))
|
882 |
+
extraprec = ctx.mag(factor)
|
883 |
+
ctx.prec += extraprec
|
884 |
+
factor = a**(0.5*(s-1))/(4*ctx.sqrt(ctx.pi)*ctx.gamma(0.5*s))
|
885 |
+
tol = ctx.eps
|
886 |
+
f = lambda n: ctx.bernpoly(n,0.75)*(4*ctx.sqrt(a))**n*\
|
887 |
+
ctx.gamma(0.5*n)/((s+n-1)*ctx.fac(n))
|
888 |
+
totsum = ctx.zero
|
889 |
+
mg1 = ctx.inf
|
890 |
+
n = 1
|
891 |
+
term = f(n)
|
892 |
+
mg2 = abs(term)
|
893 |
+
while mg2 > tol and mg2 <= mg1:
|
894 |
+
totsum += term
|
895 |
+
n += 1
|
896 |
+
term = f(n)
|
897 |
+
totsum += term
|
898 |
+
n +=1
|
899 |
+
term = f(n)
|
900 |
+
mg1 = mg2
|
901 |
+
mg2 = abs(term)
|
902 |
+
totsum += term
|
903 |
+
pole = -2*(s-1)**(-2)+(ctx.euler+ctx.log(16*ctx.pi**2*a))*(s-1)**(-1)
|
904 |
+
st = factor*(pole+totsum)
|
905 |
+
err = 0
|
906 |
+
if kwargs.get("error"):
|
907 |
+
if not ((mg2 > tol) and (mg2 <= mg1)):
|
908 |
+
if mg2 <= tol:
|
909 |
+
err = ctx.mpf(10)**int(ctx.log(abs(factor*tol),10))
|
910 |
+
if mg2 > mg1:
|
911 |
+
err = ctx.mpf(10)**int(ctx.log(abs(factor*mg1),10))
|
912 |
+
err = max(err, ctx.eps*1.)
|
913 |
+
ctx.prec -= extraprec
|
914 |
+
return +st, err
|
915 |
+
|
916 |
+
@defun
|
917 |
+
def secondzeta(ctx, s, a = 0.015, **kwargs):
|
918 |
+
r"""
|
919 |
+
Evaluates the secondary zeta function `Z(s)`, defined for
|
920 |
+
`\mathrm{Re}(s)>1` by
|
921 |
+
|
922 |
+
.. math ::
|
923 |
+
|
924 |
+
Z(s) = \sum_{n=1}^{\infty} \frac{1}{\tau_n^s}
|
925 |
+
|
926 |
+
where `\frac12+i\tau_n` runs through the zeros of `\zeta(s)` with
|
927 |
+
imaginary part positive.
|
928 |
+
|
929 |
+
`Z(s)` extends to a meromorphic function on `\mathbb{C}` with a
|
930 |
+
double pole at `s=1` and simple poles at the points `-2n` for
|
931 |
+
`n=0`, 1, 2, ...
|
932 |
+
|
933 |
+
**Examples**
|
934 |
+
|
935 |
+
>>> from mpmath import *
|
936 |
+
>>> mp.pretty = True; mp.dps = 15
|
937 |
+
>>> secondzeta(2)
|
938 |
+
0.023104993115419
|
939 |
+
>>> xi = lambda s: 0.5*s*(s-1)*pi**(-0.5*s)*gamma(0.5*s)*zeta(s)
|
940 |
+
>>> Xi = lambda t: xi(0.5+t*j)
|
941 |
+
>>> chop(-0.5*diff(Xi,0,n=2)/Xi(0))
|
942 |
+
0.023104993115419
|
943 |
+
|
944 |
+
We may ask for an approximate error value::
|
945 |
+
|
946 |
+
>>> secondzeta(0.5+100j, error=True)
|
947 |
+
((-0.216272011276718 - 0.844952708937228j), 2.22044604925031e-16)
|
948 |
+
|
949 |
+
The function has poles at the negative odd integers,
|
950 |
+
and dyadic rational values at the negative even integers::
|
951 |
+
|
952 |
+
>>> mp.dps = 30
|
953 |
+
>>> secondzeta(-8)
|
954 |
+
-0.67236328125
|
955 |
+
>>> secondzeta(-7)
|
956 |
+
+inf
|
957 |
+
|
958 |
+
**Implementation notes**
|
959 |
+
|
960 |
+
The function is computed as sum of four terms `Z(s)=A(s)-P(s)+E(s)-S(s)`
|
961 |
+
respectively main, prime, exponential and singular terms.
|
962 |
+
The main term `A(s)` is computed from the zeros of zeta.
|
963 |
+
The prime term depends on the von Mangoldt function.
|
964 |
+
The singular term is responsible for the poles of the function.
|
965 |
+
|
966 |
+
The four terms depends on a small parameter `a`. We may change the
|
967 |
+
value of `a`. Theoretically this has no effect on the sum of the four
|
968 |
+
terms, but in practice may be important.
|
969 |
+
|
970 |
+
A smaller value of the parameter `a` makes `A(s)` depend on
|
971 |
+
a smaller number of zeros of zeta, but `P(s)` uses more values of
|
972 |
+
von Mangoldt function.
|
973 |
+
|
974 |
+
We may also add a verbose option to obtain data about the
|
975 |
+
values of the four terms.
|
976 |
+
|
977 |
+
>>> mp.dps = 10
|
978 |
+
>>> secondzeta(0.5 + 40j, error=True, verbose=True)
|
979 |
+
main term = (-30190318549.138656312556 - 13964804384.624622876523j)
|
980 |
+
computed using 19 zeros of zeta
|
981 |
+
prime term = (132717176.89212754625045 + 188980555.17563978290601j)
|
982 |
+
computed using 9 values of the von Mangoldt function
|
983 |
+
exponential term = (542447428666.07179812536 + 362434922978.80192435203j)
|
984 |
+
singular term = (512124392939.98154322355 + 348281138038.65531023921j)
|
985 |
+
((0.059471043 + 0.3463514534j), 1.455191523e-11)
|
986 |
+
|
987 |
+
>>> secondzeta(0.5 + 40j, a=0.04, error=True, verbose=True)
|
988 |
+
main term = (-151962888.19606243907725 - 217930683.90210294051982j)
|
989 |
+
computed using 9 zeros of zeta
|
990 |
+
prime term = (2476659342.3038722372461 + 28711581821.921627163136j)
|
991 |
+
computed using 37 values of the von Mangoldt function
|
992 |
+
exponential term = (178506047114.7838188264 + 819674143244.45677330576j)
|
993 |
+
singular term = (175877424884.22441310708 + 790744630738.28669174871j)
|
994 |
+
((0.059471043 + 0.3463514534j), 1.455191523e-11)
|
995 |
+
|
996 |
+
Notice the great cancellation between the four terms. Changing `a`, the
|
997 |
+
four terms are very different numbers but the cancellation gives
|
998 |
+
the good value of Z(s).
|
999 |
+
|
1000 |
+
**References**
|
1001 |
+
|
1002 |
+
A. Voros, Zeta functions for the Riemann zeros, Ann. Institute Fourier,
|
1003 |
+
53, (2003) 665--699.
|
1004 |
+
|
1005 |
+
A. Voros, Zeta functions over Zeros of Zeta Functions, Lecture Notes
|
1006 |
+
of the Unione Matematica Italiana, Springer, 2009.
|
1007 |
+
"""
|
1008 |
+
s = ctx.convert(s)
|
1009 |
+
a = ctx.convert(a)
|
1010 |
+
tol = ctx.eps
|
1011 |
+
if ctx.isint(s) and ctx.re(s) <= 1:
|
1012 |
+
if abs(s-1) < tol*1000:
|
1013 |
+
return ctx.inf
|
1014 |
+
m = int(round(ctx.re(s)))
|
1015 |
+
if m & 1:
|
1016 |
+
return ctx.inf
|
1017 |
+
else:
|
1018 |
+
return ((-1)**(-m//2)*\
|
1019 |
+
ctx.fraction(8-ctx.eulernum(-m,exact=True),2**(-m+3)))
|
1020 |
+
prec = ctx.prec
|
1021 |
+
try:
|
1022 |
+
t3 = secondzeta_exp_term(ctx, s, a)
|
1023 |
+
extraprec = max(ctx.mag(t3),0)
|
1024 |
+
ctx.prec += extraprec + 3
|
1025 |
+
t1, r1, gt = secondzeta_main_term(ctx,s,a,error='True', verbose='True')
|
1026 |
+
t2, r2, pt = secondzeta_prime_term(ctx,s,a,error='True', verbose='True')
|
1027 |
+
t4, r4 = secondzeta_singular_term(ctx,s,a,error='True')
|
1028 |
+
t3 = secondzeta_exp_term(ctx, s, a)
|
1029 |
+
err = r1+r2+r4
|
1030 |
+
t = t1-t2+t3-t4
|
1031 |
+
if kwargs.get("verbose"):
|
1032 |
+
print('main term =', t1)
|
1033 |
+
print(' computed using', gt, 'zeros of zeta')
|
1034 |
+
print('prime term =', t2)
|
1035 |
+
print(' computed using', pt, 'values of the von Mangoldt function')
|
1036 |
+
print('exponential term =', t3)
|
1037 |
+
print('singular term =', t4)
|
1038 |
+
finally:
|
1039 |
+
ctx.prec = prec
|
1040 |
+
if kwargs.get("error"):
|
1041 |
+
w = max(ctx.mag(abs(t)),0)
|
1042 |
+
err = max(err*2**w, ctx.eps*1.*2**w)
|
1043 |
+
return +t, err
|
1044 |
+
return +t
|
1045 |
+
|
1046 |
+
|
1047 |
+
@defun_wrapped
|
1048 |
+
def lerchphi(ctx, z, s, a):
|
1049 |
+
r"""
|
1050 |
+
Gives the Lerch transcendent, defined for `|z| < 1` and
|
1051 |
+
`\Re{a} > 0` by
|
1052 |
+
|
1053 |
+
.. math ::
|
1054 |
+
|
1055 |
+
\Phi(z,s,a) = \sum_{k=0}^{\infty} \frac{z^k}{(a+k)^s}
|
1056 |
+
|
1057 |
+
and generally by the recurrence `\Phi(z,s,a) = z \Phi(z,s,a+1) + a^{-s}`
|
1058 |
+
along with the integral representation valid for `\Re{a} > 0`
|
1059 |
+
|
1060 |
+
.. math ::
|
1061 |
+
|
1062 |
+
\Phi(z,s,a) = \frac{1}{2 a^s} +
|
1063 |
+
\int_0^{\infty} \frac{z^t}{(a+t)^s} dt -
|
1064 |
+
2 \int_0^{\infty} \frac{\sin(t \log z - s
|
1065 |
+
\operatorname{arctan}(t/a)}{(a^2 + t^2)^{s/2}
|
1066 |
+
(e^{2 \pi t}-1)} dt.
|
1067 |
+
|
1068 |
+
The Lerch transcendent generalizes the Hurwitz zeta function :func:`zeta`
|
1069 |
+
(`z = 1`) and the polylogarithm :func:`polylog` (`a = 1`).
|
1070 |
+
|
1071 |
+
**Examples**
|
1072 |
+
|
1073 |
+
Several evaluations in terms of simpler functions::
|
1074 |
+
|
1075 |
+
>>> from mpmath import *
|
1076 |
+
>>> mp.dps = 25; mp.pretty = True
|
1077 |
+
>>> lerchphi(-1,2,0.5); 4*catalan
|
1078 |
+
3.663862376708876060218414
|
1079 |
+
3.663862376708876060218414
|
1080 |
+
>>> diff(lerchphi, (-1,-2,1), (0,1,0)); 7*zeta(3)/(4*pi**2)
|
1081 |
+
0.2131391994087528954617607
|
1082 |
+
0.2131391994087528954617607
|
1083 |
+
>>> lerchphi(-4,1,1); log(5)/4
|
1084 |
+
0.4023594781085250936501898
|
1085 |
+
0.4023594781085250936501898
|
1086 |
+
>>> lerchphi(-3+2j,1,0.5); 2*atanh(sqrt(-3+2j))/sqrt(-3+2j)
|
1087 |
+
(1.142423447120257137774002 + 0.2118232380980201350495795j)
|
1088 |
+
(1.142423447120257137774002 + 0.2118232380980201350495795j)
|
1089 |
+
|
1090 |
+
Evaluation works for complex arguments and `|z| \ge 1`::
|
1091 |
+
|
1092 |
+
>>> lerchphi(1+2j, 3-j, 4+2j)
|
1093 |
+
(0.002025009957009908600539469 + 0.003327897536813558807438089j)
|
1094 |
+
>>> lerchphi(-2,2,-2.5)
|
1095 |
+
-12.28676272353094275265944
|
1096 |
+
>>> lerchphi(10,10,10)
|
1097 |
+
(-4.462130727102185701817349e-11 - 1.575172198981096218823481e-12j)
|
1098 |
+
>>> lerchphi(10,10,-10.5)
|
1099 |
+
(112658784011940.5605789002 - 498113185.5756221777743631j)
|
1100 |
+
|
1101 |
+
Some degenerate cases::
|
1102 |
+
|
1103 |
+
>>> lerchphi(0,1,2)
|
1104 |
+
0.5
|
1105 |
+
>>> lerchphi(0,1,-2)
|
1106 |
+
-0.5
|
1107 |
+
|
1108 |
+
Reduction to simpler functions::
|
1109 |
+
|
1110 |
+
>>> lerchphi(1, 4.25+1j, 1)
|
1111 |
+
(1.044674457556746668033975 - 0.04674508654012658932271226j)
|
1112 |
+
>>> zeta(4.25+1j)
|
1113 |
+
(1.044674457556746668033975 - 0.04674508654012658932271226j)
|
1114 |
+
>>> lerchphi(1 - 0.5**10, 4.25+1j, 1)
|
1115 |
+
(1.044629338021507546737197 - 0.04667768813963388181708101j)
|
1116 |
+
>>> lerchphi(3, 4, 1)
|
1117 |
+
(1.249503297023366545192592 - 0.2314252413375664776474462j)
|
1118 |
+
>>> polylog(4, 3) / 3
|
1119 |
+
(1.249503297023366545192592 - 0.2314252413375664776474462j)
|
1120 |
+
>>> lerchphi(3, 4, 1 - 0.5**10)
|
1121 |
+
(1.253978063946663945672674 - 0.2316736622836535468765376j)
|
1122 |
+
|
1123 |
+
**References**
|
1124 |
+
|
1125 |
+
1. [DLMF]_ section 25.14
|
1126 |
+
|
1127 |
+
"""
|
1128 |
+
if z == 0:
|
1129 |
+
return a ** (-s)
|
1130 |
+
# Faster, but these cases are useful for testing right now
|
1131 |
+
if z == 1:
|
1132 |
+
return ctx.zeta(s, a)
|
1133 |
+
if a == 1:
|
1134 |
+
return ctx.polylog(s, z) / z
|
1135 |
+
if ctx.re(a) < 1:
|
1136 |
+
if ctx.isnpint(a):
|
1137 |
+
raise ValueError("Lerch transcendent complex infinity")
|
1138 |
+
m = int(ctx.ceil(1-ctx.re(a)))
|
1139 |
+
v = ctx.zero
|
1140 |
+
zpow = ctx.one
|
1141 |
+
for n in xrange(m):
|
1142 |
+
v += zpow / (a+n)**s
|
1143 |
+
zpow *= z
|
1144 |
+
return zpow * ctx.lerchphi(z,s, a+m) + v
|
1145 |
+
g = ctx.ln(z)
|
1146 |
+
v = 1/(2*a**s) + ctx.gammainc(1-s, -a*g) * (-g)**(s-1) / z**a
|
1147 |
+
h = s / 2
|
1148 |
+
r = 2*ctx.pi
|
1149 |
+
f = lambda t: ctx.sin(s*ctx.atan(t/a)-t*g) / \
|
1150 |
+
((a**2+t**2)**h * ctx.expm1(r*t))
|
1151 |
+
v += 2*ctx.quad(f, [0, ctx.inf])
|
1152 |
+
if not ctx.im(z) and not ctx.im(s) and not ctx.im(a) and ctx.re(z) < 1:
|
1153 |
+
v = ctx.chop(v)
|
1154 |
+
return v
|
env-llmeval/lib/python3.10/site-packages/mpmath/functions/zetazeros.py
ADDED
@@ -0,0 +1,1018 @@
"""
The function zetazero(n) computes the n-th nontrivial zero of zeta(s).

The general strategy is to locate a block of Gram intervals B where we
know exactly the number of zeros contained, and which of those zeros
is the one we are searching for.

If n <= 400 000 000 we know exactly the Rosser exceptions, contained
in a list in this file. Hence for n <= 400 000 000 we simply
look at this list of exceptions. If our zero is implicated in one of
these exceptions we have our block B. Otherwise we simply locate
the good Rosser block containing our zero.

For n > 400 000 000 we apply the method of Turing, as complemented by
Lehman, Brent and Trudgian, to find a suitable B.
"""

from .functions import defun, defun_wrapped

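# Terminology used throughout this module (standard conventions, stated here
# for reference): the n-th Gram point g_n satisfies siegeltheta(g_n) = n*pi;
# a Gram point is "good" when (-1)**n * Z(g_n) > 0 (the quantity b returned
# by compute_triple_tvb below), and a Rosser/Gram block is an interval
# [g_a, g_b] whose endpoints are good Gram points and whose interior Gram
# points are not. Rosser's rule, whose known failures for n <= 400 000 000
# are the listed exceptions, asserts that such a block contains at least
# b - a zeros of Z.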
def find_rosser_block_zero(ctx, n):
    """For n < 400 000 000, determine a block in which our zero is located."""
22 |
+
for k in range(len(_ROSSER_EXCEPTIONS)//2):
|
23 |
+
a=_ROSSER_EXCEPTIONS[2*k][0]
|
24 |
+
b=_ROSSER_EXCEPTIONS[2*k][1]
|
25 |
+
if ((a<= n-2) and (n-1 <= b)):
|
26 |
+
t0 = ctx.grampoint(a)
|
27 |
+
t1 = ctx.grampoint(b)
|
28 |
+
v0 = ctx._fp.siegelz(t0)
|
29 |
+
v1 = ctx._fp.siegelz(t1)
|
30 |
+
my_zero_number = n-a-1
|
31 |
+
zero_number_block = b-a
|
32 |
+
pattern = _ROSSER_EXCEPTIONS[2*k+1]
|
33 |
+
return (my_zero_number, [a,b], [t0,t1], [v0,v1])
|
34 |
+
k = n-2
|
35 |
+
t,v,b = compute_triple_tvb(ctx, k)
|
36 |
+
T = [t]
|
37 |
+
V = [v]
|
38 |
+
while b < 0:
|
39 |
+
k -= 1
|
40 |
+
t,v,b = compute_triple_tvb(ctx, k)
|
41 |
+
T.insert(0,t)
|
42 |
+
V.insert(0,v)
|
43 |
+
my_zero_number = n-k-1
|
44 |
+
m = n-1
|
45 |
+
t,v,b = compute_triple_tvb(ctx, m)
|
46 |
+
T.append(t)
|
47 |
+
V.append(v)
|
48 |
+
while b < 0:
|
49 |
+
m += 1
|
50 |
+
t,v,b = compute_triple_tvb(ctx, m)
|
51 |
+
T.append(t)
|
52 |
+
V.append(v)
|
53 |
+
return (my_zero_number, [k,m], T, V)
|
54 |
+
|
55 |
+
def wpzeros(t):
|
56 |
+
"""Precision needed to compute higher zeros"""
|
57 |
+
wp = 53
|
58 |
+
if t > 3*10**8:
|
59 |
+
wp = 63
|
60 |
+
if t > 10**11:
|
61 |
+
wp = 70
|
62 |
+
if t > 10**14:
|
63 |
+
wp = 83
|
64 |
+
return wp
|
65 |
+
|
66 |
+
def separate_zeros_in_block(ctx, zero_number_block, T, V, limitloop=None,
|
67 |
+
fp_tolerance=None):
|
68 |
+
"""Separate the zeros contained in the block T, limitloop
|
69 |
+
determines how long one must search"""
|
70 |
+
if limitloop is None:
|
71 |
+
limitloop = ctx.inf
|
72 |
+
loopnumber = 0
|
73 |
+
variations = count_variations(V)
|
74 |
+
while ((variations < zero_number_block) and (loopnumber <limitloop)):
|
75 |
+
a = T[0]
|
76 |
+
v = V[0]
|
77 |
+
newT = [a]
|
78 |
+
newV = [v]
|
79 |
+
variations = 0
|
80 |
+
for n in range(1,len(T)):
|
81 |
+
b2 = T[n]
|
82 |
+
u = V[n]
|
83 |
+
if (u*v>0):
|
84 |
+
alpha = ctx.sqrt(u/v)
|
85 |
+
b= (alpha*a+b2)/(alpha+1)
|
86 |
+
else:
|
87 |
+
b = (a+b2)/2
|
88 |
+
if fp_tolerance < 10:
|
89 |
+
w = ctx._fp.siegelz(b)
|
90 |
+
if abs(w)<fp_tolerance:
|
91 |
+
w = ctx.siegelz(b)
|
92 |
+
else:
|
93 |
+
w=ctx.siegelz(b)
|
94 |
+
if v*w<0:
|
95 |
+
variations += 1
|
96 |
+
newT.append(b)
|
97 |
+
newV.append(w)
|
98 |
+
u = V[n]
|
99 |
+
if u*w <0:
|
100 |
+
variations += 1
|
101 |
+
newT.append(b2)
|
102 |
+
newV.append(u)
|
103 |
+
a = b2
|
104 |
+
v = u
|
105 |
+
T = newT
|
106 |
+
V = newV
|
107 |
+
loopnumber +=1
|
108 |
+
if (limitloop>ITERATION_LIMIT)and(loopnumber>2)and(variations+2==zero_number_block):
|
109 |
+
dtMax=0
|
110 |
+
dtSec=0
|
111 |
+
kMax = 0
|
112 |
+
for k1 in range(1,len(T)):
|
113 |
+
dt = T[k1]-T[k1-1]
|
114 |
+
if dt > dtMax:
|
115 |
+
kMax=k1
|
116 |
+
dtSec = dtMax
|
117 |
+
dtMax = dt
|
118 |
+
elif (dt<dtMax) and(dt >dtSec):
|
119 |
+
dtSec = dt
|
120 |
+
if dtMax>3*dtSec:
|
121 |
+
f = lambda x: ctx.rs_z(x,derivative=1)
|
122 |
+
t0=T[kMax-1]
|
123 |
+
t1 = T[kMax]
|
124 |
+
t=ctx.findroot(f, (t0,t1), solver ='illinois',verify=False, verbose=False)
|
125 |
+
v = ctx.siegelz(t)
|
126 |
+
if (t0<t) and (t<t1) and (v*V[kMax]<0):
|
127 |
+
T.insert(kMax,t)
|
128 |
+
V.insert(kMax,v)
|
129 |
+
variations = count_variations(V)
|
130 |
+
if variations == zero_number_block:
|
131 |
+
separated = True
|
132 |
+
else:
|
133 |
+
separated = False
|
134 |
+
return (T,V, separated)
|
135 |
+
|
136 |
+
def separate_my_zero(ctx, my_zero_number, zero_number_block, T, V, prec):
|
137 |
+
"""If we know which zero of this block is mine,
|
138 |
+
the function separates the zero"""
|
139 |
+
variations = 0
|
140 |
+
v0 = V[0]
|
141 |
+
for k in range(1,len(V)):
|
142 |
+
v1 = V[k]
|
143 |
+
if v0*v1 < 0:
|
144 |
+
variations +=1
|
145 |
+
if variations == my_zero_number:
|
146 |
+
k0 = k
|
147 |
+
leftv = v0
|
148 |
+
rightv = v1
|
149 |
+
v0 = v1
|
150 |
+
t1 = T[k0]
|
151 |
+
t0 = T[k0-1]
|
152 |
+
ctx.prec = prec
|
153 |
+
wpz = wpzeros(my_zero_number*ctx.log(my_zero_number))
|
154 |
+
|
155 |
+
guard = 4*ctx.mag(my_zero_number)
|
156 |
+
precs = [ctx.prec+4]
|
157 |
+
index=0
|
158 |
+
while precs[0] > 2*wpz:
|
159 |
+
index +=1
|
160 |
+
precs = [precs[0] // 2 +3+2*index] + precs
|
161 |
+
ctx.prec = precs[0] + guard
|
162 |
+
r = ctx.findroot(lambda x:ctx.siegelz(x), (t0,t1), solver ='illinois', verbose=False)
|
163 |
+
#print "first step at", ctx.dps, "digits"
|
164 |
+
z=ctx.mpc(0.5,r)
|
165 |
+
for prec in precs[1:]:
|
166 |
+
ctx.prec = prec + guard
|
167 |
+
#print "refining to", ctx.dps, "digits"
|
168 |
+
znew = z - ctx.zeta(z) / ctx.zeta(z, derivative=1)
|
169 |
+
#print "difference", ctx.nstr(abs(z-znew))
|
170 |
+
z=ctx.mpc(0.5,ctx.im(znew))
|
171 |
+
return ctx.im(z)
|
172 |
+
|
173 |
+
def sure_number_block(ctx, n):
|
174 |
+
"""The number of good Rosser blocks needed to apply
|
175 |
+
Turing method
|
176 |
+
References:
|
177 |
+
R. P. Brent, On the Zeros of the Riemann Zeta Function
|
178 |
+
in the Critical Strip, Math. Comp. 33 (1979) 1361--1372
|
179 |
+
T. Trudgian, Improvements to Turing Method, Math. Comp."""
|
180 |
+
if n < 9*10**5:
|
181 |
+
return(2)
|
182 |
+
g = ctx.grampoint(n-100)
|
183 |
+
lg = ctx._fp.ln(g)
|
184 |
+
brent = 0.0061 * lg**2 +0.08*lg
|
185 |
+
trudgian = 0.0031 * lg**2 +0.11*lg
|
186 |
+
N = ctx.ceil(min(brent,trudgian))
|
187 |
+
N = int(N)
|
188 |
+
return N
|
189 |
+
|
190 |
+
def compute_triple_tvb(ctx, n):
|
191 |
+
t = ctx.grampoint(n)
|
192 |
+
v = ctx._fp.siegelz(t)
|
193 |
+
if ctx.mag(abs(v))<ctx.mag(t)-45:
|
194 |
+
v = ctx.siegelz(t)
|
195 |
+
b = v*(-1)**n
|
196 |
+
return t,v,b
|
197 |
+
|
198 |
+
|
199 |
+
|
200 |
+
ITERATION_LIMIT = 4
|
201 |
+
|
202 |
+
def search_supergood_block(ctx, n, fp_tolerance):
|
203 |
+
"""To use for n>400 000 000"""
|
204 |
+
sb = sure_number_block(ctx, n)
|
205 |
+
number_goodblocks = 0
|
206 |
+
m2 = n-1
|
207 |
+
t, v, b = compute_triple_tvb(ctx, m2)
|
208 |
+
Tf = [t]
|
209 |
+
Vf = [v]
|
210 |
+
while b < 0:
|
211 |
+
m2 += 1
|
212 |
+
t,v,b = compute_triple_tvb(ctx, m2)
|
213 |
+
Tf.append(t)
|
214 |
+
Vf.append(v)
|
215 |
+
goodpoints = [m2]
|
216 |
+
T = [t]
|
217 |
+
V = [v]
|
218 |
+
while number_goodblocks < 2*sb:
|
219 |
+
m2 += 1
|
220 |
+
t, v, b = compute_triple_tvb(ctx, m2)
|
221 |
+
T.append(t)
|
222 |
+
V.append(v)
|
223 |
+
while b < 0:
|
224 |
+
m2 += 1
|
225 |
+
t,v,b = compute_triple_tvb(ctx, m2)
|
226 |
+
T.append(t)
|
227 |
+
V.append(v)
|
228 |
+
goodpoints.append(m2)
|
229 |
+
zn = len(T)-1
|
230 |
+
A, B, separated =\
|
231 |
+
separate_zeros_in_block(ctx, zn, T, V, limitloop=ITERATION_LIMIT,
|
232 |
+
fp_tolerance=fp_tolerance)
|
233 |
+
Tf.pop()
|
234 |
+
Tf.extend(A)
|
235 |
+
Vf.pop()
|
236 |
+
Vf.extend(B)
|
237 |
+
if separated:
|
238 |
+
number_goodblocks += 1
|
239 |
+
else:
|
240 |
+
number_goodblocks = 0
|
241 |
+
T = [t]
|
242 |
+
V = [v]
|
243 |
+
# Now the same procedure to the left
|
244 |
+
number_goodblocks = 0
|
245 |
+
m2 = n-2
|
246 |
+
t, v, b = compute_triple_tvb(ctx, m2)
|
247 |
+
Tf.insert(0,t)
|
248 |
+
Vf.insert(0,v)
|
249 |
+
while b < 0:
|
250 |
+
m2 -= 1
|
251 |
+
t,v,b = compute_triple_tvb(ctx, m2)
|
252 |
+
Tf.insert(0,t)
|
253 |
+
Vf.insert(0,v)
|
254 |
+
goodpoints.insert(0,m2)
|
255 |
+
T = [t]
|
256 |
+
V = [v]
|
257 |
+
while number_goodblocks < 2*sb:
|
258 |
+
m2 -= 1
|
259 |
+
t, v, b = compute_triple_tvb(ctx, m2)
|
260 |
+
T.insert(0,t)
|
261 |
+
V.insert(0,v)
|
262 |
+
while b < 0:
|
263 |
+
m2 -= 1
|
264 |
+
t,v,b = compute_triple_tvb(ctx, m2)
|
265 |
+
T.insert(0,t)
|
266 |
+
V.insert(0,v)
|
267 |
+
goodpoints.insert(0,m2)
|
268 |
+
zn = len(T)-1
|
269 |
+
A, B, separated =\
|
270 |
+
separate_zeros_in_block(ctx, zn, T, V, limitloop=ITERATION_LIMIT, fp_tolerance=fp_tolerance)
|
271 |
+
A.pop()
|
272 |
+
Tf = A+Tf
|
273 |
+
B.pop()
|
274 |
+
Vf = B+Vf
|
275 |
+
if separated:
|
276 |
+
number_goodblocks += 1
|
277 |
+
else:
|
278 |
+
number_goodblocks = 0
|
279 |
+
T = [t]
|
280 |
+
V = [v]
|
281 |
+
r = goodpoints[2*sb]
|
282 |
+
lg = len(goodpoints)
|
283 |
+
s = goodpoints[lg-2*sb-1]
|
284 |
+
tr, vr, br = compute_triple_tvb(ctx, r)
|
285 |
+
ar = Tf.index(tr)
|
286 |
+
ts, vs, bs = compute_triple_tvb(ctx, s)
|
287 |
+
as1 = Tf.index(ts)
|
288 |
+
T = Tf[ar:as1+1]
|
289 |
+
V = Vf[ar:as1+1]
|
290 |
+
zn = s-r
|
291 |
+
A, B, separated =\
|
292 |
+
separate_zeros_in_block(ctx, zn,T,V,limitloop=ITERATION_LIMIT, fp_tolerance=fp_tolerance)
|
293 |
+
if separated:
|
294 |
+
return (n-r-1,[r,s],A,B)
|
295 |
+
q = goodpoints[sb]
|
296 |
+
lg = len(goodpoints)
|
297 |
+
t = goodpoints[lg-sb-1]
|
298 |
+
tq, vq, bq = compute_triple_tvb(ctx, q)
|
299 |
+
aq = Tf.index(tq)
|
300 |
+
tt, vt, bt = compute_triple_tvb(ctx, t)
|
301 |
+
at = Tf.index(tt)
|
302 |
+
T = Tf[aq:at+1]
|
303 |
+
V = Vf[aq:at+1]
|
304 |
+
return (n-q-1,[q,t],T,V)
|
305 |
+
|
306 |
+
def count_variations(V):
|
307 |
+
count = 0
|
308 |
+
vold = V[0]
|
309 |
+
for n in range(1, len(V)):
|
310 |
+
vnew = V[n]
|
311 |
+
if vold*vnew < 0:
|
312 |
+
count +=1
|
313 |
+
vold = vnew
|
314 |
+
return count
|
315 |
+
|
316 |
+
def pattern_construct(ctx, block, T, V):
|
317 |
+
pattern = '('
|
318 |
+
a = block[0]
|
319 |
+
b = block[1]
|
320 |
+
t0,v0,b0 = compute_triple_tvb(ctx, a)
|
321 |
+
k = 0
|
322 |
+
k0 = 0
|
323 |
+
for n in range(a+1,b+1):
|
324 |
+
t1,v1,b1 = compute_triple_tvb(ctx, n)
|
325 |
+
lgT =len(T)
|
326 |
+
while (k < lgT) and (T[k] <= t1):
|
327 |
+
k += 1
|
328 |
+
L = V[k0:k]
|
329 |
+
L.append(v1)
|
330 |
+
L.insert(0,v0)
|
331 |
+
count = count_variations(L)
|
332 |
+
pattern = pattern + ("%s" % count)
|
333 |
+
if b1 > 0:
|
334 |
+
pattern = pattern + ')('
|
335 |
+
k0 = k
|
336 |
+
t0,v0,b0 = t1,v1,b1
|
337 |
+
pattern = pattern[:-1]
|
338 |
+
return pattern
|
339 |
+
|
340 |
+
@defun
|
341 |
+
def zetazero(ctx, n, info=False, round=True):
|
342 |
+
r"""
|
343 |
+
Computes the `n`-th nontrivial zero of `\zeta(s)` on the critical line,
|
344 |
+
i.e. returns an approximation of the `n`-th largest complex number
|
345 |
+
`s = \frac{1}{2} + ti` for which `\zeta(s) = 0`. Equivalently, the
|
346 |
+
imaginary part `t` is a zero of the Z-function (:func:`~mpmath.siegelz`).
|
347 |
+
|
348 |
+
**Examples**
|
349 |
+
|
350 |
+
The first few zeros::
|
351 |
+
|
352 |
+
>>> from mpmath import *
|
353 |
+
>>> mp.dps = 25; mp.pretty = True
|
354 |
+
>>> zetazero(1)
|
355 |
+
(0.5 + 14.13472514173469379045725j)
|
356 |
+
>>> zetazero(2)
|
357 |
+
(0.5 + 21.02203963877155499262848j)
|
358 |
+
>>> zetazero(20)
|
359 |
+
(0.5 + 77.14484006887480537268266j)
|
360 |
+
|
361 |
+
Verifying that the values are zeros::
|
362 |
+
|
363 |
+
>>> for n in range(1,5):
|
364 |
+
... s = zetazero(n)
|
365 |
+
... chop(zeta(s)), chop(siegelz(s.imag))
|
366 |
+
...
|
367 |
+
(0.0, 0.0)
|
368 |
+
(0.0, 0.0)
|
369 |
+
(0.0, 0.0)
|
370 |
+
(0.0, 0.0)
|
371 |
+
|
372 |
+
Negative indices give the conjugate zeros (`n = 0` is undefined)::
|
373 |
+
|
374 |
+
>>> zetazero(-1)
|
375 |
+
(0.5 - 14.13472514173469379045725j)
|
376 |
+
|
377 |
+
:func:`~mpmath.zetazero` supports arbitrarily large `n` and arbitrary precision::
|
378 |
+
|
379 |
+
>>> mp.dps = 15
|
380 |
+
>>> zetazero(1234567)
|
381 |
+
(0.5 + 727690.906948208j)
|
382 |
+
>>> mp.dps = 50
|
383 |
+
>>> zetazero(1234567)
|
384 |
+
(0.5 + 727690.9069482075392389420041147142092708393819935j)
|
385 |
+
>>> chop(zeta(_)/_)
|
386 |
+
0.0
|
387 |
+
|
388 |
+
with *info=True*, :func:`~mpmath.zetazero` gives additional information::
|
389 |
+
|
390 |
+
>>> mp.dps = 15
|
391 |
+
>>> zetazero(542964976,info=True)
|
392 |
+
((0.5 + 209039046.578535j), [542964969, 542964978], 6, '(013111110)')
|
393 |
+
|
394 |
+
This means that the zero is between Gram points 542964969 and 542964978;
|
395 |
+
it is the 6-th zero between them. Finally (01311110) is the pattern
|
396 |
+
of zeros in this interval. The numbers indicate the number of zeros
|
397 |
+
in each Gram interval (Rosser blocks between parenthesis). In this case
|
398 |
+
there is only one Rosser block of length nine.
|
399 |
+
"""
|
400 |
+
n = int(n)
|
401 |
+
if n < 0:
|
402 |
+
return ctx.zetazero(-n).conjugate()
|
403 |
+
if n == 0:
|
404 |
+
raise ValueError("n must be nonzero")
|
405 |
+
wpinitial = ctx.prec
|
406 |
+
try:
|
407 |
+
wpz, fp_tolerance = comp_fp_tolerance(ctx, n)
|
408 |
+
ctx.prec = wpz
|
409 |
+
if n < 400000000:
|
410 |
+
my_zero_number, block, T, V =\
|
411 |
+
find_rosser_block_zero(ctx, n)
|
412 |
+
else:
|
413 |
+
my_zero_number, block, T, V =\
|
414 |
+
search_supergood_block(ctx, n, fp_tolerance)
|
415 |
+
zero_number_block = block[1]-block[0]
|
416 |
+
T, V, separated = separate_zeros_in_block(ctx, zero_number_block, T, V,
|
417 |
+
limitloop=ctx.inf, fp_tolerance=fp_tolerance)
|
418 |
+
if info:
|
419 |
+
pattern = pattern_construct(ctx,block,T,V)
|
420 |
+
prec = max(wpinitial, wpz)
|
421 |
+
t = separate_my_zero(ctx, my_zero_number, zero_number_block,T,V,prec)
|
422 |
+
v = ctx.mpc(0.5,t)
|
423 |
+
finally:
|
424 |
+
ctx.prec = wpinitial
|
425 |
+
if round:
|
426 |
+
v = +v
|
427 |
+
if info:
|
428 |
+
return (v,block,my_zero_number,pattern)
|
429 |
+
else:
|
430 |
+
return v
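Editor's note: the following short sketch (not part of the library source; the small index and the variable names are illustrative) shows how the tuple documented above for *info=True* can be unpacked.

# Sketch only: unpack the info tuple described in the zetazero docstring.
from mpmath import mp, zetazero
mp.dps = 15
zero, block, index_in_block, pattern = zetazero(20, info=True)
# block holds the bounding Gram point indices, index_in_block the position of
# this zero among the zeros of the Rosser block, and pattern the zero counts
# per Gram interval, exactly as in the 542964976 example above.
print(zero, block, index_in_block, pattern)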
|
431 |
+
|
432 |
+
def gram_index(ctx, t):
|
433 |
+
if t > 10**13:
|
434 |
+
wp = 3*ctx.log(t, 10)
|
435 |
+
else:
|
436 |
+
wp = 0
|
437 |
+
prec = ctx.prec
|
438 |
+
try:
|
439 |
+
ctx.prec += wp
|
440 |
+
h = int(ctx.siegeltheta(t)/ctx.pi)
|
441 |
+
finally:
|
442 |
+
ctx.prec = prec
|
443 |
+
return h
|
444 |
+
|
445 |
+
def count_to(ctx, t, T, V):
|
446 |
+
count = 0
|
447 |
+
vold = V[0]
|
448 |
+
told = T[0]
|
449 |
+
tnew = T[1]
|
450 |
+
k = 1
|
451 |
+
while tnew < t:
|
452 |
+
vnew = V[k]
|
453 |
+
if vold*vnew < 0:
|
454 |
+
count += 1
|
455 |
+
vold = vnew
|
456 |
+
k += 1
|
457 |
+
tnew = T[k]
|
458 |
+
a = ctx.siegelz(t)
|
459 |
+
if a*vold < 0:
|
460 |
+
count += 1
|
461 |
+
return count
|
462 |
+
|
463 |
+
def comp_fp_tolerance(ctx, n):
|
464 |
+
wpz = wpzeros(n*ctx.log(n))
|
465 |
+
if n < 15*10**8:
|
466 |
+
fp_tolerance = 0.0005
|
467 |
+
elif n <= 10**14:
|
468 |
+
fp_tolerance = 0.1
|
469 |
+
else:
|
470 |
+
fp_tolerance = 100
|
471 |
+
return wpz, fp_tolerance
|
472 |
+
|
473 |
+
@defun
|
474 |
+
def nzeros(ctx, t):
|
475 |
+
r"""
|
476 |
+
Computes the number of zeros of the Riemann zeta function in
|
477 |
+
`(0,1) \times (0,t]`, usually denoted by `N(t)`.
|
478 |
+
|
479 |
+
**Examples**
|
480 |
+
|
481 |
+
The first zero has imaginary part between 14 and 15::
|
482 |
+
|
483 |
+
>>> from mpmath import *
|
484 |
+
>>> mp.dps = 15; mp.pretty = True
|
485 |
+
>>> nzeros(14)
|
486 |
+
0
|
487 |
+
>>> nzeros(15)
|
488 |
+
1
|
489 |
+
>>> zetazero(1)
|
490 |
+
(0.5 + 14.1347251417347j)
|
491 |
+
|
492 |
+
Some closely spaced zeros::
|
493 |
+
|
494 |
+
>>> nzeros(10**7)
|
495 |
+
21136125
|
496 |
+
>>> zetazero(21136125)
|
497 |
+
(0.5 + 9999999.32718175j)
|
498 |
+
>>> zetazero(21136126)
|
499 |
+
(0.5 + 10000000.2400236j)
|
500 |
+
>>> nzeros(545439823.215)
|
501 |
+
1500000001
|
502 |
+
>>> zetazero(1500000001)
|
503 |
+
(0.5 + 545439823.201985j)
|
504 |
+
>>> zetazero(1500000002)
|
505 |
+
(0.5 + 545439823.325697j)
|
506 |
+
|
507 |
+
This confirms the data given by J. van de Lune,
|
508 |
+
H. J. J. te Riele and D. T. Winter in 1986.
|
509 |
+
"""
|
510 |
+
if t < 14.1347251417347:
|
511 |
+
return 0
|
512 |
+
x = gram_index(ctx, t)
|
513 |
+
k = int(ctx.floor(x))
|
514 |
+
wpinitial = ctx.prec
|
515 |
+
wpz, fp_tolerance = comp_fp_tolerance(ctx, k)
|
516 |
+
ctx.prec = wpz
|
517 |
+
a = ctx.siegelz(t)
|
518 |
+
if k == -1 and a < 0:
|
519 |
+
return 0
|
520 |
+
elif k == -1 and a > 0:
|
521 |
+
return 1
|
522 |
+
if k+2 < 400000000:
|
523 |
+
Rblock = find_rosser_block_zero(ctx, k+2)
|
524 |
+
else:
|
525 |
+
Rblock = search_supergood_block(ctx, k+2, fp_tolerance)
|
526 |
+
n1, n2 = Rblock[1]
|
527 |
+
if n2-n1 == 1:
|
528 |
+
b = Rblock[3][0]
|
529 |
+
if a*b > 0:
|
530 |
+
ctx.prec = wpinitial
|
531 |
+
return k+1
|
532 |
+
else:
|
533 |
+
ctx.prec = wpinitial
|
534 |
+
return k+2
|
535 |
+
my_zero_number, block, T, V = Rblock
|
536 |
+
zero_number_block = n2-n1
|
537 |
+
T, V, separated = separate_zeros_in_block(ctx,\
|
538 |
+
zero_number_block, T, V,\
|
539 |
+
limitloop=ctx.inf,\
|
540 |
+
fp_tolerance=fp_tolerance)
|
541 |
+
n = count_to(ctx, t, T, V)
|
542 |
+
ctx.prec = wpinitial
|
543 |
+
return n+n1+1
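Editor's note: a small consistency check (illustration only, assuming nothing beyond the documented behaviour above): N(t) should equal the number of zeros returned by zetazero whose imaginary parts do not exceed t.

# Sketch only: cross-check nzeros against zetazero at a small height t.
from mpmath import mp, nzeros, zetazero
mp.dps = 15
t = 30
count = 0
n = 1
while zetazero(n).imag <= t:
    count += 1
    n += 1
print(count, nzeros(t))  # both give the number of zeros with 0 < Im(s) <= t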
|
544 |
+
|
545 |
+
@defun_wrapped
|
546 |
+
def backlunds(ctx, t):
|
547 |
+
r"""
|
548 |
+
Computes the function
|
549 |
+
`S(t) = \operatorname{arg} \zeta(\frac{1}{2} + it) / \pi`.
|
550 |
+
|
551 |
+
See Titchmarsh Section 9.3 for details of the definition.
|
552 |
+
|
553 |
+
**Examples**
|
554 |
+
|
555 |
+
>>> from mpmath import *
|
556 |
+
>>> mp.dps = 15; mp.pretty = True
|
557 |
+
>>> backlunds(217.3)
|
558 |
+
0.16302205431184
|
559 |
+
|
560 |
+
Generally, the value is a small number. At Gram points it is an integer,
|
561 |
+
frequently equal to 0::
|
562 |
+
|
563 |
+
>>> chop(backlunds(grampoint(200)))
|
564 |
+
0.0
|
565 |
+
>>> backlunds(extraprec(10)(grampoint)(211))
|
566 |
+
1.0
|
567 |
+
>>> backlunds(extraprec(10)(grampoint)(232))
|
568 |
+
-1.0
|
569 |
+
|
570 |
+
The number of zeros of the Riemann zeta function up to height `t`
|
571 |
+
satisfies `N(t) = \theta(t)/\pi + 1 + S(t)` (see :func:`nzeros` and
|
572 |
+
:func:`siegeltheta`)::
|
573 |
+
|
574 |
+
>>> t = 1234.55
|
575 |
+
>>> nzeros(t)
|
576 |
+
842
|
577 |
+
>>> siegeltheta(t)/pi+1+backlunds(t)
|
578 |
+
842.0
|
579 |
+
|
580 |
+
"""
|
581 |
+
return ctx.nzeros(t)-1-ctx.siegeltheta(t)/ctx.pi
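Editor's note: the implementation above is just the counting formula from the docstring rearranged to S(t) = N(t) - 1 - theta(t)/pi; the sketch below (illustration only) makes that explicit at one sample height.

# Sketch only: verify the rearranged identity at t = 1234.55.
from mpmath import mp, nzeros, siegeltheta, backlunds, pi, chop
mp.dps = 15
t = 1234.55
print(backlunds(t))
print(chop(nzeros(t) - 1 - siegeltheta(t)/pi - backlunds(t)))  # expected: 0.0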
|
582 |
+
|
583 |
+
|
584 |
+
"""
|
585 |
+
_ROSSER_EXCEPTIONS is a list of all exceptions to
|
586 |
+
Rosser's rule for n <= 400 000 000.
|
587 |
+
|
588 |
+
The entries alternate between a pair [n, m] and a string.
|
589 |
+
The string is the zero pattern of the block and of the relevant
|
590 |
+
adjacent Gram intervals. For example, (010)3 corresponds to a block
|
591 |
+
composed of three Gram intervals, the first and third without
|
592 |
+
a zero and the middle one with a zero. The next Gram interval
|
593 |
+
contains three zeros, so in total there are 4 zeros in 4 Gram
|
594 |
+
intervals. n and m are the indices of the Gram points of this
|
595 |
+
run of four Gram intervals. The Rosser exception is therefore
|
596 |
+
formed by the three Gram intervals that are enclosed in
|
597 |
+
parentheses.
|
598 |
+
|
599 |
+
We have also included some Rosser exceptions beyond n = 400 000 000
|
600 |
+
that are noted in the literature for various reasons.
|
601 |
+
|
602 |
+
The list is compiled from the data published in the references:
|
603 |
+
|
604 |
+
R. P. Brent, J. van de Lune, H. J. J. te Riele, D. T. Winter,
|
605 |
+
'On the Zeros of the Riemann Zeta Function in the Critical Strip. II',
|
606 |
+
Math. Comp. 39 (1982) 681--688.
|
607 |
+
See also Corrigenda in Math. Comp. 46 (1986) 771.
|
608 |
+
|
609 |
+
J. van de Lune, H. J. J. te Riele,
|
610 |
+
'On the Zeros of the Riemann Zeta Function in the Critical Strip. III',
|
611 |
+
Math. Comp. 41 (1983) 759--767.
|
612 |
+
See also Corrigenda in Math. Comp. 46 (1986) 771.
|
613 |
+
|
614 |
+
J. van de Lune,
|
615 |
+
'Sums of Equal Powers of Positive Integers',
|
616 |
+
Dissertation,
|
617 |
+
Vrije Universiteit te Amsterdam, Centrum voor Wiskunde en Informatica,
|
618 |
+
Amsterdam, 1984.
|
619 |
+
|
620 |
+
Thanks to the authors of all these papers and to the others who have
|
621 |
+
contributed to make this possible.
|
622 |
+
"""
|
623 |
+
|
624 |
+
|
625 |
+
|
626 |
+
|
627 |
+
|
628 |
+
|
629 |
+
|
630 |
+
_ROSSER_EXCEPTIONS = \
|
631 |
+
[[13999525, 13999528], '(00)3',
|
632 |
+
[30783329, 30783332], '(00)3',
|
633 |
+
[30930926, 30930929], '3(00)',
|
634 |
+
[37592215, 37592218], '(00)3',
|
635 |
+
[40870156, 40870159], '(00)3',
|
636 |
+
[43628107, 43628110], '(00)3',
|
637 |
+
[46082042, 46082045], '(00)3',
|
638 |
+
[46875667, 46875670], '(00)3',
|
639 |
+
[49624540, 49624543], '3(00)',
|
640 |
+
[50799238, 50799241], '(00)3',
|
641 |
+
[55221453, 55221456], '3(00)',
|
642 |
+
[56948779, 56948782], '3(00)',
|
643 |
+
[60515663, 60515666], '(00)3',
|
644 |
+
[61331766, 61331770], '(00)40',
|
645 |
+
[69784843, 69784846], '3(00)',
|
646 |
+
[75052114, 75052117], '(00)3',
|
647 |
+
[79545240, 79545243], '3(00)',
|
648 |
+
[79652247, 79652250], '3(00)',
|
649 |
+
[83088043, 83088046], '(00)3',
|
650 |
+
[83689522, 83689525], '3(00)',
|
651 |
+
[85348958, 85348961], '(00)3',
|
652 |
+
[86513820, 86513823], '(00)3',
|
653 |
+
[87947596, 87947599], '3(00)',
|
654 |
+
[88600095, 88600098], '(00)3',
|
655 |
+
[93681183, 93681186], '(00)3',
|
656 |
+
[100316551, 100316554], '3(00)',
|
657 |
+
[100788444, 100788447], '(00)3',
|
658 |
+
[106236172, 106236175], '(00)3',
|
659 |
+
[106941327, 106941330], '3(00)',
|
660 |
+
[107287955, 107287958], '(00)3',
|
661 |
+
[107532016, 107532019], '3(00)',
|
662 |
+
[110571044, 110571047], '(00)3',
|
663 |
+
[111885253, 111885256], '3(00)',
|
664 |
+
[113239783, 113239786], '(00)3',
|
665 |
+
[120159903, 120159906], '(00)3',
|
666 |
+
[121424391, 121424394], '3(00)',
|
667 |
+
[121692931, 121692934], '3(00)',
|
668 |
+
[121934170, 121934173], '3(00)',
|
669 |
+
[122612848, 122612851], '3(00)',
|
670 |
+
[126116567, 126116570], '(00)3',
|
671 |
+
[127936513, 127936516], '(00)3',
|
672 |
+
[128710277, 128710280], '3(00)',
|
673 |
+
[129398902, 129398905], '3(00)',
|
674 |
+
[130461096, 130461099], '3(00)',
|
675 |
+
[131331947, 131331950], '3(00)',
|
676 |
+
[137334071, 137334074], '3(00)',
|
677 |
+
[137832603, 137832606], '(00)3',
|
678 |
+
[138799471, 138799474], '3(00)',
|
679 |
+
[139027791, 139027794], '(00)3',
|
680 |
+
[141617806, 141617809], '(00)3',
|
681 |
+
[144454931, 144454934], '(00)3',
|
682 |
+
[145402379, 145402382], '3(00)',
|
683 |
+
[146130245, 146130248], '3(00)',
|
684 |
+
[147059770, 147059773], '(00)3',
|
685 |
+
[147896099, 147896102], '3(00)',
|
686 |
+
[151097113, 151097116], '(00)3',
|
687 |
+
[152539438, 152539441], '(00)3',
|
688 |
+
[152863168, 152863171], '3(00)',
|
689 |
+
[153522726, 153522729], '3(00)',
|
690 |
+
[155171524, 155171527], '3(00)',
|
691 |
+
[155366607, 155366610], '(00)3',
|
692 |
+
[157260686, 157260689], '3(00)',
|
693 |
+
[157269224, 157269227], '(00)3',
|
694 |
+
[157755123, 157755126], '(00)3',
|
695 |
+
[158298484, 158298487], '3(00)',
|
696 |
+
[160369050, 160369053], '3(00)',
|
697 |
+
[162962787, 162962790], '(00)3',
|
698 |
+
[163724709, 163724712], '(00)3',
|
699 |
+
[164198113, 164198116], '3(00)',
|
700 |
+
[164689301, 164689305], '(00)40',
|
701 |
+
[164880228, 164880231], '3(00)',
|
702 |
+
[166201932, 166201935], '(00)3',
|
703 |
+
[168573836, 168573839], '(00)3',
|
704 |
+
[169750763, 169750766], '(00)3',
|
705 |
+
[170375507, 170375510], '(00)3',
|
706 |
+
[170704879, 170704882], '3(00)',
|
707 |
+
[172000992, 172000995], '3(00)',
|
708 |
+
[173289941, 173289944], '(00)3',
|
709 |
+
[173737613, 173737616], '3(00)',
|
710 |
+
[174102513, 174102516], '(00)3',
|
711 |
+
[174284990, 174284993], '(00)3',
|
712 |
+
[174500513, 174500516], '(00)3',
|
713 |
+
[175710609, 175710612], '(00)3',
|
714 |
+
[176870843, 176870846], '3(00)',
|
715 |
+
[177332732, 177332735], '3(00)',
|
716 |
+
[177902861, 177902864], '3(00)',
|
717 |
+
[179979095, 179979098], '(00)3',
|
718 |
+
[181233726, 181233729], '3(00)',
|
719 |
+
[181625435, 181625438], '(00)3',
|
720 |
+
[182105255, 182105259], '22(00)',
|
721 |
+
[182223559, 182223562], '3(00)',
|
722 |
+
[191116404, 191116407], '3(00)',
|
723 |
+
[191165599, 191165602], '3(00)',
|
724 |
+
[191297535, 191297539], '(00)22',
|
725 |
+
[192485616, 192485619], '(00)3',
|
726 |
+
[193264634, 193264638], '22(00)',
|
727 |
+
[194696968, 194696971], '(00)3',
|
728 |
+
[195876805, 195876808], '(00)3',
|
729 |
+
[195916548, 195916551], '3(00)',
|
730 |
+
[196395160, 196395163], '3(00)',
|
731 |
+
[196676303, 196676306], '(00)3',
|
732 |
+
[197889882, 197889885], '3(00)',
|
733 |
+
[198014122, 198014125], '(00)3',
|
734 |
+
[199235289, 199235292], '(00)3',
|
735 |
+
[201007375, 201007378], '(00)3',
|
736 |
+
[201030605, 201030608], '3(00)',
|
737 |
+
[201184290, 201184293], '3(00)',
|
738 |
+
[201685414, 201685418], '(00)22',
|
739 |
+
[202762875, 202762878], '3(00)',
|
740 |
+
[202860957, 202860960], '3(00)',
|
741 |
+
[203832577, 203832580], '3(00)',
|
742 |
+
[205880544, 205880547], '(00)3',
|
743 |
+
[206357111, 206357114], '(00)3',
|
744 |
+
[207159767, 207159770], '3(00)',
|
745 |
+
[207167343, 207167346], '3(00)',
|
746 |
+
[207482539, 207482543], '3(010)',
|
747 |
+
[207669540, 207669543], '3(00)',
|
748 |
+
[208053426, 208053429], '(00)3',
|
749 |
+
[208110027, 208110030], '3(00)',
|
750 |
+
[209513826, 209513829], '3(00)',
|
751 |
+
[212623522, 212623525], '(00)3',
|
752 |
+
[213841715, 213841718], '(00)3',
|
753 |
+
[214012333, 214012336], '(00)3',
|
754 |
+
[214073567, 214073570], '(00)3',
|
755 |
+
[215170600, 215170603], '3(00)',
|
756 |
+
[215881039, 215881042], '3(00)',
|
757 |
+
[216274604, 216274607], '3(00)',
|
758 |
+
[216957120, 216957123], '3(00)',
|
759 |
+
[217323208, 217323211], '(00)3',
|
760 |
+
[218799264, 218799267], '(00)3',
|
761 |
+
[218803557, 218803560], '3(00)',
|
762 |
+
[219735146, 219735149], '(00)3',
|
763 |
+
[219830062, 219830065], '3(00)',
|
764 |
+
[219897904, 219897907], '(00)3',
|
765 |
+
[221205545, 221205548], '(00)3',
|
766 |
+
[223601929, 223601932], '(00)3',
|
767 |
+
[223907076, 223907079], '3(00)',
|
768 |
+
[223970397, 223970400], '(00)3',
|
769 |
+
[224874044, 224874048], '22(00)',
|
770 |
+
[225291157, 225291160], '(00)3',
|
771 |
+
[227481734, 227481737], '(00)3',
|
772 |
+
[228006442, 228006445], '3(00)',
|
773 |
+
[228357900, 228357903], '(00)3',
|
774 |
+
[228386399, 228386402], '(00)3',
|
775 |
+
[228907446, 228907449], '(00)3',
|
776 |
+
[228984552, 228984555], '3(00)',
|
777 |
+
[229140285, 229140288], '3(00)',
|
778 |
+
[231810024, 231810027], '(00)3',
|
779 |
+
[232838062, 232838065], '3(00)',
|
780 |
+
[234389088, 234389091], '3(00)',
|
781 |
+
[235588194, 235588197], '(00)3',
|
782 |
+
[236645695, 236645698], '(00)3',
|
783 |
+
[236962876, 236962879], '3(00)',
|
784 |
+
[237516723, 237516727], '04(00)',
|
785 |
+
[240004911, 240004914], '(00)3',
|
786 |
+
[240221306, 240221309], '3(00)',
|
787 |
+
[241389213, 241389217], '(010)3',
|
788 |
+
[241549003, 241549006], '(00)3',
|
789 |
+
[241729717, 241729720], '(00)3',
|
790 |
+
[241743684, 241743687], '3(00)',
|
791 |
+
[243780200, 243780203], '3(00)',
|
792 |
+
[243801317, 243801320], '(00)3',
|
793 |
+
[244122072, 244122075], '(00)3',
|
794 |
+
[244691224, 244691227], '3(00)',
|
795 |
+
[244841577, 244841580], '(00)3',
|
796 |
+
[245813461, 245813464], '(00)3',
|
797 |
+
[246299475, 246299478], '(00)3',
|
798 |
+
[246450176, 246450179], '3(00)',
|
799 |
+
[249069349, 249069352], '(00)3',
|
800 |
+
[250076378, 250076381], '(00)3',
|
801 |
+
[252442157, 252442160], '3(00)',
|
802 |
+
[252904231, 252904234], '3(00)',
|
803 |
+
[255145220, 255145223], '(00)3',
|
804 |
+
[255285971, 255285974], '3(00)',
|
805 |
+
[256713230, 256713233], '(00)3',
|
806 |
+
[257992082, 257992085], '(00)3',
|
807 |
+
[258447955, 258447959], '22(00)',
|
808 |
+
[259298045, 259298048], '3(00)',
|
809 |
+
[262141503, 262141506], '(00)3',
|
810 |
+
[263681743, 263681746], '3(00)',
|
811 |
+
[266527881, 266527885], '(010)3',
|
812 |
+
[266617122, 266617125], '(00)3',
|
813 |
+
[266628044, 266628047], '3(00)',
|
814 |
+
[267305763, 267305766], '(00)3',
|
815 |
+
[267388404, 267388407], '3(00)',
|
816 |
+
[267441672, 267441675], '3(00)',
|
817 |
+
[267464886, 267464889], '(00)3',
|
818 |
+
[267554907, 267554910], '3(00)',
|
819 |
+
[269787480, 269787483], '(00)3',
|
820 |
+
[270881434, 270881437], '(00)3',
|
821 |
+
[270997583, 270997586], '3(00)',
|
822 |
+
[272096378, 272096381], '3(00)',
|
823 |
+
[272583009, 272583012], '(00)3',
|
824 |
+
[274190881, 274190884], '3(00)',
|
825 |
+
[274268747, 274268750], '(00)3',
|
826 |
+
[275297429, 275297432], '3(00)',
|
827 |
+
[275545476, 275545479], '3(00)',
|
828 |
+
[275898479, 275898482], '3(00)',
|
829 |
+
[275953000, 275953003], '(00)3',
|
830 |
+
[277117197, 277117201], '(00)22',
|
831 |
+
[277447310, 277447313], '3(00)',
|
832 |
+
[279059657, 279059660], '3(00)',
|
833 |
+
[279259144, 279259147], '3(00)',
|
834 |
+
[279513636, 279513639], '3(00)',
|
835 |
+
[279849069, 279849072], '3(00)',
|
836 |
+
[280291419, 280291422], '(00)3',
|
837 |
+
[281449425, 281449428], '3(00)',
|
838 |
+
[281507953, 281507956], '3(00)',
|
839 |
+
[281825600, 281825603], '(00)3',
|
840 |
+
[282547093, 282547096], '3(00)',
|
841 |
+
[283120963, 283120966], '3(00)',
|
842 |
+
[283323493, 283323496], '(00)3',
|
843 |
+
[284764535, 284764538], '3(00)',
|
844 |
+
[286172639, 286172642], '3(00)',
|
845 |
+
[286688824, 286688827], '(00)3',
|
846 |
+
[287222172, 287222175], '3(00)',
|
847 |
+
[287235534, 287235537], '3(00)',
|
848 |
+
[287304861, 287304864], '3(00)',
|
849 |
+
[287433571, 287433574], '(00)3',
|
850 |
+
[287823551, 287823554], '(00)3',
|
851 |
+
[287872422, 287872425], '3(00)',
|
852 |
+
[288766615, 288766618], '3(00)',
|
853 |
+
[290122963, 290122966], '3(00)',
|
854 |
+
[290450849, 290450853], '(00)22',
|
855 |
+
[291426141, 291426144], '3(00)',
|
856 |
+
[292810353, 292810356], '3(00)',
|
857 |
+
[293109861, 293109864], '3(00)',
|
858 |
+
[293398054, 293398057], '3(00)',
|
859 |
+
[294134426, 294134429], '3(00)',
|
860 |
+
[294216438, 294216441], '(00)3',
|
861 |
+
[295367141, 295367144], '3(00)',
|
862 |
+
[297834111, 297834114], '3(00)',
|
863 |
+
[299099969, 299099972], '3(00)',
|
864 |
+
[300746958, 300746961], '3(00)',
|
865 |
+
[301097423, 301097426], '(00)3',
|
866 |
+
[301834209, 301834212], '(00)3',
|
867 |
+
[302554791, 302554794], '(00)3',
|
868 |
+
[303497445, 303497448], '3(00)',
|
869 |
+
[304165344, 304165347], '3(00)',
|
870 |
+
[304790218, 304790222], '3(010)',
|
871 |
+
[305302352, 305302355], '(00)3',
|
872 |
+
[306785996, 306785999], '3(00)',
|
873 |
+
[307051443, 307051446], '3(00)',
|
874 |
+
[307481539, 307481542], '3(00)',
|
875 |
+
[308605569, 308605572], '3(00)',
|
876 |
+
[309237610, 309237613], '3(00)',
|
877 |
+
[310509287, 310509290], '(00)3',
|
878 |
+
[310554057, 310554060], '3(00)',
|
879 |
+
[310646345, 310646348], '3(00)',
|
880 |
+
[311274896, 311274899], '(00)3',
|
881 |
+
[311894272, 311894275], '3(00)',
|
882 |
+
[312269470, 312269473], '(00)3',
|
883 |
+
[312306601, 312306605], '(00)40',
|
884 |
+
[312683193, 312683196], '3(00)',
|
885 |
+
[314499804, 314499807], '3(00)',
|
886 |
+
[314636802, 314636805], '(00)3',
|
887 |
+
[314689897, 314689900], '3(00)',
|
888 |
+
[314721319, 314721322], '3(00)',
|
889 |
+
[316132890, 316132893], '3(00)',
|
890 |
+
[316217470, 316217474], '(010)3',
|
891 |
+
[316465705, 316465708], '3(00)',
|
892 |
+
[316542790, 316542793], '(00)3',
|
893 |
+
[320822347, 320822350], '3(00)',
|
894 |
+
[321733242, 321733245], '3(00)',
|
895 |
+
[324413970, 324413973], '(00)3',
|
896 |
+
[325950140, 325950143], '(00)3',
|
897 |
+
[326675884, 326675887], '(00)3',
|
898 |
+
[326704208, 326704211], '3(00)',
|
899 |
+
[327596247, 327596250], '3(00)',
|
900 |
+
[328123172, 328123175], '3(00)',
|
901 |
+
[328182212, 328182215], '(00)3',
|
902 |
+
[328257498, 328257501], '3(00)',
|
903 |
+
[328315836, 328315839], '(00)3',
|
904 |
+
[328800974, 328800977], '(00)3',
|
905 |
+
[328998509, 328998512], '3(00)',
|
906 |
+
[329725370, 329725373], '(00)3',
|
907 |
+
[332080601, 332080604], '(00)3',
|
908 |
+
[332221246, 332221249], '(00)3',
|
909 |
+
[332299899, 332299902], '(00)3',
|
910 |
+
[332532822, 332532825], '(00)3',
|
911 |
+
[333334544, 333334548], '(00)22',
|
912 |
+
[333881266, 333881269], '3(00)',
|
913 |
+
[334703267, 334703270], '3(00)',
|
914 |
+
[334875138, 334875141], '3(00)',
|
915 |
+
[336531451, 336531454], '3(00)',
|
916 |
+
[336825907, 336825910], '(00)3',
|
917 |
+
[336993167, 336993170], '(00)3',
|
918 |
+
[337493998, 337494001], '3(00)',
|
919 |
+
[337861034, 337861037], '3(00)',
|
920 |
+
[337899191, 337899194], '(00)3',
|
921 |
+
[337958123, 337958126], '(00)3',
|
922 |
+
[342331982, 342331985], '3(00)',
|
923 |
+
[342676068, 342676071], '3(00)',
|
924 |
+
[347063781, 347063784], '3(00)',
|
925 |
+
[347697348, 347697351], '3(00)',
|
926 |
+
[347954319, 347954322], '3(00)',
|
927 |
+
[348162775, 348162778], '3(00)',
|
928 |
+
[349210702, 349210705], '(00)3',
|
929 |
+
[349212913, 349212916], '3(00)',
|
930 |
+
[349248650, 349248653], '(00)3',
|
931 |
+
[349913500, 349913503], '3(00)',
|
932 |
+
[350891529, 350891532], '3(00)',
|
933 |
+
[351089323, 351089326], '3(00)',
|
934 |
+
[351826158, 351826161], '3(00)',
|
935 |
+
[352228580, 352228583], '(00)3',
|
936 |
+
[352376244, 352376247], '3(00)',
|
937 |
+
[352853758, 352853761], '(00)3',
|
938 |
+
[355110439, 355110442], '(00)3',
|
939 |
+
[355808090, 355808094], '(00)40',
|
940 |
+
[355941556, 355941559], '3(00)',
|
941 |
+
[356360231, 356360234], '(00)3',
|
942 |
+
[356586657, 356586660], '3(00)',
|
943 |
+
[356892926, 356892929], '(00)3',
|
944 |
+
[356908232, 356908235], '3(00)',
|
945 |
+
[357912730, 357912733], '3(00)',
|
946 |
+
[358120344, 358120347], '3(00)',
|
947 |
+
[359044096, 359044099], '(00)3',
|
948 |
+
[360819357, 360819360], '3(00)',
|
949 |
+
[361399662, 361399666], '(010)3',
|
950 |
+
[362361315, 362361318], '(00)3',
|
951 |
+
[363610112, 363610115], '(00)3',
|
952 |
+
[363964804, 363964807], '3(00)',
|
953 |
+
[364527375, 364527378], '(00)3',
|
954 |
+
[365090327, 365090330], '(00)3',
|
955 |
+
[365414539, 365414542], '3(00)',
|
956 |
+
[366738474, 366738477], '3(00)',
|
957 |
+
[368714778, 368714783], '04(010)',
|
958 |
+
[368831545, 368831548], '(00)3',
|
959 |
+
[368902387, 368902390], '(00)3',
|
960 |
+
[370109769, 370109772], '3(00)',
|
961 |
+
[370963333, 370963336], '3(00)',
|
962 |
+
[372541136, 372541140], '3(010)',
|
963 |
+
[372681562, 372681565], '(00)3',
|
964 |
+
[373009410, 373009413], '(00)3',
|
965 |
+
[373458970, 373458973], '3(00)',
|
966 |
+
[375648658, 375648661], '3(00)',
|
967 |
+
[376834728, 376834731], '3(00)',
|
968 |
+
[377119945, 377119948], '(00)3',
|
969 |
+
[377335703, 377335706], '(00)3',
|
970 |
+
[378091745, 378091748], '3(00)',
|
971 |
+
[379139522, 379139525], '3(00)',
|
972 |
+
[380279160, 380279163], '(00)3',
|
973 |
+
[380619442, 380619445], '3(00)',
|
974 |
+
[381244231, 381244234], '3(00)',
|
975 |
+
[382327446, 382327450], '(010)3',
|
976 |
+
[382357073, 382357076], '3(00)',
|
977 |
+
[383545479, 383545482], '3(00)',
|
978 |
+
[384363766, 384363769], '(00)3',
|
979 |
+
[384401786, 384401790], '22(00)',
|
980 |
+
[385198212, 385198215], '3(00)',
|
981 |
+
[385824476, 385824479], '(00)3',
|
982 |
+
[385908194, 385908197], '3(00)',
|
983 |
+
[386946806, 386946809], '3(00)',
|
984 |
+
[387592175, 387592179], '22(00)',
|
985 |
+
[388329293, 388329296], '(00)3',
|
986 |
+
[388679566, 388679569], '3(00)',
|
987 |
+
[388832142, 388832145], '3(00)',
|
988 |
+
[390087103, 390087106], '(00)3',
|
989 |
+
[390190926, 390190930], '(00)22',
|
990 |
+
[390331207, 390331210], '3(00)',
|
991 |
+
[391674495, 391674498], '3(00)',
|
992 |
+
[391937831, 391937834], '3(00)',
|
993 |
+
[391951632, 391951636], '(00)22',
|
994 |
+
[392963986, 392963989], '(00)3',
|
995 |
+
[393007921, 393007924], '3(00)',
|
996 |
+
[393373210, 393373213], '3(00)',
|
997 |
+
[393759572, 393759575], '(00)3',
|
998 |
+
[394036662, 394036665], '(00)3',
|
999 |
+
[395813866, 395813869], '(00)3',
|
1000 |
+
[395956690, 395956693], '3(00)',
|
1001 |
+
[396031670, 396031673], '3(00)',
|
1002 |
+
[397076433, 397076436], '3(00)',
|
1003 |
+
[397470601, 397470604], '3(00)',
|
1004 |
+
[398289458, 398289461], '3(00)',
|
1005 |
+
#
|
1006 |
+
[368714778, 368714783], '04(010)',
|
1007 |
+
[437953499, 437953504], '04(010)',
|
1008 |
+
[526196233, 526196238], '032(00)',
|
1009 |
+
[744719566, 744719571], '(010)40',
|
1010 |
+
[750375857, 750375862], '032(00)',
|
1011 |
+
[958241932, 958241937], '04(010)',
|
1012 |
+
[983377342, 983377347], '(00)410',
|
1013 |
+
[1003780080, 1003780085], '04(010)',
|
1014 |
+
[1070232754, 1070232759], '(00)230',
|
1015 |
+
[1209834865, 1209834870], '032(00)',
|
1016 |
+
[1257209100, 1257209105], '(00)410',
|
1017 |
+
[1368002233, 1368002238], '(00)230'
|
1018 |
+
]
|
env-llmeval/lib/python3.10/site-packages/mpmath/libmp/__init__.py
ADDED
@@ -0,0 +1,77 @@
|
1 |
+
from .libmpf import (prec_to_dps, dps_to_prec, repr_dps,
|
2 |
+
round_down, round_up, round_floor, round_ceiling, round_nearest,
|
3 |
+
to_pickable, from_pickable, ComplexResult,
|
4 |
+
fzero, fnzero, fone, fnone, ftwo, ften, fhalf, fnan, finf, fninf,
|
5 |
+
math_float_inf, round_int, normalize, normalize1,
|
6 |
+
from_man_exp, from_int, to_man_exp, to_int, mpf_ceil, mpf_floor,
|
7 |
+
mpf_nint, mpf_frac,
|
8 |
+
from_float, from_npfloat, from_Decimal, to_float, from_rational, to_rational, to_fixed,
|
9 |
+
mpf_rand, mpf_eq, mpf_hash, mpf_cmp, mpf_lt, mpf_le, mpf_gt, mpf_ge,
|
10 |
+
mpf_pos, mpf_neg, mpf_abs, mpf_sign, mpf_add, mpf_sub, mpf_sum,
|
11 |
+
mpf_mul, mpf_mul_int, mpf_shift, mpf_frexp,
|
12 |
+
mpf_div, mpf_rdiv_int, mpf_mod, mpf_pow_int,
|
13 |
+
mpf_perturb,
|
14 |
+
to_digits_exp, to_str, str_to_man_exp, from_str, from_bstr, to_bstr,
|
15 |
+
mpf_sqrt, mpf_hypot)
|
16 |
+
|
17 |
+
from .libmpc import (mpc_one, mpc_zero, mpc_two, mpc_half,
|
18 |
+
mpc_is_inf, mpc_is_infnan, mpc_to_str, mpc_to_complex, mpc_hash,
|
19 |
+
mpc_conjugate, mpc_is_nonzero, mpc_add, mpc_add_mpf,
|
20 |
+
mpc_sub, mpc_sub_mpf, mpc_pos, mpc_neg, mpc_shift, mpc_abs,
|
21 |
+
mpc_arg, mpc_floor, mpc_ceil, mpc_nint, mpc_frac, mpc_mul, mpc_square,
|
22 |
+
mpc_mul_mpf, mpc_mul_imag_mpf, mpc_mul_int,
|
23 |
+
mpc_div, mpc_div_mpf, mpc_reciprocal, mpc_mpf_div,
|
24 |
+
complex_int_pow, mpc_pow, mpc_pow_mpf, mpc_pow_int,
|
25 |
+
mpc_sqrt, mpc_nthroot, mpc_cbrt, mpc_exp, mpc_log, mpc_cos, mpc_sin,
|
26 |
+
mpc_tan, mpc_cos_pi, mpc_sin_pi, mpc_cosh, mpc_sinh, mpc_tanh,
|
27 |
+
mpc_atan, mpc_acos, mpc_asin, mpc_asinh, mpc_acosh, mpc_atanh,
|
28 |
+
mpc_fibonacci, mpf_expj, mpf_expjpi, mpc_expj, mpc_expjpi,
|
29 |
+
mpc_cos_sin, mpc_cos_sin_pi)
|
30 |
+
|
31 |
+
from .libelefun import (ln2_fixed, mpf_ln2, ln10_fixed, mpf_ln10,
|
32 |
+
pi_fixed, mpf_pi, e_fixed, mpf_e, phi_fixed, mpf_phi,
|
33 |
+
degree_fixed, mpf_degree,
|
34 |
+
mpf_pow, mpf_nthroot, mpf_cbrt, log_int_fixed, agm_fixed,
|
35 |
+
mpf_log, mpf_log_hypot, mpf_exp, mpf_cos_sin, mpf_cos, mpf_sin, mpf_tan,
|
36 |
+
mpf_cos_sin_pi, mpf_cos_pi, mpf_sin_pi, mpf_cosh_sinh,
|
37 |
+
mpf_cosh, mpf_sinh, mpf_tanh, mpf_atan, mpf_atan2, mpf_asin,
|
38 |
+
mpf_acos, mpf_asinh, mpf_acosh, mpf_atanh, mpf_fibonacci)
|
39 |
+
|
40 |
+
from .libhyper import (NoConvergence, make_hyp_summator,
|
41 |
+
mpf_erf, mpf_erfc, mpf_ei, mpc_ei, mpf_e1, mpc_e1, mpf_expint,
|
42 |
+
mpf_ci_si, mpf_ci, mpf_si, mpc_ci, mpc_si, mpf_besseljn,
|
43 |
+
mpc_besseljn, mpf_agm, mpf_agm1, mpc_agm, mpc_agm1,
|
44 |
+
mpf_ellipk, mpc_ellipk, mpf_ellipe, mpc_ellipe)
|
45 |
+
|
46 |
+
from .gammazeta import (catalan_fixed, mpf_catalan,
|
47 |
+
khinchin_fixed, mpf_khinchin, glaisher_fixed, mpf_glaisher,
|
48 |
+
apery_fixed, mpf_apery, euler_fixed, mpf_euler, mertens_fixed,
|
49 |
+
mpf_mertens, twinprime_fixed, mpf_twinprime,
|
50 |
+
mpf_bernoulli, bernfrac, mpf_gamma_int,
|
51 |
+
mpf_factorial, mpc_factorial, mpf_gamma, mpc_gamma,
|
52 |
+
mpf_loggamma, mpc_loggamma, mpf_rgamma, mpc_rgamma,
|
53 |
+
mpf_harmonic, mpc_harmonic, mpf_psi0, mpc_psi0,
|
54 |
+
mpf_psi, mpc_psi, mpf_zeta_int, mpf_zeta, mpc_zeta,
|
55 |
+
mpf_altzeta, mpc_altzeta, mpf_zetasum, mpc_zetasum)
|
56 |
+
|
57 |
+
from .libmpi import (mpi_str,
|
58 |
+
mpi_from_str, mpi_to_str,
|
59 |
+
mpi_eq, mpi_ne,
|
60 |
+
mpi_lt, mpi_le, mpi_gt, mpi_ge,
|
61 |
+
mpi_add, mpi_sub, mpi_delta, mpi_mid,
|
62 |
+
mpi_pos, mpi_neg, mpi_abs, mpi_mul, mpi_div, mpi_exp,
|
63 |
+
mpi_log, mpi_sqrt, mpi_pow_int, mpi_pow, mpi_cos_sin,
|
64 |
+
mpi_cos, mpi_sin, mpi_tan, mpi_cot,
|
65 |
+
mpi_atan, mpi_atan2,
|
66 |
+
mpci_pos, mpci_neg, mpci_add, mpci_sub, mpci_mul, mpci_div, mpci_pow,
|
67 |
+
mpci_abs, mpci_pow, mpci_exp, mpci_log, mpci_cos, mpci_sin,
|
68 |
+
mpi_gamma, mpci_gamma, mpi_loggamma, mpci_loggamma,
|
69 |
+
mpi_rgamma, mpci_rgamma, mpi_factorial, mpci_factorial)
|
70 |
+
|
71 |
+
from .libintmath import (trailing, bitcount, numeral, bin_to_radix,
|
72 |
+
isqrt, isqrt_small, isqrt_fast, sqrt_fixed, sqrtrem, ifib, ifac,
|
73 |
+
list_primes, isprime, moebius, gcd, eulernum, stirling1, stirling2)
|
74 |
+
|
75 |
+
from .backend import (gmpy, sage, BACKEND, STRICT, MPZ, MPZ_TYPE,
|
76 |
+
MPZ_ZERO, MPZ_ONE, MPZ_TWO, MPZ_THREE, MPZ_FIVE, int_types,
|
77 |
+
HASH_MODULUS, HASH_BITS)
|
env-llmeval/lib/python3.10/site-packages/mpmath/libmp/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (6.98 kB). View file
|
|