applied-ai-018 committed
Commit 39ccdaf · verified · 1 parent: 1a338e6

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_base.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_fp.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_iv.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_mp.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_mp_python.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/function_docs.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/identification.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/math2.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/rational.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/usertools.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/visualization.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__init__.py +6 -0
  14. llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/__init__.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/approximation.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/calculus.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/differentiation.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/extrapolation.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/inverselaplace.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/odes.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/optimization.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/mpmath/calculus/approximation.py +246 -0
  25. llmeval-env/lib/python3.10/site-packages/mpmath/calculus/calculus.py +6 -0
  26. llmeval-env/lib/python3.10/site-packages/mpmath/calculus/differentiation.py +647 -0
  27. llmeval-env/lib/python3.10/site-packages/mpmath/calculus/extrapolation.py +2115 -0
  28. llmeval-env/lib/python3.10/site-packages/mpmath/calculus/inverselaplace.py +973 -0
  29. llmeval-env/lib/python3.10/site-packages/mpmath/calculus/odes.py +288 -0
  30. llmeval-env/lib/python3.10/site-packages/mpmath/calculus/optimization.py +1102 -0
  31. llmeval-env/lib/python3.10/site-packages/mpmath/calculus/polynomials.py +213 -0
  32. llmeval-env/lib/python3.10/site-packages/mpmath/calculus/quadrature.py +1115 -0
  33. llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/__init__.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/bessel.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/expintegrals.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/functions.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/hypergeometric.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/qfunctions.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/theta.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/zetazeros.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/peft/__init__.py +90 -0
  42. llmeval-env/lib/python3.10/site-packages/peft/auto.py +170 -0
  43. llmeval-env/lib/python3.10/site-packages/peft/config.py +270 -0
  44. llmeval-env/lib/python3.10/site-packages/peft/helpers.py +113 -0
  45. llmeval-env/lib/python3.10/site-packages/peft/import_utils.py +73 -0
  46. llmeval-env/lib/python3.10/site-packages/peft/mapping.py +168 -0
  47. llmeval-env/lib/python3.10/site-packages/peft/mixed_model.py +409 -0
  48. llmeval-env/lib/python3.10/site-packages/peft/peft_model.py +1986 -0
  49. llmeval-env/lib/python3.10/site-packages/peft/py.typed +0 -0
  50. llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/__init__.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (8.29 kB).

llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_base.cpython-310.pyc ADDED
Binary file (16.3 kB).

llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_fp.cpython-310.pyc ADDED
Binary file (7.83 kB).

llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_iv.cpython-310.pyc ADDED
Binary file (20.5 kB).

llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_mp.cpython-310.pyc ADDED
Binary file (43.1 kB).

llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_mp_python.cpython-310.pyc ADDED
Binary file (34.6 kB).

llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/function_docs.cpython-310.pyc ADDED
Binary file (284 kB).

llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/identification.cpython-310.pyc ADDED
Binary file (28.3 kB).

llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/math2.cpython-310.pyc ADDED
Binary file (15.1 kB).

llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/rational.cpython-310.pyc ADDED
Binary file (5.98 kB).

llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/usertools.cpython-310.pyc ADDED
Binary file (3.61 kB).

llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/visualization.cpython-310.pyc ADDED
Binary file (9.72 kB).

llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__init__.py ADDED
@@ -0,0 +1,6 @@
+ from . import calculus
+ # XXX: hack to set methods
+ from . import approximation
+ from . import differentiation
+ from . import extrapolation
+ from . import polynomials
llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (365 Bytes).

llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/approximation.cpython-310.pyc ADDED
Binary file (9.14 kB).

llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/calculus.cpython-310.pyc ADDED
Binary file (489 Bytes).

llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/differentiation.cpython-310.pyc ADDED
Binary file (20.4 kB).

llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/extrapolation.cpython-310.pyc ADDED
Binary file (69.6 kB).

llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/inverselaplace.cpython-310.pyc ADDED
Binary file (30.7 kB).

llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/odes.cpython-310.pyc ADDED
Binary file (10.2 kB).

llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/optimization.cpython-310.pyc ADDED
Binary file (29.9 kB).

llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-310.pyc ADDED
Binary file (7.75 kB).

llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-310.pyc ADDED
Binary file (39.6 kB).
 
llmeval-env/lib/python3.10/site-packages/mpmath/calculus/approximation.py ADDED
@@ -0,0 +1,246 @@
+ from ..libmp.backend import xrange
+ from .calculus import defun
+
+ #----------------------------------------------------------------------------#
+ #                          Approximation methods                              #
+ #----------------------------------------------------------------------------#
+
+ # The Chebyshev approximation formula is given at:
+ # http://mathworld.wolfram.com/ChebyshevApproximationFormula.html
+
+ # The only major changes in the following code is that we return the
+ # expanded polynomial coefficients instead of Chebyshev coefficients,
+ # and that we automatically transform [a,b] -> [-1,1] and back
+ # for convenience.
+
+ # Coefficient in Chebyshev approximation
+ def chebcoeff(ctx,f,a,b,j,N):
+     s = ctx.mpf(0)
+     h = ctx.mpf(0.5)
+     for k in range(1, N+1):
+         t = ctx.cospi((k-h)/N)
+         s += f(t*(b-a)*h + (b+a)*h) * ctx.cospi(j*(k-h)/N)
+     return 2*s/N
+
+ # Generate Chebyshev polynomials T_n(ax+b) in expanded form
+ def chebT(ctx, a=1, b=0):
+     Tb = [1]
+     yield Tb
+     Ta = [b, a]
+     while 1:
+         yield Ta
+         # Recurrence: T[n+1](ax+b) = 2*(ax+b)*T[n](ax+b) - T[n-1](ax+b)
+         Tmp = [0] + [2*a*t for t in Ta]
+         for i, c in enumerate(Ta): Tmp[i] += 2*b*c
+         for i, c in enumerate(Tb): Tmp[i] -= c
+         Ta, Tb = Tmp, Ta
+
+ @defun
+ def chebyfit(ctx, f, interval, N, error=False):
+     r"""
+     Computes a polynomial of degree `N-1` that approximates the
+     given function `f` on the interval `[a, b]`. With ``error=True``,
+     :func:`~mpmath.chebyfit` also returns an accurate estimate of the
+     maximum absolute error; that is, the maximum value of
+     `|f(x) - P(x)|` for `x \in [a, b]`.
+
+     :func:`~mpmath.chebyfit` uses the Chebyshev approximation formula,
+     which gives a nearly optimal solution: that is, the maximum
+     error of the approximating polynomial is very close to
+     the smallest possible for any polynomial of the same degree.
+
+     Chebyshev approximation is very useful if one needs repeated
+     evaluation of an expensive function, such as function defined
+     implicitly by an integral or a differential equation. (For
+     example, it could be used to turn a slow mpmath function
+     into a fast machine-precision version of the same.)
+
+     **Examples**
+
+     Here we use :func:`~mpmath.chebyfit` to generate a low-degree approximation
+     of `f(x) = \cos(x)`, valid on the interval `[1, 2]`::
+
+         >>> from mpmath import *
+         >>> mp.dps = 15; mp.pretty = True
+         >>> poly, err = chebyfit(cos, [1, 2], 5, error=True)
+         >>> nprint(poly)
+         [0.00291682, 0.146166, -0.732491, 0.174141, 0.949553]
+         >>> nprint(err, 12)
+         1.61351758081e-5
+
+     The polynomial can be evaluated using ``polyval``::
+
+         >>> nprint(polyval(poly, 1.6), 12)
+         -0.0291858904138
+         >>> nprint(cos(1.6), 12)
+         -0.0291995223013
+
+     Sampling the true error at 1000 points shows that the error
+     estimate generated by ``chebyfit`` is remarkably good::
+
+         >>> error = lambda x: abs(cos(x) - polyval(poly, x))
+         >>> nprint(max([error(1+n/1000.) for n in range(1000)]), 12)
+         1.61349954245e-5
+
+     **Choice of degree**
+
+     The degree `N` can be set arbitrarily high, to obtain an
+     arbitrarily good approximation. As a rule of thumb, an
+     `N`-term Chebyshev approximation is good to `N/(b-a)` decimal
+     places on a unit interval (although this depends on how
+     well-behaved `f` is). The cost grows accordingly: ``chebyfit``
+     evaluates the function `(N^2)/2` times to compute the
+     coefficients and an additional `N` times to estimate the error.
+
+     **Possible issues**
+
+     One should be careful to use a sufficiently high working
+     precision both when calling ``chebyfit`` and when evaluating
+     the resulting polynomial, as the polynomial is sometimes
+     ill-conditioned. It is for example difficult to reach
+     15-digit accuracy when evaluating the polynomial using
+     machine precision floats, no matter the theoretical
+     accuracy of the polynomial. (The option to return the
+     coefficients in Chebyshev form should be made available
+     in the future.)
+
+     It is important to note the Chebyshev approximation works
+     poorly if `f` is not smooth. A function containing singularities,
+     rapid oscillation, etc can be approximated more effectively by
+     multiplying it by a weight function that cancels out the
+     nonsmooth features, or by dividing the interval into several
+     segments.
+     """
+     a, b = ctx._as_points(interval)
+     orig = ctx.prec
+     try:
+         ctx.prec = orig + int(N**0.5) + 20
+         c = [chebcoeff(ctx,f,a,b,k,N) for k in range(N)]
+         d = [ctx.zero] * N
+         d[0] = -c[0]/2
+         h = ctx.mpf(0.5)
+         T = chebT(ctx, ctx.mpf(2)/(b-a), ctx.mpf(-1)*(b+a)/(b-a))
+         for (k, Tk) in zip(range(N), T):
+             for i in range(len(Tk)):
+                 d[i] += c[k]*Tk[i]
+         d = d[::-1]
+         # Estimate maximum error
+         err = ctx.zero
+         for k in range(N):
+             x = ctx.cos(ctx.pi*k/N) * (b-a)*h + (b+a)*h
+             err = max(err, abs(f(x) - ctx.polyval(d, x)))
+     finally:
+         ctx.prec = orig
+     if error:
+         return d, +err
+     else:
+         return d
+
+ @defun
+ def fourier(ctx, f, interval, N):
+     r"""
+     Computes the Fourier series of degree `N` of the given function
+     on the interval `[a, b]`. More precisely, :func:`~mpmath.fourier` returns
+     two lists `(c, s)` of coefficients (the cosine series and sine
+     series, respectively), such that
+
+     .. math ::
+
+         f(x) \sim \sum_{k=0}^N
+             c_k \cos(k m x) + s_k \sin(k m x)
+
+     where `m = 2 \pi / (b-a)`.
+
+     Note that many texts define the first coefficient as `2 c_0` instead
+     of `c_0`. The easiest way to evaluate the computed series correctly
+     is to pass it to :func:`~mpmath.fourierval`.
+
+     **Examples**
+
+     The function `f(x) = x` has a simple Fourier series on the standard
+     interval `[-\pi, \pi]`. The cosine coefficients are all zero (because
+     the function has odd symmetry), and the sine coefficients are
+     rational numbers::
+
+         >>> from mpmath import *
+         >>> mp.dps = 15; mp.pretty = True
+         >>> c, s = fourier(lambda x: x, [-pi, pi], 5)
+         >>> nprint(c)
+         [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+         >>> nprint(s)
+         [0.0, 2.0, -1.0, 0.666667, -0.5, 0.4]
+
+     This computes a Fourier series of a nonsymmetric function on
+     a nonstandard interval::
+
+         >>> I = [-1, 1.5]
+         >>> f = lambda x: x**2 - 4*x + 1
+         >>> cs = fourier(f, I, 4)
+         >>> nprint(cs[0])
+         [0.583333, 1.12479, -1.27552, 0.904708, -0.441296]
+         >>> nprint(cs[1])
+         [0.0, -2.6255, 0.580905, 0.219974, -0.540057]
+
+     It is instructive to plot a function along with its truncated
+     Fourier series::
+
+         >>> plot([f, lambda x: fourierval(cs, I, x)], I) #doctest: +SKIP
+
+     Fourier series generally converge slowly (and may not converge
+     pointwise). For example, if `f(x) = \cosh(x)`, a 10-term Fourier
+     series gives an `L^2` error corresponding to 2-digit accuracy::
+
+         >>> I = [-1, 1]
+         >>> cs = fourier(cosh, I, 9)
+         >>> g = lambda x: (cosh(x) - fourierval(cs, I, x))**2
+         >>> nprint(sqrt(quad(g, I)))
+         0.00467963
+
+     :func:`~mpmath.fourier` uses numerical quadrature. For nonsmooth functions,
+     the accuracy (and speed) can be improved by including all singular
+     points in the interval specification::
+
+         >>> nprint(fourier(abs, [-1, 1], 0), 10)
+         ([0.5000441648], [0.0])
+         >>> nprint(fourier(abs, [-1, 0, 1], 0), 10)
+         ([0.5], [0.0])
+
+     """
+     interval = ctx._as_points(interval)
+     a = interval[0]
+     b = interval[-1]
+     L = b-a
+     cos_series = []
+     sin_series = []
+     cutoff = ctx.eps*10
+     for n in xrange(N+1):
+         m = 2*n*ctx.pi/L
+         an = 2*ctx.quadgl(lambda t: f(t)*ctx.cos(m*t), interval)/L
+         bn = 2*ctx.quadgl(lambda t: f(t)*ctx.sin(m*t), interval)/L
+         if n == 0:
+             an /= 2
+         if abs(an) < cutoff: an = ctx.zero
+         if abs(bn) < cutoff: bn = ctx.zero
+         cos_series.append(an)
+         sin_series.append(bn)
+     return cos_series, sin_series
+
+ @defun
+ def fourierval(ctx, series, interval, x):
+     """
+     Evaluates a Fourier series (in the format computed by
+     by :func:`~mpmath.fourier` for the given interval) at the point `x`.
+
+     The series should be a pair `(c, s)` where `c` is the
+     cosine series and `s` is the sine series. The two lists
+     need not have the same length.
+     """
+     cs, ss = series
+     ab = ctx._as_points(interval)
+     a = interval[0]
+     b = interval[-1]
+     m = 2*ctx.pi/(ab[-1]-ab[0])
+     s = ctx.zero
+     s += ctx.fsum(cs[n]*ctx.cos(m*n*x) for n in xrange(len(cs)) if cs[n])
+     s += ctx.fsum(ss[n]*ctx.sin(m*n*x) for n in xrange(len(ss)) if ss[n])
+     return s
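
The functions added in this file (chebyfit, fourier, fourierval) can be exercised directly once mpmath is importable; the following short sketch is not part of the committed file and simply reuses the calls shown in the docstrings above.

    # Usage sketch based on the doctests above (assumes `pip install mpmath`).
    from mpmath import mp, chebyfit, polyval, fourier, fourierval, cos, pi

    mp.dps = 15

    # Degree-4 Chebyshev fit of cos(x) on [1, 2], plus a maximum-error estimate.
    poly, err = chebyfit(cos, [1, 2], 5, error=True)
    print(polyval(poly, 1.6), cos(1.6), err)

    # Fourier coefficients of f(x) = x on [-pi, pi]; evaluate the truncated series.
    c, s = fourier(lambda x: x, [-pi, pi], 5)
    print(fourierval((c, s), [-pi, pi], 1.0))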
llmeval-env/lib/python3.10/site-packages/mpmath/calculus/calculus.py ADDED
@@ -0,0 +1,6 @@
+ class CalculusMethods(object):
+     pass
+
+ def defun(f):
+     setattr(CalculusMethods, f.__name__, f)
+     return f
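
For context, here is a minimal, self-contained sketch (not part of the committed file) of what this registration hook does: every function decorated with @defun becomes an attribute of CalculusMethods, which mpmath's context classes later mix in; the `double` function below is a hypothetical stand-in for the real chebyfit, diff, etc.

    class CalculusMethods(object):
        pass

    def defun(f):
        # Attach f to the methods class under its own name.
        setattr(CalculusMethods, f.__name__, f)
        return f

    @defun
    def double(ctx, x):
        # Hypothetical example; real registrations pass the mpmath context as ctx.
        return 2 * x

    print(CalculusMethods.double(None, 21))  # prints 42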
llmeval-env/lib/python3.10/site-packages/mpmath/calculus/differentiation.py ADDED
@@ -0,0 +1,647 @@
1
+ from ..libmp.backend import xrange
2
+ from .calculus import defun
3
+
4
+ try:
5
+ iteritems = dict.iteritems
6
+ except AttributeError:
7
+ iteritems = dict.items
8
+
9
+ #----------------------------------------------------------------------------#
10
+ # Differentiation #
11
+ #----------------------------------------------------------------------------#
12
+
13
+ @defun
14
+ def difference(ctx, s, n):
15
+ r"""
16
+ Given a sequence `(s_k)` containing at least `n+1` items, returns the
17
+ `n`-th forward difference,
18
+
19
+ .. math ::
20
+
21
+ \Delta^n = \sum_{k=0}^{\infty} (-1)^{k+n} {n \choose k} s_k.
22
+ """
23
+ n = int(n)
24
+ d = ctx.zero
25
+ b = (-1) ** (n & 1)
26
+ for k in xrange(n+1):
27
+ d += b * s[k]
28
+ b = (b * (k-n)) // (k+1)
29
+ return d
30
+
31
+ def hsteps(ctx, f, x, n, prec, **options):
32
+ singular = options.get('singular')
33
+ addprec = options.get('addprec', 10)
34
+ direction = options.get('direction', 0)
35
+ workprec = (prec+2*addprec) * (n+1)
36
+ orig = ctx.prec
37
+ try:
38
+ ctx.prec = workprec
39
+ h = options.get('h')
40
+ if h is None:
41
+ if options.get('relative'):
42
+ hextramag = int(ctx.mag(x))
43
+ else:
44
+ hextramag = 0
45
+ h = ctx.ldexp(1, -prec-addprec-hextramag)
46
+ else:
47
+ h = ctx.convert(h)
48
+ # Directed: steps x, x+h, ... x+n*h
49
+ direction = options.get('direction', 0)
50
+ if direction:
51
+ h *= ctx.sign(direction)
52
+ steps = xrange(n+1)
53
+ norm = h
54
+ # Central: steps x-n*h, x-(n-2)*h ..., x, ..., x+(n-2)*h, x+n*h
55
+ else:
56
+ steps = xrange(-n, n+1, 2)
57
+ norm = (2*h)
58
+ # Perturb
59
+ if singular:
60
+ x += 0.5*h
61
+ values = [f(x+k*h) for k in steps]
62
+ return values, norm, workprec
63
+ finally:
64
+ ctx.prec = orig
65
+
66
+
67
+ @defun
68
+ def diff(ctx, f, x, n=1, **options):
69
+ r"""
70
+ Numerically computes the derivative of `f`, `f'(x)`, or generally for
71
+ an integer `n \ge 0`, the `n`-th derivative `f^{(n)}(x)`.
72
+ A few basic examples are::
73
+
74
+ >>> from mpmath import *
75
+ >>> mp.dps = 15; mp.pretty = True
76
+ >>> diff(lambda x: x**2 + x, 1.0)
77
+ 3.0
78
+ >>> diff(lambda x: x**2 + x, 1.0, 2)
79
+ 2.0
80
+ >>> diff(lambda x: x**2 + x, 1.0, 3)
81
+ 0.0
82
+ >>> nprint([diff(exp, 3, n) for n in range(5)]) # exp'(x) = exp(x)
83
+ [20.0855, 20.0855, 20.0855, 20.0855, 20.0855]
84
+
85
+ Even more generally, given a tuple of arguments `(x_1, \ldots, x_k)`
86
+ and order `(n_1, \ldots, n_k)`, the partial derivative
87
+ `f^{(n_1,\ldots,n_k)}(x_1,\ldots,x_k)` is evaluated. For example::
88
+
89
+ >>> diff(lambda x,y: 3*x*y + 2*y - x, (0.25, 0.5), (0,1))
90
+ 2.75
91
+ >>> diff(lambda x,y: 3*x*y + 2*y - x, (0.25, 0.5), (1,1))
92
+ 3.0
93
+
94
+ **Options**
95
+
96
+ The following optional keyword arguments are recognized:
97
+
98
+ ``method``
99
+ Supported methods are ``'step'`` or ``'quad'``: derivatives may be
100
+ computed using either a finite difference with a small step
101
+ size `h` (default), or numerical quadrature.
102
+ ``direction``
103
+ Direction of finite difference: can be -1 for a left
104
+ difference, 0 for a central difference (default), or +1
105
+ for a right difference; more generally can be any complex number.
106
+ ``addprec``
107
+ Extra precision for `h` used to account for the function's
108
+ sensitivity to perturbations (default = 10).
109
+ ``relative``
110
+ Choose `h` relative to the magnitude of `x`, rather than an
111
+ absolute value; useful for large or tiny `x` (default = False).
112
+ ``h``
113
+ As an alternative to ``addprec`` and ``relative``, manually
114
+ select the step size `h`.
115
+ ``singular``
116
+ If True, evaluation exactly at the point `x` is avoided; this is
117
+ useful for differentiating functions with removable singularities.
118
+ Default = False.
119
+ ``radius``
120
+ Radius of integration contour (with ``method = 'quad'``).
121
+ Default = 0.25. A larger radius typically is faster and more
122
+ accurate, but it must be chosen so that `f` has no
123
+ singularities within the radius from the evaluation point.
124
+
125
+ A finite difference requires `n+1` function evaluations and must be
126
+ performed at `(n+1)` times the target precision. Accordingly, `f` must
127
+ support fast evaluation at high precision.
128
+
129
+ With integration, a larger number of function evaluations is
130
+ required, but not much extra precision is required. For high order
131
+ derivatives, this method may thus be faster if f is very expensive to
132
+ evaluate at high precision.
133
+
134
+ **Further examples**
135
+
136
+ The direction option is useful for computing left- or right-sided
137
+ derivatives of nonsmooth functions::
138
+
139
+ >>> diff(abs, 0, direction=0)
140
+ 0.0
141
+ >>> diff(abs, 0, direction=1)
142
+ 1.0
143
+ >>> diff(abs, 0, direction=-1)
144
+ -1.0
145
+
146
+ More generally, if the direction is nonzero, a right difference
147
+ is computed where the step size is multiplied by sign(direction).
148
+ For example, with direction=+j, the derivative from the positive
149
+ imaginary direction will be computed::
150
+
151
+ >>> diff(abs, 0, direction=j)
152
+ (0.0 - 1.0j)
153
+
154
+ With integration, the result may have a small imaginary part
155
+ even even if the result is purely real::
156
+
157
+ >>> diff(sqrt, 1, method='quad') # doctest:+ELLIPSIS
158
+ (0.5 - 4.59...e-26j)
159
+ >>> chop(_)
160
+ 0.5
161
+
162
+ Adding precision to obtain an accurate value::
163
+
164
+ >>> diff(cos, 1e-30)
165
+ 0.0
166
+ >>> diff(cos, 1e-30, h=0.0001)
167
+ -9.99999998328279e-31
168
+ >>> diff(cos, 1e-30, addprec=100)
169
+ -1.0e-30
170
+
171
+ """
172
+ partial = False
173
+ try:
174
+ orders = list(n)
175
+ x = list(x)
176
+ partial = True
177
+ except TypeError:
178
+ pass
179
+ if partial:
180
+ x = [ctx.convert(_) for _ in x]
181
+ return _partial_diff(ctx, f, x, orders, options)
182
+ method = options.get('method', 'step')
183
+ if n == 0 and method != 'quad' and not options.get('singular'):
184
+ return f(ctx.convert(x))
185
+ prec = ctx.prec
186
+ try:
187
+ if method == 'step':
188
+ values, norm, workprec = hsteps(ctx, f, x, n, prec, **options)
189
+ ctx.prec = workprec
190
+ v = ctx.difference(values, n) / norm**n
191
+ elif method == 'quad':
192
+ ctx.prec += 10
193
+ radius = ctx.convert(options.get('radius', 0.25))
194
+ def g(t):
195
+ rei = radius*ctx.expj(t)
196
+ z = x + rei
197
+ return f(z) / rei**n
198
+ d = ctx.quadts(g, [0, 2*ctx.pi])
199
+ v = d * ctx.factorial(n) / (2*ctx.pi)
200
+ else:
201
+ raise ValueError("unknown method: %r" % method)
202
+ finally:
203
+ ctx.prec = prec
204
+ return +v
205
+
206
+ def _partial_diff(ctx, f, xs, orders, options):
207
+ if not orders:
208
+ return f()
209
+ if not sum(orders):
210
+ return f(*xs)
211
+ i = 0
212
+ for i in range(len(orders)):
213
+ if orders[i]:
214
+ break
215
+ order = orders[i]
216
+ def fdiff_inner(*f_args):
217
+ def inner(t):
218
+ return f(*(f_args[:i] + (t,) + f_args[i+1:]))
219
+ return ctx.diff(inner, f_args[i], order, **options)
220
+ orders[i] = 0
221
+ return _partial_diff(ctx, fdiff_inner, xs, orders, options)
222
+
223
+ @defun
224
+ def diffs(ctx, f, x, n=None, **options):
225
+ r"""
226
+ Returns a generator that yields the sequence of derivatives
227
+
228
+ .. math ::
229
+
230
+ f(x), f'(x), f''(x), \ldots, f^{(k)}(x), \ldots
231
+
232
+ With ``method='step'``, :func:`~mpmath.diffs` uses only `O(k)`
233
+ function evaluations to generate the first `k` derivatives,
234
+ rather than the roughly `O(k^2)` evaluations
235
+ required if one calls :func:`~mpmath.diff` `k` separate times.
236
+
237
+ With `n < \infty`, the generator stops as soon as the
238
+ `n`-th derivative has been generated. If the exact number of
239
+ needed derivatives is known in advance, this is further
240
+ slightly more efficient.
241
+
242
+ Options are the same as for :func:`~mpmath.diff`.
243
+
244
+ **Examples**
245
+
246
+ >>> from mpmath import *
247
+ >>> mp.dps = 15
248
+ >>> nprint(list(diffs(cos, 1, 5)))
249
+ [0.540302, -0.841471, -0.540302, 0.841471, 0.540302, -0.841471]
250
+ >>> for i, d in zip(range(6), diffs(cos, 1)):
251
+ ... print("%s %s" % (i, d))
252
+ ...
253
+ 0 0.54030230586814
254
+ 1 -0.841470984807897
255
+ 2 -0.54030230586814
256
+ 3 0.841470984807897
257
+ 4 0.54030230586814
258
+ 5 -0.841470984807897
259
+
260
+ """
261
+ if n is None:
262
+ n = ctx.inf
263
+ else:
264
+ n = int(n)
265
+ if options.get('method', 'step') != 'step':
266
+ k = 0
267
+ while k < n + 1:
268
+ yield ctx.diff(f, x, k, **options)
269
+ k += 1
270
+ return
271
+ singular = options.get('singular')
272
+ if singular:
273
+ yield ctx.diff(f, x, 0, singular=True)
274
+ else:
275
+ yield f(ctx.convert(x))
276
+ if n < 1:
277
+ return
278
+ if n == ctx.inf:
279
+ A, B = 1, 2
280
+ else:
281
+ A, B = 1, n+1
282
+ while 1:
283
+ callprec = ctx.prec
284
+ y, norm, workprec = hsteps(ctx, f, x, B, callprec, **options)
285
+ for k in xrange(A, B):
286
+ try:
287
+ ctx.prec = workprec
288
+ d = ctx.difference(y, k) / norm**k
289
+ finally:
290
+ ctx.prec = callprec
291
+ yield +d
292
+ if k >= n:
293
+ return
294
+ A, B = B, int(A*1.4+1)
295
+ B = min(B, n)
296
+
297
+ def iterable_to_function(gen):
298
+ gen = iter(gen)
299
+ data = []
300
+ def f(k):
301
+ for i in xrange(len(data), k+1):
302
+ data.append(next(gen))
303
+ return data[k]
304
+ return f
305
+
306
+ @defun
307
+ def diffs_prod(ctx, factors):
308
+ r"""
309
+ Given a list of `N` iterables or generators yielding
310
+ `f_k(x), f'_k(x), f''_k(x), \ldots` for `k = 1, \ldots, N`,
311
+ generate `g(x), g'(x), g''(x), \ldots` where
312
+ `g(x) = f_1(x) f_2(x) \cdots f_N(x)`.
313
+
314
+ At high precision and for large orders, this is typically more efficient
315
+ than numerical differentiation if the derivatives of each `f_k(x)`
316
+ admit direct computation.
317
+
318
+ Note: This function does not increase the working precision internally,
319
+ so guard digits may have to be added externally for full accuracy.
320
+
321
+ **Examples**
322
+
323
+ >>> from mpmath import *
324
+ >>> mp.dps = 15; mp.pretty = True
325
+ >>> f = lambda x: exp(x)*cos(x)*sin(x)
326
+ >>> u = diffs(f, 1)
327
+ >>> v = mp.diffs_prod([diffs(exp,1), diffs(cos,1), diffs(sin,1)])
328
+ >>> next(u); next(v)
329
+ 1.23586333600241
330
+ 1.23586333600241
331
+ >>> next(u); next(v)
332
+ 0.104658952245596
333
+ 0.104658952245596
334
+ >>> next(u); next(v)
335
+ -5.96999877552086
336
+ -5.96999877552086
337
+ >>> next(u); next(v)
338
+ -12.4632923122697
339
+ -12.4632923122697
340
+
341
+ """
342
+ N = len(factors)
343
+ if N == 1:
344
+ for c in factors[0]:
345
+ yield c
346
+ else:
347
+ u = iterable_to_function(ctx.diffs_prod(factors[:N//2]))
348
+ v = iterable_to_function(ctx.diffs_prod(factors[N//2:]))
349
+ n = 0
350
+ while 1:
351
+ #yield sum(binomial(n,k)*u(n-k)*v(k) for k in xrange(n+1))
352
+ s = u(n) * v(0)
353
+ a = 1
354
+ for k in xrange(1,n+1):
355
+ a = a * (n-k+1) // k
356
+ s += a * u(n-k) * v(k)
357
+ yield s
358
+ n += 1
359
+
360
+ def dpoly(n, _cache={}):
361
+ """
362
+ nth differentiation polynomial for exp (Faa di Bruno's formula).
363
+
364
+ TODO: most exponents are zero, so maybe a sparse representation
365
+ would be better.
366
+ """
367
+ if n in _cache:
368
+ return _cache[n]
369
+ if not _cache:
370
+ _cache[0] = {(0,):1}
371
+ R = dpoly(n-1)
372
+ R = dict((c+(0,),v) for (c,v) in iteritems(R))
373
+ Ra = {}
374
+ for powers, count in iteritems(R):
375
+ powers1 = (powers[0]+1,) + powers[1:]
376
+ if powers1 in Ra:
377
+ Ra[powers1] += count
378
+ else:
379
+ Ra[powers1] = count
380
+ for powers, count in iteritems(R):
381
+ if not sum(powers):
382
+ continue
383
+ for k,p in enumerate(powers):
384
+ if p:
385
+ powers2 = powers[:k] + (p-1,powers[k+1]+1) + powers[k+2:]
386
+ if powers2 in Ra:
387
+ Ra[powers2] += p*count
388
+ else:
389
+ Ra[powers2] = p*count
390
+ _cache[n] = Ra
391
+ return _cache[n]
392
+
393
+ @defun
394
+ def diffs_exp(ctx, fdiffs):
395
+ r"""
396
+ Given an iterable or generator yielding `f(x), f'(x), f''(x), \ldots`
397
+ generate `g(x), g'(x), g''(x), \ldots` where `g(x) = \exp(f(x))`.
398
+
399
+ At high precision and for large orders, this is typically more efficient
400
+ than numerical differentiation if the derivatives of `f(x)`
401
+ admit direct computation.
402
+
403
+ Note: This function does not increase the working precision internally,
404
+ so guard digits may have to be added externally for full accuracy.
405
+
406
+ **Examples**
407
+
408
+ The derivatives of the gamma function can be computed using
409
+ logarithmic differentiation::
410
+
411
+ >>> from mpmath import *
412
+ >>> mp.dps = 15; mp.pretty = True
413
+ >>>
414
+ >>> def diffs_loggamma(x):
415
+ ... yield loggamma(x)
416
+ ... i = 0
417
+ ... while 1:
418
+ ... yield psi(i,x)
419
+ ... i += 1
420
+ ...
421
+ >>> u = diffs_exp(diffs_loggamma(3))
422
+ >>> v = diffs(gamma, 3)
423
+ >>> next(u); next(v)
424
+ 2.0
425
+ 2.0
426
+ >>> next(u); next(v)
427
+ 1.84556867019693
428
+ 1.84556867019693
429
+ >>> next(u); next(v)
430
+ 2.49292999190269
431
+ 2.49292999190269
432
+ >>> next(u); next(v)
433
+ 3.44996501352367
434
+ 3.44996501352367
435
+
436
+ """
437
+ fn = iterable_to_function(fdiffs)
438
+ f0 = ctx.exp(fn(0))
439
+ yield f0
440
+ i = 1
441
+ while 1:
442
+ s = ctx.mpf(0)
443
+ for powers, c in iteritems(dpoly(i)):
444
+ s += c*ctx.fprod(fn(k+1)**p for (k,p) in enumerate(powers) if p)
445
+ yield s * f0
446
+ i += 1
447
+
448
+ @defun
449
+ def differint(ctx, f, x, n=1, x0=0):
450
+ r"""
451
+ Calculates the Riemann-Liouville differintegral, or fractional
452
+ derivative, defined by
453
+
454
+ .. math ::
455
+
456
+ \,_{x_0}{\mathbb{D}}^n_xf(x) = \frac{1}{\Gamma(m-n)} \frac{d^m}{dx^m}
457
+ \int_{x_0}^{x}(x-t)^{m-n-1}f(t)dt
458
+
459
+ where `f` is a given (presumably well-behaved) function,
460
+ `x` is the evaluation point, `n` is the order, and `x_0` is
461
+ the reference point of integration (`m` is an arbitrary
462
+ parameter selected automatically).
463
+
464
+ With `n = 1`, this is just the standard derivative `f'(x)`; with `n = 2`,
465
+ the second derivative `f''(x)`, etc. With `n = -1`, it gives
466
+ `\int_{x_0}^x f(t) dt`, with `n = -2`
467
+ it gives `\int_{x_0}^x \left( \int_{x_0}^t f(u) du \right) dt`, etc.
468
+
469
+ As `n` is permitted to be any number, this operator generalizes
470
+ iterated differentiation and iterated integration to a single
471
+ operator with a continuous order parameter.
472
+
473
+ **Examples**
474
+
475
+ There is an exact formula for the fractional derivative of a
476
+ monomial `x^p`, which may be used as a reference. For example,
477
+ the following gives a half-derivative (order 0.5)::
478
+
479
+ >>> from mpmath import *
480
+ >>> mp.dps = 15; mp.pretty = True
481
+ >>> x = mpf(3); p = 2; n = 0.5
482
+ >>> differint(lambda t: t**p, x, n)
483
+ 7.81764019044672
484
+ >>> gamma(p+1)/gamma(p-n+1) * x**(p-n)
485
+ 7.81764019044672
486
+
487
+ Another useful test function is the exponential function, whose
488
+ integration / differentiation formula easy generalizes
489
+ to arbitrary order. Here we first compute a third derivative,
490
+ and then a triply nested integral. (The reference point `x_0`
491
+ is set to `-\infty` to avoid nonzero endpoint terms.)::
492
+
493
+ >>> differint(lambda x: exp(pi*x), -1.5, 3)
494
+ 0.278538406900792
495
+ >>> exp(pi*-1.5) * pi**3
496
+ 0.278538406900792
497
+ >>> differint(lambda x: exp(pi*x), 3.5, -3, -inf)
498
+ 1922.50563031149
499
+ >>> exp(pi*3.5) / pi**3
500
+ 1922.50563031149
501
+
502
+ However, for noninteger `n`, the differentiation formula for the
503
+ exponential function must be modified to give the same result as the
504
+ Riemann-Liouville differintegral::
505
+
506
+ >>> x = mpf(3.5)
507
+ >>> c = pi
508
+ >>> n = 1+2*j
509
+ >>> differint(lambda x: exp(c*x), x, n)
510
+ (-123295.005390743 + 140955.117867654j)
511
+ >>> x**(-n) * exp(c)**x * (x*c)**n * gammainc(-n, 0, x*c) / gamma(-n)
512
+ (-123295.005390743 + 140955.117867654j)
513
+
514
+
515
+ """
516
+ m = max(int(ctx.ceil(ctx.re(n)))+1, 1)
517
+ r = m-n-1
518
+ g = lambda x: ctx.quad(lambda t: (x-t)**r * f(t), [x0, x])
519
+ return ctx.diff(g, x, m) / ctx.gamma(m-n)
520
+
521
+ @defun
522
+ def diffun(ctx, f, n=1, **options):
523
+ r"""
524
+ Given a function `f`, returns a function `g(x)` that evaluates the nth
525
+ derivative `f^{(n)}(x)`::
526
+
527
+ >>> from mpmath import *
528
+ >>> mp.dps = 15; mp.pretty = True
529
+ >>> cos2 = diffun(sin)
530
+ >>> sin2 = diffun(sin, 4)
531
+ >>> cos(1.3), cos2(1.3)
532
+ (0.267498828624587, 0.267498828624587)
533
+ >>> sin(1.3), sin2(1.3)
534
+ (0.963558185417193, 0.963558185417193)
535
+
536
+ The function `f` must support arbitrary precision evaluation.
537
+ See :func:`~mpmath.diff` for additional details and supported
538
+ keyword options.
539
+ """
540
+ if n == 0:
541
+ return f
542
+ def g(x):
543
+ return ctx.diff(f, x, n, **options)
544
+ return g
545
+
546
+ @defun
547
+ def taylor(ctx, f, x, n, **options):
548
+ r"""
549
+ Produces a degree-`n` Taylor polynomial around the point `x` of the
550
+ given function `f`. The coefficients are returned as a list.
551
+
552
+ >>> from mpmath import *
553
+ >>> mp.dps = 15; mp.pretty = True
554
+ >>> nprint(chop(taylor(sin, 0, 5)))
555
+ [0.0, 1.0, 0.0, -0.166667, 0.0, 0.00833333]
556
+
557
+ The coefficients are computed using high-order numerical
558
+ differentiation. The function must be possible to evaluate
559
+ to arbitrary precision. See :func:`~mpmath.diff` for additional details
560
+ and supported keyword options.
561
+
562
+ Note that to evaluate the Taylor polynomial as an approximation
563
+ of `f`, e.g. with :func:`~mpmath.polyval`, the coefficients must be reversed,
564
+ and the point of the Taylor expansion must be subtracted from
565
+ the argument:
566
+
567
+ >>> p = taylor(exp, 2.0, 10)
568
+ >>> polyval(p[::-1], 2.5 - 2.0)
569
+ 12.1824939606092
570
+ >>> exp(2.5)
571
+ 12.1824939607035
572
+
573
+ """
574
+ gen = enumerate(ctx.diffs(f, x, n, **options))
575
+ if options.get("chop", True):
576
+ return [ctx.chop(d)/ctx.factorial(i) for i, d in gen]
577
+ else:
578
+ return [d/ctx.factorial(i) for i, d in gen]
579
+
580
+ @defun
581
+ def pade(ctx, a, L, M):
582
+ r"""
583
+ Computes a Pade approximation of degree `(L, M)` to a function.
584
+ Given at least `L+M+1` Taylor coefficients `a` approximating
585
+ a function `A(x)`, :func:`~mpmath.pade` returns coefficients of
586
+ polynomials `P, Q` satisfying
587
+
588
+ .. math ::
589
+
590
+ P = \sum_{k=0}^L p_k x^k
591
+
592
+ Q = \sum_{k=0}^M q_k x^k
593
+
594
+ Q_0 = 1
595
+
596
+ A(x) Q(x) = P(x) + O(x^{L+M+1})
597
+
598
+ `P(x)/Q(x)` can provide a good approximation to an analytic function
599
+ beyond the radius of convergence of its Taylor series (example
600
+ from G.A. Baker 'Essentials of Pade Approximants' Academic Press,
601
+ Ch.1A)::
602
+
603
+ >>> from mpmath import *
604
+ >>> mp.dps = 15; mp.pretty = True
605
+ >>> one = mpf(1)
606
+ >>> def f(x):
607
+ ... return sqrt((one + 2*x)/(one + x))
608
+ ...
609
+ >>> a = taylor(f, 0, 6)
610
+ >>> p, q = pade(a, 3, 3)
611
+ >>> x = 10
612
+ >>> polyval(p[::-1], x)/polyval(q[::-1], x)
613
+ 1.38169105566806
614
+ >>> f(x)
615
+ 1.38169855941551
616
+
617
+ """
618
+ # To determine L+1 coefficients of P and M coefficients of Q
619
+ # L+M+1 coefficients of A must be provided
620
+ if len(a) < L+M+1:
621
+ raise ValueError("L+M+1 Coefficients should be provided")
622
+
623
+ if M == 0:
624
+ if L == 0:
625
+ return [ctx.one], [ctx.one]
626
+ else:
627
+ return a[:L+1], [ctx.one]
628
+
629
+ # Solve first
630
+ # a[L]*q[1] + ... + a[L-M+1]*q[M] = -a[L+1]
631
+ # ...
632
+ # a[L+M-1]*q[1] + ... + a[L]*q[M] = -a[L+M]
633
+ A = ctx.matrix(M)
634
+ for j in range(M):
635
+ for i in range(min(M, L+j+1)):
636
+ A[j, i] = a[L+j-i]
637
+ v = -ctx.matrix(a[(L+1):(L+M+1)])
638
+ x = ctx.lu_solve(A, v)
639
+ q = [ctx.one] + list(x)
640
+ # compute p
641
+ p = [0]*(L+1)
642
+ for i in range(L+1):
643
+ s = a[i]
644
+ for j in range(1, min(M,i) + 1):
645
+ s += q[j]*a[i-j]
646
+ p[i] = s
647
+ return p, q
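
The differentiation helpers added in this file (diff, taylor, pade) can likewise be tried directly; the sketch below is not part of the committed file and just restates calls from the docstrings above, assuming mpmath is installed.

    # Usage sketch based on the doctests above.
    from mpmath import mp, diff, taylor, polyval, pade, exp, sqrt

    mp.dps = 15

    # Numerical derivatives of f(x) = x**2 + x at x = 1 (expect 3.0 and 2.0).
    print(diff(lambda x: x**2 + x, 1.0))
    print(diff(lambda x: x**2 + x, 1.0, 2))

    # Taylor coefficients of exp around 2.0, evaluated as a polynomial at 2.5.
    p = taylor(exp, 2.0, 10)
    print(polyval(p[::-1], 0.5), exp(2.5))

    # (3, 3) Pade approximant to sqrt((1+2x)/(1+x)) built from its Taylor series.
    a = taylor(lambda x: sqrt((1 + 2*x)/(1 + x)), 0, 6)
    p, q = pade(a, 3, 3)
    x = 10
    print(polyval(p[::-1], x) / polyval(q[::-1], x))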
llmeval-env/lib/python3.10/site-packages/mpmath/calculus/extrapolation.py ADDED
@@ -0,0 +1,2115 @@
1
+ try:
2
+ from itertools import izip
3
+ except ImportError:
4
+ izip = zip
5
+
6
+ from ..libmp.backend import xrange
7
+ from .calculus import defun
8
+
9
+ try:
10
+ next = next
11
+ except NameError:
12
+ next = lambda _: _.next()
13
+
14
+ @defun
15
+ def richardson(ctx, seq):
16
+ r"""
17
+ Given a list ``seq`` of the first `N` elements of a slowly convergent
18
+ infinite sequence, :func:`~mpmath.richardson` computes the `N`-term
19
+ Richardson extrapolate for the limit.
20
+
21
+ :func:`~mpmath.richardson` returns `(v, c)` where `v` is the estimated
22
+ limit and `c` is the magnitude of the largest weight used during the
23
+ computation. The weight provides an estimate of the precision
24
+ lost to cancellation. Due to cancellation effects, the sequence must
25
+ be typically be computed at a much higher precision than the target
26
+ accuracy of the extrapolation.
27
+
28
+ **Applicability and issues**
29
+
30
+ The `N`-step Richardson extrapolation algorithm used by
31
+ :func:`~mpmath.richardson` is described in [1].
32
+
33
+ Richardson extrapolation only works for a specific type of sequence,
34
+ namely one converging like partial sums of
35
+ `P(1)/Q(1) + P(2)/Q(2) + \ldots` where `P` and `Q` are polynomials.
36
+ When the sequence does not convergence at such a rate
37
+ :func:`~mpmath.richardson` generally produces garbage.
38
+
39
+ Richardson extrapolation has the advantage of being fast: the `N`-term
40
+ extrapolate requires only `O(N)` arithmetic operations, and usually
41
+ produces an estimate that is accurate to `O(N)` digits. Contrast with
42
+ the Shanks transformation (see :func:`~mpmath.shanks`), which requires
43
+ `O(N^2)` operations.
44
+
45
+ :func:`~mpmath.richardson` is unable to produce an estimate for the
46
+ approximation error. One way to estimate the error is to perform
47
+ two extrapolations with slightly different `N` and comparing the
48
+ results.
49
+
50
+ Richardson extrapolation does not work for oscillating sequences.
51
+ As a simple workaround, :func:`~mpmath.richardson` detects if the last
52
+ three elements do not differ monotonically, and in that case
53
+ applies extrapolation only to the even-index elements.
54
+
55
+ **Example**
56
+
57
+ Applying Richardson extrapolation to the Leibniz series for `\pi`::
58
+
59
+ >>> from mpmath import *
60
+ >>> mp.dps = 30; mp.pretty = True
61
+ >>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m))
62
+ ... for m in range(1,30)]
63
+ >>> v, c = richardson(S[:10])
64
+ >>> v
65
+ 3.2126984126984126984126984127
66
+ >>> nprint([v-pi, c])
67
+ [0.0711058, 2.0]
68
+
69
+ >>> v, c = richardson(S[:30])
70
+ >>> v
71
+ 3.14159265468624052829954206226
72
+ >>> nprint([v-pi, c])
73
+ [1.09645e-9, 20833.3]
74
+
75
+ **References**
76
+
77
+ 1. [BenderOrszag]_ pp. 375-376
78
+
79
+ """
80
+ if len(seq) < 3:
81
+ raise ValueError("seq should be of minimum length 3")
82
+ if ctx.sign(seq[-1]-seq[-2]) != ctx.sign(seq[-2]-seq[-3]):
83
+ seq = seq[::2]
84
+ N = len(seq)//2-1
85
+ s = ctx.zero
86
+ # The general weight is c[k] = (N+k)**N * (-1)**(k+N) / k! / (N-k)!
87
+ # To avoid repeated factorials, we simplify the quotient
88
+ # of successive weights to obtain a recurrence relation
89
+ c = (-1)**N * N**N / ctx.mpf(ctx._ifac(N))
90
+ maxc = 1
91
+ for k in xrange(N+1):
92
+ s += c * seq[N+k]
93
+ maxc = max(abs(c), maxc)
94
+ c *= (k-N)*ctx.mpf(k+N+1)**N
95
+ c /= ((1+k)*ctx.mpf(k+N)**N)
96
+ return s, maxc
97
+
98
+ @defun
99
+ def shanks(ctx, seq, table=None, randomized=False):
100
+ r"""
101
+ Given a list ``seq`` of the first `N` elements of a slowly
102
+ convergent infinite sequence `(A_k)`, :func:`~mpmath.shanks` computes the iterated
103
+ Shanks transformation `S(A), S(S(A)), \ldots, S^{N/2}(A)`. The Shanks
104
+ transformation often provides strong convergence acceleration,
105
+ especially if the sequence is oscillating.
106
+
107
+ The iterated Shanks transformation is computed using the Wynn
108
+ epsilon algorithm (see [1]). :func:`~mpmath.shanks` returns the full
109
+ epsilon table generated by Wynn's algorithm, which can be read
110
+ off as follows:
111
+
112
+ * The table is a list of lists forming a lower triangular matrix,
113
+ where higher row and column indices correspond to more accurate
114
+ values.
115
+ * The columns with even index hold dummy entries (required for the
116
+ computation) and the columns with odd index hold the actual
117
+ extrapolates.
118
+ * The last element in the last row is typically the most
119
+ accurate estimate of the limit.
120
+ * The difference to the third last element in the last row
121
+ provides an estimate of the approximation error.
122
+ * The magnitude of the second last element provides an estimate
123
+ of the numerical accuracy lost to cancellation.
124
+
125
+ For convenience, so the extrapolation is stopped at an odd index
126
+ so that ``shanks(seq)[-1][-1]`` always gives an estimate of the
127
+ limit.
128
+
129
+ Optionally, an existing table can be passed to :func:`~mpmath.shanks`.
130
+ This can be used to efficiently extend a previous computation after
131
+ new elements have been appended to the sequence. The table will
132
+ then be updated in-place.
133
+
134
+ **The Shanks transformation**
135
+
136
+ The Shanks transformation is defined as follows (see [2]): given
137
+ the input sequence `(A_0, A_1, \ldots)`, the transformed sequence is
138
+ given by
139
+
140
+ .. math ::
141
+
142
+ S(A_k) = \frac{A_{k+1}A_{k-1}-A_k^2}{A_{k+1}+A_{k-1}-2 A_k}
143
+
144
+ The Shanks transformation gives the exact limit `A_{\infty}` in a
145
+ single step if `A_k = A + a q^k`. Note in particular that it
146
+ extrapolates the exact sum of a geometric series in a single step.
147
+
148
+ Applying the Shanks transformation once often improves convergence
149
+ substantially for an arbitrary sequence, but the optimal effect is
150
+ obtained by applying it iteratively:
151
+ `S(S(A_k)), S(S(S(A_k))), \ldots`.
152
+
153
+ Wynn's epsilon algorithm provides an efficient way to generate
154
+ the table of iterated Shanks transformations. It reduces the
155
+ computation of each element to essentially a single division, at
156
+ the cost of requiring dummy elements in the table. See [1] for
157
+ details.
158
+
159
+ **Precision issues**
160
+
161
+ Due to cancellation effects, the sequence must be typically be
162
+ computed at a much higher precision than the target accuracy
163
+ of the extrapolation.
164
+
165
+ If the Shanks transformation converges to the exact limit (such
166
+ as if the sequence is a geometric series), then a division by
167
+ zero occurs. By default, :func:`~mpmath.shanks` handles this case by
168
+ terminating the iteration and returning the table it has
169
+ generated so far. With *randomized=True*, it will instead
170
+ replace the zero by a pseudorandom number close to zero.
171
+ (TODO: find a better solution to this problem.)
172
+
173
+ **Examples**
174
+
175
+ We illustrate by applying Shanks transformation to the Leibniz
176
+ series for `\pi`::
177
+
178
+ >>> from mpmath import *
179
+ >>> mp.dps = 50
180
+ >>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m))
181
+ ... for m in range(1,30)]
182
+ >>>
183
+ >>> T = shanks(S[:7])
184
+ >>> for row in T:
185
+ ... nprint(row)
186
+ ...
187
+ [-0.75]
188
+ [1.25, 3.16667]
189
+ [-1.75, 3.13333, -28.75]
190
+ [2.25, 3.14524, 82.25, 3.14234]
191
+ [-2.75, 3.13968, -177.75, 3.14139, -969.937]
192
+ [3.25, 3.14271, 327.25, 3.14166, 3515.06, 3.14161]
193
+
194
+ The extrapolated accuracy is about 4 digits, and about 4 digits
195
+ may have been lost due to cancellation::
196
+
197
+ >>> L = T[-1]
198
+ >>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])])
199
+ [2.22532e-5, 4.78309e-5, 3515.06]
200
+
201
+ Now we extend the computation::
202
+
203
+ >>> T = shanks(S[:25], T)
204
+ >>> L = T[-1]
205
+ >>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])])
206
+ [3.75527e-19, 1.48478e-19, 2.96014e+17]
207
+
208
+ The value for pi is now accurate to 18 digits. About 18 digits may
209
+ also have been lost to cancellation.
210
+
211
+ Here is an example with a geometric series, where the convergence
212
+ is immediate (the sum is exactly 1)::
213
+
214
+ >>> mp.dps = 15
215
+ >>> for row in shanks([0.5, 0.75, 0.875, 0.9375, 0.96875]):
216
+ ... nprint(row)
217
+ [4.0]
218
+ [8.0, 1.0]
219
+
220
+ **References**
221
+
222
+ 1. [GravesMorris]_
223
+
224
+ 2. [BenderOrszag]_ pp. 368-375
225
+
226
+ """
227
+ if len(seq) < 2:
228
+ raise ValueError("seq should be of minimum length 2")
229
+ if table:
230
+ START = len(table)
231
+ else:
232
+ START = 0
233
+ table = []
234
+ STOP = len(seq) - 1
235
+ if STOP & 1:
236
+ STOP -= 1
237
+ one = ctx.one
238
+ eps = +ctx.eps
239
+ if randomized:
240
+ from random import Random
241
+ rnd = Random()
242
+ rnd.seed(START)
243
+ for i in xrange(START, STOP):
244
+ row = []
245
+ for j in xrange(i+1):
246
+ if j == 0:
247
+ a, b = 0, seq[i+1]-seq[i]
248
+ else:
249
+ if j == 1:
250
+ a = seq[i]
251
+ else:
252
+ a = table[i-1][j-2]
253
+ b = row[j-1] - table[i-1][j-1]
254
+ if not b:
255
+ if randomized:
256
+ b = (1 + rnd.getrandbits(10))*eps
257
+ elif i & 1:
258
+ return table[:-1]
259
+ else:
260
+ return table
261
+ row.append(a + one/b)
262
+ table.append(row)
263
+ return table
264
+
265
+
266
+ class levin_class:
267
+ # levin: Copyright 2013 Timo Hartmann (thartmann15 at gmail.com)
268
+ r"""
269
+ This interface implements Levin's (nonlinear) sequence transformation for
270
+ convergence acceleration and summation of divergent series. It performs
271
+ better than the Shanks/Wynn-epsilon algorithm for logarithmic convergent
272
+ or alternating divergent series.
273
+
274
+ Let *A* be the series we want to sum:
275
+
276
+ .. math ::
277
+
278
+ A = \sum_{k=0}^{\infty} a_k
279
+
280
+ Attention: all `a_k` must be non-zero!
281
+
282
+ Let `s_n` be the partial sums of this series:
283
+
284
+ .. math ::
285
+
286
+ s_n = \sum_{k=0}^n a_k.
287
+
288
+ **Methods**
289
+
290
+ Calling ``levin`` returns an object with the following methods.
291
+
292
+ ``update(...)`` works with the list of individual terms `a_k` of *A*, and
293
+ ``update_step(...)`` works with the list of partial sums `s_k` of *A*:
294
+
295
+ .. code ::
296
+
297
+ v, e = ...update([a_0, a_1,..., a_k])
298
+ v, e = ...update_psum([s_0, s_1,..., s_k])
299
+
300
+ ``step(...)`` works with the individual terms `a_k` and ``step_psum(...)``
301
+ works with the partial sums `s_k`:
302
+
303
+ .. code ::
304
+
305
+ v, e = ...step(a_k)
306
+ v, e = ...step_psum(s_k)
307
+
308
+ *v* is the current estimate for *A*, and *e* is an error estimate which is
309
+ simply the difference between the current estimate and the last estimate.
310
+ One should not mix ``update``, ``update_psum``, ``step`` and ``step_psum``.
311
+
312
+ **A word of caution**
313
+
314
+ One can only hope for good results (i.e. convergence acceleration or
315
+ resummation) if the `s_n` have some well defind asymptotic behavior for
316
+ large `n` and are not erratic or random. Furthermore one usually needs very
317
+ high working precision because of the numerical cancellation. If the working
318
+ precision is insufficient, levin may produce silently numerical garbage.
319
+ Furthermore even if the Levin-transformation converges, in the general case
320
+ there is no proof that the result is mathematically sound. Only for very
321
+ special classes of problems one can prove that the Levin-transformation
322
+ converges to the expected result (for example Stieltjes-type integrals).
323
+ Furthermore the Levin-transform is quite expensive (i.e. slow) in comparison
324
+ to Shanks/Wynn-epsilon, Richardson & co.
325
+ In summary one can say that the Levin-transformation is powerful but
326
+ unreliable and that it may need a copious amount of working precision.
327
+
328
+ The Levin transform has several variants differing in the choice of weights.
329
+ Some variants are better suited for the possible flavours of convergence
330
+ behaviour of *A* than other variants:
331
+
332
+ .. code ::
333
+
334
+ convergence behaviour levin-u levin-t levin-v shanks/wynn-epsilon
335
+
336
+ logarithmic + - + -
337
+ linear + + + +
338
+ alternating divergent + + + +
339
+
340
+ "+" means the variant is suitable,"-" means the variant is not suitable;
341
+ for comparison the Shanks/Wynn-epsilon transform is listed, too.
342
+
343
+ The variant is controlled though the variant keyword (i.e. ``variant="u"``,
344
+ ``variant="t"`` or ``variant="v"``). Overall "u" is probably the best choice.
345
+
346
+ Finally it is possible to use the Sidi-S transform instead of the Levin transform
347
+ by using the keyword ``method='sidi'``. The Sidi-S transform works better than the
348
+ Levin transformation for some divergent series (see the examples).
349
+
350
+ Parameters:
351
+
352
+ .. code ::
353
+
354
+ method "levin" or "sidi" chooses either the Levin or the Sidi-S transformation
355
+ variant "u","t" or "v" chooses the weight variant.
356
+
357
+ The Levin transform is also accessible through the nsum interface.
358
+ ``method="l"`` or ``method="levin"`` selects the normal Levin transform, while
359
+ ``method="sidi"``
360
+ selects the Sidi-S transform. The variant is in both cases selected through the
361
+ levin_variant keyword. The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise
362
+ it will miss the point where the Levin transform converges, resulting in numerical
363
+ overflow/garbage. For highly divergent series a copious amount of working precision
364
+ must be chosen.
365
+
366
+ **Examples**
367
+
368
+ First we sum the zeta function::
369
+
370
+ >>> from mpmath import mp
371
+ >>> mp.prec = 53
372
+ >>> eps = mp.mpf(mp.eps)
373
+ >>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision
374
+ ... L = mp.levin(method = "levin", variant = "u")
375
+ ... S, s, n = [], 0, 1
376
+ ... while 1:
377
+ ... s += mp.one / (n * n)
378
+ ... n += 1
379
+ ... S.append(s)
380
+ ... v, e = L.update_psum(S)
381
+ ... if e < eps:
382
+ ... break
383
+ ... if n > 1000: raise RuntimeError("iteration limit exceeded")
384
+ >>> print(mp.chop(v - mp.pi ** 2 / 6))
385
+ 0.0
386
+ >>> w = mp.nsum(lambda n: 1 / (n*n), [1, mp.inf], method = "levin", levin_variant = "u")
387
+ >>> print(mp.chop(v - w))
388
+ 0.0
389
+
390
+ Now we sum the zeta function outside its range of convergence
391
+ (attention: This does not work at the negative integers!)::
392
+
393
+ >>> eps = mp.mpf(mp.eps)
394
+ >>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision
395
+ ... L = mp.levin(method = "levin", variant = "v")
396
+ ... A, n = [], 1
397
+ ... while 1:
398
+ ... s = mp.mpf(n) ** (2 + 3j)
399
+ ... n += 1
400
+ ... A.append(s)
401
+ ... v, e = L.update(A)
402
+ ... if e < eps:
403
+ ... break
404
+ ... if n > 1000: raise RuntimeError("iteration limit exceeded")
405
+ >>> print(mp.chop(v - mp.zeta(-2-3j)))
406
+ 0.0
407
+ >>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v")
408
+ >>> print(mp.chop(v - w))
409
+ 0.0
410
+
411
+ Now we sum the divergent asymptotic expansion of an integral related to the
412
+ exponential integral (see also [2] p.373). The Sidi-S transform works best here::
413
+
414
+ >>> z = mp.mpf(10)
415
+ >>> exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf])
416
+ >>> # exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral
417
+ >>> eps = mp.mpf(mp.eps)
418
+ >>> with mp.extraprec(2 * mp.prec): # high working precisions are mandatory for divergent resummation
419
+ ... L = mp.levin(method = "sidi", variant = "t")
420
+ ... n = 0
421
+ ... while 1:
422
+ ... s = (-1)**n * mp.fac(n) * z ** (-n)
423
+ ... v, e = L.step(s)
424
+ ... n += 1
425
+ ... if e < eps:
426
+ ... break
427
+ ... if n > 1000: raise RuntimeError("iteration limit exceeded")
428
+ >>> print(mp.chop(v - exact))
429
+ 0.0
430
+ >>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t")
431
+ >>> print(mp.chop(v - w))
432
+ 0.0
433
+
434
+ Another highly divergent integral is also summable::
435
+
436
+ >>> z = mp.mpf(2)
437
+ >>> eps = mp.mpf(mp.eps)
438
+ >>> exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi)
439
+ >>> # exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral
440
+ >>> with mp.extraprec(7 * mp.prec): # we need copious amount of precision to sum this highly divergent series
441
+ ... L = mp.levin(method = "levin", variant = "t")
442
+ ... n, s = 0, 0
443
+ ... while 1:
444
+ ... s += (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n))
445
+ ... n += 1
446
+ ... v, e = L.step_psum(s)
447
+ ... if e < eps:
448
+ ... break
449
+ ... if n > 1000: raise RuntimeError("iteration limit exceeded")
450
+ >>> print(mp.chop(v - exact))
451
+ 0.0
452
+ >>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)),
453
+ ... [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)])
454
+ >>> print(mp.chop(v - w))
455
+ 0.0
456
+
457
+ These examples run with 15-20 decimal digits precision. For higher precision the
458
+ working precision must be raised.
459
+
460
+ **Examples for nsum**
461
+
462
+ Here we calculate Euler's constant as the constant term in the Laurent
463
+ expansion of `\zeta(s)` at `s=1`. This sum converges extremely slowly because of
464
+ the logarithmic convergence behaviour of the Dirichlet series for zeta::
465
+
466
+ >>> mp.dps = 30
467
+ >>> z = mp.mpf(10) ** (-10)
468
+ >>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "l") - 1 / z
469
+ >>> print(mp.chop(a - mp.euler, tol = 1e-10))
470
+ 0.0
471
+
472
+ The Sidi-S transform performs excellently for the alternating series of `\log(2)`::
473
+
474
+ >>> a = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "sidi")
475
+ >>> print(mp.chop(a - mp.log(2)))
476
+ 0.0
477
+
478
+ Hypergeometric series can also be summed outside their range of convergence.
479
+ The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise it will miss the
480
+ point where the Levin transform converges resulting in numerical overflow/garbage::
481
+
482
+ >>> z = 2 + 1j
483
+ >>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z)
484
+ >>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n))
485
+ >>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)])
486
+ >>> print(mp.chop(exact-v))
487
+ 0.0
488
+
489
+ References:
490
+
491
+ [1] E.J. Weniger - "Nonlinear Sequence Transformations for the Acceleration of
492
+ Convergence and the Summation of Divergent Series" arXiv:math/0306302
493
+
494
+ [2] A. Sidi - "Practical Extrapolation Methods"
495
+
496
+ [3] H.H.H. Homeier - "Scalar Levin-Type Sequence Transformations" arXiv:math/0005209
497
+
498
+ """
499
+
500
+ def __init__(self, method = "levin", variant = "u"):
501
+ self.variant = variant
502
+ self.n = 0
503
+ self.a0 = 0
504
+ self.theta = 1
505
+ self.A = []
506
+ self.B = []
507
+ self.last = 0
508
+ self.last_s = False
509
+
510
+ if method == "levin":
511
+ self.factor = self.factor_levin
512
+ elif method == "sidi":
513
+ self.factor = self.factor_sidi
514
+ else:
515
+ raise ValueError("levin: unknown method \"%s\"" % method)
516
+
517
+ def factor_levin(self, i):
518
+ # original levin
519
+ # [1] p.50,e.7.5-7 (with n-j replaced by i)
520
+ return (self.theta + i) * (self.theta + self.n - 1) ** (self.n - i - 2) / self.ctx.mpf(self.theta + self.n) ** (self.n - i - 1)
521
+
522
+ def factor_sidi(self, i):
523
+ # sidi analogon to levin (factorial series)
524
+ # [1] p.59,e.8.3-16 (with n-j replaced by i)
525
+ return (self.theta + self.n - 1) * (self.theta + self.n - 2) / self.ctx.mpf((self.theta + 2 * self.n - i - 2) * (self.theta + 2 * self.n - i - 3))
526
+
527
+ def run(self, s, a0, a1 = 0):
528
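+ # One step of the transformation: compute the variant-dependent weight w,
+ # append s/w and 1/w to the numerator table A and denominator table B,
+ # then update both tables with the recursion from [1]; the current
+ # estimate of the series is A[0]/B[0].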
+ if self.variant=="t":
529
+ # levin t
530
+ w=a0
531
+ elif self.variant=="u":
532
+ # levin u
533
+ w=a0*(self.theta+self.n)
534
+ elif self.variant=="v":
535
+ # levin v
536
+ w=a0*a1/(a0-a1)
537
+ else:
538
+ assert False, "unknown variant"
539
+
540
+ if w==0:
541
+ raise ValueError("levin: zero weight")
542
+
543
+ self.A.append(s/w)
544
+ self.B.append(1/w)
545
+
546
+ for i in range(self.n-1,-1,-1):
547
+ if i==self.n-1:
548
+ f=1
549
+ else:
550
+ f=self.factor(i)
551
+
552
+ self.A[i]=self.A[i+1]-f*self.A[i]
553
+ self.B[i]=self.B[i+1]-f*self.B[i]
554
+
555
+ self.n+=1
556
+
557
+ ###########################################################################
558
+
559
+ def update_psum(self,S):
560
+ """
561
+ This routine applies the convergence acceleration to the list of partial sums.
562
+
563
+ A = sum(a_k, k = 0..infinity)
564
+ s_n = sum(a_k, k = 0..n)
565
+
566
+ v, e = ...update_psum([s_0, s_1,..., s_k])
567
+
568
+ output:
569
+ v current estimate of the series A
570
+ e an error estimate which is simply the difference between the current
571
+ estimate and the last estimate.
572
+ """
573
+
574
+ if self.variant!="v":
575
+ if self.n==0:
576
+ self.run(S[0],S[0])
577
+ while self.n<len(S):
578
+ self.run(S[self.n],S[self.n]-S[self.n-1])
579
+ else:
580
+ if len(S)==1:
581
+ self.last=0
582
+ return S[0],abs(S[0])
583
+
584
+ if self.n==0:
585
+ self.a1=S[1]-S[0]
586
+ self.run(S[0],S[0],self.a1)
587
+
588
+ while self.n<len(S)-1:
589
+ na1=S[self.n+1]-S[self.n]
590
+ self.run(S[self.n],self.a1,na1)
591
+ self.a1=na1
592
+
593
+ value=self.A[0]/self.B[0]
594
+ err=abs(value-self.last)
595
+ self.last=value
596
+
597
+ return value,err
598
+
599
+ def update(self,X):
600
+ """
601
+ This routine applies the convergence acceleration to the list of individual terms.
602
+
603
+ A = sum(a_k, k = 0..infinity)
604
+
605
+ v, e = ...update([a_0, a_1,..., a_k])
606
+
607
+ output:
608
+ v current estimate of the series A
609
+ e an error estimate which is simply the difference between the current
610
+ estimate and the last estimate.
611
+ """
612
+
613
+ if self.variant!="v":
614
+ if self.n==0:
615
+ self.s=X[0]
616
+ self.run(self.s,X[0])
617
+ while self.n<len(X):
618
+ self.s+=X[self.n]
619
+ self.run(self.s,X[self.n])
620
+ else:
621
+ if len(X)==1:
622
+ self.last=0
623
+ return X[0],abs(X[0])
624
+
625
+ if self.n==0:
626
+ self.s=X[0]
627
+ self.run(self.s,X[0],X[1])
628
+
629
+ while self.n<len(X)-1:
630
+ self.s+=X[self.n]
631
+ self.run(self.s,X[self.n],X[self.n+1])
632
+
633
+ value=self.A[0]/self.B[0]
634
+ err=abs(value-self.last)
635
+ self.last=value
636
+
637
+ return value,err
638
+
639
+ ###########################################################################
640
+
641
+ def step_psum(self,s):
642
+ """
643
+ This routine applies the convergence acceleration to the partial sums.
644
+
645
+ A = sum(a_k, k = 0..infinity)
646
+ s_n = sum(a_k, k = 0..n)
647
+
648
+ v, e = ...step_psum(s_k)
649
+
650
+ output:
651
+ v current estimate of the series A
652
+ e an error estimate which is simply the difference between the current
653
+ estimate and the last estimate.
654
+ """
655
+
656
+ if self.variant!="v":
657
+ if self.n==0:
658
+ self.last_s=s
659
+ self.run(s,s)
660
+ else:
661
+ self.run(s,s-self.last_s)
662
+ self.last_s=s
663
+ else:
664
+ if isinstance(self.last_s,bool):
665
+ self.last_s=s
666
+ self.last_w=s
667
+ self.last=0
668
+ return s,abs(s)
669
+
670
+ na1=s-self.last_s
671
+ self.run(self.last_s,self.last_w,na1)
672
+ self.last_w=na1
673
+ self.last_s=s
674
+
675
+ value=self.A[0]/self.B[0]
676
+ err=abs(value-self.last)
677
+ self.last=value
678
+
679
+ return value,err
680
+
681
+ def step(self,x):
682
+ """
683
+ This routine applies the convergence acceleration to the individual terms.
684
+
685
+ A = sum(a_k, k = 0..infinity)
686
+
687
+ v, e = ...step(a_k)
688
+
689
+ output:
690
+ v current estimate of the series A
691
+ e an error estimate which is simply the difference between the current
692
+ estimate and the last estimate.
693
+ """
694
+
695
+ if self.variant!="v":
696
+ if self.n==0:
697
+ self.s=x
698
+ self.run(self.s,x)
699
+ else:
700
+ self.s+=x
701
+ self.run(self.s,x)
702
+ else:
703
+ if isinstance(self.last_s,bool):
704
+ self.last_s=x
705
+ self.s=0
706
+ self.last=0
707
+ return x,abs(x)
708
+
709
+ self.s+=self.last_s
710
+ self.run(self.s,self.last_s,x)
711
+ self.last_s=x
712
+
713
+ value=self.A[0]/self.B[0]
714
+ err=abs(value-self.last)
715
+ self.last=value
716
+
717
+ return value,err
718
+
719
+ def levin(ctx, method = "levin", variant = "u"):
720
+ L = levin_class(method = method, variant = variant)
721
+ L.ctx = ctx
722
+ return L
723
+
724
+ levin.__doc__ = levin_class.__doc__
725
+ defun(levin)
726
+
727
+
728
+ class cohen_alt_class:
729
+ # cohen_alt: Copyright 2013 Timo Hartmann (thartmann15 at gmail.com)
730
+ r"""
731
+ This interface implements the convergence acceleration of alternating series
732
+ as described in H. Cohen, F.R. Villegas, D. Zagier - "Convergence Acceleration
733
+ of Alternating Series". This series transformation works well only if the
734
+ individual terms of the series have an alternating sign. It belongs to the
735
+ class of linear series transformations (in contrast to the Shanks/Wynn-epsilon
736
+ or Levin transform). This series transformation is also able to sum some types
737
+ of divergent series. See the paper for the conditions under which this
738
+ resummation is mathematically sound.
739
+
740
+ Let *A* be the series we want to sum:
741
+
742
+ .. math ::
743
+
744
+ A = \sum_{k=0}^{\infty} a_k
745
+
746
+ Let `s_n` be the partial sums of this series:
747
+
748
+ .. math ::
749
+
750
+ s_n = \sum_{k=0}^n a_k.
751
+
752
+
753
+ **Interface**
754
+
755
+ Calling ``cohen_alt`` returns an object with the following methods.
756
+
757
+ ``update(...)`` works with the list of individual terms `a_k` and
758
+ ``update_psum(...)`` works with the list of partial sums `s_k`:
759
+
760
+ .. code ::
761
+
762
+ v, e = ...update([a_0, a_1,..., a_k])
763
+ v, e = ...update_psum([s_0, s_1,..., s_k])
764
+
765
+ *v* is the current estimate for *A*, and *e* is an error estimate which is
766
+ simply the difference between the current estimate and the last estimate.
767
+
768
+ **Examples**
769
+
770
+ Here we compute the alternating zeta function using ``update_psum``::
771
+
772
+ >>> from mpmath import mp
773
+ >>> AC = mp.cohen_alt()
774
+ >>> S, s, n = [], 0, 1
775
+ >>> while 1:
776
+ ... s += -((-1) ** n) * mp.one / (n * n)
777
+ ... n += 1
778
+ ... S.append(s)
779
+ ... v, e = AC.update_psum(S)
780
+ ... if e < mp.eps:
781
+ ... break
782
+ ... if n > 1000: raise RuntimeError("iteration limit exceeded")
783
+ >>> print(mp.chop(v - mp.pi ** 2 / 12))
784
+ 0.0
785
+
786
+ Here we compute the product `\prod_{n=1}^{\infty} \Gamma(1+1/(2n-1)) / \Gamma(1+1/(2n))`::
787
+
788
+ >>> A = []
789
+ >>> AC = mp.cohen_alt()
790
+ >>> n = 1
791
+ >>> while 1:
792
+ ... A.append( mp.loggamma(1 + mp.one / (2 * n - 1)))
793
+ ... A.append(-mp.loggamma(1 + mp.one / (2 * n)))
794
+ ... n += 1
795
+ ... v, e = AC.update(A)
796
+ ... if e < mp.eps:
797
+ ... break
798
+ ... if n > 1000: raise RuntimeError("iteration limit exceeded")
799
+ >>> v = mp.exp(v)
800
+ >>> print(mp.chop(v - 1.06215090557106, tol = 1e-12))
801
+ 0.0
802
+
803
+ ``cohen_alt`` is also accessible through the :func:`~mpmath.nsum` interface::
804
+
805
+ >>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a")
806
+ >>> print(mp.chop(v - mp.log(2)))
807
+ 0.0
808
+ >>> v = mp.nsum(lambda n: (-1)**n / (2 * n + 1), [0, mp.inf], method = "a")
809
+ >>> print(mp.chop(v - mp.pi / 4))
810
+ 0.0
811
+ >>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a")
812
+ >>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1)))
813
+ 0.0
814
+
815
+ """
816
+
817
+ def __init__(self):
818
+ self.last=0
819
+
820
+ def update(self, A):
821
+ """
822
+ This routine applies the convergence acceleration to the list of individual terms.
823
+
824
+ A = sum(a_k, k = 0..infinity)
825
+
826
+ v, e = ...update([a_0, a_1,..., a_k])
827
+
828
+ output:
829
+ v current estimate of the series A
830
+ e an error estimate which is simply the difference between the current
831
+ estimate and the last estimate.
832
+ """
833
+
834
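+ # Coefficients follow the Cohen/Rodriguez Villegas/Zagier scheme:
+ # d = ((3+sqrt(8))**n + (3+sqrt(8))**(-n)) / 2, and the b, c recurrences
+ # build the Chebyshev-related weights; the error of the accelerated sum
+ # decreases roughly like (3+sqrt(8))**(-n).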
+ n = len(A)
835
+ d = (3 + self.ctx.sqrt(8)) ** n
836
+ d = (d + 1 / d) / 2
837
+ b = -self.ctx.one
838
+ c = -d
839
+ s = 0
840
+
841
+ for k in xrange(n):
842
+ c = b - c
843
+ if k % 2 == 0:
844
+ s = s + c * A[k]
845
+ else:
846
+ s = s - c * A[k]
847
+ b = 2 * (k + n) * (k - n) * b / ((2 * k + 1) * (k + self.ctx.one))
848
+
849
+ value = s / d
850
+
851
+ err = abs(value - self.last)
852
+ self.last = value
853
+
854
+ return value, err
855
+
856
+ def update_psum(self, S):
857
+ """
858
+ This routine applies the convergence acceleration to the list of partial sums.
859
+
860
+ A = sum(a_k, k = 0..infinity)
861
+ s_n = sum(a_k ,k = 0..n)
862
+
863
+ v, e = ...update_psum([s_0, s_1,..., s_k])
864
+
865
+ output:
866
+ v current estimate of the series A
867
+ e an error estimate which is simply the difference between the current
868
+ estimate and the last estimate.
869
+ """
870
+
871
+ n = len(S)
872
+ d = (3 + self.ctx.sqrt(8)) ** n
873
+ d = (d + 1 / d) / 2
874
+ b = self.ctx.one
875
+ s = 0
876
+
877
+ for k in xrange(n):
878
+ b = 2 * (n + k) * (n - k) * b / ((2 * k + 1) * (k + self.ctx.one))
879
+ s += b * S[k]
880
+
881
+ value = s / d
882
+
883
+ err = abs(value - self.last)
884
+ self.last = value
885
+
886
+ return value, err
887
+
888
+ def cohen_alt(ctx):
889
+ L = cohen_alt_class()
890
+ L.ctx = ctx
891
+ return L
892
+
893
+ cohen_alt.__doc__ = cohen_alt_class.__doc__
894
+ defun(cohen_alt)
895
+
896
+
897
+ @defun
898
+ def sumap(ctx, f, interval, integral=None, error=False):
899
+ r"""
900
+ Evaluates an infinite series of an analytic summand *f* using the
901
+ Abel-Plana formula
902
+
903
+ .. math ::
904
+
905
+ \sum_{k=0}^{\infty} f(k) = \int_0^{\infty} f(t) dt + \frac{1}{2} f(0) +
906
+ i \int_0^{\infty} \frac{f(it)-f(-it)}{e^{2\pi t}-1} dt.
907
+
908
+ Unlike the Euler-Maclaurin formula (see :func:`~mpmath.sumem`),
909
+ the Abel-Plana formula does not require derivatives. However,
910
+ it only works when `|f(it)-f(-it)|` does not
911
+ increase too rapidly with `t`.
912
+
913
+ **Examples**
914
+
915
+ The Abel-Plana formula is particularly useful when the summand
916
+ decreases like a power of `k`; for example when the sum is a pure
917
+ zeta function::
918
+
919
+ >>> from mpmath import *
920
+ >>> mp.dps = 25; mp.pretty = True
921
+ >>> sumap(lambda k: 1/k**2.5, [1,inf])
922
+ 1.34148725725091717975677
923
+ >>> zeta(2.5)
924
+ 1.34148725725091717975677
925
+ >>> sumap(lambda k: 1/(k+1j)**(2.5+2.5j), [1,inf])
926
+ (-3.385361068546473342286084 - 0.7432082105196321803869551j)
927
+ >>> zeta(2.5+2.5j, 1+1j)
928
+ (-3.385361068546473342286084 - 0.7432082105196321803869551j)
929
+
930
+ If the series is alternating, numerical quadrature along the real
931
+ line is likely to give poor results, so it is better to evaluate
932
+ the first term symbolically whenever possible:
933
+
934
+ >>> n=3; z=-0.75
935
+ >>> I = expint(n,-log(z))
936
+ >>> chop(sumap(lambda k: z**k / k**n, [1,inf], integral=I))
937
+ -0.6917036036904594510141448
938
+ >>> polylog(n,z)
939
+ -0.6917036036904594510141448
940
+
941
+ """
942
+ prec = ctx.prec
943
+ try:
944
+ ctx.prec += 10
945
+ a, b = interval
946
+ if b != ctx.inf:
947
+ raise ValueError("b should be equal to ctx.inf")
948
+ g = lambda x: f(x+a)
949
+ if integral is None:
950
+ i1, err1 = ctx.quad(g, [0,ctx.inf], error=True)
951
+ else:
952
+ i1, err1 = integral, 0
953
+ j = ctx.j
954
+ p = ctx.pi * 2
955
+ if ctx._is_real_type(i1):
956
+ h = lambda t: -2 * ctx.im(g(j*t)) / ctx.expm1(p*t)
957
+ else:
958
+ h = lambda t: j*(g(j*t)-g(-j*t)) / ctx.expm1(p*t)
959
+ i2, err2 = ctx.quad(h, [0,ctx.inf], error=True)
960
+ err = err1+err2
961
+ v = i1+i2+0.5*g(ctx.mpf(0))
962
+ finally:
963
+ ctx.prec = prec
964
+ if error:
965
+ return +v, err
966
+ return +v
967
+
968
+
969
+ @defun
970
+ def sumem(ctx, f, interval, tol=None, reject=10, integral=None,
971
+ adiffs=None, bdiffs=None, verbose=False, error=False,
972
+ _fast_abort=False):
973
+ r"""
974
+ Uses the Euler-Maclaurin formula to compute an approximation accurate
975
+ to within ``tol`` (which defaults to the present epsilon) of the sum
976
+
977
+ .. math ::
978
+
979
+ S = \sum_{k=a}^b f(k)
980
+
981
+ where `(a,b)` are given by ``interval`` and `a` or `b` may be
982
+ infinite. The approximation is
983
+
984
+ .. math ::
985
+
986
+ S \sim \int_a^b f(x) \,dx + \frac{f(a)+f(b)}{2} +
987
+ \sum_{k=1}^{\infty} \frac{B_{2k}}{(2k)!}
988
+ \left(f^{(2k-1)}(b)-f^{(2k-1)}(a)\right).
989
+
990
+ The last sum in the Euler-Maclaurin formula is not generally
991
+ convergent (a notable exception is if `f` is a polynomial, in
992
+ which case Euler-Maclaurin actually gives an exact result).
993
+
994
+ The summation is stopped as soon as the quotient between two
995
+ consecutive terms falls below *reject*. That is, by default
996
+ (*reject* = 10), the summation is continued as long as each
997
+ term adds at least one decimal.
998
+
999
+ Although not convergent, convergence to a given tolerance can
1000
+ often be "forced" if `b = \infty` by summing up to `a+N` and then
1001
+ applying the Euler-Maclaurin formula to the sum over the range
1002
+ `(a+N+1, \ldots, \infty)`. This procedure is implemented by
1003
+ :func:`~mpmath.nsum`.
1004
+
1005
+ By default numerical quadrature and differentiation is used.
1006
+ If the symbolic values of the integral and endpoint derivatives
1007
+ are known, it is more efficient to pass the value of the
1008
+ integral explicitly as ``integral`` and the derivatives
1009
+ explicitly as ``adiffs`` and ``bdiffs``. The derivatives
1010
+ should be given as iterables that yield
1011
+ `f(a), f'(a), f''(a), \ldots` (and the equivalent for `b`).
1012
+
1013
+ **Examples**
1014
+
1015
+ Summation of an infinite series, with automatic and symbolic
1016
+ integral and derivative values (the second should be much faster)::
1017
+
1018
+ >>> from mpmath import *
1019
+ >>> mp.dps = 50; mp.pretty = True
1020
+ >>> sumem(lambda n: 1/n**2, [32, inf])
1021
+ 0.03174336652030209012658168043874142714132886413417
1022
+ >>> I = mpf(1)/32
1023
+ >>> D = ((-1)**n*fac(n+1)*32**(-2-n) for n in range(999))
1024
+ >>> sumem(lambda n: 1/n**2, [32, inf], integral=I, adiffs=D)
1025
+ 0.03174336652030209012658168043874142714132886413417
1026
+
1027
+ An exact evaluation of a finite polynomial sum::
1028
+
1029
+ >>> sumem(lambda n: n**5-12*n**2+3*n, [-100000, 200000])
1030
+ 10500155000624963999742499550000.0
1031
+ >>> print(sum(n**5-12*n**2+3*n for n in range(-100000, 200001)))
1032
+ 10500155000624963999742499550000
1033
+
1034
+ """
1035
+ tol = tol or +ctx.eps
1036
+ interval = ctx._as_points(interval)
1037
+ a = ctx.convert(interval[0])
1038
+ b = ctx.convert(interval[-1])
1039
+ err = ctx.zero
1040
+ prev = 0
1041
+ M = 10000
1042
+ if a == ctx.ninf: adiffs = (0 for n in xrange(M))
1043
+ else: adiffs = adiffs or ctx.diffs(f, a)
1044
+ if b == ctx.inf: bdiffs = (0 for n in xrange(M))
1045
+ else: bdiffs = bdiffs or ctx.diffs(f, b)
1046
+ orig = ctx.prec
1047
+ #verbose = 1
1048
+ try:
1049
+ ctx.prec += 10
1050
+ s = ctx.zero
1051
+ for k, (da, db) in enumerate(izip(adiffs, bdiffs)):
1052
+ if k & 1:
1053
+ term = (db-da) * ctx.bernoulli(k+1) / ctx.factorial(k+1)
1054
+ mag = abs(term)
1055
+ if verbose:
1056
+ print("term", k, "magnitude =", ctx.nstr(mag))
1057
+ if k > 4 and mag < tol:
1058
+ s += term
1059
+ break
1060
+ elif k > 4 and abs(prev) / mag < reject:
1061
+ err += mag
1062
+ if _fast_abort:
1063
+ return [s, (s, err)][error]
1064
+ if verbose:
1065
+ print("Failed to converge")
1066
+ break
1067
+ else:
1068
+ s += term
1069
+ prev = term
1070
+ # Endpoint correction
1071
+ if a != ctx.ninf: s += f(a)/2
1072
+ if b != ctx.inf: s += f(b)/2
1073
+ # Tail integral
1074
+ if verbose:
1075
+ print("Integrating f(x) from x = %s to %s" % (ctx.nstr(a), ctx.nstr(b)))
1076
+ if integral:
1077
+ s += integral
1078
+ else:
1079
+ integral, ierr = ctx.quad(f, interval, error=True)
1080
+ if verbose:
1081
+ print("Integration error:", ierr)
1082
+ s += integral
1083
+ err += ierr
1084
+ finally:
1085
+ ctx.prec = orig
1086
+ if error:
1087
+ return s, err
1088
+ else:
1089
+ return s
1090
+
1091
+ @defun
1092
+ def adaptive_extrapolation(ctx, update, emfun, kwargs):
1093
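+ # Driver shared by nsum, nprod and limit: repeatedly request a new batch
+ # of terms via update(), then try the direct estimate and every enabled
+ # acceleration (Richardson, Shanks, Levin/Sidi, Cohen alternating,
+ # Euler-Maclaurin) until one meets the tolerance; otherwise return the
+ # best estimate found, or raise NoConvergence if strict is set.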
+ option = kwargs.get
1094
+ if ctx._fixed_precision:
1095
+ tol = option('tol', ctx.eps*2**10)
1096
+ else:
1097
+ tol = option('tol', ctx.eps/2**10)
1098
+ verbose = option('verbose', False)
1099
+ maxterms = option('maxterms', ctx.dps*10)
1100
+ method = set(option('method', 'r+s').split('+'))
1101
+ skip = option('skip', 0)
1102
+ steps = iter(option('steps', xrange(10, 10**9, 10)))
1103
+ strict = option('strict')
1104
+ #steps = (10 for i in xrange(1000))
1105
+ summer=[]
1106
+ if 'd' in method or 'direct' in method:
1107
+ TRY_RICHARDSON = TRY_SHANKS = TRY_EULER_MACLAURIN = False
1108
+ else:
1109
+ TRY_RICHARDSON = ('r' in method) or ('richardson' in method)
1110
+ TRY_SHANKS = ('s' in method) or ('shanks' in method)
1111
+ TRY_EULER_MACLAURIN = ('e' in method) or \
1112
+ ('euler-maclaurin' in method)
1113
+
1114
+ def init_levin(m):
1115
+ variant = kwargs.get("levin_variant", "u")
1116
+ if isinstance(variant, str):
1117
+ if variant == "all":
1118
+ variant = ["u", "v", "t"]
1119
+ else:
1120
+ variant = [variant]
1121
+ for s in variant:
1122
+ L = levin_class(method = m, variant = s)
1123
+ L.ctx = ctx
1124
+ L.name = m + "(" + s + ")"
1125
+ summer.append(L)
1126
+
1127
+ if ('l' in method) or ('levin' in method):
1128
+ init_levin("levin")
1129
+
1130
+ if ('sidi' in method):
1131
+ init_levin("sidi")
1132
+
1133
+ if ('a' in method) or ('alternating' in method):
1134
+ L = cohen_alt_class()
1135
+ L.ctx = ctx
1136
+ L.name = "alternating"
1137
+ summer.append(L)
1138
+
1139
+ last_richardson_value = 0
1140
+ shanks_table = []
1141
+ index = 0
1142
+ step = 10
1143
+ partial = []
1144
+ best = ctx.zero
1145
+ orig = ctx.prec
1146
+ try:
1147
+ if 'workprec' in kwargs:
1148
+ ctx.prec = kwargs['workprec']
1149
+ elif TRY_RICHARDSON or TRY_SHANKS or len(summer)!=0:
1150
+ ctx.prec = (ctx.prec+10) * 4
1151
+ else:
1152
+ ctx.prec += 30
1153
+ while 1:
1154
+ if index >= maxterms:
1155
+ break
1156
+
1157
+ # Get new batch of terms
1158
+ try:
1159
+ step = next(steps)
1160
+ except StopIteration:
1161
+ pass
1162
+ if verbose:
1163
+ print("-"*70)
1164
+ print("Adding terms #%i-#%i" % (index, index+step))
1165
+ update(partial, xrange(index, index+step))
1166
+ index += step
1167
+
1168
+ # Check direct error
1169
+ best = partial[-1]
1170
+ error = abs(best - partial[-2])
1171
+ if verbose:
1172
+ print("Direct error: %s" % ctx.nstr(error))
1173
+ if error <= tol:
1174
+ return best
1175
+
1176
+ # Check each extrapolation method
1177
+ if TRY_RICHARDSON:
1178
+ value, maxc = ctx.richardson(partial)
1179
+ # Convergence
1180
+ richardson_error = abs(value - last_richardson_value)
1181
+ if verbose:
1182
+ print("Richardson error: %s" % ctx.nstr(richardson_error))
1183
+ # Convergence
1184
+ if richardson_error <= tol:
1185
+ return value
1186
+ last_richardson_value = value
1187
+ # Unreliable due to cancellation
1188
+ if ctx.eps*maxc > tol:
1189
+ if verbose:
1190
+ print("Ran out of precision for Richardson")
1191
+ TRY_RICHARDSON = False
1192
+ if richardson_error < error:
1193
+ error = richardson_error
1194
+ best = value
1195
+ if TRY_SHANKS:
1196
+ shanks_table = ctx.shanks(partial, shanks_table, randomized=True)
1197
+ row = shanks_table[-1]
1198
+ if len(row) == 2:
1199
+ est1 = row[-1]
1200
+ shanks_error = 0
1201
+ else:
1202
+ est1, maxc, est2 = row[-1], abs(row[-2]), row[-3]
1203
+ shanks_error = abs(est1-est2)
1204
+ if verbose:
1205
+ print("Shanks error: %s" % ctx.nstr(shanks_error))
1206
+ if shanks_error <= tol:
1207
+ return est1
1208
+ if ctx.eps*maxc > tol:
1209
+ if verbose:
1210
+ print("Ran out of precision for Shanks")
1211
+ TRY_SHANKS = False
1212
+ if shanks_error < error:
1213
+ error = shanks_error
1214
+ best = est1
1215
+ for L in summer:
1216
+ est, lerror = L.update_psum(partial)
1217
+ if verbose:
1218
+ print("%s error: %s" % (L.name, ctx.nstr(lerror)))
1219
+ if lerror <= tol:
1220
+ return est
1221
+ if lerror < error:
1222
+ error = lerror
1223
+ best = est
1224
+ if TRY_EULER_MACLAURIN:
1225
+ if ctx.almosteq(ctx.mpc(ctx.sign(partial[-1]) / ctx.sign(partial[-2])), -1):
1226
+ if verbose:
1227
+ print ("NOT using Euler-Maclaurin: the series appears"
1228
+ " to be alternating, so numerical\n quadrature"
1229
+ " will most likely fail")
1230
+ TRY_EULER_MACLAURIN = False
1231
+ else:
1232
+ value, em_error = emfun(index, tol)
1233
+ value += partial[-1]
1234
+ if verbose:
1235
+ print("Euler-Maclaurin error: %s" % ctx.nstr(em_error))
1236
+ if em_error <= tol:
1237
+ return value
1238
+ if em_error < error:
1239
+ best = value
1240
+ finally:
1241
+ ctx.prec = orig
1242
+ if strict:
1243
+ raise ctx.NoConvergence
1244
+ if verbose:
1245
+ print("Warning: failed to converge to target accuracy")
1246
+ return best
1247
+
1248
+ @defun
1249
+ def nsum(ctx, f, *intervals, **options):
1250
+ r"""
1251
+ Computes the sum
1252
+
1253
+ .. math :: S = \sum_{k=a}^b f(k)
1254
+
1255
+ where `(a, b)` = *interval*, and where `a = -\infty` and/or
1256
+ `b = \infty` are allowed, or more generally
1257
+
1258
+ .. math :: S = \sum_{k_1=a_1}^{b_1} \cdots
1259
+ \sum_{k_n=a_n}^{b_n} f(k_1,\ldots,k_n)
1260
+
1261
+ if multiple intervals are given.
1262
+
1263
+ Two examples of infinite series that can be summed by :func:`~mpmath.nsum`,
1264
+ where the first converges rapidly and the second converges slowly,
1265
+ are::
1266
+
1267
+ >>> from mpmath import *
1268
+ >>> mp.dps = 15; mp.pretty = True
1269
+ >>> nsum(lambda n: 1/fac(n), [0, inf])
1270
+ 2.71828182845905
1271
+ >>> nsum(lambda n: 1/n**2, [1, inf])
1272
+ 1.64493406684823
1273
+
1274
+ When appropriate, :func:`~mpmath.nsum` applies convergence acceleration to
1275
+ accurately estimate the sums of slowly convergent series. If the series is
1276
+ finite, :func:`~mpmath.nsum` currently does not attempt to perform any
1277
+ extrapolation, and simply calls :func:`~mpmath.fsum`.
1278
+
1279
+ Multidimensional infinite series are reduced to a single-dimensional
1280
+ series over expanding hypercubes; if both infinite and finite dimensions
1281
+ are present, the finite ranges are moved innermost. For more advanced
1282
+ control over the summation order, use nested calls to :func:`~mpmath.nsum`,
1283
+ or manually rewrite the sum as a single-dimensional series.
1284
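+
+ For illustration, the double series `\sum_{m,n} 2^{-mn}` from the
+ multidimensional examples below could also be evaluated with explicit
+ nesting (a sketch; the inner acceleration is re-run for every outer index):
+
+ .. code ::
+
+ nsum(lambda m: nsum(lambda n: 1/2**(m*n), [1, inf]), [1, inf])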
+
1285
+ **Options**
1286
+
1287
+ *tol*
1288
+ Desired maximum final error. Defaults roughly to the
1289
+ epsilon of the working precision.
1290
+
1291
+ *method*
1292
+ Which summation algorithm to use (described below).
1293
+ Default: ``'richardson+shanks'``.
1294
+
1295
+ *maxterms*
1296
+ Cancel after at most this many terms. Default: 10*dps.
1297
+
1298
+ *steps*
1299
+ An iterable giving the number of terms to add between
1300
+ each extrapolation attempt. The default sequence is
1301
+ [10, 20, 30, 40, ...]. For example, if you know that
1302
+ approximately 100 terms will be required, efficiency might be
1303
+ improved by setting this to [100, 10]. Then the first
1304
+ extrapolation will be performed after 100 terms, the second
1305
+ after 110, etc.
1306
+
1307
+ *verbose*
1308
+ Print details about progress.
1309
+
1310
+ *ignore*
1311
+ If enabled, any term that raises ``ArithmeticError``
1312
+ or ``ValueError`` (e.g. through division by zero) is replaced
1313
+ by a zero. This is convenient for lattice sums with
1314
+ a singular term near the origin.
1315
+
1316
+ **Methods**
1317
+
1318
+ Unfortunately, an algorithm that can efficiently sum any infinite
1319
+ series does not exist. :func:`~mpmath.nsum` implements several different
1320
+ algorithms that each work well in different cases. The *method*
1321
+ keyword argument selects a method.
1322
+
1323
+ The default method is ``'r+s'``, i.e. both Richardson extrapolation
1324
+ and Shanks transformation is attempted. A slower method that
1325
+ handles more cases is ``'r+s+e'``. For very high precision
1326
+ summation, or if the summation needs to be fast (for example if
1327
+ multiple sums need to be evaluated), it is a good idea to
1328
+ investigate which method works best and use only that.
1329
+
1330
+ ``'richardson'`` / ``'r'``:
1331
+ Uses Richardson extrapolation. Provides useful extrapolation
1332
+ when `f(k) \sim P(k)/Q(k)` or when `f(k) \sim (-1)^k P(k)/Q(k)`
1333
+ for polynomials `P` and `Q`. See :func:`~mpmath.richardson` for
1334
+ additional information.
1335
+
1336
+ ``'shanks'`` / ``'s'``:
1337
+ Uses Shanks transformation. Typically provides useful
1338
+ extrapolation when `f(k) \sim c^k` or when successive terms
1339
+ alternate signs. Is able to sum some divergent series.
1340
+ See :func:`~mpmath.shanks` for additional information.
1341
+
1342
+ ``'levin'`` / ``'l'``:
1343
+ Uses the Levin transformation. It performs better than the Shanks
1344
+ transformation for logarithmically convergent or alternating divergent
1345
+ series. The ``'levin_variant'``-keyword selects the variant. Valid
1346
+ choices are "u", "t", "v" and "all" whereby "all" uses all three
1347
+ u, t and v simultaneously (this is good for performance comparison in
1348
+ conjunction with "verbose=True"). Instead of the Levin transform one can
1349
+ also use the Sidi-S transform by selecting the method ``'sidi'``.
1350
+ See :func:`~mpmath.levin` for additional details.
1351
+
1352
+ ``'alternating'`` / ``'a'``:
1353
+ This is the convergence acceleration of alternating series developed
1354
+ by Cohen, Villegas and Zagier.
1355
+ See :func:`~mpmath.cohen_alt` for additional details.
1356
+
1357
+ ``'euler-maclaurin'`` / ``'e'``:
1358
+ Uses the Euler-Maclaurin summation formula to approximate
1359
+ the remainder sum by an integral. This requires high-order
1360
+ numerical derivatives and numerical integration. The advantage
1361
+ of this algorithm is that it works regardless of the
1362
+ decay rate of `f`, as long as `f` is sufficiently smooth.
1363
+ See :func:`~mpmath.sumem` for additional information.
1364
+
1365
+ ``'direct'`` / ``'d'``:
1366
+ Does not perform any extrapolation. This can be used
1367
+ (and should only be used) for rapidly convergent series.
1368
+ The summation automatically stops when the terms
1369
+ decrease below the target tolerance.
1370
+
1371
+ **Basic examples**
1372
+
1373
+ A finite sum::
1374
+
1375
+ >>> nsum(lambda k: 1/k, [1, 6])
1376
+ 2.45
1377
+
1378
+ Summation of a series going to negative infinity and a doubly
1379
+ infinite series::
1380
+
1381
+ >>> nsum(lambda k: 1/k**2, [-inf, -1])
1382
+ 1.64493406684823
1383
+ >>> nsum(lambda k: 1/(1+k**2), [-inf, inf])
1384
+ 3.15334809493716
1385
+
1386
+ :func:`~mpmath.nsum` handles sums of complex numbers::
1387
+
1388
+ >>> nsum(lambda k: (0.5+0.25j)**k, [0, inf])
1389
+ (1.6 + 0.8j)
1390
+
1391
+ The following sum converges very rapidly, so it is most
1392
+ efficient to sum it by disabling convergence acceleration::
1393
+
1394
+ >>> mp.dps = 1000
1395
+ >>> a = nsum(lambda k: -(-1)**k * k**2 / fac(2*k), [1, inf],
1396
+ ... method='direct')
1397
+ >>> b = (cos(1)+sin(1))/4
1398
+ >>> abs(a-b) < mpf('1e-998')
1399
+ True
1400
+
1401
+ **Examples with Richardson extrapolation**
1402
+
1403
+ Richardson extrapolation works well for sums over rational
1404
+ functions, as well as their alternating counterparts::
1405
+
1406
+ >>> mp.dps = 50
1407
+ >>> nsum(lambda k: 1 / k**3, [1, inf],
1408
+ ... method='richardson')
1409
+ 1.2020569031595942853997381615114499907649862923405
1410
+ >>> zeta(3)
1411
+ 1.2020569031595942853997381615114499907649862923405
1412
+
1413
+ >>> nsum(lambda n: (n + 3)/(n**3 + n**2), [1, inf],
1414
+ ... method='richardson')
1415
+ 2.9348022005446793094172454999380755676568497036204
1416
+ >>> pi**2/2-2
1417
+ 2.9348022005446793094172454999380755676568497036204
1418
+
1419
+ >>> nsum(lambda k: (-1)**k / k**3, [1, inf],
1420
+ ... method='richardson')
1421
+ -0.90154267736969571404980362113358749307373971925537
1422
+ >>> -3*zeta(3)/4
1423
+ -0.90154267736969571404980362113358749307373971925538
1424
+
1425
+ **Examples with Shanks transformation**
1426
+
1427
+ The Shanks transformation works well for geometric series
1428
+ and typically provides excellent acceleration for Taylor
1429
+ series near the border of their disk of convergence.
1430
+ Here we apply it to a series for `\log(2)`, which can be
1431
+ seen as the Taylor series for `\log(1+x)` with `x = 1`::
1432
+
1433
+ >>> nsum(lambda k: -(-1)**k/k, [1, inf],
1434
+ ... method='shanks')
1435
+ 0.69314718055994530941723212145817656807550013436025
1436
+ >>> log(2)
1437
+ 0.69314718055994530941723212145817656807550013436025
1438
+
1439
+ Here we apply it to a slowly convergent geometric series::
1440
+
1441
+ >>> nsum(lambda k: mpf('0.995')**k, [0, inf],
1442
+ ... method='shanks')
1443
+ 200.0
1444
+
1445
+ Finally, Shanks' method works very well for alternating series
1446
+ where `f(k) = (-1)^k g(k)`, and often does so regardless of
1447
+ the exact decay rate of `g(k)`::
1448
+
1449
+ >>> mp.dps = 15
1450
+ >>> nsum(lambda k: (-1)**(k+1) / k**1.5, [1, inf],
1451
+ ... method='shanks')
1452
+ 0.765147024625408
1453
+ >>> (2-sqrt(2))*zeta(1.5)/2
1454
+ 0.765147024625408
1455
+
1456
+ The following slowly convergent alternating series has no known
1457
+ closed-form value. Evaluating the sum a second time at higher
1458
+ precision indicates that the value is probably correct::
1459
+
1460
+ >>> nsum(lambda k: (-1)**k / log(k), [2, inf],
1461
+ ... method='shanks')
1462
+ 0.924299897222939
1463
+ >>> mp.dps = 30
1464
+ >>> nsum(lambda k: (-1)**k / log(k), [2, inf],
1465
+ ... method='shanks')
1466
+ 0.92429989722293885595957018136
1467
+
1468
+ **Examples with Levin transformation**
1469
+
1470
+ The following example calculates Euler's constant as the constant term in
1471
+ the Laurent expansion of zeta(s) at s=1. This sum converges extremely slowly
1472
+ because of the logarithmic convergence behaviour of the Dirichlet series
1473
+ for zeta.
1474
+
1475
+ >>> mp.dps = 30
1476
+ >>> z = mp.mpf(10) ** (-10)
1477
+ >>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "levin") - 1 / z
1478
+ >>> print(mp.chop(a - mp.euler, tol = 1e-10))
1479
+ 0.0
1480
+
1481
+ Now we sum the zeta function outside its range of convergence
1482
+ (attention: This does not work at the negative integers!):
1483
+
1484
+ >>> mp.dps = 15
1485
+ >>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v")
1486
+ >>> print(mp.chop(w - mp.zeta(-2-3j)))
1487
+ 0.0
1488
+
1489
+ The next example resums an asymptotic series expansion of an integral
1490
+ related to the exponential integral.
1491
+
1492
+ >>> mp.dps = 15
1493
+ >>> z = mp.mpf(10)
1494
+ >>> # exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf])
1495
+ >>> exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral
1496
+ >>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t")
1497
+ >>> print(mp.chop(w - exact))
1498
+ 0.0
1499
+
1500
+ The following highly divergent asymptotic expansion needs some care. First, we
1501
+ need a copious amount of working precision. Second, the stepsize must not be
1502
+ chosen too large, otherwise nsum may miss the point where the Levin transform
1503
+ converges and reach the point where only numerical garbage is produced due to
1504
+ numerical cancellation.
1505
+
1506
+ >>> mp.dps = 15
1507
+ >>> z = mp.mpf(2)
1508
+ >>> # exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi)
1509
+ >>> exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral
1510
+ >>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)),
1511
+ ... [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)])
1512
+ >>> print(mp.chop(w - exact))
1513
+ 0.0
1514
+
1515
+ The hypergeometric function can also be summed outside its range of convergence:
1516
+
1517
+ >>> mp.dps = 15
1518
+ >>> z = 2 + 1j
1519
+ >>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z)
1520
+ >>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n))
1521
+ >>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)])
1522
+ >>> print(mp.chop(exact-v))
1523
+ 0.0
1524
+
1525
+ **Examples with Cohen's alternating series resummation**
1526
+
1527
+ The next example sums the alternating zeta function:
1528
+
1529
+ >>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a")
1530
+ >>> print(mp.chop(v - mp.log(2)))
1531
+ 0.0
1532
+
1533
+ The derivative of the alternating zeta function outside its range of
1534
+ convergence:
1535
+
1536
+ >>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a")
1537
+ >>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1)))
1538
+ 0.0
1539
+
1540
+ **Examples with Euler-Maclaurin summation**
1541
+
1542
+ The sum in the following example has the wrong rate of convergence
1543
+ for either Richardson or Shanks to be effective.
1544
+
1545
+ >>> f = lambda k: log(k)/k**2.5
1546
+ >>> mp.dps = 15
1547
+ >>> nsum(f, [1, inf], method='euler-maclaurin')
1548
+ 0.38734195032621
1549
+ >>> -diff(zeta, 2.5)
1550
+ 0.38734195032621
1551
+
1552
+ Increasing ``steps`` improves speed at higher precision::
1553
+
1554
+ >>> mp.dps = 50
1555
+ >>> nsum(f, [1, inf], method='euler-maclaurin', steps=[250])
1556
+ 0.38734195032620997271199237593105101319948228874688
1557
+ >>> -diff(zeta, 2.5)
1558
+ 0.38734195032620997271199237593105101319948228874688
1559
+
1560
+ **Divergent series**
1561
+
1562
+ The Shanks transformation is able to sum some *divergent*
1563
+ series. In particular, it is often able to sum Taylor series
1564
+ beyond their radius of convergence (this is due to a relation
1565
+ between the Shanks transformation and Pade approximations;
1566
+ see :func:`~mpmath.pade` for an alternative way to evaluate divergent
1567
+ Taylor series). Furthermore the Levin-transform examples above
1568
+ contain some divergent series resummation.
1569
+
1570
+ Here we apply it to `\log(1+x)` far outside the region of
1571
+ convergence::
1572
+
1573
+ >>> mp.dps = 50
1574
+ >>> nsum(lambda k: -(-9)**k/k, [1, inf],
1575
+ ... method='shanks')
1576
+ 2.3025850929940456840179914546843642076011014886288
1577
+ >>> log(10)
1578
+ 2.3025850929940456840179914546843642076011014886288
1579
+
1580
+ A particular type of divergent series that can be summed
1581
+ using the Shanks transformation is geometric series.
1582
+ The result is the same as using the closed-form formula
1583
+ for an infinite geometric series::
1584
+
1585
+ >>> mp.dps = 15
1586
+ >>> for n in range(-8, 8):
1587
+ ... if n == 1:
1588
+ ... continue
1589
+ ... print("%s %s %s" % (mpf(n), mpf(1)/(1-n),
1590
+ ... nsum(lambda k: n**k, [0, inf], method='shanks')))
1591
+ ...
1592
+ -8.0 0.111111111111111 0.111111111111111
1593
+ -7.0 0.125 0.125
1594
+ -6.0 0.142857142857143 0.142857142857143
1595
+ -5.0 0.166666666666667 0.166666666666667
1596
+ -4.0 0.2 0.2
1597
+ -3.0 0.25 0.25
1598
+ -2.0 0.333333333333333 0.333333333333333
1599
+ -1.0 0.5 0.5
1600
+ 0.0 1.0 1.0
1601
+ 2.0 -1.0 -1.0
1602
+ 3.0 -0.5 -0.5
1603
+ 4.0 -0.333333333333333 -0.333333333333333
1604
+ 5.0 -0.25 -0.25
1605
+ 6.0 -0.2 -0.2
1606
+ 7.0 -0.166666666666667 -0.166666666666667
1607
+
1608
+ **Multidimensional sums**
1609
+
1610
+ Any combination of finite and infinite ranges is allowed for the
1611
+ summation indices::
1612
+
1613
+ >>> mp.dps = 15
1614
+ >>> nsum(lambda x,y: x+y, [2,3], [4,5])
1615
+ 28.0
1616
+ >>> nsum(lambda x,y: x/2**y, [1,3], [1,inf])
1617
+ 6.0
1618
+ >>> nsum(lambda x,y: y/2**x, [1,inf], [1,3])
1619
+ 6.0
1620
+ >>> nsum(lambda x,y,z: z/(2**x*2**y), [1,inf], [1,inf], [3,4])
1621
+ 7.0
1622
+ >>> nsum(lambda x,y,z: y/(2**x*2**z), [1,inf], [3,4], [1,inf])
1623
+ 7.0
1624
+ >>> nsum(lambda x,y,z: x/(2**z*2**y), [3,4], [1,inf], [1,inf])
1625
+ 7.0
1626
+
1627
+ Some nice examples of double series with analytic solutions or
1628
+ reductions to single-dimensional series (see [1])::
1629
+
1630
+ >>> nsum(lambda m, n: 1/2**(m*n), [1,inf], [1,inf])
1631
+ 1.60669515241529
1632
+ >>> nsum(lambda n: 1/(2**n-1), [1,inf])
1633
+ 1.60669515241529
1634
+
1635
+ >>> nsum(lambda i,j: (-1)**(i+j)/(i**2+j**2), [1,inf], [1,inf])
1636
+ 0.278070510848213
1637
+ >>> pi*(pi-3*ln2)/12
1638
+ 0.278070510848213
1639
+
1640
+ >>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**2, [1,inf], [1,inf])
1641
+ 0.129319852864168
1642
+ >>> altzeta(2) - altzeta(1)
1643
+ 0.129319852864168
1644
+
1645
+ >>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**3, [1,inf], [1,inf])
1646
+ 0.0790756439455825
1647
+ >>> altzeta(3) - altzeta(2)
1648
+ 0.0790756439455825
1649
+
1650
+ >>> nsum(lambda m,n: m**2*n/(3**m*(n*3**m+m*3**n)),
1651
+ ... [1,inf], [1,inf])
1652
+ 0.28125
1653
+ >>> mpf(9)/32
1654
+ 0.28125
1655
+
1656
+ >>> nsum(lambda i,j: fac(i-1)*fac(j-1)/fac(i+j),
1657
+ ... [1,inf], [1,inf], workprec=400)
1658
+ 1.64493406684823
1659
+ >>> zeta(2)
1660
+ 1.64493406684823
1661
+
1662
+ A hard example of a multidimensional sum is the Madelung constant
1663
+ in three dimensions (see [2]). The defining sum converges very
1664
+ slowly and only conditionally, so :func:`~mpmath.nsum` is lucky to
1665
+ obtain an accurate value through convergence acceleration. The
1666
+ second evaluation below uses a much more efficient, rapidly
1667
+ convergent 2D sum::
1668
+
1669
+ >>> nsum(lambda x,y,z: (-1)**(x+y+z)/(x*x+y*y+z*z)**0.5,
1670
+ ... [-inf,inf], [-inf,inf], [-inf,inf], ignore=True)
1671
+ -1.74756459463318
1672
+ >>> nsum(lambda x,y: -12*pi*sech(0.5*pi * \
1673
+ ... sqrt((2*x+1)**2+(2*y+1)**2))**2, [0,inf], [0,inf])
1674
+ -1.74756459463318
1675
+
1676
+ Another example of a lattice sum in 2D::
1677
+
1678
+ >>> nsum(lambda x,y: (-1)**(x+y) / (x**2+y**2), [-inf,inf],
1679
+ ... [-inf,inf], ignore=True)
1680
+ -2.1775860903036
1681
+ >>> -pi*ln2
1682
+ -2.1775860903036
1683
+
1684
+ An example of an Eisenstein series::
1685
+
1686
+ >>> nsum(lambda m,n: (m+n*1j)**(-4), [-inf,inf], [-inf,inf],
1687
+ ... ignore=True)
1688
+ (3.1512120021539 + 0.0j)
1689
+
1690
+ **References**
1691
+
1692
+ 1. [Weisstein]_ http://mathworld.wolfram.com/DoubleSeries.html,
1693
+ 2. [Weisstein]_ http://mathworld.wolfram.com/MadelungConstants.html
1694
+
1695
+ """
1696
+ infinite, g = standardize(ctx, f, intervals, options)
1697
+ if not infinite:
1698
+ return +g()
1699
+
1700
+ def update(partial_sums, indices):
1701
+ if partial_sums:
1702
+ psum = partial_sums[-1]
1703
+ else:
1704
+ psum = ctx.zero
1705
+ for k in indices:
1706
+ psum = psum + g(ctx.mpf(k))
1707
+ partial_sums.append(psum)
1708
+
1709
+ prec = ctx.prec
1710
+
1711
+ def emfun(point, tol):
1712
+ workprec = ctx.prec
1713
+ ctx.prec = prec + 10
1714
+ v = ctx.sumem(g, [point, ctx.inf], tol, error=1)
1715
+ ctx.prec = workprec
1716
+ return v
1717
+
1718
+ return +ctx.adaptive_extrapolation(update, emfun, options)
1719
+
1720
+
1721
+ def wrapsafe(f):
1722
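+ # Helper for the 'ignore' option of nsum: terms raising ArithmeticError
+ # or ValueError are replaced by zero.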
+ def g(*args):
1723
+ try:
1724
+ return f(*args)
1725
+ except (ArithmeticError, ValueError):
1726
+ return 0
1727
+ return g
1728
+
1729
+ def standardize(ctx, f, intervals, options):
1730
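+ # Reduce a (possibly multidimensional) summation problem to a sum over a
+ # single index k = 0, 1, 2, ...: finite dimensions are folded into the
+ # summand, infinite dimensions are shifted/reflected to [0, inf] and, if
+ # several remain, folded into one. Returns (infinite, g); if infinite is
+ # False the sum is finite and g() already gives its value.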
+ if options.get("ignore"):
1731
+ f = wrapsafe(f)
1732
+ finite = []
1733
+ infinite = []
1734
+ for k, points in enumerate(intervals):
1735
+ a, b = ctx._as_points(points)
1736
+ if b < a:
1737
+ return False, (lambda: ctx.zero)
1738
+ if a == ctx.ninf or b == ctx.inf:
1739
+ infinite.append((k, (a,b)))
1740
+ else:
1741
+ finite.append((k, (int(a), int(b))))
1742
+ if finite:
1743
+ f = fold_finite(ctx, f, finite)
1744
+ if not infinite:
1745
+ return False, lambda: f(*([0]*len(intervals)))
1746
+ if infinite:
1747
+ f = standardize_infinite(ctx, f, infinite)
1748
+ f = fold_infinite(ctx, f, infinite)
1749
+ args = [0] * len(intervals)
1750
+ d = infinite[0][0]
1751
+ def g(k):
1752
+ args[d] = k
1753
+ return f(*args)
1754
+ return True, g
1755
+
1756
+ # backwards compatible itertools.product
1757
+ def cartesian_product(args):
1758
+ pools = map(tuple, args)
1759
+ result = [[]]
1760
+ for pool in pools:
1761
+ result = [x+[y] for x in result for y in pool]
1762
+ for prod in result:
1763
+ yield tuple(prod)
1764
+
1765
+ def fold_finite(ctx, f, intervals):
1766
+ if not intervals:
1767
+ return f
1768
+ indices = [v[0] for v in intervals]
1769
+ points = [v[1] for v in intervals]
1770
+ ranges = [xrange(a, b+1) for (a,b) in points]
1771
+ def g(*args):
1772
+ args = list(args)
1773
+ s = ctx.zero
1774
+ for xs in cartesian_product(ranges):
1775
+ for dim, x in zip(indices, xs):
1776
+ args[dim] = ctx.mpf(x)
1777
+ s += f(*args)
1778
+ return s
1779
+ #print "Folded finite", indices
1780
+ return g
1781
+
1782
+ # Standardize each interval to [0,inf]
1783
+ def standardize_infinite(ctx, f, intervals):
1784
+ if not intervals:
1785
+ return f
1786
+ dim, [a,b] = intervals[-1]
1787
+ if a == ctx.ninf:
1788
+ if b == ctx.inf:
1789
+ def g(*args):
1790
+ args = list(args)
1791
+ k = args[dim]
1792
+ if k:
1793
+ s = f(*args)
1794
+ args[dim] = -k
1795
+ s += f(*args)
1796
+ return s
1797
+ else:
1798
+ return f(*args)
1799
+ else:
1800
+ def g(*args):
1801
+ args = list(args)
1802
+ args[dim] = b - args[dim]
1803
+ return f(*args)
1804
+ else:
1805
+ def g(*args):
1806
+ args = list(args)
1807
+ args[dim] += a
1808
+ return f(*args)
1809
+ #print "Standardized infinity along dimension", dim, a, b
1810
+ return standardize_infinite(ctx, g, intervals[:-1])
1811
+
1812
+ def fold_infinite(ctx, f, intervals):
1813
+ if len(intervals) < 2:
1814
+ return f
1815
+ dim1 = intervals[-2][0]
1816
+ dim2 = intervals[-1][0]
1817
+ # Assume intervals are [0,inf] x [0,inf] x ...
1818
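+ # Fold the last two [0,inf] dimensions into one: for a given outer index
+ # n, g sums f over the L-shaped shell {(x,n): 0 <= x <= n} union
+ # {(n,y): 0 <= y < n}, so summing g over n = 0, 1, 2, ... covers the whole
+ # quadrant exactly once (the "expanding hypercubes" reduction).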
+ def g(*args):
1819
+ args = list(args)
1820
+ #args.insert(dim2, None)
1821
+ n = int(args[dim1])
1822
+ s = ctx.zero
1823
+ #y = ctx.mpf(n)
1824
+ args[dim2] = ctx.mpf(n) #y
1825
+ for x in xrange(n+1):
1826
+ args[dim1] = ctx.mpf(x)
1827
+ s += f(*args)
1828
+ args[dim1] = ctx.mpf(n) #ctx.mpf(n)
1829
+ for y in xrange(n):
1830
+ args[dim2] = ctx.mpf(y)
1831
+ s += f(*args)
1832
+ return s
1833
+ #print "Folded infinite from", len(intervals), "to", (len(intervals)-1)
1834
+ return fold_infinite(ctx, g, intervals[:-1])
1835
+
1836
+ @defun
1837
+ def nprod(ctx, f, interval, nsum=False, **kwargs):
1838
+ r"""
1839
+ Computes the product
1840
+
1841
+ .. math ::
1842
+
1843
+ P = \prod_{k=a}^b f(k)
1844
+
1845
+ where `(a, b)` = *interval*, and where `a = -\infty` and/or
1846
+ `b = \infty` are allowed.
1847
+
1848
+ By default, :func:`~mpmath.nprod` uses the same extrapolation methods as
1849
+ :func:`~mpmath.nsum`, except applied to the partial products rather than
1850
+ partial sums, and the same keyword options as for :func:`~mpmath.nsum` are
1851
+ supported. If ``nsum=True``, the product is instead computed via
1852
+ :func:`~mpmath.nsum` as
1853
+
1854
+ .. math ::
1855
+
1856
+ P = \exp\left( \sum_{k=a}^b \log(f(k)) \right).
1857
+
1858
+ This is slower, but can sometimes yield better results. It is
1859
+ also required (and used automatically) when Euler-Maclaurin
1860
+ summation is requested.
1861
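+
+ For instance (a sketch rather than a doctest), the Wallis-type product from
+ the examples below can be computed either directly or via the logarithmic sum:
+
+ .. code ::
+
+ nprod(lambda k: (4*k**2)/(4*k**2-1), [1, inf]) # direct extrapolation
+ nprod(lambda k: (4*k**2)/(4*k**2-1), [1, inf], nsum=True) # exp(nsum(log f(k)))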
+
1862
+ **Examples**
1863
+
1864
+ A simple finite product::
1865
+
1866
+ >>> from mpmath import *
1867
+ >>> mp.dps = 25; mp.pretty = True
1868
+ >>> nprod(lambda k: k, [1, 4])
1869
+ 24.0
1870
+
1871
+ A large number of infinite products have known exact values,
1872
+ and can therefore be used as a reference. Most of the following
1873
+ examples are taken from MathWorld [1].
1874
+
1875
+ A few infinite products with simple values are::
1876
+
1877
+ >>> 2*nprod(lambda k: (4*k**2)/(4*k**2-1), [1, inf])
1878
+ 3.141592653589793238462643
1879
+ >>> nprod(lambda k: (1+1/k)**2/(1+2/k), [1, inf])
1880
+ 2.0
1881
+ >>> nprod(lambda k: (k**3-1)/(k**3+1), [2, inf])
1882
+ 0.6666666666666666666666667
1883
+ >>> nprod(lambda k: (1-1/k**2), [2, inf])
1884
+ 0.5
1885
+
1886
+ Next, several more infinite products with more complicated
1887
+ values::
1888
+
1889
+ >>> nprod(lambda k: exp(1/k**2), [1, inf]); exp(pi**2/6)
1890
+ 5.180668317897115748416626
1891
+ 5.180668317897115748416626
1892
+
1893
+ >>> nprod(lambda k: (k**2-1)/(k**2+1), [2, inf]); pi*csch(pi)
1894
+ 0.2720290549821331629502366
1895
+ 0.2720290549821331629502366
1896
+
1897
+ >>> nprod(lambda k: (k**4-1)/(k**4+1), [2, inf])
1898
+ 0.8480540493529003921296502
1899
+ >>> pi*sinh(pi)/(cosh(sqrt(2)*pi)-cos(sqrt(2)*pi))
1900
+ 0.8480540493529003921296502
1901
+
1902
+ >>> nprod(lambda k: (1+1/k+1/k**2)**2/(1+2/k+3/k**2), [1, inf])
1903
+ 1.848936182858244485224927
1904
+ >>> 3*sqrt(2)*cosh(pi*sqrt(3)/2)**2*csch(pi*sqrt(2))/pi
1905
+ 1.848936182858244485224927
1906
+
1907
+ >>> nprod(lambda k: (1-1/k**4), [2, inf]); sinh(pi)/(4*pi)
1908
+ 0.9190194775937444301739244
1909
+ 0.9190194775937444301739244
1910
+
1911
+ >>> nprod(lambda k: (1-1/k**6), [2, inf])
1912
+ 0.9826842777421925183244759
1913
+ >>> (1+cosh(pi*sqrt(3)))/(12*pi**2)
1914
+ 0.9826842777421925183244759
1915
+
1916
+ >>> nprod(lambda k: (1+1/k**2), [2, inf]); sinh(pi)/(2*pi)
1917
+ 1.838038955187488860347849
1918
+ 1.838038955187488860347849
1919
+
1920
+ >>> nprod(lambda n: (1+1/n)**n * exp(1/(2*n)-1), [1, inf])
1921
+ 1.447255926890365298959138
1922
+ >>> exp(1+euler/2)/sqrt(2*pi)
1923
+ 1.447255926890365298959138
1924
+
1925
+ The following two products are equivalent and can be evaluated in
1926
+ terms of a Jacobi theta function. Pi can be replaced by any value
1927
+ (as long as convergence is preserved)::
1928
+
1929
+ >>> nprod(lambda k: (1-pi**-k)/(1+pi**-k), [1, inf])
1930
+ 0.3838451207481672404778686
1931
+ >>> nprod(lambda k: tanh(k*log(pi)/2), [1, inf])
1932
+ 0.3838451207481672404778686
1933
+ >>> jtheta(4,0,1/pi)
1934
+ 0.3838451207481672404778686
1935
+
1936
+ This product does not have a known closed form value::
1937
+
1938
+ >>> nprod(lambda k: (1-1/2**k), [1, inf])
1939
+ 0.2887880950866024212788997
1940
+
1941
+ A product taken from `-\infty`::
1942
+
1943
+ >>> nprod(lambda k: 1-k**(-3), [-inf,-2])
1944
+ 0.8093965973662901095786805
1945
+ >>> cosh(pi*sqrt(3)/2)/(3*pi)
1946
+ 0.8093965973662901095786805
1947
+
1948
+ A doubly infinite product::
1949
+
1950
+ >>> nprod(lambda k: exp(1/(1+k**2)), [-inf, inf])
1951
+ 23.41432688231864337420035
1952
+ >>> exp(pi/tanh(pi))
1953
+ 23.41432688231864337420035
1954
+
1955
+ A product requiring the use of Euler-Maclaurin summation to compute
1956
+ an accurate value::
1957
+
1958
+ >>> nprod(lambda k: (1-1/k**2.5), [2, inf], method='e')
1959
+ 0.696155111336231052898125
1960
+
1961
+ **References**
1962
+
1963
+ 1. [Weisstein]_ http://mathworld.wolfram.com/InfiniteProduct.html
1964
+
1965
+ """
1966
+ if nsum or ('e' in kwargs.get('method', '')):
1967
+ orig = ctx.prec
1968
+ try:
1969
+ # TODO: we are evaluating log(1+eps) -> eps, which is
1970
+ # inaccurate. This currently works because nsum greatly
1971
+ # increases the working precision. But we should be
1972
+ # more intelligent and handle the precision here.
1973
+ ctx.prec += 10
1974
+ v = ctx.nsum(lambda n: ctx.ln(f(n)), interval, **kwargs)
1975
+ finally:
1976
+ ctx.prec = orig
1977
+ return +ctx.exp(v)
1978
+
1979
+ a, b = ctx._as_points(interval)
1980
+ if a == ctx.ninf:
1981
+ if b == ctx.inf:
1982
+ return f(0) * ctx.nprod(lambda k: f(-k) * f(k), [1, ctx.inf], **kwargs)
1983
+ return ctx.nprod(f, [-b, ctx.inf], **kwargs)
1984
+ elif b != ctx.inf:
1985
+ return ctx.fprod(f(ctx.mpf(k)) for k in xrange(int(a), int(b)+1))
1986
+
1987
+ a = int(a)
1988
+
1989
+ def update(partial_products, indices):
1990
+ if partial_products:
1991
+ pprod = partial_products[-1]
1992
+ else:
1993
+ pprod = ctx.one
1994
+ for k in indices:
1995
+ pprod = pprod * f(a + ctx.mpf(k))
1996
+ partial_products.append(pprod)
1997
+
1998
+ return +ctx.adaptive_extrapolation(update, None, kwargs)
1999
+
2000
+
2001
+ @defun
2002
+ def limit(ctx, f, x, direction=1, exp=False, **kwargs):
2003
+ r"""
2004
+ Computes an estimate of the limit
2005
+
2006
+ .. math ::
2007
+
2008
+ \lim_{t \to x} f(t)
2009
+
2010
+ where `x` may be finite or infinite.
2011
+
2012
+ For finite `x`, :func:`~mpmath.limit` evaluates `f(x + d/n)` for
2013
+ consecutive integer values of `n`, where the approach direction
2014
+ `d` may be specified using the *direction* keyword argument.
2015
+ For infinite `x`, :func:`~mpmath.limit` evaluates values of
2016
+ `f(\mathrm{sign}(x) \cdot n)`.
2017
+
2018
+ If the approach to the limit is not sufficiently fast to give
2019
+ an accurate estimate directly, :func:`~mpmath.limit` attempts to find
2020
+ the limit using Richardson extrapolation or the Shanks
2021
+ transformation. You can select between these methods using
2022
+ the *method* keyword (see documentation of :func:`~mpmath.nsum` for
2023
+ more information).
2024
+
2025
+ **Options**
2026
+
2027
+ The following options are available with essentially the
2028
+ same meaning as for :func:`~mpmath.nsum`: *tol*, *method*, *maxterms*,
2029
+ *steps*, *verbose*.
2030
+
2031
+ If the option *exp=True* is set, `f` will be
2032
+ sampled at exponentially spaced points `n = 2^1, 2^2, 2^3, \ldots`
2033
+ instead of the linearly spaced points `n = 1, 2, 3, \ldots`.
2034
+ This can sometimes improve the rate of convergence so that
2035
+ :func:`~mpmath.limit` may return a more accurate answer (and faster).
2036
+ However, do note that this can only be used if `f`
2037
+ supports fast and accurate evaluation for arguments that
2038
+ are extremely close to the limit point (or if infinite,
2039
+ very large arguments).
2040
+
2041
+ **Examples**
2042
+
2043
+ A basic evaluation of a removable singularity::
2044
+
2045
+ >>> from mpmath import *
2046
+ >>> mp.dps = 30; mp.pretty = True
2047
+ >>> limit(lambda x: (x-sin(x))/x**3, 0)
2048
+ 0.166666666666666666666666666667
2049
+
2050
+ Computing the exponential function using its limit definition::
2051
+
2052
+ >>> limit(lambda n: (1+3/n)**n, inf)
2053
+ 20.0855369231876677409285296546
2054
+ >>> exp(3)
2055
+ 20.0855369231876677409285296546
2056
+
2057
+ A limit for `\pi`::
2058
+
2059
+ >>> f = lambda n: 2**(4*n+1)*fac(n)**4/(2*n+1)/fac(2*n)**2
2060
+ >>> limit(f, inf)
2061
+ 3.14159265358979323846264338328
2062
+
2063
+ Calculating the coefficient in Stirling's formula::
2064
+
2065
+ >>> limit(lambda n: fac(n) / (sqrt(n)*(n/e)**n), inf)
2066
+ 2.50662827463100050241576528481
2067
+ >>> sqrt(2*pi)
2068
+ 2.50662827463100050241576528481
2069
+
2070
+ Evaluating Euler's constant `\gamma` using the limit representation
2071
+
2072
+ .. math ::
2073
+
2074
+ \gamma = \lim_{n \rightarrow \infty } \left[ \left(
2075
+ \sum_{k=1}^n \frac{1}{k} \right) - \log(n) \right]
2076
+
2077
+ (which converges notoriously slowly)::
2078
+
2079
+ >>> f = lambda n: sum([mpf(1)/k for k in range(1,int(n)+1)]) - log(n)
2080
+ >>> limit(f, inf)
2081
+ 0.577215664901532860606512090082
2082
+ >>> +euler
2083
+ 0.577215664901532860606512090082
2084
+
2085
+ With default settings, the following limit converges too slowly
2086
+ to be evaluated accurately. Changing to exponential sampling
2087
+ however gives a perfect result::
2088
+
2089
+ >>> f = lambda x: sqrt(x**3+x**2)/(sqrt(x**3)+x)
2090
+ >>> limit(f, inf)
2091
+ 0.992831158558330281129249686491
2092
+ >>> limit(f, inf, exp=True)
2093
+ 1.0
2094
+
2095
+ """
2096
+
2097
+ if ctx.isinf(x):
2098
+ direction = ctx.sign(x)
2099
+ g = lambda k: f(ctx.mpf(k+1)*direction)
2100
+ else:
2101
+ direction *= ctx.one
2102
+ g = lambda k: f(x + direction/(k+1))
2103
+ if exp:
2104
+ h = g
2105
+ g = lambda k: h(2**k)
2106
+
2107
+ def update(values, indices):
2108
+ for k in indices:
2109
+ values.append(g(k+1))
2110
+
2111
+ # XXX: steps used by nsum don't work well
2112
+ if not 'steps' in kwargs:
2113
+ kwargs['steps'] = [10]
2114
+
2115
+ return +ctx.adaptive_extrapolation(update, None, kwargs)
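The *direction* keyword described in the docstring selects the side from which a finite limit point is approached; a small illustration (not from the docstring) using the sign function::

    from mpmath import mp, limit

    mp.dps = 15
    sign = lambda t: abs(t)/t             # +1 for t > 0, -1 for t < 0
    print(limit(sign, 0))                 # approach from the right: 1.0
    print(limit(sign, 0, direction=-1))   # approach from the left: -1.0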
llmeval-env/lib/python3.10/site-packages/mpmath/calculus/inverselaplace.py ADDED
@@ -0,0 +1,973 @@

1
+ # contributed to mpmath by Kristopher L. Kuhlman, February 2017
2
+ # contributed to mpmath by Guillermo Navas-Palencia, February 2022
3
+
4
+ class InverseLaplaceTransform(object):
5
+ r"""
6
+ Inverse Laplace transform methods are implemented using this
7
+ class, in order to simplify the code and provide a common
8
+ infrastructure.
9
+
10
+ Implement a custom inverse Laplace transform algorithm by
11
+ subclassing :class:`InverseLaplaceTransform` and implementing the
12
+ appropriate methods. The subclass can then be used by
13
+ :func:`~mpmath.invertlaplace` by passing it as the *method*
14
+ argument.
15
+ """
16
+
17
+ def __init__(self, ctx):
18
+ self.ctx = ctx
19
+
20
+ def calc_laplace_parameter(self, t, **kwargs):
21
+ r"""
22
+ Determine the vector of Laplace parameter values needed for an
23
+ algorithm, this will depend on the choice of algorithm (de
24
+ Hoog is default), the algorithm-specific parameters passed (or
25
+ default ones), and desired time.
26
+ """
27
+ raise NotImplementedError
28
+
29
+ def calc_time_domain_solution(self, fp):
30
+ r"""
31
+ Compute the time domain solution, after computing the
32
+ Laplace-space function evaluations at the abscissa required
33
+ for the algorithm. Abscissa computed for one algorithm are
34
+ typically not useful for another algorithm.
35
+ """
36
+ raise NotImplementedError
37
+
38
+
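A minimal sketch (my own, not part of the module) of the two-step protocol described above, driving one of the bundled classes by hand instead of going through :func:`~mpmath.invertlaplace`; the `1/(p+1)^2` transform pair is the one used in the `invertlaplace` docstring further below::

    from mpmath import mp
    from mpmath.calculus.inverselaplace import FixedTalbot

    mp.dps = 15
    fbar = lambda p: 1/(p + 1)**2        # known inverse: f(t) = t*exp(-t)
    t = 1.0

    talbot = FixedTalbot(mp)
    talbot.calc_laplace_parameter(t)     # choose the abscissa for this time
    fp = [fbar(p) for p in talbot.p]     # sample the Laplace-space function
    print(talbot.calc_time_domain_solution(fp, t))   # ~ exp(-1) = 0.367879441171442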
39
+ class FixedTalbot(InverseLaplaceTransform):
40
+
41
+ def calc_laplace_parameter(self, t, **kwargs):
42
+ r"""The "fixed" Talbot method deforms the Bromwich contour towards
43
+ `-\infty` in the shape of a parabola. Traditionally the Talbot
44
+ algorithm has adjustable parameters, but the "fixed" version
45
+ does not. The `r` parameter could be passed in as a parameter,
46
+ if you want to override the default given by (Abate & Valko,
47
+ 2004).
48
+
49
+ The Laplace parameter is sampled along a parabola opening
50
+ along the negative imaginary axis, with the base of the
51
+ parabola along the real axis at
52
+ `p=\frac{r}{t_\mathrm{max}}`. As the number of terms used in
53
+ the approximation (degree) grows, the abscissa required for
54
+ function evaluation tend towards `-\infty`, requiring high
55
+ precision to prevent overflow. If any poles, branch cuts or
56
+ other singularities exist such that the deformed Bromwich
57
+ contour lies to the left of the singularity, the method will
58
+ fail.
59
+
60
+ **Optional arguments**
61
+
62
+ :class:`~mpmath.calculus.inverselaplace.FixedTalbot.calc_laplace_parameter`
63
+ recognizes the following keywords
64
+
65
+ *tmax*
66
+ maximum time associated with vector of times
67
+ (typically just the time requested)
68
+ *degree*
69
+ integer order of approximation (M = number of terms)
70
+ *r*
71
+ abscissa for `p_0` (otherwise computed using rule
72
+ of thumb `2M/5`)
73
+
74
+ The working precision will be increased according to a rule of
75
+ thumb. If 'degree' is not specified, the working precision and
76
+ degree are chosen to hopefully achieve the dps of the calling
77
+ context. If 'degree' is specified, the working precision is
78
+ chosen to achieve maximum resulting precision for the
79
+ specified degree.
80
+
81
+ .. math ::
82
+
83
+ p_0=\frac{r}{t}
84
+
85
+ .. math ::
86
+
87
+ p_i=\frac{i r \pi}{Mt_\mathrm{max}}\left[\cot\left(
88
+ \frac{i\pi}{M}\right) + j \right] \qquad 1\le i <M
89
+
90
+ where `j=\sqrt{-1}`, `r=2M/5`, and `t_\mathrm{max}` is the
91
+ maximum specified time.
92
+
93
+ """
94
+
95
+ # required
96
+ # ------------------------------
97
+ # time of desired approximation
98
+ self.t = self.ctx.convert(t)
99
+
100
+ # optional
101
+ # ------------------------------
102
+ # maximum time desired (used for scaling) default is requested
103
+ # time.
104
+ self.tmax = self.ctx.convert(kwargs.get('tmax', self.t))
105
+
106
+ # empirical relationships used here based on a linear fit of
107
+ # requested and delivered dps for exponentially decaying time
108
+ # functions for requested dps up to 512.
109
+
110
+ if 'degree' in kwargs:
111
+ self.degree = kwargs['degree']
112
+ self.dps_goal = self.degree
113
+ else:
114
+ self.dps_goal = int(1.72*self.ctx.dps)
115
+ self.degree = max(12, int(1.38*self.dps_goal))
116
+
117
+ M = self.degree
118
+
119
+ # this is adjusting the dps of the calling context hopefully
120
+ # the caller doesn't monkey around with it between calling
121
+ # this routine and calc_time_domain_solution()
122
+ self.dps_orig = self.ctx.dps
123
+ self.ctx.dps = self.dps_goal
124
+
125
+ # Abate & Valko rule of thumb for r parameter
126
+ self.r = kwargs.get('r', self.ctx.fraction(2, 5)*M)
127
+
128
+ self.theta = self.ctx.linspace(0.0, self.ctx.pi, M+1)
129
+
130
+ self.cot_theta = self.ctx.matrix(M, 1)
131
+ self.cot_theta[0] = 0 # not used
132
+
133
+ # all but time-dependent part of p
134
+ self.delta = self.ctx.matrix(M, 1)
135
+ self.delta[0] = self.r
136
+
137
+ for i in range(1, M):
138
+ self.cot_theta[i] = self.ctx.cot(self.theta[i])
139
+ self.delta[i] = self.r*self.theta[i]*(self.cot_theta[i] + 1j)
140
+
141
+ self.p = self.ctx.matrix(M, 1)
142
+ self.p = self.delta/self.tmax
143
+
144
+ # NB: p is complex (mpc)
145
+
146
+ def calc_time_domain_solution(self, fp, t, manual_prec=False):
147
+ r"""The fixed Talbot time-domain solution is computed from the
148
+ Laplace-space function evaluations using
149
+
150
+ .. math ::
151
+
152
+ f(t,M)=\frac{2}{5t}\sum_{k=0}^{M-1}\Re \left[
153
+ \gamma_k \bar{f}(p_k)\right]
154
+
155
+ where
156
+
157
+ .. math ::
158
+
159
+ \gamma_0 = \frac{1}{2}e^{r}\bar{f}(p_0)
160
+
161
+ .. math ::
162
+
163
+ \gamma_k = e^{tp_k}\left\lbrace 1 + \frac{jk\pi}{M}\left[1 +
164
+ \cot \left( \frac{k \pi}{M} \right)^2 \right] - j\cot\left(
165
+ \frac{k \pi}{M}\right)\right \rbrace \qquad 1\le k<M.
166
+
167
+ Again, `j=\sqrt{-1}`.
168
+
169
+ Before calling this function, call
170
+ :class:`~mpmath.calculus.inverselaplace.FixedTalbot.calc_laplace_parameter`
171
+ to set the parameters and compute the required coefficients.
172
+
173
+ **References**
174
+
175
+ 1. Abate, J., P. Valko (2004). Multi-precision Laplace
176
+ transform inversion. *International Journal for Numerical
177
+ Methods in Engineering* 60:979-993,
178
+ http://dx.doi.org/10.1002/nme.995
179
+ 2. Talbot, A. (1979). The accurate numerical inversion of
180
+ Laplace transforms. *IMA Journal of Applied Mathematics*
181
+ 23(1):97, http://dx.doi.org/10.1093/imamat/23.1.97
182
+ """
183
+
184
+ # required
185
+ # ------------------------------
186
+ self.t = self.ctx.convert(t)
187
+
188
+ # assume fp was computed from p matrix returned from
189
+ # calc_laplace_parameter(), so is already a list or matrix of
190
+ # mpmath 'mpc' types
191
+
192
+ # these were computed in previous call to
193
+ # calc_laplace_parameter()
194
+ theta = self.theta
195
+ delta = self.delta
196
+ M = self.degree
197
+ p = self.p
198
+ r = self.r
199
+
200
+ ans = self.ctx.matrix(M, 1)
201
+ ans[0] = self.ctx.exp(delta[0])*fp[0]/2
202
+
203
+ for i in range(1, M):
204
+ ans[i] = self.ctx.exp(delta[i])*fp[i]*(
205
+ 1 + 1j*theta[i]*(1 + self.cot_theta[i]**2) -
206
+ 1j*self.cot_theta[i])
207
+
208
+ result = self.ctx.fraction(2, 5)*self.ctx.fsum(ans)/self.t
209
+
210
+ # setting dps back to value when calc_laplace_parameter was
211
+ # called, unless flag is set.
212
+ if not manual_prec:
213
+ self.ctx.dps = self.dps_orig
214
+
215
+ return result.real
216
+
217
+
218
+ # ****************************************
219
+
220
+ class Stehfest(InverseLaplaceTransform):
221
+
222
+ def calc_laplace_parameter(self, t, **kwargs):
223
+ r"""
224
+ The Gaver-Stehfest method is a discrete approximation of the
225
+ Widder-Post inversion algorithm, rather than a direct
226
+ approximation of the Bromwich contour integral.
227
+
228
+ The method only uses abscissa along the real axis, and therefore has
229
+ issues inverting oscillatory functions (which have poles in
230
+ pairs away from the real axis).
231
+
232
+ The working precision will be increased according to a rule of
233
+ thumb. If 'degree' is not specified, the working precision and
234
+ degree are chosen to hopefully achieve the dps of the calling
235
+ context. If 'degree' is specified, the working precision is
236
+ chosen to achieve maximum resulting precision for the
237
+ specified degree.
238
+
239
+ .. math ::
240
+
241
+ p_k = \frac{k \log 2}{t} \qquad 1 \le k \le M
242
+ """
243
+
244
+ # required
245
+ # ------------------------------
246
+ # time of desired approximation
247
+ self.t = self.ctx.convert(t)
248
+
249
+ # optional
250
+ # ------------------------------
251
+
252
+ # empirical relationships used here based on a linear fit of
253
+ # requested and delivered dps for exponentially decaying time
254
+ # functions for requested dps up to 512.
255
+
256
+ if 'degree' in kwargs:
257
+ self.degree = kwargs['degree']
258
+ self.dps_goal = int(1.38*self.degree)
259
+ else:
260
+ self.dps_goal = int(2.93*self.ctx.dps)
261
+ self.degree = max(16, self.dps_goal)
262
+
263
+ # _coeff routine requires even degree
264
+ if self.degree % 2 > 0:
265
+ self.degree += 1
266
+
267
+ M = self.degree
268
+
269
+ # this is adjusting the dps of the calling context
270
+ # hopefully the caller doesn't monkey around with it
271
+ # between calling this routine and calc_time_domain_solution()
272
+ self.dps_orig = self.ctx.dps
273
+ self.ctx.dps = self.dps_goal
274
+
275
+ self.V = self._coeff()
276
+ self.p = self.ctx.matrix(self.ctx.arange(1, M+1))*self.ctx.ln2/self.t
277
+
278
+ # NB: p is real (mpf)
279
+
280
+ def _coeff(self):
281
+ r"""Salzer summation weights (aka, "Stehfest coefficients")
282
+ only depend on the approximation order (M) and the precision"""
283
+
284
+ M = self.degree
285
+ M2 = int(M/2) # checked earlier that M is even
286
+
287
+ V = self.ctx.matrix(M, 1)
288
+
289
+ # Salzer summation weights
290
+ # get very large in magnitude and oscillate in sign,
291
+ # if the precision is not high enough, there will be
292
+ # catastrophic cancellation
293
+ for k in range(1, M+1):
294
+ z = self.ctx.matrix(min(k, M2)+1, 1)
295
+ for j in range(int((k+1)/2), min(k, M2)+1):
296
+ z[j] = (self.ctx.power(j, M2)*self.ctx.fac(2*j)/
297
+ (self.ctx.fac(M2-j)*self.ctx.fac(j)*
298
+ self.ctx.fac(j-1)*self.ctx.fac(k-j)*
299
+ self.ctx.fac(2*j-k)))
300
+ V[k-1] = self.ctx.power(-1, k+M2)*self.ctx.fsum(z)
301
+
302
+ return V
303
+
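The Salzer weights produced by `_coeff` alternate in sign, grow rapidly in magnitude, and sum to zero analytically, so the tiny non-zero residual of the sum below is precisely the cancellation the comments warn about. A small illustration (my own)::

    from mpmath import mp
    from mpmath.calculus.inverselaplace import Stehfest

    mp.dps = 30
    st = Stehfest(mp)
    st.calc_laplace_parameter(1.0, degree=16)   # raises mp.dps internally
    weights = [st.V[k] for k in range(st.degree)]
    print(max(abs(w) for w in weights))  # individual weights are huge
    print(mp.fsum(weights))              # analytically 0; residual shows cancellation
    mp.dps = st.dps_orig                 # restore precision (solve step skipped here)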
304
+ def calc_time_domain_solution(self, fp, t, manual_prec=False):
305
+ r"""Compute time-domain Stehfest algorithm solution.
306
+
307
+ .. math ::
308
+
309
+ f(t,M) = \frac{\log 2}{t} \sum_{k=1}^{M} V_k \bar{f}\left(
310
+ p_k \right)
311
+
312
+ where
313
+
314
+ .. math ::
315
+
316
+ V_k = (-1)^{k + N/2} \sum^{\min(k,N/2)}_{i=\lfloor(k+1)/2 \rfloor}
317
+ \frac{i^{\frac{N}{2}}(2i)!}{\left(\frac{N}{2}-i \right)! \, i! \,
318
+ \left(i-1 \right)! \, \left(k-i\right)! \, \left(2i-k \right)!}
319
+
320
+ As the degree increases, the abscissa (`p_k`) only increase
321
+ linearly towards `\infty`, but the Stehfest coefficients
322
+ (`V_k`) alternate in sign and increase rapidly in magnitude,
323
+ requiring high precision to prevent overflow or loss of
324
+ significance when evaluating the sum.
325
+
326
+ **References**
327
+
328
+ 1. Widder, D. (1941). *The Laplace Transform*. Princeton.
329
+ 2. Stehfest, H. (1970). Algorithm 368: numerical inversion of
330
+ Laplace transforms. *Communications of the ACM* 13(1):47-49,
331
+ http://dx.doi.org/10.1145/361953.361969
332
+
333
+ """
334
+
335
+ # required
336
+ self.t = self.ctx.convert(t)
337
+
338
+ # assume fp was computed from p matrix returned from
339
+ # calc_laplace_parameter(), so is already
340
+ # a list or matrix of mpmath 'mpf' types
341
+
342
+ result = self.ctx.fdot(self.V, fp)*self.ctx.ln2/self.t
343
+
344
+ # setting dps back to value when calc_laplace_parameter was called
345
+ if not manual_prec:
346
+ self.ctx.dps = self.dps_orig
347
+
348
+ # ignore any small imaginary part
349
+ return result.real
350
+
351
+
352
+ # ****************************************
353
+
354
+ class deHoog(InverseLaplaceTransform):
355
+
356
+ def calc_laplace_parameter(self, t, **kwargs):
357
+ r"""The de Hoog, Knight & Stokes algorithm is an
358
+ accelerated form of the Fourier series numerical
359
+ inverse Laplace transform algorithm.
360
+
361
+ .. math ::
362
+
363
+ p_k = \gamma + \frac{jk}{T} \qquad 0 \le k < 2M+1
364
+
365
+ where
366
+
367
+ .. math ::
368
+
369
+ \gamma = \alpha - \frac{\log \mathrm{tol}}{2T},
370
+
371
+ `j=\sqrt{-1}`, `T = 2t_\mathrm{max}` is a scaled time,
372
+ `\alpha=10^{-\mathrm{dps\_goal}}` is the real part of the
373
+ rightmost pole or singularity, which is chosen based on the
374
+ desired accuracy (assuming the rightmost singularity is 0),
375
+ and `\mathrm{tol}=10\alpha` is the desired tolerance, which is
376
+ chosen in relation to `\alpha`.
377
+
378
+ When increasing the degree, the abscissa increase towards
379
+ `j\infty`, but more slowly than the fixed Talbot
380
+ algorithm. The de Hoog et al. algorithm typically does better
381
+ with oscillatory functions of time, and less well-behaved
382
+ functions. The method tends to be slower than the Talbot and
383
+ Stehfest algorithms, especially at very high precision
384
+ (e.g., `>500` digits precision).
385
+
386
+ """
387
+
388
+ # required
389
+ # ------------------------------
390
+ self.t = self.ctx.convert(t)
391
+
392
+ # optional
393
+ # ------------------------------
394
+ self.tmax = kwargs.get('tmax', self.t)
395
+
396
+ # empirical relationships used here based on a linear fit of
397
+ # requested and delivered dps for exponentially decaying time
398
+ # functions for requested dps up to 512.
399
+
400
+ if 'degree' in kwargs:
401
+ self.degree = kwargs['degree']
402
+ self.dps_goal = int(1.38*self.degree)
403
+ else:
404
+ self.dps_goal = int(self.ctx.dps*1.36)
405
+ self.degree = max(10, self.dps_goal)
406
+
407
+ # 2*M+1 terms in approximation
408
+ M = self.degree
409
+
410
+ # adjust alpha component of abscissa of convergence for higher
411
+ # precision
412
+ tmp = self.ctx.power(10.0, -self.dps_goal)
413
+ self.alpha = self.ctx.convert(kwargs.get('alpha', tmp))
414
+
415
+ # desired tolerance (here simply related to alpha)
416
+ self.tol = self.ctx.convert(kwargs.get('tol', self.alpha*10.0))
417
+ self.np = 2*self.degree+1 # number of terms in approximation
418
+
419
+ # this is adjusting the dps of the calling context
420
+ # hopefully the caller doesn't monkey around with it
421
+ # between calling this routine and calc_time_domain_solution()
422
+ self.dps_orig = self.ctx.dps
423
+ self.ctx.dps = self.dps_goal
424
+
425
+ # scaling factor (likely tunable, but 2 is typical)
426
+ self.scale = kwargs.get('scale', 2)
427
+ self.T = self.ctx.convert(kwargs.get('T', self.scale*self.tmax))
428
+
429
+ self.p = self.ctx.matrix(2*M+1, 1)
430
+ self.gamma = self.alpha - self.ctx.log(self.tol)/(self.scale*self.T)
431
+ self.p = (self.gamma + self.ctx.pi*
432
+ self.ctx.matrix(self.ctx.arange(self.np))/self.T*1j)
433
+
434
+ # NB: p is complex (mpc)
435
+
436
+ def calc_time_domain_solution(self, fp, t, manual_prec=False):
437
+ r"""Calculate time-domain solution for
438
+ de Hoog, Knight & Stokes algorithm.
439
+
440
+ The un-accelerated Fourier series approach is:
441
+
442
+ .. math ::
443
+
444
+ f(t,2M+1) = \frac{e^{\gamma t}}{T} \sum_{k=0}^{2M}{}^{'}
445
+ \Re\left[\bar{f}\left( p_k \right)
446
+ e^{i\pi t/T} \right],
447
+
448
+ where the prime on the summation indicates the first term is halved.
449
+
450
+ This simplistic approach requires so many function evaluations
451
+ that it is not practical. Non-linear acceleration is
452
+ accomplished via Pade-approximation and an analytic expression
453
+ for the remainder of the continued fraction. See the original
454
+ paper (reference 2 below) a detailed description of the
455
+ numerical approach.
456
+
457
+ **References**
458
+
459
+ 1. Davies, B. (2005). *Integral Transforms and their
460
+ Applications*, Third Edition. Springer.
461
+ 2. de Hoog, F., J. Knight, A. Stokes (1982). An improved
462
+ method for numerical inversion of Laplace transforms. *SIAM
463
+ Journal on Scientific and Statistical Computing* 3:357-366,
464
+ http://dx.doi.org/10.1137/0903022
465
+
466
+ """
467
+
468
+ M = self.degree
469
+ np = self.np
470
+ T = self.T
471
+
472
+ self.t = self.ctx.convert(t)
473
+
474
+ # would it be useful to try re-using
475
+ # space between e&q and A&B?
476
+ e = self.ctx.zeros(np, M+1)
477
+ q = self.ctx.matrix(2*M, M)
478
+ d = self.ctx.matrix(np, 1)
479
+ A = self.ctx.zeros(np+1, 1)
480
+ B = self.ctx.ones(np+1, 1)
481
+
482
+ # initialize Q-D table
483
+ e[:, 0] = 0.0 + 0j
484
+ q[0, 0] = fp[1]/(fp[0]/2)
485
+ for i in range(1, 2*M):
486
+ q[i, 0] = fp[i+1]/fp[i]
487
+
488
+ # rhombus rule for filling triangular Q-D table (e & q)
489
+ for r in range(1, M+1):
490
+ # start with e, column 1, 0:2*M-2
491
+ mr = 2*(M-r) + 1
492
+ e[0:mr, r] = q[1:mr+1, r-1] - q[0:mr, r-1] + e[1:mr+1, r-1]
493
+ if not r == M:
494
+ rq = r+1
495
+ mr = 2*(M-rq)+1 + 2
496
+ for i in range(mr):
497
+ q[i, rq-1] = q[i+1, rq-2]*e[i+1, rq-1]/e[i, rq-1]
498
+
499
+ # build up continued fraction coefficients (d)
500
+ d[0] = fp[0]/2
501
+ for r in range(1, M+1):
502
+ d[2*r-1] = -q[0, r-1] # even terms
503
+ d[2*r] = -e[0, r] # odd terms
504
+
505
+ # seed A and B for recurrence
506
+ A[0] = 0.0 + 0.0j
507
+ A[1] = d[0]
508
+ B[0:2] = 1.0 + 0.0j
509
+
510
+ # base of the power series
511
+ z = self.ctx.expjpi(self.t/T) # i*pi is already in fcn
512
+
513
+ # coefficients of Pade approximation (A & B)
514
+ # using recurrence for all but last term
515
+ for i in range(1, 2*M):
516
+ A[i+1] = A[i] + d[i]*A[i-1]*z
517
+ B[i+1] = B[i] + d[i]*B[i-1]*z
518
+
519
+ # "improved remainder" to continued fraction
520
+ brem = (1 + (d[2*M-1] - d[2*M])*z)/2
521
+ # powm1(x,y) computes x^y - 1 more accurately near zero
522
+ rem = brem*self.ctx.powm1(1 + d[2*M]*z/brem,
523
+ self.ctx.fraction(1, 2))
524
+
525
+ # last term of recurrence using new remainder
526
+ A[np] = A[2*M] + rem*A[2*M-1]
527
+ B[np] = B[2*M] + rem*B[2*M-1]
528
+
529
+ # diagonal Pade approximation
530
+ # F=A/B represents accelerated trapezoid rule
531
+ result = self.ctx.exp(self.gamma*self.t)/T*(A[np]/B[np]).real
532
+
533
+ # setting dps back to value when calc_laplace_parameter was called
534
+ if not manual_prec:
535
+ self.ctx.dps = self.dps_orig
536
+
537
+ return result
538
+
539
+
540
+ # ****************************************
541
+
542
+ class Cohen(InverseLaplaceTransform):
543
+
544
+ def calc_laplace_parameter(self, t, **kwargs):
545
+ r"""The Cohen algorithm accelerates the convergence of the nearly
546
+ alternating series resulting from the application of the trapezoidal
547
+ rule to the Bromwich contour inversion integral.
548
+
549
+ .. math ::
550
+
551
+ p_k = \frac{\gamma}{2 t} + \frac{\pi i k}{t} \qquad 0 \le k < M
552
+
553
+ where
554
+
555
+ .. math ::
556
+
557
+ \gamma = \frac{2}{3} (d + \log(10) + \log(2 t)),
558
+
559
+ `d = \mathrm{dps\_goal}`, which is chosen based on the desired
560
+ accuracy using the method developed in [1] to improve numerical
561
+ stability. The Cohen algorithm shows robustness similar to the de Hoog
562
+ et al. algorithm, but it is faster than the fixed Talbot algorithm.
563
+
564
+ **Optional arguments**
565
+
566
+ *degree*
567
+ integer order of the approximation (M = number of terms)
568
+ *alpha*
569
+ abscissa for `p_0` (controls the discretization error)
570
+
571
+ The working precision will be increased according to a rule of
572
+ thumb. If 'degree' is not specified, the working precision and
573
+ degree are chosen to hopefully achieve the dps of the calling
574
+ context. If 'degree' is specified, the working precision is
575
+ chosen to achieve maximum resulting precision for the
576
+ specified degree.
577
+
578
+ **References**
579
+
580
+ 1. P. Glasserman, J. Ruiz-Mata (2006). Computing the credit loss
581
+ distribution in the Gaussian copula model: a comparison of methods.
582
+ *Journal of Credit Risk* 2(4):33-66, 10.21314/JCR.2006.057
583
+
584
+ """
585
+ self.t = self.ctx.convert(t)
586
+
587
+ if 'degree' in kwargs:
588
+ self.degree = kwargs['degree']
589
+ self.dps_goal = int(1.5 * self.degree)
590
+ else:
591
+ self.dps_goal = int(self.ctx.dps * 1.74)
592
+ self.degree = max(22, int(1.31 * self.dps_goal))
593
+
594
+ M = self.degree + 1
595
+
596
+ # this is adjusting the dps of the calling context hopefully
597
+ # the caller doesn't monkey around with it between calling
598
+ # this routine and calc_time_domain_solution()
599
+ self.dps_orig = self.ctx.dps
600
+ self.ctx.dps = self.dps_goal
601
+
602
+ ttwo = 2 * self.t
603
+ tmp = self.ctx.dps * self.ctx.log(10) + self.ctx.log(ttwo)
604
+ tmp = self.ctx.fraction(2, 3) * tmp
605
+ self.alpha = self.ctx.convert(kwargs.get('alpha', tmp))
606
+
607
+ # all but time-dependent part of p
608
+ a_t = self.alpha / ttwo
609
+ p_t = self.ctx.pi * 1j / self.t
610
+
611
+ self.p = self.ctx.matrix(M, 1)
612
+ self.p[0] = a_t
613
+
614
+ for i in range(1, M):
615
+ self.p[i] = a_t + i * p_t
616
+
617
+ def calc_time_domain_solution(self, fp, t, manual_prec=False):
618
+ r"""Calculate time-domain solution for Cohen algorithm.
619
+
620
+ The accelerated nearly alternating series is:
621
+
622
+ .. math ::
623
+
624
+ f(t, M) = \frac{e^{\gamma / 2}}{t} \left[\frac{1}{2}
625
+ \Re\left(\bar{f}\left(\frac{\gamma}{2t}\right) \right) -
626
+ \sum_{k=0}^{M-1}\frac{c_{M,k}}{d_M}\Re\left(\bar{f}
627
+ \left(\frac{\gamma + 2(k+1) \pi i}{2t}\right)\right)\right],
628
+
629
+ where coefficients `\frac{c_{M, k}}{d_M}` are described in [1].
630
+
631
+ 1. H. Cohen, F. Rodriguez Villegas, D. Zagier (2000). Convergence
632
+ acceleration of alternating series. *Experiment. Math* 9(1):3-12
633
+
634
+ """
635
+ self.t = self.ctx.convert(t)
636
+
637
+ n = self.degree
638
+ M = n + 1
639
+
640
+ A = self.ctx.matrix(M, 1)
641
+ for i in range(M):
642
+ A[i] = fp[i].real
643
+
644
+ d = (3 + self.ctx.sqrt(8)) ** n
645
+ d = (d + 1 / d) / 2
646
+ b = -self.ctx.one
647
+ c = -d
648
+ s = 0
649
+
650
+ for k in range(n):
651
+ c = b - c
652
+ s = s + c * A[k + 1]
653
+ b = 2 * (k + n) * (k - n) * b / ((2 * k + 1) * (k + self.ctx.one))
654
+
655
+ result = self.ctx.exp(self.alpha / 2) / self.t * (A[0] / 2 - s / d)
656
+
657
+ # setting dps back to value when calc_laplace_parameter was
658
+ # called, unless flag is set.
659
+ if not manual_prec:
660
+ self.ctx.dps = self.dps_orig
661
+
662
+ return result
663
+
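With all four method classes defined, a short comparison (my own sketch, reusing the `J_0` transform pair from the :func:`~mpmath.invertlaplace` docstring below) illustrates the qualitative claims in the docstrings, such as the purely real Stehfest abscissa having trouble with oscillatory functions::

    from mpmath import mp, invertlaplace, besselj, sqrt

    mp.dps = 15
    fbar = lambda p: 1/sqrt(p*p + 1)     # Laplace transform of J_0(t)
    t = 10.0
    exact = besselj(0, t)
    for m in ('talbot', 'stehfest', 'dehoog', 'cohen'):
        err = exact - invertlaplace(fbar, t, method=m)
        print(m, err)                    # accuracy varies widely between methods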
664
+
665
+ # ****************************************
666
+
667
+ class LaplaceTransformInversionMethods(object):
668
+ def __init__(ctx, *args, **kwargs):
669
+ ctx._fixed_talbot = FixedTalbot(ctx)
670
+ ctx._stehfest = Stehfest(ctx)
671
+ ctx._de_hoog = deHoog(ctx)
672
+ ctx._cohen = Cohen(ctx)
673
+
674
+ def invertlaplace(ctx, f, t, **kwargs):
675
+ r"""Computes the numerical inverse Laplace transform for a
676
+ Laplace-space function at a given time. The function being
677
+ evaluated is assumed to be a real-valued function of time.
678
+
679
+ The user must supply a Laplace-space function `\bar{f}(p)`,
680
+ and a desired time at which to estimate the time-domain
681
+ solution `f(t)`.
682
+
683
+ A few basic examples of Laplace-space functions with known
684
+ inverses (see references [1,2]):
685
+
686
+ .. math ::
687
+
688
+ \mathcal{L}\left\lbrace f(t) \right\rbrace=\bar{f}(p)
689
+
690
+ .. math ::
691
+
692
+ \mathcal{L}^{-1}\left\lbrace \bar{f}(p) \right\rbrace = f(t)
693
+
694
+ .. math ::
695
+
696
+ \bar{f}(p) = \frac{1}{(p+1)^2}
697
+
698
+ .. math ::
699
+
700
+ f(t) = t e^{-t}
701
+
702
+ >>> from mpmath import *
703
+ >>> mp.dps = 15; mp.pretty = True
704
+ >>> tt = [0.001, 0.01, 0.1, 1, 10]
705
+ >>> fp = lambda p: 1/(p+1)**2
706
+ >>> ft = lambda t: t*exp(-t)
707
+ >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='talbot')
708
+ (0.000999000499833375, 8.57923043561212e-20)
709
+ >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='talbot')
710
+ (0.00990049833749168, 3.27007646698047e-19)
711
+ >>> ft(tt[2]),ft(tt[2])-invertlaplace(fp,tt[2],method='talbot')
712
+ (0.090483741803596, -1.75215800052168e-18)
713
+ >>> ft(tt[3]),ft(tt[3])-invertlaplace(fp,tt[3],method='talbot')
714
+ (0.367879441171442, 1.2428864009344e-17)
715
+ >>> ft(tt[4]),ft(tt[4])-invertlaplace(fp,tt[4],method='talbot')
716
+ (0.000453999297624849, 4.04513489306658e-20)
717
+
718
+ The methods also work for higher precision:
719
+
720
+ >>> mp.dps = 100; mp.pretty = True
721
+ >>> nstr(ft(tt[0]),15),nstr(ft(tt[0])-invertlaplace(fp,tt[0],method='talbot'),15)
722
+ ('0.000999000499833375', '-4.96868310693356e-105')
723
+ >>> nstr(ft(tt[1]),15),nstr(ft(tt[1])-invertlaplace(fp,tt[1],method='talbot'),15)
724
+ ('0.00990049833749168', '1.23032291513122e-104')
725
+
726
+ .. math ::
727
+
728
+ \bar{f}(p) = \frac{1}{p^2+1}
729
+
730
+ .. math ::
731
+
732
+ f(t) = \mathrm{J}_0(t)
733
+
734
+ >>> mp.dps = 15; mp.pretty = True
735
+ >>> fp = lambda p: 1/sqrt(p*p + 1)
736
+ >>> ft = lambda t: besselj(0,t)
737
+ >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='dehoog')
738
+ (0.999999750000016, -6.09717765032273e-18)
739
+ >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='dehoog')
740
+ (0.99997500015625, -5.61756281076169e-17)
741
+
742
+ .. math ::
743
+
744
+ \bar{f}(p) = \frac{\log p}{p}
745
+
746
+ .. math ::
747
+
748
+ f(t) = -\gamma -\log t
749
+
750
+ >>> mp.dps = 15; mp.pretty = True
751
+ >>> fp = lambda p: log(p)/p
752
+ >>> ft = lambda t: -euler-log(t)
753
+ >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='stehfest')
754
+ (6.3305396140806, -1.92126634837863e-16)
755
+ >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='stehfest')
756
+ (4.02795452108656, -4.81486093200704e-16)
757
+
758
+ **Options**
759
+
760
+ :func:`~mpmath.invertlaplace` recognizes the following optional
761
+ keywords valid for all methods:
762
+
763
+ *method*
764
+ Chooses numerical inverse Laplace transform algorithm
765
+ (described below).
766
+ *degree*
767
+ Number of terms used in the approximation
768
+
769
+ **Algorithms**
770
+
771
+ Mpmath implements four numerical inverse Laplace transform
772
+ algorithms, attributed to: Talbot, Stehfest, de Hoog,
773
+ Knight and Stokes, and Cohen. These can be selected by using
774
+ *method='talbot'*, *method='stehfest'*, *method='dehoog'* or
775
+ *method='cohen'* or by passing the classes *method=FixedTalbot*,
776
+ *method=Stehfest*, *method=deHoog*, or *method=Cohen*. The functions
777
+ :func:`~mpmath.invlaptalbot`, :func:`~mpmath.invlapstehfest`,
778
+ :func:`~mpmath.invlapdehoog`, and :func:`~mpmath.invlapcohen`
779
+ are also available as shortcuts.
780
+
781
+ All four algorithms implement a heuristic balance between the
782
+ requested precision and the precision used internally for the
783
+ calculations. This has been tuned for a typical exponentially
784
+ decaying function and precision up to few hundred decimal
785
+ digits.
786
+
787
+ The Laplace transform converts the variable time (i.e., along
788
+ a line) into a parameter given by the right half of the
789
+ complex `p`-plane. Singularities, poles, and branch cuts in
790
+ the complex `p`-plane contain all the information regarding
791
+ the time behavior of the corresponding function. Any numerical
792
+ method must therefore sample the `p`-plane "close enough" to the
793
+ singularities to accurately characterize them, while not
794
+ getting too close to have catastrophic cancellation, overflow,
795
+ or underflow issues. Most significantly, if one or more of the
796
+ singularities in the `p`-plane is not on the left side of the
797
+ Bromwich contour, its effects will be left out of the computed
798
+ solution, and the answer will be completely wrong.
799
+
800
+ *Talbot*
801
+
802
+ The fixed Talbot method is highly accurate and fast, but the
803
+ method can catastrophically fail for certain classes of time-domain
804
+ behavior, including a Heaviside step function for positive
805
+ time (e.g., `H(t-2)`), or some oscillatory behaviors. The
806
+ Talbot method usually has adjustable parameters, but the
807
+ "fixed" variety implemented here does not. This method
808
+ deforms the Bromwich integral contour in the shape of a
809
+ parabola towards `-\infty`, which leads to problems
810
+ when the solution has a decaying exponential in it (e.g., a
811
+ Heaviside step function is equivalent to multiplying by a
812
+ decaying exponential in Laplace space).
813
+
814
+ *Stehfest*
815
+
816
+ The Stehfest algorithm only uses abscissa along the real axis
817
+ of the complex `p`-plane to estimate the time-domain
818
+ function. Oscillatory time-domain functions have poles away
819
+ from the real axis, so this method does not work well with
820
+ oscillatory functions, especially high-frequency ones. This
821
+ method also depends on summation of terms in a series that
822
+ grows very large, and will have catastrophic cancellation
823
+ during summation if the working precision is too low.
824
+
825
+ *de Hoog et al.*
826
+
827
+ The de Hoog, Knight, and Stokes method is essentially a
828
+ Fourier-series quadrature-type approximation to the Bromwich
829
+ contour integral, with non-linear series acceleration and an
830
+ analytical expression for the remainder term. This method is
831
+ typically one of the most robust. This method also involves the
832
+ greatest amount of overhead, so it is typically the slowest of the
833
+ four methods at high precision.
834
+
835
+ *Cohen*
836
+
837
+ The Cohen method is a trapezoidal rule approximation to the Bromwich
838
+ contour integral, with linear acceleration for alternating
839
+ series. This method is as robust as the de Hoog et al. method and the
840
+ fastest of the four methods at high precision, and is therefore the
841
+ default method.
842
+
843
+ **Singularities**
844
+
845
+ All numerical inverse Laplace transform methods have problems
846
+ at large time when the Laplace-space function has poles,
847
+ singularities, or branch cuts to the right of the origin in
848
+ the complex plane. For simple poles in `\bar{f}(p)` at the
849
+ `p`-plane origin, the time function is constant in time (e.g.,
850
+ `\mathcal{L}\left\lbrace 1 \right\rbrace=1/p` has a pole at
851
+ `p=0`). A pole in `\bar{f}(p)` to the left of the origin is a
852
+ decreasing function of time (e.g., `\mathcal{L}\left\lbrace
853
+ e^{-t/2} \right\rbrace=1/(p+1/2)` has a pole at `p=-1/2`), and
854
+ a pole to the right of the origin leads to an increasing
855
+ function in time (e.g., `\mathcal{L}\left\lbrace t e^{t/4}
856
+ \right\rbrace = 1/(p-1/4)^2` has a pole at `p=1/4`). When
857
+ singularities occur off the real `p` axis, the time-domain
858
+ function is oscillatory. For example `\mathcal{L}\left\lbrace
859
+ \mathrm{J}_0(t) \right\rbrace=1/\sqrt{p^2+1}` has a branch cut
860
+ starting at `p=j=\sqrt{-1}` and is a decaying oscillatory
861
+ function. This range of behaviors is illustrated in Duffy [3]
862
+ Figure 4.10.4, p. 228.
863
+
864
+ In general, as `p \rightarrow \infty`, `t \rightarrow 0` and
865
+ vice-versa. All numerical inverse Laplace transform methods
866
+ require their abscissa to shift closer to the origin for
867
+ larger times. If the abscissa shift to the left of the rightmost
868
+ singularity in the Laplace domain, the answer will be
869
+ completely wrong (the effects of singularities to the right of
870
+ the Bromwich contour are not included in the results).
871
+
872
+ For example, the following exponentially growing function has
873
+ a pole at `p=3`:
874
+
875
+ .. math ::
876
+
877
+ \bar{f}(p)=\frac{1}{p^2-9}
878
+
879
+ .. math ::
880
+
881
+ f(t)=\frac{1}{3}\sinh 3t
882
+
883
+ >>> mp.dps = 15; mp.pretty = True
884
+ >>> fp = lambda p: 1/(p*p-9)
885
+ >>> ft = lambda t: sinh(3*t)/3
886
+ >>> tt = [0.01,0.1,1.0,10.0]
887
+ >>> ft(tt[0]),invertlaplace(fp,tt[0],method='talbot')
888
+ (0.0100015000675014, 0.0100015000675014)
889
+ >>> ft(tt[1]),invertlaplace(fp,tt[1],method='talbot')
890
+ (0.101506764482381, 0.101506764482381)
891
+ >>> ft(tt[2]),invertlaplace(fp,tt[2],method='talbot')
892
+ (3.33929164246997, 3.33929164246997)
893
+ >>> ft(tt[3]),invertlaplace(fp,tt[3],method='talbot')
894
+ (1781079096920.74, -1.61331069624091e-14)
895
+
896
+ **References**
897
+
898
+ 1. [DLMF]_ section 1.14 (http://dlmf.nist.gov/1.14T4)
899
+ 2. Cohen, A.M. (2007). Numerical Methods for Laplace Transform
900
+ Inversion, Springer.
901
+ 3. Duffy, D.G. (1998). Advanced Engineering Mathematics, CRC Press.
902
+
903
+ **Numerical Inverse Laplace Transform Reviews**
904
+
905
+ 1. Bellman, R., R.E. Kalaba, J.A. Lockett (1966). *Numerical
906
+ inversion of the Laplace transform: Applications to Biology,
907
+ Economics, Engineering, and Physics*. Elsevier.
908
+ 2. Davies, B., B. Martin (1979). Numerical inversion of the
909
+ Laplace transform: a survey and comparison of methods. *Journal
910
+ of Computational Physics* 33:1-32,
911
+ http://dx.doi.org/10.1016/0021-9991(79)90025-1
912
+ 3. Duffy, D.G. (1993). On the numerical inversion of Laplace
913
+ transforms: Comparison of three new methods on characteristic
914
+ problems from applications. *ACM Transactions on Mathematical
915
+ Software* 19(3):333-359, http://dx.doi.org/10.1145/155743.155788
916
+ 4. Kuhlman, K.L., (2013). Review of Inverse Laplace Transform
917
+ Algorithms for Laplace-Space Numerical Approaches, *Numerical
918
+ Algorithms*, 63(2):339-355.
919
+ http://dx.doi.org/10.1007/s11075-012-9625-3
920
+
921
+ """
922
+
923
+ rule = kwargs.get('method', 'cohen')
924
+ if type(rule) is str:
925
+ lrule = rule.lower()
926
+ if lrule == 'talbot':
927
+ rule = ctx._fixed_talbot
928
+ elif lrule == 'stehfest':
929
+ rule = ctx._stehfest
930
+ elif lrule == 'dehoog':
931
+ rule = ctx._de_hoog
932
+ elif lrule == 'cohen':
933
+ rule = ctx._cohen
934
+ else:
935
+ raise ValueError("unknown invlap algorithm: %s" % rule)
936
+ else:
937
+ rule = rule(ctx)
938
+
939
+ # determine the vector of Laplace-space parameter
940
+ # needed for the requested method and desired time
941
+ rule.calc_laplace_parameter(t, **kwargs)
942
+
943
+ # compute the Laplace-space function evaluations
944
+ # at the required abscissa.
945
+ fp = [f(p) for p in rule.p]
946
+
947
+ # compute the time-domain solution from the
948
+ # Laplace-space function evaluations
949
+ return rule.calc_time_domain_solution(fp, t)
950
+
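Two usage notes implied by the docstring above, shown as a small sketch (my own): the *degree* keyword is forwarded to the selected method's `calc_laplace_parameter`, and a method class may be passed instead of a string::

    from mpmath import mp, invertlaplace, exp
    from mpmath.calculus.inverselaplace import FixedTalbot

    mp.dps = 30
    fbar = lambda p: 1/(p + 1)**2        # inverse is f(t) = t*exp(-t)
    t = 1.0
    exact = t*exp(-t)
    # method given by name, with an explicit number of terms
    print(exact - invertlaplace(fbar, t, method='cohen', degree=40))
    # method given as a class, as the docstring above allows
    print(exact - invertlaplace(fbar, t, method=FixedTalbot))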
951
+ # shortcuts for the above function for specific methods
952
+ def invlaptalbot(ctx, *args, **kwargs):
953
+ kwargs['method'] = 'talbot'
954
+ return ctx.invertlaplace(*args, **kwargs)
955
+
956
+ def invlapstehfest(ctx, *args, **kwargs):
957
+ kwargs['method'] = 'stehfest'
958
+ return ctx.invertlaplace(*args, **kwargs)
959
+
960
+ def invlapdehoog(ctx, *args, **kwargs):
961
+ kwargs['method'] = 'dehoog'
962
+ return ctx.invertlaplace(*args, **kwargs)
963
+
964
+ def invlapcohen(ctx, *args, **kwargs):
965
+ kwargs['method'] = 'cohen'
966
+ return ctx.invertlaplace(*args, **kwargs)
967
+
968
+
969
+ # ****************************************
970
+
971
+ if __name__ == '__main__':
972
+ import doctest
973
+ doctest.testmod()
llmeval-env/lib/python3.10/site-packages/mpmath/calculus/odes.py ADDED
@@ -0,0 +1,288 @@
1
+ from bisect import bisect
2
+ from ..libmp.backend import xrange
3
+
4
+ class ODEMethods(object):
5
+ pass
6
+
7
+ def ode_taylor(ctx, derivs, x0, y0, tol_prec, n):
8
+ h = tol = ctx.ldexp(1, -tol_prec)
9
+ dim = len(y0)
10
+ xs = [x0]
11
+ ys = [y0]
12
+ x = x0
13
+ y = y0
14
+ orig = ctx.prec
15
+ try:
16
+ ctx.prec = orig*(1+n)
17
+ # Use n steps with Euler's method to get
18
+ # evaluation points for derivatives
19
+ for i in range(n):
20
+ fxy = derivs(x, y)
21
+ y = [y[i]+h*fxy[i] for i in xrange(len(y))]
22
+ x += h
23
+ xs.append(x)
24
+ ys.append(y)
25
+ # Compute derivatives
26
+ ser = [[] for d in range(dim)]
27
+ for j in range(n+1):
28
+ s = [0]*dim
29
+ b = (-1) ** (j & 1)
30
+ k = 1
31
+ for i in range(j+1):
32
+ for d in range(dim):
33
+ s[d] += b * ys[i][d]
34
+ b = (b * (j-k+1)) // (-k)
35
+ k += 1
36
+ scale = h**(-j) / ctx.fac(j)
37
+ for d in range(dim):
38
+ s[d] = s[d] * scale
39
+ ser[d].append(s[d])
40
+ finally:
41
+ ctx.prec = orig
42
+ # Estimate radius for which we can get full accuracy.
43
+ # XXX: do this right for zeros
44
+ radius = ctx.one
45
+ for ts in ser:
46
+ if ts[-1]:
47
+ radius = min(radius, ctx.nthroot(tol/abs(ts[-1]), n))
48
+ radius /= 2 # XXX
49
+ return ser, x0+radius
50
+
51
+ def odefun(ctx, F, x0, y0, tol=None, degree=None, method='taylor', verbose=False):
52
+ r"""
53
+ Returns a function `y(x) = [y_0(x), y_1(x), \ldots, y_n(x)]`
54
+ that is a numerical solution of the `n+1`-dimensional first-order
55
+ ordinary differential equation (ODE) system
56
+
57
+ .. math ::
58
+
59
+ y_0'(x) = F_0(x, [y_0(x), y_1(x), \ldots, y_n(x)])
60
+
61
+ y_1'(x) = F_1(x, [y_0(x), y_1(x), \ldots, y_n(x)])
62
+
63
+ \vdots
64
+
65
+ y_n'(x) = F_n(x, [y_0(x), y_1(x), \ldots, y_n(x)])
66
+
67
+ The derivatives are specified by the vector-valued function
68
+ *F* that evaluates
69
+ `[y_0', \ldots, y_n'] = F(x, [y_0, \ldots, y_n])`.
70
+ The initial point `x_0` is specified by the scalar argument *x0*,
71
+ and the initial value `y(x_0) = [y_0(x_0), \ldots, y_n(x_0)]` is
72
+ specified by the vector argument *y0*.
73
+
74
+ For convenience, if the system is one-dimensional, you may optionally
75
+ provide just a scalar value for *y0*. In this case, *F* should accept
76
+ a scalar *y* argument and return a scalar. The solution function
77
+ *y* will return scalar values instead of length-1 vectors.
78
+
79
+ Evaluation of the solution function `y(x)` is permitted
80
+ for any `x \ge x_0`.
81
+
82
+ A high-order ODE can be solved by transforming it into first-order
83
+ vector form. This transformation is described in standard texts
84
+ on ODEs. Examples will also be given below.
85
+
86
+ **Options, speed and accuracy**
87
+
88
+ By default, :func:`~mpmath.odefun` uses a high-order Taylor series
89
+ method. For reasonably well-behaved problems, the solution will
90
+ be fully accurate to within the working precision. Note that
91
+ *F* must be possible to evaluate to very high precision
92
+ for the generation of Taylor series to work.
93
+
94
+ To get a faster but less accurate solution, you can set a large
95
+ value for *tol* (which defaults roughly to *eps*). If you just
96
+ want to plot the solution or perform a basic simulation,
97
+ *tol = 0.01* is likely sufficient.
98
+
99
+ The *degree* argument controls the degree of the solver (with
100
+ *method='taylor'*, this is the degree of the Taylor series
101
+ expansion). A higher degree means that a longer step can be taken
102
+ before a new local solution must be generated from *F*,
103
+ meaning that fewer steps are required to get from `x_0` to a given
104
+ `x_1`. On the other hand, a higher degree also means that each
105
+ local solution becomes more expensive (i.e., more evaluations of
106
+ *F* are required per step, and at higher precision).
107
+
108
+ The optimal setting therefore involves a tradeoff. Generally,
109
+ decreasing the *degree* for Taylor series is likely to give faster
110
+ solution at low precision, while increasing is likely to be better
111
+ at higher precision.
112
+
113
+ The function
114
+ object returned by :func:`~mpmath.odefun` caches the solutions at all step
115
+ points and uses polynomial interpolation between step points.
116
+ Therefore, once `y(x_1)` has been evaluated for some `x_1`,
117
+ `y(x)` can be evaluated very quickly for any `x_0 \le x \le x_1`.
118
+ and continuing the evaluation up to `x_2 > x_1` is also fast.
119
+
120
+ **Examples of first-order ODEs**
121
+
122
+ We will solve the standard test problem `y'(x) = y(x), y(0) = 1`
123
+ which has explicit solution `y(x) = \exp(x)`::
124
+
125
+ >>> from mpmath import *
126
+ >>> mp.dps = 15; mp.pretty = True
127
+ >>> f = odefun(lambda x, y: y, 0, 1)
128
+ >>> for x in [0, 1, 2.5]:
129
+ ... print((f(x), exp(x)))
130
+ ...
131
+ (1.0, 1.0)
132
+ (2.71828182845905, 2.71828182845905)
133
+ (12.1824939607035, 12.1824939607035)
134
+
135
+ The solution with high precision::
136
+
137
+ >>> mp.dps = 50
138
+ >>> f = odefun(lambda x, y: y, 0, 1)
139
+ >>> f(1)
140
+ 2.7182818284590452353602874713526624977572470937
141
+ >>> exp(1)
142
+ 2.7182818284590452353602874713526624977572470937
143
+
144
+ Using the more general vectorized form, the test problem
145
+ can be input as (note that *f* returns a 1-element vector)::
146
+
147
+ >>> mp.dps = 15
148
+ >>> f = odefun(lambda x, y: [y[0]], 0, [1])
149
+ >>> f(1)
150
+ [2.71828182845905]
151
+
152
+ :func:`~mpmath.odefun` can solve nonlinear ODEs, which are generally
153
+ impossible (and at best difficult) to solve analytically. As
154
+ an example of a nonlinear ODE, we will solve `y'(x) = x \sin(y(x))`
155
+ for `y(0) = \pi/2`. An exact solution happens to be known
156
+ for this problem, and is given by
157
+ `y(x) = 2 \tan^{-1}\left(\exp\left(x^2/2\right)\right)`::
158
+
159
+ >>> f = odefun(lambda x, y: x*sin(y), 0, pi/2)
160
+ >>> for x in [2, 5, 10]:
161
+ ... print((f(x), 2*atan(exp(mpf(x)**2/2))))
162
+ ...
163
+ (2.87255666284091, 2.87255666284091)
164
+ (3.14158520028345, 3.14158520028345)
165
+ (3.14159265358979, 3.14159265358979)
166
+
167
+ If `F` is independent of `y`, an ODE can be solved using direct
168
+ integration. We can therefore obtain a reference solution with
169
+ :func:`~mpmath.quad`::
170
+
171
+ >>> f = lambda x: (1+x**2)/(1+x**3)
172
+ >>> g = odefun(lambda x, y: f(x), pi, 0)
173
+ >>> g(2*pi)
174
+ 0.72128263801696
175
+ >>> quad(f, [pi, 2*pi])
176
+ 0.72128263801696
177
+
178
+ **Examples of second-order ODEs**
179
+
180
+ We will solve the harmonic oscillator equation `y''(x) + y(x) = 0`.
181
+ To do this, we introduce the helper functions `y_0 = y, y_1 = y_0'`
182
+ whereby the original equation can be written as `y_1' + y_0' = 0`. Put
183
+ together, we get the first-order, two-dimensional vector ODE
184
+
185
+ .. math ::
186
+
187
+ \begin{cases}
188
+ y_0' = y_1 \\
189
+ y_1' = -y_0
190
+ \end{cases}
191
+
192
+ To get a well-defined IVP, we need two initial values. With
193
+ `y(0) = y_0(0) = 1` and `-y'(0) = y_1(0) = 0`, the problem will of
194
+ course be solved by `y(x) = y_0(x) = \cos(x)` and
195
+ `-y'(x) = y_1(x) = \sin(x)`. We check this::
196
+
197
+ >>> f = odefun(lambda x, y: [-y[1], y[0]], 0, [1, 0])
198
+ >>> for x in [0, 1, 2.5, 10]:
199
+ ... nprint(f(x), 15)
200
+ ... nprint([cos(x), sin(x)], 15)
201
+ ... print("---")
202
+ ...
203
+ [1.0, 0.0]
204
+ [1.0, 0.0]
205
+ ---
206
+ [0.54030230586814, 0.841470984807897]
207
+ [0.54030230586814, 0.841470984807897]
208
+ ---
209
+ [-0.801143615546934, 0.598472144103957]
210
+ [-0.801143615546934, 0.598472144103957]
211
+ ---
212
+ [-0.839071529076452, -0.54402111088937]
213
+ [-0.839071529076452, -0.54402111088937]
214
+ ---
215
+
216
+ Note that we get both the sine and the cosine solutions
217
+ simultaneously.
218
+
219
+ **TODO**
220
+
221
+ * Better automatic choice of degree and step size
222
+ * Make determination of Taylor series convergence radius
223
+ more robust
224
+ * Allow solution for `x < x_0`
225
+ * Allow solution for complex `x`
226
+ * Test for difficult (ill-conditioned) problems
227
+ * Implement Runge-Kutta and other algorithms
228
+
229
+ """
230
+ if tol:
231
+ tol_prec = int(-ctx.log(tol, 2))+10
232
+ else:
233
+ tol_prec = ctx.prec+10
234
+ degree = degree or (3 + int(3*ctx.dps/2.))
235
+ workprec = ctx.prec + 40
236
+ try:
237
+ len(y0)
238
+ return_vector = True
239
+ except TypeError:
240
+ F_ = F
241
+ F = lambda x, y: [F_(x, y[0])]
242
+ y0 = [y0]
243
+ return_vector = False
244
+ ser, xb = ode_taylor(ctx, F, x0, y0, tol_prec, degree)
245
+ series_boundaries = [x0, xb]
246
+ series_data = [(ser, x0, xb)]
247
+ # We will be working with vectors of Taylor series
248
+ def mpolyval(ser, a):
249
+ return [ctx.polyval(s[::-1], a) for s in ser]
250
+ # Find nearest expansion point; compute if necessary
251
+ def get_series(x):
252
+ if x < x0:
253
+ raise ValueError
254
+ n = bisect(series_boundaries, x)
255
+ if n < len(series_boundaries):
256
+ return series_data[n-1]
257
+ while 1:
258
+ ser, xa, xb = series_data[-1]
259
+ if verbose:
260
+ print("Computing Taylor series for [%f, %f]" % (xa, xb))
261
+ y = mpolyval(ser, xb-xa)
262
+ xa = xb
263
+ ser, xb = ode_taylor(ctx, F, xb, y, tol_prec, degree)
264
+ series_boundaries.append(xb)
265
+ series_data.append((ser, xa, xb))
266
+ if x <= xb:
267
+ return series_data[-1]
268
+ # Evaluation function
269
+ def interpolant(x):
270
+ x = ctx.convert(x)
271
+ orig = ctx.prec
272
+ try:
273
+ ctx.prec = workprec
274
+ ser, xa, xb = get_series(x)
275
+ y = mpolyval(ser, x-xa)
276
+ finally:
277
+ ctx.prec = orig
278
+ if return_vector:
279
+ return [+yk for yk in y]
280
+ else:
281
+ return +y[0]
282
+ return interpolant
283
+
284
+ ODEMethods.odefun = odefun
285
+
286
+ if __name__ == "__main__":
287
+ import doctest
288
+ doctest.testmod()
llmeval-env/lib/python3.10/site-packages/mpmath/calculus/optimization.py ADDED
@@ -0,0 +1,1102 @@
1
+ from __future__ import print_function
2
+
3
+ from copy import copy
4
+
5
+ from ..libmp.backend import xrange
6
+
7
+ class OptimizationMethods(object):
8
+ def __init__(ctx):
9
+ pass
10
+
11
+ ##############
12
+ # 1D-SOLVERS #
13
+ ##############
14
+
15
+ class Newton:
16
+ """
17
+ 1d-solver generating pairs of approximative root and error.
18
+
19
+ Needs a starting point x0 close to the root.
20
+
21
+ Pro:
22
+
23
+ * converges fast
24
+ * sometimes more robust than secant with bad second starting point
25
+
26
+ Contra:
27
+
28
+ * converges slowly for multiple roots
29
+ * needs first derivative
30
+ * 2 function evaluations per iteration
31
+ """
32
+ maxsteps = 20
33
+
34
+ def __init__(self, ctx, f, x0, **kwargs):
35
+ self.ctx = ctx
36
+ if len(x0) == 1:
37
+ self.x0 = x0[0]
38
+ else:
39
+ raise ValueError('expected 1 starting point, got %i' % len(x0))
40
+ self.f = f
41
+ if not 'df' in kwargs:
42
+ def df(x):
43
+ return self.ctx.diff(f, x)
44
+ else:
45
+ df = kwargs['df']
46
+ self.df = df
47
+
48
+ def __iter__(self):
49
+ f = self.f
50
+ df = self.df
51
+ x0 = self.x0
52
+ while True:
53
+ x1 = x0 - f(x0) / df(x0)
54
+ error = abs(x1 - x0)
55
+ x0 = x1
56
+ yield (x1, error)
57
+
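These solver classes are iterators yielding `(approximation, error)` pairs; they are normally consumed by :func:`~mpmath.findroot`, which selects one via its *solver* keyword. A minimal sketch (my own) of both uses, on the classic `cos(x) = x` equation::

    from mpmath import mp, findroot, cos
    from mpmath.calculus.optimization import Newton

    mp.dps = 30
    f = lambda x: cos(x) - x

    # the usual way: let findroot drive the solver
    print(findroot(f, 0.7, solver='newton'))   # 0.7390851332151606... (Dottie number)

    # driving the iterator by hand
    solver = Newton(mp, f, [mp.mpf('0.7')])
    for steps, (x, err) in enumerate(solver):
        if err < mp.eps or steps > solver.maxsteps:
            break
    print(x, err)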
58
+ class Secant:
59
+ """
60
+ 1d-solver generating pairs of approximative root and error.
61
+
62
+ Needs starting points x0 and x1 close to the root.
63
+ x1 defaults to x0 + 0.25.
64
+
65
+ Pro:
66
+
67
+ * converges fast
68
+
69
+ Contra:
70
+
71
+ * converges slowly for multiple roots
72
+ """
73
+ maxsteps = 30
74
+
75
+ def __init__(self, ctx, f, x0, **kwargs):
76
+ self.ctx = ctx
77
+ if len(x0) == 1:
78
+ self.x0 = x0[0]
79
+ self.x1 = self.x0 + 0.25
80
+ elif len(x0) == 2:
81
+ self.x0 = x0[0]
82
+ self.x1 = x0[1]
83
+ else:
84
+ raise ValueError('expected 1 or 2 starting points, got %i' % len(x0))
85
+ self.f = f
86
+
87
+ def __iter__(self):
88
+ f = self.f
89
+ x0 = self.x0
90
+ x1 = self.x1
91
+ f0 = f(x0)
92
+ while True:
93
+ f1 = f(x1)
94
+ l = x1 - x0
95
+ if not l:
96
+ break
97
+ s = (f1 - f0) / l
98
+ if not s:
99
+ break
100
+ x0, x1 = x1, x1 - f1/s
101
+ f0 = f1
102
+ yield x1, abs(l)
103
+
104
+ class MNewton:
105
+ """
106
+ 1d-solver generating pairs of approximative root and error.
107
+
108
+ Needs starting point x0 close to the root.
109
+ Uses modified Newton's method that converges fast regardless of the
110
+ multiplicity of the root.
111
+
112
+ Pro:
113
+
114
+ * converges fast for multiple roots
115
+
116
+ Contra:
117
+
118
+ * needs first and second derivative of f
119
+ * 3 function evaluations per iteration
120
+ """
121
+ maxsteps = 20
122
+
123
+ def __init__(self, ctx, f, x0, **kwargs):
124
+ self.ctx = ctx
125
+ if not len(x0) == 1:
126
+ raise ValueError('expected 1 starting point, got %i' % len(x0))
127
+ self.x0 = x0[0]
128
+ self.f = f
129
+ if not 'df' in kwargs:
130
+ def df(x):
131
+ return self.ctx.diff(f, x)
132
+ else:
133
+ df = kwargs['df']
134
+ self.df = df
135
+ if not 'd2f' in kwargs:
136
+ def d2f(x):
137
+ return self.ctx.diff(df, x)
138
+ else:
139
+ d2f = kwargs['d2f']
140
+ self.d2f = d2f
141
+
142
+ def __iter__(self):
143
+ x = self.x0
144
+ f = self.f
145
+ df = self.df
146
+ d2f = self.d2f
147
+ while True:
148
+ prevx = x
149
+ fx = f(x)
150
+ if fx == 0:
151
+ break
152
+ dfx = df(x)
153
+ d2fx = d2f(x)
154
+ # x = x - F(x)/F'(x) with F(x) = f(x)/f'(x)
155
+ x -= fx / (dfx - fx * d2fx / dfx)
156
+ error = abs(x - prevx)
157
+ yield x, error
158
+
159
+ class Halley:
160
+ """
161
+ 1d-solver generating pairs of approximative root and error.
162
+
163
+ Needs a starting point x0 close to the root.
164
+ Uses Halley's method with cubic convergence rate.
165
+
166
+ Pro:
167
+
168
+ * converges even faster than Newton's method
169
+ * useful when computing with *many* digits
170
+
171
+ Contra:
172
+
173
+ * needs first and second derivative of f
174
+ * 3 function evaluations per iteration
175
+ * converges slowly for multiple roots
176
+ """
177
+
178
+ maxsteps = 20
179
+
180
+ def __init__(self, ctx, f, x0, **kwargs):
181
+ self.ctx = ctx
182
+ if not len(x0) == 1:
183
+ raise ValueError('expected 1 starting point, got %i' % len(x0))
184
+ self.x0 = x0[0]
185
+ self.f = f
186
+ if not 'df' in kwargs:
187
+ def df(x):
188
+ return self.ctx.diff(f, x)
189
+ else:
190
+ df = kwargs['df']
191
+ self.df = df
192
+ if not 'd2f' in kwargs:
193
+ def d2f(x):
194
+ return self.ctx.diff(df, x)
195
+ else:
196
+ d2f = kwargs['d2f']
197
+ self.d2f = d2f
198
+
199
+ def __iter__(self):
200
+ x = self.x0
201
+ f = self.f
202
+ df = self.df
203
+ d2f = self.d2f
204
+ while True:
205
+ prevx = x
206
+ fx = f(x)
207
+ dfx = df(x)
208
+ d2fx = d2f(x)
209
+ x -= 2*fx*dfx / (2*dfx**2 - fx*d2fx)
210
+ error = abs(x - prevx)
211
+ yield x, error
212
+
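The update above is Halley's iteration x <- x - 2 f f' / (2 f'^2 - f f''). A minimal standalone sketch (plain Python floats, illustrative only, not part of mpmath):

def halley_step(f, df, d2f, x):
    fx, dfx, d2fx = f(x), df(x), d2f(x)
    return x - 2*fx*dfx / (2*dfx**2 - fx*d2fx)

# Cube root of 2: f(x) = x**3 - 2.  Cubic convergence from x = 1.
x = 1.0
for _ in range(4):
    x = halley_step(lambda t: t**3 - 2, lambda t: 3*t**2, lambda t: 6*t, x)
print(x)   # 1.2599210498948732...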
213
+ class Muller:
214
+ """
215
+ 1d-solver generating pairs of approximative root and error.
216
+
217
+ Needs starting points x0, x1 and x2 close to the root.
218
+ x1 defaults to x0 + 0.25; x2 to x1 + 0.25.
219
+ Uses Muller's method that converges towards complex roots.
220
+
221
+ Pro:
222
+
223
+ * converges fast (somewhat faster than secant)
224
+ * can find complex roots
225
+
226
+ Contra:
227
+
228
+ * converges slowly for multiple roots
229
+ * may have complex values for real starting points and real roots
230
+
231
+ http://en.wikipedia.org/wiki/Muller's_method
232
+ """
233
+ maxsteps = 30
234
+
235
+ def __init__(self, ctx, f, x0, **kwargs):
236
+ self.ctx = ctx
237
+ if len(x0) == 1:
238
+ self.x0 = x0[0]
239
+ self.x1 = self.x0 + 0.25
240
+ self.x2 = self.x1 + 0.25
241
+ elif len(x0) == 2:
242
+ self.x0 = x0[0]
243
+ self.x1 = x0[1]
244
+ self.x2 = self.x1 + 0.25
245
+ elif len(x0) == 3:
246
+ self.x0 = x0[0]
247
+ self.x1 = x0[1]
248
+ self.x2 = x0[2]
249
+ else:
250
+ raise ValueError('expected 1, 2 or 3 starting points, got %i'
251
+ % len(x0))
252
+ self.f = f
253
+ self.verbose = kwargs['verbose']
254
+
255
+ def __iter__(self):
256
+ f = self.f
257
+ x0 = self.x0
258
+ x1 = self.x1
259
+ x2 = self.x2
260
+ fx0 = f(x0)
261
+ fx1 = f(x1)
262
+ fx2 = f(x2)
263
+ while True:
264
+ # TODO: maybe refactoring with function for divided differences
265
+ # calculate divided differences
266
+ fx2x1 = (fx1 - fx2) / (x1 - x2)
267
+ fx2x0 = (fx0 - fx2) / (x0 - x2)
268
+ fx1x0 = (fx0 - fx1) / (x0 - x1)
269
+ w = fx2x1 + fx2x0 - fx1x0
270
+ fx2x1x0 = (fx1x0 - fx2x1) / (x0 - x2)
271
+ if w == 0 and fx2x1x0 == 0:
272
+ if self.verbose:
273
+ print('canceled with')
274
+ print('x0 =', x0, ', x1 =', x1, 'and x2 =', x2)
275
+ break
276
+ x0 = x1
277
+ fx0 = fx1
278
+ x1 = x2
279
+ fx1 = fx2
280
+ # denominator should be as large as possible => choose sign
281
+ r = self.ctx.sqrt(w**2 - 4*fx2*fx2x1x0)
282
+ if abs(w - r) > abs(w + r):
283
+ r = -r
284
+ x2 -= 2*fx2 / (w + r)
285
+ fx2 = f(x2)
286
+ error = abs(x2 - x1)
287
+ yield x2, error
288
+
289
+ # TODO: consider raising a ValueError when there's no sign change in a and b
290
+ class Bisection:
291
+ """
292
+ 1d-solver generating pairs of approximative root and error.
293
+
294
+ Uses bisection method to find a root of f in [a, b].
295
+ Might fail for multiple roots (needs sign change).
296
+
297
+ Pro:
298
+
299
+ * robust and reliable
300
+
301
+ Contra:
302
+
303
+ * converges slowly
304
+ * needs sign change
305
+ """
306
+ maxsteps = 100
307
+
308
+ def __init__(self, ctx, f, x0, **kwargs):
309
+ self.ctx = ctx
310
+ if len(x0) != 2:
311
+ raise ValueError('expected interval of 2 points, got %i' % len(x0))
312
+ self.f = f
313
+ self.a = x0[0]
314
+ self.b = x0[1]
315
+
316
+ def __iter__(self):
317
+ f = self.f
318
+ a = self.a
319
+ b = self.b
320
+ l = b - a
321
+ fb = f(b)
322
+ while True:
323
+ m = self.ctx.ldexp(a + b, -1)
324
+ fm = f(m)
325
+ sign = fm * fb
326
+ if sign < 0:
327
+ a = m
328
+ elif sign > 0:
329
+ b = m
330
+ fb = fm
331
+ else:
332
+ yield m, self.ctx.zero
333
+ l /= 2
334
+ yield (a + b)/2, abs(l)
335
+
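Each pass above halves the bracketing interval, so the reported error bound abs(l) is cut in half per step. A usage sketch (assuming mpmath is importable); the solver needs an interval whose endpoints give opposite signs:

from mpmath import mp, findroot, sin

mp.dps = 20
print(findroot(sin, (3, 3.5), solver='bisect'))   # bracket around pi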
336
+ def _getm(method):
337
+ """
338
+ Return a function to calculate m for Illinois-like methods.
339
+ """
340
+ if method == 'illinois':
341
+ def getm(fz, fb):
342
+ return 0.5
343
+ elif method == 'pegasus':
344
+ def getm(fz, fb):
345
+ return fb/(fb + fz)
346
+ elif method == 'anderson':
347
+ def getm(fz, fb):
348
+ m = 1 - fz/fb
349
+ if m > 0:
350
+ return m
351
+ else:
352
+ return 0.5
353
+ else:
354
+ raise ValueError("method '%s' not recognized" % method)
355
+ return getm
356
+
357
+ class Illinois:
358
+ """
359
+ 1d-solver generating pairs of approximative root and error.
360
+
361
+ Uses Illinois method or similar to find a root of f in [a, b].
362
+ Might fail for multiple roots (needs sign change).
363
+ Combines bisect with secant (improved regula falsi).
364
+
365
+ The only difference between the methods is the scaling factor m, which is
366
+ used to ensure convergence (you can choose one using the 'method' keyword):
367
+
368
+ Illinois method ('illinois'):
369
+ m = 0.5
370
+
371
+ Pegasus method ('pegasus'):
372
+ m = fb/(fb + fz)
373
+
374
+ Anderson-Bjoerk method ('anderson'):
375
+ m = 1 - fz/fb if positive else 0.5
376
+
377
+ Pro:
378
+
379
+ * converges very fast
380
+
381
+ Contra:
382
+
383
+ * has problems with multiple roots
384
+ * needs sign change
385
+ """
386
+ maxsteps = 30
387
+
388
+ def __init__(self, ctx, f, x0, **kwargs):
389
+ self.ctx = ctx
390
+ if len(x0) != 2:
391
+ raise ValueError('expected interval of 2 points, got %i' % len(x0))
392
+ self.a = x0[0]
393
+ self.b = x0[1]
394
+ self.f = f
395
+ self.tol = kwargs['tol']
396
+ self.verbose = kwargs['verbose']
397
+ self.method = kwargs.get('method', 'illinois')
398
+ self.getm = _getm(self.method)
399
+ if self.verbose:
400
+ print('using %s method' % self.method)
401
+
402
+ def __iter__(self):
403
+ method = self.method
404
+ f = self.f
405
+ a = self.a
406
+ b = self.b
407
+ fa = f(a)
408
+ fb = f(b)
409
+ m = None
410
+ while True:
411
+ l = b - a
412
+ if l == 0:
413
+ break
414
+ s = (fb - fa) / l
415
+ z = a - fa/s
416
+ fz = f(z)
417
+ if abs(fz) < self.tol:
418
+ # TODO: better condition (when f is very flat)
419
+ if self.verbose:
420
+ print('canceled with z =', z)
421
+ yield z, l
422
+ break
423
+ if fz * fb < 0: # root in [z, b]
424
+ a = b
425
+ fa = fb
426
+ b = z
427
+ fb = fz
428
+ else: # root in [a, z]
429
+ m = self.getm(fz, fb)
430
+ b = z
431
+ fb = fz
432
+ fa = m*fa # scale down to ensure convergence
433
+ if self.verbose and m and not method == 'illinois':
434
+ print('m:', m)
435
+ yield (a + b)/2, abs(l)
436
+
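A usage sketch (assuming mpmath is importable) of the three regula-falsi variants described in the docstring; they differ only in the factor m used to scale down the retained endpoint value:

from mpmath import mp, findroot, exp

mp.dps = 20
f = lambda x: exp(x) - 3*x            # sign change on [0, 1], root near 0.619
for solver in ('illinois', 'pegasus', 'anderson'):
    print(solver, findroot(f, (0, 1), solver=solver))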
437
+ def Pegasus(*args, **kwargs):
438
+ """
439
+ 1d-solver generating pairs of approximative root and error.
440
+
441
+ Uses Pegasus method to find a root of f in [a, b].
442
+ Wrapper for illinois to use method='pegasus'.
443
+ """
444
+ kwargs['method'] = 'pegasus'
445
+ return Illinois(*args, **kwargs)
446
+
447
+ def Anderson(*args, **kwargs):
448
+ """
449
+ 1d-solver generating pairs of approximative root and error.
450
+
451
+ Uses Anderson-Bjoerk method to find a root of f in [a, b].
452
+ Wrapper for illinois to use method='anderson'.
453
+ """
454
+ kwargs['method'] = 'anderson'
455
+ return Illinois(*args, **kwargs)
456
+
457
+ # TODO: check whether it's possible to combine it with Illinois stuff
458
+ class Ridder:
459
+ """
460
+ 1d-solver generating pairs of approximative root and error.
461
+
462
+ Uses Ridders' method to find a root of f in [a, b].
463
+ Said to perform as well as Brent's method while being simpler.
464
+
465
+ Pro:
466
+
467
+ * very fast
468
+ * simpler than Brent's method
469
+
470
+ Contra:
471
+
472
+ * two function evaluations per step
473
+ * has problems with multiple roots
474
+ * needs sign change
475
+
476
+ http://en.wikipedia.org/wiki/Ridders'_method
477
+ """
478
+ maxsteps = 30
479
+
480
+ def __init__(self, ctx, f, x0, **kwargs):
481
+ self.ctx = ctx
482
+ self.f = f
483
+ if len(x0) != 2:
484
+ raise ValueError('expected interval of 2 points, got %i' % len(x0))
485
+ self.x1 = x0[0]
486
+ self.x2 = x0[1]
487
+ self.verbose = kwargs['verbose']
488
+ self.tol = kwargs['tol']
489
+
490
+ def __iter__(self):
491
+ ctx = self.ctx
492
+ f = self.f
493
+ x1 = self.x1
494
+ fx1 = f(x1)
495
+ x2 = self.x2
496
+ fx2 = f(x2)
497
+ while True:
498
+ x3 = 0.5*(x1 + x2)
499
+ fx3 = f(x3)
500
+ x4 = x3 + (x3 - x1) * ctx.sign(fx1 - fx2) * fx3 / ctx.sqrt(fx3**2 - fx1*fx2)
501
+ fx4 = f(x4)
502
+ if abs(fx4) < self.tol:
503
+ # TODO: better condition (when f is very flat)
504
+ if self.verbose:
505
+ print('canceled with f(x4) =', fx4)
506
+ yield x4, abs(x1 - x2)
507
+ break
508
+ if fx4 * fx2 < 0: # root in [x4, x2]
509
+ x1 = x4
510
+ fx1 = fx4
511
+ else: # root in [x1, x4]
512
+ x2 = x4
513
+ fx2 = fx4
514
+ error = abs(x1 - x2)
515
+ yield (x1 + x2)/2, error
516
+
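Like bisection, Ridders' method needs a bracketing interval with a sign change, but the exponential correction above gives much faster convergence. A usage sketch (assuming mpmath is importable):

from mpmath import mp, findroot, log

mp.dps = 30
print(findroot(lambda x: log(x) - 1, (2, 4), solver='ridder'))   # -> e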
517
+ class ANewton:
518
+ """
519
+ EXPERIMENTAL 1d-solver generating pairs of approximative root and error.
520
+
521
+ Uses Newton's method modified to use Steffensen's method when convergence is
522
+ slow (i.e. for multiple roots).
523
+ """
524
+ maxsteps = 20
525
+
526
+ def __init__(self, ctx, f, x0, **kwargs):
527
+ self.ctx = ctx
528
+ if not len(x0) == 1:
529
+ raise ValueError('expected 1 starting point, got %i' % len(x0))
530
+ self.x0 = x0[0]
531
+ self.f = f
532
+ if not 'df' in kwargs:
533
+ def df(x):
534
+ return self.ctx.diff(f, x)
535
+ else:
536
+ df = kwargs['df']
537
+ self.df = df
538
+ def phi(x):
539
+ return x - f(x) / df(x)
540
+ self.phi = phi
541
+ self.verbose = kwargs['verbose']
542
+
543
+ def __iter__(self):
544
+ x0 = self.x0
545
+ f = self.f
546
+ df = self.df
547
+ phi = self.phi
548
+ error = 0
549
+ counter = 0
550
+ while True:
551
+ prevx = x0
552
+ try:
553
+ x0 = phi(x0)
554
+ except ZeroDivisionError:
555
+ if self.verbose:
556
+ print('ZeroDivisionError: canceled with x =', x0)
557
+ break
558
+ preverror = error
559
+ error = abs(prevx - x0)
560
+ # TODO: decide not to use convergence acceleration
561
+ if error and abs(error - preverror) / error < 1:
562
+ if self.verbose:
563
+ print('converging slowly')
564
+ counter += 1
565
+ if counter >= 3:
566
+ # accelerate convergence
567
+ phi = steffensen(phi)
568
+ counter = 0
569
+ if self.verbose:
570
+ print('accelerating convergence')
571
+ yield x0, error
572
+
573
+ # TODO: add Brent
574
+
575
+ ############################
576
+ # MULTIDIMENSIONAL SOLVERS #
577
+ ############################
578
+
579
+ def jacobian(ctx, f, x):
580
+ """
581
+ Calculate the Jacobian matrix of a function at the point x0.
582
+
583
+ This is the first derivative of a vectorial function:
584
+
585
+ f : R^m -> R^n with m >= n
586
+ """
587
+ x = ctx.matrix(x)
588
+ h = ctx.sqrt(ctx.eps)
589
+ fx = ctx.matrix(f(*x))
590
+ m = len(fx)
591
+ n = len(x)
592
+ J = ctx.matrix(m, n)
593
+ for j in xrange(n):
594
+ xj = x.copy()
595
+ xj[j] += h
596
+ Jj = (ctx.matrix(f(*xj)) - fx) / h
597
+ for i in xrange(m):
598
+ J[i,j] = Jj[i]
599
+ return J
600
+
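The function above approximates column j of the Jacobian by the forward difference (f(x + h*e_j) - f(x)) / h with h = sqrt(eps). A usage sketch (assuming mpmath is importable; ``jacobian`` is attached to the context at the bottom of this file):

from mpmath import mp

mp.dps = 15
f = lambda x1, x2: [x1**2 + x2, x1*x2]
print(mp.jacobian(f, [1, 2]))   # approximately [[2, 1], [2, 1]]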
601
+ # TODO: test with user-specified jacobian matrix
602
+ class MDNewton:
603
+ """
604
+ Find the root of a vector function numerically using Newton's method.
605
+
606
+ f is a vector function representing a nonlinear equation system.
607
+
608
+ x0 is the starting point close to the root.
609
+
610
+ J is a function returning the Jacobian matrix for a point.
611
+
612
+ Supports overdetermined systems.
613
+
614
+ Use the 'norm' keyword to specify which norm to use. Defaults to max-norm.
615
+ The function to calculate the Jacobian matrix can be given using the
616
+ keyword 'J'. Otherwise it will be calculated numerically.
617
+
618
+ Please note that this method converges only locally. Especially for high-
619
+ dimensional systems it is not trivial to find a good starting point that is
620
+ close enough to the root.
621
+
622
+ It is recommended to use a faster, low-precision solver from SciPy [1] or
623
+ OpenOpt [2] to get an initial guess. Afterwards you can use this method for
624
+ root-polishing to any precision.
625
+
626
+ [1] http://scipy.org
627
+
628
+ [2] http://openopt.org/Welcome
629
+ """
630
+ maxsteps = 10
631
+
632
+ def __init__(self, ctx, f, x0, **kwargs):
633
+ self.ctx = ctx
634
+ self.f = f
635
+ if isinstance(x0, (tuple, list)):
636
+ x0 = ctx.matrix(x0)
637
+ assert x0.cols == 1, 'need a vector'
638
+ self.x0 = x0
639
+ if 'J' in kwargs:
640
+ self.J = kwargs['J']
641
+ else:
642
+ def J(*x):
643
+ return ctx.jacobian(f, x)
644
+ self.J = J
645
+ self.norm = kwargs['norm']
646
+ self.verbose = kwargs['verbose']
647
+
648
+ def __iter__(self):
649
+ f = self.f
650
+ x0 = self.x0
651
+ norm = self.norm
652
+ J = self.J
653
+ fx = self.ctx.matrix(f(*x0))
654
+ fxnorm = norm(fx)
655
+ cancel = False
656
+ while not cancel:
657
+ # get direction of descent
658
+ fxn = -fx
659
+ Jx = J(*x0)
660
+ s = self.ctx.lu_solve(Jx, fxn)
661
+ if self.verbose:
662
+ print('Jx:')
663
+ print(Jx)
664
+ print('s:', s)
665
+ # damping step size TODO: better strategy (hard task)
666
+ l = self.ctx.one
667
+ x1 = x0 + s
668
+ while True:
669
+ if x1 == x0:
670
+ if self.verbose:
671
+ print("canceled, won't get more exact")
672
+ cancel = True
673
+ break
674
+ fx = self.ctx.matrix(f(*x1))
675
+ newnorm = norm(fx)
676
+ if newnorm < fxnorm:
677
+ # new x accepted
678
+ fxnorm = newnorm
679
+ x0 = x1
680
+ break
681
+ l /= 2
682
+ x1 = x0 + l*s
683
+ yield (x0, fxnorm)
684
+
685
+ #############
686
+ # UTILITIES #
687
+ #############
688
+
689
+ str2solver = {'newton':Newton, 'secant':Secant, 'mnewton':MNewton,
690
+ 'halley':Halley, 'muller':Muller, 'bisect':Bisection,
691
+ 'illinois':Illinois, 'pegasus':Pegasus, 'anderson':Anderson,
692
+ 'ridder':Ridder, 'anewton':ANewton, 'mdnewton':MDNewton}
693
+
694
+ def findroot(ctx, f, x0, solver='secant', tol=None, verbose=False, verify=True, **kwargs):
695
+ r"""
696
+ Find an approximate solution to `f(x) = 0`, using *x0* as starting point or
697
+ interval for *x*.
698
+
699
+ Multidimensional overdetermined systems are supported.
700
+ You can specify them using a function or a list of functions.
701
+
702
+ Mathematically speaking, this function returns `x` such that
703
+ `|f(x)|^2 \leq \mathrm{tol}` is true within the current working precision.
704
+ If the computed value does not meet this criterion, an exception is raised.
705
+ This exception can be disabled with *verify=False*.
706
+
707
+ For interval arithmetic (``iv.findroot()``), please note that
708
+ the returned interval ``x`` is not guaranteed to contain `f(x)=0`!
709
+ It is only some `x` for which `|f(x)|^2 \leq \mathrm{tol}` certainly holds
710
+ regardless of numerical error. This may be improved in the future.
711
+
712
+ **Arguments**
713
+
714
+ *f*
715
+ one dimensional function
716
+ *x0*
717
+ starting point, several starting points or interval (depends on solver)
718
+ *tol*
719
+ the returned solution has an error smaller than this
720
+ *verbose*
721
+ print additional information for each iteration if true
722
+ *verify*
723
+ verify the solution and raise a ValueError if `|f(x)|^2 > \mathrm{tol}`
724
+ *solver*
725
+ a generator for *f* and *x0* returning approximative solution and error
726
+ *maxsteps*
727
+ after how many steps the solver will cancel
728
+ *df*
729
+ first derivative of *f* (used by some solvers)
730
+ *d2f*
731
+ second derivative of *f* (used by some solvers)
732
+ *multidimensional*
733
+ force multidimensional solving
734
+ *J*
735
+ Jacobian matrix of *f* (used by multidimensional solvers)
736
+ *norm*
737
+ used vector norm (used by multidimensional solvers)
738
+
739
+ solver has to be callable with ``(ctx, f, x0, **kwargs)`` and return a generator
740
+ yielding pairs of approximative solution and estimated error (which is
741
+ expected to be positive).
742
+ You can use the following string aliases:
743
+ 'secant', 'mnewton', 'halley', 'muller', 'illinois', 'pegasus', 'anderson',
744
+ 'ridder', 'anewton', 'bisect'
745
+
746
+ See mpmath.calculus.optimization for their documentation.
747
+
748
+ **Examples**
749
+
750
+ The function :func:`~mpmath.findroot` locates a root of a given function using the
751
+ secant method by default. A simple example use of the secant method is to
752
+ compute `\pi` as the root of `\sin x` closest to `x_0 = 3`::
753
+
754
+ >>> from mpmath import *
755
+ >>> mp.dps = 30; mp.pretty = True
756
+ >>> findroot(sin, 3)
757
+ 3.14159265358979323846264338328
758
+
759
+ The secant method can be used to find complex roots of analytic functions,
760
+ although it must in that case generally be given a nonreal starting value
761
+ (or else it will never leave the real line)::
762
+
763
+ >>> mp.dps = 15
764
+ >>> findroot(lambda x: x**3 + 2*x + 1, j)
765
+ (0.226698825758202 + 1.46771150871022j)
766
+
767
+ A nice application is to compute nontrivial roots of the Riemann zeta
768
+ function with many digits (good initial values are needed for convergence)::
769
+
770
+ >>> mp.dps = 30
771
+ >>> findroot(zeta, 0.5+14j)
772
+ (0.5 + 14.1347251417346937904572519836j)
773
+
774
+ The secant method can also be used as an optimization algorithm, by passing
775
+ it a derivative of a function. The following example locates the positive
776
+ minimum of the gamma function::
777
+
778
+ >>> mp.dps = 20
779
+ >>> findroot(lambda x: diff(gamma, x), 1)
780
+ 1.4616321449683623413
781
+
782
+ Finally, a useful application is to compute inverse functions, such as the
783
+ Lambert W function which is the inverse of `w e^w`, given the first
784
+ term of the solution's asymptotic expansion as the initial value. In basic
785
+ cases, this gives identical results to mpmath's built-in ``lambertw``
786
+ function::
787
+
788
+ >>> def lambert(x):
789
+ ... return findroot(lambda w: w*exp(w) - x, log(1+x))
790
+ ...
791
+ >>> mp.dps = 15
792
+ >>> lambert(1); lambertw(1)
793
+ 0.567143290409784
794
+ 0.567143290409784
795
+ >>> lambert(1000); lambertw(1000)
796
+ 5.2496028524016
797
+ 5.2496028524016
798
+
799
+ Multidimensional functions are also supported::
800
+
801
+ >>> f = [lambda x1, x2: x1**2 + x2,
802
+ ... lambda x1, x2: 5*x1**2 - 3*x1 + 2*x2 - 3]
803
+ >>> findroot(f, (0, 0))
804
+ [-0.618033988749895]
805
+ [-0.381966011250105]
806
+ >>> findroot(f, (10, 10))
807
+ [ 1.61803398874989]
808
+ [-2.61803398874989]
809
+
810
+ You can verify this by solving the system manually.
811
+
812
+ Please note that the following (more general) syntax also works::
813
+
814
+ >>> def f(x1, x2):
815
+ ... return x1**2 + x2, 5*x1**2 - 3*x1 + 2*x2 - 3
816
+ ...
817
+ >>> findroot(f, (0, 0))
818
+ [-0.618033988749895]
819
+ [-0.381966011250105]
820
+
821
+
822
+ **Multiple roots**
823
+
824
+ For multiple roots all methods of the Newtonian family (including secant)
825
+ converge slowly. Consider this example::
826
+
827
+ >>> f = lambda x: (x - 1)**99
828
+ >>> findroot(f, 0.9, verify=False)
829
+ 0.918073542444929
830
+
831
+ Even for a very close starting point the secant method converges very
832
+ slowly. Use ``verbose=True`` to illustrate this.
833
+
834
+ It is possible to modify Newton's method to make it converge regardless of
835
+ the root's multiplicity::
836
+
837
+ >>> findroot(f, -10, solver='mnewton')
838
+ 1.0
839
+
840
+ This variant uses the first and second derivative of the function, which is
841
+ not very efficient.
842
+
843
+ Alternatively you can use an experimental Newtonian solver that keeps track
844
+ of the speed of convergence and accelerates it using Steffensen's method if
845
+ necessary::
846
+
847
+ >>> findroot(f, -10, solver='anewton', verbose=True)
848
+ x: -9.88888888888888888889
849
+ error: 0.111111111111111111111
850
+ converging slowly
851
+ x: -9.77890011223344556678
852
+ error: 0.10998877665544332211
853
+ converging slowly
854
+ x: -9.67002233332199662166
855
+ error: 0.108877778911448945119
856
+ converging slowly
857
+ accelerating convergence
858
+ x: -9.5622443299551077669
859
+ error: 0.107778003366888854764
860
+ converging slowly
861
+ x: 0.99999999999999999214
862
+ error: 10.562244329955107759
863
+ x: 1.0
864
+ error: 7.8598304758094664213e-18
865
+ ZeroDivisionError: canceled with x = 1.0
866
+ 1.0
867
+
868
+ **Complex roots**
869
+
870
+ For complex roots it's recommended to use Muller's method as it converges
871
+ quickly even for real starting points::
872
+
873
+ >>> findroot(lambda x: x**4 + x + 1, (0, 1, 2), solver='muller')
874
+ (0.727136084491197 + 0.934099289460529j)
875
+
876
+
877
+ **Intersection methods**
878
+
879
+ When you need to find a root in a known interval, it's highly recommended to
880
+ use an intersection-based solver like ``'anderson'`` or ``'ridder'``.
881
+ Usually they converge faster and more reliably. They do, however, have problems
882
+ with multiple roots and usually need a sign change to find a root::
883
+
884
+ >>> findroot(lambda x: x**3, (-1, 1), solver='anderson')
885
+ 0.0
886
+
887
+ Be careful with symmetric functions::
888
+
889
+ >>> findroot(lambda x: x**2, (-1, 1), solver='anderson') #doctest:+ELLIPSIS
890
+ Traceback (most recent call last):
891
+ ...
892
+ ZeroDivisionError
893
+
894
+ It fails even for better starting points, because there is no sign change::
895
+
896
+ >>> findroot(lambda x: x**2, (-1, .5), solver='anderson')
897
+ Traceback (most recent call last):
898
+ ...
899
+ ValueError: Could not find root within given tolerance. (1.0 > 2.16840434497100886801e-19)
900
+ Try another starting point or tweak arguments.
901
+
902
+ """
903
+ prec = ctx.prec
904
+ try:
905
+ ctx.prec += 20
906
+
907
+ # initialize arguments
908
+ if tol is None:
909
+ tol = ctx.eps * 2**10
910
+
911
+ kwargs['verbose'] = kwargs.get('verbose', verbose)
912
+
913
+ if 'd1f' in kwargs:
914
+ kwargs['df'] = kwargs['d1f']
915
+
916
+ kwargs['tol'] = tol
917
+ if isinstance(x0, (list, tuple)):
918
+ x0 = [ctx.convert(x) for x in x0]
919
+ else:
920
+ x0 = [ctx.convert(x0)]
921
+
922
+ if isinstance(solver, str):
923
+ try:
924
+ solver = str2solver[solver]
925
+ except KeyError:
926
+ raise ValueError('could not recognize solver')
927
+
928
+ # accept list of functions
929
+ if isinstance(f, (list, tuple)):
930
+ f2 = copy(f)
931
+ def tmp(*args):
932
+ return [fn(*args) for fn in f2]
933
+ f = tmp
934
+
935
+ # detect multidimensional functions
936
+ try:
937
+ fx = f(*x0)
938
+ multidimensional = isinstance(fx, (list, tuple, ctx.matrix))
939
+ except TypeError:
940
+ fx = f(x0[0])
941
+ multidimensional = False
942
+ if 'multidimensional' in kwargs:
943
+ multidimensional = kwargs['multidimensional']
944
+ if multidimensional:
945
+ # only one multidimensional solver available at the moment
946
+ solver = MDNewton
947
+ if not 'norm' in kwargs:
948
+ norm = lambda x: ctx.norm(x, 'inf')
949
+ kwargs['norm'] = norm
950
+ else:
951
+ norm = kwargs['norm']
952
+ else:
953
+ norm = abs
954
+
955
+ # happily return starting point if it's a root
956
+ if norm(fx) == 0:
957
+ if multidimensional:
958
+ return ctx.matrix(x0)
959
+ else:
960
+ return x0[0]
961
+
962
+ # use solver
963
+ iterations = solver(ctx, f, x0, **kwargs)
964
+ if 'maxsteps' in kwargs:
965
+ maxsteps = kwargs['maxsteps']
966
+ else:
967
+ maxsteps = iterations.maxsteps
968
+ i = 0
969
+ for x, error in iterations:
970
+ if verbose:
971
+ print('x: ', x)
972
+ print('error:', error)
973
+ i += 1
974
+ if error < tol * max(1, norm(x)) or i >= maxsteps:
975
+ break
976
+ else:
977
+ if not i:
978
+ raise ValueError('Could not find root using the given solver.\n'
979
+ 'Try another starting point or tweak arguments.')
980
+ if not isinstance(x, (list, tuple, ctx.matrix)):
981
+ xl = [x]
982
+ else:
983
+ xl = x
984
+ if verify and norm(f(*xl))**2 > tol: # TODO: better condition?
985
+ raise ValueError('Could not find root within given tolerance. '
986
+ '(%s > %s)\n'
987
+ 'Try another starting point or tweak arguments.'
988
+ % (norm(f(*xl))**2, tol))
989
+ return x
990
+ finally:
991
+ ctx.prec = prec
992
+
993
+
994
+ def multiplicity(ctx, f, root, tol=None, maxsteps=10, **kwargs):
995
+ """
996
+ Return the multiplicity of a given root of f.
997
+
998
+ Internally, numerical derivatives are used. This might be inefficient for
999
+ higher order derivatives. Due to this, ``multiplicity`` cancels after
1000
+ evaluating 10 derivatives by default. You can specify the n-th derivative
1001
+ using the dnf keyword.
1002
+
1003
+ >>> from mpmath import *
1004
+ >>> multiplicity(lambda x: sin(x) - 1, pi/2)
1005
+ 2
1006
+
1007
+ """
1008
+ if tol is None:
1009
+ tol = ctx.eps ** 0.8
1010
+ kwargs['d0f'] = f
1011
+ for i in xrange(maxsteps):
1012
+ dfstr = 'd' + str(i) + 'f'
1013
+ if dfstr in kwargs:
1014
+ df = kwargs[dfstr]
1015
+ else:
1016
+ df = lambda x: ctx.diff(f, x, i)
1017
+ if not abs(df(root)) < tol:
1018
+ break
1019
+ return i
1020
+
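A short usage sketch (assuming mpmath is importable): the reported multiplicity is the index of the first derivative that does not vanish at the root.

from mpmath import mp, multiplicity

mp.dps = 15
print(multiplicity(lambda x: (x - 1)**3, 1))   # 3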
1021
+ def steffensen(f):
1022
+ """
1023
+ linearly convergent function -> quadratically convergent function
1024
+
1025
+ Steffensen's method for quadratic convergence of a linearly converging
1026
+ sequence.
1027
+ Do not use it for higher rates of convergence.
1028
+ It may even work for divergent sequences.
1029
+
1030
+ Definition:
1031
+ F(x) = (x*f(f(x)) - f(x)**2) / (f(f(x)) - 2*f(x) + x)
1032
+
1033
+ Example
1034
+ .......
1035
+
1036
+ You can use Steffensen's method to accelerate a fixpoint iteration of linear
1037
+ (or less) convergence.
1038
+
1039
+ x* is a fixpoint of the iteration x_{k+1} = phi(x_k) if x* = phi(x*). For
1040
+ phi(x) = x**2 there are two fixpoints: 0 and 1.
1041
+
1042
+ Let's try Steffensen's method:
1043
+
1044
+ >>> f = lambda x: x**2
1045
+ >>> from mpmath.calculus.optimization import steffensen
1046
+ >>> F = steffensen(f)
1047
+ >>> for x in [0.5, 0.9, 2.0]:
1048
+ ... fx = Fx = x
1049
+ ... for i in xrange(9):
1050
+ ... try:
1051
+ ... fx = f(fx)
1052
+ ... except OverflowError:
1053
+ ... pass
1054
+ ... try:
1055
+ ... Fx = F(Fx)
1056
+ ... except ZeroDivisionError:
1057
+ ... pass
1058
+ ... print('%20g %20g' % (fx, Fx))
1059
+ 0.25 -0.5
1060
+ 0.0625 0.1
1061
+ 0.00390625 -0.0011236
1062
+ 1.52588e-05 1.41691e-09
1063
+ 2.32831e-10 -2.84465e-27
1064
+ 5.42101e-20 2.30189e-80
1065
+ 2.93874e-39 -1.2197e-239
1066
+ 8.63617e-78 0
1067
+ 7.45834e-155 0
1068
+ 0.81 1.02676
1069
+ 0.6561 1.00134
1070
+ 0.430467 1
1071
+ 0.185302 1
1072
+ 0.0343368 1
1073
+ 0.00117902 1
1074
+ 1.39008e-06 1
1075
+ 1.93233e-12 1
1076
+ 3.73392e-24 1
1077
+ 4 1.6
1078
+ 16 1.2962
1079
+ 256 1.10194
1080
+ 65536 1.01659
1081
+ 4.29497e+09 1.00053
1082
+ 1.84467e+19 1
1083
+ 3.40282e+38 1
1084
+ 1.15792e+77 1
1085
+ 1.34078e+154 1
1086
+
1087
+ Unmodified, the iteration converges only towards 0. Modified, it converges
1088
+ not only much faster, but even to the repelling fixpoint 1.
1089
+ """
1090
+ def F(x):
1091
+ fx = f(x)
1092
+ ffx = f(fx)
1093
+ return (x*ffx - fx**2) / (ffx - 2*fx + x)
1094
+ return F
1095
+
1096
+ OptimizationMethods.jacobian = jacobian
1097
+ OptimizationMethods.findroot = findroot
1098
+ OptimizationMethods.multiplicity = multiplicity
1099
+
1100
+ if __name__ == '__main__':
1101
+ import doctest
1102
+ doctest.testmod()
llmeval-env/lib/python3.10/site-packages/mpmath/calculus/polynomials.py ADDED
@@ -0,0 +1,213 @@
1
+ from ..libmp.backend import xrange
2
+ from .calculus import defun
3
+
4
+ #----------------------------------------------------------------------------#
5
+ # Polynomials #
6
+ #----------------------------------------------------------------------------#
7
+
8
+ # XXX: extra precision
9
+ @defun
10
+ def polyval(ctx, coeffs, x, derivative=False):
11
+ r"""
12
+ Given coefficients `[c_n, \ldots, c_2, c_1, c_0]` and a number `x`,
13
+ :func:`~mpmath.polyval` evaluates the polynomial
14
+
15
+ .. math ::
16
+
17
+ P(x) = c_n x^n + \ldots + c_2 x^2 + c_1 x + c_0.
18
+
19
+ If *derivative=True* is set, :func:`~mpmath.polyval` simultaneously
20
+ evaluates `P(x)` with the derivative, `P'(x)`, and returns the
21
+ tuple `(P(x), P'(x))`.
22
+
23
+ >>> from mpmath import *
24
+ >>> mp.pretty = True
25
+ >>> polyval([3, 0, 2], 0.5)
26
+ 2.75
27
+ >>> polyval([3, 0, 2], 0.5, derivative=True)
28
+ (2.75, 3.0)
29
+
30
+ The coefficients and the evaluation point may be any combination
31
+ of real or complex numbers.
32
+ """
33
+ if not coeffs:
34
+ return ctx.zero
35
+ p = ctx.convert(coeffs[0])
36
+ q = ctx.zero
37
+ for c in coeffs[1:]:
38
+ if derivative:
39
+ q = p + x*q
40
+ p = c + x*p
41
+ if derivative:
42
+ return p, q
43
+ else:
44
+ return p
45
+
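The loop above is Horner's scheme, with q accumulating the derivative alongside p. A minimal standalone sketch (plain Python, illustrative only, not part of mpmath):

def horner(coeffs, x):
    p, q = coeffs[0], 0
    for c in coeffs[1:]:
        q = p + x*q      # derivative picks up the previous value of p
        p = c + x*p
    return p, q

print(horner([3, 0, 2], 0.5))   # (2.75, 3.0), matching the doctest above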
46
+ @defun
47
+ def polyroots(ctx, coeffs, maxsteps=50, cleanup=True, extraprec=10,
48
+ error=False, roots_init=None):
49
+ """
50
+ Computes all roots (real or complex) of a given polynomial.
51
+
52
+ The roots are returned as a sorted list, where real roots appear first
53
+ followed by complex conjugate roots as adjacent elements. The polynomial
54
+ should be given as a list of coefficients, in the format used by
55
+ :func:`~mpmath.polyval`. The leading coefficient must be nonzero.
56
+
57
+ With *error=True*, :func:`~mpmath.polyroots` returns a tuple *(roots, err)*
58
+ where *err* is an estimate of the maximum error among the computed roots.
59
+
60
+ **Examples**
61
+
62
+ Finding the three real roots of `x^3 - x^2 - 14x + 24`::
63
+
64
+ >>> from mpmath import *
65
+ >>> mp.dps = 15; mp.pretty = True
66
+ >>> nprint(polyroots([1,-1,-14,24]), 4)
67
+ [-4.0, 2.0, 3.0]
68
+
69
+ Finding the two complex conjugate roots of `4x^2 + 3x + 2`, with an
70
+ error estimate::
71
+
72
+ >>> roots, err = polyroots([4,3,2], error=True)
73
+ >>> for r in roots:
74
+ ... print(r)
75
+ ...
76
+ (-0.375 + 0.59947894041409j)
77
+ (-0.375 - 0.59947894041409j)
78
+ >>>
79
+ >>> err
80
+ 2.22044604925031e-16
81
+ >>>
82
+ >>> polyval([4,3,2], roots[0])
83
+ (2.22044604925031e-16 + 0.0j)
84
+ >>> polyval([4,3,2], roots[1])
85
+ (2.22044604925031e-16 + 0.0j)
86
+
87
+ The following example computes all the 5th roots of unity; that is,
88
+ the roots of `x^5 - 1`::
89
+
90
+ >>> mp.dps = 20
91
+ >>> for r in polyroots([1, 0, 0, 0, 0, -1]):
92
+ ... print(r)
93
+ ...
94
+ 1.0
95
+ (-0.8090169943749474241 + 0.58778525229247312917j)
96
+ (-0.8090169943749474241 - 0.58778525229247312917j)
97
+ (0.3090169943749474241 + 0.95105651629515357212j)
98
+ (0.3090169943749474241 - 0.95105651629515357212j)
99
+
100
+ **Precision and conditioning**
101
+
102
+ The roots are computed to the current working precision accuracy. If this
103
+ accuracy cannot be achieved in ``maxsteps`` steps, then a
104
+ ``NoConvergence`` exception is raised. The algorithm internally is using
105
+ the current working precision extended by ``extraprec``. If
106
+ ``NoConvergence`` was raised, that is caused either by not having enough
107
+ extra precision to achieve convergence (in which case increasing
108
+ ``extraprec`` should fix the problem) or too low ``maxsteps`` (in which
109
+ case increasing ``maxsteps`` should fix the problem), or a combination of
110
+ both.
111
+
112
+ The user should always do a convergence study with regards to
113
+ ``extraprec`` to ensure accurate results. It is possible to get
114
+ convergence to a wrong answer with too low ``extraprec``.
115
+
116
+ Provided there are no repeated roots, :func:`~mpmath.polyroots` can
117
+ typically compute all roots of an arbitrary polynomial to high precision::
118
+
119
+ >>> mp.dps = 60
120
+ >>> for r in polyroots([1, 0, -10, 0, 1]):
121
+ ... print(r)
122
+ ...
123
+ -3.14626436994197234232913506571557044551247712918732870123249
124
+ -0.317837245195782244725757617296174288373133378433432554879127
125
+ 0.317837245195782244725757617296174288373133378433432554879127
126
+ 3.14626436994197234232913506571557044551247712918732870123249
127
+ >>>
128
+ >>> sqrt(3) + sqrt(2)
129
+ 3.14626436994197234232913506571557044551247712918732870123249
130
+ >>> sqrt(3) - sqrt(2)
131
+ 0.317837245195782244725757617296174288373133378433432554879127
132
+
133
+ **Algorithm**
134
+
135
+ :func:`~mpmath.polyroots` implements the Durand-Kerner method [1], which
136
+ uses complex arithmetic to locate all roots simultaneously.
137
+ The Durand-Kerner method can be viewed as approximately performing
138
+ simultaneous Newton iteration for all the roots. In particular,
139
+ the convergence to simple roots is quadratic, just like Newton's
140
+ method.
141
+
142
+ Although all roots are internally calculated using complex arithmetic, any
143
+ root found to have an imaginary part smaller than the estimated numerical
144
+ error is truncated to a real number (small real parts are also chopped).
145
+ Real roots are placed first in the returned list, sorted by value. The
146
+ remaining complex roots are sorted by their real parts so that conjugate
147
+ roots end up next to each other.
148
+
149
+ **References**
150
+
151
+ 1. http://en.wikipedia.org/wiki/Durand-Kerner_method
152
+
153
+ """
154
+ if len(coeffs) <= 1:
155
+ if not coeffs or not coeffs[0]:
156
+ raise ValueError("Input to polyroots must not be the zero polynomial")
157
+ # Constant polynomial with no roots
158
+ return []
159
+
160
+ orig = ctx.prec
161
+ tol = +ctx.eps
162
+ with ctx.extraprec(extraprec):
163
+ deg = len(coeffs) - 1
164
+ # Must be monic
165
+ lead = ctx.convert(coeffs[0])
166
+ if lead == 1:
167
+ coeffs = [ctx.convert(c) for c in coeffs]
168
+ else:
169
+ coeffs = [c/lead for c in coeffs]
170
+ f = lambda x: ctx.polyval(coeffs, x)
171
+ if roots_init is None:
172
+ roots = [ctx.mpc((0.4+0.9j)**n) for n in xrange(deg)]
173
+ else:
174
+ roots = [None]*deg;
175
+ deg_init = min(deg, len(roots_init))
176
+ roots[:deg_init] = list(roots_init[:deg_init])
177
+ roots[deg_init:] = [ctx.mpc((0.4+0.9j)**n) for n
178
+ in xrange(deg_init,deg)]
179
+ err = [ctx.one for n in xrange(deg)]
180
+ # Durand-Kerner iteration until convergence
181
+ for step in xrange(maxsteps):
182
+ if abs(max(err)) < tol:
183
+ break
184
+ for i in xrange(deg):
185
+ p = roots[i]
186
+ x = f(p)
187
+ for j in range(deg):
188
+ if i != j:
189
+ try:
190
+ x /= (p-roots[j])
191
+ except ZeroDivisionError:
192
+ continue
193
+ roots[i] = p - x
194
+ err[i] = abs(x)
195
+ if abs(max(err)) >= tol:
196
+ raise ctx.NoConvergence("Didn't converge in maxsteps=%d steps." \
197
+ % maxsteps)
198
+ # Remove small real or imaginary parts
199
+ if cleanup:
200
+ for i in xrange(deg):
201
+ if abs(roots[i]) < tol:
202
+ roots[i] = ctx.zero
203
+ elif abs(ctx._im(roots[i])) < tol:
204
+ roots[i] = roots[i].real
205
+ elif abs(ctx._re(roots[i])) < tol:
206
+ roots[i] = roots[i].imag * 1j
207
+ roots.sort(key=lambda x: (abs(ctx._im(x)), ctx._re(x)))
208
+ if error:
209
+ err = max(err)
210
+ err = max(err, ctx.ldexp(1, -orig+1))
211
+ return [+r for r in roots], +err
212
+ else:
213
+ return [+r for r in roots]
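A minimal standalone sketch (plain Python complex arithmetic, illustrative only, not part of mpmath) of the Durand-Kerner sweep that the loop above performs at working precision: each root estimate is corrected by p(r_i) divided by the product of its distances to the other estimates.

def dk_sweep(coeffs, roots):
    n = len(roots)
    p = lambda x: sum(c * x**(n - k) for k, c in enumerate(coeffs))
    new = []
    for i, r in enumerate(roots):
        corr = p(r)
        for j, s in enumerate(roots):
            if i != j:
                corr /= (r - s)
        new.append(r - corr)
    return new

roots = [(0.4 + 0.9j)**k for k in range(3)]       # same initial guesses as above
for _ in range(30):
    roots = dk_sweep([1, -6, 11, -6], roots)      # (x-1)(x-2)(x-3)
print(sorted(round(r.real, 6) for r in roots))    # -> [1.0, 2.0, 3.0]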
llmeval-env/lib/python3.10/site-packages/mpmath/calculus/quadrature.py ADDED
@@ -0,0 +1,1115 @@
1
+ import math
2
+
3
+ from ..libmp.backend import xrange
4
+
5
+ class QuadratureRule(object):
6
+ """
7
+ Quadrature rules are implemented using this class, in order to
8
+ simplify the code and provide a common infrastructure
9
+ for tasks such as error estimation and node caching.
10
+
11
+ You can implement a custom quadrature rule by subclassing
12
+ :class:`QuadratureRule` and implementing the appropriate
13
+ methods. The subclass can then be used by :func:`~mpmath.quad` by
14
+ passing it as the *method* argument.
15
+
16
+ :class:`QuadratureRule` instances are supposed to be singletons.
17
+ :class:`QuadratureRule` therefore implements instance caching
18
+ in :func:`~mpmath.__new__`.
19
+ """
20
+
21
+ def __init__(self, ctx):
22
+ self.ctx = ctx
23
+ self.standard_cache = {}
24
+ self.transformed_cache = {}
25
+ self.interval_count = {}
26
+
27
+ def clear(self):
28
+ """
29
+ Delete cached node data.
30
+ """
31
+ self.standard_cache = {}
32
+ self.transformed_cache = {}
33
+ self.interval_count = {}
34
+
35
+ def calc_nodes(self, degree, prec, verbose=False):
36
+ r"""
37
+ Compute nodes for the standard interval `[-1, 1]`. Subclasses
38
+ should probably implement only this method, and use
39
+ :func:`~mpmath.get_nodes` method to retrieve the nodes.
40
+ """
41
+ raise NotImplementedError
42
+
43
+ def get_nodes(self, a, b, degree, prec, verbose=False):
44
+ """
45
+ Return nodes for given interval, degree and precision. The
46
+ nodes are retrieved from a cache if already computed;
47
+ otherwise they are computed by calling :func:`~mpmath.calc_nodes`
48
+ and are then cached.
49
+
50
+ Subclasses should probably not implement this method,
51
+ but just implement :func:`~mpmath.calc_nodes` for the actual
52
+ node computation.
53
+ """
54
+ key = (a, b, degree, prec)
55
+ if key in self.transformed_cache:
56
+ return self.transformed_cache[key]
57
+ orig = self.ctx.prec
58
+ try:
59
+ self.ctx.prec = prec+20
60
+ # Get nodes on standard interval
61
+ if (degree, prec) in self.standard_cache:
62
+ nodes = self.standard_cache[degree, prec]
63
+ else:
64
+ nodes = self.calc_nodes(degree, prec, verbose)
65
+ self.standard_cache[degree, prec] = nodes
66
+ # Transform to general interval
67
+ nodes = self.transform_nodes(nodes, a, b, verbose)
68
+ if key in self.interval_count:
69
+ self.transformed_cache[key] = nodes
70
+ else:
71
+ self.interval_count[key] = True
72
+ finally:
73
+ self.ctx.prec = orig
74
+ return nodes
75
+
76
+ def transform_nodes(self, nodes, a, b, verbose=False):
77
+ r"""
78
+ Rescale standardized nodes (for `[-1, 1]`) to a general
79
+ interval `[a, b]`. For a finite interval, a simple linear
80
+ change of variables is used. Otherwise, the following
81
+ transformations are used:
82
+
83
+ .. math ::
84
+
85
+ \lbrack a, \infty \rbrack : t = \frac{1}{x} + (a-1)
86
+
87
+ \lbrack -\infty, b \rbrack : t = (b+1) - \frac{1}{x}
88
+
89
+ \lbrack -\infty, \infty \rbrack : t = \frac{x}{\sqrt{1-x^2}}
90
+
91
+ """
92
+ ctx = self.ctx
93
+ a = ctx.convert(a)
94
+ b = ctx.convert(b)
95
+ one = ctx.one
96
+ if (a, b) == (-one, one):
97
+ return nodes
98
+ half = ctx.mpf(0.5)
99
+ new_nodes = []
100
+ if ctx.isinf(a) or ctx.isinf(b):
101
+ if (a, b) == (ctx.ninf, ctx.inf):
102
+ p05 = -half
103
+ for x, w in nodes:
104
+ x2 = x*x
105
+ px1 = one-x2
106
+ spx1 = px1**p05
107
+ x = x*spx1
108
+ w *= spx1/px1
109
+ new_nodes.append((x, w))
110
+ elif a == ctx.ninf:
111
+ b1 = b+1
112
+ for x, w in nodes:
113
+ u = 2/(x+one)
114
+ x = b1-u
115
+ w *= half*u**2
116
+ new_nodes.append((x, w))
117
+ elif b == ctx.inf:
118
+ a1 = a-1
119
+ for x, w in nodes:
120
+ u = 2/(x+one)
121
+ x = a1+u
122
+ w *= half*u**2
123
+ new_nodes.append((x, w))
124
+ elif a == ctx.inf or b == ctx.ninf:
125
+ return [(x,-w) for (x,w) in self.transform_nodes(nodes, b, a, verbose)]
126
+ else:
127
+ raise NotImplementedError
128
+ else:
129
+ # Simple linear change of variables
130
+ C = (b-a)/2
131
+ D = (b+a)/2
132
+ for x, w in nodes:
133
+ new_nodes.append((D+C*x, C*w))
134
+ return new_nodes
135
+
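For a finite interval the rescaling above is just x -> D + C*x with weights multiplied by C, where C = (b-a)/2 and D = (b+a)/2. A minimal standalone sketch (plain Python, with a 2-point Gauss-Legendre rule hard-coded purely for illustration):

import math

std = [(-1/math.sqrt(3), 1.0), (1/math.sqrt(3), 1.0)]   # nodes/weights on [-1, 1]
a, b = 0.0, 2.0
C, D = (b - a)/2, (b + a)/2
scaled = [(D + C*x, C*w) for x, w in std]
print(sum(w * (x**3 + 1) for x, w in scaled))   # ~ 6.0, the exact integral over [0, 2]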
136
+ def guess_degree(self, prec):
137
+ """
138
+ Given a desired precision `p` in bits, estimate the degree `m`
139
+ of the quadrature required to accomplish full accuracy for
140
+ typical integrals. By default, :func:`~mpmath.quad` will perform up
141
+ to `m` iterations. The value of `m` should be a slight
142
+ overestimate, so that "slightly bad" integrals can be dealt
143
+ with automatically using a few extra iterations. On the
144
+ other hand, it should not be too big, so :func:`~mpmath.quad` can
145
+ quit within a reasonable amount of time when it is given
146
+ an "unsolvable" integral.
147
+
148
+ The default formula used by :func:`~mpmath.guess_degree` is tuned
149
+ for both :class:`TanhSinh` and :class:`GaussLegendre`.
150
+ The output is roughly as follows:
151
+
152
+ +---------+---------+
153
+ | `p` | `m` |
154
+ +=========+=========+
155
+ | 50 | 6 |
156
+ +---------+---------+
157
+ | 100 | 7 |
158
+ +---------+---------+
159
+ | 500 | 10 |
160
+ +---------+---------+
161
+ | 3000 | 12 |
162
+ +---------+---------+
163
+
164
+ This formula is based purely on a limited amount of
165
+ experimentation and will sometimes be wrong.
166
+ """
167
+ # Expected degree
168
+ # XXX: use mag
169
+ g = int(4 + max(0, self.ctx.log(prec/30.0, 2)))
170
+ # Reasonable "worst case"
171
+ g += 2
172
+ return g
173
+
174
+ def estimate_error(self, results, prec, epsilon):
175
+ r"""
176
+ Given results from integrations `[I_1, I_2, \ldots, I_k]` done
177
+ with a quadrature of rule of degree `1, 2, \ldots, k`, estimate
178
+ the error of `I_k`.
179
+
180
+ For `k = 2`, we estimate `|I_{\infty}-I_2|` as `|I_2-I_1|`.
181
+
182
+ For `k > 2`, we extrapolate `|I_{\infty}-I_k| \approx |I_{k+1}-I_k|`
183
+ from `|I_k-I_{k-1}|` and `|I_k-I_{k-2}|` under the assumption
184
+ that each degree increment roughly doubles the accuracy of
185
+ the quadrature rule (this is true for both :class:`TanhSinh`
186
+ and :class:`GaussLegendre`). The extrapolation formula is given
187
+ by Borwein, Bailey & Girgensohn. Although not very conservative,
188
+ this method seems to be very robust in practice.
189
+ """
190
+ if len(results) == 2:
191
+ return abs(results[0]-results[1])
192
+ try:
193
+ if results[-1] == results[-2] == results[-3]:
194
+ return self.ctx.zero
195
+ D1 = self.ctx.log(abs(results[-1]-results[-2]), 10)
196
+ D2 = self.ctx.log(abs(results[-1]-results[-3]), 10)
197
+ except ValueError:
198
+ return epsilon
199
+ D3 = -prec
200
+ D4 = min(0, max(D1**2/D2, 2*D1, D3))
201
+ return self.ctx.mpf(10) ** int(D4)
202
+
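A standalone sketch (plain Python floats) of the extrapolation above: with D1 = log10|I_k - I_{k-1}| and D2 = log10|I_k - I_{k-2}|, the estimate is 10**min(0, max(D1**2/D2, 2*D1, -prec)).

import math

def estimate(results, prec):
    D1 = math.log10(abs(results[-1] - results[-2]))
    D2 = math.log10(abs(results[-1] - results[-3]))
    return 10.0 ** int(min(0, max(D1**2/D2, 2*D1, -prec)))

# Accuracy roughly doubling per level, as for tanh-sinh and Gauss-Legendre:
print(estimate([2.001, 2.000001, 2.000000000001], 53))   # ~ 1e-12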
203
+ def summation(self, f, points, prec, epsilon, max_degree, verbose=False):
204
+ """
205
+ Main integration function. Computes the 1D integral over
206
+ the interval specified by *points*. For each subinterval,
207
+ performs quadrature of degree from 1 up to *max_degree*
208
+ until :func:`~mpmath.estimate_error` signals convergence.
209
+
210
+ :func:`~mpmath.summation` transforms each subintegration to
211
+ the standard interval and then calls :func:`~mpmath.sum_next`.
212
+ """
213
+ ctx = self.ctx
214
+ I = total_err = ctx.zero
215
+ for i in xrange(len(points)-1):
216
+ a, b = points[i], points[i+1]
217
+ if a == b:
218
+ continue
219
+ # XXX: we could use a single variable transformation,
220
+ # but this is not good in practice. We get better accuracy
221
+ # by having 0 as an endpoint.
222
+ if (a, b) == (ctx.ninf, ctx.inf):
223
+ _f = f
224
+ f = lambda x: _f(-x) + _f(x)
225
+ a, b = (ctx.zero, ctx.inf)
226
+ results = []
227
+ err = ctx.zero
228
+ for degree in xrange(1, max_degree+1):
229
+ nodes = self.get_nodes(a, b, degree, prec, verbose)
230
+ if verbose:
231
+ print("Integrating from %s to %s (degree %s of %s)" % \
232
+ (ctx.nstr(a), ctx.nstr(b), degree, max_degree))
233
+ result = self.sum_next(f, nodes, degree, prec, results, verbose)
234
+ results.append(result)
235
+ if degree > 1:
236
+ err = self.estimate_error(results, prec, epsilon)
237
+ if verbose:
238
+ print("Estimated error:", ctx.nstr(err), " epsilon:", ctx.nstr(epsilon), " result: ", ctx.nstr(result))
239
+ if err <= epsilon:
240
+ break
241
+ I += results[-1]
242
+ total_err += err
243
+ if total_err > epsilon:
244
+ if verbose:
245
+ print("Failed to reach full accuracy. Estimated error:", ctx.nstr(total_err))
246
+ return I, total_err
247
+
248
+ def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
249
+ r"""
250
+ Evaluates the step sum `\sum w_k f(x_k)` where the *nodes* list
251
+ contains the `(w_k, x_k)` pairs.
252
+
253
+ :func:`~mpmath.summation` will supply the list *results* of
254
+ values computed by :func:`~mpmath.sum_next` at previous degrees, in
255
+ case the quadrature rule is able to reuse them.
256
+ """
257
+ return self.ctx.fdot((w, f(x)) for (x,w) in nodes)
258
+
259
+
260
+ class TanhSinh(QuadratureRule):
261
+ r"""
262
+ This class implements "tanh-sinh" or "doubly exponential"
263
+ quadrature. This quadrature rule is based on the Euler-Maclaurin
264
+ integral formula. By performing a change of variables involving
265
+ nested exponentials / hyperbolic functions (hence the name), the
266
+ derivatives at the endpoints vanish rapidly. Since the error term
267
+ in the Euler-Maclaurin formula depends on the derivatives at the
268
+ endpoints, a simple step sum becomes extremely accurate. In
269
+ practice, this means that doubling the number of evaluation
270
+ points roughly doubles the number of accurate digits.
271
+
272
+ Comparison to Gauss-Legendre:
273
+ * Initial computation of nodes is usually faster
274
+ * Handles endpoint singularities better
275
+ * Handles infinite integration intervals better
276
+ * Is slower for smooth integrands once nodes have been computed
277
+
278
+ The implementation of the tanh-sinh algorithm is based on the
279
+ description given in Borwein, Bailey & Girgensohn, "Experimentation
280
+ in Mathematics - Computational Paths to Discovery", A K Peters,
281
+ 2003, pages 312-313. In the present implementation, a few
282
+ improvements have been made:
283
+
284
+ * A more efficient scheme is used to compute nodes (exploiting
285
+ recurrence for the exponential function)
286
+ * The nodes are computed successively instead of all at once
287
+
288
+ **References**
289
+
290
+ * [Bailey]_
291
+ * http://users.cs.dal.ca/~jborwein/tanh-sinh.pdf
292
+
293
+ """
294
+
295
+ def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
296
+ """
297
+ Step sum for tanh-sinh quadrature of degree `m`. We exploit the
298
+ fact that half of the abscissas at degree `m` are precisely the
299
+ abscissas from degree `m-1`. Thus reusing the result from
300
+ the previous level allows a 2x speedup.
301
+ """
302
+ h = self.ctx.mpf(2)**(-degree)
303
+ # Abscissas overlap, so reusing saves half of the time
304
+ if previous:
305
+ S = previous[-1]/(h*2)
306
+ else:
307
+ S = self.ctx.zero
308
+ S += self.ctx.fdot((w,f(x)) for (x,w) in nodes)
309
+ return h*S
310
+
311
+ def calc_nodes(self, degree, prec, verbose=False):
312
+ r"""
313
+ The abscissas and weights for tanh-sinh quadrature of degree
314
+ `m` are given by
315
+
316
+ .. math::
317
+
318
+ x_k = \tanh(\pi/2 \sinh(t_k))
319
+
320
+ w_k = \pi/2 \cosh(t_k) / \cosh(\pi/2 \sinh(t_k))^2
321
+
322
+ where `t_k = t_0 + hk` for a step length `h \sim 2^{-m}`. The
323
+ list of nodes is actually infinite, but the weights die off so
324
+ rapidly that only a few are needed.
325
+ """
326
+ ctx = self.ctx
327
+ nodes = []
328
+
329
+ extra = 20
330
+ ctx.prec += extra
331
+ tol = ctx.ldexp(1, -prec-10)
332
+ pi4 = ctx.pi/4
333
+
334
+ # For simplicity, we work in steps h = 1/2^n, with the first point
335
+ # offset so that we can reuse the sum from the previous degree
336
+
337
+ # We define degree 1 to include the "degree 0" steps, including
338
+ # the point x = 0. (It doesn't work well otherwise; not sure why.)
339
+ t0 = ctx.ldexp(1, -degree)
340
+ if degree == 1:
341
+ #nodes.append((mpf(0), pi4))
342
+ #nodes.append((-mpf(0), pi4))
343
+ nodes.append((ctx.zero, ctx.pi/2))
344
+ h = t0
345
+ else:
346
+ h = t0*2
347
+
348
+ # Since h is fixed, we can compute the next exponential
349
+ # by simply multiplying by exp(h)
350
+ expt0 = ctx.exp(t0)
351
+ a = pi4 * expt0
352
+ b = pi4 / expt0
353
+ udelta = ctx.exp(h)
354
+ urdelta = 1/udelta
355
+
356
+ for k in xrange(0, 20*2**degree+1):
357
+ # Reference implementation:
358
+ # t = t0 + k*h
359
+ # x = tanh(pi/2 * sinh(t))
360
+ # w = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2
361
+
362
+ # Fast implementation. Note that c = exp(pi/2 * sinh(t))
363
+ c = ctx.exp(a-b)
364
+ d = 1/c
365
+ co = (c+d)/2
366
+ si = (c-d)/2
367
+ x = si / co
368
+ w = (a+b) / co**2
369
+ diff = abs(x-1)
370
+ if diff <= tol:
371
+ break
372
+
373
+ nodes.append((x, w))
374
+ nodes.append((-x, w))
375
+
376
+ a *= udelta
377
+ b *= urdelta
378
+
379
+ if verbose and k % 300 == 150:
380
+ # Note: the number displayed is rather arbitrary. Should
381
+ # figure out how to print something that looks more like a
382
+ # percentage
383
+ print("Calculating nodes:", ctx.nstr(-ctx.log(diff, 10) / prec))
384
+
385
+ ctx.prec -= extra
386
+ return nodes
387
+
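A reference sketch of the node formulas quoted above (plain Python floats, mirroring the commented reference implementation rather than the optimized exp-recurrence); nodes with large |t| are skipped since their weights are negligible anyway:

import math

def ts_nodes(h, kmax):
    for k in range(-kmax, kmax + 1):
        t = k * h
        u = math.pi/2 * math.sinh(t)
        if abs(u) > 300:      # weight is negligible; also avoids overflow in cosh
            continue
        yield math.tanh(u), math.pi/2 * math.cosh(t) / math.cosh(u)**2

h = 2.0**-3
print(h * sum(w * math.sqrt(1 - x*x) for x, w in ts_nodes(h, 80)))   # ~ pi/2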
388
+
389
+ class GaussLegendre(QuadratureRule):
390
+ r"""
391
+ This class implements Gauss-Legendre quadrature, which is
392
+ exceptionally efficient for polynomials and polynomial-like (i.e.
393
+ very smooth) integrands.
394
+
395
+ The abscissas and weights are given by roots and values of
396
+ Legendre polynomials, which are the orthogonal polynomials
397
+ on `[-1, 1]` with respect to the unit weight
398
+ (see :func:`~mpmath.legendre`).
399
+
400
+ In this implementation, we take the "degree" `m` of the quadrature
401
+ to denote a Gauss-Legendre rule of degree `3 \cdot 2^m` (following
402
+ Borwein, Bailey & Girgensohn). This way we get quadratic, rather
403
+ than linear, convergence as the degree is incremented.
404
+
405
+ Comparison to tanh-sinh quadrature:
406
+ * Is faster for smooth integrands once nodes have been computed
407
+ * Initial computation of nodes is usually slower
408
+ * Handles endpoint singularities worse
409
+ * Handles infinite integration intervals worse
410
+
411
+ """
412
+
413
+ def calc_nodes(self, degree, prec, verbose=False):
414
+ r"""
415
+ Calculates the abscissas and weights for Gauss-Legendre
416
+ quadrature of the given degree (actually `3 \cdot 2^m`).
417
+ """
418
+ ctx = self.ctx
419
+ # It is important that the epsilon is set lower than the
420
+ # "real" epsilon
421
+ epsilon = ctx.ldexp(1, -prec-8)
422
+ # Fairly high precision might be required for accurate
423
+ # evaluation of the roots
424
+ orig = ctx.prec
425
+ ctx.prec = int(prec*1.5)
426
+ if degree == 1:
427
+ x = ctx.sqrt(ctx.mpf(3)/5)
428
+ w = ctx.mpf(5)/9
429
+ nodes = [(-x,w),(ctx.zero,ctx.mpf(8)/9),(x,w)]
430
+ ctx.prec = orig
431
+ return nodes
432
+ nodes = []
433
+ n = 3*2**(degree-1)
434
+ upto = n//2 + 1
435
+ for j in xrange(1, upto):
436
+ # Asymptotic formula for the roots
437
+ r = ctx.mpf(math.cos(math.pi*(j-0.25)/(n+0.5)))
438
+ # Newton iteration
439
+ while 1:
440
+ t1, t2 = 1, 0
441
+ # Evaluates the Legendre polynomial using its defining
442
+ # recurrence relation
443
+ for j1 in xrange(1,n+1):
444
+ t3, t2, t1 = t2, t1, ((2*j1-1)*r*t1 - (j1-1)*t2)/j1
445
+ t4 = n*(r*t1-t2)/(r**2-1)
446
+ a = t1/t4
447
+ r = r - a
448
+ if abs(a) < epsilon:
449
+ break
450
+ x = r
451
+ w = 2/((1-r**2)*t4**2)
452
+ if verbose and j % 30 == 15:
453
+ print("Computing nodes (%i of %i)" % (j, upto))
454
+ nodes.append((x, w))
455
+ nodes.append((-x, w))
456
+ ctx.prec = orig
457
+ return nodes
458
+
459
+ class QuadratureMethods(object):
460
+
461
+ def __init__(ctx, *args, **kwargs):
462
+ ctx._gauss_legendre = GaussLegendre(ctx)
463
+ ctx._tanh_sinh = TanhSinh(ctx)
464
+
465
+ def quad(ctx, f, *points, **kwargs):
466
+ r"""
467
+ Computes a single, double or triple integral over a given
468
+ 1D interval, 2D rectangle, or 3D cuboid. A basic example::
469
+
470
+ >>> from mpmath import *
471
+ >>> mp.dps = 15; mp.pretty = True
472
+ >>> quad(sin, [0, pi])
473
+ 2.0
474
+
475
+ A basic 2D integral::
476
+
477
+ >>> f = lambda x, y: cos(x+y/2)
478
+ >>> quad(f, [-pi/2, pi/2], [0, pi])
479
+ 4.0
480
+
481
+ **Interval format**
482
+
483
+ The integration range for each dimension may be specified
484
+ using a list or tuple. Arguments are interpreted as follows:
485
+
486
+ ``quad(f, [x1, x2])`` -- calculates
487
+ `\int_{x_1}^{x_2} f(x) \, dx`
488
+
489
+ ``quad(f, [x1, x2], [y1, y2])`` -- calculates
490
+ `\int_{x_1}^{x_2} \int_{y_1}^{y_2} f(x,y) \, dy \, dx`
491
+
492
+ ``quad(f, [x1, x2], [y1, y2], [z1, z2])`` -- calculates
493
+ `\int_{x_1}^{x_2} \int_{y_1}^{y_2} \int_{z_1}^{z_2} f(x,y,z)
494
+ \, dz \, dy \, dx`
495
+
496
+ Endpoints may be finite or infinite. An interval descriptor
497
+ may also contain more than two points. In this
498
+ case, the integration is split into subintervals, between
499
+ each pair of consecutive points. This is useful for
500
+ dealing with mid-interval discontinuities, or integrating
501
+ over large intervals where the function is irregular or
502
+ oscillates.
503
+
504
+ **Options**
505
+
506
+ :func:`~mpmath.quad` recognizes the following keyword arguments:
507
+
508
+ *method*
509
+ Chooses integration algorithm (described below).
510
+ *error*
511
+ If set to true, :func:`~mpmath.quad` returns `(v, e)` where `v` is the
512
+ integral and `e` is the estimated error.
513
+ *maxdegree*
514
+ Maximum degree of the quadrature rule to try before
515
+ quitting.
516
+ *verbose*
517
+ Print details about progress.
518
+
519
+ **Algorithms**
520
+
521
+ Mpmath presently implements two integration algorithms: tanh-sinh
522
+ quadrature and Gauss-Legendre quadrature. These can be selected
523
+ using *method='tanh-sinh'* or *method='gauss-legendre'* or by
524
+ passing the classes *method=TanhSinh*, *method=GaussLegendre*.
525
+ The functions :func:`~mpmath.quadts` and :func:`~mpmath.quadgl` are also available
526
+ as shortcuts.
527
+
528
+ Both algorithms have the property that doubling the number of
529
+ evaluation points roughly doubles the accuracy, so both are ideal
530
+ for high precision quadrature (hundreds or thousands of digits).
531
+
532
+ At high precision, computing the nodes and weights for the
533
+ integration can be expensive (more expensive than computing the
534
+ function values). To make repeated integrations fast, nodes
535
+ are automatically cached.
536
+
537
+ The advantages of the tanh-sinh algorithm are that it tends to
538
+ handle endpoint singularities well, and that the nodes are cheap
539
+ to compute on the first run. For these reasons, it is used by
540
+ :func:`~mpmath.quad` as the default algorithm.
541
+
542
+ Gauss-Legendre quadrature often requires fewer function
543
+ evaluations, and is therefore often faster for repeated use, but
544
+ the algorithm does not handle endpoint singularities as well and
545
+ the nodes are more expensive to compute. Gauss-Legendre quadrature
546
+ can be a better choice if the integrand is smooth and repeated
547
+ integrations are required (e.g. for multiple integrals).
548
+
549
+ See the documentation for :class:`TanhSinh` and
550
+ :class:`GaussLegendre` for additional details.
551
+
552
+ **Examples of 1D integrals**
553
+
554
+ Intervals may be infinite or half-infinite. The following two
555
+ examples evaluate the limits of the inverse tangent function
556
+ (`\int 1/(1+x^2) = \tan^{-1} x`), and the Gaussian integral
557
+ `\int_{-\infty}^{\infty} \exp(-x^2)\,dx = \sqrt{\pi}`::
558
+
559
+ >>> mp.dps = 15
560
+ >>> quad(lambda x: 2/(x**2+1), [0, inf])
561
+ 3.14159265358979
562
+ >>> quad(lambda x: exp(-x**2), [-inf, inf])**2
563
+ 3.14159265358979
564
+
565
+ Integrals can typically be resolved to high precision.
566
+ The following computes 50 digits of `\pi` by integrating the
567
+ area of the half-circle defined by `x^2 + y^2 \le 1`,
568
+ `-1 \le x \le 1`, `y \ge 0`::
569
+
570
+ >>> mp.dps = 50
571
+ >>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1])
572
+ 3.1415926535897932384626433832795028841971693993751
573
+
574
+ One can just as well compute 1000 digits (output truncated)::
575
+
576
+ >>> mp.dps = 1000
577
+ >>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1]) #doctest:+ELLIPSIS
578
+ 3.141592653589793238462643383279502884...216420199
579
+
580
+ Complex integrals are supported. The following computes
581
+ a residue at `z = 0` by integrating counterclockwise along the
582
+ diamond-shaped path from `1` to `+i` to `-1` to `-i` to `1`::
583
+
584
+ >>> mp.dps = 15
585
+ >>> chop(quad(lambda z: 1/z, [1,j,-1,-j,1]))
586
+ (0.0 + 6.28318530717959j)
587
+
588
+ **Examples of 2D and 3D integrals**
589
+
590
+ Here are several nice examples of analytically solvable
591
+ 2D integrals (taken from MathWorld [1]) that can be evaluated
592
+ to high precision fairly rapidly by :func:`~mpmath.quad`::
593
+
594
+ >>> mp.dps = 30
595
+ >>> f = lambda x, y: (x-1)/((1-x*y)*log(x*y))
596
+ >>> quad(f, [0, 1], [0, 1])
597
+ 0.577215664901532860606512090082
598
+ >>> +euler
599
+ 0.577215664901532860606512090082
600
+
601
+ >>> f = lambda x, y: 1/sqrt(1+x**2+y**2)
602
+ >>> quad(f, [-1, 1], [-1, 1])
603
+ 3.17343648530607134219175646705
604
+ >>> 4*log(2+sqrt(3))-2*pi/3
605
+ 3.17343648530607134219175646705
606
+
607
+ >>> f = lambda x, y: 1/(1-x**2 * y**2)
608
+ >>> quad(f, [0, 1], [0, 1])
609
+ 1.23370055013616982735431137498
610
+ >>> pi**2 / 8
611
+ 1.23370055013616982735431137498
612
+
613
+ >>> quad(lambda x, y: 1/(1-x*y), [0, 1], [0, 1])
614
+ 1.64493406684822643647241516665
615
+ >>> pi**2 / 6
616
+ 1.64493406684822643647241516665
617
+
618
+ Multiple integrals may be done over infinite ranges::
619
+
620
+ >>> mp.dps = 15
621
+ >>> print(quad(lambda x,y: exp(-x-y), [0, inf], [1, inf]))
622
+ 0.367879441171442
623
+ >>> print(1/e)
624
+ 0.367879441171442
625
+
626
+ For nonrectangular areas, one can call :func:`~mpmath.quad` recursively.
627
+ For example, we can replicate the earlier example of calculating
628
+ `\pi` by integrating over the unit circle, this time using double
629
+ quadrature to measure the area of the circle directly::
630
+
631
+ >>> f = lambda x: quad(lambda y: 1, [-sqrt(1-x**2), sqrt(1-x**2)])
632
+ >>> quad(f, [-1, 1])
633
+ 3.14159265358979
634
+
635
+ Here is a simple triple integral::
636
+
637
+ >>> mp.dps = 15
638
+ >>> f = lambda x,y,z: x*y/(1+z)
639
+ >>> quad(f, [0,1], [0,1], [1,2], method='gauss-legendre')
640
+ 0.101366277027041
641
+ >>> (log(3)-log(2))/4
642
+ 0.101366277027041
643
+
644
+ **Singularities**
645
+
646
+ Both tanh-sinh and Gauss-Legendre quadrature are designed to
647
+ integrate smooth (infinitely differentiable) functions. Neither
648
+ algorithm copes well with mid-interval singularities (such as
649
+ mid-interval discontinuities in `f(x)` or `f'(x)`).
650
+ The best solution is to split the integral into parts::
651
+
652
+ >>> mp.dps = 15
653
+ >>> quad(lambda x: abs(sin(x)), [0, 2*pi]) # Bad
654
+ 3.99900894176779
655
+ >>> quad(lambda x: abs(sin(x)), [0, pi, 2*pi]) # Good
656
+ 4.0
657
+
658
+ The tanh-sinh rule often works well for integrands having a
659
+ singularity at one or both endpoints::
660
+
661
+ >>> mp.dps = 15
662
+ >>> quad(log, [0, 1], method='tanh-sinh') # Good
663
+ -1.0
664
+ >>> quad(log, [0, 1], method='gauss-legendre') # Bad
665
+ -0.999932197413801
666
+
667
+ However, the result may still be inaccurate for some functions::
668
+
669
+ >>> quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
670
+ 1.99999999946942
671
+
672
+ This problem is not due to the quadrature rule per se, but to
673
+ numerical amplification of errors in the nodes. The problem can be
674
+ circumvented by temporarily increasing the precision::
675
+
676
+ >>> mp.dps = 30
677
+ >>> a = quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
678
+ >>> mp.dps = 15
679
+ >>> +a
680
+ 2.0
681
+
682
+ **Highly variable functions**
683
+
684
+ For functions that are smooth (in the sense of being infinitely
685
+ differentiable) but contain sharp mid-interval peaks or many
686
+ "bumps", :func:`~mpmath.quad` may fail to provide full accuracy. For
687
+ example, with default settings, :func:`~mpmath.quad` is able to integrate
688
+ `\sin(x)` accurately over an interval of length 100 but not over
689
+ length 1000::
690
+
691
+ >>> quad(sin, [0, 100]); 1-cos(100) # Good
692
+ 0.137681127712316
693
+ 0.137681127712316
694
+ >>> quad(sin, [0, 1000]); 1-cos(1000) # Bad
695
+ -37.8587612408485
696
+ 0.437620923709297
697
+
698
+ One solution is to break the integration range into several
699
+ shorter subintervals::
700
+
701
+ >>> quad(sin, linspace(0, 1000, 10)) # Good
702
+ 0.437620923709297
703
+
704
+ Another is to increase the degree of the quadrature::
705
+
706
+ >>> quad(sin, [0, 1000], maxdegree=10) # Also good
707
+ 0.437620923709297
708
+
709
+ Whether splitting the interval or increasing the degree is
710
+ more efficient differs from case to case. Another example is the
711
+ function `1/(1+x^2)`, which has a sharp peak centered around
712
+ `x = 0`::
713
+
714
+ >>> f = lambda x: 1/(1+x**2)
715
+ >>> quad(f, [-100, 100]) # Bad
716
+ 3.64804647105268
717
+ >>> quad(f, [-100, 100], maxdegree=10) # Good
718
+ 3.12159332021646
719
+ >>> quad(f, [-100, 0, 100]) # Also good
720
+ 3.12159332021646
721
+
722
+ **References**
723
+
724
+ 1. http://mathworld.wolfram.com/DoubleIntegral.html
725
+
726
+ """
727
+ rule = kwargs.get('method', 'tanh-sinh')
728
+ if type(rule) is str:
729
+ if rule == 'tanh-sinh':
730
+ rule = ctx._tanh_sinh
731
+ elif rule == 'gauss-legendre':
732
+ rule = ctx._gauss_legendre
733
+ else:
734
+ raise ValueError("unknown quadrature rule: %s" % rule)
735
+ else:
736
+ rule = rule(ctx)
737
+ verbose = kwargs.get('verbose')
738
+ dim = len(points)
739
+ orig = prec = ctx.prec
740
+ epsilon = ctx.eps/8
741
+ m = kwargs.get('maxdegree') or rule.guess_degree(prec)
742
+ points = [ctx._as_points(p) for p in points]
743
+ try:
744
+ ctx.prec += 20
745
+ if dim == 1:
746
+ v, err = rule.summation(f, points[0], prec, epsilon, m, verbose)
747
+ elif dim == 2:
748
+ v, err = rule.summation(lambda x: \
749
+ rule.summation(lambda y: f(x,y), \
750
+ points[1], prec, epsilon, m)[0],
751
+ points[0], prec, epsilon, m, verbose)
752
+ elif dim == 3:
753
+ v, err = rule.summation(lambda x: \
754
+ rule.summation(lambda y: \
755
+ rule.summation(lambda z: f(x,y,z), \
756
+ points[2], prec, epsilon, m)[0],
757
+ points[1], prec, epsilon, m)[0],
758
+ points[0], prec, epsilon, m, verbose)
759
+ else:
760
+ raise NotImplementedError("quadrature must have dim 1, 2 or 3")
761
+ finally:
762
+ ctx.prec = orig
763
+ if kwargs.get("error"):
764
+ return +v, err
765
+ return +v
766
+
767
+ def quadts(ctx, *args, **kwargs):
768
+ """
769
+ Performs tanh-sinh quadrature. The call
770
+
771
+ quadts(func, *points, ...)
772
+
773
+ is simply a shortcut for:
774
+
775
+ quad(func, *points, ..., method=TanhSinh)
776
+
777
+ For example, a single integral and a double integral:
778
+
779
+ quadts(lambda x: exp(cos(x)), [0, 1])
780
+ quadts(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
781
+
782
+ See the documentation for quad for information about how points
783
+ arguments and keyword arguments are parsed.
784
+
785
+ See documentation for TanhSinh for algorithmic information about
786
+ tanh-sinh quadrature.
787
+ """
788
+ kwargs['method'] = 'tanh-sinh'
789
+ return ctx.quad(*args, **kwargs)
790
+
791
+ def quadgl(ctx, *args, **kwargs):
792
+ """
793
+ Performs Gauss-Legendre quadrature. The call
794
+
795
+ quadgl(func, *points, ...)
796
+
797
+ is simply a shortcut for:
798
+
799
+ quad(func, *points, ..., method=GaussLegendre)
800
+
801
+ For example, a single integral and a double integral:
802
+
803
+ quadgl(lambda x: exp(cos(x)), [0, 1])
804
+ quadgl(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
805
+
806
+ See the documentation for quad for information about how points
807
+ arguments and keyword arguments are parsed.
808
+
809
+ See documentation for GaussLegendre for algorithmic information about
810
+ Gauss-Legendre quadrature.
811
+ """
812
+ kwargs['method'] = 'gauss-legendre'
813
+ return ctx.quad(*args, **kwargs)
814
+
815
+ def quadosc(ctx, f, interval, omega=None, period=None, zeros=None):
816
+ r"""
817
+ Calculates
818
+
819
+ .. math ::
820
+
821
+ I = \int_a^b f(x) dx
822
+
823
+ where at least one of `a` and `b` is infinite and where
824
+ `f(x) = g(x) \cos(\omega x + \phi)` for some slowly
825
+ decreasing function `g(x)`. With proper input, :func:`~mpmath.quadosc`
826
+ can also handle oscillatory integrals where the oscillation
827
+ rate is different from a pure sine or cosine wave.
828
+
829
+ In the standard case when `|a| < \infty, b = \infty`,
830
+ :func:`~mpmath.quadosc` works by evaluating the infinite series
831
+
832
+ .. math ::
833
+
834
+ I = \int_a^{x_1} f(x) dx +
835
+ \sum_{k=1}^{\infty} \int_{x_k}^{x_{k+1}} f(x) dx
836
+
837
+ where `x_k` are consecutive zeros (alternatively
838
+ some other periodic reference point) of `f(x)`.
839
+ Accordingly, :func:`~mpmath.quadosc` requires information about the
840
+ zeros of `f(x)`. For a periodic function, you can specify
841
+ the zeros by either providing the angular frequency `\omega`
842
+ (*omega*) or the *period* `2 \pi/\omega`. In general, you can
843
+ specify the `n`-th zero by providing the *zeros* argument.
844
+ Below is an example of each::
845
+
846
+ >>> from mpmath import *
847
+ >>> mp.dps = 15; mp.pretty = True
848
+ >>> f = lambda x: sin(3*x)/(x**2+1)
849
+ >>> quadosc(f, [0,inf], omega=3)
850
+ 0.37833007080198
851
+ >>> quadosc(f, [0,inf], period=2*pi/3)
852
+ 0.37833007080198
853
+ >>> quadosc(f, [0,inf], zeros=lambda n: pi*n/3)
854
+ 0.37833007080198
855
+ >>> (ei(3)*exp(-3)-exp(3)*ei(-3))/2 # Computed by Mathematica
856
+ 0.37833007080198
857
+
858
+ Note that *zeros* was specified to multiply `n` by the
859
+ *half-period*, not the full period. In theory, it does not matter
860
+ whether each partial integral is done over a half period or a full
861
+ period. However, if done over half-periods, the infinite series
862
+ passed to :func:`~mpmath.nsum` becomes an *alternating series* and this
863
+ typically makes the extrapolation much more efficient.
864
+
865
+ Here is an example of an integration over the entire real line,
866
+ and a half-infinite integration starting at `-\infty`::
867
+
868
+ >>> quadosc(lambda x: cos(x)/(1+x**2), [-inf, inf], omega=1)
869
+ 1.15572734979092
870
+ >>> pi/e
871
+ 1.15572734979092
872
+ >>> quadosc(lambda x: cos(x)/x**2, [-inf, -1], period=2*pi)
873
+ -0.0844109505595739
874
+ >>> cos(1)+si(1)-pi/2
875
+ -0.0844109505595738
876
+
877
+ Of course, the integrand may contain a complex exponential just as
878
+ well as a real sine or cosine::
879
+
880
+ >>> quadosc(lambda x: exp(3*j*x)/(1+x**2), [-inf,inf], omega=3)
881
+ (0.156410688228254 + 0.0j)
882
+ >>> pi/e**3
883
+ 0.156410688228254
884
+ >>> quadosc(lambda x: exp(3*j*x)/(2+x+x**2), [-inf,inf], omega=3)
885
+ (0.00317486988463794 - 0.0447701735209082j)
886
+ >>> 2*pi/sqrt(7)/exp(3*(j+sqrt(7))/2)
887
+ (0.00317486988463794 - 0.0447701735209082j)
888
+
889
+ **Non-periodic functions**
890
+
891
+ If `f(x) = g(x) h(x)` for some function `h(x)` that is not
892
+ strictly periodic, *omega* or *period* might not work, and it might
893
+ be necessary to use *zeros*.
894
+
895
+ A notable exception can be made for Bessel functions which, though not
896
+ periodic, are "asymptotically periodic" in a sufficiently strong sense
897
+ that the sum extrapolation will work out::
898
+
899
+ >>> quadosc(j0, [0, inf], period=2*pi)
900
+ 1.0
901
+ >>> quadosc(j1, [0, inf], period=2*pi)
902
+ 1.0
903
+
904
+ More properly, one should provide the exact Bessel function zeros::
905
+
906
+ >>> j0zero = lambda n: findroot(j0, pi*(n-0.25))
907
+ >>> quadosc(j0, [0, inf], zeros=j0zero)
908
+ 1.0
909
+
910
+ For an example where *zeros* becomes necessary, consider the
911
+ complete Fresnel integrals
912
+
913
+ .. math ::
914
+
915
+ \int_0^{\infty} \cos x^2\,dx = \int_0^{\infty} \sin x^2\,dx
916
+ = \sqrt{\frac{\pi}{8}}.
917
+
918
+ Although the integrands do not decrease in magnitude as
919
+ `x \to \infty`, the integrals are convergent since the oscillation
920
+ rate increases (causing consecutive periods to asymptotically
921
+ cancel out). These integrals are virtually impossible to calculate
922
+ to any kind of accuracy using standard quadrature rules. However,
923
+ if one provides the correct asymptotic distribution of zeros
924
+ (`x_n \sim \sqrt{n}`), :func:`~mpmath.quadosc` works::
925
+
926
+ >>> mp.dps = 30
927
+ >>> f = lambda x: cos(x**2)
928
+ >>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
929
+ 0.626657068657750125603941321203
930
+ >>> f = lambda x: sin(x**2)
931
+ >>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
932
+ 0.626657068657750125603941321203
933
+ >>> sqrt(pi/8)
934
+ 0.626657068657750125603941321203
935
+
936
+ (Interestingly, these integrals can still be evaluated if one
937
+ places some other constant than `\pi` in the square root sign.)
938
+
939
+ In general, if `f(x) \sim g(x) \cos(h(x))`, the zeros follow
940
+ the inverse-function distribution `h^{-1}(x)`::
941
+
942
+ >>> mp.dps = 15
943
+ >>> f = lambda x: sin(exp(x))
944
+ >>> quadosc(f, [1,inf], zeros=lambda n: log(n))
945
+ -0.25024394235267
946
+ >>> pi/2-si(e)
947
+ -0.250243942352671
948
+
949
+ **Non-alternating functions**
950
+
951
+ If the integrand oscillates around a positive value, without
952
+ alternating signs, the extrapolation might fail. A simple trick
953
+ that sometimes works is to multiply or divide the frequency by 2::
954
+
955
+ >>> f = lambda x: 1/x**2+sin(x)/x**4
956
+ >>> quadosc(f, [1,inf], omega=1) # Bad
957
+ 1.28642190869861
958
+ >>> quadosc(f, [1,inf], omega=0.5) # Perfect
959
+ 1.28652953559617
960
+ >>> 1+(cos(1)+ci(1)+sin(1))/6
961
+ 1.28652953559617
962
+
963
+ **Fast decay**
964
+
965
+ :func:`~mpmath.quadosc` is primarily useful for slowly decaying
966
+ integrands. If the integrand decreases exponentially or faster,
967
+ :func:`~mpmath.quad` will likely handle it without trouble (and generally be
968
+ much faster than :func:`~mpmath.quadosc`)::
969
+
970
+ >>> quadosc(lambda x: cos(x)/exp(x), [0, inf], omega=1)
971
+ 0.5
972
+ >>> quad(lambda x: cos(x)/exp(x), [0, inf])
973
+ 0.5
974
+
975
+ """
976
+ a, b = ctx._as_points(interval)
977
+ a = ctx.convert(a)
978
+ b = ctx.convert(b)
979
+ if [omega, period, zeros].count(None) != 2:
980
+ raise ValueError( \
981
+ "must specify exactly one of omega, period, zeros")
982
+ if a == ctx.ninf and b == ctx.inf:
983
+ s1 = ctx.quadosc(f, [a, 0], omega=omega, zeros=zeros, period=period)
984
+ s2 = ctx.quadosc(f, [0, b], omega=omega, zeros=zeros, period=period)
985
+ return s1 + s2
986
+ if a == ctx.ninf:
987
+ if zeros:
988
+ return ctx.quadosc(lambda x:f(-x), [-b,-a], zeros=lambda n: -zeros(-n))
989
+ else:
990
+ return ctx.quadosc(lambda x:f(-x), [-b,-a], omega=omega, period=period)
991
+ if b != ctx.inf:
992
+ raise ValueError("quadosc requires an infinite integration interval")
993
+ if not zeros:
994
+ if omega:
995
+ period = 2*ctx.pi/omega
996
+ zeros = lambda n: n*period/2
997
+ #for n in range(1,10):
998
+ # p = zeros(n)
999
+ # if p > a:
1000
+ # break
1001
+ #if n >= 9:
1002
+ # raise ValueError("zeros do not appear to be correctly indexed")
1003
+ n = 1
1004
+ s = ctx.quadgl(f, [a, zeros(n)])
1005
+ def term(k):
1006
+ return ctx.quadgl(f, [zeros(k), zeros(k+1)])
1007
+ s += ctx.nsum(term, [n, ctx.inf])
1008
+ return s
1009
+
1010
+ def quadsubdiv(ctx, f, interval, tol=None, maxintervals=None, **kwargs):
1011
+ """
1012
+ Computes the integral of *f* over the interval or path specified
1013
+ by *interval*, using :func:`~mpmath.quad` together with adaptive
1014
+ subdivision of the interval.
1015
+
1016
+ This function gives an accurate answer for some integrals where
1017
+ :func:`~mpmath.quad` fails::
1018
+
1019
+ >>> from mpmath import *
1020
+ >>> mp.dps = 15; mp.pretty = True
1021
+ >>> quad(lambda x: abs(sin(x)), [0, 2*pi])
1022
+ 3.99900894176779
1023
+ >>> quadsubdiv(lambda x: abs(sin(x)), [0, 2*pi])
1024
+ 4.0
1025
+ >>> quadsubdiv(sin, [0, 1000])
1026
+ 0.437620923709297
1027
+ >>> quadsubdiv(lambda x: 1/(1+x**2), [-100, 100])
1028
+ 3.12159332021646
1029
+ >>> quadsubdiv(lambda x: ceil(x), [0, 100])
1030
+ 5050.0
1031
+ >>> quadsubdiv(lambda x: sin(x+exp(x)), [0,8])
1032
+ 0.347400172657248
1033
+
1034
+ The argument *maxintervals* can be set to limit the permissible
1035
+ subdivision::
1036
+
1037
+ >>> quadsubdiv(lambda x: sin(x**2), [0,100], maxintervals=5, error=True)
1038
+ (-5.40487904307774, 5.011)
1039
+ >>> quadsubdiv(lambda x: sin(x**2), [0,100], maxintervals=100, error=True)
1040
+ (0.631417921866934, 1.10101120134116e-17)
1041
+
1042
+ Subdivision does not guarantee a correct answer, since the error
1043
+ estimate on subintervals may be inaccurate::
1044
+
1045
+ >>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, [0,1], error=True)
1046
+ (0.210802735500549, 1.0001111101e-17)
1047
+ >>> mp.dps = 20
1048
+ >>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, [0,1], error=True)
1049
+ (0.21080273550054927738, 2.200000001e-24)
1050
+
1051
+ The second answer is correct. We can get an accurate result at lower
1052
+ precision by forcing a finer initial subdivision::
1053
+
1054
+ >>> mp.dps = 15
1055
+ >>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, linspace(0,1,5))
1056
+ 0.210802735500549
1057
+
1058
+ The following integral is too oscillatory for convergence, but we can get a
1059
+ reasonable estimate::
1060
+
1061
+ >>> v, err = fp.quadsubdiv(lambda x: fp.sin(1/x), [0,1], error=True)
1062
+ >>> round(v, 6), round(err, 6)
1063
+ (0.504067, 1e-06)
1064
+ >>> sin(1) - ci(1)
1065
+ 0.504067061906928
1066
+
1067
+ """
1068
+ queue = []
1069
+ for i in range(len(interval)-1):
1070
+ queue.append((interval[i], interval[i+1]))
1071
+ total = ctx.zero
1072
+ total_error = ctx.zero
1073
+ if maxintervals is None:
1074
+ maxintervals = 10 * ctx.prec
1075
+ count = 0
1076
+ quad_args = kwargs.copy()
1077
+ quad_args["verbose"] = False
1078
+ quad_args["error"] = True
1079
+ if tol is None:
1080
+ tol = +ctx.eps
1081
+ orig = ctx.prec
1082
+ try:
1083
+ ctx.prec += 5
1084
+ while queue:
1085
+ a, b = queue.pop()
1086
+ s, err = ctx.quad(f, [a, b], **quad_args)
1087
+ if kwargs.get("verbose"):
1088
+ print("subinterval", count, a, b, err)
1089
+ if err < tol or count > maxintervals:
1090
+ total += s
1091
+ total_error += err
1092
+ else:
1093
+ count += 1
1094
+ if count == maxintervals and kwargs.get("verbose"):
1095
+ print("warning: number of intervals exceeded maxintervals")
1096
+ if a == -ctx.inf and b == ctx.inf:
1097
+ m = 0
1098
+ elif a == -ctx.inf:
1099
+ m = min(b-1, 2*b)
1100
+ elif b == ctx.inf:
1101
+ m = max(a+1, 2*a)
1102
+ else:
1103
+ m = a + (b - a) / 2
1104
+ queue.append((a, m))
1105
+ queue.append((m, b))
1106
+ finally:
1107
+ ctx.prec = orig
1108
+ if kwargs.get("error"):
1109
+ return +total, +total_error
1110
+ else:
1111
+ return +total
1112
+
1113
+ if __name__ == '__main__':
1114
+ import doctest
1115
+ doctest.testmod()
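
As a quick orientation to the options documented in the quad() docstring above, here is a minimal sketch (not part of the uploaded file) combining the *method*, *error* and *maxdegree* keywords; it assumes an mpmath context configured with mp.dps = 15, as in the docstring examples.

```python
from mpmath import mp, quad, quadgl, sin, pi

mp.dps = 15

# error=True returns (value, estimated error) instead of just the value
v, err = quad(sin, [0, pi], error=True)

# method may be given as a string, exactly as quad() parses it above;
# quadgl(sin, [0, pi]) is the equivalent shortcut defined in this file
v_gl = quad(sin, [0, pi], method='gauss-legendre')

# maxdegree raises the highest rule degree tried before giving up, which
# helps on long oscillatory ranges such as sin over [0, 1000]
v_long = quad(sin, [0, 1000], maxdegree=10)
```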
llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (600 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/bessel.cpython-310.pyc ADDED
Binary file (34.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/expintegrals.cpython-310.pyc ADDED
Binary file (10.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/functions.cpython-310.pyc ADDED
Binary file (17.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/hypergeometric.cpython-310.pyc ADDED
Binary file (39.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/qfunctions.cpython-310.pyc ADDED
Binary file (7.58 kB). View file
 
llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/theta.cpython-310.pyc ADDED
Binary file (21 kB). View file
 
llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/zetazeros.cpython-310.pyc ADDED
Binary file (29.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/peft/__init__.py ADDED
@@ -0,0 +1,90 @@
1
+ # flake8: noqa
2
+ # There's no way to ignore "F401 '...' imported but unused" warnings in this
3
+ # module while still preserving other warnings, so don't check this module at all.
4
+
5
+ # coding=utf-8
6
+ # Copyright 2023-present the HuggingFace Inc. team.
7
+ #
8
+ # Licensed under the Apache License, Version 2.0 (the "License");
9
+ # you may not use this file except in compliance with the License.
10
+ # You may obtain a copy of the License at
11
+ #
12
+ # http://www.apache.org/licenses/LICENSE-2.0
13
+ #
14
+ # Unless required by applicable law or agreed to in writing, software
15
+ # distributed under the License is distributed on an "AS IS" BASIS,
16
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17
+ # See the License for the specific language governing permissions and
18
+ # limitations under the License.
19
+
20
+ __version__ = "0.10.0"
21
+
22
+ from .auto import (
23
+ AutoPeftModel,
24
+ AutoPeftModelForCausalLM,
25
+ AutoPeftModelForSequenceClassification,
26
+ AutoPeftModelForSeq2SeqLM,
27
+ AutoPeftModelForTokenClassification,
28
+ AutoPeftModelForQuestionAnswering,
29
+ AutoPeftModelForFeatureExtraction,
30
+ )
31
+ from .mapping import (
32
+ MODEL_TYPE_TO_PEFT_MODEL_MAPPING,
33
+ PEFT_TYPE_TO_CONFIG_MAPPING,
34
+ get_peft_config,
35
+ get_peft_model,
36
+ inject_adapter_in_model,
37
+ )
38
+ from .mixed_model import PeftMixedModel
39
+ from .peft_model import (
40
+ PeftModel,
41
+ PeftModelForCausalLM,
42
+ PeftModelForSeq2SeqLM,
43
+ PeftModelForSequenceClassification,
44
+ PeftModelForTokenClassification,
45
+ PeftModelForQuestionAnswering,
46
+ PeftModelForFeatureExtraction,
47
+ )
48
+ from .tuners import (
49
+ AdaptionPromptConfig,
50
+ AdaptionPromptModel,
51
+ LoraConfig,
52
+ LoftQConfig,
53
+ LoraModel,
54
+ LoHaConfig,
55
+ LoHaModel,
56
+ LoKrConfig,
57
+ LoKrModel,
58
+ IA3Config,
59
+ IA3Model,
60
+ AdaLoraConfig,
61
+ AdaLoraModel,
62
+ PrefixEncoder,
63
+ PrefixTuningConfig,
64
+ PromptEmbedding,
65
+ PromptEncoder,
66
+ PromptEncoderConfig,
67
+ PromptEncoderReparameterizationType,
68
+ PromptTuningConfig,
69
+ PromptTuningInit,
70
+ MultitaskPromptTuningConfig,
71
+ MultitaskPromptTuningInit,
72
+ OFTConfig,
73
+ OFTModel,
74
+ PolyConfig,
75
+ PolyModel,
76
+ )
77
+ from .utils import (
78
+ TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING,
79
+ PeftType,
80
+ TaskType,
81
+ bloom_model_postprocess_past_key_value,
82
+ get_peft_model_state_dict,
83
+ prepare_model_for_kbit_training,
84
+ replace_lora_weights_loftq,
85
+ set_peft_model_state_dict,
86
+ shift_tokens_right,
87
+ load_peft_weights,
88
+ cast_mixed_precision_params,
89
+ )
90
+ from .config import PeftConfig, PromptLearningConfig
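
For context, a minimal sketch (not part of the uploaded file) of how the names re-exported by this __init__ are typically combined; the base model id mirrors the one used in the peft/helpers.py examples further below and is otherwise just an illustrative choice.

```python
from transformers import AutoModelForSeq2SeqLM
from peft import LoraConfig, TaskType, get_peft_model

base = AutoModelForSeq2SeqLM.from_pretrained("bigscience/mt0-large")
config = LoraConfig(task_type=TaskType.SEQ_2_SEQ_LM, r=8, lora_alpha=32, lora_dropout=0.1)
peft_model = get_peft_model(base, config)
peft_model.print_trainable_parameters()  # LoRA leaves most base weights frozen
```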
llmeval-env/lib/python3.10/site-packages/peft/auto.py ADDED
@@ -0,0 +1,170 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from __future__ import annotations
16
+
17
+ import importlib
18
+ import os
19
+ from typing import Optional
20
+
21
+ from transformers import (
22
+ AutoModel,
23
+ AutoModelForCausalLM,
24
+ AutoModelForQuestionAnswering,
25
+ AutoModelForSeq2SeqLM,
26
+ AutoModelForSequenceClassification,
27
+ AutoModelForTokenClassification,
28
+ AutoTokenizer,
29
+ )
30
+
31
+ from .config import PeftConfig
32
+ from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING
33
+ from .peft_model import (
34
+ PeftModel,
35
+ PeftModelForCausalLM,
36
+ PeftModelForFeatureExtraction,
37
+ PeftModelForQuestionAnswering,
38
+ PeftModelForSeq2SeqLM,
39
+ PeftModelForSequenceClassification,
40
+ PeftModelForTokenClassification,
41
+ )
42
+ from .utils.constants import TOKENIZER_CONFIG_NAME
43
+ from .utils.other import check_file_exists_on_hf_hub
44
+
45
+
46
+ class _BaseAutoPeftModel:
47
+ _target_class = None
48
+ _target_peft_class = None
49
+
50
+ def __init__(self, *args, **kwargs):
51
+ # For consistency with transformers: https://github.com/huggingface/transformers/blob/91d7df58b6537d385e90578dac40204cb550f706/src/transformers/models/auto/auto_factory.py#L400
52
+ raise EnvironmentError( # noqa: UP024
53
+ f"{self.__class__.__name__} is designed to be instantiated "
54
+ f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or "
55
+ f"`{self.__class__.__name__}.from_config(config)` methods."
56
+ )
57
+
58
+ @classmethod
59
+ def from_pretrained(
60
+ cls,
61
+ pretrained_model_name_or_path,
62
+ adapter_name: str = "default",
63
+ is_trainable: bool = False,
64
+ config: Optional[PeftConfig] = None,
65
+ **kwargs,
66
+ ):
67
+ r"""
68
+ A wrapper around all the preprocessing steps a user needs to perform in order to load a PEFT model. The kwargs
69
+ are passed along to `PeftConfig` that automatically takes care of filtering the kwargs of the Hub methods and
70
+ the config object init.
71
+ """
72
+ peft_config = PeftConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
73
+ base_model_path = peft_config.base_model_name_or_path
74
+
75
+ task_type = getattr(peft_config, "task_type", None)
76
+
77
+ if cls._target_class is not None:
78
+ target_class = cls._target_class
79
+ elif cls._target_class is None and task_type is not None:
80
+ # this is only in the case where we use `AutoPeftModel`
81
+ raise ValueError(
82
+ "Cannot use `AutoPeftModel` with a task type, please use a specific class for your task type. (e.g. `AutoPeftModelForCausalLM` for `task_type='CAUSAL_LM'`)"
83
+ )
84
+
85
+ if task_type is not None:
86
+ expected_target_class = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[task_type]
87
+ if cls._target_peft_class.__name__ != expected_target_class.__name__:
88
+ raise ValueError(
89
+ f"Expected target PEFT class: {expected_target_class.__name__}, but you have asked for: {cls._target_peft_class.__name__ }"
90
+ " make sure that you are loading the correct model for your task type."
91
+ )
92
+ elif task_type is None and getattr(peft_config, "auto_mapping", None) is not None:
93
+ auto_mapping = getattr(peft_config, "auto_mapping", None)
94
+ base_model_class = auto_mapping["base_model_class"]
95
+ parent_library_name = auto_mapping["parent_library"]
96
+
97
+ parent_library = importlib.import_module(parent_library_name)
98
+ target_class = getattr(parent_library, base_model_class)
99
+ else:
100
+ raise ValueError(
101
+ "Cannot infer the auto class from the config, please make sure that you are loading the correct model for your task type."
102
+ )
103
+
104
+ base_model = target_class.from_pretrained(base_model_path, **kwargs)
105
+
106
+ tokenizer_exists = False
107
+ if os.path.exists(os.path.join(pretrained_model_name_or_path, TOKENIZER_CONFIG_NAME)):
108
+ tokenizer_exists = True
109
+ else:
110
+ token = kwargs.get("token", None)
111
+ if token is None:
112
+ token = kwargs.get("use_auth_token", None)
113
+
114
+ tokenizer_exists = check_file_exists_on_hf_hub(
115
+ repo_id=pretrained_model_name_or_path,
116
+ filename=TOKENIZER_CONFIG_NAME,
117
+ revision=kwargs.get("revision", None),
118
+ repo_type=kwargs.get("repo_type", None),
119
+ token=token,
120
+ )
121
+
122
+ if tokenizer_exists:
123
+ tokenizer = AutoTokenizer.from_pretrained(
124
+ pretrained_model_name_or_path, trust_remote_code=kwargs.get("trust_remote_code", False)
125
+ )
126
+ base_model.resize_token_embeddings(len(tokenizer))
127
+
128
+ return cls._target_peft_class.from_pretrained(
129
+ base_model,
130
+ pretrained_model_name_or_path,
131
+ adapter_name=adapter_name,
132
+ is_trainable=is_trainable,
133
+ config=config,
134
+ **kwargs,
135
+ )
136
+
137
+
138
+ class AutoPeftModel(_BaseAutoPeftModel):
139
+ _target_class = None
140
+ _target_peft_class = PeftModel
141
+
142
+
143
+ class AutoPeftModelForCausalLM(_BaseAutoPeftModel):
144
+ _target_class = AutoModelForCausalLM
145
+ _target_peft_class = PeftModelForCausalLM
146
+
147
+
148
+ class AutoPeftModelForSeq2SeqLM(_BaseAutoPeftModel):
149
+ _target_class = AutoModelForSeq2SeqLM
150
+ _target_peft_class = PeftModelForSeq2SeqLM
151
+
152
+
153
+ class AutoPeftModelForSequenceClassification(_BaseAutoPeftModel):
154
+ _target_class = AutoModelForSequenceClassification
155
+ _target_peft_class = PeftModelForSequenceClassification
156
+
157
+
158
+ class AutoPeftModelForTokenClassification(_BaseAutoPeftModel):
159
+ _target_class = AutoModelForTokenClassification
160
+ _target_peft_class = PeftModelForTokenClassification
161
+
162
+
163
+ class AutoPeftModelForQuestionAnswering(_BaseAutoPeftModel):
164
+ _target_class = AutoModelForQuestionAnswering
165
+ _target_peft_class = PeftModelForQuestionAnswering
166
+
167
+
168
+ class AutoPeftModelForFeatureExtraction(_BaseAutoPeftModel):
169
+ _target_class = AutoModel
170
+ _target_peft_class = PeftModelForFeatureExtraction
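
A minimal sketch of the intended use of the Auto classes defined above; the adapter repository id below is purely illustrative (any Hub repo or local directory containing an adapter_config.json works), and the base model is resolved from base_model_name_or_path in that config.

```python
from peft import AutoPeftModelForCausalLM

# is_trainable defaults to False, matching the from_pretrained signature above
model = AutoPeftModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora")
```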
llmeval-env/lib/python3.10/site-packages/peft/config.py ADDED
@@ -0,0 +1,270 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import inspect
15
+ import json
16
+ import os
17
+ from dataclasses import asdict, dataclass, field
18
+ from typing import Dict, Optional, Union
19
+
20
+ from huggingface_hub import hf_hub_download
21
+ from transformers.utils import PushToHubMixin
22
+
23
+ from .utils import CONFIG_NAME, PeftType, TaskType
24
+
25
+
26
+ @dataclass
27
+ class PeftConfigMixin(PushToHubMixin):
28
+ r"""
29
+ This is the base configuration class for PEFT adapter models. It contains all the methods that are common to all
30
+ PEFT adapter models. This class inherits from [`~transformers.utils.PushToHubMixin`] which contains the methods to
31
+ push your model to the Hub. The method `save_pretrained` will save the configuration of your adapter model in a
32
+ directory. The method `from_pretrained` will load the configuration of your adapter model from a directory.
33
+
34
+ Args:
35
+ peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.
36
+ """
37
+
38
+ peft_type: Optional[PeftType] = field(default=None, metadata={"help": "The type of PEFT model."})
39
+ auto_mapping: Optional[dict] = field(
40
+ default=None, metadata={"help": "An auto mapping dict to help retrieve the base model class if needed."}
41
+ )
42
+
43
+ def to_dict(self) -> Dict:
44
+ r"""
45
+ Returns the configuration for your adapter model as a dictionary.
46
+ """
47
+ return asdict(self)
48
+
49
+ def save_pretrained(self, save_directory: str, **kwargs) -> None:
50
+ r"""
51
+ This method saves the configuration of your adapter model in a directory.
52
+
53
+ Args:
54
+ save_directory (`str`):
55
+ The directory where the configuration will be saved.
56
+ kwargs (additional keyword arguments, *optional*):
57
+ Additional keyword arguments passed along to the [`~transformers.utils.PushToHubMixin.push_to_hub`]
58
+ method.
59
+ """
60
+ if os.path.isfile(save_directory):
61
+ raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
62
+
63
+ os.makedirs(save_directory, exist_ok=True)
64
+ auto_mapping_dict = kwargs.pop("auto_mapping_dict", None)
65
+
66
+ output_dict = asdict(self)
67
+ # converting set type to list
68
+ for key, value in output_dict.items():
69
+ if isinstance(value, set):
70
+ output_dict[key] = list(value)
71
+
72
+ output_path = os.path.join(save_directory, CONFIG_NAME)
73
+
74
+ # Add auto mapping details for custom models.
75
+ if auto_mapping_dict is not None:
76
+ output_dict["auto_mapping"] = auto_mapping_dict
77
+
78
+ # save it
79
+ with open(output_path, "w") as writer:
80
+ writer.write(json.dumps(output_dict, indent=2, sort_keys=True))
81
+
82
+ @classmethod
83
+ def from_peft_type(cls, **kwargs):
84
+ r"""
85
+ This method loads the configuration of your adapter model from a set of kwargs.
86
+
87
+ The appropriate configuration type is determined by the `peft_type` argument. If `peft_type` is not provided,
88
+ the calling class type is instantiated.
89
+
90
+ Args:
91
+ kwargs (configuration keyword arguments):
92
+ Keyword arguments passed along to the configuration initialization.
93
+ """
94
+ # Avoid circular dependency .. TODO: fix this with a larger refactor
95
+ from peft.mapping import PEFT_TYPE_TO_CONFIG_MAPPING
96
+
97
+ # TODO: this hack is needed to fix the following issue (on commit 702f937):
98
+ # if someone saves a default config and loads it back with `PeftConfig` class it yields to
99
+ # not loading the correct config class.
100
+
101
+ # from peft import AdaLoraConfig, PeftConfig
102
+ # peft_config = AdaLoraConfig()
103
+ # print(peft_config)
104
+ # >>> AdaLoraConfig(peft_type=<PeftType.ADALORA: 'ADALORA'>, auto_mapping=None, base_model_name_or_path=None,
105
+ # revision=None, task_type=None, inference_mode=False, r=8, target_modules=None, lora_alpha=8, lora_dropout=0.0, ...
106
+ #
107
+ # peft_config.save_pretrained("./test_config")
108
+ # peft_config = PeftConfig.from_pretrained("./test_config")
109
+ # print(peft_config)
110
+ # >>> PeftConfig(peft_type='ADALORA', auto_mapping=None, base_model_name_or_path=None, revision=None, task_type=None, inference_mode=False)
111
+
112
+ if "peft_type" in kwargs:
113
+ peft_type = kwargs["peft_type"]
114
+ config_cls = PEFT_TYPE_TO_CONFIG_MAPPING[peft_type]
115
+ else:
116
+ config_cls = cls
117
+
118
+ return config_cls(**kwargs)
119
+
120
+ @classmethod
121
+ def from_pretrained(cls, pretrained_model_name_or_path: str, subfolder: Optional[str] = None, **kwargs):
122
+ r"""
123
+ This method loads the configuration of your adapter model from a directory.
124
+
125
+ Args:
126
+ pretrained_model_name_or_path (`str`):
127
+ The directory or the Hub repository id where the configuration is saved.
128
+ kwargs (additional keyword arguments, *optional*):
129
+ Additional keyword arguments passed along to the child class initialization.
130
+ """
131
+ path = (
132
+ os.path.join(pretrained_model_name_or_path, subfolder)
133
+ if subfolder is not None
134
+ else pretrained_model_name_or_path
135
+ )
136
+
137
+ hf_hub_download_kwargs, class_kwargs, _ = cls._split_kwargs(kwargs)
138
+
139
+ if os.path.isfile(os.path.join(path, CONFIG_NAME)):
140
+ config_file = os.path.join(path, CONFIG_NAME)
141
+ else:
142
+ try:
143
+ config_file = hf_hub_download(
144
+ pretrained_model_name_or_path, CONFIG_NAME, subfolder=subfolder, **hf_hub_download_kwargs
145
+ )
146
+ except Exception:
147
+ raise ValueError(f"Can't find '{CONFIG_NAME}' at '{pretrained_model_name_or_path}'")
148
+
149
+ loaded_attributes = cls.from_json_file(config_file)
150
+ kwargs = {**class_kwargs, **loaded_attributes}
151
+ return cls.from_peft_type(**kwargs)
152
+
153
+ @classmethod
154
+ def from_json_file(cls, path_json_file: str, **kwargs):
155
+ r"""
156
+ Loads a configuration file from a json file.
157
+
158
+ Args:
159
+ path_json_file (`str`):
160
+ The path to the json file.
161
+ """
162
+ with open(path_json_file) as file:
163
+ json_object = json.load(file)
164
+
165
+ return json_object
166
+
167
+ @classmethod
168
+ def _split_kwargs(cls, kwargs):
169
+ hf_hub_download_kwargs = {}
170
+ class_kwargs = {}
171
+ other_kwargs = {}
172
+
173
+ for key, value in kwargs.items():
174
+ if key in inspect.signature(hf_hub_download).parameters:
175
+ hf_hub_download_kwargs[key] = value
176
+ elif key in list(cls.__annotations__):
177
+ class_kwargs[key] = value
178
+ else:
179
+ other_kwargs[key] = value
180
+
181
+ return hf_hub_download_kwargs, class_kwargs, other_kwargs
182
+
183
+ @classmethod
184
+ def _get_peft_type(
185
+ cls,
186
+ model_id: str,
187
+ **hf_hub_download_kwargs,
188
+ ):
189
+ subfolder = hf_hub_download_kwargs.get("subfolder", None)
190
+
191
+ path = os.path.join(model_id, subfolder) if subfolder is not None else model_id
192
+
193
+ if os.path.isfile(os.path.join(path, CONFIG_NAME)):
194
+ config_file = os.path.join(path, CONFIG_NAME)
195
+ else:
196
+ try:
197
+ config_file = hf_hub_download(
198
+ model_id,
199
+ CONFIG_NAME,
200
+ **hf_hub_download_kwargs,
201
+ )
202
+ except Exception:
203
+ raise ValueError(f"Can't find '{CONFIG_NAME}' at '{model_id}'")
204
+
205
+ loaded_attributes = cls.from_json_file(config_file)
206
+ return loaded_attributes["peft_type"]
207
+
208
+ @property
209
+ def is_prompt_learning(self) -> bool:
210
+ r"""
211
+ Utility method to check if the configuration is for prompt learning.
212
+ """
213
+ return False
214
+
215
+ @property
216
+ def is_adaption_prompt(self) -> bool:
217
+ """Return True if this is an adaption prompt config."""
218
+ return False
219
+
220
+
221
+ @dataclass
222
+ class PeftConfig(PeftConfigMixin):
223
+ """
224
+ This is the base configuration class to store the configuration of a [`PeftModel`].
225
+
226
+ Args:
227
+ peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.
228
+ task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform.
229
+ inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode.
230
+ """
231
+
232
+ base_model_name_or_path: Optional[str] = field(
233
+ default=None, metadata={"help": "The name of the base model to use."}
234
+ )
235
+ revision: Optional[str] = field(default=None, metadata={"help": "The specific model version to use."})
236
+ peft_type: Optional[Union[str, PeftType]] = field(default=None, metadata={"help": "Peft type"})
237
+ task_type: Optional[Union[str, TaskType]] = field(default=None, metadata={"help": "Task type"})
238
+ inference_mode: bool = field(default=False, metadata={"help": "Whether to use inference mode"})
239
+
240
+
241
+ @dataclass
242
+ class PromptLearningConfig(PeftConfig):
243
+ """
244
+ This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or
245
+ [`PromptTuning`].
246
+
247
+ Args:
248
+ num_virtual_tokens (`int`): The number of virtual tokens to use.
249
+ token_dim (`int`): The hidden embedding dimension of the base transformer model.
250
+ num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model.
251
+ num_attention_heads (`int`): The number of attention heads in the base transformer model.
252
+ num_layers (`int`): The number of layers in the base transformer model.
253
+ """
254
+
255
+ num_virtual_tokens: int = field(default=None, metadata={"help": "Number of virtual tokens"})
256
+ token_dim: int = field(
257
+ default=None, metadata={"help": "The hidden embedding dimension of the base transformer model"}
258
+ )
259
+ num_transformer_submodules: Optional[int] = field(
260
+ default=None, metadata={"help": "Number of transformer submodules"}
261
+ )
262
+ num_attention_heads: Optional[int] = field(default=None, metadata={"help": "Number of attention heads"})
263
+ num_layers: Optional[int] = field(default=None, metadata={"help": "Number of transformer layers"})
264
+
265
+ @property
266
+ def is_prompt_learning(self) -> bool:
267
+ r"""
268
+ Utility method to check if the configuration is for prompt learning.
269
+ """
270
+ return True
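
A minimal sketch (not part of the uploaded file) of the save/load round trip that from_peft_type() is designed to handle, compare the commented example near the "702f937" note above; LoraConfig is assumed to be available from the surrounding peft package.

```python
from peft import LoraConfig, PeftConfig

cfg = LoraConfig(r=8)
cfg.save_pretrained("./test_config")             # writes adapter_config.json
reloaded = PeftConfig.from_pretrained("./test_config")
print(type(reloaded).__name__)                   # LoraConfig, not plain PeftConfig
```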
llmeval-env/lib/python3.10/site-packages/peft/helpers.py ADDED
@@ -0,0 +1,113 @@
1
+ import inspect
2
+ from copy import deepcopy
3
+ from functools import update_wrapper
4
+ from types import MethodType
5
+
6
+ from .peft_model import PeftModel
7
+
8
+
9
+ def update_forward_signature(model: PeftModel) -> None:
10
+ """
11
+ Args:
12
+ Updates the forward signature of the PeftModel to include parents class signature
13
+ model (`PeftModel`): Peft model to update the forward signature
14
+ Example:
15
+
16
+ ```python
17
+ >>> from transformers import WhisperForConditionalGeneration
18
+ >>> from peft import get_peft_model, LoraConfig, update_forward_signature
19
+
20
+ >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")
21
+ >>> peft_config = LoraConfig(r=8, lora_alpha=32, lora_dropout=0.1, target_modules=["q_proj", "v_proj"])
22
+
23
+ >>> peft_model = get_peft_model(model, peft_config)
24
+ >>> update_forward_signature(peft_model)
25
+ ```
26
+ """
27
+
28
+ # Only update signature when the current forward signature only has *args and **kwargs
29
+ current_signature = inspect.signature(model.forward)
30
+ if (
31
+ len(current_signature.parameters) == 2
32
+ and "args" in current_signature.parameters
33
+ and "kwargs" in current_signature.parameters
34
+ ):
35
+ forward = deepcopy(model.forward.__func__)
36
+ update_wrapper(
37
+ forward, type(model.get_base_model()).forward, assigned=("__doc__", "__name__", "__annotations__")
38
+ )
39
+ model.forward = MethodType(forward, model)
40
+
41
+
42
+ def update_generate_signature(model: PeftModel) -> None:
43
+ """
44
+ Args:
45
+ Updates the generate signature of a PeftModel with overriding generate to include parents class signature
46
+ model (`PeftModel`): Peft model to update the generate signature
47
+ Example:
48
+
49
+ ```python
50
+ >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
51
+ >>> from peft import get_peft_model, LoraConfig, TaskType, update_generate_signature
52
+
53
+ >>> model_name_or_path = "bigscience/mt0-large"
54
+ >>> tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
55
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
56
+
57
+ >>> peft_config = LoraConfig(
58
+ ... task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1
59
+ ... )
60
+ >>> peft_model = get_peft_model(model, peft_config)
61
+ >>> update_generate_signature(peft_model)
62
+ >>> help(peft_model.generate)
63
+ ```
64
+ """
65
+ if not hasattr(model, "generate"):
66
+ return
67
+ current_signature = inspect.signature(model.generate)
68
+ if (
69
+ len(current_signature.parameters) == 2
70
+ and "args" in current_signature.parameters
71
+ and "kwargs" in current_signature.parameters
72
+ ) or (len(current_signature.parameters) == 1 and "kwargs" in current_signature.parameters):
73
+ generate = deepcopy(model.generate.__func__)
74
+ update_wrapper(
75
+ generate,
76
+ type(model.get_base_model()).generate,
77
+ assigned=("__doc__", "__name__", "__annotations__"),
78
+ )
79
+ model.generate = MethodType(generate, model)
80
+
81
+
82
+ def update_signature(model: PeftModel, method: str = "all") -> None:
83
+ """
84
+ Args:
85
+ Updates the signature of a PeftModel include parents class signature for forward or generate method
86
+ model (`PeftModel`): Peft model to update generate or forward signature method (`str`): method to update
87
+ signature choose one of "forward", "generate", "all"
88
+ Example:
89
+ ```python
90
+ >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
91
+ >>> from peft import get_peft_model, LoraConfig, TaskType, update_signature
92
+
93
+ >>> model_name_or_path = "bigscience/mt0-large"
94
+ >>> tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
95
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
96
+
97
+ >>> peft_config = LoraConfig(
98
+ ... task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1
99
+ ... )
100
+ >>> peft_model = get_peft_model(model, peft_config)
101
+ >>> update_signature(peft_model)
102
+ >>> help(peft_model.generate)
103
+ ```
104
+ """
105
+ if method == "forward":
106
+ update_forward_signature(model)
107
+ elif method == "generate":
108
+ update_generate_signature(model)
109
+ elif method == "all":
110
+ update_forward_signature(model)
111
+ update_generate_signature(model)
112
+ else:
113
+ raise ValueError(f"method {method} is not supported please choose one of ['forward', 'generate', 'all']")
llmeval-env/lib/python3.10/site-packages/peft/import_utils.py ADDED
@@ -0,0 +1,73 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import importlib
15
+ import importlib.metadata as importlib_metadata
16
+ from functools import lru_cache
17
+
18
+ import packaging.version
19
+
20
+
21
+ def is_bnb_available() -> bool:
22
+ return importlib.util.find_spec("bitsandbytes") is not None
23
+
24
+
25
+ def is_bnb_4bit_available() -> bool:
26
+ if not is_bnb_available():
27
+ return False
28
+
29
+ import bitsandbytes as bnb
30
+
31
+ return hasattr(bnb.nn, "Linear4bit")
32
+
33
+
34
+ def is_auto_gptq_available():
35
+ if importlib.util.find_spec("auto_gptq") is not None:
36
+ AUTOGPTQ_MINIMUM_VERSION = packaging.version.parse("0.5.0")
37
+ version_autogptq = packaging.version.parse(importlib_metadata.version("auto_gptq"))
38
+ if AUTOGPTQ_MINIMUM_VERSION <= version_autogptq:
39
+ return True
40
+ else:
41
+ raise ImportError(
42
+ f"Found an incompatible version of auto-gptq. Found version {version_autogptq}, "
43
+ f"but only versions above {AUTOGPTQ_MINIMUM_VERSION} are supported"
44
+ )
45
+
46
+
47
+ def is_optimum_available() -> bool:
48
+ return importlib.util.find_spec("optimum") is not None
49
+
50
+
51
+ @lru_cache
52
+ def is_torch_tpu_available(check_device=True):
53
+ "Checks if `torch_xla` is installed and potentially if a TPU is in the environment"
54
+ if importlib.util.find_spec("torch_xla") is not None:
55
+ if check_device:
56
+ # We need to check if `xla_device` can be found, will raise a RuntimeError if not
57
+ try:
58
+ import torch_xla.core.xla_model as xm
59
+
60
+ _ = xm.xla_device()
61
+ return True
62
+ except RuntimeError:
63
+ return False
64
+ return True
65
+ return False
66
+
67
+
68
+ def is_aqlm_available():
69
+ return importlib.util.find_spec("aqlm") is not None
70
+
71
+
72
+ def is_auto_awq_available():
73
+ return importlib.util.find_spec("awq") is not None
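
A minimal sketch (not part of the uploaded file) of how these capability probes are typically consumed, guarding optional quantization imports; the fallback variable name is just illustrative.

```python
import torch
from peft.import_utils import is_bnb_4bit_available, is_bnb_available

if is_bnb_4bit_available():
    import bitsandbytes as bnb
    linear_cls = bnb.nn.Linear4bit       # the 4-bit layer probed above
elif is_bnb_available():
    import bitsandbytes as bnb
    linear_cls = bnb.nn.Linear8bitLt     # 8-bit fallback
else:
    linear_cls = torch.nn.Linear         # full-precision fallback
```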
llmeval-env/lib/python3.10/site-packages/peft/mapping.py ADDED
@@ -0,0 +1,168 @@
 
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from __future__ import annotations
16
+
17
+ from typing import TYPE_CHECKING, Any
18
+
19
+ import torch
20
+
21
+ from .config import PeftConfig
22
+ from .mixed_model import PeftMixedModel
23
+ from .peft_model import (
24
+ PeftModel,
25
+ PeftModelForCausalLM,
26
+ PeftModelForFeatureExtraction,
27
+ PeftModelForQuestionAnswering,
28
+ PeftModelForSeq2SeqLM,
29
+ PeftModelForSequenceClassification,
30
+ PeftModelForTokenClassification,
31
+ )
32
+ from .tuners import (
33
+ AdaLoraConfig,
34
+ AdaLoraModel,
35
+ AdaptionPromptConfig,
36
+ IA3Config,
37
+ IA3Model,
38
+ LoHaConfig,
39
+ LoHaModel,
40
+ LoKrConfig,
41
+ LoKrModel,
42
+ LoraConfig,
43
+ LoraModel,
44
+ MultitaskPromptTuningConfig,
45
+ OFTConfig,
46
+ OFTModel,
47
+ PolyConfig,
48
+ PolyModel,
49
+ PrefixTuningConfig,
50
+ PromptEncoderConfig,
51
+ PromptTuningConfig,
52
+ )
53
+ from .utils import _prepare_prompt_learning_config
54
+
55
+
56
+ if TYPE_CHECKING:
57
+ from transformers import PreTrainedModel
58
+
59
+
60
+ MODEL_TYPE_TO_PEFT_MODEL_MAPPING: dict[str, PeftModel] = {
61
+ "SEQ_CLS": PeftModelForSequenceClassification,
62
+ "SEQ_2_SEQ_LM": PeftModelForSeq2SeqLM,
63
+ "CAUSAL_LM": PeftModelForCausalLM,
64
+ "TOKEN_CLS": PeftModelForTokenClassification,
65
+ "QUESTION_ANS": PeftModelForQuestionAnswering,
66
+ "FEATURE_EXTRACTION": PeftModelForFeatureExtraction,
67
+ }
68
+
69
+ PEFT_TYPE_TO_CONFIG_MAPPING: dict[str, PeftConfig] = {
70
+ "ADAPTION_PROMPT": AdaptionPromptConfig,
71
+ "PROMPT_TUNING": PromptTuningConfig,
72
+ "PREFIX_TUNING": PrefixTuningConfig,
73
+ "P_TUNING": PromptEncoderConfig,
74
+ "LORA": LoraConfig,
75
+ "LOHA": LoHaConfig,
76
+ "LOKR": LoKrConfig,
77
+ "ADALORA": AdaLoraConfig,
78
+ "IA3": IA3Config,
79
+ "MULTITASK_PROMPT_TUNING": MultitaskPromptTuningConfig,
80
+ "OFT": OFTConfig,
81
+ "POLY": PolyConfig,
82
+ }
83
+
84
+ PEFT_TYPE_TO_TUNER_MAPPING = {
85
+ "LORA": LoraModel,
86
+ "LOHA": LoHaModel,
87
+ "LOKR": LoKrModel,
88
+ "ADALORA": AdaLoraModel,
89
+ "IA3": IA3Model,
90
+ "OFT": OFTModel,
91
+ "POLY": PolyModel,
92
+ }
93
+
94
+
95
+ def get_peft_config(config_dict: dict[str, Any]) -> PeftConfig:
96
+ """
97
+ Returns a Peft config object from a dictionary.
98
+
99
+ Args:
100
+ config_dict (`Dict[str, Any]`): Dictionary containing the configuration parameters.
101
+ """
102
+
103
+ return PEFT_TYPE_TO_CONFIG_MAPPING[config_dict["peft_type"]](**config_dict)
104
+
105
+
106
+ def get_peft_model(
107
+ model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default", mixed: bool = False
108
+ ) -> PeftModel | PeftMixedModel:
109
+ """
110
+ Returns a Peft model object from a model and a config.
111
+
112
+ Args:
113
+ model ([`transformers.PreTrainedModel`]):
114
+ Model to be wrapped.
115
+ peft_config ([`PeftConfig`]):
116
+ Configuration object containing the parameters of the Peft model.
117
+ adapter_name (`str`, `optional`, defaults to `"default"`):
118
+ The name of the adapter to be injected, if not provided, the default adapter name is used ("default").
119
+ mixed (`bool`, `optional`, defaults to `False`):
120
+ Whether to allow mixing different (compatible) adapter types.
121
+ """
122
+ model_config = getattr(model, "config", {"model_type": "custom"})
123
+ if hasattr(model_config, "to_dict"):
124
+ model_config = model_config.to_dict()
125
+
126
+ peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None)
127
+
128
+ if mixed:
129
+ return PeftMixedModel(model, peft_config, adapter_name=adapter_name)
130
+
131
+ if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not peft_config.is_prompt_learning:
132
+ return PeftModel(model, peft_config, adapter_name=adapter_name)
133
+
134
+ if peft_config.is_prompt_learning:
135
+ peft_config = _prepare_prompt_learning_config(peft_config, model_config)
136
+ return MODEL_TYPE_TO_PEFT_MODEL_MAPPING[peft_config.task_type](model, peft_config, adapter_name=adapter_name)
137
+
138
+
139
+ def inject_adapter_in_model(
140
+ peft_config: PeftConfig, model: torch.nn.Module, adapter_name: str = "default"
141
+ ) -> torch.nn.Module:
142
+ r"""
143
+ A simple API to create and inject adapter in-place into a model. Currently the API does not support prompt learning
144
+ methods and adaption prompt. Make sure to have the correct `target_modules` set in the `peft_config` object. The API
145
+ calls `get_peft_model` under the hood but would be restricted only to non-prompt learning methods.
146
+
147
+ Args:
148
+ peft_config (`PeftConfig`):
149
+ Configuration object containing the parameters of the Peft model.
150
+ model (`torch.nn.Module`):
151
+ The input model where the adapter will be injected.
152
+ adapter_name (`str`, `optional`, defaults to `"default"`):
153
+ The name of the adapter to be injected, if not provided, the default adapter name is used ("default").
154
+ """
155
+ if peft_config.is_prompt_learning or peft_config.is_adaption_prompt:
156
+ raise ValueError("`create_and_replace` does not support prompt learning and adaption prompt yet.")
157
+
158
+ if peft_config.peft_type not in PEFT_TYPE_TO_TUNER_MAPPING.keys():
159
+ raise ValueError(
160
+ f"`inject_adapter_in_model` does not support {peft_config.peft_type} yet. Please use `get_peft_model`."
161
+ )
162
+
163
+ tuner_cls = PEFT_TYPE_TO_TUNER_MAPPING[peft_config.peft_type]
164
+
165
+ # By instantiating a peft model we are injecting randomly initialized LoRA layers into the model's modules.
166
+ peft_model = tuner_cls(model, peft_config, adapter_name=adapter_name)
167
+
168
+ return peft_model.model
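
A minimal sketch of inject_adapter_in_model() on a toy module; TinyModel and the target module name "linear" are illustrative, not part of the library.

```python
import torch
from peft import LoraConfig, inject_adapter_in_model

class TinyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(10, 10)

    def forward(self, x):
        return self.linear(x)

lora_config = LoraConfig(r=4, lora_alpha=8, target_modules=["linear"])
model = inject_adapter_in_model(lora_config, TinyModel())
out = model(torch.randn(2, 10))  # the wrapped Linear now carries LoRA weights
```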
llmeval-env/lib/python3.10/site-packages/peft/mixed_model.py ADDED
@@ -0,0 +1,409 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from __future__ import annotations
16
+
17
+ import os
18
+ from contextlib import contextmanager
19
+ from typing import Any, Optional, Union
20
+
21
+ import torch
22
+ from accelerate.hooks import remove_hook_from_submodules
23
+ from torch import nn
24
+ from transformers.utils import PushToHubMixin
25
+
26
+ from peft.tuners.mixed import COMPATIBLE_TUNER_TYPES
27
+
28
+ from .config import PeftConfig
29
+ from .peft_model import PeftModel
30
+ from .tuners import (
31
+ AdaLoraModel,
32
+ IA3Model,
33
+ LoHaModel,
34
+ LoKrModel,
35
+ LoraModel,
36
+ MixedModel,
37
+ OFTModel,
38
+ )
39
+ from .utils import PeftType, _set_adapter, _set_trainable
40
+
41
+
42
+ PEFT_TYPE_TO_MODEL_MAPPING = {
43
+ PeftType.LORA: LoraModel,
44
+ PeftType.LOHA: LoHaModel,
45
+ PeftType.LOKR: LoKrModel,
46
+ PeftType.ADALORA: AdaLoraModel,
47
+ PeftType.IA3: IA3Model,
48
+ PeftType.OFT: OFTModel,
49
+ }
50
+
51
+
52
+ def _prepare_model_for_gradient_checkpointing(model: nn.Module) -> None:
53
+ r"""
54
+ Prepares the model for gradient checkpointing if necessary
55
+ """
56
+ # Note: same as PeftModel._prepare_model_for_gradient_checkpointing
57
+ if not getattr(model, "is_gradient_checkpointing", True):
58
+ return model
59
+
60
+ if not (
61
+ getattr(model, "is_loaded_in_8bit", False)
62
+ or getattr(model, "is_loaded_in_4bit", False)
63
+ or getattr(model, "is_quantized", False)
64
+ ):
65
+ if hasattr(model, "enable_input_require_grads"):
66
+ model.enable_input_require_grads()
67
+ elif hasattr(model, "get_input_embeddings"):
68
+
69
+ def make_inputs_require_grad(module, input, output):
70
+ output.requires_grad_(True)
71
+
72
+ model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
73
+
74
+
75
+ def _check_config_compatible(peft_config: PeftConfig) -> None:
76
+ if peft_config.peft_type not in COMPATIBLE_TUNER_TYPES:
77
+ raise ValueError(
78
+ f"The provided `peft_type` '{peft_config.peft_type.value}' is not compatible with the `PeftMixedModel`. "
79
+ f"Compatible types are: {COMPATIBLE_TUNER_TYPES}"
80
+ )
81
+
82
+
83
+ class PeftMixedModel(PushToHubMixin, torch.nn.Module):
84
+ """
85
+ PeftMixedModel for loading and mixing different types of adapters for inference.
86
+
87
+ This class does not support loading/saving, and it shouldn't usually be initialized directly. Instead, use
88
+ `get_peft_model` with the argument `mixed=True`.
89
+
90
+ <Tip>
91
+
92
+ Read the [Mixed adapter types](https://huggingface.co/docs/peft/en/developer_guides/mixed_models) guide to learn
93
+ more about using different adapter types.
94
+
95
+ </Tip>
96
+
97
+ Example:
98
+
99
+ ```py
100
+ >>> from peft import get_peft_model
101
+
102
+ >>> base_model = ... # load the base model, e.g. from transformers
103
+ >>> peft_model = PeftMixedModel.from_pretrained(base_model, path_to_adapter1, "adapter1").eval()
104
+ >>> peft_model.load_adapter(path_to_adapter2, "adapter2")
105
+ >>> peft_model.set_adapter(["adapter1", "adapter2"]) # activate both adapters
106
+ >>> peft_model(data) # forward pass using both adapters
107
+ ```
108
+
109
+ Args:
110
+ model (`torch.nn.Module`):
111
+ The model to be tuned.
112
+ config (`PeftConfig`):
113
+ The config of the model to be tuned. The adapter type must be compatible.
114
+ adapter_name (`str`, `optional`, defaults to `"default"`):
115
+ The name of the first adapter.
116
+ """
117
+
118
+ def __init__(self, model: nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None:
119
+ super().__init__()
120
+ _check_config_compatible(peft_config)
121
+ _prepare_model_for_gradient_checkpointing(model)
122
+ self.modules_to_save = None
123
+ self.base_model = MixedModel(model, {adapter_name: peft_config}, adapter_name)
124
+ self.set_modules_to_save(peft_config, adapter_name)
125
+
126
+ self.config = getattr(model, "config", {"model_type": "custom"})
127
+
128
+ # the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid
129
+ # numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected
130
+ # behavior we disable that in this line.
131
+ if hasattr(self.base_model, "config") and hasattr(self.base_model.config, "pretraining_tp"):
132
+ self.base_model.config.pretraining_tp = 1
133
+
134
+ @property
135
+ def peft_config(self) -> dict[str, PeftConfig]:
136
+ return self.base_model.peft_config
137
+
138
+ @property
139
+ def active_adapter(self) -> str:
140
+ return self.base_model.active_adapter
141
+
142
+ @property
143
+ def active_adapters(self) -> list[str]:
144
+ return self.base_model.active_adapters
145
+
146
+ def get_nb_trainable_parameters(self):
147
+ r"""
148
+ Returns the number of trainable parameters and number of all parameters in the model.
149
+ """
150
+ # note: same as PeftModel.get_nb_trainable_parameters
151
+ trainable_params = 0
152
+ all_param = 0
153
+ for _, param in self.named_parameters():
154
+ num_params = param.numel()
155
+ # if using DS Zero 3 and the weights are initialized empty
156
+ if num_params == 0 and hasattr(param, "ds_numel"):
157
+ num_params = param.ds_numel
158
+
159
+ # Due to the design of 4bit linear layers from bitsandbytes
160
+ # one needs to multiply the number of parameters by 2 to get
161
+ # the correct number of parameters
162
+ if param.__class__.__name__ == "Params4bit":
163
+ num_params = num_params * 2
164
+
165
+ all_param += num_params
166
+ if param.requires_grad:
167
+ trainable_params += num_params
168
+
169
+ return trainable_params, all_param
170
+
171
+ def print_trainable_parameters(self):
172
+ """
173
+ Prints the number of trainable parameters in the model.
174
+
175
+ Note: print_trainable_parameters() uses get_nb_trainable_parameters() which is different from
176
+ num_parameters(only_trainable=True) from huggingface/transformers. get_nb_trainable_parameters() returns
177
+ (trainable parameters, all parameters) of the Peft Model which includes modified backbone transformer model.
178
+ For techniques like LoRA, the backbone transformer model is modified in place with LoRA modules. However, for
179
+ prompt tuning, the backbone transformer model is unmodified. num_parameters(only_trainable=True) returns number
180
+ of trainable parameters of the backbone transformer model which can be different.
181
+ """
182
+ # note: same as PeftModel.print_trainable_parameters
183
+ trainable_params, all_param = self.get_nb_trainable_parameters()
184
+
185
+ print(
186
+ f"trainable params: {trainable_params:,d} || "
187
+ f"all params: {all_param:,d} || "
188
+ f"trainable%: {100 * trainable_params / all_param:.4f}"
189
+ )
190
+
191
+ def __getattr__(self, name: str):
192
+ """Forward missing attributes to the wrapped module."""
193
+ try:
194
+ return super().__getattr__(name) # defer to nn.Module's logic
195
+ except AttributeError:
196
+ return getattr(self.base_model, name)
197
+
198
+ def forward(self, *args: Any, **kwargs: Any):
199
+ """
200
+ Forward pass of the model.
201
+ """
202
+ return self.base_model(*args, **kwargs)
203
+
204
+ def generate(self, *args: Any, **kwargs: Any):
205
+ """
206
+ Generate output.
207
+ """
208
+ return self.base_model.generate(*args, **kwargs)
209
+
210
+ @contextmanager
211
+ def disable_adapter(self):
212
+ """
213
+ Disables the adapter module.
214
+ """
215
+ try:
216
+ self.base_model.disable_adapter_layers()
217
+ yield
218
+ finally:
219
+ self.base_model.enable_adapter_layers()
220
+
221
+ def add_adapter(self, adapter_name: str, peft_config: PeftConfig):
222
+ _check_config_compatible(peft_config)
223
+
224
+ try:
225
+ self.peft_config[adapter_name] = peft_config
226
+ self.base_model.inject_adapter(self, adapter_name)
227
+ except Exception: # something went wrong, roll back
228
+ if adapter_name in self.peft_config:
229
+ del self.peft_config[adapter_name]
230
+ raise
231
+
232
+ self.set_modules_to_save(peft_config, adapter_name)
233
+
234
+ def set_modules_to_save(self, peft_config: PeftConfig, adapter_name: str) -> None:
235
+ if (modules_to_save := getattr(peft_config, "modules_to_save", None)) is None:
236
+ return
237
+
238
+ if self.modules_to_save is None:
239
+ self.modules_to_save = set(modules_to_save)
240
+ else:
241
+ self.modules_to_save.update(modules_to_save)
242
+ _set_trainable(self, adapter_name)
243
+
244
+ def set_adapter(self, adapter_name: Union[str, list[str]]) -> None:
245
+ """
246
+ Sets the active adapter(s) for the model.
247
+
248
+ Note that the order in which the adapters are applied during the forward pass may not be the same as the order
249
+ in which they are passed to this function. Instead, the order during the forward pass is determined by the
250
+ order in which the adapters were loaded into the model. The active adapters only determine which adapters are
251
+ active during the forward pass, but not the order in which they are applied.
252
+
253
+ Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is
254
+ not desired, use the following code.
255
+
256
+ ```py
257
+ >>> for name, param in model_peft.named_parameters():
258
+ ... if ...: # some check on name (ex. if 'lora' in name)
259
+ ... param.requires_grad = False
260
+ ```
261
+
262
+ Args:
263
+ adapter_name (`str` or `List[str]`):
264
+ The name of the adapter(s) to be activated.
265
+ """
266
+ if isinstance(adapter_name, str):
267
+ adapter_name = [adapter_name]
268
+
269
+ mismatched = set(adapter_name) - set(self.peft_config.keys())
270
+ if mismatched:
271
+ raise ValueError(
272
+ f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}"
273
+ )
274
+
275
+ self.base_model.set_adapter(adapter_name)
276
+ _set_adapter(self, adapter_name)
277
+
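A short hedged sketch of the `set_adapter` behaviour described above; `mixed_model` and the adapter names are placeholders for adapters that were already added or loaded:

```py
# Hypothetical: both adapters were previously added/loaded on a PeftMixedModel instance.
mixed_model.set_adapter(["adapter1", "adapter2"])  # both adapters participate in the forward pass

# set_adapter also marks the selected adapters as trainable; re-freeze them if that is not desired.
for name, param in mixed_model.named_parameters():
    if "lora_" in name or "hada_" in name:  # adapter parameter name prefixes, e.g. LoRA / LoHa
        param.requires_grad = False
```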
278
+ def delete_adapter(self, adapter_name: Union[str, list[str]]) -> None:
279
+ if isinstance(adapter_name, str):
280
+ adapter_name = [adapter_name]
281
+
282
+ mismatched = set(adapter_name) - set(self.peft_config.keys())
283
+ if mismatched:
284
+ raise ValueError(
285
+ f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}"
286
+ )
287
+
288
+ self.base_model.delete_adapter(adapter_name)
289
+
290
+ def merge_and_unload(self, *args: Any, **kwargs: Any):
291
+ r"""
292
+ This method merges the adapter layers into the base model. This is needed if someone wants to use the base
293
+ model as a standalone model.
294
+
295
+ Args:
296
+ progressbar (`bool`):
297
+ whether to show a progressbar indicating the unload and merge process
298
+ safe_merge (`bool`):
299
+ whether to activate the safe merging check to check if there is any potential Nan in the adapter
300
+ weights
301
+ adapter_names (`List[str]`, *optional*):
302
+ The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
303
+ to `None`.
304
+ """
305
+ return self.base_model.merge_and_unload(*args, **kwargs)
306
+
307
+ def unload(self, *args: Any, **kwargs: Any):
308
+ """
309
+ Gets back the base model by removing all the adapter modules without merging. This gives back the original base
310
+ model.
311
+ """
312
+ return self.base_model.unload(*args, **kwargs)
313
+
314
+ @classmethod
315
+ def _split_kwargs(cls, kwargs: dict[str, Any]):
316
+ return PeftModel._split_kwargs(kwargs)
317
+
318
+ def load_adapter(self, model_id: str, adapter_name: str, *args: Any, **kwargs: Any):
319
+ output = PeftModel.load_adapter(self, model_id, adapter_name, *args, **kwargs)
320
+ # TODO: not quite clear why this is necessary but tests fail without it
321
+ self.set_adapter(self.active_adapters)
322
+ return output
323
+
324
+ def create_or_update_model_card(self, output_dir: str):
325
+ raise NotImplementedError(f"Model card creation is not supported for {self.__class__.__name__} (yet).")
326
+
327
+ def save_pretrained(
328
+ self,
329
+ save_directory: str,
330
+ safe_serialization: bool = False,
331
+ selected_adapters: Optional[list[str]] = None,
332
+ **kwargs: Any,
333
+ ):
334
+ raise NotImplementedError(f"Saving is not supported for {self.__class__.__name__} (yet).")
335
+
336
+ @classmethod
337
+ def from_pretrained(
338
+ cls,
339
+ model: nn.Module,
340
+ model_id: str | os.PathLike,
341
+ adapter_name: str = "default",
342
+ is_trainable: bool = False,
343
+ config: Optional[PeftConfig] = None,
344
+ **kwargs: Any,
345
+ ):
346
+ r"""
347
+ Instantiate a PEFT mixed model from a pretrained model and loaded PEFT weights.
348
+
349
+ Note that the passed `model` may be modified inplace.
350
+
351
+ Args:
352
+ model (`nn.Module`):
353
+ The model to be adapted.
354
+ model_id (`str` or `os.PathLike`):
355
+ The name of the PEFT configuration to use. Can be either:
356
+ - A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face
357
+ Hub.
358
+ - A path to a directory containing a PEFT configuration file saved using the `save_pretrained`
359
+ method (`./my_peft_config_directory/`).
360
+ adapter_name (`str`, *optional*, defaults to `"default"`):
361
+ The name of the adapter to be loaded. This is useful for loading multiple adapters.
362
+ is_trainable (`bool`, *optional*, defaults to `False`):
363
+ Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and used for
364
+ inference.
365
+ config ([`~peft.PeftConfig`], *optional*):
366
+ The configuration object to use instead of an automatically loaded configuration. This configuration
367
+ object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already
368
+ loaded before calling `from_pretrained`.
369
+ kwargs: (`optional`):
370
+ Additional keyword arguments passed along to the specific PEFT configuration class.
371
+ """
372
+ # note: adapted from PeftModel.from_pretrained
373
+ from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING
374
+
375
+ # load the config
376
+ if config is None:
377
+ config = PEFT_TYPE_TO_CONFIG_MAPPING[
378
+ PeftConfig._get_peft_type(
379
+ model_id,
380
+ subfolder=kwargs.get("subfolder", None),
381
+ revision=kwargs.get("revision", None),
382
+ cache_dir=kwargs.get("cache_dir", None),
383
+ use_auth_token=kwargs.get("use_auth_token", None),
384
+ )
385
+ ].from_pretrained(model_id, **kwargs)
386
+ elif isinstance(config, PeftConfig):
387
+ config.inference_mode = not is_trainable
388
+ else:
389
+ raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}")
390
+
391
+ # note: this is different from PeftModel.from_pretrained
392
+ if config.peft_type not in PEFT_TYPE_TO_MODEL_MAPPING:
393
+ raise ValueError(f"Adapter of type {config.peft_type} is not supported for mixed models.")
394
+
395
+ if (getattr(model, "hf_device_map", None) is not None) and len(
396
+ set(model.hf_device_map.values()).intersection({"cpu", "disk"})
397
+ ) > 0:
398
+ remove_hook_from_submodules(model)
399
+
400
+ if config.is_prompt_learning and is_trainable:
401
+ # note: should not be possible to reach, but just in case
402
+ raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.")
403
+ else:
404
+ config.inference_mode = not is_trainable
405
+
406
+ # note: this is different from PeftModel.from_pretrained, we always return a PeftMixedModel
407
+ model = cls(model, config, adapter_name)
408
+ model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs)
409
+ return model
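A hedged end-to-end sketch of creating a mixed model via `get_peft_model(..., mixed=True)` and adding a second, compatible adapter type; `base_model`, the configs, and the target module names are assumptions:

```py
# Hypothetical example: mix a LoRA and a LoHa adapter on the same base model.
from peft import LoHaConfig, LoraConfig, get_peft_model

lora_cfg = LoraConfig(r=8, target_modules=["q_proj", "v_proj"])
loha_cfg = LoHaConfig(r=8, target_modules=["q_proj", "v_proj"])

mixed = get_peft_model(base_model, lora_cfg, adapter_name="lora", mixed=True)  # PeftMixedModel
mixed.add_adapter("loha", loha_cfg)
mixed.set_adapter(["lora", "loha"])  # apply both adapters in the forward pass
```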
llmeval-env/lib/python3.10/site-packages/peft/peft_model.py ADDED
@@ -0,0 +1,1986 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from __future__ import annotations
16
+
17
+ import collections
18
+ import inspect
19
+ import os
20
+ import warnings
21
+ from contextlib import contextmanager
22
+ from copy import deepcopy
23
+ from typing import Any, Optional, Union
24
+
25
+ import packaging.version
26
+ import torch
27
+ import transformers
28
+ from accelerate import dispatch_model, infer_auto_device_map
29
+ from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules
30
+ from accelerate.utils import get_balanced_memory
31
+ from huggingface_hub import ModelCard, ModelCardData, hf_hub_download
32
+ from safetensors.torch import save_file as safe_save_file
33
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
34
+ from transformers import PreTrainedModel
35
+ from transformers.modeling_outputs import QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
36
+ from transformers.utils import PushToHubMixin
37
+
38
+ from . import __version__
39
+ from .config import PeftConfig
40
+ from .tuners import (
41
+ AdaLoraModel,
42
+ AdaptionPromptModel,
43
+ IA3Model,
44
+ LoHaModel,
45
+ LoKrModel,
46
+ LoraModel,
47
+ MultitaskPromptEmbedding,
48
+ OFTModel,
49
+ PolyModel,
50
+ PrefixEncoder,
51
+ PromptEmbedding,
52
+ PromptEncoder,
53
+ )
54
+ from .utils import (
55
+ SAFETENSORS_WEIGHTS_NAME,
56
+ TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING,
57
+ WEIGHTS_NAME,
58
+ PeftType,
59
+ TaskType,
60
+ _get_batch_size,
61
+ _prepare_prompt_learning_config,
62
+ _set_adapter,
63
+ _set_trainable,
64
+ get_peft_model_state_dict,
65
+ id_tensor_storage,
66
+ infer_device,
67
+ load_peft_weights,
68
+ set_peft_model_state_dict,
69
+ shift_tokens_right,
70
+ )
71
+
72
+
73
+ PEFT_TYPE_TO_MODEL_MAPPING = {
74
+ PeftType.LORA: LoraModel,
75
+ PeftType.LOHA: LoHaModel,
76
+ PeftType.LOKR: LoKrModel,
77
+ PeftType.PROMPT_TUNING: PromptEmbedding,
78
+ PeftType.P_TUNING: PromptEncoder,
79
+ PeftType.PREFIX_TUNING: PrefixEncoder,
80
+ PeftType.ADALORA: AdaLoraModel,
81
+ PeftType.ADAPTION_PROMPT: AdaptionPromptModel,
82
+ PeftType.IA3: IA3Model,
83
+ PeftType.OFT: OFTModel,
84
+ PeftType.POLY: PolyModel,
85
+ }
86
+
87
+
88
+ class PeftModel(PushToHubMixin, torch.nn.Module):
89
+ """
90
+ Base model encompassing various Peft methods.
91
+
92
+ Args:
93
+ model ([`~transformers.PreTrainedModel`]): The base transformer model used for Peft.
94
+ peft_config ([`PeftConfig`]): The configuration of the Peft model.
95
+ adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`.
96
+
97
+ **Attributes**:
98
+ - **base_model** ([`torch.nn.Module`]) -- The base transformer model used for Peft.
99
+ - **peft_config** ([`PeftConfig`]) -- The configuration of the Peft model.
100
+ - **modules_to_save** (`list` of `str`) -- The list of sub-module names to save when
101
+ saving the model.
102
+ - **prompt_encoder** ([`PromptEncoder`]) -- The prompt encoder used for Peft if
103
+ using [`PromptLearningConfig`].
104
+ - **prompt_tokens** (`torch.Tensor`) -- The virtual prompt tokens used for Peft if
105
+ using [`PromptLearningConfig`].
106
+ - **transformer_backbone_name** (`str`) -- The name of the transformer
107
+ backbone in the base model if using [`PromptLearningConfig`].
108
+ - **word_embeddings** (`torch.nn.Embedding`) -- The word embeddings of the transformer backbone
109
+ in the base model if using [`PromptLearningConfig`].
110
+ """
111
+
112
+ def __init__(self, model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default") -> None:
113
+ super().__init__()
114
+ self.modules_to_save = None
115
+ self.active_adapter = adapter_name
116
+ self.peft_type = peft_config.peft_type
117
+ # These args are special PEFT arguments that users can pass. They need to be removed before passing them to
118
+ # forward.
119
+ self.special_peft_forward_args = {"adapter_names"}
120
+
121
+ self._is_prompt_learning = peft_config.is_prompt_learning
122
+ if self._is_prompt_learning:
123
+ self._peft_config = {adapter_name: peft_config}
124
+ self.base_model = model
125
+ self.add_adapter(adapter_name, peft_config)
126
+ else:
127
+ self._peft_config = None
128
+ cls = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type]
129
+ self.base_model = cls(model, {adapter_name: peft_config}, adapter_name)
130
+ self.set_additional_trainable_modules(peft_config, adapter_name)
131
+
132
+ if getattr(model, "is_gradient_checkpointing", True):
133
+ model = self._prepare_model_for_gradient_checkpointing(model)
134
+
135
+ # the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid
136
+ # numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected
137
+ # behavior we disable that in this line.
138
+ if hasattr(self.base_model, "config") and hasattr(self.base_model.config, "pretraining_tp"):
139
+ self.base_model.config.pretraining_tp = 1
140
+
141
+ @property
142
+ def peft_config(self) -> dict[str, PeftConfig]:
143
+ if self._is_prompt_learning:
144
+ return self._peft_config
145
+ return self.base_model.peft_config
146
+
147
+ @property
148
+ def active_adapters(self) -> list[str]:
149
+ try:
150
+ adapters = self.base_model.active_adapters
151
+ except AttributeError:
152
+ adapters = self.active_adapter
153
+ if isinstance(adapters, str):
154
+ adapters = [adapters]
155
+ return adapters
156
+
157
+ @peft_config.setter
158
+ def peft_config(self, value: dict[str, PeftConfig]):
159
+ if self._is_prompt_learning:
160
+ self._peft_config = value
161
+ else:
162
+ self.base_model.peft_config = value
163
+
164
+ def save_pretrained(
165
+ self,
166
+ save_directory: str,
167
+ safe_serialization: bool = True,
168
+ selected_adapters: Optional[list[str]] = None,
169
+ save_embedding_layers: Union[str, bool] = "auto",
170
+ is_main_process: bool = True,
171
+ **kwargs: Any,
172
+ ) -> None:
173
+ r"""
174
+ This function saves the adapter model and the adapter configuration files to a directory, so that it can be
175
+ reloaded using the [`PeftModel.from_pretrained`] class method, and also used by the [`PeftModel.push_to_hub`]
176
+ method.
177
+
178
+ Args:
179
+ save_directory (`str`):
180
+ Directory where the adapter model and configuration files will be saved (will be created if it does not
181
+ exist).
182
+ safe_serialization (`bool`, *optional*):
183
+ Whether to save the adapter files in safetensors format, defaults to `True`.
184
+ selected_adapters (`List[str]`, *optional*):
185
+ A list of adapters to be saved. If `None`, will default to all adapters.
186
+ save_embedding_layers (`Union[bool, str]`, *optional*, defaults to `"auto"`):
187
+ If `True`, save the embedding layers in addition to adapter weights. If `auto`, checks the common
188
+ embedding layers `peft.utils.other.EMBEDDING_LAYER_NAMES` in config's `target_modules` when available.
189
+ and automatically sets the boolean flag. This only works for 🤗 transformers models.
190
+ is_main_process (`bool`, *optional*):
191
+ Whether the process calling this is the main process or not. Will default to `True`. Will not save the
192
+ checkpoint if not on the main process, which is important for multi device setups (e.g. DDP).
193
+ kwargs (additional keyword arguments, *optional*):
194
+ Additional keyword arguments passed along to the `push_to_hub` method.
195
+ """
196
+ if os.path.isfile(save_directory):
197
+ raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
198
+
199
+ if selected_adapters is None:
200
+ selected_adapters = list(self.peft_config.keys())
201
+ else:
202
+ if any(
203
+ selected_adapter_name not in list(self.peft_config.keys())
204
+ for selected_adapter_name in selected_adapters
205
+ ):
206
+ raise ValueError(
207
+ f"You passed an invalid `selected_adapters` arguments, current supported adapter names are"
208
+ f" {list(self.peft_config.keys())} - got {selected_adapters}."
209
+ )
210
+
211
+ if is_main_process:
212
+ os.makedirs(save_directory, exist_ok=True)
213
+ self.create_or_update_model_card(save_directory)
214
+
215
+ for adapter_name in selected_adapters:
216
+ peft_config = self.peft_config[adapter_name]
217
+ # save only the trainable weights
218
+ output_state_dict = get_peft_model_state_dict(
219
+ self,
220
+ state_dict=kwargs.get("state_dict", None),
221
+ adapter_name=adapter_name,
222
+ save_embedding_layers=save_embedding_layers,
223
+ )
224
+ output_dir = os.path.join(save_directory, adapter_name) if adapter_name != "default" else save_directory
225
+ os.makedirs(output_dir, exist_ok=True)
226
+
227
+ if is_main_process and safe_serialization:
228
+ # Section copied from: https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_utils.py#L2111-L2134
229
+ # Safetensors does not allow tensor aliasing.
230
+ # We're going to remove aliases before saving
231
+ ptrs = collections.defaultdict(list)
232
+ for name, tensor in output_state_dict.items():
233
+ # Sometimes in the state_dict we have non-tensor objects.
234
+ # e.g. in bitsandbytes we have some `str` objects in the state_dict
235
+ if isinstance(tensor, torch.Tensor):
236
+ ptrs[id_tensor_storage(tensor)].append(name)
237
+ else:
238
+ # In the non-tensor case, fall back to the pointer of the object itself
239
+ ptrs[id(tensor)].append(name)
240
+
241
+ # These are all the pointers of shared tensors.
242
+ shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1}
243
+
244
+ for _, names in shared_ptrs.items():
245
+ # Here we just clone the shared tensors to avoid tensor aliasing which is
246
+ # not supported in safetensors.
247
+ for shared_tensor_name in names[1:]:
248
+ output_state_dict[shared_tensor_name] = output_state_dict[shared_tensor_name].clone()
249
+
250
+ safe_save_file(
251
+ output_state_dict,
252
+ os.path.join(output_dir, SAFETENSORS_WEIGHTS_NAME),
253
+ metadata={"format": "pt"},
254
+ )
255
+ elif is_main_process:
256
+ torch.save(output_state_dict, os.path.join(output_dir, WEIGHTS_NAME))
257
+
258
+ # save the config and change the inference mode to `True`
259
+ if peft_config.base_model_name_or_path is None:
260
+ peft_config.base_model_name_or_path = (
261
+ self.base_model.__dict__.get("name_or_path", None)
262
+ if peft_config.is_prompt_learning
263
+ else self.base_model.model.__dict__.get("name_or_path", None)
264
+ )
265
+ inference_mode = peft_config.inference_mode
266
+ peft_config.inference_mode = True
267
+
268
+ if peft_config.task_type is None:
269
+ # deal with auto mapping
270
+ base_model_class = self._get_base_model_class(
271
+ is_prompt_tuning=peft_config.is_prompt_learning,
272
+ )
273
+ parent_library = base_model_class.__module__
274
+
275
+ auto_mapping_dict = {
276
+ "base_model_class": base_model_class.__name__,
277
+ "parent_library": parent_library,
278
+ }
279
+ else:
280
+ auto_mapping_dict = None
281
+
282
+ if is_main_process:
283
+ peft_config.save_pretrained(output_dir, auto_mapping_dict=auto_mapping_dict)
284
+ peft_config.inference_mode = inference_mode
285
+
286
+ @classmethod
287
+ def from_pretrained(
288
+ cls,
289
+ model: torch.nn.Module,
290
+ model_id: Union[str, os.PathLike],
291
+ adapter_name: str = "default",
292
+ is_trainable: bool = False,
293
+ config: Optional[PeftConfig] = None,
294
+ **kwargs: Any,
295
+ ) -> PeftModel:
296
+ r"""
297
+ Instantiate a PEFT model from a pretrained model and loaded PEFT weights.
298
+
299
+ Note that the passed `model` may be modified inplace.
300
+
301
+ Args:
302
+ model ([`torch.nn.Module`]):
303
+ The model to be adapted. For 🤗 Transformers models, the model should be initialized with the
304
+ [`~transformers.PreTrainedModel.from_pretrained`].
305
+ model_id (`str` or `os.PathLike`):
306
+ The name of the PEFT configuration to use. Can be either:
307
+ - A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face
308
+ Hub.
309
+ - A path to a directory containing a PEFT configuration file saved using the `save_pretrained`
310
+ method (`./my_peft_config_directory/`).
311
+ adapter_name (`str`, *optional*, defaults to `"default"`):
312
+ The name of the adapter to be loaded. This is useful for loading multiple adapters.
313
+ is_trainable (`bool`, *optional*, defaults to `False`):
314
+ Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be
315
+ used for inference.
316
+ config ([`~peft.PeftConfig`], *optional*):
317
+ The configuration object to use instead of an automatically loaded configuration. This configuration
318
+ object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already
319
+ loaded before calling `from_pretrained`.
320
+ kwargs: (`optional`):
321
+ Additional keyword arguments passed along to the specific PEFT configuration class.
322
+ """
323
+ from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING
324
+
325
+ # load the config
326
+ if config is None:
327
+ config = PEFT_TYPE_TO_CONFIG_MAPPING[
328
+ PeftConfig._get_peft_type(
329
+ model_id,
330
+ subfolder=kwargs.get("subfolder", None),
331
+ revision=kwargs.get("revision", None),
332
+ cache_dir=kwargs.get("cache_dir", None),
333
+ use_auth_token=kwargs.get("use_auth_token", None),
334
+ token=kwargs.get("token", None),
335
+ )
336
+ ].from_pretrained(model_id, **kwargs)
337
+ elif isinstance(config, PeftConfig):
338
+ config.inference_mode = not is_trainable
339
+ else:
340
+ raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}")
341
+
342
+ if (getattr(model, "hf_device_map", None) is not None) and len(
343
+ set(model.hf_device_map.values()).intersection({"cpu", "disk"})
344
+ ) > 0:
345
+ remove_hook_from_submodules(model)
346
+
347
+ if config.is_prompt_learning and is_trainable:
348
+ raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.")
349
+ else:
350
+ config.inference_mode = not is_trainable
351
+
352
+ if config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys():
353
+ model = cls(model, config, adapter_name)
354
+ else:
355
+ model = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[config.task_type](model, config, adapter_name)
356
+ model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs)
357
+ return model
358
+
359
+ def _setup_prompt_encoder(self, adapter_name: str):
360
+ config = self.peft_config[adapter_name]
361
+ if not hasattr(self, "prompt_encoder"):
362
+ self.prompt_encoder = torch.nn.ModuleDict({})
363
+ self.prompt_tokens = {}
364
+ transformer_backbone = None
365
+ for name, module in self.base_model.named_children():
366
+ for param in module.parameters():
367
+ param.requires_grad = False
368
+ if isinstance(module, PreTrainedModel):
369
+ # Make sure to freeze Tranformers model
370
+ if transformer_backbone is None:
371
+ transformer_backbone = module
372
+ self.transformer_backbone_name = name
373
+ if transformer_backbone is None:
374
+ transformer_backbone = self.base_model
375
+
376
+ if config.num_transformer_submodules is None:
377
+ config.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1
378
+
379
+ for named_param, value in list(transformer_backbone.named_parameters()):
380
+ # for ZeRO-3, the tensor is sharded across accelerators and deepspeed modifies it to a tensor with shape [0]
381
+ # the actual unsharded shape is stored in "ds_shape" attribute
382
+ # special handling is needed in case the model is initialized in deepspeed.zero.Init() context or HfDeepSpeedConfig
383
+ # has been called before
384
+ # For reference refer to issue: https://github.com/huggingface/peft/issues/996
385
+ deepspeed_distributed_tensor_shape = getattr(value, "ds_shape", None)
386
+
387
+ if value.shape[0] == self.base_model.config.vocab_size or (
388
+ deepspeed_distributed_tensor_shape is not None
389
+ and deepspeed_distributed_tensor_shape[0] == self.base_model.config.vocab_size
390
+ ):
391
+ self.word_embeddings = transformer_backbone.get_submodule(named_param.replace(".weight", ""))
392
+ break
393
+
394
+ if config.peft_type == PeftType.PROMPT_TUNING:
395
+ prompt_encoder = PromptEmbedding(config, self.word_embeddings)
396
+ elif config.peft_type == PeftType.MULTITASK_PROMPT_TUNING:
397
+ prompt_encoder = MultitaskPromptEmbedding(config, self.word_embeddings)
398
+ elif config.peft_type == PeftType.P_TUNING:
399
+ prompt_encoder = PromptEncoder(config)
400
+ elif config.peft_type == PeftType.PREFIX_TUNING:
401
+ prompt_encoder = PrefixEncoder(config)
402
+ else:
403
+ raise ValueError("Not supported")
404
+
405
+ prompt_encoder = prompt_encoder.to(self.device)
406
+ self.prompt_encoder.update(torch.nn.ModuleDict({adapter_name: prompt_encoder}))
407
+ self.prompt_tokens[adapter_name] = torch.arange(
408
+ config.num_virtual_tokens * config.num_transformer_submodules
409
+ ).long()
410
+
411
+ def _prepare_model_for_gradient_checkpointing(self, model: PreTrainedModel):
412
+ r"""
413
+ Prepares the model for gradient checkpointing if necessary
414
+ """
415
+ if not (
416
+ getattr(model, "is_loaded_in_8bit", False)
417
+ or getattr(model, "is_loaded_in_4bit", False)
418
+ or getattr(model, "is_quantized", False)
419
+ ):
420
+ if hasattr(model, "enable_input_require_grads"):
421
+ model.enable_input_require_grads()
422
+ elif hasattr(model, "get_input_embeddings"):
423
+
424
+ def make_inputs_require_grad(module, input, output):
425
+ output.requires_grad_(True)
426
+
427
+ model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
428
+ return model
429
+
430
+ def get_prompt_embedding_to_save(self, adapter_name: str) -> torch.Tensor:
431
+ """
432
+ Returns the prompt embedding to save when saving the model. Only applicable when using a prompt learning
433
+ method.
434
+ """
435
+ prompt_encoder = self.prompt_encoder[adapter_name]
436
+ prompt_tokens = (
437
+ self.prompt_tokens[adapter_name].unsqueeze(0).expand(1, -1).to(prompt_encoder.embedding.weight.device)
438
+ )
439
+ if self.peft_config[adapter_name].peft_type == PeftType.PREFIX_TUNING:
440
+ prompt_tokens = prompt_tokens[:, : self.peft_config[adapter_name].num_virtual_tokens]
441
+
442
+ if self.peft_config[adapter_name].peft_type == PeftType.MULTITASK_PROMPT_TUNING:
443
+ prompt_embeddings = super(MultitaskPromptEmbedding, prompt_encoder).forward(prompt_tokens)
444
+ else:
445
+ prompt_embeddings = prompt_encoder(prompt_tokens)
446
+
447
+ return prompt_embeddings[0].detach().cpu()
448
+
449
+ def get_prompt(self, batch_size: int, task_ids: Optional[torch.Tensor] = None) -> torch.Tensor:
450
+ """
451
+ Returns the virtual prompts to use for Peft. Only applicable when using a prompt learning method.
452
+ """
453
+ peft_config = self.active_peft_config
454
+ prompt_encoder = self.prompt_encoder[self.active_adapter]
455
+ prompt_tokens = (
456
+ self.prompt_tokens[self.active_adapter]
457
+ .unsqueeze(0)
458
+ .expand(batch_size, -1)
459
+ .to(prompt_encoder.embedding.weight.device)
460
+ )
461
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
462
+ prompt_tokens = prompt_tokens[:, : peft_config.num_virtual_tokens]
463
+ if peft_config.inference_mode:
464
+ past_key_values = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1)
465
+ else:
466
+ past_key_values = prompt_encoder(prompt_tokens)
467
+ if self.base_model_torch_dtype is not None:
468
+ past_key_values = past_key_values.to(self.base_model_torch_dtype)
469
+ past_key_values = past_key_values.view(
470
+ batch_size,
471
+ peft_config.num_virtual_tokens,
472
+ peft_config.num_layers * 2,
473
+ peft_config.num_attention_heads,
474
+ peft_config.token_dim // peft_config.num_attention_heads,
475
+ )
476
+ if peft_config.num_transformer_submodules == 2:
477
+ past_key_values = torch.cat([past_key_values, past_key_values], dim=2)
478
+ past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split(
479
+ peft_config.num_transformer_submodules * 2
480
+ )
481
+ if TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING.get(self.config.model_type, None) is not None:
482
+ post_process_fn = TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING[self.config.model_type]
483
+ past_key_values = post_process_fn(past_key_values)
484
+ return past_key_values
485
+ else:
486
+ if peft_config.peft_type == PeftType.MULTITASK_PROMPT_TUNING:
487
+ prompts = prompt_encoder(prompt_tokens, task_ids)
488
+ else:
489
+ if peft_config.inference_mode:
490
+ prompts = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1)
491
+ else:
492
+ prompts = prompt_encoder(prompt_tokens)
493
+ return prompts
494
+
495
+ def get_nb_trainable_parameters(self) -> tuple[int, int]:
496
+ r"""
497
+ Returns the number of trainable parameters and the number of all parameters in the model.
498
+ """
499
+ trainable_params = 0
500
+ all_param = 0
501
+ for _, param in self.named_parameters():
502
+ num_params = param.numel()
503
+ # if using DS Zero 3 and the weights are initialized empty
504
+ if num_params == 0 and hasattr(param, "ds_numel"):
505
+ num_params = param.ds_numel
506
+
507
+ # Due to the design of 4bit linear layers from bitsandbytes
508
+ # one needs to multiply the number of parameters by 2 to get
509
+ # the correct number of parameters
510
+ if param.__class__.__name__ == "Params4bit":
511
+ num_bytes = param.quant_storage.itemsize if hasattr(param, "quant_storage") else 1
512
+ num_params = num_params * 2 * num_bytes
513
+
514
+ all_param += num_params
515
+ if param.requires_grad:
516
+ trainable_params += num_params
517
+
518
+ return trainable_params, all_param
519
+
520
+ def print_trainable_parameters(self) -> None:
521
+ """
522
+ Prints the number of trainable parameters in the model.
523
+
524
+ Note: print_trainable_parameters() uses get_nb_trainable_parameters() which is different from
525
+ num_parameters(only_trainable=True) from huggingface/transformers. get_nb_trainable_parameters() returns
526
+ (trainable parameters, all parameters) of the Peft Model which includes modified backbone transformer model.
527
+ For techniques like LoRA, the backbone transformer model is modified in place with LoRA modules. However, for
528
+ prompt tuning, the backbone transformer model is unmodified. num_parameters(only_trainable=True) returns number
529
+ of trainable parameters of the backbone transformer model which can be different.
530
+ """
531
+ trainable_params, all_param = self.get_nb_trainable_parameters()
532
+
533
+ print(
534
+ f"trainable params: {trainable_params:,d} || all params: {all_param:,d} || trainable%: {100 * trainable_params / all_param}"
535
+ )
536
+
537
+ def __getattr__(self, name: str):
538
+ """Forward missing attributes to the wrapped module."""
539
+ try:
540
+ return super().__getattr__(name) # defer to nn.Module's logic
541
+ except AttributeError:
542
+ return getattr(self.base_model, name)
543
+
544
+ @contextmanager
545
+ def _enable_peft_forward_hooks(self, *args, **kwargs):
546
+ # If the base model has a method called _enable_peft_forward_hooks, it is invoked as a context. Otherwise, this
547
+ # runs without any changes
548
+ if hasattr(self.base_model, "_enable_peft_forward_hooks"):
549
+ with self.base_model._enable_peft_forward_hooks(*args, **kwargs):
550
+ yield
551
+ return
552
+ else:
553
+ # nothing to enable
554
+ yield
555
+ return
556
+
557
+ def forward(self, *args: Any, **kwargs: Any):
558
+ """
559
+ Forward pass of the model.
560
+ """
561
+ with self._enable_peft_forward_hooks(*args, **kwargs):
562
+ kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
563
+ return self.get_base_model()(*args, **kwargs)
564
+
565
+ def generate(self, *args, **kwargs):
566
+ with self._enable_peft_forward_hooks(*args, **kwargs):
567
+ kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
568
+ return self.get_base_model().generate(*args, **kwargs)
569
+
570
+ def _get_base_model_class(self, is_prompt_tuning=False):
571
+ """
572
+ Returns the base model class.
573
+ """
574
+ if not is_prompt_tuning:
575
+ return self.base_model.model.__class__
576
+ return self.base_model.__class__
577
+
578
+ @contextmanager
579
+ def disable_adapter(self):
580
+ """
581
+ Context manager that disables the adapter module. Use this to run inference on the base model.
582
+
583
+ Example:
584
+
585
+ ```py
586
+ >>> with model.disable_adapter():
587
+ ... model(inputs)
588
+ ```
589
+ """
590
+ try:
591
+ if self.peft_config[self.active_adapter].is_prompt_learning:
592
+ # TODO: consider replacing this patching of methods with a more robust mechanism: setting a flag and
593
+ # letting the underlying methods deal with it, same as how LoRA does it.
594
+ old_forward = self.forward
595
+ self.forward = self.base_model.forward
596
+ old_prepare_inputs_for_generation = self.prepare_inputs_for_generation
597
+ self.prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation
598
+ else:
599
+ self.base_model.disable_adapter_layers()
600
+ yield
601
+ finally:
602
+ if self.peft_config[self.active_adapter].is_prompt_learning:
603
+ self.forward = old_forward
604
+ self.prepare_inputs_for_generation = old_prepare_inputs_for_generation
605
+ else:
606
+ self.base_model.enable_adapter_layers()
607
+
608
+ def get_base_model(self) -> torch.nn.Module:
609
+ """
610
+ Returns the base model.
611
+ """
612
+ return (
613
+ self.base_model
614
+ if (self.active_peft_config.is_prompt_learning or self.peft_type == PeftType.POLY)
615
+ else self.base_model.model
616
+ )
617
+
618
+ def add_adapter(self, adapter_name: str, peft_config: PeftConfig) -> None:
619
+ """
620
+ Add an adapter to the model based on the passed configuration.
621
+
622
+ This adapter is not trained. To load a trained adapter, check out [`PeftModel.load_adapter`].
623
+
624
+ The name for the new adapter should be unique.
625
+
626
+ The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active
627
+ adapter.
628
+
629
+ Args:
630
+ adapter_name (`str`):
631
+ The name of the adapter to be added.
632
+ peft_config ([`PeftConfig`]):
633
+ The configuration of the adapter to be added.
634
+ """
635
+ if peft_config.peft_type != self.peft_type:
636
+ raise ValueError(
637
+ f"Cannot combine adapters with different peft types. "
638
+ f"Found {self.peft_type} and {peft_config.peft_type}."
639
+ )
640
+
641
+ try:
642
+ if peft_config.is_prompt_learning:
643
+ self.peft_config[adapter_name] = peft_config
644
+ if hasattr(self.config, "to_dict"):
645
+ dict_config = self.config.to_dict()
646
+ else:
647
+ dict_config = self.config
648
+
649
+ peft_config = _prepare_prompt_learning_config(peft_config, dict_config)
650
+ self._setup_prompt_encoder(adapter_name)
651
+ elif peft_config.is_adaption_prompt:
652
+ self.base_model.add_adapter(adapter_name, peft_config)
653
+ else:
654
+ self.peft_config[adapter_name] = peft_config
655
+ self.base_model.inject_adapter(self.base_model.model, adapter_name)
656
+ except Exception: # something went wrong, roll back
657
+ if adapter_name in self.peft_config:
658
+ del self.peft_config[adapter_name]
659
+ raise
660
+
661
+ self.set_additional_trainable_modules(peft_config, adapter_name)
662
+
663
+ def set_additional_trainable_modules(self, peft_config, adapter_name):
664
+ if getattr(peft_config, "modules_to_save", None) is not None:
665
+ if self.modules_to_save is None:
666
+ self.modules_to_save = set(peft_config.modules_to_save)
667
+ else:
668
+ self.modules_to_save.update(peft_config.modules_to_save)
669
+ _set_trainable(self, adapter_name)
670
+
671
+ @classmethod
672
+ def _split_kwargs(cls, kwargs: dict[str, Any]):
673
+ _kwargs_not_in_hf_hub_download_signature = ("use_auth_token",)
674
+ hf_hub_download_kwargs = {}
675
+ other_kwargs = {}
676
+
677
+ for key, value in kwargs.items():
678
+ if key in inspect.signature(hf_hub_download).parameters or key in _kwargs_not_in_hf_hub_download_signature:
679
+ hf_hub_download_kwargs[key] = value
680
+ else:
681
+ other_kwargs[key] = value
682
+
683
+ return hf_hub_download_kwargs, other_kwargs
684
+
685
+ def load_adapter(self, model_id: str, adapter_name: str, is_trainable: bool = False, **kwargs: Any):
686
+ """
687
+ Load a trained adapter into the model.
688
+
689
+ The name for the new adapter should be unique.
690
+
691
+ The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active
692
+ adapter.
693
+
694
+ Args:
695
+ adapter_name (`str`):
696
+ The name of the adapter to be added.
697
+ peft_config ([`PeftConfig`]):
698
+ The configuration of the adapter to be added.
699
+ is_trainable (`bool`, *optional*, defaults to `False`):
700
+ Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be
701
+ used for inference.
702
+ kwargs: (`optional`):
703
+ Additional arguments to modify the way the adapter is loaded, e.g. the token for Hugging Face Hub.
704
+ """
705
+ from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING
706
+
707
+ hf_hub_download_kwargs, kwargs = self._split_kwargs(kwargs)
708
+ torch_device = infer_device()
709
+
710
+ if adapter_name not in self.peft_config:
711
+ # load the config
712
+ peft_config = PEFT_TYPE_TO_CONFIG_MAPPING[
713
+ PeftConfig._get_peft_type(
714
+ model_id,
715
+ **hf_hub_download_kwargs,
716
+ )
717
+ ].from_pretrained(
718
+ model_id,
719
+ **hf_hub_download_kwargs,
720
+ )
721
+ if peft_config.is_prompt_learning and is_trainable:
722
+ raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.")
723
+ else:
724
+ peft_config.inference_mode = not is_trainable
725
+ self.add_adapter(adapter_name, peft_config)
726
+
727
+ adapters_weights = load_peft_weights(model_id, device=torch_device, **hf_hub_download_kwargs)
728
+
729
+ # load the weights into the model
730
+ load_result = set_peft_model_state_dict(self, adapters_weights, adapter_name=adapter_name)
731
+ if (
732
+ (getattr(self, "hf_device_map", None) is not None)
733
+ and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0)
734
+ and len(self.peft_config) == 1
735
+ ):
736
+ device_map = kwargs.get("device_map", "auto")
737
+ max_memory = kwargs.get("max_memory", None)
738
+ offload_dir = kwargs.get("offload_folder", None)
739
+ offload_index = kwargs.get("offload_index", None)
740
+
741
+ dispatch_model_kwargs = {}
742
+ # Safety checker for previous `accelerate` versions
743
+ # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/
744
+ if "offload_index" in inspect.signature(dispatch_model).parameters:
745
+ dispatch_model_kwargs["offload_index"] = offload_index
746
+
747
+ no_split_module_classes = self._no_split_modules
748
+
749
+ if device_map != "sequential":
750
+ max_memory = get_balanced_memory(
751
+ self,
752
+ max_memory=max_memory,
753
+ no_split_module_classes=no_split_module_classes,
754
+ low_zero=(device_map == "balanced_low_0"),
755
+ )
756
+ if isinstance(device_map, str):
757
+ device_map = infer_auto_device_map(
758
+ self, max_memory=max_memory, no_split_module_classes=no_split_module_classes
759
+ )
760
+ dispatch_model(
761
+ self,
762
+ device_map=device_map,
763
+ offload_dir=offload_dir,
764
+ **dispatch_model_kwargs,
765
+ )
766
+ hook = AlignDevicesHook(io_same_device=True)
767
+ if self.peft_config[adapter_name].is_prompt_learning:
768
+ remove_hook_from_submodules(self.prompt_encoder)
769
+ add_hook_to_module(self.get_base_model(), hook)
770
+
771
+ # Set model in evaluation mode to deactivate Dropout modules by default
772
+ if not is_trainable:
773
+ self.eval()
774
+ return load_result
775
+
776
+ def set_adapter(self, adapter_name: str) -> None:
777
+ """
778
+ Sets the active adapter.
779
+
780
+ Only one adapter can be active at a time.
781
+
782
+ Additionally, this function will set the specified adapter to trainable (i.e., requires_grad=True). If this is
783
+ not desired, use the following code.
784
+
785
+ ```py
786
+ >>> for name, param in model_peft.named_parameters():
787
+ ... if ...: # some check on name (ex. if 'lora' in name)
788
+ ... param.requires_grad = False
789
+ ```
790
+
791
+ Args:
792
+ adapter_name (`str`):
793
+ The name of the adapter to be set as active. The adapter must be loaded first.
794
+ """
795
+ if adapter_name not in self.peft_config:
796
+ raise ValueError(f"Adapter {adapter_name} not found.")
797
+ self.active_adapter = adapter_name
798
+ if not self.peft_config[adapter_name].is_prompt_learning:
799
+ self.base_model.set_adapter(adapter_name)
800
+ _set_adapter(self, adapter_name)
801
+
802
+ @property
803
+ def base_model_torch_dtype(self):
804
+ return getattr(self.base_model, "dtype", None)
805
+
806
+ @property
807
+ def active_peft_config(self):
808
+ return self.peft_config[self.active_adapter]
809
+
810
+ def create_or_update_model_card(self, output_dir: str):
811
+ """
812
+ Updates or create model card to include information about peft:
813
+ 1. Adds `peft` library tag
814
+ 2. Adds peft version
815
+ 3. Adds base model info
816
+ 4. Adds quantization information if it was used
817
+ """
818
+
819
+ filename = os.path.join(output_dir, "README.md")
820
+
821
+ card = ModelCard.load(filename) if os.path.exists(filename) else ModelCard.from_template(ModelCardData())
822
+
823
+ card.data["library_name"] = "peft"
824
+
825
+ model_config = getattr(self, "config", None)
826
+ if hasattr(model_config, "to_dict"):
827
+ model_config = model_config.to_dict()
828
+ if model_config is not None and "_name_or_path" in model_config:
829
+ card.data["base_model"] = model_config["_name_or_path"]
830
+
831
+ lines = card.text.splitlines()
832
+
833
+ quantization_config = None
834
+ if hasattr(model_config, "quantization_config"):
835
+ quantization_config = self.config.quantization_config.to_dict()
836
+ training_config_text = ""
837
+ quantization_prefix = "The following `bitsandbytes` quantization config was used during training:"
838
+ # Adds quantization information if it was used
839
+ if quantization_config is not None:
840
+ training_config_text += f"\n{quantization_prefix}\n"
841
+ training_config_text += "\n".join([f"- {name}: {value}" for name, value in quantization_config.items()])
842
+ training_config_text += "\n"
843
+
844
+ training_procedure_heading = "## Training procedure"
845
+ if quantization_prefix not in lines and bool(training_config_text):
846
+ if training_procedure_heading in lines:
847
+ lines.insert(lines.index(training_procedure_heading) + 2, training_config_text)
848
+ else:
849
+ lines.append(f"{training_procedure_heading}\n{training_config_text}")
850
+
851
+ # Adds peft version
852
+ framework_block_heading = "### Framework versions"
853
+ if f"- PEFT {__version__}" not in lines:
854
+ if framework_block_heading in lines:
855
+ lines.insert(lines.index(framework_block_heading) + 2, f"- PEFT {__version__}")
856
+ else:
857
+ lines.append(f"{framework_block_heading}\n\n- PEFT {__version__}")
858
+
859
+ card.text = "\n".join(lines)
860
+ card.save(filename)
861
+
862
+
863
+ class PeftModelForSequenceClassification(PeftModel):
864
+ """
865
+ Peft model for sequence classification tasks.
866
+
867
+ Args:
868
+ model ([`~transformers.PreTrainedModel`]): Base transformer model.
869
+ peft_config ([`PeftConfig`]): Peft config.
870
+
871
+ **Attributes**:
872
+ - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.
873
+ - **cls_layer_name** (`str`) -- The name of the classification layer.
874
+
875
+ Example:
876
+
877
+ ```py
878
+ >>> from transformers import AutoModelForSequenceClassification
879
+ >>> from peft import PeftModelForSequenceClassification, get_peft_config
880
+
881
+ >>> config = {
882
+ ... "peft_type": "PREFIX_TUNING",
883
+ ... "task_type": "SEQ_CLS",
884
+ ... "inference_mode": False,
885
+ ... "num_virtual_tokens": 20,
886
+ ... "token_dim": 768,
887
+ ... "num_transformer_submodules": 1,
888
+ ... "num_attention_heads": 12,
889
+ ... "num_layers": 12,
890
+ ... "encoder_hidden_size": 768,
891
+ ... "prefix_projection": False,
892
+ ... "postprocess_past_key_value_function": None,
893
+ ... }
894
+
895
+ >>> peft_config = get_peft_config(config)
896
+ >>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased")
897
+ >>> peft_model = PeftModelForSequenceClassification(model, peft_config)
898
+ >>> peft_model.print_trainable_parameters()
899
+ trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117
900
+ ```
901
+ """
902
+
903
+ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None:
904
+ super().__init__(model, peft_config, adapter_name)
905
+ if self.modules_to_save is None:
906
+ self.modules_to_save = {"classifier", "score"}
907
+ else:
908
+ self.modules_to_save.update({"classifier", "score"})
909
+
910
+ for name, _ in self.base_model.named_children():
911
+ if any(module_name in name for module_name in self.modules_to_save):
912
+ self.cls_layer_name = name
913
+ break
914
+
915
+ # to make sure classifier layer is trainable
916
+ _set_trainable(self, adapter_name)
917
+
918
+ def forward(
919
+ self,
920
+ input_ids=None,
921
+ attention_mask=None,
922
+ inputs_embeds=None,
923
+ labels=None,
924
+ output_attentions=None,
925
+ output_hidden_states=None,
926
+ return_dict=None,
927
+ task_ids=None,
928
+ **kwargs,
929
+ ):
930
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
931
+ peft_config = self.active_peft_config
932
+ if not peft_config.is_prompt_learning:
933
+ with self._enable_peft_forward_hooks(**kwargs):
934
+ kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
935
+ if peft_config.peft_type == PeftType.POLY:
936
+ kwargs["task_ids"] = task_ids
937
+ return self.base_model(
938
+ input_ids=input_ids,
939
+ attention_mask=attention_mask,
940
+ inputs_embeds=inputs_embeds,
941
+ labels=labels,
942
+ output_attentions=output_attentions,
943
+ output_hidden_states=output_hidden_states,
944
+ return_dict=return_dict,
945
+ **kwargs,
946
+ )
947
+
948
+ batch_size = _get_batch_size(input_ids, inputs_embeds)
949
+ if attention_mask is not None:
950
+ # concat prompt attention mask
951
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
952
+ attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
953
+ if kwargs.get("position_ids", None) is not None:
954
+ warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
955
+ kwargs["position_ids"] = None
956
+ kwargs.update(
957
+ {
958
+ "attention_mask": attention_mask,
959
+ "labels": labels,
960
+ "output_attentions": output_attentions,
961
+ "output_hidden_states": output_hidden_states,
962
+ "return_dict": return_dict,
963
+ }
964
+ )
965
+
966
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
967
+ return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)
968
+ else:
969
+ if kwargs.get("token_type_ids", None) is not None:
970
+ kwargs["token_type_ids"] = torch.cat(
971
+ (
972
+ torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device),
973
+ kwargs["token_type_ids"],
974
+ ),
975
+ dim=1,
976
+ ).long()
977
+ if inputs_embeds is None:
978
+ inputs_embeds = self.word_embeddings(input_ids)
979
+ prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids)
980
+ prompts = prompts.to(inputs_embeds.dtype)
981
+ inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
982
+ return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
983
+
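A standalone sketch of the shape bookkeeping in the prompt-learning branch of `forward` above: the attention mask and the input embeddings both grow by `num_virtual_tokens` (all values below are illustrative):

```py
>>> import torch

>>> batch_size, seq_len, num_virtual_tokens, hidden = 2, 5, 20, 768
>>> attention_mask = torch.ones(batch_size, seq_len)
>>> prefix_attention_mask = torch.ones(batch_size, num_virtual_tokens)
>>> torch.cat((prefix_attention_mask, attention_mask), dim=1).shape
torch.Size([2, 25])
>>> inputs_embeds = torch.randn(batch_size, seq_len, hidden)
>>> prompts = torch.randn(batch_size, num_virtual_tokens, hidden)
>>> torch.cat((prompts, inputs_embeds), dim=1).shape
torch.Size([2, 25, 768])
```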
984
+ def _prefix_tuning_forward(
985
+ self,
986
+ input_ids=None,
987
+ attention_mask=None,
988
+ inputs_embeds=None,
989
+ labels=None,
990
+ output_attentions=None,
991
+ output_hidden_states=None,
992
+ return_dict=None,
993
+ **kwargs,
994
+ ):
995
+ batch_size = _get_batch_size(input_ids, inputs_embeds)
996
+ past_key_values = self.get_prompt(batch_size)
997
+ fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())
998
+ kwargs.update(
999
+ {
1000
+ "input_ids": input_ids,
1001
+ "attention_mask": attention_mask,
1002
+ "inputs_embeds": inputs_embeds,
1003
+ "output_attentions": output_attentions,
1004
+ "output_hidden_states": output_hidden_states,
1005
+ "return_dict": return_dict,
1006
+ "past_key_values": past_key_values,
1007
+ }
1008
+ )
1009
+ if "past_key_values" in fwd_params:
1010
+ return self.base_model(labels=labels, **kwargs)
1011
+ else:
1012
+ transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)
1013
+ fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())
1014
+ if "past_key_values" not in fwd_params:
1015
+ raise ValueError("Model does not support past key values which are required for prefix tuning.")
1016
+ outputs = transformer_backbone_name(**kwargs)
1017
+ pooled_output = outputs[1] if len(outputs) > 1 else outputs[0]
1018
+ if "dropout" in [name for name, _ in list(self.base_model.named_children())]:
1019
+ pooled_output = self.base_model.dropout(pooled_output)
1020
+ logits = self.base_model.get_submodule(self.cls_layer_name)(pooled_output)
1021
+
1022
+ loss = None
1023
+ if labels is not None:
1024
+ if self.config.problem_type is None:
1025
+ if self.base_model.num_labels == 1:
1026
+ self.config.problem_type = "regression"
1027
+ elif self.base_model.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1028
+ self.config.problem_type = "single_label_classification"
1029
+ else:
1030
+ self.config.problem_type = "multi_label_classification"
1031
+
1032
+ if self.config.problem_type == "regression":
1033
+ loss_fct = MSELoss()
1034
+ if self.base_model.num_labels == 1:
1035
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1036
+ else:
1037
+ loss = loss_fct(logits, labels)
1038
+ elif self.config.problem_type == "single_label_classification":
1039
+ loss_fct = CrossEntropyLoss()
1040
+ loss = loss_fct(logits.view(-1, self.base_model.num_labels), labels.view(-1))
1041
+ elif self.config.problem_type == "multi_label_classification":
1042
+ loss_fct = BCEWithLogitsLoss()
1043
+ loss = loss_fct(logits, labels)
1044
+ if not return_dict:
1045
+ output = (logits,) + outputs[2:]
1046
+ return ((loss,) + output) if loss is not None else output
1047
+
1048
+ return SequenceClassifierOutput(
1049
+ loss=loss,
1050
+ logits=logits,
1051
+ hidden_states=outputs.hidden_states,
1052
+ attentions=outputs.attentions,
1053
+ )
1054
+
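The `problem_type` dispatch in `_prefix_tuning_forward` mirrors the standard transformers classification heads; a self-contained torch sketch of the three loss branches (toy tensors only, nothing model-specific):

```py
>>> import torch
>>> from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

>>> logits = torch.randn(4, 3)                       # (batch_size, num_labels)
>>> int_labels = torch.tensor([0, 2, 1, 1])          # long labels -> "single_label_classification"
>>> ce = CrossEntropyLoss()(logits.view(-1, 3), int_labels.view(-1))
>>> multi_hot = torch.tensor([[1.0, 0.0, 1.0]] * 4)  # float targets -> "multi_label_classification"
>>> bce = BCEWithLogitsLoss()(logits, multi_hot)
>>> reg_logits = torch.randn(4, 1)                   # num_labels == 1 -> "regression"
>>> mse = MSELoss()(reg_logits.squeeze(), torch.randn(4))
```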
1055
+
1056
+ class PeftModelForCausalLM(PeftModel):
1057
+ """
1058
+ Peft model for causal language modeling.
1059
+
1060
+ Args:
1061
+ model ([`~transformers.PreTrainedModel`]): Base transformer model.
1062
+ peft_config ([`PeftConfig`]): Peft config.
1063
+
1064
+
1065
+ Example:
1066
+
1067
+ ```py
1068
+ >>> from transformers import AutoModelForCausalLM
1069
+ >>> from peft import PeftModelForCausalLM, get_peft_config
1070
+
1071
+ >>> config = {
1072
+ ... "peft_type": "PREFIX_TUNING",
1073
+ ... "task_type": "CAUSAL_LM",
1074
+ ... "inference_mode": False,
1075
+ ... "num_virtual_tokens": 20,
1076
+ ... "token_dim": 1280,
1077
+ ... "num_transformer_submodules": 1,
1078
+ ... "num_attention_heads": 20,
1079
+ ... "num_layers": 36,
1080
+ ... "encoder_hidden_size": 1280,
1081
+ ... "prefix_projection": False,
1082
+ ... "postprocess_past_key_value_function": None,
1083
+ ... }
1084
+
1085
+ >>> peft_config = get_peft_config(config)
1086
+ >>> model = AutoModelForCausalLM.from_pretrained("gpt2-large")
1087
+ >>> peft_model = PeftModelForCausalLM(model, peft_config)
1088
+ >>> peft_model.print_trainable_parameters()
1089
+ trainable params: 1843200 || all params: 775873280 || trainable%: 0.23756456724479544
1090
+ ```
1091
+ """
1092
+
1093
+ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None:
1094
+ super().__init__(model, peft_config, adapter_name)
1095
+ self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation
1096
+
1097
+ def forward(
1098
+ self,
1099
+ input_ids=None,
1100
+ attention_mask=None,
1101
+ inputs_embeds=None,
1102
+ labels=None,
1103
+ output_attentions=None,
1104
+ output_hidden_states=None,
1105
+ return_dict=None,
1106
+ task_ids=None,
1107
+ **kwargs,
1108
+ ):
1109
+ peft_config = self.active_peft_config
1110
+ if not peft_config.is_prompt_learning:
1111
+ if self.base_model.config.model_type == "mpt":
1112
+ if inputs_embeds is not None:
1113
+ raise AssertionError("forward in MPTForCausalLM does not support inputs_embeds")
1114
+ return self.base_model(
1115
+ input_ids=input_ids,
1116
+ attention_mask=attention_mask,
1117
+ labels=labels,
1118
+ output_attentions=output_attentions,
1119
+ output_hidden_states=output_hidden_states,
1120
+ return_dict=return_dict,
1121
+ **kwargs,
1122
+ )
1123
+
1124
+ if peft_config.peft_type == PeftType.POLY:
1125
+ kwargs["task_ids"] = task_ids
1126
+
1127
+ with self._enable_peft_forward_hooks(**kwargs):
1128
+ kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
1129
+ return self.base_model(
1130
+ input_ids=input_ids,
1131
+ attention_mask=attention_mask,
1132
+ inputs_embeds=inputs_embeds,
1133
+ labels=labels,
1134
+ output_attentions=output_attentions,
1135
+ output_hidden_states=output_hidden_states,
1136
+ return_dict=return_dict,
1137
+ **kwargs,
1138
+ )
1139
+
1140
+ batch_size = _get_batch_size(input_ids, inputs_embeds)
1141
+ if attention_mask is not None:
1142
+ # concat prompt attention mask
1143
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
1144
+ attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
1145
+
1146
+ if kwargs.get("position_ids", None) is not None:
1147
+ warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
1148
+ kwargs["position_ids"] = None
1149
+ if kwargs.get("token_type_ids", None) is not None:
1150
+ warnings.warn("Token type ids are not supported for parameter efficient tuning. Ignoring token type ids")
1151
+ kwargs["token_type_ids"] = None
1152
+ kwargs.update(
1153
+ {
1154
+ "attention_mask": attention_mask,
1155
+ "labels": labels,
1156
+ "output_attentions": output_attentions,
1157
+ "output_hidden_states": output_hidden_states,
1158
+ "return_dict": return_dict,
1159
+ }
1160
+ )
1161
+
1162
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
1163
+ past_key_values = self.get_prompt(batch_size)
1164
+ return self.base_model(
1165
+ input_ids=input_ids, inputs_embeds=inputs_embeds, past_key_values=past_key_values, **kwargs
1166
+ )
1167
+ else:
1168
+ if inputs_embeds is None:
1169
+ inputs_embeds = self.word_embeddings(input_ids)
1170
+ # concat prompt labels
1171
+ if labels is not None:
1172
+ prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device)
1173
+ kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1)
1174
+ prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids)
1175
+ prompts = prompts.to(inputs_embeds.dtype)
1176
+ inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
1177
+ return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
1178
+
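The `-100` fill value used for the prepended prompt labels above is the default `ignore_index` of `CrossEntropyLoss`, so the virtual-token positions contribute nothing to the language-modeling loss. A toy sketch:

```py
>>> import torch
>>> from torch.nn import CrossEntropyLoss

>>> labels = torch.tensor([[5, 7, 2]])
>>> prefix_labels = torch.full((1, 4), -100)                   # num_virtual_tokens = 4 (illustrative)
>>> padded_labels = torch.cat((prefix_labels, labels), dim=1)  # shape (1, 7)
>>> logits = torch.randn(1, 7, 10)                             # (batch, seq, vocab)
>>> loss = CrossEntropyLoss()(logits.view(-1, 10), padded_labels.view(-1))  # -100 positions are ignored
```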
1179
+ def generate(self, *args, **kwargs):
1180
+ peft_config = self.active_peft_config
1181
+ self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation
1182
+ if hasattr(self.base_model, "model"):
1183
+ self.base_model.model.generation_config = self.generation_config
1184
+ else:
1185
+ self.base_model.generation_config = self.generation_config
1186
+ try:
1187
+ if not peft_config.is_prompt_learning:
1188
+ with self._enable_peft_forward_hooks(*args, **kwargs):
1189
+ kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
1190
+ outputs = self.base_model.generate(*args, **kwargs)
1191
+ else:
1192
+ outputs = self.base_model.generate(**kwargs)
1193
+ except:
1194
+ self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
1195
+ raise
1196
+ else:
1197
+ self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
1198
+ return outputs
1199
+
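A hedged end-to-end sketch of `generate` on a prompt-tuned causal LM (model name and hyperparameters are illustrative, not taken from the diff):

```py
>>> from transformers import AutoModelForCausalLM, AutoTokenizer
>>> from peft import PromptTuningConfig, get_peft_model

>>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
>>> base = AutoModelForCausalLM.from_pretrained("gpt2")
>>> peft_model = get_peft_model(base, PromptTuningConfig(task_type="CAUSAL_LM", num_virtual_tokens=8))
>>> inputs = tokenizer("Hello, my name is", return_tensors="pt")
>>> output_ids = peft_model.generate(**inputs, max_new_tokens=10)
>>> text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
```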
1200
+ def prepare_inputs_for_generation(self, *args, task_ids: Optional[torch.Tensor] = None, **kwargs):
1201
+ peft_config = self.active_peft_config
1202
+ model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs)
1203
+
1204
+ # https://github.com/huggingface/transformers/pull/26681/ introduced new cache format
1205
+ # for some architectures which requires a special fix for prompt tuning etc.
1206
+ # TODO: starting with transformers 4.38, all architectures should support caching.
1207
+ uses_transformers_4_38 = packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.38.0")
1208
+ uses_transformers_4_36 = packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.36.0")
1209
+ transformers_new_cache_archs = ["llama", "mistral", "persimmon", "phi"]
1210
+ uses_cache = uses_transformers_4_38 or (
1211
+ uses_transformers_4_36 and self.base_model.config.model_type in transformers_new_cache_archs
1212
+ )
1213
+
1214
+ if peft_config.peft_type == PeftType.POLY:
1215
+ model_kwargs["task_ids"] = task_ids
1216
+ if peft_config.is_prompt_learning:
1217
+ if uses_cache and (model_kwargs["past_key_values"] is not None):
1218
+ # change in the logic of `prepare_inputs_for_generation` makes the below code necessary
1219
+ # In prompt learning methods, past key values are longer when compared to the `input_ids`.
1220
+ # As such, only consider the last input ids in the autoregressive generation phase.
1221
+ if model_kwargs["past_key_values"][0][0].shape[-2] >= model_kwargs["input_ids"].shape[1]:
1222
+ model_kwargs["input_ids"] = model_kwargs["input_ids"][:, -1:]
1223
+
1224
+ if model_kwargs.get("attention_mask", None) is not None:
1225
+ size = model_kwargs["input_ids"].shape[0], peft_config.num_virtual_tokens
1226
+ prefix_attention_mask = torch.ones(size).to(model_kwargs["input_ids"].device)
1227
+ model_kwargs["attention_mask"] = torch.cat(
1228
+ (prefix_attention_mask, model_kwargs["attention_mask"]), dim=1
1229
+ )
1230
+
1231
+ if model_kwargs.get("position_ids", None) is not None:
1232
+ warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
1233
+ model_kwargs["position_ids"] = None
1234
+
1235
+ if kwargs.get("token_type_ids", None) is not None:
1236
+ warnings.warn(
1237
+ "Token type ids are not supported for parameter efficient tuning. Ignoring token type ids"
1238
+ )
1239
+ kwargs["token_type_ids"] = None
1240
+
1241
+ if model_kwargs["past_key_values"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING:
1242
+ past_key_values = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0])
1243
+ model_kwargs["past_key_values"] = past_key_values
1244
+ else:
1245
+ if model_kwargs["past_key_values"] is None:
1246
+ inputs_embeds = self.word_embeddings(model_kwargs["input_ids"])
1247
+ prompts = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0], task_ids=task_ids)
1248
+ prompts = prompts.to(inputs_embeds.dtype)
1249
+ model_kwargs["inputs_embeds"] = torch.cat((prompts, inputs_embeds), dim=1)
1250
+ model_kwargs["input_ids"] = None
1251
+
1252
+ # For transformers>=4.38.0 - for some architectures such as Llama, `cache_position` is
1253
+ # passed in the forward pass to keep track of the position ids of the cache. We have to
1254
+ # pop that from `model_kwargs` as `cache_position` is properly created by the model, using the passed
1255
+ # `inputs_embeds`: https://github.com/huggingface/transformers/blob/593230f0a1150ea9c0477b9d859f25daf73c8c33/src/transformers/models/llama/modeling_llama.py#L956
1256
+ _ = model_kwargs.pop("cache_position", None)
1257
+
1258
+ return model_kwargs
1259
+
1260
+
1261
+ class PeftModelForSeq2SeqLM(PeftModel):
1262
+ """
1263
+ Peft model for sequence-to-sequence language modeling.
1264
+
1265
+ Args:
1266
+ model ([`~transformers.PreTrainedModel`]): Base transformer model.
1267
+ peft_config ([`PeftConfig`]): Peft config.
1268
+
1269
+
1270
+ Example:
1271
+
1272
+ ```py
1273
+ >>> from transformers import AutoModelForSeq2SeqLM
1274
+ >>> from peft import PeftModelForSeq2SeqLM, get_peft_config
1275
+
1276
+ >>> config = {
1277
+ ... "peft_type": "LORA",
1278
+ ... "task_type": "SEQ_2_SEQ_LM",
1279
+ ... "inference_mode": False,
1280
+ ... "r": 8,
1281
+ ... "target_modules": ["q", "v"],
1282
+ ... "lora_alpha": 32,
1283
+ ... "lora_dropout": 0.1,
1284
+ ... "fan_in_fan_out": False,
1285
+ ... "enable_lora": None,
1286
+ ... "bias": "none",
1287
+ ... }
1288
+
1289
+ >>> peft_config = get_peft_config(config)
1290
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
1291
+ >>> peft_model = PeftModelForSeq2SeqLM(model, peft_config)
1292
+ >>> peft_model.print_trainable_parameters()
1293
+ trainable params: 884736 || all params: 223843584 || trainable%: 0.3952474242013566
1294
+ ```
1295
+ """
1296
+
1297
+ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None:
1298
+ super().__init__(model, peft_config, adapter_name)
1299
+ self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation
1300
+ self.base_model_prepare_encoder_decoder_kwargs_for_generation = (
1301
+ self.base_model._prepare_encoder_decoder_kwargs_for_generation
1302
+ )
1303
+
1304
+ def forward(
1305
+ self,
1306
+ input_ids=None,
1307
+ attention_mask=None,
1308
+ inputs_embeds=None,
1309
+ decoder_input_ids=None,
1310
+ decoder_attention_mask=None,
1311
+ decoder_inputs_embeds=None,
1312
+ labels=None,
1313
+ output_attentions=None,
1314
+ output_hidden_states=None,
1315
+ return_dict=None,
1316
+ task_ids=None,
1317
+ **kwargs,
1318
+ ):
1319
+ peft_config = self.active_peft_config
1320
+ if not peft_config.is_prompt_learning:
1321
+ if peft_config.peft_type == PeftType.POLY:
1322
+ kwargs["task_ids"] = task_ids
1323
+
1324
+ with self._enable_peft_forward_hooks(**kwargs):
1325
+ kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
1326
+ return self.base_model(
1327
+ input_ids=input_ids,
1328
+ attention_mask=attention_mask,
1329
+ inputs_embeds=inputs_embeds,
1330
+ decoder_input_ids=decoder_input_ids,
1331
+ decoder_attention_mask=decoder_attention_mask,
1332
+ decoder_inputs_embeds=decoder_inputs_embeds,
1333
+ labels=labels,
1334
+ output_attentions=output_attentions,
1335
+ output_hidden_states=output_hidden_states,
1336
+ return_dict=return_dict,
1337
+ **kwargs,
1338
+ )
1339
+
1340
+ batch_size = _get_batch_size(input_ids, inputs_embeds)
1341
+ if decoder_attention_mask is not None:
1342
+ # concat prompt attention mask
1343
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(
1344
+ decoder_attention_mask.device
1345
+ )
1346
+ if peft_config.peft_type not in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]:
1347
+ decoder_attention_mask = torch.cat((prefix_attention_mask, decoder_attention_mask), dim=1)
1348
+
1349
+ if kwargs.get("position_ids", None) is not None:
1350
+ warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
1351
+ kwargs["position_ids"] = None
1352
+ if kwargs.get("token_type_ids", None) is not None:
1353
+ warnings.warn("Token type ids are not supported for parameter efficient tuning. Ignoring token type ids")
1354
+ kwargs["token_type_ids"] = None
1355
+ kwargs.update(
1356
+ {
1357
+ "attention_mask": attention_mask,
1358
+ "decoder_attention_mask": decoder_attention_mask,
1359
+ "labels": labels,
1360
+ "output_attentions": output_attentions,
1361
+ "output_hidden_states": output_hidden_states,
1362
+ "return_dict": return_dict,
1363
+ }
1364
+ )
1365
+
1366
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
1367
+ past_key_values = self.get_prompt(batch_size)
1368
+ return self.base_model(
1369
+ input_ids=input_ids,
1370
+ decoder_input_ids=decoder_input_ids,
1371
+ decoder_inputs_embeds=decoder_inputs_embeds,
1372
+ past_key_values=past_key_values,
1373
+ **kwargs,
1374
+ )
1375
+ elif peft_config.peft_type in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]:
1376
+ if inputs_embeds is None:
1377
+ inputs_embeds = self.word_embeddings(input_ids)
1378
+
1379
+ if attention_mask is not None:
1380
+ # concat prompt attention mask
1381
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(
1382
+ attention_mask.device
1383
+ )
1384
+ kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1)
1385
+
1386
+ prompts = self.get_prompt(batch_size=batch_size)
1387
+ prompts = prompts.to(inputs_embeds.dtype)
1388
+ inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1)
1389
+
1390
+ return self.base_model(
1391
+ inputs_embeds=inputs_embeds,
1392
+ decoder_input_ids=decoder_input_ids,
1393
+ decoder_inputs_embeds=decoder_inputs_embeds,
1394
+ **kwargs,
1395
+ )
1396
+ else:
1397
+ if inputs_embeds is None:
1398
+ inputs_embeds = self.word_embeddings(input_ids)
1399
+ if decoder_inputs_embeds is None and decoder_input_ids is None:
1400
+ decoder_input_ids = shift_tokens_right(
1401
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
1402
+ )
1403
+ decoder_inputs_embeds = self.word_embeddings(decoder_input_ids)
1404
+
1405
+ if attention_mask is not None:
1406
+ # concat prompt attention mask
1407
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(
1408
+ attention_mask.device
1409
+ )
1410
+ kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1)
1411
+ # concat prompt labels
1412
+ if labels is not None:
1413
+ if peft_config.num_transformer_submodules == 1:
1414
+ kwargs["labels"] = labels
1415
+ elif peft_config.num_transformer_submodules == 2:
1416
+ prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device)
1417
+ kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1)
1418
+ prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids)
1419
+ prompts = prompts.to(inputs_embeds.dtype)
1420
+ inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1)
1421
+ if peft_config.num_transformer_submodules == 1:
1422
+ return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
1423
+ elif peft_config.num_transformer_submodules == 2:
1424
+ decoder_inputs_embeds = torch.cat(
1425
+ (prompts[:, peft_config.num_virtual_tokens :], decoder_inputs_embeds), dim=1
1426
+ )
1427
+ return self.base_model(
1428
+ inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, **kwargs
1429
+ )
1430
+
1431
+ def generate(self, **kwargs):
1432
+ peft_config = self.active_peft_config
1433
+ self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation
1434
+ self.base_model._prepare_encoder_decoder_kwargs_for_generation = (
1435
+ self._prepare_encoder_decoder_kwargs_for_generation
1436
+ )
1437
+ try:
1438
+ if not peft_config.is_prompt_learning:
1439
+ with self._enable_peft_forward_hooks(**kwargs):
1440
+ kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
1441
+ outputs = self.base_model.generate(**kwargs)
1442
+ else:
1443
+ if "input_ids" not in kwargs:
1444
+ raise ValueError("input_ids must be provided for Peft model generation")
1445
+ if kwargs.get("position_ids", None) is not None:
1446
+ warnings.warn(
1447
+ "Position ids are not supported for parameter efficient tuning. Ignoring position ids."
1448
+ )
1449
+ kwargs["position_ids"] = None
1450
+ if kwargs.get("token_type_ids", None) is not None:
1451
+ warnings.warn(
1452
+ "Token type ids are not supported for parameter efficient tuning. Ignoring token type ids"
1453
+ )
1454
+ kwargs["token_type_ids"] = None
1455
+
1456
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
1457
+ outputs = self.base_model.generate(**kwargs)
1458
+ elif peft_config.peft_type in [
1459
+ PeftType.PROMPT_TUNING,
1460
+ PeftType.P_TUNING,
1461
+ PeftType.MULTITASK_PROMPT_TUNING,
1462
+ ]:
1463
+ kwargs = deepcopy(kwargs)
1464
+
1465
+ if "encoder_outputs" in kwargs:
1466
+ del kwargs["encoder_outputs"]
1467
+ warnings.warn(
1468
+ "`encoder_outputs` should not be passed to `generate` when using prompt tuning. Ignoring it."
1469
+ )
1470
+
1471
+ input_ids = kwargs.pop("input_ids")
1472
+ inputs_embeds = self.word_embeddings(input_ids)
1473
+ batch_size = inputs_embeds.shape[0]
1474
+ prompts = self.get_prompt(batch_size=batch_size, task_ids=kwargs.pop("task_ids", None))
1475
+ prompts = prompts.to(inputs_embeds.dtype)
1476
+
1477
+ inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1)
1478
+ kwargs["inputs_embeds"] = inputs_embeds
1479
+
1480
+ if "attention_mask" in kwargs:
1481
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(
1482
+ kwargs["attention_mask"].device
1483
+ )
1484
+ kwargs["attention_mask"] = torch.cat((prefix_attention_mask, kwargs["attention_mask"]), dim=1)
1485
+
1486
+ return self.base_model.generate(**kwargs)
1487
+ else:
1488
+ raise NotImplementedError
1489
+ except:
1490
+ self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
1491
+ self.base_model._prepare_encoder_decoder_kwargs_for_generation = (
1492
+ self.base_model_prepare_encoder_decoder_kwargs_for_generation
1493
+ )
1494
+ raise
1495
+ else:
1496
+ self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
1497
+ self.base_model._prepare_encoder_decoder_kwargs_for_generation = (
1498
+ self.base_model_prepare_encoder_decoder_kwargs_for_generation
1499
+ )
1500
+ return outputs
1501
+
1502
+ def prepare_inputs_for_generation(self, *args, task_ids: torch.Tensor = None, **kwargs):
1503
+ peft_config = self.active_peft_config
1504
+ model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs)
1505
+ if peft_config.peft_type == PeftType.POLY:
1506
+ model_kwargs["task_ids"] = task_ids
1507
+ if model_kwargs["past_key_values"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING:
1508
+ batch_size = model_kwargs["decoder_input_ids"].shape[0]
1509
+ past_key_values = self.get_prompt(batch_size)
1510
+ model_kwargs["past_key_values"] = past_key_values
1511
+
1512
+ return model_kwargs
1513
+
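A similar hedged sketch for the seq2seq variant; note that the prompt-learning branch of `generate` above requires `input_ids` to be passed as a keyword argument (model name is illustrative):

```py
>>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
>>> from peft import PromptTuningConfig, get_peft_model

>>> tokenizer = AutoTokenizer.from_pretrained("t5-small")
>>> base = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
>>> peft_model = get_peft_model(base, PromptTuningConfig(task_type="SEQ_2_SEQ_LM", num_virtual_tokens=8))
>>> inputs = tokenizer("translate English to German: Hello", return_tensors="pt")
>>> output_ids = peft_model.generate(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=20)
>>> text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
```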
1514
+
1515
+ class PeftModelForTokenClassification(PeftModel):
1516
+ """
1517
+ Peft model for token classification tasks.
1518
+
1519
+ Args:
1520
+ model ([`~transformers.PreTrainedModel`]): Base transformer model.
1521
+ peft_config ([`PeftConfig`]): Peft config.
1522
+
1523
+ **Attributes**:
1524
+ - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.
1525
+ - **cls_layer_name** (`str`) -- The name of the classification layer.
1526
+
1527
+ Example:
1528
+
1529
+ ```py
1530
+ >>> from transformers import AutoModelForSequenceClassification
1531
+ >>> from peft import PeftModelForTokenClassification, get_peft_config
1532
+
1533
+ >>> config = {
1534
+ ... "peft_type": "PREFIX_TUNING",
1535
+ ... "task_type": "TOKEN_CLS",
1536
+ ... "inference_mode": False,
1537
+ ... "num_virtual_tokens": 20,
1538
+ ... "token_dim": 768,
1539
+ ... "num_transformer_submodules": 1,
1540
+ ... "num_attention_heads": 12,
1541
+ ... "num_layers": 12,
1542
+ ... "encoder_hidden_size": 768,
1543
+ ... "prefix_projection": False,
1544
+ ... "postprocess_past_key_value_function": None,
1545
+ ... }
1546
+
1547
+ >>> peft_config = get_peft_config(config)
1548
+ >>> model = AutoModelForTokenClassification.from_pretrained("bert-base-cased")
1549
+ >>> peft_model = PeftModelForTokenClassification(model, peft_config)
1550
+ >>> peft_model.print_trainable_parameters()
1551
+ trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117
1552
+ ```
1553
+ """
1554
+
1555
+ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig = None, adapter_name: str = "default") -> None:
1556
+ super().__init__(model, peft_config, adapter_name)
1557
+ if self.modules_to_save is None:
1558
+ self.modules_to_save = {"classifier", "score"}
1559
+ else:
1560
+ self.modules_to_save.update({"classifier", "score"})
1561
+
1562
+ for name, _ in self.base_model.named_children():
1563
+ if any(module_name in name for module_name in self.modules_to_save):
1564
+ self.cls_layer_name = name
1565
+ break
1566
+
1567
+ # to make sure classifier layer is trainable
1568
+ _set_trainable(self, adapter_name)
1569
+
1570
+ def forward(
1571
+ self,
1572
+ input_ids=None,
1573
+ attention_mask=None,
1574
+ inputs_embeds=None,
1575
+ labels=None,
1576
+ output_attentions=None,
1577
+ output_hidden_states=None,
1578
+ return_dict=None,
1579
+ task_ids=None,
1580
+ **kwargs,
1581
+ ):
1582
+ peft_config = self.active_peft_config
1583
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1584
+
1585
+ if not peft_config.is_prompt_learning:
1586
+ with self._enable_peft_forward_hooks(**kwargs):
1587
+ kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
1588
+ if peft_config.peft_type == PeftType.POLY:
1589
+ kwargs["task_ids"] = task_ids
1590
+ return self.base_model(
1591
+ input_ids=input_ids,
1592
+ attention_mask=attention_mask,
1593
+ inputs_embeds=inputs_embeds,
1594
+ labels=labels,
1595
+ output_attentions=output_attentions,
1596
+ output_hidden_states=output_hidden_states,
1597
+ return_dict=return_dict,
1598
+ **kwargs,
1599
+ )
1600
+
1601
+ batch_size = _get_batch_size(input_ids, inputs_embeds)
1602
+ if attention_mask is not None:
1603
+ # concat prompt attention mask
1604
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
1605
+ attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
1606
+ if kwargs.get("position_ids", None) is not None:
1607
+ warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
1608
+ kwargs["position_ids"] = None
1609
+ kwargs.update(
1610
+ {
1611
+ "attention_mask": attention_mask,
1612
+ "labels": labels,
1613
+ "output_attentions": output_attentions,
1614
+ "output_hidden_states": output_hidden_states,
1615
+ "return_dict": return_dict,
1616
+ }
1617
+ )
1618
+
1619
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
1620
+ return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)
1621
+ else:
1622
+ if kwargs.get("token_type_ids", None) is not None:
1623
+ kwargs["token_type_ids"] = torch.cat(
1624
+ (
1625
+ torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device),
1626
+ kwargs["token_type_ids"],
1627
+ ),
1628
+ dim=1,
1629
+ ).long()
1630
+ if inputs_embeds is None:
1631
+ inputs_embeds = self.word_embeddings(input_ids)
1632
+ prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids)
1633
+ prompts = prompts.to(inputs_embeds.dtype)
1634
+ inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
1635
+ return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
1636
+
1637
+ def _prefix_tuning_forward(
1638
+ self,
1639
+ input_ids=None,
1640
+ attention_mask=None,
1641
+ inputs_embeds=None,
1642
+ labels=None,
1643
+ output_attentions=None,
1644
+ output_hidden_states=None,
1645
+ return_dict=None,
1646
+ **kwargs,
1647
+ ):
1648
+ batch_size = _get_batch_size(input_ids, inputs_embeds)
1649
+ past_key_values = self.get_prompt(batch_size)
1650
+ fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())
1651
+ kwargs.update(
1652
+ {
1653
+ "input_ids": input_ids,
1654
+ "attention_mask": attention_mask,
1655
+ "inputs_embeds": inputs_embeds,
1656
+ "output_attentions": output_attentions,
1657
+ "output_hidden_states": output_hidden_states,
1658
+ "return_dict": return_dict,
1659
+ "past_key_values": past_key_values,
1660
+ }
1661
+ )
1662
+ if "past_key_values" in fwd_params:
1663
+ return self.base_model(labels=labels, **kwargs)
1664
+ else:
1665
+ transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)
1666
+ fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())
1667
+ if "past_key_values" not in fwd_params:
1668
+ raise ValueError("Model does not support past key values which are required for prefix tuning.")
1669
+ outputs = transformer_backbone_name(**kwargs)
1670
+ sequence_output = outputs[0]
1671
+ if "dropout" in [name for name, _ in list(self.base_model.named_children())]:
1672
+ sequence_output = self.base_model.dropout(sequence_output)
1673
+ logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output)
1674
+
1675
+ loss = None
1676
+ if labels is not None:
1677
+ loss_fct = CrossEntropyLoss()
1678
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1679
+
1680
+ if not return_dict:
1681
+ output = (logits,) + outputs[2:]
1682
+ return ((loss,) + output) if loss is not None else output
1683
+
1684
+ return TokenClassifierOutput(
1685
+ loss=loss,
1686
+ logits=logits,
1687
+ hidden_states=outputs.hidden_states,
1688
+ attentions=outputs.attentions,
1689
+ )
1690
+
1691
+
1692
+ class PeftModelForQuestionAnswering(PeftModel):
1693
+ """
1694
+ Peft model for extractive question answering.
1695
+
1696
+ Args:
1697
+ model ([`~transformers.PreTrainedModel`]): Base transformer model.
1698
+ peft_config ([`PeftConfig`]): Peft config.
1699
+
1700
+ **Attributes**:
1701
+ - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.
1702
+ - **cls_layer_name** (`str`) -- The name of the classification layer.
1703
+
1704
+ Example:
1705
+
1706
+ ```py
1707
+ >>> from transformers import AutoModelForQuestionAnswering
1708
+ >>> from peft import PeftModelForQuestionAnswering, get_peft_config
1709
+
1710
+ >>> config = {
1711
+ ... "peft_type": "LORA",
1712
+ ... "task_type": "QUESTION_ANS",
1713
+ ... "inference_mode": False,
1714
+ ... "r": 16,
1715
+ ... "target_modules": ["query", "value"],
1716
+ ... "lora_alpha": 32,
1717
+ ... "lora_dropout": 0.05,
1718
+ ... "fan_in_fan_out": False,
1719
+ ... "bias": "none",
1720
+ ... }
1721
+
1722
+ >>> peft_config = get_peft_config(config)
1723
+ >>> model = AutoModelForQuestionAnswering.from_pretrained("bert-base-cased")
1724
+ >>> peft_model = PeftModelForQuestionAnswering(model, peft_config)
1725
+ >>> peft_model.print_trainable_parameters()
1726
+ trainable params: 592900 || all params: 108312580 || trainable%: 0.5473971721475013
1727
+ ```
1728
+ """
1729
+
1730
+ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None:
1731
+ super().__init__(model, peft_config, adapter_name)
1732
+ if self.modules_to_save is None:
1733
+ self.modules_to_save = {"qa_outputs"}
1734
+ else:
1735
+ self.modules_to_save.update({"qa_outputs"})
1736
+
1737
+ for name, _ in self.base_model.named_children():
1738
+ if any(module_name in name for module_name in self.modules_to_save):
1739
+ self.cls_layer_name = name
1740
+ break
1741
+
1742
+ # to make sure classifier layer is trainable
1743
+ _set_trainable(self, adapter_name)
1744
+
1745
+ def forward(
1746
+ self,
1747
+ input_ids=None,
1748
+ attention_mask=None,
1749
+ token_type_ids=None,
1750
+ position_ids=None,
1751
+ inputs_embeds=None,
1752
+ start_positions=None,
1753
+ end_positions=None,
1754
+ output_attentions=None,
1755
+ output_hidden_states=None,
1756
+ return_dict=None,
1757
+ task_ids=None,
1758
+ **kwargs,
1759
+ ):
1760
+ peft_config = self.active_peft_config
1761
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1762
+
1763
+ if not peft_config.is_prompt_learning:
1764
+ if peft_config.peft_type == PeftType.POLY:
1765
+ kwargs["task_ids"] = task_ids
1766
+
1767
+ with self._enable_peft_forward_hooks(**kwargs):
1768
+ kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
1769
+ return self.base_model(
1770
+ input_ids=input_ids,
1771
+ attention_mask=attention_mask,
1772
+ inputs_embeds=inputs_embeds,
1773
+ start_positions=start_positions,
1774
+ end_positions=end_positions,
1775
+ output_attentions=output_attentions,
1776
+ output_hidden_states=output_hidden_states,
1777
+ return_dict=return_dict,
1778
+ **kwargs,
1779
+ )
1780
+
1781
+ batch_size = _get_batch_size(input_ids, inputs_embeds)
1782
+ if attention_mask is not None:
1783
+ # concat prompt attention mask
1784
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
1785
+ attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
1786
+ if kwargs.get("position_ids", None) is not None:
1787
+ warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
1788
+ kwargs["position_ids"] = None
1789
+ kwargs.update(
1790
+ {
1791
+ "attention_mask": attention_mask,
1792
+ "start_positions": start_positions,
1793
+ "end_positions": end_positions,
1794
+ "output_attentions": output_attentions,
1795
+ "output_hidden_states": output_hidden_states,
1796
+ "return_dict": return_dict,
1797
+ }
1798
+ )
1799
+
1800
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
1801
+ return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)
1802
+ else:
1803
+ if kwargs.get("token_type_ids", None) is not None:
1804
+ kwargs["token_type_ids"] = torch.cat(
1805
+ (
1806
+ torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device),
1807
+ kwargs["token_type_ids"],
1808
+ ),
1809
+ dim=1,
1810
+ ).long()
1811
+ if inputs_embeds is None:
1812
+ inputs_embeds = self.word_embeddings(input_ids)
1813
+ prompts = self.get_prompt(batch_size=batch_size)
1814
+ prompts = prompts.to(inputs_embeds.dtype)
1815
+ inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
1816
+ return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
1817
+
1818
+ def _prefix_tuning_forward(
1819
+ self,
1820
+ input_ids=None,
1821
+ attention_mask=None,
1822
+ inputs_embeds=None,
1823
+ start_positions=None,
1824
+ end_positions=None,
1825
+ output_attentions=None,
1826
+ output_hidden_states=None,
1827
+ return_dict=None,
1828
+ **kwargs,
1829
+ ):
1830
+ batch_size = _get_batch_size(input_ids, inputs_embeds)
1831
+ past_key_values = self.get_prompt(batch_size)
1832
+ fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())
1833
+ kwargs.update(
1834
+ {
1835
+ "input_ids": input_ids,
1836
+ "attention_mask": attention_mask,
1837
+ "inputs_embeds": inputs_embeds,
1838
+ "output_attentions": output_attentions,
1839
+ "output_hidden_states": output_hidden_states,
1840
+ "return_dict": return_dict,
1841
+ "past_key_values": past_key_values,
1842
+ }
1843
+ )
1844
+ if "past_key_values" in fwd_params:
1845
+ return self.base_model(start_positions=start_positions, end_positions=end_positions, **kwargs)
1846
+ else:
1847
+ transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)
1848
+ fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())
1849
+ if "past_key_values" not in fwd_params:
1850
+ raise ValueError("Model does not support past key values which are required for prefix tuning.")
1851
+ outputs = transformer_backbone_name(**kwargs)
1852
+ sequence_output = outputs[0]
1853
+ if "dropout" in [name for name, _ in list(self.base_model.named_children())]:
1854
+ sequence_output = self.base_model.dropout(sequence_output)
1855
+ logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output)
1856
+ start_logits, end_logits = logits.split(1, dim=-1)
1857
+ start_logits = start_logits.squeeze(-1).contiguous()
1858
+ end_logits = end_logits.squeeze(-1).contiguous()
1859
+
1860
+ total_loss = None
1861
+ if start_positions is not None and end_positions is not None:
1862
+ # If we are on multi-GPU, the split may add an extra dimension; squeeze it
1863
+ if len(start_positions.size()) > 1:
1864
+ start_positions = start_positions.squeeze(-1)
1865
+ if len(end_positions.size()) > 1:
1866
+ end_positions = end_positions.squeeze(-1)
1867
+ # sometimes the start/end positions are outside our model inputs; we ignore these terms
1868
+ ignored_index = start_logits.size(1)
1869
+ start_positions = start_positions.clamp(0, ignored_index)
1870
+ end_positions = end_positions.clamp(0, ignored_index)
1871
+
1872
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1873
+ start_loss = loss_fct(start_logits, start_positions)
1874
+ end_loss = loss_fct(end_logits, end_positions)
1875
+ total_loss = (start_loss + end_loss) / 2
1876
+
1877
+ if not return_dict:
1878
+ output = (start_logits, end_logits) + outputs[2:]
1879
+ return ((total_loss,) + output) if total_loss is not None else output
1880
+
1881
+ return QuestionAnsweringModelOutput(
1882
+ loss=total_loss,
1883
+ start_logits=start_logits,
1884
+ end_logits=end_logits,
1885
+ hidden_states=outputs.hidden_states,
1886
+ attentions=outputs.attentions,
1887
+ )
1888
+
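In the question-answering loss above, out-of-range positions are clamped to `ignored_index` (the sequence length) and then masked out by `CrossEntropyLoss(ignore_index=...)`. A toy sketch of that behaviour:

```py
>>> import torch
>>> from torch.nn import CrossEntropyLoss

>>> start_logits = torch.randn(2, 16)                     # (batch_size, seq_len)
>>> start_positions = torch.tensor([3, 40]).clamp(0, 16)  # 40 is out of range -> clamped to 16
>>> loss_fct = CrossEntropyLoss(ignore_index=16)          # ignored_index == seq_len
>>> start_loss = loss_fct(start_logits, start_positions)  # the clamped example is ignored
```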
1889
+
1890
+ class PeftModelForFeatureExtraction(PeftModel):
1891
+ """
1892
+ Peft model for extracting features/embeddings from transformer models
1893
+
1894
+ Args:
1895
+ model ([`~transformers.PreTrainedModel`]): Base transformer model.
1896
+ peft_config ([`PeftConfig`]): Peft config.
1897
+
1898
+ **Attributes**:
1899
+ - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.
1900
+
1901
+ Example:
1902
+
1903
+ ```py
1904
+ >>> from transformers import AutoModel
1905
+ >>> from peft import PeftModelForFeatureExtraction, get_peft_config
1906
+
1907
+ >>> config = {
1908
+ ... "peft_type": "LORA",
1909
+ ... "task_type": "FEATURE_EXTRACTION",
1910
+ ... "inference_mode": False,
1911
+ ... "r": 16,
1912
+ ... "target_modules": ["query", "value"],
1913
+ ... "lora_alpha": 32,
1914
+ ... "lora_dropout": 0.05,
1915
+ ... "fan_in_fan_out": False,
1916
+ ... "bias": "none",
1917
+ ... }
1918
+ >>> peft_config = get_peft_config(config)
1919
+ >>> model = AutoModel.from_pretrained("bert-base-cased")
1920
+ >>> peft_model = PeftModelForFeatureExtraction(model, peft_config)
1921
+ >>> peft_model.print_trainable_parameters()
1922
+ ```
1923
+ """
1924
+
1925
+ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default"):
1926
+ super().__init__(model, peft_config, adapter_name)
1927
+
1928
+ def forward(
1929
+ self,
1930
+ input_ids=None,
1931
+ attention_mask=None,
1932
+ inputs_embeds=None,
1933
+ output_attentions=None,
1934
+ output_hidden_states=None,
1935
+ return_dict=None,
1936
+ task_ids=None,
1937
+ **kwargs,
1938
+ ):
1939
+ peft_config = self.active_peft_config
1940
+ if not peft_config.is_prompt_learning:
1941
+ if peft_config.peft_type == PeftType.POLY:
1942
+ kwargs["task_ids"] = task_ids
1943
+
1944
+ with self._enable_peft_forward_hooks(**kwargs):
1945
+ kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
1946
+ return self.base_model(
1947
+ input_ids=input_ids,
1948
+ attention_mask=attention_mask,
1949
+ inputs_embeds=inputs_embeds,
1950
+ output_attentions=output_attentions,
1951
+ output_hidden_states=output_hidden_states,
1952
+ return_dict=return_dict,
1953
+ **kwargs,
1954
+ )
1955
+
1956
+ batch_size = _get_batch_size(input_ids, inputs_embeds)
1957
+ if attention_mask is not None:
1958
+ # concat prompt attention mask
1959
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
1960
+ attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
1961
+
1962
+ if kwargs.get("position_ids", None) is not None:
1963
+ warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
1964
+ kwargs["position_ids"] = None
1965
+ if kwargs.get("token_type_ids", None) is not None:
1966
+ warnings.warn("Token type ids are not supported for parameter efficient tuning. Ignoring token type ids")
1967
+ kwargs["token_type_ids"] = None
1968
+ kwargs.update(
1969
+ {
1970
+ "attention_mask": attention_mask,
1971
+ "output_attentions": output_attentions,
1972
+ "output_hidden_states": output_hidden_states,
1973
+ "return_dict": return_dict,
1974
+ }
1975
+ )
1976
+
1977
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
1978
+ past_key_values = self.get_prompt(batch_size)
1979
+ return self.base_model(input_ids=input_ids, past_key_values=past_key_values, **kwargs)
1980
+ else:
1981
+ if inputs_embeds is None:
1982
+ inputs_embeds = self.word_embeddings(input_ids)
1983
+ prompts = self.get_prompt(batch_size=batch_size)
1984
+ prompts = prompts.to(inputs_embeds.dtype)
1985
+ inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
1986
+ return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
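Finally, a hedged usage sketch for `PeftModelForFeatureExtraction` that complements the docstring example by actually pulling embeddings out of the wrapped encoder (model name is illustrative):

```py
>>> from transformers import AutoModel, AutoTokenizer
>>> from peft import LoraConfig, get_peft_model

>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> base = AutoModel.from_pretrained("bert-base-cased")
>>> peft_config = LoraConfig(task_type="FEATURE_EXTRACTION", target_modules=["query", "value"])
>>> peft_model = get_peft_model(base, peft_config)
>>> inputs = tokenizer("A sentence to embed", return_tensors="pt")
>>> embeddings = peft_model(**inputs).last_hidden_state  # (1, seq_len, 768) for bert-base
```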
llmeval-env/lib/python3.10/site-packages/peft/py.typed ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (888 Bytes).