diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..774ed17218db9dd2eb5242115b5327982f8773f6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7da4ec6974385f656e9e8e22a98d19d2fa7120b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_base.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_fp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_fp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb7bf058c6441b61a7fb99ee3cd14a368ea74a74 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_fp.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_iv.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_iv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a2293f03866c53ee172b1781a6d4a393a483ee1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_iv.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_mp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_mp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c1a15809c18e54d45d2d01092c825357000393b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_mp.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_mp_python.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_mp_python.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2257a49ce0c78eeddf78675f018cbfaaf3993b26 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/ctx_mp_python.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/function_docs.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/function_docs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa0375cc84ebe17b887c36824a732d7904be2109 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/function_docs.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/identification.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/identification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f21f5dceece65d7b6fc503c4508fefb2205c2be3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/identification.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/math2.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/math2.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..8a3bf9eb459af499e6895801c4b74249527481db Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/math2.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/rational.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/rational.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18055ed610643b3fb0037c1b522f2b67ff6d5e35 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/rational.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/usertools.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/usertools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30d66b087d2cf5e1f412f230a40114e74aee5f68 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/usertools.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/visualization.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/visualization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..857b98b086d588e51c80376f19c3c73eb6606c03 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/__pycache__/visualization.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__init__.py b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..040a3806b968f75b8d1a88ae37a4979fe83d466a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__init__.py @@ -0,0 +1,6 @@ +from . import calculus +# XXX: hack to set methods +from . import approximation +from . import differentiation +from . import extrapolation +from . 
import polynomials diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9847703361826abddf09b1e531d56172212a38e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/approximation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/approximation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e74aeacd2987a8cece783d1e8e5ba0adda2143b9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/approximation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/calculus.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/calculus.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..309faa5a6812907dbbb359daa5e33e4b6d14094c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/calculus.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/differentiation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/differentiation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f82e2fba27a61fac27fbe839b3965a35422604b4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/differentiation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/extrapolation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/extrapolation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72659756adf9088a723ca3b40d4c7bc839fb4abd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/extrapolation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/inverselaplace.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/inverselaplace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..651a48d436112f4f01163351e1b4a8ffac3e6f8d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/inverselaplace.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/odes.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/odes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..144c57069daa6395ed8f8a9e4a07c27696c13e92 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/odes.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/optimization.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/optimization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34c877cbaad85cd9a28da937832f209370dacb1c Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/optimization.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..554276ab0c0a7b87d4401b0c1e290570c301a91a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6533d60addba5abf9fbf56f21a9810c56649e5c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/approximation.py b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/approximation.py new file mode 100644 index 0000000000000000000000000000000000000000..7ca5cc598fb53491cb6ae4a41a40477c58544d53 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/approximation.py @@ -0,0 +1,246 @@ +from ..libmp.backend import xrange +from .calculus import defun + +#----------------------------------------------------------------------------# +# Approximation methods # +#----------------------------------------------------------------------------# + +# The Chebyshev approximation formula is given at: +# http://mathworld.wolfram.com/ChebyshevApproximationFormula.html + +# The only major changes in the following code are that we return the +# expanded polynomial coefficients instead of Chebyshev coefficients, +# and that we automatically transform [a,b] -> [-1,1] and back +# for convenience. + +# Coefficient in Chebyshev approximation +def chebcoeff(ctx,f,a,b,j,N): + s = ctx.mpf(0) + h = ctx.mpf(0.5) + for k in range(1, N+1): + t = ctx.cospi((k-h)/N) + s += f(t*(b-a)*h + (b+a)*h) * ctx.cospi(j*(k-h)/N) + return 2*s/N + +# Generate Chebyshev polynomials T_n(ax+b) in expanded form +def chebT(ctx, a=1, b=0): + Tb = [1] + yield Tb + Ta = [b, a] + while 1: + yield Ta + # Recurrence: T[n+1](ax+b) = 2*(ax+b)*T[n](ax+b) - T[n-1](ax+b) + Tmp = [0] + [2*a*t for t in Ta] + for i, c in enumerate(Ta): Tmp[i] += 2*b*c + for i, c in enumerate(Tb): Tmp[i] -= c + Ta, Tb = Tmp, Ta + +@defun +def chebyfit(ctx, f, interval, N, error=False): + r""" + Computes a polynomial of degree `N-1` that approximates the + given function `f` on the interval `[a, b]`. With ``error=True``, + :func:`~mpmath.chebyfit` also returns an accurate estimate of the + maximum absolute error; that is, the maximum value of + `|f(x) - P(x)|` for `x \in [a, b]`. + + :func:`~mpmath.chebyfit` uses the Chebyshev approximation formula, + which gives a nearly optimal solution: that is, the maximum + error of the approximating polynomial is very close to + the smallest possible for any polynomial of the same degree. + + Chebyshev approximation is very useful if one needs repeated + evaluation of an expensive function, such as a function defined + implicitly by an integral or a differential equation. (For + example, it could be used to turn a slow mpmath function + into a fast machine-precision version of the same.)
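+
+ As an editor's illustrative sketch (not part of the original docstring), the
+ coefficient formula implemented by ``chebcoeff`` above can be reproduced with
+ plain ``math`` floats; ``cheb_coeff`` and the sample function/interval are
+ hypothetical choices::
+
+     import math
+
+     def cheb_coeff(f, a, b, j, N):
+         # c_j = (2/N) * sum_{k=1..N} f(x_k) * cos(pi*j*(k-1/2)/N), where
+         # x_k = cos(pi*(k-1/2)/N) is mapped linearly from [-1,1] to [a,b]
+         s = 0.0
+         for k in range(1, N + 1):
+             t = math.cos(math.pi * (k - 0.5) / N)
+             x = t * (b - a) * 0.5 + (b + a) * 0.5
+             s += f(x) * math.cos(math.pi * j * (k - 0.5) / N)
+         return 2 * s / N
+
+     coeffs = [cheb_coeff(math.cos, 1.0, 2.0, j, 5) for j in range(5)]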
+ + **Examples** + + Here we use :func:`~mpmath.chebyfit` to generate a low-degree approximation of `f(x) = \cos(x)`, valid on the interval `[1, 2]`:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> poly, err = chebyfit(cos, [1, 2], 5, error=True) + >>> nprint(poly) + [0.00291682, 0.146166, -0.732491, 0.174141, 0.949553] + >>> nprint(err, 12) + 1.61351758081e-5 + + The polynomial can be evaluated using ``polyval``:: + + >>> nprint(polyval(poly, 1.6), 12) + -0.0291858904138 + >>> nprint(cos(1.6), 12) + -0.0291995223013 + + Sampling the true error at 1000 points shows that the error + estimate generated by ``chebyfit`` is remarkably good:: + + >>> error = lambda x: abs(cos(x) - polyval(poly, x)) + >>> nprint(max([error(1+n/1000.) for n in range(1000)]), 12) + 1.61349954245e-5 + + **Choice of degree** + + The degree `N` can be set arbitrarily high, to obtain an + arbitrarily good approximation. As a rule of thumb, an + `N`-term Chebyshev approximation is good to `N/(b-a)` decimal + places on a unit interval (although this depends on how + well-behaved `f` is). The cost grows accordingly: ``chebyfit`` + evaluates the function `(N^2)/2` times to compute the + coefficients and an additional `N` times to estimate the error. + + **Possible issues** + + One should be careful to use a sufficiently high working + precision both when calling ``chebyfit`` and when evaluating + the resulting polynomial, as the polynomial is sometimes + ill-conditioned. It is, for example, difficult to reach + 15-digit accuracy when evaluating the polynomial using + machine precision floats, no matter the theoretical + accuracy of the polynomial. (The option to return the + coefficients in Chebyshev form should be made available + in the future.) + + It is important to note that Chebyshev approximation works + poorly if `f` is not smooth. A function containing singularities, + rapid oscillation, etc. can be approximated more effectively by + multiplying it by a weight function that cancels out the + nonsmooth features, or by dividing the interval into several + segments. + """ + a, b = ctx._as_points(interval) + orig = ctx.prec + try: + ctx.prec = orig + int(N**0.5) + 20 + c = [chebcoeff(ctx,f,a,b,k,N) for k in range(N)] + d = [ctx.zero] * N + d[0] = -c[0]/2 + h = ctx.mpf(0.5) + T = chebT(ctx, ctx.mpf(2)/(b-a), ctx.mpf(-1)*(b+a)/(b-a)) + for (k, Tk) in zip(range(N), T): + for i in range(len(Tk)): + d[i] += c[k]*Tk[i] + d = d[::-1] + # Estimate maximum error + err = ctx.zero + for k in range(N): + x = ctx.cos(ctx.pi*k/N) * (b-a)*h + (b+a)*h + err = max(err, abs(f(x) - ctx.polyval(d, x))) + finally: + ctx.prec = orig + if error: + return d, +err + else: + return d + +@defun +def fourier(ctx, f, interval, N): + r""" + Computes the Fourier series of degree `N` of the given function + on the interval `[a, b]`. More precisely, :func:`~mpmath.fourier` returns + two lists `(c, s)` of coefficients (the cosine series and sine + series, respectively), such that + + .. math :: + + f(x) \sim \sum_{k=0}^N + c_k \cos(k m x) + s_k \sin(k m x) + + where `m = 2 \pi / (b-a)`. + + Note that many texts define the first coefficient as `2 c_0` instead + of `c_0`. The easiest way to evaluate the computed series correctly + is to pass it to :func:`~mpmath.fourierval`. + + **Examples** + + The function `f(x) = x` has a simple Fourier series on the standard + interval `[-\pi, \pi]`.
The cosine coefficients are all zero (because + the function has odd symmetry), and the sine coefficients are + rational numbers:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> c, s = fourier(lambda x: x, [-pi, pi], 5) + >>> nprint(c) + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + >>> nprint(s) + [0.0, 2.0, -1.0, 0.666667, -0.5, 0.4] + + This computes a Fourier series of a nonsymmetric function on + a nonstandard interval:: + + >>> I = [-1, 1.5] + >>> f = lambda x: x**2 - 4*x + 1 + >>> cs = fourier(f, I, 4) + >>> nprint(cs[0]) + [0.583333, 1.12479, -1.27552, 0.904708, -0.441296] + >>> nprint(cs[1]) + [0.0, -2.6255, 0.580905, 0.219974, -0.540057] + + It is instructive to plot a function along with its truncated + Fourier series:: + + >>> plot([f, lambda x: fourierval(cs, I, x)], I) #doctest: +SKIP + + Fourier series generally converge slowly (and may not converge + pointwise). For example, if `f(x) = \cosh(x)`, a 10-term Fourier + series gives an `L^2` error corresponding to 2-digit accuracy:: + + >>> I = [-1, 1] + >>> cs = fourier(cosh, I, 9) + >>> g = lambda x: (cosh(x) - fourierval(cs, I, x))**2 + >>> nprint(sqrt(quad(g, I))) + 0.00467963 + + :func:`~mpmath.fourier` uses numerical quadrature. For nonsmooth functions, + the accuracy (and speed) can be improved by including all singular + points in the interval specification:: + + >>> nprint(fourier(abs, [-1, 1], 0), 10) + ([0.5000441648], [0.0]) + >>> nprint(fourier(abs, [-1, 0, 1], 0), 10) + ([0.5], [0.0]) + + """ + interval = ctx._as_points(interval) + a = interval[0] + b = interval[-1] + L = b-a + cos_series = [] + sin_series = [] + cutoff = ctx.eps*10 + for n in xrange(N+1): + m = 2*n*ctx.pi/L + an = 2*ctx.quadgl(lambda t: f(t)*ctx.cos(m*t), interval)/L + bn = 2*ctx.quadgl(lambda t: f(t)*ctx.sin(m*t), interval)/L + if n == 0: + an /= 2 + if abs(an) < cutoff: an = ctx.zero + if abs(bn) < cutoff: bn = ctx.zero + cos_series.append(an) + sin_series.append(bn) + return cos_series, sin_series + +@defun +def fourierval(ctx, series, interval, x): + """ + Evaluates a Fourier series (in the format computed by + :func:`~mpmath.fourier` for the given interval) at the point `x`. + + The series should be a pair `(c, s)` where `c` is the + cosine series and `s` is the sine series. The two lists + need not have the same length.
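+
+ As an editor's sketch (not part of the original docstring), the evaluation
+ rule can be spelled out with plain ``math`` floats; the helper name
+ ``fourier_val`` is hypothetical and simply mirrors the implementation below::
+
+     import math
+
+     def fourier_val(series, interval, x):
+         # sum of c[n]*cos(m*n*x) + s[n]*sin(m*n*x), with m = 2*pi/(b - a)
+         cs, ss = series
+         a, b = interval[0], interval[-1]
+         m = 2 * math.pi / (b - a)
+         total = sum(c * math.cos(m * n * x) for n, c in enumerate(cs))
+         total += sum(s * math.sin(m * n * x) for n, s in enumerate(ss))
+         return total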
+ """ + cs, ss = series + ab = ctx._as_points(interval) + a = interval[0] + b = interval[-1] + m = 2*ctx.pi/(ab[-1]-ab[0]) + s = ctx.zero + s += ctx.fsum(cs[n]*ctx.cos(m*n*x) for n in xrange(len(cs)) if cs[n]) + s += ctx.fsum(ss[n]*ctx.sin(m*n*x) for n in xrange(len(ss)) if ss[n]) + return s diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/calculus.py b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/calculus.py new file mode 100644 index 0000000000000000000000000000000000000000..24256f121d6c07e5ce954f2a5f5024f156f64016 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/calculus.py @@ -0,0 +1,6 @@ +class CalculusMethods(object): + pass + +def defun(f): + setattr(CalculusMethods, f.__name__, f) + return f diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/differentiation.py b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/differentiation.py new file mode 100644 index 0000000000000000000000000000000000000000..f8186bebd4476476eded9e4b2f8dc3eb23ef5ff9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/differentiation.py @@ -0,0 +1,647 @@ +from ..libmp.backend import xrange +from .calculus import defun + +try: + iteritems = dict.iteritems +except AttributeError: + iteritems = dict.items + +#----------------------------------------------------------------------------# +# Differentiation # +#----------------------------------------------------------------------------# + +@defun +def difference(ctx, s, n): + r""" + Given a sequence `(s_k)` containing at least `n+1` items, returns the + `n`-th forward difference, + + .. math :: + + \Delta^n = \sum_{k=0}^{\infty} (-1)^{k+n} {n \choose k} s_k. + """ + n = int(n) + d = ctx.zero + b = (-1) ** (n & 1) + for k in xrange(n+1): + d += b * s[k] + b = (b * (k-n)) // (k+1) + return d + +def hsteps(ctx, f, x, n, prec, **options): + singular = options.get('singular') + addprec = options.get('addprec', 10) + direction = options.get('direction', 0) + workprec = (prec+2*addprec) * (n+1) + orig = ctx.prec + try: + ctx.prec = workprec + h = options.get('h') + if h is None: + if options.get('relative'): + hextramag = int(ctx.mag(x)) + else: + hextramag = 0 + h = ctx.ldexp(1, -prec-addprec-hextramag) + else: + h = ctx.convert(h) + # Directed: steps x, x+h, ... x+n*h + direction = options.get('direction', 0) + if direction: + h *= ctx.sign(direction) + steps = xrange(n+1) + norm = h + # Central: steps x-n*h, x-(n-2)*h ..., x, ..., x+(n-2)*h, x+n*h + else: + steps = xrange(-n, n+1, 2) + norm = (2*h) + # Perturb + if singular: + x += 0.5*h + values = [f(x+k*h) for k in steps] + return values, norm, workprec + finally: + ctx.prec = orig + + +@defun +def diff(ctx, f, x, n=1, **options): + r""" + Numerically computes the derivative of `f`, `f'(x)`, or generally for + an integer `n \ge 0`, the `n`-th derivative `f^{(n)}(x)`. + A few basic examples are:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> diff(lambda x: x**2 + x, 1.0) + 3.0 + >>> diff(lambda x: x**2 + x, 1.0, 2) + 2.0 + >>> diff(lambda x: x**2 + x, 1.0, 3) + 0.0 + >>> nprint([diff(exp, 3, n) for n in range(5)]) # exp'(x) = exp(x) + [20.0855, 20.0855, 20.0855, 20.0855, 20.0855] + + Even more generally, given a tuple of arguments `(x_1, \ldots, x_k)` + and order `(n_1, \ldots, n_k)`, the partial derivative + `f^{(n_1,\ldots,n_k)}(x_1,\ldots,x_k)` is evaluated. 
For example:: + + >>> diff(lambda x,y: 3*x*y + 2*y - x, (0.25, 0.5), (0,1)) + 2.75 + >>> diff(lambda x,y: 3*x*y + 2*y - x, (0.25, 0.5), (1,1)) + 3.0 + + **Options** + + The following optional keyword arguments are recognized: + + ``method`` + Supported methods are ``'step'`` or ``'quad'``: derivatives may be + computed using either a finite difference with a small step + size `h` (default), or numerical quadrature. + ``direction`` + Direction of finite difference: can be -1 for a left + difference, 0 for a central difference (default), or +1 + for a right difference; more generally can be any complex number. + ``addprec`` + Extra precision for `h` used to account for the function's + sensitivity to perturbations (default = 10). + ``relative`` + Choose `h` relative to the magnitude of `x`, rather than an + absolute value; useful for large or tiny `x` (default = False). + ``h`` + As an alternative to ``addprec`` and ``relative``, manually + select the step size `h`. + ``singular`` + If True, evaluation exactly at the point `x` is avoided; this is + useful for differentiating functions with removable singularities. + Default = False. + ``radius`` + Radius of integration contour (with ``method = 'quad'``). + Default = 0.25. A larger radius typically is faster and more + accurate, but it must be chosen so that `f` has no + singularities within the radius from the evaluation point. + + A finite difference requires `n+1` function evaluations and must be + performed at `(n+1)` times the target precision. Accordingly, `f` must + support fast evaluation at high precision. + + With integration, a larger number of function evaluations is + needed, but not much extra precision is required. For high-order + derivatives, this method may thus be faster if `f` is very expensive to + evaluate at high precision. + + **Further examples** + + The direction option is useful for computing left- or right-sided + derivatives of nonsmooth functions:: + + >>> diff(abs, 0, direction=0) + 0.0 + >>> diff(abs, 0, direction=1) + 1.0 + >>> diff(abs, 0, direction=-1) + -1.0 + + More generally, if the direction is nonzero, a right difference + is computed where the step size is multiplied by sign(direction).
+ For example, with direction=+j, the derivative from the positive + imaginary direction will be computed:: + + >>> diff(abs, 0, direction=j) + (0.0 - 1.0j) + + With integration, the result may have a small imaginary part + even if the true result is purely real:: + + >>> diff(sqrt, 1, method='quad') # doctest:+ELLIPSIS + (0.5 - 4.59...e-26j) + >>> chop(_) + 0.5 + + Adding precision to obtain an accurate value:: + + >>> diff(cos, 1e-30) + 0.0 + >>> diff(cos, 1e-30, h=0.0001) + -9.99999998328279e-31 + >>> diff(cos, 1e-30, addprec=100) + -1.0e-30 + + """ + partial = False + try: + orders = list(n) + x = list(x) + partial = True + except TypeError: + pass + if partial: + x = [ctx.convert(_) for _ in x] + return _partial_diff(ctx, f, x, orders, options) + method = options.get('method', 'step') + if n == 0 and method != 'quad' and not options.get('singular'): + return f(ctx.convert(x)) + prec = ctx.prec + try: + if method == 'step': + values, norm, workprec = hsteps(ctx, f, x, n, prec, **options) + ctx.prec = workprec + v = ctx.difference(values, n) / norm**n + elif method == 'quad': + ctx.prec += 10 + radius = ctx.convert(options.get('radius', 0.25)) + def g(t): + rei = radius*ctx.expj(t) + z = x + rei + return f(z) / rei**n + d = ctx.quadts(g, [0, 2*ctx.pi]) + v = d * ctx.factorial(n) / (2*ctx.pi) + else: + raise ValueError("unknown method: %r" % method) + finally: + ctx.prec = prec + return +v + +def _partial_diff(ctx, f, xs, orders, options): + if not orders: + return f() + if not sum(orders): + return f(*xs) + i = 0 + for i in range(len(orders)): + if orders[i]: + break + order = orders[i] + def fdiff_inner(*f_args): + def inner(t): + return f(*(f_args[:i] + (t,) + f_args[i+1:])) + return ctx.diff(inner, f_args[i], order, **options) + orders[i] = 0 + return _partial_diff(ctx, fdiff_inner, xs, orders, options) + +@defun +def diffs(ctx, f, x, n=None, **options): + r""" + Returns a generator that yields the sequence of derivatives + + .. math :: + + f(x), f'(x), f''(x), \ldots, f^{(k)}(x), \ldots + + With ``method='step'``, :func:`~mpmath.diffs` uses only `O(k)` + function evaluations to generate the first `k` derivatives, + rather than the roughly `O(k^2)` evaluations + required if one calls :func:`~mpmath.diff` `k` separate times. + + With `n < \infty`, the generator stops as soon as the + `n`-th derivative has been generated. If the exact number of + needed derivatives is known in advance, this is slightly + more efficient. + + Options are the same as for :func:`~mpmath.diff`. + + **Examples** + + >>> from mpmath import * + >>> mp.dps = 15 + >>> nprint(list(diffs(cos, 1, 5))) + [0.540302, -0.841471, -0.540302, 0.841471, 0.540302, -0.841471] + >>> for i, d in zip(range(6), diffs(cos, 1)): + ... print("%s %s" % (i, d)) + ...
+ 0 0.54030230586814 + 1 -0.841470984807897 + 2 -0.54030230586814 + 3 0.841470984807897 + 4 0.54030230586814 + 5 -0.841470984807897 + + """ + if n is None: + n = ctx.inf + else: + n = int(n) + if options.get('method', 'step') != 'step': + k = 0 + while k < n + 1: + yield ctx.diff(f, x, k, **options) + k += 1 + return + singular = options.get('singular') + if singular: + yield ctx.diff(f, x, 0, singular=True) + else: + yield f(ctx.convert(x)) + if n < 1: + return + if n == ctx.inf: + A, B = 1, 2 + else: + A, B = 1, n+1 + while 1: + callprec = ctx.prec + y, norm, workprec = hsteps(ctx, f, x, B, callprec, **options) + for k in xrange(A, B): + try: + ctx.prec = workprec + d = ctx.difference(y, k) / norm**k + finally: + ctx.prec = callprec + yield +d + if k >= n: + return + A, B = B, int(A*1.4+1) + B = min(B, n) + +def iterable_to_function(gen): + gen = iter(gen) + data = [] + def f(k): + for i in xrange(len(data), k+1): + data.append(next(gen)) + return data[k] + return f + +@defun +def diffs_prod(ctx, factors): + r""" + Given a list of `N` iterables or generators yielding + `f_k(x), f'_k(x), f''_k(x), \ldots` for `k = 1, \ldots, N`, + generate `g(x), g'(x), g''(x), \ldots` where + `g(x) = f_1(x) f_2(x) \cdots f_N(x)`. + + At high precision and for large orders, this is typically more efficient + than numerical differentiation if the derivatives of each `f_k(x)` + admit direct computation. + + Note: This function does not increase the working precision internally, + so guard digits may have to be added externally for full accuracy. + + **Examples** + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> f = lambda x: exp(x)*cos(x)*sin(x) + >>> u = diffs(f, 1) + >>> v = mp.diffs_prod([diffs(exp,1), diffs(cos,1), diffs(sin,1)]) + >>> next(u); next(v) + 1.23586333600241 + 1.23586333600241 + >>> next(u); next(v) + 0.104658952245596 + 0.104658952245596 + >>> next(u); next(v) + -5.96999877552086 + -5.96999877552086 + >>> next(u); next(v) + -12.4632923122697 + -12.4632923122697 + + """ + N = len(factors) + if N == 1: + for c in factors[0]: + yield c + else: + u = iterable_to_function(ctx.diffs_prod(factors[:N//2])) + v = iterable_to_function(ctx.diffs_prod(factors[N//2:])) + n = 0 + while 1: + #yield sum(binomial(n,k)*u(n-k)*v(k) for k in xrange(n+1)) + s = u(n) * v(0) + a = 1 + for k in xrange(1,n+1): + a = a * (n-k+1) // k + s += a * u(n-k) * v(k) + yield s + n += 1 + +def dpoly(n, _cache={}): + """ + nth differentiation polynomial for exp (Faa di Bruno's formula). + + TODO: most exponents are zero, so maybe a sparse representation + would be better. + """ + if n in _cache: + return _cache[n] + if not _cache: + _cache[0] = {(0,):1} + R = dpoly(n-1) + R = dict((c+(0,),v) for (c,v) in iteritems(R)) + Ra = {} + for powers, count in iteritems(R): + powers1 = (powers[0]+1,) + powers[1:] + if powers1 in Ra: + Ra[powers1] += count + else: + Ra[powers1] = count + for powers, count in iteritems(R): + if not sum(powers): + continue + for k,p in enumerate(powers): + if p: + powers2 = powers[:k] + (p-1,powers[k+1]+1) + powers[k+2:] + if powers2 in Ra: + Ra[powers2] += p*count + else: + Ra[powers2] = p*count + _cache[n] = Ra + return _cache[n] + +@defun +def diffs_exp(ctx, fdiffs): + r""" + Given an iterable or generator yielding `f(x), f'(x), f''(x), \ldots` + generate `g(x), g'(x), g''(x), \ldots` where `g(x) = \exp(f(x))`. + + At high precision and for large orders, this is typically more efficient + than numerical differentiation if the derivatives of `f(x)` + admit direct computation. 
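+
+ For orientation (an editor's sketch, not in the original docstring), the
+ generator reproduces the chain-rule identities `g' = f' g` and
+ `g'' = (f'' + (f')^2) g`, which can be checked against :func:`~mpmath.diff`::
+
+     from mpmath import mp, exp, sin, diff
+
+     f, x = sin, mp.mpf(1)
+     g0 = exp(f(x))                                  # g(x) = exp(f(x))
+     d1 = diff(f, x, 1) * g0                         # g'(x) = f'(x)*g(x)
+     d2 = (diff(f, x, 2) + diff(f, x, 1)**2) * g0    # g''(x)
+     assert abs(d1 - diff(lambda t: exp(f(t)), x, 1)) < 1e-12
+     assert abs(d2 - diff(lambda t: exp(f(t)), x, 2)) < 1e-12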
+ + Note: This function does not increase the working precision internally, + so guard digits may have to be added externally for full accuracy. + + **Examples** + + The derivatives of the gamma function can be computed using + logarithmic differentiation:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> + >>> def diffs_loggamma(x): + ... yield loggamma(x) + ... i = 0 + ... while 1: + ... yield psi(i,x) + ... i += 1 + ... + >>> u = diffs_exp(diffs_loggamma(3)) + >>> v = diffs(gamma, 3) + >>> next(u); next(v) + 2.0 + 2.0 + >>> next(u); next(v) + 1.84556867019693 + 1.84556867019693 + >>> next(u); next(v) + 2.49292999190269 + 2.49292999190269 + >>> next(u); next(v) + 3.44996501352367 + 3.44996501352367 + + """ + fn = iterable_to_function(fdiffs) + f0 = ctx.exp(fn(0)) + yield f0 + i = 1 + while 1: + s = ctx.mpf(0) + for powers, c in iteritems(dpoly(i)): + s += c*ctx.fprod(fn(k+1)**p for (k,p) in enumerate(powers) if p) + yield s * f0 + i += 1 + +@defun +def differint(ctx, f, x, n=1, x0=0): + r""" + Calculates the Riemann-Liouville differintegral, or fractional + derivative, defined by + + .. math :: + + \,_{x_0}{\mathbb{D}}^n_xf(x) = \frac{1}{\Gamma(m-n)} \frac{d^m}{dx^m} + \int_{x_0}^{x}(x-t)^{m-n-1}f(t)dt + + where `f` is a given (presumably well-behaved) function, + `x` is the evaluation point, `n` is the order, and `x_0` is + the reference point of integration (`m` is an arbitrary + parameter selected automatically). + + With `n = 1`, this is just the standard derivative `f'(x)`; with `n = 2`, + the second derivative `f''(x)`, etc. With `n = -1`, it gives + `\int_{x_0}^x f(t) dt`, with `n = -2` + it gives `\int_{x_0}^x \left( \int_{x_0}^t f(u) du \right) dt`, etc. + + As `n` is permitted to be any number, this operator generalizes + iterated differentiation and iterated integration to a single + operator with a continuous order parameter. + + **Examples** + + There is an exact formula for the fractional derivative of a + monomial `x^p`, which may be used as a reference. For example, + the following gives a half-derivative (order 0.5):: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> x = mpf(3); p = 2; n = 0.5 + >>> differint(lambda t: t**p, x, n) + 7.81764019044672 + >>> gamma(p+1)/gamma(p-n+1) * x**(p-n) + 7.81764019044672 + + Another useful test function is the exponential function, whose + integration/differentiation formula easily generalizes + to arbitrary order. Here we first compute a third derivative, + and then a triply nested integral.
(The reference point `x_0` + is set to `-\infty` to avoid nonzero endpoint terms.):: + + >>> differint(lambda x: exp(pi*x), -1.5, 3) + 0.278538406900792 + >>> exp(pi*-1.5) * pi**3 + 0.278538406900792 + >>> differint(lambda x: exp(pi*x), 3.5, -3, -inf) + 1922.50563031149 + >>> exp(pi*3.5) / pi**3 + 1922.50563031149 + + However, for noninteger `n`, the differentiation formula for the + exponential function must be modified to give the same result as the + Riemann-Liouville differintegral:: + + >>> x = mpf(3.5) + >>> c = pi + >>> n = 1+2*j + >>> differint(lambda x: exp(c*x), x, n) + (-123295.005390743 + 140955.117867654j) + >>> x**(-n) * exp(c)**x * (x*c)**n * gammainc(-n, 0, x*c) / gamma(-n) + (-123295.005390743 + 140955.117867654j) + + + """ + m = max(int(ctx.ceil(ctx.re(n)))+1, 1) + r = m-n-1 + g = lambda x: ctx.quad(lambda t: (x-t)**r * f(t), [x0, x]) + return ctx.diff(g, x, m) / ctx.gamma(m-n) + +@defun +def diffun(ctx, f, n=1, **options): + r""" + Given a function `f`, returns a function `g(x)` that evaluates the nth + derivative `f^{(n)}(x)`:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> cos2 = diffun(sin) + >>> sin2 = diffun(sin, 4) + >>> cos(1.3), cos2(1.3) + (0.267498828624587, 0.267498828624587) + >>> sin(1.3), sin2(1.3) + (0.963558185417193, 0.963558185417193) + + The function `f` must support arbitrary precision evaluation. + See :func:`~mpmath.diff` for additional details and supported + keyword options. + """ + if n == 0: + return f + def g(x): + return ctx.diff(f, x, n, **options) + return g + +@defun +def taylor(ctx, f, x, n, **options): + r""" + Produces a degree-`n` Taylor polynomial around the point `x` of the + given function `f`. The coefficients are returned as a list. + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> nprint(chop(taylor(sin, 0, 5))) + [0.0, 1.0, 0.0, -0.166667, 0.0, 0.00833333] + + The coefficients are computed using high-order numerical + differentiation. The function must be possible to evaluate + to arbitrary precision. See :func:`~mpmath.diff` for additional details + and supported keyword options. + + Note that to evaluate the Taylor polynomial as an approximation + of `f`, e.g. with :func:`~mpmath.polyval`, the coefficients must be reversed, + and the point of the Taylor expansion must be subtracted from + the argument: + + >>> p = taylor(exp, 2.0, 10) + >>> polyval(p[::-1], 2.5 - 2.0) + 12.1824939606092 + >>> exp(2.5) + 12.1824939607035 + + """ + gen = enumerate(ctx.diffs(f, x, n, **options)) + if options.get("chop", True): + return [ctx.chop(d)/ctx.factorial(i) for i, d in gen] + else: + return [d/ctx.factorial(i) for i, d in gen] + +@defun +def pade(ctx, a, L, M): + r""" + Computes a Pade approximation of degree `(L, M)` to a function. + Given at least `L+M+1` Taylor coefficients `a` approximating + a function `A(x)`, :func:`~mpmath.pade` returns coefficients of + polynomials `P, Q` satisfying + + .. math :: + + P = \sum_{k=0}^L p_k x^k + + Q = \sum_{k=0}^M q_k x^k + + Q_0 = 1 + + A(x) Q(x) = P(x) + O(x^{L+M+1}) + + `P(x)/Q(x)` can provide a good approximation to an analytic function + beyond the radius of convergence of its Taylor series (example + from G.A. Baker 'Essentials of Pade Approximants' Academic Press, + Ch.1A):: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> one = mpf(1) + >>> def f(x): + ... return sqrt((one + 2*x)/(one + x)) + ... 
+ >>> a = taylor(f, 0, 6) + >>> p, q = pade(a, 3, 3) + >>> x = 10 + >>> polyval(p[::-1], x)/polyval(q[::-1], x) + 1.38169105566806 + >>> f(x) + 1.38169855941551 + + """ + # To determine L+1 coefficients of P and M coefficients of Q + # L+M+1 coefficients of A must be provided + if len(a) < L+M+1: + raise ValueError("L+M+1 coefficients should be provided") + + if M == 0: + if L == 0: + return [ctx.one], [ctx.one] + else: + return a[:L+1], [ctx.one] + + # Solve first + # a[L]*q[1] + ... + a[L-M+1]*q[M] = -a[L+1] + # ... + # a[L+M-1]*q[1] + ... + a[L]*q[M] = -a[L+M] + A = ctx.matrix(M) + for j in range(M): + for i in range(min(M, L+j+1)): + A[j, i] = a[L+j-i] + v = -ctx.matrix(a[(L+1):(L+M+1)]) + x = ctx.lu_solve(A, v) + q = [ctx.one] + list(x) + # compute p + p = [0]*(L+1) + for i in range(L+1): + s = a[i] + for j in range(1, min(M,i) + 1): + s += q[j]*a[i-j] + p[i] = s + return p, q diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/extrapolation.py b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/extrapolation.py new file mode 100644 index 0000000000000000000000000000000000000000..7df0fea3c62c9b71ee24d3f39fd9b7fd3318ed23 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/extrapolation.py @@ -0,0 +1,2115 @@ +try: + from itertools import izip +except ImportError: + izip = zip + +from ..libmp.backend import xrange +from .calculus import defun + +try: + next = next +except NameError: + next = lambda _: _.next() + +@defun +def richardson(ctx, seq): + r""" + Given a list ``seq`` of the first `N` elements of a slowly convergent + infinite sequence, :func:`~mpmath.richardson` computes the `N`-term + Richardson extrapolate for the limit. + + :func:`~mpmath.richardson` returns `(v, c)` where `v` is the estimated + limit and `c` is the magnitude of the largest weight used during the + computation. The weight provides an estimate of the precision + lost to cancellation. Due to cancellation effects, the sequence must + typically be computed at a much higher precision than the target + accuracy of the extrapolation. + + **Applicability and issues** + + The `N`-step Richardson extrapolation algorithm used by + :func:`~mpmath.richardson` is described in [1]. + + Richardson extrapolation only works for a specific type of sequence, + namely one converging like partial sums of + `P(1)/Q(1) + P(2)/Q(2) + \ldots` where `P` and `Q` are polynomials. + When the sequence does not converge at such a rate, + :func:`~mpmath.richardson` generally produces garbage. + + Richardson extrapolation has the advantage of being fast: the `N`-term + extrapolate requires only `O(N)` arithmetic operations, and usually + produces an estimate that is accurate to `O(N)` digits. Contrast with + the Shanks transformation (see :func:`~mpmath.shanks`), which requires + `O(N^2)` operations. + + :func:`~mpmath.richardson` is unable to produce an estimate for the + approximation error. One way to estimate the error is to perform + two extrapolations with slightly different `N` and compare the + results. + + Richardson extrapolation does not work for oscillating sequences. + As a simple workaround, :func:`~mpmath.richardson` detects if the last + three elements do not differ monotonically, and in that case + applies extrapolation only to the even-index elements. + + **Example** + + Applying Richardson extrapolation to the Leibniz series for `\pi`:: + + >>> from mpmath import * + >>> mp.dps = 30; mp.pretty = True + >>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m)) + ...
for m in range(1,30)] + >>> v, c = richardson(S[:10]) + >>> v + 3.2126984126984126984126984127 + >>> nprint([v-pi, c]) + [0.0711058, 2.0] + + >>> v, c = richardson(S[:30]) + >>> v + 3.14159265468624052829954206226 + >>> nprint([v-pi, c]) + [1.09645e-9, 20833.3] + + **References** + + 1. [BenderOrszag]_ pp. 375-376 + + """ + if len(seq) < 3: + raise ValueError("seq should be of minimum length 3") + if ctx.sign(seq[-1]-seq[-2]) != ctx.sign(seq[-2]-seq[-3]): + seq = seq[::2] + N = len(seq)//2-1 + s = ctx.zero + # The general weight is c[k] = (N+k)**N * (-1)**(k+N) / k! / (N-k)! + # To avoid repeated factorials, we simplify the quotient + # of successive weights to obtain a recurrence relation + c = (-1)**N * N**N / ctx.mpf(ctx._ifac(N)) + maxc = 1 + for k in xrange(N+1): + s += c * seq[N+k] + maxc = max(abs(c), maxc) + c *= (k-N)*ctx.mpf(k+N+1)**N + c /= ((1+k)*ctx.mpf(k+N)**N) + return s, maxc + +@defun +def shanks(ctx, seq, table=None, randomized=False): + r""" + Given a list ``seq`` of the first `N` elements of a slowly + convergent infinite sequence `(A_k)`, :func:`~mpmath.shanks` computes the iterated + Shanks transformation `S(A), S(S(A)), \ldots, S^{N/2}(A)`. The Shanks + transformation often provides strong convergence acceleration, + especially if the sequence is oscillating. + + The iterated Shanks transformation is computed using the Wynn + epsilon algorithm (see [1]). :func:`~mpmath.shanks` returns the full + epsilon table generated by Wynn's algorithm, which can be read + off as follows: + + * The table is a list of lists forming a lower triangular matrix, + where higher row and column indices correspond to more accurate + values. + * The columns with even index hold dummy entries (required for the + computation) and the columns with odd index hold the actual + extrapolates. + * The last element in the last row is typically the most + accurate estimate of the limit. + * The difference from the third-to-last element in the last row + provides an estimate of the approximation error. + * The magnitude of the second-to-last element provides an estimate + of the numerical accuracy lost to cancellation. + + For convenience, the extrapolation is stopped at an odd index + so that ``shanks(seq)[-1][-1]`` always gives an estimate of the + limit. + + Optionally, an existing table can be passed to :func:`~mpmath.shanks`. + This can be used to efficiently extend a previous computation after + new elements have been appended to the sequence. The table will + then be updated in-place. + + **The Shanks transformation** + + The Shanks transformation is defined as follows (see [2]): given + the input sequence `(A_0, A_1, \ldots)`, the transformed sequence is + given by + + .. math :: + + S(A_k) = \frac{A_{k+1}A_{k-1}-A_k^2}{A_{k+1}+A_{k-1}-2 A_k} + + The Shanks transformation gives the exact limit `A_{\infty}` in a + single step if `A_k = A + a q^k`. Note in particular that it + extrapolates the exact sum of a geometric series in a single step. + + Applying the Shanks transformation once often improves convergence + substantially for an arbitrary sequence, but the optimal effect is + obtained by applying it iteratively: + `S(S(A_k)), S(S(S(A_k))), \ldots`. + + Wynn's epsilon algorithm provides an efficient way to generate + the table of iterated Shanks transformations. It reduces the + computation of each element to essentially a single division, at + the cost of requiring dummy elements in the table. See [1] for + details.
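+
+ As a small editor's sketch (not part of the original docstring), one Shanks
+ step can be written out directly; ``shanks_step`` is a hypothetical helper,
+ and applying it to the partial sums of a geometric series recovers the
+ limit exactly, as claimed above::
+
+     from fractions import Fraction
+
+     def shanks_step(a_prev, a_k, a_next):
+         # S(A_k) = (A_{k+1}*A_{k-1} - A_k**2) / (A_{k+1} + A_{k-1} - 2*A_k)
+         return (a_next * a_prev - a_k**2) / (a_next + a_prev - 2 * a_k)
+
+     # Partial sums A_k = 2 - (1/2)**k of sum((1/2)**k) = 2; one step is exact.
+     A = [Fraction(2) - Fraction(1, 2)**k for k in range(3)]
+     assert shanks_step(A[0], A[1], A[2]) == 2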
+ + **Precision issues** + + Due to cancellation effects, the sequence must typically be + computed at a much higher precision than the target accuracy + of the extrapolation. + + If the Shanks transformation converges to the exact limit (such + as if the sequence is a geometric series), then a division by + zero occurs. By default, :func:`~mpmath.shanks` handles this case by + terminating the iteration and returning the table it has + generated so far. With *randomized=True*, it will instead + replace the zero by a pseudorandom number close to zero. + (TODO: find a better solution to this problem.) + + **Examples** + + We illustrate by applying Shanks transformation to the Leibniz + series for `\pi`:: + + >>> from mpmath import * + >>> mp.dps = 50 + >>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m)) + ... for m in range(1,30)] + >>> + >>> T = shanks(S[:7]) + >>> for row in T: + ... nprint(row) + ... + [-0.75] + [1.25, 3.16667] + [-1.75, 3.13333, -28.75] + [2.25, 3.14524, 82.25, 3.14234] + [-2.75, 3.13968, -177.75, 3.14139, -969.937] + [3.25, 3.14271, 327.25, 3.14166, 3515.06, 3.14161] + + The extrapolated accuracy is about 4 digits, and about 4 digits + may have been lost due to cancellation:: + + >>> L = T[-1] + >>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])]) + [2.22532e-5, 4.78309e-5, 3515.06] + + Now we extend the computation:: + + >>> T = shanks(S[:25], T) + >>> L = T[-1] + >>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])]) + [3.75527e-19, 1.48478e-19, 2.96014e+17] + + The value for pi is now accurate to 18 digits. About 18 digits may + also have been lost to cancellation. + + Here is an example with a geometric series, where the convergence + is immediate (the sum is exactly 1):: + + >>> mp.dps = 15 + >>> for row in shanks([0.5, 0.75, 0.875, 0.9375, 0.96875]): + ... nprint(row) + [4.0] + [8.0, 1.0] + + **References** + + 1. [GravesMorris]_ + + 2. [BenderOrszag]_ pp. 368-375 + + """ + if len(seq) < 2: + raise ValueError("seq should be of minimum length 2") + if table: + START = len(table) + else: + START = 0 + table = [] + STOP = len(seq) - 1 + if STOP & 1: + STOP -= 1 + one = ctx.one + eps = +ctx.eps + if randomized: + from random import Random + rnd = Random() + rnd.seed(START) + for i in xrange(START, STOP): + row = [] + for j in xrange(i+1): + if j == 0: + a, b = 0, seq[i+1]-seq[i] + else: + if j == 1: + a = seq[i] + else: + a = table[i-1][j-2] + b = row[j-1] - table[i-1][j-1] + if not b: + if randomized: + b = (1 + rnd.getrandbits(10))*eps + elif i & 1: + return table[:-1] + else: + return table + row.append(a + one/b) + table.append(row) + return table + + +class levin_class: + # levin: Copyright 2013 Timo Hartmann (thartmann15 at gmail.com) + r""" + This interface implements Levin's (nonlinear) sequence transformation for + convergence acceleration and summation of divergent series. It performs + better than the Shanks/Wynn-epsilon algorithm for logarithmically convergent + or alternating divergent series. + + Let *A* be the series we want to sum: + + .. math :: + + A = \sum_{k=0}^{\infty} a_k + + Attention: all `a_k` must be non-zero! + + Let `s_n` be the partial sums of this series: + + .. math :: + + s_n = \sum_{k=0}^n a_k. + + **Methods** + + Calling ``levin`` returns an object with the following methods. + + ``update(...)`` works with the list of individual terms `a_k` of *A*, and + ``update_psum(...)`` works with the list of partial sums `s_k` of *A*: + + ..
code :: + + v, e = ...update([a_0, a_1,..., a_k]) + v, e = ...update_psum([s_0, s_1,..., s_k]) + + ``step(...)`` works with the individual terms `a_k` and ``step_psum(...)`` + works with the partial sums `s_k`: + + .. code :: + + v, e = ...step(a_k) + v, e = ...step_psum(s_k) + + *v* is the current estimate for *A*, and *e* is an error estimate which is + simply the difference between the current estimate and the last estimate. + One should not mix ``update``, ``update_psum``, ``step`` and ``step_psum``. + + **A word of caution** + + One can only hope for good results (i.e. convergence acceleration or + resummation) if the `s_n` have some well-defined asymptotic behavior for + large `n` and are not erratic or random. Furthermore one usually needs very + high working precision because of the numerical cancellation. If the working + precision is insufficient, levin may silently produce numerical garbage. + Furthermore even if the Levin-transformation converges, in the general case + there is no proof that the result is mathematically sound. Only for very + special classes of problems can one prove that the Levin-transformation + converges to the expected result (for example Stieltjes-type integrals). + Furthermore the Levin-transform is quite expensive (i.e. slow) in comparison + to Shanks/Wynn-epsilon, Richardson & co. + In summary one can say that the Levin-transformation is powerful but + unreliable and that it may need a copious amount of working precision. + + The Levin transform has several variants differing in the choice of weights. + Some variants are better suited for the possible flavours of convergence + behaviour of *A* than other variants: + + .. code :: + + convergence behaviour levin-u levin-t levin-v shanks/wynn-epsilon + + logarithmic + - + - + linear + + + + + alternating divergent + + + + + + "+" means the variant is suitable, "-" means the variant is not suitable; + for comparison the Shanks/Wynn-epsilon transform is listed, too. + + The variant is controlled through the variant keyword (i.e. ``variant="u"``, + ``variant="t"`` or ``variant="v"``). Overall "u" is probably the best choice. + + Finally it is possible to use the Sidi-S transform instead of the Levin transform + by using the keyword ``method='sidi'``. The Sidi-S transform works better than the + Levin transformation for some divergent series (see the examples). + + Parameters: + + .. code :: + + method "levin" or "sidi" chooses either the Levin or the Sidi-S transformation + variant "u","t" or "v" chooses the weight variant. + + The Levin transform is also accessible through the nsum interface. + ``method="l"`` or ``method="levin"`` select the normal Levin transform while + ``method="sidi"`` + selects the Sidi-S transform. The variant is in both cases selected through the + levin_variant keyword. The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise + it will miss the point where the Levin transform converges resulting in numerical + overflow/garbage. For highly divergent series a copious amount of working precision + must be chosen. + + **Examples** + + First we sum the zeta function:: + + >>> from mpmath import mp + >>> mp.prec = 53 + >>> eps = mp.mpf(mp.eps) + >>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision + ... L = mp.levin(method = "levin", variant = "u") + ... S, s, n = [], 0, 1 + ... while 1: + ... s += mp.one / (n * n) + ... n += 1 + ... S.append(s) + ... v, e = L.update_psum(S) + ... if e < eps: + ... break + ...
if n > 1000: raise RuntimeError("iteration limit exceeded") + >>> print(mp.chop(v - mp.pi ** 2 / 6)) + 0.0 + >>> w = mp.nsum(lambda n: 1 / (n*n), [1, mp.inf], method = "levin", levin_variant = "u") + >>> print(mp.chop(v - w)) + 0.0 + + Now we sum the zeta function outside its range of convergence + (attention: This does not work at the negative integers!):: + + >>> eps = mp.mpf(mp.eps) + >>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision + ... L = mp.levin(method = "levin", variant = "v") + ... A, n = [], 1 + ... while 1: + ... s = mp.mpf(n) ** (2 + 3j) + ... n += 1 + ... A.append(s) + ... v, e = L.update(A) + ... if e < eps: + ... break + ... if n > 1000: raise RuntimeError("iteration limit exceeded") + >>> print(mp.chop(v - mp.zeta(-2-3j))) + 0.0 + >>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v") + >>> print(mp.chop(v - w)) + 0.0 + + Now we sum the divergent asymptotic expansion of an integral related to the + exponential integral (see also [2] p.373). The Sidi-S transform works best here:: + + >>> z = mp.mpf(10) + >>> exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf]) + >>> # exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral + >>> eps = mp.mpf(mp.eps) + >>> with mp.extraprec(2 * mp.prec): # high working precisions are mandatory for divergent resummation + ... L = mp.levin(method = "sidi", variant = "t") + ... n = 0 + ... while 1: + ... s = (-1)**n * mp.fac(n) * z ** (-n) + ... v, e = L.step(s) + ... n += 1 + ... if e < eps: + ... break + ... if n > 1000: raise RuntimeError("iteration limit exceeded") + >>> print(mp.chop(v - exact)) + 0.0 + >>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t") + >>> print(mp.chop(v - w)) + 0.0 + + Another highly divergent integral is also summable:: + + >>> z = mp.mpf(2) + >>> eps = mp.mpf(mp.eps) + >>> exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi) + >>> # exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral + >>> with mp.extraprec(7 * mp.prec): # we need a copious amount of precision to sum this highly divergent series + ... L = mp.levin(method = "levin", variant = "t") + ... n, s = 0, 0 + ... while 1: + ... s += (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)) + ... n += 1 + ... v, e = L.step_psum(s) + ... if e < eps: + ... break + ... if n > 1000: raise RuntimeError("iteration limit exceeded") + >>> print(mp.chop(v - exact)) + 0.0 + >>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)), + ... [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)]) + >>> print(mp.chop(v - w)) + 0.0 + + These examples run with 15-20 decimal digits of precision. For higher precision the + working precision must be raised. + + **Examples for nsum** + + Here we calculate Euler's constant as the constant term in the Laurent + expansion of `\zeta(s)` at `s=1`.
This sum converges extremely slowly because of + the logarithmic convergence behaviour of the Dirichlet series for zeta:: + + >>> mp.dps = 30 + >>> z = mp.mpf(10) ** (-10) + >>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "l") - 1 / z + >>> print(mp.chop(a - mp.euler, tol = 1e-10)) + 0.0 + + The Sidi-S transform performs excellently for the alternating series of `\log(2)`:: + + >>> a = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "sidi") + >>> print(mp.chop(a - mp.log(2))) + 0.0 + + Hypergeometric series can also be summed outside their range of convergence. + The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise it will miss the + point where the Levin transform converges resulting in numerical overflow/garbage:: + + >>> z = 2 + 1j + >>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z) + >>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n)) + >>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)]) + >>> print(mp.chop(exact-v)) + 0.0 + + References: + + [1] E.J. Weniger - "Nonlinear Sequence Transformations for the Acceleration of + Convergence and the Summation of Divergent Series" arXiv:math/0306302 + + [2] A. Sidi - "Practical Extrapolation Methods" + + [3] H.H.H. Homeier - "Scalar Levin-Type Sequence Transformations" arXiv:math/0005209 + + """ + + def __init__(self, method = "levin", variant = "u"): + self.variant = variant + self.n = 0 + self.a0 = 0 + self.theta = 1 + self.A = [] + self.B = [] + self.last = 0 + self.last_s = False + + if method == "levin": + self.factor = self.factor_levin + elif method == "sidi": + self.factor = self.factor_sidi + else: + raise ValueError("levin: unknown method \"%s\"" % method) + + def factor_levin(self, i): + # original levin + # [1] p.50,e.7.5-7 (with n-j replaced by i) + return (self.theta + i) * (self.theta + self.n - 1) ** (self.n - i - 2) / self.ctx.mpf(self.theta + self.n) ** (self.n - i - 1) + + def factor_sidi(self, i): + # sidi analogue of levin (factorial series) + # [1] p.59,e.8.3-16 (with n-j replaced by i) + return (self.theta + self.n - 1) * (self.theta + self.n - 2) / self.ctx.mpf((self.theta + 2 * self.n - i - 2) * (self.theta + 2 * self.n - i - 3)) + + def run(self, s, a0, a1 = 0): + if self.variant=="t": + # levin t + w=a0 + elif self.variant=="u": + # levin u + w=a0*(self.theta+self.n) + elif self.variant=="v": + # levin v + w=a0*a1/(a0-a1) + else: + assert False, "unknown variant" + + if w==0: + raise ValueError("levin: zero weight") + + self.A.append(s/w) + self.B.append(1/w) + + for i in range(self.n-1,-1,-1): + if i==self.n-1: + f=1 + else: + f=self.factor(i) + + self.A[i]=self.A[i+1]-f*self.A[i] + self.B[i]=self.B[i+1]-f*self.B[i] + + self.n+=1 + + ########################################################################### + + def update_psum(self,S): + """ + This routine applies the convergence acceleration to the list of partial sums. + + A = sum(a_k, k = 0..infinity) + s_n = sum(a_k, k = 0..n) + + v, e = ...update_psum([s_0, s_1,..., s_k]) + + output: + v current estimate of the series A + e an error estimate which is simply the difference between the current + estimate and the last estimate. + """ + + if self.variant!="v": + if self.n==0: + self.run(S[0],S[0]) + while self.n>> from mpmath import mp + >>> AC = mp.cohen_alt() + >>> S, s, n = [], 0, 1 + >>> while 1: + ... s += -((-1) ** n) * mp.one / (n * n) + ... n += 1 + ... S.append(s) + ...
v, e = AC.update_psum(S) + ... if e < mp.eps: + ... break + ... if n > 1000: raise RuntimeError("iteration limit exceeded") + >>> print(mp.chop(v - mp.pi ** 2 / 12)) + 0.0 + + Here we compute the product `\prod_{n=1}^{\infty} \Gamma(1+1/(2n-1)) / \Gamma(1+1/(2n))`:: + + >>> A = [] + >>> AC = mp.cohen_alt() + >>> n = 1 + >>> while 1: + ... A.append( mp.loggamma(1 + mp.one / (2 * n - 1))) + ... A.append(-mp.loggamma(1 + mp.one / (2 * n))) + ... n += 1 + ... v, e = AC.update(A) + ... if e < mp.eps: + ... break + ... if n > 1000: raise RuntimeError("iteration limit exceeded") + >>> v = mp.exp(v) + >>> print(mp.chop(v - 1.06215090557106, tol = 1e-12)) + 0.0 + + ``cohen_alt`` is also accessible through the :func:`~mpmath.nsum` interface:: + + >>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a") + >>> print(mp.chop(v - mp.log(2))) + 0.0 + >>> v = mp.nsum(lambda n: (-1)**n / (2 * n + 1), [0, mp.inf], method = "a") + >>> print(mp.chop(v - mp.pi / 4)) + 0.0 + >>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a") + >>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1))) + 0.0 + + """ + + def __init__(self): + self.last=0 + + def update(self, A): + """ + This routine applies the convergence acceleration to the list of individual terms. + + A = sum(a_k, k = 0..infinity) + + v, e = ...update([a_0, a_1,..., a_k]) + + output: + v current estimate of the series A + e an error estimate which is simply the difference between the current + estimate and the last estimate. + """ + + n = len(A) + d = (3 + self.ctx.sqrt(8)) ** n + d = (d + 1 / d) / 2 + b = -self.ctx.one + c = -d + s = 0 + + for k in xrange(n): + c = b - c + if k % 2 == 0: + s = s + c * A[k] + else: + s = s - c * A[k] + b = 2 * (k + n) * (k - n) * b / ((2 * k + 1) * (k + self.ctx.one)) + + value = s / d + + err = abs(value - self.last) + self.last = value + + return value, err + + def update_psum(self, S): + """ + This routine applies the convergence acceleration to the list of partial sums. + + A = sum(a_k, k = 0..infinity) + s_n = sum(a_k ,k = 0..n) + + v, e = ...update_psum([s_0, s_1,..., s_k]) + + output: + v current estimate of the series A + e an error estimate which is simply the difference between the current + estimate and the last estimate. + """ + + n = len(S) + d = (3 + self.ctx.sqrt(8)) ** n + d = (d + 1 / d) / 2 + b = self.ctx.one + s = 0 + + for k in xrange(n): + b = 2 * (n + k) * (n - k) * b / ((2 * k + 1) * (k + self.ctx.one)) + s += b * S[k] + + value = s / d + + err = abs(value - self.last) + self.last = value + + return value, err + +def cohen_alt(ctx): + L = cohen_alt_class() + L.ctx = ctx + return L + +cohen_alt.__doc__ = cohen_alt_class.__doc__ +defun(cohen_alt) + + +@defun +def sumap(ctx, f, interval, integral=None, error=False): + r""" + Evaluates an infinite series of an analytic summand *f* using the + Abel-Plana formula + + .. math :: + + \sum_{k=0}^{\infty} f(k) = \int_0^{\infty} f(t) dt + \frac{1}{2} f(0) + + i \int_0^{\infty} \frac{f(it)-f(-it)}{e^{2\pi t}-1} dt. + + Unlike the Euler-Maclaurin formula (see :func:`~mpmath.sumem`), + the Abel-Plana formula does not require derivatives. However, + it only works when `|f(it)-f(-it)|` does not + increase too rapidly with `t`. 
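+
+    For instance, for a geometrically decaying summand `f(k) = c^k` with
+    `0 < c < 1`, one has `f(it)-f(-it) = 2i\sin(t\log c)`, which stays
+    bounded, so the second integrand decays like `e^{-2\pi t}` and the
+    formula applies.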
+ + **Examples** + + The Abel-Plana formula is particularly useful when the summand + decreases like a power of `k`; for example when the sum is a pure + zeta function:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> sumap(lambda k: 1/k**2.5, [1,inf]) + 1.34148725725091717975677 + >>> zeta(2.5) + 1.34148725725091717975677 + >>> sumap(lambda k: 1/(k+1j)**(2.5+2.5j), [1,inf]) + (-3.385361068546473342286084 - 0.7432082105196321803869551j) + >>> zeta(2.5+2.5j, 1+1j) + (-3.385361068546473342286084 - 0.7432082105196321803869551j) + + If the series is alternating, numerical quadrature along the real + line is likely to give poor results, so it is better to evaluate + the first term symbolically whenever possible: + + >>> n=3; z=-0.75 + >>> I = expint(n,-log(z)) + >>> chop(sumap(lambda k: z**k / k**n, [1,inf], integral=I)) + -0.6917036036904594510141448 + >>> polylog(n,z) + -0.6917036036904594510141448 + + """ + prec = ctx.prec + try: + ctx.prec += 10 + a, b = interval + if b != ctx.inf: + raise ValueError("b should be equal to ctx.inf") + g = lambda x: f(x+a) + if integral is None: + i1, err1 = ctx.quad(g, [0,ctx.inf], error=True) + else: + i1, err1 = integral, 0 + j = ctx.j + p = ctx.pi * 2 + if ctx._is_real_type(i1): + h = lambda t: -2 * ctx.im(g(j*t)) / ctx.expm1(p*t) + else: + h = lambda t: j*(g(j*t)-g(-j*t)) / ctx.expm1(p*t) + i2, err2 = ctx.quad(h, [0,ctx.inf], error=True) + err = err1+err2 + v = i1+i2+0.5*g(ctx.mpf(0)) + finally: + ctx.prec = prec + if error: + return +v, err + return +v + + +@defun +def sumem(ctx, f, interval, tol=None, reject=10, integral=None, + adiffs=None, bdiffs=None, verbose=False, error=False, + _fast_abort=False): + r""" + Uses the Euler-Maclaurin formula to compute an approximation accurate + to within ``tol`` (which defaults to the present epsilon) of the sum + + .. math :: + + S = \sum_{k=a}^b f(k) + + where `(a,b)` are given by ``interval`` and `a` or `b` may be + infinite. The approximation is + + .. math :: + + S \sim \int_a^b f(x) \,dx + \frac{f(a)+f(b)}{2} + + \sum_{k=1}^{\infty} \frac{B_{2k}}{(2k)!} + \left(f^{(2k-1)}(b)-f^{(2k-1)}(a)\right). + + The last sum in the Euler-Maclaurin formula is not generally + convergent (a notable exception is if `f` is a polynomial, in + which case Euler-Maclaurin actually gives an exact result). + + The summation is stopped as soon as the quotient between two + consecutive terms falls below *reject*. That is, by default + (*reject* = 10), the summation is continued as long as each + term adds at least one decimal. + + Although not convergent, convergence to a given tolerance can + often be "forced" if `b = \infty` by summing up to `a+N` and then + applying the Euler-Maclaurin formula to the sum over the range + `(a+N+1, \ldots, \infty)`. This procedure is implemented by + :func:`~mpmath.nsum`. + + By default numerical quadrature and differentiation is used. + If the symbolic values of the integral and endpoint derivatives + are known, it is more efficient to pass the value of the + integral explicitly as ``integral`` and the derivatives + explicitly as ``adiffs`` and ``bdiffs``. The derivatives + should be given as iterables that yield + `f(a), f'(a), f''(a), \ldots` (and the equivalent for `b`). 
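+
+    For instance, for `f(x) = 1/x^2` the derivatives are
+    `f^{(n)}(a) = (-1)^n (n+1)!\, a^{-(n+2)}`, which can be supplied as a
+    generator expression; the first example below does exactly this with
+    `a = 32`.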
+ + **Examples** + + Summation of an infinite series, with automatic and symbolic + integral and derivative values (the second should be much faster):: + + >>> from mpmath import * + >>> mp.dps = 50; mp.pretty = True + >>> sumem(lambda n: 1/n**2, [32, inf]) + 0.03174336652030209012658168043874142714132886413417 + >>> I = mpf(1)/32 + >>> D = adiffs=((-1)**n*fac(n+1)*32**(-2-n) for n in range(999)) + >>> sumem(lambda n: 1/n**2, [32, inf], integral=I, adiffs=D) + 0.03174336652030209012658168043874142714132886413417 + + An exact evaluation of a finite polynomial sum:: + + >>> sumem(lambda n: n**5-12*n**2+3*n, [-100000, 200000]) + 10500155000624963999742499550000.0 + >>> print(sum(n**5-12*n**2+3*n for n in range(-100000, 200001))) + 10500155000624963999742499550000 + + """ + tol = tol or +ctx.eps + interval = ctx._as_points(interval) + a = ctx.convert(interval[0]) + b = ctx.convert(interval[-1]) + err = ctx.zero + prev = 0 + M = 10000 + if a == ctx.ninf: adiffs = (0 for n in xrange(M)) + else: adiffs = adiffs or ctx.diffs(f, a) + if b == ctx.inf: bdiffs = (0 for n in xrange(M)) + else: bdiffs = bdiffs or ctx.diffs(f, b) + orig = ctx.prec + #verbose = 1 + try: + ctx.prec += 10 + s = ctx.zero + for k, (da, db) in enumerate(izip(adiffs, bdiffs)): + if k & 1: + term = (db-da) * ctx.bernoulli(k+1) / ctx.factorial(k+1) + mag = abs(term) + if verbose: + print("term", k, "magnitude =", ctx.nstr(mag)) + if k > 4 and mag < tol: + s += term + break + elif k > 4 and abs(prev) / mag < reject: + err += mag + if _fast_abort: + return [s, (s, err)][error] + if verbose: + print("Failed to converge") + break + else: + s += term + prev = term + # Endpoint correction + if a != ctx.ninf: s += f(a)/2 + if b != ctx.inf: s += f(b)/2 + # Tail integral + if verbose: + print("Integrating f(x) from x = %s to %s" % (ctx.nstr(a), ctx.nstr(b))) + if integral: + s += integral + else: + integral, ierr = ctx.quad(f, interval, error=True) + if verbose: + print("Integration error:", ierr) + s += integral + err += ierr + finally: + ctx.prec = orig + if error: + return s, err + else: + return s + +@defun +def adaptive_extrapolation(ctx, update, emfun, kwargs): + option = kwargs.get + if ctx._fixed_precision: + tol = option('tol', ctx.eps*2**10) + else: + tol = option('tol', ctx.eps/2**10) + verbose = option('verbose', False) + maxterms = option('maxterms', ctx.dps*10) + method = set(option('method', 'r+s').split('+')) + skip = option('skip', 0) + steps = iter(option('steps', xrange(10, 10**9, 10))) + strict = option('strict') + #steps = (10 for i in xrange(1000)) + summer=[] + if 'd' in method or 'direct' in method: + TRY_RICHARDSON = TRY_SHANKS = TRY_EULER_MACLAURIN = False + else: + TRY_RICHARDSON = ('r' in method) or ('richardson' in method) + TRY_SHANKS = ('s' in method) or ('shanks' in method) + TRY_EULER_MACLAURIN = ('e' in method) or \ + ('euler-maclaurin' in method) + + def init_levin(m): + variant = kwargs.get("levin_variant", "u") + if isinstance(variant, str): + if variant == "all": + variant = ["u", "v", "t"] + else: + variant = [variant] + for s in variant: + L = levin_class(method = m, variant = s) + L.ctx = ctx + L.name = m + "(" + s + ")" + summer.append(L) + + if ('l' in method) or ('levin' in method): + init_levin("levin") + + if ('sidi' in method): + init_levin("sidi") + + if ('a' in method) or ('alternating' in method): + L = cohen_alt_class() + L.ctx = ctx + L.name = "alternating" + summer.append(L) + + last_richardson_value = 0 + shanks_table = [] + index = 0 + step = 10 + partial = [] + best = ctx.zero + orig 
= ctx.prec + try: + if 'workprec' in kwargs: + ctx.prec = kwargs['workprec'] + elif TRY_RICHARDSON or TRY_SHANKS or len(summer)!=0: + ctx.prec = (ctx.prec+10) * 4 + else: + ctx.prec += 30 + while 1: + if index >= maxterms: + break + + # Get new batch of terms + try: + step = next(steps) + except StopIteration: + pass + if verbose: + print("-"*70) + print("Adding terms #%i-#%i" % (index, index+step)) + update(partial, xrange(index, index+step)) + index += step + + # Check direct error + best = partial[-1] + error = abs(best - partial[-2]) + if verbose: + print("Direct error: %s" % ctx.nstr(error)) + if error <= tol: + return best + + # Check each extrapolation method + if TRY_RICHARDSON: + value, maxc = ctx.richardson(partial) + # Convergence + richardson_error = abs(value - last_richardson_value) + if verbose: + print("Richardson error: %s" % ctx.nstr(richardson_error)) + # Convergence + if richardson_error <= tol: + return value + last_richardson_value = value + # Unreliable due to cancellation + if ctx.eps*maxc > tol: + if verbose: + print("Ran out of precision for Richardson") + TRY_RICHARDSON = False + if richardson_error < error: + error = richardson_error + best = value + if TRY_SHANKS: + shanks_table = ctx.shanks(partial, shanks_table, randomized=True) + row = shanks_table[-1] + if len(row) == 2: + est1 = row[-1] + shanks_error = 0 + else: + est1, maxc, est2 = row[-1], abs(row[-2]), row[-3] + shanks_error = abs(est1-est2) + if verbose: + print("Shanks error: %s" % ctx.nstr(shanks_error)) + if shanks_error <= tol: + return est1 + if ctx.eps*maxc > tol: + if verbose: + print("Ran out of precision for Shanks") + TRY_SHANKS = False + if shanks_error < error: + error = shanks_error + best = est1 + for L in summer: + est, lerror = L.update_psum(partial) + if verbose: + print("%s error: %s" % (L.name, ctx.nstr(lerror))) + if lerror <= tol: + return est + if lerror < error: + error = lerror + best = est + if TRY_EULER_MACLAURIN: + if ctx.almosteq(ctx.mpc(ctx.sign(partial[-1]) / ctx.sign(partial[-2])), -1): + if verbose: + print ("NOT using Euler-Maclaurin: the series appears" + " to be alternating, so numerical\n quadrature" + " will most likely fail") + TRY_EULER_MACLAURIN = False + else: + value, em_error = emfun(index, tol) + value += partial[-1] + if verbose: + print("Euler-Maclaurin error: %s" % ctx.nstr(em_error)) + if em_error <= tol: + return value + if em_error < error: + best = value + finally: + ctx.prec = orig + if strict: + raise ctx.NoConvergence + if verbose: + print("Warning: failed to converge to target accuracy") + return best + +@defun +def nsum(ctx, f, *intervals, **options): + r""" + Computes the sum + + .. math :: S = \sum_{k=a}^b f(k) + + where `(a, b)` = *interval*, and where `a = -\infty` and/or + `b = \infty` are allowed, or more generally + + .. math :: S = \sum_{k_1=a_1}^{b_1} \cdots + \sum_{k_n=a_n}^{b_n} f(k_1,\ldots,k_n) + + if multiple intervals are given. + + Two examples of infinite series that can be summed by :func:`~mpmath.nsum`, + where the first converges rapidly and the second converges slowly, + are:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> nsum(lambda n: 1/fac(n), [0, inf]) + 2.71828182845905 + >>> nsum(lambda n: 1/n**2, [1, inf]) + 1.64493406684823 + + When appropriate, :func:`~mpmath.nsum` applies convergence acceleration to + accurately estimate the sums of slowly convergent series. 
+    If the series is finite, :func:`~mpmath.nsum` currently does not
+    attempt to perform any extrapolation, and simply calls
+    :func:`~mpmath.fsum`.
+
+    Multidimensional infinite series are reduced to a single-dimensional
+    series over expanding hypercubes; if both infinite and finite dimensions
+    are present, the finite ranges are moved innermost. For more advanced
+    control over the summation order, use nested calls to :func:`~mpmath.nsum`,
+    or manually rewrite the sum as a single-dimensional series.
+
+    **Options**
+
+    *tol*
+        Desired maximum final error. Defaults roughly to the
+        epsilon of the working precision.
+
+    *method*
+        Which summation algorithm to use (described below).
+        Default: ``'richardson+shanks'``.
+
+    *maxterms*
+        Cancel after at most this many terms. Default: 10*dps.
+
+    *steps*
+        An iterable giving the number of terms to add between
+        each extrapolation attempt. The default sequence is
+        [10, 20, 30, 40, ...]. For example, if you know that
+        approximately 100 terms will be required, efficiency might be
+        improved by setting this to [100, 10]. Then the first
+        extrapolation will be performed after 100 terms, the second
+        after 110, etc.
+
+    *verbose*
+        Print details about progress.
+
+    *ignore*
+        If enabled, any term that raises ``ArithmeticError``
+        or ``ValueError`` (e.g. through division by zero) is replaced
+        by a zero. This is convenient for lattice sums with
+        a singular term near the origin.
+
+    **Methods**
+
+    Unfortunately, an algorithm that can efficiently sum any infinite
+    series does not exist. :func:`~mpmath.nsum` implements several different
+    algorithms that each work well in different cases. The *method*
+    keyword argument selects a method.
+
+    The default method is ``'r+s'``, i.e. both Richardson extrapolation
+    and the Shanks transformation are attempted. A slower method that
+    handles more cases is ``'r+s+e'``. For very high precision
+    summation, or if the summation needs to be fast (for example if
+    multiple sums need to be evaluated), it is a good idea to
+    investigate which method works best and use only that one.
+
+    ``'richardson'`` / ``'r'``:
+        Uses Richardson extrapolation. Provides useful extrapolation
+        when `f(k) \sim P(k)/Q(k)` or when `f(k) \sim (-1)^k P(k)/Q(k)`
+        for polynomials `P` and `Q`. See :func:`~mpmath.richardson` for
+        additional information.
+
+    ``'shanks'`` / ``'s'``:
+        Uses the Shanks transformation. Typically provides useful
+        extrapolation when `f(k) \sim c^k` or when successive terms
+        alternate signs. Is able to sum some divergent series.
+        See :func:`~mpmath.shanks` for additional information.
+
+    ``'levin'`` / ``'l'``:
+        Uses the Levin transformation. It performs better than the Shanks
+        transformation for logarithmically convergent or alternating
+        divergent series. The ``'levin_variant'`` keyword selects the
+        variant. Valid choices are "u", "t", "v" and "all", where "all"
+        uses the three variants u, t and v simultaneously (this is good
+        for performance comparisons in conjunction with ``verbose=True``).
+        Instead of the Levin transform one can also use the Sidi-S
+        transform by selecting the method ``'sidi'``.
+        See :func:`~mpmath.levin` for additional details.
+
+    ``'alternating'`` / ``'a'``:
+        This is the convergence acceleration of alternating series
+        developed by Cohen, Rodriguez Villegas and Zagier.
+        See :func:`~mpmath.cohen_alt` for additional details.
+
+    ``'euler-maclaurin'`` / ``'e'``:
+        Uses the Euler-Maclaurin summation formula to approximate
+        the remainder sum by an integral.
This requires high-order + numerical derivatives and numerical integration. The advantage + of this algorithm is that it works regardless of the + decay rate of `f`, as long as `f` is sufficiently smooth. + See :func:`~mpmath.sumem` for additional information. + + ``'direct'`` / ``'d'``: + Does not perform any extrapolation. This can be used + (and should only be used for) rapidly convergent series. + The summation automatically stops when the terms + decrease below the target tolerance. + + **Basic examples** + + A finite sum:: + + >>> nsum(lambda k: 1/k, [1, 6]) + 2.45 + + Summation of a series going to negative infinity and a doubly + infinite series:: + + >>> nsum(lambda k: 1/k**2, [-inf, -1]) + 1.64493406684823 + >>> nsum(lambda k: 1/(1+k**2), [-inf, inf]) + 3.15334809493716 + + :func:`~mpmath.nsum` handles sums of complex numbers:: + + >>> nsum(lambda k: (0.5+0.25j)**k, [0, inf]) + (1.6 + 0.8j) + + The following sum converges very rapidly, so it is most + efficient to sum it by disabling convergence acceleration:: + + >>> mp.dps = 1000 + >>> a = nsum(lambda k: -(-1)**k * k**2 / fac(2*k), [1, inf], + ... method='direct') + >>> b = (cos(1)+sin(1))/4 + >>> abs(a-b) < mpf('1e-998') + True + + **Examples with Richardson extrapolation** + + Richardson extrapolation works well for sums over rational + functions, as well as their alternating counterparts:: + + >>> mp.dps = 50 + >>> nsum(lambda k: 1 / k**3, [1, inf], + ... method='richardson') + 1.2020569031595942853997381615114499907649862923405 + >>> zeta(3) + 1.2020569031595942853997381615114499907649862923405 + + >>> nsum(lambda n: (n + 3)/(n**3 + n**2), [1, inf], + ... method='richardson') + 2.9348022005446793094172454999380755676568497036204 + >>> pi**2/2-2 + 2.9348022005446793094172454999380755676568497036204 + + >>> nsum(lambda k: (-1)**k / k**3, [1, inf], + ... method='richardson') + -0.90154267736969571404980362113358749307373971925537 + >>> -3*zeta(3)/4 + -0.90154267736969571404980362113358749307373971925538 + + **Examples with Shanks transformation** + + The Shanks transformation works well for geometric series + and typically provides excellent acceleration for Taylor + series near the border of their disk of convergence. + Here we apply it to a series for `\log(2)`, which can be + seen as the Taylor series for `\log(1+x)` with `x = 1`:: + + >>> nsum(lambda k: -(-1)**k/k, [1, inf], + ... method='shanks') + 0.69314718055994530941723212145817656807550013436025 + >>> log(2) + 0.69314718055994530941723212145817656807550013436025 + + Here we apply it to a slowly convergent geometric series:: + + >>> nsum(lambda k: mpf('0.995')**k, [0, inf], + ... method='shanks') + 200.0 + + Finally, Shanks' method works very well for alternating series + where `f(k) = (-1)^k g(k)`, and often does so regardless of + the exact decay rate of `g(k)`:: + + >>> mp.dps = 15 + >>> nsum(lambda k: (-1)**(k+1) / k**1.5, [1, inf], + ... method='shanks') + 0.765147024625408 + >>> (2-sqrt(2))*zeta(1.5)/2 + 0.765147024625408 + + The following slowly convergent alternating series has no known + closed-form value. Evaluating the sum a second time at higher + precision indicates that the value is probably correct:: + + >>> nsum(lambda k: (-1)**k / log(k), [2, inf], + ... method='shanks') + 0.924299897222939 + >>> mp.dps = 30 + >>> nsum(lambda k: (-1)**k / log(k), [2, inf], + ... 
method='shanks')
+    0.92429989722293885595957018136
+
+    **Examples with Levin transformation**
+
+    The following example calculates Euler's constant as the constant term in
+    the Laurent expansion of zeta(s) at s=1. This sum converges extremely
+    slowly because of the logarithmic convergence behaviour of the Dirichlet
+    series for zeta.
+
+    >>> mp.dps = 30
+    >>> z = mp.mpf(10) ** (-10)
+    >>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "levin") - 1 / z
+    >>> print(mp.chop(a - mp.euler, tol = 1e-10))
+    0.0
+
+    Now we sum the zeta function outside its range of convergence
+    (note: this does not work at the negative integers!):
+
+    >>> mp.dps = 15
+    >>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v")
+    >>> print(mp.chop(w - mp.zeta(-2-3j)))
+    0.0
+
+    The next example resums an asymptotic series expansion of an integral
+    related to the exponential integral.
+
+    >>> mp.dps = 15
+    >>> z = mp.mpf(10)
+    >>> # exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf])
+    >>> exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral
+    >>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t")
+    >>> print(mp.chop(w - exact))
+    0.0
+
+    The following highly divergent asymptotic expansion needs some care.
+    First, we need a copious amount of working precision. Second, the step
+    size must not be chosen too large, otherwise nsum may miss the point
+    where the Levin transform converges and reach the region where only
+    numerical garbage is produced due to cancellation.
+
+    >>> mp.dps = 15
+    >>> z = mp.mpf(2)
+    >>> # exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi)
+    >>> exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral
+    >>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)),
+    ...   [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)])
+    >>> print(mp.chop(w - exact))
+    0.0
+
+    The hypergeometric series can also be summed outside its range of
+    convergence:
+
+    >>> mp.dps = 15
+    >>> z = 2 + 1j
+    >>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z)
+    >>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n))
+    >>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)])
+    >>> print(mp.chop(exact-v))
+    0.0
+
+    **Examples with Cohen's alternating series resummation**
+
+    The next example sums the alternating zeta function:
+
+    >>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a")
+    >>> print(mp.chop(v - mp.log(2)))
+    0.0
+
+    The derivative of the alternating zeta function outside its range of
+    convergence:
+
+    >>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a")
+    >>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1)))
+    0.0
+
+    **Examples with Euler-Maclaurin summation**
+
+    The sum in the following example has the wrong rate of convergence
+    for either Richardson or Shanks to be effective.
+ + >>> f = lambda k: log(k)/k**2.5 + >>> mp.dps = 15 + >>> nsum(f, [1, inf], method='euler-maclaurin') + 0.38734195032621 + >>> -diff(zeta, 2.5) + 0.38734195032621 + + Increasing ``steps`` improves speed at higher precision:: + + >>> mp.dps = 50 + >>> nsum(f, [1, inf], method='euler-maclaurin', steps=[250]) + 0.38734195032620997271199237593105101319948228874688 + >>> -diff(zeta, 2.5) + 0.38734195032620997271199237593105101319948228874688 + + **Divergent series** + + The Shanks transformation is able to sum some *divergent* + series. In particular, it is often able to sum Taylor series + beyond their radius of convergence (this is due to a relation + between the Shanks transformation and Pade approximations; + see :func:`~mpmath.pade` for an alternative way to evaluate divergent + Taylor series). Furthermore the Levin-transform examples above + contain some divergent series resummation. + + Here we apply it to `\log(1+x)` far outside the region of + convergence:: + + >>> mp.dps = 50 + >>> nsum(lambda k: -(-9)**k/k, [1, inf], + ... method='shanks') + 2.3025850929940456840179914546843642076011014886288 + >>> log(10) + 2.3025850929940456840179914546843642076011014886288 + + A particular type of divergent series that can be summed + using the Shanks transformation is geometric series. + The result is the same as using the closed-form formula + for an infinite geometric series:: + + >>> mp.dps = 15 + >>> for n in range(-8, 8): + ... if n == 1: + ... continue + ... print("%s %s %s" % (mpf(n), mpf(1)/(1-n), + ... nsum(lambda k: n**k, [0, inf], method='shanks'))) + ... + -8.0 0.111111111111111 0.111111111111111 + -7.0 0.125 0.125 + -6.0 0.142857142857143 0.142857142857143 + -5.0 0.166666666666667 0.166666666666667 + -4.0 0.2 0.2 + -3.0 0.25 0.25 + -2.0 0.333333333333333 0.333333333333333 + -1.0 0.5 0.5 + 0.0 1.0 1.0 + 2.0 -1.0 -1.0 + 3.0 -0.5 -0.5 + 4.0 -0.333333333333333 -0.333333333333333 + 5.0 -0.25 -0.25 + 6.0 -0.2 -0.2 + 7.0 -0.166666666666667 -0.166666666666667 + + **Multidimensional sums** + + Any combination of finite and infinite ranges is allowed for the + summation indices:: + + >>> mp.dps = 15 + >>> nsum(lambda x,y: x+y, [2,3], [4,5]) + 28.0 + >>> nsum(lambda x,y: x/2**y, [1,3], [1,inf]) + 6.0 + >>> nsum(lambda x,y: y/2**x, [1,inf], [1,3]) + 6.0 + >>> nsum(lambda x,y,z: z/(2**x*2**y), [1,inf], [1,inf], [3,4]) + 7.0 + >>> nsum(lambda x,y,z: y/(2**x*2**z), [1,inf], [3,4], [1,inf]) + 7.0 + >>> nsum(lambda x,y,z: x/(2**z*2**y), [3,4], [1,inf], [1,inf]) + 7.0 + + Some nice examples of double series with analytic solutions or + reductions to single-dimensional series (see [1]):: + + >>> nsum(lambda m, n: 1/2**(m*n), [1,inf], [1,inf]) + 1.60669515241529 + >>> nsum(lambda n: 1/(2**n-1), [1,inf]) + 1.60669515241529 + + >>> nsum(lambda i,j: (-1)**(i+j)/(i**2+j**2), [1,inf], [1,inf]) + 0.278070510848213 + >>> pi*(pi-3*ln2)/12 + 0.278070510848213 + + >>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**2, [1,inf], [1,inf]) + 0.129319852864168 + >>> altzeta(2) - altzeta(1) + 0.129319852864168 + + >>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**3, [1,inf], [1,inf]) + 0.0790756439455825 + >>> altzeta(3) - altzeta(2) + 0.0790756439455825 + + >>> nsum(lambda m,n: m**2*n/(3**m*(n*3**m+m*3**n)), + ... [1,inf], [1,inf]) + 0.28125 + >>> mpf(9)/32 + 0.28125 + + >>> nsum(lambda i,j: fac(i-1)*fac(j-1)/fac(i+j), + ... [1,inf], [1,inf], workprec=400) + 1.64493406684823 + >>> zeta(2) + 1.64493406684823 + + A hard example of a multidimensional sum is the Madelung constant + in three dimensions (see [2]). 
The defining sum converges very + slowly and only conditionally, so :func:`~mpmath.nsum` is lucky to + obtain an accurate value through convergence acceleration. The + second evaluation below uses a much more efficient, rapidly + convergent 2D sum:: + + >>> nsum(lambda x,y,z: (-1)**(x+y+z)/(x*x+y*y+z*z)**0.5, + ... [-inf,inf], [-inf,inf], [-inf,inf], ignore=True) + -1.74756459463318 + >>> nsum(lambda x,y: -12*pi*sech(0.5*pi * \ + ... sqrt((2*x+1)**2+(2*y+1)**2))**2, [0,inf], [0,inf]) + -1.74756459463318 + + Another example of a lattice sum in 2D:: + + >>> nsum(lambda x,y: (-1)**(x+y) / (x**2+y**2), [-inf,inf], + ... [-inf,inf], ignore=True) + -2.1775860903036 + >>> -pi*ln2 + -2.1775860903036 + + An example of an Eisenstein series:: + + >>> nsum(lambda m,n: (m+n*1j)**(-4), [-inf,inf], [-inf,inf], + ... ignore=True) + (3.1512120021539 + 0.0j) + + **References** + + 1. [Weisstein]_ http://mathworld.wolfram.com/DoubleSeries.html, + 2. [Weisstein]_ http://mathworld.wolfram.com/MadelungConstants.html + + """ + infinite, g = standardize(ctx, f, intervals, options) + if not infinite: + return +g() + + def update(partial_sums, indices): + if partial_sums: + psum = partial_sums[-1] + else: + psum = ctx.zero + for k in indices: + psum = psum + g(ctx.mpf(k)) + partial_sums.append(psum) + + prec = ctx.prec + + def emfun(point, tol): + workprec = ctx.prec + ctx.prec = prec + 10 + v = ctx.sumem(g, [point, ctx.inf], tol, error=1) + ctx.prec = workprec + return v + + return +ctx.adaptive_extrapolation(update, emfun, options) + + +def wrapsafe(f): + def g(*args): + try: + return f(*args) + except (ArithmeticError, ValueError): + return 0 + return g + +def standardize(ctx, f, intervals, options): + if options.get("ignore"): + f = wrapsafe(f) + finite = [] + infinite = [] + for k, points in enumerate(intervals): + a, b = ctx._as_points(points) + if b < a: + return False, (lambda: ctx.zero) + if a == ctx.ninf or b == ctx.inf: + infinite.append((k, (a,b))) + else: + finite.append((k, (int(a), int(b)))) + if finite: + f = fold_finite(ctx, f, finite) + if not infinite: + return False, lambda: f(*([0]*len(intervals))) + if infinite: + f = standardize_infinite(ctx, f, infinite) + f = fold_infinite(ctx, f, infinite) + args = [0] * len(intervals) + d = infinite[0][0] + def g(k): + args[d] = k + return f(*args) + return True, g + +# backwards compatible itertools.product +def cartesian_product(args): + pools = map(tuple, args) + result = [[]] + for pool in pools: + result = [x+[y] for x in result for y in pool] + for prod in result: + yield tuple(prod) + +def fold_finite(ctx, f, intervals): + if not intervals: + return f + indices = [v[0] for v in intervals] + points = [v[1] for v in intervals] + ranges = [xrange(a, b+1) for (a,b) in points] + def g(*args): + args = list(args) + s = ctx.zero + for xs in cartesian_product(ranges): + for dim, x in zip(indices, xs): + args[dim] = ctx.mpf(x) + s += f(*args) + return s + #print "Folded finite", indices + return g + +# Standardize each interval to [0,inf] +def standardize_infinite(ctx, f, intervals): + if not intervals: + return f + dim, [a,b] = intervals[-1] + if a == ctx.ninf: + if b == ctx.inf: + def g(*args): + args = list(args) + k = args[dim] + if k: + s = f(*args) + args[dim] = -k + s += f(*args) + return s + else: + return f(*args) + else: + def g(*args): + args = list(args) + args[dim] = b - args[dim] + return f(*args) + else: + def g(*args): + args = list(args) + args[dim] += a + return f(*args) + #print "Standardized infinity along dimension", dim, a, b + return 
standardize_infinite(ctx, g, intervals[:-1]) + +def fold_infinite(ctx, f, intervals): + if len(intervals) < 2: + return f + dim1 = intervals[-2][0] + dim2 = intervals[-1][0] + # Assume intervals are [0,inf] x [0,inf] x ... + def g(*args): + args = list(args) + #args.insert(dim2, None) + n = int(args[dim1]) + s = ctx.zero + #y = ctx.mpf(n) + args[dim2] = ctx.mpf(n) #y + for x in xrange(n+1): + args[dim1] = ctx.mpf(x) + s += f(*args) + args[dim1] = ctx.mpf(n) #ctx.mpf(n) + for y in xrange(n): + args[dim2] = ctx.mpf(y) + s += f(*args) + return s + #print "Folded infinite from", len(intervals), "to", (len(intervals)-1) + return fold_infinite(ctx, g, intervals[:-1]) + +@defun +def nprod(ctx, f, interval, nsum=False, **kwargs): + r""" + Computes the product + + .. math :: + + P = \prod_{k=a}^b f(k) + + where `(a, b)` = *interval*, and where `a = -\infty` and/or + `b = \infty` are allowed. + + By default, :func:`~mpmath.nprod` uses the same extrapolation methods as + :func:`~mpmath.nsum`, except applied to the partial products rather than + partial sums, and the same keyword options as for :func:`~mpmath.nsum` are + supported. If ``nsum=True``, the product is instead computed via + :func:`~mpmath.nsum` as + + .. math :: + + P = \exp\left( \sum_{k=a}^b \log(f(k)) \right). + + This is slower, but can sometimes yield better results. It is + also required (and used automatically) when Euler-Maclaurin + summation is requested. + + **Examples** + + A simple finite product:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> nprod(lambda k: k, [1, 4]) + 24.0 + + A large number of infinite products have known exact values, + and can therefore be used as a reference. Most of the following + examples are taken from MathWorld [1]. + + A few infinite products with simple values are:: + + >>> 2*nprod(lambda k: (4*k**2)/(4*k**2-1), [1, inf]) + 3.141592653589793238462643 + >>> nprod(lambda k: (1+1/k)**2/(1+2/k), [1, inf]) + 2.0 + >>> nprod(lambda k: (k**3-1)/(k**3+1), [2, inf]) + 0.6666666666666666666666667 + >>> nprod(lambda k: (1-1/k**2), [2, inf]) + 0.5 + + Next, several more infinite products with more complicated + values:: + + >>> nprod(lambda k: exp(1/k**2), [1, inf]); exp(pi**2/6) + 5.180668317897115748416626 + 5.180668317897115748416626 + + >>> nprod(lambda k: (k**2-1)/(k**2+1), [2, inf]); pi*csch(pi) + 0.2720290549821331629502366 + 0.2720290549821331629502366 + + >>> nprod(lambda k: (k**4-1)/(k**4+1), [2, inf]) + 0.8480540493529003921296502 + >>> pi*sinh(pi)/(cosh(sqrt(2)*pi)-cos(sqrt(2)*pi)) + 0.8480540493529003921296502 + + >>> nprod(lambda k: (1+1/k+1/k**2)**2/(1+2/k+3/k**2), [1, inf]) + 1.848936182858244485224927 + >>> 3*sqrt(2)*cosh(pi*sqrt(3)/2)**2*csch(pi*sqrt(2))/pi + 1.848936182858244485224927 + + >>> nprod(lambda k: (1-1/k**4), [2, inf]); sinh(pi)/(4*pi) + 0.9190194775937444301739244 + 0.9190194775937444301739244 + + >>> nprod(lambda k: (1-1/k**6), [2, inf]) + 0.9826842777421925183244759 + >>> (1+cosh(pi*sqrt(3)))/(12*pi**2) + 0.9826842777421925183244759 + + >>> nprod(lambda k: (1+1/k**2), [2, inf]); sinh(pi)/(2*pi) + 1.838038955187488860347849 + 1.838038955187488860347849 + + >>> nprod(lambda n: (1+1/n)**n * exp(1/(2*n)-1), [1, inf]) + 1.447255926890365298959138 + >>> exp(1+euler/2)/sqrt(2*pi) + 1.447255926890365298959138 + + The following two products are equivalent and can be evaluated in + terms of a Jacobi theta function. 
Pi can be replaced by any value + (as long as convergence is preserved):: + + >>> nprod(lambda k: (1-pi**-k)/(1+pi**-k), [1, inf]) + 0.3838451207481672404778686 + >>> nprod(lambda k: tanh(k*log(pi)/2), [1, inf]) + 0.3838451207481672404778686 + >>> jtheta(4,0,1/pi) + 0.3838451207481672404778686 + + This product does not have a known closed form value:: + + >>> nprod(lambda k: (1-1/2**k), [1, inf]) + 0.2887880950866024212788997 + + A product taken from `-\infty`:: + + >>> nprod(lambda k: 1-k**(-3), [-inf,-2]) + 0.8093965973662901095786805 + >>> cosh(pi*sqrt(3)/2)/(3*pi) + 0.8093965973662901095786805 + + A doubly infinite product:: + + >>> nprod(lambda k: exp(1/(1+k**2)), [-inf, inf]) + 23.41432688231864337420035 + >>> exp(pi/tanh(pi)) + 23.41432688231864337420035 + + A product requiring the use of Euler-Maclaurin summation to compute + an accurate value:: + + >>> nprod(lambda k: (1-1/k**2.5), [2, inf], method='e') + 0.696155111336231052898125 + + **References** + + 1. [Weisstein]_ http://mathworld.wolfram.com/InfiniteProduct.html + + """ + if nsum or ('e' in kwargs.get('method', '')): + orig = ctx.prec + try: + # TODO: we are evaluating log(1+eps) -> eps, which is + # inaccurate. This currently works because nsum greatly + # increases the working precision. But we should be + # more intelligent and handle the precision here. + ctx.prec += 10 + v = ctx.nsum(lambda n: ctx.ln(f(n)), interval, **kwargs) + finally: + ctx.prec = orig + return +ctx.exp(v) + + a, b = ctx._as_points(interval) + if a == ctx.ninf: + if b == ctx.inf: + return f(0) * ctx.nprod(lambda k: f(-k) * f(k), [1, ctx.inf], **kwargs) + return ctx.nprod(f, [-b, ctx.inf], **kwargs) + elif b != ctx.inf: + return ctx.fprod(f(ctx.mpf(k)) for k in xrange(int(a), int(b)+1)) + + a = int(a) + + def update(partial_products, indices): + if partial_products: + pprod = partial_products[-1] + else: + pprod = ctx.one + for k in indices: + pprod = pprod * f(a + ctx.mpf(k)) + partial_products.append(pprod) + + return +ctx.adaptive_extrapolation(update, None, kwargs) + + +@defun +def limit(ctx, f, x, direction=1, exp=False, **kwargs): + r""" + Computes an estimate of the limit + + .. math :: + + \lim_{t \to x} f(t) + + where `x` may be finite or infinite. + + For finite `x`, :func:`~mpmath.limit` evaluates `f(x + d/n)` for + consecutive integer values of `n`, where the approach direction + `d` may be specified using the *direction* keyword argument. + For infinite `x`, :func:`~mpmath.limit` evaluates values of + `f(\mathrm{sign}(x) \cdot n)`. + + If the approach to the limit is not sufficiently fast to give + an accurate estimate directly, :func:`~mpmath.limit` attempts to find + the limit using Richardson extrapolation or the Shanks + transformation. You can select between these methods using + the *method* keyword (see documentation of :func:`~mpmath.nsum` for + more information). + + **Options** + + The following options are available with essentially the + same meaning as for :func:`~mpmath.nsum`: *tol*, *method*, *maxterms*, + *steps*, *verbose*. + + If the option *exp=True* is set, `f` will be + sampled at exponentially spaced points `n = 2^1, 2^2, 2^3, \ldots` + instead of the linearly spaced points `n = 1, 2, 3, \ldots`. + This can sometimes improve the rate of convergence so that + :func:`~mpmath.limit` may return a more accurate answer (and faster). 
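+
+    For instance, if `f(n) = L + c/\sqrt{n}`, linear sampling gives errors
+    decaying only like `n^{-1/2}`, whereas exponential sampling gives
+    `f(2^k) = L + c \cdot 2^{-k/2}`, a geometrically decaying error term
+    that the extrapolation schemes handle much better.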
+ However, do note that this can only be used if `f` + supports fast and accurate evaluation for arguments that + are extremely close to the limit point (or if infinite, + very large arguments). + + **Examples** + + A basic evaluation of a removable singularity:: + + >>> from mpmath import * + >>> mp.dps = 30; mp.pretty = True + >>> limit(lambda x: (x-sin(x))/x**3, 0) + 0.166666666666666666666666666667 + + Computing the exponential function using its limit definition:: + + >>> limit(lambda n: (1+3/n)**n, inf) + 20.0855369231876677409285296546 + >>> exp(3) + 20.0855369231876677409285296546 + + A limit for `\pi`:: + + >>> f = lambda n: 2**(4*n+1)*fac(n)**4/(2*n+1)/fac(2*n)**2 + >>> limit(f, inf) + 3.14159265358979323846264338328 + + Calculating the coefficient in Stirling's formula:: + + >>> limit(lambda n: fac(n) / (sqrt(n)*(n/e)**n), inf) + 2.50662827463100050241576528481 + >>> sqrt(2*pi) + 2.50662827463100050241576528481 + + Evaluating Euler's constant `\gamma` using the limit representation + + .. math :: + + \gamma = \lim_{n \rightarrow \infty } \left[ \left( + \sum_{k=1}^n \frac{1}{k} \right) - \log(n) \right] + + (which converges notoriously slowly):: + + >>> f = lambda n: sum([mpf(1)/k for k in range(1,int(n)+1)]) - log(n) + >>> limit(f, inf) + 0.577215664901532860606512090082 + >>> +euler + 0.577215664901532860606512090082 + + With default settings, the following limit converges too slowly + to be evaluated accurately. Changing to exponential sampling + however gives a perfect result:: + + >>> f = lambda x: sqrt(x**3+x**2)/(sqrt(x**3)+x) + >>> limit(f, inf) + 0.992831158558330281129249686491 + >>> limit(f, inf, exp=True) + 1.0 + + """ + + if ctx.isinf(x): + direction = ctx.sign(x) + g = lambda k: f(ctx.mpf(k+1)*direction) + else: + direction *= ctx.one + g = lambda k: f(x + direction/(k+1)) + if exp: + h = g + g = lambda k: h(2**k) + + def update(values, indices): + for k in indices: + values.append(g(k+1)) + + # XXX: steps used by nsum don't work well + if not 'steps' in kwargs: + kwargs['steps'] = [10] + + return +ctx.adaptive_extrapolation(update, None, kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/inverselaplace.py b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/inverselaplace.py new file mode 100644 index 0000000000000000000000000000000000000000..d2206b05c1601ee781b09dcbedf3c0fcd89cfa59 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/inverselaplace.py @@ -0,0 +1,973 @@ +# contributed to mpmath by Kristopher L. Kuhlman, February 2017 +# contributed to mpmath by Guillermo Navas-Palencia, February 2022 + +class InverseLaplaceTransform(object): + r""" + Inverse Laplace transform methods are implemented using this + class, in order to simplify the code and provide a common + infrastructure. + + Implement a custom inverse Laplace transform algorithm by + subclassing :class:`InverseLaplaceTransform` and implementing the + appropriate methods. The subclass can then be used by + :func:`~mpmath.invertlaplace` by passing it as the *method* + argument. + """ + + def __init__(self, ctx): + self.ctx = ctx + + def calc_laplace_parameter(self, t, **kwargs): + r""" + Determine the vector of Laplace parameter values needed for an + algorithm, this will depend on the choice of algorithm (de + Hoog is default), the algorithm-specific parameters passed (or + default ones), and desired time. 
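+
+        Concrete implementations are expected to store the vector of
+        abscissa in ``self.p`` and to raise the working precision of the
+        calling context as needed; see the subclasses for details.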
+ """ + raise NotImplementedError + + def calc_time_domain_solution(self, fp): + r""" + Compute the time domain solution, after computing the + Laplace-space function evaluations at the abscissa required + for the algorithm. Abscissa computed for one algorithm are + typically not useful for another algorithm. + """ + raise NotImplementedError + + +class FixedTalbot(InverseLaplaceTransform): + + def calc_laplace_parameter(self, t, **kwargs): + r"""The "fixed" Talbot method deforms the Bromwich contour towards + `-\infty` in the shape of a parabola. Traditionally the Talbot + algorithm has adjustable parameters, but the "fixed" version + does not. The `r` parameter could be passed in as a parameter, + if you want to override the default given by (Abate & Valko, + 2004). + + The Laplace parameter is sampled along a parabola opening + along the negative imaginary axis, with the base of the + parabola along the real axis at + `p=\frac{r}{t_\mathrm{max}}`. As the number of terms used in + the approximation (degree) grows, the abscissa required for + function evaluation tend towards `-\infty`, requiring high + precision to prevent overflow. If any poles, branch cuts or + other singularities exist such that the deformed Bromwich + contour lies to the left of the singularity, the method will + fail. + + **Optional arguments** + + :class:`~mpmath.calculus.inverselaplace.FixedTalbot.calc_laplace_parameter` + recognizes the following keywords + + *tmax* + maximum time associated with vector of times + (typically just the time requested) + *degree* + integer order of approximation (M = number of terms) + *r* + abscissa for `p_0` (otherwise computed using rule + of thumb `2M/5`) + + The working precision will be increased according to a rule of + thumb. If 'degree' is not specified, the working precision and + degree are chosen to hopefully achieve the dps of the calling + context. If 'degree' is specified, the working precision is + chosen to achieve maximum resulting precision for the + specified degree. + + .. math :: + + p_0=\frac{r}{t} + + .. math :: + + p_i=\frac{i r \pi}{Mt_\mathrm{max}}\left[\cot\left( + \frac{i\pi}{M}\right) + j \right] \qquad 1\le i 0: + self.degree += 1 + + M = self.degree + + # this is adjusting the dps of the calling context + # hopefully the caller doesn't monkey around with it + # between calling this routine and calc_time_domain_solution() + self.dps_orig = self.ctx.dps + self.ctx.dps = self.dps_goal + + self.V = self._coeff() + self.p = self.ctx.matrix(self.ctx.arange(1, M+1))*self.ctx.ln2/self.t + + # NB: p is real (mpf) + + def _coeff(self): + r"""Salzer summation weights (aka, "Stehfest coefficients") + only depend on the approximation order (M) and the precision""" + + M = self.degree + M2 = int(M/2) # checked earlier that M is even + + V = self.ctx.matrix(M, 1) + + # Salzer summation weights + # get very large in magnitude and oscillate in sign, + # if the precision is not high enough, there will be + # catastrophic cancellation + for k in range(1, M+1): + z = self.ctx.matrix(min(k, M2)+1, 1) + for j in range(int((k+1)/2), min(k, M2)+1): + z[j] = (self.ctx.power(j, M2)*self.ctx.fac(2*j)/ + (self.ctx.fac(M2-j)*self.ctx.fac(j)* + self.ctx.fac(j-1)*self.ctx.fac(k-j)* + self.ctx.fac(2*j-k))) + V[k-1] = self.ctx.power(-1, k+M2)*self.ctx.fsum(z) + + return V + + def calc_time_domain_solution(self, fp, t, manual_prec=False): + r"""Compute time-domain Stehfest algorithm solution. + + .. 
math ::
+
+            f(t,M) = \frac{\log 2}{t} \sum_{k=1}^{M} V_k \bar{f}\left( p_k \right)
+
+        where
+
+        .. math ::
+
+            V_k = (-1)^{k + N/2} \sum^{\min(k,N/2)}_{i=\lfloor(k+1)/2 \rfloor}
+            \frac{i^{\frac{N}{2}}(2i)!}{\left(\frac{N}{2}-i \right)! \, i! \,
+            \left(i-1 \right)! \, \left(k-i\right)! \, \left(2i-k \right)!}
+
+        As the degree increases, the abscissa (`p_k`) increase only
+        linearly towards `\infty`, but the Stehfest coefficients
+        (`V_k`) alternate in sign and increase rapidly in magnitude,
+        requiring high precision to prevent overflow or loss of
+        significance when evaluating the sum.
+
+        **References**
+
+        1. Widder, D. (1941). *The Laplace Transform*. Princeton.
+        2. Stehfest, H. (1970). Algorithm 368: numerical inversion of
+           Laplace transforms. *Communications of the ACM* 13(1):47-49,
+           http://dx.doi.org/10.1145/361953.361969
+
+        """
+
+        # required
+        self.t = self.ctx.convert(t)
+
+        # assume fp was computed from p matrix returned from
+        # calc_laplace_parameter(), so is already
+        # a list or matrix of mpmath 'mpf' types
+
+        result = self.ctx.fdot(self.V, fp)*self.ctx.ln2/self.t
+
+        # setting dps back to value when calc_laplace_parameter was called
+        if not manual_prec:
+            self.ctx.dps = self.dps_orig
+
+        # ignore any small imaginary part
+        return result.real
+
+
+# ****************************************
+
+class deHoog(InverseLaplaceTransform):
+
+    def calc_laplace_parameter(self, t, **kwargs):
+        r"""The de Hoog, Knight & Stokes algorithm is an
+        accelerated form of the Fourier series numerical
+        inverse Laplace transform algorithm.
+
+        .. math ::
+
+            p_k = \gamma + \frac{jk}{T} \qquad 0 \le k < 2M+1
+
+        where
+
+        .. math ::
+
+            \gamma = \alpha - \frac{\log \mathrm{tol}}{2T},
+
+        `j=\sqrt{-1}`, `T = 2t_\mathrm{max}` is a scaled time,
+        `\alpha=10^{-\mathrm{dps\_goal}}` is the real part of the
+        rightmost pole or singularity, which is chosen based on the
+        desired accuracy (assuming the rightmost singularity is 0),
+        and `\mathrm{tol}=10\alpha` is the desired tolerance, which is
+        chosen in relation to `\alpha`.
+
+        When increasing the degree, the abscissa increase towards
+        `j\infty`, but more slowly than for the fixed Talbot
+        algorithm. The de Hoog et al. algorithm typically does better
+        with oscillatory functions of time, and less well-behaved
+        functions. The method tends to be slower than the Talbot and
+        Stehfest algorithms, especially so at very high precision
+        (e.g., `>500` digits precision).
+
+        """
+
+        # required
+        # ------------------------------
+        self.t = self.ctx.convert(t)
+
+        # optional
+        # ------------------------------
+        self.tmax = kwargs.get('tmax', self.t)
+
+        # empirical relationships used here based on a linear fit of
+        # requested and delivered dps for exponentially decaying time
+        # functions for requested dps up to 512.
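+        # worked example of the fit below (illustrative): at mp.dps = 15
+        # with no 'degree' given, dps_goal = int(15*1.36) = 20 and
+        # degree = max(10, 20) = 20, giving np = 2*20+1 = 41 terms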
+
+        if 'degree' in kwargs:
+            self.degree = kwargs['degree']
+            self.dps_goal = int(1.38*self.degree)
+        else:
+            self.dps_goal = int(self.ctx.dps*1.36)
+            self.degree = max(10, self.dps_goal)
+
+        # 2*M+1 terms in approximation
+        M = self.degree
+
+        # adjust alpha component of abscissa of convergence for higher
+        # precision
+        tmp = self.ctx.power(10.0, -self.dps_goal)
+        self.alpha = self.ctx.convert(kwargs.get('alpha', tmp))
+
+        # desired tolerance (here simply related to alpha)
+        self.tol = self.ctx.convert(kwargs.get('tol', self.alpha*10.0))
+        self.np = 2*self.degree+1 # number of terms in approximation
+
+        # this is adjusting the dps of the calling context
+        # hopefully the caller doesn't monkey around with it
+        # between calling this routine and calc_time_domain_solution()
+        self.dps_orig = self.ctx.dps
+        self.ctx.dps = self.dps_goal
+
+        # scaling factor (likely tunable, but 2 is typical)
+        self.scale = kwargs.get('scale', 2)
+        self.T = self.ctx.convert(kwargs.get('T', self.scale*self.tmax))
+
+        self.p = self.ctx.matrix(2*M+1, 1)
+        self.gamma = self.alpha - self.ctx.log(self.tol)/(self.scale*self.T)
+        self.p = (self.gamma + self.ctx.pi*
+                  self.ctx.matrix(self.ctx.arange(self.np))/self.T*1j)
+
+        # NB: p is complex (mpc)
+
+    def calc_time_domain_solution(self, fp, t, manual_prec=False):
+        r"""Calculate time-domain solution for the
+        de Hoog, Knight & Stokes algorithm.
+
+        The un-accelerated Fourier series approach is:
+
+        .. math ::
+
+            f(t,2M+1) = \frac{e^{\gamma t}}{T} \sum_{k=0}^{2M}{}^{'}
+            \Re\left[\bar{f}\left( p_k \right)
+            e^{ik\pi t/T} \right],
+
+        where the prime on the summation indicates the first term is halved.
+
+        This simplistic approach requires so many function evaluations
+        that it is not practical. Non-linear acceleration is
+        accomplished via Pade approximation and an analytic expression
+        for the remainder of the continued fraction. See the original
+        paper (reference 2 below) for a detailed description of the
+        numerical approach.
+
+        **References**
+
+        1. Davies, B. (2005). *Integral Transforms and their
+           Applications*, Third Edition. Springer.
+        2. de Hoog, F., J. Knight, A. Stokes (1982). An improved
+           method for numerical inversion of Laplace transforms. *SIAM
+           Journal of Scientific and Statistical Computing* 3:357-366,
+           http://dx.doi.org/10.1137/0903022
+
+        """
+
+        M = self.degree
+        np = self.np
+        T = self.T
+
+        self.t = self.ctx.convert(t)
+
+        # would it be useful to try re-using
+        # space between e&q and A&B?
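+        # e and q hold the triangular quotient-difference (Q-D) table from
+        # which the continued-fraction coefficients d are read off; A and B
+        # carry the numerator and denominator recurrences of the resulting
+        # diagonal Pade approximant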
+ e = self.ctx.zeros(np, M+1) + q = self.ctx.matrix(2*M, M) + d = self.ctx.matrix(np, 1) + A = self.ctx.zeros(np+1, 1) + B = self.ctx.ones(np+1, 1) + + # initialize Q-D table + e[:, 0] = 0.0 + 0j + q[0, 0] = fp[1]/(fp[0]/2) + for i in range(1, 2*M): + q[i, 0] = fp[i+1]/fp[i] + + # rhombus rule for filling triangular Q-D table (e & q) + for r in range(1, M+1): + # start with e, column 1, 0:2*M-2 + mr = 2*(M-r) + 1 + e[0:mr, r] = q[1:mr+1, r-1] - q[0:mr, r-1] + e[1:mr+1, r-1] + if not r == M: + rq = r+1 + mr = 2*(M-rq)+1 + 2 + for i in range(mr): + q[i, rq-1] = q[i+1, rq-2]*e[i+1, rq-1]/e[i, rq-1] + + # build up continued fraction coefficients (d) + d[0] = fp[0]/2 + for r in range(1, M+1): + d[2*r-1] = -q[0, r-1] # even terms + d[2*r] = -e[0, r] # odd terms + + # seed A and B for recurrence + A[0] = 0.0 + 0.0j + A[1] = d[0] + B[0:2] = 1.0 + 0.0j + + # base of the power series + z = self.ctx.expjpi(self.t/T) # i*pi is already in fcn + + # coefficients of Pade approximation (A & B) + # using recurrence for all but last term + for i in range(1, 2*M): + A[i+1] = A[i] + d[i]*A[i-1]*z + B[i+1] = B[i] + d[i]*B[i-1]*z + + # "improved remainder" to continued fraction + brem = (1 + (d[2*M-1] - d[2*M])*z)/2 + # powm1(x,y) computes x^y - 1 more accurately near zero + rem = brem*self.ctx.powm1(1 + d[2*M]*z/brem, + self.ctx.fraction(1, 2)) + + # last term of recurrence using new remainder + A[np] = A[2*M] + rem*A[2*M-1] + B[np] = B[2*M] + rem*B[2*M-1] + + # diagonal Pade approximation + # F=A/B represents accelerated trapezoid rule + result = self.ctx.exp(self.gamma*self.t)/T*(A[np]/B[np]).real + + # setting dps back to value when calc_laplace_parameter was called + if not manual_prec: + self.ctx.dps = self.dps_orig + + return result + + +# **************************************** + +class Cohen(InverseLaplaceTransform): + + def calc_laplace_parameter(self, t, **kwargs): + r"""The Cohen algorithm accelerates the convergence of the nearly + alternating series resulting from the application of the trapezoidal + rule to the Bromwich contour inversion integral. + + .. math :: + + p_k = \frac{\gamma}{2 t} + \frac{\pi i k}{t} \qquad 0 \le k < M + + where + + .. math :: + + \gamma = \frac{2}{3} (d + \log(10) + \log(2 t)), + + `d = \mathrm{dps\_goal}`, which is chosen based on the desired + accuracy using the method developed in [1] to improve numerical + stability. The Cohen algorithm shows robustness similar to the de Hoog + et al. algorithm, but it is faster than the fixed Talbot algorithm. + + **Optional arguments** + + *degree* + integer order of the approximation (M = number of terms) + *alpha* + abscissa for `p_0` (controls the discretization error) + + The working precision will be increased according to a rule of + thumb. If 'degree' is not specified, the working precision and + degree are chosen to hopefully achieve the dps of the calling + context. If 'degree' is specified, the working precision is + chosen to achieve maximum resulting precision for the + specified degree. + + **References** + + 1. P. Glasserman, J. Ruiz-Mata (2006). Computing the credit loss + distribution in the Gaussian copula model: a comparison of methods. 
+ *Journal of Credit Risk* 2(4):33-66, 10.21314/JCR.2006.057 + + """ + self.t = self.ctx.convert(t) + + if 'degree' in kwargs: + self.degree = kwargs['degree'] + self.dps_goal = int(1.5 * self.degree) + else: + self.dps_goal = int(self.ctx.dps * 1.74) + self.degree = max(22, int(1.31 * self.dps_goal)) + + M = self.degree + 1 + + # this is adjusting the dps of the calling context hopefully + # the caller doesn't monkey around with it between calling + # this routine and calc_time_domain_solution() + self.dps_orig = self.ctx.dps + self.ctx.dps = self.dps_goal + + ttwo = 2 * self.t + tmp = self.ctx.dps * self.ctx.log(10) + self.ctx.log(ttwo) + tmp = self.ctx.fraction(2, 3) * tmp + self.alpha = self.ctx.convert(kwargs.get('alpha', tmp)) + + # all but time-dependent part of p + a_t = self.alpha / ttwo + p_t = self.ctx.pi * 1j / self.t + + self.p = self.ctx.matrix(M, 1) + self.p[0] = a_t + + for i in range(1, M): + self.p[i] = a_t + i * p_t + + def calc_time_domain_solution(self, fp, t, manual_prec=False): + r"""Calculate time-domain solution for Cohen algorithm. + + The accelerated nearly alternating series is: + + .. math :: + + f(t, M) = \frac{e^{\gamma / 2}}{t} \left[\frac{1}{2} + \Re\left(\bar{f}\left(\frac{\gamma}{2t}\right) \right) - + \sum_{k=0}^{M-1}\frac{c_{M,k}}{d_M}\Re\left(\bar{f} + \left(\frac{\gamma + 2(k+1) \pi i}{2t}\right)\right)\right], + + where coefficients `\frac{c_{M, k}}{d_M}` are described in [1]. + + 1. H. Cohen, F. Rodriguez Villegas, D. Zagier (2000). Convergence + acceleration of alternating series. *Experiment. Math* 9(1):3-12 + + """ + self.t = self.ctx.convert(t) + + n = self.degree + M = n + 1 + + A = self.ctx.matrix(M, 1) + for i in range(M): + A[i] = fp[i].real + + d = (3 + self.ctx.sqrt(8)) ** n + d = (d + 1 / d) / 2 + b = -self.ctx.one + c = -d + s = 0 + + for k in range(n): + c = b - c + s = s + c * A[k + 1] + b = 2 * (k + n) * (k - n) * b / ((2 * k + 1) * (k + self.ctx.one)) + + result = self.ctx.exp(self.alpha / 2) / self.t * (A[0] / 2 - s / d) + + # setting dps back to value when calc_laplace_parameter was + # called, unless flag is set. + if not manual_prec: + self.ctx.dps = self.dps_orig + + return result + + +# **************************************** + +class LaplaceTransformInversionMethods(object): + def __init__(ctx, *args, **kwargs): + ctx._fixed_talbot = FixedTalbot(ctx) + ctx._stehfest = Stehfest(ctx) + ctx._de_hoog = deHoog(ctx) + ctx._cohen = Cohen(ctx) + + def invertlaplace(ctx, f, t, **kwargs): + r"""Computes the numerical inverse Laplace transform for a + Laplace-space function at a given time. The function being + evaluated is assumed to be a real-valued function of time. + + The user must supply a Laplace-space function `\bar{f}(p)`, + and a desired time at which to estimate the time-domain + solution `f(t)`. + + A few basic examples of Laplace-space functions with known + inverses (see references [1,2]) : + + .. math :: + + \mathcal{L}\left\lbrace f(t) \right\rbrace=\bar{f}(p) + + .. math :: + + \mathcal{L}^{-1}\left\lbrace \bar{f}(p) \right\rbrace = f(t) + + .. math :: + + \bar{f}(p) = \frac{1}{(p+1)^2} + + .. 
math :: + + f(t) = t e^{-t} + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> tt = [0.001, 0.01, 0.1, 1, 10] + >>> fp = lambda p: 1/(p+1)**2 + >>> ft = lambda t: t*exp(-t) + >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='talbot') + (0.000999000499833375, 8.57923043561212e-20) + >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='talbot') + (0.00990049833749168, 3.27007646698047e-19) + >>> ft(tt[2]),ft(tt[2])-invertlaplace(fp,tt[2],method='talbot') + (0.090483741803596, -1.75215800052168e-18) + >>> ft(tt[3]),ft(tt[3])-invertlaplace(fp,tt[3],method='talbot') + (0.367879441171442, 1.2428864009344e-17) + >>> ft(tt[4]),ft(tt[4])-invertlaplace(fp,tt[4],method='talbot') + (0.000453999297624849, 4.04513489306658e-20) + + The methods also work for higher precision: + + >>> mp.dps = 100; mp.pretty = True + >>> nstr(ft(tt[0]),15),nstr(ft(tt[0])-invertlaplace(fp,tt[0],method='talbot'),15) + ('0.000999000499833375', '-4.96868310693356e-105') + >>> nstr(ft(tt[1]),15),nstr(ft(tt[1])-invertlaplace(fp,tt[1],method='talbot'),15) + ('0.00990049833749168', '1.23032291513122e-104') + + .. math :: + + \bar{f}(p) = \frac{1}{p^2+1} + + .. math :: + + f(t) = \mathrm{J}_0(t) + + >>> mp.dps = 15; mp.pretty = True + >>> fp = lambda p: 1/sqrt(p*p + 1) + >>> ft = lambda t: besselj(0,t) + >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='dehoog') + (0.999999750000016, -6.09717765032273e-18) + >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='dehoog') + (0.99997500015625, -5.61756281076169e-17) + + .. math :: + + \bar{f}(p) = \frac{\log p}{p} + + .. math :: + + f(t) = -\gamma -\log t + + >>> mp.dps = 15; mp.pretty = True + >>> fp = lambda p: log(p)/p + >>> ft = lambda t: -euler-log(t) + >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='stehfest') + (6.3305396140806, -1.92126634837863e-16) + >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='stehfest') + (4.02795452108656, -4.81486093200704e-16) + + **Options** + + :func:`~mpmath.invertlaplace` recognizes the following optional + keywords valid for all methods: + + *method* + Chooses numerical inverse Laplace transform algorithm + (described below). + *degree* + Number of terms used in the approximation + + **Algorithms** + + Mpmath implements four numerical inverse Laplace transform + algorithms, attributed to: Talbot, Stehfest, and de Hoog, + Knight and Stokes. These can be selected by using + *method='talbot'*, *method='stehfest'*, *method='dehoog'* or + *method='cohen'* or by passing the classes *method=FixedTalbot*, + *method=Stehfest*, *method=deHoog*, or *method=Cohen*. The functions + :func:`~mpmath.invlaptalbot`, :func:`~mpmath.invlapstehfest`, + :func:`~mpmath.invlapdehoog`, and :func:`~mpmath.invlapcohen` + are also available as shortcuts. + + All four algorithms implement a heuristic balance between the + requested precision and the precision used internally for the + calculations. This has been tuned for a typical exponentially + decaying function and precision up to few hundred decimal + digits. + + The Laplace transform converts the variable time (i.e., along + a line) into a parameter given by the right half of the + complex `p`-plane. Singularities, poles, and branch cuts in + the complex `p`-plane contain all the information regarding + the time behavior of the corresponding function. Any numerical + method must therefore sample `p`-plane "close enough" to the + singularities to accurately characterize them, while not + getting too close to have catastrophic cancellation, overflow, + or underflow issues. 
Most significantly, if one or more of the + singularities in the `p`-plane are not on the left side of the + Bromwich contour, their effects will be left out of the computed + solution, and the answer will be completely wrong. + + *Talbot* + + The fixed Talbot method is highly accurate and fast, but the + method can catastrophically fail for certain classes of time-domain + behavior, including a Heaviside step function for positive + time (e.g., `H(t-2)`), or some oscillatory behaviors. The + Talbot method usually has adjustable parameters, but the + "fixed" variety implemented here does not. This method + deforms the Bromwich integral contour in the shape of a + parabola towards `-\infty`, which leads to problems + when the solution has a decaying exponential in it (e.g., a + Heaviside step function is equivalent to multiplying by a + decaying exponential in Laplace space). + + *Stehfest* + + The Stehfest algorithm only uses abscissas along the real axis + of the complex `p`-plane to estimate the time-domain + function. Oscillatory time-domain functions have poles away + from the real axis, so this method does not work well with + oscillatory functions, especially high-frequency ones. This + method also depends on summation of terms in a series that + grows very large, and will have catastrophic cancellation + during summation if the working precision is too low. + + *de Hoog et al.* + + The de Hoog, Knight, and Stokes method is essentially a + Fourier-series quadrature-type approximation to the Bromwich + contour integral, with non-linear series acceleration and an + analytical expression for the remainder term. This method is + typically one of the most robust, but it also involves the + greatest amount of overhead, so it is typically the slowest of the + four methods at high precision. + + *Cohen* + + The Cohen method is a trapezoidal-rule approximation to the Bromwich + contour integral, with linear acceleration for alternating + series. This method is as robust as the de Hoog et al. method and the + fastest of the four methods at high precision, and is therefore the + default method. + + **Singularities** + + All numerical inverse Laplace transform methods have problems + at large time when the Laplace-space function has poles, + singularities, or branch cuts to the right of the origin in + the complex plane. For simple poles in `\bar{f}(p)` at the + `p`-plane origin, the time function is constant in time (e.g., + `\mathcal{L}\left\lbrace 1 \right\rbrace=1/p` has a pole at + `p=0`). A pole in `\bar{f}(p)` to the left of the origin is a + decreasing function of time (e.g., `\mathcal{L}\left\lbrace + e^{-t/2} \right\rbrace=1/(p+1/2)` has a pole at `p=-1/2`), and + a pole to the right of the origin leads to an increasing + function in time (e.g., `\mathcal{L}\left\lbrace t e^{t/4} + \right\rbrace = 1/(p-1/4)^2` has a pole at `p=1/4`). When + singularities occur off the real `p` axis, the time-domain + function is oscillatory. For example, `\mathcal{L}\left\lbrace + \mathrm{J}_0(t) \right\rbrace=1/\sqrt{p^2+1}` has a branch cut + starting at `p=j=\sqrt{-1}` and is a decaying oscillatory + function. This range of behaviors is illustrated in Duffy [3] + Figure 4.10.4, p. 228. + + In general, as `p \rightarrow \infty`, `t \rightarrow 0`, and + vice versa. All numerical inverse Laplace transform methods + require their abscissas to shift closer to the origin for + larger times.
If the abscissas shift to the left of the rightmost + singularity in the Laplace domain, the answer will be + completely wrong (the effects of singularities to the right of + the Bromwich contour are not included in the results). + + For example, the following exponentially growing function has + a pole at `p=3`: + + .. math :: + + \bar{f}(p)=\frac{1}{p^2-9} + + .. math :: + + f(t)=\frac{1}{3}\sinh 3t + + >>> mp.dps = 15; mp.pretty = True + >>> fp = lambda p: 1/(p*p-9) + >>> ft = lambda t: sinh(3*t)/3 + >>> tt = [0.01,0.1,1.0,10.0] + >>> ft(tt[0]),invertlaplace(fp,tt[0],method='talbot') + (0.0100015000675014, 0.0100015000675014) + >>> ft(tt[1]),invertlaplace(fp,tt[1],method='talbot') + (0.101506764482381, 0.101506764482381) + >>> ft(tt[2]),invertlaplace(fp,tt[2],method='talbot') + (3.33929164246997, 3.33929164246997) + >>> ft(tt[3]),invertlaplace(fp,tt[3],method='talbot') + (1781079096920.74, -1.61331069624091e-14) + + **References** + + 1. [DLMF]_ section 1.14 (http://dlmf.nist.gov/1.14T4) + 2. Cohen, A.M. (2007). *Numerical Methods for Laplace Transform + Inversion*, Springer. + 3. Duffy, D.G. (1998). *Advanced Engineering Mathematics*, CRC Press. + + **Numerical Inverse Laplace Transform Reviews** + + 1. Bellman, R., R.E. Kalaba, J.A. Lockett (1966). *Numerical + inversion of the Laplace transform: Applications to Biology, + Economics, Engineering, and Physics*. Elsevier. + 2. Davies, B., B. Martin (1979). Numerical inversion of the + Laplace transform: a survey and comparison of methods. *Journal + of Computational Physics* 33:1-32, + http://dx.doi.org/10.1016/0021-9991(79)90025-1 + 3. Duffy, D.G. (1993). On the numerical inversion of Laplace + transforms: Comparison of three new methods on characteristic + problems from applications. *ACM Transactions on Mathematical + Software* 19(3):333-359, http://dx.doi.org/10.1145/155743.155788 + 4. Kuhlman, K.L. (2013). Review of Inverse Laplace Transform + Algorithms for Laplace-Space Numerical Approaches, *Numerical + Algorithms*, 63(2):339-355. + http://dx.doi.org/10.1007/s11075-012-9625-3 + + """ + + rule = kwargs.get('method', 'cohen') + if type(rule) is str: + lrule = rule.lower() + if lrule == 'talbot': + rule = ctx._fixed_talbot + elif lrule == 'stehfest': + rule = ctx._stehfest + elif lrule == 'dehoog': + rule = ctx._de_hoog + elif lrule == 'cohen': + rule = ctx._cohen + else: + raise ValueError("unknown invlap algorithm: %s" % rule) + else: + rule = rule(ctx) + + # determine the vector of Laplace-space parameters + # needed for the requested method and desired time + rule.calc_laplace_parameter(t, **kwargs) + + # compute the Laplace-space function evaluations + # at the required abscissas.
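+ # (rule.p was filled in by calc_laplace_parameter above; each method + # needs only len(rule.p) evaluations of the user-supplied function f)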
+ fp = [f(p) for p in rule.p] + + # compute the time-domain solution from the + # Laplace-space function evaluations + return rule.calc_time_domain_solution(fp, t) + + # shortcuts for the above function for specific methods + def invlaptalbot(ctx, *args, **kwargs): + kwargs['method'] = 'talbot' + return ctx.invertlaplace(*args, **kwargs) + + def invlapstehfest(ctx, *args, **kwargs): + kwargs['method'] = 'stehfest' + return ctx.invertlaplace(*args, **kwargs) + + def invlapdehoog(ctx, *args, **kwargs): + kwargs['method'] = 'dehoog' + return ctx.invertlaplace(*args, **kwargs) + + def invlapcohen(ctx, *args, **kwargs): + kwargs['method'] = 'cohen' + return ctx.invertlaplace(*args, **kwargs) + + +# **************************************** + +if __name__ == '__main__': + import doctest + doctest.testmod() diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/odes.py b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/odes.py new file mode 100644 index 0000000000000000000000000000000000000000..ac6bd3cb056c8841e6cf77ba4d08a0b1abb2f2c9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/odes.py @@ -0,0 +1,288 @@ +from bisect import bisect +from ..libmp.backend import xrange + +class ODEMethods(object): + pass + +def ode_taylor(ctx, derivs, x0, y0, tol_prec, n): + h = tol = ctx.ldexp(1, -tol_prec) + dim = len(y0) + xs = [x0] + ys = [y0] + x = x0 + y = y0 + orig = ctx.prec + try: + ctx.prec = orig*(1+n) + # Use n steps with Euler's method to get + # evaluation points for derivatives + for i in range(n): + fxy = derivs(x, y) + y = [y[i]+h*fxy[i] for i in xrange(len(y))] + x += h + xs.append(x) + ys.append(y) + # Compute derivatives + ser = [[] for d in range(dim)] + for j in range(n+1): + s = [0]*dim + b = (-1) ** (j & 1) + k = 1 + for i in range(j+1): + for d in range(dim): + s[d] += b * ys[i][d] + b = (b * (j-k+1)) // (-k) + k += 1 + scale = h**(-j) / ctx.fac(j) + for d in range(dim): + s[d] = s[d] * scale + ser[d].append(s[d]) + finally: + ctx.prec = orig + # Estimate radius for which we can get full accuracy. + # XXX: do this right for zeros + radius = ctx.one + for ts in ser: + if ts[-1]: + radius = min(radius, ctx.nthroot(tol/abs(ts[-1]), n)) + radius /= 2 # XXX + return ser, x0+radius + +def odefun(ctx, F, x0, y0, tol=None, degree=None, method='taylor', verbose=False): + r""" + Returns a function `y(x) = [y_0(x), y_1(x), \ldots, y_n(x)]` + that is a numerical solution of the `n+1`-dimensional first-order + ordinary differential equation (ODE) system + + .. math :: + + y_0'(x) = F_0(x, [y_0(x), y_1(x), \ldots, y_n(x)]) + + y_1'(x) = F_1(x, [y_0(x), y_1(x), \ldots, y_n(x)]) + + \vdots + + y_n'(x) = F_n(x, [y_0(x), y_1(x), \ldots, y_n(x)]) + + The derivatives are specified by the vector-valued function + *F* that evaluates + `[y_0', \ldots, y_n'] = F(x, [y_0, \ldots, y_n])`. + The initial point `x_0` is specified by the scalar argument *x0*, + and the initial value `y(x_0) = [y_0(x_0), \ldots, y_n(x_0)]` is + specified by the vector argument *y0*. + + For convenience, if the system is one-dimensional, you may optionally + provide just a scalar value for *y0*. In this case, *F* should accept + a scalar *y* argument and return a scalar. The solution function + *y* will return scalar values instead of length-1 vectors. + + Evaluation of the solution function `y(x)` is permitted + for any `x \ge x_0`. + + A high-order ODE can be solved by transforming it into first-order + vector form. This transformation is described in standard texts + on ODEs. 
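+ For instance, the second-order equation `y'' = f(x, y, y')` becomes + the two-dimensional first-order system `y_0' = y_1`, + `y_1' = f(x, y_0, y_1)`, where `y_0 = y` and `y_1 = y'`.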
Examples will also be given below. + + **Options, speed and accuracy** + + By default, :func:`~mpmath.odefun` uses a high-order Taylor series + method. For reasonably well-behaved problems, the solution will + be fully accurate to within the working precision. Note that + it must be possible to evaluate *F* to very high precision + for the generation of Taylor series to work. + + To get a faster but less accurate solution, you can set a large + value for *tol* (which defaults roughly to *eps*). If you just + want to plot the solution or perform a basic simulation, + *tol = 0.01* is likely sufficient. + + The *degree* argument controls the degree of the solver (with + *method='taylor'*, this is the degree of the Taylor series + expansion). A higher degree means that a longer step can be taken + before a new local solution must be generated from *F*, + meaning that fewer steps are required to get from `x_0` to a given + `x_1`. On the other hand, a higher degree also means that each + local solution becomes more expensive (i.e., more evaluations of + *F* are required per step, and at higher precision). + + The optimal setting therefore involves a tradeoff. Generally, + decreasing the *degree* for Taylor series is likely to give a faster + solution at low precision, while increasing it is likely to be better + at higher precision. + + The function + object returned by :func:`~mpmath.odefun` caches the solutions at all step + points and uses polynomial interpolation between step points. + Therefore, once `y(x_1)` has been evaluated for some `x_1`, + `y(x)` can be evaluated very quickly for any `x_0 \le x \le x_1`, + and continuing the evaluation up to `x_2 > x_1` is also fast. + + **Examples of first-order ODEs** + + We will solve the standard test problem `y'(x) = y(x), y(0) = 1` + which has the explicit solution `y(x) = \exp(x)`:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> f = odefun(lambda x, y: y, 0, 1) + >>> for x in [0, 1, 2.5]: + ... print((f(x), exp(x))) + ... + (1.0, 1.0) + (2.71828182845905, 2.71828182845905) + (12.1824939607035, 12.1824939607035) + + The solution with high precision:: + + >>> mp.dps = 50 + >>> f = odefun(lambda x, y: y, 0, 1) + >>> f(1) + 2.7182818284590452353602874713526624977572470937 + >>> exp(1) + 2.7182818284590452353602874713526624977572470937 + + Using the more general vectorized form, the test problem + can be input as (note that *f* returns a 1-element vector):: + + >>> mp.dps = 15 + >>> f = odefun(lambda x, y: [y[0]], 0, [1]) + >>> f(1) + [2.71828182845905] + + :func:`~mpmath.odefun` can solve nonlinear ODEs, which are generally + impossible (and at best difficult) to solve analytically. As + an example of a nonlinear ODE, we will solve `y'(x) = x \sin(y(x))` + for `y(0) = \pi/2`. An exact solution happens to be known + for this problem, and is given by + `y(x) = 2 \tan^{-1}\left(\exp\left(x^2/2\right)\right)`:: + + >>> f = odefun(lambda x, y: x*sin(y), 0, pi/2) + >>> for x in [2, 5, 10]: + ... print((f(x), 2*atan(exp(mpf(x)**2/2)))) + ... + (2.87255666284091, 2.87255666284091) + (3.14158520028345, 3.14158520028345) + (3.14159265358979, 3.14159265358979) + + If `F` is independent of `y`, an ODE can be solved using direct + integration.
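+ In that case the solution is simply the antiderivative, + `y(x) = y(x_0) + \int_{x_0}^x F(t)\,dt`.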
We can therefore obtain a reference solution with + :func:`~mpmath.quad`:: + + >>> f = lambda x: (1+x**2)/(1+x**3) + >>> g = odefun(lambda x, y: f(x), pi, 0) + >>> g(2*pi) + 0.72128263801696 + >>> quad(f, [pi, 2*pi]) + 0.72128263801696 + + **Examples of second-order ODEs** + + We will solve the harmonic oscillator equation `y''(x) + y(x) = 0`. + To do this, we introduce the helper functions `y_0 = y, y_1 = -y_0'` + (the sign is chosen to match the code below), whereby the original + equation can be written as `y_1' = y_0`. Put + together, we get the first-order, two-dimensional vector ODE + + .. math :: + + \begin{cases} + y_0' = -y_1 \\ + y_1' = y_0 + \end{cases} + + To get a well-defined IVP, we need two initial values. With + `y(0) = y_0(0) = 1` and `-y'(0) = y_1(0) = 0`, the problem will of + course be solved by `y(x) = y_0(x) = \cos(x)` and + `-y'(x) = y_1(x) = \sin(x)`. We check this:: + + >>> f = odefun(lambda x, y: [-y[1], y[0]], 0, [1, 0]) + >>> for x in [0, 1, 2.5, 10]: + ... nprint(f(x), 15) + ... nprint([cos(x), sin(x)], 15) + ... print("---") + ... + [1.0, 0.0] + [1.0, 0.0] + --- + [0.54030230586814, 0.841470984807897] + [0.54030230586814, 0.841470984807897] + --- + [-0.801143615546934, 0.598472144103957] + [-0.801143615546934, 0.598472144103957] + --- + [-0.839071529076452, -0.54402111088937] + [-0.839071529076452, -0.54402111088937] + --- + + Note that we get both the sine and the cosine solutions + simultaneously. + + **TODO** + + * Better automatic choice of degree and step size + * Make determination of Taylor series convergence radius + more robust + * Allow solution for `x < x_0` + * Allow solution for complex `x` + * Test for difficult (ill-conditioned) problems + * Implement Runge-Kutta and other algorithms + + """ + if tol: + tol_prec = int(-ctx.log(tol, 2))+10 + else: + tol_prec = ctx.prec+10 + degree = degree or (3 + int(3*ctx.dps/2.)) + workprec = ctx.prec + 40 + try: + len(y0) + return_vector = True + except TypeError: + F_ = F + F = lambda x, y: [F_(x, y[0])] + y0 = [y0] + return_vector = False + ser, xb = ode_taylor(ctx, F, x0, y0, tol_prec, degree) + series_boundaries = [x0, xb] + series_data = [(ser, x0, xb)] + # We will be working with vectors of Taylor series + def mpolyval(ser, a): + return [ctx.polyval(s[::-1], a) for s in ser] + # Find nearest expansion point; compute if necessary + def get_series(x): + if x < x0: + raise ValueError("cannot evaluate solution for x < x0") + n = bisect(series_boundaries, x) + if n < len(series_boundaries): + return series_data[n-1] + while 1: + ser, xa, xb = series_data[-1] + if verbose: + print("Computing Taylor series for [%f, %f]" % (xa, xb)) + y = mpolyval(ser, xb-xa) + xa = xb + ser, xb = ode_taylor(ctx, F, xb, y, tol_prec, degree) + series_boundaries.append(xb) + series_data.append((ser, xa, xb)) + if x <= xb: + return series_data[-1] + # Evaluation function + def interpolant(x): + x = ctx.convert(x) + orig = ctx.prec + try: + ctx.prec = workprec + ser, xa, xb = get_series(x) + y = mpolyval(ser, x-xa) + finally: + ctx.prec = orig + if return_vector: + return [+yk for yk in y] + else: + return +y[0] + return interpolant + +ODEMethods.odefun = odefun + +if __name__ == "__main__": + import doctest + doctest.testmod() diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/optimization.py b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/optimization.py new file mode 100644 index 0000000000000000000000000000000000000000..69724f78336ee24856366db48917a47108b1b416 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/optimization.py @@ -0,0 +1,1102 @@ +from
__future__ import print_function + +from copy import copy + +from ..libmp.backend import xrange + +class OptimizationMethods(object): + def __init__(ctx): + pass + +############## +# 1D-SOLVERS # +############## + +class Newton: + """ + 1d-solver generating pairs of approximative root and error. + + Needs a starting point x0 close to the root. + + Pro: + + * converges fast + * sometimes more robust than secant with a bad second starting point + + Contra: + + * converges slowly for multiple roots + * needs first derivative + * 2 function evaluations per iteration + """ + maxsteps = 20 + + def __init__(self, ctx, f, x0, **kwargs): + self.ctx = ctx + if len(x0) == 1: + self.x0 = x0[0] + else: + raise ValueError('expected 1 starting point, got %i' % len(x0)) + self.f = f + if not 'df' in kwargs: + def df(x): + return self.ctx.diff(f, x) + else: + df = kwargs['df'] + self.df = df + + def __iter__(self): + f = self.f + df = self.df + x0 = self.x0 + while True: + x1 = x0 - f(x0) / df(x0) + error = abs(x1 - x0) + x0 = x1 + yield (x1, error) + +class Secant: + """ + 1d-solver generating pairs of approximative root and error. + + Needs starting points x0 and x1 close to the root. + x1 defaults to x0 + 0.25. + + Pro: + + * converges fast + + Contra: + + * converges slowly for multiple roots + """ + maxsteps = 30 + + def __init__(self, ctx, f, x0, **kwargs): + self.ctx = ctx + if len(x0) == 1: + self.x0 = x0[0] + self.x1 = self.x0 + 0.25 + elif len(x0) == 2: + self.x0 = x0[0] + self.x1 = x0[1] + else: + raise ValueError('expected 1 or 2 starting points, got %i' % len(x0)) + self.f = f + + def __iter__(self): + f = self.f + x0 = self.x0 + x1 = self.x1 + f0 = f(x0) + while True: + f1 = f(x1) + l = x1 - x0 + if not l: + break + s = (f1 - f0) / l + if not s: + break + x0, x1 = x1, x1 - f1/s + f0 = f1 + yield x1, abs(l) + +class MNewton: + """ + 1d-solver generating pairs of approximative root and error. + + Needs a starting point x0 close to the root. + Uses a modified Newton's method that converges fast regardless of the + multiplicity of the root. + + Pro: + + * converges fast for multiple roots + + Contra: + + * needs first and second derivative of f + * 3 function evaluations per iteration + """ + maxsteps = 20 + + def __init__(self, ctx, f, x0, **kwargs): + self.ctx = ctx + if not len(x0) == 1: + raise ValueError('expected 1 starting point, got %i' % len(x0)) + self.x0 = x0[0] + self.f = f + if not 'df' in kwargs: + def df(x): + return self.ctx.diff(f, x) + else: + df = kwargs['df'] + self.df = df + if not 'd2f' in kwargs: + def d2f(x): + return self.ctx.diff(df, x) + else: + d2f = kwargs['d2f'] + self.d2f = d2f + + def __iter__(self): + x = self.x0 + f = self.f + df = self.df + d2f = self.d2f + while True: + prevx = x + fx = f(x) + if fx == 0: + break + dfx = df(x) + d2fx = d2f(x) + # x = x - F(x)/F'(x) with F(x) = f(x)/f'(x) + x -= fx / (dfx - fx * d2fx / dfx) + error = abs(x - prevx) + yield x, error + +class Halley: + """ + 1d-solver generating pairs of approximative root and error. + + Needs a starting point x0 close to the root. + Uses Halley's method with cubic convergence rate.
+ + Pro: + + * converges even faster than Newton's method + * useful when computing with *many* digits + + Contra: + + * needs first and second derivative of f + * 3 function evaluations per iteration + * converges slowly for multiple roots + """ + + maxsteps = 20 + + def __init__(self, ctx, f, x0, **kwargs): + self.ctx = ctx + if not len(x0) == 1: + raise ValueError('expected 1 starting point, got %i' % len(x0)) + self.x0 = x0[0] + self.f = f + if not 'df' in kwargs: + def df(x): + return self.ctx.diff(f, x) + else: + df = kwargs['df'] + self.df = df + if not 'd2f' in kwargs: + def d2f(x): + return self.ctx.diff(df, x) + else: + d2f = kwargs['d2f'] + self.d2f = d2f + + def __iter__(self): + x = self.x0 + f = self.f + df = self.df + d2f = self.d2f + while True: + prevx = x + fx = f(x) + dfx = df(x) + d2fx = d2f(x) + x -= 2*fx*dfx / (2*dfx**2 - fx*d2fx) + error = abs(x - prevx) + yield x, error + +class Muller: + """ + 1d-solver generating pairs of approximative root and error. + + Needs starting points x0, x1 and x2 close to the root. + x1 defaults to x0 + 0.25; x2 to x1 + 0.25. + Uses Muller's method that converges towards complex roots. + + Pro: + + * converges fast (somewhat faster than secant) + * can find complex roots + + Contra: + + * converges slowly for multiple roots + * may have complex values for real starting points and real roots + + http://en.wikipedia.org/wiki/Muller's_method + """ + maxsteps = 30 + + def __init__(self, ctx, f, x0, **kwargs): + self.ctx = ctx + if len(x0) == 1: + self.x0 = x0[0] + self.x1 = self.x0 + 0.25 + self.x2 = self.x1 + 0.25 + elif len(x0) == 2: + self.x0 = x0[0] + self.x1 = x0[1] + self.x2 = self.x1 + 0.25 + elif len(x0) == 3: + self.x0 = x0[0] + self.x1 = x0[1] + self.x2 = x0[2] + else: + raise ValueError('expected 1, 2 or 3 starting points, got %i' + % len(x0)) + self.f = f + self.verbose = kwargs['verbose'] + + def __iter__(self): + f = self.f + x0 = self.x0 + x1 = self.x1 + x2 = self.x2 + fx0 = f(x0) + fx1 = f(x1) + fx2 = f(x2) + while True: + # TODO: maybe refactor with a function for divided differences + # calculate divided differences + fx2x1 = (fx1 - fx2) / (x1 - x2) + fx2x0 = (fx0 - fx2) / (x0 - x2) + fx1x0 = (fx0 - fx1) / (x0 - x1) + w = fx2x1 + fx2x0 - fx1x0 + fx2x1x0 = (fx1x0 - fx2x1) / (x0 - x2) + if w == 0 and fx2x1x0 == 0: + if self.verbose: + print('canceled with') + print('x0 =', x0, ', x1 =', x1, 'and x2 =', x2) + break + x0 = x1 + fx0 = fx1 + x1 = x2 + fx1 = fx2 + # denominator should be as large as possible => choose sign + r = self.ctx.sqrt(w**2 - 4*fx2*fx2x1x0) + if abs(w - r) > abs(w + r): + r = -r + x2 -= 2*fx2 / (w + r) + fx2 = f(x2) + error = abs(x2 - x1) + yield x2, error + +# TODO: consider raising a ValueError when there's no sign change in a and b +class Bisection: + """ + 1d-solver generating pairs of approximative root and error. + + Uses the bisection method to find a root of f in [a, b]. + Might fail for multiple roots (needs sign change). A sign change + means that `f(a)` and `f(b)` have opposite signs, so that a + continuous function is guaranteed to have a root in `[a, b]`.
+ + Pro: + + * robust and reliable + + Contra: + + * converges slowly + * needs sign change + """ + maxsteps = 100 + + def __init__(self, ctx, f, x0, **kwargs): + self.ctx = ctx + if len(x0) != 2: + raise ValueError('expected interval of 2 points, got %i' % len(x0)) + self.f = f + self.a = x0[0] + self.b = x0[1] + + def __iter__(self): + f = self.f + a = self.a + b = self.b + l = b - a + fb = f(b) + while True: + m = self.ctx.ldexp(a + b, -1) + fm = f(m) + sign = fm * fb + if sign < 0: + a = m + elif sign > 0: + b = m + fb = fm + else: + yield m, self.ctx.zero + l /= 2 + yield (a + b)/2, abs(l) + +def _getm(method): + """ + Return a function to calculate m for Illinois-like methods. + """ + if method == 'illinois': + def getm(fz, fb): + return 0.5 + elif method == 'pegasus': + def getm(fz, fb): + return fb/(fb + fz) + elif method == 'anderson': + def getm(fz, fb): + m = 1 - fz/fb + if m > 0: + return m + else: + return 0.5 + else: + raise ValueError("method '%s' not recognized" % method) + return getm + +class Illinois: + """ + 1d-solver generating pairs of approximative root and error. + + Uses the Illinois method or similar to find a root of f in [a, b]. + Might fail for multiple roots (needs sign change). + Combines bisection with secant (improved regula falsi). + + The only difference between the methods is the scaling factor m, which is + used to ensure convergence (you can choose one using the 'method' keyword): + + Illinois method ('illinois'): + m = 0.5 + + Pegasus method ('pegasus'): + m = fb/(fb + fz) + + Anderson-Bjoerk method ('anderson'): + m = 1 - fz/fb if positive else 0.5 + + Pro: + + * converges very fast + + Contra: + + * has problems with multiple roots + * needs sign change + """ + maxsteps = 30 + + def __init__(self, ctx, f, x0, **kwargs): + self.ctx = ctx + if len(x0) != 2: + raise ValueError('expected interval of 2 points, got %i' % len(x0)) + self.a = x0[0] + self.b = x0[1] + self.f = f + self.tol = kwargs['tol'] + self.verbose = kwargs['verbose'] + self.method = kwargs.get('method', 'illinois') + self.getm = _getm(self.method) + if self.verbose: + print('using %s method' % self.method) + + def __iter__(self): + method = self.method + f = self.f + a = self.a + b = self.b + fa = f(a) + fb = f(b) + m = None + while True: + l = b - a + if l == 0: + break + s = (fb - fa) / l + z = a - fa/s + fz = f(z) + if abs(fz) < self.tol: + # TODO: better condition (when f is very flat) + if self.verbose: + print('canceled with z =', z) + yield z, l + break + if fz * fb < 0: # root in [z, b] + a = b + fa = fb + b = z + fb = fz + else: # root in [a, z] + m = self.getm(fz, fb) + b = z + fb = fz + fa = m*fa # scale down to ensure convergence + if self.verbose and m and not method == 'illinois': + print('m:', m) + yield (a + b)/2, abs(l) + +def Pegasus(*args, **kwargs): + """ + 1d-solver generating pairs of approximative root and error. + + Uses the Pegasus method to find a root of f in [a, b]. + Wrapper for Illinois to use method='pegasus'. + """ + kwargs['method'] = 'pegasus' + return Illinois(*args, **kwargs) + +def Anderson(*args, **kwargs): + """ + 1d-solver generating pairs of approximative root and error. + + Uses the Anderson-Bjoerk method to find a root of f in [a, b]. + Wrapper for Illinois to use method='anderson'. + """ + kwargs['method'] = 'anderson' + return Illinois(*args, **kwargs) + +# TODO: check whether it's possible to combine it with Illinois stuff +class Ridder: + """ + 1d-solver generating pairs of approximative root and error. + + Ridders' method to find a root of f in [a, b].
It is said to perform as well as + Brent's method while being simpler. + + Pro: + + * very fast + * simpler than Brent's method + + Contra: + + * two function evaluations per step + * has problems with multiple roots + * needs sign change + + http://en.wikipedia.org/wiki/Ridders'_method + """ + maxsteps = 30 + + def __init__(self, ctx, f, x0, **kwargs): + self.ctx = ctx + self.f = f + if len(x0) != 2: + raise ValueError('expected interval of 2 points, got %i' % len(x0)) + self.x1 = x0[0] + self.x2 = x0[1] + self.verbose = kwargs['verbose'] + self.tol = kwargs['tol'] + + def __iter__(self): + ctx = self.ctx + f = self.f + x1 = self.x1 + fx1 = f(x1) + x2 = self.x2 + fx2 = f(x2) + while True: + x3 = 0.5*(x1 + x2) + fx3 = f(x3) + x4 = x3 + (x3 - x1) * ctx.sign(fx1 - fx2) * fx3 / ctx.sqrt(fx3**2 - fx1*fx2) + fx4 = f(x4) + if abs(fx4) < self.tol: + # TODO: better condition (when f is very flat) + if self.verbose: + print('canceled with f(x4) =', fx4) + yield x4, abs(x1 - x2) + break + if fx4 * fx2 < 0: # root in [x4, x2] + x1 = x4 + fx1 = fx4 + else: # root in [x1, x4] + x2 = x4 + fx2 = fx4 + error = abs(x1 - x2) + yield (x1 + x2)/2, error + +class ANewton: + """ + EXPERIMENTAL 1d-solver generating pairs of approximative root and error. + + Uses Newton's method, modified to use Steffensen's method when convergence + is slow (i.e., for multiple roots). + """ + maxsteps = 20 + + def __init__(self, ctx, f, x0, **kwargs): + self.ctx = ctx + if not len(x0) == 1: + raise ValueError('expected 1 starting point, got %i' % len(x0)) + self.x0 = x0[0] + self.f = f + if not 'df' in kwargs: + def df(x): + return self.ctx.diff(f, x) + else: + df = kwargs['df'] + self.df = df + def phi(x): + return x - f(x) / df(x) + self.phi = phi + self.verbose = kwargs['verbose'] + + def __iter__(self): + x0 = self.x0 + f = self.f + df = self.df + phi = self.phi + error = 0 + counter = 0 + while True: + prevx = x0 + try: + x0 = phi(x0) + except ZeroDivisionError: + if self.verbose: + print('ZeroDivisionError: canceled with x =', x0) + break + preverror = error + error = abs(prevx - x0) + # TODO: decide not to use convergence acceleration + if error and abs(error - preverror) / error < 1: + if self.verbose: + print('converging slowly') + counter += 1 + if counter >= 3: + # accelerate convergence + phi = steffensen(phi) + counter = 0 + if self.verbose: + print('accelerating convergence') + yield x0, error + +# TODO: add Brent + +############################ +# MULTIDIMENSIONAL SOLVERS # +############################ + +def jacobian(ctx, f, x): + """ + Calculate the Jacobian matrix of a function at the point x. + + This is the first derivative of a vectorial function: + + f : R^n -> R^m with m >= n + """ + x = ctx.matrix(x) + h = ctx.sqrt(ctx.eps) + fx = ctx.matrix(f(*x)) + m = len(fx) + n = len(x) + J = ctx.matrix(m, n) + for j in xrange(n): + xj = x.copy() + xj[j] += h + Jj = (ctx.matrix(f(*xj)) - fx) / h + for i in xrange(m): + J[i,j] = Jj[i] + return J + +# TODO: test with user-specified jacobian matrix +class MDNewton: + """ + Find the root of a vector function numerically using Newton's method. + + f is a vector function representing a nonlinear equation system. + + x0 is the starting point close to the root. + + J is a function returning the Jacobian matrix for a point. + + Supports overdetermined systems. + + Use the 'norm' keyword to specify which norm to use. Defaults to max-norm. + The function to calculate the Jacobian matrix can be given using the + keyword 'J'. Otherwise it will be calculated numerically.
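+ + As an illustration (this particular lambda is ours, differentiated by hand), + an explicit Jacobian for the two-dimensional system used in the + :func:`~mpmath.findroot` examples, `f(x_1, x_2) = (x_1^2 + x_2, \; + 5 x_1^2 - 3 x_1 + 2 x_2 - 3)`, could be supplied as:: + + J = lambda x1, x2: matrix([[2*x1, 1], [10*x1 - 3, 2]])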
+ + Please note that this method converges only locally. Especially for + high-dimensional systems it is not trivial to find a good starting point + close enough to the root. + + It is recommended to use a faster, low-precision solver from SciPy [1] or + OpenOpt [2] to get an initial guess. Afterwards you can use this method for + root-polishing to any precision. + + [1] http://scipy.org + + [2] http://openopt.org/Welcome + """ + maxsteps = 10 + + def __init__(self, ctx, f, x0, **kwargs): + self.ctx = ctx + self.f = f + if isinstance(x0, (tuple, list)): + x0 = ctx.matrix(x0) + assert x0.cols == 1, 'need a vector' + self.x0 = x0 + if 'J' in kwargs: + self.J = kwargs['J'] + else: + def J(*x): + return ctx.jacobian(f, x) + self.J = J + self.norm = kwargs['norm'] + self.verbose = kwargs['verbose'] + + def __iter__(self): + f = self.f + x0 = self.x0 + norm = self.norm + J = self.J + fx = self.ctx.matrix(f(*x0)) + fxnorm = norm(fx) + cancel = False + while not cancel: + # get direction of descent + fxn = -fx + Jx = J(*x0) + s = self.ctx.lu_solve(Jx, fxn) + if self.verbose: + print('Jx:') + print(Jx) + print('s:', s) + # damping step size TODO: better strategy (hard task) + l = self.ctx.one + x1 = x0 + s + while True: + if x1 == x0: + if self.verbose: + print("canceled, won't get more exact") + cancel = True + break + fx = self.ctx.matrix(f(*x1)) + newnorm = norm(fx) + if newnorm < fxnorm: + # new x accepted + fxnorm = newnorm + x0 = x1 + break + l /= 2 + x1 = x0 + l*s + yield (x0, fxnorm) + +############# +# UTILITIES # +############# + +str2solver = {'newton':Newton, 'secant':Secant, 'mnewton':MNewton, + 'halley':Halley, 'muller':Muller, 'bisect':Bisection, + 'illinois':Illinois, 'pegasus':Pegasus, 'anderson':Anderson, + 'ridder':Ridder, 'anewton':ANewton, 'mdnewton':MDNewton} + +def findroot(ctx, f, x0, solver='secant', tol=None, verbose=False, verify=True, **kwargs): + r""" + Find an approximate solution to `f(x) = 0`, using *x0* as a starting point + or interval for *x*. + + Multidimensional overdetermined systems are supported. + You can specify them using a function or a list of functions. + + Mathematically speaking, this function returns `x` such that + `|f(x)|^2 \leq \mathrm{tol}` is true within the current working precision. + If the computed value does not meet this criterion, an exception is raised. + This exception can be disabled with *verify=False*. + + For interval arithmetic (``iv.findroot()``), please note that + the returned interval ``x`` is not guaranteed to contain a point where + `f(x)=0`! It is only some `x` for which `|f(x)|^2 \leq \mathrm{tol}` + certainly holds regardless of numerical error. This may be improved in + the future.
+ + **Arguments** + + *f* + one-dimensional function + *x0* + a starting point, several starting points, or an interval (depends on the solver) + *tol* + the returned solution has an error smaller than this + *verbose* + print additional information for each iteration if true + *verify* + verify the solution and raise a ValueError if `|f(x)|^2 > \mathrm{tol}` + *solver* + a generator for *f* and *x0* returning approximative solution and error + *maxsteps* + after how many steps the solver will cancel + *df* + first derivative of *f* (used by some solvers) + *d2f* + second derivative of *f* (used by some solvers) + *multidimensional* + force multidimensional solving + *J* + Jacobian matrix of *f* (used by multidimensional solvers) + *norm* + the vector norm to use (used by multidimensional solvers) + + The solver has to be callable with ``(ctx, f, x0, **kwargs)`` and return a + generator yielding pairs of approximative solution and estimated error + (which is expected to be positive). + You can use the following string aliases: + 'secant', 'mnewton', 'halley', 'muller', 'illinois', 'pegasus', 'anderson', + 'ridder', 'anewton', 'bisect' + + See mpmath.calculus.optimization for their documentation. + + **Examples** + + The function :func:`~mpmath.findroot` locates a root of a given function using the + secant method by default. A simple example use of the secant method is to + compute `\pi` as the root of `\sin x` closest to `x_0 = 3`:: + + >>> from mpmath import * + >>> mp.dps = 30; mp.pretty = True + >>> findroot(sin, 3) + 3.14159265358979323846264338328 + + The secant method can be used to find complex roots of analytic functions, + although it must in that case generally be given a nonreal starting value + (or else it will never leave the real line):: + + >>> mp.dps = 15 + >>> findroot(lambda x: x**3 + 2*x + 1, j) + (0.226698825758202 + 1.46771150871022j) + + A nice application is to compute nontrivial roots of the Riemann zeta + function with many digits (good initial values are needed for convergence):: + + >>> mp.dps = 30 + >>> findroot(zeta, 0.5+14j) + (0.5 + 14.1347251417346937904572519836j) + + The secant method can also be used as an optimization algorithm, by passing + it a derivative of a function. The following example locates the positive + minimum of the gamma function:: + + >>> mp.dps = 20 + >>> findroot(lambda x: diff(gamma, x), 1) + 1.4616321449683623413 + + Finally, a useful application is to compute inverse functions, such as the + Lambert W function which is the inverse of `w e^w`, given the first + term of the solution's asymptotic expansion as the initial value. In basic + cases, this gives identical results to mpmath's built-in ``lambertw`` + function:: + + >>> def lambert(x): + ... return findroot(lambda w: w*exp(w) - x, log(1+x)) + ... + >>> mp.dps = 15 + >>> lambert(1); lambertw(1) + 0.567143290409784 + 0.567143290409784 + >>> lambert(1000); lambertw(1000) + 5.2496028524016 + 5.2496028524016 + + Multidimensional functions are also supported:: + + >>> f = [lambda x1, x2: x1**2 + x2, + ... lambda x1, x2: 5*x1**2 - 3*x1 + 2*x2 - 3] + >>> findroot(f, (0, 0)) + [-0.618033988749895] + [-0.381966011250105] + >>> findroot(f, (10, 10)) + [ 1.61803398874989] + [-2.61803398874989] + + You can verify this by solving the system manually. + + Please note that the following (more general) syntax also works:: + + >>> def f(x1, x2): + ... return x1**2 + x2, 5*x1**2 - 3*x1 + 2*x2 - 3 + ...
+ >>> findroot(f, (0, 0)) + [-0.618033988749895] + [-0.381966011250105] + + + **Multiple roots** + + For multiple roots, all methods of the Newtonian family (including secant) + converge slowly. Consider this example:: + + >>> f = lambda x: (x - 1)**99 + >>> findroot(f, 0.9, verify=False) + 0.918073542444929 + + Even for a very close starting point, the secant method converges very + slowly. Use ``verbose=True`` to illustrate this. + + It is possible to modify Newton's method to make it converge regardless of + the root's multiplicity:: + + >>> findroot(f, -10, solver='mnewton') + 1.0 + + This variant uses the first and second derivative of the function, which is + not very efficient. + + Alternatively you can use an experimental Newtonian solver that keeps track + of the speed of convergence and accelerates it using Steffensen's method if + necessary:: + + >>> findroot(f, -10, solver='anewton', verbose=True) + x: -9.88888888888888888889 + error: 0.111111111111111111111 + converging slowly + x: -9.77890011223344556678 + error: 0.10998877665544332211 + converging slowly + x: -9.67002233332199662166 + error: 0.108877778911448945119 + converging slowly + accelerating convergence + x: -9.5622443299551077669 + error: 0.107778003366888854764 + converging slowly + x: 0.99999999999999999214 + error: 10.562244329955107759 + x: 1.0 + error: 7.8598304758094664213e-18 + ZeroDivisionError: canceled with x = 1.0 + 1.0 + + **Complex roots** + + For complex roots it's recommended to use Muller's method, as it converges + very fast even for real starting points:: + + >>> findroot(lambda x: x**4 + x + 1, (0, 1, 2), solver='muller') + (0.727136084491197 + 0.934099289460529j) + + + **Intersection methods** + + When you need to find a root in a known interval, it's highly recommended to + use an intersection-based solver like ``'anderson'`` or ``'ridder'``. + Usually they converge faster and more reliably. They do, however, have + problems with multiple roots and usually need a sign change to find a root:: + + >>> findroot(lambda x: x**3, (-1, 1), solver='anderson') + 0.0 + + Be careful with symmetric functions:: + + >>> findroot(lambda x: x**2, (-1, 1), solver='anderson') #doctest:+ELLIPSIS + Traceback (most recent call last): + ... + ZeroDivisionError + + It fails even for better starting points, because there is no sign change:: + + >>> findroot(lambda x: x**2, (-1, .5), solver='anderson') + Traceback (most recent call last): + ... + ValueError: Could not find root within given tolerance. (1.0 > 2.16840434497100886801e-19) + Try another starting point or tweak arguments.
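+ + As a usage sketch (hand-written here, not a doctest; *maxsteps* and + *verify* are the documented keywords above), the step count can be capped + and verification disabled, so that the best approximation found so far is + returned instead of an exception being raised:: + + x = findroot(lambda x: (x - 1)**99, 0.9, maxsteps=5, verify=False)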
+ + """ + prec = ctx.prec + try: + ctx.prec += 20 + + # initialize arguments + if tol is None: + tol = ctx.eps * 2**10 + + kwargs['verbose'] = kwargs.get('verbose', verbose) + + if 'd1f' in kwargs: + kwargs['df'] = kwargs['d1f'] + + kwargs['tol'] = tol + if isinstance(x0, (list, tuple)): + x0 = [ctx.convert(x) for x in x0] + else: + x0 = [ctx.convert(x0)] + + if isinstance(solver, str): + try: + solver = str2solver[solver] + except KeyError: + raise ValueError('could not recognize solver') + + # accept list of functions + if isinstance(f, (list, tuple)): + f2 = copy(f) + def tmp(*args): + return [fn(*args) for fn in f2] + f = tmp + + # detect multidimensional functions + try: + fx = f(*x0) + multidimensional = isinstance(fx, (list, tuple, ctx.matrix)) + except TypeError: + fx = f(x0[0]) + multidimensional = False + if 'multidimensional' in kwargs: + multidimensional = kwargs['multidimensional'] + if multidimensional: + # only one multidimensional solver available at the moment + solver = MDNewton + if not 'norm' in kwargs: + norm = lambda x: ctx.norm(x, 'inf') + kwargs['norm'] = norm + else: + norm = kwargs['norm'] + else: + norm = abs + + # happily return starting point if it's a root + if norm(fx) == 0: + if multidimensional: + return ctx.matrix(x0) + else: + return x0[0] + + # use solver + iterations = solver(ctx, f, x0, **kwargs) + if 'maxsteps' in kwargs: + maxsteps = kwargs['maxsteps'] + else: + maxsteps = iterations.maxsteps + i = 0 + for x, error in iterations: + if verbose: + print('x: ', x) + print('error:', error) + i += 1 + if error < tol * max(1, norm(x)) or i >= maxsteps: + break + else: + if not i: + raise ValueError('Could not find root using the given solver.\n' + 'Try another starting point or tweak arguments.') + if not isinstance(x, (list, tuple, ctx.matrix)): + xl = [x] + else: + xl = x + if verify and norm(f(*xl))**2 > tol: # TODO: better condition? + raise ValueError('Could not find root within given tolerance. ' + '(%s > %s)\n' + 'Try another starting point or tweak arguments.' + % (norm(f(*xl))**2, tol)) + return x + finally: + ctx.prec = prec + + +def multiplicity(ctx, f, root, tol=None, maxsteps=10, **kwargs): + """ + Return the multiplicity of a given root of f. + + Internally, numerical derivatives are used. This might be inefficient for + higher order derviatives. Due to this, ``multiplicity`` cancels after + evaluating 10 derivatives by default. You can be specify the n-th derivative + using the dnf keyword. + + >>> from mpmath import * + >>> multiplicity(lambda x: sin(x) - 1, pi/2) + 2 + + """ + if tol is None: + tol = ctx.eps ** 0.8 + kwargs['d0f'] = f + for i in xrange(maxsteps): + dfstr = 'd' + str(i) + 'f' + if dfstr in kwargs: + df = kwargs[dfstr] + else: + df = lambda x: ctx.diff(f, x, i) + if not abs(df(root)) < tol: + break + return i + +def steffensen(f): + """ + linear convergent function -> quadratic convergent function + + Steffensen's method for quadratic convergence of a linear converging + sequence. + Don not use it for higher rates of convergence. + It may even work for divergent sequences. + + Definition: + F(x) = (x*f(f(x)) - f(x)**2) / (f(f(x)) - 2*f(x) + x) + + Example + ....... + + You can use Steffensen's method to accelerate a fixpoint iteration of linear + (or less) convergence. + + x* is a fixpoint of the iteration x_{k+1} = phi(x_k) if x* = phi(x*). For + phi(x) = x**2 there are two fixpoints: 0 and 1. 
+ + Let's try Steffensen's method: + + >>> f = lambda x: x**2 + >>> from mpmath.calculus.optimization import steffensen + >>> F = steffensen(f) + >>> for x in [0.5, 0.9, 2.0]: + ... fx = Fx = x + ... for i in xrange(9): + ... try: + ... fx = f(fx) + ... except OverflowError: + ... pass + ... try: + ... Fx = F(Fx) + ... except ZeroDivisionError: + ... pass + ... print('%20g %20g' % (fx, Fx)) + 0.25 -0.5 + 0.0625 0.1 + 0.00390625 -0.0011236 + 1.52588e-05 1.41691e-09 + 2.32831e-10 -2.84465e-27 + 5.42101e-20 2.30189e-80 + 2.93874e-39 -1.2197e-239 + 8.63617e-78 0 + 7.45834e-155 0 + 0.81 1.02676 + 0.6561 1.00134 + 0.430467 1 + 0.185302 1 + 0.0343368 1 + 0.00117902 1 + 1.39008e-06 1 + 1.93233e-12 1 + 3.73392e-24 1 + 4 1.6 + 16 1.2962 + 256 1.10194 + 65536 1.01659 + 4.29497e+09 1.00053 + 1.84467e+19 1 + 3.40282e+38 1 + 1.15792e+77 1 + 1.34078e+154 1 + + Unmodified, the iteration converges only towards 0. Modified it converges + not only much faster, it converges even to the repelling fixpoint 1. + """ + def F(x): + fx = f(x) + ffx = f(fx) + return (x*ffx - fx**2) / (ffx - 2*fx + x) + return F + +OptimizationMethods.jacobian = jacobian +OptimizationMethods.findroot = findroot +OptimizationMethods.multiplicity = multiplicity + +if __name__ == '__main__': + import doctest + doctest.testmod() diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/polynomials.py b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/polynomials.py new file mode 100644 index 0000000000000000000000000000000000000000..ba75c1e88cbc5d40aa590a786c0af5229f193103 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/polynomials.py @@ -0,0 +1,213 @@ +from ..libmp.backend import xrange +from .calculus import defun + +#----------------------------------------------------------------------------# +# Polynomials # +#----------------------------------------------------------------------------# + +# XXX: extra precision +@defun +def polyval(ctx, coeffs, x, derivative=False): + r""" + Given coefficients `[c_n, \ldots, c_2, c_1, c_0]` and a number `x`, + :func:`~mpmath.polyval` evaluates the polynomial + + .. math :: + + P(x) = c_n x^n + \ldots + c_2 x^2 + c_1 x + c_0. + + If *derivative=True* is set, :func:`~mpmath.polyval` simultaneously + evaluates `P(x)` with the derivative, `P'(x)`, and returns the + tuple `(P(x), P'(x))`. + + >>> from mpmath import * + >>> mp.pretty = True + >>> polyval([3, 0, 2], 0.5) + 2.75 + >>> polyval([3, 0, 2], 0.5, derivative=True) + (2.75, 3.0) + + The coefficients and the evaluation point may be any combination + of real or complex numbers. + """ + if not coeffs: + return ctx.zero + p = ctx.convert(coeffs[0]) + q = ctx.zero + for c in coeffs[1:]: + if derivative: + q = p + x*q + p = c + x*p + if derivative: + return p, q + else: + return p + +@defun +def polyroots(ctx, coeffs, maxsteps=50, cleanup=True, extraprec=10, + error=False, roots_init=None): + """ + Computes all roots (real or complex) of a given polynomial. + + The roots are returned as a sorted list, where real roots appear first + followed by complex conjugate roots as adjacent elements. The polynomial + should be given as a list of coefficients, in the format used by + :func:`~mpmath.polyval`. The leading coefficient must be nonzero. + + With *error=True*, :func:`~mpmath.polyroots` returns a tuple *(roots, err)* + where *err* is an estimate of the maximum error among the computed roots. 
+ + **Examples** + + Finding the three real roots of `x^3 - x^2 - 14x + 24`:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> nprint(polyroots([1,-1,-14,24]), 4) + [-4.0, 2.0, 3.0] + + Finding the two complex conjugate roots of `4x^2 + 3x + 2`, with an + error estimate:: + + >>> roots, err = polyroots([4,3,2], error=True) + >>> for r in roots: + ... print(r) + ... + (-0.375 + 0.59947894041409j) + (-0.375 - 0.59947894041409j) + >>> + >>> err + 2.22044604925031e-16 + >>> + >>> polyval([4,3,2], roots[0]) + (2.22044604925031e-16 + 0.0j) + >>> polyval([4,3,2], roots[1]) + (2.22044604925031e-16 + 0.0j) + + The following example computes all the 5th roots of unity; that is, + the roots of `x^5 - 1`:: + + >>> mp.dps = 20 + >>> for r in polyroots([1, 0, 0, 0, 0, -1]): + ... print(r) + ... + 1.0 + (-0.8090169943749474241 + 0.58778525229247312917j) + (-0.8090169943749474241 - 0.58778525229247312917j) + (0.3090169943749474241 + 0.95105651629515357212j) + (0.3090169943749474241 - 0.95105651629515357212j) + + **Precision and conditioning** + + The roots are computed to the current working precision accuracy. If this + accuracy cannot be achieved in ``maxsteps`` steps, then a + ``NoConvergence`` exception is raised. The algorithm internally is using + the current working precision extended by ``extraprec``. If + ``NoConvergence`` was raised, that is caused either by not having enough + extra precision to achieve convergence (in which case increasing + ``extraprec`` should fix the problem) or too low ``maxsteps`` (in which + case increasing ``maxsteps`` should fix the problem), or a combination of + both. + + The user should always do a convergence study with regards to + ``extraprec`` to ensure accurate results. It is possible to get + convergence to a wrong answer with too low ``extraprec``. + + Provided there are no repeated roots, :func:`~mpmath.polyroots` can + typically compute all roots of an arbitrary polynomial to high precision:: + + >>> mp.dps = 60 + >>> for r in polyroots([1, 0, -10, 0, 1]): + ... print(r) + ... + -3.14626436994197234232913506571557044551247712918732870123249 + -0.317837245195782244725757617296174288373133378433432554879127 + 0.317837245195782244725757617296174288373133378433432554879127 + 3.14626436994197234232913506571557044551247712918732870123249 + >>> + >>> sqrt(3) + sqrt(2) + 3.14626436994197234232913506571557044551247712918732870123249 + >>> sqrt(3) - sqrt(2) + 0.317837245195782244725757617296174288373133378433432554879127 + + **Algorithm** + + :func:`~mpmath.polyroots` implements the Durand-Kerner method [1], which + uses complex arithmetic to locate all roots simultaneously. + The Durand-Kerner method can be viewed as approximately performing + simultaneous Newton iteration for all the roots. In particular, + the convergence to simple roots is quadratic, just like Newton's + method. + + Although all roots are internally calculated using complex arithmetic, any + root found to have an imaginary part smaller than the estimated numerical + error is truncated to a real number (small real parts are also chopped). + Real roots are placed first in the returned list, sorted by value. The + remaining complex roots are sorted by their real parts so that conjugate + roots end up next to each other. + + **References** + + 1. 
http://en.wikipedia.org/wiki/Durand-Kerner_method + + """ + if len(coeffs) <= 1: + if not coeffs or not coeffs[0]: + raise ValueError("Input to polyroots must not be the zero polynomial") + # Constant polynomial with no roots + return [] + + orig = ctx.prec + tol = +ctx.eps + with ctx.extraprec(extraprec): + deg = len(coeffs) - 1 + # Must be monic + lead = ctx.convert(coeffs[0]) + if lead == 1: + coeffs = [ctx.convert(c) for c in coeffs] + else: + coeffs = [c/lead for c in coeffs] + f = lambda x: ctx.polyval(coeffs, x) + if roots_init is None: + roots = [ctx.mpc((0.4+0.9j)**n) for n in xrange(deg)] + else: + roots = [None]*deg; + deg_init = min(deg, len(roots_init)) + roots[:deg_init] = list(roots_init[:deg_init]) + roots[deg_init:] = [ctx.mpc((0.4+0.9j)**n) for n + in xrange(deg_init,deg)] + err = [ctx.one for n in xrange(deg)] + # Durand-Kerner iteration until convergence + for step in xrange(maxsteps): + if abs(max(err)) < tol: + break + for i in xrange(deg): + p = roots[i] + x = f(p) + for j in range(deg): + if i != j: + try: + x /= (p-roots[j]) + except ZeroDivisionError: + continue + roots[i] = p - x + err[i] = abs(x) + if abs(max(err)) >= tol: + raise ctx.NoConvergence("Didn't converge in maxsteps=%d steps." \ + % maxsteps) + # Remove small real or imaginary parts + if cleanup: + for i in xrange(deg): + if abs(roots[i]) < tol: + roots[i] = ctx.zero + elif abs(ctx._im(roots[i])) < tol: + roots[i] = roots[i].real + elif abs(ctx._re(roots[i])) < tol: + roots[i] = roots[i].imag * 1j + roots.sort(key=lambda x: (abs(ctx._im(x)), ctx._re(x))) + if error: + err = max(err) + err = max(err, ctx.ldexp(1, -orig+1)) + return [+r for r in roots], +err + else: + return [+r for r in roots] diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/quadrature.py b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/quadrature.py new file mode 100644 index 0000000000000000000000000000000000000000..0545b3ad3e6f70a44f4ec31a0c3bcfb4ffde422b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/mpmath/calculus/quadrature.py @@ -0,0 +1,1115 @@ +import math + +from ..libmp.backend import xrange + +class QuadratureRule(object): + """ + Quadrature rules are implemented using this class, in order to + simplify the code and provide a common infrastructure + for tasks such as error estimation and node caching. + + You can implement a custom quadrature rule by subclassing + :class:`QuadratureRule` and implementing the appropriate + methods. The subclass can then be used by :func:`~mpmath.quad` by + passing it as the *method* argument. + + :class:`QuadratureRule` instances are supposed to be singletons. + :class:`QuadratureRule` therefore implements instance caching + in :func:`~mpmath.__new__`. + """ + + def __init__(self, ctx): + self.ctx = ctx + self.standard_cache = {} + self.transformed_cache = {} + self.interval_count = {} + + def clear(self): + """ + Delete cached node data. + """ + self.standard_cache = {} + self.transformed_cache = {} + self.interval_count = {} + + def calc_nodes(self, degree, prec, verbose=False): + r""" + Compute nodes for the standard interval `[-1, 1]`. Subclasses + should probably implement only this method, and use + :func:`~mpmath.get_nodes` method to retrieve the nodes. + """ + raise NotImplementedError + + def get_nodes(self, a, b, degree, prec, verbose=False): + """ + Return nodes for given interval, degree and precision. 
The + nodes are retrieved from a cache if already computed; + otherwise they are computed by calling :func:`~mpmath.calc_nodes` + and are then cached. + + Subclasses should probably not implement this method, + but just implement :func:`~mpmath.calc_nodes` for the actual + node computation. + """ + key = (a, b, degree, prec) + if key in self.transformed_cache: + return self.transformed_cache[key] + orig = self.ctx.prec + try: + self.ctx.prec = prec+20 + # Get nodes on standard interval + if (degree, prec) in self.standard_cache: + nodes = self.standard_cache[degree, prec] + else: + nodes = self.calc_nodes(degree, prec, verbose) + self.standard_cache[degree, prec] = nodes + # Transform to general interval + nodes = self.transform_nodes(nodes, a, b, verbose) + if key in self.interval_count: + self.transformed_cache[key] = nodes + else: + self.interval_count[key] = True + finally: + self.ctx.prec = orig + return nodes + + def transform_nodes(self, nodes, a, b, verbose=False): + r""" + Rescale standardized nodes (for `[-1, 1]`) to a general + interval `[a, b]`. For a finite interval, a simple linear + change of variables is used. Otherwise, the following + transformations are used: + + .. math :: + + \lbrack a, \infty \rbrack : t = \frac{1}{x} + (a-1) + + \lbrack -\infty, b \rbrack : t = (b+1) - \frac{1}{x} + + \lbrack -\infty, \infty \rbrack : t = \frac{x}{\sqrt{1-x^2}} + + """ + ctx = self.ctx + a = ctx.convert(a) + b = ctx.convert(b) + one = ctx.one + if (a, b) == (-one, one): + return nodes + half = ctx.mpf(0.5) + new_nodes = [] + if ctx.isinf(a) or ctx.isinf(b): + if (a, b) == (ctx.ninf, ctx.inf): + p05 = -half + for x, w in nodes: + x2 = x*x + px1 = one-x2 + spx1 = px1**p05 + x = x*spx1 + w *= spx1/px1 + new_nodes.append((x, w)) + elif a == ctx.ninf: + b1 = b+1 + for x, w in nodes: + u = 2/(x+one) + x = b1-u + w *= half*u**2 + new_nodes.append((x, w)) + elif b == ctx.inf: + a1 = a-1 + for x, w in nodes: + u = 2/(x+one) + x = a1+u + w *= half*u**2 + new_nodes.append((x, w)) + elif a == ctx.inf or b == ctx.ninf: + return [(x,-w) for (x,w) in self.transform_nodes(nodes, b, a, verbose)] + else: + raise NotImplementedError + else: + # Simple linear change of variables + C = (b-a)/2 + D = (b+a)/2 + for x, w in nodes: + new_nodes.append((D+C*x, C*w)) + return new_nodes + + def guess_degree(self, prec): + """ + Given a desired precision `p` in bits, estimate the degree `m` + of the quadrature required to accomplish full accuracy for + typical integrals. By default, :func:`~mpmath.quad` will perform up + to `m` iterations. The value of `m` should be a slight + overestimate, so that "slightly bad" integrals can be dealt + with automatically using a few extra iterations. On the + other hand, it should not be too big, so :func:`~mpmath.quad` can + quit within a reasonable amount of time when it is given + an "unsolvable" integral. + + The default formula used by :func:`~mpmath.guess_degree` is tuned + for both :class:`TanhSinh` and :class:`GaussLegendre`. + The output is roughly as follows: + + +---------+---------+ + | `p` | `m` | + +=========+=========+ + | 50 | 6 | + +---------+---------+ + | 100 | 7 | + +---------+---------+ + | 500 | 10 | + +---------+---------+ + | 3000 | 12 | + +---------+---------+ + + This formula is based purely on a limited amount of + experimentation and will sometimes be wrong. 
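+ + (As implemented below, the estimate used is roughly + `m = 6 + \max(0, \log_2(p/30))`.)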
+ """ + # Expected degree + # XXX: use mag + g = int(4 + max(0, self.ctx.log(prec/30.0, 2))) + # Reasonable "worst case" + g += 2 + return g + + def estimate_error(self, results, prec, epsilon): + r""" + Given results from integrations `[I_1, I_2, \ldots, I_k]` done + with a quadrature of rule of degree `1, 2, \ldots, k`, estimate + the error of `I_k`. + + For `k = 2`, we estimate `|I_{\infty}-I_2|` as `|I_2-I_1|`. + + For `k > 2`, we extrapolate `|I_{\infty}-I_k| \approx |I_{k+1}-I_k|` + from `|I_k-I_{k-1}|` and `|I_k-I_{k-2}|` under the assumption + that each degree increment roughly doubles the accuracy of + the quadrature rule (this is true for both :class:`TanhSinh` + and :class:`GaussLegendre`). The extrapolation formula is given + by Borwein, Bailey & Girgensohn. Although not very conservative, + this method seems to be very robust in practice. + """ + if len(results) == 2: + return abs(results[0]-results[1]) + try: + if results[-1] == results[-2] == results[-3]: + return self.ctx.zero + D1 = self.ctx.log(abs(results[-1]-results[-2]), 10) + D2 = self.ctx.log(abs(results[-1]-results[-3]), 10) + except ValueError: + return epsilon + D3 = -prec + D4 = min(0, max(D1**2/D2, 2*D1, D3)) + return self.ctx.mpf(10) ** int(D4) + + def summation(self, f, points, prec, epsilon, max_degree, verbose=False): + """ + Main integration function. Computes the 1D integral over + the interval specified by *points*. For each subinterval, + performs quadrature of degree from 1 up to *max_degree* + until :func:`~mpmath.estimate_error` signals convergence. + + :func:`~mpmath.summation` transforms each subintegration to + the standard interval and then calls :func:`~mpmath.sum_next`. + """ + ctx = self.ctx + I = total_err = ctx.zero + for i in xrange(len(points)-1): + a, b = points[i], points[i+1] + if a == b: + continue + # XXX: we could use a single variable transformation, + # but this is not good in practice. We get better accuracy + # by having 0 as an endpoint. + if (a, b) == (ctx.ninf, ctx.inf): + _f = f + f = lambda x: _f(-x) + _f(x) + a, b = (ctx.zero, ctx.inf) + results = [] + err = ctx.zero + for degree in xrange(1, max_degree+1): + nodes = self.get_nodes(a, b, degree, prec, verbose) + if verbose: + print("Integrating from %s to %s (degree %s of %s)" % \ + (ctx.nstr(a), ctx.nstr(b), degree, max_degree)) + result = self.sum_next(f, nodes, degree, prec, results, verbose) + results.append(result) + if degree > 1: + err = self.estimate_error(results, prec, epsilon) + if verbose: + print("Estimated error:", ctx.nstr(err), " epsilon:", ctx.nstr(epsilon), " result: ", ctx.nstr(result)) + if err <= epsilon: + break + I += results[-1] + total_err += err + if total_err > epsilon: + if verbose: + print("Failed to reach full accuracy. Estimated error:", ctx.nstr(total_err)) + return I, total_err + + def sum_next(self, f, nodes, degree, prec, previous, verbose=False): + r""" + Evaluates the step sum `\sum w_k f(x_k)` where the *nodes* list + contains the `(w_k, x_k)` pairs. + + :func:`~mpmath.summation` will supply the list *results* of + values computed by :func:`~mpmath.sum_next` at previous degrees, in + case the quadrature rule is able to reuse them. + """ + return self.ctx.fdot((w, f(x)) for (x,w) in nodes) + + +class TanhSinh(QuadratureRule): + r""" + This class implements "tanh-sinh" or "doubly exponential" + quadrature. This quadrature rule is based on the Euler-Maclaurin + integral formula. 
By performing a change of variables involving + nested exponentials / hyperbolic functions (hence the name), the + derivatives at the endpoints vanish rapidly. Since the error term + in the Euler-Maclaurin formula depends on the derivatives at the + endpoints, a simple step sum becomes extremely accurate. In + practice, this means that doubling the number of evaluation + points roughly doubles the number of accurate digits. + + Comparison to Gauss-Legendre: + * Initial computation of nodes is usually faster + * Handles endpoint singularities better + * Handles infinite integration intervals better + * Is slower for smooth integrands once nodes have been computed + + The implementation of the tanh-sinh algorithm is based on the + description given in Borwein, Bailey & Girgensohn, "Experimentation + in Mathematics - Computational Paths to Discovery", A K Peters, + 2003, pages 312-313. In the present implementation, a few + improvements have been made: + + * A more efficient scheme is used to compute nodes (exploiting + recurrence for the exponential function) + * The nodes are computed successively instead of all at once + + **References** + + * [Bailey]_ + * http://users.cs.dal.ca/~jborwein/tanh-sinh.pdf + + """ + + def sum_next(self, f, nodes, degree, prec, previous, verbose=False): + """ + Step sum for tanh-sinh quadrature of degree `m`. We exploit the + fact that half of the abscissas at degree `m` are precisely the + abscissas from degree `m-1`. Thus reusing the result from + the previous level allows a 2x speedup. + """ + h = self.ctx.mpf(2)**(-degree) + # Abscissas overlap, so reusing saves half of the time + if previous: + S = previous[-1]/(h*2) + else: + S = self.ctx.zero + S += self.ctx.fdot((w,f(x)) for (x,w) in nodes) + return h*S + + def calc_nodes(self, degree, prec, verbose=False): + r""" + The abscissas and weights for tanh-sinh quadrature of degree + `m` are given by + + .. math:: + + x_k = \tanh(\pi/2 \sinh(t_k)) + + w_k = \pi/2 \cosh(t_k) / \cosh(\pi/2 \sinh(t_k))^2 + + where `t_k = t_0 + hk` for a step length `h \sim 2^{-m}`. The + list of nodes is actually infinite, but the weights die off so + rapidly that only a few are needed. + """ + ctx = self.ctx + nodes = [] + + extra = 20 + ctx.prec += extra + tol = ctx.ldexp(1, -prec-10) + pi4 = ctx.pi/4 + + # For simplicity, we work in steps h = 1/2^n, with the first point + # offset so that we can reuse the sum from the previous degree + + # We define degree 1 to include the "degree 0" steps, including + # the point x = 0. (It doesn't work well otherwise; not sure why.) + t0 = ctx.ldexp(1, -degree) + if degree == 1: + #nodes.append((mpf(0), pi4)) + #nodes.append((-mpf(0), pi4)) + nodes.append((ctx.zero, ctx.pi/2)) + h = t0 + else: + h = t0*2 + + # Since h is fixed, we can compute the next exponential + # by simply multiplying by exp(h) + expt0 = ctx.exp(t0) + a = pi4 * expt0 + b = pi4 / expt0 + udelta = ctx.exp(h) + urdelta = 1/udelta + + for k in xrange(0, 20*2**degree+1): + # Reference implementation: + # t = t0 + k*h + # x = tanh(pi/2 * sinh(t)) + # w = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2 + + # Fast implementation. Note that c = exp(pi/2 * sinh(t)) + c = ctx.exp(a-b) + d = 1/c + co = (c+d)/2 + si = (c-d)/2 + x = si / co + w = (a+b) / co**2 + diff = abs(x-1) + if diff <= tol: + break + + nodes.append((x, w)) + nodes.append((-x, w)) + + a *= udelta + b *= urdelta + + if verbose and k % 300 == 150: + # Note: the number displayed is rather arbitrary. 
Should + # figure out how to print something that looks more like a + # percentage + print("Calculating nodes:", ctx.nstr(-ctx.log(diff, 10) / prec)) + + ctx.prec -= extra + return nodes + + +class GaussLegendre(QuadratureRule): + r""" + This class implements Gauss-Legendre quadrature, which is + exceptionally efficient for polynomials and polynomial-like (i.e. + very smooth) integrands. + + The abscissas and weights are given by roots and values of + Legendre polynomials, which are the orthogonal polynomials + on `[-1, 1]` with respect to the unit weight + (see :func:`~mpmath.legendre`). + + In this implementation, we take the "degree" `m` of the quadrature + to denote a Gauss-Legendre rule of degree `3 \cdot 2^m` (following + Borwein, Bailey & Girgensohn). This way we get quadratic, rather + than linear, convergence as the degree is incremented. + + Comparison to tanh-sinh quadrature: + * Is faster for smooth integrands once nodes have been computed + * Initial computation of nodes is usually slower + * Handles endpoint singularities worse + * Handles infinite integration intervals worse + + """ + + def calc_nodes(self, degree, prec, verbose=False): + r""" + Calculates the abscissas and weights for Gauss-Legendre + quadrature of the given degree `m` (actually `3 \cdot 2^m`). + """ + ctx = self.ctx + # It is important that the epsilon is set lower than the + # "real" epsilon + epsilon = ctx.ldexp(1, -prec-8) + # Fairly high precision might be required for accurate + # evaluation of the roots + orig = ctx.prec + ctx.prec = int(prec*1.5) + if degree == 1: + x = ctx.sqrt(ctx.mpf(3)/5) + w = ctx.mpf(5)/9 + nodes = [(-x,w),(ctx.zero,ctx.mpf(8)/9),(x,w)] + ctx.prec = orig + return nodes + nodes = [] + n = 3*2**(degree-1) + upto = n//2 + 1 + for j in xrange(1, upto): + # Asymptotic formula for the roots + r = ctx.mpf(math.cos(math.pi*(j-0.25)/(n+0.5))) + # Newton iteration + while 1: + t1, t2 = 1, 0 + # Evaluates the Legendre polynomial using its defining + # recurrence relation + for j1 in xrange(1,n+1): + t3, t2, t1 = t2, t1, ((2*j1-1)*r*t1 - (j1-1)*t2)/j1 + t4 = n*(r*t1-t2)/(r**2-1) + a = t1/t4 + r = r - a + if abs(a) < epsilon: + break + x = r + w = 2/((1-r**2)*t4**2) + if verbose and j % 30 == 15: + print("Computing nodes (%i of %i)" % (j, upto)) + nodes.append((x, w)) + nodes.append((-x, w)) + ctx.prec = orig + return nodes + +class QuadratureMethods(object): + + def __init__(ctx, *args, **kwargs): + ctx._gauss_legendre = GaussLegendre(ctx) + ctx._tanh_sinh = TanhSinh(ctx) + + def quad(ctx, f, *points, **kwargs): + r""" + Computes a single, double or triple integral over a given + 1D interval, 2D rectangle, or 3D cuboid. A basic example:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> quad(sin, [0, pi]) + 2.0 + + A basic 2D integral:: + + >>> f = lambda x, y: cos(x+y/2) + >>> quad(f, [-pi/2, pi/2], [0, pi]) + 4.0 + + **Interval format** + + The integration range for each dimension may be specified + using a list or tuple. Arguments are interpreted as follows: + + ``quad(f, [x1, x2])`` -- calculates + `\int_{x_1}^{x_2} f(x) \, dx` + + ``quad(f, [x1, x2], [y1, y2])`` -- calculates + `\int_{x_1}^{x_2} \int_{y_1}^{y_2} f(x,y) \, dy \, dx` + + ``quad(f, [x1, x2], [y1, y2], [z1, z2])`` -- calculates + `\int_{x_1}^{x_2} \int_{y_1}^{y_2} \int_{z_1}^{z_2} f(x,y,z) + \, dz \, dy \, dx` + + Endpoints may be finite or infinite. An interval descriptor + may also contain more than two points.
In this + case, the integration is split into subintervals, between + each pair of consecutive points. This is useful for + dealing with mid-interval discontinuities, or integrating + over large intervals where the function is irregular or + oscillates. + + **Options** + + :func:`~mpmath.quad` recognizes the following keyword arguments: + + *method* + Chooses integration algorithm (described below). + *error* + If set to true, :func:`~mpmath.quad` returns `(v, e)` where `v` is the + integral and `e` is the estimated error. + *maxdegree* + Maximum degree of the quadrature rule to try before + quitting. + *verbose* + Print details about progress. + + **Algorithms** + + Mpmath presently implements two integration algorithms: tanh-sinh + quadrature and Gauss-Legendre quadrature. These can be selected + using *method='tanh-sinh'* or *method='gauss-legendre'* or by + passing the classes *method=TanhSinh*, *method=GaussLegendre*. + The functions :func:`~mpmath.quadts` and :func:`~mpmath.quadgl` are also available + as shortcuts. + + Both algorithms have the property that doubling the number of + evaluation points roughly doubles the accuracy, so both are ideal + for high precision quadrature (hundreds or thousands of digits). + + At high precision, computing the nodes and weights for the + integration can be expensive (more expensive than computing the + function values). To make repeated integrations fast, nodes + are automatically cached. + + The advantages of the tanh-sinh algorithm are that it tends to + handle endpoint singularities well, and that the nodes are cheap + to compute on the first run. For these reasons, it is used by + :func:`~mpmath.quad` as the default algorithm. + + Gauss-Legendre quadrature often requires fewer function + evaluations, and is therefore often faster for repeated use, but + the algorithm does not handle endpoint singularities as well and + the nodes are more expensive to compute. Gauss-Legendre quadrature + can be a better choice if the integrand is smooth and repeated + integrations are required (e.g. for multiple integrals). + + See the documentation for :class:`TanhSinh` and + :class:`GaussLegendre` for additional details. + + **Examples of 1D integrals** + + Intervals may be infinite or half-infinite. The following two + examples evaluate the limits of the inverse tangent function + (`\int 1/(1+x^2) = \tan^{-1} x`), and the Gaussian integral + `\int_{-\infty}^{\infty} \exp(-x^2)\,dx = \sqrt{\pi}`:: + + >>> mp.dps = 15 + >>> quad(lambda x: 2/(x**2+1), [0, inf]) + 3.14159265358979 + >>> quad(lambda x: exp(-x**2), [-inf, inf])**2 + 3.14159265358979 + + Integrals can typically be resolved to high precision. + The following computes 50 digits of `\pi` by integrating the + area of the half-circle defined by `x^2 + y^2 \le 1`, + `-1 \le x \le 1`, `y \ge 0`:: + + >>> mp.dps = 50 + >>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1]) + 3.1415926535897932384626433832795028841971693993751 + + One can just as well compute 1000 digits (output truncated):: + + >>> mp.dps = 1000 + >>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1]) #doctest:+ELLIPSIS + 3.141592653589793238462643383279502884...216420199 + + Complex integrals are supported.
The following computes + a residue at `z = 0` by integrating counterclockwise along the + diamond-shaped path from `1` to `+i` to `-1` to `-i` to `1`:: + + >>> mp.dps = 15 + >>> chop(quad(lambda z: 1/z, [1,j,-1,-j,1])) + (0.0 + 6.28318530717959j) + + **Examples of 2D and 3D integrals** + + Here are several nice examples of analytically solvable + 2D integrals (taken from MathWorld [1]) that can be evaluated + to high precision fairly rapidly by :func:`~mpmath.quad`:: + + >>> mp.dps = 30 + >>> f = lambda x, y: (x-1)/((1-x*y)*log(x*y)) + >>> quad(f, [0, 1], [0, 1]) + 0.577215664901532860606512090082 + >>> +euler + 0.577215664901532860606512090082 + + >>> f = lambda x, y: 1/sqrt(1+x**2+y**2) + >>> quad(f, [-1, 1], [-1, 1]) + 3.17343648530607134219175646705 + >>> 4*log(2+sqrt(3))-2*pi/3 + 3.17343648530607134219175646705 + + >>> f = lambda x, y: 1/(1-x**2 * y**2) + >>> quad(f, [0, 1], [0, 1]) + 1.23370055013616982735431137498 + >>> pi**2 / 8 + 1.23370055013616982735431137498 + + >>> quad(lambda x, y: 1/(1-x*y), [0, 1], [0, 1]) + 1.64493406684822643647241516665 + >>> pi**2 / 6 + 1.64493406684822643647241516665 + + Multiple integrals may be done over infinite ranges:: + + >>> mp.dps = 15 + >>> print(quad(lambda x,y: exp(-x-y), [0, inf], [1, inf])) + 0.367879441171442 + >>> print(1/e) + 0.367879441171442 + + For nonrectangular areas, one can call :func:`~mpmath.quad` recursively. + For example, we can replicate the earlier example of calculating + `\pi` by integrating over the unit circle, this time using double + quadrature to actually measure the area of the circle:: + + >>> f = lambda x: quad(lambda y: 1, [-sqrt(1-x**2), sqrt(1-x**2)]) + >>> quad(f, [-1, 1]) + 3.14159265358979 + + Here is a simple triple integral:: + + >>> mp.dps = 15 + >>> f = lambda x,y,z: x*y/(1+z) + >>> quad(f, [0,1], [0,1], [1,2], method='gauss-legendre') + 0.101366277027041 + >>> (log(3)-log(2))/4 + 0.101366277027041 + + **Singularities** + + Both tanh-sinh and Gauss-Legendre quadrature are designed to + integrate smooth (infinitely differentiable) functions. Neither + algorithm copes well with mid-interval singularities (such as + mid-interval discontinuities in `f(x)` or `f'(x)`). + The best solution is to split the integral into parts:: + + >>> mp.dps = 15 + >>> quad(lambda x: abs(sin(x)), [0, 2*pi]) # Bad + 3.99900894176779 + >>> quad(lambda x: abs(sin(x)), [0, pi, 2*pi]) # Good + 4.0 + + The tanh-sinh rule often works well for integrands having a + singularity at one or both endpoints:: + + >>> mp.dps = 15 + >>> quad(log, [0, 1], method='tanh-sinh') # Good + -1.0 + >>> quad(log, [0, 1], method='gauss-legendre') # Bad + -0.999932197413801 + + However, the result may still be inaccurate for some functions:: + + >>> quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh') + 1.99999999946942 + + This problem is not due to the quadrature rule per se, but to + numerical amplification of errors in the nodes. The problem can be + circumvented by temporarily increasing the precision:: + + >>> mp.dps = 30 + >>> a = quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh') + >>> mp.dps = 15 + >>> +a + 2.0 + + **Highly variable functions** + + For functions that are smooth (in the sense of being infinitely + differentiable) but contain sharp mid-interval peaks or many + "bumps", :func:`~mpmath.quad` may fail to provide full accuracy.
For + example, with default settings, :func:`~mpmath.quad` is able to integrate + `\sin(x)` accurately over an interval of length 100 but not over + length 1000:: + + >>> quad(sin, [0, 100]); 1-cos(100) # Good + 0.137681127712316 + 0.137681127712316 + >>> quad(sin, [0, 1000]); 1-cos(1000) # Bad + -37.8587612408485 + 0.437620923709297 + + One solution is to break the integration into 10 intervals of + length 100:: + + >>> quad(sin, linspace(0, 1000, 10)) # Good + 0.437620923709297 + + Another is to increase the degree of the quadrature:: + + >>> quad(sin, [0, 1000], maxdegree=10) # Also good + 0.437620923709297 + + Whether splitting the interval or increasing the degree is + more efficient differs from case to case. Another example is the + function `1/(1+x^2)`, which has a sharp peak centered around + `x = 0`:: + + >>> f = lambda x: 1/(1+x**2) + >>> quad(f, [-100, 100]) # Bad + 3.64804647105268 + >>> quad(f, [-100, 100], maxdegree=10) # Good + 3.12159332021646 + >>> quad(f, [-100, 0, 100]) # Also good + 3.12159332021646 + + **References** + + 1. http://mathworld.wolfram.com/DoubleIntegral.html + + """ + rule = kwargs.get('method', 'tanh-sinh') + if type(rule) is str: + if rule == 'tanh-sinh': + rule = ctx._tanh_sinh + elif rule == 'gauss-legendre': + rule = ctx._gauss_legendre + else: + raise ValueError("unknown quadrature rule: %s" % rule) + else: + rule = rule(ctx) + verbose = kwargs.get('verbose') + dim = len(points) + orig = prec = ctx.prec + epsilon = ctx.eps/8 + m = kwargs.get('maxdegree') or rule.guess_degree(prec) + points = [ctx._as_points(p) for p in points] + try: + ctx.prec += 20 + if dim == 1: + v, err = rule.summation(f, points[0], prec, epsilon, m, verbose) + elif dim == 2: + v, err = rule.summation(lambda x: \ + rule.summation(lambda y: f(x,y), \ + points[1], prec, epsilon, m)[0], + points[0], prec, epsilon, m, verbose) + elif dim == 3: + v, err = rule.summation(lambda x: \ + rule.summation(lambda y: \ + rule.summation(lambda z: f(x,y,z), \ + points[2], prec, epsilon, m)[0], + points[1], prec, epsilon, m)[0], + points[0], prec, epsilon, m, verbose) + else: + raise NotImplementedError("quadrature must have dim 1, 2 or 3") + finally: + ctx.prec = orig + if kwargs.get("error"): + return +v, err + return +v + + def quadts(ctx, *args, **kwargs): + """ + Performs tanh-sinh quadrature. The call + + quadts(func, *points, ...) + + is simply a shortcut for: + + quad(func, *points, ..., method=TanhSinh) + + For example, a single integral and a double integral: + + quadts(lambda x: exp(cos(x)), [0, 1]) + quadts(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1]) + + See the documentation for quad for information about how points + arguments and keyword arguments are parsed. + + See documentation for TanhSinh for algorithmic information about + tanh-sinh quadrature. + """ + kwargs['method'] = 'tanh-sinh' + return ctx.quad(*args, **kwargs) + + def quadgl(ctx, *args, **kwargs): + """ + Performs Gauss-Legendre quadrature. The call + + quadgl(func, *points, ...) + + is simply a shortcut for: + + quad(func, *points, ..., method=GaussLegendre) + + For example, a single integral and a double integral: + + quadgl(lambda x: exp(cos(x)), [0, 1]) + quadgl(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1]) + + See the documentation for quad for information about how points + arguments and keyword arguments are parsed. + + See documentation for GaussLegendre for algorithmic information about + Gauss-Legendre quadrature.
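+
+        A quick sanity check (a sketch; the digits shown are indicative and
+        assume the same doctest setup used elsewhere in this module)::
+
+            >>> from mpmath import *
+            >>> mp.dps = 15; mp.pretty = True
+            >>> quadgl(sin, [0, pi])
+            2.0
+            >>> quadts(sin, [0, pi])
+            2.0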
+ """ + kwargs['method'] = 'gauss-legendre' + return ctx.quad(*args, **kwargs) + + def quadosc(ctx, f, interval, omega=None, period=None, zeros=None): + r""" + Calculates + + .. math :: + + I = \int_a^b f(x) dx + + where at least one of `a` and `b` is infinite and where + `f(x) = g(x) \cos(\omega x + \phi)` for some slowly + decreasing function `g(x)`. With proper input, :func:`~mpmath.quadosc` + can also handle oscillatory integrals where the oscillation + rate is different from a pure sine or cosine wave. + + In the standard case when `|a| < \infty, b = \infty`, + :func:`~mpmath.quadosc` works by evaluating the infinite series + + .. math :: + + I = \int_a^{x_1} f(x) dx + + \sum_{k=1}^{\infty} \int_{x_k}^{x_{k+1}} f(x) dx + + where `x_k` are consecutive zeros (alternatively + some other periodic reference point) of `f(x)`. + Accordingly, :func:`~mpmath.quadosc` requires information about the + zeros of `f(x)`. For a periodic function, you can specify + the zeros by either providing the angular frequency `\omega` + (*omega*) or the *period* `2 \pi/\omega`. In general, you can + specify the `n`-th zero by providing the *zeros* arguments. + Below is an example of each:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> f = lambda x: sin(3*x)/(x**2+1) + >>> quadosc(f, [0,inf], omega=3) + 0.37833007080198 + >>> quadosc(f, [0,inf], period=2*pi/3) + 0.37833007080198 + >>> quadosc(f, [0,inf], zeros=lambda n: pi*n/3) + 0.37833007080198 + >>> (ei(3)*exp(-3)-exp(3)*ei(-3))/2 # Computed by Mathematica + 0.37833007080198 + + Note that *zeros* was specified to multiply `n` by the + *half-period*, not the full period. In theory, it does not matter + whether each partial integral is done over a half period or a full + period. However, if done over half-periods, the infinite series + passed to :func:`~mpmath.nsum` becomes an *alternating series* and this + typically makes the extrapolation much more efficient. + + Here is an example of an integration over the entire real line, + and a half-infinite integration starting at `-\infty`:: + + >>> quadosc(lambda x: cos(x)/(1+x**2), [-inf, inf], omega=1) + 1.15572734979092 + >>> pi/e + 1.15572734979092 + >>> quadosc(lambda x: cos(x)/x**2, [-inf, -1], period=2*pi) + -0.0844109505595739 + >>> cos(1)+si(1)-pi/2 + -0.0844109505595738 + + Of course, the integrand may contain a complex exponential just as + well as a real sine or cosine:: + + >>> quadosc(lambda x: exp(3*j*x)/(1+x**2), [-inf,inf], omega=3) + (0.156410688228254 + 0.0j) + >>> pi/e**3 + 0.156410688228254 + >>> quadosc(lambda x: exp(3*j*x)/(2+x+x**2), [-inf,inf], omega=3) + (0.00317486988463794 - 0.0447701735209082j) + >>> 2*pi/sqrt(7)/exp(3*(j+sqrt(7))/2) + (0.00317486988463794 - 0.0447701735209082j) + + **Non-periodic functions** + + If `f(x) = g(x) h(x)` for some function `h(x)` that is not + strictly periodic, *omega* or *period* might not work, and it might + be necessary to use *zeros*. + + A notable exception can be made for Bessel functions which, though not + periodic, are "asymptotically periodic" in a sufficiently strong sense + that the sum extrapolation will work out:: + + >>> quadosc(j0, [0, inf], period=2*pi) + 1.0 + >>> quadosc(j1, [0, inf], period=2*pi) + 1.0 + + More properly, one should provide the exact Bessel function zeros:: + + >>> j0zero = lambda n: findroot(j0, pi*(n-0.25)) + >>> quadosc(j0, [0, inf], zeros=j0zero) + 1.0 + + For an example where *zeros* becomes necessary, consider the + complete Fresnel integrals + + .. 
math :: + + \int_0^{\infty} \cos x^2\,dx = \int_0^{\infty} \sin x^2\,dx + = \sqrt{\frac{\pi}{8}}. + + Although the integrands do not decrease in magnitude as + `x \to \infty`, the integrals are convergent since the oscillation + rate increases (causing consecutive periods to asymptotically + cancel out). These integrals are virtually impossible to calculate + to any kind of accuracy using standard quadrature rules. However, + if one provides the correct asymptotic distribution of zeros + (`x_n \sim \sqrt{n}`), :func:`~mpmath.quadosc` works:: + + >>> mp.dps = 30 + >>> f = lambda x: cos(x**2) + >>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n)) + 0.626657068657750125603941321203 + >>> f = lambda x: sin(x**2) + >>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n)) + 0.626657068657750125603941321203 + >>> sqrt(pi/8) + 0.626657068657750125603941321203 + + (Interestingly, these integrals can still be evaluated if one + places some other constant than `\pi` in the square root sign.) + + In general, if `f(x) \sim g(x) \cos(h(x))`, the zeros follow + the inverse-function distribution `h^{-1}(x)`:: + + >>> mp.dps = 15 + >>> f = lambda x: sin(exp(x)) + >>> quadosc(f, [1,inf], zeros=lambda n: log(n)) + -0.25024394235267 + >>> pi/2-si(e) + -0.250243942352671 + + **Non-alternating functions** + + If the integrand oscillates around a positive value, without + alternating signs, the extrapolation might fail. A simple trick + that sometimes works is to multiply or divide the frequency by 2:: + + >>> f = lambda x: 1/x**2+sin(x)/x**4 + >>> quadosc(f, [1,inf], omega=1) # Bad + 1.28642190869861 + >>> quadosc(f, [1,inf], omega=0.5) # Perfect + 1.28652953559617 + >>> 1+(cos(1)+ci(1)+sin(1))/6 + 1.28652953559617 + + **Fast decay** + + :func:`~mpmath.quadosc` is primarily useful for slowly decaying + integrands. If the integrand decreases exponentially or faster, + :func:`~mpmath.quad` will likely handle it without trouble (and generally be + much faster than :func:`~mpmath.quadosc`):: + + >>> quadosc(lambda x: cos(x)/exp(x), [0, inf], omega=1) + 0.5 + >>> quad(lambda x: cos(x)/exp(x), [0, inf]) + 0.5 + + """ + a, b = ctx._as_points(interval) + a = ctx.convert(a) + b = ctx.convert(b) + if [omega, period, zeros].count(None) != 2: + raise ValueError( \ + "must specify exactly one of omega, period, zeros") + if a == ctx.ninf and b == ctx.inf: + s1 = ctx.quadosc(f, [a, 0], omega=omega, zeros=zeros, period=period) + s2 = ctx.quadosc(f, [0, b], omega=omega, zeros=zeros, period=period) + return s1 + s2 + if a == ctx.ninf: + if zeros: + return ctx.quadosc(lambda x:f(-x), [-b,-a], lambda n: zeros(-n)) + else: + return ctx.quadosc(lambda x:f(-x), [-b,-a], omega=omega, period=period) + if b != ctx.inf: + raise ValueError("quadosc requires an infinite integration interval") + if not zeros: + if omega: + period = 2*ctx.pi/omega + zeros = lambda n: n*period/2 + #for n in range(1,10): + # p = zeros(n) + # if p > a: + # break + #if n >= 9: + # raise ValueError("zeros do not appear to be correctly indexed") + n = 1 + s = ctx.quadgl(f, [a, zeros(n)]) + def term(k): + return ctx.quadgl(f, [zeros(k), zeros(k+1)]) + s += ctx.nsum(term, [n, ctx.inf]) + return s + + def quadsubdiv(ctx, f, interval, tol=None, maxintervals=None, **kwargs): + """ + Computes the integral of *f* over the interval or path specified + by *interval*, using :func:`~mpmath.quad` together with adaptive + subdivision of the interval. 
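+
+        Whenever the error estimate that :func:`~mpmath.quad` returns for a
+        subinterval exceeds the tolerance, the subinterval is split and both
+        halves are re-queued, up to *maxintervals* times. The split point is
+        chosen roughly as in the following sketch (a hypothetical helper that
+        mirrors the logic of the implementation below; not part of the API)::
+
+            def split_point(ctx, a, b):
+                if a == -ctx.inf and b == ctx.inf:
+                    return 0                  # split the real line at the origin
+                if a == -ctx.inf:
+                    return min(b - 1, 2*b)    # step left, doubling away from b
+                if b == ctx.inf:
+                    return max(a + 1, 2*a)    # step right, doubling away from a
+                return a + (b - a) / 2        # plain bisection for finite intervals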
+ + This function gives an accurate answer for some integrals where + :func:`~mpmath.quad` fails:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> quad(lambda x: abs(sin(x)), [0, 2*pi]) + 3.99900894176779 + >>> quadsubdiv(lambda x: abs(sin(x)), [0, 2*pi]) + 4.0 + >>> quadsubdiv(sin, [0, 1000]) + 0.437620923709297 + >>> quadsubdiv(lambda x: 1/(1+x**2), [-100, 100]) + 3.12159332021646 + >>> quadsubdiv(lambda x: ceil(x), [0, 100]) + 5050.0 + >>> quadsubdiv(lambda x: sin(x+exp(x)), [0,8]) + 0.347400172657248 + + The argument *maxintervals* can be set to limit the permissible + subdivision:: + + >>> quadsubdiv(lambda x: sin(x**2), [0,100], maxintervals=5, error=True) + (-5.40487904307774, 5.011) + >>> quadsubdiv(lambda x: sin(x**2), [0,100], maxintervals=100, error=True) + (0.631417921866934, 1.10101120134116e-17) + + Subdivision does not guarantee a correct answer, since the error + estimate on subintervals may be inaccurate:: + + >>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, [0,1], error=True) + (0.210802735500549, 1.0001111101e-17) + >>> mp.dps = 20 + >>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, [0,1], error=True) + (0.21080273550054927738, 2.200000001e-24) + + The second answer is correct. We can get an accurate result at lower + precision by forcing a finer initial subdivision:: + + >>> mp.dps = 15 + >>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, linspace(0,1,5)) + 0.210802735500549 + + The following integral is too oscillatory for convergence, but we can get a + reasonable estimate:: + + >>> v, err = fp.quadsubdiv(lambda x: fp.sin(1/x), [0,1], error=True) + >>> round(v, 6), round(err, 6) + (0.504067, 1e-06) + >>> sin(1) - ci(1) + 0.504067061906928 + + """ + queue = [] + for i in range(len(interval)-1): + queue.append((interval[i], interval[i+1])) + total = ctx.zero + total_error = ctx.zero + if maxintervals is None: + maxintervals = 10 * ctx.prec + count = 0 + quad_args = kwargs.copy() + quad_args["verbose"] = False + quad_args["error"] = True + if tol is None: + tol = +ctx.eps + orig = ctx.prec + try: + ctx.prec += 5 + while queue: + a, b = queue.pop() + s, err = ctx.quad(f, [a, b], **quad_args) + if kwargs.get("verbose"): + print("subinterval", count, a, b, err) + if err < tol or count > maxintervals: + total += s + total_error += err + else: + count += 1 + if count == maxintervals and kwargs.get("verbose"): + print("warning: number of intervals exceeded maxintervals") + if a == -ctx.inf and b == ctx.inf: + m = 0 + elif a == -ctx.inf: + m = min(b-1, 2*b) + elif b == ctx.inf: + m = max(a+1, 2*a) + else: + m = a + (b - a) / 2 + queue.append((a, m)) + queue.append((m, b)) + finally: + ctx.prec = orig + if kwargs.get("error"): + return +total, +total_error + else: + return +total + +if __name__ == '__main__': + import doctest + doctest.testmod() diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfe5094f91509fdea92db323fd226a125c24c967 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/bessel.cpython-310.pyc
b/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/bessel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb86a955d586ad2351f072f42a67a9e437cff563 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/bessel.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/expintegrals.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/expintegrals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29d35e65b9d3f631de38d001bc94c3878deff3fb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/expintegrals.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/functions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e9e71628f53fb03ff1d7928f5c4c25c7f356b4f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/functions.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/hypergeometric.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/hypergeometric.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97f568db14ab4f2913fd7b9bddce1bf3e5cf44db Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/hypergeometric.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/qfunctions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/qfunctions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e8c6b0efc441d25254f8f83af171c5bccceb139 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/qfunctions.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/theta.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/theta.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e2e94c52e3c661b156e7ccfa33d2cf1c170ec7e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/theta.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/zetazeros.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/zetazeros.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8efd00a22a45129dc0e1e0c758977375e902ca1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mpmath/functions/__pycache__/zetazeros.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/__init__.py b/llmeval-env/lib/python3.10/site-packages/peft/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee379166be720d029fde23df47580c1a7a49eb3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/__init__.py @@ -0,0 +1,90 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. 
+ +# coding=utf-8 +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__version__ = "0.10.0" + +from .auto import ( + AutoPeftModel, + AutoPeftModelForCausalLM, + AutoPeftModelForSequenceClassification, + AutoPeftModelForSeq2SeqLM, + AutoPeftModelForTokenClassification, + AutoPeftModelForQuestionAnswering, + AutoPeftModelForFeatureExtraction, +) +from .mapping import ( + MODEL_TYPE_TO_PEFT_MODEL_MAPPING, + PEFT_TYPE_TO_CONFIG_MAPPING, + get_peft_config, + get_peft_model, + inject_adapter_in_model, +) +from .mixed_model import PeftMixedModel +from .peft_model import ( + PeftModel, + PeftModelForCausalLM, + PeftModelForSeq2SeqLM, + PeftModelForSequenceClassification, + PeftModelForTokenClassification, + PeftModelForQuestionAnswering, + PeftModelForFeatureExtraction, +) +from .tuners import ( + AdaptionPromptConfig, + AdaptionPromptModel, + LoraConfig, + LoftQConfig, + LoraModel, + LoHaConfig, + LoHaModel, + LoKrConfig, + LoKrModel, + IA3Config, + IA3Model, + AdaLoraConfig, + AdaLoraModel, + PrefixEncoder, + PrefixTuningConfig, + PromptEmbedding, + PromptEncoder, + PromptEncoderConfig, + PromptEncoderReparameterizationType, + PromptTuningConfig, + PromptTuningInit, + MultitaskPromptTuningConfig, + MultitaskPromptTuningInit, + OFTConfig, + OFTModel, + PolyConfig, + PolyModel, +) +from .utils import ( + TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, + PeftType, + TaskType, + bloom_model_postprocess_past_key_value, + get_peft_model_state_dict, + prepare_model_for_kbit_training, + replace_lora_weights_loftq, + set_peft_model_state_dict, + shift_tokens_right, + load_peft_weights, + cast_mixed_precision_params, +) +from .config import PeftConfig, PromptLearningConfig diff --git a/llmeval-env/lib/python3.10/site-packages/peft/auto.py b/llmeval-env/lib/python3.10/site-packages/peft/auto.py new file mode 100644 index 0000000000000000000000000000000000000000..353c9e2f84c48bd61194102da6d6e83dfdcd42db --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/auto.py @@ -0,0 +1,170 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +import importlib +import os +from typing import Optional + +from transformers import ( + AutoModel, + AutoModelForCausalLM, + AutoModelForQuestionAnswering, + AutoModelForSeq2SeqLM, + AutoModelForSequenceClassification, + AutoModelForTokenClassification, + AutoTokenizer, +) + +from .config import PeftConfig +from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING +from .peft_model import ( + PeftModel, + PeftModelForCausalLM, + PeftModelForFeatureExtraction, + PeftModelForQuestionAnswering, + PeftModelForSeq2SeqLM, + PeftModelForSequenceClassification, + PeftModelForTokenClassification, +) +from .utils.constants import TOKENIZER_CONFIG_NAME +from .utils.other import check_file_exists_on_hf_hub + + +class _BaseAutoPeftModel: + _target_class = None + _target_peft_class = None + + def __init__(self, *args, **kwargs): + # For consistency with transformers: https://github.com/huggingface/transformers/blob/91d7df58b6537d385e90578dac40204cb550f706/src/transformers/models/auto/auto_factory.py#L400 + raise EnvironmentError( # noqa: UP024 + f"{self.__class__.__name__} is designed to be instantiated " + f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or " + f"`{self.__class__.__name__}.from_config(config)` methods." + ) + + @classmethod + def from_pretrained( + cls, + pretrained_model_name_or_path, + adapter_name: str = "default", + is_trainable: bool = False, + config: Optional[PeftConfig] = None, + **kwargs, + ): + r""" + A wrapper around all the preprocessing steps a user needs to perform in order to load a PEFT model. The kwargs + are passed along to `PeftConfig` that automatically takes care of filtering the kwargs of the Hub methods and + the config object init. + """ + peft_config = PeftConfig.from_pretrained(pretrained_model_name_or_path, **kwargs) + base_model_path = peft_config.base_model_name_or_path + + task_type = getattr(peft_config, "task_type", None) + + if cls._target_class is not None: + target_class = cls._target_class + elif cls._target_class is None and task_type is not None: + # this is only in the case where we use `AutoPeftModel` + raise ValueError( + "Cannot use `AutoPeftModel` with a task type, please use a specific class for your task type. (e.g. `AutoPeftModelForCausalLM` for `task_type='CAUSAL_LM'`)" + ) + + if task_type is not None: + expected_target_class = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[task_type] + if cls._target_peft_class.__name__ != expected_target_class.__name__: + raise ValueError( + f"Expected target PEFT class: {expected_target_class.__name__}, but you have asked for: {cls._target_peft_class.__name__ }" + " make sure that you are loading the correct model for your task type." + ) + elif task_type is None and getattr(peft_config, "auto_mapping", None) is not None: + auto_mapping = getattr(peft_config, "auto_mapping", None) + base_model_class = auto_mapping["base_model_class"] + parent_library_name = auto_mapping["parent_library"] + + parent_library = importlib.import_module(parent_library_name) + target_class = getattr(parent_library, base_model_class) + else: + raise ValueError( + "Cannot infer the auto class from the config, please make sure that you are loading the correct model for your task type." 
+ ) + + base_model = target_class.from_pretrained(base_model_path, **kwargs) + + tokenizer_exists = False + if os.path.exists(os.path.join(pretrained_model_name_or_path, TOKENIZER_CONFIG_NAME)): + tokenizer_exists = True + else: + token = kwargs.get("token", None) + if token is None: + token = kwargs.get("use_auth_token", None) + + tokenizer_exists = check_file_exists_on_hf_hub( + repo_id=pretrained_model_name_or_path, + filename=TOKENIZER_CONFIG_NAME, + revision=kwargs.get("revision", None), + repo_type=kwargs.get("repo_type", None), + token=token, + ) + + if tokenizer_exists: + tokenizer = AutoTokenizer.from_pretrained( + pretrained_model_name_or_path, trust_remote_code=kwargs.get("trust_remote_code", False) + ) + base_model.resize_token_embeddings(len(tokenizer)) + + return cls._target_peft_class.from_pretrained( + base_model, + pretrained_model_name_or_path, + adapter_name=adapter_name, + is_trainable=is_trainable, + config=config, + **kwargs, + ) + + +class AutoPeftModel(_BaseAutoPeftModel): + _target_class = None + _target_peft_class = PeftModel + + +class AutoPeftModelForCausalLM(_BaseAutoPeftModel): + _target_class = AutoModelForCausalLM + _target_peft_class = PeftModelForCausalLM + + +class AutoPeftModelForSeq2SeqLM(_BaseAutoPeftModel): + _target_class = AutoModelForSeq2SeqLM + _target_peft_class = PeftModelForSeq2SeqLM + + +class AutoPeftModelForSequenceClassification(_BaseAutoPeftModel): + _target_class = AutoModelForSequenceClassification + _target_peft_class = PeftModelForSequenceClassification + + +class AutoPeftModelForTokenClassification(_BaseAutoPeftModel): + _target_class = AutoModelForTokenClassification + _target_peft_class = PeftModelForTokenClassification + + +class AutoPeftModelForQuestionAnswering(_BaseAutoPeftModel): + _target_class = AutoModelForQuestionAnswering + _target_peft_class = PeftModelForQuestionAnswering + + +class AutoPeftModelForFeatureExtraction(_BaseAutoPeftModel): + _target_class = AutoModel + _target_peft_class = PeftModelForFeatureExtraction diff --git a/llmeval-env/lib/python3.10/site-packages/peft/config.py b/llmeval-env/lib/python3.10/site-packages/peft/config.py new file mode 100644 index 0000000000000000000000000000000000000000..99aff43ca41b88ccb4ee8887b2969482d7fec936 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/config.py @@ -0,0 +1,270 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import inspect +import json +import os +from dataclasses import asdict, dataclass, field +from typing import Dict, Optional, Union + +from huggingface_hub import hf_hub_download +from transformers.utils import PushToHubMixin + +from .utils import CONFIG_NAME, PeftType, TaskType + + +@dataclass +class PeftConfigMixin(PushToHubMixin): + r""" + This is the base configuration class for PEFT adapter models. It contains all the methods that are common to all + PEFT adapter models. 
This class inherits from [`~transformers.utils.PushToHubMixin`] which contains the methods to + push your model to the Hub. The method `save_pretrained` will save the configuration of your adapter model in a + directory. The method `from_pretrained` will load the configuration of your adapter model from a directory. + + Args: + peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use. + """ + + peft_type: Optional[PeftType] = field(default=None, metadata={"help": "The type of PEFT model."}) + auto_mapping: Optional[dict] = field( + default=None, metadata={"help": "An auto mapping dict to help retrieve the base model class if needed."} + ) + + def to_dict(self) -> Dict: + r""" + Returns the configuration for your adapter model as a dictionary. + """ + return asdict(self) + + def save_pretrained(self, save_directory: str, **kwargs) -> None: + r""" + This method saves the configuration of your adapter model in a directory. + + Args: + save_directory (`str`): + The directory where the configuration will be saved. + kwargs (additional keyword arguments, *optional*): + Additional keyword arguments passed along to the [`~transformers.utils.PushToHubMixin.push_to_hub`] + method. + """ + if os.path.isfile(save_directory): + raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file") + + os.makedirs(save_directory, exist_ok=True) + auto_mapping_dict = kwargs.pop("auto_mapping_dict", None) + + output_dict = asdict(self) + # converting set type to list + for key, value in output_dict.items(): + if isinstance(value, set): + output_dict[key] = list(value) + + output_path = os.path.join(save_directory, CONFIG_NAME) + + # Add auto mapping details for custom models. + if auto_mapping_dict is not None: + output_dict["auto_mapping"] = auto_mapping_dict + + # save it + with open(output_path, "w") as writer: + writer.write(json.dumps(output_dict, indent=2, sort_keys=True)) + + @classmethod + def from_peft_type(cls, **kwargs): + r""" + This method loads the configuration of your adapter model from a set of kwargs. + + The appropriate configuration type is determined by the `peft_type` argument. If `peft_type` is not provided, + the calling class type is instantiated. + + Args: + kwargs (configuration keyword arguments): + Keyword arguments passed along to the configuration initialization. + """ + # Avoid circular dependency .. TODO: fix this with a larger refactor + from peft.mapping import PEFT_TYPE_TO_CONFIG_MAPPING + + # TODO: this hack is needed to fix the following issue (on commit 702f937): + # if someone saves a default config and loads it back with `PeftConfig` class it yields to + # not loading the correct config class. + + # from peft import AdaLoraConfig, PeftConfig + # peft_config = AdaLoraConfig() + # print(peft_config) + # >>> AdaLoraConfig(peft_type=, auto_mapping=None, base_model_name_or_path=None, + # revision=None, task_type=None, inference_mode=False, r=8, target_modules=None, lora_alpha=8, lora_dropout=0.0, ... 
+ # + # peft_config.save_pretrained("./test_config") + # peft_config = PeftConfig.from_pretrained("./test_config") + # print(peft_config) + # >>> PeftConfig(peft_type='ADALORA', auto_mapping=None, base_model_name_or_path=None, revision=None, task_type=None, inference_mode=False) + + if "peft_type" in kwargs: + peft_type = kwargs["peft_type"] + config_cls = PEFT_TYPE_TO_CONFIG_MAPPING[peft_type] + else: + config_cls = cls + + return config_cls(**kwargs) + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: str, subfolder: Optional[str] = None, **kwargs): + r""" + This method loads the configuration of your adapter model from a directory. + + Args: + pretrained_model_name_or_path (`str`): + The directory or the Hub repository id where the configuration is saved. + kwargs (additional keyword arguments, *optional*): + Additional keyword arguments passed along to the child class initialization. + """ + path = ( + os.path.join(pretrained_model_name_or_path, subfolder) + if subfolder is not None + else pretrained_model_name_or_path + ) + + hf_hub_download_kwargs, class_kwargs, _ = cls._split_kwargs(kwargs) + + if os.path.isfile(os.path.join(path, CONFIG_NAME)): + config_file = os.path.join(path, CONFIG_NAME) + else: + try: + config_file = hf_hub_download( + pretrained_model_name_or_path, CONFIG_NAME, subfolder=subfolder, **hf_hub_download_kwargs + ) + except Exception: + raise ValueError(f"Can't find '{CONFIG_NAME}' at '{pretrained_model_name_or_path}'") + + loaded_attributes = cls.from_json_file(config_file) + kwargs = {**class_kwargs, **loaded_attributes} + return cls.from_peft_type(**kwargs) + + @classmethod + def from_json_file(cls, path_json_file: str, **kwargs): + r""" + Loads a configuration file from a json file. + + Args: + path_json_file (`str`): + The path to the json file. + """ + with open(path_json_file) as file: + json_object = json.load(file) + + return json_object + + @classmethod + def _split_kwargs(cls, kwargs): + hf_hub_download_kwargs = {} + class_kwargs = {} + other_kwargs = {} + + for key, value in kwargs.items(): + if key in inspect.signature(hf_hub_download).parameters: + hf_hub_download_kwargs[key] = value + elif key in list(cls.__annotations__): + class_kwargs[key] = value + else: + other_kwargs[key] = value + + return hf_hub_download_kwargs, class_kwargs, other_kwargs + + @classmethod + def _get_peft_type( + cls, + model_id: str, + **hf_hub_download_kwargs, + ): + subfolder = hf_hub_download_kwargs.get("subfolder", None) + + path = os.path.join(model_id, subfolder) if subfolder is not None else model_id + + if os.path.isfile(os.path.join(path, CONFIG_NAME)): + config_file = os.path.join(path, CONFIG_NAME) + else: + try: + config_file = hf_hub_download( + model_id, + CONFIG_NAME, + **hf_hub_download_kwargs, + ) + except Exception: + raise ValueError(f"Can't find '{CONFIG_NAME}' at '{model_id}'") + + loaded_attributes = cls.from_json_file(config_file) + return loaded_attributes["peft_type"] + + @property + def is_prompt_learning(self) -> bool: + r""" + Utility method to check if the configuration is for prompt learning. + """ + return False + + @property + def is_adaption_prompt(self) -> bool: + """Return True if this is an adaption prompt config.""" + return False + + +@dataclass +class PeftConfig(PeftConfigMixin): + """ + This is the base configuration class to store the configuration of a [`PeftModel`]. + + Args: + peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use. 
+ task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform. + inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode. + """ + + base_model_name_or_path: Optional[str] = field( + default=None, metadata={"help": "The name of the base model to use."} + ) + revision: Optional[str] = field(default=None, metadata={"help": "The specific model version to use."}) + peft_type: Optional[Union[str, PeftType]] = field(default=None, metadata={"help": "Peft type"}) + task_type: Optional[Union[str, TaskType]] = field(default=None, metadata={"help": "Task type"}) + inference_mode: bool = field(default=False, metadata={"help": "Whether to use inference mode"}) + + +@dataclass +class PromptLearningConfig(PeftConfig): + """ + This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or + [`PromptTuning`]. + + Args: + num_virtual_tokens (`int`): The number of virtual tokens to use. + token_dim (`int`): The hidden embedding dimension of the base transformer model. + num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model. + num_attention_heads (`int`): The number of attention heads in the base transformer model. + num_layers (`int`): The number of layers in the base transformer model. + """ + + num_virtual_tokens: int = field(default=None, metadata={"help": "Number of virtual tokens"}) + token_dim: int = field( + default=None, metadata={"help": "The hidden embedding dimension of the base transformer model"} + ) + num_transformer_submodules: Optional[int] = field( + default=None, metadata={"help": "Number of transformer submodules"} + ) + num_attention_heads: Optional[int] = field(default=None, metadata={"help": "Number of attention heads"}) + num_layers: Optional[int] = field(default=None, metadata={"help": "Number of transformer layers"}) + + @property + def is_prompt_learning(self) -> bool: + r""" + Utility method to check if the configuration is for prompt learning. 
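+
+        A minimal sketch (relying on the dataclass defaults for the remaining fields):
+
+        ```python
+        >>> PromptLearningConfig(num_virtual_tokens=20).is_prompt_learning
+        True
+        >>> PeftConfig().is_prompt_learning
+        False
+        ```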
+ """ + return True diff --git a/llmeval-env/lib/python3.10/site-packages/peft/helpers.py b/llmeval-env/lib/python3.10/site-packages/peft/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..8875ff7fc493ae4dfff11a1d8e4485b330cb27dc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/helpers.py @@ -0,0 +1,113 @@ +import inspect +from copy import deepcopy +from functools import update_wrapper +from types import MethodType + +from .peft_model import PeftModel + + +def update_forward_signature(model: PeftModel) -> None: + """ + Args: + Updates the forward signature of the PeftModel to include parents class signature + model (`PeftModel`): Peft model to update the forward signature + Example: + + ```python + >>> from transformers import WhisperForConditionalGeneration + >>> from peft import get_peft_model, LoraConfig, update_forward_signature + + >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") + >>> peft_config = LoraConfig(r=8, lora_alpha=32, lora_dropout=0.1, target_modules=["q_proj", "v_proj"]) + + >>> peft_model = get_peft_model(model, peft_config) + >>> update_forward_signature(peft_model) + ``` + """ + + # Only update signature when the current forward signature only has *args and **kwargs + current_signature = inspect.signature(model.forward) + if ( + len(current_signature.parameters) == 2 + and "args" in current_signature.parameters + and "kwargs" in current_signature.parameters + ): + forward = deepcopy(model.forward.__func__) + update_wrapper( + forward, type(model.get_base_model()).forward, assigned=("__doc__", "__name__", "__annotations__") + ) + model.forward = MethodType(forward, model) + + +def update_generate_signature(model: PeftModel) -> None: + """ + Args: + Updates the generate signature of a PeftModel with overriding generate to include parents class signature + model (`PeftModel`): Peft model to update the generate signature + Example: + + ```python + >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer + >>> from peft import get_peft_model, LoraConfig, TaskType, update_generate_signature + + >>> model_name_or_path = "bigscience/mt0-large" + >>> tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) + >>> model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) + + >>> peft_config = LoraConfig( + ... task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1 + ... 
) + >>> peft_model = get_peft_model(model, peft_config) + >>> update_generate_signature(peft_model) + >>> help(peft_model.generate) + ``` + """ + if not hasattr(model, "generate"): + return + current_signature = inspect.signature(model.generate) + if ( + len(current_signature.parameters) == 2 + and "args" in current_signature.parameters + and "kwargs" in current_signature.parameters + ) or (len(current_signature.parameters) == 1 and "kwargs" in current_signature.parameters): + generate = deepcopy(model.generate.__func__) + update_wrapper( + generate, + type(model.get_base_model()).generate, + assigned=("__doc__", "__name__", "__annotations__"), + ) + model.generate = MethodType(generate, model) + + +def update_signature(model: PeftModel, method: str = "all") -> None: + """ + Args: + Updates the signature of a PeftModel include parents class signature for forward or generate method + model (`PeftModel`): Peft model to update generate or forward signature method (`str`): method to update + signature choose one of "forward", "generate", "all" + Example: + ```python + >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer + >>> from peft import get_peft_model, LoraConfig, TaskType, update_signature + + >>> model_name_or_path = "bigscience/mt0-large" + >>> tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) + >>> model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) + + >>> peft_config = LoraConfig( + ... task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1 + ... ) + >>> peft_model = get_peft_model(model, peft_config) + >>> update_signature(peft_model) + >>> help(peft_model.generate) + ``` + """ + if method == "forward": + update_forward_signature(model) + elif method == "generate": + update_generate_signature(model) + elif method == "all": + update_forward_signature(model) + update_generate_signature(model) + else: + raise ValueError(f"method {method} is not supported please choose one of ['forward', 'generate', 'all']") diff --git a/llmeval-env/lib/python3.10/site-packages/peft/import_utils.py b/llmeval-env/lib/python3.10/site-packages/peft/import_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6c32d96d52e74bd5de879c06c732fbf82417a8b6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/import_utils.py @@ -0,0 +1,73 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import importlib +import importlib.metadata as importlib_metadata +from functools import lru_cache + +import packaging.version + + +def is_bnb_available() -> bool: + return importlib.util.find_spec("bitsandbytes") is not None + + +def is_bnb_4bit_available() -> bool: + if not is_bnb_available(): + return False + + import bitsandbytes as bnb + + return hasattr(bnb.nn, "Linear4bit") + + +def is_auto_gptq_available(): + if importlib.util.find_spec("auto_gptq") is not None: + AUTOGPTQ_MINIMUM_VERSION = packaging.version.parse("0.5.0") + version_autogptq = packaging.version.parse(importlib_metadata.version("auto_gptq")) + if AUTOGPTQ_MINIMUM_VERSION <= version_autogptq: + return True + else: + raise ImportError( + f"Found an incompatible version of auto-gptq. Found version {version_autogptq}, " + f"but only versions above {AUTOGPTQ_MINIMUM_VERSION} are supported" + ) + + +def is_optimum_available() -> bool: + return importlib.util.find_spec("optimum") is not None + + +@lru_cache +def is_torch_tpu_available(check_device=True): + "Checks if `torch_xla` is installed and potentially if a TPU is in the environment" + if importlib.util.find_spec("torch_xla") is not None: + if check_device: + # We need to check if `xla_device` can be found, will raise a RuntimeError if not + try: + import torch_xla.core.xla_model as xm + + _ = xm.xla_device() + return True + except RuntimeError: + return False + return True + return False + + +def is_aqlm_available(): + return importlib.util.find_spec("aqlm") is not None + + +def is_auto_awq_available(): + return importlib.util.find_spec("awq") is not None diff --git a/llmeval-env/lib/python3.10/site-packages/peft/mapping.py b/llmeval-env/lib/python3.10/site-packages/peft/mapping.py new file mode 100644 index 0000000000000000000000000000000000000000..b62ddf94aafa1a32b2711c0a6e365900065a93b4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/mapping.py @@ -0,0 +1,168 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +import torch + +from .config import PeftConfig +from .mixed_model import PeftMixedModel +from .peft_model import ( + PeftModel, + PeftModelForCausalLM, + PeftModelForFeatureExtraction, + PeftModelForQuestionAnswering, + PeftModelForSeq2SeqLM, + PeftModelForSequenceClassification, + PeftModelForTokenClassification, +) +from .tuners import ( + AdaLoraConfig, + AdaLoraModel, + AdaptionPromptConfig, + IA3Config, + IA3Model, + LoHaConfig, + LoHaModel, + LoKrConfig, + LoKrModel, + LoraConfig, + LoraModel, + MultitaskPromptTuningConfig, + OFTConfig, + OFTModel, + PolyConfig, + PolyModel, + PrefixTuningConfig, + PromptEncoderConfig, + PromptTuningConfig, +) +from .utils import _prepare_prompt_learning_config + + +if TYPE_CHECKING: + from transformers import PreTrainedModel + + +MODEL_TYPE_TO_PEFT_MODEL_MAPPING: dict[str, PeftModel] = { + "SEQ_CLS": PeftModelForSequenceClassification, + "SEQ_2_SEQ_LM": PeftModelForSeq2SeqLM, + "CAUSAL_LM": PeftModelForCausalLM, + "TOKEN_CLS": PeftModelForTokenClassification, + "QUESTION_ANS": PeftModelForQuestionAnswering, + "FEATURE_EXTRACTION": PeftModelForFeatureExtraction, +} + +PEFT_TYPE_TO_CONFIG_MAPPING: dict[str, PeftConfig] = { + "ADAPTION_PROMPT": AdaptionPromptConfig, + "PROMPT_TUNING": PromptTuningConfig, + "PREFIX_TUNING": PrefixTuningConfig, + "P_TUNING": PromptEncoderConfig, + "LORA": LoraConfig, + "LOHA": LoHaConfig, + "LOKR": LoKrConfig, + "ADALORA": AdaLoraConfig, + "IA3": IA3Config, + "MULTITASK_PROMPT_TUNING": MultitaskPromptTuningConfig, + "OFT": OFTConfig, + "POLY": PolyConfig, +} + +PEFT_TYPE_TO_TUNER_MAPPING = { + "LORA": LoraModel, + "LOHA": LoHaModel, + "LOKR": LoKrModel, + "ADALORA": AdaLoraModel, + "IA3": IA3Model, + "OFT": OFTModel, + "POLY": PolyModel, +} + + +def get_peft_config(config_dict: dict[str, Any]) -> PeftConfig: + """ + Returns a Peft config object from a dictionary. + + Args: + config_dict (`Dict[str, Any]`): Dictionary containing the configuration parameters. + """ + + return PEFT_TYPE_TO_CONFIG_MAPPING[config_dict["peft_type"]](**config_dict) + + +def get_peft_model( + model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default", mixed: bool = False +) -> PeftModel | PeftMixedModel: + """ + Returns a Peft model object from a model and a config. + + Args: + model ([`transformers.PreTrainedModel`]): + Model to be wrapped. + peft_config ([`PeftConfig`]): + Configuration object containing the parameters of the Peft model. + adapter_name (`str`, `optional`, defaults to `"default"`): + The name of the adapter to be injected, if not provided, the default adapter name is used ("default"). + mixed (`bool`, `optional`, defaults to `False`): + Whether to allow mixing different (compatible) adapter types. 
+ """ + model_config = getattr(model, "config", {"model_type": "custom"}) + if hasattr(model_config, "to_dict"): + model_config = model_config.to_dict() + + peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None) + + if mixed: + return PeftMixedModel(model, peft_config, adapter_name=adapter_name) + + if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not peft_config.is_prompt_learning: + return PeftModel(model, peft_config, adapter_name=adapter_name) + + if peft_config.is_prompt_learning: + peft_config = _prepare_prompt_learning_config(peft_config, model_config) + return MODEL_TYPE_TO_PEFT_MODEL_MAPPING[peft_config.task_type](model, peft_config, adapter_name=adapter_name) + + +def inject_adapter_in_model( + peft_config: PeftConfig, model: torch.nn.Module, adapter_name: str = "default" +) -> torch.nn.Module: + r""" + A simple API to create and inject adapter in-place into a model. Currently the API does not support prompt learning + methods and adaption prompt. Make sure to have the correct `target_names` set in the `peft_config` object. The API + calls `get_peft_model` under the hood but would be restricted only to non-prompt learning methods. + + Args: + peft_config (`PeftConfig`): + Configuration object containing the parameters of the Peft model. + model (`torch.nn.Module`): + The input model where the adapter will be injected. + adapter_name (`str`, `optional`, defaults to `"default"`): + The name of the adapter to be injected, if not provided, the default adapter name is used ("default"). + """ + if peft_config.is_prompt_learning or peft_config.is_adaption_prompt: + raise ValueError("`create_and_replace` does not support prompt learning and adaption prompt yet.") + + if peft_config.peft_type not in PEFT_TYPE_TO_TUNER_MAPPING.keys(): + raise ValueError( + f"`inject_adapter_in_model` does not support {peft_config.peft_type} yet. Please use `get_peft_model`." + ) + + tuner_cls = PEFT_TYPE_TO_TUNER_MAPPING[peft_config.peft_type] + + # By instantiating a peft model we are injecting randomly initialized LoRA layers into the model's modules. + peft_model = tuner_cls(model, peft_config, adapter_name=adapter_name) + + return peft_model.model diff --git a/llmeval-env/lib/python3.10/site-packages/peft/mixed_model.py b/llmeval-env/lib/python3.10/site-packages/peft/mixed_model.py new file mode 100644 index 0000000000000000000000000000000000000000..92b9f74ecd4caace48a0d1d59288b6fdfdfad0bf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/mixed_model.py @@ -0,0 +1,409 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +import os +from contextlib import contextmanager +from typing import Any, Optional, Union + +import torch +from accelerate.hooks import remove_hook_from_submodules +from torch import nn +from transformers.utils import PushToHubMixin + +from peft.tuners.mixed import COMPATIBLE_TUNER_TYPES + +from .config import PeftConfig +from .peft_model import PeftModel +from .tuners import ( + AdaLoraModel, + IA3Model, + LoHaModel, + LoKrModel, + LoraModel, + MixedModel, + OFTModel, +) +from .utils import PeftType, _set_adapter, _set_trainable + + +PEFT_TYPE_TO_MODEL_MAPPING = { + PeftType.LORA: LoraModel, + PeftType.LOHA: LoHaModel, + PeftType.LOKR: LoKrModel, + PeftType.ADALORA: AdaLoraModel, + PeftType.IA3: IA3Model, + PeftType.OFT: OFTModel, +} + + +def _prepare_model_for_gradient_checkpointing(model: nn.Module) -> None: + r""" + Prepares the model for gradient checkpointing if necessary + """ + # Note: same as PeftModel._prepare_model_for_gradient_checkpointing + if not getattr(model, "is_gradient_checkpointing", True): + return + + if not ( + getattr(model, "is_loaded_in_8bit", False) + or getattr(model, "is_loaded_in_4bit", False) + or getattr(model, "is_quantized", False) + ): + if hasattr(model, "enable_input_require_grads"): + model.enable_input_require_grads() + elif hasattr(model, "get_input_embeddings"): + + def make_inputs_require_grad(module, input, output): + output.requires_grad_(True) + + model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) + + +def _check_config_compatible(peft_config: PeftConfig) -> None: + if peft_config.peft_type not in COMPATIBLE_TUNER_TYPES: + raise ValueError( + f"The provided `peft_type` '{peft_config.peft_type.value}' is not compatible with the `PeftMixedModel`. " + f"Compatible types are: {COMPATIBLE_TUNER_TYPES}" + ) + + +class PeftMixedModel(PushToHubMixin, torch.nn.Module): + """ + PeftMixedModel for loading and mixing different types of adapters for inference. + + This class does not support saving, and it shouldn't usually be initialized directly. Instead, use + `get_peft_model` with the argument `mixed=True`. + + + + Read the [Mixed adapter types](https://huggingface.co/docs/peft/en/developer_guides/mixed_models) guide to learn + more about using different adapter types. + + + + Example: + + ```py + >>> from peft import PeftMixedModel + + >>> base_model = ... # load the base model, e.g. from transformers + >>> peft_model = PeftMixedModel.from_pretrained(base_model, path_to_adapter1, "adapter1").eval() + >>> peft_model.load_adapter(path_to_adapter2, "adapter2") + >>> peft_model.set_adapter(["adapter1", "adapter2"]) # activate both adapters + >>> peft_model(data) # forward pass using both adapters + ``` + + Args: + model (`torch.nn.Module`): + The model to be tuned. + config (`PeftConfig`): + The config of the model to be tuned. The adapter type must be compatible. + adapter_name (`str`, `optional`, defaults to `"default"`): + The name of the first adapter.
+ """ + + def __init__(self, model: nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None: + super().__init__() + _check_config_compatible(peft_config) + _prepare_model_for_gradient_checkpointing(model) + self.modules_to_save = None + self.base_model = MixedModel(model, {adapter_name: peft_config}, adapter_name) + self.set_modules_to_save(peft_config, adapter_name) + + self.config = getattr(model, "config", {"model_type": "custom"}) + + # the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid + # numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected + # behavior we disable that in this line. + if hasattr(self.base_model, "config") and hasattr(self.base_model.config, "pretraining_tp"): + self.base_model.config.pretraining_tp = 1 + + @property + def peft_config(self) -> dict[str, PeftConfig]: + return self.base_model.peft_config + + @property + def active_adapter(self) -> str: + return self.base_model.active_adapter + + @property + def active_adapters(self) -> list[str]: + return self.base_model.active_adapters + + def get_nb_trainable_parameters(self): + r""" + Returns the number of trainable parameters and number of all parameters in the model. + """ + # note: same as PeftModel.get_nb_trainable_parameters + trainable_params = 0 + all_param = 0 + for _, param in self.named_parameters(): + num_params = param.numel() + # if using DS Zero 3 and the weights are initialized empty + if num_params == 0 and hasattr(param, "ds_numel"): + num_params = param.ds_numel + + # Due to the design of 4bit linear layers from bitsandbytes + # one needs to multiply the number of parameters by 2 to get + # the correct number of parameters + if param.__class__.__name__ == "Params4bit": + num_params = num_params * 2 + + all_param += num_params + if param.requires_grad: + trainable_params += num_params + + return trainable_params, all_param + + def print_trainable_parameters(self): + """ + Prints the number of trainable parameters in the model. + + Note: print_trainable_parameters() uses get_nb_trainable_parameters() which is different from + num_parameters(only_trainable=True) from huggingface/transformers. get_nb_trainable_parameters() returns + (trainable parameters, all parameters) of the Peft Model which includes modified backbone transformer model. + For techniques like LoRA, the backbone transformer model is modified in place with LoRA modules. However, for + prompt tuning, the backbone transformer model is unmodified. num_parameters(only_trainable=True) returns number + of trainable parameters of the backbone transformer model which can be different. + """ + # note: same as PeftModel.print_trainable_parameters + trainable_params, all_param = self.get_nb_trainable_parameters() + + print( + f"trainable params: {trainable_params:,d} || " + f"all params: {all_param:,d} || " + f"trainable%: {100 * trainable_params / all_param:.4f}" + ) + + def __getattr__(self, name: str): + """Forward missing attributes to the wrapped module.""" + try: + return super().__getattr__(name) # defer to nn.Module's logic + except AttributeError: + return getattr(self.base_model, name) + + def forward(self, *args: Any, **kwargs: Any): + """ + Forward pass of the model. + """ + return self.base_model(*args, **kwargs) + + def generate(self, *args: Any, **kwargs: Any): + """ + Generate output. 
+ """ + return self.base_model.generate(*args, **kwargs) + + @contextmanager + def disable_adapter(self): + """ + Disables the adapter module. + """ + try: + self.base_model.disable_adapter_layers() + yield + finally: + self.base_model.enable_adapter_layers() + + def add_adapter(self, adapter_name: str, peft_config: PeftConfig): + _check_config_compatible(peft_config) + + try: + self.peft_config[adapter_name] = peft_config + self.base_model.inject_adapter(self, adapter_name) + except Exception: # something went wrong, roll back + if adapter_name in self.peft_config: + del self.peft_config[adapter_name] + raise + + self.set_modules_to_save(peft_config, adapter_name) + + def set_modules_to_save(self, peft_config: PeftConfig, adapter_name: str) -> None: + if (modules_to_save := getattr(peft_config, "modules_to_save", None)) is None: + return + + if self.modules_to_save is None: + self.modules_to_save = set(modules_to_save) + else: + self.modules_to_save.update(modules_to_save) + _set_trainable(self, adapter_name) + + def set_adapter(self, adapter_name: Union[str, list[str]]) -> None: + """ + Sets the active adapter(s) for the model. + + Note that the order in which the adapters are applied during the forward pass may not be the same as the order + in which they are passed to this function. Instead, the order during the forward pass is determined by the + order in which the adapters were loaded into the model. The active adapters only determine which adapters are + active during the forward pass, but not the order in which they are applied. + + Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is + not desired, use the following code. + + ```py + >>> for name, param in model_peft.named_parameters(): + ... if ...: # some check on name (ex. if 'lora' in name) + ... param.requires_grad = False + ``` + + Args: + adapter_name (`str` or `List[str]`): + The name of the adapter(s) to be activated. + """ + if isinstance(adapter_name, str): + adapter_name = [adapter_name] + + mismatched = set(adapter_name) - set(self.peft_config.keys()) + if mismatched: + raise ValueError( + f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}" + ) + + self.base_model.set_adapter(adapter_name) + _set_adapter(self, adapter_name) + + def delete_adapter(self, adapter_name: Union[str, list[str]]) -> None: + if isinstance(adapter_name, str): + adapter_name = [adapter_name] + + mismatched = set(adapter_name) - set(self.peft_config.keys()) + if mismatched: + raise ValueError( + f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}" + ) + + self.base_model.delete_adapter(adapter_name) + + def merge_and_unload(self, *args: Any, **kwargs: Any): + r""" + This method merges the adapter layers into the base model. This is needed if someone wants to use the base + model as a standalone model. + + Args: + progressbar (`bool`): + whether to show a progressbar indicating the unload and merge process + safe_merge (`bool`): + whether to activate the safe merging check to check if there is any potential Nan in the adapter + weights + adapter_names (`List[str]`, *optional*): + The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults + to `None`. + """ + return self.base_model.merge_and_unload(*args, **kwargs) + + def unload(self, *args: Any, **kwargs: Any): + """ + Gets back the base model by removing all the adapter modules without merging. 
This gives back the original base + model. + """ + return self.base_model.unload(*args, **kwargs) + + @classmethod + def _split_kwargs(cls, kwargs: dict[str, Any]): + return PeftModel._split_kwargs(kwargs) + + def load_adapter(self, model_id: str, adapter_name: str, *args: Any, **kwargs: Any): + output = PeftModel.load_adapter(self, model_id, adapter_name, *args, **kwargs) + # TODO: not quite clear why this is necessary but tests fail without it + self.set_adapter(self.active_adapters) + return output + + def create_or_update_model_card(self, output_dir: str): + raise NotImplementedError(f"Model card creation is not supported for {self.__class__.__name__} (yet).") + + def save_pretrained( + self, + save_directory: str, + safe_serialization: bool = False, + selected_adapters: Optional[list[str]] = None, + **kwargs: Any, + ): + raise NotImplementedError(f"Saving is not supported for {self.__class__.__name__} (yet).") + + @classmethod + def from_pretrained( + cls, + model: nn.Module, + model_id: str | os.PathLike, + adapter_name: str = "default", + is_trainable: bool = False, + config: Optional[PeftConfig] = None, + **kwargs: Any, + ): + r""" + Instantiate a PEFT mixed model from a pretrained model and loaded PEFT weights. + + Note that the passed `model` may be modified inplace. + + Args: + model (`nn.Module`): + The model to be adapted. + model_id (`str` or `os.PathLike`): + The name of the PEFT configuration to use. Can be either: + - A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face + Hub. + - A path to a directory containing a PEFT configuration file saved using the `save_pretrained` + method (`./my_peft_config_directory/`). + adapter_name (`str`, *optional*, defaults to `"default"`): + The name of the adapter to be loaded. This is useful for loading multiple adapters. + is_trainable (`bool`, *optional*, defaults to `False`): + Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be + used for inference. + config ([`~peft.PeftConfig`], *optional*): + The configuration object to use instead of an automatically loaded configuration. This configuration + object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already + loaded before calling `from_pretrained`. + kwargs: (`optional`): + Additional keyword arguments passed along to the specific PEFT configuration class.
+ """ + # note: adapted from PeftModel.from_pretrained + from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING + + # load the config + if config is None: + config = PEFT_TYPE_TO_CONFIG_MAPPING[ + PeftConfig._get_peft_type( + model_id, + subfolder=kwargs.get("subfolder", None), + revision=kwargs.get("revision", None), + cache_dir=kwargs.get("cache_dir", None), + use_auth_token=kwargs.get("use_auth_token", None), + ) + ].from_pretrained(model_id, **kwargs) + elif isinstance(config, PeftConfig): + config.inference_mode = not is_trainable + else: + raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}") + + # note: this is different from PeftModel.from_pretrained + if config.peft_type not in PEFT_TYPE_TO_MODEL_MAPPING: + raise ValueError(f"Adapter of type {config.peft_type} is not supported for mixed models.") + + if (getattr(model, "hf_device_map", None) is not None) and len( + set(model.hf_device_map.values()).intersection({"cpu", "disk"}) + ) > 0: + remove_hook_from_submodules(model) + + if config.is_prompt_learning and is_trainable: + # note: should not be possible to reach, but just in case + raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") + else: + config.inference_mode = not is_trainable + + # note: this is different from PeftModel.from_pretrained, we always return a PeftMixedModel + model = cls(model, config, adapter_name) + model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs) + return model diff --git a/llmeval-env/lib/python3.10/site-packages/peft/peft_model.py b/llmeval-env/lib/python3.10/site-packages/peft/peft_model.py new file mode 100644 index 0000000000000000000000000000000000000000..e4b78a1b9a327cc382a0b722aabcfe0ec5b016f9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/peft_model.py @@ -0,0 +1,1986 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import collections +import inspect +import os +import warnings +from contextlib import contextmanager +from copy import deepcopy +from typing import Any, Optional, Union + +import packaging.version +import torch +import transformers +from accelerate import dispatch_model, infer_auto_device_map +from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules +from accelerate.utils import get_balanced_memory +from huggingface_hub import ModelCard, ModelCardData, hf_hub_download +from safetensors.torch import save_file as safe_save_file +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss +from transformers import PreTrainedModel +from transformers.modeling_outputs import QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput +from transformers.utils import PushToHubMixin + +from . 
import __version__ +from .config import PeftConfig +from .tuners import ( + AdaLoraModel, + AdaptionPromptModel, + IA3Model, + LoHaModel, + LoKrModel, + LoraModel, + MultitaskPromptEmbedding, + OFTModel, + PolyModel, + PrefixEncoder, + PromptEmbedding, + PromptEncoder, +) +from .utils import ( + SAFETENSORS_WEIGHTS_NAME, + TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, + WEIGHTS_NAME, + PeftType, + TaskType, + _get_batch_size, + _prepare_prompt_learning_config, + _set_adapter, + _set_trainable, + get_peft_model_state_dict, + id_tensor_storage, + infer_device, + load_peft_weights, + set_peft_model_state_dict, + shift_tokens_right, +) + + +PEFT_TYPE_TO_MODEL_MAPPING = { + PeftType.LORA: LoraModel, + PeftType.LOHA: LoHaModel, + PeftType.LOKR: LoKrModel, + PeftType.PROMPT_TUNING: PromptEmbedding, + PeftType.P_TUNING: PromptEncoder, + PeftType.PREFIX_TUNING: PrefixEncoder, + PeftType.ADALORA: AdaLoraModel, + PeftType.ADAPTION_PROMPT: AdaptionPromptModel, + PeftType.IA3: IA3Model, + PeftType.OFT: OFTModel, + PeftType.POLY: PolyModel, +} + + +class PeftModel(PushToHubMixin, torch.nn.Module): + """ + Base model encompassing various Peft methods. + + Args: + model ([`~transformers.PreTrainedModel`]): The base transformer model used for Peft. + peft_config ([`PeftConfig`]): The configuration of the Peft model. + adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`. + + **Attributes**: + - **base_model** ([`torch.nn.Module`]) -- The base transformer model used for Peft. + - **peft_config** ([`PeftConfig`]) -- The configuration of the Peft model. + - **modules_to_save** (`list` of `str`) -- The list of sub-module names to save when + saving the model. + - **prompt_encoder** ([`PromptEncoder`]) -- The prompt encoder used for Peft if + using [`PromptLearningConfig`]. + - **prompt_tokens** (`torch.Tensor`) -- The virtual prompt tokens used for Peft if + using [`PromptLearningConfig`]. + - **transformer_backbone_name** (`str`) -- The name of the transformer + backbone in the base model if using [`PromptLearningConfig`]. + - **word_embeddings** (`torch.nn.Embedding`) -- The word embeddings of the transformer backbone + in the base model if using [`PromptLearningConfig`]. + """ + + def __init__(self, model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default") -> None: + super().__init__() + self.modules_to_save = None + self.active_adapter = adapter_name + self.peft_type = peft_config.peft_type + # These args are special PEFT arguments that users can pass. They need to be removed before passing them to + # forward. + self.special_peft_forward_args = {"adapter_names"} + + self._is_prompt_learning = peft_config.is_prompt_learning + if self._is_prompt_learning: + self._peft_config = {adapter_name: peft_config} + self.base_model = model + self.add_adapter(adapter_name, peft_config) + else: + self._peft_config = None + cls = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type] + self.base_model = cls(model, {adapter_name: peft_config}, adapter_name) + self.set_additional_trainable_modules(peft_config, adapter_name) + + if getattr(model, "is_gradient_checkpointing", True): + model = self._prepare_model_for_gradient_checkpointing(model) + + # the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid + # numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected + # behavior we disable that in this line. 
+ if hasattr(self.base_model, "config") and hasattr(self.base_model.config, "pretraining_tp"): + self.base_model.config.pretraining_tp = 1 + + @property + def peft_config(self) -> dict[str, PeftConfig]: + if self._is_prompt_learning: + return self._peft_config + return self.base_model.peft_config + + @property + def active_adapters(self) -> list[str]: + try: + adapters = self.base_model.active_adapters + except AttributeError: + adapters = self.active_adapter + if isinstance(adapters, str): + adapters = [adapters] + return adapters + + @peft_config.setter + def peft_config(self, value: dict[str, PeftConfig]): + if self._is_prompt_learning: + self._peft_config = value + else: + self.base_model.peft_config = value + + def save_pretrained( + self, + save_directory: str, + safe_serialization: bool = True, + selected_adapters: Optional[list[str]] = None, + save_embedding_layers: Union[str, bool] = "auto", + is_main_process: bool = True, + **kwargs: Any, + ) -> None: + r""" + This function saves the adapter model and the adapter configuration files to a directory, so that it can be + reloaded using the [`PeftModel.from_pretrained`] class method, and also used by the [`PeftModel.push_to_hub`] + method. + + Args: + save_directory (`str`): + Directory where the adapter model and configuration files will be saved (will be created if it does not + exist). + safe_serialization (`bool`, *optional*): + Whether to save the adapter files in safetensors format, defaults to `True`. + selected_adapters (`List[str]`, *optional*): + A list of adapters to be saved. If `None`, will default to all adapters. + save_embedding_layers (`Union[bool, str]`, *optional*, defaults to `"auto"`): + If `True`, save the embedding layers in addition to adapter weights. If `auto`, checks the common + embedding layers `peft.utils.other.EMBEDDING_LAYER_NAMES` in config's `target_modules` when available, + and automatically sets the boolean flag. This only works for 🤗 transformers models. + is_main_process (`bool`, *optional*): + Whether the process calling this is the main process or not. Will default to `True`. Will not save the + checkpoint if not on the main process, which is important for multi-device setups (e.g. DDP). + kwargs (additional keyword arguments, *optional*): + Additional keyword arguments passed along to the `push_to_hub` method. + """ + if os.path.isfile(save_directory): + raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file") + + if selected_adapters is None: + selected_adapters = list(self.peft_config.keys()) + else: + if any( + selected_adapter_name not in list(self.peft_config.keys()) + for selected_adapter_name in selected_adapters + ): + raise ValueError( + f"You passed an invalid `selected_adapters` argument, current supported adapter names are" + f" {list(self.peft_config.keys())} - got {selected_adapters}."
+ ) + + if is_main_process: + os.makedirs(save_directory, exist_ok=True) + self.create_or_update_model_card(save_directory) + + for adapter_name in selected_adapters: + peft_config = self.peft_config[adapter_name] + # save only the trainable weights + output_state_dict = get_peft_model_state_dict( + self, + state_dict=kwargs.get("state_dict", None), + adapter_name=adapter_name, + save_embedding_layers=save_embedding_layers, + ) + output_dir = os.path.join(save_directory, adapter_name) if adapter_name != "default" else save_directory + os.makedirs(output_dir, exist_ok=True) + + if is_main_process and safe_serialization: + # Section copied from: https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_utils.py#L2111-L2134 + # Safetensors does not allow tensor aliasing. + # We're going to remove aliases before saving + ptrs = collections.defaultdict(list) + for name, tensor in output_state_dict.items(): + # Sometimes in the state_dict we have non-tensor objects. + # e.g. in bitsandbytes we have some `str` objects in the state_dict + if isinstance(tensor, torch.Tensor): + ptrs[id_tensor_storage(tensor)].append(name) + else: + # In the non-tensor case, fall back to the pointer of the object itself + ptrs[id(tensor)].append(name) + + # These are all the pointers of shared tensors. + shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1} + + for _, names in shared_ptrs.items(): + # Here we just clone the shared tensors to avoid tensor aliasing which is + # not supported in safetensors. + for shared_tensor_name in names[1:]: + output_state_dict[shared_tensor_name] = output_state_dict[shared_tensor_name].clone() + + safe_save_file( + output_state_dict, + os.path.join(output_dir, SAFETENSORS_WEIGHTS_NAME), + metadata={"format": "pt"}, + ) + elif is_main_process: + torch.save(output_state_dict, os.path.join(output_dir, WEIGHTS_NAME)) + + # save the config and change the inference mode to `True` + if peft_config.base_model_name_or_path is None: + peft_config.base_model_name_or_path = ( + self.base_model.__dict__.get("name_or_path", None) + if peft_config.is_prompt_learning + else self.base_model.model.__dict__.get("name_or_path", None) + ) + inference_mode = peft_config.inference_mode + peft_config.inference_mode = True + + if peft_config.task_type is None: + # deal with auto mapping + base_model_class = self._get_base_model_class( + is_prompt_tuning=peft_config.is_prompt_learning, + ) + parent_library = base_model_class.__module__ + + auto_mapping_dict = { + "base_model_class": base_model_class.__name__, + "parent_library": parent_library, + } + else: + auto_mapping_dict = None + + if is_main_process: + peft_config.save_pretrained(output_dir, auto_mapping_dict=auto_mapping_dict) + peft_config.inference_mode = inference_mode + + @classmethod + def from_pretrained( + cls, + model: torch.nn.Module, + model_id: Union[str, os.PathLike], + adapter_name: str = "default", + is_trainable: bool = False, + config: Optional[PeftConfig] = None, + **kwargs: Any, + ) -> PeftModel: + r""" + Instantiate a PEFT model from a pretrained model and loaded PEFT weights. + + Note that the passed `model` may be modified inplace. + + Args: + model ([`torch.nn.Module`]): + The model to be adapted. For 🤗 Transformers models, the model should be initialized with the + [`~transformers.PreTrainedModel.from_pretrained`]. + model_id (`str` or `os.PathLike`): + The name of the PEFT configuration to use. 
Can be either: + - A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face + Hub. + - A path to a directory containing a PEFT configuration file saved using the `save_pretrained` + method (`./my_peft_config_directory/`). + adapter_name (`str`, *optional*, defaults to `"default"`): + The name of the adapter to be loaded. This is useful for loading multiple adapters. + is_trainable (`bool`, *optional*, defaults to `False`): + Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be + used for inference. + config ([`~peft.PeftConfig`], *optional*): + The configuration object to use instead of an automatically loaded configuration. This configuration + object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already + loaded before calling `from_pretrained`. + kwargs: (`optional`): + Additional keyword arguments passed along to the specific PEFT configuration class. + """ + from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING + + # load the config + if config is None: + config = PEFT_TYPE_TO_CONFIG_MAPPING[ + PeftConfig._get_peft_type( + model_id, + subfolder=kwargs.get("subfolder", None), + revision=kwargs.get("revision", None), + cache_dir=kwargs.get("cache_dir", None), + use_auth_token=kwargs.get("use_auth_token", None), + token=kwargs.get("token", None), + ) + ].from_pretrained(model_id, **kwargs) + elif isinstance(config, PeftConfig): + config.inference_mode = not is_trainable + else: + raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}") + + if (getattr(model, "hf_device_map", None) is not None) and len( + set(model.hf_device_map.values()).intersection({"cpu", "disk"}) + ) > 0: + remove_hook_from_submodules(model) + + if config.is_prompt_learning and is_trainable: + raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") + else: + config.inference_mode = not is_trainable + + if config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys(): + model = cls(model, config, adapter_name) + else: + model = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[config.task_type](model, config, adapter_name) + model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs) + return model + + def _setup_prompt_encoder(self, adapter_name: str): + config = self.peft_config[adapter_name] + if not hasattr(self, "prompt_encoder"): + self.prompt_encoder = torch.nn.ModuleDict({}) + self.prompt_tokens = {} + transformer_backbone = None + for name, module in self.base_model.named_children(): + for param in module.parameters(): + param.requires_grad = False + if isinstance(module, PreTrainedModel): + # Make sure to freeze the Transformers model + if transformer_backbone is None: + transformer_backbone = module + self.transformer_backbone_name = name + if transformer_backbone is None: + transformer_backbone = self.base_model + + if config.num_transformer_submodules is None: + config.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1 + + for named_param, value in list(transformer_backbone.named_parameters()): + # for ZeRO-3, the tensor is sharded across accelerators and deepspeed modifies it to a tensor with shape [0] + # the actual unsharded shape is stored in "ds_shape" attribute + # special handling is needed in case the model is initialized in deepspeed.zero.Init() context or HfDeepSpeedConfig + # has been called before + # For reference refer to issue:
https://github.com/huggingface/peft/issues/996 + deepspeed_distributed_tensor_shape = getattr(value, "ds_shape", None) + + if value.shape[0] == self.base_model.config.vocab_size or ( + deepspeed_distributed_tensor_shape is not None + and deepspeed_distributed_tensor_shape[0] == self.base_model.config.vocab_size + ): + self.word_embeddings = transformer_backbone.get_submodule(named_param.replace(".weight", "")) + break + + if config.peft_type == PeftType.PROMPT_TUNING: + prompt_encoder = PromptEmbedding(config, self.word_embeddings) + elif config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: + prompt_encoder = MultitaskPromptEmbedding(config, self.word_embeddings) + elif config.peft_type == PeftType.P_TUNING: + prompt_encoder = PromptEncoder(config) + elif config.peft_type == PeftType.PREFIX_TUNING: + prompt_encoder = PrefixEncoder(config) + else: + raise ValueError("Not supported") + + prompt_encoder = prompt_encoder.to(self.device) + self.prompt_encoder.update(torch.nn.ModuleDict({adapter_name: prompt_encoder})) + self.prompt_tokens[adapter_name] = torch.arange( + config.num_virtual_tokens * config.num_transformer_submodules + ).long() + + def _prepare_model_for_gradient_checkpointing(self, model: PreTrainedModel): + r""" + Prepares the model for gradient checkpointing if necessary + """ + if not ( + getattr(model, "is_loaded_in_8bit", False) + or getattr(model, "is_loaded_in_4bit", False) + or getattr(model, "is_quantized", False) + ): + if hasattr(model, "enable_input_require_grads"): + model.enable_input_require_grads() + elif hasattr(model, "get_input_embeddings"): + + def make_inputs_require_grad(module, input, output): + output.requires_grad_(True) + + model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) + return model + + def get_prompt_embedding_to_save(self, adapter_name: str) -> torch.Tensor: + """ + Returns the prompt embedding to save when saving the model. Only applicable when using a prompt learning + method. + """ + prompt_encoder = self.prompt_encoder[adapter_name] + prompt_tokens = ( + self.prompt_tokens[adapter_name].unsqueeze(0).expand(1, -1).to(prompt_encoder.embedding.weight.device) + ) + if self.peft_config[adapter_name].peft_type == PeftType.PREFIX_TUNING: + prompt_tokens = prompt_tokens[:, : self.peft_config[adapter_name].num_virtual_tokens] + + if self.peft_config[adapter_name].peft_type == PeftType.MULTITASK_PROMPT_TUNING: + prompt_embeddings = super(MultitaskPromptEmbedding, prompt_encoder).forward(prompt_tokens) + else: + prompt_embeddings = prompt_encoder(prompt_tokens) + + return prompt_embeddings[0].detach().cpu() + + def get_prompt(self, batch_size: int, task_ids: Optional[torch.Tensor] = None) -> torch.Tensor: + """ + Returns the virtual prompts to use for Peft. Only applicable when using a prompt learning method. 
+ """ + peft_config = self.active_peft_config + prompt_encoder = self.prompt_encoder[self.active_adapter] + prompt_tokens = ( + self.prompt_tokens[self.active_adapter] + .unsqueeze(0) + .expand(batch_size, -1) + .to(prompt_encoder.embedding.weight.device) + ) + if peft_config.peft_type == PeftType.PREFIX_TUNING: + prompt_tokens = prompt_tokens[:, : peft_config.num_virtual_tokens] + if peft_config.inference_mode: + past_key_values = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1) + else: + past_key_values = prompt_encoder(prompt_tokens) + if self.base_model_torch_dtype is not None: + past_key_values = past_key_values.to(self.base_model_torch_dtype) + past_key_values = past_key_values.view( + batch_size, + peft_config.num_virtual_tokens, + peft_config.num_layers * 2, + peft_config.num_attention_heads, + peft_config.token_dim // peft_config.num_attention_heads, + ) + if peft_config.num_transformer_submodules == 2: + past_key_values = torch.cat([past_key_values, past_key_values], dim=2) + past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split( + peft_config.num_transformer_submodules * 2 + ) + if TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING.get(self.config.model_type, None) is not None: + post_process_fn = TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING[self.config.model_type] + past_key_values = post_process_fn(past_key_values) + return past_key_values + else: + if peft_config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: + prompts = prompt_encoder(prompt_tokens, task_ids) + else: + if peft_config.inference_mode: + prompts = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1) + else: + prompts = prompt_encoder(prompt_tokens) + return prompts + + def get_nb_trainable_parameters(self) -> tuple[int, int]: + r""" + Returns the number of trainable parameters and the number of all parameters in the model. + """ + trainable_params = 0 + all_param = 0 + for _, param in self.named_parameters(): + num_params = param.numel() + # if using DS Zero 3 and the weights are initialized empty + if num_params == 0 and hasattr(param, "ds_numel"): + num_params = param.ds_numel + + # Due to the design of 4bit linear layers from bitsandbytes + # one needs to multiply the number of parameters by 2 to get + # the correct number of parameters + if param.__class__.__name__ == "Params4bit": + num_bytes = param.quant_storage.itemsize if hasattr(param, "quant_storage") else 1 + num_params = num_params * 2 * num_bytes + + all_param += num_params + if param.requires_grad: + trainable_params += num_params + + return trainable_params, all_param + + def print_trainable_parameters(self) -> None: + """ + Prints the number of trainable parameters in the model. + + Note: print_trainable_parameters() uses get_nb_trainable_parameters() which is different from + num_parameters(only_trainable=True) from huggingface/transformers. get_nb_trainable_parameters() returns + (trainable parameters, all parameters) of the Peft Model which includes modified backbone transformer model. + For techniques like LoRA, the backbone transformer model is modified in place with LoRA modules. However, for + prompt tuning, the backbone transformer model is unmodified. num_parameters(only_trainable=True) returns number + of trainable parameters of the backbone transformer model which can be different. 
+ """ + trainable_params, all_param = self.get_nb_trainable_parameters() + + print( + f"trainable params: {trainable_params:,d} || all params: {all_param:,d} || trainable%: {100 * trainable_params / all_param}" + ) + + def __getattr__(self, name: str): + """Forward missing attributes to the wrapped module.""" + try: + return super().__getattr__(name) # defer to nn.Module's logic + except AttributeError: + return getattr(self.base_model, name) + + @contextmanager + def _enable_peft_forward_hooks(self, *args, **kwargs): + # If the base model has a method called _enable_peft_forward_hooks, it is invoked as a context. Otherwise, this + # runs without any changes + if hasattr(self.base_model, "_enable_peft_forward_hooks"): + with self.base_model._enable_peft_forward_hooks(*args, **kwargs): + yield + return + else: + # nothing to enable + yield + return + + def forward(self, *args: Any, **kwargs: Any): + """ + Forward pass of the model. + """ + with self._enable_peft_forward_hooks(*args, **kwargs): + kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} + return self.get_base_model()(*args, **kwargs) + + def generate(self, *args, **kwargs): + with self._enable_peft_forward_hooks(*args, **kwargs): + kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} + return self.get_base_model().generate(*args, **kwargs) + + def _get_base_model_class(self, is_prompt_tuning=False): + """ + Returns the base model class. + """ + if not is_prompt_tuning: + return self.base_model.model.__class__ + return self.base_model.__class__ + + @contextmanager + def disable_adapter(self): + """ + Context manager that disables the adapter module. Use this to run inference on the base model. + + Example: + + ```py + >>> with model.disable_adapter(): + ... model(inputs) + ``` + """ + try: + if self.peft_config[self.active_adapter].is_prompt_learning: + # TODO: consider replacing this patching of methods with a more robust mechanism: setting a flag and + # letting the underlying methods deal with it, same as how LoRA does it. + old_forward = self.forward + self.forward = self.base_model.forward + old_prepare_inputs_for_generation = self.prepare_inputs_for_generation + self.prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation + else: + self.base_model.disable_adapter_layers() + yield + finally: + if self.peft_config[self.active_adapter].is_prompt_learning: + self.forward = old_forward + self.prepare_inputs_for_generation = old_prepare_inputs_for_generation + else: + self.base_model.enable_adapter_layers() + + def get_base_model(self) -> torch.nn.Module: + """ + Returns the base model. + """ + return ( + self.base_model + if (self.active_peft_config.is_prompt_learning or self.peft_type == PeftType.POLY) + else self.base_model.model + ) + + def add_adapter(self, adapter_name: str, peft_config: PeftConfig) -> None: + """ + Add an adapter to the model based on the passed configuration. + + This adapter is not trained. To load a trained adapter, check out [`PeftModel.load_adapter`]. + + The name for the new adapter should be unique. + + The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active + adapter. + + Args: + adapter_name (`str`): + The name of the adapter to be added. + peft_config ([`PeftConfig`]): + The configuration of the adapter to be added. + """ + if peft_config.peft_type != self.peft_type: + raise ValueError( + f"Cannot combine adapters with different peft types. 
" + f"Found {self.peft_type} and {peft_config.peft_type}." + ) + + try: + if peft_config.is_prompt_learning: + self.peft_config[adapter_name] = peft_config + if hasattr(self.config, "to_dict"): + dict_config = self.config.to_dict() + else: + dict_config = self.config + + peft_config = _prepare_prompt_learning_config(peft_config, dict_config) + self._setup_prompt_encoder(adapter_name) + elif peft_config.is_adaption_prompt: + self.base_model.add_adapter(adapter_name, peft_config) + else: + self.peft_config[adapter_name] = peft_config + self.base_model.inject_adapter(self.base_model.model, adapter_name) + except Exception: # something went wrong, roll back + if adapter_name in self.peft_config: + del self.peft_config[adapter_name] + raise + + self.set_additional_trainable_modules(peft_config, adapter_name) + + def set_additional_trainable_modules(self, peft_config, adapter_name): + if getattr(peft_config, "modules_to_save", None) is not None: + if self.modules_to_save is None: + self.modules_to_save = set(peft_config.modules_to_save) + else: + self.modules_to_save.update(peft_config.modules_to_save) + _set_trainable(self, adapter_name) + + @classmethod + def _split_kwargs(cls, kwargs: dict[str, Any]): + _kwargs_not_in_hf_hub_download_signature = ("use_auth_token",) + hf_hub_download_kwargs = {} + other_kwargs = {} + + for key, value in kwargs.items(): + if key in inspect.signature(hf_hub_download).parameters or key in _kwargs_not_in_hf_hub_download_signature: + hf_hub_download_kwargs[key] = value + else: + other_kwargs[key] = value + + return hf_hub_download_kwargs, other_kwargs + + def load_adapter(self, model_id: str, adapter_name: str, is_trainable: bool = False, **kwargs: Any): + """ + Load a trained adapter into the model. + + The name for the new adapter should be unique. + + The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active + adapter. + + Args: + adapter_name (`str`): + The name of the adapter to be added. + peft_config ([`PeftConfig`]): + The configuration of the adapter to be added. + is_trainable (`bool`, *optional*, defaults to `False`): + Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be + used for inference. + kwargs: (`optional`): + Additional arguments to modify the way the adapter is loaded, e.g. the token for Hugging Face Hub. 
+ """ + from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING + + hf_hub_download_kwargs, kwargs = self._split_kwargs(kwargs) + torch_device = infer_device() + + if adapter_name not in self.peft_config: + # load the config + peft_config = PEFT_TYPE_TO_CONFIG_MAPPING[ + PeftConfig._get_peft_type( + model_id, + **hf_hub_download_kwargs, + ) + ].from_pretrained( + model_id, + **hf_hub_download_kwargs, + ) + if peft_config.is_prompt_learning and is_trainable: + raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") + else: + peft_config.inference_mode = not is_trainable + self.add_adapter(adapter_name, peft_config) + + adapters_weights = load_peft_weights(model_id, device=torch_device, **hf_hub_download_kwargs) + + # load the weights into the model + load_result = set_peft_model_state_dict(self, adapters_weights, adapter_name=adapter_name) + if ( + (getattr(self, "hf_device_map", None) is not None) + and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0) + and len(self.peft_config) == 1 + ): + device_map = kwargs.get("device_map", "auto") + max_memory = kwargs.get("max_memory", None) + offload_dir = kwargs.get("offload_folder", None) + offload_index = kwargs.get("offload_index", None) + + dispatch_model_kwargs = {} + # Safety checker for previous `accelerate` versions + # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/ + if "offload_index" in inspect.signature(dispatch_model).parameters: + dispatch_model_kwargs["offload_index"] = offload_index + + no_split_module_classes = self._no_split_modules + + if device_map != "sequential": + max_memory = get_balanced_memory( + self, + max_memory=max_memory, + no_split_module_classes=no_split_module_classes, + low_zero=(device_map == "balanced_low_0"), + ) + if isinstance(device_map, str): + device_map = infer_auto_device_map( + self, max_memory=max_memory, no_split_module_classes=no_split_module_classes + ) + dispatch_model( + self, + device_map=device_map, + offload_dir=offload_dir, + **dispatch_model_kwargs, + ) + hook = AlignDevicesHook(io_same_device=True) + if self.peft_config[adapter_name].is_prompt_learning: + remove_hook_from_submodules(self.prompt_encoder) + add_hook_to_module(self.get_base_model(), hook) + + # Set model in evaluation mode to deactivate Dropout modules by default + if not is_trainable: + self.eval() + return load_result + + def set_adapter(self, adapter_name: str) -> None: + """ + Sets the active adapter. + + Only one adapter can be active at a time. + + Additionally, this function will set the specified adapter to trainable (i.e., requires_grad=True). If this is + not desired, use the following code. + + ```py + >>> for name, param in model_peft.named_parameters(): + ... if ...: # some check on name (ex. if 'lora' in name) + ... param.requires_grad = False + ``` + + Args: + adapter_name (`str`): + The name of the adapter to be set as active. The adapter must be loaded first. 
+ """ + if adapter_name not in self.peft_config: + raise ValueError(f"Adapter {adapter_name} not found.") + self.active_adapter = adapter_name + if not self.peft_config[adapter_name].is_prompt_learning: + self.base_model.set_adapter(adapter_name) + _set_adapter(self, adapter_name) + + @property + def base_model_torch_dtype(self): + return getattr(self.base_model, "dtype", None) + + @property + def active_peft_config(self): + return self.peft_config[self.active_adapter] + + def create_or_update_model_card(self, output_dir: str): + """ + Updates or create model card to include information about peft: + 1. Adds `peft` library tag + 2. Adds peft version + 3. Adds base model info + 4. Adds quantization information if it was used + """ + + filename = os.path.join(output_dir, "README.md") + + card = ModelCard.load(filename) if os.path.exists(filename) else ModelCard.from_template(ModelCardData()) + + card.data["library_name"] = "peft" + + model_config = getattr(self, "config", None) + if hasattr(model_config, "to_dict"): + model_config = model_config.to_dict() + if model_config is not None and "_name_or_path" in model_config: + card.data["base_model"] = model_config["_name_or_path"] + + lines = card.text.splitlines() + + quantization_config = None + if hasattr(model_config, "quantization_config"): + quantization_config = self.config.quantization_config.to_dict() + training_config_text = "" + quantization_prefix = "The following `bitsandbytes` quantization config was used during training:" + # Adds quantization information if it was used + if quantization_config is not None: + training_config_text += f"\n{quantization_prefix}\n" + training_config_text += "\n".join([f"- {name}: {value}" for name, value in quantization_config.items()]) + training_config_text += "\n" + + training_procedure_heading = "## Training procedure" + if quantization_prefix not in lines and bool(training_config_text): + if training_procedure_heading in lines: + lines.insert(lines.index(training_procedure_heading) + 2, training_config_text) + else: + lines.append(f"{training_procedure_heading}\n{training_config_text}") + + # Adds peft version + framework_block_heading = "### Framework versions" + if f"- PEFT {__version__}" not in lines: + if framework_block_heading in lines: + lines.insert(lines.index(framework_block_heading) + 2, f"- PEFT {__version__}") + else: + lines.append(f"{framework_block_heading}\n\n- PEFT {__version__}") + + card.text = "\n".join(lines) + card.save(filename) + + +class PeftModelForSequenceClassification(PeftModel): + """ + Peft model for sequence classification tasks. + + Args: + model ([`~transformers.PreTrainedModel`]): Base transformer model. + peft_config ([`PeftConfig`]): Peft config. + + **Attributes**: + - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. + - **cls_layer_name** (`str`) -- The name of the classification layer. + + Example: + + ```py + >>> from transformers import AutoModelForSequenceClassification + >>> from peft import PeftModelForSequenceClassification, get_peft_config + + >>> config = { + ... "peft_type": "PREFIX_TUNING", + ... "task_type": "SEQ_CLS", + ... "inference_mode": False, + ... "num_virtual_tokens": 20, + ... "token_dim": 768, + ... "num_transformer_submodules": 1, + ... "num_attention_heads": 12, + ... "num_layers": 12, + ... "encoder_hidden_size": 768, + ... "prefix_projection": False, + ... "postprocess_past_key_value_function": None, + ... 
} + + >>> peft_config = get_peft_config(config) + >>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased") + >>> peft_model = PeftModelForSequenceClassification(model, peft_config) + >>> peft_model.print_trainable_parameters() + trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117 + ``` + """ + + def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None: + super().__init__(model, peft_config, adapter_name) + if self.modules_to_save is None: + self.modules_to_save = {"classifier", "score"} + else: + self.modules_to_save.update({"classifier", "score"}) + + for name, _ in self.base_model.named_children(): + if any(module_name in name for module_name in self.modules_to_save): + self.cls_layer_name = name + break + + # to make sure classifier layer is trainable + _set_trainable(self, adapter_name) + + def forward( + self, + input_ids=None, + attention_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + task_ids=None, + **kwargs, + ): + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + peft_config = self.active_peft_config + if not peft_config.is_prompt_learning: + with self._enable_peft_forward_hooks(**kwargs): + kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} + if peft_config.peft_type == PeftType.POLY: + kwargs["task_ids"] = task_ids + return self.base_model( + input_ids=input_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + labels=labels, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + **kwargs, + ) + + batch_size = _get_batch_size(input_ids, inputs_embeds) + if attention_mask is not None: + # concat prompt attention mask + prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) + attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) + if kwargs.get("position_ids", None) is not None: + warnings.warn("Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.") + kwargs["position_ids"] = None + kwargs.update( + { + "attention_mask": attention_mask, + "labels": labels, + "output_attentions": output_attentions, + "output_hidden_states": output_hidden_states, + "return_dict": return_dict, + } + ) + + if peft_config.peft_type == PeftType.PREFIX_TUNING: + return self._prefix_tuning_forward(input_ids=input_ids, **kwargs) + else: + if kwargs.get("token_type_ids", None) is not None: + kwargs["token_type_ids"] = torch.cat( + ( + torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device), + kwargs["token_type_ids"], + ), + dim=1, + ).long() + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) + prompts = prompts.to(inputs_embeds.dtype) + inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) + return self.base_model(inputs_embeds=inputs_embeds, **kwargs) + + def _prefix_tuning_forward( + self, + input_ids=None, + attention_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + **kwargs, + ): + batch_size = _get_batch_size(input_ids, inputs_embeds) + past_key_values = self.get_prompt(batch_size) + fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys()) + kwargs.update( + { + "input_ids": input_ids, + "attention_mask": attention_mask, + "inputs_embeds": inputs_embeds, + "output_attentions": output_attentions, + "output_hidden_states": output_hidden_states, + "return_dict": return_dict, + "past_key_values": past_key_values, + } + ) + if "past_key_values" in fwd_params: + return self.base_model(labels=labels, **kwargs) + else: + transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name) + fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys()) + if "past_key_values" not in fwd_params: + raise ValueError("Model does not support past key values which are required for prefix tuning.") + outputs = transformer_backbone_name(**kwargs) + pooled_output = outputs[1] if len(outputs) > 1 else outputs[0] + if "dropout" in [name for name, _ in list(self.base_model.named_children())]: + pooled_output = self.base_model.dropout(pooled_output) + logits = self.base_model.get_submodule(self.cls_layer_name)(pooled_output) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.base_model.num_labels == 1: + self.config.problem_type = "regression" + elif self.base_model.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.base_model.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.base_model.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + 
+class PeftModelForCausalLM(PeftModel): + """ + Peft model for causal language modeling. + + Args: + model ([`~transformers.PreTrainedModel`]): Base transformer model. + peft_config ([`PeftConfig`]): Peft config. + + + Example: + + ```py + >>> from transformers import AutoModelForCausalLM + >>> from peft import PeftModelForCausalLM, get_peft_config + + >>> config = { + ... "peft_type": "PREFIX_TUNING", + ... "task_type": "CAUSAL_LM", + ... "inference_mode": False, + ... "num_virtual_tokens": 20, + ... "token_dim": 1280, + ... "num_transformer_submodules": 1, + ... "num_attention_heads": 20, + ... "num_layers": 36, + ... "encoder_hidden_size": 1280, + ... "prefix_projection": False, + ... "postprocess_past_key_value_function": None, + ... } + + >>> peft_config = get_peft_config(config) + >>> model = AutoModelForCausalLM.from_pretrained("gpt2-large") + >>> peft_model = PeftModelForCausalLM(model, peft_config) + >>> peft_model.print_trainable_parameters() + trainable params: 1843200 || all params: 775873280 || trainable%: 0.23756456724479544 + ``` + """ + + def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None: + super().__init__(model, peft_config, adapter_name) + self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation + + def forward( + self, + input_ids=None, + attention_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + task_ids=None, + **kwargs, + ): + peft_config = self.active_peft_config + if not peft_config.is_prompt_learning: + if self.base_model.config.model_type == "mpt": + if inputs_embeds is not None: + raise AssertionError("forward in MPTForCausalLM does not support inputs_embeds") + return self.base_model( + input_ids=input_ids, + attention_mask=attention_mask, + labels=labels, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + **kwargs, + ) + + if peft_config.peft_type == PeftType.POLY: + kwargs["task_ids"] = task_ids + + with self._enable_peft_forward_hooks(**kwargs): + kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} + return self.base_model( + input_ids=input_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + labels=labels, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + **kwargs, + ) + + batch_size = _get_batch_size(input_ids, inputs_embeds) + if attention_mask is not None: + # concat prompt attention mask + prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) + attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) + + if kwargs.get("position_ids", None) is not None: + warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") + kwargs["position_ids"] = None + if kwargs.get("token_type_ids", None) is not None: + warnings.warn("Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids") + kwargs["token_type_ids"] = None + kwargs.update( + { + "attention_mask": attention_mask, + "labels": labels, + "output_attentions": output_attentions, + "output_hidden_states": output_hidden_states, + "return_dict": return_dict, + } + ) + + if peft_config.peft_type == PeftType.PREFIX_TUNING: + past_key_values = self.get_prompt(batch_size) + return self.base_model( + input_ids=input_ids, inputs_embeds=inputs_embeds, past_key_values=past_key_values, **kwargs + ) + else: + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + # concat prompt labels + if labels is not None: + prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device) + kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1) + prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) + prompts = prompts.to(inputs_embeds.dtype) + inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) + return self.base_model(inputs_embeds=inputs_embeds, **kwargs) + + def generate(self, *args, **kwargs): + peft_config = self.active_peft_config + self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation + if hasattr(self.base_model, "model"): + self.base_model.model.generation_config = self.generation_config + else: + self.base_model.generation_config = self.generation_config + try: + if not peft_config.is_prompt_learning: + with self._enable_peft_forward_hooks(*args, **kwargs): + kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} + outputs = self.base_model.generate(*args, **kwargs) + else: + outputs = self.base_model.generate(**kwargs) + except: + self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation + raise + else: + self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation + return outputs + + def prepare_inputs_for_generation(self, *args, task_ids: Optional[torch.Tensor] = None, **kwargs): + peft_config = self.active_peft_config + model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs) + + # https://github.com/huggingface/transformers/pull/26681/ introduced new cache format + # for some architectures which requires a special fix for prompt tuning etc. + # TODO: starting with transformers 4.38, all architectures should support caching. + uses_transformers_4_38 = packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.38.0") + uses_transformers_4_36 = packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.36.0") + transformers_new_cache_archs = ["llama", "mistral", "persimmon", "phi"] + uses_cache = uses_transformers_4_38 or ( + uses_transformers_4_36 and self.base_model.config.model_type in transformers_new_cache_archs + ) + + if peft_config.peft_type == PeftType.POLY: + model_kwargs["task_ids"] = task_ids + if peft_config.is_prompt_learning: + if uses_cache and (model_kwargs["past_key_values"] is not None): + # change in the logic of `prepare_inputs_for_generation` makes the below code necessary + # In prompt learning methods, past key values are longer when compared to the `input_ids`. + # As such only consider the last input ids in the autogressive generation phase. 
+ if model_kwargs["past_key_values"][0][0].shape[-2] >= model_kwargs["input_ids"].shape[1]: + model_kwargs["input_ids"] = model_kwargs["input_ids"][:, -1:] + + if model_kwargs.get("attention_mask", None) is not None: + size = model_kwargs["input_ids"].shape[0], peft_config.num_virtual_tokens + prefix_attention_mask = torch.ones(size).to(model_kwargs["input_ids"].device) + model_kwargs["attention_mask"] = torch.cat( + (prefix_attention_mask, model_kwargs["attention_mask"]), dim=1 + ) + + if model_kwargs.get("position_ids", None) is not None: + warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") + model_kwargs["position_ids"] = None + + if kwargs.get("token_type_ids", None) is not None: + warnings.warn( + "Token type ids are not supported for parameter efficient tuning. Ignoring token type ids" + ) + kwargs["token_type_ids"] = None + + if model_kwargs["past_key_values"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING: + past_key_values = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0]) + model_kwargs["past_key_values"] = past_key_values + else: + if model_kwargs["past_key_values"] is None: + inputs_embeds = self.word_embeddings(model_kwargs["input_ids"]) + prompts = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0], task_ids=task_ids) + prompts = prompts.to(inputs_embeds.dtype) + model_kwargs["inputs_embeds"] = torch.cat((prompts, inputs_embeds), dim=1) + model_kwargs["input_ids"] = None + + # For transformers>=4.38.0 - for some architectures such as Llama, `cache_position` is + # passed in the forward pass to keep track of the position ids of the cache. We have to + # pop that from `model_kwargs` as `cache_position` is properly created by the model, using the passed + # `inputs_embeds`: https://github.com/huggingface/transformers/blob/593230f0a1150ea9c0477b9d859f25daf73c8c33/src/transformers/models/llama/modeling_llama.py#L956 + _ = model_kwargs.pop("cache_position", None) + + return model_kwargs + + +class PeftModelForSeq2SeqLM(PeftModel): + """ + Peft model for sequence-to-sequence language modeling. + + Args: + model ([`~transformers.PreTrainedModel`]): Base transformer model. + peft_config ([`PeftConfig`]): Peft config. + + + Example: + + ```py + >>> from transformers import AutoModelForSeq2SeqLM + >>> from peft import PeftModelForSeq2SeqLM, get_peft_config + + >>> config = { + ... "peft_type": "LORA", + ... "task_type": "SEQ_2_SEQ_LM", + ... "inference_mode": False, + ... "r": 8, + ... "target_modules": ["q", "v"], + ... "lora_alpha": 32, + ... "lora_dropout": 0.1, + ... "fan_in_fan_out": False, + ... "enable_lora": None, + ... "bias": "none", + ... 
} + + >>> peft_config = get_peft_config(config) + >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") + >>> peft_model = PeftModelForSeq2SeqLM(model, peft_config) + >>> peft_model.print_trainable_parameters() + trainable params: 884736 || all params: 223843584 || trainable%: 0.3952474242013566 + ``` + """ + + def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None: + super().__init__(model, peft_config, adapter_name) + self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation + self.base_model_prepare_encoder_decoder_kwargs_for_generation = ( + self.base_model._prepare_encoder_decoder_kwargs_for_generation + ) + + def forward( + self, + input_ids=None, + attention_mask=None, + inputs_embeds=None, + decoder_input_ids=None, + decoder_attention_mask=None, + decoder_inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + task_ids=None, + **kwargs, + ): + peft_config = self.active_peft_config + if not peft_config.is_prompt_learning: + if peft_config.peft_type == PeftType.POLY: + kwargs["task_ids"] = task_ids + + with self._enable_peft_forward_hooks(**kwargs): + kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} + return self.base_model( + input_ids=input_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + decoder_inputs_embeds=decoder_inputs_embeds, + labels=labels, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + **kwargs, + ) + + batch_size = _get_batch_size(input_ids, inputs_embeds) + if decoder_attention_mask is not None: + # concat prompt attention mask + prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( + decoder_attention_mask.device + ) + if peft_config.peft_type not in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]: + decoder_attention_mask = torch.cat((prefix_attention_mask, decoder_attention_mask), dim=1) + + if kwargs.get("position_ids", None) is not None: + warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") + kwargs["position_ids"] = None + if kwargs.get("token_type_ids", None) is not None: + warnings.warn("Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids") + kwargs["token_type_ids"] = None + kwargs.update( + { + "attention_mask": attention_mask, + "decoder_attention_mask": decoder_attention_mask, + "labels": labels, + "output_attentions": output_attentions, + "output_hidden_states": output_hidden_states, + "return_dict": return_dict, + } + ) + + if peft_config.peft_type == PeftType.PREFIX_TUNING: + past_key_values = self.get_prompt(batch_size) + return self.base_model( + input_ids=input_ids, + decoder_input_ids=decoder_input_ids, + decoder_inputs_embeds=decoder_inputs_embeds, + past_key_values=past_key_values, + **kwargs, + ) + elif peft_config.peft_type in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]: + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + if attention_mask is not None: + # concat prompt attention mask + prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( + attention_mask.device + ) + kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1) + + prompts = self.get_prompt(batch_size=batch_size) + prompts = prompts.to(inputs_embeds.dtype) + inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1) + + return self.base_model( + inputs_embeds=inputs_embeds, + decoder_input_ids=decoder_input_ids, + decoder_inputs_embeds=decoder_inputs_embeds, + **kwargs, + ) + else: + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + if decoder_inputs_embeds is None and decoder_input_ids is None: + decoder_input_ids = shift_tokens_right( + labels, self.config.pad_token_id, self.config.decoder_start_token_id + ) + decoder_inputs_embeds = self.word_embeddings(decoder_input_ids) + + if attention_mask is not None: + # concat prompt attention mask + prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( + attention_mask.device + ) + kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1) + # concat prompt labels + if labels is not None: + if peft_config.num_transformer_submodules == 1: + kwargs["labels"] = labels + elif peft_config.num_transformer_submodules == 2: + prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device) + kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1) + prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) + prompts = prompts.to(inputs_embeds.dtype) + inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1) + if peft_config.num_transformer_submodules == 1: + return self.base_model(inputs_embeds=inputs_embeds, **kwargs) + elif peft_config.num_transformer_submodules == 2: + decoder_inputs_embeds = torch.cat( + (prompts[:, peft_config.num_virtual_tokens :], decoder_inputs_embeds), dim=1 + ) + return self.base_model( + inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, **kwargs + ) + + def generate(self, **kwargs): + peft_config = self.active_peft_config + self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation + self.base_model._prepare_encoder_decoder_kwargs_for_generation = ( + self._prepare_encoder_decoder_kwargs_for_generation + ) + try: + if not peft_config.is_prompt_learning: + with self._enable_peft_forward_hooks(**kwargs): + kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} + outputs = self.base_model.generate(**kwargs) + else: + if "input_ids" not in kwargs: + raise ValueError("input_ids must be provided for 
Peft model generation") + if kwargs.get("position_ids", None) is not None: + warnings.warn( + "Position ids are not supported for parameter efficient tuning. Ignoring position ids." + ) + kwargs["position_ids"] = None + if kwargs.get("token_type_ids", None) is not None: + warnings.warn( + "Token type ids are not supported for parameter efficient tuning. Ignoring token type ids" + ) + kwargs["token_type_ids"] = None + + if peft_config.peft_type == PeftType.PREFIX_TUNING: + outputs = self.base_model.generate(**kwargs) + elif peft_config.peft_type in [ + PeftType.PROMPT_TUNING, + PeftType.P_TUNING, + PeftType.MULTITASK_PROMPT_TUNING, + ]: + kwargs = deepcopy(kwargs) + + if "encoder_outputs" in kwargs: + del kwargs["encoder_outputs"] + warnings.warn( + "`encoder_outputs` should not be passed to `generate` when using prompt tuning. Ignoring it." + ) + + input_ids = kwargs.pop("input_ids") + inputs_embeds = self.word_embeddings(input_ids) + batch_size = inputs_embeds.shape[0] + prompts = self.get_prompt(batch_size=batch_size, task_ids=kwargs.pop("task_ids", None)) + prompts = prompts.to(inputs_embeds.dtype) + + inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1) + kwargs["inputs_embeds"] = inputs_embeds + + if "attention_mask" in kwargs: + prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( + kwargs["attention_mask"].device + ) + kwargs["attention_mask"] = torch.cat((prefix_attention_mask, kwargs["attention_mask"]), dim=1) + + return self.base_model.generate(**kwargs) + else: + raise NotImplementedError + except: + self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation + self.base_model._prepare_encoder_decoder_kwargs_for_generation = ( + self.base_model_prepare_encoder_decoder_kwargs_for_generation + ) + raise + else: + self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation + self.base_model._prepare_encoder_decoder_kwargs_for_generation = ( + self.base_model_prepare_encoder_decoder_kwargs_for_generation + ) + return outputs + + def prepare_inputs_for_generation(self, *args, task_ids: torch.Tensor = None, **kwargs): + peft_config = self.active_peft_config + model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs) + if peft_config.peft_type == PeftType.POLY: + model_kwargs["task_ids"] = task_ids + if model_kwargs["past_key_values"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING: + batch_size = model_kwargs["decoder_input_ids"].shape[0] + past_key_values = self.get_prompt(batch_size) + model_kwargs["past_key_values"] = past_key_values + + return model_kwargs + + +class PeftModelForTokenClassification(PeftModel): + """ + Peft model for token classification tasks. + + Args: + model ([`~transformers.PreTrainedModel`]): Base transformer model. + peft_config ([`PeftConfig`]): Peft config. + + **Attributes**: + - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. + - **cls_layer_name** (`str`) -- The name of the classification layer. + + Example: + + ```py + >>> from transformers import AutoModelForSequenceClassification + >>> from peft import PeftModelForTokenClassification, get_peft_config + + >>> config = { + ... "peft_type": "PREFIX_TUNING", + ... "task_type": "TOKEN_CLS", + ... "inference_mode": False, + ... "num_virtual_tokens": 20, + ... "token_dim": 768, + ... "num_transformer_submodules": 1, + ... "num_attention_heads": 12, + ... "num_layers": 12, + ... 
"encoder_hidden_size": 768, + ... "prefix_projection": False, + ... "postprocess_past_key_value_function": None, + ... } + + >>> peft_config = get_peft_config(config) + >>> model = AutoModelForTokenClassification.from_pretrained("bert-base-cased") + >>> peft_model = PeftModelForTokenClassification(model, peft_config) + >>> peft_model.print_trainable_parameters() + trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117 + ``` + """ + + def __init__(self, model: torch.nn.Module, peft_config: PeftConfig = None, adapter_name: str = "default") -> None: + super().__init__(model, peft_config, adapter_name) + if self.modules_to_save is None: + self.modules_to_save = {"classifier", "score"} + else: + self.modules_to_save.update({"classifier", "score"}) + + for name, _ in self.base_model.named_children(): + if any(module_name in name for module_name in self.modules_to_save): + self.cls_layer_name = name + break + + # to make sure classifier layer is trainable + _set_trainable(self, adapter_name) + + def forward( + self, + input_ids=None, + attention_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + task_ids=None, + **kwargs, + ): + peft_config = self.active_peft_config + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if not peft_config.is_prompt_learning: + with self._enable_peft_forward_hooks(**kwargs): + kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} + if peft_config.peft_type == PeftType.POLY: + kwargs["task_ids"] = task_ids + return self.base_model( + input_ids=input_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + labels=labels, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + **kwargs, + ) + + batch_size = _get_batch_size(input_ids, inputs_embeds) + if attention_mask is not None: + # concat prompt attention mask + prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) + attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) + if kwargs.get("position_ids", None) is not None: + warnings.warn("Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.") + kwargs["position_ids"] = None + kwargs.update( + { + "attention_mask": attention_mask, + "labels": labels, + "output_attentions": output_attentions, + "output_hidden_states": output_hidden_states, + "return_dict": return_dict, + } + ) + + if peft_config.peft_type == PeftType.PREFIX_TUNING: + return self._prefix_tuning_forward(input_ids=input_ids, **kwargs) + else: + if kwargs.get("token_type_ids", None) is not None: + kwargs["token_type_ids"] = torch.cat( + ( + torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device), + kwargs["token_type_ids"], + ), + dim=1, + ).long() + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) + prompts = prompts.to(inputs_embeds.dtype) + inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) + return self.base_model(inputs_embeds=inputs_embeds, **kwargs) + + def _prefix_tuning_forward( + self, + input_ids=None, + attention_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + **kwargs, + ): + batch_size = _get_batch_size(input_ids, inputs_embeds) + past_key_values = self.get_prompt(batch_size) + fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys()) + kwargs.update( + { + "input_ids": input_ids, + "attention_mask": attention_mask, + "inputs_embeds": inputs_embeds, + "output_attentions": output_attentions, + "output_hidden_states": output_hidden_states, + "return_dict": return_dict, + "past_key_values": past_key_values, + } + ) + if "past_key_values" in fwd_params: + return self.base_model(labels=labels, **kwargs) + else: + transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name) + fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys()) + if "past_key_values" not in fwd_params: + raise ValueError("Model does not support past key values which are required for prefix tuning.") + outputs = transformer_backbone_name(**kwargs) + sequence_output = outputs[0] + if "dropout" in [name for name, _ in list(self.base_model.named_children())]: + sequence_output = self.base_model.dropout(sequence_output) + logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +class PeftModelForQuestionAnswering(PeftModel): + """ + Peft model for extractive question answering. + + Args: + model ([`~transformers.PreTrainedModel`]): Base transformer model. + peft_config ([`PeftConfig`]): Peft config. + + **Attributes**: + - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. + - **cls_layer_name** (`str`) -- The name of the classification layer. + + Example: + + ```py + >>> from transformers import AutoModelForQuestionAnswering + >>> from peft import PeftModelForQuestionAnswering, get_peft_config + + >>> config = { + ... "peft_type": "LORA", + ... "task_type": "QUESTION_ANS", + ... "inference_mode": False, + ... "r": 16, + ... "target_modules": ["query", "value"], + ... "lora_alpha": 32, + ... 
"lora_dropout": 0.05, + ... "fan_in_fan_out": False, + ... "bias": "none", + ... } + + >>> peft_config = get_peft_config(config) + >>> model = AutoModelForQuestionAnswering.from_pretrained("bert-base-cased") + >>> peft_model = PeftModelForQuestionAnswering(model, peft_config) + >>> peft_model.print_trainable_parameters() + trainable params: 592900 || all params: 108312580 || trainable%: 0.5473971721475013 + ``` + """ + + def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None: + super().__init__(model, peft_config, adapter_name) + if self.modules_to_save is None: + self.modules_to_save = {"qa_outputs"} + else: + self.modules_to_save.update({"qa_outputs"}) + + for name, _ in self.base_model.named_children(): + if any(module_name in name for module_name in self.modules_to_save): + self.cls_layer_name = name + break + + # to make sure classifier layer is trainable + _set_trainable(self, adapter_name) + + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + inputs_embeds=None, + start_positions=None, + end_positions=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + task_ids=None, + **kwargs, + ): + peft_config = self.active_peft_config + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if not peft_config.is_prompt_learning: + if peft_config.peft_type == PeftType.POLY: + kwargs["task_ids"] = task_ids + + with self._enable_peft_forward_hooks(**kwargs): + kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} + return self.base_model( + input_ids=input_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + start_positions=start_positions, + end_positions=end_positions, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + **kwargs, + ) + + batch_size = _get_batch_size(input_ids, inputs_embeds) + if attention_mask is not None: + # concat prompt attention mask + prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) + attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) + if kwargs.get("position_ids", None) is not None: + warnings.warn("Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.") + kwargs["position_ids"] = None + kwargs.update( + { + "attention_mask": attention_mask, + "start_positions": start_positions, + "end_positions": end_positions, + "output_attentions": output_attentions, + "output_hidden_states": output_hidden_states, + "return_dict": return_dict, + } + ) + + if peft_config.peft_type == PeftType.PREFIX_TUNING: + return self._prefix_tuning_forward(input_ids=input_ids, **kwargs) + else: + if kwargs.get("token_type_ids", None) is not None: + kwargs["token_type_ids"] = torch.cat( + ( + torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device), + kwargs["token_type_ids"], + ), + dim=1, + ).long() + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + prompts = self.get_prompt(batch_size=batch_size) + prompts = prompts.to(inputs_embeds.dtype) + inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) + return self.base_model(inputs_embeds=inputs_embeds, **kwargs) + + def _prefix_tuning_forward( + self, + input_ids=None, + attention_mask=None, + inputs_embeds=None, + start_positions=None, + end_positions=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + **kwargs, + ): + batch_size = _get_batch_size(input_ids, inputs_embeds) + past_key_values = self.get_prompt(batch_size) + fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys()) + kwargs.update( + { + "input_ids": input_ids, + "attention_mask": attention_mask, + "inputs_embeds": inputs_embeds, + "output_attentions": output_attentions, + "output_hidden_states": output_hidden_states, + "return_dict": return_dict, + "past_key_values": past_key_values, + } + ) + if "past_key_values" in fwd_params: + return self.base_model(start_positions=start_positions, end_positions=end_positions, **kwargs) + else: + transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name) + fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys()) + if "past_key_values" not in fwd_params: + raise ValueError("Model does not support past key values which are required for prefix tuning.") + outputs = transformer_backbone_name(**kwargs) + sequence_output = outputs[0] + if "dropout" in [name for name, _ in list(self.base_model.named_children())]: + sequence_output = self.base_model.dropout(sequence_output) + logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1).contiguous() + end_logits = end_logits.squeeze(-1).contiguous() + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions = start_positions.clamp(0, ignored_index) + end_positions = end_positions.clamp(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return 
QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +class PeftModelForFeatureExtraction(PeftModel): + """ + Peft model for extracting features/embeddings from transformer models + + Args: + model ([`~transformers.PreTrainedModel`]): Base transformer model. + peft_config ([`PeftConfig`]): Peft config. + + **Attributes**: + - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. + + Example: + + ```py + >>> from transformers import AutoModel + >>> from peft import PeftModelForFeatureExtraction, get_peft_config + + >>> config = { + ... "peft_type": "LORA", + ... "task_type": "FEATURE_EXTRACTION", + ... "inference_mode": False, + ... "r": 16, + ... "target_modules": ["query", "value"], + ... "lora_alpha": 32, + ... "lora_dropout": 0.05, + ... "fan_in_fan_out": False, + ... "bias": "none", + ... } + >>> peft_config = get_peft_config(config) + >>> model = AutoModel.from_pretrained("bert-base-cased") + >>> peft_model = PeftModelForFeatureExtraction(model, peft_config) + >>> peft_model.print_trainable_parameters() + ``` + """ + + def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default"): + super().__init__(model, peft_config, adapter_name) + + def forward( + self, + input_ids=None, + attention_mask=None, + inputs_embeds=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + task_ids=None, + **kwargs, + ): + peft_config = self.active_peft_config + if not peft_config.is_prompt_learning: + if peft_config.peft_type == PeftType.POLY: + kwargs["task_ids"] = task_ids + + with self._enable_peft_forward_hooks(**kwargs): + kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} + return self.base_model( + input_ids=input_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + **kwargs, + ) + + batch_size = _get_batch_size(input_ids, inputs_embeds) + if attention_mask is not None: + # concat prompt attention mask + prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) + attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) + + if kwargs.get("position_ids", None) is not None: + warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") + kwargs["position_ids"] = None + if kwargs.get("token_type_ids", None) is not None: + warnings.warn("Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids") + kwargs["token_type_ids"] = None + kwargs.update( + { + "attention_mask": attention_mask, + "output_attentions": output_attentions, + "output_hidden_states": output_hidden_states, + "return_dict": return_dict, + } + ) + + if peft_config.peft_type == PeftType.PREFIX_TUNING: + past_key_values = self.get_prompt(batch_size) + return self.base_model(input_ids=input_ids, past_key_values=past_key_values, **kwargs) + else: + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + prompts = self.get_prompt(batch_size=batch_size) + prompts = prompts.to(inputs_embeds.dtype) + inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) + return self.base_model(inputs_embeds=inputs_embeds, **kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/peft/py.typed b/llmeval-env/lib/python3.10/site-packages/peft/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..068c69493a2059d8547150263b5655bee12194ec Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/bnb.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/bnb.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f952f3ea695789947ec69e724fe120a667ca433 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/bnb.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/config.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c603d4ab2eff3f875ea81787ddee0cd864f69fc2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/config.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/gptq.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/gptq.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d06551f2acd1853084e8744c311602c23a258a2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/gptq.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/layer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/layer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..973bb05d0d46b1945eb6bcfe10b2684771900a36 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/layer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/model.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aea520046f6f9d5c79a38143b8e5818dd57d378d Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/model.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/gptq.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/gptq.py new file mode 100644 index 0000000000000000000000000000000000000000..910377c5db5908727ed4753fd15b24e68821ce00 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/gptq.py @@ -0,0 +1,72 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import torch + +from .layer import AdaLoraLayer + + +class SVDQuantLinear(torch.nn.Module, AdaLoraLayer): + def __init__( + self, + base_layer, + adapter_name, + r: int = 0, + lora_alpha: int = 1, + lora_dropout: float = 0.0, + init_lora_weights: bool = True, + **kwargs, + ) -> None: + super().__init__() + AdaLoraLayer.__init__(self, base_layer) + + # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter + # for backwards compatibility + self.quant_linear_module = base_layer + self._active_adapter = adapter_name + self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + result = self.quant_linear_module(x) + + if self.disable_adapters: + return result + + for active_adapter in self.active_adapters: + if active_adapter not in self.lora_A.keys(): + continue + lora_A = self.lora_A[active_adapter] + lora_B = self.lora_B[active_adapter] + lora_E = self.lora_E[active_adapter] + dropout = self.lora_dropout[active_adapter] + scaling = self.scaling[active_adapter] + ranknum = self.ranknum[active_adapter] + 1e-5 + + requires_conversion = not torch.is_autocast_enabled() + if requires_conversion: + expected_dtype = result.dtype + if x.dtype != torch.float32: + x = x.float() + + output = (dropout(x) @ (lora_A * lora_E).T @ lora_B.T) * scaling / ranknum + # TODO: here, the dtype conversion is applied on the *whole expression*, + # not the intermediate result, unlike for SVDLinear8bitLT and + # SVDLinear4bit, is that correct? + if requires_conversion: + output = output.to(expected_dtype) + result += output + return result + + def __repr__(self) -> str: + rep = super().__repr__() + return "adalora." + rep diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__init__.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..763d0133a285eda4cf2aa68f624ea2c1b3a447a2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__init__.py @@ -0,0 +1,36 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
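# [Editor's sketch] The adapter update computed in `SVDQuantLinear.forward` above, in isolation:
# delta(x) = (x @ (A * E).T @ B.T) * scaling / ranknum, where E holds the learned singular values
# that AdaLoRA prunes when reallocating rank across layers. Shapes and values are illustrative
# dummies, not taken from a real adapter.
import torch

batch, in_features, out_features, r = 2, 16, 8, 4
x = torch.randn(batch, in_features)
lora_A = torch.randn(r, in_features)   # right singular vectors
lora_B = torch.randn(out_features, r)  # left singular vectors
lora_E = torch.randn(r, 1)             # singular values, broadcast across in_features
scaling, ranknum = 1.0, r + 1e-5

delta = (x @ (lora_A * lora_E).T @ lora_B.T) * scaling / ranknum
assert delta.shape == (batch, out_features)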
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from peft.import_utils import is_bnb_4bit_available, is_bnb_available + +from .config import IA3Config +from .layer import Conv2d, IA3Layer, Linear +from .model import IA3Model + + +__all__ = ["Conv2d", "IA3Config", "IA3Layer", "IA3Model", "Linear"] + + +def __getattr__(name): + if (name == "Linear8bitLt") and is_bnb_available(): + from .bnb import Linear8bitLt + + return Linear8bitLt + + if (name == "Linear4bit") and is_bnb_4bit_available(): + from .bnb import Linear4bit + + return Linear4bit + + raise AttributeError(f"module {__name__} has no attribute {name}") diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/bnb.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/bnb.py new file mode 100644 index 0000000000000000000000000000000000000000..628e3ce7229528a0b3157da349b2b34153573c51 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/bnb.py @@ -0,0 +1,129 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any + +import torch + +from peft.import_utils import is_bnb_4bit_available, is_bnb_available + +from .layer import IA3Layer + + +if is_bnb_available(): + + class Linear8bitLt(torch.nn.Module, IA3Layer): + # (IA)^3 implemented in a dense layer + def __init__( + self, + base_layer: torch.nn.Module, + adapter_name: str, + is_feedforward: bool, + init_ia3_weights: bool = True, + **kwargs, + ) -> None: + super().__init__() + IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward) + + # Freezing the pre-trained weight matrix + self.get_base_layer().weight.requires_grad = False + self._active_adapter = adapter_name + self.update_layer(adapter_name, init_ia3_weights) + + def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: + # note: no check for self.merged because merging is not supported (yet) + if self.disable_adapters: + return self.base_layer(x) + + ia3_scaling = 1 + for active_adapter in self.active_adapters: + if active_adapter not in self.ia3_l.keys(): + continue + ia3_scaling *= self.ia3_l[active_adapter].flatten() + + requires_conversion = (not torch.is_autocast_enabled()) and (x.dtype != torch.float32) + if requires_conversion: + x = x.float() + if self.is_feedforward: + result = self.base_layer(x * ia3_scaling) + expected_dtype = result.dtype + else: + result = self.base_layer(x) + expected_dtype = result.dtype + result = result * ia3_scaling + + if requires_conversion: + result = result.to(expected_dtype) + + return result + + def __repr__(self) -> str: + rep = super().__repr__() + return "ia3." 
+ rep + + +if is_bnb_4bit_available(): + + class Linear4bit(torch.nn.Module, IA3Layer): + # IA3 implemented in a dense layer + def __init__( + self, + base_layer: torch.nn.Module, + adapter_name: str, + is_feedforward: bool, + init_ia3_weights: bool = True, + **kwargs, + ) -> None: + super().__init__() + IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward) + + # Freezing the pre-trained weight matrix + self.get_base_layer().weight.requires_grad = False + self._active_adapter = adapter_name + self.update_layer(adapter_name, init_ia3_weights) + + def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: + # note: no check for self.merged because merging is not supported (yet) + if self.disable_adapters: + return self.base_layer(x) + + ia3_scaling = 1 + for active_adapter in self.active_adapters: + if active_adapter not in self.ia3_l.keys(): + continue + ia3_scaling *= self.ia3_l[active_adapter].flatten() + + requires_conversion = (not torch.is_autocast_enabled()) and (x.dtype != torch.float32) + if requires_conversion: + x = x.float() + if self.is_feedforward: + result = self.base_layer(x * ia3_scaling) + expected_dtype = result.dtype + else: + result = self.base_layer(x) + expected_dtype = result.dtype + result = result * ia3_scaling + + result = result.clone() + # adalora.py and lora.py both suggest that this is necessary for 4-bit training on older versions of Pytorch. + # This has been duplicated here. + + if requires_conversion: + result = result.to(expected_dtype) + + return result + + def __repr__(self) -> str: + rep = super().__repr__() + return "ia3." + rep diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/config.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/config.py new file mode 100644 index 0000000000000000000000000000000000000000..322ea068d3d44fc6797354cc718f725b4999bbe4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/config.py @@ -0,0 +1,98 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass, field +from typing import List, Optional, Union + +from peft.config import PeftConfig +from peft.utils import PeftType + + +@dataclass +class IA3Config(PeftConfig): + """ + This is the configuration class to store the configuration of a [`IA3Model`]. + + Args: + target_modules (`Optional[Union[List[str], str]]`): + The names of the modules to apply the adapter to. If this is specified, only the modules with the specified + names will be replaced. When passing a string, a regex match will be performed. When passing a list of + strings, either an exact match will be performed or it is checked if the name of the module ends with any + of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen, + excluding the output layer. If this is not specified, modules will be chosen according to the model + architecture. 
If the architecture is not known, an error will be raised -- in this case, you should specify + the target modules manually. + feedforward_modules (`Optional[Union[List[str], str]]`): + The names of the modules to be treated as feedforward modules, as in the original paper. These modules will + have (IA)³ vectors multiplied to the input, instead of the output. `feedforward_modules` must be a name or + a subset of names present in `target_modules`. + fan_in_fan_out (`bool`): + Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses + `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`. + modules_to_save (`Optional[List[str]]`): + List of modules apart from (IA)³ layers to be set as trainable and saved in the final checkpoint. + init_ia3_weights (`bool`): + Whether to initialize the vectors in the (IA)³ layers, defaults to `True`. Setting this to `False` is + discouraged. + """ + + target_modules: Optional[Union[List[str], str]] = field( + default=None, + metadata={ + "help": ( + "List of module names or regex expression of the module names to replace with (IA)³." + "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'." + "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer." + "If not specified, modules will be chosen according to the model architecture, If the architecture is " + "not known, an error will be raised -- in this case, you should specify the target modules manually." + ), + }, + ) + feedforward_modules: Optional[Union[List[str], str]] = field( + default=None, + metadata={ + "help": "List of module names or a regex expression of module names which are feedforward" + "For example, ['output.dense']" + }, + ) + fan_in_fan_out: bool = field( + default=False, + metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"}, + ) + modules_to_save: Optional[List[str]] = field( + default=None, + metadata={ + "help": "List of modules apart from (IA)^3 layers to be set as trainable and saved in the final checkpoint. " + "For example, in Sequence Classification or Token Classification tasks, " + "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." + }, + ) + init_ia3_weights: bool = field( + default=True, + metadata={"help": "Whether to initialize the vectors in the (IA)^3 layers."}, + ) + + def __post_init__(self): + self.peft_type = PeftType.IA3 + self.target_modules = ( + set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules + ) + self.feedforward_modules = ( + set(self.feedforward_modules) if isinstance(self.feedforward_modules, list) else self.feedforward_modules + ) + + # check if feedforward_modules is a subset of target_modules. 
run the check only if both are sets + if isinstance(self.feedforward_modules, set) and isinstance(self.target_modules, set): + if not self.feedforward_modules.issubset(self.target_modules): + raise ValueError("`feedforward_modules` should be a subset of `target_modules`") diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/layer.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/layer.py new file mode 100644 index 0000000000000000000000000000000000000000..9ea04e6873f857bbda6f5828a3fb5095a118c7cf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/layer.py @@ -0,0 +1,307 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +from typing import Any, List, Optional + +import torch +import torch.nn as nn +from transformers.pytorch_utils import Conv1D + +from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge +from peft.utils import transpose + + +class IA3Layer(BaseTunerLayer): + # All names of layers that may contain adapter weights + adapter_layer_names = ("ia3_l",) + + def __init__(self, base_layer: nn.Module, is_feedforward: bool, **kwargs) -> None: + self.base_layer = base_layer + self.ia3_l = nn.ParameterDict({}) + # Mark the weight as unmerged + self._disable_adapters = False + self.merged_adapters = [] + self.is_feedforward = is_feedforward + + base_layer = self.get_base_layer() + if isinstance(base_layer, nn.Linear): + in_features, out_features = base_layer.in_features, base_layer.out_features + elif isinstance(base_layer, nn.Conv2d): + in_features, out_features = base_layer.in_channels, base_layer.out_channels + elif isinstance(base_layer, nn.Embedding): + in_features, out_features = base_layer.num_embeddings, base_layer.embedding_dim + elif isinstance(base_layer, Conv1D): + in_features, out_features = ( + base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape + ) + else: + raise ValueError(f"Unsupported layer type {type(base_layer)}") + self.in_features = in_features + self.out_features = out_features + + def update_layer(self, adapter_name, init_ia3_weights): + # This code works for linear layers, override for other layer types + # Actual trainable parameters + if self.is_feedforward: + weight = torch.randn((1, self.in_features)) + else: + weight = torch.randn((self.out_features, 1)) + self.ia3_l[adapter_name] = nn.Parameter(weight) + if init_ia3_weights: + self.reset_ia3_parameters(adapter_name) + self.to(self.get_base_layer().weight.device) + self.set_adapter(self.active_adapters) + + def reset_ia3_parameters(self, adapter_name): + if adapter_name in self.ia3_l.keys(): + # initialize learned vector with torch.ones + nn.init.constant_(self.ia3_l[adapter_name], 1.0) + + +class Linear(nn.Module, IA3Layer): + # (IA)^3 implemented in a dense layer + def __init__( + self, + base_layer: nn.Module, + adapter_name: str, + fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight 
like (fan_in, fan_out) + is_feedforward: bool = False, # Set to True if the layer is treated as a feedforward layer + is_target_conv_1d_layer: bool = False, # whether target module is a conv1d layer. useful while unloading later + init_ia3_weights: bool = True, # whether to initialize IA3 weights + **kwargs, + ) -> None: + super().__init__() + IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward) + self.fan_in_fan_out = fan_in_fan_out + self.is_target_conv_1d_layer = is_target_conv_1d_layer + self._active_adapter = adapter_name + self.update_layer(adapter_name, init_ia3_weights) + + def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: + """ + Merge the active adapter weights into the base weights + + Args: + safe_merge (`bool`, *optional*): + If True, the merge operation will be performed in a copy of the original weights and check for NaNs + before merging the weights. This is useful if you want to check if the merge operation will produce + NaNs. Defaults to `False`. + adapter_names (`List[str]`, *optional*): + The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults + to `None`. + """ + adapter_names = check_adapters_to_merge(self, adapter_names) + if not adapter_names: + # no adapter to merge + return + + for active_adapter in adapter_names: + if active_adapter in self.ia3_l.keys(): + base_layer = self.get_base_layer() + ia3_l = transpose(self.ia3_l[active_adapter].data, self.fan_in_fan_out) + if safe_merge: + orig_weights = base_layer.weight.data + orig_weights = torch.mul(orig_weights, ia3_l) + + if not torch.isfinite(orig_weights).all(): + raise ValueError( + f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" + ) + base_layer.weight.data = orig_weights + else: + base_layer.weight.data = torch.mul(base_layer.weight.data, ia3_l) + + if not self.is_feedforward and (base_layer.bias is not None): + scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape) + base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data) + + self.merged_adapters.append(active_adapter) + + def unmerge(self) -> None: + """ + This method unmerges all merged adapter layers from the base weights. + """ + if not self.merged: + warnings.warn("Already unmerged. 
Nothing to do.") + return + + warnings.warn("Unmerge result can be inaccurate for (IA)^3.") + while len(self.merged_adapters) > 0: + active_adapter = self.merged_adapters.pop() + if active_adapter in self.ia3_l.keys(): + base_layer = self.get_base_layer() + # Add tolerace to avoid division by zero + ia3_l = transpose(self.ia3_l[active_adapter].data, self.fan_in_fan_out) + 1e-8 + base_layer.weight.data = torch.div(base_layer.weight.data, ia3_l) + + if not self.is_feedforward and (base_layer.bias is not None): + scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape) + base_layer.bias.data = torch.div(base_layer.bias.data, scaling.data + 1e-8) + + def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: + dtype = previous_dtype = x.dtype + + if self.disable_adapters: + if self.merged: + self.unmerge() + result = self.base_layer(x, *args, **kwargs) + elif self.merged: + result = self.base_layer(x, *args, **kwargs) + else: + ia3_scaling = 1 + for active_adapter in self.active_adapters: + if active_adapter not in self.ia3_l.keys(): + continue + dtype = self.ia3_l[active_adapter].dtype + ia3_scaling *= self.ia3_l[active_adapter].flatten() + + if self.is_feedforward: + x = x.to(dtype) + # TODO: weight.dtype can be != self.ia3_l[self.active_adapters].dtype + # e.g. bf16 vs fp32. Is that okay? + interm = (x * ia3_scaling).to(self.get_base_layer().weight.dtype) + result = self.base_layer(interm, *args, **kwargs) + else: + result = self.base_layer(x, *args, **kwargs) + result = result.to(dtype) * ia3_scaling + + result = result.to(previous_dtype) + return result + + +class Conv2d(nn.Module, IA3Layer): + def __init__( + self, + base_layer: nn.Module, + adapter_name: str, + fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out) + is_feedforward: bool = False, # Set to True if the layer is treated as a feedforward layer + init_ia3_weights: bool = True, + **kwargs, + ) -> None: + super().__init__() + IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward) + self.fan_in_fan_out = fan_in_fan_out + self._active_adapter = adapter_name + + self.update_layer(adapter_name, init_ia3_weights) + + def update_layer(self, adapter_name, init_ia3_weights): + # Actual trainable parameters + if self.is_feedforward: + weight = torch.randn((1, self.in_features, 1, 1)) + else: + weight = torch.randn((1, self.out_features, 1, 1)) + self.ia3_l[adapter_name] = nn.Parameter(weight) + if init_ia3_weights: + self.reset_ia3_parameters(adapter_name) + self.to(self.get_base_layer().weight.device) + self.set_adapter(self.active_adapters) + + def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: + """ + Merge the active adapter weights into the base weights + + Args: + safe_merge (`bool`, *optional*): + If True, the merge operation will be performed in a copy of the original weights and check for NaNs + before merging the weights. This is useful if you want to check if the merge operation will produce + NaNs. Defaults to `False`. + adapter_names (`List[str]`, *optional*): + The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults + to `None`. 
+        """
+        adapter_names = check_adapters_to_merge(self, adapter_names)
+        if not adapter_names:
+            # no adapter to merge
+            return
+
+        for active_adapter in adapter_names:
+            if active_adapter in self.ia3_l.keys():
+                base_layer = self.get_base_layer()
+                ia3_scaling = self.ia3_l[active_adapter].data
+                if not self.is_feedforward:
+                    ia3_scaling = ia3_scaling.permute(1, 0, 2, 3)
+
+                if safe_merge:
+                    output_weight = torch.mul(base_layer.weight.data, ia3_scaling).clone()
+
+                    if not torch.isfinite(output_weight).all():
+                        raise ValueError(
+                            f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
+                        )
+
+                    base_layer.weight.data = output_weight
+                else:
+                    base_layer.weight.data = torch.mul(base_layer.weight.data, ia3_scaling)
+
+                if not self.is_feedforward and (base_layer.bias is not None):
+                    scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape)
+                    base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data)
+
+                self.merged_adapters.append(active_adapter)
+
+    def unmerge(self) -> None:
+        """
+        This method unmerges all merged adapter layers from the base weights.
+        """
+        if not self.merged:
+            warnings.warn("Already unmerged. Nothing to do.")
+            return
+
+        warnings.warn("Unmerge result can be inaccurate for (IA)^3.")
+        while len(self.merged_adapters) > 0:
+            active_adapter = self.merged_adapters.pop()
+            if active_adapter in self.ia3_l.keys():
+                base_layer = self.get_base_layer()
+                # divide by (IA)^3 vector. Add tolerance to avoid division by zero
+                ia3_scaling = self.ia3_l[active_adapter].data
+                if not self.is_feedforward:
+                    ia3_scaling = ia3_scaling.permute(1, 0, 2, 3)
+                base_layer.weight.data = torch.div(base_layer.weight.data, ia3_scaling + 1e-8)
+
+                if not self.is_feedforward and (base_layer.bias is not None):
+                    scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape)
+                    # invert the bias merge as well: divide, with the same tolerance
+                    base_layer.bias.data = torch.div(base_layer.bias.data, scaling.data + 1e-8)
+
+    def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
+        dtype = previous_dtype = x.dtype
+
+        if self.disable_adapters:
+            if self.merged:
+                self.unmerge()
+            result = self.base_layer(x, *args, **kwargs)
+        elif self.merged:
+            result = self.base_layer(x, *args, **kwargs)
+        else:
+            ia3_scaling = 1
+            for active_adapter in self.active_adapters:
+                if active_adapter not in self.ia3_l.keys():
+                    continue
+                dtype = self.ia3_l[active_adapter].dtype
+                ia3_scaling *= self.ia3_l[active_adapter]
+
+            if self.is_feedforward:
+                x = x.to(dtype)
+                # TODO: weight.dtype can be != self.ia3_l[self.active_adapters].dtype
+                # e.g. bf16 vs fp32. Is that okay?
+                interm = (x * ia3_scaling).to(self.get_base_layer().weight.dtype)
+                result = self.base_layer(interm, *args, **kwargs)
+            else:
+                result = self.base_layer(x, *args, **kwargs)
+                result = result.to(dtype) * ia3_scaling
+
+        result = result.to(previous_dtype)
+        return result
diff --git a/llmeval-env/lib/python3.10/site-packages/peft/utils/__init__.py b/llmeval-env/lib/python3.10/site-packages/peft/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d84d7759a8230671b8ce63ebf0d9b75d3416add
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/peft/utils/__init__.py
@@ -0,0 +1,51 @@
+# flake8: noqa
+# There's no way to ignore "F401 '...' imported but unused" warnings in this
+# module, but to preserve other warnings. So, don't check this module at all
+
+# coding=utf-8
+# Copyright 2023-present the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# from .config import PeftConfig, PeftType, PromptLearningConfig, TaskType
+from .loftq_utils import replace_lora_weights_loftq
+from .peft_types import PeftType, TaskType
+from .other import (
+    TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING,
+    TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING,
+    TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING,
+    TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING,
+    TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING,
+    CONFIG_NAME,
+    WEIGHTS_NAME,
+    SAFETENSORS_WEIGHTS_NAME,
+    INCLUDE_LINEAR_LAYERS_SHORTHAND,
+    _set_trainable,
+    bloom_model_postprocess_past_key_value,
+    prepare_model_for_kbit_training,
+    shift_tokens_right,
+    transpose,
+    _get_batch_size,
+    _get_submodules,
+    _set_adapter,
+    _freeze_adapter,
+    ModulesToSaveWrapper,
+    _prepare_prompt_learning_config,
+    _is_valid_match,
+    infer_device,
+    get_auto_gptq_quant_linear,
+    get_quantization_config,
+    id_tensor_storage,
+    cast_mixed_precision_params,
+)
+from .save_and_load import get_peft_model_state_dict, set_peft_model_state_dict, load_peft_weights
diff --git a/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6b277aa428b36dd2c8ca84734fc878357d3b8511
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/integrations.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/integrations.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ae591ae7099cf02781542bc3096f59936c0b3bf
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/integrations.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/loftq_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/loftq_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1192b23661fe182f568f789993a82f1bdaed0916
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/loftq_utils.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/merge_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/merge_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ec3c8d08f146e0fa87576221457fbb2a007a04e7
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/merge_utils.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/other.cpython-310.pyc
b/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/other.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..425875d066f59b4e585445493f8dda8cf237526e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/other.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/peft_types.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/peft_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50d25f72b121fb09a255576e51cd00ddc35a852c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/peft_types.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/save_and_load.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/save_and_load.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a7c6ec153812c4868fec6a7d752f2f2f7b4fcb3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/save_and_load.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/utils/constants.py b/llmeval-env/lib/python3.10/site-packages/peft/utils/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..41047431cef3c18c603b4a331b791cbf00f4cdf2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/utils/constants.py @@ -0,0 +1,158 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import torch + + +# needed for prefix-tuning of bloom model +def bloom_model_postprocess_past_key_value(past_key_values): + past_key_values = torch.cat(past_key_values) + total_layers, batch_size, num_attention_heads, num_virtual_tokens, head_dim = past_key_values.shape + keys = past_key_values[: total_layers // 2] + keys = keys.transpose(2, 3).reshape( + total_layers // 2, batch_size * num_attention_heads, head_dim, num_virtual_tokens + ) + values = past_key_values[total_layers // 2 :] + values = values.reshape(total_layers // 2, batch_size * num_attention_heads, num_virtual_tokens, head_dim) + + return tuple(zip(keys, values)) + + +# needed for prefix-tuning of StarCoder models +def starcoder_model_postprocess_past_key_value(past_key_values): + result = [] + for k in past_key_values: + k = k[:, :, 0] + k = k.permute([1, 2, 0, 3]) + k = k.reshape(*k.shape[:-2], -1) + result.append(k) + return tuple(result) + + +TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING = { + "bloom": bloom_model_postprocess_past_key_value, + "gpt_bigcode": starcoder_model_postprocess_past_key_value, +} + + +TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING = { + "t5": ["q", "v"], + "mt5": ["q", "v"], + "bart": ["q_proj", "v_proj"], + "gpt2": ["c_attn"], + "bloom": ["query_key_value"], + "blip-2": ["q", "v", "q_proj", "v_proj"], + "opt": ["q_proj", "v_proj"], + "gptj": ["q_proj", "v_proj"], + "gpt_neox": ["query_key_value"], + "gpt_neo": ["q_proj", "v_proj"], + "bert": ["query", "value"], + "roberta": ["query", "value"], + "xlm-roberta": ["query", "value"], + "electra": ["query", "value"], + "deberta-v2": ["query_proj", "value_proj"], + "deberta": ["in_proj"], + "layoutlm": ["query", "value"], + "llama": ["q_proj", "v_proj"], + "chatglm": ["query_key_value"], + "gpt_bigcode": ["c_attn"], + "mpt": ["Wqkv"], + "RefinedWebModel": ["query_key_value"], + "RefinedWeb": ["query_key_value"], + "falcon": ["query_key_value"], + "btlm": ["c_proj", "c_attn"], + "codegen": ["qkv_proj"], + "mistral": ["q_proj", "v_proj"], + "mixtral": ["q_proj", "v_proj"], + "stablelm": ["q_proj", "v_proj"], + "phi": ["q_proj", "v_proj", "fc1", "fc2"], + "gemma": ["q_proj", "v_proj"], +} + +TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING = { + "t5": ["k", "v", "wo"], + "mt5": ["k", "v", "wi_1"], + "gpt2": ["c_attn", "mlp.c_proj"], + "bloom": ["query_key_value", "mlp.dense_4h_to_h"], + "roberta": ["key", "value", "output.dense"], + "opt": ["q_proj", "k_proj", "fc2"], + "gptj": ["q_proj", "v_proj", "fc_out"], + "gpt_neox": ["query_key_value", "dense_4h_to_h"], + "gpt_neo": ["q_proj", "v_proj", "c_proj"], + "bart": ["q_proj", "v_proj", "fc2"], + "gpt_bigcode": ["c_attn", "mlp.c_proj"], + "llama": ["k_proj", "v_proj", "down_proj"], + "mistral": ["k_proj", "v_proj", "down_proj"], + "mixtral": ["k_proj", "v_proj", "w2"], + "bert": ["key", "value", "output.dense"], + "deberta-v2": ["key_proj", "value_proj", "output.dense"], + "deberta": ["in_proj", "output.dense"], + "RefinedWebModel": ["query_key_value", "dense_4h_to_h"], + "RefinedWeb": ["query_key_value", "dense_4h_to_h"], + "falcon": ["query_key_value", "dense_4h_to_h"], + "phi": ["q_proj", "v_proj", "fc2"], + "gemma": ["q_proj", "v_proj", "down_proj"], +} + +TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING = { + "t5": ["wo"], + "mt5": [], + "gpt2": ["mlp.c_proj"], + "bloom": ["mlp.dense_4h_to_h"], + "roberta": ["output.dense"], + "opt": ["fc2"], + "gptj": ["fc_out"], + "gpt_neox": ["dense_4h_to_h"], + "gpt_neo": ["c_proj"], + "bart": ["fc2"], + "gpt_bigcode": ["mlp.c_proj"], + 
"llama": ["down_proj"], + "mistral": ["down_proj"], + "mixtral": ["w2"], + "bert": ["output.dense"], + "deberta-v2": ["output.dense"], + "deberta": ["output.dense"], + "RefinedWeb": ["dense_4h_to_h"], + "RefinedWebModel": ["dense_4h_to_h"], + "falcon": ["dense_4h_to_h"], + "phi": ["fc2"], + "gemma": ["down_proj"], +} + +TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING = { + "t5": ["q", "k", "v", "o", "wi", "wo"], + "mt5": ["q", "k", "v", "o", "wi_0", "wi_1", "wo"], + "bart": ["q_proj", "k_proj", "v_proj", "out_proj", "fc1", "fc2"], + "gpt2": ["c_attn"], + "bloom": ["query_key_value"], + "opt": ["q_proj", "k_proj", "v_proj", "out_proj", "fc1", "fc2"], + "gptj": ["q_proj", "v_proj"], + "gpt_neox": ["query_key_value"], + "gpt_neo": ["q_proj", "v_proj"], + "llama": ["q_proj", "v_proj"], + "bert": ["query", "value"], + "roberta": ["query", "key", "value", "dense"], + # "xlm-roberta": ["query", "value"], + # "electra": ["query", "value"], + "deberta-v2": ["query_proj", "key_proj", "value_proj", "dense"], + "gpt_bigcode": ["c_attn"], + "deberta": ["in_proj"], + # "layoutlm": ["query", "value"], +} + +WEIGHTS_NAME = "adapter_model.bin" +SAFETENSORS_WEIGHTS_NAME = "adapter_model.safetensors" +CONFIG_NAME = "adapter_config.json" +EMBEDDING_LAYER_NAMES = ["embed_tokens", "lm_head"] +INCLUDE_LINEAR_LAYERS_SHORTHAND = "all-linear" +TOKENIZER_CONFIG_NAME = "tokenizer_config.json" diff --git a/llmeval-env/lib/python3.10/site-packages/peft/utils/integrations.py b/llmeval-env/lib/python3.10/site-packages/peft/utils/integrations.py new file mode 100644 index 0000000000000000000000000000000000000000..76693701de2f81fdf7535b55d9b7cafbcb75df17 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/utils/integrations.py @@ -0,0 +1,69 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from contextlib import contextmanager + +import packaging.version +import torch +import transformers + + +@contextmanager +def gather_params_ctx(module: torch.nn.Module, modifier_rank: int = 0): + """Call DeepSpeed GatheredParameters context manager if DeepSpeed is enabled, otherwise do nothing.""" + if packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.33.0"): + from transformers.integrations import is_deepspeed_zero3_enabled + else: + from transformers.deepspeed import is_deepspeed_zero3_enabled + + if not is_deepspeed_zero3_enabled(): + yield + return + + import deepspeed + + params_to_gather = module.parameters() + with deepspeed.zero.GatheredParameters(params_to_gather, modifier_rank=modifier_rank): + yield + return + + +def dequantize_bnb_weight(weight: torch.nn.Parameter, state=None): + """ + Helper function to dequantize 4bit or 8bit bnb weights. + + If the weight is not a bnb quantized weight, it will be returned as is. 
+ """ + if not isinstance(weight, torch.nn.Parameter): + raise TypeError(f"Input weight should be of type nn.Parameter, got {type(weight)} instead") + + cls_name = weight.__class__.__name__ + if cls_name not in ("Params4bit", "Int8Params"): + return weight + + import bitsandbytes as bnb + + if cls_name == "Params4bit": + return bnb.functional.dequantize_4bit(weight.data, weight.quant_state) + + if state.SCB is None: + state.SCB = weight.SCB + + im = torch.eye(weight.data.shape[-1]).contiguous().half().to(weight.device) + im, imt, SCim, SCimt, coo_tensorim = bnb.functional.double_quant(im) + im, Sim = bnb.functional.transform(im, "col32") + if state.CxB is None: + state.CxB, state.SB = bnb.functional.transform(weight.data, to_order=state.formatB) + out32, Sout32 = bnb.functional.igemmlt(im, state.CxB, Sim, state.SB) + return bnb.functional.mm_dequant(out32, Sout32, SCim, state.SCB, bias=None).t() diff --git a/llmeval-env/lib/python3.10/site-packages/peft/utils/loftq_utils.py b/llmeval-env/lib/python3.10/site-packages/peft/utils/loftq_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..20bbe20adacef45f8e50cd1a4f09200ad741b997 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/utils/loftq_utils.py @@ -0,0 +1,407 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Reference code: https://github.com/yxli2123/LoftQ/blob/main/utils.py +# Reference paper: https://arxiv.org/abs/2310.08659 + +from __future__ import annotations + +import logging +import os +from typing import Callable, Optional, Union + +import torch +from huggingface_hub import snapshot_download +from huggingface_hub.utils import LocalEntryNotFoundError +from safetensors import SafetensorError, safe_open +from transformers.utils import cached_file +from transformers.utils.hub import get_checkpoint_shard_files + +from peft.import_utils import is_bnb_4bit_available, is_bnb_available + + +if is_bnb_available(): + import bitsandbytes as bnb + + +class NFQuantizer: + def __init__(self, num_bits=2, device="cuda", method="normal", block_size=64, *args, **kwargs): + super().__init__(*args, **kwargs) + self.num_bits = num_bits + self.device = device + self.method = method + self.block_size = block_size + if self.method == "normal": + self.norm_lookup_table = self.create_normal_map(num_bits=self.num_bits) + self.norm_lookup_table = self.norm_lookup_table.to(device) + elif self.method == "uniform": + self.norm_lookup_table = self.create_uniform_map(num_bits=self.num_bits) + self.norm_lookup_table = self.norm_lookup_table.to(device) + else: + raise NotImplementedError("Other quantization methods not supported yet.") + + @staticmethod + def create_uniform_map(symmetric=False, num_bits=4): + if symmetric: + # print("symmetric uniform quantization") + negative = torch.linspace(-1, 0, 2 ** (num_bits - 1)) + positive = torch.linspace(0, 1, 2 ** (num_bits - 1)) + table = torch.cat([negative, positive[1:]]) + else: + # print("asymmetric uniform quantization") + table = torch.linspace(-1, 1, 2**num_bits) + return table + + @staticmethod + def create_normal_map(offset=0.9677083, symmetric=False, num_bits=2): + try: + from scipy.stats import norm + except ImportError: + raise ImportError("The required package 'scipy' is not installed. 
Please install it to continue.")
+
+        variations = 2**num_bits
+        if symmetric:
+            v = norm.ppf(torch.linspace(1 - offset, offset, variations + 1)).tolist()
+            values = []
+            for index in range(len(v) - 1):
+                values.append(0.5 * v[index] + 0.5 * v[index + 1])
+            v = values
+        else:
+            # one more positive value, this is an asymmetric type
+            v1 = norm.ppf(torch.linspace(offset, 0.5, variations // 2 + 1)[:-1]).tolist()
+            v2 = [0]
+            v3 = (-norm.ppf(torch.linspace(offset, 0.5, variations // 2)[:-1])).tolist()
+            v = v1 + v2 + v3
+
+        values = torch.Tensor(v)
+        values = values.sort().values
+        values /= values.max()
+        return values
+
+    def quantize_tensor(self, weight):
+        max_abs = torch.abs(weight).max()
+        weight_normed = weight / max_abs
+
+        weight_normed_expanded = weight_normed.unsqueeze(-1)
+
+        # Reshape L to have the same number of dimensions as X_expanded
+        L_reshaped = torch.tensor(self.norm_lookup_table).reshape(1, -1)
+
+        # Calculate the absolute difference between X_expanded and L_reshaped
+        abs_diff = torch.abs(weight_normed_expanded - L_reshaped)
+
+        # Find the index of the minimum absolute difference for each element
+        qweight = torch.argmin(abs_diff, dim=-1)
+        return qweight, max_abs
+
+    def dequantize_tensor(self, qweight, max_abs):
+        qweight_flatten = qweight.flatten()
+
+        weight_normed = self.norm_lookup_table[qweight_flatten]
+        weight = weight_normed * max_abs
+
+        weight = weight.reshape(qweight.shape)
+
+        return weight
+
+    def quantize_block(self, weight):
+        if len(weight.shape) != 2:
+            raise ValueError(f"Only 2D matrices are supported, but your input has {len(weight.shape)} dimensions.")
+        if weight.shape[0] * weight.shape[1] % self.block_size != 0:
+            raise ValueError(
+                f"Weight with shape ({weight.shape[0]} x {weight.shape[1]}) "
+                f"is not divisible by block size {self.block_size}."
+            )
+
+        M, N = weight.shape
+        device = weight.device
+
+        # Quantization
+        weight_flatten = weight.flatten()  # (M*N, )
+        weight_block = weight_flatten.reshape(-1, self.block_size)  # (L, B), L = M * N / B
+        if self.method == "normal":
+            weight_max = weight_block.abs().max(dim=-1)[0]  # (L, 1)
+        elif self.method == "uniform":
+            weight_max = weight_block.mean(dim=-1) + 2.5 * weight_block.std(dim=-1)
+        else:
+            raise NotImplementedError("Method not supported yet.")
+        weight_max = weight_max.unsqueeze(-1)
+        weight_divabs = weight_block / weight_max  # (L, B)
+        weight_divabs = weight_divabs.unsqueeze(-1)  # (L, B, 1)
+        L_reshaped = self.norm_lookup_table.reshape(1, -1)  # (1, 2**K)
+
+        abs_diff = torch.abs(weight_divabs - L_reshaped)  # (L, B, 2**K)
+        qweight = torch.argmin(abs_diff, dim=-1)  # (L, B)
+
+        # Pack multiple k-bit into uint8
+        qweight = qweight.reshape(-1, 8 // self.num_bits)
+        qweight_pack = torch.zeros((M * N // 8 * self.num_bits, 1), dtype=torch.uint8, device=device)
+
+        # data format example:
+        # [1, 0, 3, 2] or [01, 00, 11, 10] -> [10110001], LIFO
+        for i in range(8 // self.num_bits):
+            qweight[:, i] = qweight[:, i] << i * self.num_bits
+            qweight_pack[:, 0] |= qweight[:, i]
+
+        return qweight_pack, weight_max, weight.shape
+
+    def dequantize_block(self, qweight, weight_max, weight_shape):
+        # unpack weight
+        device = qweight.device
+        weight = torch.zeros((qweight.shape[0], 8 // self.num_bits), dtype=torch.float32, device=device)
+        for i in range(8 // self.num_bits):
+            lookup_table_idx = qweight.to(torch.long) % 2**self.num_bits  # get the rightmost num_bits bits
+            lookup_table_idx = lookup_table_idx.to(torch.long)
+            weight[:, i] = self.norm_lookup_table[lookup_table_idx].squeeze()
+            qweight = qweight >> self.num_bits  # shift the packed data right by num_bits
+
+        weight_block = weight.reshape(-1, self.block_size)
+        weight = weight_block * weight_max
+        weight = weight.reshape(weight_shape)
+
+        return weight
+
+
+def _low_rank_decomposition(weight, reduced_rank=32):
+    """
+    Decompose a 2D matrix into low-rank factors via SVD.
+
+    Args:
+        weight (`torch.Tensor`): The matrix to decompose, of shape (H, W).
+        reduced_rank (`int`): The final rank of the decomposition.
+
+    Returns:
+        A dict containing the low-rank factors `L` and `R` together with the full SVD results `U`, `S` and `Vh`.
+    """
+    matrix_dimension = len(weight.size())
+    if matrix_dimension != 2:
+        raise ValueError(f"Only 2D matrices are supported, but your input has {matrix_dimension} dimensions.")
+
+    # Use SVD to decompose a matrix, default full_matrices is False to save parameters
+    U, S, Vh = torch.linalg.svd(weight, full_matrices=False)
+
+    L = U @ (torch.sqrt(torch.diag(S)[:, 0:reduced_rank]))
+    R = torch.sqrt(torch.diag(S)[0:reduced_rank, :]) @ Vh
+
+    return {"L": L, "R": R, "U": U, "S": S, "Vh": Vh, "reduced_rank": reduced_rank}
+
+
+@torch.no_grad()
+def loftq_init(weight: Union[torch.Tensor, torch.nn.Parameter], num_bits: int, reduced_rank: int, num_iter=1):
+    if num_bits not in [2, 4, 8]:
+        raise ValueError("Only 2, 4 and 8 bit quantization is supported")
+    if num_iter <= 0:
+        raise ValueError("Number of iterations must be greater than 0")
+
+    out_feature, in_feature = weight.size()
+    device = weight.device
+    dtype = weight.dtype
+
+    logging.info(
+        f"Weight: ({out_feature}, {in_feature}) | Rank: {reduced_rank} "
+        f"| Num Iter: {num_iter} | Num Bits: {num_bits}"
+    )
+    if not is_bnb_4bit_available() or num_bits in [2, 8]:
+        quantizer = NFQuantizer(num_bits=num_bits, device=device, method="normal", block_size=64)
+        compute_device = device
+    else:
+        compute_device = "cuda"
+
+    weight = weight.to(device=compute_device, dtype=torch.float32)
+    res = weight.clone()
+    for i in range(num_iter):
+        torch.cuda.empty_cache()
+        # Quantization
+        if num_bits == 4
and is_bnb_4bit_available():
+            qweight = bnb.nn.Params4bit(
+                res.to("cpu"), requires_grad=False, compress_statistics=False, quant_type="nf4"
+            ).to(compute_device)
+            dequantized_weight = bnb.functional.dequantize_4bit(qweight.data, qweight.quant_state)
+        else:
+            quantized_weight, max_abs, shape = quantizer.quantize_block(res)
+            dequantized_weight = quantizer.dequantize_block(quantized_weight, max_abs, shape)
+
+        res = weight - dequantized_weight
+
+        # Decompose the residual by SVD
+        output = _low_rank_decomposition(res, reduced_rank=reduced_rank)
+        L, R, reduced_rank = output["L"], output["R"], output["reduced_rank"]
+        res = weight - torch.mm(L, R)
+
+    lora_A, lora_B = R, L
+
+    return dequantized_weight.to(device=device, dtype=dtype), lora_A, lora_B
+
+
+@torch.no_grad()
+def _loftq_init_new(qweight, weight, num_bits: int, reduced_rank: int):
+    if num_bits != 4:
+        raise ValueError("Only 4 bit quantization supported at the moment.")
+    if not is_bnb_4bit_available():
+        raise ValueError("bitsandbytes 4bit quantization is not available.")
+
+    compute_device = "cuda"
+    dequantized_weight = bnb.functional.dequantize_4bit(qweight.data, qweight.quant_state)
+
+    weight = weight.to(device=compute_device, dtype=torch.float32)
+    residual = weight - dequantized_weight
+    torch.cuda.empty_cache()
+    # Decompose the residual by SVD
+    output = _low_rank_decomposition(residual, reduced_rank=reduced_rank)
+    L, R, reduced_rank = output["L"], output["R"], output["reduced_rank"]
+    return R, L
+
+
+class _SafetensorLoader:
+    """
+    Simple utility class that loads tensors with safetensors from a single file or sharded files.
+
+    Takes care of file name normalization etc.
+
+    """
+
+    def __init__(self, peft_model, model_path):
+        if model_path is None:
+            try:
+                model_path = snapshot_download(peft_model.base_model.config._name_or_path, local_files_only=True)
+            except AttributeError as exc:
+                raise ValueError(
+                    "The provided model does not appear to be a transformers model. In this case, you must pass the "
+                    "model_path to the safetensors file."
+                ) from exc
+            except LocalEntryNotFoundError as exc:
+                raise ValueError(
+                    "The model.safetensors file must be present on disk, but it could not be found."
+                ) from exc
+
+        suffix = "model.safetensors"
+        if not model_path.endswith(suffix):
+            model_path = os.path.join(model_path, suffix)
+
+        self.model_path = model_path
+        self.base_model_prefix = getattr(peft_model.get_base_model(), "base_model_prefix", None)
+        self.prefix = "base_model.model."
+        self.is_sharded = False
+        self.weight_map = None
+
+        if not os.path.exists(model_path):
+            # check if the file is sharded
+            par_dir = model_path.rpartition(os.path.sep)[0]
+            try:
+                resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(
+                    par_dir, cached_file(par_dir, "model.safetensors.index.json")
+                )
+            except OSError as exc:
+                raise FileNotFoundError(
+                    f"Could not find file for {model_path}, ensure that there is a (sharded) safetensors file of the model."
+                ) from exc
+
+            self.is_sharded = True
+            # maps from 'model-X-of-Y.safetensors' to full file path
+            file_map = {k.rpartition(os.path.sep)[-1]: k for k in resolved_archive_file}
+            self.weight_map = {k: file_map[v] for k, v in sharded_metadata["weight_map"].items()}
+
+    def get_tensor(self, name):
+        if not self.is_sharded:
+            file_path = self.model_path
+        else:
+            file_path = self.weight_map[name]
+
+        with safe_open(file_path, framework="pt", device="cpu") as f:
+            try:
+                tensor = f.get_tensor(name)
+            except SafetensorError as exc:
+                # no matching key found, we probably need to remove the base model prefix
+                if self.base_model_prefix:
+                    # remove 1 extra character for "."
+                    name = name[len(self.base_model_prefix) + 1 :]
+                    tensor = f.get_tensor(name)
+                else:
+                    raise exc
+        return tensor
+
+
+@torch.no_grad()
+def replace_lora_weights_loftq(
+    peft_model,
+    model_path: Optional[str] = None,
+    adapter_name: str = "default",
+    callback: Optional[Callable[[torch.nn.Module, str], bool]] = None,
+):
+    """
+    Replace the LoRA weights of a model quantized with bitsandbytes, using the LoftQ technique.
+
+    The replacement is done on the fly by loading in the non-quantized weights from a locally stored safetensors model
+    file and initializing the LoRA weights such that the quantization error between the original and quantized weights
+    is minimized.
+
+    As lazy loading is not possible with pickle, normal PyTorch checkpoint files cannot be supported.
+
+    Depending on the model size, calling this function may take some time to finish.
+
+    Args:
+        peft_model (`PeftModel`):
+            The model to replace the weights of. Must be a quantized PEFT model with LoRA layers.
+        model_path (`Optional[str]`):
+            The path to the model safetensors file. If the model is a Hugging Face model, this will be inferred from
+            the model's config. Otherwise, it must be provided.
+        adapter_name (`str`):
+            The name of the adapter to replace the weights of. The default adapter name is "default".
+        callback (`Optional[Callable[[PeftModel, str], bool]]`):
+            A callback function that will be called after each module is replaced. The callback function should take
+            the model and the name of the current module as input and return a boolean indicating whether the
+            replacement should be kept. If the callback returns False, the replacement will be rolled back. This can be
+            very useful to confirm that the LoftQ initialization actually decreases the quantization error of the
+            model. As an example, this callback could generate logits for a given input and compare them with the
+            logits from the original, non-quantized model with the same input, and only return `True` if there is an
+            improvement. As this is a greedy optimization, it's possible that calling this function multiple times
+            yields incremental improvements.
+    """
+    if not is_bnb_4bit_available():
+        raise ValueError("bitsandbytes must be installed and the model must be quantized in 4bits.")
+
+    from peft.tuners.lora import Linear4bit
+
+    # model_path = _check_model_path_loftq(model_path, peft_model)
+    prefix = "base_model.model."
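+    # note: every module of a PeftModel is nested under this prefix; it is stripped from each module name below so
+    # that the corresponding tensor can be looked up in the base model's safetensors checkpoint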
+    any_match = False
+    safetensor_loader = _SafetensorLoader(peft_model, model_path)
+
+    # if too slow, consider adding tqdm as an option
+    for name, module in peft_model.named_modules():
+        if not isinstance(module, Linear4bit):
+            continue
+
+        if not name.startswith(prefix):
+            raise TypeError("The passed model does not appear to be a valid PeftModel")
+
+        any_match = True
+        name = name[len(prefix) :]
+        tensor = safetensor_loader.get_tensor(name + ".weight")
+
+        reduced_rank = module.r[adapter_name]
+        lora_A, lora_B = _loftq_init_new(module.weight, tensor, num_bits=4, reduced_rank=reduced_rank)
+        if not callback:
+            module.lora_A[adapter_name].weight.data = lora_A
+            module.lora_B[adapter_name].weight.data = lora_B
+            continue
+
+        lora_A_before = module.lora_A[adapter_name].weight.data
+        lora_B_before = module.lora_B[adapter_name].weight.data
+
+        module.lora_A[adapter_name].weight.data = lora_A
+        module.lora_B[adapter_name].weight.data = lora_B
+        should_replace = callback(peft_model, name)
+        if not should_replace:
+            # roll back
+            module.lora_A[adapter_name].weight.data = lora_A_before
+            module.lora_B[adapter_name].weight.data = lora_B_before
+
+        del lora_A_before, lora_B_before
+
+    if not any_match:
+        raise ValueError("No bnb LoRA module found on the model")
diff --git a/llmeval-env/lib/python3.10/site-packages/peft/utils/merge_utils.py b/llmeval-env/lib/python3.10/site-packages/peft/utils/merge_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..900cb878e2d785e5e800caa58642f0532f4d574b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/peft/utils/merge_utils.py
@@ -0,0 +1,268 @@
+# Copyright 2024-present the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import warnings
+from typing import List, Literal
+
+import torch
+
+
+def reshape_weight_task_tensors(task_tensors, weights):
+    """
+    Reshapes `weights` to match the shape of `task_tensors` by unsqueezing in the remaining dimensions.
+
+    Args:
+        task_tensors (`torch.Tensor`): The tensors that will be used to reshape `weights`.
+        weights (`torch.Tensor`): The tensor to be reshaped.
+
+    Returns:
+        `torch.Tensor`: The reshaped tensor.
+    """
+    new_shape = weights.shape + (1,) * (task_tensors.dim() - weights.dim())
+    weights = weights.view(new_shape)
+    return weights
+
+
+def magnitude_based_pruning(tensor: torch.Tensor, density: float) -> torch.Tensor:
+    """
+    Prune the smallest values of the task tensors and retain the top-k values based on the specified fraction
+    `density`.
+
+    Args:
+        tensor (`torch.Tensor`): The tensor to prune.
+        density (`float`): The fraction of values to preserve. Should be in [0,1].
+
+    Returns:
+        `torch.Tensor`: The tensor with the pruned weights.
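+
+    Example:
+
+        An illustrative call (with `density=0.5`, only the two largest-magnitude entries survive):
+
+        ```py
+        >>> magnitude_based_pruning(torch.tensor([0.1, -2.0, 0.3, 4.0]), density=0.5)
+        tensor([ 0., -2.,  0.,  4.])
+        ```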
+ """ + mask = torch.zeros_like(tensor).reshape(-1) + k = int(density * tensor.numel()) + top_k = torch.topk(tensor.abs().reshape(-1), k=k, largest=True) + mask[top_k[1]] = 1 + return tensor * mask.reshape(tensor.shape) + + +def random_pruning(tensor: torch.Tensor, density: float, rescale: bool) -> torch.Tensor: + """ + Prune random values based on the specified fraction `density`. + + Args: + tensor (`torch.Tensor`):The tensor to prune. + density (`float`):The fraction of values to preserve. Should be in [0,1]. + rescale (`bool`):Whether to rescale the result to preserve the expected value of the original tensor. + + Returns: + `torch.Tensor`: The pruned tensor. + """ + mask = torch.bernoulli(torch.full_like(input=tensor, fill_value=density)) + pruned_tensor = tensor * mask + if rescale: + torch.div(input=pruned_tensor, other=density) + return pruned_tensor + + +def prune( + tensor: torch.Tensor, density: float, method: Literal["magnitude", "random"], rescale: bool = False +) -> torch.Tensor: + """ + Prune the values of task tensors based on the `method`. + + Args: + tensor (`torch.Tensor`):The tensor to prune. + density (`float`):The fraction of values to preserve. Should be in [0,1]. + method (`str`):The method to use to prune. Should be one of ["magnitude", "random"]. + rescale (`bool`):Whether to rescale the result to preserve the expected value of the original tensor. + + Returns: + `torch.Tensor`: The pruned tensor. + """ + if density >= 1: + warnings.warn(f"The density {density} is greater than or equal to 1, no pruning will be performed.") + return tensor + elif density < 0: + raise ValueError(f"Density should be >= 0, got {density}") + if method == "magnitude": + return magnitude_based_pruning(tensor, density) + elif method == "random": + return random_pruning(tensor, density, rescale=rescale) + else: + raise ValueError(f"Unknown method {method}") + + +def calculate_majority_sign_mask( + tensor: torch.Tensor, method: Literal["total", "frequency"] = "total" +) -> torch.Tensor: + """ + Get the mask of the majority sign across the task tensors. Task tensors are stacked on dimension 0. + + Args: + tensor (`torch.Tensor`):The tensor to get the mask from. + method (`str`):The method to use to get the mask. Should be one of ["total", "frequency"]. + + Returns: + `torch.Tensor`: The majority sign mask. + """ + + sign = tensor.sign() + if method == "total": + sign_magnitude = tensor.sum(dim=0) + elif method == "frequency": + sign_magnitude = sign.sum(dim=0) + else: + raise RuntimeError(f'Unimplemented mask method "{method}"') + majority_sign = torch.where(sign_magnitude >= 0, 1, -1) + return sign == majority_sign + + +def disjoint_merge(task_tensors: torch.Tensor, majority_sign_mask: torch.Tensor) -> torch.Tensor: + """ + Merge the task tensors using disjoint merge. + + Args: + task_tensors (`torch.Tensor`):The task tensors to merge. + majority_sign_mask (`torch.Tensor`):The mask of the majority sign across the task tensors. + + Returns: + `torch.Tensor`: The merged tensor. + """ + mixed_task_tensors = (task_tensors * majority_sign_mask).sum(dim=0) + num_params_preserved = majority_sign_mask.sum(dim=0) + return mixed_task_tensors / torch.clamp(num_params_preserved, min=1.0) + + +def task_arithmetic(task_tensors: List[torch.Tensor], weights: torch.Tensor) -> torch.Tensor: + """ + Merge the task tensors using `task arithmetic`. + + Args: + task_tensors(`List[torch.Tensor]`):The task tensors to merge. + weights (`torch.Tensor`):The weights of the task tensors. 
+    """
+    task_tensors = torch.stack(task_tensors, dim=0)
+    # weighted task tensors
+    weights = reshape_weight_task_tensors(task_tensors, weights)
+    weighted_task_tensors = task_tensors * weights
+    mixed_task_tensors = weighted_task_tensors.sum(dim=0)
+    return mixed_task_tensors
+
+
+def magnitude_prune(task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float) -> torch.Tensor:
+    """
+    Merge the task tensors using `task arithmetic`, after pruning the smallest-magnitude values of each task tensor.
+
+    Args:
+        task_tensors (`List[torch.Tensor]`): The task tensors to merge.
+        weights (`torch.Tensor`): The weights of the task tensors.
+        density (`float`): The fraction of values to preserve. Should be in [0,1].
+
+    Returns:
+        `torch.Tensor`: The merged tensor.
+    """
+    # sparsify
+    task_tensors = [prune(tensor, density, method="magnitude") for tensor in task_tensors]
+    task_tensors = torch.stack(task_tensors, dim=0)
+    # weighted task tensors
+    weights = reshape_weight_task_tensors(task_tensors, weights)
+    weighted_task_tensors = task_tensors * weights
+    mixed_task_tensors = weighted_task_tensors.sum(dim=0)
+    return mixed_task_tensors
+
+
+def ties(
+    task_tensors: List[torch.Tensor],
+    weights: torch.Tensor,
+    density: float,
+    majority_sign_method: Literal["total", "frequency"] = "total",
+) -> torch.Tensor:
+    """
+    Merge the task tensors using `ties`.
+
+    Args:
+        task_tensors (`List[torch.Tensor]`): The task tensors to merge.
+        weights (`torch.Tensor`): The weights of the task tensors.
+        density (`float`): The fraction of values to preserve. Should be in [0,1].
+        majority_sign_method (`str`):
+            The method to use to get the majority sign mask. Should be one of ["total", "frequency"].
+
+    Returns:
+        `torch.Tensor`: The merged tensor.
+    """
+    # sparsify
+    task_tensors = [prune(tensor, density, method="magnitude") for tensor in task_tensors]
+    task_tensors = torch.stack(task_tensors, dim=0)
+    # Elect Sign
+    majority_sign_mask = calculate_majority_sign_mask(task_tensors, method=majority_sign_method)
+    # weighted task tensors
+    weights = reshape_weight_task_tensors(task_tensors, weights)
+    weighted_task_tensors = task_tensors * weights
+    # Disjoint Merge
+    mixed_task_tensors = disjoint_merge(weighted_task_tensors, majority_sign_mask)
+    return mixed_task_tensors
+
+
+def dare_linear(task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float) -> torch.Tensor:
+    """
+    Merge the task tensors using `dare linear`.
+
+    Args:
+        task_tensors (`List[torch.Tensor]`): The task tensors to merge.
+        weights (`torch.Tensor`): The weights of the task tensors.
+        density (`float`): The fraction of values to preserve. Should be in [0,1].
+
+    Returns:
+        `torch.Tensor`: The merged tensor.
+    """
+    # sparsify
+    task_tensors = [prune(tensor, density, method="random", rescale=True) for tensor in task_tensors]
+    task_tensors = torch.stack(task_tensors, dim=0)
+    # weighted task tensors
+    weights = reshape_weight_task_tensors(task_tensors, weights)
+    weighted_task_tensors = task_tensors * weights
+    mixed_task_tensors = weighted_task_tensors.sum(dim=0)
+    return mixed_task_tensors
+
+
+def dare_ties(
+    task_tensors: List[torch.Tensor],
+    weights: torch.Tensor,
+    density: float,
+    majority_sign_method: Literal["total", "frequency"] = "total",
+) -> torch.Tensor:
+    """
+    Merge the task tensors using `dare ties`.
+
+    Args:
+        task_tensors (`List[torch.Tensor]`): The task tensors to merge.
+        weights (`torch.Tensor`): The weights of the task tensors.
+        density (`float`): The fraction of values to preserve. Should be in [0,1].
+ majority_sign_method (`str`): + The method to use to get the majority sign mask. Should be one of ["total", "frequency"]. + + Returns: + `torch.Tensor`: The merged tensor. + """ + # sparsify + task_tensors = [prune(tensor, density, method="random", rescale=True) for tensor in task_tensors] + task_tensors = torch.stack(task_tensors, dim=0) + # Elect Sign + majority_sign_mask = calculate_majority_sign_mask(task_tensors, method=majority_sign_method) + # weighted task tensors + weights = reshape_weight_task_tensors(task_tensors, weights) + weighted_task_tensors = task_tensors * weights + # Disjoint Merge + mixed_task_tensors = disjoint_merge(weighted_task_tensors, majority_sign_mask) + return mixed_task_tensors diff --git a/llmeval-env/lib/python3.10/site-packages/peft/utils/other.py b/llmeval-env/lib/python3.10/site-packages/peft/utils/other.py new file mode 100644 index 0000000000000000000000000000000000000000..05f8d6b12959a483e038ea264de3be075d9446db --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/utils/other.py @@ -0,0 +1,586 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import copy +import inspect +import os +import warnings +from contextlib import nullcontext +from typing import Optional, Tuple + +import accelerate +import torch +from accelerate.hooks import add_hook_to_module, remove_hook_from_module +from accelerate.utils import is_npu_available, is_xpu_available +from huggingface_hub import file_exists +from huggingface_hub.utils import EntryNotFoundError, HFValidationError +from safetensors.torch import storage_ptr, storage_size + +from ..import_utils import is_auto_gptq_available, is_torch_tpu_available +from .constants import ( + CONFIG_NAME, + EMBEDDING_LAYER_NAMES, + INCLUDE_LINEAR_LAYERS_SHORTHAND, + SAFETENSORS_WEIGHTS_NAME, + TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, + TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING, + TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING, + TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, + TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, + WEIGHTS_NAME, + bloom_model_postprocess_past_key_value, + starcoder_model_postprocess_past_key_value, +) + + +__all__ = [ + "CONFIG_NAME", + "EMBEDDING_LAYER_NAMES", + "SAFETENSORS_WEIGHTS_NAME", + "TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING", + "TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING", + "TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING", + "TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING", + "TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING", + "WEIGHTS_NAME", + "INCLUDE_LINEAR_LAYERS_SHORTHAND", + "bloom_model_postprocess_past_key_value", + "starcoder_model_postprocess_past_key_value", +] + + +# Get current device name based on available devices +def infer_device() -> str: + if torch.cuda.is_available(): + return "cuda" + elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available(): + return "mps" + elif is_xpu_available(): + return "xpu" + elif 
is_npu_available():
+        return "npu"
+    return "cpu"
+
+
+def prepare_model_for_kbit_training(model, use_gradient_checkpointing=True, gradient_checkpointing_kwargs=None):
+    r"""
+    Note this method only works for `transformers` models.
+
+    This method wraps the entire protocol for preparing a model before running training. This includes:
+        1- casting the layernorm in fp32, 2- making the output embedding layer require grads, 3- upcasting the
+        lm head to fp32
+
+    Args:
+        model (`transformers.PreTrainedModel`):
+            The loaded model from `transformers`
+        use_gradient_checkpointing (`bool`, *optional*, defaults to `True`):
+            If True, use gradient checkpointing to save memory at the expense of slower backward pass.
+        gradient_checkpointing_kwargs (`dict`, *optional*, defaults to `None`):
+            Keyword arguments to pass to the gradient checkpointing function, please refer to the documentation of
+            `torch.utils.checkpoint.checkpoint` for more details about the arguments that you can pass to that method.
+            Note this is only available in the latest transformers versions (> 4.34.1).
+    """
+    loaded_in_kbit = getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False)
+    is_gptq_quantized = getattr(model, "quantization_method", None) == "gptq"
+    is_aqlm_quantized = getattr(model, "quantization_method", None) == "aqlm"
+    if gradient_checkpointing_kwargs is None:
+        gradient_checkpointing_kwargs = {}
+
+    for name, param in model.named_parameters():
+        # freeze base model's layers
+        param.requires_grad = False
+
+    if not is_gptq_quantized and not is_aqlm_quantized:
+        # cast all non INT8 parameters to fp32
+        for param in model.parameters():
+            if (
+                (param.dtype == torch.float16) or (param.dtype == torch.bfloat16)
+            ) and param.__class__.__name__ != "Params4bit":
+                param.data = param.data.to(torch.float32)
+
+    if (loaded_in_kbit or is_gptq_quantized or is_aqlm_quantized) and use_gradient_checkpointing:
+        # When having `use_reentrant=False` + gradient_checkpointing, there is no need for this hack
+        if "use_reentrant" not in gradient_checkpointing_kwargs or gradient_checkpointing_kwargs["use_reentrant"]:
+            # For backward compatibility
+            if hasattr(model, "enable_input_require_grads"):
+                model.enable_input_require_grads()
+            else:
+
+                def make_inputs_require_grad(module, input, output):
+                    output.requires_grad_(True)
+
+                model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
+
+        # To support older transformers versions, check if the model supports gradient_checkpointing_kwargs
+        _supports_gc_kwargs = "gradient_checkpointing_kwargs" in list(
+            inspect.signature(model.gradient_checkpointing_enable).parameters
+        )
+
+        if not _supports_gc_kwargs and len(gradient_checkpointing_kwargs) > 0:
+            warnings.warn(
+                "gradient_checkpointing_kwargs is not supported in this version of transformers. The passed kwargs will be ignored."
+                " If you want to use that feature, please upgrade to the latest version of transformers.",
+                FutureWarning,
+            )
+
+        gc_enable_kwargs = (
+            {} if not _supports_gc_kwargs else {"gradient_checkpointing_kwargs": gradient_checkpointing_kwargs}
+        )
+
+        # enable gradient checkpointing for memory efficiency
+        model.gradient_checkpointing_enable(**gc_enable_kwargs)
+    return model
+
+
+# copied from transformers.models.bart.modeling_bart
+def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
+    """
+    Shift input ids one token to the right.
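+    For example, with `decoder_start_token_id=0`, the batch `[[5, 6, 7]]` becomes `[[0, 5, 6]]`.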
+ + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): input ids + pad_token_id (`int`): The id of the `padding` token. + decoder_start_token_id (`int`): The id of the `start` token. + """ + shifted_input_ids = input_ids.new_zeros(input_ids.shape) + shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() + shifted_input_ids[:, 0] = decoder_start_token_id + + if pad_token_id is None: + raise ValueError("self.model.config.pad_token_id has to be defined.") + # replace possible -100 values in labels by `pad_token_id` + shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) + + return shifted_input_ids + + +class ModulesToSaveWrapper(torch.nn.Module): + def __init__(self, module_to_save, adapter_name): + super().__init__() + self.original_module = module_to_save + self.modules_to_save = torch.nn.ModuleDict({}) + self._active_adapter = adapter_name + self._disable_adapters = False + self.update(adapter_name) + self.check_module() + + def check_module(self): + """Perform some sanity checks on the module to ensure that it works""" + # Try to anticipate some modules that users could try to target that would not work. + # Note: It's not possible to check hasattr(module, "forward"), since that returns True for ModuleDict and + # ModuleList, even though their forward methods cannot be called + forbidden_classes = (torch.nn.ModuleDict, torch.nn.ModuleList, torch.nn.ParameterDict, torch.nn.ParameterList) + if isinstance(self.original_module, forbidden_classes): + cls_name = self.original_module.__class__.__name__ + raise TypeError(f"modules_to_save cannot be applied to modules of type {cls_name}") + + @property + def disable_adapters(self) -> bool: + # use a property to ensure that disable_adapters is not set directly, instead use the enable_adapters method + return self._disable_adapters + + @property + def active_adapter(self) -> str: + # use a property to ensure that active_adapter is not set directly, instead use the set_adapter method + return self._active_adapter + + @property + def weight(self): + if self.active_adapter not in self.modules_to_save: + return self.original_module.weight + return self.modules_to_save[self.active_adapter].weight + + def update(self, adapter_name): + context_manager = nullcontext() + for _, param in self.original_module.named_parameters(): + num_params = param.numel() + # if using DS Zero 3 and the weights are initialized empty + if num_params == 0 and hasattr(param, "ds_numel"): + import deepspeed + + context_manager = deepspeed.zero.GatheredParameters(self.original_module.parameters(), modifier_rank=0) + break + with context_manager: + self.modules_to_save.update(torch.nn.ModuleDict({adapter_name: copy.deepcopy(self.original_module)})) + + if hasattr(self.modules_to_save[adapter_name], "_hf_hook"): + old_hook = self.modules_to_save[adapter_name]._hf_hook + new_hook = self._create_new_hook(old_hook) + remove_hook_from_module(self.modules_to_save[adapter_name]) + add_hook_to_module(self.modules_to_save[adapter_name], new_hook) + + self.original_module.requires_grad_(False) + if adapter_name == self.active_adapter: + self.modules_to_save[adapter_name].requires_grad_(True) + + def _create_new_hook(self, old_hook): + r""" + Creates a new hook based on the old hook. Use it only if you know what you are doing ! 
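+        It copies only those attributes of the old hook that are accepted by the hook class's `__init__`, then
+        instantiates a fresh hook of the same class from them.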
+ """ + old_hook_cls = getattr(accelerate.hooks, old_hook.__class__.__name__) + old_hook_attr = old_hook.__dict__ + filtered_old_hook_attr = {} + old_hook_init_signature = inspect.signature(old_hook_cls.__init__) + for k in old_hook_attr.keys(): + if k in old_hook_init_signature.parameters: + filtered_old_hook_attr[k] = old_hook_attr[k] + new_hook = old_hook_cls(**filtered_old_hook_attr) + return new_hook + + def forward(self, *args, **kwargs): + if self.disable_adapters or (self.active_adapter not in self.modules_to_save): + return self.original_module(*args, **kwargs) + return self.modules_to_save[self.active_adapter](*args, **kwargs) + + def enable_adapters(self, enabled: bool): + """Toggle the enabling and disabling of adapters + + Takes care of setting the requires_grad flag for the adapter weights. + + Args: + enabled (bool): True to enable adapters, False to disable adapters + """ + if self._disable_adapters is not enabled: + # already in the desired state, do nothing + return + + if enabled: + self.original_module.requires_grad_(False) + self.modules_to_save[self.active_adapter].requires_grad_(True) + self._disable_adapters = False + else: + self.original_module.requires_grad_(True) + self.modules_to_save.requires_grad_(False) + self._disable_adapters = True + + def set_adapter(self, adapter_name: str): + """Set the active adapter + + Additionally, this function will set the specified adapter to trainable (i.e., requires_grad=True). If this is + not desired, use the following code. + + ```py + >>> for name, param in model_peft.named_parameters(): + ... if ...: # some check on name (ex. if 'lora' in name) + ... param.requires_grad = False + ``` + + Args: + adapter_name (str): The name of the adapter to set as active + """ + if adapter_name not in self.modules_to_save: + raise ValueError(f"Adapter {adapter_name} not found in {self.modules_to_save.keys()}") + + self.modules_to_save[self.active_adapter].requires_grad_(False) + self.modules_to_save[adapter_name].requires_grad_(True) + self._active_adapter = adapter_name + + +def _get_submodules(model, key): + parent = model.get_submodule(".".join(key.split(".")[:-1])) + target_name = key.split(".")[-1] + target = model.get_submodule(key) + return parent, target, target_name + + +def _freeze_adapter(model, adapter_name): + for n, p in model.named_parameters(): + if adapter_name in n: + p.requires_grad = False + + +def _set_trainable(model, adapter_name): + key_list = [key for key, _ in model.named_modules()] + for key in key_list: + target_module_found = any(key.endswith(target_key) for target_key in model.modules_to_save) + if target_module_found: + parent, target, target_name = _get_submodules(model, key) + if isinstance(target, ModulesToSaveWrapper): + target.update(adapter_name) + target.set_adapter(target.active_adapter) + else: + new_module = ModulesToSaveWrapper(target, adapter_name) + new_module.set_adapter(adapter_name) + setattr(parent, target_name, new_module) + + +def _set_adapter(model, adapter_name): + def check_adapter_name(adapter_name): + if isinstance(adapter_name, str): + return adapter_name + + # adapter_name is a list of str + if len(adapter_name) > 1: + raise ValueError("Only one adapter can be set at a time for modules_to_save") + elif len(adapter_name) == 0: + raise ValueError("Please specify at least one adapter to set") + adapter_name = adapter_name[0] + return adapter_name + + for module in model.modules(): + if isinstance(module, ModulesToSaveWrapper): + # only check the adapter_name if we actually encounter a 
ModulesToSaveWrapper, otherwise we don't care + adapter_name = check_adapter_name(adapter_name) + module.set_adapter(adapter_name) + + +def _prepare_prompt_learning_config(peft_config, model_config): + if peft_config.num_layers is None: + if "num_hidden_layers" in model_config: + num_layers = model_config["num_hidden_layers"] + elif "num_layers" in model_config: + num_layers = model_config["num_layers"] + elif "n_layer" in model_config: + num_layers = model_config["n_layer"] + else: + raise ValueError("Please specify `num_layers` in `peft_config`") + peft_config.num_layers = num_layers + + if peft_config.token_dim is None: + if "hidden_size" in model_config: + token_dim = model_config["hidden_size"] + elif "n_embd" in model_config: + token_dim = model_config["n_embd"] + elif "d_model" in model_config: + token_dim = model_config["d_model"] + else: + raise ValueError("Please specify `token_dim` in `peft_config`") + peft_config.token_dim = token_dim + + if peft_config.num_attention_heads is None: + if "num_attention_heads" in model_config: + num_attention_heads = model_config["num_attention_heads"] + elif "n_head" in model_config: + num_attention_heads = model_config["n_head"] + elif "num_heads" in model_config: + num_attention_heads = model_config["num_heads"] + elif "encoder_attention_heads" in model_config: + num_attention_heads = model_config["encoder_attention_heads"] + else: + raise ValueError("Please specify `num_attention_heads` in `peft_config`") + peft_config.num_attention_heads = num_attention_heads + + if getattr(peft_config, "encoder_hidden_size", None) is None: + setattr(peft_config, "encoder_hidden_size", peft_config.token_dim) + + return peft_config + + +def fsdp_auto_wrap_policy(model): + import functools + import os + + from accelerate import FullyShardedDataParallelPlugin + from torch.distributed.fsdp.wrap import _or_policy, lambda_auto_wrap_policy, transformer_auto_wrap_policy + + from ..tuners import PrefixEncoder, PromptEmbedding, PromptEncoder + + default_transformer_cls_names_to_wrap = ( + ",".join(model._no_split_modules) if getattr(model, "_no_split_modules", None) is not None else "" + ) + transformer_cls_names_to_wrap = os.environ.get( + "FSDP_TRANSFORMER_CLS_TO_WRAP", default_transformer_cls_names_to_wrap + ).split(",") + transformer_cls_to_wrap = {PrefixEncoder, PromptEncoder, PromptEmbedding} + for layer_class in transformer_cls_names_to_wrap: + transformer_cls = FullyShardedDataParallelPlugin.get_module_class_from_name(model, layer_class) + if transformer_cls is None: + raise Exception("Could not find the transformer layer class to wrap in the model.") + else: + transformer_cls_to_wrap.add(transformer_cls) + + def lambda_policy_fn(module): + if ( + len(list(module.named_children())) == 0 + and getattr(module, "weight", None) is not None + and module.weight.requires_grad + ): + return True + return False + + lambda_policy = functools.partial(lambda_auto_wrap_policy, lambda_fn=lambda_policy_fn) + transformer_wrap_policy = functools.partial( + transformer_auto_wrap_policy, + transformer_layer_cls=transformer_cls_to_wrap, + ) + + auto_wrap_policy = functools.partial(_or_policy, policies=[lambda_policy, transformer_wrap_policy]) + return auto_wrap_policy + + +def transpose(weight, fan_in_fan_out): + if not fan_in_fan_out: + return weight + + if isinstance(weight, torch.nn.Parameter): + return torch.nn.Parameter(weight.T) + return weight.T + + +def _is_valid_match(key: str, target_key: str): + """ + Helper function to match module names target_key and key. 
Makes sure that either the key is exactly the target_key + or the target_key is a submodule of key. + """ + if key.endswith(target_key): + if len(key) > len(target_key): + return key.endswith("." + target_key) # must be a submodule + return True + return False + + +def _get_batch_size(input_ids: Optional[torch.Tensor], inputs_embeds: Optional[torch.Tensor]) -> int: + """Get the batch size based on either input_ids or inputs_embeds. + + Raises a ValueError if both are None. + + """ + if (input_ids is None) and (inputs_embeds is None): + raise ValueError("You have to provide either input_ids or inputs_embeds") + + if input_ids is not None: + batch_size = input_ids.shape[0] + else: + batch_size = inputs_embeds.shape[0] + return batch_size + + +def get_quantization_config(model: torch.nn.Module, method: str): + """ + Get the quantization config of the related quantization method. + """ + if ( + hasattr(model, "config") + and hasattr(model.config, "quantization_config") + and (getattr(model, "quantization_method", None) == method) + ): + return model.config.quantization_config + return None + + +def get_auto_gptq_quant_linear(gptq_quantization_config): + """ + Get the right AutoGPTQQuantLinear class based on the quantization config. + """ + if gptq_quantization_config is not None and is_auto_gptq_available(): + from auto_gptq.utils.import_utils import dynamically_import_QuantLinear + + desc_act = gptq_quantization_config.desc_act + group_size = gptq_quantization_config.group_size + bits = gptq_quantization_config.bits + if hasattr(gptq_quantization_config, "use_exllama"): + use_exllama = gptq_quantization_config.use_exllama + else: + use_exllama = not gptq_quantization_config.disable_exllama + if hasattr(gptq_quantization_config, "exllama_config"): + exllama_version = gptq_quantization_config.exllama_config["version"] + else: + exllama_version = 1 + AutoGPTQQuantLinear = dynamically_import_QuantLinear( + use_triton=False, + desc_act=desc_act, + group_size=group_size, + bits=bits, + disable_exllama=not (use_exllama and exllama_version == 1), + disable_exllamav2=not (use_exllama and exllama_version == 2), + ) + return AutoGPTQQuantLinear + return None + + +def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]: + """ + Unique identifier for a tensor storage. Multiple different tensors can share the same underlying storage. For + example, "meta" tensors all share the same storage, and thus their identifiers will all be equal. This identifier is + guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with + non-overlapping lifetimes may have the same id. + + This method is an exact copy of + https://github.com/huggingface/transformers/blob/main/src/transformers/pytorch_utils.py#L282C1-L300C58 but we added + it here manually to avoid import issues with old versions of transformers. + """ + if tensor.device.type == "xla" and is_torch_tpu_available(): + # NOTE: xla tensors don't have storage, so + # use some other unique id to distinguish them. + # This is an XLA tensor; it must have been created using torch_xla's + # device, so the following import is safe: + import torch_xla + + unique_id = torch_xla._XLAC._xla_get_tensor_id(tensor) + else: + unique_id = storage_ptr(tensor) + + return tensor.device, unique_id, storage_size(tensor)
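To make the storage-identity guarantee above concrete, here is a minimal, self-contained sketch (assumptions: CPU tensors only, the XLA branch is omitted; `storage_ptr`/`storage_size` come from `safetensors.torch`, matching this module's imports):

```py
import torch
from safetensors.torch import storage_ptr, storage_size

def id_tensor_storage_demo(tensor: torch.Tensor):
    # Simplified stand-in for id_tensor_storage above, without the XLA branch.
    return tensor.device, storage_ptr(tensor), storage_size(tensor)

base = torch.zeros(4, 4)
view = base[:2]            # a view shares the underlying storage
other = torch.zeros(4, 4)  # a fresh tensor owns its own storage

assert id_tensor_storage_demo(base) == id_tensor_storage_demo(view)
assert id_tensor_storage_demo(base) != id_tensor_storage_demo(other)
```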
+ + +def cast_mixed_precision_params(model, dtype): + """ + Cast all non-trainable parameters of the model to the given `dtype`. The `dtype` can be `torch.float16` or + `torch.bfloat16`, depending on the mixed-precision training you are performing. The trainable parameters are cast to full + precision. This is meant to reduce the GPU memory usage when using PEFT methods by using a half-precision dtype for + non-trainable parameters. Keeping the trainable parameters in full precision preserves training stability when using + automatic mixed-precision training. + + Args: + model (`torch.nn.Module`): + The model to cast the non-trainable parameters of. + dtype (`torch.dtype`): + The dtype to cast the non-trainable parameters to. The `dtype` can be `torch.float16` or + `torch.bfloat16`, depending on the mixed-precision training you are performing. + """ + for p in model.parameters(): + if not p.requires_grad: + p.data = p.to(dtype) + else: + p.data = p.to(torch.float32) + + +def str_to_bool(value: str) -> int: + """ + Converts a string representation of truth to `True` (1) or `False` (0). + + True values are `y`, `yes`, `t`, `true`, `on`, and `1`; False values are `n`, `no`, `f`, `false`, `off`, and `0`. + """ + # same function as in accelerate.utils, which replaces the deprecated distutils.util.strtobool + value = value.lower() + if value in ("y", "yes", "t", "true", "on", "1"): + return 1 + elif value in ("n", "no", "f", "false", "off", "0"): + return 0 + else: + raise ValueError(f"invalid truth value {value}") + + +def check_file_exists_on_hf_hub(repo_id: str, filename: str, **kwargs) -> Optional[bool]: + """Check if a file exists on the HF Hub; if the check was not successful, returns None instead of raising an error. + + Respects offline mode if set. + + """ + exists: Optional[bool] = None + if str_to_bool(os.environ.get("HF_HUB_OFFLINE", "0")): + # user set offline mode, cannot check + return exists + + try: + exists = file_exists(repo_id, filename, **kwargs) + except (HFValidationError, EntryNotFoundError): + # error, exists stays None + pass + except Exception as e: + warnings.warn( + f"Unable to fetch remote file due to the following error {e} - silently ignoring the lookup" + f" for the file {filename} in {repo_id}." + ) + + return exists
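Before the diff moves on to peft_types.py, a quick illustration of the casting rule implemented by `cast_mixed_precision_params` above; this is a hedged sketch on a toy two-layer model (the model and layer choices are illustrative only):

```py
import torch
import torch.nn as nn

# Toy model: freeze the first layer, keep the second trainable.
model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 2))
model[0].requires_grad_(False)

# Same rule as cast_mixed_precision_params(model, torch.bfloat16):
# frozen parameters drop to half precision, trainable ones stay in float32.
for p in model.parameters():
    p.data = p.to(torch.bfloat16) if not p.requires_grad else p.to(torch.float32)

print(model[0].weight.dtype)  # torch.bfloat16 (frozen)
print(model[1].weight.dtype)  # torch.float32  (trainable)
```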
diff --git a/llmeval-env/lib/python3.10/site-packages/peft/utils/peft_types.py b/llmeval-env/lib/python3.10/site-packages/peft/utils/peft_types.py new file mode 100644 index 0000000000000000000000000000000000000000..d4a84435dc60bae6fde33bdbbc5e24f03293c678 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/utils/peft_types.py @@ -0,0 +1,74 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module while preserving other warnings, so don't check this module at all. + +# coding=utf-8 +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import enum + + +class PeftType(str, enum.Enum): + """ + Enum class for the different types of adapters in PEFT. + + Supported PEFT types: + - PROMPT_TUNING + - MULTITASK_PROMPT_TUNING + - P_TUNING + - PREFIX_TUNING + - LORA + - ADALORA + - ADAPTION_PROMPT + - IA3 + - LOHA + - LOKR + - OFT + - POLY + """ + + PROMPT_TUNING = "PROMPT_TUNING" + MULTITASK_PROMPT_TUNING = "MULTITASK_PROMPT_TUNING" + P_TUNING = "P_TUNING" + PREFIX_TUNING = "PREFIX_TUNING" + LORA = "LORA" + ADALORA = "ADALORA" + ADAPTION_PROMPT = "ADAPTION_PROMPT" + IA3 = "IA3" + LOHA = "LOHA" + LOKR = "LOKR" + OFT = "OFT" + POLY = "POLY" + + +class TaskType(str, enum.Enum): + """ + Enum class for the different types of tasks supported by PEFT. + + Overview of the supported task types: + - SEQ_CLS: Text classification. + - SEQ_2_SEQ_LM: Sequence-to-sequence language modeling. + - CAUSAL_LM: Causal language modeling. + - TOKEN_CLS: Token classification. + - QUESTION_ANS: Question answering. + - FEATURE_EXTRACTION: Feature extraction. Provides the hidden states which can be used as embeddings or features + for downstream tasks. + """ + + SEQ_CLS = "SEQ_CLS" + SEQ_2_SEQ_LM = "SEQ_2_SEQ_LM" + CAUSAL_LM = "CAUSAL_LM" + TOKEN_CLS = "TOKEN_CLS" + QUESTION_ANS = "QUESTION_ANS" + FEATURE_EXTRACTION = "FEATURE_EXTRACTION" diff --git a/llmeval-env/lib/python3.10/site-packages/peft/utils/save_and_load.py b/llmeval-env/lib/python3.10/site-packages/peft/utils/save_and_load.py new file mode 100644 index 0000000000000000000000000000000000000000..5ac1264d89afb82d3ab3eadccb49163a75284aaa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/utils/save_and_load.py @@ -0,0 +1,330 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import warnings +from typing import Optional + +import torch +from huggingface_hub import file_exists, hf_hub_download +from huggingface_hub.utils import EntryNotFoundError +from safetensors.torch import load_file as safe_load_file + +from .other import ( + EMBEDDING_LAYER_NAMES, + SAFETENSORS_WEIGHTS_NAME, + WEIGHTS_NAME, + check_file_exists_on_hf_hub, + infer_device, +) +from .peft_types import PeftType + + +def has_valid_embedding_base_layer(layer): + """Check if the layer has an embedding base layer.""" + return hasattr(layer, "base_layer") and isinstance(layer.base_layer, (torch.nn.Linear, torch.nn.Embedding)) + + +def get_embedding_layer_name(model, layer, is_embedding_in_target_modules): + """Get the name of the embedding module for a given layer.""" + for name, module in model.named_modules(): + if (not is_embedding_in_target_modules and module == layer) or module == getattr(layer, "base_layer", None): + return name + return None
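One detail worth spelling out before the main extraction function below: `PeftType` and `TaskType` above subclass `str`, so their members compare equal to the raw strings stored in serialized configs. A minimal standalone sketch (abridged re-declaration, for illustration only):

```py
import enum

class PeftType(str, enum.Enum):
    # Abridged re-declaration; the full enum appears above.
    LORA = "LORA"
    IA3 = "IA3"

# str subclassing lets members compare equal to plain strings, so values
# round-tripped through a JSON config can be matched without conversion.
assert PeftType.LORA == "LORA"
assert PeftType("IA3") is PeftType.IA3
assert isinstance(PeftType.LORA, str)
```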
+ + +def get_peft_model_state_dict( + model, state_dict=None, adapter_name="default", unwrap_compiled=False, save_embedding_layers="auto" +): + """ + Get the state dict of the Peft model. + + Args: + model ([`PeftModel`]): The Peft model. When using torch.nn.DistributedDataParallel, DeepSpeed or FSDP, + the model should be the underlying model/unwrapped model (i.e. model.module). + state_dict (`dict`, *optional*, defaults to `None`): + The state dict of the model. If not provided, the state dict of the passed model will be used. + adapter_name (`str`, *optional*, defaults to `"default"`): + The name of the adapter whose state dict should be returned. + unwrap_compiled (`bool`, *optional*, defaults to `False`): + Whether to unwrap the model if torch.compile was used. + save_embedding_layers (`Union[bool, str]`, *optional*, defaults to `"auto"`): + If `True`, save the embedding layers in addition to adapter weights. If `"auto"`, checks whether any of the + common embedding layer names in `peft.utils.other.EMBEDDING_LAYER_NAMES` appear in the config's + `target_modules`, when available, and sets the flag accordingly. This only works for 🤗 transformers models. + """ + if unwrap_compiled: + model = getattr(model, "_orig_mod", model) + + config = model.peft_config[adapter_name] + if state_dict is None: + state_dict = model.state_dict() + if config.peft_type in (PeftType.LORA, PeftType.ADALORA): + # to_return = lora_state_dict(model, bias=model.peft_config.bias) + # adapted from `https://github.com/microsoft/LoRA/blob/main/loralib/utils.py` + # to be used directly with the state dict, which is necessary when using DeepSpeed or FSDP + bias = config.bias + if bias == "none": + to_return = {k: state_dict[k] for k in state_dict if "lora_" in k} + elif bias == "all": + to_return = {k: state_dict[k] for k in state_dict if "lora_" in k or "bias" in k} + elif bias == "lora_only": + to_return = {} + for k in state_dict: + if "lora_" in k: + to_return[k] = state_dict[k] + bias_name = k.split("lora_")[0] + "bias" + if bias_name in state_dict: + to_return[bias_name] = state_dict[bias_name] + else: + raise NotImplementedError + to_return = {k: v for k, v in to_return.items() if (("lora_" in k and adapter_name in k) or ("bias" in k))} + if config.peft_type == PeftType.ADALORA: + rank_pattern = config.rank_pattern + if rank_pattern is not None: + rank_pattern = {k.replace(f".{adapter_name}", ""): v for k, v in rank_pattern.items()} + config.rank_pattern = rank_pattern + to_return = model.resize_state_dict_by_rank_pattern(rank_pattern, to_return, adapter_name) + + elif config.peft_type == PeftType.LOHA: + to_return = {k: state_dict[k] for k in state_dict if "hada_" in k} + + elif config.peft_type == PeftType.LOKR: + to_return = {k: state_dict[k] for k in state_dict if "lokr_" in k} + + elif config.peft_type == PeftType.ADAPTION_PROMPT: + to_return = {k: state_dict[k] for k in state_dict if k.split(".")[-1].startswith("adaption_")} + elif config.is_prompt_learning: + to_return = {} + if config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: + to_return["prefix_task_cols"] = model.prompt_encoder[adapter_name].prefix_task_cols + to_return["prefix_task_rows"] = model.prompt_encoder[adapter_name].prefix_task_rows + prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight + else: + if config.inference_mode: + prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight + else: + prompt_embeddings = model.get_prompt_embedding_to_save(adapter_name) + to_return["prompt_embeddings"] = prompt_embeddings + elif config.peft_type == PeftType.IA3: + to_return = {k: state_dict[k] for k in state_dict if "ia3_" in k} + elif config.peft_type == PeftType.OFT: + to_return = {k: state_dict[k] for k in state_dict if "oft_" in k} + elif config.peft_type == PeftType.POLY: + to_return = {k: state_dict[k] for k in state_dict if "poly_" in k} + else: + raise NotImplementedError + if getattr(model, "modules_to_save", None)
is not None: + for key, value in state_dict.items(): + if any(f"{module_name}.modules_to_save.{adapter_name}" in key for module_name in model.modules_to_save): + to_return[key.replace("modules_to_save.", "")] = value + + # check the common embedding layers in `target_modules` to reset `save_embedding_layers` if necessary + is_embedding_in_target_modules = False + if ( + save_embedding_layers == "auto" + and hasattr(config, "target_modules") + and any(k in config.target_modules for k in EMBEDDING_LAYER_NAMES) + ): + warnings.warn("Setting `save_embedding_layers` to `True` as embedding layers found in `target_modules`.") + save_embedding_layers = is_embedding_in_target_modules = True + elif save_embedding_layers == "auto": + vocab_size = getattr(getattr(model, "config", None), "vocab_size", None) + model_id = getattr(config, "base_model_name_or_path", None) + + # For some models e.g. diffusers the text config file is stored in a subfolder + # we need to make sure we can download that config. + has_remote_config = False + + # ensure that this check is not performed in HF offline mode, see #1452 + if model_id is not None: + exists = check_file_exists_on_hf_hub(model_id, "config.json") + if exists is None: + # check failed, could not determine if it exists or not + warnings.warn( + f"Could not find a config file in {model_id} - will assume that the vocabulary was not modified." + ) + has_remote_config = False + else: + has_remote_config = exists + + # check if the vocab size of the base model is different from the vocab size of the finetuned model + if ( + vocab_size + and model_id + and has_remote_config + and (vocab_size != model.config.__class__.from_pretrained(model_id).vocab_size) + ): + warnings.warn( + "Setting `save_embedding_layers` to `True` as the embedding layer has been resized during finetuning." + ) + save_embedding_layers = True + else: + save_embedding_layers = False + + if save_embedding_layers and hasattr(model, "get_input_embeddings"): + for layer in [model.get_input_embeddings(), model.get_output_embeddings()]: + if not is_embedding_in_target_modules or has_valid_embedding_base_layer(layer): + # support from version >= 0.6.2 + embedding_module_name = get_embedding_layer_name(model, layer, is_embedding_in_target_modules) + if embedding_module_name: + to_return.update({k: v for k, v in state_dict.items() if embedding_module_name in k}) + elif save_embedding_layers: + warnings.warn("Could not identify embedding layer(s) because the model is not a 🤗 transformers model.") + + to_return = {k.replace(f".{adapter_name}", ""): v for k, v in to_return.items()} + return to_return + + +def set_peft_model_state_dict(model, peft_model_state_dict, adapter_name="default"): + """ + Set the state dict of the Peft model. + + Args: + model ([`PeftModel`]): The Peft model. + peft_model_state_dict (`dict`): The state dict of the Peft model. 
+ """ + config = model.peft_config[adapter_name] + state_dict = {} + if getattr(model, "modules_to_save", None) is not None: + for key, value in peft_model_state_dict.items(): + if any(module_name in key for module_name in model.modules_to_save): + for module_name in model.modules_to_save: + if module_name in key: + key = key.replace(module_name, f"{module_name}.modules_to_save.{adapter_name}") + break + state_dict[key] = value + else: + state_dict = peft_model_state_dict + + if config.peft_type in ( + PeftType.LORA, + PeftType.LOHA, + PeftType.LOKR, + PeftType.ADALORA, + PeftType.IA3, + PeftType.OFT, + PeftType.POLY, + ): + peft_model_state_dict = {} + parameter_prefix = { + PeftType.IA3: "ia3_", + PeftType.LORA: "lora_", + PeftType.ADALORA: "lora_", + PeftType.LOHA: "hada_", + PeftType.LOKR: "lokr_", + PeftType.OFT: "oft_", + PeftType.POLY: "poly_", + }[config.peft_type] + for k, v in state_dict.items(): + if parameter_prefix in k: + suffix = k.split(parameter_prefix)[1] + if "." in suffix: + suffix_to_replace = ".".join(suffix.split(".")[1:]) + k = k.replace(suffix_to_replace, f"{adapter_name}.{suffix_to_replace}") + else: + k = f"{k}.{adapter_name}" + peft_model_state_dict[k] = v + else: + peft_model_state_dict[k] = v + if config.peft_type == PeftType.ADALORA: + rank_pattern = config.rank_pattern + if rank_pattern is not None: + model.resize_modules_by_rank_pattern(rank_pattern, adapter_name) + elif config.is_prompt_learning or config.peft_type == PeftType.ADAPTION_PROMPT: + peft_model_state_dict = state_dict + else: + raise NotImplementedError + + load_result = model.load_state_dict(peft_model_state_dict, strict=False) + if config.is_prompt_learning: + model.prompt_encoder[adapter_name].embedding.load_state_dict( + {"weight": peft_model_state_dict["prompt_embeddings"]}, strict=True + ) + + if config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: + model.prompt_encoder[adapter_name].load_state_dict(peft_model_state_dict, strict=False) + return load_result + + +def load_peft_weights(model_id: str, device: Optional[str] = None, **hf_hub_download_kwargs) -> dict: + r""" + A helper method to load the PEFT weights from the HuggingFace Hub or locally + + Args: + model_id (`str`): + The local path to the adapter weights or the name of the adapter to load from the HuggingFace Hub. + device (`str`): + The device to load the weights onto. + hf_hub_download_kwargs (`dict`): + Additional arguments to pass to the `hf_hub_download` method when loading from the HuggingFace Hub. 
+ """ + path = ( + os.path.join(model_id, hf_hub_download_kwargs["subfolder"]) + if hf_hub_download_kwargs.get("subfolder", None) is not None + else model_id + ) + + if device is None: + device = infer_device() + + if os.path.exists(os.path.join(path, SAFETENSORS_WEIGHTS_NAME)): + filename = os.path.join(path, SAFETENSORS_WEIGHTS_NAME) + use_safetensors = True + elif os.path.exists(os.path.join(path, WEIGHTS_NAME)): + filename = os.path.join(path, WEIGHTS_NAME) + use_safetensors = False + else: + token = hf_hub_download_kwargs.get("token", None) + if token is None: + token = hf_hub_download_kwargs.get("use_auth_token", None) + + hub_filename = ( + os.path.join(hf_hub_download_kwargs["subfolder"], SAFETENSORS_WEIGHTS_NAME) + if hf_hub_download_kwargs.get("subfolder", None) is not None + else SAFETENSORS_WEIGHTS_NAME + ) + has_remote_safetensors_file = file_exists( + repo_id=model_id, + filename=hub_filename, + revision=hf_hub_download_kwargs.get("revision", None), + repo_type=hf_hub_download_kwargs.get("repo_type", None), + token=token, + ) + use_safetensors = has_remote_safetensors_file + + if has_remote_safetensors_file: + # Priority 1: load safetensors weights + filename = hf_hub_download( + model_id, + SAFETENSORS_WEIGHTS_NAME, + **hf_hub_download_kwargs, + ) + else: + try: + filename = hf_hub_download(model_id, WEIGHTS_NAME, **hf_hub_download_kwargs) + except EntryNotFoundError: + raise ValueError( + f"Can't find weights for {model_id} in {model_id} or in the Hugging Face Hub. " + f"Please check that the file {WEIGHTS_NAME} or {SAFETENSORS_WEIGHTS_NAME} is present at {model_id}." + ) + + if use_safetensors: + if hasattr(torch.backends, "mps") and (device == torch.device("mps")): + adapters_weights = safe_load_file(filename, device="cpu") + else: + adapters_weights = safe_load_file(filename, device=device) + else: + adapters_weights = torch.load(filename, map_location=torch.device(device)) + + return adapters_weights