applied-ai-018 committed on
Commit
b32497d
·
verified ·
1 Parent(s): 5e03ef2

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/scipy/integrate/__init__.py +110 -0
  2. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_bvp.py +1155 -0
  3. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_dop.cpython-310-x86_64-linux-gnu.so +0 -0
  4. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__init__.py +8 -0
  5. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/base.py +290 -0
  6. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/bdf.py +479 -0
  7. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/common.py +440 -0
  8. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/dop853_coefficients.py +193 -0
  9. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/ivp.py +748 -0
  10. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/lsoda.py +224 -0
  11. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/radau.py +574 -0
  12. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/rk.py +601 -0
  13. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__init__.py +0 -0
  14. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/test_ivp.py +1135 -0
  15. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/test_rk.py +37 -0
  16. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_lsoda.cpython-310-x86_64-linux-gnu.so +0 -0
  17. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ode.py +1376 -0
  18. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_odepack.cpython-310-x86_64-linux-gnu.so +0 -0
  19. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_odepack_py.py +262 -0
  20. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_quad_vec.py +656 -0
  21. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_quadpack.cpython-310-x86_64-linux-gnu.so +0 -0
  22. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_quadpack_py.py +1291 -0
  23. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_quadrature.py +1830 -0
  24. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_tanhsinh.py +1231 -0
  25. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_test_multivariate.cpython-310-x86_64-linux-gnu.so +0 -0
  26. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_test_odeint_banded.cpython-310-x86_64-linux-gnu.so +0 -0
  27. env-llmeval/lib/python3.10/site-packages/scipy/integrate/_vode.cpython-310-x86_64-linux-gnu.so +0 -0
  28. env-llmeval/lib/python3.10/site-packages/scipy/integrate/dop.py +18 -0
  29. env-llmeval/lib/python3.10/site-packages/scipy/integrate/lsoda.py +15 -0
  30. env-llmeval/lib/python3.10/site-packages/scipy/integrate/odepack.py +17 -0
  31. env-llmeval/lib/python3.10/site-packages/scipy/integrate/quadpack.py +24 -0
  32. env-llmeval/lib/python3.10/site-packages/scipy/integrate/vode.py +18 -0
  33. env-llmeval/lib/python3.10/site-packages/scipy/io/__pycache__/mmio.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/__init__.py +17 -0
  35. env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/_fortran_format_parser.py +309 -0
  36. env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/hb.py +571 -0
  37. env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/tests/__init__.py +0 -0
  38. env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/tests/__pycache__/test_fortran_format.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/tests/__pycache__/test_hb.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/tests/test_fortran_format.py +74 -0
  42. env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/tests/test_hb.py +65 -0
  43. env-llmeval/lib/python3.10/site-packages/scipy/ndimage/__init__.py +169 -0
  44. env-llmeval/lib/python3.10/site-packages/scipy/ndimage/__pycache__/__init__.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_filters.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_fourier.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_interpolation.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_measurements.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_morphology.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_ni_docstrings.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/scipy/integrate/__init__.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ =============================================
3
+ Integration and ODEs (:mod:`scipy.integrate`)
4
+ =============================================
5
+
6
+ .. currentmodule:: scipy.integrate
7
+
8
+ Integrating functions, given function object
9
+ ============================================
10
+
11
+ .. autosummary::
12
+ :toctree: generated/
13
+
14
+ quad -- General purpose integration
15
+ quad_vec -- General purpose integration of vector-valued functions
16
+ dblquad -- General purpose double integration
17
+ tplquad -- General purpose triple integration
18
+ nquad -- General purpose N-D integration
19
+ fixed_quad -- Integrate func(x) using Gaussian quadrature of order n
20
+ quadrature -- Integrate with given tolerance using Gaussian quadrature
21
+ romberg -- Integrate func using Romberg integration
22
+ newton_cotes -- Weights and error coefficient for Newton-Cotes integration
23
+ qmc_quad -- N-D integration using Quasi-Monte Carlo quadrature
24
+ IntegrationWarning -- Warning on issues during integration
25
+ AccuracyWarning -- Warning on issues during quadrature integration
26
+
27
+ Integrating functions, given fixed samples
28
+ ==========================================
29
+
30
+ .. autosummary::
31
+ :toctree: generated/
32
+
33
+ trapezoid -- Use trapezoidal rule to compute integral.
34
+ cumulative_trapezoid -- Use trapezoidal rule to cumulatively compute integral.
35
+ simpson -- Use Simpson's rule to compute integral from samples.
36
+ cumulative_simpson -- Use Simpson's rule to cumulatively compute integral from samples.
37
+ romb -- Use Romberg Integration to compute integral from
38
+ -- (2**k + 1) evenly-spaced samples.
39
+
40
+ .. seealso::
41
+
42
+ :mod:`scipy.special` for orthogonal polynomials (special) for Gaussian
43
+ quadrature roots and weights for other weighting factors and regions.
44
+
45
+ Solving initial value problems for ODE systems
46
+ ==============================================
47
+
48
+ The solvers are implemented as individual classes, which can be used directly
49
+ (low-level usage) or through a convenience function.
50
+
51
+ .. autosummary::
52
+ :toctree: generated/
53
+
54
+ solve_ivp -- Convenient function for ODE integration.
55
+ RK23 -- Explicit Runge-Kutta solver of order 3(2).
56
+ RK45 -- Explicit Runge-Kutta solver of order 5(4).
57
+ DOP853 -- Explicit Runge-Kutta solver of order 8.
58
+ Radau -- Implicit Runge-Kutta solver of order 5.
59
+ BDF -- Implicit multi-step variable order (1 to 5) solver.
60
+ LSODA -- LSODA solver from ODEPACK Fortran package.
61
+ OdeSolver -- Base class for ODE solvers.
62
+ DenseOutput -- Local interpolant for computing a dense output.
63
+ OdeSolution -- Class which represents a continuous ODE solution.
64
+
65
+
66
+ Old API
67
+ -------
68
+
69
+ These are the routines developed earlier for SciPy. They wrap older solvers
70
+ implemented in Fortran (mostly ODEPACK). While the interface to them is not
71
+ particularly convenient and certain features are missing compared to the new
72
+ API, the solvers themselves are of good quality and work fast as compiled
73
+ Fortran code. In some cases, it might be worth using this old API.
74
+
75
+ .. autosummary::
76
+ :toctree: generated/
77
+
78
+ odeint -- General integration of ordinary differential equations.
79
+ ode -- Integrate ODE using VODE and ZVODE routines.
80
+ complex_ode -- Convert a complex-valued ODE to real-valued and integrate.
81
+ ODEintWarning -- Warning raised during the execution of `odeint`.
82
+
83
+
84
+ Solving boundary value problems for ODE systems
85
+ ===============================================
86
+
87
+ .. autosummary::
88
+ :toctree: generated/
89
+
90
+ solve_bvp -- Solve a boundary value problem for a system of ODEs.
91
+ """ # noqa: E501
92
+
93
+
94
+ from ._quadrature import *
95
+ from ._odepack_py import *
96
+ from ._quadpack_py import *
97
+ from ._ode import *
98
+ from ._bvp import solve_bvp
99
+ from ._ivp import (solve_ivp, OdeSolution, DenseOutput,
100
+ OdeSolver, RK23, RK45, DOP853, Radau, BDF, LSODA)
101
+ from ._quad_vec import quad_vec
102
+
103
+ # Deprecated namespaces, to be removed in v2.0.0
104
+ from . import dop, lsoda, vode, odepack, quadpack
105
+
106
+ __all__ = [s for s in dir() if not s.startswith('_')]
107
+
108
+ from scipy._lib._testutils import PytestTester
109
+ test = PytestTester(__name__)
110
+ del PytestTester
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_bvp.py ADDED
@@ -0,0 +1,1155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Boundary value problem solver."""
2
+ from warnings import warn
3
+
4
+ import numpy as np
5
+ from numpy.linalg import pinv
6
+
7
+ from scipy.sparse import coo_matrix, csc_matrix
8
+ from scipy.sparse.linalg import splu
9
+ from scipy.optimize import OptimizeResult
10
+
11
+
12
+ EPS = np.finfo(float).eps
13
+
14
+
15
def estimate_fun_jac(fun, x, y, p, f0=None):
    """Approximate the Jacobians of an ODE system rhs by forward differences.

    Parameters mirror the rhs signature ``fun(x, y, p)``; `f0` optionally
    supplies an already-computed ``fun(x, y, p)`` to avoid re-evaluation.

    Returns
    -------
    df_dy : ndarray, shape (n, n, m)
        Derivatives with respect to y. Element (i, j, q) approximates
        d f_i(x_q, y_q) / d (y_q)_j.
    df_dp : ndarray with shape (n, k, m) or None
        Derivatives with respect to p. Element (i, j, q) approximates
        d f_i(x_q, y_q, p) / d p_j. None when `p` is empty.
    """
    n, m = y.shape
    if f0 is None:
        f0 = fun(x, y, p)

    # Same value as the module-level EPS**0.5 step-size constant.
    sqrt_eps = np.finfo(float).eps ** 0.5
    result_dtype = y.dtype

    df_dy = np.empty((n, n, m), dtype=result_dtype)
    steps = sqrt_eps * (1 + np.abs(y))
    for comp in range(n):
        y_shift = y.copy()
        y_shift[comp] += steps[comp]
        # Use the actually representable step for the divided difference.
        actual = y_shift[comp] - y[comp]
        df_dy[:, comp, :] = (fun(x, y_shift, p) - f0) / actual

    k = p.shape[0]
    if k == 0:
        return df_dy, None

    df_dp = np.empty((n, k, m), dtype=result_dtype)
    steps = sqrt_eps * (1 + np.abs(p))
    for comp in range(k):
        p_shift = p.copy()
        p_shift[comp] += steps[comp]
        actual = p_shift[comp] - p[comp]
        df_dp[:, comp, :] = (fun(x, y, p_shift) - f0) / actual

    return df_dy, df_dp
56
+
57
+
58
+ def estimate_bc_jac(bc, ya, yb, p, bc0=None):
59
+ """Estimate derivatives of boundary conditions with forward differences.
60
+
61
+ Returns
62
+ -------
63
+ dbc_dya : ndarray, shape (n + k, n)
64
+ Derivatives with respect to ya. An element (i, j) corresponds to
65
+ d bc_i / d ya_j.
66
+ dbc_dyb : ndarray, shape (n + k, n)
67
+ Derivatives with respect to yb. An element (i, j) corresponds to
68
+ d bc_i / d ya_j.
69
+ dbc_dp : ndarray with shape (n + k, k) or None
70
+ Derivatives with respect to p. An element (i, j) corresponds to
71
+ d bc_i / d p_j. If `p` is empty, None is returned.
72
+ """
73
+ n = ya.shape[0]
74
+ k = p.shape[0]
75
+
76
+ if bc0 is None:
77
+ bc0 = bc(ya, yb, p)
78
+
79
+ dtype = ya.dtype
80
+
81
+ dbc_dya = np.empty((n, n + k), dtype=dtype)
82
+ h = EPS**0.5 * (1 + np.abs(ya))
83
+ for i in range(n):
84
+ ya_new = ya.copy()
85
+ ya_new[i] += h[i]
86
+ hi = ya_new[i] - ya[i]
87
+ bc_new = bc(ya_new, yb, p)
88
+ dbc_dya[i] = (bc_new - bc0) / hi
89
+ dbc_dya = dbc_dya.T
90
+
91
+ h = EPS**0.5 * (1 + np.abs(yb))
92
+ dbc_dyb = np.empty((n, n + k), dtype=dtype)
93
+ for i in range(n):
94
+ yb_new = yb.copy()
95
+ yb_new[i] += h[i]
96
+ hi = yb_new[i] - yb[i]
97
+ bc_new = bc(ya, yb_new, p)
98
+ dbc_dyb[i] = (bc_new - bc0) / hi
99
+ dbc_dyb = dbc_dyb.T
100
+
101
+ if k == 0:
102
+ dbc_dp = None
103
+ else:
104
+ h = EPS**0.5 * (1 + np.abs(p))
105
+ dbc_dp = np.empty((k, n + k), dtype=dtype)
106
+ for i in range(k):
107
+ p_new = p.copy()
108
+ p_new[i] += h[i]
109
+ hi = p_new[i] - p[i]
110
+ bc_new = bc(ya, yb, p_new)
111
+ dbc_dp[i] = (bc_new - bc0) / hi
112
+ dbc_dp = dbc_dp.T
113
+
114
+ return dbc_dya, dbc_dyb, dbc_dp
115
+
116
+
117
def compute_jac_indices(n, m, k):
    """Compute row/column index arrays for assembling the collocation Jacobian.

    See `construct_global_jac` for the meaning of the index groups.
    """
    # Block 1/2: (m - 1) diagonal and off-diagonal n x n collocation blocks.
    rows_col = np.repeat(np.arange((m - 1) * n), n)
    cols_col = (np.tile(np.arange(n), n * (m - 1))
                + np.repeat(np.arange(m - 1) * n, n ** 2))

    # Block 3/4: (n + k) x n boundary-condition blocks for ya and yb.
    rows_bc = np.repeat(np.arange((m - 1) * n, m * n + k), n)
    cols_bc = np.tile(np.arange(n), n + k)

    # Block 5: dependency of collocation residuals on the parameters p.
    rows_p_col = np.repeat(np.arange((m - 1) * n), k)
    cols_p_col = np.tile(np.arange(m * n, m * n + k), (m - 1) * n)

    # Block 6: dependency of boundary conditions on the parameters p.
    rows_p_bc = np.repeat(np.arange((m - 1) * n, m * n + k), k)
    cols_p_bc = np.tile(np.arange(m * n, m * n + k), n + k)

    rows = np.concatenate((rows_col, rows_col, rows_bc, rows_bc,
                           rows_p_col, rows_p_bc))
    cols = np.concatenate((cols_col, cols_col + n,
                           cols_bc, cols_bc + (m - 1) * n,
                           cols_p_col, cols_p_bc))

    return rows, cols
141
+
142
+
143
def stacked_matmul(a, b):
    """Stacked matrix multiply: out[i,:,:] = np.dot(a[i,:,:], b[i,:,:]).

    `np.matmul` broadcasts over the leading (stack) dimension and performs
    the per-matrix products in compiled code, superseding the old empirical
    choice between a Python loop of `np.dot` calls and `np.einsum`.

    This also fixes a latent dtype bug: the loop branch allocated the output
    with `np.empty(...)` (default float64), so complex-valued stacks with
    inner dimension > 50 could not be assigned into it. `np.matmul`
    preserves the promoted input dtype.
    """
    return np.matmul(a, b)
156
+
157
+
158
def construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, df_dp,
                         df_dp_middle, dbc_dya, dbc_dyb, dbc_dp):
    """Construct the Jacobian of the collocation system.

    There are n * m + k functions: m - 1 collocations residuals, each
    containing n components, followed by n + k boundary condition residuals.

    There are n * m + k variables: m vectors of y, each containing n
    components, followed by k values of vector p.

    For example, let m = 4, n = 2 and k = 1, then the Jacobian will have
    the following sparsity structure:

        1 1 2 2 0 0 0 0  5
        1 1 2 2 0 0 0 0  5
        0 0 1 1 2 2 0 0  5
        0 0 1 1 2 2 0 0  5
        0 0 0 0 1 1 2 2  5
        0 0 0 0 1 1 2 2  5

        3 3 0 0 0 0 4 4  6
        3 3 0 0 0 0 4 4  6
        3 3 0 0 0 0 4 4  6

    Zeros denote identically zero values, other values denote different kinds
    of blocks in the matrix (see below). The blank row indicates the
    separation of collocation residuals from boundary conditions. And the
    blank column indicates the separation of y values from p values.

    Refer to [1]_ (p. 306) for the formula of n x n blocks for derivatives
    of collocation residuals with respect to y.

    Parameters
    ----------
    n : int
        Number of equations in the ODE system.
    m : int
        Number of nodes in the mesh.
    k : int
        Number of the unknown parameters.
    i_jac, j_jac : ndarray
        Row and column indices returned by `compute_jac_indices`. They
        represent different blocks in the Jacobian matrix in the following
        order (see the scheme above):

            * 1: m - 1 diagonal n x n blocks for the collocation residuals.
            * 2: m - 1 off-diagonal n x n blocks for the collocation
              residuals.
            * 3: (n + k) x n block for the dependency of the boundary
              conditions on ya.
            * 4: (n + k) x n block for the dependency of the boundary
              conditions on yb.
            * 5: (m - 1) * n x k block for the dependency of the collocation
              residuals on p.
            * 6: (n + k) x k block for the dependency of the boundary
              conditions on p.

    df_dy : ndarray, shape (n, n, m)
        Jacobian of f with respect to y computed at the mesh nodes.
    df_dy_middle : ndarray, shape (n, n, m - 1)
        Jacobian of f with respect to y computed at the middle between the
        mesh nodes.
    df_dp : ndarray with shape (n, k, m) or None
        Jacobian of f with respect to p computed at the mesh nodes.
    df_dp_middle : ndarray with shape (n, k, m - 1) or None
        Jacobian of f with respect to p computed at the middle between the
        mesh nodes.
    dbc_dya, dbc_dyb : ndarray, shape (n, n)
        Jacobian of bc with respect to ya and yb.
    dbc_dp : ndarray with shape (n, k) or None
        Jacobian of bc with respect to p.

    Returns
    -------
    J : csc_matrix, shape (n * m + k, n * m + k)
        Jacobian of the collocation system in a sparse form.

    References
    ----------
    .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
       Control and the Matlab PSE", ACM Trans. Math. Softw., Vol. 27,
       Number 3, pp. 299-316, 2001.
    """
    # Move the mesh axis to the front so blocks stack along axis 0 and the
    # stacked matrix products below operate per mesh interval.
    df_dy = np.transpose(df_dy, (2, 0, 1))
    df_dy_middle = np.transpose(df_dy_middle, (2, 0, 1))

    # Broadcast the interval lengths over the n x n blocks.
    h = h[:, np.newaxis, np.newaxis]

    dtype = df_dy.dtype

    # Computing diagonal n x n blocks.
    dPhi_dy_0 = np.empty((m - 1, n, n), dtype=dtype)
    dPhi_dy_0[:] = -np.identity(n)
    dPhi_dy_0 -= h / 6 * (df_dy[:-1] + 2 * df_dy_middle)
    T = stacked_matmul(df_dy_middle, df_dy[:-1])
    dPhi_dy_0 -= h**2 / 12 * T

    # Computing off-diagonal n x n blocks.
    dPhi_dy_1 = np.empty((m - 1, n, n), dtype=dtype)
    dPhi_dy_1[:] = np.identity(n)
    dPhi_dy_1 -= h / 6 * (df_dy[1:] + 2 * df_dy_middle)
    T = stacked_matmul(df_dy_middle, df_dy[1:])
    dPhi_dy_1 += h**2 / 12 * T

    # Nonzero values in the order expected by (i_jac, j_jac): blocks 1-4.
    values = np.hstack((dPhi_dy_0.ravel(), dPhi_dy_1.ravel(), dbc_dya.ravel(),
                        dbc_dyb.ravel()))

    if k > 0:
        # Blocks 5 and 6: derivatives with respect to the parameters p.
        df_dp = np.transpose(df_dp, (2, 0, 1))
        df_dp_middle = np.transpose(df_dp_middle, (2, 0, 1))
        # Account for the dependence of y_middle on p (chain rule term);
        # note df_dp_middle is modified in place.
        T = stacked_matmul(df_dy_middle, df_dp[:-1] - df_dp[1:])
        df_dp_middle += 0.125 * h * T
        dPhi_dp = -h/6 * (df_dp[:-1] + df_dp[1:] + 4 * df_dp_middle)
        values = np.hstack((values, dPhi_dp.ravel(), dbc_dp.ravel()))

    # COO for cheap construction (duplicate indices are summed), CSC for the
    # sparse LU factorization performed by the Newton solver.
    J = coo_matrix((values, (i_jac, j_jac)))
    return csc_matrix(J)
274
+
275
+
276
def collocation_fun(fun, y, p, x, h):
    """Evaluate the collocation residuals on the current mesh.

    The solution is sought as a C1 cubic spline whose derivative matches
    the ODE rhs at the mesh nodes; the residuals measure the mismatch
    between the spline derivative and the rhs at the interval midpoints.
    This is a collocation scheme of the Lobatto IIIA family; see [1]_ for
    the formulas and discussion.

    Returns
    -------
    col_res : ndarray, shape (n, m - 1)
        Collocation residuals at the midpoints of the mesh intervals.
    y_middle : ndarray, shape (n, m - 1)
        Values of the cubic spline at the interval midpoints.
    f : ndarray, shape (n, m)
        ODE rhs evaluated at the mesh nodes.
    f_middle : ndarray, shape (n, m - 1)
        ODE rhs evaluated at the interval midpoints (using `y_middle`).

    References
    ----------
    .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
       Control and the Matlab PSE", ACM Trans. Math. Softw., Vol. 27,
       Number 3, pp. 299-316, 2001.
    """
    f = fun(x, y, p)
    y_left, y_right = y[:, :-1], y[:, 1:]
    f_left, f_right = f[:, :-1], f[:, 1:]

    # Hermite-spline value at the midpoint of each interval.
    y_middle = 0.5 * (y_right + y_left) - 0.125 * h * (f_right - f_left)
    f_middle = fun(x[:-1] + 0.5 * h, y_middle, p)

    # Simpson-like quadrature of the rhs over each interval.
    col_res = y_right - y_left - h / 6 * (f_left + f_right + 4 * f_middle)

    return col_res, y_middle, f, f_middle
315
+
316
+
317
def prepare_sys(n, m, k, fun, bc, fun_jac, bc_jac, x, h):
    """Build the residual and Jacobian callables for the collocation system."""
    midpoints = x[:-1] + 0.5 * h
    row_ind, col_ind = compute_jac_indices(n, m, k)

    def col_fun(y, p):
        # Residuals of the collocation equations on the fixed mesh (x, h).
        return collocation_fun(fun, y, p, x, h)

    def sys_jac(y, p, y_middle, f, f_middle, bc0):
        # Derivatives of the ODE rhs: analytic if provided, otherwise
        # estimated by forward differences (reusing f and f_middle).
        if fun_jac is not None:
            df_dy, df_dp = fun_jac(x, y, p)
            df_dy_middle, df_dp_middle = fun_jac(midpoints, y_middle, p)
        else:
            df_dy, df_dp = estimate_fun_jac(fun, x, y, p, f)
            df_dy_middle, df_dp_middle = estimate_fun_jac(
                fun, midpoints, y_middle, p, f_middle)

        # Derivatives of the boundary conditions, analytic or estimated.
        if bc_jac is not None:
            dbc_dya, dbc_dyb, dbc_dp = bc_jac(y[:, 0], y[:, -1], p)
        else:
            dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(
                bc, y[:, 0], y[:, -1], p, bc0)

        return construct_global_jac(n, m, k, row_ind, col_ind, h, df_dy,
                                    df_dy_middle, df_dp, df_dp_middle,
                                    dbc_dya, dbc_dyb, dbc_dp)

    return col_fun, sys_jac
345
+
346
+
347
def solve_newton(n, m, h, col_fun, bc, jac, y, p, B, bvp_tol, bc_tol):
    """Solve the nonlinear collocation system by a Newton method.

    This is a simple Newton method with a backtracking line search. As
    advised in [1]_, an affine-invariant criterion function F = ||J^-1 r||^2
    is used, where J is the Jacobian matrix at the current iteration and r is
    the vector of collocation residuals (values of the system lhs).

    The method alternates between full Newton iterations and fixed-Jacobian
    iterations, based on whether the previous step was accepted at full
    length (see the comment near the end of the loop).

    There are other tricks proposed in [1]_, but they are not used as they
    don't seem to improve anything significantly, and even break the
    convergence on some test problems I tried.

    All important parameters of the algorithm are defined inside the function.

    Parameters
    ----------
    n : int
        Number of equations in the ODE system.
    m : int
        Number of nodes in the mesh.
    h : ndarray, shape (m-1,)
        Mesh intervals.
    col_fun : callable
        Function computing collocation residuals.
    bc : callable
        Function computing boundary condition residuals.
    jac : callable
        Function computing the Jacobian of the whole system (including
        collocation and boundary condition residuals). It is supposed to
        return csc_matrix.
    y : ndarray, shape (n, m)
        Initial guess for the function values at the mesh nodes.
    p : ndarray, shape (k,)
        Initial guess for the unknown parameters.
    B : ndarray with shape (n, n) or None
        Matrix to force the S y(a) = 0 condition for a problems with the
        singular term. If None, the singular term is assumed to be absent.
    bvp_tol : float
        Tolerance to which we want to solve a BVP.
    bc_tol : float
        Tolerance to which we want to satisfy the boundary conditions.

    Returns
    -------
    y : ndarray, shape (n, m)
        Final iterate for the function values at the mesh nodes.
    p : ndarray, shape (k,)
        Final iterate for the unknown parameters.
    singular : bool
        True, if the LU decomposition failed because Jacobian turned out
        to be singular.

    References
    ----------
    .. [1] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
       Boundary Value Problems for Ordinary Differential Equations"
    """
    # We know that the solution residuals at the middle points of the mesh
    # are connected with collocation residuals r_middle = 1.5 * col_res / h.
    # As our BVP solver tries to decrease relative residuals below a certain
    # tolerance, it seems reasonable to terminate Newton iterations by
    # comparison of r_middle / (1 + np.abs(f_middle)) with a certain
    # threshold, which we choose to be 1.5 orders lower than the BVP
    # tolerance. We rewrite the condition as
    # col_res < tol_r * (1 + np.abs(f_middle)), then tol_r should be
    # computed as follows:
    tol_r = 2/3 * h * 5e-2 * bvp_tol

    # Maximum allowed number of Jacobian evaluation and factorization, in
    # other words, the maximum number of full Newton iterations. A small
    # value is recommended in the literature.
    max_njev = 4

    # Maximum number of iterations, considering that some of them can be
    # performed with the fixed Jacobian. In theory, such iterations are
    # cheap, but it's not that simple in Python.
    max_iter = 8

    # Minimum relative improvement of the criterion function to accept the
    # step (Armijo constant).
    sigma = 0.2

    # Step size decrease factor for backtracking.
    tau = 0.5

    # Maximum number of backtracking steps, the minimum step is then
    # tau ** n_trial.
    n_trial = 4

    col_res, y_middle, f, f_middle = col_fun(y, p)
    bc_res = bc(y[:, 0], y[:, -1], p)
    res = np.hstack((col_res.ravel(order='F'), bc_res))

    njev = 0
    singular = False
    recompute_jac = True
    for iteration in range(max_iter):
        if recompute_jac:
            J = jac(y, p, y_middle, f, f_middle, bc_res)
            njev += 1
            try:
                LU = splu(J)
            except RuntimeError:
                # splu raises RuntimeError on an exactly singular matrix.
                singular = True
                break

            step = LU.solve(res)
            # Affine-invariant criterion F = ||J^-1 r||^2.
            cost = np.dot(step, step)

        y_step = step[:m * n].reshape((n, m), order='F')
        p_step = step[m * n:]

        alpha = 1
        for trial in range(n_trial + 1):
            y_new = y - alpha * y_step
            if B is not None:
                # Project onto the subspace satisfying S y(a) = 0 for
                # problems with a singular term.
                y_new[:, 0] = np.dot(B, y_new[:, 0])
            p_new = p - alpha * p_step

            col_res, y_middle, f, f_middle = col_fun(y_new, p_new)
            bc_res = bc(y_new[:, 0], y_new[:, -1], p_new)
            res = np.hstack((col_res.ravel(order='F'), bc_res))

            # Reuse the (possibly stale) factorization for the criterion.
            step_new = LU.solve(res)
            cost_new = np.dot(step_new, step_new)
            if cost_new < (1 - 2 * alpha * sigma) * cost:
                break

            if trial < n_trial:
                alpha *= tau

        y = y_new
        p = p_new

        if njev == max_njev:
            break

        if (np.all(np.abs(col_res) < tol_r * (1 + np.abs(f_middle))) and
                np.all(np.abs(bc_res) < bc_tol)):
            break

        # If the full step was taken, then we are going to continue with
        # the same Jacobian. This is the approach of BVP_SOLVER.
        if alpha == 1:
            step = step_new
            cost = cost_new
            recompute_jac = False
        else:
            recompute_jac = True

    return y, p, singular
500
+
501
+
502
def print_iteration_header():
    """Print the column headers of the verbose iteration-progress table."""
    columns = ("Iteration", "Max residual", "Max BC residual", "Total nodes",
               "Nodes added")
    print(("{:^15}" * 5).format(*columns))
506
+
507
+
508
def print_iteration_progress(iteration, residual, bc_residual, total_nodes,
                             nodes_added):
    """Print one row of the verbose iteration-progress table."""
    row = "{:^15}{:^15.2e}{:^15.2e}{:^15}{:^15}"
    print(row.format(iteration, residual, bc_residual, total_nodes,
                     nodes_added))
512
+
513
+
514
class BVPResult(OptimizeResult):
    # Marker subclass: gives the BVP solver's result its own distinct type
    # while inheriting OptimizeResult's attribute-style dict behavior.
    pass
516
+
517
+
518
# Human-readable explanations keyed by the solver's integer status code.
TERMINATION_MESSAGES = {
    0: "The algorithm converged to the desired accuracy.",
    1: "The maximum number of mesh nodes is exceeded.",
    2: "A singular Jacobian encountered when solving the collocation system.",
    3: "The solver was unable to satisfy boundary conditions tolerance on iteration 10."
}
524
+
525
+
526
def estimate_rms_residuals(fun, sol, x, h, p, r_middle, f_middle):
    """Estimate rms values of collocation residuals via Lobatto quadrature.

    The residuals are the difference between the derivative of the spline
    solution and the rhs of the ODE system, normalized by 1 + np.abs(f)
    (relative residuals). Each interval integral of the squared relative
    residual is estimated with a 5-point Lobatto rule [1]_, exploiting the
    fact that the residuals vanish at the mesh nodes, and is normalized by
    the interval length so the return values read as rms estimates (unlike
    [2]_, which skips this normalization).

    Returns
    -------
    rms_res : ndarray, shape (m - 1,)
        Estimated rms values of the relative residuals over each interval.

    References
    ----------
    .. [1] http://mathworld.wolfram.com/LobattoQuadrature.html
    .. [2] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
       Control and the Matlab PSE", ACM Trans. Math. Softw., Vol. 27,
       Number 3, pp. 299-316, 2001.
    """
    centers = x[:-1] + 0.5 * h
    # Interior Lobatto abscissas sit at +-sqrt(3/7) on the unit interval.
    offset = 0.5 * h * (3 / 7) ** 0.5
    x1 = centers + offset
    x2 = centers - offset
    f1 = fun(x1, sol(x1), p)
    f2 = fun(x2, sol(x2), p)
    r1 = sol(x1, 1) - f1
    r2 = sol(x2, 1) - f2

    # Switch to relative residuals; note r_middle is scaled in place.
    r_middle /= 1 + np.abs(f_middle)
    r1 /= 1 + np.abs(f1)
    r2 /= 1 + np.abs(f2)

    def _sq_norm(r):
        # Squared 2-norm over the components, valid for complex residuals.
        return np.sum(np.real(r * np.conj(r)), axis=0)

    quad = (32 / 45 * _sq_norm(r_middle)
            + 49 / 90 * (_sq_norm(r1) + _sq_norm(r2)))
    return (0.5 * quad) ** 0.5
575
+
576
+
577
def create_spline(y, yp, x, h):
    """Build a C1 cubic spline from nodal values and derivatives.

    The coefficient formulas follow interpolate.CubicSpline (cubic Hermite
    form on each interval).

    Returns
    -------
    sol : PPoly
        Constructed spline as a PPoly instance.
    """
    from scipy.interpolate import PPoly

    n, m = y.shape
    y_left = y[:, :-1]
    yp_left = yp[:, :-1]
    yp_right = yp[:, 1:]

    slope = (y[:, 1:] - y_left) / h
    # Curvature-mismatch term of the Hermite cubic on each interval.
    t = (yp_left + yp_right - 2 * slope) / h

    coeffs = np.empty((4, n, m - 1), dtype=y.dtype)
    coeffs[0] = t / h
    coeffs[1] = (slope - yp_left) / h - t
    coeffs[2] = yp_left
    coeffs[3] = y_left

    # PPoly expects the polynomial-degree axis first for each component;
    # axis=1 makes evaluation return arrays shaped (n, n_points).
    return PPoly(np.moveaxis(coeffs, 1, 0), x, extrapolate=True, axis=1)
600
+
601
+
602
def modify_mesh(x, insert_1, insert_2):
    """Insert nodes into a mesh.

    Only insertion is performed here: node removal logic is not
    established, and its impact on the solver is presumably negligible.

    Parameters
    ----------
    x : ndarray, shape (m,)
        Mesh nodes.
    insert_1 : ndarray
        Intervals to each insert 1 new node in the middle.
    insert_2 : ndarray
        Intervals to each insert 2 new nodes, such that divide an interval
        into 3 equal parts.

    Returns
    -------
    x_new : ndarray
        New mesh nodes.

    Notes
    -----
    `insert_1` and `insert_2` should not have common values.
    """
    # np.insert behavior has varied across NumPy versions, so take the
    # simple, version-independent route: concatenate all new nodes with
    # the old mesh and sort.
    left_1, right_1 = x[insert_1], x[insert_1 + 1]
    left_2, right_2 = x[insert_2], x[insert_2 + 1]
    pieces = (
        x,
        0.5 * (left_1 + right_1),       # midpoints of insert_1 intervals
        (2 * left_2 + right_2) / 3,     # 1/3 points of insert_2 intervals
        (left_2 + 2 * right_2) / 3,     # 2/3 points of insert_2 intervals
    )
    return np.sort(np.hstack(pieces))
635
+
636
+
637
def wrap_functions(fun, bc, fun_jac, bc_jac, k, a, S, D, dtype):
    """Wrap functions for unified usage in the solver.

    Whatever form the user supplied, the returned callables all take the
    internal three-argument signatures ``fun(x, y, p)`` / ``bc(ya, yb, p)``
    (and the analogous Jacobian signatures), cast results to `dtype`, and,
    when a singular-term matrix `S` is given, fold the S * y / (x - a)
    term into the rhs and its Jacobian.  Wrapped Jacobians are None when
    the corresponding user callback is None.
    """
    if fun_jac is None:
        fun_jac_wrapped = None

    if bc_jac is None:
        bc_jac_wrapped = None

    if k == 0:
        # No unknown parameters: adapt the 2-argument user callables to the
        # internal 3-argument signature; the parameter slot is ignored and
        # parameter derivatives are reported as None.
        def fun_p(x, y, _):
            return np.asarray(fun(x, y), dtype)

        def bc_wrapped(ya, yb, _):
            return np.asarray(bc(ya, yb), dtype)

        if fun_jac is not None:
            def fun_jac_p(x, y, _):
                return np.asarray(fun_jac(x, y), dtype), None

        if bc_jac is not None:
            def bc_jac_wrapped(ya, yb, _):
                dbc_dya, dbc_dyb = bc_jac(ya, yb)
                return (np.asarray(dbc_dya, dtype),
                        np.asarray(dbc_dyb, dtype), None)
    else:
        # Unknown parameters present: only dtype casting is needed.
        def fun_p(x, y, p):
            return np.asarray(fun(x, y, p), dtype)

        def bc_wrapped(x, y, p):
            return np.asarray(bc(x, y, p), dtype)

        if fun_jac is not None:
            def fun_jac_p(x, y, p):
                df_dy, df_dp = fun_jac(x, y, p)
                return np.asarray(df_dy, dtype), np.asarray(df_dp, dtype)

        if bc_jac is not None:
            def bc_jac_wrapped(ya, yb, p):
                dbc_dya, dbc_dyb, dbc_dp = bc_jac(ya, yb, p)
                return (np.asarray(dbc_dya, dtype), np.asarray(dbc_dyb, dtype),
                        np.asarray(dbc_dp, dtype))

    if S is None:
        fun_wrapped = fun_p
    else:
        # Add the singular term S * y / (x - a) to the rhs.  The term is
        # singular at x = a itself, so the derivative at the first node is
        # instead corrected with the precomputed matrix D (passed in by the
        # caller; computed as pinv(I - S) in solve_bvp).
        def fun_wrapped(x, y, p):
            f = fun_p(x, y, p)
            if x[0] == a:
                f[:, 0] = np.dot(D, f[:, 0])
                f[:, 1:] += np.dot(S, y[:, 1:]) / (x[1:] - a)
            else:
                f += np.dot(S, y) / (x - a)
            return f

    if fun_jac is not None:
        if S is None:
            fun_jac_wrapped = fun_jac_p
        else:
            # Broadcastable copy of S for adding to the (n, n, m) Jacobian.
            Sr = S[:, :, np.newaxis]

            def fun_jac_wrapped(x, y, p):
                df_dy, df_dp = fun_jac_p(x, y, p)
                # Same singular-term handling as in fun_wrapped, applied to
                # the Jacobian d f / d y (in place).
                if x[0] == a:
                    df_dy[:, :, 0] = np.dot(D, df_dy[:, :, 0])
                    df_dy[:, :, 1:] += Sr / (x[1:] - a)
                else:
                    df_dy += Sr / (x - a)

                return df_dy, df_dp

    return fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped
708
+
709
+
710
def solve_bvp(fun, bc, x, y, p=None, S=None, fun_jac=None, bc_jac=None,
              tol=1e-3, max_nodes=1000, verbose=0, bc_tol=None):
    """Solve a boundary value problem for a system of ODEs.

    This function numerically solves a first order system of ODEs subject to
    two-point boundary conditions::

        dy / dx = f(x, y, p) + S * y / (x - a), a <= x <= b
        bc(y(a), y(b), p) = 0

    Here x is a 1-D independent variable, y(x) is an N-D
    vector-valued function and p is a k-D vector of unknown
    parameters which is to be found along with y(x). For the problem to be
    determined, there must be n + k boundary conditions, i.e., bc must be an
    (n + k)-D function.

    The last singular term on the right-hand side of the system is optional.
    It is defined by an n-by-n matrix S, such that the solution must satisfy
    S y(a) = 0. This condition will be forced during iterations, so it must not
    contradict boundary conditions. See [2]_ for the explanation how this term
    is handled when solving BVPs numerically.

    Problems in a complex domain can be solved as well. In this case, y and p
    are considered to be complex, and f and bc are assumed to be complex-valued
    functions, but x stays real. Note that f and bc must be complex
    differentiable (satisfy Cauchy-Riemann equations [4]_), otherwise you
    should rewrite your problem for real and imaginary parts separately. To
    solve a problem in a complex domain, pass an initial guess for y with a
    complex data type (see below).

    Parameters
    ----------
    fun : callable
        Right-hand side of the system. The calling signature is ``fun(x, y)``,
        or ``fun(x, y, p)`` if parameters are present. All arguments are
        ndarray: ``x`` with shape (m,), ``y`` with shape (n, m), meaning that
        ``y[:, i]`` corresponds to ``x[i]``, and ``p`` with shape (k,). The
        return value must be an array with shape (n, m) and with the same
        layout as ``y``.
    bc : callable
        Function evaluating residuals of the boundary conditions. The calling
        signature is ``bc(ya, yb)``, or ``bc(ya, yb, p)`` if parameters are
        present. All arguments are ndarray: ``ya`` and ``yb`` with shape (n,),
        and ``p`` with shape (k,). The return value must be an array with
        shape (n + k,).
    x : array_like, shape (m,)
        Initial mesh. Must be a strictly increasing sequence of real numbers
        with ``x[0]=a`` and ``x[-1]=b``.
    y : array_like, shape (n, m)
        Initial guess for the function values at the mesh nodes, ith column
        corresponds to ``x[i]``. For problems in a complex domain pass `y`
        with a complex data type (even if the initial guess is purely real).
    p : array_like with shape (k,) or None, optional
        Initial guess for the unknown parameters. If None (default), it is
        assumed that the problem doesn't depend on any parameters.
    S : array_like with shape (n, n) or None
        Matrix defining the singular term. If None (default), the problem is
        solved without the singular term.
    fun_jac : callable or None, optional
        Function computing derivatives of f with respect to y and p. The
        calling signature is ``fun_jac(x, y)``, or ``fun_jac(x, y, p)`` if
        parameters are present. The return must contain 1 or 2 elements in the
        following order:

            * df_dy : array_like with shape (n, n, m), where an element
              (i, j, q) equals to d f_i(x_q, y_q, p) / d (y_q)_j.
            * df_dp : array_like with shape (n, k, m), where an element
              (i, j, q) equals to d f_i(x_q, y_q, p) / d p_j.

        Here q numbers nodes at which x and y are defined, whereas i and j
        number vector components. If the problem is solved without unknown
        parameters, df_dp should not be returned.

        If `fun_jac` is None (default), the derivatives will be estimated
        by the forward finite differences.
    bc_jac : callable or None, optional
        Function computing derivatives of bc with respect to ya, yb, and p.
        The calling signature is ``bc_jac(ya, yb)``, or ``bc_jac(ya, yb, p)``
        if parameters are present. The return must contain 2 or 3 elements in
        the following order:

            * dbc_dya : array_like with shape (n, n), where an element (i, j)
              equals to d bc_i(ya, yb, p) / d ya_j.
            * dbc_dyb : array_like with shape (n, n), where an element (i, j)
              equals to d bc_i(ya, yb, p) / d yb_j.
            * dbc_dp : array_like with shape (n, k), where an element (i, j)
              equals to d bc_i(ya, yb, p) / d p_j.

        If the problem is solved without unknown parameters, dbc_dp should not
        be returned.

        If `bc_jac` is None (default), the derivatives will be estimated by
        the forward finite differences.
    tol : float, optional
        Desired tolerance of the solution. If we define ``r = y' - f(x, y)``,
        where y is the found solution, then the solver tries to achieve on each
        mesh interval ``norm(r / (1 + abs(f)) < tol``, where ``norm`` is
        estimated in a root mean squared sense (using a numerical quadrature
        formula). Default is 1e-3.
    max_nodes : int, optional
        Maximum allowed number of the mesh nodes. If exceeded, the algorithm
        terminates. Default is 1000.
    verbose : {0, 1, 2}, optional
        Level of algorithm's verbosity:

            * 0 (default) : work silently.
            * 1 : display a termination report.
            * 2 : display progress during iterations.
    bc_tol : float, optional
        Desired absolute tolerance for the boundary condition residuals: `bc`
        value should satisfy ``abs(bc) < bc_tol`` component-wise.
        Equals to `tol` by default. Up to 10 iterations are allowed to achieve this
        tolerance.

    Returns
    -------
    Bunch object with the following fields defined:
    sol : PPoly
        Found solution for y as `scipy.interpolate.PPoly` instance, a C1
        continuous cubic spline.
    p : ndarray or None, shape (k,)
        Found parameters. None, if the parameters were not present in the
        problem.
    x : ndarray, shape (m,)
        Nodes of the final mesh.
    y : ndarray, shape (n, m)
        Solution values at the mesh nodes.
    yp : ndarray, shape (n, m)
        Solution derivatives at the mesh nodes.
    rms_residuals : ndarray, shape (m - 1,)
        RMS values of the relative residuals over each mesh interval (see the
        description of `tol` parameter).
    niter : int
        Number of completed iterations.
    status : int
        Reason for algorithm termination:

            * 0: The algorithm converged to the desired accuracy.
            * 1: The maximum number of mesh nodes is exceeded.
            * 2: A singular Jacobian encountered when solving the collocation
              system.
            * 3: The solver was unable to satisfy boundary conditions
              tolerance within the allowed number of iterations.

    message : string
        Verbal description of the termination reason.
    success : bool
        True if the algorithm converged to the desired accuracy (``status=0``).

    Notes
    -----
    This function implements a 4th order collocation algorithm with the
    control of residuals similar to [1]_. A collocation system is solved
    by a damped Newton method with an affine-invariant criterion function as
    described in [3]_.

    Note that in [1]_ integral residuals are defined without normalization
    by interval lengths. So, their definition is different by a multiplier of
    h**0.5 (h is an interval length) from the definition used here.

    .. versionadded:: 0.18.0

    References
    ----------
    .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
           Control and the Maltab PSE", ACM Trans. Math. Softw., Vol. 27,
           Number 3, pp. 299-316, 2001.
    .. [2] L.F. Shampine, P. H. Muir and H. Xu, "A User-Friendly Fortran BVP
           Solver".
    .. [3] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
           Boundary Value Problems for Ordinary Differential Equations".
    .. [4] `Cauchy-Riemann equations
            <https://en.wikipedia.org/wiki/Cauchy-Riemann_equations>`_ on
            Wikipedia.

    Examples
    --------
    In the first example, we solve Bratu's problem::

        y'' + k * exp(y) = 0
        y(0) = y(1) = 0

    for k = 1.

    We rewrite the equation as a first-order system and implement its
    right-hand side evaluation::

        y1' = y2
        y2' = -exp(y1)

    >>> import numpy as np
    >>> def fun(x, y):
    ...     return np.vstack((y[1], -np.exp(y[0])))

    Implement evaluation of the boundary condition residuals:

    >>> def bc(ya, yb):
    ...     return np.array([ya[0], yb[0]])

    Define the initial mesh with 5 nodes:

    >>> x = np.linspace(0, 1, 5)

    This problem is known to have two solutions. To obtain both of them, we
    use two different initial guesses for y. We denote them by subscripts
    a and b.

    >>> y_a = np.zeros((2, x.size))
    >>> y_b = np.zeros((2, x.size))
    >>> y_b[0] = 3

    Now we are ready to run the solver.

    >>> from scipy.integrate import solve_bvp
    >>> res_a = solve_bvp(fun, bc, x, y_a)
    >>> res_b = solve_bvp(fun, bc, x, y_b)

    Let's plot the two found solutions. We take an advantage of having the
    solution in a spline form to produce a smooth plot.

    >>> x_plot = np.linspace(0, 1, 100)
    >>> y_plot_a = res_a.sol(x_plot)[0]
    >>> y_plot_b = res_b.sol(x_plot)[0]
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(x_plot, y_plot_a, label='y_a')
    >>> plt.plot(x_plot, y_plot_b, label='y_b')
    >>> plt.legend()
    >>> plt.xlabel("x")
    >>> plt.ylabel("y")
    >>> plt.show()

    We see that the two solutions have similar shape, but differ in scale
    significantly.

    In the second example, we solve a simple Sturm-Liouville problem::

        y'' + k**2 * y = 0
        y(0) = y(1) = 0

    It is known that a non-trivial solution y = A * sin(k * x) is possible for
    k = pi * n, where n is an integer. To establish the normalization constant
    A = 1 we add a boundary condition::

        y'(0) = k

    Again, we rewrite our equation as a first-order system and implement its
    right-hand side evaluation::

        y1' = y2
        y2' = -k**2 * y1

    >>> def fun(x, y, p):
    ...     k = p[0]
    ...     return np.vstack((y[1], -k**2 * y[0]))

    Note that parameters p are passed as a vector (with one element in our
    case).

    Implement the boundary conditions:

    >>> def bc(ya, yb, p):
    ...     k = p[0]
    ...     return np.array([ya[0], yb[0], ya[1] - k])

    Set up the initial mesh and guess for y. We aim to find the solution for
    k = 2 * pi, to achieve that we set values of y to approximately follow
    sin(2 * pi * x):

    >>> x = np.linspace(0, 1, 5)
    >>> y = np.zeros((2, x.size))
    >>> y[0, 1] = 1
    >>> y[0, 3] = -1

    Run the solver with 6 as an initial guess for k.

    >>> sol = solve_bvp(fun, bc, x, y, p=[6])

    We see that the found k is approximately correct:

    >>> sol.p[0]
    6.28329460046

    And, finally, plot the solution to see the anticipated sinusoid:

    >>> x_plot = np.linspace(0, 1, 100)
    >>> y_plot = sol.sol(x_plot)[0]
    >>> plt.plot(x_plot, y_plot)
    >>> plt.xlabel("x")
    >>> plt.ylabel("y")
    >>> plt.show()
    """
    # --- Input validation and normalization ---
    x = np.asarray(x, dtype=float)
    if x.ndim != 1:
        raise ValueError("`x` must be 1 dimensional.")
    h = np.diff(x)
    if np.any(h <= 0):
        raise ValueError("`x` must be strictly increasing.")
    a = x[0]

    y = np.asarray(y)
    # A complex initial guess switches the whole solve to the complex domain.
    if np.issubdtype(y.dtype, np.complexfloating):
        dtype = complex
    else:
        dtype = float
    y = y.astype(dtype, copy=False)

    if y.ndim != 2:
        raise ValueError("`y` must be 2 dimensional.")
    if y.shape[1] != x.shape[0]:
        raise ValueError(f"`y` is expected to have {x.shape[0]} columns, but actually "
                         f"has {y.shape[1]}.")

    if p is None:
        p = np.array([])
    else:
        p = np.asarray(p, dtype=dtype)
        if p.ndim != 1:
            raise ValueError("`p` must be 1 dimensional.")

    if tol < 100 * EPS:
        warn(f"`tol` is too low, setting to {100 * EPS:.2e}", stacklevel=2)
        tol = 100 * EPS

    if verbose not in [0, 1, 2]:
        raise ValueError("`verbose` must be in [0, 1, 2].")

    n = y.shape[0]
    k = p.shape[0]

    if S is not None:
        S = np.asarray(S, dtype=dtype)
        if S.shape != (n, n):
            raise ValueError(f"`S` is expected to have shape {(n, n)}, "
                             f"but actually has {S.shape}")

        # Compute I - S^+ S to impose necessary boundary conditions.
        B = np.identity(n) - np.dot(pinv(S), S)

        # Project the initial guess at x=a onto the subspace S y(a) = 0.
        y[:, 0] = np.dot(B, y[:, 0])

        # Compute (I - S)^+ to correct derivatives at x=a.
        D = pinv(np.identity(n) - S)
    else:
        B = None
        D = None

    if bc_tol is None:
        bc_tol = tol

    # Maximum number of iterations
    max_iteration = 10

    fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped = wrap_functions(
        fun, bc, fun_jac, bc_jac, k, a, S, D, dtype)

    # Probe the user callables once to validate their return shapes early.
    f = fun_wrapped(x, y, p)
    if f.shape != y.shape:
        raise ValueError(f"`fun` return is expected to have shape {y.shape}, "
                         f"but actually has {f.shape}.")

    bc_res = bc_wrapped(y[:, 0], y[:, -1], p)
    if bc_res.shape != (n + k,):
        raise ValueError(f"`bc` return is expected to have shape {(n + k,)}, "
                         f"but actually has {bc_res.shape}.")

    status = 0
    iteration = 0
    if verbose == 2:
        print_iteration_header()

    # --- Main loop: solve the collocation system on the current mesh, then
    # refine the mesh where the residuals exceed `tol`, until convergence,
    # a singular Jacobian, the node limit, or the iteration limit. ---
    while True:
        m = x.shape[0]

        col_fun, jac_sys = prepare_sys(n, m, k, fun_wrapped, bc_wrapped,
                                       fun_jac_wrapped, bc_jac_wrapped, x, h)
        y, p, singular = solve_newton(n, m, h, col_fun, bc_wrapped, jac_sys,
                                      y, p, B, tol, bc_tol)
        iteration += 1

        col_res, y_middle, f, f_middle = collocation_fun(fun_wrapped, y,
                                                         p, x, h)
        bc_res = bc_wrapped(y[:, 0], y[:, -1], p)
        max_bc_res = np.max(abs(bc_res))

        # This relation is not trivial, but can be verified.
        r_middle = 1.5 * col_res / h
        sol = create_spline(y, f, x, h)
        rms_res = estimate_rms_residuals(fun_wrapped, sol, x, h, p,
                                         r_middle, f_middle)
        max_rms_res = np.max(rms_res)

        if singular:
            status = 2
            break

        # Intervals slightly over tolerance get one extra node; badly
        # failing intervals (>= 100 * tol) get two.
        insert_1, = np.nonzero((rms_res > tol) & (rms_res < 100 * tol))
        insert_2, = np.nonzero(rms_res >= 100 * tol)
        nodes_added = insert_1.shape[0] + 2 * insert_2.shape[0]

        if m + nodes_added > max_nodes:
            status = 1
            if verbose == 2:
                nodes_added = f"({nodes_added})"
                print_iteration_progress(iteration, max_rms_res, max_bc_res,
                                         m, nodes_added)
            break

        if verbose == 2:
            print_iteration_progress(iteration, max_rms_res, max_bc_res, m,
                                     nodes_added)

        if nodes_added > 0:
            # Refine the mesh and restart from the spline interpolant.
            x = modify_mesh(x, insert_1, insert_2)
            h = np.diff(x)
            y = sol(x)
        elif max_bc_res <= bc_tol:
            status = 0
            break
        elif iteration >= max_iteration:
            status = 3
            break

    if verbose > 0:
        if status == 0:
            print(f"Solved in {iteration} iterations, number of nodes {x.shape[0]}. \n"
                  f"Maximum relative residual: {max_rms_res:.2e} \n"
                  f"Maximum boundary residual: {max_bc_res:.2e}")
        elif status == 1:
            print(f"Number of nodes is exceeded after iteration {iteration}. \n"
                  f"Maximum relative residual: {max_rms_res:.2e} \n"
                  f"Maximum boundary residual: {max_bc_res:.2e}")
        elif status == 2:
            print("Singular Jacobian encountered when solving the collocation "
                  f"system on iteration {iteration}. \n"
                  f"Maximum relative residual: {max_rms_res:.2e} \n"
                  f"Maximum boundary residual: {max_bc_res:.2e}")
        elif status == 3:
            print("The solver was unable to satisfy boundary conditions "
                  f"tolerance on iteration {iteration}. \n"
                  f"Maximum relative residual: {max_rms_res:.2e} \n"
                  f"Maximum boundary residual: {max_bc_res:.2e}")

    if p.size == 0:
        p = None

    return BVPResult(sol=sol, p=p, x=x, y=y, yp=f, rms_residuals=rms_res,
                     niter=iteration, status=status,
                     message=TERMINATION_MESSAGES[status], success=status == 0)
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_dop.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (117 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__init__.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ """Suite of ODE solvers implemented in Python."""
2
+ from .ivp import solve_ivp
3
+ from .rk import RK23, RK45, DOP853
4
+ from .radau import Radau
5
+ from .bdf import BDF
6
+ from .lsoda import LSODA
7
+ from .common import OdeSolution
8
+ from .base import DenseOutput, OdeSolver
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/base.py ADDED
@@ -0,0 +1,290 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+
4
def check_arguments(fun, y0, support_complex):
    """Validate `fun`/`y0` and return a dtype-normalizing rhs wrapper.

    Shared argument checking for all solvers: normalizes `y0` to a 1-D
    float or complex ndarray and wraps `fun` so its output is cast to the
    same dtype.
    """
    y0 = np.asarray(y0)
    is_complex = np.issubdtype(y0.dtype, np.complexfloating)
    if is_complex and not support_complex:
        raise ValueError("`y0` is complex, but the chosen solver does "
                         "not support integration in a complex domain.")
    dtype = complex if is_complex else float
    y0 = y0.astype(dtype, copy=False)

    if y0.ndim != 1:
        raise ValueError("`y0` must be 1-dimensional.")
    if not np.isfinite(y0).all():
        raise ValueError("All components of the initial state `y0` must be finite.")

    def fun_wrapped(t, y):
        # Cast the user's rhs output to the solver's working dtype.
        return np.asarray(fun(t, y), dtype=dtype)

    return fun_wrapped, y0
26
+
27
+
28
class OdeSolver:
    """Base class for ODE solvers.

    In order to implement a new solver you need to follow the guidelines:

        1. A constructor must accept parameters presented in the base class
           (listed below) along with any other parameters specific to a solver.
        2. A constructor must accept arbitrary extraneous arguments
           ``**extraneous``, but warn that these arguments are irrelevant
           using `common.warn_extraneous` function. Do not pass these
           arguments to the base class.
        3. A solver must implement a private method `_step_impl(self)` which
           propagates a solver one step further. It must return tuple
           ``(success, message)``, where ``success`` is a boolean indicating
           whether a step was successful, and ``message`` is a string
           containing description of a failure if a step failed or None
           otherwise.
        4. A solver must implement a private method `_dense_output_impl(self)`,
           which returns a `DenseOutput` object covering the last successful
           step.
        5. A solver must have attributes listed below in Attributes section.
           Note that ``t_old`` and ``step_size`` are updated automatically.
        6. Use `fun(self, t, y)` method for the system rhs evaluation, this
           way the number of function evaluations (`nfev`) will be tracked
           automatically.
        7. For convenience, a base class provides `fun_single(self, t, y)` and
           `fun_vectorized(self, t, y)` for evaluating the rhs in
           non-vectorized and vectorized fashions respectively (regardless of
           how `fun` from the constructor is implemented). These calls don't
           increment `nfev`.
        8. If a solver uses a Jacobian matrix and LU decompositions, it should
           track the number of Jacobian evaluations (`njev`) and the number of
           LU decompositions (`nlu`).
        9. By convention, the function evaluations used to compute a finite
           difference approximation of the Jacobian should not be counted in
           `nfev`, thus use `fun_single(self, t, y)` or
           `fun_vectorized(self, t, y)` when computing a finite difference
           approximation of the Jacobian.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system: the time derivative of the state ``y``
        at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a
        scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must
        return an array of the same shape as ``y``. See `vectorized` for more
        information.
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time --- the integration won't continue beyond it. It also
        determines the direction of the integration.
    vectorized : bool
        Whether `fun` can be called in a vectorized fashion. Default is False.

        If ``vectorized`` is False, `fun` will always be called with ``y`` of
        shape ``(n,)``, where ``n = len(y0)``.

        If ``vectorized`` is True, `fun` may be called with ``y`` of shape
        ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave
        such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of
        the returned array is the time derivative of the state corresponding
        with a column of ``y``).

        Setting ``vectorized=True`` allows for faster finite difference
        approximation of the Jacobian by methods 'Radau' and 'BDF', but
        will result in slower execution for other methods. It can also
        result in slower overall execution for 'Radau' and 'BDF' in some
        circumstances (e.g. small ``len(y0)``).
    support_complex : bool, optional
        Whether integration in a complex domain should be supported.
        Generally determined by a derived solver class capabilities.
        Default is False.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of the system's rhs evaluations.
    njev : int
        Number of the Jacobian evaluations.
    nlu : int
        Number of LU decompositions.
    """
    TOO_SMALL_STEP = "Required step size is less than spacing between numbers."

    def __init__(self, fun, t0, y0, t_bound, vectorized,
                 support_complex=False):
        self.t_old = None
        self.t = t0
        # check_arguments normalizes y0 and wraps fun to cast its output.
        self._fun, self.y = check_arguments(fun, y0, support_complex)
        self.t_bound = t_bound
        self.vectorized = vectorized

        # Expose both a single-state and a vectorized rhs variant regardless
        # of how the user implemented `fun`, by adapting the missing one.
        if vectorized:
            def fun_single(t, y):
                return self._fun(t, y[:, None]).ravel()
            fun_vectorized = self._fun
        else:
            fun_single = self._fun

            def fun_vectorized(t, y):
                # Column-by-column fallback when `fun` is not vectorized.
                f = np.empty_like(y)
                for i, yi in enumerate(y.T):
                    f[:, i] = self._fun(t, yi)
                return f

        # Calls through self.fun count toward nfev; fun_single and
        # fun_vectorized deliberately do not (see class guideline 9).
        def fun(t, y):
            self.nfev += 1
            return self.fun_single(t, y)

        self.fun = fun
        self.fun_single = fun_single
        self.fun_vectorized = fun_vectorized

        # Direction defaults to +1 in the degenerate case t_bound == t0.
        self.direction = np.sign(t_bound - t0) if t_bound != t0 else 1
        self.n = self.y.size
        self.status = 'running'

        self.nfev = 0
        self.njev = 0
        self.nlu = 0

    @property
    def step_size(self):
        # Size of the last successful step; None before the first step.
        if self.t_old is None:
            return None
        else:
            return np.abs(self.t - self.t_old)

    def step(self):
        """Perform one integration step.

        Returns
        -------
        message : string or None
            Report from the solver. Typically a reason for a failure if
            `self.status` is 'failed' after the step was taken or None
            otherwise.
        """
        if self.status != 'running':
            raise RuntimeError("Attempt to step on a failed or finished "
                               "solver.")

        if self.n == 0 or self.t == self.t_bound:
            # Handle corner cases of empty solver or no integration.
            self.t_old = self.t
            self.t = self.t_bound
            message = None
            self.status = 'finished'
        else:
            t = self.t
            success, message = self._step_impl()

            if not success:
                self.status = 'failed'
            else:
                self.t_old = t
                # Finished once the bound is reached or passed in the
                # integration direction.
                if self.direction * (self.t - self.t_bound) >= 0:
                    self.status = 'finished'

        return message

    def dense_output(self):
        """Compute a local interpolant over the last successful step.

        Returns
        -------
        sol : `DenseOutput`
            Local interpolant over the last successful step.
        """
        if self.t_old is None:
            raise RuntimeError("Dense output is available after a successful "
                               "step was made.")

        if self.n == 0 or self.t == self.t_old:
            # Handle corner cases of empty solver and no integration.
            return ConstantDenseOutput(self.t_old, self.t, self.y)
        else:
            return self._dense_output_impl()

    def _step_impl(self):
        # To be provided by derived solver classes.
        raise NotImplementedError

    def _dense_output_impl(self):
        # To be provided by derived solver classes.
        raise NotImplementedError
231
+
232
+
233
class DenseOutput:
    """Base class for local interpolant over step made by an ODE solver.

    It interpolates between `t_min` and `t_max` (see Attributes below).
    Evaluation outside this interval is not forbidden, but the accuracy is not
    guaranteed.

    Attributes
    ----------
    t_min, t_max : float
        Time range of the interpolation.
    """
    def __init__(self, t_old, t):
        self.t_old = t_old
        self.t = t
        # Normalize the endpoints so t_min <= t_max regardless of the
        # integration direction.
        self.t_min = min(t, t_old)
        self.t_max = max(t, t_old)

    def __call__(self, t):
        """Evaluate the interpolant.

        Parameters
        ----------
        t : float or array_like with shape (n_points,)
            Points to evaluate the solution at.

        Returns
        -------
        y : ndarray, shape (n,) or (n, n_points)
            Computed values. Shape depends on whether `t` was a scalar or a
            1-D array.
        """
        t = np.asarray(t)
        if t.ndim <= 1:
            return self._call_impl(t)
        raise ValueError("`t` must be a float or a 1-D array.")

    def _call_impl(self, t):
        # Subclasses implement the actual interpolation.
        raise NotImplementedError
272
+
273
+
274
class ConstantDenseOutput(DenseOutput):
    """Constant value interpolator.

    This class used for degenerate integration cases: equal integration limits
    or a system with 0 equations.
    """
    def __init__(self, t_old, t, value):
        super().__init__(t_old, t)
        self.value = value

    def _call_impl(self, t):
        # A scalar query returns the stored vector itself.
        if t.ndim == 0:
            return self.value
        # A vector query tiles the constant column across all query points.
        out = np.empty((self.value.shape[0], t.shape[0]))
        out[:] = self.value[:, None]
        return out
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/bdf.py ADDED
@@ -0,0 +1,479 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from scipy.linalg import lu_factor, lu_solve
3
+ from scipy.sparse import issparse, csc_matrix, eye
4
+ from scipy.sparse.linalg import splu
5
+ from scipy.optimize._numdiff import group_columns
6
+ from .common import (validate_max_step, validate_tol, select_initial_step,
7
+ norm, EPS, num_jac, validate_first_step,
8
+ warn_extraneous)
9
+ from .base import OdeSolver, DenseOutput
10
+
11
+
12
+ MAX_ORDER = 5
13
+ NEWTON_MAXITER = 4
14
+ MIN_FACTOR = 0.2
15
+ MAX_FACTOR = 10
16
+
17
+
18
+ def compute_R(order, factor):
19
+ """Compute the matrix for changing the differences array."""
20
+ I = np.arange(1, order + 1)[:, None]
21
+ J = np.arange(1, order + 1)
22
+ M = np.zeros((order + 1, order + 1))
23
+ M[1:, 1:] = (I - 1 - factor * J) / I
24
+ M[0] = 1
25
+ return np.cumprod(M, axis=0)
26
+
27
+
28
+ def change_D(D, order, factor):
29
+ """Change differences array in-place when step size is changed."""
30
+ R = compute_R(order, factor)
31
+ U = compute_R(order, 1)
32
+ RU = R.dot(U)
33
+ D[:order + 1] = np.dot(RU.T, D[:order + 1])
34
+
35
+
36
+ def solve_bdf_system(fun, t_new, y_predict, c, psi, LU, solve_lu, scale, tol):
37
+ """Solve the algebraic system resulting from BDF method."""
38
+ d = 0
39
+ y = y_predict.copy()
40
+ dy_norm_old = None
41
+ converged = False
42
+ for k in range(NEWTON_MAXITER):
43
+ f = fun(t_new, y)
44
+ if not np.all(np.isfinite(f)):
45
+ break
46
+
47
+ dy = solve_lu(LU, c * f - psi - d)
48
+ dy_norm = norm(dy / scale)
49
+
50
+ if dy_norm_old is None:
51
+ rate = None
52
+ else:
53
+ rate = dy_norm / dy_norm_old
54
+
55
+ if (rate is not None and (rate >= 1 or
56
+ rate ** (NEWTON_MAXITER - k) / (1 - rate) * dy_norm > tol)):
57
+ break
58
+
59
+ y += dy
60
+ d += dy
61
+
62
+ if (dy_norm == 0 or
63
+ rate is not None and rate / (1 - rate) * dy_norm < tol):
64
+ converged = True
65
+ break
66
+
67
+ dy_norm_old = dy_norm
68
+
69
+ return converged, k + 1, y, d
70
+
71
+
72
+ class BDF(OdeSolver):
73
+ """Implicit method based on backward-differentiation formulas.
74
+
75
+ This is a variable order method with the order varying automatically from
76
+ 1 to 5. The general framework of the BDF algorithm is described in [1]_.
77
+ This class implements a quasi-constant step size as explained in [2]_.
78
+ The error estimation strategy for the constant-step BDF is derived in [3]_.
79
+ An accuracy enhancement using modified formulas (NDF) [2]_ is also implemented.
80
+
81
+ Can be applied in the complex domain.
82
+
83
+ Parameters
84
+ ----------
85
+ fun : callable
86
+ Right-hand side of the system: the time derivative of the state ``y``
87
+ at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a
88
+ scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must
89
+ return an array of the same shape as ``y``. See `vectorized` for more
90
+ information.
91
+ t0 : float
92
+ Initial time.
93
+ y0 : array_like, shape (n,)
94
+ Initial state.
95
+ t_bound : float
96
+ Boundary time - the integration won't continue beyond it. It also
97
+ determines the direction of the integration.
98
+ first_step : float or None, optional
99
+ Initial step size. Default is ``None`` which means that the algorithm
100
+ should choose.
101
+ max_step : float, optional
102
+ Maximum allowed step size. Default is np.inf, i.e., the step size is not
103
+ bounded and determined solely by the solver.
104
+ rtol, atol : float and array_like, optional
105
+ Relative and absolute tolerances. The solver keeps the local error
106
+ estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
107
+ relative accuracy (number of correct digits), while `atol` controls
108
+ absolute accuracy (number of correct decimal places). To achieve the
109
+ desired `rtol`, set `atol` to be smaller than the smallest value that
110
+ can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
111
+ allowable error. If `atol` is larger than ``rtol * abs(y)`` the
112
+ number of correct digits is not guaranteed. Conversely, to achieve the
113
+ desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
114
+ than `atol`. If components of y have different scales, it might be
115
+ beneficial to set different `atol` values for different components by
116
+ passing array_like with shape (n,) for `atol`. Default values are
117
+ 1e-3 for `rtol` and 1e-6 for `atol`.
118
+ jac : {None, array_like, sparse_matrix, callable}, optional
119
+ Jacobian matrix of the right-hand side of the system with respect to y,
120
+ required by this method. The Jacobian matrix has shape (n, n) and its
121
+ element (i, j) is equal to ``d f_i / d y_j``.
122
+ There are three ways to define the Jacobian:
123
+
124
+ * If array_like or sparse_matrix, the Jacobian is assumed to
125
+ be constant.
126
+ * If callable, the Jacobian is assumed to depend on both
127
+ t and y; it will be called as ``jac(t, y)`` as necessary.
128
+ For the 'Radau' and 'BDF' methods, the return value might be a
129
+ sparse matrix.
130
+ * If None (default), the Jacobian will be approximated by
131
+ finite differences.
132
+
133
+ It is generally recommended to provide the Jacobian rather than
134
+ relying on a finite-difference approximation.
135
+ jac_sparsity : {None, array_like, sparse matrix}, optional
136
+ Defines a sparsity structure of the Jacobian matrix for a
137
+ finite-difference approximation. Its shape must be (n, n). This argument
138
+ is ignored if `jac` is not `None`. If the Jacobian has only few non-zero
139
+ elements in *each* row, providing the sparsity structure will greatly
140
+ speed up the computations [4]_. A zero entry means that a corresponding
141
+ element in the Jacobian is always zero. If None (default), the Jacobian
142
+ is assumed to be dense.
143
+ vectorized : bool, optional
144
+ Whether `fun` can be called in a vectorized fashion. Default is False.
145
+
146
+ If ``vectorized`` is False, `fun` will always be called with ``y`` of
147
+ shape ``(n,)``, where ``n = len(y0)``.
148
+
149
+ If ``vectorized`` is True, `fun` may be called with ``y`` of shape
150
+ ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave
151
+ such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of
152
+ the returned array is the time derivative of the state corresponding
153
+ with a column of ``y``).
154
+
155
+ Setting ``vectorized=True`` allows for faster finite difference
156
+ approximation of the Jacobian by this method, but may result in slower
157
+ execution overall in some circumstances (e.g. small ``len(y0)``).
158
+
159
+ Attributes
160
+ ----------
161
+ n : int
162
+ Number of equations.
163
+ status : string
164
+ Current status of the solver: 'running', 'finished' or 'failed'.
165
+ t_bound : float
166
+ Boundary time.
167
+ direction : float
168
+ Integration direction: +1 or -1.
169
+ t : float
170
+ Current time.
171
+ y : ndarray
172
+ Current state.
173
+ t_old : float
174
+ Previous time. None if no steps were made yet.
175
+ step_size : float
176
+ Size of the last successful step. None if no steps were made yet.
177
+ nfev : int
178
+ Number of evaluations of the right-hand side.
179
+ njev : int
180
+ Number of evaluations of the Jacobian.
181
+ nlu : int
182
+ Number of LU decompositions.
183
+
184
+ References
185
+ ----------
186
+ .. [1] G. D. Byrne, A. C. Hindmarsh, "A Polyalgorithm for the Numerical
187
+ Solution of Ordinary Differential Equations", ACM Transactions on
188
+ Mathematical Software, Vol. 1, No. 1, pp. 71-96, March 1975.
189
+ .. [2] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI.
190
+ COMPUTE., Vol. 18, No. 1, pp. 1-22, January 1997.
191
+ .. [3] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations I:
192
+ Nonstiff Problems", Sec. III.2.
193
+ .. [4] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
194
+ sparse Jacobian matrices", Journal of the Institute of Mathematics
195
+ and its Applications, 13, pp. 117-120, 1974.
196
+ """
197
+ def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
198
+ rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None,
199
+ vectorized=False, first_step=None, **extraneous):
200
+ warn_extraneous(extraneous)
201
+ super().__init__(fun, t0, y0, t_bound, vectorized,
202
+ support_complex=True)
203
+ self.max_step = validate_max_step(max_step)
204
+ self.rtol, self.atol = validate_tol(rtol, atol, self.n)
205
+ f = self.fun(self.t, self.y)
206
+ if first_step is None:
207
+ self.h_abs = select_initial_step(self.fun, self.t, self.y, f,
208
+ self.direction, 1,
209
+ self.rtol, self.atol)
210
+ else:
211
+ self.h_abs = validate_first_step(first_step, t0, t_bound)
212
+ self.h_abs_old = None
213
+ self.error_norm_old = None
214
+
215
+ self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5))
216
+
217
+ self.jac_factor = None
218
+ self.jac, self.J = self._validate_jac(jac, jac_sparsity)
219
+ if issparse(self.J):
220
+ def lu(A):
221
+ self.nlu += 1
222
+ return splu(A)
223
+
224
+ def solve_lu(LU, b):
225
+ return LU.solve(b)
226
+
227
+ I = eye(self.n, format='csc', dtype=self.y.dtype)
228
+ else:
229
+ def lu(A):
230
+ self.nlu += 1
231
+ return lu_factor(A, overwrite_a=True)
232
+
233
+ def solve_lu(LU, b):
234
+ return lu_solve(LU, b, overwrite_b=True)
235
+
236
+ I = np.identity(self.n, dtype=self.y.dtype)
237
+
238
+ self.lu = lu
239
+ self.solve_lu = solve_lu
240
+ self.I = I
241
+
242
+ kappa = np.array([0, -0.1850, -1/9, -0.0823, -0.0415, 0])
243
+ self.gamma = np.hstack((0, np.cumsum(1 / np.arange(1, MAX_ORDER + 1))))
244
+ self.alpha = (1 - kappa) * self.gamma
245
+ self.error_const = kappa * self.gamma + 1 / np.arange(1, MAX_ORDER + 2)
246
+
247
+ D = np.empty((MAX_ORDER + 3, self.n), dtype=self.y.dtype)
248
+ D[0] = self.y
249
+ D[1] = f * self.h_abs * self.direction
250
+ self.D = D
251
+
252
+ self.order = 1
253
+ self.n_equal_steps = 0
254
+ self.LU = None
255
+
256
+ def _validate_jac(self, jac, sparsity):
257
+ t0 = self.t
258
+ y0 = self.y
259
+
260
+ if jac is None:
261
+ if sparsity is not None:
262
+ if issparse(sparsity):
263
+ sparsity = csc_matrix(sparsity)
264
+ groups = group_columns(sparsity)
265
+ sparsity = (sparsity, groups)
266
+
267
+ def jac_wrapped(t, y):
268
+ self.njev += 1
269
+ f = self.fun_single(t, y)
270
+ J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f,
271
+ self.atol, self.jac_factor,
272
+ sparsity)
273
+ return J
274
+ J = jac_wrapped(t0, y0)
275
+ elif callable(jac):
276
+ J = jac(t0, y0)
277
+ self.njev += 1
278
+ if issparse(J):
279
+ J = csc_matrix(J, dtype=y0.dtype)
280
+
281
+ def jac_wrapped(t, y):
282
+ self.njev += 1
283
+ return csc_matrix(jac(t, y), dtype=y0.dtype)
284
+ else:
285
+ J = np.asarray(J, dtype=y0.dtype)
286
+
287
+ def jac_wrapped(t, y):
288
+ self.njev += 1
289
+ return np.asarray(jac(t, y), dtype=y0.dtype)
290
+
291
+ if J.shape != (self.n, self.n):
292
+ raise ValueError("`jac` is expected to have shape {}, but "
293
+ "actually has {}."
294
+ .format((self.n, self.n), J.shape))
295
+ else:
296
+ if issparse(jac):
297
+ J = csc_matrix(jac, dtype=y0.dtype)
298
+ else:
299
+ J = np.asarray(jac, dtype=y0.dtype)
300
+
301
+ if J.shape != (self.n, self.n):
302
+ raise ValueError("`jac` is expected to have shape {}, but "
303
+ "actually has {}."
304
+ .format((self.n, self.n), J.shape))
305
+ jac_wrapped = None
306
+
307
+ return jac_wrapped, J
308
+
309
+ def _step_impl(self):
310
+ t = self.t
311
+ D = self.D
312
+
313
+ max_step = self.max_step
314
+ min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
315
+ if self.h_abs > max_step:
316
+ h_abs = max_step
317
+ change_D(D, self.order, max_step / self.h_abs)
318
+ self.n_equal_steps = 0
319
+ elif self.h_abs < min_step:
320
+ h_abs = min_step
321
+ change_D(D, self.order, min_step / self.h_abs)
322
+ self.n_equal_steps = 0
323
+ else:
324
+ h_abs = self.h_abs
325
+
326
+ atol = self.atol
327
+ rtol = self.rtol
328
+ order = self.order
329
+
330
+ alpha = self.alpha
331
+ gamma = self.gamma
332
+ error_const = self.error_const
333
+
334
+ J = self.J
335
+ LU = self.LU
336
+ current_jac = self.jac is None
337
+
338
+ step_accepted = False
339
+ while not step_accepted:
340
+ if h_abs < min_step:
341
+ return False, self.TOO_SMALL_STEP
342
+
343
+ h = h_abs * self.direction
344
+ t_new = t + h
345
+
346
+ if self.direction * (t_new - self.t_bound) > 0:
347
+ t_new = self.t_bound
348
+ change_D(D, order, np.abs(t_new - t) / h_abs)
349
+ self.n_equal_steps = 0
350
+ LU = None
351
+
352
+ h = t_new - t
353
+ h_abs = np.abs(h)
354
+
355
+ y_predict = np.sum(D[:order + 1], axis=0)
356
+
357
+ scale = atol + rtol * np.abs(y_predict)
358
+ psi = np.dot(D[1: order + 1].T, gamma[1: order + 1]) / alpha[order]
359
+
360
+ converged = False
361
+ c = h / alpha[order]
362
+ while not converged:
363
+ if LU is None:
364
+ LU = self.lu(self.I - c * J)
365
+
366
+ converged, n_iter, y_new, d = solve_bdf_system(
367
+ self.fun, t_new, y_predict, c, psi, LU, self.solve_lu,
368
+ scale, self.newton_tol)
369
+
370
+ if not converged:
371
+ if current_jac:
372
+ break
373
+ J = self.jac(t_new, y_predict)
374
+ LU = None
375
+ current_jac = True
376
+
377
+ if not converged:
378
+ factor = 0.5
379
+ h_abs *= factor
380
+ change_D(D, order, factor)
381
+ self.n_equal_steps = 0
382
+ LU = None
383
+ continue
384
+
385
+ safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER
386
+ + n_iter)
387
+
388
+ scale = atol + rtol * np.abs(y_new)
389
+ error = error_const[order] * d
390
+ error_norm = norm(error / scale)
391
+
392
+ if error_norm > 1:
393
+ factor = max(MIN_FACTOR,
394
+ safety * error_norm ** (-1 / (order + 1)))
395
+ h_abs *= factor
396
+ change_D(D, order, factor)
397
+ self.n_equal_steps = 0
398
+ # As we didn't have problems with convergence, we don't
399
+ # reset LU here.
400
+ else:
401
+ step_accepted = True
402
+
403
+ self.n_equal_steps += 1
404
+
405
+ self.t = t_new
406
+ self.y = y_new
407
+
408
+ self.h_abs = h_abs
409
+ self.J = J
410
+ self.LU = LU
411
+
412
+ # Update differences. The principal relation here is
413
+ # D^{j + 1} y_n = D^{j} y_n - D^{j} y_{n - 1}. Keep in mind that D
414
+ # contained difference for previous interpolating polynomial and
415
+ # d = D^{k + 1} y_n. Thus this elegant code follows.
416
+ D[order + 2] = d - D[order + 1]
417
+ D[order + 1] = d
418
+ for i in reversed(range(order + 1)):
419
+ D[i] += D[i + 1]
420
+
421
+ if self.n_equal_steps < order + 1:
422
+ return True, None
423
+
424
+ if order > 1:
425
+ error_m = error_const[order - 1] * D[order]
426
+ error_m_norm = norm(error_m / scale)
427
+ else:
428
+ error_m_norm = np.inf
429
+
430
+ if order < MAX_ORDER:
431
+ error_p = error_const[order + 1] * D[order + 2]
432
+ error_p_norm = norm(error_p / scale)
433
+ else:
434
+ error_p_norm = np.inf
435
+
436
+ error_norms = np.array([error_m_norm, error_norm, error_p_norm])
437
+ with np.errstate(divide='ignore'):
438
+ factors = error_norms ** (-1 / np.arange(order, order + 3))
439
+
440
+ delta_order = np.argmax(factors) - 1
441
+ order += delta_order
442
+ self.order = order
443
+
444
+ factor = min(MAX_FACTOR, safety * np.max(factors))
445
+ self.h_abs *= factor
446
+ change_D(D, order, factor)
447
+ self.n_equal_steps = 0
448
+ self.LU = None
449
+
450
+ return True, None
451
+
452
+ def _dense_output_impl(self):
453
+ return BdfDenseOutput(self.t_old, self.t, self.h_abs * self.direction,
454
+ self.order, self.D[:self.order + 1].copy())
455
+
456
+
457
+ class BdfDenseOutput(DenseOutput):
458
+ def __init__(self, t_old, t, h, order, D):
459
+ super().__init__(t_old, t)
460
+ self.order = order
461
+ self.t_shift = self.t - h * np.arange(self.order)
462
+ self.denom = h * (1 + np.arange(self.order))
463
+ self.D = D
464
+
465
+ def _call_impl(self, t):
466
+ if t.ndim == 0:
467
+ x = (t - self.t_shift) / self.denom
468
+ p = np.cumprod(x)
469
+ else:
470
+ x = (t - self.t_shift[:, None]) / self.denom[:, None]
471
+ p = np.cumprod(x, axis=0)
472
+
473
+ y = np.dot(self.D[1:].T, p)
474
+ if y.ndim == 1:
475
+ y += self.D[0]
476
+ else:
477
+ y += self.D[0, :, None]
478
+
479
+ return y
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/common.py ADDED
@@ -0,0 +1,440 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from itertools import groupby
2
+ from warnings import warn
3
+ import numpy as np
4
+ from scipy.sparse import find, coo_matrix
5
+
6
+
7
+ EPS = np.finfo(float).eps
8
+
9
+
10
+ def validate_first_step(first_step, t0, t_bound):
11
+ """Assert that first_step is valid and return it."""
12
+ if first_step <= 0:
13
+ raise ValueError("`first_step` must be positive.")
14
+ if first_step > np.abs(t_bound - t0):
15
+ raise ValueError("`first_step` exceeds bounds.")
16
+ return first_step
17
+
18
+
19
+ def validate_max_step(max_step):
20
+ """Assert that max_Step is valid and return it."""
21
+ if max_step <= 0:
22
+ raise ValueError("`max_step` must be positive.")
23
+ return max_step
24
+
25
+
26
+ def warn_extraneous(extraneous):
27
+ """Display a warning for extraneous keyword arguments.
28
+
29
+ The initializer of each solver class is expected to collect keyword
30
+ arguments that it doesn't understand and warn about them. This function
31
+ prints a warning for each key in the supplied dictionary.
32
+
33
+ Parameters
34
+ ----------
35
+ extraneous : dict
36
+ Extraneous keyword arguments
37
+ """
38
+ if extraneous:
39
+ warn("The following arguments have no effect for a chosen solver: {}."
40
+ .format(", ".join(f"`{x}`" for x in extraneous)),
41
+ stacklevel=3)
42
+
43
+
44
+ def validate_tol(rtol, atol, n):
45
+ """Validate tolerance values."""
46
+
47
+ if np.any(rtol < 100 * EPS):
48
+ warn("At least one element of `rtol` is too small. "
49
+ f"Setting `rtol = np.maximum(rtol, {100 * EPS})`.",
50
+ stacklevel=3)
51
+ rtol = np.maximum(rtol, 100 * EPS)
52
+
53
+ atol = np.asarray(atol)
54
+ if atol.ndim > 0 and atol.shape != (n,):
55
+ raise ValueError("`atol` has wrong shape.")
56
+
57
+ if np.any(atol < 0):
58
+ raise ValueError("`atol` must be positive.")
59
+
60
+ return rtol, atol
61
+
62
+
63
+ def norm(x):
64
+ """Compute RMS norm."""
65
+ return np.linalg.norm(x) / x.size ** 0.5
66
+
67
+
68
+ def select_initial_step(fun, t0, y0, f0, direction, order, rtol, atol):
69
+ """Empirically select a good initial step.
70
+
71
+ The algorithm is described in [1]_.
72
+
73
+ Parameters
74
+ ----------
75
+ fun : callable
76
+ Right-hand side of the system.
77
+ t0 : float
78
+ Initial value of the independent variable.
79
+ y0 : ndarray, shape (n,)
80
+ Initial value of the dependent variable.
81
+ f0 : ndarray, shape (n,)
82
+ Initial value of the derivative, i.e., ``fun(t0, y0)``.
83
+ direction : float
84
+ Integration direction.
85
+ order : float
86
+ Error estimator order. It means that the error controlled by the
87
+ algorithm is proportional to ``step_size ** (order + 1)`.
88
+ rtol : float
89
+ Desired relative tolerance.
90
+ atol : float
91
+ Desired absolute tolerance.
92
+
93
+ Returns
94
+ -------
95
+ h_abs : float
96
+ Absolute value of the suggested initial step.
97
+
98
+ References
99
+ ----------
100
+ .. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential
101
+ Equations I: Nonstiff Problems", Sec. II.4.
102
+ """
103
+ if y0.size == 0:
104
+ return np.inf
105
+
106
+ scale = atol + np.abs(y0) * rtol
107
+ d0 = norm(y0 / scale)
108
+ d1 = norm(f0 / scale)
109
+ if d0 < 1e-5 or d1 < 1e-5:
110
+ h0 = 1e-6
111
+ else:
112
+ h0 = 0.01 * d0 / d1
113
+
114
+ y1 = y0 + h0 * direction * f0
115
+ f1 = fun(t0 + h0 * direction, y1)
116
+ d2 = norm((f1 - f0) / scale) / h0
117
+
118
+ if d1 <= 1e-15 and d2 <= 1e-15:
119
+ h1 = max(1e-6, h0 * 1e-3)
120
+ else:
121
+ h1 = (0.01 / max(d1, d2)) ** (1 / (order + 1))
122
+
123
+ return min(100 * h0, h1)
124
+
125
+
126
+ class OdeSolution:
127
+ """Continuous ODE solution.
128
+
129
+ It is organized as a collection of `DenseOutput` objects which represent
130
+ local interpolants. It provides an algorithm to select a right interpolant
131
+ for each given point.
132
+
133
+ The interpolants cover the range between `t_min` and `t_max` (see
134
+ Attributes below). Evaluation outside this interval is not forbidden, but
135
+ the accuracy is not guaranteed.
136
+
137
+ When evaluating at a breakpoint (one of the values in `ts`) a segment with
138
+ the lower index is selected.
139
+
140
+ Parameters
141
+ ----------
142
+ ts : array_like, shape (n_segments + 1,)
143
+ Time instants between which local interpolants are defined. Must
144
+ be strictly increasing or decreasing (zero segment with two points is
145
+ also allowed).
146
+ interpolants : list of DenseOutput with n_segments elements
147
+ Local interpolants. An i-th interpolant is assumed to be defined
148
+ between ``ts[i]`` and ``ts[i + 1]``.
149
+ alt_segment : boolean
150
+ Requests the alternative interpolant segment selection scheme. At each
151
+ solver integration point, two interpolant segments are available. The
152
+ default (False) and alternative (True) behaviours select the segment
153
+ for which the requested time corresponded to ``t`` and ``t_old``,
154
+ respectively. This functionality is only relevant for testing the
155
+ interpolants' accuracy: different integrators use different
156
+ construction strategies.
157
+
158
+ Attributes
159
+ ----------
160
+ t_min, t_max : float
161
+ Time range of the interpolation.
162
+ """
163
+ def __init__(self, ts, interpolants, alt_segment=False):
164
+ ts = np.asarray(ts)
165
+ d = np.diff(ts)
166
+ # The first case covers integration on zero segment.
167
+ if not ((ts.size == 2 and ts[0] == ts[-1])
168
+ or np.all(d > 0) or np.all(d < 0)):
169
+ raise ValueError("`ts` must be strictly increasing or decreasing.")
170
+
171
+ self.n_segments = len(interpolants)
172
+ if ts.shape != (self.n_segments + 1,):
173
+ raise ValueError("Numbers of time stamps and interpolants "
174
+ "don't match.")
175
+
176
+ self.ts = ts
177
+ self.interpolants = interpolants
178
+ if ts[-1] >= ts[0]:
179
+ self.t_min = ts[0]
180
+ self.t_max = ts[-1]
181
+ self.ascending = True
182
+ self.side = "right" if alt_segment else "left"
183
+ self.ts_sorted = ts
184
+ else:
185
+ self.t_min = ts[-1]
186
+ self.t_max = ts[0]
187
+ self.ascending = False
188
+ self.side = "left" if alt_segment else "right"
189
+ self.ts_sorted = ts[::-1]
190
+
191
+ def _call_single(self, t):
192
+ # Here we preserve a certain symmetry that when t is in self.ts,
193
+ # if alt_segment=False, then we prioritize a segment with a lower
194
+ # index.
195
+ ind = np.searchsorted(self.ts_sorted, t, side=self.side)
196
+
197
+ segment = min(max(ind - 1, 0), self.n_segments - 1)
198
+ if not self.ascending:
199
+ segment = self.n_segments - 1 - segment
200
+
201
+ return self.interpolants[segment](t)
202
+
203
+ def __call__(self, t):
204
+ """Evaluate the solution.
205
+
206
+ Parameters
207
+ ----------
208
+ t : float or array_like with shape (n_points,)
209
+ Points to evaluate at.
210
+
211
+ Returns
212
+ -------
213
+ y : ndarray, shape (n_states,) or (n_states, n_points)
214
+ Computed values. Shape depends on whether `t` is a scalar or a
215
+ 1-D array.
216
+ """
217
+ t = np.asarray(t)
218
+
219
+ if t.ndim == 0:
220
+ return self._call_single(t)
221
+
222
+ order = np.argsort(t)
223
+ reverse = np.empty_like(order)
224
+ reverse[order] = np.arange(order.shape[0])
225
+ t_sorted = t[order]
226
+
227
+ # See comment in self._call_single.
228
+ segments = np.searchsorted(self.ts_sorted, t_sorted, side=self.side)
229
+ segments -= 1
230
+ segments[segments < 0] = 0
231
+ segments[segments > self.n_segments - 1] = self.n_segments - 1
232
+ if not self.ascending:
233
+ segments = self.n_segments - 1 - segments
234
+
235
+ ys = []
236
+ group_start = 0
237
+ for segment, group in groupby(segments):
238
+ group_end = group_start + len(list(group))
239
+ y = self.interpolants[segment](t_sorted[group_start:group_end])
240
+ ys.append(y)
241
+ group_start = group_end
242
+
243
+ ys = np.hstack(ys)
244
+ ys = ys[:, reverse]
245
+
246
+ return ys
247
+
248
+
249
+ NUM_JAC_DIFF_REJECT = EPS ** 0.875
250
+ NUM_JAC_DIFF_SMALL = EPS ** 0.75
251
+ NUM_JAC_DIFF_BIG = EPS ** 0.25
252
+ NUM_JAC_MIN_FACTOR = 1e3 * EPS
253
+ NUM_JAC_FACTOR_INCREASE = 10
254
+ NUM_JAC_FACTOR_DECREASE = 0.1
255
+
256
+
257
+ def num_jac(fun, t, y, f, threshold, factor, sparsity=None):
258
+ """Finite differences Jacobian approximation tailored for ODE solvers.
259
+
260
+ This function computes finite difference approximation to the Jacobian
261
+ matrix of `fun` with respect to `y` using forward differences.
262
+ The Jacobian matrix has shape (n, n) and its element (i, j) is equal to
263
+ ``d f_i / d y_j``.
264
+
265
+ A special feature of this function is the ability to correct the step
266
+ size from iteration to iteration. The main idea is to keep the finite
267
+ difference significantly separated from its round-off error which
268
+ approximately equals ``EPS * np.abs(f)``. It reduces a possibility of a
269
+ huge error and assures that the estimated derivative are reasonably close
270
+ to the true values (i.e., the finite difference approximation is at least
271
+ qualitatively reflects the structure of the true Jacobian).
272
+
273
+ Parameters
274
+ ----------
275
+ fun : callable
276
+ Right-hand side of the system implemented in a vectorized fashion.
277
+ t : float
278
+ Current time.
279
+ y : ndarray, shape (n,)
280
+ Current state.
281
+ f : ndarray, shape (n,)
282
+ Value of the right hand side at (t, y).
283
+ threshold : float
284
+ Threshold for `y` value used for computing the step size as
285
+ ``factor * np.maximum(np.abs(y), threshold)``. Typically, the value of
286
+ absolute tolerance (atol) for a solver should be passed as `threshold`.
287
+ factor : ndarray with shape (n,) or None
288
+ Factor to use for computing the step size. Pass None for the very
289
+ evaluation, then use the value returned from this function.
290
+ sparsity : tuple (structure, groups) or None
291
+ Sparsity structure of the Jacobian, `structure` must be csc_matrix.
292
+
293
+ Returns
294
+ -------
295
+ J : ndarray or csc_matrix, shape (n, n)
296
+ Jacobian matrix.
297
+ factor : ndarray, shape (n,)
298
+ Suggested `factor` for the next evaluation.
299
+ """
300
+ y = np.asarray(y)
301
+ n = y.shape[0]
302
+ if n == 0:
303
+ return np.empty((0, 0)), factor
304
+
305
+ if factor is None:
306
+ factor = np.full(n, EPS ** 0.5)
307
+ else:
308
+ factor = factor.copy()
309
+
310
+ # Direct the step as ODE dictates, hoping that such a step won't lead to
311
+ # a problematic region. For complex ODEs it makes sense to use the real
312
+ # part of f as we use steps along real axis.
313
+ f_sign = 2 * (np.real(f) >= 0).astype(float) - 1
314
+ y_scale = f_sign * np.maximum(threshold, np.abs(y))
315
+ h = (y + factor * y_scale) - y
316
+
317
+ # Make sure that the step is not 0 to start with. Not likely it will be
318
+ # executed often.
319
+ for i in np.nonzero(h == 0)[0]:
320
+ while h[i] == 0:
321
+ factor[i] *= 10
322
+ h[i] = (y[i] + factor[i] * y_scale[i]) - y[i]
323
+
324
+ if sparsity is None:
325
+ return _dense_num_jac(fun, t, y, f, h, factor, y_scale)
326
+ else:
327
+ structure, groups = sparsity
328
+ return _sparse_num_jac(fun, t, y, f, h, factor, y_scale,
329
+ structure, groups)
330
+
331
+
332
+ def _dense_num_jac(fun, t, y, f, h, factor, y_scale):
333
+ n = y.shape[0]
334
+ h_vecs = np.diag(h)
335
+ f_new = fun(t, y[:, None] + h_vecs)
336
+ diff = f_new - f[:, None]
337
+ max_ind = np.argmax(np.abs(diff), axis=0)
338
+ r = np.arange(n)
339
+ max_diff = np.abs(diff[max_ind, r])
340
+ scale = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r]))
341
+
342
+ diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale
343
+ if np.any(diff_too_small):
344
+ ind, = np.nonzero(diff_too_small)
345
+ new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]
346
+ h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]
347
+ h_vecs[ind, ind] = h_new
348
+ f_new = fun(t, y[:, None] + h_vecs[:, ind])
349
+ diff_new = f_new - f[:, None]
350
+ max_ind = np.argmax(np.abs(diff_new), axis=0)
351
+ r = np.arange(ind.shape[0])
352
+ max_diff_new = np.abs(diff_new[max_ind, r])
353
+ scale_new = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r]))
354
+
355
+ update = max_diff[ind] * scale_new < max_diff_new * scale[ind]
356
+ if np.any(update):
357
+ update, = np.nonzero(update)
358
+ update_ind = ind[update]
359
+ factor[update_ind] = new_factor[update]
360
+ h[update_ind] = h_new[update]
361
+ diff[:, update_ind] = diff_new[:, update]
362
+ scale[update_ind] = scale_new[update]
363
+ max_diff[update_ind] = max_diff_new[update]
364
+
365
+ diff /= h
366
+
367
+ factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE
368
+ factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE
369
+ factor = np.maximum(factor, NUM_JAC_MIN_FACTOR)
370
+
371
+ return diff, factor
372
+
373
+
374
+ def _sparse_num_jac(fun, t, y, f, h, factor, y_scale, structure, groups):
375
+ n = y.shape[0]
376
+ n_groups = np.max(groups) + 1
377
+ h_vecs = np.empty((n_groups, n))
378
+ for group in range(n_groups):
379
+ e = np.equal(group, groups)
380
+ h_vecs[group] = h * e
381
+ h_vecs = h_vecs.T
382
+
383
+ f_new = fun(t, y[:, None] + h_vecs)
384
+ df = f_new - f[:, None]
385
+
386
+ i, j, _ = find(structure)
387
+ diff = coo_matrix((df[i, groups[j]], (i, j)), shape=(n, n)).tocsc()
388
+ max_ind = np.array(abs(diff).argmax(axis=0)).ravel()
389
+ r = np.arange(n)
390
+ max_diff = np.asarray(np.abs(diff[max_ind, r])).ravel()
391
+ scale = np.maximum(np.abs(f[max_ind]),
392
+ np.abs(f_new[max_ind, groups[r]]))
393
+
394
+ diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale
395
+ if np.any(diff_too_small):
396
+ ind, = np.nonzero(diff_too_small)
397
+ new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]
398
+ h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]
399
+ h_new_all = np.zeros(n)
400
+ h_new_all[ind] = h_new
401
+
402
+ groups_unique = np.unique(groups[ind])
403
+ groups_map = np.empty(n_groups, dtype=int)
404
+ h_vecs = np.empty((groups_unique.shape[0], n))
405
+ for k, group in enumerate(groups_unique):
406
+ e = np.equal(group, groups)
407
+ h_vecs[k] = h_new_all * e
408
+ groups_map[group] = k
409
+ h_vecs = h_vecs.T
410
+
411
+ f_new = fun(t, y[:, None] + h_vecs)
412
+ df = f_new - f[:, None]
413
+ i, j, _ = find(structure[:, ind])
414
+ diff_new = coo_matrix((df[i, groups_map[groups[ind[j]]]],
415
+ (i, j)), shape=(n, ind.shape[0])).tocsc()
416
+
417
+ max_ind_new = np.array(abs(diff_new).argmax(axis=0)).ravel()
418
+ r = np.arange(ind.shape[0])
419
+ max_diff_new = np.asarray(np.abs(diff_new[max_ind_new, r])).ravel()
420
+ scale_new = np.maximum(
421
+ np.abs(f[max_ind_new]),
422
+ np.abs(f_new[max_ind_new, groups_map[groups[ind]]]))
423
+
424
+ update = max_diff[ind] * scale_new < max_diff_new * scale[ind]
425
+ if np.any(update):
426
+ update, = np.nonzero(update)
427
+ update_ind = ind[update]
428
+ factor[update_ind] = new_factor[update]
429
+ h[update_ind] = h_new[update]
430
+ diff[:, update_ind] = diff_new[:, update]
431
+ scale[update_ind] = scale_new[update]
432
+ max_diff[update_ind] = max_diff_new[update]
433
+
434
+ diff.data /= np.repeat(h, np.diff(diff.indptr))
435
+
436
+ factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE
437
+ factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE
438
+ factor = np.maximum(factor, NUM_JAC_MIN_FACTOR)
439
+
440
+ return diff, factor
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/dop853_coefficients.py ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Coefficients of the DOP853 method -- the explicit Runge-Kutta method of
# order 8(5, 3) by Dormand & Prince.  All values below are literal
# constants hard-coded from E. Hairer's reference Fortran code (dop853.f).
import numpy as np

N_STAGES = 12            # stages of the main 8th-order formula
N_STAGES_EXTENDED = 16   # plus 3 extra stages used only for dense output
INTERPOLATOR_POWER = 7   # degree of the dense-output polynomial

# Nodes c_i (time fractions) for all 16 stages; the last 3 entries belong
# to the dense-output extension.
C = np.array([0.0,
              0.526001519587677318785587544488e-01,
              0.789002279381515978178381316732e-01,
              0.118350341907227396726757197510,
              0.281649658092772603273242802490,
              0.333333333333333333333333333333,
              0.25,
              0.307692307692307692307692307692,
              0.651282051282051282051282051282,
              0.6,
              0.857142857142857142857142857142,
              1.0,
              1.0,
              0.1,
              0.2,
              0.777777777777777777777777777778])

# Runge-Kutta matrix a_ij (strictly lower triangular); rows 12-15 are the
# additional stages used only for the dense-output interpolant.
A = np.zeros((N_STAGES_EXTENDED, N_STAGES_EXTENDED))
A[1, 0] = 5.26001519587677318785587544488e-2

A[2, 0] = 1.97250569845378994544595329183e-2
A[2, 1] = 5.91751709536136983633785987549e-2

A[3, 0] = 2.95875854768068491816892993775e-2
A[3, 2] = 8.87627564304205475450678981324e-2

A[4, 0] = 2.41365134159266685502369798665e-1
A[4, 2] = -8.84549479328286085344864962717e-1
A[4, 3] = 9.24834003261792003115737966543e-1

A[5, 0] = 3.7037037037037037037037037037e-2
A[5, 3] = 1.70828608729473871279604482173e-1
A[5, 4] = 1.25467687566822425016691814123e-1

A[6, 0] = 3.7109375e-2
A[6, 3] = 1.70252211019544039314978060272e-1
A[6, 4] = 6.02165389804559606850219397283e-2
A[6, 5] = -1.7578125e-2

A[7, 0] = 3.70920001185047927108779319836e-2
A[7, 3] = 1.70383925712239993810214054705e-1
A[7, 4] = 1.07262030446373284651809199168e-1
A[7, 5] = -1.53194377486244017527936158236e-2
A[7, 6] = 8.27378916381402288758473766002e-3

A[8, 0] = 6.24110958716075717114429577812e-1
A[8, 3] = -3.36089262944694129406857109825
A[8, 4] = -8.68219346841726006818189891453e-1
A[8, 5] = 2.75920996994467083049415600797e1
A[8, 6] = 2.01540675504778934086186788979e1
A[8, 7] = -4.34898841810699588477366255144e1

A[9, 0] = 4.77662536438264365890433908527e-1
A[9, 3] = -2.48811461997166764192642586468
A[9, 4] = -5.90290826836842996371446475743e-1
A[9, 5] = 2.12300514481811942347288949897e1
A[9, 6] = 1.52792336328824235832596922938e1
A[9, 7] = -3.32882109689848629194453265587e1
A[9, 8] = -2.03312017085086261358222928593e-2

A[10, 0] = -9.3714243008598732571704021658e-1
A[10, 3] = 5.18637242884406370830023853209
A[10, 4] = 1.09143734899672957818500254654
A[10, 5] = -8.14978701074692612513997267357
A[10, 6] = -1.85200656599969598641566180701e1
A[10, 7] = 2.27394870993505042818970056734e1
A[10, 8] = 2.49360555267965238987089396762
A[10, 9] = -3.0467644718982195003823669022

A[11, 0] = 2.27331014751653820792359768449
A[11, 3] = -1.05344954667372501984066689879e1
A[11, 4] = -2.00087205822486249909675718444
A[11, 5] = -1.79589318631187989172765950534e1
A[11, 6] = 2.79488845294199600508499808837e1
A[11, 7] = -2.85899827713502369474065508674
A[11, 8] = -8.87285693353062954433549289258
A[11, 9] = 1.23605671757943030647266201528e1
A[11, 10] = 6.43392746015763530355970484046e-1

A[12, 0] = 5.42937341165687622380535766363e-2
A[12, 5] = 4.45031289275240888144113950566
A[12, 6] = 1.89151789931450038304281599044
A[12, 7] = -5.8012039600105847814672114227
A[12, 8] = 3.1116436695781989440891606237e-1
A[12, 9] = -1.52160949662516078556178806805e-1
A[12, 10] = 2.01365400804030348374776537501e-1
A[12, 11] = 4.47106157277725905176885569043e-2

A[13, 0] = 5.61675022830479523392909219681e-2
A[13, 6] = 2.53500210216624811088794765333e-1
A[13, 7] = -2.46239037470802489917441475441e-1
A[13, 8] = -1.24191423263816360469010140626e-1
A[13, 9] = 1.5329179827876569731206322685e-1
A[13, 10] = 8.20105229563468988491666602057e-3
A[13, 11] = 7.56789766054569976138603589584e-3
A[13, 12] = -8.298e-3

A[14, 0] = 3.18346481635021405060768473261e-2
A[14, 5] = 2.83009096723667755288322961402e-2
A[14, 6] = 5.35419883074385676223797384372e-2
A[14, 7] = -5.49237485713909884646569340306e-2
A[14, 10] = -1.08347328697249322858509316994e-4
A[14, 11] = 3.82571090835658412954920192323e-4
A[14, 12] = -3.40465008687404560802977114492e-4
A[14, 13] = 1.41312443674632500278074618366e-1

A[15, 0] = -4.28896301583791923408573538692e-1
A[15, 5] = -4.69762141536116384314449447206
A[15, 6] = 7.68342119606259904184240953878
A[15, 7] = 4.06898981839711007970213554331
A[15, 8] = 3.56727187455281109270669543021e-1
A[15, 12] = -1.39902416515901462129418009734e-3
A[15, 13] = 2.9475147891527723389556272149
A[15, 14] = -9.15095847217987001081870187138


# Weights b_i of the 8th-order solution: the 13th row of the tableau.
B = A[N_STAGES, :N_STAGES]

# Coefficients of the embedded 3rd-order error estimator, stored as a
# difference from B (matches the BHH coefficients in dop853.f).
E3 = np.zeros(N_STAGES + 1)
E3[:-1] = B.copy()
E3[0] -= 0.244094488188976377952755905512
E3[8] -= 0.733846688281611857341361741547
E3[11] -= 0.220588235294117647058823529412e-1

# Coefficients of the embedded 5th-order error estimator.
E5 = np.zeros(N_STAGES + 1)
E5[0] = 0.1312004499419488073250102996e-1
E5[5] = -0.1225156446376204440720569753e+1
E5[6] = -0.4957589496572501915214079952
E5[7] = 0.1664377182454986536961530415e+1
E5[8] = -0.3503288487499736816886487290
E5[9] = 0.3341791187130174790297318841
E5[10] = 0.8192320648511571246570742613e-1
E5[11] = -0.2235530786388629525884427845e-1

# Dense-output coefficients.  The first 3 interpolation coefficients are
# computed separately at run time; D holds the remaining 4 rows.
D = np.zeros((INTERPOLATOR_POWER - 3, N_STAGES_EXTENDED))
D[0, 0] = -0.84289382761090128651353491142e+1
D[0, 5] = 0.56671495351937776962531783590
D[0, 6] = -0.30689499459498916912797304727e+1
D[0, 7] = 0.23846676565120698287728149680e+1
D[0, 8] = 0.21170345824450282767155149946e+1
D[0, 9] = -0.87139158377797299206789907490
D[0, 10] = 0.22404374302607882758541771650e+1
D[0, 11] = 0.63157877876946881815570249290
D[0, 12] = -0.88990336451333310820698117400e-1
D[0, 13] = 0.18148505520854727256656404962e+2
D[0, 14] = -0.91946323924783554000451984436e+1
D[0, 15] = -0.44360363875948939664310572000e+1

D[1, 0] = 0.10427508642579134603413151009e+2
D[1, 5] = 0.24228349177525818288430175319e+3
D[1, 6] = 0.16520045171727028198505394887e+3
D[1, 7] = -0.37454675472269020279518312152e+3
D[1, 8] = -0.22113666853125306036270938578e+2
D[1, 9] = 0.77334326684722638389603898808e+1
D[1, 10] = -0.30674084731089398182061213626e+2
D[1, 11] = -0.93321305264302278729567221706e+1
D[1, 12] = 0.15697238121770843886131091075e+2
D[1, 13] = -0.31139403219565177677282850411e+2
D[1, 14] = -0.93529243588444783865713862664e+1
D[1, 15] = 0.35816841486394083752465898540e+2

D[2, 0] = 0.19985053242002433820987653617e+2
D[2, 5] = -0.38703730874935176555105901742e+3
D[2, 6] = -0.18917813819516756882830838328e+3
D[2, 7] = 0.52780815920542364900561016686e+3
D[2, 8] = -0.11573902539959630126141871134e+2
D[2, 9] = 0.68812326946963000169666922661e+1
D[2, 10] = -0.10006050966910838403183860980e+1
D[2, 11] = 0.77771377980534432092869265740
D[2, 12] = -0.27782057523535084065932004339e+1
D[2, 13] = -0.60196695231264120758267380846e+2
D[2, 14] = 0.84320405506677161018159903784e+2
D[2, 15] = 0.11992291136182789328035130030e+2

D[3, 0] = -0.25693933462703749003312586129e+2
D[3, 5] = -0.15418974869023643374053993627e+3
D[3, 6] = -0.23152937917604549567536039109e+3
D[3, 7] = 0.35763911791061412378285349910e+3
D[3, 8] = 0.93405324183624310003907691704e+2
D[3, 9] = -0.37458323136451633156875139351e+2
D[3, 10] = 0.10409964950896230045147246184e+3
D[3, 11] = 0.29840293426660503123344363579e+2
D[3, 12] = -0.43533456590011143754432175058e+2
D[3, 13] = 0.96324553959188282948394950600e+2
D[3, 14] = -0.39177261675615439165231486172e+2
D[3, 15] = -0.14972683625798562581422125276e+3
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/ivp.py ADDED
@@ -0,0 +1,748 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inspect
2
+ import numpy as np
3
+ from .bdf import BDF
4
+ from .radau import Radau
5
+ from .rk import RK23, RK45, DOP853
6
+ from .lsoda import LSODA
7
+ from scipy.optimize import OptimizeResult
8
+ from .common import EPS, OdeSolution
9
+ from .base import OdeSolver
10
+
11
+
12
# Mapping from the `method` string accepted by `solve_ivp` to the
# corresponding OdeSolver subclass that implements it.
METHODS = {'RK23': RK23,
           'RK45': RK45,
           'DOP853': DOP853,
           'Radau': Radau,
           'BDF': BDF,
           'LSODA': LSODA}


# Human-readable termination messages, keyed by the solver `status` code
# (0: reached the end of the interval, 1: a terminal event fired).
MESSAGES = {0: "The solver successfully reached the end of the integration interval.",
            1: "A termination event occurred."}


class OdeResult(OptimizeResult):
    """Bunch object returned by `solve_ivp`.

    A plain `OptimizeResult` (dict with attribute access); no behavior
    is added -- the subclass exists only to give the result its own type.
    """
    pass
26
+
27
+
28
def prepare_events(events):
    """Standardize event functions and extract their attributes.

    A single callable is promoted to a one-element tuple.  For every
    event, the optional ``terminal`` attribute is converted into a
    maximum-occurrence count (``np.inf`` when the event is not terminal)
    and the optional ``direction`` attribute defaults to 0.
    """
    # Promote a bare callable so the rest of the pipeline always
    # iterates over a sequence of events.
    if callable(events):
        events = (events,)

    n_events = len(events)
    max_events = np.empty(n_events)
    direction = np.empty(n_events)
    bad_terminal = ('The `terminal` attribute of each event '
                    'must be a boolean or positive integer.')
    for i, event in enumerate(events):
        direction[i] = getattr(event, 'direction', 0)
        terminal = getattr(event, 'terminal', None)

        if terminal is None or terminal == 0:
            # Non-terminal event: unlimited number of occurrences.
            max_events[i] = np.inf
        elif int(terminal) == terminal and terminal > 0:
            # Terminal after `terminal` occurrences (True counts as 1).
            max_events[i] = terminal
        else:
            raise ValueError(bad_terminal)

    return events, max_events, direction
49
+
50
+
51
def solve_event_equation(event, sol, t_old, t):
    """Find the time at which ``event(t, y(t)) == 0`` within one step.

    The solution ``y(t)`` is evaluated through the interpolant `sol`,
    and the root is located by `scipy.optimize.brentq` with
    ``xtol = rtol = 4 * EPS``.

    Parameters
    ----------
    event : callable
        Function ``event(t, y)``.
    sol : callable
        Function ``sol(t)`` which evaluates an ODE solution between
        `t_old` and `t`.
    t_old, t : float
        Previous and new values of time; they bracket the root.

    Returns
    -------
    root : float
        Found solution.
    """
    from scipy.optimize import brentq

    def residual(s):
        return event(s, sol(s))

    return brentq(residual, t_old, t, xtol=4 * EPS, rtol=4 * EPS)
77
+
78
+
79
def handle_events(sol, events, active_events, event_count, max_events,
                  t_old, t):
    """Locate event roots over a step and decide whether to terminate.

    Parameters
    ----------
    sol : DenseOutput
        Interpolant ``sol(t)`` valid between `t_old` and `t`.
    events : list of callables, length n_events
        Event functions with signatures ``event(t, y)``.
    active_events : ndarray
        Indices of events which occurred during the step.
    event_count : ndarray
        Current number of occurrences for each event.
    max_events : ndarray, shape (n_events,)
        Number of occurrences allowed for each event before integration
        termination is issued.
    t_old, t : float
        Previous and new values of time.

    Returns
    -------
    active_events : ndarray
        Indices of events which take zero between `t_old` and `t` and
        before a possible termination.
    roots : ndarray
        Values of t at which the events occurred.
    terminate : bool
        Whether a terminal event occurred.
    """
    roots = np.asarray([solve_event_equation(events[index], sol, t_old, t)
                        for index in active_events])

    if np.any(event_count[active_events] >= max_events[active_events]):
        # Order the events chronologically along the direction of
        # integration, then cut everything after the first event that
        # has exhausted its allowed number of occurrences.
        order = np.argsort(roots) if t > t_old else np.argsort(-roots)
        active_events = active_events[order]
        roots = roots[order]
        cutoff = np.nonzero(event_count[active_events]
                            >= max_events[active_events])[0][0]
        active_events = active_events[:cutoff + 1]
        roots = roots[:cutoff + 1]
        terminate = True
    else:
        terminate = False

    return active_events, roots, terminate
131
+
132
+
133
def find_active_events(g, g_new, direction):
    """Find which events occurred during an integration step.

    Parameters
    ----------
    g, g_new : array_like, shape (n_events,)
        Values of event functions at the current and the next point.
    direction : ndarray, shape (n_events,)
        Event "direction" according to the definition in `solve_ivp`.

    Returns
    -------
    active_events : ndarray
        Indices of events which occurred during the step.
    """
    g = np.asarray(g)
    g_new = np.asarray(g_new)

    # Sign-change masks over the step; an exact zero at either end
    # counts for both directions.
    crossed_up = (g <= 0) & (g_new >= 0)
    crossed_down = (g >= 0) & (g_new <= 0)
    crossed_any = crossed_up | crossed_down

    # Pick the mask each event cares about based on its direction.
    triggered = np.where(direction > 0, crossed_up,
                         np.where(direction < 0, crossed_down,
                                  crossed_any))
    return np.nonzero(triggered)[0]
157
+
158
+
159
+ def solve_ivp(fun, t_span, y0, method='RK45', t_eval=None, dense_output=False,
160
+ events=None, vectorized=False, args=None, **options):
161
+ """Solve an initial value problem for a system of ODEs.
162
+
163
+ This function numerically integrates a system of ordinary differential
164
+ equations given an initial value::
165
+
166
+ dy / dt = f(t, y)
167
+ y(t0) = y0
168
+
169
+ Here t is a 1-D independent variable (time), y(t) is an
170
+ N-D vector-valued function (state), and an N-D
171
+ vector-valued function f(t, y) determines the differential equations.
172
+ The goal is to find y(t) approximately satisfying the differential
173
+ equations, given an initial value y(t0)=y0.
174
+
175
+ Some of the solvers support integration in the complex domain, but note
176
+ that for stiff ODE solvers, the right-hand side must be
177
+ complex-differentiable (satisfy Cauchy-Riemann equations [11]_).
178
+ To solve a problem in the complex domain, pass y0 with a complex data type.
179
+ Another option always available is to rewrite your problem for real and
180
+ imaginary parts separately.
181
+
182
+ Parameters
183
+ ----------
184
+ fun : callable
185
+ Right-hand side of the system: the time derivative of the state ``y``
186
+ at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a
187
+ scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. Additional
188
+ arguments need to be passed if ``args`` is used (see documentation of
189
+ ``args`` argument). ``fun`` must return an array of the same shape as
190
+ ``y``. See `vectorized` for more information.
191
+ t_span : 2-member sequence
192
+ Interval of integration (t0, tf). The solver starts with t=t0 and
193
+ integrates until it reaches t=tf. Both t0 and tf must be floats
194
+ or values interpretable by the float conversion function.
195
+ y0 : array_like, shape (n,)
196
+ Initial state. For problems in the complex domain, pass `y0` with a
197
+ complex data type (even if the initial value is purely real).
198
+ method : string or `OdeSolver`, optional
199
+ Integration method to use:
200
+
201
+ * 'RK45' (default): Explicit Runge-Kutta method of order 5(4) [1]_.
202
+ The error is controlled assuming accuracy of the fourth-order
203
+ method, but steps are taken using the fifth-order accurate
204
+ formula (local extrapolation is done). A quartic interpolation
205
+ polynomial is used for the dense output [2]_. Can be applied in
206
+ the complex domain.
207
+ * 'RK23': Explicit Runge-Kutta method of order 3(2) [3]_. The error
208
+ is controlled assuming accuracy of the second-order method, but
209
+ steps are taken using the third-order accurate formula (local
210
+ extrapolation is done). A cubic Hermite polynomial is used for the
211
+ dense output. Can be applied in the complex domain.
212
+ * 'DOP853': Explicit Runge-Kutta method of order 8 [13]_.
213
+ Python implementation of the "DOP853" algorithm originally
214
+ written in Fortran [14]_. A 7-th order interpolation polynomial
215
+ accurate to 7-th order is used for the dense output.
216
+ Can be applied in the complex domain.
217
+ * 'Radau': Implicit Runge-Kutta method of the Radau IIA family of
218
+ order 5 [4]_. The error is controlled with a third-order accurate
219
+ embedded formula. A cubic polynomial which satisfies the
220
+ collocation conditions is used for the dense output.
221
+ * 'BDF': Implicit multi-step variable-order (1 to 5) method based
222
+ on a backward differentiation formula for the derivative
223
+ approximation [5]_. The implementation follows the one described
224
+ in [6]_. A quasi-constant step scheme is used and accuracy is
225
+ enhanced using the NDF modification. Can be applied in the
226
+ complex domain.
227
+ * 'LSODA': Adams/BDF method with automatic stiffness detection and
228
+ switching [7]_, [8]_. This is a wrapper of the Fortran solver
229
+ from ODEPACK.
230
+
231
+ Explicit Runge-Kutta methods ('RK23', 'RK45', 'DOP853') should be used
232
+ for non-stiff problems and implicit methods ('Radau', 'BDF') for
233
+ stiff problems [9]_. Among Runge-Kutta methods, 'DOP853' is recommended
234
+ for solving with high precision (low values of `rtol` and `atol`).
235
+
236
+ If not sure, first try to run 'RK45'. If it makes unusually many
237
+ iterations, diverges, or fails, your problem is likely to be stiff and
238
+ you should use 'Radau' or 'BDF'. 'LSODA' can also be a good universal
239
+ choice, but it might be somewhat less convenient to work with as it
240
+ wraps old Fortran code.
241
+
242
+ You can also pass an arbitrary class derived from `OdeSolver` which
243
+ implements the solver.
244
+ t_eval : array_like or None, optional
245
+ Times at which to store the computed solution, must be sorted and lie
246
+ within `t_span`. If None (default), use points selected by the solver.
247
+ dense_output : bool, optional
248
+ Whether to compute a continuous solution. Default is False.
249
+ events : callable, or list of callables, optional
250
+ Events to track. If None (default), no events will be tracked.
251
+ Each event occurs at the zeros of a continuous function of time and
252
+ state. Each function must have the signature ``event(t, y)`` where
253
+ additional argument have to be passed if ``args`` is used (see
254
+ documentation of ``args`` argument). Each function must return a
255
+ float. The solver will find an accurate value of `t` at which
256
+ ``event(t, y(t)) = 0`` using a root-finding algorithm. By default,
257
+ all zeros will be found. The solver looks for a sign change over
258
+ each step, so if multiple zero crossings occur within one step,
259
+ events may be missed. Additionally each `event` function might
260
+ have the following attributes:
261
+
262
+ terminal: bool or int, optional
263
+ When boolean, whether to terminate integration if this event occurs.
264
+ When integral, termination occurs after the specified the number of
265
+ occurences of this event.
266
+ Implicitly False if not assigned.
267
+ direction: float, optional
268
+ Direction of a zero crossing. If `direction` is positive,
269
+ `event` will only trigger when going from negative to positive,
270
+ and vice versa if `direction` is negative. If 0, then either
271
+ direction will trigger event. Implicitly 0 if not assigned.
272
+
273
+ You can assign attributes like ``event.terminal = True`` to any
274
+ function in Python.
275
+ vectorized : bool, optional
276
+ Whether `fun` can be called in a vectorized fashion. Default is False.
277
+
278
+ If ``vectorized`` is False, `fun` will always be called with ``y`` of
279
+ shape ``(n,)``, where ``n = len(y0)``.
280
+
281
+ If ``vectorized`` is True, `fun` may be called with ``y`` of shape
282
+ ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave
283
+ such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of
284
+ the returned array is the time derivative of the state corresponding
285
+ with a column of ``y``).
286
+
287
+ Setting ``vectorized=True`` allows for faster finite difference
288
+ approximation of the Jacobian by methods 'Radau' and 'BDF', but
289
+ will result in slower execution for other methods and for 'Radau' and
290
+ 'BDF' in some circumstances (e.g. small ``len(y0)``).
291
+ args : tuple, optional
292
+ Additional arguments to pass to the user-defined functions. If given,
293
+ the additional arguments are passed to all user-defined functions.
294
+ So if, for example, `fun` has the signature ``fun(t, y, a, b, c)``,
295
+ then `jac` (if given) and any event functions must have the same
296
+ signature, and `args` must be a tuple of length 3.
297
+ **options
298
+ Options passed to a chosen solver. All options available for already
299
+ implemented solvers are listed below.
300
+ first_step : float or None, optional
301
+ Initial step size. Default is `None` which means that the algorithm
302
+ should choose.
303
+ max_step : float, optional
304
+ Maximum allowed step size. Default is np.inf, i.e., the step size is not
305
+ bounded and determined solely by the solver.
306
+ rtol, atol : float or array_like, optional
307
+ Relative and absolute tolerances. The solver keeps the local error
308
+ estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
309
+ relative accuracy (number of correct digits), while `atol` controls
310
+ absolute accuracy (number of correct decimal places). To achieve the
311
+ desired `rtol`, set `atol` to be smaller than the smallest value that
312
+ can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
313
+ allowable error. If `atol` is larger than ``rtol * abs(y)`` the
314
+ number of correct digits is not guaranteed. Conversely, to achieve the
315
+ desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
316
+ than `atol`. If components of y have different scales, it might be
317
+ beneficial to set different `atol` values for different components by
318
+ passing array_like with shape (n,) for `atol`. Default values are
319
+ 1e-3 for `rtol` and 1e-6 for `atol`.
320
+ jac : array_like, sparse_matrix, callable or None, optional
321
+ Jacobian matrix of the right-hand side of the system with respect
322
+ to y, required by the 'Radau', 'BDF' and 'LSODA' method. The
323
+ Jacobian matrix has shape (n, n) and its element (i, j) is equal to
324
+ ``d f_i / d y_j``. There are three ways to define the Jacobian:
325
+
326
+ * If array_like or sparse_matrix, the Jacobian is assumed to
327
+ be constant. Not supported by 'LSODA'.
328
+ * If callable, the Jacobian is assumed to depend on both
329
+ t and y; it will be called as ``jac(t, y)``, as necessary.
330
+ Additional arguments have to be passed if ``args`` is
331
+ used (see documentation of ``args`` argument).
332
+ For 'Radau' and 'BDF' methods, the return value might be a
333
+ sparse matrix.
334
+ * If None (default), the Jacobian will be approximated by
335
+ finite differences.
336
+
337
+ It is generally recommended to provide the Jacobian rather than
338
+ relying on a finite-difference approximation.
339
+ jac_sparsity : array_like, sparse matrix or None, optional
340
+ Defines a sparsity structure of the Jacobian matrix for a finite-
341
+ difference approximation. Its shape must be (n, n). This argument
342
+ is ignored if `jac` is not `None`. If the Jacobian has only few
343
+ non-zero elements in *each* row, providing the sparsity structure
344
+ will greatly speed up the computations [10]_. A zero entry means that
345
+ a corresponding element in the Jacobian is always zero. If None
346
+ (default), the Jacobian is assumed to be dense.
347
+ Not supported by 'LSODA', see `lband` and `uband` instead.
348
+ lband, uband : int or None, optional
349
+ Parameters defining the bandwidth of the Jacobian for the 'LSODA'
350
+ method, i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``.
351
+ Default is None. Setting these requires your jac routine to return the
352
+ Jacobian in the packed format: the returned array must have ``n``
353
+ columns and ``uband + lband + 1`` rows in which Jacobian diagonals are
354
+ written. Specifically ``jac_packed[uband + i - j , j] = jac[i, j]``.
355
+ The same format is used in `scipy.linalg.solve_banded` (check for an
356
+ illustration). These parameters can be also used with ``jac=None`` to
357
+ reduce the number of Jacobian elements estimated by finite differences.
358
+ min_step : float, optional
359
+ The minimum allowed step size for 'LSODA' method.
360
+ By default `min_step` is zero.
361
+
362
+ Returns
363
+ -------
364
+ Bunch object with the following fields defined:
365
+ t : ndarray, shape (n_points,)
366
+ Time points.
367
+ y : ndarray, shape (n, n_points)
368
+ Values of the solution at `t`.
369
+ sol : `OdeSolution` or None
370
+ Found solution as `OdeSolution` instance; None if `dense_output` was
371
+ set to False.
372
+ t_events : list of ndarray or None
373
+ Contains for each event type a list of arrays at which an event of
374
+ that type event was detected. None if `events` was None.
375
+ y_events : list of ndarray or None
376
+ For each value of `t_events`, the corresponding value of the solution.
377
+ None if `events` was None.
378
+ nfev : int
379
+ Number of evaluations of the right-hand side.
380
+ njev : int
381
+ Number of evaluations of the Jacobian.
382
+ nlu : int
383
+ Number of LU decompositions.
384
+ status : int
385
+ Reason for algorithm termination:
386
+
387
+ * -1: Integration step failed.
388
+ * 0: The solver successfully reached the end of `tspan`.
389
+ * 1: A termination event occurred.
390
+
391
+ message : string
392
+ Human-readable description of the termination reason.
393
+ success : bool
394
+ True if the solver reached the interval end or a termination event
395
+ occurred (``status >= 0``).
396
+
397
+ References
398
+ ----------
399
+ .. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
400
+ formulae", Journal of Computational and Applied Mathematics, Vol. 6,
401
+ No. 1, pp. 19-26, 1980.
402
+ .. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
403
+ of Computation,, Vol. 46, No. 173, pp. 135-150, 1986.
404
+ .. [3] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas",
405
+ Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.
406
+ .. [4] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II:
407
+ Stiff and Differential-Algebraic Problems", Sec. IV.8.
408
+ .. [5] `Backward Differentiation Formula
409
+ <https://en.wikipedia.org/wiki/Backward_differentiation_formula>`_
410
+ on Wikipedia.
411
+ .. [6] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI.
412
+ COMPUTE., Vol. 18, No. 1, pp. 1-22, January 1997.
413
+ .. [7] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE
414
+ Solvers," IMACS Transactions on Scientific Computation, Vol 1.,
415
+ pp. 55-64, 1983.
416
+ .. [8] L. Petzold, "Automatic selection of methods for solving stiff and
417
+ nonstiff systems of ordinary differential equations", SIAM Journal
418
+ on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148,
419
+ 1983.
420
+ .. [9] `Stiff equation <https://en.wikipedia.org/wiki/Stiff_equation>`_ on
421
+ Wikipedia.
422
+ .. [10] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
423
+ sparse Jacobian matrices", Journal of the Institute of Mathematics
424
+ and its Applications, 13, pp. 117-120, 1974.
425
+ .. [11] `Cauchy-Riemann equations
426
+ <https://en.wikipedia.org/wiki/Cauchy-Riemann_equations>`_ on
427
+ Wikipedia.
428
+ .. [12] `Lotka-Volterra equations
429
+ <https://en.wikipedia.org/wiki/Lotka%E2%80%93Volterra_equations>`_
430
+ on Wikipedia.
431
+ .. [13] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential
432
+ Equations I: Nonstiff Problems", Sec. II.
433
+ .. [14] `Page with original Fortran code of DOP853
434
+ <http://www.unige.ch/~hairer/software.html>`_.
435
+
436
+ Examples
437
+ --------
438
+ Basic exponential decay showing automatically chosen time points.
439
+
440
+ >>> import numpy as np
441
+ >>> from scipy.integrate import solve_ivp
442
+ >>> def exponential_decay(t, y): return -0.5 * y
443
+ >>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8])
444
+ >>> print(sol.t)
445
+ [ 0. 0.11487653 1.26364188 3.06061781 4.81611105 6.57445806
446
+ 8.33328988 10. ]
447
+ >>> print(sol.y)
448
+ [[2. 1.88836035 1.06327177 0.43319312 0.18017253 0.07483045
449
+ 0.03107158 0.01350781]
450
+ [4. 3.7767207 2.12654355 0.86638624 0.36034507 0.14966091
451
+ 0.06214316 0.02701561]
452
+ [8. 7.5534414 4.25308709 1.73277247 0.72069014 0.29932181
453
+ 0.12428631 0.05403123]]
454
+
455
+ Specifying points where the solution is desired.
456
+
457
+ >>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8],
458
+ ... t_eval=[0, 1, 2, 4, 10])
459
+ >>> print(sol.t)
460
+ [ 0 1 2 4 10]
461
+ >>> print(sol.y)
462
+ [[2. 1.21305369 0.73534021 0.27066736 0.01350938]
463
+ [4. 2.42610739 1.47068043 0.54133472 0.02701876]
464
+ [8. 4.85221478 2.94136085 1.08266944 0.05403753]]
465
+
466
+ Cannon fired upward with terminal event upon impact. The ``terminal`` and
467
+ ``direction`` fields of an event are applied by monkey patching a function.
468
+ Here ``y[0]`` is position and ``y[1]`` is velocity. The projectile starts
469
+ at position 0 with velocity +10. Note that the integration never reaches
470
+ t=100 because the event is terminal.
471
+
472
+ >>> def upward_cannon(t, y): return [y[1], -0.5]
473
+ >>> def hit_ground(t, y): return y[0]
474
+ >>> hit_ground.terminal = True
475
+ >>> hit_ground.direction = -1
476
+ >>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10], events=hit_ground)
477
+ >>> print(sol.t_events)
478
+ [array([40.])]
479
+ >>> print(sol.t)
480
+ [0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02
481
+ 1.11088891e-01 1.11098890e+00 1.11099890e+01 4.00000000e+01]
482
+
483
+ Use `dense_output` and `events` to find position, which is 100, at the apex
484
+ of the cannonball's trajectory. Apex is not defined as terminal, so both
485
+ apex and hit_ground are found. There is no information at t=20, so the sol
486
+ attribute is used to evaluate the solution. The sol attribute is returned
487
+ by setting ``dense_output=True``. Alternatively, the `y_events` attribute
488
+ can be used to access the solution at the time of the event.
489
+
490
+ >>> def apex(t, y): return y[1]
491
+ >>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10],
492
+ ... events=(hit_ground, apex), dense_output=True)
493
+ >>> print(sol.t_events)
494
+ [array([40.]), array([20.])]
495
+ >>> print(sol.t)
496
+ [0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02
497
+ 1.11088891e-01 1.11098890e+00 1.11099890e+01 4.00000000e+01]
498
+ >>> print(sol.sol(sol.t_events[1][0]))
499
+ [100. 0.]
500
+ >>> print(sol.y_events)
501
+ [array([[-5.68434189e-14, -1.00000000e+01]]),
502
+ array([[1.00000000e+02, 1.77635684e-15]])]
503
+
504
+ As an example of a system with additional parameters, we'll implement
505
+ the Lotka-Volterra equations [12]_.
506
+
507
+ >>> def lotkavolterra(t, z, a, b, c, d):
508
+ ... x, y = z
509
+ ... return [a*x - b*x*y, -c*y + d*x*y]
510
+ ...
511
+
512
+ We pass in the parameter values a=1.5, b=1, c=3 and d=1 with the `args`
513
+ argument.
514
+
515
+ >>> sol = solve_ivp(lotkavolterra, [0, 15], [10, 5], args=(1.5, 1, 3, 1),
516
+ ... dense_output=True)
517
+
518
+ Compute a dense solution and plot it.
519
+
520
+ >>> t = np.linspace(0, 15, 300)
521
+ >>> z = sol.sol(t)
522
+ >>> import matplotlib.pyplot as plt
523
+ >>> plt.plot(t, z.T)
524
+ >>> plt.xlabel('t')
525
+ >>> plt.legend(['x', 'y'], shadow=True)
526
+ >>> plt.title('Lotka-Volterra System')
527
+ >>> plt.show()
528
+
529
+ A couple examples of using solve_ivp to solve the differential
530
+ equation ``y' = Ay`` with complex matrix ``A``.
531
+
532
+ >>> A = np.array([[-0.25 + 0.14j, 0, 0.33 + 0.44j],
533
+ ... [0.25 + 0.58j, -0.2 + 0.14j, 0],
534
+ ... [0, 0.2 + 0.4j, -0.1 + 0.97j]])
535
+
536
+ Solving an IVP with ``A`` from above and ``y`` as 3x1 vector:
537
+
538
+ >>> def deriv_vec(t, y):
539
+ ... return A @ y
540
+ >>> result = solve_ivp(deriv_vec, [0, 25],
541
+ ... np.array([10 + 0j, 20 + 0j, 30 + 0j]),
542
+ ... t_eval=np.linspace(0, 25, 101))
543
+ >>> print(result.y[:, 0])
544
+ [10.+0.j 20.+0.j 30.+0.j]
545
+ >>> print(result.y[:, -1])
546
+ [18.46291039+45.25653651j 10.01569306+36.23293216j
547
+ -4.98662741+80.07360388j]
548
+
549
+ Solving an IVP with ``A`` from above with ``y`` as 3x3 matrix :
550
+
551
+ >>> def deriv_mat(t, y):
552
+ ... return (A @ y.reshape(3, 3)).flatten()
553
+ >>> y0 = np.array([[2 + 0j, 3 + 0j, 4 + 0j],
554
+ ... [5 + 0j, 6 + 0j, 7 + 0j],
555
+ ... [9 + 0j, 34 + 0j, 78 + 0j]])
556
+
557
+ >>> result = solve_ivp(deriv_mat, [0, 25], y0.flatten(),
558
+ ... t_eval=np.linspace(0, 25, 101))
559
+ >>> print(result.y[:, 0].reshape(3, 3))
560
+ [[ 2.+0.j 3.+0.j 4.+0.j]
561
+ [ 5.+0.j 6.+0.j 7.+0.j]
562
+ [ 9.+0.j 34.+0.j 78.+0.j]]
563
+ >>> print(result.y[:, -1].reshape(3, 3))
564
+ [[ 5.67451179 +12.07938445j 17.2888073 +31.03278837j
565
+ 37.83405768 +63.25138759j]
566
+ [ 3.39949503 +11.82123994j 21.32530996 +44.88668871j
567
+ 53.17531184+103.80400411j]
568
+ [ -2.26105874 +22.19277664j -15.1255713 +70.19616341j
569
+ -38.34616845+153.29039931j]]
570
+
571
+
572
+ """
573
+ if method not in METHODS and not (
574
+ inspect.isclass(method) and issubclass(method, OdeSolver)):
575
+ raise ValueError(f"`method` must be one of {METHODS} or OdeSolver class.")
576
+
577
+ t0, tf = map(float, t_span)
578
+
579
+ if args is not None:
580
+ # Wrap the user's fun (and jac, if given) in lambdas to hide the
581
+ # additional parameters. Pass in the original fun as a keyword
582
+ # argument to keep it in the scope of the lambda.
583
+ try:
584
+ _ = [*(args)]
585
+ except TypeError as exp:
586
+ suggestion_tuple = (
587
+ "Supplied 'args' cannot be unpacked. Please supply `args`"
588
+ f" as a tuple (e.g. `args=({args},)`)"
589
+ )
590
+ raise TypeError(suggestion_tuple) from exp
591
+
592
+ def fun(t, x, fun=fun):
593
+ return fun(t, x, *args)
594
+ jac = options.get('jac')
595
+ if callable(jac):
596
+ options['jac'] = lambda t, x: jac(t, x, *args)
597
+
598
+ if t_eval is not None:
599
+ t_eval = np.asarray(t_eval)
600
+ if t_eval.ndim != 1:
601
+ raise ValueError("`t_eval` must be 1-dimensional.")
602
+
603
+ if np.any(t_eval < min(t0, tf)) or np.any(t_eval > max(t0, tf)):
604
+ raise ValueError("Values in `t_eval` are not within `t_span`.")
605
+
606
+ d = np.diff(t_eval)
607
+ if tf > t0 and np.any(d <= 0) or tf < t0 and np.any(d >= 0):
608
+ raise ValueError("Values in `t_eval` are not properly sorted.")
609
+
610
+ if tf > t0:
611
+ t_eval_i = 0
612
+ else:
613
+ # Make order of t_eval decreasing to use np.searchsorted.
614
+ t_eval = t_eval[::-1]
615
+ # This will be an upper bound for slices.
616
+ t_eval_i = t_eval.shape[0]
617
+
618
+ if method in METHODS:
619
+ method = METHODS[method]
620
+
621
+ solver = method(fun, t0, y0, tf, vectorized=vectorized, **options)
622
+
623
+ if t_eval is None:
624
+ ts = [t0]
625
+ ys = [y0]
626
+ elif t_eval is not None and dense_output:
627
+ ts = []
628
+ ti = [t0]
629
+ ys = []
630
+ else:
631
+ ts = []
632
+ ys = []
633
+
634
+ interpolants = []
635
+
636
+ if events is not None:
637
+ events, max_events, event_dir = prepare_events(events)
638
+ event_count = np.zeros(len(events))
639
+ if args is not None:
640
+ # Wrap user functions in lambdas to hide the additional parameters.
641
+ # The original event function is passed as a keyword argument to the
642
+ # lambda to keep the original function in scope (i.e., avoid the
643
+ # late binding closure "gotcha").
644
+ events = [lambda t, x, event=event: event(t, x, *args)
645
+ for event in events]
646
+ g = [event(t0, y0) for event in events]
647
+ t_events = [[] for _ in range(len(events))]
648
+ y_events = [[] for _ in range(len(events))]
649
+ else:
650
+ t_events = None
651
+ y_events = None
652
+
653
+ status = None
654
+ while status is None:
655
+ message = solver.step()
656
+
657
+ if solver.status == 'finished':
658
+ status = 0
659
+ elif solver.status == 'failed':
660
+ status = -1
661
+ break
662
+
663
+ t_old = solver.t_old
664
+ t = solver.t
665
+ y = solver.y
666
+
667
+ if dense_output:
668
+ sol = solver.dense_output()
669
+ interpolants.append(sol)
670
+ else:
671
+ sol = None
672
+
673
+ if events is not None:
674
+ g_new = [event(t, y) for event in events]
675
+ active_events = find_active_events(g, g_new, event_dir)
676
+ if active_events.size > 0:
677
+ if sol is None:
678
+ sol = solver.dense_output()
679
+
680
+ event_count[active_events] += 1
681
+ root_indices, roots, terminate = handle_events(
682
+ sol, events, active_events, event_count, max_events,
683
+ t_old, t)
684
+
685
+ for e, te in zip(root_indices, roots):
686
+ t_events[e].append(te)
687
+ y_events[e].append(sol(te))
688
+
689
+ if terminate:
690
+ status = 1
691
+ t = roots[-1]
692
+ y = sol(t)
693
+
694
+ g = g_new
695
+
696
+ if t_eval is None:
697
+ ts.append(t)
698
+ ys.append(y)
699
+ else:
700
+ # The value in t_eval equal to t will be included.
701
+ if solver.direction > 0:
702
+ t_eval_i_new = np.searchsorted(t_eval, t, side='right')
703
+ t_eval_step = t_eval[t_eval_i:t_eval_i_new]
704
+ else:
705
+ t_eval_i_new = np.searchsorted(t_eval, t, side='left')
706
+ # It has to be done with two slice operations, because
707
+ # you can't slice to 0th element inclusive using backward
708
+ # slicing.
709
+ t_eval_step = t_eval[t_eval_i_new:t_eval_i][::-1]
710
+
711
+ if t_eval_step.size > 0:
712
+ if sol is None:
713
+ sol = solver.dense_output()
714
+ ts.append(t_eval_step)
715
+ ys.append(sol(t_eval_step))
716
+ t_eval_i = t_eval_i_new
717
+
718
+ if t_eval is not None and dense_output:
719
+ ti.append(t)
720
+
721
+ message = MESSAGES.get(status, message)
722
+
723
+ if t_events is not None:
724
+ t_events = [np.asarray(te) for te in t_events]
725
+ y_events = [np.asarray(ye) for ye in y_events]
726
+
727
+ if t_eval is None:
728
+ ts = np.array(ts)
729
+ ys = np.vstack(ys).T
730
+ elif ts:
731
+ ts = np.hstack(ts)
732
+ ys = np.hstack(ys)
733
+
734
+ if dense_output:
735
+ if t_eval is None:
736
+ sol = OdeSolution(
737
+ ts, interpolants, alt_segment=True if method in [BDF, LSODA] else False
738
+ )
739
+ else:
740
+ sol = OdeSolution(
741
+ ti, interpolants, alt_segment=True if method in [BDF, LSODA] else False
742
+ )
743
+ else:
744
+ sol = None
745
+
746
+ return OdeResult(t=ts, y=ys, sol=sol, t_events=t_events, y_events=y_events,
747
+ nfev=solver.nfev, njev=solver.njev, nlu=solver.nlu,
748
+ status=status, message=message, success=status >= 0)
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/lsoda.py ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from scipy.integrate import ode
3
+ from .common import validate_tol, validate_first_step, warn_extraneous
4
+ from .base import OdeSolver, DenseOutput
5
+
6
+
7
class LSODA(OdeSolver):
    """Adams/BDF method with automatic stiffness detection and switching.

    This is a wrapper to the Fortran solver from ODEPACK [1]_. It switches
    automatically between the nonstiff Adams method and the stiff BDF method.
    The method was originally detailed in [2]_.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system: the time derivative of the state ``y``
        at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a
        scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must
        return an array of the same shape as ``y``. See `vectorized` for more
        information.
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    min_step : float, optional
        Minimum allowed step size. Default is 0.0, i.e., the step size is not
        bounded and determined solely by the solver.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is not
        bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits), while `atol` controls
        absolute accuracy (number of correct decimal places). To achieve the
        desired `rtol`, set `atol` to be smaller than the smallest value that
        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
        number of correct digits is not guaranteed. Conversely, to achieve the
        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
        than `atol`. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    jac : None or callable, optional
        Jacobian matrix of the right-hand side of the system with respect to
        ``y``. The Jacobian matrix has shape (n, n) and its element (i, j) is
        equal to ``d f_i / d y_j``. The function will be called as
        ``jac(t, y)``. If None (default), the Jacobian will be
        approximated by finite differences. It is generally recommended to
        provide the Jacobian rather than relying on a finite-difference
        approximation.
    lband, uband : int or None
        Parameters defining the bandwidth of the Jacobian,
        i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``. Setting
        these requires your jac routine to return the Jacobian in the packed format:
        the returned array must have ``n`` columns and ``uband + lband + 1``
        rows in which Jacobian diagonals are written. Specifically
        ``jac_packed[uband + i - j , j] = jac[i, j]``. The same format is used
        in `scipy.linalg.solve_banded` (check for an illustration).
        These parameters can be also used with ``jac=None`` to reduce the
        number of Jacobian elements estimated by finite differences.
    vectorized : bool, optional
        Whether `fun` may be called in a vectorized fashion. False (default)
        is recommended for this solver.

        If ``vectorized`` is False, `fun` will always be called with ``y`` of
        shape ``(n,)``, where ``n = len(y0)``.

        If ``vectorized`` is True, `fun` may be called with ``y`` of shape
        ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave
        such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of
        the returned array is the time derivative of the state corresponding
        with a column of ``y``).

        Setting ``vectorized=True`` allows for faster finite difference
        approximation of the Jacobian by methods 'Radau' and 'BDF', but
        will result in slower execution for this solver.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    nfev : int
        Number of evaluations of the right-hand side.
    njev : int
        Number of evaluations of the Jacobian.

    References
    ----------
    .. [1] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE
           Solvers," IMACS Transactions on Scientific Computation, Vol 1.,
           pp. 55-64, 1983.
    .. [2] L. Petzold, "Automatic selection of methods for solving stiff and
           nonstiff systems of ordinary differential equations", SIAM Journal
           on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148,
           1983.
    """
    def __init__(self, fun, t0, y0, t_bound, first_step=None, min_step=0.0,
                 max_step=np.inf, rtol=1e-3, atol=1e-6, jac=None, lband=None,
                 uband=None, vectorized=False, **extraneous):
        warn_extraneous(extraneous)
        super().__init__(fun, t0, y0, t_bound, vectorized)

        if first_step is None:
            first_step = 0  # LSODA value for automatic selection.
        else:
            first_step = validate_first_step(first_step, t0, t_bound)

        # LSODA expects a signed step; self.direction is +1 or -1.
        first_step *= self.direction

        if max_step == np.inf:
            max_step = 0  # LSODA value for infinity.
        elif max_step <= 0:
            raise ValueError("`max_step` must be positive.")

        if min_step < 0:
            raise ValueError("`min_step` must be nonnegative.")

        rtol, atol = validate_tol(rtol, atol, self.n)

        # Delegate the actual integration to the legacy `ode` wrapper around
        # the Fortran LSODA routine.
        solver = ode(self.fun, jac)
        solver.set_integrator('lsoda', rtol=rtol, atol=atol, max_step=max_step,
                              min_step=min_step, first_step=first_step,
                              lband=lband, uband=uband)
        solver.set_initial_value(y0, t0)

        # Inject t_bound into rwork array as needed for itask=5.
        solver._integrator.rwork[0] = self.t_bound
        solver._integrator.call_args[4] = solver._integrator.rwork

        self._lsoda_solver = solver

    def _step_impl(self):
        """Advance the solution by one internal LSODA step.

        Returns ``(success, message)`` as required by the `OdeSolver` step
        protocol.
        """
        solver = self._lsoda_solver
        integrator = solver._integrator

        # From lsoda.step and lsoda.integrate itask=5 means take a single
        # step and do not go past t_bound. The original itask is restored
        # after the call so the wrapped solver's state is left unchanged.
        itask = integrator.call_args[2]
        integrator.call_args[2] = 5
        solver._y, solver.t = integrator.run(
            solver.f, solver.jac or (lambda: None), solver._y, solver.t,
            self.t_bound, solver.f_params, solver.jac_params)
        integrator.call_args[2] = itask

        if solver.successful():
            self.t = solver.t
            self.y = solver._y
            # From LSODA Fortran source njev is equal to nlu.
            self.njev = integrator.iwork[12]
            self.nlu = integrator.iwork[12]
            return True, None
        else:
            return False, 'Unexpected istate in LSODA.'

    def _dense_output_impl(self):
        """Build an interpolant for the last completed step.

        Extracts the Nordsieck history array from LSODA's work arrays and
        wraps it in a `LsodaDenseOutput`.
        """
        iwork = self._lsoda_solver._integrator.iwork
        rwork = self._lsoda_solver._integrator.rwork

        # We want to produce the Nordsieck history array, yh, up to the order
        # used in the last successful iteration. The step size is unimportant
        # because it will be scaled out in LsodaDenseOutput. Some additional
        # work may be required because ODEPACK's LSODA implementation produces
        # the Nordsieck history in the state needed for the next iteration.

        # iwork[13] contains order from last successful iteration, while
        # iwork[14] contains order to be attempted next.
        order = iwork[13]

        # rwork[11] contains the step size to be attempted next, while
        # rwork[10] contains step size from last successful iteration.
        h = rwork[11]

        # rwork[20:20 + (iwork[14] + 1) * self.n] contains entries of the
        # Nordsieck array in state needed for next iteration. We want
        # the entries up to order for the last successful step so use the
        # following.
        yh = np.reshape(rwork[20:20 + (order + 1) * self.n],
                        (self.n, order + 1), order='F').copy()
        if iwork[14] < order:
            # If the order is set to decrease then the final column of yh
            # has not been updated within ODEPACK's LSODA
            # implementation because this column will not be used in the
            # next iteration. We must rescale this column to make the
            # associated step size consistent with the other columns.
            yh[:, -1] *= (h / rwork[10]) ** order

        return LsodaDenseOutput(self.t_old, self.t, h, order, yh)
209
+
210
+
211
class LsodaDenseOutput(DenseOutput):
    """Local interpolant built from LSODA's Nordsieck history array.

    The columns of ``yh`` are the scaled derivatives of the solution at the
    step endpoint ``t``; evaluating the interpolant amounts to summing the
    truncated Taylor-like series in the normalized offset ``(t - self.t) / h``.
    """

    def __init__(self, t_old, t, h, order, yh):
        super().__init__(t_old, t)
        self.h = h
        self.yh = yh
        # Exponents 0..order applied to the normalized time offset.
        self.p = np.arange(order + 1)

    def _call_impl(self, t):
        # Normalized offset from the step endpoint; negative inside the step.
        tau = (t - self.t) / self.h
        # For an array of evaluation times, broadcast exponents over a new
        # trailing axis so each column of the result matches one time.
        exponents = self.p if t.ndim == 0 else self.p[:, None]
        return self.yh @ tau ** exponents
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/radau.py ADDED
@@ -0,0 +1,574 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from scipy.linalg import lu_factor, lu_solve
3
+ from scipy.sparse import csc_matrix, issparse, eye
4
+ from scipy.sparse.linalg import splu
5
+ from scipy.optimize._numdiff import group_columns
6
+ from .common import (validate_max_step, validate_tol, select_initial_step,
7
+ norm, num_jac, EPS, warn_extraneous,
8
+ validate_first_step)
9
+ from .base import OdeSolver, DenseOutput
10
+
11
S6 = 6 ** 0.5  # sqrt(6), recurring factor in the Radau IIA coefficients.

# Butcher tableau. A is not used directly, see below.
C = np.array([(4 - S6) / 10, (4 + S6) / 10, 1])
E = np.array([-13 - 7 * S6, -13 + 7 * S6, -1]) / 3

# Eigendecomposition of A is done: A = T L T**-1. There is 1 real eigenvalue
# and a complex conjugate pair. They are written below.
MU_REAL = 3 + 3 ** (2 / 3) - 3 ** (1 / 3)
MU_COMPLEX = (3 + 0.5 * (3 ** (1 / 3) - 3 ** (2 / 3))
              - 0.5j * (3 ** (5 / 6) + 3 ** (7 / 6)))

# These are transformation matrices.
T = np.array([
    [0.09443876248897524, -0.14125529502095421, 0.03002919410514742],
    [0.25021312296533332, 0.20412935229379994, -0.38294211275726192],
    [1, 1, 0]])
TI = np.array([
    [4.17871859155190428, 0.32768282076106237, 0.52337644549944951],
    [-4.17871859155190428, -0.32768282076106237, 0.47662355450055044],
    [0.50287263494578682, -2.57192694985560522, 0.59603920482822492]])
# These linear combinations are used in the algorithm.
TI_REAL = TI[0]
TI_COMPLEX = TI[1] + 1j * TI[2]

# Interpolator coefficients.
P = np.array([
    [13/3 + 7*S6/3, -23/3 - 22*S6/3, 10/3 + 5 * S6],
    [13/3 - 7*S6/3, -23/3 + 22*S6/3, 10/3 - 5 * S6],
    [1/3, -8/3, 10/3]])


NEWTON_MAXITER = 6  # Maximum number of Newton iterations.
MIN_FACTOR = 0.2  # Minimum allowed decrease in a step size.
MAX_FACTOR = 10  # Maximum allowed increase in a step size.
46
+
47
+
48
def solve_collocation_system(fun, t, y, h, Z0, scale, tol,
                             LU_real, LU_complex, solve_lu):
    """Solve the collocation system.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system.
    t : float
        Current time.
    y : ndarray, shape (n,)
        Current state.
    h : float
        Step to try.
    Z0 : ndarray, shape (3, n)
        Initial guess for the solution. It determines new values of `y` at
        ``t + h * C`` as ``y + Z0``, where ``C`` is the Radau method constants.
    scale : ndarray, shape (n)
        Problem tolerance scale, i.e. ``rtol * abs(y) + atol``.
    tol : float
        Tolerance to which solve the system. This value is compared with
        the normalized by `scale` error.
    LU_real, LU_complex
        LU decompositions of the system Jacobians.
    solve_lu : callable
        Callable which solves a linear system given a LU decomposition. The
        signature is ``solve_lu(LU, b)``.

    Returns
    -------
    converged : bool
        Whether iterations converged.
    n_iter : int
        Number of completed iterations.
    Z : ndarray, shape (3, n)
        Found solution.
    rate : float
        The rate of convergence.
    """
    n = y.shape[0]
    # Fold the step size into the eigenvalues of the iteration matrix.
    M_real = MU_REAL / h
    M_complex = MU_COMPLEX / h

    # Work in transformed variables W = TI Z, which decouples the 3n x 3n
    # Newton system into one real and one complex n x n linear solve.
    W = TI.dot(Z0)
    Z = Z0

    F = np.empty((3, n))
    ch = h * C  # Offsets of the three collocation nodes from t.

    dW_norm_old = None
    dW = np.empty_like(W)
    converged = False
    rate = None
    for k in range(NEWTON_MAXITER):
        # Evaluate the RHS at all three collocation points.
        for i in range(3):
            F[i] = fun(t + ch[i], y + Z[i])

        # Bail out on non-finite RHS values (e.g. overflow); the step will
        # be rejected by the caller.
        if not np.all(np.isfinite(F)):
            break

        # Residuals of the decoupled real and complex subsystems.
        f_real = F.T.dot(TI_REAL) - M_real * W[0]
        f_complex = F.T.dot(TI_COMPLEX) - M_complex * (W[1] + 1j * W[2])

        dW_real = solve_lu(LU_real, f_real)
        dW_complex = solve_lu(LU_complex, f_complex)

        dW[0] = dW_real
        dW[1] = dW_complex.real
        dW[2] = dW_complex.imag

        dW_norm = norm(dW / scale)
        if dW_norm_old is not None:
            # Estimated linear convergence rate of the Newton iteration.
            rate = dW_norm / dW_norm_old

        # Abandon the iteration if it diverges (rate >= 1) or if the
        # geometric-series bound predicts it cannot reach `tol` within the
        # remaining NEWTON_MAXITER - k iterations.
        if (rate is not None and (rate >= 1 or
                rate ** (NEWTON_MAXITER - k) / (1 - rate) * dW_norm > tol)):
            break

        W += dW
        Z = T.dot(W)

        # Converged when the correction vanished exactly or the projected
        # remaining error (rate / (1 - rate) * dW_norm) is below `tol`.
        if (dW_norm == 0 or
                rate is not None and rate / (1 - rate) * dW_norm < tol):
            converged = True
            break

        dW_norm_old = dW_norm

    return converged, k + 1, Z, rate
137
+
138
+
139
def predict_factor(h_abs, h_abs_old, error_norm, error_norm_old):
    """Predict by which factor to increase/decrease the step size.

    The algorithm is described in [1]_.

    Parameters
    ----------
    h_abs, h_abs_old : float
        Current and previous values of the step size, `h_abs_old` can be None
        (see Notes).
    error_norm, error_norm_old : float
        Current and previous values of the error norm, `error_norm_old` can
        be None (see Notes).

    Returns
    -------
    factor : float
        Predicted factor.

    Notes
    -----
    If `h_abs_old` and `error_norm_old` are both not None then a two-step
    algorithm is used, otherwise a one-step algorithm is used.

    References
    ----------
    .. [1] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
           Equations II: Stiff and Differential-Algebraic Problems", Sec. IV.8.
    """
    # Use the two-step (predictive) controller only when a full history is
    # available and the current error is nonzero; otherwise fall back to the
    # plain one-step controller (multiplier of 1).
    have_history = h_abs_old is not None and error_norm_old is not None
    if have_history and error_norm != 0:
        multiplier = (h_abs / h_abs_old) * (error_norm_old / error_norm) ** 0.25
    else:
        multiplier = 1

    # error_norm may be 0, which makes the factor infinite; silence the
    # resulting divide warning from NumPy scalars.
    with np.errstate(divide='ignore'):
        factor = min(1, multiplier) * error_norm ** -0.25

    return factor
177
+
178
+
179
+ class Radau(OdeSolver):
180
+ """Implicit Runge-Kutta method of Radau IIA family of order 5.
181
+
182
+ The implementation follows [1]_. The error is controlled with a
183
+ third-order accurate embedded formula. A cubic polynomial which satisfies
184
+ the collocation conditions is used for the dense output.
185
+
186
+ Parameters
187
+ ----------
188
+ fun : callable
189
+ Right-hand side of the system: the time derivative of the state ``y``
190
+ at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a
191
+ scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must
192
+ return an array of the same shape as ``y``. See `vectorized` for more
193
+ information.
194
+ t0 : float
195
+ Initial time.
196
+ y0 : array_like, shape (n,)
197
+ Initial state.
198
+ t_bound : float
199
+ Boundary time - the integration won't continue beyond it. It also
200
+ determines the direction of the integration.
201
+ first_step : float or None, optional
202
+ Initial step size. Default is ``None`` which means that the algorithm
203
+ should choose.
204
+ max_step : float, optional
205
+ Maximum allowed step size. Default is np.inf, i.e., the step size is not
206
+ bounded and determined solely by the solver.
207
+ rtol, atol : float and array_like, optional
208
+ Relative and absolute tolerances. The solver keeps the local error
209
+ estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
210
+ relative accuracy (number of correct digits), while `atol` controls
211
+ absolute accuracy (number of correct decimal places). To achieve the
212
+ desired `rtol`, set `atol` to be smaller than the smallest value that
213
+ can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
214
+ allowable error. If `atol` is larger than ``rtol * abs(y)`` the
215
+ number of correct digits is not guaranteed. Conversely, to achieve the
216
+ desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
217
+ than `atol`. If components of y have different scales, it might be
218
+ beneficial to set different `atol` values for different components by
219
+ passing array_like with shape (n,) for `atol`. Default values are
220
+ 1e-3 for `rtol` and 1e-6 for `atol`.
221
+ jac : {None, array_like, sparse_matrix, callable}, optional
222
+ Jacobian matrix of the right-hand side of the system with respect to
223
+ y, required by this method. The Jacobian matrix has shape (n, n) and
224
+ its element (i, j) is equal to ``d f_i / d y_j``.
225
+ There are three ways to define the Jacobian:
226
+
227
+ * If array_like or sparse_matrix, the Jacobian is assumed to
228
+ be constant.
229
+ * If callable, the Jacobian is assumed to depend on both
230
+ t and y; it will be called as ``jac(t, y)`` as necessary.
231
+ For the 'Radau' and 'BDF' methods, the return value might be a
232
+ sparse matrix.
233
+ * If None (default), the Jacobian will be approximated by
234
+ finite differences.
235
+
236
+ It is generally recommended to provide the Jacobian rather than
237
+ relying on a finite-difference approximation.
238
+ jac_sparsity : {None, array_like, sparse matrix}, optional
239
+ Defines a sparsity structure of the Jacobian matrix for a
240
+ finite-difference approximation. Its shape must be (n, n). This argument
241
+ is ignored if `jac` is not `None`. If the Jacobian has only few non-zero
242
+ elements in *each* row, providing the sparsity structure will greatly
243
+ speed up the computations [2]_. A zero entry means that a corresponding
244
+ element in the Jacobian is always zero. If None (default), the Jacobian
245
+ is assumed to be dense.
246
+ vectorized : bool, optional
247
+ Whether `fun` can be called in a vectorized fashion. Default is False.
248
+
249
+ If ``vectorized`` is False, `fun` will always be called with ``y`` of
250
+ shape ``(n,)``, where ``n = len(y0)``.
251
+
252
+ If ``vectorized`` is True, `fun` may be called with ``y`` of shape
253
+ ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave
254
+ such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of
255
+ the returned array is the time derivative of the state corresponding
256
+ with a column of ``y``).
257
+
258
+ Setting ``vectorized=True`` allows for faster finite difference
259
+ approximation of the Jacobian by this method, but may result in slower
260
+ execution overall in some circumstances (e.g. small ``len(y0)``).
261
+
262
+ Attributes
263
+ ----------
264
+ n : int
265
+ Number of equations.
266
+ status : string
267
+ Current status of the solver: 'running', 'finished' or 'failed'.
268
+ t_bound : float
269
+ Boundary time.
270
+ direction : float
271
+ Integration direction: +1 or -1.
272
+ t : float
273
+ Current time.
274
+ y : ndarray
275
+ Current state.
276
+ t_old : float
277
+ Previous time. None if no steps were made yet.
278
+ step_size : float
279
+ Size of the last successful step. None if no steps were made yet.
280
+ nfev : int
281
+ Number of evaluations of the right-hand side.
282
+ njev : int
283
+ Number of evaluations of the Jacobian.
284
+ nlu : int
285
+ Number of LU decompositions.
286
+
287
+ References
288
+ ----------
289
+ .. [1] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II:
290
+ Stiff and Differential-Algebraic Problems", Sec. IV.8.
291
+ .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
292
+ sparse Jacobian matrices", Journal of the Institute of Mathematics
293
+ and its Applications, 13, pp. 117-120, 1974.
294
+ """
295
    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None,
                 vectorized=False, first_step=None, **extraneous):
        warn_extraneous(extraneous)
        super().__init__(fun, t0, y0, t_bound, vectorized)
        self.y_old = None
        self.max_step = validate_max_step(max_step)
        self.rtol, self.atol = validate_tol(rtol, atol, self.n)
        # Initial RHS evaluation; reused for step-size selection and for the
        # first Jacobian estimate in _validate_jac.
        self.f = self.fun(self.t, self.y)
        # Select initial step assuming the same order which is used to control
        # the error.
        if first_step is None:
            self.h_abs = select_initial_step(
                self.fun, self.t, self.y, self.f, self.direction,
                3, self.rtol, self.atol)
        else:
            self.h_abs = validate_first_step(first_step, t0, t_bound)
        self.h_abs_old = None
        self.error_norm_old = None

        # Convergence tolerance of the inner Newton iteration, tied to rtol
        # but floored at a small multiple of machine epsilon.
        self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5))
        self.sol = None

        self.jac_factor = None
        self.jac, self.J = self._validate_jac(jac, jac_sparsity)
        # Choose LU factorization/solve routines matching the Jacobian
        # storage (sparse CSC vs. dense); both closures count factorizations
        # through self.nlu.
        if issparse(self.J):
            def lu(A):
                self.nlu += 1
                return splu(A)

            def solve_lu(LU, b):
                return LU.solve(b)

            I = eye(self.n, format='csc')
        else:
            def lu(A):
                self.nlu += 1
                return lu_factor(A, overwrite_a=True)

            def solve_lu(LU, b):
                return lu_solve(LU, b, overwrite_b=True)

            I = np.identity(self.n)

        self.lu = lu
        self.solve_lu = solve_lu
        self.I = I

        # The Jacobian is fresh at t0; LU factors and collocation state are
        # built lazily on the first step.
        self.current_jac = True
        self.LU_real = None
        self.LU_complex = None
        self.Z = None
+
348
    def _validate_jac(self, jac, sparsity):
        """Normalize the user-supplied Jacobian option.

        Returns ``(jac_wrapped, J)``: ``jac_wrapped(t, y, f)`` evaluates a
        fresh Jacobian (or is None when a constant matrix was supplied) and
        ``J`` is the Jacobian at the initial point.
        """
        t0 = self.t
        y0 = self.y

        if jac is None:
            # No Jacobian given: estimate by finite differences, optionally
            # exploiting a known sparsity structure via column grouping.
            if sparsity is not None:
                if issparse(sparsity):
                    sparsity = csc_matrix(sparsity)
                groups = group_columns(sparsity)
                sparsity = (sparsity, groups)

            def jac_wrapped(t, y, f):
                self.njev += 1
                J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f,
                                             self.atol, self.jac_factor,
                                             sparsity)
                return J
            J = jac_wrapped(t0, y0, self.f)
        elif callable(jac):
            J = jac(t0, y0)
            self.njev = 1
            # Wrap the callable so every later evaluation is coerced to a
            # consistent type (sparse CSC or dense float array).
            if issparse(J):
                J = csc_matrix(J)

                def jac_wrapped(t, y, _=None):
                    self.njev += 1
                    return csc_matrix(jac(t, y), dtype=float)

            else:
                J = np.asarray(J, dtype=float)

                def jac_wrapped(t, y, _=None):
                    self.njev += 1
                    return np.asarray(jac(t, y), dtype=float)

            if J.shape != (self.n, self.n):
                raise ValueError("`jac` is expected to have shape {}, but "
                                 "actually has {}."
                                 .format((self.n, self.n), J.shape))
        else:
            # Constant Jacobian supplied directly as a matrix.
            if issparse(jac):
                J = csc_matrix(jac)
            else:
                J = np.asarray(jac, dtype=float)

            if J.shape != (self.n, self.n):
                raise ValueError("`jac` is expected to have shape {}, but "
                                 "actually has {}."
                                 .format((self.n, self.n), J.shape))
            # None signals the stepper that the Jacobian never needs
            # re-evaluation.
            jac_wrapped = None

        return jac_wrapped, J
+
401
    def _step_impl(self):
        """Attempt one Radau IIA step, adapting h until accepted.

        Returns ``(success, message)`` as required by the OdeSolver protocol.
        """
        t = self.t
        y = self.y
        f = self.f

        max_step = self.max_step
        atol = self.atol
        rtol = self.rtol

        # Smallest step resolvable in floating point around t.
        min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
        if self.h_abs > max_step:
            h_abs = max_step
            h_abs_old = None
            error_norm_old = None
        elif self.h_abs < min_step:
            h_abs = min_step
            h_abs_old = None
            error_norm_old = None
        else:
            h_abs = self.h_abs
            h_abs_old = self.h_abs_old
            error_norm_old = self.error_norm_old

        J = self.J
        LU_real = self.LU_real
        LU_complex = self.LU_complex

        current_jac = self.current_jac
        jac = self.jac

        rejected = False
        step_accepted = False
        message = None
        while not step_accepted:
            if h_abs < min_step:
                return False, self.TOO_SMALL_STEP

            h = h_abs * self.direction
            t_new = t + h

            # Clip the step so we never integrate past t_bound.
            if self.direction * (t_new - self.t_bound) > 0:
                t_new = self.t_bound

            h = t_new - t
            h_abs = np.abs(h)

            # Initial guess for the collocation increments: zero on the very
            # first step, otherwise extrapolate the previous dense output.
            if self.sol is None:
                Z0 = np.zeros((3, y.shape[0]))
            else:
                Z0 = self.sol(t + h * C).T - y

            scale = atol + np.abs(y) * rtol

            converged = False
            while not converged:
                if LU_real is None or LU_complex is None:
                    LU_real = self.lu(MU_REAL / h * self.I - J)
                    LU_complex = self.lu(MU_COMPLEX / h * self.I - J)

                converged, n_iter, Z, rate = solve_collocation_system(
                    self.fun, t, y, h, Z0, scale, self.newton_tol,
                    LU_real, LU_complex, self.solve_lu)

                if not converged:
                    # Retry once with a fresh Jacobian; if it is already
                    # current, give up and shrink the step instead.
                    if current_jac:
                        break

                    J = self.jac(t, y, f)
                    current_jac = True
                    LU_real = None
                    LU_complex = None

            if not converged:
                h_abs *= 0.5
                LU_real = None
                LU_complex = None
                continue

            y_new = y + Z[-1]
            ZE = Z.T.dot(E) / h
            error = self.solve_lu(LU_real, f + ZE)
            scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
            error_norm = norm(error / scale)
            # Safety factor shrinks when the Newton iteration needed many
            # iterations.
            safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER
                                                       + n_iter)

            # After a rejection, re-estimate the error with a stabilized
            # formula to avoid repeated spurious rejections.
            if rejected and error_norm > 1:
                error = self.solve_lu(LU_real, self.fun(t, y + error) + ZE)
                error_norm = norm(error / scale)

            if error_norm > 1:
                factor = predict_factor(h_abs, h_abs_old,
                                        error_norm, error_norm_old)
                h_abs *= max(MIN_FACTOR, safety * factor)

                LU_real = None
                LU_complex = None
                rejected = True
            else:
                step_accepted = True

        # Re-evaluate the Jacobian only if Newton converged slowly.
        recompute_jac = jac is not None and n_iter > 2 and rate > 1e-3

        factor = predict_factor(h_abs, h_abs_old, error_norm, error_norm_old)
        factor = min(MAX_FACTOR, safety * factor)

        if not recompute_jac and factor < 1.2:
            # Keep the step (and the LU factors) if the gain is marginal.
            factor = 1
        else:
            LU_real = None
            LU_complex = None

        f_new = self.fun(t_new, y_new)
        if recompute_jac:
            J = jac(t_new, y_new, f_new)
            current_jac = True
        elif jac is not None:
            current_jac = False

        self.h_abs_old = self.h_abs
        self.error_norm_old = error_norm

        self.h_abs = h_abs * factor

        self.y_old = y

        self.t = t_new
        self.y = y_new
        self.f = f_new

        self.Z = Z

        self.LU_real = LU_real
        self.LU_complex = LU_complex
        self.current_jac = current_jac
        self.J = J

        self.t_old = t
        self.sol = self._compute_dense_output()

        return step_accepted, message
+
543
+ def _compute_dense_output(self):
544
+ Q = np.dot(self.Z.T, P)
545
+ return RadauDenseOutput(self.t_old, self.t, self.y_old, Q)
546
+
547
    def _dense_output_impl(self):
        # The interpolant was already built at the end of _step_impl; just
        # return it.
        return self.sol
+
550
+
551
class RadauDenseOutput(DenseOutput):
    """Polynomial interpolant over one accepted Radau step."""

    def __init__(self, t_old, t, y_old, Q):
        super().__init__(t_old, t)
        # Signed step length; used to normalize evaluation points.
        self.h = t - t_old
        self.Q = Q
        self.order = Q.shape[1] - 1
        self.y_old = y_old

    def _call_impl(self, t):
        # Normalized position(s) within the step.
        x = (t - self.t_old) / self.h
        n_terms = self.order + 1
        if t.ndim == 0:
            powers = np.cumprod(np.tile(x, n_terms))
        else:
            powers = np.cumprod(np.tile(x, (n_terms, 1)), axis=0)
        # Here we don't multiply by h, not a mistake.
        y = np.dot(self.Q, powers)
        if y.ndim == 2:
            y += self.y_old[:, None]
        else:
            y += self.y_old

        return y
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/rk.py ADDED
@@ -0,0 +1,601 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from .base import OdeSolver, DenseOutput
3
+ from .common import (validate_max_step, validate_tol, select_initial_step,
4
+ norm, warn_extraneous, validate_first_step)
5
+ from . import dop853_coefficients
6
+
7
# Multiply steps computed from asymptotic behaviour of errors by this.
SAFETY = 0.9

MIN_FACTOR = 0.2  # Minimum allowed decrease in a step size.
MAX_FACTOR = 10  # Maximum allowed increase in a step size.
+
13
+
14
def rk_step(fun, t, y, f, h, A, B, C, K):
    """Advance one step of an explicit Runge-Kutta method.

    Evaluates the stages defined by the Butcher tableau ``(A, B, C)`` [1]_,
    stores them in ``K`` (the final row holds the derivative at the new
    point, which embedded error estimators combine with the stages), and
    returns the higher-order prediction.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system.
    t : float
        Current time.
    y : ndarray, shape (n,)
        Current state.
    f : ndarray, shape (n,)
        Current value of the derivative, i.e., ``fun(t, y)``.
    h : float
        Step to use.
    A : ndarray, shape (n_stages, n_stages)
        Stage coupling coefficients; strictly lower triangular for explicit
        methods.
    B : ndarray, shape (n_stages,)
        Weights combining the stages into the final prediction.
    C : ndarray, shape (n_stages,)
        Time fractions of the stages; the first entry is zero.
    K : ndarray, shape (n_stages + 1, n)
        Output storage for the stages (one per row); the last row receives
        ``fun(t + h, y_new)``.

    Returns
    -------
    y_new : ndarray, shape (n,)
        Solution at ``t + h`` computed with the higher-order formula.
    f_new : ndarray, shape (n,)
        Derivative ``fun(t + h, y_new)``.

    References
    ----------
    .. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential
           Equations I: Nonstiff Problems", Sec. II.4.
    """
    K[0] = f
    n_stages = len(B)
    for s in range(1, n_stages):
        increment = np.dot(K[:s].T, A[s, :s]) * h
        K[s] = fun(t + C[s] * h, y + increment)

    y_new = y + h * np.dot(K[:-1].T, B)
    f_new = fun(t + h, y_new)
    K[-1] = f_new

    return y_new, f_new
+
73
+
74
class RungeKutta(OdeSolver):
    """Base class for explicit Runge-Kutta methods.

    Subclasses supply the Butcher tableau (``A``, ``B``, ``C``), the
    embedded error weights ``E``, the dense-output projection ``P`` and the
    orders; this class implements the adaptive step-size loop.
    """
    C: np.ndarray = NotImplemented  # stage time fractions (nodes)
    A: np.ndarray = NotImplemented  # stage coupling coefficients
    B: np.ndarray = NotImplemented  # weights of the propagating formula
    E: np.ndarray = NotImplemented  # embedded error-estimate weights
    P: np.ndarray = NotImplemented  # dense-output projection matrix
    order: int = NotImplemented  # order of the propagating formula
    error_estimator_order: int = NotImplemented  # order of the error estimate
    n_stages: int = NotImplemented  # number of RK stages

    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, vectorized=False,
                 first_step=None, **extraneous):
        warn_extraneous(extraneous)
        super().__init__(fun, t0, y0, t_bound, vectorized,
                         support_complex=True)
        self.y_old = None
        self.max_step = validate_max_step(max_step)
        self.rtol, self.atol = validate_tol(rtol, atol, self.n)
        self.f = self.fun(self.t, self.y)
        # Select the initial step from the order used for error control.
        if first_step is None:
            self.h_abs = select_initial_step(
                self.fun, self.t, self.y, self.f, self.direction,
                self.error_estimator_order, self.rtol, self.atol)
        else:
            self.h_abs = validate_first_step(first_step, t0, t_bound)
        # Stage storage; the extra row holds f at the end of the step.
        self.K = np.empty((self.n_stages + 1, self.n), dtype=self.y.dtype)
        self.error_exponent = -1 / (self.error_estimator_order + 1)
        self.h_previous = None

    def _estimate_error(self, K, h):
        # Local error estimate: weighted combination of the stages times h.
        return np.dot(K.T, self.E) * h

    def _estimate_error_norm(self, K, h, scale):
        return norm(self._estimate_error(K, h) / scale)

    def _step_impl(self):
        t = self.t
        y = self.y

        max_step = self.max_step
        rtol = self.rtol
        atol = self.atol

        # Smallest step resolvable in floating point around t.
        min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)

        if self.h_abs > max_step:
            h_abs = max_step
        elif self.h_abs < min_step:
            h_abs = min_step
        else:
            h_abs = self.h_abs

        step_accepted = False
        step_rejected = False

        while not step_accepted:
            if h_abs < min_step:
                return False, self.TOO_SMALL_STEP

            h = h_abs * self.direction
            t_new = t + h

            # Clip so integration never goes past t_bound.
            if self.direction * (t_new - self.t_bound) > 0:
                t_new = self.t_bound

            h = t_new - t
            h_abs = np.abs(h)

            y_new, f_new = rk_step(self.fun, t, y, self.f, h, self.A,
                                   self.B, self.C, self.K)
            scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
            error_norm = self._estimate_error_norm(self.K, h, scale)

            if error_norm < 1:
                if error_norm == 0:
                    factor = MAX_FACTOR
                else:
                    factor = min(MAX_FACTOR,
                                 SAFETY * error_norm ** self.error_exponent)

                # Do not grow the step right after a rejection.
                if step_rejected:
                    factor = min(1, factor)

                h_abs *= factor

                step_accepted = True
            else:
                h_abs *= max(MIN_FACTOR,
                             SAFETY * error_norm ** self.error_exponent)
                step_rejected = True

        self.h_previous = h
        self.y_old = y

        self.t = t_new
        self.y = y_new

        self.h_abs = h_abs
        self.f = f_new

        return True, None

    def _dense_output_impl(self):
        # Interpolation coefficients: stages projected onto P.
        Q = self.K.T.dot(self.P)
        return RkDenseOutput(self.t_old, self.t, self.y_old, Q)
+
182
+
183
class RK23(RungeKutta):
    """Explicit Runge-Kutta method of order 3(2).

    This uses the Bogacki-Shampine pair of formulas [1]_. The error is controlled
    assuming accuracy of the second-order method, but steps are taken using the
    third-order accurate formula (local extrapolation is done). A cubic Hermite
    polynomial is used for the dense output.

    Can be applied in the complex domain.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system: the time derivative of the state ``y``
        at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a
        scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must
        return an array of the same shape as ``y``. See `vectorized` for more
        information.
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is not
        bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits), while `atol` controls
        absolute accuracy (number of correct decimal places). To achieve the
        desired `rtol`, set `atol` to be smaller than the smallest value that
        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
        number of correct digits is not guaranteed. Conversely, to achieve the
        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
        than `atol`. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    vectorized : bool, optional
        Whether `fun` may be called in a vectorized fashion. False (default)
        is recommended for this solver.

        If ``vectorized`` is False, `fun` will always be called with ``y`` of
        shape ``(n,)``, where ``n = len(y0)``.

        If ``vectorized`` is True, `fun` may be called with ``y`` of shape
        ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave
        such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of
        the returned array is the time derivative of the state corresponding
        with a column of ``y``).

        Setting ``vectorized=True`` allows for faster finite difference
        approximation of the Jacobian by methods 'Radau' and 'BDF', but
        will result in slower execution for this solver.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of evaluations of the system's right-hand side.
    njev : int
        Number of evaluations of the Jacobian.
        Is always 0 for this solver as it does not use the Jacobian.
    nlu : int
        Number of LU decompositions. Is always 0 for this solver.

    References
    ----------
    .. [1] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas",
           Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.
    """
    order = 3
    error_estimator_order = 2
    n_stages = 3
    # Bogacki-Shampine Butcher tableau.
    C = np.array([0, 1/2, 3/4])
    A = np.array([
        [0, 0, 0],
        [1/2, 0, 0],
        [0, 3/4, 0]
    ])
    B = np.array([2/9, 1/3, 4/9])
    # Embedded second-order error weights (includes the FSAL stage).
    E = np.array([5/72, -1/12, -1/9, 1/8])
    # Cubic Hermite dense-output projection.
    P = np.array([[1, -4 / 3, 5 / 9],
                  [0, 1, -2/3],
                  [0, 4/3, -8/9],
                  [0, -1, 1]])
+
292
+
293
class RK45(RungeKutta):
    """Explicit Runge-Kutta method of order 5(4).

    This uses the Dormand-Prince pair of formulas [1]_. The error is controlled
    assuming accuracy of the fourth-order method, but steps are taken
    using the fifth-order accurate formula (local extrapolation is done).
    A quartic interpolation polynomial is used for the dense output [2]_.

    Can be applied in the complex domain.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system. The calling signature is ``fun(t, y)``.
        Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
        It can either have shape (n,); then ``fun`` must return array_like with
        shape (n,). Alternatively it can have shape (n, k); then ``fun``
        must return an array_like with shape (n, k), i.e., each column
        corresponds to a single column in ``y``. The choice between the two
        options is determined by `vectorized` argument (see below).
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is not
        bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits), while `atol` controls
        absolute accuracy (number of correct decimal places). To achieve the
        desired `rtol`, set `atol` to be smaller than the smallest value that
        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
        number of correct digits is not guaranteed. Conversely, to achieve the
        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
        than `atol`. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    vectorized : bool, optional
        Whether `fun` is implemented in a vectorized fashion. Default is False.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of evaluations of the system's right-hand side.
    njev : int
        Number of evaluations of the Jacobian.
        Is always 0 for this solver as it does not use the Jacobian.
    nlu : int
        Number of LU decompositions. Is always 0 for this solver.

    References
    ----------
    .. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
           formulae", Journal of Computational and Applied Mathematics, Vol. 6,
           No. 1, pp. 19-26, 1980.
    .. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
           of Computation,, Vol. 46, No. 173, pp. 135-150, 1986.
    """
    order = 5
    error_estimator_order = 4
    n_stages = 6
    # Dormand-Prince Butcher tableau.
    C = np.array([0, 1/5, 3/10, 4/5, 8/9, 1])
    A = np.array([
        [0, 0, 0, 0, 0],
        [1/5, 0, 0, 0, 0],
        [3/40, 9/40, 0, 0, 0],
        [44/45, -56/15, 32/9, 0, 0],
        [19372/6561, -25360/2187, 64448/6561, -212/729, 0],
        [9017/3168, -355/33, 46732/5247, 49/176, -5103/18656]
    ])
    B = np.array([35/384, 0, 500/1113, 125/192, -2187/6784, 11/84])
    # Embedded fourth-order error weights (includes the FSAL stage).
    E = np.array([-71/57600, 0, 71/16695, -71/1920, 17253/339200, -22/525,
                  1/40])
    # Corresponds to the optimum value of c_6 from [2]_.
    P = np.array([
        [1, -8048581381/2820520608, 8663915743/2820520608,
         -12715105075/11282082432],
        [0, 0, 0, 0],
        [0, 131558114200/32700410799, -68118460800/10900136933,
         87487479700/32700410799],
        [0, -1754552775/470086768, 14199869525/1410260304,
         -10690763975/1880347072],
        [0, 127303824393/49829197408, -318862633887/49829197408,
         701980252875 / 199316789632],
        [0, -282668133/205662961, 2019193451/616988883, -1453857185/822651844],
        [0, 40617522/29380423, -110615467/29380423, 69997945/29380423]])
+
406
+
407
class DOP853(RungeKutta):
    """Explicit Runge-Kutta method of order 8.

    This is a Python implementation of "DOP853" algorithm originally written
    in Fortran [1]_, [2]_. Note that this is not a literal translation, but
    the algorithmic core and coefficients are the same.

    Can be applied in the complex domain.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system. The calling signature is ``fun(t, y)``.
        Here, ``t`` is a scalar, and there are two options for the ndarray ``y``:
        It can either have shape (n,); then ``fun`` must return array_like with
        shape (n,). Alternatively it can have shape (n, k); then ``fun``
        must return an array_like with shape (n, k), i.e. each column
        corresponds to a single column in ``y``. The choice between the two
        options is determined by `vectorized` argument (see below).
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e. the step size is not
        bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits), while `atol` controls
        absolute accuracy (number of correct decimal places). To achieve the
        desired `rtol`, set `atol` to be smaller than the smallest value that
        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
        number of correct digits is not guaranteed. Conversely, to achieve the
        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
        than `atol`. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    vectorized : bool, optional
        Whether `fun` is implemented in a vectorized fashion. Default is False.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of evaluations of the system's right-hand side.
    njev : int
        Number of evaluations of the Jacobian. Is always 0 for this solver
        as it does not use the Jacobian.
    nlu : int
        Number of LU decompositions. Is always 0 for this solver.

    References
    ----------
    .. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential
           Equations I: Nonstiff Problems", Sec. II.
    .. [2] `Page with original Fortran code of DOP853
            <http://www.unige.ch/~hairer/software.html>`_.
    """
    n_stages = dop853_coefficients.N_STAGES
    order = 8
    error_estimator_order = 7
    # Truncate the extended coefficient tables to the propagating stages.
    A = dop853_coefficients.A[:n_stages, :n_stages]
    B = dop853_coefficients.B
    C = dop853_coefficients.C[:n_stages]
    # Two embedded error estimators (orders 3 and 5) combined below.
    E3 = dop853_coefficients.E3
    E5 = dop853_coefficients.E5
    D = dop853_coefficients.D

    # Extra stages evaluated lazily, only when dense output is requested.
    A_EXTRA = dop853_coefficients.A[n_stages + 1:]
    C_EXTRA = dop853_coefficients.C[n_stages + 1:]

    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, vectorized=False,
                 first_step=None, **extraneous):
        super().__init__(fun, t0, y0, t_bound, max_step, rtol, atol,
                         vectorized, first_step, **extraneous)
        # self.K views the leading rows of K_extended, so the stepping loop
        # fills storage that the dense-output computation extends in place.
        self.K_extended = np.empty((dop853_coefficients.N_STAGES_EXTENDED,
                                    self.n), dtype=self.y.dtype)
        self.K = self.K_extended[:self.n_stages + 1]

    def _estimate_error(self, K, h):  # Left for testing purposes.
        err5 = np.dot(K.T, self.E5)
        err3 = np.dot(K.T, self.E3)
        denom = np.hypot(np.abs(err5), 0.1 * np.abs(err3))
        correction_factor = np.ones_like(err5)
        mask = denom > 0
        correction_factor[mask] = np.abs(err5[mask]) / denom[mask]
        return h * err5 * correction_factor

    def _estimate_error_norm(self, K, h, scale):
        # Combine the order-5 and order-3 estimates as in the original
        # Fortran DOP853 error control.
        err5 = np.dot(K.T, self.E5) / scale
        err3 = np.dot(K.T, self.E3) / scale
        err5_norm_2 = np.linalg.norm(err5)**2
        err3_norm_2 = np.linalg.norm(err3)**2
        if err5_norm_2 == 0 and err3_norm_2 == 0:
            return 0.0
        denom = err5_norm_2 + 0.01 * err3_norm_2
        return np.abs(h) * err5_norm_2 / np.sqrt(denom * len(scale))

    def _dense_output_impl(self):
        K = self.K_extended
        h = self.h_previous
        # Evaluate the extra stages needed only for interpolation.
        for s, (a, c) in enumerate(zip(self.A_EXTRA, self.C_EXTRA),
                                   start=self.n_stages + 1):
            dy = np.dot(K[:s].T, a[:s]) * h
            K[s] = self.fun(self.t_old + c * h, self.y_old + dy)

        F = np.empty((dop853_coefficients.INTERPOLATOR_POWER, self.n),
                     dtype=self.y_old.dtype)

        f_old = K[0]
        delta_y = self.y - self.y_old

        # First three interpolation coefficients reproduce the Hermite data
        # (endpoint values and derivatives); the rest come from D.
        F[0] = delta_y
        F[1] = h * f_old - delta_y
        F[2] = 2 * delta_y - h * (self.f + f_old)
        F[3:] = h * np.dot(self.D, K)

        return Dop853DenseOutput(self.t_old, self.t, self.y_old, F)
+
551
+
552
class RkDenseOutput(DenseOutput):
    """Polynomial interpolant over one accepted Runge-Kutta step."""

    def __init__(self, t_old, t, y_old, Q):
        super().__init__(t_old, t)
        # Signed step length; evaluation points are normalized by it.
        self.h = t - t_old
        self.Q = Q
        self.order = Q.shape[1] - 1
        self.y_old = y_old

    def _call_impl(self, t):
        # Normalized position(s) within the step.
        x = (t - self.t_old) / self.h
        n_terms = self.order + 1
        if t.ndim == 0:
            powers = np.cumprod(np.tile(x, n_terms))
        else:
            powers = np.cumprod(np.tile(x, (n_terms, 1)), axis=0)
        y = self.h * np.dot(self.Q, powers)
        if y.ndim == 2:
            y += self.y_old[:, None]
        else:
            y += self.y_old

        return y
+
576
+
577
class Dop853DenseOutput(DenseOutput):
    """Dense output for DOP853 using its dedicated interpolation scheme."""

    def __init__(self, t_old, t, y_old, F):
        super().__init__(t_old, t)
        # Signed step length; evaluation points are normalized by it.
        self.h = t - t_old
        # F holds the interpolation coefficients computed by
        # DOP853._dense_output_impl.
        self.F = F
        self.y_old = y_old

    def _call_impl(self, t):
        x = (t - self.t_old) / self.h

        if t.ndim == 0:
            y = np.zeros_like(self.y_old)
        else:
            x = x[:, None]
            y = np.zeros((len(x), len(self.y_old)), dtype=self.y_old.dtype)

        # Horner-like evaluation alternating factors x and (1 - x), matching
        # the contin8 routine of the original Fortran DOP853; the iteration
        # order over reversed(F) is significant.
        for i, f in enumerate(reversed(self.F)):
            y += f
            if i % 2 == 0:
                y *= x
            else:
                y *= 1 - x
        y += self.y_old

        return y.T
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/test_ivp.py ADDED
@@ -0,0 +1,1135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from itertools import product
2
+ from numpy.testing import (assert_, assert_allclose, assert_array_less,
3
+ assert_equal, assert_no_warnings, suppress_warnings)
4
+ import pytest
5
+ from pytest import raises as assert_raises
6
+ import numpy as np
7
+ from scipy.optimize._numdiff import group_columns
8
+ from scipy.integrate import solve_ivp, RK23, RK45, DOP853, Radau, BDF, LSODA
9
+ from scipy.integrate import OdeSolution
10
+ from scipy.integrate._ivp.common import num_jac
11
+ from scipy.integrate._ivp.base import ConstantDenseOutput
12
+ from scipy.sparse import coo_matrix, csc_matrix
13
+
14
+
15
+ def fun_zero(t, y):
16
+ return np.zeros_like(y)
17
+
18
+
19
+ def fun_linear(t, y):
20
+ return np.array([-y[0] - 5 * y[1], y[0] + y[1]])
21
+
22
+
23
+ def jac_linear():
24
+ return np.array([[-1, -5], [1, 1]])
25
+
26
+
27
+ def sol_linear(t):
28
+ return np.vstack((-5 * np.sin(2 * t),
29
+ 2 * np.cos(2 * t) + np.sin(2 * t)))
30
+
31
+
32
+ def fun_rational(t, y):
33
+ return np.array([y[1] / t,
34
+ y[1] * (y[0] + 2 * y[1] - 1) / (t * (y[0] - 1))])
35
+
36
+
37
+ def fun_rational_vectorized(t, y):
38
+ return np.vstack((y[1] / t,
39
+ y[1] * (y[0] + 2 * y[1] - 1) / (t * (y[0] - 1))))
40
+
41
+
42
+ def jac_rational(t, y):
43
+ return np.array([
44
+ [0, 1 / t],
45
+ [-2 * y[1] ** 2 / (t * (y[0] - 1) ** 2),
46
+ (y[0] + 4 * y[1] - 1) / (t * (y[0] - 1))]
47
+ ])
48
+
49
+
50
+ def jac_rational_sparse(t, y):
51
+ return csc_matrix([
52
+ [0, 1 / t],
53
+ [-2 * y[1] ** 2 / (t * (y[0] - 1) ** 2),
54
+ (y[0] + 4 * y[1] - 1) / (t * (y[0] - 1))]
55
+ ])
56
+
57
+
58
+ def sol_rational(t):
59
+ return np.asarray((t / (t + 10), 10 * t / (t + 10) ** 2))
60
+
61
+
62
+ def fun_medazko(t, y):
63
+ n = y.shape[0] // 2
64
+ k = 100
65
+ c = 4
66
+
67
+ phi = 2 if t <= 5 else 0
68
+ y = np.hstack((phi, 0, y, y[-2]))
69
+
70
+ d = 1 / n
71
+ j = np.arange(n) + 1
72
+ alpha = 2 * (j * d - 1) ** 3 / c ** 2
73
+ beta = (j * d - 1) ** 4 / c ** 2
74
+
75
+ j_2_p1 = 2 * j + 2
76
+ j_2_m3 = 2 * j - 2
77
+ j_2_m1 = 2 * j
78
+ j_2 = 2 * j + 1
79
+
80
+ f = np.empty(2 * n)
81
+ f[::2] = (alpha * (y[j_2_p1] - y[j_2_m3]) / (2 * d) +
82
+ beta * (y[j_2_m3] - 2 * y[j_2_m1] + y[j_2_p1]) / d ** 2 -
83
+ k * y[j_2_m1] * y[j_2])
84
+ f[1::2] = -k * y[j_2] * y[j_2_m1]
85
+
86
+ return f
87
+
88
+
89
+ def medazko_sparsity(n):
90
+ cols = []
91
+ rows = []
92
+
93
+ i = np.arange(n) * 2
94
+
95
+ cols.append(i[1:])
96
+ rows.append(i[1:] - 2)
97
+
98
+ cols.append(i)
99
+ rows.append(i)
100
+
101
+ cols.append(i)
102
+ rows.append(i + 1)
103
+
104
+ cols.append(i[:-1])
105
+ rows.append(i[:-1] + 2)
106
+
107
+ i = np.arange(n) * 2 + 1
108
+
109
+ cols.append(i)
110
+ rows.append(i)
111
+
112
+ cols.append(i)
113
+ rows.append(i - 1)
114
+
115
+ cols = np.hstack(cols)
116
+ rows = np.hstack(rows)
117
+
118
+ return coo_matrix((np.ones_like(cols), (cols, rows)))
119
+
120
+
121
+ def fun_complex(t, y):
122
+ return -y
123
+
124
+
125
+ def jac_complex(t, y):
126
+ return -np.eye(y.shape[0])
127
+
128
+
129
+ def jac_complex_sparse(t, y):
130
+ return csc_matrix(jac_complex(t, y))
131
+
132
+
133
+ def sol_complex(t):
134
+ y = (0.5 + 1j) * np.exp(-t)
135
+ return y.reshape((1, -1))
136
+
137
+
138
+ def fun_event_dense_output_LSODA(t, y):
139
+ return y * (t - 2)
140
+
141
+
142
+ def jac_event_dense_output_LSODA(t, y):
143
+ return t - 2
144
+
145
+
146
+ def sol_event_dense_output_LSODA(t):
147
+ return np.exp(t ** 2 / 2 - 2 * t + np.log(0.05) - 6)
148
+
149
+
150
+ def compute_error(y, y_true, rtol, atol):
151
+ e = (y - y_true) / (atol + rtol * np.abs(y_true))
152
+ return np.linalg.norm(e, axis=0) / np.sqrt(e.shape[0])
153
+
154
+
155
+ def test_integration():
156
+ rtol = 1e-3
157
+ atol = 1e-6
158
+ y0 = [1/3, 2/9]
159
+
160
+ for vectorized, method, t_span, jac in product(
161
+ [False, True],
162
+ ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA'],
163
+ [[5, 9], [5, 1]],
164
+ [None, jac_rational, jac_rational_sparse]):
165
+
166
+ if vectorized:
167
+ fun = fun_rational_vectorized
168
+ else:
169
+ fun = fun_rational
170
+
171
+ with suppress_warnings() as sup:
172
+ sup.filter(UserWarning,
173
+ "The following arguments have no effect for a chosen "
174
+ "solver: `jac`")
175
+ res = solve_ivp(fun, t_span, y0, rtol=rtol,
176
+ atol=atol, method=method, dense_output=True,
177
+ jac=jac, vectorized=vectorized)
178
+ assert_equal(res.t[0], t_span[0])
179
+ assert_(res.t_events is None)
180
+ assert_(res.y_events is None)
181
+ assert_(res.success)
182
+ assert_equal(res.status, 0)
183
+
184
+ if method == 'DOP853':
185
+ # DOP853 spends more functions evaluation because it doesn't
186
+ # have enough time to develop big enough step size.
187
+ assert_(res.nfev < 50)
188
+ else:
189
+ assert_(res.nfev < 40)
190
+
191
+ if method in ['RK23', 'RK45', 'DOP853', 'LSODA']:
192
+ assert_equal(res.njev, 0)
193
+ assert_equal(res.nlu, 0)
194
+ else:
195
+ assert_(0 < res.njev < 3)
196
+ assert_(0 < res.nlu < 10)
197
+
198
+ y_true = sol_rational(res.t)
199
+ e = compute_error(res.y, y_true, rtol, atol)
200
+ assert_(np.all(e < 5))
201
+
202
+ tc = np.linspace(*t_span)
203
+ yc_true = sol_rational(tc)
204
+ yc = res.sol(tc)
205
+
206
+ e = compute_error(yc, yc_true, rtol, atol)
207
+ assert_(np.all(e < 5))
208
+
209
+ tc = (t_span[0] + t_span[-1]) / 2
210
+ yc_true = sol_rational(tc)
211
+ yc = res.sol(tc)
212
+
213
+ e = compute_error(yc, yc_true, rtol, atol)
214
+ assert_(np.all(e < 5))
215
+
216
+ assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
217
+
218
+
219
+ def test_integration_complex():
220
+ rtol = 1e-3
221
+ atol = 1e-6
222
+ y0 = [0.5 + 1j]
223
+ t_span = [0, 1]
224
+ tc = np.linspace(t_span[0], t_span[1])
225
+ for method, jac in product(['RK23', 'RK45', 'DOP853', 'BDF'],
226
+ [None, jac_complex, jac_complex_sparse]):
227
+ with suppress_warnings() as sup:
228
+ sup.filter(UserWarning,
229
+ "The following arguments have no effect for a chosen "
230
+ "solver: `jac`")
231
+ res = solve_ivp(fun_complex, t_span, y0, method=method,
232
+ dense_output=True, rtol=rtol, atol=atol, jac=jac)
233
+
234
+ assert_equal(res.t[0], t_span[0])
235
+ assert_(res.t_events is None)
236
+ assert_(res.y_events is None)
237
+ assert_(res.success)
238
+ assert_equal(res.status, 0)
239
+
240
+ if method == 'DOP853':
241
+ assert res.nfev < 35
242
+ else:
243
+ assert res.nfev < 25
244
+
245
+ if method == 'BDF':
246
+ assert_equal(res.njev, 1)
247
+ assert res.nlu < 6
248
+ else:
249
+ assert res.njev == 0
250
+ assert res.nlu == 0
251
+
252
+ y_true = sol_complex(res.t)
253
+ e = compute_error(res.y, y_true, rtol, atol)
254
+ assert np.all(e < 5)
255
+
256
+ yc_true = sol_complex(tc)
257
+ yc = res.sol(tc)
258
+ e = compute_error(yc, yc_true, rtol, atol)
259
+
260
+ assert np.all(e < 5)
261
+
262
+
263
+ def test_integration_sparse_difference():
264
+ n = 200
265
+ t_span = [0, 20]
266
+ y0 = np.zeros(2 * n)
267
+ y0[1::2] = 1
268
+ sparsity = medazko_sparsity(n)
269
+
270
+ for method in ['BDF', 'Radau']:
271
+ res = solve_ivp(fun_medazko, t_span, y0, method=method,
272
+ jac_sparsity=sparsity)
273
+
274
+ assert_equal(res.t[0], t_span[0])
275
+ assert_(res.t_events is None)
276
+ assert_(res.y_events is None)
277
+ assert_(res.success)
278
+ assert_equal(res.status, 0)
279
+
280
+ assert_allclose(res.y[78, -1], 0.233994e-3, rtol=1e-2)
281
+ assert_allclose(res.y[79, -1], 0, atol=1e-3)
282
+ assert_allclose(res.y[148, -1], 0.359561e-3, rtol=1e-2)
283
+ assert_allclose(res.y[149, -1], 0, atol=1e-3)
284
+ assert_allclose(res.y[198, -1], 0.117374129e-3, rtol=1e-2)
285
+ assert_allclose(res.y[199, -1], 0.6190807e-5, atol=1e-3)
286
+ assert_allclose(res.y[238, -1], 0, atol=1e-3)
287
+ assert_allclose(res.y[239, -1], 0.9999997, rtol=1e-2)
288
+
289
+
290
+ def test_integration_const_jac():
291
+ rtol = 1e-3
292
+ atol = 1e-6
293
+ y0 = [0, 2]
294
+ t_span = [0, 2]
295
+ J = jac_linear()
296
+ J_sparse = csc_matrix(J)
297
+
298
+ for method, jac in product(['Radau', 'BDF'], [J, J_sparse]):
299
+ res = solve_ivp(fun_linear, t_span, y0, rtol=rtol, atol=atol,
300
+ method=method, dense_output=True, jac=jac)
301
+ assert_equal(res.t[0], t_span[0])
302
+ assert_(res.t_events is None)
303
+ assert_(res.y_events is None)
304
+ assert_(res.success)
305
+ assert_equal(res.status, 0)
306
+
307
+ assert_(res.nfev < 100)
308
+ assert_equal(res.njev, 0)
309
+ assert_(0 < res.nlu < 15)
310
+
311
+ y_true = sol_linear(res.t)
312
+ e = compute_error(res.y, y_true, rtol, atol)
313
+ assert_(np.all(e < 10))
314
+
315
+ tc = np.linspace(*t_span)
316
+ yc_true = sol_linear(tc)
317
+ yc = res.sol(tc)
318
+
319
+ e = compute_error(yc, yc_true, rtol, atol)
320
+ assert_(np.all(e < 15))
321
+
322
+ assert_allclose(res.sol(res.t), res.y, rtol=1e-14, atol=1e-14)
323
+
324
+
325
+ @pytest.mark.slow
326
+ @pytest.mark.parametrize('method', ['Radau', 'BDF', 'LSODA'])
327
+ def test_integration_stiff(method):
328
+ rtol = 1e-6
329
+ atol = 1e-6
330
+ y0 = [1e4, 0, 0]
331
+ tspan = [0, 1e8]
332
+
333
+ def fun_robertson(t, state):
334
+ x, y, z = state
335
+ return [
336
+ -0.04 * x + 1e4 * y * z,
337
+ 0.04 * x - 1e4 * y * z - 3e7 * y * y,
338
+ 3e7 * y * y,
339
+ ]
340
+
341
+ res = solve_ivp(fun_robertson, tspan, y0, rtol=rtol,
342
+ atol=atol, method=method)
343
+
344
+ # If the stiff mode is not activated correctly, these numbers will be much bigger
345
+ assert res.nfev < 5000
346
+ assert res.njev < 200
347
+
348
+
349
+ def test_events():
350
+ def event_rational_1(t, y):
351
+ return y[0] - y[1] ** 0.7
352
+
353
+ def event_rational_2(t, y):
354
+ return y[1] ** 0.6 - y[0]
355
+
356
+ def event_rational_3(t, y):
357
+ return t - 7.4
358
+
359
+ event_rational_3.terminal = True
360
+
361
+ for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
362
+ res = solve_ivp(fun_rational, [5, 8], [1/3, 2/9], method=method,
363
+ events=(event_rational_1, event_rational_2))
364
+ assert_equal(res.status, 0)
365
+ assert_equal(res.t_events[0].size, 1)
366
+ assert_equal(res.t_events[1].size, 1)
367
+ assert_(5.3 < res.t_events[0][0] < 5.7)
368
+ assert_(7.3 < res.t_events[1][0] < 7.7)
369
+
370
+ assert_equal(res.y_events[0].shape, (1, 2))
371
+ assert_equal(res.y_events[1].shape, (1, 2))
372
+ assert np.isclose(
373
+ event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
374
+ assert np.isclose(
375
+ event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)
376
+
377
+ event_rational_1.direction = 1
378
+ event_rational_2.direction = 1
379
+ res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
380
+ events=(event_rational_1, event_rational_2))
381
+ assert_equal(res.status, 0)
382
+ assert_equal(res.t_events[0].size, 1)
383
+ assert_equal(res.t_events[1].size, 0)
384
+ assert_(5.3 < res.t_events[0][0] < 5.7)
385
+ assert_equal(res.y_events[0].shape, (1, 2))
386
+ assert_equal(res.y_events[1].shape, (0,))
387
+ assert np.isclose(
388
+ event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
389
+
390
+ event_rational_1.direction = -1
391
+ event_rational_2.direction = -1
392
+ res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
393
+ events=(event_rational_1, event_rational_2))
394
+ assert_equal(res.status, 0)
395
+ assert_equal(res.t_events[0].size, 0)
396
+ assert_equal(res.t_events[1].size, 1)
397
+ assert_(7.3 < res.t_events[1][0] < 7.7)
398
+ assert_equal(res.y_events[0].shape, (0,))
399
+ assert_equal(res.y_events[1].shape, (1, 2))
400
+ assert np.isclose(
401
+ event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)
402
+
403
+ event_rational_1.direction = 0
404
+ event_rational_2.direction = 0
405
+
406
+ res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
407
+ events=(event_rational_1, event_rational_2,
408
+ event_rational_3), dense_output=True)
409
+ assert_equal(res.status, 1)
410
+ assert_equal(res.t_events[0].size, 1)
411
+ assert_equal(res.t_events[1].size, 0)
412
+ assert_equal(res.t_events[2].size, 1)
413
+ assert_(5.3 < res.t_events[0][0] < 5.7)
414
+ assert_(7.3 < res.t_events[2][0] < 7.5)
415
+ assert_equal(res.y_events[0].shape, (1, 2))
416
+ assert_equal(res.y_events[1].shape, (0,))
417
+ assert_equal(res.y_events[2].shape, (1, 2))
418
+ assert np.isclose(
419
+ event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
420
+ assert np.isclose(
421
+ event_rational_3(res.t_events[2][0], res.y_events[2][0]), 0)
422
+
423
+ res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
424
+ events=event_rational_1, dense_output=True)
425
+ assert_equal(res.status, 0)
426
+ assert_equal(res.t_events[0].size, 1)
427
+ assert_(5.3 < res.t_events[0][0] < 5.7)
428
+
429
+ assert_equal(res.y_events[0].shape, (1, 2))
430
+ assert np.isclose(
431
+ event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
432
+
433
+ # Also test that termination by event doesn't break interpolants.
434
+ tc = np.linspace(res.t[0], res.t[-1])
435
+ yc_true = sol_rational(tc)
436
+ yc = res.sol(tc)
437
+ e = compute_error(yc, yc_true, 1e-3, 1e-6)
438
+ assert_(np.all(e < 5))
439
+
440
+ # Test that the y_event matches solution
441
+ assert np.allclose(sol_rational(res.t_events[0][0]), res.y_events[0][0],
442
+ rtol=1e-3, atol=1e-6)
443
+
444
+ # Test in backward direction.
445
+ event_rational_1.direction = 0
446
+ event_rational_2.direction = 0
447
+ for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
448
+ res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
449
+ events=(event_rational_1, event_rational_2))
450
+ assert_equal(res.status, 0)
451
+ assert_equal(res.t_events[0].size, 1)
452
+ assert_equal(res.t_events[1].size, 1)
453
+ assert_(5.3 < res.t_events[0][0] < 5.7)
454
+ assert_(7.3 < res.t_events[1][0] < 7.7)
455
+
456
+ assert_equal(res.y_events[0].shape, (1, 2))
457
+ assert_equal(res.y_events[1].shape, (1, 2))
458
+ assert np.isclose(
459
+ event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
460
+ assert np.isclose(
461
+ event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)
462
+
463
+ event_rational_1.direction = -1
464
+ event_rational_2.direction = -1
465
+ res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
466
+ events=(event_rational_1, event_rational_2))
467
+ assert_equal(res.status, 0)
468
+ assert_equal(res.t_events[0].size, 1)
469
+ assert_equal(res.t_events[1].size, 0)
470
+ assert_(5.3 < res.t_events[0][0] < 5.7)
471
+
472
+ assert_equal(res.y_events[0].shape, (1, 2))
473
+ assert_equal(res.y_events[1].shape, (0,))
474
+ assert np.isclose(
475
+ event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
476
+
477
+ event_rational_1.direction = 1
478
+ event_rational_2.direction = 1
479
+ res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
480
+ events=(event_rational_1, event_rational_2))
481
+ assert_equal(res.status, 0)
482
+ assert_equal(res.t_events[0].size, 0)
483
+ assert_equal(res.t_events[1].size, 1)
484
+ assert_(7.3 < res.t_events[1][0] < 7.7)
485
+
486
+ assert_equal(res.y_events[0].shape, (0,))
487
+ assert_equal(res.y_events[1].shape, (1, 2))
488
+ assert np.isclose(
489
+ event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)
490
+
491
+ event_rational_1.direction = 0
492
+ event_rational_2.direction = 0
493
+
494
+ res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
495
+ events=(event_rational_1, event_rational_2,
496
+ event_rational_3), dense_output=True)
497
+ assert_equal(res.status, 1)
498
+ assert_equal(res.t_events[0].size, 0)
499
+ assert_equal(res.t_events[1].size, 1)
500
+ assert_equal(res.t_events[2].size, 1)
501
+ assert_(7.3 < res.t_events[1][0] < 7.7)
502
+ assert_(7.3 < res.t_events[2][0] < 7.5)
503
+
504
+ assert_equal(res.y_events[0].shape, (0,))
505
+ assert_equal(res.y_events[1].shape, (1, 2))
506
+ assert_equal(res.y_events[2].shape, (1, 2))
507
+ assert np.isclose(
508
+ event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)
509
+ assert np.isclose(
510
+ event_rational_3(res.t_events[2][0], res.y_events[2][0]), 0)
511
+
512
+ # Also test that termination by event doesn't break interpolants.
513
+ tc = np.linspace(res.t[-1], res.t[0])
514
+ yc_true = sol_rational(tc)
515
+ yc = res.sol(tc)
516
+ e = compute_error(yc, yc_true, 1e-3, 1e-6)
517
+ assert_(np.all(e < 5))
518
+
519
+ assert np.allclose(sol_rational(res.t_events[1][0]), res.y_events[1][0],
520
+ rtol=1e-3, atol=1e-6)
521
+ assert np.allclose(sol_rational(res.t_events[2][0]), res.y_events[2][0],
522
+ rtol=1e-3, atol=1e-6)
523
+
524
+
525
+ def _get_harmonic_oscillator():
526
+ def f(t, y):
527
+ return [y[1], -y[0]]
528
+
529
+ def event(t, y):
530
+ return y[0]
531
+
532
+ return f, event
533
+
534
+
535
+ @pytest.mark.parametrize('n_events', [3, 4])
536
+ def test_event_terminal_integer(n_events):
537
+ f, event = _get_harmonic_oscillator()
538
+ event.terminal = n_events
539
+ res = solve_ivp(f, (0, 100), [1, 0], events=event)
540
+ assert len(res.t_events[0]) == n_events
541
+ assert len(res.y_events[0]) == n_events
542
+ assert_allclose(res.y_events[0][:, 0], 0, atol=1e-14)
543
+
544
+
545
+ def test_event_terminal_iv():
546
+ f, event = _get_harmonic_oscillator()
547
+ args = (f, (0, 100), [1, 0])
548
+
549
+ event.terminal = None
550
+ res = solve_ivp(*args, events=event)
551
+ event.terminal = 0
552
+ ref = solve_ivp(*args, events=event)
553
+ assert_allclose(res.t_events, ref.t_events)
554
+
555
+ message = "The `terminal` attribute..."
556
+ event.terminal = -1
557
+ with pytest.raises(ValueError, match=message):
558
+ solve_ivp(*args, events=event)
559
+ event.terminal = 3.5
560
+ with pytest.raises(ValueError, match=message):
561
+ solve_ivp(*args, events=event)
562
+
563
+
564
+ def test_max_step():
565
+ rtol = 1e-3
566
+ atol = 1e-6
567
+ y0 = [1/3, 2/9]
568
+ for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]:
569
+ for t_span in ([5, 9], [5, 1]):
570
+ res = solve_ivp(fun_rational, t_span, y0, rtol=rtol,
571
+ max_step=0.5, atol=atol, method=method,
572
+ dense_output=True)
573
+ assert_equal(res.t[0], t_span[0])
574
+ assert_equal(res.t[-1], t_span[-1])
575
+ assert_(np.all(np.abs(np.diff(res.t)) <= 0.5 + 1e-15))
576
+ assert_(res.t_events is None)
577
+ assert_(res.success)
578
+ assert_equal(res.status, 0)
579
+
580
+ y_true = sol_rational(res.t)
581
+ e = compute_error(res.y, y_true, rtol, atol)
582
+ assert_(np.all(e < 5))
583
+
584
+ tc = np.linspace(*t_span)
585
+ yc_true = sol_rational(tc)
586
+ yc = res.sol(tc)
587
+
588
+ e = compute_error(yc, yc_true, rtol, atol)
589
+ assert_(np.all(e < 5))
590
+
591
+ assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
592
+
593
+ assert_raises(ValueError, method, fun_rational, t_span[0], y0,
594
+ t_span[1], max_step=-1)
595
+
596
+ if method is not LSODA:
597
+ solver = method(fun_rational, t_span[0], y0, t_span[1],
598
+ rtol=rtol, atol=atol, max_step=1e-20)
599
+ message = solver.step()
600
+
601
+ assert_equal(solver.status, 'failed')
602
+ assert_("step size is less" in message)
603
+ assert_raises(RuntimeError, solver.step)
604
+
605
+
606
+ def test_first_step():
607
+ rtol = 1e-3
608
+ atol = 1e-6
609
+ y0 = [1/3, 2/9]
610
+ first_step = 0.1
611
+ for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]:
612
+ for t_span in ([5, 9], [5, 1]):
613
+ res = solve_ivp(fun_rational, t_span, y0, rtol=rtol,
614
+ max_step=0.5, atol=atol, method=method,
615
+ dense_output=True, first_step=first_step)
616
+
617
+ assert_equal(res.t[0], t_span[0])
618
+ assert_equal(res.t[-1], t_span[-1])
619
+ assert_allclose(first_step, np.abs(res.t[1] - 5))
620
+ assert_(res.t_events is None)
621
+ assert_(res.success)
622
+ assert_equal(res.status, 0)
623
+
624
+ y_true = sol_rational(res.t)
625
+ e = compute_error(res.y, y_true, rtol, atol)
626
+ assert_(np.all(e < 5))
627
+
628
+ tc = np.linspace(*t_span)
629
+ yc_true = sol_rational(tc)
630
+ yc = res.sol(tc)
631
+
632
+ e = compute_error(yc, yc_true, rtol, atol)
633
+ assert_(np.all(e < 5))
634
+
635
+ assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
636
+
637
+ assert_raises(ValueError, method, fun_rational, t_span[0], y0,
638
+ t_span[1], first_step=-1)
639
+ assert_raises(ValueError, method, fun_rational, t_span[0], y0,
640
+ t_span[1], first_step=5)
641
+
642
+
643
+ def test_t_eval():
644
+ rtol = 1e-3
645
+ atol = 1e-6
646
+ y0 = [1/3, 2/9]
647
+ for t_span in ([5, 9], [5, 1]):
648
+ t_eval = np.linspace(t_span[0], t_span[1], 10)
649
+ res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
650
+ t_eval=t_eval)
651
+ assert_equal(res.t, t_eval)
652
+ assert_(res.t_events is None)
653
+ assert_(res.success)
654
+ assert_equal(res.status, 0)
655
+
656
+ y_true = sol_rational(res.t)
657
+ e = compute_error(res.y, y_true, rtol, atol)
658
+ assert_(np.all(e < 5))
659
+
660
+ t_eval = [5, 5.01, 7, 8, 8.01, 9]
661
+ res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol,
662
+ t_eval=t_eval)
663
+ assert_equal(res.t, t_eval)
664
+ assert_(res.t_events is None)
665
+ assert_(res.success)
666
+ assert_equal(res.status, 0)
667
+
668
+ y_true = sol_rational(res.t)
669
+ e = compute_error(res.y, y_true, rtol, atol)
670
+ assert_(np.all(e < 5))
671
+
672
+ t_eval = [5, 4.99, 3, 1.5, 1.1, 1.01, 1]
673
+ res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol,
674
+ t_eval=t_eval)
675
+ assert_equal(res.t, t_eval)
676
+ assert_(res.t_events is None)
677
+ assert_(res.success)
678
+ assert_equal(res.status, 0)
679
+
680
+ t_eval = [5.01, 7, 8, 8.01]
681
+ res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol,
682
+ t_eval=t_eval)
683
+ assert_equal(res.t, t_eval)
684
+ assert_(res.t_events is None)
685
+ assert_(res.success)
686
+ assert_equal(res.status, 0)
687
+
688
+ y_true = sol_rational(res.t)
689
+ e = compute_error(res.y, y_true, rtol, atol)
690
+ assert_(np.all(e < 5))
691
+
692
+ t_eval = [4.99, 3, 1.5, 1.1, 1.01]
693
+ res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol,
694
+ t_eval=t_eval)
695
+ assert_equal(res.t, t_eval)
696
+ assert_(res.t_events is None)
697
+ assert_(res.success)
698
+ assert_equal(res.status, 0)
699
+
700
+ t_eval = [4, 6]
701
+ assert_raises(ValueError, solve_ivp, fun_rational, [5, 9], y0,
702
+ rtol=rtol, atol=atol, t_eval=t_eval)
703
+
704
+
705
+ def test_t_eval_dense_output():
706
+ rtol = 1e-3
707
+ atol = 1e-6
708
+ y0 = [1/3, 2/9]
709
+ t_span = [5, 9]
710
+ t_eval = np.linspace(t_span[0], t_span[1], 10)
711
+ res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
712
+ t_eval=t_eval)
713
+ res_d = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
714
+ t_eval=t_eval, dense_output=True)
715
+ assert_equal(res.t, t_eval)
716
+ assert_(res.t_events is None)
717
+ assert_(res.success)
718
+ assert_equal(res.status, 0)
719
+
720
+ assert_equal(res.t, res_d.t)
721
+ assert_equal(res.y, res_d.y)
722
+ assert_(res_d.t_events is None)
723
+ assert_(res_d.success)
724
+ assert_equal(res_d.status, 0)
725
+
726
+ # if t and y are equal only test values for one case
727
+ y_true = sol_rational(res.t)
728
+ e = compute_error(res.y, y_true, rtol, atol)
729
+ assert_(np.all(e < 5))
730
+
731
+
732
+ def test_t_eval_early_event():
733
+ def early_event(t, y):
734
+ return t - 7
735
+
736
+ early_event.terminal = True
737
+
738
+ rtol = 1e-3
739
+ atol = 1e-6
740
+ y0 = [1/3, 2/9]
741
+ t_span = [5, 9]
742
+ t_eval = np.linspace(7.5, 9, 16)
743
+ for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
744
+ with suppress_warnings() as sup:
745
+ sup.filter(UserWarning,
746
+ "The following arguments have no effect for a chosen "
747
+ "solver: `jac`")
748
+ res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
749
+ method=method, t_eval=t_eval, events=early_event,
750
+ jac=jac_rational)
751
+ assert res.success
752
+ assert res.message == 'A termination event occurred.'
753
+ assert res.status == 1
754
+ assert not res.t and not res.y
755
+ assert len(res.t_events) == 1
756
+ assert res.t_events[0].size == 1
757
+ assert res.t_events[0][0] == 7
758
+
759
+
760
+ def test_event_dense_output_LSODA():
761
+ def event_lsoda(t, y):
762
+ return y[0] - 2.02e-5
763
+
764
+ rtol = 1e-3
765
+ atol = 1e-6
766
+ y0 = [0.05]
767
+ t_span = [-2, 2]
768
+ first_step = 1e-3
769
+ res = solve_ivp(
770
+ fun_event_dense_output_LSODA,
771
+ t_span,
772
+ y0,
773
+ method="LSODA",
774
+ dense_output=True,
775
+ events=event_lsoda,
776
+ first_step=first_step,
777
+ max_step=1,
778
+ rtol=rtol,
779
+ atol=atol,
780
+ jac=jac_event_dense_output_LSODA,
781
+ )
782
+
783
+ assert_equal(res.t[0], t_span[0])
784
+ assert_equal(res.t[-1], t_span[-1])
785
+ assert_allclose(first_step, np.abs(res.t[1] - t_span[0]))
786
+ assert res.success
787
+ assert_equal(res.status, 0)
788
+
789
+ y_true = sol_event_dense_output_LSODA(res.t)
790
+ e = compute_error(res.y, y_true, rtol, atol)
791
+ assert_array_less(e, 5)
792
+
793
+ tc = np.linspace(*t_span)
794
+ yc_true = sol_event_dense_output_LSODA(tc)
795
+ yc = res.sol(tc)
796
+ e = compute_error(yc, yc_true, rtol, atol)
797
+ assert_array_less(e, 5)
798
+
799
+ assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
800
+
801
+
802
+ def test_no_integration():
803
+ for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
804
+ sol = solve_ivp(lambda t, y: -y, [4, 4], [2, 3],
805
+ method=method, dense_output=True)
806
+ assert_equal(sol.sol(4), [2, 3])
807
+ assert_equal(sol.sol([4, 5, 6]), [[2, 2, 2], [3, 3, 3]])
808
+
809
+
810
+ def test_no_integration_class():
811
+ for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]:
812
+ solver = method(lambda t, y: -y, 0.0, [10.0, 0.0], 0.0)
813
+ solver.step()
814
+ assert_equal(solver.status, 'finished')
815
+ sol = solver.dense_output()
816
+ assert_equal(sol(0.0), [10.0, 0.0])
817
+ assert_equal(sol([0, 1, 2]), [[10, 10, 10], [0, 0, 0]])
818
+
819
+ solver = method(lambda t, y: -y, 0.0, [], np.inf)
820
+ solver.step()
821
+ assert_equal(solver.status, 'finished')
822
+ sol = solver.dense_output()
823
+ assert_equal(sol(100.0), [])
824
+ assert_equal(sol([0, 1, 2]), np.empty((0, 3)))
825
+
826
+
827
+ def test_empty():
828
+ def fun(t, y):
829
+ return np.zeros((0,))
830
+
831
+ y0 = np.zeros((0,))
832
+
833
+ for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
834
+ sol = assert_no_warnings(solve_ivp, fun, [0, 10], y0,
835
+ method=method, dense_output=True)
836
+ assert_equal(sol.sol(10), np.zeros((0,)))
837
+ assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3)))
838
+
839
+ for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
840
+ sol = assert_no_warnings(solve_ivp, fun, [0, np.inf], y0,
841
+ method=method, dense_output=True)
842
+ assert_equal(sol.sol(10), np.zeros((0,)))
843
+ assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3)))
844
+
845
+
846
+ def test_ConstantDenseOutput():
847
+ sol = ConstantDenseOutput(0, 1, np.array([1, 2]))
848
+ assert_allclose(sol(1.5), [1, 2])
849
+ assert_allclose(sol([1, 1.5, 2]), [[1, 1, 1], [2, 2, 2]])
850
+
851
+ sol = ConstantDenseOutput(0, 1, np.array([]))
852
+ assert_allclose(sol(1.5), np.empty(0))
853
+ assert_allclose(sol([1, 1.5, 2]), np.empty((0, 3)))
854
+
855
+
856
+ def test_classes():
857
+ y0 = [1 / 3, 2 / 9]
858
+ for cls in [RK23, RK45, DOP853, Radau, BDF, LSODA]:
859
+ solver = cls(fun_rational, 5, y0, np.inf)
860
+ assert_equal(solver.n, 2)
861
+ assert_equal(solver.status, 'running')
862
+ assert_equal(solver.t_bound, np.inf)
863
+ assert_equal(solver.direction, 1)
864
+ assert_equal(solver.t, 5)
865
+ assert_equal(solver.y, y0)
866
+ assert_(solver.step_size is None)
867
+ if cls is not LSODA:
868
+ assert_(solver.nfev > 0)
869
+ assert_(solver.njev >= 0)
870
+ assert_equal(solver.nlu, 0)
871
+ else:
872
+ assert_equal(solver.nfev, 0)
873
+ assert_equal(solver.njev, 0)
874
+ assert_equal(solver.nlu, 0)
875
+
876
+ assert_raises(RuntimeError, solver.dense_output)
877
+
878
+ message = solver.step()
879
+ assert_equal(solver.status, 'running')
880
+ assert_equal(message, None)
881
+ assert_equal(solver.n, 2)
882
+ assert_equal(solver.t_bound, np.inf)
883
+ assert_equal(solver.direction, 1)
884
+ assert_(solver.t > 5)
885
+ assert_(not np.all(np.equal(solver.y, y0)))
886
+ assert_(solver.step_size > 0)
887
+ assert_(solver.nfev > 0)
888
+ assert_(solver.njev >= 0)
889
+ assert_(solver.nlu >= 0)
890
+ sol = solver.dense_output()
891
+ assert_allclose(sol(5), y0, rtol=1e-15, atol=0)
892
+
893
+
894
+ def test_OdeSolution():
895
+ ts = np.array([0, 2, 5], dtype=float)
896
+ s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1]))
897
+ s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1]))
898
+
899
+ sol = OdeSolution(ts, [s1, s2])
900
+
901
+ assert_equal(sol(-1), [-1])
902
+ assert_equal(sol(1), [-1])
903
+ assert_equal(sol(2), [-1])
904
+ assert_equal(sol(3), [1])
905
+ assert_equal(sol(5), [1])
906
+ assert_equal(sol(6), [1])
907
+
908
+ assert_equal(sol([0, 6, -2, 1.5, 4.5, 2.5, 5, 5.5, 2]),
909
+ np.array([[-1, 1, -1, -1, 1, 1, 1, 1, -1]]))
910
+
911
+ ts = np.array([10, 4, -3])
912
+ s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1]))
913
+ s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1]))
914
+
915
+ sol = OdeSolution(ts, [s1, s2])
916
+ assert_equal(sol(11), [-1])
917
+ assert_equal(sol(10), [-1])
918
+ assert_equal(sol(5), [-1])
919
+ assert_equal(sol(4), [-1])
920
+ assert_equal(sol(0), [1])
921
+ assert_equal(sol(-3), [1])
922
+ assert_equal(sol(-4), [1])
923
+
924
+ assert_equal(sol([12, -5, 10, -3, 6, 1, 4]),
925
+ np.array([[-1, 1, -1, 1, -1, 1, -1]]))
926
+
927
+ ts = np.array([1, 1])
928
+ s = ConstantDenseOutput(1, 1, np.array([10]))
929
+ sol = OdeSolution(ts, [s])
930
+ assert_equal(sol(0), [10])
931
+ assert_equal(sol(1), [10])
932
+ assert_equal(sol(2), [10])
933
+
934
+ assert_equal(sol([2, 1, 0]), np.array([[10, 10, 10]]))
935
+
936
+
937
+ def test_num_jac():
938
+ def fun(t, y):
939
+ return np.vstack([
940
+ -0.04 * y[0] + 1e4 * y[1] * y[2],
941
+ 0.04 * y[0] - 1e4 * y[1] * y[2] - 3e7 * y[1] ** 2,
942
+ 3e7 * y[1] ** 2
943
+ ])
944
+
945
+ def jac(t, y):
946
+ return np.array([
947
+ [-0.04, 1e4 * y[2], 1e4 * y[1]],
948
+ [0.04, -1e4 * y[2] - 6e7 * y[1], -1e4 * y[1]],
949
+ [0, 6e7 * y[1], 0]
950
+ ])
951
+
952
+ t = 1
953
+ y = np.array([1, 0, 0])
954
+ J_true = jac(t, y)
955
+ threshold = 1e-5
956
+ f = fun(t, y).ravel()
957
+
958
+ J_num, factor = num_jac(fun, t, y, f, threshold, None)
959
+ assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5)
960
+
961
+ J_num, factor = num_jac(fun, t, y, f, threshold, factor)
962
+ assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5)
963
+
964
+
965
def test_num_jac_sparse():
    def fun(t, y):
        e = y[1:]**3 - y[:-1]**2
        z = np.zeros(y.shape[1])
        return np.vstack((z, 3 * e)) + np.vstack((2 * e, z))

    def structure(n):
        # Tridiagonal sparsity pattern; the slice clips at both ends.
        A = np.zeros((n, n), dtype=int)
        for i in range(n):
            A[i, max(i - 1, 0):i + 2] = 1
        return A

    np.random.seed(0)
    n = 20
    y = np.random.randn(n)
    A = structure(n)
    groups = group_columns(A)

    f = fun(0, y[:, None]).ravel()

    def sparse_and_dense(factor):
        # Evaluate the sparse and dense code paths on identical inputs.
        J_sp, fac_sp = num_jac(fun, 0, y.ravel(), f, 1e-8, factor,
                               sparsity=(A, groups))
        J_d, fac_d = num_jac(fun, 0, y.ravel(), f, 1e-8, factor)
        return J_sp, fac_sp, J_d, fac_d

    # Compare dense and sparse results, assuming that the dense
    # implementation is correct (as it is straightforward).
    J_sp, fac_sp, J_d, fac_d = sparse_and_dense(None)
    assert_allclose(J_d, J_sp.toarray(), rtol=1e-12, atol=1e-14)
    assert_allclose(fac_d, fac_sp, rtol=1e-12, atol=1e-14)

    # Take small factors to trigger their recomputing inside.
    tiny = np.random.uniform(0, 1e-12, size=n)
    J_sp, fac_sp, J_d, fac_d = sparse_and_dense(tiny)
    assert_allclose(J_d, J_sp.toarray(), rtol=1e-12, atol=1e-14)
    assert_allclose(fac_d, fac_sp, rtol=1e-12, atol=1e-14)
1008
+
1009
+
1010
def test_args():
    """Exercise `args` forwarding to fun, jac, and all event functions."""

    # sys3 is actually two decoupled systems. (x, y) form a
    # linear oscillator, while z is a nonlinear first order
    # system with equilibria at z=0 and z=1. If k > 0, z=1
    # is stable and z=0 is unstable.

    def sys3(t, w, omega, k, zfinal):
        x, y, z = w
        return [-omega*y, omega*x, k*z*(1 - z)]

    def sys3_jac(t, w, omega, k, zfinal):
        x, y, z = w
        J = np.array([[0, -omega, 0],
                      [omega, 0, 0],
                      [0, 0, k*(1 - 2*z)]])
        return J

    def sys3_x0decreasing(t, w, omega, k, zfinal):
        x, y, z = w
        return x

    def sys3_y0increasing(t, w, omega, k, zfinal):
        x, y, z = w
        return y

    def sys3_zfinal(t, w, omega, k, zfinal):
        x, y, z = w
        return z - zfinal

    # Set the event flags for the event functions.
    sys3_x0decreasing.direction = -1
    sys3_y0increasing.direction = 1
    sys3_zfinal.terminal = True

    omega = 2
    k = 4

    tfinal = 5
    zfinal = 0.99
    # Find z0 such that when z(0) = z0, z(tfinal) = zfinal.
    # The condition z(tfinal) = zfinal is the terminal event.
    z0 = np.exp(-k*tfinal)/((1 - zfinal)/zfinal + np.exp(-k*tfinal))

    w0 = [0, -1, z0]

    # Provide the jac argument and use the Radau method to ensure that the use
    # of the Jacobian function is exercised.
    # If event handling is working, the solution will stop at tfinal, not tend.
    tend = 2*tfinal
    sol = solve_ivp(sys3, [0, tend], w0,
                    events=[sys3_x0decreasing, sys3_y0increasing, sys3_zfinal],
                    dense_output=True, args=(omega, k, zfinal),
                    method='Radau', jac=sys3_jac,
                    rtol=1e-10, atol=1e-13)

    # Check that we got the expected events at the expected times.
    x0events_t = sol.t_events[0]
    y0events_t = sol.t_events[1]
    zfinalevents_t = sol.t_events[2]
    assert_allclose(x0events_t, [0.5*np.pi, 1.5*np.pi])
    assert_allclose(y0events_t, [0.25*np.pi, 1.25*np.pi])
    assert_allclose(zfinalevents_t, [tfinal])

    # Check that the solution agrees with the known exact solution.
    t = np.linspace(0, zfinalevents_t[0], 250)
    w = sol.sol(t)
    assert_allclose(w[0], np.sin(omega*t), rtol=1e-9, atol=1e-12)
    assert_allclose(w[1], -np.cos(omega*t), rtol=1e-9, atol=1e-12)
    assert_allclose(w[2], 1/(((1 - z0)/z0)*np.exp(-k*t) + 1),
                    rtol=1e-9, atol=1e-12)

    # Check that the state variables have the expected values at the events.
    x0events = sol.sol(x0events_t)
    y0events = sol.sol(y0events_t)
    zfinalevents = sol.sol(zfinalevents_t)
    assert_allclose(x0events[0], np.zeros_like(x0events[0]), atol=5e-14)
    assert_allclose(x0events[1], np.ones_like(x0events[1]))
    assert_allclose(y0events[0], np.ones_like(y0events[0]))
    assert_allclose(y0events[1], np.zeros_like(y0events[1]), atol=5e-14)
    assert_allclose(zfinalevents[2], [zfinal])
1091
+
1092
+
1093
def test_array_rtol():
    # solve_ivp had a bug with array_like `rtol`; see gh-15482
    # check that it's fixed
    def f(t, y):
        return y[0], y[1]

    def final_error(rtol):
        sol = solve_ivp(f, (0, 1), [1., 1.], rtol=rtol)
        return np.abs(np.linalg.norm(sol.y[:, -1] - np.exp(1)))

    # no warning (or error) when `rtol` is array_like
    err1 = final_error([1e-1, 1e-1])

    # warning when an element of `rtol` is too small
    with pytest.warns(UserWarning, match="At least one element..."):
        err2 = final_error([1e-1, 1e-16])

    # tighter rtol improves the error
    assert err2 < err1
1110
+
1111
@pytest.mark.parametrize('method', ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA'])
def test_integration_zero_rhs(method):
    # A zero right-hand side must leave the state at its initial value.
    y0 = np.ones(3)
    result = solve_ivp(fun_zero, [0, 10], y0, method=method)
    assert_(result.success)
    assert_equal(result.status, 0)
    assert_allclose(result.y, 1.0, rtol=1e-15)
1117
+
1118
+
1119
def test_args_single_value():
    def exp_decay(t, y, a):
        return a*y

    # A bare scalar is not an unpackable argument sequence and is rejected.
    with pytest.raises(TypeError, match="Supplied 'args' cannot be unpacked."):
        solve_ivp(exp_decay, (0, 0.1), [1], args=-1)

    sol = solve_ivp(exp_decay, (0, 0.1), [1], args=(-1,))
    assert_allclose(sol.y[0, -1], np.exp(-0.1))
1129
+
1130
@pytest.mark.parametrize("f0_fill", [np.nan, np.inf])
def test_initial_state_finiteness(f0_fill):
    # regression test for gh-17846
    expected = "All components of the initial state `y0` must be finite."
    with pytest.raises(ValueError, match=expected):
        solve_ivp(fun_zero, [0, 10], np.full(3, f0_fill))
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/test_rk.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+ from numpy.testing import assert_allclose, assert_
3
+ import numpy as np
4
+ from scipy.integrate import RK23, RK45, DOP853
5
+ from scipy.integrate._ivp import dop853_coefficients
6
+
7
+
8
@pytest.mark.parametrize("solver", [RK23, RK45, DOP853])
def test_coefficient_properties(solver):
    # Runge-Kutta consistency conditions: the weights B sum to one, and
    # each node C[i] equals the corresponding row sum of A.
    assert_allclose(solver.B.sum(), 1, rtol=1e-15)
    assert_allclose(solver.A.sum(axis=1), solver.C, rtol=1e-14)
12
+
13
+
14
def test_coefficient_properties_dop853():
    # Same consistency conditions as above, checked directly on the raw
    # DOP853 coefficient tables.
    assert_allclose(dop853_coefficients.B.sum(), 1, rtol=1e-15)
    assert_allclose(dop853_coefficients.A.sum(axis=1),
                    dop853_coefficients.C,
                    rtol=1e-14)
19
+
20
+
21
@pytest.mark.parametrize("solver_class", [RK23, RK45, DOP853])
def test_error_estimation(solver_class):
    # For y' = y the true one-step error is known exactly, so the embedded
    # error estimate must bound it in magnitude.
    h = 0.2
    solver = solver_class(lambda t, y: y, 0, [1], 1, first_step=h)
    solver.step()
    estimate = solver._estimate_error(solver.K, h)
    true_error = solver.y - np.exp([h])
    assert_(np.abs(true_error) < np.abs(estimate))
29
+
30
+
31
@pytest.mark.parametrize("solver_class", [RK23, RK45, DOP853])
def test_error_estimation_complex(solver_class):
    # The error norm must be a real quantity even for a complex system.
    step = 0.2
    solver = solver_class(lambda t, y: 1j * y, 0, [1j], 1, first_step=step)
    solver.step()
    norm = solver._estimate_error_norm(solver.K, step, scale=[1])
    assert np.isrealobj(norm)
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_lsoda.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (113 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ode.py ADDED
@@ -0,0 +1,1376 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Authors: Pearu Peterson, Pauli Virtanen, John Travers
2
+ """
3
+ First-order ODE integrators.
4
+
5
+ User-friendly interface to various numerical integrators for solving a
6
+ system of first order ODEs with prescribed initial conditions::
7
+
8
+ d y(t)[i]
9
+ --------- = f(t,y(t))[i],
10
+ d t
11
+
12
+ y(t=0)[i] = y0[i],
13
+
14
+ where::
15
+
16
+ i = 0, ..., len(y0) - 1
17
+
18
+ class ode
19
+ ---------
20
+
21
+ A generic interface class to numeric integrators. It has the following
22
+ methods::
23
+
24
+ integrator = ode(f, jac=None)
25
+ integrator = integrator.set_integrator(name, **params)
26
+ integrator = integrator.set_initial_value(y0, t0=0.0)
27
+ integrator = integrator.set_f_params(*args)
28
+ integrator = integrator.set_jac_params(*args)
29
+ y1 = integrator.integrate(t1, step=False, relax=False)
30
+ flag = integrator.successful()
31
+
32
+ class complex_ode
33
+ -----------------
34
+
35
+ This class has the same generic interface as ode, except it can handle complex
36
+ f, y and Jacobians by transparently translating them into the equivalent
37
+ real-valued system. It supports the real-valued solvers (i.e., not zvode) and is
38
+ an alternative to ode with the zvode solver, sometimes performing better.
39
+ """
40
+ # XXX: Integrators must have:
41
+ # ===========================
42
+ # cvode - C version of vode and vodpk with many improvements.
43
+ # Get it from http://www.netlib.org/ode/cvode.tar.gz.
44
+ # To wrap cvode to Python, one must write the extension module by
45
+ # hand. Its interface is too much 'advanced C' that using f2py
46
+ # would be too complicated (or impossible).
47
+ #
48
+ # How to define a new integrator:
49
+ # ===============================
50
+ #
51
+ # class myodeint(IntegratorBase):
52
+ #
53
+ # runner = <odeint function> or None
54
+ #
55
+ # def __init__(self,...): # required
56
+ # <initialize>
57
+ #
58
+ # def reset(self,n,has_jac): # optional
59
+ # # n - the size of the problem (number of equations)
60
+ # # has_jac - whether user has supplied its own routine for Jacobian
61
+ # <allocate memory,initialize further>
62
+ #
63
+ # def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required
64
+ # # this method is called to integrate from t=t0 to t=t1
65
+ # # with initial condition y0. f and jac are user-supplied functions
66
+ # # that define the problem. f_params,jac_params are additional
67
+ # # arguments
68
+ # # to these functions.
69
+ # <calculate y1>
70
+ # if <calculation was unsuccessful>:
71
+ # self.success = 0
72
+ # return t1,y1
73
+ #
74
+ # # In addition, one can define step() and run_relax() methods (they
75
+ # # take the same arguments as run()) if the integrator can support
76
+ # # these features (see IntegratorBase doc strings).
77
+ #
78
+ # if myodeint.runner:
79
+ # IntegratorBase.integrator_classes.append(myodeint)
80
+
81
+ __all__ = ['ode', 'complex_ode']
82
+
83
+ import re
84
+ import warnings
85
+
86
+ from numpy import asarray, array, zeros, isscalar, real, imag, vstack
87
+
88
+ from . import _vode
89
+ from . import _dop
90
+ from . import _lsoda
91
+
92
+
93
# Integer dtypes exposed by the compiled Fortran extension modules.
_dop_int_dtype = _dop.types.intvar.dtype
_vode_int_dtype = _vode.types.intvar.dtype
_lsoda_int_dtype = _lsoda.types.intvar.dtype
96
+
97
+
98
+ # ------------------------------------------------------------------------------
99
+ # User interface
100
+ # ------------------------------------------------------------------------------
101
+
102
+
103
+ class ode:
104
+ """
105
+ A generic interface class to numeric integrators.
106
+
107
+ Solve an equation system :math:`y'(t) = f(t,y)` with (optional) ``jac = df/dy``.
108
+
109
+ *Note*: The first two arguments of ``f(t, y, ...)`` are in the
110
+ opposite order of the arguments in the system definition function used
111
+ by `scipy.integrate.odeint`.
112
+
113
+ Parameters
114
+ ----------
115
+ f : callable ``f(t, y, *f_args)``
116
+ Right-hand side of the differential equation. t is a scalar,
117
+ ``y.shape == (n,)``.
118
+ ``f_args`` is set by calling ``set_f_params(*args)``.
119
+ `f` should return a scalar, array or list (not a tuple).
120
+ jac : callable ``jac(t, y, *jac_args)``, optional
121
+ Jacobian of the right-hand side, ``jac[i,j] = d f[i] / d y[j]``.
122
+ ``jac_args`` is set by calling ``set_jac_params(*args)``.
123
+
124
+ Attributes
125
+ ----------
126
+ t : float
127
+ Current time.
128
+ y : ndarray
129
+ Current variable values.
130
+
131
+ See also
132
+ --------
133
+ odeint : an integrator with a simpler interface based on lsoda from ODEPACK
134
+ quad : for finding the area under a curve
135
+
136
+ Notes
137
+ -----
138
+ Available integrators are listed below. They can be selected using
139
+ the `set_integrator` method.
140
+
141
+ "vode"
142
+
143
+ Real-valued Variable-coefficient Ordinary Differential Equation
144
+ solver, with fixed-leading-coefficient implementation. It provides
145
+ implicit Adams method (for non-stiff problems) and a method based on
146
+ backward differentiation formulas (BDF) (for stiff problems).
147
+
148
+ Source: http://www.netlib.org/ode/vode.f
149
+
150
+ .. warning::
151
+
152
+ This integrator is not re-entrant. You cannot have two `ode`
153
+ instances using the "vode" integrator at the same time.
154
+
155
+ This integrator accepts the following parameters in `set_integrator`
156
+ method of the `ode` class:
157
+
158
+ - atol : float or sequence
159
+ absolute tolerance for solution
160
+ - rtol : float or sequence
161
+ relative tolerance for solution
162
+ - lband : None or int
163
+ - uband : None or int
164
+ Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
165
+ Setting these requires your jac routine to return the jacobian
166
+ in packed format, jac_packed[i-j+uband, j] = jac[i,j]. The
167
+ dimension of the matrix must be (lband+uband+1, len(y)).
168
+ - method: 'adams' or 'bdf'
169
+ Which solver to use, Adams (non-stiff) or BDF (stiff)
170
+ - with_jacobian : bool
171
+ This option is only considered when the user has not supplied a
172
+ Jacobian function and has not indicated (by setting either band)
173
+ that the Jacobian is banded. In this case, `with_jacobian` specifies
174
+ whether the iteration method of the ODE solver's correction step is
175
+ chord iteration with an internally generated full Jacobian or
176
+ functional iteration with no Jacobian.
177
+ - nsteps : int
178
+ Maximum number of (internally defined) steps allowed during one
179
+ call to the solver.
180
+ - first_step : float
181
+ - min_step : float
182
+ - max_step : float
183
+ Limits for the step sizes used by the integrator.
184
+ - order : int
185
+ Maximum order used by the integrator,
186
+ order <= 12 for Adams, <= 5 for BDF.
187
+
188
+ "zvode"
189
+
190
+ Complex-valued Variable-coefficient Ordinary Differential Equation
191
+ solver, with fixed-leading-coefficient implementation. It provides
192
+ implicit Adams method (for non-stiff problems) and a method based on
193
+ backward differentiation formulas (BDF) (for stiff problems).
194
+
195
+ Source: http://www.netlib.org/ode/zvode.f
196
+
197
+ .. warning::
198
+
199
+ This integrator is not re-entrant. You cannot have two `ode`
200
+ instances using the "zvode" integrator at the same time.
201
+
202
+ This integrator accepts the same parameters in `set_integrator`
203
+ as the "vode" solver.
204
+
205
+ .. note::
206
+
207
+ When using ZVODE for a stiff system, it should only be used for
208
+ the case in which the function f is analytic, that is, when each f(i)
209
+ is an analytic function of each y(j). Analyticity means that the
210
+ partial derivative df(i)/dy(j) is a unique complex number, and this
211
+ fact is critical in the way ZVODE solves the dense or banded linear
212
+ systems that arise in the stiff case. For a complex stiff ODE system
213
+ in which f is not analytic, ZVODE is likely to have convergence
214
+ failures, and for this problem one should instead use DVODE on the
215
+ equivalent real system (in the real and imaginary parts of y).
216
+
217
+ "lsoda"
218
+
219
+ Real-valued Variable-coefficient Ordinary Differential Equation
220
+ solver, with fixed-leading-coefficient implementation. It provides
221
+ automatic method switching between implicit Adams method (for non-stiff
222
+ problems) and a method based on backward differentiation formulas (BDF)
223
+ (for stiff problems).
224
+
225
+ Source: http://www.netlib.org/odepack
226
+
227
+ .. warning::
228
+
229
+ This integrator is not re-entrant. You cannot have two `ode`
230
+ instances using the "lsoda" integrator at the same time.
231
+
232
+ This integrator accepts the following parameters in `set_integrator`
233
+ method of the `ode` class:
234
+
235
+ - atol : float or sequence
236
+ absolute tolerance for solution
237
+ - rtol : float or sequence
238
+ relative tolerance for solution
239
+ - lband : None or int
240
+ - uband : None or int
241
+ Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
242
+ Setting these requires your jac routine to return the jacobian
243
+ in packed format, jac_packed[i-j+uband, j] = jac[i,j].
244
+ - with_jacobian : bool
245
+ *Not used.*
246
+ - nsteps : int
247
+ Maximum number of (internally defined) steps allowed during one
248
+ call to the solver.
249
+ - first_step : float
250
+ - min_step : float
251
+ - max_step : float
252
+ Limits for the step sizes used by the integrator.
253
+ - max_order_ns : int
254
+ Maximum order used in the nonstiff case (default 12).
255
+ - max_order_s : int
256
+ Maximum order used in the stiff case (default 5).
257
+ - max_hnil : int
258
+ Maximum number of messages reporting too small step size (t + h = t)
259
+ (default 0)
260
+ - ixpr : int
261
+ Whether to generate extra printing at method switches (default False).
262
+
263
+ "dopri5"
264
+
265
+ This is an explicit runge-kutta method of order (4)5 due to Dormand &
266
+ Prince (with stepsize control and dense output).
267
+
268
+ Authors:
269
+
270
+ E. Hairer and G. Wanner
271
+ Universite de Geneve, Dept. de Mathematiques
272
+ CH-1211 Geneve 24, Switzerland
273
274
+
275
+ This code is described in [HNW93]_.
276
+
277
+ This integrator accepts the following parameters in set_integrator()
278
+ method of the ode class:
279
+
280
+ - atol : float or sequence
281
+ absolute tolerance for solution
282
+ - rtol : float or sequence
283
+ relative tolerance for solution
284
+ - nsteps : int
285
+ Maximum number of (internally defined) steps allowed during one
286
+ call to the solver.
287
+ - first_step : float
288
+ - max_step : float
289
+ - safety : float
290
+ Safety factor on new step selection (default 0.9)
291
+ - ifactor : float
292
+ - dfactor : float
293
+ Maximum factor to increase/decrease step size by in one step
294
+ - beta : float
295
+ Beta parameter for stabilised step size control.
296
+ - verbosity : int
297
+ Switch for printing messages (< 0 for no messages).
298
+
299
+ "dop853"
300
+
301
+ This is an explicit runge-kutta method of order 8(5,3) due to Dormand
302
+ & Prince (with stepsize control and dense output).
303
+
304
+ Options and references the same as "dopri5".
305
+
306
+ Examples
307
+ --------
308
+
309
+ A problem to integrate and the corresponding jacobian:
310
+
311
+ >>> from scipy.integrate import ode
312
+ >>>
313
+ >>> y0, t0 = [1.0j, 2.0], 0
314
+ >>>
315
+ >>> def f(t, y, arg1):
316
+ ... return [1j*arg1*y[0] + y[1], -arg1*y[1]**2]
317
+ >>> def jac(t, y, arg1):
318
+ ... return [[1j*arg1, 1], [0, -arg1*2*y[1]]]
319
+
320
+ The integration:
321
+
322
+ >>> r = ode(f, jac).set_integrator('zvode', method='bdf')
323
+ >>> r.set_initial_value(y0, t0).set_f_params(2.0).set_jac_params(2.0)
324
+ >>> t1 = 10
325
+ >>> dt = 1
326
+ >>> while r.successful() and r.t < t1:
327
+ ... print(r.t+dt, r.integrate(r.t+dt))
328
+ 1 [-0.71038232+0.23749653j 0.40000271+0.j ]
329
+ 2.0 [0.19098503-0.52359246j 0.22222356+0.j ]
330
+ 3.0 [0.47153208+0.52701229j 0.15384681+0.j ]
331
+ 4.0 [-0.61905937+0.30726255j 0.11764744+0.j ]
332
+ 5.0 [0.02340997-0.61418799j 0.09523835+0.j ]
333
+ 6.0 [0.58643071+0.339819j 0.08000018+0.j ]
334
+ 7.0 [-0.52070105+0.44525141j 0.06896565+0.j ]
335
+ 8.0 [-0.15986733-0.61234476j 0.06060616+0.j ]
336
+ 9.0 [0.64850462+0.15048982j 0.05405414+0.j ]
337
+ 10.0 [-0.38404699+0.56382299j 0.04878055+0.j ]
338
+
339
+ References
340
+ ----------
341
+ .. [HNW93] E. Hairer, S.P. Norsett and G. Wanner, Solving Ordinary
342
+ Differential Equations i. Nonstiff Problems. 2nd edition.
343
+ Springer Series in Computational Mathematics,
344
+ Springer-Verlag (1993)
345
+
346
+ """
347
+
348
    def __init__(self, f, jac=None):
        self.stiff = 0
        self.f = f                # right-hand side f(t, y, *f_params)
        self.jac = jac            # optional Jacobian jac(t, y, *jac_params)
        self.f_params = ()        # extra args for f, set via set_f_params()
        self.jac_params = ()      # extra args for jac, set via set_jac_params()
        self._y = []              # empty until set_initial_value() is called
356
    @property
    def y(self):
        """Current variable values."""
        return self._y
359
+
360
    def set_initial_value(self, y, t=0.0):
        """Set initial conditions y(t) = y."""
        if isscalar(y):
            y = [y]
        n_prev = len(self._y)
        if not n_prev:
            # No integrator chosen yet; pick the first available one so its
            # scalar type is known before converting y below.
            self.set_integrator('')  # find first available integrator
        self._y = asarray(y, self._integrator.scalar)
        self.t = t
        self._integrator.reset(len(self._y), self.jac is not None)
        return self
371
+
372
    def set_integrator(self, name, **integrator_params):
        """
        Set integrator by name.

        Parameters
        ----------
        name : str
            Name of the integrator.
        **integrator_params
            Additional parameters for the integrator.
        """
        integrator = find_integrator(name)
        if integrator is None:
            # FIXME: this really should raise an exception. Will that break
            # any code?
            message = f'No integrator name match with {name!r} or is not available.'
            warnings.warn(message, stacklevel=2)
        else:
            self._integrator = integrator(**integrator_params)
            if not len(self._y):
                # No initial value set yet: install a placeholder state so the
                # integrator can be reset; set_initial_value() overwrites it.
                self.t = 0.0
                self._y = array([0.0], self._integrator.scalar)
            self._integrator.reset(len(self._y), self.jac is not None)
        return self
396
+
397
    def integrate(self, t, step=False, relax=False):
        """Find y=y(t), set y as an initial condition, and return y.

        Parameters
        ----------
        t : float
            The endpoint of the integration step.
        step : bool
            If True, and if the integrator supports the step method,
            then perform a single integration step and return.
            This parameter is provided in order to expose internals of
            the implementation, and should not be changed from its default
            value in most cases.
        relax : bool
            If True and if the integrator supports the run_relax method,
            then integrate until t_1 >= t and return. ``relax`` is not
            referenced if ``step=True``.
            This parameter is provided in order to expose internals of
            the implementation, and should not be changed from its default
            value in most cases.

        Returns
        -------
        y : float
            The integrated value at t
        """
        # Pick the most specific driver the integrator supports for the
        # requested mode; fall back to the plain run() method.
        if step and self._integrator.supports_step:
            mth = self._integrator.step
        elif relax and self._integrator.supports_run_relax:
            mth = self._integrator.run_relax
        else:
            mth = self._integrator.run

        try:
            # A dummy callable stands in when no Jacobian was supplied
            # (presumably never invoked since reset() was told has_jac=False
            # — verify against the integrator implementations).
            self._y, self.t = mth(self.f, self.jac or (lambda: None),
                                  self._y, self.t, t,
                                  self.f_params, self.jac_params)
        except SystemError as e:
            # f2py issue with tuple returns, see ticket 1187.
            raise ValueError(
                'Function to integrate must not return a tuple.'
            ) from e

        return self._y
441
+
442
    def successful(self):
        """Check if integration was successful."""
        try:
            self._integrator
        except AttributeError:
            # Lazily select a default integrator if none was chosen yet.
            self.set_integrator('')
        return self._integrator.success == 1
449
+
450
    def get_return_code(self):
        """Extracts the return code for the integration to enable better control
        if the integration fails.

        In general, a return code > 0 implies success, while a return code < 0
        implies failure.

        Notes
        -----
        This section describes possible return codes and their meaning, for available
        integrators that can be selected by `set_integrator` method.

        "vode"

        =========== =======
        Return Code Message
        =========== =======
        2           Integration successful.
        -1          Excess work done on this call. (Perhaps wrong MF.)
        -2          Excess accuracy requested. (Tolerances too small.)
        -3          Illegal input detected. (See printed message.)
        -4          Repeated error test failures. (Check all input.)
        -5          Repeated convergence failures. (Perhaps bad Jacobian
                    supplied or wrong choice of MF or tolerances.)
        -6          Error weight became zero during problem. (Solution
                    component i vanished, and ATOL or ATOL(i) = 0.)
        =========== =======

        "zvode"

        =========== =======
        Return Code Message
        =========== =======
        2           Integration successful.
        -1          Excess work done on this call. (Perhaps wrong MF.)
        -2          Excess accuracy requested. (Tolerances too small.)
        -3          Illegal input detected. (See printed message.)
        -4          Repeated error test failures. (Check all input.)
        -5          Repeated convergence failures. (Perhaps bad Jacobian
                    supplied or wrong choice of MF or tolerances.)
        -6          Error weight became zero during problem. (Solution
                    component i vanished, and ATOL or ATOL(i) = 0.)
        =========== =======

        "dopri5"

        =========== =======
        Return Code Message
        =========== =======
        1           Integration successful.
        2           Integration successful (interrupted by solout).
        -1          Input is not consistent.
        -2          Larger nsteps is needed.
        -3          Step size becomes too small.
        -4          Problem is probably stiff (interrupted).
        =========== =======

        "dop853"

        =========== =======
        Return Code Message
        =========== =======
        1           Integration successful.
        2           Integration successful (interrupted by solout).
        -1          Input is not consistent.
        -2          Larger nsteps is needed.
        -3          Step size becomes too small.
        -4          Problem is probably stiff (interrupted).
        =========== =======

        "lsoda"

        =========== =======
        Return Code Message
        =========== =======
        2           Integration successful.
        -1          Excess work done on this call (perhaps wrong Dfun type).
        -2          Excess accuracy requested (tolerances too small).
        -3          Illegal input detected (internal error).
        -4          Repeated error test failures (internal error).
        -5          Repeated convergence failures (perhaps bad Jacobian or tolerances).
        -6          Error weight became zero during problem.
        -7          Internal workspace insufficient to finish (internal error).
        =========== =======
        """
        try:
            self._integrator
        except AttributeError:
            # Lazily select a default integrator if none was chosen yet.
            self.set_integrator('')
        return self._integrator.istate
540
+
541
    def set_f_params(self, *args):
        """Set extra parameters for user-supplied function f."""
        self.f_params = args
        return self  # fluent interface: allows chained calls
545
+
546
    def set_jac_params(self, *args):
        """Set extra parameters for user-supplied function jac."""
        self.jac_params = args
        return self  # fluent interface: allows chained calls
550
+
551
    def set_solout(self, solout):
        """
        Set callable to be called at every successful integration step.

        Parameters
        ----------
        solout : callable
            ``solout(t, y)`` is called at each internal integrator step,
            t is a scalar providing the current independent position
            y is the current solution ``y.shape == (n,)``
            solout should return -1 to stop integration
            otherwise it should return None or 0

        """
        if self._integrator.supports_solout:
            self._integrator.set_solout(solout)
            if self._y is not None:
                # Re-initialize so the new callback takes effect immediately.
                self._integrator.reset(len(self._y), self.jac is not None)
        else:
            raise ValueError("selected integrator does not support solout,"
                             " choose another one")
572
+
573
+
574
+ def _transform_banded_jac(bjac):
575
+ """
576
+ Convert a real matrix of the form (for example)
577
+
578
+ [0 0 A B] [0 0 0 B]
579
+ [0 0 C D] [0 0 A D]
580
+ [E F G H] to [0 F C H]
581
+ [I J K L] [E J G L]
582
+ [I 0 K 0]
583
+
584
+ That is, every other column is shifted up one.
585
+ """
586
+ # Shift every other column.
587
+ newjac = zeros((bjac.shape[0] + 1, bjac.shape[1]))
588
+ newjac[1:, ::2] = bjac[:, ::2]
589
+ newjac[:-1, 1::2] = bjac[:, 1::2]
590
+ return newjac
591
+
592
+
593
+ class complex_ode(ode):
594
+ """
595
+ A wrapper of ode for complex systems.
596
+
597
+ This functions similarly as `ode`, but re-maps a complex-valued
598
+ equation system to a real-valued one before using the integrators.
599
+
600
+ Parameters
601
+ ----------
602
+ f : callable ``f(t, y, *f_args)``
603
+ Rhs of the equation. t is a scalar, ``y.shape == (n,)``.
604
+ ``f_args`` is set by calling ``set_f_params(*args)``.
605
+ jac : callable ``jac(t, y, *jac_args)``
606
+ Jacobian of the rhs, ``jac[i,j] = d f[i] / d y[j]``.
607
+ ``jac_args`` is set by calling ``set_f_params(*args)``.
608
+
609
+ Attributes
610
+ ----------
611
+ t : float
612
+ Current time.
613
+ y : ndarray
614
+ Current variable values.
615
+
616
+ Examples
617
+ --------
618
+ For usage examples, see `ode`.
619
+
620
+ """
621
+
622
    def __init__(self, f, jac=None):
        self.cf = f       # complex-valued rhs supplied by the user
        self.cjac = jac   # complex-valued Jacobian supplied by the user, or None
        # Register the real-valued wrappers with the base class; they map the
        # complex system onto interleaved real/imaginary components.
        if jac is None:
            ode.__init__(self, self._wrap, None)
        else:
            ode.__init__(self, self._wrap, self._wrap_jac)
629
+
630
    def _wrap(self, t, y, *f_args):
        """Real-valued rhs wrapper: y holds interleaved real/imag parts."""
        f = self.cf(*((t, y[::2] + 1j * y[1::2]) + f_args))
        # self.tmp is a real-valued array containing the interleaved
        # real and imaginary parts of f.
        # NOTE(review): self.tmp is allocated elsewhere (not visible here),
        # presumably when the initial value is set — verify.
        self.tmp[::2] = real(f)
        self.tmp[1::2] = imag(f)
        return self.tmp
637
+
638
+ def _wrap_jac(self, t, y, *jac_args):
639
+ # jac is the complex Jacobian computed by the user-defined function.
640
+ jac = self.cjac(*((t, y[::2] + 1j * y[1::2]) + jac_args))
641
+
642
+ # jac_tmp is the real version of the complex Jacobian. Each complex
643
+ # entry in jac, say 2+3j, becomes a 2x2 block of the form
644
+ # [2 -3]
645
+ # [3 2]
646
+ jac_tmp = zeros((2 * jac.shape[0], 2 * jac.shape[1]))
647
+ jac_tmp[1::2, 1::2] = jac_tmp[::2, ::2] = real(jac)
648
+ jac_tmp[1::2, ::2] = imag(jac)
649
+ jac_tmp[::2, 1::2] = -jac_tmp[1::2, ::2]
650
+
651
+ ml = getattr(self._integrator, 'ml', None)
652
+ mu = getattr(self._integrator, 'mu', None)
653
+ if ml is not None or mu is not None:
654
+ # Jacobian is banded. The user's Jacobian function has computed
655
+ # the complex Jacobian in packed format. The corresponding
656
+ # real-valued version has every other column shifted up.
657
+ jac_tmp = _transform_banded_jac(jac_tmp)
658
+
659
+ return jac_tmp
660
+
661
+ @property
662
+ def y(self):
663
+ return self._y[::2] + 1j * self._y[1::2]
664
+
665
+ def set_integrator(self, name, **integrator_params):
666
+ """
667
+ Set integrator by name.
668
+
669
+ Parameters
670
+ ----------
671
+ name : str
672
+ Name of the integrator
673
+ **integrator_params
674
+ Additional parameters for the integrator.
675
+ """
676
+ if name == 'zvode':
677
+ raise ValueError("zvode must be used with ode, not complex_ode")
678
+
679
+ lband = integrator_params.get('lband')
680
+ uband = integrator_params.get('uband')
681
+ if lband is not None or uband is not None:
682
+ # The Jacobian is banded. Override the user-supplied bandwidths
683
+ # (which are for the complex Jacobian) with the bandwidths of
684
+ # the corresponding real-valued Jacobian wrapper of the complex
685
+ # Jacobian.
686
+ integrator_params['lband'] = 2 * (lband or 0) + 1
687
+ integrator_params['uband'] = 2 * (uband or 0) + 1
688
+
689
+ return ode.set_integrator(self, name, **integrator_params)
690
+
691
+ def set_initial_value(self, y, t=0.0):
692
+ """Set initial conditions y(t) = y."""
693
+ y = asarray(y)
694
+ self.tmp = zeros(y.size * 2, 'float')
695
+ self.tmp[::2] = real(y)
696
+ self.tmp[1::2] = imag(y)
697
+ return ode.set_initial_value(self, self.tmp, t)
698
+
699
+ def integrate(self, t, step=False, relax=False):
700
+ """Find y=y(t), set y as an initial condition, and return y.
701
+
702
+ Parameters
703
+ ----------
704
+ t : float
705
+ The endpoint of the integration step.
706
+ step : bool
707
+ If True, and if the integrator supports the step method,
708
+ then perform a single integration step and return.
709
+ This parameter is provided in order to expose internals of
710
+ the implementation, and should not be changed from its default
711
+ value in most cases.
712
+ relax : bool
713
+ If True and if the integrator supports the run_relax method,
714
+ then integrate until t_1 >= t and return. ``relax`` is not
715
+ referenced if ``step=True``.
716
+ This parameter is provided in order to expose internals of
717
+ the implementation, and should not be changed from its default
718
+ value in most cases.
719
+
720
+ Returns
721
+ -------
722
+ y : float
723
+ The integrated value at t
724
+ """
725
+ y = ode.integrate(self, t, step, relax)
726
+ return y[::2] + 1j * y[1::2]
727
+
728
+ def set_solout(self, solout):
729
+ """
730
+ Set callable to be called at every successful integration step.
731
+
732
+ Parameters
733
+ ----------
734
+ solout : callable
735
+ ``solout(t, y)`` is called at each internal integrator step,
736
+ t is a scalar providing the current independent position
737
+ y is the current solution ``y.shape == (n,)``
738
+ solout should return -1 to stop integration
739
+ otherwise it should return None or 0
740
+
741
+ """
742
+ if self._integrator.supports_solout:
743
+ self._integrator.set_solout(solout, complex=True)
744
+ else:
745
+ raise TypeError("selected integrator does not support solouta,"
746
+ + "choose another one")
747
+
748
+
749
+ # ------------------------------------------------------------------------------
750
+ # ODE integrators
751
+ # ------------------------------------------------------------------------------
752
+
753
+ def find_integrator(name):
754
+ for cl in IntegratorBase.integrator_classes:
755
+ if re.match(name, cl.__name__, re.I):
756
+ return cl
757
+ return None
758
+
759
+
760
+ class IntegratorConcurrencyError(RuntimeError):
761
+ """
762
+ Failure due to concurrent usage of an integrator that can be used
763
+ only for a single problem at a time.
764
+
765
+ """
766
+
767
+ def __init__(self, name):
768
+ msg = ("Integrator `%s` can be used to solve only a single problem "
769
+ "at a time. If you want to integrate multiple problems, "
770
+ "consider using a different integrator "
771
+ "(see `ode.set_integrator`)") % name
772
+ RuntimeError.__init__(self, msg)
773
+
774
+
775
+ class IntegratorBase:
776
+ runner = None # runner is None => integrator is not available
777
+ success = None # success==1 if integrator was called successfully
778
+ istate = None # istate > 0 means success, istate < 0 means failure
779
+ supports_run_relax = None
780
+ supports_step = None
781
+ supports_solout = False
782
+ integrator_classes = []
783
+ scalar = float
784
+
785
+ def acquire_new_handle(self):
786
+ # Some of the integrators have internal state (ancient
787
+ # Fortran...), and so only one instance can use them at a time.
788
+ # We keep track of this, and fail when concurrent usage is tried.
789
+ self.__class__.active_global_handle += 1
790
+ self.handle = self.__class__.active_global_handle
791
+
792
+ def check_handle(self):
793
+ if self.handle is not self.__class__.active_global_handle:
794
+ raise IntegratorConcurrencyError(self.__class__.__name__)
795
+
796
+ def reset(self, n, has_jac):
797
+ """Prepare integrator for call: allocate memory, set flags, etc.
798
+ n - number of equations.
799
+ has_jac - if user has supplied function for evaluating Jacobian.
800
+ """
801
+
802
+ def run(self, f, jac, y0, t0, t1, f_params, jac_params):
803
+ """Integrate from t=t0 to t=t1 using y0 as an initial condition.
804
+ Return 2-tuple (y1,t1) where y1 is the result and t=t1
805
+ defines the stoppage coordinate of the result.
806
+ """
807
+ raise NotImplementedError('all integrators must define '
808
+ 'run(f, jac, t0, t1, y0, f_params, jac_params)')
809
+
810
+ def step(self, f, jac, y0, t0, t1, f_params, jac_params):
811
+ """Make one integration step and return (y1,t1)."""
812
+ raise NotImplementedError('%s does not support step() method' %
813
+ self.__class__.__name__)
814
+
815
+ def run_relax(self, f, jac, y0, t0, t1, f_params, jac_params):
816
+ """Integrate from t=t0 to t>=t1 and return (y1,t)."""
817
+ raise NotImplementedError('%s does not support run_relax() method' %
818
+ self.__class__.__name__)
819
+
820
+ # XXX: __str__ method for getting visual state of the integrator
821
+
822
+
823
+ def _vode_banded_jac_wrapper(jacfunc, ml, jac_params):
824
+ """
825
+ Wrap a banded Jacobian function with a function that pads
826
+ the Jacobian with `ml` rows of zeros.
827
+ """
828
+
829
+ def jac_wrapper(t, y):
830
+ jac = asarray(jacfunc(t, y, *jac_params))
831
+ padded_jac = vstack((jac, zeros((ml, jac.shape[1]))))
832
+ return padded_jac
833
+
834
+ return jac_wrapper
835
+
836
+
837
+ class vode(IntegratorBase):
838
+ runner = getattr(_vode, 'dvode', None)
839
+
840
+ messages = {-1: 'Excess work done on this call. (Perhaps wrong MF.)',
841
+ -2: 'Excess accuracy requested. (Tolerances too small.)',
842
+ -3: 'Illegal input detected. (See printed message.)',
843
+ -4: 'Repeated error test failures. (Check all input.)',
844
+ -5: 'Repeated convergence failures. (Perhaps bad'
845
+ ' Jacobian supplied or wrong choice of MF or tolerances.)',
846
+ -6: 'Error weight became zero during problem. (Solution'
847
+ ' component i vanished, and ATOL or ATOL(i) = 0.)'
848
+ }
849
+ supports_run_relax = 1
850
+ supports_step = 1
851
+ active_global_handle = 0
852
+
853
+ def __init__(self,
854
+ method='adams',
855
+ with_jacobian=False,
856
+ rtol=1e-6, atol=1e-12,
857
+ lband=None, uband=None,
858
+ order=12,
859
+ nsteps=500,
860
+ max_step=0.0, # corresponds to infinite
861
+ min_step=0.0,
862
+ first_step=0.0, # determined by solver
863
+ ):
864
+
865
+ if re.match(method, r'adams', re.I):
866
+ self.meth = 1
867
+ elif re.match(method, r'bdf', re.I):
868
+ self.meth = 2
869
+ else:
870
+ raise ValueError('Unknown integration method %s' % method)
871
+ self.with_jacobian = with_jacobian
872
+ self.rtol = rtol
873
+ self.atol = atol
874
+ self.mu = uband
875
+ self.ml = lband
876
+
877
+ self.order = order
878
+ self.nsteps = nsteps
879
+ self.max_step = max_step
880
+ self.min_step = min_step
881
+ self.first_step = first_step
882
+ self.success = 1
883
+
884
+ self.initialized = False
885
+
886
+ def _determine_mf_and_set_bands(self, has_jac):
887
+ """
888
+ Determine the `MF` parameter (Method Flag) for the Fortran subroutine `dvode`.
889
+
890
+ In the Fortran code, the legal values of `MF` are:
891
+ 10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 24, 25,
892
+ -11, -12, -14, -15, -21, -22, -24, -25
893
+ but this Python wrapper does not use negative values.
894
+
895
+ Returns
896
+
897
+ mf = 10*self.meth + miter
898
+
899
+ self.meth is the linear multistep method:
900
+ self.meth == 1: method="adams"
901
+ self.meth == 2: method="bdf"
902
+
903
+ miter is the correction iteration method:
904
+ miter == 0: Functional iteration; no Jacobian involved.
905
+ miter == 1: Chord iteration with user-supplied full Jacobian.
906
+ miter == 2: Chord iteration with internally computed full Jacobian.
907
+ miter == 3: Chord iteration with internally computed diagonal Jacobian.
908
+ miter == 4: Chord iteration with user-supplied banded Jacobian.
909
+ miter == 5: Chord iteration with internally computed banded Jacobian.
910
+
911
+ Side effects: If either self.mu or self.ml is not None and the other is None,
912
+ then the one that is None is set to 0.
913
+ """
914
+
915
+ jac_is_banded = self.mu is not None or self.ml is not None
916
+ if jac_is_banded:
917
+ if self.mu is None:
918
+ self.mu = 0
919
+ if self.ml is None:
920
+ self.ml = 0
921
+
922
+ # has_jac is True if the user provided a Jacobian function.
923
+ if has_jac:
924
+ if jac_is_banded:
925
+ miter = 4
926
+ else:
927
+ miter = 1
928
+ else:
929
+ if jac_is_banded:
930
+ if self.ml == self.mu == 0:
931
+ miter = 3 # Chord iteration with internal diagonal Jacobian.
932
+ else:
933
+ miter = 5 # Chord iteration with internal banded Jacobian.
934
+ else:
935
+ # self.with_jacobian is set by the user in
936
+ # the call to ode.set_integrator.
937
+ if self.with_jacobian:
938
+ miter = 2 # Chord iteration with internal full Jacobian.
939
+ else:
940
+ miter = 0 # Functional iteration; no Jacobian involved.
941
+
942
+ mf = 10 * self.meth + miter
943
+ return mf
944
+
945
+ def reset(self, n, has_jac):
946
+ mf = self._determine_mf_and_set_bands(has_jac)
947
+
948
+ if mf == 10:
949
+ lrw = 20 + 16 * n
950
+ elif mf in [11, 12]:
951
+ lrw = 22 + 16 * n + 2 * n * n
952
+ elif mf == 13:
953
+ lrw = 22 + 17 * n
954
+ elif mf in [14, 15]:
955
+ lrw = 22 + 18 * n + (3 * self.ml + 2 * self.mu) * n
956
+ elif mf == 20:
957
+ lrw = 20 + 9 * n
958
+ elif mf in [21, 22]:
959
+ lrw = 22 + 9 * n + 2 * n * n
960
+ elif mf == 23:
961
+ lrw = 22 + 10 * n
962
+ elif mf in [24, 25]:
963
+ lrw = 22 + 11 * n + (3 * self.ml + 2 * self.mu) * n
964
+ else:
965
+ raise ValueError('Unexpected mf=%s' % mf)
966
+
967
+ if mf % 10 in [0, 3]:
968
+ liw = 30
969
+ else:
970
+ liw = 30 + n
971
+
972
+ rwork = zeros((lrw,), float)
973
+ rwork[4] = self.first_step
974
+ rwork[5] = self.max_step
975
+ rwork[6] = self.min_step
976
+ self.rwork = rwork
977
+
978
+ iwork = zeros((liw,), _vode_int_dtype)
979
+ if self.ml is not None:
980
+ iwork[0] = self.ml
981
+ if self.mu is not None:
982
+ iwork[1] = self.mu
983
+ iwork[4] = self.order
984
+ iwork[5] = self.nsteps
985
+ iwork[6] = 2 # mxhnil
986
+ self.iwork = iwork
987
+
988
+ self.call_args = [self.rtol, self.atol, 1, 1,
989
+ self.rwork, self.iwork, mf]
990
+ self.success = 1
991
+ self.initialized = False
992
+
993
+ def run(self, f, jac, y0, t0, t1, f_params, jac_params):
994
+ if self.initialized:
995
+ self.check_handle()
996
+ else:
997
+ self.initialized = True
998
+ self.acquire_new_handle()
999
+
1000
+ if self.ml is not None and self.ml > 0:
1001
+ # Banded Jacobian. Wrap the user-provided function with one
1002
+ # that pads the Jacobian array with the extra `self.ml` rows
1003
+ # required by the f2py-generated wrapper.
1004
+ jac = _vode_banded_jac_wrapper(jac, self.ml, jac_params)
1005
+
1006
+ args = ((f, jac, y0, t0, t1) + tuple(self.call_args) +
1007
+ (f_params, jac_params))
1008
+ y1, t, istate = self.runner(*args)
1009
+ self.istate = istate
1010
+ if istate < 0:
1011
+ unexpected_istate_msg = f'Unexpected istate={istate:d}'
1012
+ warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
1013
+ self.messages.get(istate, unexpected_istate_msg)),
1014
+ stacklevel=2)
1015
+ self.success = 0
1016
+ else:
1017
+ self.call_args[3] = 2 # upgrade istate from 1 to 2
1018
+ self.istate = 2
1019
+ return y1, t
1020
+
1021
+ def step(self, *args):
1022
+ itask = self.call_args[2]
1023
+ self.call_args[2] = 2
1024
+ r = self.run(*args)
1025
+ self.call_args[2] = itask
1026
+ return r
1027
+
1028
+ def run_relax(self, *args):
1029
+ itask = self.call_args[2]
1030
+ self.call_args[2] = 3
1031
+ r = self.run(*args)
1032
+ self.call_args[2] = itask
1033
+ return r
1034
+
1035
+
1036
+ if vode.runner is not None:
1037
+ IntegratorBase.integrator_classes.append(vode)
1038
+
1039
+
1040
+ class zvode(vode):
1041
+ runner = getattr(_vode, 'zvode', None)
1042
+
1043
+ supports_run_relax = 1
1044
+ supports_step = 1
1045
+ scalar = complex
1046
+ active_global_handle = 0
1047
+
1048
+ def reset(self, n, has_jac):
1049
+ mf = self._determine_mf_and_set_bands(has_jac)
1050
+
1051
+ if mf in (10,):
1052
+ lzw = 15 * n
1053
+ elif mf in (11, 12):
1054
+ lzw = 15 * n + 2 * n ** 2
1055
+ elif mf in (-11, -12):
1056
+ lzw = 15 * n + n ** 2
1057
+ elif mf in (13,):
1058
+ lzw = 16 * n
1059
+ elif mf in (14, 15):
1060
+ lzw = 17 * n + (3 * self.ml + 2 * self.mu) * n
1061
+ elif mf in (-14, -15):
1062
+ lzw = 16 * n + (2 * self.ml + self.mu) * n
1063
+ elif mf in (20,):
1064
+ lzw = 8 * n
1065
+ elif mf in (21, 22):
1066
+ lzw = 8 * n + 2 * n ** 2
1067
+ elif mf in (-21, -22):
1068
+ lzw = 8 * n + n ** 2
1069
+ elif mf in (23,):
1070
+ lzw = 9 * n
1071
+ elif mf in (24, 25):
1072
+ lzw = 10 * n + (3 * self.ml + 2 * self.mu) * n
1073
+ elif mf in (-24, -25):
1074
+ lzw = 9 * n + (2 * self.ml + self.mu) * n
1075
+
1076
+ lrw = 20 + n
1077
+
1078
+ if mf % 10 in (0, 3):
1079
+ liw = 30
1080
+ else:
1081
+ liw = 30 + n
1082
+
1083
+ zwork = zeros((lzw,), complex)
1084
+ self.zwork = zwork
1085
+
1086
+ rwork = zeros((lrw,), float)
1087
+ rwork[4] = self.first_step
1088
+ rwork[5] = self.max_step
1089
+ rwork[6] = self.min_step
1090
+ self.rwork = rwork
1091
+
1092
+ iwork = zeros((liw,), _vode_int_dtype)
1093
+ if self.ml is not None:
1094
+ iwork[0] = self.ml
1095
+ if self.mu is not None:
1096
+ iwork[1] = self.mu
1097
+ iwork[4] = self.order
1098
+ iwork[5] = self.nsteps
1099
+ iwork[6] = 2 # mxhnil
1100
+ self.iwork = iwork
1101
+
1102
+ self.call_args = [self.rtol, self.atol, 1, 1,
1103
+ self.zwork, self.rwork, self.iwork, mf]
1104
+ self.success = 1
1105
+ self.initialized = False
1106
+
1107
+
1108
+ if zvode.runner is not None:
1109
+ IntegratorBase.integrator_classes.append(zvode)
1110
+
1111
+
1112
+ class dopri5(IntegratorBase):
1113
+ runner = getattr(_dop, 'dopri5', None)
1114
+ name = 'dopri5'
1115
+ supports_solout = True
1116
+
1117
+ messages = {1: 'computation successful',
1118
+ 2: 'computation successful (interrupted by solout)',
1119
+ -1: 'input is not consistent',
1120
+ -2: 'larger nsteps is needed',
1121
+ -3: 'step size becomes too small',
1122
+ -4: 'problem is probably stiff (interrupted)',
1123
+ }
1124
+
1125
+ def __init__(self,
1126
+ rtol=1e-6, atol=1e-12,
1127
+ nsteps=500,
1128
+ max_step=0.0,
1129
+ first_step=0.0, # determined by solver
1130
+ safety=0.9,
1131
+ ifactor=10.0,
1132
+ dfactor=0.2,
1133
+ beta=0.0,
1134
+ method=None,
1135
+ verbosity=-1, # no messages if negative
1136
+ ):
1137
+ self.rtol = rtol
1138
+ self.atol = atol
1139
+ self.nsteps = nsteps
1140
+ self.max_step = max_step
1141
+ self.first_step = first_step
1142
+ self.safety = safety
1143
+ self.ifactor = ifactor
1144
+ self.dfactor = dfactor
1145
+ self.beta = beta
1146
+ self.verbosity = verbosity
1147
+ self.success = 1
1148
+ self.set_solout(None)
1149
+
1150
+ def set_solout(self, solout, complex=False):
1151
+ self.solout = solout
1152
+ self.solout_cmplx = complex
1153
+ if solout is None:
1154
+ self.iout = 0
1155
+ else:
1156
+ self.iout = 1
1157
+
1158
+ def reset(self, n, has_jac):
1159
+ work = zeros((8 * n + 21,), float)
1160
+ work[1] = self.safety
1161
+ work[2] = self.dfactor
1162
+ work[3] = self.ifactor
1163
+ work[4] = self.beta
1164
+ work[5] = self.max_step
1165
+ work[6] = self.first_step
1166
+ self.work = work
1167
+ iwork = zeros((21,), _dop_int_dtype)
1168
+ iwork[0] = self.nsteps
1169
+ iwork[2] = self.verbosity
1170
+ self.iwork = iwork
1171
+ self.call_args = [self.rtol, self.atol, self._solout,
1172
+ self.iout, self.work, self.iwork]
1173
+ self.success = 1
1174
+
1175
+ def run(self, f, jac, y0, t0, t1, f_params, jac_params):
1176
+ x, y, iwork, istate = self.runner(*((f, t0, y0, t1) +
1177
+ tuple(self.call_args) + (f_params,)))
1178
+ self.istate = istate
1179
+ if istate < 0:
1180
+ unexpected_istate_msg = f'Unexpected istate={istate:d}'
1181
+ warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
1182
+ self.messages.get(istate, unexpected_istate_msg)),
1183
+ stacklevel=2)
1184
+ self.success = 0
1185
+ return y, x
1186
+
1187
+ def _solout(self, nr, xold, x, y, nd, icomp, con):
1188
+ if self.solout is not None:
1189
+ if self.solout_cmplx:
1190
+ y = y[::2] + 1j * y[1::2]
1191
+ return self.solout(x, y)
1192
+ else:
1193
+ return 1
1194
+
1195
+
1196
+ if dopri5.runner is not None:
1197
+ IntegratorBase.integrator_classes.append(dopri5)
1198
+
1199
+
1200
+ class dop853(dopri5):
1201
+ runner = getattr(_dop, 'dop853', None)
1202
+ name = 'dop853'
1203
+
1204
+ def __init__(self,
1205
+ rtol=1e-6, atol=1e-12,
1206
+ nsteps=500,
1207
+ max_step=0.0,
1208
+ first_step=0.0, # determined by solver
1209
+ safety=0.9,
1210
+ ifactor=6.0,
1211
+ dfactor=0.3,
1212
+ beta=0.0,
1213
+ method=None,
1214
+ verbosity=-1, # no messages if negative
1215
+ ):
1216
+ super().__init__(rtol, atol, nsteps, max_step, first_step, safety,
1217
+ ifactor, dfactor, beta, method, verbosity)
1218
+
1219
+ def reset(self, n, has_jac):
1220
+ work = zeros((11 * n + 21,), float)
1221
+ work[1] = self.safety
1222
+ work[2] = self.dfactor
1223
+ work[3] = self.ifactor
1224
+ work[4] = self.beta
1225
+ work[5] = self.max_step
1226
+ work[6] = self.first_step
1227
+ self.work = work
1228
+ iwork = zeros((21,), _dop_int_dtype)
1229
+ iwork[0] = self.nsteps
1230
+ iwork[2] = self.verbosity
1231
+ self.iwork = iwork
1232
+ self.call_args = [self.rtol, self.atol, self._solout,
1233
+ self.iout, self.work, self.iwork]
1234
+ self.success = 1
1235
+
1236
+
1237
+ if dop853.runner is not None:
1238
+ IntegratorBase.integrator_classes.append(dop853)
1239
+
1240
+
1241
+ class lsoda(IntegratorBase):
1242
+ runner = getattr(_lsoda, 'lsoda', None)
1243
+ active_global_handle = 0
1244
+
1245
+ messages = {
1246
+ 2: "Integration successful.",
1247
+ -1: "Excess work done on this call (perhaps wrong Dfun type).",
1248
+ -2: "Excess accuracy requested (tolerances too small).",
1249
+ -3: "Illegal input detected (internal error).",
1250
+ -4: "Repeated error test failures (internal error).",
1251
+ -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
1252
+ -6: "Error weight became zero during problem.",
1253
+ -7: "Internal workspace insufficient to finish (internal error)."
1254
+ }
1255
+
1256
+ def __init__(self,
1257
+ with_jacobian=False,
1258
+ rtol=1e-6, atol=1e-12,
1259
+ lband=None, uband=None,
1260
+ nsteps=500,
1261
+ max_step=0.0, # corresponds to infinite
1262
+ min_step=0.0,
1263
+ first_step=0.0, # determined by solver
1264
+ ixpr=0,
1265
+ max_hnil=0,
1266
+ max_order_ns=12,
1267
+ max_order_s=5,
1268
+ method=None
1269
+ ):
1270
+
1271
+ self.with_jacobian = with_jacobian
1272
+ self.rtol = rtol
1273
+ self.atol = atol
1274
+ self.mu = uband
1275
+ self.ml = lband
1276
+
1277
+ self.max_order_ns = max_order_ns
1278
+ self.max_order_s = max_order_s
1279
+ self.nsteps = nsteps
1280
+ self.max_step = max_step
1281
+ self.min_step = min_step
1282
+ self.first_step = first_step
1283
+ self.ixpr = ixpr
1284
+ self.max_hnil = max_hnil
1285
+ self.success = 1
1286
+
1287
+ self.initialized = False
1288
+
1289
+ def reset(self, n, has_jac):
1290
+ # Calculate parameters for Fortran subroutine dvode.
1291
+ if has_jac:
1292
+ if self.mu is None and self.ml is None:
1293
+ jt = 1
1294
+ else:
1295
+ if self.mu is None:
1296
+ self.mu = 0
1297
+ if self.ml is None:
1298
+ self.ml = 0
1299
+ jt = 4
1300
+ else:
1301
+ if self.mu is None and self.ml is None:
1302
+ jt = 2
1303
+ else:
1304
+ if self.mu is None:
1305
+ self.mu = 0
1306
+ if self.ml is None:
1307
+ self.ml = 0
1308
+ jt = 5
1309
+ lrn = 20 + (self.max_order_ns + 4) * n
1310
+ if jt in [1, 2]:
1311
+ lrs = 22 + (self.max_order_s + 4) * n + n * n
1312
+ elif jt in [4, 5]:
1313
+ lrs = 22 + (self.max_order_s + 5 + 2 * self.ml + self.mu) * n
1314
+ else:
1315
+ raise ValueError('Unexpected jt=%s' % jt)
1316
+ lrw = max(lrn, lrs)
1317
+ liw = 20 + n
1318
+ rwork = zeros((lrw,), float)
1319
+ rwork[4] = self.first_step
1320
+ rwork[5] = self.max_step
1321
+ rwork[6] = self.min_step
1322
+ self.rwork = rwork
1323
+ iwork = zeros((liw,), _lsoda_int_dtype)
1324
+ if self.ml is not None:
1325
+ iwork[0] = self.ml
1326
+ if self.mu is not None:
1327
+ iwork[1] = self.mu
1328
+ iwork[4] = self.ixpr
1329
+ iwork[5] = self.nsteps
1330
+ iwork[6] = self.max_hnil
1331
+ iwork[7] = self.max_order_ns
1332
+ iwork[8] = self.max_order_s
1333
+ self.iwork = iwork
1334
+ self.call_args = [self.rtol, self.atol, 1, 1,
1335
+ self.rwork, self.iwork, jt]
1336
+ self.success = 1
1337
+ self.initialized = False
1338
+
1339
+ def run(self, f, jac, y0, t0, t1, f_params, jac_params):
1340
+ if self.initialized:
1341
+ self.check_handle()
1342
+ else:
1343
+ self.initialized = True
1344
+ self.acquire_new_handle()
1345
+ args = [f, y0, t0, t1] + self.call_args[:-1] + \
1346
+ [jac, self.call_args[-1], f_params, 0, jac_params]
1347
+ y1, t, istate = self.runner(*args)
1348
+ self.istate = istate
1349
+ if istate < 0:
1350
+ unexpected_istate_msg = f'Unexpected istate={istate:d}'
1351
+ warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
1352
+ self.messages.get(istate, unexpected_istate_msg)),
1353
+ stacklevel=2)
1354
+ self.success = 0
1355
+ else:
1356
+ self.call_args[3] = 2 # upgrade istate from 1 to 2
1357
+ self.istate = 2
1358
+ return y1, t
1359
+
1360
+ def step(self, *args):
1361
+ itask = self.call_args[2]
1362
+ self.call_args[2] = 2
1363
+ r = self.run(*args)
1364
+ self.call_args[2] = itask
1365
+ return r
1366
+
1367
+ def run_relax(self, *args):
1368
+ itask = self.call_args[2]
1369
+ self.call_args[2] = 3
1370
+ r = self.run(*args)
1371
+ self.call_args[2] = itask
1372
+ return r
1373
+
1374
+
1375
+ if lsoda.runner:
1376
+ IntegratorBase.integrator_classes.append(lsoda)
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_odepack.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (83.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_odepack_py.py ADDED
@@ -0,0 +1,262 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Author: Travis Oliphant
2
+
3
+ __all__ = ['odeint', 'ODEintWarning']
4
+
5
+ import numpy as np
6
+ from . import _odepack
7
+ from copy import copy
8
+ import warnings
9
+
10
+
11
+ class ODEintWarning(Warning):
12
+ """Warning raised during the execution of `odeint`."""
13
+ pass
14
+
15
+
16
+ _msgs = {2: "Integration successful.",
17
+ 1: "Nothing was done; the integration time was 0.",
18
+ -1: "Excess work done on this call (perhaps wrong Dfun type).",
19
+ -2: "Excess accuracy requested (tolerances too small).",
20
+ -3: "Illegal input detected (internal error).",
21
+ -4: "Repeated error test failures (internal error).",
22
+ -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
23
+ -6: "Error weight became zero during problem.",
24
+ -7: "Internal workspace insufficient to finish (internal error).",
25
+ -8: "Run terminated (internal error)."
26
+ }
27
+
28
+
29
+ def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0,
30
+ ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0,
31
+ hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12,
32
+ mxords=5, printmessg=0, tfirst=False):
33
+ """
34
+ Integrate a system of ordinary differential equations.
35
+
36
+ .. note:: For new code, use `scipy.integrate.solve_ivp` to solve a
37
+ differential equation.
38
+
39
+ Solve a system of ordinary differential equations using lsoda from the
40
+ FORTRAN library odepack.
41
+
42
+ Solves the initial value problem for stiff or non-stiff systems
43
+ of first order ode-s::
44
+
45
+ dy/dt = func(y, t, ...) [or func(t, y, ...)]
46
+
47
+ where y can be a vector.
48
+
49
+ .. note:: By default, the required order of the first two arguments of
50
+ `func` are in the opposite order of the arguments in the system
51
+ definition function used by the `scipy.integrate.ode` class and
52
+ the function `scipy.integrate.solve_ivp`. To use a function with
53
+ the signature ``func(t, y, ...)``, the argument `tfirst` must be
54
+ set to ``True``.
55
+
56
+ Parameters
57
+ ----------
58
+ func : callable(y, t, ...) or callable(t, y, ...)
59
+ Computes the derivative of y at t.
60
+ If the signature is ``callable(t, y, ...)``, then the argument
61
+ `tfirst` must be set ``True``.
62
+ y0 : array
63
+ Initial condition on y (can be a vector).
64
+ t : array
65
+ A sequence of time points for which to solve for y. The initial
66
+ value point should be the first element of this sequence.
67
+ This sequence must be monotonically increasing or monotonically
68
+ decreasing; repeated values are allowed.
69
+ args : tuple, optional
70
+ Extra arguments to pass to function.
71
+ Dfun : callable(y, t, ...) or callable(t, y, ...)
72
+ Gradient (Jacobian) of `func`.
73
+ If the signature is ``callable(t, y, ...)``, then the argument
74
+ `tfirst` must be set ``True``.
75
+ col_deriv : bool, optional
76
+ True if `Dfun` defines derivatives down columns (faster),
77
+ otherwise `Dfun` should define derivatives across rows.
78
+ full_output : bool, optional
79
+ True if to return a dictionary of optional outputs as the second output
80
+ printmessg : bool, optional
81
+ Whether to print the convergence message
82
+ tfirst : bool, optional
83
+ If True, the first two arguments of `func` (and `Dfun`, if given)
84
+ must ``t, y`` instead of the default ``y, t``.
85
+
86
+ .. versionadded:: 1.1.0
87
+
88
+ Returns
89
+ -------
90
+ y : array, shape (len(t), len(y0))
91
+ Array containing the value of y for each desired time in t,
92
+ with the initial value `y0` in the first row.
93
+ infodict : dict, only returned if full_output == True
94
+ Dictionary containing additional output information
95
+
96
+ ======= ============================================================
97
+ key meaning
98
+ ======= ============================================================
99
+ 'hu' vector of step sizes successfully used for each time step
100
+ 'tcur' vector with the value of t reached for each time step
101
+ (will always be at least as large as the input times)
102
+ 'tolsf' vector of tolerance scale factors, greater than 1.0,
103
+ computed when a request for too much accuracy was detected
104
+ 'tsw' value of t at the time of the last method switch
105
+ (given for each time step)
106
+ 'nst' cumulative number of time steps
107
+ 'nfe' cumulative number of function evaluations for each time step
108
+ 'nje' cumulative number of jacobian evaluations for each time step
109
+ 'nqu' a vector of method orders for each successful step
110
+ 'imxer' index of the component of largest magnitude in the
111
+ weighted local error vector (e / ewt) on an error return, -1
112
+ otherwise
113
+ 'lenrw' the length of the double work array required
114
+ 'leniw' the length of integer work array required
115
+ 'mused' a vector of method indicators for each successful time step:
116
+ 1: adams (nonstiff), 2: bdf (stiff)
117
+ ======= ============================================================
118
+
119
+ Other Parameters
120
+ ----------------
121
+ ml, mu : int, optional
122
+ If either of these are not None or non-negative, then the
123
+ Jacobian is assumed to be banded. These give the number of
124
+ lower and upper non-zero diagonals in this banded matrix.
125
+ For the banded case, `Dfun` should return a matrix whose
126
+ rows contain the non-zero bands (starting with the lowest diagonal).
127
+ Thus, the return matrix `jac` from `Dfun` should have shape
128
+ ``(ml + mu + 1, len(y0))`` when ``ml >=0`` or ``mu >=0``.
129
+ The data in `jac` must be stored such that ``jac[i - j + mu, j]``
130
+ holds the derivative of the ``i``\\ th equation with respect to the
131
+ ``j``\\ th state variable. If `col_deriv` is True, the transpose of
132
+ this `jac` must be returned.
133
+ rtol, atol : float, optional
134
+ The input parameters `rtol` and `atol` determine the error
135
+ control performed by the solver. The solver will control the
136
+ vector, e, of estimated local errors in y, according to an
137
+ inequality of the form ``max-norm of (e / ewt) <= 1``,
138
+ where ewt is a vector of positive error weights computed as
139
+ ``ewt = rtol * abs(y) + atol``.
140
+ rtol and atol can be either vectors the same length as y or scalars.
141
+ Defaults to 1.49012e-8.
142
+ tcrit : ndarray, optional
143
+ Vector of critical points (e.g., singularities) where integration
144
+ care should be taken.
145
+ h0 : float, (0: solver-determined), optional
146
+ The step size to be attempted on the first step.
147
+ hmax : float, (0: solver-determined), optional
148
+ The maximum absolute step size allowed.
149
+ hmin : float, (0: solver-determined), optional
150
+ The minimum absolute step size allowed.
151
+ ixpr : bool, optional
152
+ Whether to generate extra printing at method switches.
153
+ mxstep : int, (0: solver-determined), optional
154
+ Maximum number of (internally defined) steps allowed for each
155
+ integration point in t.
156
+ mxhnil : int, (0: solver-determined), optional
157
+ Maximum number of messages printed.
158
+ mxordn : int, (0: solver-determined), optional
159
+ Maximum order to be allowed for the non-stiff (Adams) method.
160
+ mxords : int, (0: solver-determined), optional
161
+ Maximum order to be allowed for the stiff (BDF) method.
162
+
163
+ See Also
164
+ --------
165
+ solve_ivp : solve an initial value problem for a system of ODEs
166
+ ode : a more object-oriented integrator based on VODE
167
+ quad : for finding the area under a curve
168
+
169
+ Examples
170
+ --------
171
+ The second order differential equation for the angle `theta` of a
172
+ pendulum acted on by gravity with friction can be written::
173
+
174
+ theta''(t) + b*theta'(t) + c*sin(theta(t)) = 0
175
+
176
+ where `b` and `c` are positive constants, and a prime (') denotes a
177
+ derivative. To solve this equation with `odeint`, we must first convert
178
+ it to a system of first order equations. By defining the angular
179
+ velocity ``omega(t) = theta'(t)``, we obtain the system::
180
+
181
+ theta'(t) = omega(t)
182
+ omega'(t) = -b*omega(t) - c*sin(theta(t))
183
+
184
+ Let `y` be the vector [`theta`, `omega`]. We implement this system
185
+ in Python as:
186
+
187
+ >>> import numpy as np
188
+ >>> def pend(y, t, b, c):
189
+ ... theta, omega = y
190
+ ... dydt = [omega, -b*omega - c*np.sin(theta)]
191
+ ... return dydt
192
+ ...
193
+
194
+ We assume the constants are `b` = 0.25 and `c` = 5.0:
195
+
196
+ >>> b = 0.25
197
+ >>> c = 5.0
198
+
199
+ For initial conditions, we assume the pendulum is nearly vertical
200
+ with `theta(0)` = `pi` - 0.1, and is initially at rest, so
201
+ `omega(0)` = 0. Then the vector of initial conditions is
202
+
203
+ >>> y0 = [np.pi - 0.1, 0.0]
204
+
205
+ We will generate a solution at 101 evenly spaced samples in the interval
206
+ 0 <= `t` <= 10. So our array of times is:
207
+
208
+ >>> t = np.linspace(0, 10, 101)
209
+
210
+ Call `odeint` to generate the solution. To pass the parameters
211
+ `b` and `c` to `pend`, we give them to `odeint` using the `args`
212
+ argument.
213
+
214
+ >>> from scipy.integrate import odeint
215
+ >>> sol = odeint(pend, y0, t, args=(b, c))
216
+
217
+ The solution is an array with shape (101, 2). The first column
218
+ is `theta(t)`, and the second is `omega(t)`. The following code
219
+ plots both components.
220
+
221
+ >>> import matplotlib.pyplot as plt
222
+ >>> plt.plot(t, sol[:, 0], 'b', label='theta(t)')
223
+ >>> plt.plot(t, sol[:, 1], 'g', label='omega(t)')
224
+ >>> plt.legend(loc='best')
225
+ >>> plt.xlabel('t')
226
+ >>> plt.grid()
227
+ >>> plt.show()
228
+ """
229
+
230
+ if ml is None:
231
+ ml = -1 # changed to zero inside function call
232
+ if mu is None:
233
+ mu = -1 # changed to zero inside function call
234
+
235
+ dt = np.diff(t)
236
+ if not ((dt >= 0).all() or (dt <= 0).all()):
237
+ raise ValueError("The values in t must be monotonically increasing "
238
+ "or monotonically decreasing; repeated values are "
239
+ "allowed.")
240
+
241
+ t = copy(t)
242
+ y0 = copy(y0)
243
+ output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu,
244
+ full_output, rtol, atol, tcrit, h0, hmax, hmin,
245
+ ixpr, mxstep, mxhnil, mxordn, mxords,
246
+ int(bool(tfirst)))
247
+ if output[-1] < 0:
248
+ warning_msg = (f"{_msgs[output[-1]]} Run with full_output = 1 to "
249
+ f"get quantitative information.")
250
+ warnings.warn(warning_msg, ODEintWarning, stacklevel=2)
251
+ elif printmessg:
252
+ warning_msg = _msgs[output[-1]]
253
+ warnings.warn(warning_msg, ODEintWarning, stacklevel=2)
254
+
255
+ if full_output:
256
+ output[1]['message'] = _msgs[output[-1]]
257
+
258
+ output = output[:-1]
259
+ if len(output) == 1:
260
+ return output[0]
261
+ else:
262
+ return output
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_quad_vec.py ADDED
@@ -0,0 +1,656 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import copy
3
+ import heapq
4
+ import collections
5
+ import functools
6
+
7
+ import numpy as np
8
+
9
+ from scipy._lib._util import MapWrapper, _FunctionWrapper
10
+
11
+
12
class LRUDict(collections.OrderedDict):
    """Size-bounded mapping that drops its least-recently-set entry.

    Only ``__setitem__`` refreshes recency; reads do not.  Used as the
    interval-integral cache in `quad_vec`.
    """

    def __init__(self, max_size):
        self.__max_size = max_size

    def __setitem__(self, key, value):
        if key in self:
            # Overwrite: store the new value and refresh recency.
            super().__setitem__(key, value)
            self.move_to_end(key)
            return
        super().__setitem__(key, value)
        if len(self) > self.__max_size:
            # New key pushed us past the cap: evict the oldest entry.
            self.popitem(last=False)

    def update(self, other):
        # Not needed below
        raise NotImplementedError()
27
+
28
+
29
class SemiInfiniteFunc:
    """
    Argument transform from (start, +-oo) to (0, 1)
    """

    def __init__(self, func, start, infty):
        self._func = func
        self._start = start
        self._sgn = -1 if infty < 0 else 1
        # Smallest t for which the 1/t**2 jacobian does not overflow.
        self._tmin = sys.float_info.min**0.5

    def get_t(self, x):
        denom = self._sgn * (x - self._start) + 1
        if denom == 0:
            # Only possible for a point outside the integration range.
            return np.inf
        return 1 / denom

    def __call__(self, t):
        # Cut off to zero where the jacobian factor would overflow.
        if t < self._tmin:
            return 0.0
        x = self._start + self._sgn * (1 - t) / t
        return self._sgn * (self._func(x) / t) / t
55
+
56
+
57
class DoubleInfiniteFunc:
    """
    Argument transform from (-oo, oo) to (-1, 1)
    """

    def __init__(self, func):
        self._func = func
        # Smallest |t| for which the 1/t**2 jacobian does not overflow.
        self._tmin = sys.float_info.min**0.5

    def get_t(self, x):
        sign = -1 if x < 0 else 1
        return sign / (1 + abs(x))

    def __call__(self, t):
        # Cut off to zero where the jacobian factor would overflow.
        if abs(t) < self._tmin:
            return 0.0
        x = (1 - abs(t)) / t
        return (self._func(x) / t) / t
78
+
79
+
80
+ def _max_norm(x):
81
+ return np.amax(abs(x))
82
+
83
+
84
+ def _get_sizeof(obj):
85
+ try:
86
+ return sys.getsizeof(obj)
87
+ except TypeError:
88
+ # occurs on pypy
89
+ if hasattr(obj, '__sizeof__'):
90
+ return int(obj.__sizeof__())
91
+ return 64
92
+
93
+
94
+ class _Bunch:
95
+ def __init__(self, **kwargs):
96
+ self.__keys = kwargs.keys()
97
+ self.__dict__.update(**kwargs)
98
+
99
+ def __repr__(self):
100
+ return "_Bunch({})".format(", ".join(f"{k}={repr(self.__dict__[k])}"
101
+ for k in self.__keys))
102
+
103
+
104
def quad_vec(f, a, b, epsabs=1e-200, epsrel=1e-8, norm='2', cache_size=100e6,
             limit=10000, workers=1, points=None, quadrature=None, full_output=False,
             *, args=()):
    r"""Adaptive integration of a vector-valued function.

    Parameters
    ----------
    f : callable
        Vector-valued function f(x) to integrate.
    a : float
        Initial point.
    b : float
        Final point.
    epsabs : float, optional
        Absolute tolerance.
    epsrel : float, optional
        Relative tolerance.
    norm : {'max', '2'}, optional
        Vector norm to use for error estimation.
    cache_size : int, optional
        Number of bytes to use for memoization.
    limit : float or int, optional
        An upper bound on the number of subintervals used in the adaptive
        algorithm.
    workers : int or map-like callable, optional
        If `workers` is an integer, part of the computation is done in
        parallel subdivided to this many tasks (using
        :class:`python:multiprocessing.pool.Pool`).
        Supply `-1` to use all cores available to the Process.
        Alternatively, supply a map-like callable, such as
        :meth:`python:multiprocessing.pool.Pool.map` for evaluating the
        population in parallel.
        This evaluation is carried out as ``workers(func, iterable)``.
    points : list, optional
        List of additional breakpoints.
    quadrature : {'gk21', 'gk15', 'trapezoid'}, optional
        Quadrature rule to use on subintervals.
        Options: 'gk21' (Gauss-Kronrod 21-point rule),
        'gk15' (Gauss-Kronrod 15-point rule),
        'trapezoid' (composite trapezoid rule).
        Default: 'gk21' for finite intervals and 'gk15' for (semi-)infinite
    full_output : bool, optional
        Return an additional ``info`` dictionary.
    args : tuple, optional
        Extra arguments to pass to function, if any.

        .. versionadded:: 1.8.0

    Returns
    -------
    res : {float, array-like}
        Estimate for the result
    err : float
        Error estimate for the result in the given norm
    info : dict
        Returned only when ``full_output=True``.
        Info dictionary. Is an object with the attributes:

            success : bool
                Whether integration reached target precision.
            status : int
                Indicator for convergence, success (0),
                failure (1), and failure due to rounding error (2).
            neval : int
                Number of function evaluations.
            intervals : ndarray, shape (num_intervals, 2)
                Start and end points of subdivision intervals.
            integrals : ndarray, shape (num_intervals, ...)
                Integral for each interval.
                Note that at most ``cache_size`` values are recorded,
                and the array may contains *nan* for missing items.
            errors : ndarray, shape (num_intervals,)
                Estimated integration error for each interval.

    Notes
    -----
    The algorithm mainly follows the implementation of QUADPACK's
    DQAG* algorithms, implementing global error control and adaptive
    subdivision.

    The algorithm here has some differences to the QUADPACK approach:

    Instead of subdividing one interval at a time, the algorithm
    subdivides N intervals with largest errors at once. This enables
    (partial) parallelization of the integration.

    The logic of subdividing "next largest" intervals first is then
    not implemented, and we rely on the above extension to avoid
    concentrating on "small" intervals only.

    The Wynn epsilon table extrapolation is not used (QUADPACK uses it
    for infinite intervals). This is because the algorithm here is
    supposed to work on vector-valued functions, in an user-specified
    norm, and the extension of the epsilon algorithm to this case does
    not appear to be widely agreed. For max-norm, using elementwise
    Wynn epsilon could be possible, but we do not do this here with
    the hope that the epsilon extrapolation is mainly useful in
    special cases.

    References
    ----------
    [1] R. Piessens, E. de Doncker, QUADPACK (1983).

    Examples
    --------
    We can compute integrations of a vector-valued function:

    >>> from scipy.integrate import quad_vec
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> alpha = np.linspace(0.0, 2.0, num=30)
    >>> f = lambda x: x**alpha
    >>> x0, x1 = 0, 2
    >>> y, err = quad_vec(f, x0, x1)
    >>> plt.plot(alpha, y)
    >>> plt.xlabel(r"$\alpha$")
    >>> plt.ylabel(r"$\int_{0}^{2} x^\alpha dx$")
    >>> plt.show()

    """
    a = float(a)
    b = float(b)

    if args:
        if not isinstance(args, tuple):
            args = (args,)

        # create a wrapped function to allow the use of map and Pool.map
        f = _FunctionWrapper(f, args)

    # Use simple transformations to deal with integrals over infinite
    # intervals.
    kwargs = dict(epsabs=epsabs,
                  epsrel=epsrel,
                  norm=norm,
                  cache_size=cache_size,
                  limit=limit,
                  workers=workers,
                  points=points,
                  # transformed integrands are scalar-per-call wrappers;
                  # gk15 is the default rule for the recursive call
                  quadrature='gk15' if quadrature is None else quadrature,
                  full_output=full_output)
    if np.isfinite(a) and np.isinf(b):
        # (a, +oo) -> (0, 1); breakpoints are mapped through get_t
        f2 = SemiInfiniteFunc(f, start=a, infty=b)
        if points is not None:
            kwargs['points'] = tuple(f2.get_t(xp) for xp in points)
        return quad_vec(f2, 0, 1, **kwargs)
    elif np.isfinite(b) and np.isinf(a):
        # (-oo, b): transform from b and flip the sign of the result
        f2 = SemiInfiniteFunc(f, start=b, infty=a)
        if points is not None:
            kwargs['points'] = tuple(f2.get_t(xp) for xp in points)
        res = quad_vec(f2, 0, 1, **kwargs)
        return (-res[0],) + res[1:]
    elif np.isinf(a) and np.isinf(b):
        sgn = -1 if b < a else 1

        # NB. explicitly split integral at t=0, which separates
        # the positive and negative sides
        f2 = DoubleInfiniteFunc(f)
        if points is not None:
            kwargs['points'] = (0,) + tuple(f2.get_t(xp) for xp in points)
        else:
            kwargs['points'] = (0,)

        if a != b:
            res = quad_vec(f2, -1, 1, **kwargs)
        else:
            # a == b (both infinite, same sign): zero-width interval
            res = quad_vec(f2, 1, 1, **kwargs)

        return (res[0]*sgn,) + res[1:]
    elif not (np.isfinite(a) and np.isfinite(b)):
        raise ValueError(f"invalid integration bounds a={a}, b={b}")

    norm_funcs = {
        None: _max_norm,
        'max': _max_norm,
        '2': np.linalg.norm
    }
    if callable(norm):
        norm_func = norm
    else:
        norm_func = norm_funcs[norm]

    # Max number of intervals subdivided per round (enables parallelism).
    parallel_count = 128
    min_intervals = 2

    try:
        _quadrature = {None: _quadrature_gk21,
                       'gk21': _quadrature_gk21,
                       'gk15': _quadrature_gk15,
                       'trapz': _quadrature_trapezoid,  # alias for backcompat
                       'trapezoid': _quadrature_trapezoid}[quadrature]
    except KeyError as e:
        raise ValueError(f"unknown quadrature {quadrature!r}") from e

    # Initial interval set
    if points is None:
        initial_intervals = [(a, b)]
    else:
        # Split (a, b) at the sorted in-range breakpoints, skipping
        # duplicates and out-of-range points.
        prev = a
        initial_intervals = []
        for p in sorted(points):
            p = float(p)
            if not (a < p < b) or p == prev:
                continue
            initial_intervals.append((prev, p))
            prev = p
        initial_intervals.append((prev, b))

    global_integral = None
    global_error = None
    rounding_error = None
    interval_cache = None
    intervals = []
    neval = 0

    for x1, x2 in initial_intervals:
        ig, err, rnd = _quadrature(x1, x2, f, norm_func)
        neval += _quadrature.num_eval

        if global_integral is None:
            if isinstance(ig, (float, complex)):
                # Specialize for scalars
                if norm_func in (_max_norm, np.linalg.norm):
                    norm_func = abs

            global_integral = ig
            global_error = float(err)
            rounding_error = float(rnd)

            # Size the LRU cache so it stays within cache_size bytes,
            # assuming every stored integral is the size of this one.
            cache_count = cache_size // _get_sizeof(ig)
            interval_cache = LRUDict(cache_count)
        else:
            global_integral += ig
            global_error += err
            rounding_error += rnd

        interval_cache[(x1, x2)] = copy.copy(ig)
        # Min-heap keyed on -err, so the largest-error interval pops first.
        intervals.append((-err, x1, x2))

    heapq.heapify(intervals)

    CONVERGED = 0
    NOT_CONVERGED = 1
    ROUNDING_ERROR = 2
    NOT_A_NUMBER = 3

    status_msg = {
        CONVERGED: "Target precision reached.",
        NOT_CONVERGED: "Target precision not reached.",
        ROUNDING_ERROR: "Target precision could not be reached due to rounding error.",
        NOT_A_NUMBER: "Non-finite values encountered."
    }

    # Process intervals
    with MapWrapper(workers) as mapwrapper:
        ier = NOT_CONVERGED

        while intervals and len(intervals) < limit:
            # Select intervals with largest errors for subdivision
            tol = max(epsabs, epsrel*norm_func(global_integral))

            to_process = []
            err_sum = 0

            for j in range(parallel_count):
                if not intervals:
                    break

                if j > 0 and err_sum > global_error - tol/8:
                    # avoid unnecessary parallel splitting
                    break

                interval = heapq.heappop(intervals)

                neg_old_err, a, b = interval
                # Pop the cached integral over (a, b); None if evicted.
                old_int = interval_cache.pop((a, b), None)
                to_process.append(
                    ((-neg_old_err, a, b, old_int), f, norm_func, _quadrature)
                )
                err_sum += -neg_old_err

            # Subdivide intervals
            for parts in mapwrapper(_subdivide_interval, to_process):
                dint, derr, dround_err, subint, dneval = parts
                neval += dneval
                # Apply the deltas reported by each subdivision.
                global_integral += dint
                global_error += derr
                rounding_error += dround_err
                for x in subint:
                    x1, x2, ig, err = x
                    interval_cache[(x1, x2)] = ig
                    heapq.heappush(intervals, (-err, x1, x2))

            # Termination check
            if len(intervals) >= min_intervals:
                tol = max(epsabs, epsrel*norm_func(global_integral))
                if global_error < tol/8:
                    ier = CONVERGED
                    break
                if global_error < rounding_error:
                    ier = ROUNDING_ERROR
                    break

            if not (np.isfinite(global_error) and np.isfinite(rounding_error)):
                ier = NOT_A_NUMBER
                break

    res = global_integral
    err = global_error + rounding_error

    if full_output:
        res_arr = np.asarray(res)
        # Placeholder for intervals whose integral was evicted from the cache.
        dummy = np.full(res_arr.shape, np.nan, dtype=res_arr.dtype)
        integrals = np.array([interval_cache.get((z[1], z[2]), dummy)
                              for z in intervals], dtype=res_arr.dtype)
        errors = np.array([-z[0] for z in intervals])
        intervals = np.array([[z[1], z[2]] for z in intervals])

        info = _Bunch(neval=neval,
                      success=(ier == CONVERGED),
                      status=ier,
                      message=status_msg[ier],
                      intervals=intervals,
                      integrals=integrals,
                      errors=errors)
        return (res, err, info)
    else:
        return (res, err)
432
+
433
+
434
def _subdivide_interval(args):
    """Bisect one interval and report the resulting global updates.

    Parameters
    ----------
    args : tuple
        ``((old_err, a, b, old_int), f, norm_func, _quadrature)``;
        ``old_int`` is the previously computed integral over ``(a, b)``,
        or None if it was evicted from the interval cache.

    Returns
    -------
    dint, derr, dround_err
        Deltas to apply to the global integral, global error estimate,
        and rounding error after replacing (a, b) by its two halves.
    subintervals : tuple
        ``((a, c, s1, err1), (c, b, s2, err2))`` for the two halves.
    dneval : int
        Number of new function evaluations performed.
    """
    interval, f, norm_func, _quadrature = args
    old_err, a, b, old_int = interval

    c = 0.5 * (a + b)

    # Left-hand side
    if getattr(_quadrature, 'cache_size', 0) > 0:
        # Memoize f so evaluations shared between the two halves (and the
        # possible recomputation over (a, b)) are not repeated.
        f = functools.lru_cache(_quadrature.cache_size)(f)

    s1, err1, round1 = _quadrature(a, c, f, norm_func)
    dneval = _quadrature.num_eval
    s2, err2, round2 = _quadrature(c, b, f, norm_func)
    dneval += _quadrature.num_eval
    if old_int is None:
        # Parent integral was evicted from the cache; recompute it.
        old_int, _, _ = _quadrature(a, b, f, norm_func)
        dneval += _quadrature.num_eval

    if getattr(_quadrature, 'cache_size', 0) > 0:
        # With memoization the true evaluation count is the cache misses.
        dneval = f.cache_info().misses

    dint = s1 + s2 - old_int
    derr = err1 + err2 - old_err
    dround_err = round1 + round2

    subintervals = ((a, c, s1, err1), (c, b, s2, err2))
    return dint, derr, dround_err, subintervals, dneval
461
+
462
+
463
+ def _quadrature_trapezoid(x1, x2, f, norm_func):
464
+ """
465
+ Composite trapezoid quadrature
466
+ """
467
+ x3 = 0.5*(x1 + x2)
468
+ f1 = f(x1)
469
+ f2 = f(x2)
470
+ f3 = f(x3)
471
+
472
+ s2 = 0.25 * (x2 - x1) * (f1 + 2*f3 + f2)
473
+
474
+ round_err = 0.25 * abs(x2 - x1) * (float(norm_func(f1))
475
+ + 2*float(norm_func(f3))
476
+ + float(norm_func(f2))) * 2e-16
477
+
478
+ s1 = 0.5 * (x2 - x1) * (f1 + f2)
479
+ err = 1/3 * float(norm_func(s1 - s2))
480
+ return s2, err, round_err
481
+
482
+
483
+ _quadrature_trapezoid.cache_size = 3 * 3
484
+ _quadrature_trapezoid.num_eval = 3
485
+
486
+
487
+ def _quadrature_gk(a, b, f, norm_func, x, w, v):
488
+ """
489
+ Generic Gauss-Kronrod quadrature
490
+ """
491
+
492
+ fv = [0.0]*len(x)
493
+
494
+ c = 0.5 * (a + b)
495
+ h = 0.5 * (b - a)
496
+
497
+ # Gauss-Kronrod
498
+ s_k = 0.0
499
+ s_k_abs = 0.0
500
+ for i in range(len(x)):
501
+ ff = f(c + h*x[i])
502
+ fv[i] = ff
503
+
504
+ vv = v[i]
505
+
506
+ # \int f(x)
507
+ s_k += vv * ff
508
+ # \int |f(x)|
509
+ s_k_abs += vv * abs(ff)
510
+
511
+ # Gauss
512
+ s_g = 0.0
513
+ for i in range(len(w)):
514
+ s_g += w[i] * fv[2*i + 1]
515
+
516
+ # Quadrature of abs-deviation from average
517
+ s_k_dabs = 0.0
518
+ y0 = s_k / 2.0
519
+ for i in range(len(x)):
520
+ # \int |f(x) - y0|
521
+ s_k_dabs += v[i] * abs(fv[i] - y0)
522
+
523
+ # Use similar error estimation as quadpack
524
+ err = float(norm_func((s_k - s_g) * h))
525
+ dabs = float(norm_func(s_k_dabs * h))
526
+ if dabs != 0 and err != 0:
527
+ err = dabs * min(1.0, (200 * err / dabs)**1.5)
528
+
529
+ eps = sys.float_info.epsilon
530
+ round_err = float(norm_func(50 * eps * h * s_k_abs))
531
+
532
+ if round_err > sys.float_info.min:
533
+ err = max(err, round_err)
534
+
535
+ return h * s_k, err, round_err
536
+
537
+
538
def _quadrature_gk21(a, b, f, norm_func):
    """
    Gauss-Kronrod 21 quadrature with error estimate

    The 21 Kronrod nodes embed the 10-point Gauss rule (every second
    node), so the Gauss/Kronrod difference yields an error estimate
    with no additional function evaluations.
    """
    # Gauss-Kronrod points (symmetric about 0 on [-1, 1])
    x = (0.995657163025808080735527280689003,
         0.973906528517171720077964012084452,
         0.930157491355708226001207180059508,
         0.865063366688984510732096688423493,
         0.780817726586416897063717578345042,
         0.679409568299024406234327365114874,
         0.562757134668604683339000099272694,
         0.433395394129247190799265943165784,
         0.294392862701460198131126603103866,
         0.148874338981631210884826001129720,
         0,
         -0.148874338981631210884826001129720,
         -0.294392862701460198131126603103866,
         -0.433395394129247190799265943165784,
         -0.562757134668604683339000099272694,
         -0.679409568299024406234327365114874,
         -0.780817726586416897063717578345042,
         -0.865063366688984510732096688423493,
         -0.930157491355708226001207180059508,
         -0.973906528517171720077964012084452,
         -0.995657163025808080735527280689003)

    # 10-point weights (embedded Gauss rule)
    w = (0.066671344308688137593568809893332,
         0.149451349150580593145776339657697,
         0.219086362515982043995534934228163,
         0.269266719309996355091226921569469,
         0.295524224714752870173892994651338,
         0.295524224714752870173892994651338,
         0.269266719309996355091226921569469,
         0.219086362515982043995534934228163,
         0.149451349150580593145776339657697,
         0.066671344308688137593568809893332)

    # 21-point weights (Kronrod rule)
    v = (0.011694638867371874278064396062192,
         0.032558162307964727478818972459390,
         0.054755896574351996031381300244580,
         0.075039674810919952767043140916190,
         0.093125454583697605535065465083366,
         0.109387158802297641899210590325805,
         0.123491976262065851077958109831074,
         0.134709217311473325928054001771707,
         0.142775938577060080797094273138717,
         0.147739104901338491374841515972068,
         0.149445554002916905664936468389821,
         0.147739104901338491374841515972068,
         0.142775938577060080797094273138717,
         0.134709217311473325928054001771707,
         0.123491976262065851077958109831074,
         0.109387158802297641899210590325805,
         0.093125454583697605535065465083366,
         0.075039674810919952767043140916190,
         0.054755896574351996031381300244580,
         0.032558162307964727478818972459390,
         0.011694638867371874278064396062192)

    return _quadrature_gk(a, b, f, norm_func, x, w, v)


# Function evaluations per application of the rule.
_quadrature_gk21.num_eval = 21
604
+
605
+
606
def _quadrature_gk15(a, b, f, norm_func):
    """
    Gauss-Kronrod 15 quadrature with error estimate

    The 15 Kronrod nodes embed the 7-point Gauss rule (every second
    node), so the Gauss/Kronrod difference yields an error estimate
    with no additional function evaluations.
    """
    # Gauss-Kronrod points (symmetric about 0 on [-1, 1])
    x = (0.991455371120812639206854697526329,
         0.949107912342758524526189684047851,
         0.864864423359769072789712788640926,
         0.741531185599394439863864773280788,
         0.586087235467691130294144838258730,
         0.405845151377397166906606412076961,
         0.207784955007898467600689403773245,
         0.000000000000000000000000000000000,
         -0.207784955007898467600689403773245,
         -0.405845151377397166906606412076961,
         -0.586087235467691130294144838258730,
         -0.741531185599394439863864773280788,
         -0.864864423359769072789712788640926,
         -0.949107912342758524526189684047851,
         -0.991455371120812639206854697526329)

    # 7-point weights (embedded Gauss rule)
    w = (0.129484966168869693270611432679082,
         0.279705391489276667901467771423780,
         0.381830050505118944950369775488975,
         0.417959183673469387755102040816327,
         0.381830050505118944950369775488975,
         0.279705391489276667901467771423780,
         0.129484966168869693270611432679082)

    # 15-point weights (Kronrod rule)
    v = (0.022935322010529224963732008058970,
         0.063092092629978553290700663189204,
         0.104790010322250183839876322541518,
         0.140653259715525918745189590510238,
         0.169004726639267902826583426598550,
         0.190350578064785409913256402421014,
         0.204432940075298892414161999234649,
         0.209482141084727828012999174891714,
         0.204432940075298892414161999234649,
         0.190350578064785409913256402421014,
         0.169004726639267902826583426598550,
         0.140653259715525918745189590510238,
         0.104790010322250183839876322541518,
         0.063092092629978553290700663189204,
         0.022935322010529224963732008058970)

    return _quadrature_gk(a, b, f, norm_func, x, w, v)


# Function evaluations per application of the rule.
_quadrature_gk15.num_eval = 15
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_quadpack.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (116 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_quadpack_py.py ADDED
@@ -0,0 +1,1291 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Author: Travis Oliphant 2001
2
+ # Author: Nathan Woods 2013 (nquad &c)
3
+ import sys
4
+ import warnings
5
+ from functools import partial
6
+
7
+ from . import _quadpack
8
+ import numpy as np
9
+
10
+ __all__ = ["quad", "dblquad", "tplquad", "nquad", "IntegrationWarning"]
11
+
12
+
13
+ error = _quadpack.error
14
+
15
class IntegrationWarning(UserWarning):
    """
    Warning on issues during integration.
    """
20
+
21
+
22
+ def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8,
23
+ limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50,
24
+ limlst=50, complex_func=False):
25
+ """
26
+ Compute a definite integral.
27
+
28
+ Integrate func from `a` to `b` (possibly infinite interval) using a
29
+ technique from the Fortran library QUADPACK.
30
+
31
+ Parameters
32
+ ----------
33
+ func : {function, scipy.LowLevelCallable}
34
+ A Python function or method to integrate. If `func` takes many
35
+ arguments, it is integrated along the axis corresponding to the
36
+ first argument.
37
+
38
+ If the user desires improved integration performance, then `f` may
39
+ be a `scipy.LowLevelCallable` with one of the signatures::
40
+
41
+ double func(double x)
42
+ double func(double x, void *user_data)
43
+ double func(int n, double *xx)
44
+ double func(int n, double *xx, void *user_data)
45
+
46
+ The ``user_data`` is the data contained in the `scipy.LowLevelCallable`.
47
+ In the call forms with ``xx``, ``n`` is the length of the ``xx``
48
+ array which contains ``xx[0] == x`` and the rest of the items are
49
+ numbers contained in the ``args`` argument of quad.
50
+
51
+ In addition, certain ctypes call signatures are supported for
52
+ backward compatibility, but those should not be used in new code.
53
+ a : float
54
+ Lower limit of integration (use -numpy.inf for -infinity).
55
+ b : float
56
+ Upper limit of integration (use numpy.inf for +infinity).
57
+ args : tuple, optional
58
+ Extra arguments to pass to `func`.
59
+ full_output : int, optional
60
+ Non-zero to return a dictionary of integration information.
61
+ If non-zero, warning messages are also suppressed and the
62
+ message is appended to the output tuple.
63
+ complex_func : bool, optional
64
+ Indicate if the function's (`func`) return type is real
65
+ (``complex_func=False``: default) or complex (``complex_func=True``).
66
+ In both cases, the function's argument is real.
67
+ If full_output is also non-zero, the `infodict`, `message`, and
68
+ `explain` for the real and complex components are returned in
69
+ a dictionary with keys "real output" and "imag output".
70
+
71
+ Returns
72
+ -------
73
+ y : float
74
+ The integral of func from `a` to `b`.
75
+ abserr : float
76
+ An estimate of the absolute error in the result.
77
+ infodict : dict
78
+ A dictionary containing additional information.
79
+ message
80
+ A convergence message.
81
+ explain
82
+ Appended only with 'cos' or 'sin' weighting and infinite
83
+ integration limits, it contains an explanation of the codes in
84
+ infodict['ierlst']
85
+
86
+ Other Parameters
87
+ ----------------
88
+ epsabs : float or int, optional
89
+ Absolute error tolerance. Default is 1.49e-8. `quad` tries to obtain
90
+ an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
91
+ where ``i`` = integral of `func` from `a` to `b`, and ``result`` is the
92
+ numerical approximation. See `epsrel` below.
93
+ epsrel : float or int, optional
94
+ Relative error tolerance. Default is 1.49e-8.
95
+ If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
96
+ and ``50 * (machine epsilon)``. See `epsabs` above.
97
+ limit : float or int, optional
98
+ An upper bound on the number of subintervals used in the adaptive
99
+ algorithm.
100
+ points : (sequence of floats,ints), optional
101
+ A sequence of break points in the bounded integration interval
102
+ where local difficulties of the integrand may occur (e.g.,
103
+ singularities, discontinuities). The sequence does not have
104
+ to be sorted. Note that this option cannot be used in conjunction
105
+ with ``weight``.
106
+ weight : float or int, optional
107
+ String indicating weighting function. Full explanation for this
108
+ and the remaining arguments can be found below.
109
+ wvar : optional
110
+ Variables for use with weighting functions.
111
+ wopts : optional
112
+ Optional input for reusing Chebyshev moments.
113
+ maxp1 : float or int, optional
114
+ An upper bound on the number of Chebyshev moments.
115
+ limlst : int, optional
116
+ Upper bound on the number of cycles (>=3) for use with a sinusoidal
117
+ weighting and an infinite end-point.
118
+
119
+ See Also
120
+ --------
121
+ dblquad : double integral
122
+ tplquad : triple integral
123
+ nquad : n-dimensional integrals (uses `quad` recursively)
124
+ fixed_quad : fixed-order Gaussian quadrature
125
+ quadrature : adaptive Gaussian quadrature
126
+ odeint : ODE integrator
127
+ ode : ODE integrator
128
+ simpson : integrator for sampled data
129
+ romb : integrator for sampled data
130
+ scipy.special : for coefficients and roots of orthogonal polynomials
131
+
132
+ Notes
133
+ -----
134
+ For valid results, the integral must converge; behavior for divergent
135
+ integrals is not guaranteed.
136
+
137
+ **Extra information for quad() inputs and outputs**
138
+
139
+ If full_output is non-zero, then the third output argument
140
+ (infodict) is a dictionary with entries as tabulated below. For
141
+ infinite limits, the range is transformed to (0,1) and the
142
+ optional outputs are given with respect to this transformed range.
143
+ Let M be the input argument limit and let K be infodict['last'].
144
+ The entries are:
145
+
146
+ 'neval'
147
+ The number of function evaluations.
148
+ 'last'
149
+ The number, K, of subintervals produced in the subdivision process.
150
+ 'alist'
151
+ A rank-1 array of length M, the first K elements of which are the
152
+ left end points of the subintervals in the partition of the
153
+ integration range.
154
+ 'blist'
155
+ A rank-1 array of length M, the first K elements of which are the
156
+ right end points of the subintervals.
157
+ 'rlist'
158
+ A rank-1 array of length M, the first K elements of which are the
159
+ integral approximations on the subintervals.
160
+ 'elist'
161
+ A rank-1 array of length M, the first K elements of which are the
162
+ moduli of the absolute error estimates on the subintervals.
163
+ 'iord'
164
+ A rank-1 integer array of length M, the first L elements of
165
+ which are pointers to the error estimates over the subintervals
166
+ with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the
167
+ sequence ``infodict['iord']`` and let E be the sequence
168
+ ``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a
169
+ decreasing sequence.
170
+
171
+ If the input argument points is provided (i.e., it is not None),
172
+ the following additional outputs are placed in the output
173
+ dictionary. Assume the points sequence is of length P.
174
+
175
+ 'pts'
176
+ A rank-1 array of length P+2 containing the integration limits
177
+ and the break points of the intervals in ascending order.
178
+ This is an array giving the subintervals over which integration
179
+ will occur.
180
+ 'level'
181
+ A rank-1 integer array of length M (=limit), containing the
182
+ subdivision levels of the subintervals, i.e., if (aa,bb) is a
183
+ subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]``
184
+ are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l
185
+ if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``.
186
+ 'ndin'
187
+ A rank-1 integer array of length P+2. After the first integration
188
+ over the intervals (pts[1], pts[2]), the error estimates over some
189
+ of the intervals may have been increased artificially in order to
190
+ put their subdivision forward. This array has ones in slots
191
+ corresponding to the subintervals for which this happens.
192
+
193
+ **Weighting the integrand**
194
+
195
+ The input variables, *weight* and *wvar*, are used to weight the
196
+ integrand by a select list of functions. Different integration
197
+ methods are used to compute the integral with these weighting
198
+ functions, and these do not support specifying break points. The
199
+ possible values of weight and the corresponding weighting functions are.
200
+
201
+ ========== =================================== =====================
202
+ ``weight`` Weight function used ``wvar``
203
+ ========== =================================== =====================
204
+ 'cos' cos(w*x) wvar = w
205
+ 'sin' sin(w*x) wvar = w
206
+ 'alg' g(x) = ((x-a)**alpha)*((b-x)**beta) wvar = (alpha, beta)
207
+ 'alg-loga' g(x)*log(x-a) wvar = (alpha, beta)
208
+ 'alg-logb' g(x)*log(b-x) wvar = (alpha, beta)
209
+ 'alg-log' g(x)*log(x-a)*log(b-x) wvar = (alpha, beta)
210
+ 'cauchy' 1/(x-c) wvar = c
211
+ ========== =================================== =====================
212
+
213
+ wvar holds the parameter w, (alpha, beta), or c depending on the weight
214
+ selected. In these expressions, a and b are the integration limits.
215
+
216
+ For the 'cos' and 'sin' weighting, additional inputs and outputs are
217
+ available.
218
+
219
+ For finite integration limits, the integration is performed using a
220
+ Clenshaw-Curtis method which uses Chebyshev moments. For repeated
221
+ calculations, these moments are saved in the output dictionary:
222
+
223
+ 'momcom'
224
+ The maximum level of Chebyshev moments that have been computed,
225
+ i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been
226
+ computed for intervals of length ``|b-a| * 2**(-l)``,
227
+ ``l=0,1,...,M_c``.
228
+ 'nnlog'
229
+ A rank-1 integer array of length M(=limit), containing the
230
+ subdivision levels of the subintervals, i.e., an element of this
231
+ array is equal to l if the corresponding subinterval is
232
+ ``|b-a|* 2**(-l)``.
233
+ 'chebmo'
234
+ A rank-2 array of shape (25, maxp1) containing the computed
235
+ Chebyshev moments. These can be passed on to an integration
236
+ over the same interval by passing this array as the second
237
+ element of the sequence wopts and passing infodict['momcom'] as
238
+ the first element.
239
+
240
+ If one of the integration limits is infinite, then a Fourier integral is
241
+ computed (assuming w neq 0). If full_output is 1 and a numerical error
242
+ is encountered, besides the error message attached to the output tuple,
243
+ a dictionary is also appended to the output tuple which translates the
244
+ error codes in the array ``info['ierlst']`` to English messages. The
245
+ output information dictionary contains the following entries instead of
246
+ 'last', 'alist', 'blist', 'rlist', and 'elist':
247
+
248
+ 'lst'
249
+ The number of subintervals needed for the integration (call it ``K_f``).
250
+ 'rslst'
251
+ A rank-1 array of length M_f=limlst, whose first ``K_f`` elements
252
+ contain the integral contribution over the interval
253
+ ``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|``
254
+ and ``k=1,2,...,K_f``.
255
+ 'erlst'
256
+ A rank-1 array of length ``M_f`` containing the error estimate
257
+ corresponding to the interval in the same position in
258
+ ``infodict['rslist']``.
259
+ 'ierlst'
260
+ A rank-1 integer array of length ``M_f`` containing an error flag
261
+ corresponding to the interval in the same position in
262
+ ``infodict['rslist']``. See the explanation dictionary (last entry
263
+ in the output tuple) for the meaning of the codes.
264
+
265
+
266
+ **Details of QUADPACK level routines**
267
+
268
+ `quad` calls routines from the FORTRAN library QUADPACK. This section
269
+ provides details on the conditions for each routine to be called and a
270
+ short description of each routine. The routine called depends on
271
+ `weight`, `points` and the integration limits `a` and `b`.
272
+
273
+ ================ ============== ========== =====================
274
+ QUADPACK routine `weight` `points` infinite bounds
275
+ ================ ============== ========== =====================
276
+ qagse None No No
277
+ qagie None No Yes
278
+ qagpe None Yes No
279
+ qawoe 'sin', 'cos' No No
280
+ qawfe 'sin', 'cos' No either `a` or `b`
281
+ qawse 'alg*' No No
282
+ qawce 'cauchy' No No
283
+ ================ ============== ========== =====================
284
+
285
+ The following provides a short description from [1]_ for each
286
+ routine.
287
+
288
+ qagse
289
+ is an integrator based on globally adaptive interval
290
+ subdivision in connection with extrapolation, which will
291
+ eliminate the effects of integrand singularities of
292
+ several types.
293
+ qagie
294
+ handles integration over infinite intervals. The infinite range is
295
+ mapped onto a finite interval and subsequently the same strategy as
296
+ in ``QAGS`` is applied.
297
+ qagpe
298
+ serves the same purposes as QAGS, but also allows the
299
+ user to provide explicit information about the location
300
+ and type of trouble-spots i.e. the abscissae of internal
301
+ singularities, discontinuities and other difficulties of
302
+ the integrand function.
303
+ qawoe
304
+ is an integrator for the evaluation of
305
+ :math:`\\int^b_a \\cos(\\omega x)f(x)dx` or
306
+ :math:`\\int^b_a \\sin(\\omega x)f(x)dx`
307
+ over a finite interval [a,b], where :math:`\\omega` and :math:`f`
308
+ are specified by the user. The rule evaluation component is based
309
+ on the modified Clenshaw-Curtis technique
310
+
311
+ An adaptive subdivision scheme is used in connection
312
+ with an extrapolation procedure, which is a modification
313
+ of that in ``QAGS`` and allows the algorithm to deal with
314
+ singularities in :math:`f(x)`.
315
+ qawfe
316
+ calculates the Fourier transform
317
+ :math:`\\int^\\infty_a \\cos(\\omega x)f(x)dx` or
318
+ :math:`\\int^\\infty_a \\sin(\\omega x)f(x)dx`
319
+ for user-provided :math:`\\omega` and :math:`f`. The procedure of
320
+ ``QAWO`` is applied on successive finite intervals, and convergence
321
+ acceleration by means of the :math:`\\varepsilon`-algorithm is applied
322
+ to the series of integral approximations.
323
+ qawse
324
+ approximate :math:`\\int^b_a w(x)f(x)dx`, with :math:`a < b` where
325
+ :math:`w(x) = (x-a)^{\\alpha}(b-x)^{\\beta}v(x)` with
326
+ :math:`\\alpha,\\beta > -1`, where :math:`v(x)` may be one of the
327
+ following functions: :math:`1`, :math:`\\log(x-a)`, :math:`\\log(b-x)`,
328
+ :math:`\\log(x-a)\\log(b-x)`.
329
+
330
+ The user specifies :math:`\\alpha`, :math:`\\beta` and the type of the
331
+ function :math:`v`. A globally adaptive subdivision strategy is
332
+ applied, with modified Clenshaw-Curtis integration on those
333
+ subintervals which contain `a` or `b`.
334
+ qawce
335
+ compute :math:`\\int^b_a f(x) / (x-c)dx` where the integral must be
336
+ interpreted as a Cauchy principal value integral, for user specified
337
+ :math:`c` and :math:`f`. The strategy is globally adaptive. Modified
338
+ Clenshaw-Curtis integration is used on those intervals containing the
339
+ point :math:`x = c`.
340
+
341
+ **Integration of Complex Function of a Real Variable**
342
+
343
+ A complex valued function, :math:`f`, of a real variable can be written as
344
+ :math:`f = g + ih`. Similarly, the integral of :math:`f` can be
345
+ written as
346
+
347
+ .. math::
348
+ \\int_a^b f(x) dx = \\int_a^b g(x) dx + i\\int_a^b h(x) dx
349
+
350
+ assuming that the integrals of :math:`g` and :math:`h` exist
351
+ over the interval :math:`[a,b]` [2]_. Therefore, ``quad`` integrates
352
+ complex-valued functions by integrating the real and imaginary components
353
+ separately.
354
+
355
+
356
+ References
357
+ ----------
358
+
359
+ .. [1] Piessens, Robert; de Doncker-Kapenga, Elise;
360
+ Überhuber, Christoph W.; Kahaner, David (1983).
361
+ QUADPACK: A subroutine package for automatic integration.
362
+ Springer-Verlag.
363
+ ISBN 978-3-540-12553-2.
364
+
365
+ .. [2] McCullough, Thomas; Phillips, Keith (1973).
366
+ Foundations of Analysis in the Complex Plane.
367
+ Holt Rinehart Winston.
368
+ ISBN 0-03-086370-8
369
+
370
+ Examples
371
+ --------
372
+ Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result
373
+
374
+ >>> from scipy import integrate
375
+ >>> import numpy as np
376
+ >>> x2 = lambda x: x**2
377
+ >>> integrate.quad(x2, 0, 4)
378
+ (21.333333333333332, 2.3684757858670003e-13)
379
+ >>> print(4**3 / 3.) # analytical result
380
+ 21.3333333333
381
+
382
+ Calculate :math:`\\int^\\infty_0 e^{-x} dx`
383
+
384
+ >>> invexp = lambda x: np.exp(-x)
385
+ >>> integrate.quad(invexp, 0, np.inf)
386
+ (1.0, 5.842605999138044e-11)
387
+
388
+ Calculate :math:`\\int^1_0 a x \\,dx` for :math:`a = 1, 3`
389
+
390
+ >>> f = lambda x, a: a*x
391
+ >>> y, err = integrate.quad(f, 0, 1, args=(1,))
392
+ >>> y
393
+ 0.5
394
+ >>> y, err = integrate.quad(f, 0, 1, args=(3,))
395
+ >>> y
396
+ 1.5
397
+
398
+ Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding
399
+ y parameter as 1::
400
+
401
+ testlib.c =>
402
+ double func(int n, double args[n]){
403
+ return args[0]*args[0] + args[1]*args[1];}
404
+ compile to library testlib.*
405
+
406
+ ::
407
+
408
+ from scipy import integrate
409
+ import ctypes
410
+ lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path
411
+ lib.func.restype = ctypes.c_double
412
+ lib.func.argtypes = (ctypes.c_int,ctypes.c_double)
413
+ integrate.quad(lib.func,0,1,(1))
414
+ #(1.3333333333333333, 1.4802973661668752e-14)
415
+ print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result
416
+ # 1.3333333333333333
417
+
418
+ Be aware that pulse shapes and other sharp features as compared to the
419
+ size of the integration interval may not be integrated correctly using
420
+ this method. A simplified example of this limitation is integrating a
421
+ y-axis reflected step function with many zero values within the integrals
422
+ bounds.
423
+
424
+ >>> y = lambda x: 1 if x<=0 else 0
425
+ >>> integrate.quad(y, -1, 1)
426
+ (1.0, 1.1102230246251565e-14)
427
+ >>> integrate.quad(y, -1, 100)
428
+ (1.0000000002199108, 1.0189464580163188e-08)
429
+ >>> integrate.quad(y, -1, 10000)
430
+ (0.0, 0.0)
431
+
432
+ """
433
+ if not isinstance(args, tuple):
434
+ args = (args,)
435
+
436
+ # check the limits of integration: \int_a^b, expect a < b
437
+ flip, a, b = b < a, min(a, b), max(a, b)
438
+
439
+ if complex_func:
440
+ def imfunc(x, *args):
441
+ return func(x, *args).imag
442
+
443
+ def refunc(x, *args):
444
+ return func(x, *args).real
445
+
446
+ re_retval = quad(refunc, a, b, args, full_output, epsabs,
447
+ epsrel, limit, points, weight, wvar, wopts,
448
+ maxp1, limlst, complex_func=False)
449
+ im_retval = quad(imfunc, a, b, args, full_output, epsabs,
450
+ epsrel, limit, points, weight, wvar, wopts,
451
+ maxp1, limlst, complex_func=False)
452
+ integral = re_retval[0] + 1j*im_retval[0]
453
+ error_estimate = re_retval[1] + 1j*im_retval[1]
454
+ retval = integral, error_estimate
455
+ if full_output:
456
+ msgexp = {}
457
+ msgexp["real"] = re_retval[2:]
458
+ msgexp["imag"] = im_retval[2:]
459
+ retval = retval + (msgexp,)
460
+
461
+ return retval
462
+
463
+ if weight is None:
464
+ retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
465
+ points)
466
+ else:
467
+ if points is not None:
468
+ msg = ("Break points cannot be specified when using weighted integrand.\n"
469
+ "Continuing, ignoring specified points.")
470
+ warnings.warn(msg, IntegrationWarning, stacklevel=2)
471
+ retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
472
+ limlst, limit, maxp1, weight, wvar, wopts)
473
+
474
+ if flip:
475
+ retval = (-retval[0],) + retval[1:]
476
+
477
+ ier = retval[-1]
478
+ if ier == 0:
479
+ return retval[:-1]
480
+
481
+ msgs = {80: "A Python error occurred possibly while calling the function.",
482
+ 1: f"The maximum number of subdivisions ({limit}) has been achieved.\n "
483
+ f"If increasing the limit yields no improvement it is advised to "
484
+ f"analyze \n the integrand in order to determine the difficulties. "
485
+ f"If the position of a \n local difficulty can be determined "
486
+ f"(singularity, discontinuity) one will \n probably gain from "
487
+ f"splitting up the interval and calling the integrator \n on the "
488
+ f"subranges. Perhaps a special-purpose integrator should be used.",
489
+ 2: "The occurrence of roundoff error is detected, which prevents \n "
490
+ "the requested tolerance from being achieved. "
491
+ "The error may be \n underestimated.",
492
+ 3: "Extremely bad integrand behavior occurs at some points of the\n "
493
+ "integration interval.",
494
+ 4: "The algorithm does not converge. Roundoff error is detected\n "
495
+ "in the extrapolation table. It is assumed that the requested "
496
+ "tolerance\n cannot be achieved, and that the returned result "
497
+ "(if full_output = 1) is \n the best which can be obtained.",
498
+ 5: "The integral is probably divergent, or slowly convergent.",
499
+ 6: "The input is invalid.",
500
+ 7: "Abnormal termination of the routine. The estimates for result\n "
501
+ "and error are less reliable. It is assumed that the requested "
502
+ "accuracy\n has not been achieved.",
503
+ 'unknown': "Unknown error."}
504
+
505
+ if weight in ['cos','sin'] and (b == np.inf or a == -np.inf):
506
+ msgs[1] = (
507
+ "The maximum number of cycles allowed has been achieved., e.e.\n of "
508
+ "subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n "
509
+ "*pi/abs(omega), for k = 1, 2, ..., lst. "
510
+ "One can allow more cycles by increasing the value of limlst. "
511
+ "Look at info['ierlst'] with full_output=1."
512
+ )
513
+ msgs[4] = (
514
+ "The extrapolation table constructed for convergence acceleration\n of "
515
+ "the series formed by the integral contributions over the cycles, \n does "
516
+ "not converge to within the requested accuracy. "
517
+ "Look at \n info['ierlst'] with full_output=1."
518
+ )
519
+ msgs[7] = (
520
+ "Bad integrand behavior occurs within one or more of the cycles.\n "
521
+ "Location and type of the difficulty involved can be determined from \n "
522
+ "the vector info['ierlist'] obtained with full_output=1."
523
+ )
524
+ explain = {1: "The maximum number of subdivisions (= limit) has been \n "
525
+ "achieved on this cycle.",
526
+ 2: "The occurrence of roundoff error is detected and prevents\n "
527
+ "the tolerance imposed on this cycle from being achieved.",
528
+ 3: "Extremely bad integrand behavior occurs at some points of\n "
529
+ "this cycle.",
530
+ 4: "The integral over this cycle does not converge (to within the "
531
+ "required accuracy) due to roundoff in the extrapolation "
532
+ "procedure invoked on this cycle. It is assumed that the result "
533
+ "on this interval is the best which can be obtained.",
534
+ 5: "The integral over this cycle is probably divergent or "
535
+ "slowly convergent."}
536
+
537
+ try:
538
+ msg = msgs[ier]
539
+ except KeyError:
540
+ msg = msgs['unknown']
541
+
542
+ if ier in [1,2,3,4,5,7]:
543
+ if full_output:
544
+ if weight in ['cos', 'sin'] and (b == np.inf or a == -np.inf):
545
+ return retval[:-1] + (msg, explain)
546
+ else:
547
+ return retval[:-1] + (msg,)
548
+ else:
549
+ warnings.warn(msg, IntegrationWarning, stacklevel=2)
550
+ return retval[:-1]
551
+
552
+ elif ier == 6: # Forensic decision tree when QUADPACK throws ier=6
553
+ if epsabs <= 0: # Small error tolerance - applies to all methods
554
+ if epsrel < max(50 * sys.float_info.epsilon, 5e-29):
555
+ msg = ("If 'epsabs'<=0, 'epsrel' must be greater than both"
556
+ " 5e-29 and 50*(machine epsilon).")
557
+ elif weight in ['sin', 'cos'] and (abs(a) + abs(b) == np.inf):
558
+ msg = ("Sine or cosine weighted integrals with infinite domain"
559
+ " must have 'epsabs'>0.")
560
+
561
+ elif weight is None:
562
+ if points is None: # QAGSE/QAGIE
563
+ msg = ("Invalid 'limit' argument. There must be"
564
+ " at least one subinterval")
565
+ else: # QAGPE
566
+ if not (min(a, b) <= min(points) <= max(points) <= max(a, b)):
567
+ msg = ("All break points in 'points' must lie within the"
568
+ " integration limits.")
569
+ elif len(points) >= limit:
570
+ msg = (f"Number of break points ({len(points):d}) "
571
+ f"must be less than subinterval limit ({limit:d})")
572
+
573
+ else:
574
+ if maxp1 < 1:
575
+ msg = "Chebyshev moment limit maxp1 must be >=1."
576
+
577
+ elif weight in ('cos', 'sin') and abs(a+b) == np.inf: # QAWFE
578
+ msg = "Cycle limit limlst must be >=3."
579
+
580
+ elif weight.startswith('alg'): # QAWSE
581
+ if min(wvar) < -1:
582
+ msg = "wvar parameters (alpha, beta) must both be >= -1."
583
+ if b < a:
584
+ msg = "Integration limits a, b must satistfy a<b."
585
+
586
+ elif weight == 'cauchy' and wvar in (a, b):
587
+ msg = ("Parameter 'wvar' must not equal"
588
+ " integration limits 'a' or 'b'.")
589
+
590
+ raise ValueError(msg)
591
+
592
+
593
+ def _quad(func,a,b,args,full_output,epsabs,epsrel,limit,points):
594
+ infbounds = 0
595
+ if (b != np.inf and a != -np.inf):
596
+ pass # standard integration
597
+ elif (b == np.inf and a != -np.inf):
598
+ infbounds = 1
599
+ bound = a
600
+ elif (b == np.inf and a == -np.inf):
601
+ infbounds = 2
602
+ bound = 0 # ignored
603
+ elif (b != np.inf and a == -np.inf):
604
+ infbounds = -1
605
+ bound = b
606
+ else:
607
+ raise RuntimeError("Infinity comparisons don't work for you.")
608
+
609
+ if points is None:
610
+ if infbounds == 0:
611
+ return _quadpack._qagse(func,a,b,args,full_output,epsabs,epsrel,limit)
612
+ else:
613
+ return _quadpack._qagie(func, bound, infbounds, args, full_output,
614
+ epsabs, epsrel, limit)
615
+ else:
616
+ if infbounds != 0:
617
+ raise ValueError("Infinity inputs cannot be used with break points.")
618
+ else:
619
+ #Duplicates force function evaluation at singular points
620
+ the_points = np.unique(points)
621
+ the_points = the_points[a < the_points]
622
+ the_points = the_points[the_points < b]
623
+ the_points = np.concatenate((the_points, (0., 0.)))
624
+ return _quadpack._qagpe(func, a, b, the_points, args, full_output,
625
+ epsabs, epsrel, limit)
626
+
627
+
628
+ def _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
629
+ limlst, limit, maxp1,weight, wvar, wopts):
630
+ if weight not in ['cos','sin','alg','alg-loga','alg-logb','alg-log','cauchy']:
631
+ raise ValueError("%s not a recognized weighting function." % weight)
632
+
633
+ strdict = {'cos':1,'sin':2,'alg':1,'alg-loga':2,'alg-logb':3,'alg-log':4}
634
+
635
+ if weight in ['cos','sin']:
636
+ integr = strdict[weight]
637
+ if (b != np.inf and a != -np.inf): # finite limits
638
+ if wopts is None: # no precomputed Chebyshev moments
639
+ return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
640
+ epsabs, epsrel, limit, maxp1,1)
641
+ else: # precomputed Chebyshev moments
642
+ momcom = wopts[0]
643
+ chebcom = wopts[1]
644
+ return _quadpack._qawoe(func, a, b, wvar, integr, args,
645
+ full_output,epsabs, epsrel, limit, maxp1, 2,
646
+ momcom, chebcom)
647
+
648
+ elif (b == np.inf and a != -np.inf):
649
+ return _quadpack._qawfe(func, a, wvar, integr, args, full_output,
650
+ epsabs, limlst, limit, maxp1)
651
+ elif (b != np.inf and a == -np.inf): # remap function and interval
652
+ if weight == 'cos':
653
+ def thefunc(x,*myargs):
654
+ y = -x
655
+ func = myargs[0]
656
+ myargs = (y,) + myargs[1:]
657
+ return func(*myargs)
658
+ else:
659
+ def thefunc(x,*myargs):
660
+ y = -x
661
+ func = myargs[0]
662
+ myargs = (y,) + myargs[1:]
663
+ return -func(*myargs)
664
+ args = (func,) + args
665
+ return _quadpack._qawfe(thefunc, -b, wvar, integr, args,
666
+ full_output, epsabs, limlst, limit, maxp1)
667
+ else:
668
+ raise ValueError("Cannot integrate with this weight from -Inf to +Inf.")
669
+ else:
670
+ if a in [-np.inf, np.inf] or b in [-np.inf, np.inf]:
671
+ message = "Cannot integrate with this weight over an infinite interval."
672
+ raise ValueError(message)
673
+
674
+ if weight.startswith('alg'):
675
+ integr = strdict[weight]
676
+ return _quadpack._qawse(func, a, b, wvar, integr, args,
677
+ full_output, epsabs, epsrel, limit)
678
+ else: # weight == 'cauchy'
679
+ return _quadpack._qawce(func, a, b, wvar, args, full_output,
680
+ epsabs, epsrel, limit)
681
+
682
+
683
def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):
    """
    Compute a double integral.

    Return the double (definite) integral of ``func(y, x)`` from ``x = a..b``
    and ``y = gfun(x)..hfun(x)``.

    Parameters
    ----------
    func : callable
        A Python function or method of at least two variables: y must be the
        first argument and x the second argument.
    a, b : float
        The limits of integration in x: `a` < `b`.
    gfun : callable or float
        The lower boundary curve in y: either a function taking a single
        floating point argument (x) and returning a floating point result,
        or a float indicating a constant boundary curve.
    hfun : callable or float
        The upper boundary curve in y (same requirements as `gfun`).
    args : sequence, optional
        Extra arguments to pass to `func`.
    epsabs : float, optional
        Absolute tolerance passed directly to the inner 1-D quadrature
        integration. Default is 1.49e-8.
    epsrel : float, optional
        Relative tolerance of the inner 1-D integrals. Default is 1.49e-8.
        If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
        and ``50 * (machine epsilon)``.

    Returns
    -------
    y : float
        The resultant integral.
    abserr : float
        An estimate of the error.

    See Also
    --------
    quad : single integral
    tplquad : triple integral
    nquad : N-dimensional integrals

    Notes
    -----
    For valid results, the integral must converge; behavior for divergent
    integrals is not guaranteed.  ``qagse`` handles the finite-limit levels
    and ``qagie`` any level with one or two infinite limits [1]_.

    References
    ----------
    .. [1] Piessens, Robert; de Doncker-Kapenga, Elise;
           Überhuber, Christoph W.; Kahaner, David (1983).
           QUADPACK: A subroutine package for automatic integration.
           Springer-Verlag.
           ISBN 978-3-540-12553-2.

    Examples
    --------
    Compute the double integral of ``x * y**2`` over the box
    ``x`` ranging from 0 to 2 and ``y`` ranging from 0 to 1.

    >>> import numpy as np
    >>> from scipy import integrate
    >>> f = lambda y, x: x*y**2
    >>> integrate.dblquad(f, 0, 2, 0, 1)
    (0.6666666666666667, 7.401486830834377e-15)

    Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=2-x}_{y=x} a x y \\,dy \\,dx`
    for :math:`a=1, 3`.

    >>> f = lambda y, x, a: a*x*y
    >>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(1,))
    (0.33333333333333337, 5.551115123125783e-15)
    >>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(3,))
    (0.9999999999999999, 1.6653345369377348e-14)
    """
    # Delegate to nquad.  The inner (y) range may depend on the outer
    # variable x, so it is expressed as a callable; constants are allowed
    # for gfun/hfun, hence the callable() checks.
    def y_range(*a_):
        x = a_[0]
        lo = gfun(x) if callable(gfun) else gfun
        hi = hfun(x) if callable(hfun) else hfun
        return [lo, hi]

    return nquad(func, [y_range, [a, b]], args=args,
                 opts={"epsabs": epsabs, "epsrel": epsrel})
816
+
817
+
818
def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8,
            epsrel=1.49e-8):
    """
    Compute a triple (definite) integral.

    Return the triple integral of ``func(z, y, x)`` from ``x = a..b``,
    ``y = gfun(x)..hfun(x)``, and ``z = qfun(x,y)..rfun(x,y)``.

    Parameters
    ----------
    func : function
        A Python function or method of at least three variables in the
        order (z, y, x).
    a, b : float
        The limits of integration in x: `a` < `b`.
    gfun : function or float
        The lower boundary curve in y: either a function taking a single
        floating point argument (x) and returning a floating point result,
        or a float indicating a constant boundary curve.
    hfun : function or float
        The upper boundary curve in y (same requirements as `gfun`).
    qfun : function or float
        The lower boundary surface in z. It must be a function that takes
        two floats in the order (x, y) and returns a float, or a float
        indicating a constant boundary surface.
    rfun : function or float
        The upper boundary surface in z. (Same requirements as `qfun`.)
    args : tuple, optional
        Extra arguments to pass to `func`.
    epsabs : float, optional
        Absolute tolerance passed directly to the innermost 1-D quadrature
        integration. Default is 1.49e-8.
    epsrel : float, optional
        Relative tolerance of the innermost 1-D integrals. Default is 1.49e-8.

    Returns
    -------
    y : float
        The resultant integral.
    abserr : float
        An estimate of the error.

    See Also
    --------
    quad : Adaptive quadrature using QUADPACK
    dblquad : Double integrals
    nquad : N-dimensional integrals

    Notes
    -----
    For valid results, the integral must converge; behavior for divergent
    integrals is not guaranteed.  ``qagse`` handles the finite-limit levels
    and ``qagie`` any level with one or two infinite limits [1]_.

    References
    ----------
    .. [1] Piessens, Robert; de Doncker-Kapenga, Elise;
           Überhuber, Christoph W.; Kahaner, David (1983).
           QUADPACK: A subroutine package for automatic integration.
           Springer-Verlag.
           ISBN 978-3-540-12553-2.

    Examples
    --------
    Compute the triple integral of ``x * y * z``, over ``x`` ranging
    from 1 to 2, ``y`` ranging from 2 to 3, ``z`` ranging from 0 to 1.

    >>> import numpy as np
    >>> from scipy import integrate
    >>> f = lambda z, y, x: x*y*z
    >>> integrate.tplquad(f, 1, 2, 2, 3, 0, 1)
    (1.8749999999999998, 3.3246447942574074e-14)

    Note: `qfun`/`rfun` takes arguments in the order (x, y), even though
    ``func`` takes arguments in the order (z, y, x).

    >>> integrate.tplquad(f, 0, 1, 0, lambda x: 1-2*x, 0, lambda x, y: 1-x-2*y)
    (0.05416666666666668, 2.1774196738157757e-14)
    """
    # nquad integrates innermost-first and hands each callable range the
    # already-fixed outer variables: the z-range callable receives (y, x, ...),
    # the y-range callable receives (x, ...).  qfun/rfun expect (x, y), hence
    # the argument swap below.  Constants are allowed for all boundaries.
    def z_range(*a_):
        y, x = a_[0], a_[1]
        lo = qfun(x, y) if callable(qfun) else qfun
        hi = rfun(x, y) if callable(rfun) else rfun
        return [lo, hi]

    def y_range(*a_):
        x = a_[0]
        lo = gfun(x) if callable(gfun) else gfun
        hi = hfun(x) if callable(hfun) else hfun
        return [lo, hi]

    return nquad(func, [z_range, y_range, [a, b]], args=args,
                 opts={"epsabs": epsabs, "epsrel": epsrel})
966
+
967
+
968
def nquad(func, ranges, args=None, opts=None, full_output=False):
    r"""
    Integration over multiple variables.

    Wraps `quad` to enable integration over multiple variables: the
    integral of ``func(x0, ..., xn, t0, ..., tm)`` is evaluated by nesting
    one-dimensional `quad` calls, with ``x0`` the innermost integration
    variable and ``xn`` the outermost.

    Parameters
    ----------
    func : {callable, scipy.LowLevelCallable}
        Function to be integrated, with signature
        ``func(x0, x1, ..., xn, t0, t1, ..., tm)`` where the ``xi`` are the
        (float) integration variables and the ``ti`` are the extra
        parameters supplied through `args`.  For improved performance a
        `scipy.LowLevelCallable` with signature
        ``double func(int n, double *xx)`` or
        ``double func(int n, double *xx, void *user_data)`` may be used,
        where ``xx`` holds the coordinates and extra arguments.
    ranges : iterable object
        Each element is either a sequence of 2 numbers giving the limits,
        or a callable returning such a sequence.  ``ranges[0]`` corresponds
        to integration over ``x0``, and so on.  A callable element is
        invoked with all outer integration variables followed by the
        parametric arguments, e.g. ``(a, b) = range0(x1, x2, t0, t1)``.
    args : iterable object, optional
        Additional arguments ``t0, ..., tm`` required by `func`, `ranges`
        and `opts`.
    opts : iterable object or dict, optional
        Options passed through to `quad` (``epsabs``, ``epsrel``,
        ``limit``, ``points``, ``weight``, ``wvar``, ``wopts``).  A single
        dict applies to every integration level; a sequence supplies one
        dict (or callable returning a dict, with the same calling
        convention as `ranges`) per level.
    full_output : bool, optional
        If True, additionally return a dict with the total number of
        integrand evaluations ``neval``.

    Returns
    -------
    result : float
        The result of the integration.
    abserr : float
        The maximum of the estimates of the absolute error in the various
        integration results.
    out_dict : dict, optional
        A dict containing additional information on the integration;
        returned only when ``full_output`` is True.

    See Also
    --------
    quad : 1-D numerical integration
    dblquad, tplquad : double and triple integrals
    fixed_quad : fixed-order Gaussian quadrature
    quadrature : adaptive Gaussian quadrature

    Notes
    -----
    For valid results, the integral must converge; behavior for divergent
    integrals is not guaranteed.  The QUADPACK routine used at each level
    depends on the ``weight``/``points`` options and on whether the limits
    are infinite, exactly as for `quad`.
    """
    n_levels = len(ranges)
    # Normalise every limits entry to a callable so the recursive driver
    # can treat constant and parametric limits uniformly.
    range_fns = [entry if callable(entry) else _RangeFunc(entry)
                 for entry in ranges]
    if args is None:
        args = ()
    if opts is None:
        opts = [dict([])] * n_levels

    if isinstance(opts, dict):
        # One dict shared by every integration level.
        opt_fns = [_OptFunc(opts)] * n_levels
    else:
        opt_fns = [entry if callable(entry) else _OptFunc(entry)
                   for entry in opts]
    return _NQuad(func, range_fns, opt_fns, full_output).integrate(*args)
1219
+
1220
+
1221
+ class _RangeFunc:
1222
+ def __init__(self, range_):
1223
+ self.range_ = range_
1224
+
1225
+ def __call__(self, *args):
1226
+ """Return stored value.
1227
+
1228
+ *args needed because range_ can be float or func, and is called with
1229
+ variable number of parameters.
1230
+ """
1231
+ return self.range_
1232
+
1233
+
1234
+ class _OptFunc:
1235
+ def __init__(self, opt):
1236
+ self.opt = opt
1237
+
1238
+ def __call__(self, *args):
1239
+ """Return stored dict."""
1240
+ return self.opt
1241
+
1242
+
1243
+ class _NQuad:
1244
+ def __init__(self, func, ranges, opts, full_output):
1245
+ self.abserr = 0
1246
+ self.func = func
1247
+ self.ranges = ranges
1248
+ self.opts = opts
1249
+ self.maxdepth = len(ranges)
1250
+ self.full_output = full_output
1251
+ if self.full_output:
1252
+ self.out_dict = {'neval': 0}
1253
+
1254
+ def integrate(self, *args, **kwargs):
1255
+ depth = kwargs.pop('depth', 0)
1256
+ if kwargs:
1257
+ raise ValueError('unexpected kwargs')
1258
+
1259
+ # Get the integration range and options for this depth.
1260
+ ind = -(depth + 1)
1261
+ fn_range = self.ranges[ind]
1262
+ low, high = fn_range(*args)
1263
+ fn_opt = self.opts[ind]
1264
+ opt = dict(fn_opt(*args))
1265
+
1266
+ if 'points' in opt:
1267
+ opt['points'] = [x for x in opt['points'] if low <= x <= high]
1268
+ if depth + 1 == self.maxdepth:
1269
+ f = self.func
1270
+ else:
1271
+ f = partial(self.integrate, depth=depth+1)
1272
+ quad_r = quad(f, low, high, args=args, full_output=self.full_output,
1273
+ **opt)
1274
+ value = quad_r[0]
1275
+ abserr = quad_r[1]
1276
+ if self.full_output:
1277
+ infodict = quad_r[2]
1278
+ # The 'neval' parameter in full_output returns the total
1279
+ # number of times the integrand function was evaluated.
1280
+ # Therefore, only the innermost integration loop counts.
1281
+ if depth + 1 == self.maxdepth:
1282
+ self.out_dict['neval'] += infodict['neval']
1283
+ self.abserr = max(self.abserr, abserr)
1284
+ if depth > 0:
1285
+ return value
1286
+ else:
1287
+ # Final result of N-D integration with error
1288
+ if self.full_output:
1289
+ return value, self.abserr, self.out_dict
1290
+ else:
1291
+ return value, self.abserr
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_quadrature.py ADDED
@@ -0,0 +1,1830 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ from typing import TYPE_CHECKING, Callable, Any, cast
3
+ import numpy as np
4
+ import numpy.typing as npt
5
+ import math
6
+ import warnings
7
+ from collections import namedtuple
8
+
9
+ from scipy.special import roots_legendre
10
+ from scipy.special import gammaln, logsumexp
11
+ from scipy._lib._util import _rng_spawn
12
+ from scipy._lib.deprecation import (_NoValue, _deprecate_positional_args,
13
+ _deprecated)
14
+
15
+
16
# Public API of this module.  Several names are deprecated aliases kept
# for backwards compatibility (trapz, cumtrapz, simps) or deprecated
# routines pending removal (quadrature, romberg).
__all__ = ['fixed_quad', 'quadrature', 'romberg', 'romb',
           'trapezoid', 'trapz', 'simps', 'simpson',
           'cumulative_trapezoid', 'cumtrapz', 'newton_cotes',
           'qmc_quad', 'AccuracyWarning', 'cumulative_simpson']
20
+
21
+
22
def trapezoid(y, x=None, dx=1.0, axis=-1):
    r"""
    Integrate along the given axis using the composite trapezoidal rule.

    If `x` is provided, the integration happens in sequence along its
    elements - they are not sorted.  When `x` is given this computes the
    integral along the parametric curve,
    :math:`\int_t y(t) \left.\frac{dx}{dt}\right|_{x=x(t)} dt`.

    Parameters
    ----------
    y : array_like
        Input array to integrate.
    x : array_like, optional
        The sample points corresponding to the `y` values.  If None
        (default), the points are assumed evenly spaced `dx` apart.
    dx : scalar, optional
        The spacing between sample points when `x` is None.  Default is 1.
    axis : int, optional
        The axis along which to integrate.  Default is the last axis.

    Returns
    -------
    trapezoid : float or ndarray
        Definite integral of `y` as approximated by the trapezoidal rule;
        a float for 1-D input, otherwise an array with the integration
        axis removed.

    See Also
    --------
    cumulative_trapezoid, simpson, romb

    References
    ----------
    .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule
    """
    y = np.asanyarray(y)
    if x is None:
        spacing = dx
    else:
        x = np.asanyarray(x)
        if x.ndim == 1:
            spacing = np.diff(x)
            # Reshape the 1-D spacing so it broadcasts against y along
            # the integration axis only.
            bcast_shape = [1] * y.ndim
            bcast_shape[axis] = spacing.shape[0]
            spacing = spacing.reshape(bcast_shape)
        else:
            spacing = np.diff(x, axis=axis)

    ndim = y.ndim
    upper = [slice(None)] * ndim
    lower = [slice(None)] * ndim
    upper[axis] = slice(1, None)
    lower[axis] = slice(None, -1)
    upper = tuple(upper)
    lower = tuple(lower)
    try:
        result = (spacing * (y[upper] + y[lower]) / 2.0).sum(axis)
    except ValueError:
        # ndarray subclasses may refuse to combine; retry on plain arrays.
        spacing = np.asarray(spacing)
        y = np.asarray(y)
        result = np.add.reduce(spacing * (y[upper] + y[lower]) / 2.0, axis)
    return result
150
+
151
+
152
+ # Note: alias kept for backwards compatibility. Rename was done
153
+ # because trapz is a slur in colloquial English (see gh-12924).
154
def trapz(y, x=None, dx=1.0, axis=-1):
    """An alias of `trapezoid`.

    `trapz` is kept for backwards compatibility. For new code, prefer
    `trapezoid` instead.
    """
    # Deprecated alias: warn, then delegate unchanged to `trapezoid`.
    warnings.warn(
        "'scipy.integrate.trapz' is deprecated in favour of "
        "'scipy.integrate.trapezoid' and will be removed in SciPy 1.14.0",
        DeprecationWarning, stacklevel=2)
    return trapezoid(y, x=x, dx=dx, axis=axis)
164
+
165
+
166
class AccuracyWarning(Warning):
    """Warning issued by the iterative quadrature routines in this module
    when the requested tolerance is not reached within the iteration
    limit."""
    pass
168
+
169
+
170
if TYPE_CHECKING:
    # workaround for mypy function attributes see:
    # https://github.com/python/mypy/issues/2087#issuecomment-462726600
    from typing import Protocol

    class CacheAttributes(Protocol):
        # Declares the ``cache`` attribute that is attached to
        # ``_cached_roots_legendre`` below, so static type checkers
        # accept ``func.cache`` reads/writes.
        cache: dict[int, tuple[Any, Any]]
else:
    # At runtime the Protocol is unnecessary; a plain Callable suffices.
    CacheAttributes = Callable
179
+
180
+
181
def cache_decorator(func: Callable) -> CacheAttributes:
    """Identity decorator that only re-types `func` as `CacheAttributes`
    so mypy allows attaching/reading a ``cache`` attribute on it."""
    return cast(CacheAttributes, func)
183
+
184
+
185
@cache_decorator
def _cached_roots_legendre(n):
    """
    Cache roots_legendre results to speed up calls of the fixed_quad
    function.
    """
    # EAFP: a cache hit is the common case once an order has been used.
    try:
        return _cached_roots_legendre.cache[n]
    except KeyError:
        pair = _cached_roots_legendre.cache[n] = roots_legendre(n)
        return pair


# Function attribute serving as the memoization table (order -> roots/weights).
_cached_roots_legendre.cache = dict()
199
+
200
+
201
def fixed_quad(func, a, b, args=(), n=5):
    """
    Compute a definite integral using fixed-order Gaussian quadrature.

    Integrate `func` from `a` to `b` using Gaussian quadrature of
    order `n`.

    Parameters
    ----------
    func : callable
        A Python function or method to integrate (must accept vector
        inputs).  A vector-valued integrand must return an array of shape
        ``(..., len(x))``.
    a : float
        Lower limit of integration.
    b : float
        Upper limit of integration.
    args : tuple, optional
        Extra arguments to pass to function, if any.
    n : int, optional
        Order of quadrature integration. Default is 5.

    Returns
    -------
    val : float
        Gaussian quadrature approximation to the integral.
    none : None
        Statically returned value of None.

    Raises
    ------
    ValueError
        If `a` or `b` is infinite.

    See Also
    --------
    quad : adaptive quadrature using QUADPACK
    dblquad, tplquad : double and triple integrals
    romb, simpson : integrators for sampled data
    """
    nodes, weights = _cached_roots_legendre(n)
    nodes = np.real(nodes)
    if np.isinf(a) or np.isinf(b):
        raise ValueError("Gaussian quadrature is only available for "
                         "finite limits.")
    # Affine map of the Legendre nodes from [-1, 1] onto [a, b].
    t = (b-a)*(nodes+1)/2.0 + a
    return (b-a)/2.0 * np.sum(weights*func(t, *args), axis=-1), None
270
+
271
+
272
def vectorize1(func, args=(), vec_func=False):
    """Vectorize the call to a function.

    This is an internal utility function used by `romberg` and
    `quadrature` to create a vectorized version of a function.

    Parameters
    ----------
    func : callable
        User defined function.
    args : tuple, optional
        Extra arguments for the function.
    vec_func : bool, optional
        True if `func` already accepts vector arguments.

    Returns
    -------
    vfunc : callable
        A function accepting either a scalar or a vector argument and
        returning the corresponding result.
    """
    if vec_func:
        # The function already handles arrays; just bind the extra args.
        def vfunc(x):
            return func(x, *args)
        return vfunc

    def vfunc(x):
        if np.isscalar(x):
            return func(x, *args)
        x = np.asarray(x)
        # Evaluate the first point to discover the output dtype.
        first = func(x[0], *args)
        out = np.empty(len(x), dtype=getattr(first, 'dtype', type(first)))
        out[0] = first
        for k, xk in enumerate(x[1:], start=1):
            out[k] = func(xk, *args)
        return out
    return vfunc
315
+
316
+
317
# Bug fix: the implicitly-concatenated literals below previously lacked
# separating spaces, producing "SciPy 1.12.0and will be removed ... Please
# use`scipy.integrate.quad`" in the emitted deprecation message.
@_deprecated("`scipy.integrate.quadrature` is deprecated as of SciPy 1.12.0 "
             "and will be removed in SciPy 1.15.0. Please use "
             "`scipy.integrate.quad` instead.")
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
               vec_func=True, miniter=1):
    """
    Compute a definite integral using fixed-tolerance Gaussian quadrature.

    .. deprecated:: 1.12.0

        This function is deprecated as of SciPy 1.12.0 and will be removed
        in SciPy 1.15.0. Please use `scipy.integrate.quad` instead.

    Integrate `func` from `a` to `b` using Gaussian quadrature
    with absolute tolerance `tol`.

    Parameters
    ----------
    func : function
        A Python function or method to integrate.
    a : float
        Lower limit of integration.
    b : float
        Upper limit of integration.
    args : tuple, optional
        Extra arguments to pass to function.
    tol, rtol : float, optional
        Iteration stops when error between last two iterates is less than
        `tol` OR the relative change is less than `rtol`.
    maxiter : int, optional
        Maximum order of Gaussian quadrature.
    vec_func : bool, optional
        True or False if func handles arrays as arguments (is
        a "vector" function). Default is True.
    miniter : int, optional
        Minimum order of Gaussian quadrature.

    Returns
    -------
    val : float
        Gaussian quadrature approximation (within tolerance) to integral.
    err : float
        Difference between last two estimates of the integral.

    Warns
    -----
    AccuracyWarning
        If neither tolerance is met within `maxiter` iterations.

    See Also
    --------
    fixed_quad : fixed-order Gaussian quadrature
    quad : adaptive quadrature using QUADPACK
    """
    if not isinstance(args, tuple):
        args = (args,)
    vfunc = vectorize1(func, args, vec_func=vec_func)
    val = np.inf
    err = np.inf
    # Guarantee at least one comparison between successive orders.
    maxiter = max(miniter+1, maxiter)
    for n in range(miniter, maxiter+1):
        newval = fixed_quad(vfunc, a, b, (), n)[0]
        err = abs(newval-val)
        val = newval

        if err < tol or err < rtol*abs(val):
            break
    else:
        # Loop exhausted without convergence: warn but still return the
        # best estimate obtained so far.
        warnings.warn(
            "maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
            AccuracyWarning, stacklevel=2
        )
    return val, err
409
+
410
+
411
def tupleset(t, i, value):
    """Return a copy of tuple `t` with element `i` replaced by `value`."""
    # Tuples are immutable, so round-trip through a mutable list.
    items = list(t)
    items[i] = value
    return tuple(items)
415
+
416
+
417
+ # Note: alias kept for backwards compatibility. Rename was done
418
+ # because cumtrapz is a slur in colloquial English (see gh-12924).
419
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
    """An alias of `cumulative_trapezoid`.

    `cumtrapz` is kept for backwards compatibility. For new code, prefer
    `cumulative_trapezoid` instead.
    """
    # Deprecated alias: warn, then delegate unchanged.
    warnings.warn(
        "'scipy.integrate.cumtrapz' is deprecated in favour of "
        "'scipy.integrate.cumulative_trapezoid' and will be removed "
        "in SciPy 1.14.0",
        DeprecationWarning, stacklevel=2)
    return cumulative_trapezoid(y, x=x, dx=dx, axis=axis, initial=initial)
430
+
431
+
432
def cumulative_trapezoid(y, x=None, dx=1.0, axis=-1, initial=None):
    """
    Cumulatively integrate y(x) using the composite trapezoidal rule.

    Parameters
    ----------
    y : array_like
        Values to integrate.
    x : array_like, optional
        The coordinate to integrate along. If None (default), use spacing `dx`
        between consecutive elements in `y`.
    dx : float, optional
        Spacing between elements of `y`. Only used if `x` is None.
    axis : int, optional
        Specifies the axis to cumulate. Default is -1 (last axis).
    initial : scalar, optional
        If given, insert this value at the beginning of the returned result.
        0 or None are the only values accepted. Default is None, which means
        `res` has one element less than `y` along the axis of integration.

        .. deprecated:: 1.12.0
            The option for non-zero inputs for `initial` will be deprecated in
            SciPy 1.15.0. After this time, a ValueError will be raised if
            `initial` is not None or 0.

    Returns
    -------
    res : ndarray
        The result of cumulative integration of `y` along `axis`.
        If `initial` is None, the shape is such that the axis of integration
        has one less value than `y`. If `initial` is given, the shape is equal
        to that of `y`.

    See Also
    --------
    numpy.cumsum, numpy.cumprod
    cumulative_simpson : cumulative integration using Simpson's 1/3 rule
    quad : adaptive quadrature using QUADPACK
    romberg : adaptive Romberg quadrature
    quadrature : adaptive Gaussian quadrature
    fixed_quad : fixed-order Gaussian quadrature
    dblquad : double integrals
    tplquad : triple integrals
    romb : integrators for sampled data
    ode : ODE integrators
    odeint : ODE integrators

    Examples
    --------
    >>> from scipy import integrate
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt

    >>> x = np.linspace(-2, 2, num=20)
    >>> y = x
    >>> y_int = integrate.cumulative_trapezoid(y, x, initial=0)
    >>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
    >>> plt.show()

    """
    y = np.asarray(y)
    if y.shape[axis] == 0:
        raise ValueError("At least one point is required along `axis`.")

    # Determine the per-interval spacing: either the scalar `dx`, or the
    # consecutive differences of the supplied coordinates, broadcast so
    # they line up with `y` along `axis`.
    if x is None:
        spacing = dx
    else:
        x = np.asarray(x)
        if x.ndim == 1:
            bcast = [1] * y.ndim
            bcast[axis] = -1
            spacing = np.diff(x).reshape(bcast)
        elif len(x.shape) != len(y.shape):
            raise ValueError("If given, shape of x must be 1-D or the "
                             "same as y.")
        else:
            spacing = np.diff(x, axis=axis)

        if spacing.shape[axis] != y.shape[axis] - 1:
            raise ValueError("If given, length of x along axis must be the "
                             "same as y.")

    # Trapezoid areas spacing * (y[i] + y[i+1]) / 2, accumulated along `axis`.
    upper = [slice(None)] * y.ndim
    lower = [slice(None)] * y.ndim
    upper[axis] = slice(1, None)
    lower[axis] = slice(None, -1)
    res = np.cumsum(spacing * (y[tuple(upper)] + y[tuple(lower)]) / 2.0,
                    axis=axis)

    if initial is not None:
        # Non-zero `initial` is deprecated; warn but keep the old behavior.
        if initial != 0:
            warnings.warn(
                "The option for values for `initial` other than None or 0 is "
                "deprecated as of SciPy 1.12.0 and will raise a value error in"
                " SciPy 1.15.0.",
                DeprecationWarning, stacklevel=2
            )
        if not np.isscalar(initial):
            raise ValueError("`initial` parameter should be a scalar.")

        # Prepend a length-1 slab of `initial` along the integration axis.
        pad_shape = list(res.shape)
        pad_shape[axis] = 1
        pad = np.full(pad_shape, initial, dtype=res.dtype)
        res = np.concatenate([pad, res], axis=axis)

    return res
537
+
538
+
539
+ def _basic_simpson(y, start, stop, x, dx, axis):
540
+ nd = len(y.shape)
541
+ if start is None:
542
+ start = 0
543
+ step = 2
544
+ slice_all = (slice(None),)*nd
545
+ slice0 = tupleset(slice_all, axis, slice(start, stop, step))
546
+ slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
547
+ slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))
548
+
549
+ if x is None: # Even-spaced Simpson's rule.
550
+ result = np.sum(y[slice0] + 4.0*y[slice1] + y[slice2], axis=axis)
551
+ result *= dx / 3.0
552
+ else:
553
+ # Account for possibly different spacings.
554
+ # Simpson's rule changes a bit.
555
+ h = np.diff(x, axis=axis)
556
+ sl0 = tupleset(slice_all, axis, slice(start, stop, step))
557
+ sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
558
+ h0 = h[sl0].astype(float, copy=False)
559
+ h1 = h[sl1].astype(float, copy=False)
560
+ hsum = h0 + h1
561
+ hprod = h0 * h1
562
+ h0divh1 = np.true_divide(h0, h1, out=np.zeros_like(h0), where=h1 != 0)
563
+ tmp = hsum/6.0 * (y[slice0] *
564
+ (2.0 - np.true_divide(1.0, h0divh1,
565
+ out=np.zeros_like(h0divh1),
566
+ where=h0divh1 != 0)) +
567
+ y[slice1] * (hsum *
568
+ np.true_divide(hsum, hprod,
569
+ out=np.zeros_like(hsum),
570
+ where=hprod != 0)) +
571
+ y[slice2] * (2.0 - h0divh1))
572
+ result = np.sum(tmp, axis=axis)
573
+ return result
574
+
575
+
576
# Note: alias kept for backwards compatibility. simps was renamed to simpson
# because the former is a slur in colloquial English (see gh-12924).
def simps(y, x=None, dx=1.0, axis=-1, even=_NoValue):
    """An alias of `simpson`.

    `simps` is kept for backwards compatibility. For new code, prefer
    `simpson` instead.
    """
    # Warn on every call; this wrapper is scheduled for removal.
    msg = ("'scipy.integrate.simps' is deprecated in favour of "
           "'scipy.integrate.simpson' and will be removed in SciPy 1.14.0")
    warnings.warn(msg, DeprecationWarning, stacklevel=2)
    # we don't deprecate positional use as the wrapper is going away completely
    return simpson(y, x=x, dx=dx, axis=axis, even=even)
589
+
590
+
591
@_deprecate_positional_args(version="1.14")
def simpson(y, *, x=None, dx=1.0, axis=-1, even=_NoValue):
    """
    Integrate y(x) using samples along the given axis and the composite
    Simpson's rule. If x is None, spacing of dx is assumed.

    If there are an even number of samples, N, then there are an odd
    number of intervals (N-1), but Simpson's rule requires an even number
    of intervals. The parameter 'even' controls how this is handled.

    Parameters
    ----------
    y : array_like
        Array to be integrated.
    x : array_like, optional
        If given, the points at which `y` is sampled.
    dx : float, optional
        Spacing of integration points along axis of `x`. Only used when
        `x` is None. Default is 1.
    axis : int, optional
        Axis along which to integrate. Default is the last axis.
    even : {None, 'simpson', 'avg', 'first', 'last'}, optional
        'avg' : Average two results:
            1) use the first N-2 intervals with
            a trapezoidal rule on the last interval and
            2) use the last
            N-2 intervals with a trapezoidal rule on the first interval.

        'first' : Use Simpson's rule for the first N-2 intervals with
            a trapezoidal rule on the last interval.

        'last' : Use Simpson's rule for the last N-2 intervals with a
            trapezoidal rule on the first interval.

        None : equivalent to 'simpson' (default)

        'simpson' : Use Simpson's rule for the first N-2 intervals with the
            addition of a 3-point parabolic segment for the last
            interval using equations outlined by Cartwright [1]_.
            If the axis to be integrated over only has two points then
            the integration falls back to a trapezoidal integration.

            .. versionadded:: 1.11.0

        .. versionchanged:: 1.11.0
            The newly added 'simpson' option is now the default as it is more
            accurate in most situations.

        .. deprecated:: 1.11.0
            Parameter `even` is deprecated and will be removed in SciPy
            1.14.0. After this time the behaviour for an even number of
            points will follow that of `even='simpson'`.

    Returns
    -------
    float
        The estimated integral computed with the composite Simpson's rule.

    See Also
    --------
    quad : adaptive quadrature using QUADPACK
    romberg : adaptive Romberg quadrature
    quadrature : adaptive Gaussian quadrature
    fixed_quad : fixed-order Gaussian quadrature
    dblquad : double integrals
    tplquad : triple integrals
    romb : integrators for sampled data
    cumulative_trapezoid : cumulative integration for sampled data
    cumulative_simpson : cumulative integration using Simpson's 1/3 rule
    ode : ODE integrators
    odeint : ODE integrators

    Notes
    -----
    For an odd number of samples that are equally spaced the result is
    exact if the function is a polynomial of order 3 or less. If
    the samples are not equally spaced, then the result is exact only
    if the function is a polynomial of order 2 or less.

    References
    ----------
    .. [1] Cartwright, Kenneth V. Simpson's Rule Cumulative Integration with
           MS Excel and Irregularly-spaced Data. Journal of Mathematical
           Sciences and Mathematics Education. 12 (2): 1-9

    Examples
    --------
    >>> from scipy import integrate
    >>> import numpy as np
    >>> x = np.arange(0, 10)
    >>> y = np.arange(0, 10)

    >>> integrate.simpson(y, x=x)
    40.5

    >>> y = np.power(x, 3)
    >>> integrate.simpson(y, x=x)
    1640.5
    >>> integrate.quad(lambda x: x**3, 0, 9)[0]
    1640.25

    >>> integrate.simpson(y, x=x, even='first')
    1644.5

    """
    y = np.asarray(y)
    nd = len(y.shape)
    N = y.shape[axis]
    last_dx = dx
    first_dx = dx
    returnshape = 0
    if x is not None:
        x = np.asarray(x)
        if len(x.shape) == 1:
            # Reshape 1-D x so it broadcasts against y along `axis`;
            # remember the original shape so it can be restored on exit.
            shapex = [1] * nd
            shapex[axis] = x.shape[0]
            saveshape = x.shape
            returnshape = 1
            x = x.reshape(tuple(shapex))
        elif len(x.shape) != len(y.shape):
            raise ValueError("If given, shape of x must be 1-D or the "
                             "same as y.")
        if x.shape[axis] != N:
            raise ValueError("If given, length of x along axis must be the "
                             "same as y.")

    # even keyword parameter is deprecated
    if even is not _NoValue:
        warnings.warn(
            "The 'even' keyword is deprecated as of SciPy 1.11.0 and will be "
            "removed in SciPy 1.14.0",
            DeprecationWarning, stacklevel=2
        )

    if N % 2 == 0:
        # Even number of samples -> odd number of intervals; handle the
        # leftover interval according to `even`.
        val = 0.0
        result = 0.0
        slice_all = (slice(None),) * nd

        # default is 'simpson'
        even = even if even not in (_NoValue, None) else "simpson"

        if even not in ['avg', 'last', 'first', 'simpson']:
            raise ValueError(
                "Parameter 'even' must be 'simpson', "
                "'avg', 'last', or 'first'."
            )

        if N == 2:
            # need at least 3 points in integration axis to form parabolic
            # segment. If there are two points then any of 'avg', 'first',
            # 'last' should give the same result.
            slice1 = tupleset(slice_all, axis, -1)
            slice2 = tupleset(slice_all, axis, -2)
            if x is not None:
                last_dx = x[slice1] - x[slice2]
            val += 0.5 * last_dx * (y[slice1] + y[slice2])

            # calculation is finished. Set `even` to None to skip other
            # scenarios
            even = None

        if even == 'simpson':
            # use Simpson's rule on first intervals
            result = _basic_simpson(y, 0, N-3, x, dx, axis)

            slice1 = tupleset(slice_all, axis, -1)
            slice2 = tupleset(slice_all, axis, -2)
            slice3 = tupleset(slice_all, axis, -3)

            h = np.asarray([dx, dx], dtype=np.float64)
            if x is not None:
                # grab the last two spacings from the appropriate axis
                hm2 = tupleset(slice_all, axis, slice(-2, -1, 1))
                hm1 = tupleset(slice_all, axis, slice(-1, None, 1))

                diffs = np.float64(np.diff(x, axis=axis))
                h = [np.squeeze(diffs[hm2], axis=axis),
                     np.squeeze(diffs[hm1], axis=axis)]

            # This is the correction for the last interval according to
            # Cartwright.
            # However, I used the equations given at
            # https://en.wikipedia.org/wiki/Simpson%27s_rule#Composite_Simpson's_rule_for_irregularly_spaced_data
            # A footnote on Wikipedia says:
            # Cartwright 2017, Equation 8. The equation in Cartwright is
            # calculating the first interval whereas the equations in the
            # Wikipedia article are adjusting for the last integral. If the
            # proper algebraic substitutions are made, the equation results in
            # the values shown.
            # Each coefficient uses a masked divide so zero-width intervals
            # contribute 0 instead of raising/propagating inf.
            num = 2 * h[1] ** 2 + 3 * h[0] * h[1]
            den = 6 * (h[1] + h[0])
            alpha = np.true_divide(
                num,
                den,
                out=np.zeros_like(den),
                where=den != 0
            )

            num = h[1] ** 2 + 3.0 * h[0] * h[1]
            den = 6 * h[0]
            beta = np.true_divide(
                num,
                den,
                out=np.zeros_like(den),
                where=den != 0
            )

            num = 1 * h[1] ** 3
            den = 6 * h[0] * (h[0] + h[1])
            eta = np.true_divide(
                num,
                den,
                out=np.zeros_like(den),
                where=den != 0
            )

            result += alpha*y[slice1] + beta*y[slice2] - eta*y[slice3]

        # The following code (down to result=result+val) can be removed
        # once the 'even' keyword is removed.

        # Compute using Simpson's rule on first intervals
        if even in ['avg', 'first']:
            slice1 = tupleset(slice_all, axis, -1)
            slice2 = tupleset(slice_all, axis, -2)
            if x is not None:
                last_dx = x[slice1] - x[slice2]
            val += 0.5*last_dx*(y[slice1]+y[slice2])
            result = _basic_simpson(y, 0, N-3, x, dx, axis)
        # Compute using Simpson's rule on last set of intervals
        if even in ['avg', 'last']:
            slice1 = tupleset(slice_all, axis, 0)
            slice2 = tupleset(slice_all, axis, 1)
            if x is not None:
                first_dx = x[tuple(slice2)] - x[tuple(slice1)]
            val += 0.5*first_dx*(y[slice2]+y[slice1])
            result += _basic_simpson(y, 1, N-2, x, dx, axis)
        if even == 'avg':
            val /= 2.0
            result /= 2.0
        result = result + val
    else:
        # Odd number of samples: plain composite Simpson's rule.
        result = _basic_simpson(y, 0, N-2, x, dx, axis)
    if returnshape:
        x = x.reshape(saveshape)
    return result
838
+
839
+
840
+ def _cumulatively_sum_simpson_integrals(
841
+ y: np.ndarray,
842
+ dx: np.ndarray,
843
+ integration_func: Callable[[np.ndarray, np.ndarray], np.ndarray],
844
+ ) -> np.ndarray:
845
+ """Calculate cumulative sum of Simpson integrals.
846
+ Takes as input the integration function to be used.
847
+ The integration_func is assumed to return the cumulative sum using
848
+ composite Simpson's rule. Assumes the axis of summation is -1.
849
+ """
850
+ sub_integrals_h1 = integration_func(y, dx)
851
+ sub_integrals_h2 = integration_func(y[..., ::-1], dx[..., ::-1])[..., ::-1]
852
+
853
+ shape = list(sub_integrals_h1.shape)
854
+ shape[-1] += 1
855
+ sub_integrals = np.empty(shape)
856
+ sub_integrals[..., :-1:2] = sub_integrals_h1[..., ::2]
857
+ sub_integrals[..., 1::2] = sub_integrals_h2[..., ::2]
858
+ # Integral over last subinterval can only be calculated from
859
+ # formula for h2
860
+ sub_integrals[..., -1] = sub_integrals_h2[..., -1]
861
+ res = np.cumsum(sub_integrals, axis=-1)
862
+ return res
863
+
864
+
865
+ def _cumulative_simpson_equal_intervals(y: np.ndarray, dx: np.ndarray) -> np.ndarray:
866
+ """Calculate the Simpson integrals for all h1 intervals assuming equal interval
867
+ widths. The function can also be used to calculate the integral for all
868
+ h2 intervals by reversing the inputs, `y` and `dx`.
869
+ """
870
+ d = dx[..., :-1]
871
+ f1 = y[..., :-2]
872
+ f2 = y[..., 1:-1]
873
+ f3 = y[..., 2:]
874
+
875
+ # Calculate integral over the subintervals (eqn (10) of Reference [2])
876
+ return d / 3 * (5 * f1 / 4 + 2 * f2 - f3 / 4)
877
+
878
+
879
+ def _cumulative_simpson_unequal_intervals(y: np.ndarray, dx: np.ndarray) -> np.ndarray:
880
+ """Calculate the Simpson integrals for all h1 intervals assuming unequal interval
881
+ widths. The function can also be used to calculate the integral for all
882
+ h2 intervals by reversing the inputs, `y` and `dx`.
883
+ """
884
+ x21 = dx[..., :-1]
885
+ x32 = dx[..., 1:]
886
+ f1 = y[..., :-2]
887
+ f2 = y[..., 1:-1]
888
+ f3 = y[..., 2:]
889
+
890
+ x31 = x21 + x32
891
+ x21_x31 = x21/x31
892
+ x21_x32 = x21/x32
893
+ x21x21_x31x32 = x21_x31 * x21_x32
894
+
895
+ # Calculate integral over the subintervals (eqn (8) of Reference [2])
896
+ coeff1 = 3 - x21_x31
897
+ coeff2 = 3 + x21x21_x31x32 + x21_x31
898
+ coeff3 = -x21x21_x31x32
899
+
900
+ return x21/6 * (coeff1*f1 + coeff2*f2 + coeff3*f3)
901
+
902
+
903
+ def _ensure_float_array(arr: npt.ArrayLike) -> np.ndarray:
904
+ arr = np.asarray(arr)
905
+ if np.issubdtype(arr.dtype, np.integer):
906
+ arr = arr.astype(float, copy=False)
907
+ return arr
908
+
909
+
910
def cumulative_simpson(y, *, x=None, dx=1.0, axis=-1, initial=None):
    r"""
    Cumulatively integrate y(x) using the composite Simpson's 1/3 rule.
    The integral of the samples at every point is calculated by assuming a
    quadratic relationship between each point and the two adjacent points.

    Parameters
    ----------
    y : array_like
        Values to integrate. Requires at least one point along `axis`. If two or fewer
        points are provided along `axis`, Simpson's integration is not possible and the
        result is calculated with `cumulative_trapezoid`.
    x : array_like, optional
        The coordinate to integrate along. Must have the same shape as `y` or
        must be 1D with the same length as `y` along `axis`. `x` must also be
        strictly increasing along `axis`.
        If `x` is None (default), integration is performed using spacing `dx`
        between consecutive elements in `y`.
    dx : scalar or array_like, optional
        Spacing between elements of `y`. Only used if `x` is None. Can either
        be a float, or an array with the same shape as `y`, but of length one along
        `axis`. Default is 1.0.
    axis : int, optional
        Specifies the axis to integrate along. Default is -1 (last axis).
    initial : scalar or array_like, optional
        If given, insert this value at the beginning of the returned result,
        and add it to the rest of the result. Default is None, which means no
        value at ``x[0]`` is returned and `res` has one element less than `y`
        along the axis of integration. Can either be a float, or an array with
        the same shape as `y`, but of length one along `axis`.

    Returns
    -------
    res : ndarray
        The result of cumulative integration of `y` along `axis`.
        If `initial` is None, the shape is such that the axis of integration
        has one less value than `y`. If `initial` is given, the shape is equal
        to that of `y`.

    See Also
    --------
    numpy.cumsum
    cumulative_trapezoid : cumulative integration using the composite
        trapezoidal rule
    simpson : integrator for sampled data using the Composite Simpson's Rule

    Notes
    -----

    .. versionadded:: 1.12.0

    The composite Simpson's 1/3 method can be used to approximate the definite
    integral of a sampled input function :math:`y(x)` [1]_. The method assumes
    a quadratic relationship over the interval containing any three consecutive
    sampled points.

    Consider three consecutive points:
    :math:`(x_1, y_1), (x_2, y_2), (x_3, y_3)`.

    Assuming a quadratic relationship over the three points, the integral over
    the subinterval between :math:`x_1` and :math:`x_2` is given by formula
    (8) of [2]_:

    .. math::
        \int_{x_1}^{x_2} y(x) dx\ &= \frac{x_2-x_1}{6}\left[\
        \left\{3-\frac{x_2-x_1}{x_3-x_1}\right\} y_1 + \
        \left\{3 + \frac{(x_2-x_1)^2}{(x_3-x_2)(x_3-x_1)} + \
        \frac{x_2-x_1}{x_3-x_1}\right\} y_2\\
        - \frac{(x_2-x_1)^2}{(x_3-x_2)(x_3-x_1)} y_3\right]

    The integral between :math:`x_2` and :math:`x_3` is given by swapping
    appearances of :math:`x_1` and :math:`x_3`. The integral is estimated
    separately for each subinterval and then cumulatively summed to obtain
    the final result.

    For samples that are equally spaced, the result is exact if the function
    is a polynomial of order three or less [1]_ and the number of subintervals
    is even. Otherwise, the integral is exact for polynomials of order two or
    less.

    References
    ----------
    .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Simpson's_rule
    .. [2] Cartwright, Kenneth V. Simpson's Rule Cumulative Integration with
           MS Excel and Irregularly-spaced Data. Journal of Mathematical
           Sciences and Mathematics Education. 12 (2): 1-9

    Examples
    --------
    >>> from scipy import integrate
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> x = np.linspace(-2, 2, num=20)
    >>> y = x**2
    >>> y_int = integrate.cumulative_simpson(y, x=x, initial=0)
    >>> fig, ax = plt.subplots()
    >>> ax.plot(x, y_int, 'ro', x, x**3/3 - (x[0])**3/3, 'b-')
    >>> ax.grid()
    >>> plt.show()

    The output of `cumulative_simpson` is similar to that of iteratively
    calling `simpson` with successively higher upper limits of integration, but
    not identical.

    >>> def cumulative_simpson_reference(y, x):
    ...     return np.asarray([integrate.simpson(y[:i], x=x[:i])
    ...                        for i in range(2, len(y) + 1)])
    >>>
    >>> rng = np.random.default_rng(354673834679465)
    >>> x, y = rng.random(size=(2, 10))
    >>> x.sort()
    >>>
    >>> res = integrate.cumulative_simpson(y, x=x)
    >>> ref = cumulative_simpson_reference(y, x)
    >>> equal = np.abs(res - ref) < 1e-15
    >>> equal  # not equal when `simpson` has even number of subintervals
    array([False,  True, False,  True, False,  True, False,  True,  True])

    This is expected: because `cumulative_simpson` has access to more
    information than `simpson`, it can typically produce more accurate
    estimates of the underlying integral over subintervals.

    """
    y = _ensure_float_array(y)

    # validate `axis` and standardize to work along the last axis
    original_y = y
    original_shape = y.shape
    try:
        y = np.swapaxes(y, axis, -1)
    except IndexError as e:
        message = f"`axis={axis}` is not valid for `y` with `y.ndim={y.ndim}`."
        raise ValueError(message) from e
    if y.shape[-1] < 3:
        # Too few points for a parabolic fit; fall back to trapezoids.
        res = cumulative_trapezoid(original_y, x, dx=dx, axis=axis, initial=None)
        res = np.swapaxes(res, axis, -1)

    elif x is not None:
        # Coordinates supplied: validate shape, then integrate with the
        # unequal-spacing Simpson weights.
        x = _ensure_float_array(x)
        message = ("If given, shape of `x` must be the same as `y` or 1-D with "
                   "the same length as `y` along `axis`.")
        if not (x.shape == original_shape
                or (x.ndim == 1 and len(x) == original_shape[axis])):
            raise ValueError(message)

        x = np.broadcast_to(x, y.shape) if x.ndim == 1 else np.swapaxes(x, axis, -1)
        dx = np.diff(x, axis=-1)
        if np.any(dx <= 0):
            raise ValueError("Input x must be strictly increasing.")
        res = _cumulatively_sum_simpson_integrals(
            y, dx, _cumulative_simpson_unequal_intervals
        )

    else:
        # No coordinates: broadcast the scalar/one-slab `dx` to one spacing
        # per interval, then use the equal-spacing Simpson weights.
        dx = _ensure_float_array(dx)
        final_dx_shape = tupleset(original_shape, axis, original_shape[axis] - 1)
        alt_input_dx_shape = tupleset(original_shape, axis, 1)
        message = ("If provided, `dx` must either be a scalar or have the same "
                   "shape as `y` but with only 1 point along `axis`.")
        if not (dx.ndim == 0 or dx.shape == alt_input_dx_shape):
            raise ValueError(message)
        dx = np.broadcast_to(dx, final_dx_shape)
        dx = np.swapaxes(dx, axis, -1)
        res = _cumulatively_sum_simpson_integrals(
            y, dx, _cumulative_simpson_equal_intervals
        )

    if initial is not None:
        # Offset the running integral and prepend `initial` along the axis.
        initial = _ensure_float_array(initial)
        alt_initial_input_shape = tupleset(original_shape, axis, 1)
        message = ("If provided, `initial` must either be a scalar or have the "
                   "same shape as `y` but with only 1 point along `axis`.")
        if not (initial.ndim == 0 or initial.shape == alt_initial_input_shape):
            raise ValueError(message)
        initial = np.broadcast_to(initial, alt_initial_input_shape)
        initial = np.swapaxes(initial, axis, -1)

        res += initial
        res = np.concatenate((initial, res), axis=-1)

    # Undo the initial axis swap before returning.
    res = np.swapaxes(res, -1, axis)
    return res
1092
+
1093
+
1094
def romb(y, dx=1.0, axis=-1, show=False):
    """
    Romberg integration using samples of a function.

    Parameters
    ----------
    y : array_like
        A vector of ``2**k + 1`` equally-spaced samples of a function.
    dx : float, optional
        The sample spacing. Default is 1.
    axis : int, optional
        The axis along which to integrate. Default is -1 (last axis).
    show : bool, optional
        When `y` is a single 1-D array, then if this argument is True
        print the table showing Richardson extrapolation from the
        samples. Default is False.

    Returns
    -------
    romb : ndarray
        The integrated result for `axis`.

    See Also
    --------
    quad : adaptive quadrature using QUADPACK
    romberg : adaptive Romberg quadrature
    quadrature : adaptive Gaussian quadrature
    fixed_quad : fixed-order Gaussian quadrature
    dblquad : double integrals
    tplquad : triple integrals
    simpson : integrators for sampled data
    cumulative_trapezoid : cumulative integration for sampled data
    ode : ODE integrators
    odeint : ODE integrators

    Examples
    --------
    >>> from scipy import integrate
    >>> import numpy as np
    >>> x = np.arange(10, 14.25, 0.25)
    >>> y = np.arange(3, 12)

    >>> integrate.romb(y)
    56.0

    >>> y = np.sin(np.power(x, 2.5))
    >>> integrate.romb(y)
    -0.742561336672229

    >>> integrate.romb(y, show=True)
    Richardson Extrapolation Table for Romberg Integration
    ======================================================
    -0.81576
     4.63862  6.45674
    -1.10581 -3.02062 -3.65245
    -2.57379 -3.06311 -3.06595 -3.05664
    -1.34093 -0.92997 -0.78776 -0.75160 -0.74256
    ======================================================
    -0.742561336672229  # may vary

    """
    y = np.asarray(y)
    nd = len(y.shape)
    Nsamps = y.shape[axis]
    Ninterv = Nsamps-1
    # Verify the interval count is a power of two (required by Romberg).
    n = 1
    k = 0
    while n < Ninterv:
        n <<= 1
        k += 1
    if n != Ninterv:
        raise ValueError("Number of samples must be one plus a "
                         "non-negative power of 2.")

    # R[(i, j)] is the Richardson extrapolation table: row i uses 2**i
    # intervals, column j applies j levels of extrapolation.
    R = {}
    slice_all = (slice(None),) * nd
    slice0 = tupleset(slice_all, axis, 0)
    slicem1 = tupleset(slice_all, axis, -1)
    h = Ninterv * np.asarray(dx, dtype=float)
    # Coarsest trapezoid estimate from the two endpoints.
    R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
    slice_R = slice_all
    start = stop = step = Ninterv
    for i in range(1, k+1):
        # Each refinement adds the newly introduced midpoints only.
        start >>= 1
        slice_R = tupleset(slice_R, axis, slice(start, stop, step))
        step >>= 1
        R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
        for j in range(1, i+1):
            prev = R[(i, j-1)]
            # Richardson extrapolation with weight 1/(4**j - 1).
            R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
        h /= 2.0

    if show:
        if not np.isscalar(R[(0, 0)]):
            print("*** Printing table only supported for integrals" +
                  " of a single data set.")
        else:
            # `show` may be a (precision, width) sequence; fall back to
            # defaults when it is a plain boolean.
            try:
                precis = show[0]
            except (TypeError, IndexError):
                precis = 5
            try:
                width = show[1]
            except (TypeError, IndexError):
                width = 8
            formstr = "%%%d.%df" % (width, precis)

            title = "Richardson Extrapolation Table for Romberg Integration"
            print(title, "=" * len(title), sep="\n", end="\n")
            for i in range(k+1):
                for j in range(i+1):
                    print(formstr % R[(i, j)], end=" ")
                print()
            print("=" * len(title))

    # The most-extrapolated entry is the final estimate.
    return R[(k, k)]
1210
+
1211
+ # Romberg quadratures for numeric integration.
1212
+ #
1213
+ # Written by Scott M. Ransom <[email protected]>
1214
+ # last revision: 14 Nov 98
1215
+ #
1216
+ # Cosmetic changes by Konrad Hinsen <[email protected]>
1217
+ # last revision: 1999-7-21
1218
+ #
1219
+ # Adapted to SciPy by Travis Oliphant <[email protected]>
1220
+ # last revision: Dec 2001
1221
+
1222
+
1223
+ def _difftrap(function, interval, numtraps):
1224
+ """
1225
+ Perform part of the trapezoidal rule to integrate a function.
1226
+ Assume that we had called difftrap with all lower powers-of-2
1227
+ starting with 1. Calling difftrap only returns the summation
1228
+ of the new ordinates. It does _not_ multiply by the width
1229
+ of the trapezoids. This must be performed by the caller.
1230
+ 'function' is the function to evaluate (must accept vector arguments).
1231
+ 'interval' is a sequence with lower and upper limits
1232
+ of integration.
1233
+ 'numtraps' is the number of trapezoids to use (must be a
1234
+ power-of-2).
1235
+ """
1236
+ if numtraps <= 0:
1237
+ raise ValueError("numtraps must be > 0 in difftrap().")
1238
+ elif numtraps == 1:
1239
+ return 0.5*(function(interval[0])+function(interval[1]))
1240
+ else:
1241
+ numtosum = numtraps/2
1242
+ h = float(interval[1]-interval[0])/numtosum
1243
+ lox = interval[0] + 0.5 * h
1244
+ points = lox + h * np.arange(numtosum)
1245
+ s = np.sum(function(points), axis=0)
1246
+ return s
1247
+
1248
+
1249
+ def _romberg_diff(b, c, k):
1250
+ """
1251
+ Compute the differences for the Romberg quadrature corrections.
1252
+ See Forman Acton's "Real Computing Made Real," p 143.
1253
+ """
1254
+ tmp = 4.0**k
1255
+ return (tmp * c - b)/(tmp - 1.0)
1256
+
1257
+
1258
def _printresmat(function, interval, resmat):
    # Print the Romberg result matrix.
    # Each row i corresponds to 2**i trapezoid steps; columns are
    # successive Richardson extrapolations. After the loops, (i, j)
    # index the most-refined, most-extrapolated entry.
    i = j = 0
    print('Romberg integration of', repr(function), end=' ')
    print('from', interval)
    print('')
    print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
    for i in range(len(resmat)):
        print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
        for j in range(i+1):
            print('%9f' % (resmat[i][j]), end=' ')
        print('')
    print('')
    print('The final result is', resmat[i][j], end=' ')
    print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
1273
+
1274
+
1275
# Bug fix: the deprecation message below was built from adjacent string
# literals with no separating spaces, yielding "SciPy 1.12.0and will be
# removed in SciPy 1.15.0. Please use`scipy.integrate.quad` instead."
@_deprecated("`scipy.integrate.romberg` is deprecated as of SciPy 1.12.0 "
             "and will be removed in SciPy 1.15.0. Please use "
             "`scipy.integrate.quad` instead.")
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
            divmax=10, vec_func=False):
    """
    Romberg integration of a callable function or method.

    .. deprecated:: 1.12.0

          This function is deprecated as of SciPy 1.12.0 and will be removed
          in SciPy 1.15.0. Please use `scipy.integrate.quad` instead.

    Returns the integral of `function` (a function of one variable)
    over the interval (`a`, `b`).

    If `show` is 1, the triangular array of the intermediate results
    will be printed. If `vec_func` is True (default is False), then
    `function` is assumed to support vector arguments.

    Parameters
    ----------
    function : callable
        Function to be integrated.
    a : float
        Lower limit of integration.
    b : float
        Upper limit of integration.

    Returns
    -------
    results : float
        Result of the integration.

    Other Parameters
    ----------------
    args : tuple, optional
        Extra arguments to pass to function. Each element of `args` will
        be passed as a single argument to `func`. Default is to pass no
        extra arguments.
    tol, rtol : float, optional
        The desired absolute and relative tolerances. Defaults are 1.48e-8.
    show : bool, optional
        Whether to print the results. Default is False.
    divmax : int, optional
        Maximum order of extrapolation. Default is 10.
    vec_func : bool, optional
        Whether `func` handles arrays as arguments (i.e., whether it is a
        "vector" function). Default is False.

    See Also
    --------
    fixed_quad : Fixed-order Gaussian quadrature.
    quad : Adaptive quadrature using QUADPACK.
    dblquad : Double integrals.
    tplquad : Triple integrals.
    romb : Integrators for sampled data.
    simpson : Integrators for sampled data.
    cumulative_trapezoid : Cumulative integration for sampled data.
    ode : ODE integrator.
    odeint : ODE integrator.

    References
    ----------
    .. [1] 'Romberg's method' https://en.wikipedia.org/wiki/Romberg%27s_method

    Examples
    --------
    Integrate a gaussian from 0 to 1 and compare to the error function.

    >>> from scipy import integrate
    >>> from scipy.special import erf
    >>> import numpy as np
    >>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
    >>> result = integrate.romberg(gaussian, 0, 1, show=True)
    Romberg integration of <function vfunc at ...> from [0, 1]

    ::

       Steps  StepSize  Results
           1  1.000000  0.385872
           2  0.500000  0.412631  0.421551
           4  0.250000  0.419184  0.421368  0.421356
           8  0.125000  0.420810  0.421352  0.421350  0.421350
          16  0.062500  0.421215  0.421350  0.421350  0.421350  0.421350
          32  0.031250  0.421317  0.421350  0.421350  0.421350  0.421350  0.421350

    The final result is 0.421350396475 after 33 function evaluations.

    >>> print("%g %g" % (2*result, erf(1)))
    0.842701 0.842701

    """
    if np.isinf(a) or np.isinf(b):
        raise ValueError("Romberg integration only available "
                         "for finite limits.")
    vfunc = vectorize1(function, args, vec_func=vec_func)
    n = 1
    interval = [a, b]
    intrange = b - a
    # Running sum of ordinates; new refinement levels only add the newly
    # introduced midpoints (see _difftrap).
    ordsum = _difftrap(vfunc, interval, n)
    result = intrange * ordsum
    resmat = [[result]]
    err = np.inf
    last_row = resmat[0]
    for i in range(1, divmax+1):
        n *= 2
        ordsum += _difftrap(vfunc, interval, n)
        # First column: trapezoid estimate; remaining columns: Richardson
        # extrapolations built from the previous row.
        row = [intrange * ordsum / n]
        for k in range(i):
            row.append(_romberg_diff(last_row[k], row[k], k+1))
        result = row[i]
        lastresult = last_row[i-1]
        if show:
            resmat.append(row)
        # Converged when the change is below either tolerance.
        err = abs(result - lastresult)
        if err < tol or err < rtol * abs(result):
            break
        last_row = row
    else:
        # Loop exhausted divmax refinements without converging.
        warnings.warn(
            "divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
            AccuracyWarning, stacklevel=2)

    if show:
        _printresmat(vfunc, interval, resmat)
    return result
1402
+
1403
+
1404
+ # Coefficients for Newton-Cotes quadrature
1405
+ #
1406
+ # These are the points being used
1407
+ # to construct the local interpolating polynomial
1408
+ # a are the weights for Newton-Cotes integration
1409
+ # B is the error coefficient.
1410
+ # error in these coefficients grows as N gets larger.
1411
+ # or as samples are closer and closer together
1412
+
1413
+ # You can use maxima to find these rational coefficients
1414
+ # for equally spaced data using the commands
1415
+ # a(i,N) := (integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N)
1416
+ # / ((N-i)! * i!) * (-1)^(N-i));
1417
+ # Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
1418
+ # Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
1419
+ # B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
1420
+ #
1421
+ # pre-computed for equally-spaced weights
1422
+ #
1423
+ # num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
1424
+ #
1425
+ # a = num_a*array(int_a)/den_a
1426
+ # B = num_B*1.0 / den_B
1427
+ #
1428
+ # integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
1429
+ # where k = N // 2
1430
+ #
1431
+ _builtincoeffs = {
1432
+ 1: (1,2,[1,1],-1,12),
1433
+ 2: (1,3,[1,4,1],-1,90),
1434
+ 3: (3,8,[1,3,3,1],-3,80),
1435
+ 4: (2,45,[7,32,12,32,7],-8,945),
1436
+ 5: (5,288,[19,75,50,50,75,19],-275,12096),
1437
+ 6: (1,140,[41,216,27,272,27,216,41],-9,1400),
1438
+ 7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
1439
+ 8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
1440
+ -2368,467775),
1441
+ 9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
1442
+ 15741,2857], -4671, 394240),
1443
+ 10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
1444
+ -260550,272400,-48525,106300,16067],
1445
+ -673175, 163459296),
1446
+ 11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
1447
+ 15493566,15493566,-9595542,25226685,-3237113,
1448
+ 13486539,2171465], -2224234463, 237758976000),
1449
+ 12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
1450
+ 87516288,-87797136,87516288,-51491295,35725120,
1451
+ -7587864,9903168,1364651], -3012, 875875),
1452
+ 13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
1453
+ 156074417954,-151659573325,206683437987,
1454
+ -43111992612,-43111992612,206683437987,
1455
+ -151659573325,156074417954,-31268252574,
1456
+ 56280729661,8181904909], -2639651053,
1457
+ 344881152000),
1458
+ 14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
1459
+ -6625093363,12630121616,-16802270373,19534438464,
1460
+ -16802270373,12630121616,-6625093363,3501442784,
1461
+ -770720657,710986864,90241897], -3740727473,
1462
+ 1275983280000)
1463
+ }
1464
+
1465
+
1466
+ def newton_cotes(rn, equal=0):
1467
+ r"""
1468
+ Return weights and error coefficient for Newton-Cotes integration.
1469
+
1470
+ Suppose we have (N+1) samples of f at the positions
1471
+ x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
1472
+ integral between x_0 and x_N is:
1473
+
1474
+ :math:`\int_{x_0}^{x_N} f(x)dx = \Delta x \sum_{i=0}^{N} a_i f(x_i)
1475
+ + B_N (\Delta x)^{N+2} f^{N+1} (\xi)`
1476
+
1477
+ where :math:`\xi \in [x_0,x_N]`
1478
+ and :math:`\Delta x = \frac{x_N-x_0}{N}` is the average samples spacing.
1479
+
1480
+ If the samples are equally-spaced and N is even, then the error
1481
+ term is :math:`B_N (\Delta x)^{N+3} f^{N+2}(\xi)`.
1482
+
1483
+ Parameters
1484
+ ----------
1485
+ rn : int
1486
+ The integer order for equally-spaced data or the relative positions of
1487
+ the samples with the first sample at 0 and the last at N, where N+1 is
1488
+ the length of `rn`. N is the order of the Newton-Cotes integration.
1489
+ equal : int, optional
1490
+ Set to 1 to enforce equally spaced data.
1491
+
1492
+ Returns
1493
+ -------
1494
+ an : ndarray
1495
+ 1-D array of weights to apply to the function at the provided sample
1496
+ positions.
1497
+ B : float
1498
+ Error coefficient.
1499
+
1500
+ Notes
1501
+ -----
1502
+ Normally, the Newton-Cotes rules are used on smaller integration
1503
+ regions and a composite rule is used to return the total integral.
1504
+
1505
+ Examples
1506
+ --------
1507
+ Compute the integral of sin(x) in [0, :math:`\pi`]:
1508
+
1509
+ >>> from scipy.integrate import newton_cotes
1510
+ >>> import numpy as np
1511
+ >>> def f(x):
1512
+ ... return np.sin(x)
1513
+ >>> a = 0
1514
+ >>> b = np.pi
1515
+ >>> exact = 2
1516
+ >>> for N in [2, 4, 6, 8, 10]:
1517
+ ... x = np.linspace(a, b, N + 1)
1518
+ ... an, B = newton_cotes(N, 1)
1519
+ ... dx = (b - a) / N
1520
+ ... quad = dx * np.sum(an * f(x))
1521
+ ... error = abs(quad - exact)
1522
+ ... print('{:2d} {:10.9f} {:.5e}'.format(N, quad, error))
1523
+ ...
1524
+ 2 2.094395102 9.43951e-02
1525
+ 4 1.998570732 1.42927e-03
1526
+ 6 2.000017814 1.78136e-05
1527
+ 8 1.999999835 1.64725e-07
1528
+ 10 2.000000001 1.14677e-09
1529
+
1530
+ """
1531
+ try:
1532
+ N = len(rn)-1
1533
+ if equal:
1534
+ rn = np.arange(N+1)
1535
+ elif np.all(np.diff(rn) == 1):
1536
+ equal = 1
1537
+ except Exception:
1538
+ N = rn
1539
+ rn = np.arange(N+1)
1540
+ equal = 1
1541
+
1542
+ if equal and N in _builtincoeffs:
1543
+ na, da, vi, nb, db = _builtincoeffs[N]
1544
+ an = na * np.array(vi, dtype=float) / da
1545
+ return an, float(nb)/db
1546
+
1547
+ if (rn[0] != 0) or (rn[-1] != N):
1548
+ raise ValueError("The sample positions must start at 0"
1549
+ " and end at N")
1550
+ yi = rn / float(N)
1551
+ ti = 2 * yi - 1
1552
+ nvec = np.arange(N+1)
1553
+ C = ti ** nvec[:, np.newaxis]
1554
+ Cinv = np.linalg.inv(C)
1555
+ # improve precision of result
1556
+ for i in range(2):
1557
+ Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
1558
+ vec = 2.0 / (nvec[::2]+1)
1559
+ ai = Cinv[:, ::2].dot(vec) * (N / 2.)
1560
+
1561
+ if (N % 2 == 0) and equal:
1562
+ BN = N/(N+3.)
1563
+ power = N+2
1564
+ else:
1565
+ BN = N/(N+2.)
1566
+ power = N+1
1567
+
1568
+ BN = BN - np.dot(yi**power, ai)
1569
+ p1 = power+1
1570
+ fac = power*math.log(N) - gammaln(p1)
1571
+ fac = math.exp(fac)
1572
+ return ai, BN*fac
1573
+
1574
+
1575
+ def _qmc_quad_iv(func, a, b, n_points, n_estimates, qrng, log):
1576
+
1577
+ # lazy import to avoid issues with partially-initialized submodule
1578
+ if not hasattr(qmc_quad, 'qmc'):
1579
+ from scipy import stats
1580
+ qmc_quad.stats = stats
1581
+ else:
1582
+ stats = qmc_quad.stats
1583
+
1584
+ if not callable(func):
1585
+ message = "`func` must be callable."
1586
+ raise TypeError(message)
1587
+
1588
+ # a, b will be modified, so copy. Oh well if it's copied twice.
1589
+ a = np.atleast_1d(a).copy()
1590
+ b = np.atleast_1d(b).copy()
1591
+ a, b = np.broadcast_arrays(a, b)
1592
+ dim = a.shape[0]
1593
+
1594
+ try:
1595
+ func((a + b) / 2)
1596
+ except Exception as e:
1597
+ message = ("`func` must evaluate the integrand at points within "
1598
+ "the integration range; e.g. `func( (a + b) / 2)` "
1599
+ "must return the integrand at the centroid of the "
1600
+ "integration volume.")
1601
+ raise ValueError(message) from e
1602
+
1603
+ try:
1604
+ func(np.array([a, b]).T)
1605
+ vfunc = func
1606
+ except Exception as e:
1607
+ message = ("Exception encountered when attempting vectorized call to "
1608
+ f"`func`: {e}. For better performance, `func` should "
1609
+ "accept two-dimensional array `x` with shape `(len(a), "
1610
+ "n_points)` and return an array of the integrand value at "
1611
+ "each of the `n_points.")
1612
+ warnings.warn(message, stacklevel=3)
1613
+
1614
+ def vfunc(x):
1615
+ return np.apply_along_axis(func, axis=-1, arr=x)
1616
+
1617
+ n_points_int = np.int64(n_points)
1618
+ if n_points != n_points_int:
1619
+ message = "`n_points` must be an integer."
1620
+ raise TypeError(message)
1621
+
1622
+ n_estimates_int = np.int64(n_estimates)
1623
+ if n_estimates != n_estimates_int:
1624
+ message = "`n_estimates` must be an integer."
1625
+ raise TypeError(message)
1626
+
1627
+ if qrng is None:
1628
+ qrng = stats.qmc.Halton(dim)
1629
+ elif not isinstance(qrng, stats.qmc.QMCEngine):
1630
+ message = "`qrng` must be an instance of scipy.stats.qmc.QMCEngine."
1631
+ raise TypeError(message)
1632
+
1633
+ if qrng.d != a.shape[0]:
1634
+ message = ("`qrng` must be initialized with dimensionality equal to "
1635
+ "the number of variables in `a`, i.e., "
1636
+ "`qrng.random().shape[-1]` must equal `a.shape[0]`.")
1637
+ raise ValueError(message)
1638
+
1639
+ rng_seed = getattr(qrng, 'rng_seed', None)
1640
+ rng = stats._qmc.check_random_state(rng_seed)
1641
+
1642
+ if log not in {True, False}:
1643
+ message = "`log` must be boolean (`True` or `False`)."
1644
+ raise TypeError(message)
1645
+
1646
+ return (vfunc, a, b, n_points_int, n_estimates_int, qrng, rng, log, stats)
1647
+
1648
+
1649
+ QMCQuadResult = namedtuple('QMCQuadResult', ['integral', 'standard_error'])
1650
+
1651
+
1652
+ def qmc_quad(func, a, b, *, n_estimates=8, n_points=1024, qrng=None,
1653
+ log=False):
1654
+ """
1655
+ Compute an integral in N-dimensions using Quasi-Monte Carlo quadrature.
1656
+
1657
+ Parameters
1658
+ ----------
1659
+ func : callable
1660
+ The integrand. Must accept a single argument ``x``, an array which
1661
+ specifies the point(s) at which to evaluate the scalar-valued
1662
+ integrand, and return the value(s) of the integrand.
1663
+ For efficiency, the function should be vectorized to accept an array of
1664
+ shape ``(d, n_points)``, where ``d`` is the number of variables (i.e.
1665
+ the dimensionality of the function domain) and `n_points` is the number
1666
+ of quadrature points, and return an array of shape ``(n_points,)``,
1667
+ the integrand at each quadrature point.
1668
+ a, b : array-like
1669
+ One-dimensional arrays specifying the lower and upper integration
1670
+ limits, respectively, of each of the ``d`` variables.
1671
+ n_estimates, n_points : int, optional
1672
+ `n_estimates` (default: 8) statistically independent QMC samples, each
1673
+ of `n_points` (default: 1024) points, will be generated by `qrng`.
1674
+ The total number of points at which the integrand `func` will be
1675
+ evaluated is ``n_points * n_estimates``. See Notes for details.
1676
+ qrng : `~scipy.stats.qmc.QMCEngine`, optional
1677
+ An instance of the QMCEngine from which to sample QMC points.
1678
+ The QMCEngine must be initialized to a number of dimensions ``d``
1679
+ corresponding with the number of variables ``x1, ..., xd`` passed to
1680
+ `func`.
1681
+ The provided QMCEngine is used to produce the first integral estimate.
1682
+ If `n_estimates` is greater than one, additional QMCEngines are
1683
+ spawned from the first (with scrambling enabled, if it is an option.)
1684
+ If a QMCEngine is not provided, the default `scipy.stats.qmc.Halton`
1685
+ will be initialized with the number of dimensions determine from
1686
+ the length of `a`.
1687
+ log : boolean, default: False
1688
+ When set to True, `func` returns the log of the integrand, and
1689
+ the result object contains the log of the integral.
1690
+
1691
+ Returns
1692
+ -------
1693
+ result : object
1694
+ A result object with attributes:
1695
+
1696
+ integral : float
1697
+ The estimate of the integral.
1698
+ standard_error :
1699
+ The error estimate. See Notes for interpretation.
1700
+
1701
+ Notes
1702
+ -----
1703
+ Values of the integrand at each of the `n_points` points of a QMC sample
1704
+ are used to produce an estimate of the integral. This estimate is drawn
1705
+ from a population of possible estimates of the integral, the value of
1706
+ which we obtain depends on the particular points at which the integral
1707
+ was evaluated. We perform this process `n_estimates` times, each time
1708
+ evaluating the integrand at different scrambled QMC points, effectively
1709
+ drawing i.i.d. random samples from the population of integral estimates.
1710
+ The sample mean :math:`m` of these integral estimates is an
1711
+ unbiased estimator of the true value of the integral, and the standard
1712
+ error of the mean :math:`s` of these estimates may be used to generate
1713
+ confidence intervals using the t distribution with ``n_estimates - 1``
1714
+ degrees of freedom. Perhaps counter-intuitively, increasing `n_points`
1715
+ while keeping the total number of function evaluation points
1716
+ ``n_points * n_estimates`` fixed tends to reduce the actual error, whereas
1717
+ increasing `n_estimates` tends to decrease the error estimate.
1718
+
1719
+ Examples
1720
+ --------
1721
+ QMC quadrature is particularly useful for computing integrals in higher
1722
+ dimensions. An example integrand is the probability density function
1723
+ of a multivariate normal distribution.
1724
+
1725
+ >>> import numpy as np
1726
+ >>> from scipy import stats
1727
+ >>> dim = 8
1728
+ >>> mean = np.zeros(dim)
1729
+ >>> cov = np.eye(dim)
1730
+ >>> def func(x):
1731
+ ... # `multivariate_normal` expects the _last_ axis to correspond with
1732
+ ... # the dimensionality of the space, so `x` must be transposed
1733
+ ... return stats.multivariate_normal.pdf(x.T, mean, cov)
1734
+
1735
+ To compute the integral over the unit hypercube:
1736
+
1737
+ >>> from scipy.integrate import qmc_quad
1738
+ >>> a = np.zeros(dim)
1739
+ >>> b = np.ones(dim)
1740
+ >>> rng = np.random.default_rng()
1741
+ >>> qrng = stats.qmc.Halton(d=dim, seed=rng)
1742
+ >>> n_estimates = 8
1743
+ >>> res = qmc_quad(func, a, b, n_estimates=n_estimates, qrng=qrng)
1744
+ >>> res.integral, res.standard_error
1745
+ (0.00018429555666024108, 1.0389431116001344e-07)
1746
+
1747
+ A two-sided, 99% confidence interval for the integral may be estimated
1748
+ as:
1749
+
1750
+ >>> t = stats.t(df=n_estimates-1, loc=res.integral,
1751
+ ... scale=res.standard_error)
1752
+ >>> t.interval(0.99)
1753
+ (0.0001839319802536469, 0.00018465913306683527)
1754
+
1755
+ Indeed, the value reported by `scipy.stats.multivariate_normal` is
1756
+ within this range.
1757
+
1758
+ >>> stats.multivariate_normal.cdf(b, mean, cov, lower_limit=a)
1759
+ 0.00018430867675187443
1760
+
1761
+ """
1762
+ args = _qmc_quad_iv(func, a, b, n_points, n_estimates, qrng, log)
1763
+ func, a, b, n_points, n_estimates, qrng, rng, log, stats = args
1764
+
1765
+ def sum_product(integrands, dA, log=False):
1766
+ if log:
1767
+ return logsumexp(integrands) + np.log(dA)
1768
+ else:
1769
+ return np.sum(integrands * dA)
1770
+
1771
+ def mean(estimates, log=False):
1772
+ if log:
1773
+ return logsumexp(estimates) - np.log(n_estimates)
1774
+ else:
1775
+ return np.mean(estimates)
1776
+
1777
+ def std(estimates, m=None, ddof=0, log=False):
1778
+ m = m or mean(estimates, log)
1779
+ if log:
1780
+ estimates, m = np.broadcast_arrays(estimates, m)
1781
+ temp = np.vstack((estimates, m + np.pi * 1j))
1782
+ diff = logsumexp(temp, axis=0)
1783
+ return np.real(0.5 * (logsumexp(2 * diff)
1784
+ - np.log(n_estimates - ddof)))
1785
+ else:
1786
+ return np.std(estimates, ddof=ddof)
1787
+
1788
+ def sem(estimates, m=None, s=None, log=False):
1789
+ m = m or mean(estimates, log)
1790
+ s = s or std(estimates, m, ddof=1, log=log)
1791
+ if log:
1792
+ return s - 0.5*np.log(n_estimates)
1793
+ else:
1794
+ return s / np.sqrt(n_estimates)
1795
+
1796
+ # The sign of the integral depends on the order of the limits. Fix this by
1797
+ # ensuring that lower bounds are indeed lower and setting sign of resulting
1798
+ # integral manually
1799
+ if np.any(a == b):
1800
+ message = ("A lower limit was equal to an upper limit, so the value "
1801
+ "of the integral is zero by definition.")
1802
+ warnings.warn(message, stacklevel=2)
1803
+ return QMCQuadResult(-np.inf if log else 0, 0)
1804
+
1805
+ i_swap = b < a
1806
+ sign = (-1)**(i_swap.sum(axis=-1)) # odd # of swaps -> negative
1807
+ a[i_swap], b[i_swap] = b[i_swap], a[i_swap]
1808
+
1809
+ A = np.prod(b - a)
1810
+ dA = A / n_points
1811
+
1812
+ estimates = np.zeros(n_estimates)
1813
+ rngs = _rng_spawn(qrng.rng, n_estimates)
1814
+ for i in range(n_estimates):
1815
+ # Generate integral estimate
1816
+ sample = qrng.random(n_points)
1817
+ # The rationale for transposing is that this allows users to easily
1818
+ # unpack `x` into separate variables, if desired. This is consistent
1819
+ # with the `xx` array passed into the `scipy.integrate.nquad` `func`.
1820
+ x = stats.qmc.scale(sample, a, b).T # (n_dim, n_points)
1821
+ integrands = func(x)
1822
+ estimates[i] = sum_product(integrands, dA, log)
1823
+
1824
+ # Get a new, independently-scrambled QRNG for next time
1825
+ qrng = type(qrng)(seed=rngs[i], **qrng._init_quad)
1826
+
1827
+ integral = mean(estimates, log)
1828
+ standard_error = sem(estimates, m=integral, log=log)
1829
+ integral = integral + np.pi*1j if (log and sign < 0) else integral*sign
1830
+ return QMCQuadResult(integral, standard_error)
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_tanhsinh.py ADDED
@@ -0,0 +1,1231 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: disable-error-code="attr-defined"
2
+ import numpy as np
3
+ from scipy import special
4
+ import scipy._lib._elementwise_iterative_method as eim
5
+ from scipy._lib._util import _RichResult
6
+
7
+ # todo:
8
+ # figure out warning situation
9
+ # address https://github.com/scipy/scipy/pull/18650#discussion_r1233032521
10
+ # without `minweight`, we are also suppressing infinities within the interval.
11
+ # Is that OK? If so, we can probably get rid of `status=3`.
12
+ # Add heuristic to stop when improvement is too slow / antithrashing
13
+ # support singularities? interval subdivision? this feature will be added
14
+ # eventually, but do we adjust the interface now?
15
+ # When doing log-integration, should the tolerances control the error of the
16
+ # log-integral or the error of the integral? The trouble is that `log`
17
+ # inherently looses some precision so it may not be possible to refine
18
+ # the integral further. Example: 7th moment of stats.f(15, 20)
19
+ # respect function evaluation limit?
20
+ # make public?
21
+
22
+
23
+ def _tanhsinh(f, a, b, *, args=(), log=False, maxfun=None, maxlevel=None,
24
+ minlevel=2, atol=None, rtol=None, preserve_shape=False,
25
+ callback=None):
26
+ """Evaluate a convergent integral numerically using tanh-sinh quadrature.
27
+
28
+ In practice, tanh-sinh quadrature achieves quadratic convergence for
29
+ many integrands: the number of accurate *digits* scales roughly linearly
30
+ with the number of function evaluations [1]_.
31
+
32
+ Either or both of the limits of integration may be infinite, and
33
+ singularities at the endpoints are acceptable. Divergent integrals and
34
+ integrands with non-finite derivatives or singularities within an interval
35
+ are out of scope, but the latter may be evaluated be calling `_tanhsinh` on
36
+ each sub-interval separately.
37
+
38
+ Parameters
39
+ ----------
40
+ f : callable
41
+ The function to be integrated. The signature must be::
42
+ func(x: ndarray, *fargs) -> ndarray
43
+ where each element of ``x`` is a finite real and ``fargs`` is a tuple,
44
+ which may contain an arbitrary number of arrays that are broadcastable
45
+ with `x`. ``func`` must be an elementwise-scalar function; see
46
+ documentation of parameter `preserve_shape` for details.
47
+ If ``func`` returns a value with complex dtype when evaluated at
48
+ either endpoint, subsequent arguments ``x`` will have complex dtype
49
+ (but zero imaginary part).
50
+ a, b : array_like
51
+ Real lower and upper limits of integration. Must be broadcastable.
52
+ Elements may be infinite.
53
+ args : tuple, optional
54
+ Additional positional arguments to be passed to `func`. Must be arrays
55
+ broadcastable with `a` and `b`. If the callable to be integrated
56
+ requires arguments that are not broadcastable with `a` and `b`, wrap
57
+ that callable with `f`. See Examples.
58
+ log : bool, default: False
59
+ Setting to True indicates that `f` returns the log of the integrand
60
+ and that `atol` and `rtol` are expressed as the logs of the absolute
61
+ and relative errors. In this case, the result object will contain the
62
+ log of the integral and error. This is useful for integrands for which
63
+ numerical underflow or overflow would lead to inaccuracies.
64
+ When ``log=True``, the integrand (the exponential of `f`) must be real,
65
+ but it may be negative, in which case the log of the integrand is a
66
+ complex number with an imaginary part that is an odd multiple of π.
67
+ maxlevel : int, default: 10
68
+ The maximum refinement level of the algorithm.
69
+
70
+ At the zeroth level, `f` is called once, performing 16 function
71
+ evaluations. At each subsequent level, `f` is called once more,
72
+ approximately doubling the number of function evaluations that have
73
+ been performed. Accordingly, for many integrands, each successive level
74
+ will double the number of accurate digits in the result (up to the
75
+ limits of floating point precision).
76
+
77
+ The algorithm will terminate after completing level `maxlevel` or after
78
+ another termination condition is satisfied, whichever comes first.
79
+ minlevel : int, default: 2
80
+ The level at which to begin iteration (default: 2). This does not
81
+ change the total number of function evaluations or the abscissae at
82
+ which the function is evaluated; it changes only the *number of times*
83
+ `f` is called. If ``minlevel=k``, then the integrand is evaluated at
84
+ all abscissae from levels ``0`` through ``k`` in a single call.
85
+ Note that if `minlevel` exceeds `maxlevel`, the provided `minlevel` is
86
+ ignored, and `minlevel` is set equal to `maxlevel`.
87
+ atol, rtol : float, optional
88
+ Absolute termination tolerance (default: 0) and relative termination
89
+ tolerance (default: ``eps**0.75``, where ``eps`` is the precision of
90
+ the result dtype), respectively. The error estimate is as
91
+ described in [1]_ Section 5. While not theoretically rigorous or
92
+ conservative, it is said to work well in practice. Must be non-negative
93
+ and finite if `log` is False, and must be expressed as the log of a
94
+ non-negative and finite number if `log` is True.
95
+ preserve_shape : bool, default: False
96
+ In the following, "arguments of `f`" refers to the array ``x`` and
97
+ any arrays within ``fargs``. Let ``shape`` be the broadcasted shape
98
+ of `a`, `b`, and all elements of `args` (which is conceptually
99
+ distinct from ``fargs`` passed into `f`).
100
+
101
+ - When ``preserve_shape=False`` (default), `f` must accept arguments
102
+ of *any* broadcastable shapes.
103
+
104
+ - When ``preserve_shape=True``, `f` must accept arguments of shape
105
+ ``shape`` *or* ``shape + (n,)``, where ``(n,)`` is the number of
106
+ abscissae at which the function is being evaluated.
107
+
108
+ In either case, for each scalar element ``xi`` within `x`, the array
109
+ returned by `f` must include the scalar ``f(xi)`` at the same index.
110
+ Consequently, the shape of the output is always the shape of the input
111
+ ``x``.
112
+
113
+ See Examples.
114
+
115
+ callback : callable, optional
116
+ An optional user-supplied function to be called before the first
117
+ iteration and after each iteration.
118
+ Called as ``callback(res)``, where ``res`` is a ``_RichResult``
119
+ similar to that returned by `_differentiate` (but containing the
120
+ current iterate's values of all variables). If `callback` raises a
121
+ ``StopIteration``, the algorithm will terminate immediately and
122
+ `_tanhsinh` will return a result object.
123
+
124
+ Returns
125
+ -------
126
+ res : _RichResult
127
+ An instance of `scipy._lib._util._RichResult` with the following
128
+ attributes. (The descriptions are written as though the values will be
129
+ scalars; however, if `func` returns an array, the outputs will be
130
+ arrays of the same shape.)
131
+ success : bool
132
+ ``True`` when the algorithm terminated successfully (status ``0``).
133
+ status : int
134
+ An integer representing the exit status of the algorithm.
135
+ ``0`` : The algorithm converged to the specified tolerances.
136
+ ``-1`` : (unused)
137
+ ``-2`` : The maximum number of iterations was reached.
138
+ ``-3`` : A non-finite value was encountered.
139
+ ``-4`` : Iteration was terminated by `callback`.
140
+ ``1`` : The algorithm is proceeding normally (in `callback` only).
141
+ integral : float
142
+ An estimate of the integral
143
+ error : float
144
+ An estimate of the error. Only available if level two or higher
145
+ has been completed; otherwise NaN.
146
+ maxlevel : int
147
+ The maximum refinement level used.
148
+ nfev : int
149
+ The number of points at which `func` was evaluated.
150
+
151
+ See Also
152
+ --------
153
+ quad, quadrature
154
+
155
+ Notes
156
+ -----
157
+ Implements the algorithm as described in [1]_ with minor adaptations for
158
+ finite-precision arithmetic, including some described by [2]_ and [3]_. The
159
+ tanh-sinh scheme was originally introduced in [4]_.
160
+
161
+ Due to floating-point error in the abscissae, the function may be evaluated
162
+ at the endpoints of the interval during iterations. The values returned by
163
+ the function at the endpoints will be ignored.
164
+
165
+ References
166
+ ----------
167
+ [1] Bailey, David H., Karthik Jeyabalan, and Xiaoye S. Li. "A comparison of
168
+ three high-precision quadrature schemes." Experimental Mathematics 14.3
169
+ (2005): 317-329.
170
+ [2] Vanherck, Joren, Bart Sorée, and Wim Magnus. "Tanh-sinh quadrature for
171
+ single and multiple integration using floating-point arithmetic."
172
+ arXiv preprint arXiv:2007.15057 (2020).
173
+ [3] van Engelen, Robert A. "Improving the Double Exponential Quadrature
174
+ Tanh-Sinh, Sinh-Sinh and Exp-Sinh Formulas."
175
+ https://www.genivia.com/files/qthsh.pdf
176
+ [4] Takahasi, Hidetosi, and Masatake Mori. "Double exponential formulas for
177
+ numerical integration." Publications of the Research Institute for
178
+ Mathematical Sciences 9.3 (1974): 721-741.
179
+
180
+ Example
181
+ -------
182
+ Evaluate the Gaussian integral:
183
+
184
+ >>> import numpy as np
185
+ >>> from scipy.integrate._tanhsinh import _tanhsinh
186
+ >>> def f(x):
187
+ ... return np.exp(-x**2)
188
+ >>> res = _tanhsinh(f, -np.inf, np.inf)
189
+ >>> res.integral # true value is np.sqrt(np.pi), 1.7724538509055159
190
+ 1.7724538509055159
191
+ >>> res.error # actual error is 0
192
+ 4.0007963937534104e-16
193
+
194
+ The value of the Gaussian function (bell curve) is nearly zero for
195
+ arguments sufficiently far from zero, so the value of the integral
196
+ over a finite interval is nearly the same.
197
+
198
+ >>> _tanhsinh(f, -20, 20).integral
199
+ 1.772453850905518
200
+
201
+ However, with unfavorable integration limits, the integration scheme
202
+ may not be able to find the important region.
203
+
204
+ >>> _tanhsinh(f, -np.inf, 1000).integral
205
+ 4.500490856620352
206
+
207
+ In such cases, or when there are singularities within the interval,
208
+ break the integral into parts with endpoints at the important points.
209
+
210
+ >>> _tanhsinh(f, -np.inf, 0).integral + _tanhsinh(f, 0, 1000).integral
211
+ 1.772453850905404
212
+
213
+ For integration involving very large or very small magnitudes, use
214
+ log-integration. (For illustrative purposes, the following example shows a
215
+ case in which both regular and log-integration work, but for more extreme
216
+ limits of integration, log-integration would avoid the underflow
217
+ experienced when evaluating the integral normally.)
218
+
219
+ >>> res = _tanhsinh(f, 20, 30, rtol=1e-10)
220
+ >>> res.integral, res.error
221
+ 4.7819613911309014e-176, 4.670364401645202e-187
222
+ >>> def log_f(x):
223
+ ... return -x**2
224
+ >>> np.exp(res.integral), np.exp(res.error)
225
+ 4.7819613911306924e-176, 4.670364401645093e-187
226
+
227
+ The limits of integration and elements of `args` may be broadcastable
228
+ arrays, and integration is performed elementwise.
229
+
230
+ >>> from scipy import stats
231
+ >>> dist = stats.gausshyper(13.8, 3.12, 2.51, 5.18)
232
+ >>> a, b = dist.support()
233
+ >>> x = np.linspace(a, b, 100)
234
+ >>> res = _tanhsinh(dist.pdf, a, x)
235
+ >>> ref = dist.cdf(x)
236
+ >>> np.allclose(res.integral, ref)
237
+
238
+ By default, `preserve_shape` is False, and therefore the callable
239
+ `f` may be called with arrays of any broadcastable shapes.
240
+ For example:
241
+
242
+ >>> shapes = []
243
+ >>> def f(x, c):
244
+ ... shape = np.broadcast_shapes(x.shape, c.shape)
245
+ ... shapes.append(shape)
246
+ ... return np.sin(c*x)
247
+ >>>
248
+ >>> c = [1, 10, 30, 100]
249
+ >>> res = _tanhsinh(f, 0, 1, args=(c,), minlevel=1)
250
+ >>> shapes
251
+ [(4,), (4, 66), (3, 64), (2, 128), (1, 256)]
252
+
253
+ To understand where these shapes are coming from - and to better
254
+ understand how `_tanhsinh` computes accurate results - note that
255
+ higher values of ``c`` correspond with higher frequency sinusoids.
256
+ The higher frequency sinusoids make the integrand more complicated,
257
+ so more function evaluations are required to achieve the target
258
+ accuracy:
259
+
260
+ >>> res.nfev
261
+ array([ 67, 131, 259, 515])
262
+
263
+ The initial ``shape``, ``(4,)``, corresponds with evaluating the
264
+ integrand at a single abscissa and all four frequencies; this is used
265
+ for input validation and to determine the size and dtype of the arrays
266
+ that store results. The next shape corresponds with evaluating the
267
+ integrand at an initial grid of abscissae and all four frequencies.
268
+ Successive calls to the function double the total number of abscissae at
269
+ which the function has been evaluated. However, in later function
270
+ evaluations, the integrand is evaluated at fewer frequencies because
271
+ the corresponding integral has already converged to the required
272
+ tolerance. This saves function evaluations to improve performance, but
273
+ it requires the function to accept arguments of any shape.
274
+
275
+ "Vector-valued" integrands, such as those written for use with
276
+ `scipy.integrate.quad_vec`, are unlikely to satisfy this requirement.
277
+ For example, consider
278
+
279
+ >>> def f(x):
280
+ ... return [x, np.sin(10*x), np.cos(30*x), x*np.sin(100*x)**2]
281
+
282
+ This integrand is not compatible with `_tanhsinh` as written; for instance,
283
+ the shape of the output will not be the same as the shape of ``x``. Such a
284
+ function *could* be converted to a compatible form with the introduction of
285
+ additional parameters, but this would be inconvenient. In such cases,
286
+ a simpler solution would be to use `preserve_shape`.
287
+
288
+ >>> shapes = []
289
+ >>> def f(x):
290
+ ... shapes.append(x.shape)
291
+ ... x0, x1, x2, x3 = x
292
+ ... return [x0, np.sin(10*x1), np.cos(30*x2), x3*np.sin(100*x3)]
293
+ >>>
294
+ >>> a = np.zeros(4)
295
+ >>> res = _tanhsinh(f, a, 1, preserve_shape=True)
296
+ >>> shapes
297
+ [(4,), (4, 66), (4, 64), (4, 128), (4, 256)]
298
+
299
+ Here, the broadcasted shape of `a` and `b` is ``(4,)``. With
300
+ ``preserve_shape=True``, the function may be called with argument
301
+ ``x`` of shape ``(4,)`` or ``(4, n)``, and this is what we observe.
302
+
303
+ """
304
+ (f, a, b, log, maxfun, maxlevel, minlevel,
305
+ atol, rtol, args, preserve_shape, callback) = _tanhsinh_iv(
306
+ f, a, b, log, maxfun, maxlevel, minlevel, atol,
307
+ rtol, args, preserve_shape, callback)
308
+
309
+ # Initialization
310
+ # `eim._initialize` does several important jobs, including
311
+ # ensuring that limits, each of the `args`, and the output of `f`
312
+ # broadcast correctly and are of consistent types. To save a function
313
+ # evaluation, I pass the midpoint of the integration interval. This comes
314
+ # at a cost of some gymnastics to ensure that the midpoint has the right
315
+ # shape and dtype. Did you know that 0d and >0d arrays follow different
316
+ # type promotion rules?
317
+ with np.errstate(over='ignore', invalid='ignore', divide='ignore'):
318
+ c = ((a.ravel() + b.ravel())/2).reshape(a.shape)
319
+ inf_a, inf_b = np.isinf(a), np.isinf(b)
320
+ c[inf_a] = b[inf_a] - 1 # takes care of infinite a
321
+ c[inf_b] = a[inf_b] + 1 # takes care of infinite b
322
+ c[inf_a & inf_b] = 0 # takes care of infinite a and b
323
+ temp = eim._initialize(f, (c,), args, complex_ok=True,
324
+ preserve_shape=preserve_shape)
325
+ f, xs, fs, args, shape, dtype = temp
326
+ a = np.broadcast_to(a, shape).astype(dtype).ravel()
327
+ b = np.broadcast_to(b, shape).astype(dtype).ravel()
328
+
329
+ # Transform improper integrals
330
+ a, b, a0, negative, abinf, ainf, binf = _transform_integrals(a, b)
331
+
332
+ # Define variables we'll need
333
+ nit, nfev = 0, 1 # one function evaluation performed above
334
+ zero = -np.inf if log else 0
335
+ pi = dtype.type(np.pi)
336
+ maxiter = maxlevel - minlevel + 1
337
+ eps = np.finfo(dtype).eps
338
+ if rtol is None:
339
+ rtol = 0.75*np.log(eps) if log else eps**0.75
340
+
341
+ Sn = np.full(shape, zero, dtype=dtype).ravel() # latest integral estimate
342
+ Sn[np.isnan(a) | np.isnan(b) | np.isnan(fs[0])] = np.nan
343
+ Sk = np.empty_like(Sn).reshape(-1, 1)[:, 0:0] # all integral estimates
344
+ aerr = np.full(shape, np.nan, dtype=dtype).ravel() # absolute error
345
+ status = np.full(shape, eim._EINPROGRESS, dtype=int).ravel()
346
+ h0 = np.real(_get_base_step(dtype=dtype)) # base step
347
+
348
+ # For term `d4` of error estimate ([1] Section 5), we need to keep the
349
+ # most extreme abscissae and corresponding `fj`s, `wj`s in Euler-Maclaurin
350
+ # sum. Here, we initialize these variables.
351
+ xr0 = np.full(shape, -np.inf, dtype=dtype).ravel()
352
+ fr0 = np.full(shape, np.nan, dtype=dtype).ravel()
353
+ wr0 = np.zeros(shape, dtype=dtype).ravel()
354
+ xl0 = np.full(shape, np.inf, dtype=dtype).ravel()
355
+ fl0 = np.full(shape, np.nan, dtype=dtype).ravel()
356
+ wl0 = np.zeros(shape, dtype=dtype).ravel()
357
+ d4 = np.zeros(shape, dtype=dtype).ravel()
358
+
359
+ work = _RichResult(
360
+ Sn=Sn, Sk=Sk, aerr=aerr, h=h0, log=log, dtype=dtype, pi=pi, eps=eps,
361
+ a=a.reshape(-1, 1), b=b.reshape(-1, 1), # integration limits
362
+ n=minlevel, nit=nit, nfev=nfev, status=status, # iter/eval counts
363
+ xr0=xr0, fr0=fr0, wr0=wr0, xl0=xl0, fl0=fl0, wl0=wl0, d4=d4, # err est
364
+ ainf=ainf, binf=binf, abinf=abinf, a0=a0.reshape(-1, 1)) # transforms
365
+ # Constant scalars don't need to be put in `work` unless they need to be
366
+ # passed outside `tanhsinh`. Examples: atol, rtol, h0, minlevel.
367
+
368
+ # Correspondence between terms in the `work` object and the result
369
+ res_work_pairs = [('status', 'status'), ('integral', 'Sn'),
370
+ ('error', 'aerr'), ('nit', 'nit'), ('nfev', 'nfev')]
371
+
372
+ def pre_func_eval(work):
373
+ # Determine abscissae at which to evaluate `f`
374
+ work.h = h0 / 2**work.n
375
+ xjc, wj = _get_pairs(work.n, h0, dtype=work.dtype,
376
+ inclusive=(work.n == minlevel))
377
+ work.xj, work.wj = _transform_to_limits(xjc, wj, work.a, work.b)
378
+
379
+ # Perform abscissae substitutions for infinite limits of integration
380
+ xj = work.xj.copy()
381
+ xj[work.abinf] = xj[work.abinf] / (1 - xj[work.abinf]**2)
382
+ xj[work.binf] = 1/xj[work.binf] - 1 + work.a0[work.binf]
383
+ xj[work.ainf] *= -1
384
+ return xj
385
+
386
+ def post_func_eval(x, fj, work):
387
+ # Weight integrand as required by substitutions for infinite limits
388
+ if work.log:
389
+ fj[work.abinf] += (np.log(1 + work.xj[work.abinf] ** 2)
390
+ - 2*np.log(1 - work.xj[work.abinf] ** 2))
391
+ fj[work.binf] -= 2 * np.log(work.xj[work.binf])
392
+ else:
393
+ fj[work.abinf] *= ((1 + work.xj[work.abinf]**2) /
394
+ (1 - work.xj[work.abinf]**2)**2)
395
+ fj[work.binf] *= work.xj[work.binf]**-2.
396
+
397
+ # Estimate integral with Euler-Maclaurin Sum
398
+ fjwj, Sn = _euler_maclaurin_sum(fj, work)
399
+ if work.Sk.shape[-1]:
400
+ Snm1 = work.Sk[:, -1]
401
+ Sn = (special.logsumexp([Snm1 - np.log(2), Sn], axis=0) if log
402
+ else Snm1 / 2 + Sn)
403
+
404
+ work.fjwj = fjwj
405
+ work.Sn = Sn
406
+
407
+ def check_termination(work):
408
+ """Terminate due to convergence or encountering non-finite values"""
409
+ stop = np.zeros(work.Sn.shape, dtype=bool)
410
+
411
+ # Terminate before first iteration if integration limits are equal
412
+ if work.nit == 0:
413
+ i = (work.a == work.b).ravel() # ravel singleton dimension
414
+ zero = -np.inf if log else 0
415
+ work.Sn[i] = zero
416
+ work.aerr[i] = zero
417
+ work.status[i] = eim._ECONVERGED
418
+ stop[i] = True
419
+ else:
420
+ # Terminate if convergence criterion is met
421
+ work.rerr, work.aerr = _estimate_error(work)
422
+ i = ((work.rerr < rtol) | (work.rerr + np.real(work.Sn) < atol) if log
423
+ else (work.rerr < rtol) | (work.rerr * abs(work.Sn) < atol))
424
+ work.status[i] = eim._ECONVERGED
425
+ stop[i] = True
426
+
427
+ # Terminate if integral estimate becomes invalid
428
+ if log:
429
+ i = (np.isposinf(np.real(work.Sn)) | np.isnan(work.Sn)) & ~stop
430
+ else:
431
+ i = ~np.isfinite(work.Sn) & ~stop
432
+ work.status[i] = eim._EVALUEERR
433
+ stop[i] = True
434
+
435
+ return stop
436
+
437
+ def post_termination_check(work):
438
+ work.n += 1
439
+ work.Sk = np.concatenate((work.Sk, work.Sn[:, np.newaxis]), axis=-1)
440
+ return
441
+
442
+ def customize_result(res, shape):
443
+ # If the integration limits were such that b < a, we reversed them
444
+ # to perform the calculation, and the final result needs to be negated.
445
+ if log and np.any(negative):
446
+ pi = res['integral'].dtype.type(np.pi)
447
+ j = np.complex64(1j) # minimum complex type
448
+ res['integral'] = res['integral'] + negative*pi*j
449
+ else:
450
+ res['integral'][negative] *= -1
451
+
452
+ # For this algorithm, it seems more appropriate to report the maximum
453
+ # level rather than the number of iterations in which it was performed.
454
+ res['maxlevel'] = minlevel + res['nit'] - 1
455
+ res['maxlevel'][res['nit'] == 0] = -1
456
+ del res['nit']
457
+ return shape
458
+
459
+ # Suppress all warnings initially, since there are many places in the code
460
+ # for which this is expected behavior.
461
+ with np.errstate(over='ignore', invalid='ignore', divide='ignore'):
462
+ res = eim._loop(work, callback, shape, maxiter, f, args, dtype, pre_func_eval,
463
+ post_func_eval, check_termination, post_termination_check,
464
+ customize_result, res_work_pairs, preserve_shape)
465
+ return res
466
+
467
+
468
def _get_base_step(dtype=np.float64):
    """Return the level-0 step length of the tanh-sinh abscissa grid.

    Theoretically, the Euler-Maclaurin sum is infinite, but it gets cut off
    when either the weights underflow or the abscissae cannot be
    distinguished from the limits of integration. The latter occurs first
    for float32 and float64, when the abscissa complement `xjc` computed in
    `_compute_pair` underflows.
    """
    # Stay a little away from the smallest positive normal number so `xjc`
    # does not underflow; solve for the limiting argument via [2] Eq. 13.
    fmin = 4 * np.finfo(dtype).tiny
    tmax = np.arcsinh(np.log(2 / fmin - 1) / np.pi)

    # Split `tmax` into `_N_BASE_STEPS` level-0 intervals. Using a power of
    # two keeps floating point arithmetic predictable and yields a base step
    # close to the value of 1 used in [1]. Function evaluation count at level
    # `k` is `2 + m*2^(k+1)` with `m = _N_BASE_STEPS`.
    return (tmax / _N_BASE_STEPS).astype(dtype)
488
+
489
+
490
+ _N_BASE_STEPS = 8
491
+
492
+
493
+ def _compute_pair(k, h0):
494
+ # Compute the abscissa-weight pairs for each level k. See [1] page 9.
495
+
496
+ # For now, we compute and store in 64-bit precision. If higher-precision
497
+ # data types become better supported, it would be good to compute these
498
+ # using the highest precision available. Or, once there is an Array API-
499
+ # compatible arbitrary precision array, we can compute at the required
500
+ # precision.
501
+
502
+ # "....each level k of abscissa-weight pairs uses h = 2 **-k"
503
+ # We adapt to floating point arithmetic using ideas of [2].
504
+ h = h0 / 2**k
505
+ max = _N_BASE_STEPS * 2**k
506
+
507
+ # For iterations after the first, "....the integrand function needs to be
508
+ # evaluated only at the odd-indexed abscissas at each level."
509
+ j = np.arange(max+1) if k == 0 else np.arange(1, max+1, 2)
510
+ jh = j * h
511
+
512
+ # "In this case... the weights wj = u1/cosh(u2)^2, where..."
513
+ pi_2 = np.pi / 2
514
+ u1 = pi_2*np.cosh(jh)
515
+ u2 = pi_2*np.sinh(jh)
516
+ # Denominators get big here. Overflow then underflow doesn't need warning.
517
+ # with np.errstate(under='ignore', over='ignore'):
518
+ wj = u1 / np.cosh(u2)**2
519
+ # "We actually store 1-xj = 1/(...)."
520
+ xjc = 1 / (np.exp(u2) * np.cosh(u2)) # complement of xj = np.tanh(u2)
521
+
522
+ # When level k == 0, the zeroth xj corresponds with xj = 0. To simplify
523
+ # code, the function will be evaluated there twice; each gets half weight.
524
+ wj[0] = wj[0] / 2 if k == 0 else wj[0]
525
+
526
+ return xjc, wj # store at full precision
527
+
528
+
529
def _pair_cache(k, h0):
    """Extend the cache of abscissa-weight pairs through level `k`.

    The pairs of consecutive levels are stored concatenated in the function
    attributes ``xjc`` and ``wj``; ``indices`` records where each level
    starts, so ``xjc[indices[k]:indices[k+1]]`` are the level-`k` abscissae.
    """
    # A different base step length invalidates everything cached so far.
    if h0 != _pair_cache.h0:
        _pair_cache.xjc = np.empty(0)
        _pair_cache.wj = np.empty(0)
        _pair_cache.indices = [0]

    xjc_pieces = [_pair_cache.xjc]
    wj_pieces = [_pair_cache.wj]

    # `indices` has one more entry than the number of cached levels, so the
    # first level not yet cached is `len(indices) - 1`.
    for level in range(len(_pair_cache.indices) - 1, k + 1):
        xjc_level, wj_level = _compute_pair(level, h0)
        xjc_pieces.append(xjc_level)
        wj_pieces.append(wj_level)
        _pair_cache.indices.append(_pair_cache.indices[-1] + len(xjc_level))

    _pair_cache.xjc = np.concatenate(xjc_pieces)
    _pair_cache.wj = np.concatenate(wj_pieces)
    _pair_cache.h0 = h0


# Initialize the cache attributes: no levels cached, no base step chosen yet.
_pair_cache.xjc = np.empty(0)
_pair_cache.wj = np.empty(0)
_pair_cache.indices = [0]
_pair_cache.h0 = None
556
+
557
+
558
def _get_pairs(k, h0, inclusive=False, dtype=np.float64):
    """Retrieve the level-`k` abscissa-weight pairs from the cache.

    If `inclusive`, return all pairs up to and including level `k`.
    """
    # Refill the cache if it is too short or was built for another base step.
    # NOTE(review): `<= k+2` re-extends even when level `k+1` is already
    # cached; this is harmless (the loop body in `_pair_cache` is empty).
    if h0 != _pair_cache.h0 or len(_pair_cache.indices) <= k + 2:
        _pair_cache(k, h0)

    indices = _pair_cache.indices
    lo = 0 if inclusive else indices[k]
    hi = indices[k + 1]

    return (_pair_cache.xjc[lo:hi].astype(dtype),
            _pair_cache.wj[lo:hi].astype(dtype))
572
+
573
+
574
+ def _transform_to_limits(xjc, wj, a, b):
575
+ # Transform integral according to user-specified limits. This is just
576
+ # math that follows from the fact that the standard limits are (-1, 1).
577
+ # Note: If we had stored xj instead of xjc, we would have
578
+ # xj = alpha * xj + beta, where beta = (a + b)/2
579
+ alpha = (b - a) / 2
580
+ xj = np.concatenate((-alpha * xjc + b, alpha * xjc + a), axis=-1)
581
+ wj = wj*alpha # arguments get broadcasted, so we can't use *=
582
+ wj = np.concatenate((wj, wj), axis=-1)
583
+
584
+ # Points at the boundaries can be generated due to finite precision
585
+ # arithmetic, but these function values aren't supposed to be included in
586
+ # the Euler-Maclaurin sum. Ideally we wouldn't evaluate the function at
587
+ # these points; however, we can't easily filter out points since this
588
+ # function is vectorized. Instead, zero the weights.
589
+ invalid = (xj <= a) | (xj >= b)
590
+ wj[invalid] = 0
591
+ return xj, wj
592
+
593
+
594
def _euler_maclaurin_sum(fj, work):
    """Perform the Euler-Maclaurin Sum, [1] Section 4.

    Parameters
    ----------
    fj : ndarray
        Function values at this level's abscissae; along the last axis, the
        right-half values precede the left-half values (see `work.xj`).
    work : _RichResult
        Algorithm state. Reads ``xj``, ``wj``, ``h``, ``log`` and the
        incumbent extreme-abscissa records ``xr0``/``fr0``/``wr0`` and
        ``xl0``/``fl0``/``wl0``; updates those records and ``d4``.

    Returns
    -------
    fjwj : ndarray
        Products of function values and weights (sums, in log scale).
    Sn : ndarray
        This level's contribution to the Euler-Maclaurin sum, including the
        step factor ``work.h``.
    """
    # The error estimate needs to know the magnitude of the last term
    # omitted from the Euler-Maclaurin sum. This is a bit involved because
    # it may have been computed at a previous level. I sure hope it's worth
    # all the trouble.
    xr0, fr0, wr0 = work.xr0, work.fr0, work.wr0
    xl0, fl0, wl0 = work.xl0, work.fl0, work.wl0

    # It is much more convenient to work with the transposes of our work
    # variables here.
    xj, fj, wj = work.xj.T, fj.T, work.wj.T
    n_x, n_active = xj.shape  # number of abscissae, number of active elements

    # We'll work with the left and right sides separately
    xr, xl = xj.reshape(2, n_x // 2, n_active).copy()  # this gets modified
    fr, fl = fj.reshape(2, n_x // 2, n_active)
    wr, wl = wj.reshape(2, n_x // 2, n_active)

    # Non-finite function values and zero weights (boundary points) must be
    # excluded from the extreme-term search below.
    invalid_r = ~np.isfinite(fr) | (wr == 0)
    invalid_l = ~np.isfinite(fl) | (wl == 0)

    # integer index of the maximum abscissa at this level
    xr[invalid_r] = -np.inf
    ir = np.argmax(xr, axis=0, keepdims=True)
    # abscissa, function value, and weight at this index
    xr_max = np.take_along_axis(xr, ir, axis=0)[0]
    fr_max = np.take_along_axis(fr, ir, axis=0)[0]
    wr_max = np.take_along_axis(wr, ir, axis=0)[0]
    # boolean indices at which maximum abscissa at this level exceeds
    # the incumbent maximum abscissa (from all previous levels)
    j = xr_max > xr0
    # Update record of the incumbent abscissa, function value, and weight
    xr0[j] = xr_max[j]
    fr0[j] = fr_max[j]
    wr0[j] = wr_max[j]

    # integer index of the minimum abscissa at this level
    xl[invalid_l] = np.inf
    il = np.argmin(xl, axis=0, keepdims=True)
    # abscissa, function value, and weight at this index
    xl_min = np.take_along_axis(xl, il, axis=0)[0]
    fl_min = np.take_along_axis(fl, il, axis=0)[0]
    wl_min = np.take_along_axis(wl, il, axis=0)[0]
    # boolean indices at which minimum abscissa at this level is less than
    # the incumbent minimum abscissa (from all previous levels)
    j = xl_min < xl0
    # Update record of the incumbent abscissa, function value, and weight
    xl0[j] = xl_min[j]
    fl0[j] = fl_min[j]
    wl0[j] = wl_min[j]
    fj = fj.T

    # Compute the error estimate `d4` - the magnitude of the leftmost or
    # rightmost term, whichever is greater.
    flwl0 = fl0 + np.log(wl0) if work.log else fl0 * wl0  # leftmost term
    frwr0 = fr0 + np.log(wr0) if work.log else fr0 * wr0  # rightmost term
    magnitude = np.real if work.log else np.abs
    work.d4 = np.maximum(magnitude(flwl0), magnitude(frwr0))

    # There are two approaches to dealing with function values that are
    # numerically infinite due to approaching a singularity - zero them, or
    # replace them with the function value at the nearest non-infinite point.
    # [3] pg. 22 suggests the latter, so let's do that given that we have the
    # information.
    fr0b = np.broadcast_to(fr0[np.newaxis, :], fr.shape)
    fl0b = np.broadcast_to(fl0[np.newaxis, :], fl.shape)
    fr[invalid_r] = fr0b[invalid_r]
    fl[invalid_l] = fl0b[invalid_l]

    # When wj is zero, log emits a warning
    # (suppressed by the caller's errstate context)
    fjwj = fj + np.log(work.wj) if work.log else fj * work.wj

    # update integral estimate
    Sn = (special.logsumexp(fjwj + np.log(work.h), axis=-1) if work.log
          else np.sum(fjwj, axis=-1) * work.h)

    # Persist the updated extreme-term records for the next level.
    work.xr0, work.fr0, work.wr0 = xr0, fr0, wr0
    work.xl0, work.fl0, work.wl0 = xl0, fl0, wl0

    return fjwj, Sn
677
+
678
+
679
def _estimate_error(work):
    """Estimate the error of ``work.Sn`` according to [1] Section 5.

    Returns ``(rerr, aerr)`` — relative and absolute error estimates (logs
    of the errors when ``work.log`` is set). NaN is returned before enough
    levels have been computed to form an estimate.
    """
    if work.n == 0 or work.nit == 0:
        # The paper says to use "one" as the error before it can be calculated.
        # NaN seems to be more appropriate.
        nan = np.full_like(work.Sn, np.nan)
        return nan, nan

    indices = _pair_cache.indices

    n_active = len(work.Sn)  # number of active elements
    axis_kwargs = dict(axis=-1, keepdims=True)

    # With a jump start (starting at level higher than 0), we haven't
    # explicitly calculated the integral estimate at lower levels. But we have
    # all the function value-weight products, so we can compute the
    # lower-level estimates.
    if work.Sk.shape[-1] == 0:
        h = 2 * work.h  # step size at this level
        n_x = indices[work.n]  # number of abscissa up to this level
        # The right and left fjwj terms from all levels are concatenated along
        # the last axis. Get out only the terms up to this level.
        fjwj_rl = work.fjwj.reshape(n_active, 2, -1)
        fjwj = fjwj_rl[:, :, :n_x].reshape(n_active, 2*n_x)
        # Compute the Euler-Maclaurin sum at this level
        Snm1 = (special.logsumexp(fjwj, **axis_kwargs) + np.log(h) if work.log
                else np.sum(fjwj, **axis_kwargs) * h)
        work.Sk = np.concatenate((Snm1, work.Sk), axis=-1)

    if work.n == 1:
        # Only one level available: no difference to base an estimate on.
        nan = np.full_like(work.Sn, np.nan)
        return nan, nan

    # The paper says not to calculate the error for n<=2, but it's not clear
    # about whether it starts at level 0 or level 1. We start at level 0, so
    # why not compute the error beginning in level 2?
    if work.Sk.shape[-1] < 2:
        h = 4 * work.h  # step size at this level
        n_x = indices[work.n-1]  # number of abscissa up to this level
        # The right and left fjwj terms from all levels are concatenated along
        # the last axis. Get out only the terms up to this level.
        fjwj_rl = work.fjwj.reshape(len(work.Sn), 2, -1)
        fjwj = fjwj_rl[..., :n_x].reshape(n_active, 2*n_x)
        # Compute the Euler-Maclaurin sum at this level
        Snm2 = (special.logsumexp(fjwj, **axis_kwargs) + np.log(h) if work.log
                else np.sum(fjwj, **axis_kwargs) * h)
        work.Sk = np.concatenate((Snm2, work.Sk), axis=-1)

    # Estimates from the two preceding levels, for the difference terms below.
    Snm2 = work.Sk[..., -2]
    Snm1 = work.Sk[..., -1]

    e1 = work.eps

    if work.log:
        log_e1 = np.log(e1)
        # Currently, only real integrals are supported in log-scale. All
        # complex values have imaginary part in increments of pi*j, which just
        # carries sign information of the original integral, so use of
        # `np.real` here is equivalent to absolute value in real scale.
        d1 = np.real(special.logsumexp([work.Sn, Snm1 + work.pi*1j], axis=0))
        d2 = np.real(special.logsumexp([work.Sn, Snm2 + work.pi*1j], axis=0))
        d3 = log_e1 + np.max(np.real(work.fjwj), axis=-1)
        d4 = work.d4
        # Log-scale analogues of the four terms in [1] Section 5.
        aerr = np.max([d1 ** 2 / d2, 2 * d1, d3, d4], axis=0)
        rerr = np.maximum(log_e1, aerr - np.real(work.Sn))
    else:
        # Note: explicit computation of log10 of each of these is unnecessary.
        d1 = np.abs(work.Sn - Snm1)
        d2 = np.abs(work.Sn - Snm2)
        d3 = e1 * np.max(np.abs(work.fjwj), axis=-1)
        d4 = work.d4
        # If `d1` is 0, no need to warn. This does the right thing.
        # (division warnings suppressed by the caller's errstate context)
        aerr = np.max([d1**(np.log(d1)/np.log(d2)), d1**2, d3, d4], axis=0)
        rerr = np.maximum(e1, aerr/np.abs(work.Sn))
    return rerr, aerr.reshape(work.Sn.shape)
756
+
757
+
758
+ def _transform_integrals(a, b):
759
+ # Transform integrals to a form with finite a < b
760
+ # For b < a, we reverse the limits and will multiply the final result by -1
761
+ # For infinite limit on the right, we use the substitution x = 1/t - 1 + a
762
+ # For infinite limit on the left, we substitute x = -x and treat as above
763
+ # For infinite limits, we substitute x = t / (1-t**2)
764
+
765
+ negative = b < a
766
+ a[negative], b[negative] = b[negative], a[negative]
767
+
768
+ abinf = np.isinf(a) & np.isinf(b)
769
+ a[abinf], b[abinf] = -1, 1
770
+
771
+ ainf = np.isinf(a)
772
+ a[ainf], b[ainf] = -b[ainf], -a[ainf]
773
+
774
+ binf = np.isinf(b)
775
+ a0 = a.copy()
776
+ a[binf], b[binf] = 0, 1
777
+
778
+ return a, b, a0, negative, abinf, ainf, binf
779
+
780
+
781
+ def _tanhsinh_iv(f, a, b, log, maxfun, maxlevel, minlevel,
782
+ atol, rtol, args, preserve_shape, callback):
783
+ # Input validation and standardization
784
+
785
+ message = '`f` must be callable.'
786
+ if not callable(f):
787
+ raise ValueError(message)
788
+
789
+ message = 'All elements of `a` and `b` must be real numbers.'
790
+ a, b = np.broadcast_arrays(a, b)
791
+ if np.any(np.iscomplex(a)) or np.any(np.iscomplex(b)):
792
+ raise ValueError(message)
793
+
794
+ message = '`log` must be True or False.'
795
+ if log not in {True, False}:
796
+ raise ValueError(message)
797
+ log = bool(log)
798
+
799
+ if atol is None:
800
+ atol = -np.inf if log else 0
801
+
802
+ rtol_temp = rtol if rtol is not None else 0.
803
+
804
+ params = np.asarray([atol, rtol_temp, 0.])
805
+ message = "`atol` and `rtol` must be real numbers."
806
+ if not np.issubdtype(params.dtype, np.floating):
807
+ raise ValueError(message)
808
+
809
+ if log:
810
+ message = '`atol` and `rtol` may not be positive infinity.'
811
+ if np.any(np.isposinf(params)):
812
+ raise ValueError(message)
813
+ else:
814
+ message = '`atol` and `rtol` must be non-negative and finite.'
815
+ if np.any(params < 0) or np.any(np.isinf(params)):
816
+ raise ValueError(message)
817
+ atol = params[0]
818
+ rtol = rtol if rtol is None else params[1]
819
+
820
+ BIGINT = float(2**62)
821
+ if maxfun is None and maxlevel is None:
822
+ maxlevel = 10
823
+
824
+ maxfun = BIGINT if maxfun is None else maxfun
825
+ maxlevel = BIGINT if maxlevel is None else maxlevel
826
+
827
+ message = '`maxfun`, `maxlevel`, and `minlevel` must be integers.'
828
+ params = np.asarray([maxfun, maxlevel, minlevel])
829
+ if not (np.issubdtype(params.dtype, np.number)
830
+ and np.all(np.isreal(params))
831
+ and np.all(params.astype(np.int64) == params)):
832
+ raise ValueError(message)
833
+ message = '`maxfun`, `maxlevel`, and `minlevel` must be non-negative.'
834
+ if np.any(params < 0):
835
+ raise ValueError(message)
836
+ maxfun, maxlevel, minlevel = params.astype(np.int64)
837
+ minlevel = min(minlevel, maxlevel)
838
+
839
+ if not np.iterable(args):
840
+ args = (args,)
841
+
842
+ message = '`preserve_shape` must be True or False.'
843
+ if preserve_shape not in {True, False}:
844
+ raise ValueError(message)
845
+
846
+ if callback is not None and not callable(callback):
847
+ raise ValueError('`callback` must be callable.')
848
+
849
+ return (f, a, b, log, maxfun, maxlevel, minlevel,
850
+ atol, rtol, args, preserve_shape, callback)
851
+
852
+
853
+ def _logsumexp(x, axis=0):
854
+ # logsumexp raises with empty array
855
+ x = np.asarray(x)
856
+ shape = list(x.shape)
857
+ if shape[axis] == 0:
858
+ shape.pop(axis)
859
+ return np.full(shape, fill_value=-np.inf, dtype=x.dtype)
860
+ else:
861
+ return special.logsumexp(x, axis=axis)
862
+
863
+
864
+ def _nsum_iv(f, a, b, step, args, log, maxterms, atol, rtol):
865
+ # Input validation and standardization
866
+
867
+ message = '`f` must be callable.'
868
+ if not callable(f):
869
+ raise ValueError(message)
870
+
871
+ message = 'All elements of `a`, `b`, and `step` must be real numbers.'
872
+ a, b, step = np.broadcast_arrays(a, b, step)
873
+ dtype = np.result_type(a.dtype, b.dtype, step.dtype)
874
+ if not np.issubdtype(dtype, np.number) or np.issubdtype(dtype, np.complexfloating):
875
+ raise ValueError(message)
876
+
877
+ valid_a = np.isfinite(a)
878
+ valid_b = b >= a # NaNs will be False
879
+ valid_step = np.isfinite(step) & (step > 0)
880
+ valid_abstep = valid_a & valid_b & valid_step
881
+
882
+ message = '`log` must be True or False.'
883
+ if log not in {True, False}:
884
+ raise ValueError(message)
885
+
886
+ if atol is None:
887
+ atol = -np.inf if log else 0
888
+
889
+ rtol_temp = rtol if rtol is not None else 0.
890
+
891
+ params = np.asarray([atol, rtol_temp, 0.])
892
+ message = "`atol` and `rtol` must be real numbers."
893
+ if not np.issubdtype(params.dtype, np.floating):
894
+ raise ValueError(message)
895
+
896
+ if log:
897
+ message = '`atol`, `rtol` may not be positive infinity or NaN.'
898
+ if np.any(np.isposinf(params) | np.isnan(params)):
899
+ raise ValueError(message)
900
+ else:
901
+ message = '`atol`, and `rtol` must be non-negative and finite.'
902
+ if np.any((params < 0) | (~np.isfinite(params))):
903
+ raise ValueError(message)
904
+ atol = params[0]
905
+ rtol = rtol if rtol is None else params[1]
906
+
907
+ maxterms_int = int(maxterms)
908
+ if maxterms_int != maxterms or maxterms < 0:
909
+ message = "`maxterms` must be a non-negative integer."
910
+ raise ValueError(message)
911
+
912
+ if not np.iterable(args):
913
+ args = (args,)
914
+
915
+ return f, a, b, step, valid_abstep, args, log, maxterms_int, atol, rtol
916
+
917
+
918
+ def _nsum(f, a, b, step=1, args=(), log=False, maxterms=int(2**20), atol=None,
919
+ rtol=None):
920
+ r"""Evaluate a convergent sum.
921
+
922
+ For finite `b`, this evaluates::
923
+
924
+ f(a + np.arange(n)*step).sum()
925
+
926
+ where ``n = int((b - a) / step) + 1``. If `f` is smooth, positive, and
927
+ monotone decreasing, `b` may be infinite, in which case the infinite sum
928
+ is approximated using integration.
929
+
930
+ Parameters
931
+ ----------
932
+ f : callable
933
+ The function that evaluates terms to be summed. The signature must be::
934
+
935
+ f(x: ndarray, *args) -> ndarray
936
+
937
+ where each element of ``x`` is a finite real and ``args`` is a tuple,
938
+ which may contain an arbitrary number of arrays that are broadcastable
939
+ with `x`. `f` must represent a smooth, positive, and monotone decreasing
940
+ function of `x`; `_nsum` performs no checks to verify that these conditions
941
+ are met and may return erroneous results if they are violated.
942
+ a, b : array_like
943
+ Real lower and upper limits of summed terms. Must be broadcastable.
944
+ Each element of `a` must be finite and less than the corresponding
945
+ element in `b`, but elements of `b` may be infinite.
946
+ step : array_like
947
+ Finite, positive, real step between summed terms. Must be broadcastable
948
+ with `a` and `b`.
949
+ args : tuple, optional
950
+ Additional positional arguments to be passed to `f`. Must be arrays
951
+ broadcastable with `a`, `b`, and `step`. If the callable to be summed
952
+ requires arguments that are not broadcastable with `a`, `b`, and `step`,
953
+ wrap that callable with `f`. See Examples.
954
+ log : bool, default: False
955
+ Setting to True indicates that `f` returns the log of the terms
956
+ and that `atol` and `rtol` are expressed as the logs of the absolute
957
+ and relative errors. In this case, the result object will contain the
958
+ log of the sum and error. This is useful for summands for which
959
+ numerical underflow or overflow would lead to inaccuracies.
960
+ maxterms : int, default: 2**32
961
+ The maximum number of terms to evaluate when summing directly.
962
+ Additional function evaluations may be performed for input
963
+ validation and integral evaluation.
964
+ atol, rtol : float, optional
965
+ Absolute termination tolerance (default: 0) and relative termination
966
+ tolerance (default: ``eps**0.5``, where ``eps`` is the precision of
967
+ the result dtype), respectively. Must be non-negative
968
+ and finite if `log` is False, and must be expressed as the log of a
969
+ non-negative and finite number if `log` is True.
970
+
971
+ Returns
972
+ -------
973
+ res : _RichResult
974
+ An instance of `scipy._lib._util._RichResult` with the following
975
+ attributes. (The descriptions are written as though the values will be
976
+ scalars; however, if `func` returns an array, the outputs will be
977
+
978
+ arrays of the same shape.)
979
+ success : bool
980
+ ``True`` when the algorithm terminated successfully (status ``0``).
981
+ status : int
982
+ An integer representing the exit status of the algorithm.
983
+ ``0`` : The algorithm converged to the specified tolerances.
984
+ ``-1`` : Element(s) of `a`, `b`, or `step` are invalid
985
+ ``-2`` : Numerical integration reached its iteration limit; the sum may be divergent.
986
+ ``-3`` : A non-finite value was encountered.
987
+ sum : float
988
+ An estimate of the sum.
989
+ error : float
990
+ An estimate of the absolute error, assuming all terms are non-negative.
991
+ nfev : int
992
+ The number of points at which `func` was evaluated.
993
+
994
+ See Also
995
+ --------
996
+ tanhsinh
997
+
998
+ Notes
999
+ -----
1000
+ The method implemented for infinite summation is related to the integral
1001
+ test for convergence of an infinite series: assuming `step` size 1 for
1002
+ simplicity of exposition, the sum of a monotone decreasing function is bounded by
1003
+
1004
+ .. math::
1005
+
1006
+ \int_u^\infty f(x) dx \leq \sum_{k=u}^\infty f(k) \leq \int_u^\infty f(x) dx + f(u)
1007
+
1008
+ Let :math:`a` represent `a`, :math:`n` represent `maxterms`, :math:`\epsilon_a`
1009
+ represent `atol`, and :math:`\epsilon_r` represent `rtol`.
1010
+ The implementation first evaluates the integral :math:`S_l=\int_a^\infty f(x) dx`
1011
+ as a lower bound of the infinite sum. Then, it seeks a value :math:`c > a` such
1012
+ that :math:`f(c) < \epsilon_a + S_l \epsilon_r`, if it exists; otherwise,
1013
+ let :math:`c = a + n`. Then the infinite sum is approximated as
1014
+
1015
+ .. math::
1016
+
1017
+ \sum_{k=a}^{c-1} f(k) + \int_c^\infty f(x) dx + f(c)/2
1018
+
1019
+ and the reported error is :math:`f(c)/2` plus the error estimate of
1020
+ numerical integration. The approach described above is generalized for non-unit
1021
+ `step` and finite `b` that is too large for direct evaluation of the sum,
1022
+ i.e. ``b - a + 1 > maxterms``.
1023
+
1024
+ References
1025
+ ----------
1026
+ [1] Wikipedia. "Integral test for convergence."
1027
+ https://en.wikipedia.org/wiki/Integral_test_for_convergence
1028
+
1029
+ Examples
1030
+ --------
1031
+ Compute the infinite sum of the reciprocals of squared integers.
1032
+
1033
+ >>> import numpy as np
1034
+ >>> from scipy.integrate._tanhsinh import _nsum
1035
+ >>> res = _nsum(lambda k: 1/k**2, 1, np.inf, maxterms=1e3)
1036
+ >>> ref = np.pi**2/6 # true value
1037
+ >>> res.error # estimated error
1038
+ 4.990014980029223e-07
1039
+ >>> (res.sum - ref)/ref # true error
1040
+ -1.0101760641302586e-10
1041
+ >>> res.nfev # number of points at which callable was evaluated
1042
+ 1142
1043
+
1044
+ Compute the infinite sums of the reciprocals of integers raised to powers ``p``.
1045
+
1046
+ >>> from scipy import special
1047
+ >>> p = np.arange(2, 10)
1048
+ >>> res = _nsum(lambda k, p: 1/k**p, 1, np.inf, maxterms=1e3, args=(p,))
1049
+ >>> ref = special.zeta(p, 1)
1050
+ >>> np.allclose(res.sum, ref)
1051
+ True
1052
+
1053
+ """ # noqa: E501
1054
+ # Potential future work:
1055
+ # - more careful testing of when `b` is slightly less than `a` plus an
1056
+ # integer multiple of step (needed before this is public)
1057
+ # - improve error estimate of `_direct` sum
1058
+ # - add other methods for convergence acceleration (Richardson, epsilon)
1059
+ # - support infinite lower limit?
1060
+ # - support negative monotone increasing functions?
1061
+ # - b < a / negative step?
1062
+ # - complex-valued function?
1063
+ # - check for violations of monotonicity?
1064
+
1065
+ # Function-specific input validation / standardization
1066
+ tmp = _nsum_iv(f, a, b, step, args, log, maxterms, atol, rtol)
1067
+ f, a, b, step, valid_abstep, args, log, maxterms, atol, rtol = tmp
1068
+
1069
+ # Additional elementwise algorithm input validation / standardization
1070
+ tmp = eim._initialize(f, (a,), args, complex_ok=False)
1071
+ f, xs, fs, args, shape, dtype = tmp
1072
+
1073
+ # Finish preparing `a`, `b`, and `step` arrays
1074
+ a = xs[0]
1075
+ b = np.broadcast_to(b, shape).ravel().astype(dtype)
1076
+ step = np.broadcast_to(step, shape).ravel().astype(dtype)
1077
+ valid_abstep = np.broadcast_to(valid_abstep, shape).ravel()
1078
+ nterms = np.floor((b - a) / step)
1079
+ b = a + nterms*step
1080
+
1081
+ # Define constants
1082
+ eps = np.finfo(dtype).eps
1083
+ zero = np.asarray(-np.inf if log else 0, dtype=dtype)[()]
1084
+ if rtol is None:
1085
+ rtol = 0.5*np.log(eps) if log else eps**0.5
1086
+ constants = (dtype, log, eps, zero, rtol, atol, maxterms)
1087
+
1088
+ # Prepare result arrays
1089
+ S = np.empty_like(a)
1090
+ E = np.empty_like(a)
1091
+ status = np.zeros(len(a), dtype=int)
1092
+ nfev = np.ones(len(a), dtype=int) # one function evaluation above
1093
+
1094
+ # Branch for direct sum evaluation / integral approximation / invalid input
1095
+ i1 = (nterms + 1 <= maxterms) & valid_abstep
1096
+ i2 = (nterms + 1 > maxterms) & valid_abstep
1097
+ i3 = ~valid_abstep
1098
+
1099
+ if np.any(i1):
1100
+ args_direct = [arg[i1] for arg in args]
1101
+ tmp = _direct(f, a[i1], b[i1], step[i1], args_direct, constants)
1102
+ S[i1], E[i1] = tmp[:-1]
1103
+ nfev[i1] += tmp[-1]
1104
+ status[i1] = -3 * (~np.isfinite(S[i1]))
1105
+
1106
+ if np.any(i2):
1107
+ args_indirect = [arg[i2] for arg in args]
1108
+ tmp = _integral_bound(f, a[i2], b[i2], step[i2], args_indirect, constants)
1109
+ S[i2], E[i2], status[i2] = tmp[:-1]
1110
+ nfev[i2] += tmp[-1]
1111
+
1112
+ if np.any(i3):
1113
+ S[i3], E[i3] = np.nan, np.nan
1114
+ status[i3] = -1
1115
+
1116
+ # Return results
1117
+ S, E = S.reshape(shape)[()], E.reshape(shape)[()]
1118
+ status, nfev = status.reshape(shape)[()], nfev.reshape(shape)[()]
1119
+ return _RichResult(sum=S, error=E, status=status, success=status == 0,
1120
+ nfev=nfev)
1121
+
1122
+
1123
+ def _direct(f, a, b, step, args, constants, inclusive=True):
1124
+ # Directly evaluate the sum.
1125
+
1126
+ # When used in the context of distributions, `args` would contain the
1127
+ # distribution parameters. We have broadcasted for simplicity, but we could
1128
+ # reduce function evaluations when distribution parameters are the same but
1129
+ # sum limits differ. Roughly:
1130
+ # - compute the function at all points between min(a) and max(b),
1131
+ # - compute the cumulative sum,
1132
+ # - take the difference between elements of the cumulative sum
1133
+ # corresponding with b and a.
1134
+ # This is left to future enhancement
1135
+
1136
+ dtype, log, eps, zero, _, _, _ = constants
1137
+
1138
+ # To allow computation in a single vectorized call, find the maximum number
1139
+ # of points (over all slices) at which the function needs to be evaluated.
1140
+ # Note: if `inclusive` is `True`, then we want `1` more term in the sum.
1141
+ # I didn't think it was great style to use `True` as `1` in Python, so I
1142
+ # explicitly converted it to an `int` before using it.
1143
+ inclusive_adjustment = int(inclusive)
1144
+ steps = np.round((b - a) / step) + inclusive_adjustment
1145
+ # Equivalently, steps = np.round((b - a) / step) + inclusive
1146
+ max_steps = int(np.max(steps))
1147
+
1148
+ # In each slice, the function will be evaluated at the same number of points,
1149
+ # but excessive points (those beyond the right sum limit `b`) are replaced
1150
+ # with NaN to (potentially) reduce the time of these unnecessary calculations.
1151
+ # Use a new last axis for these calculations for consistency with other
1152
+ # elementwise algorithms.
1153
+ a2, b2, step2 = a[:, np.newaxis], b[:, np.newaxis], step[:, np.newaxis]
1154
+ args2 = [arg[:, np.newaxis] for arg in args]
1155
+ ks = a2 + np.arange(max_steps, dtype=dtype) * step2
1156
+ i_nan = ks >= (b2 + inclusive_adjustment*step2/2)
1157
+ ks[i_nan] = np.nan
1158
+ fs = f(ks, *args2)
1159
+
1160
+ # The function evaluated at NaN is NaN, and NaNs are zeroed in the sum.
1161
+ # In some cases it may be faster to loop over slices than to vectorize
1162
+ # like this. This is an optimization that can be added later.
1163
+ fs[i_nan] = zero
1164
+ nfev = max_steps - i_nan.sum(axis=-1)
1165
+ S = _logsumexp(fs, axis=-1) if log else np.sum(fs, axis=-1)
1166
+ # Rough, non-conservative error estimate. See gh-19667 for improvement ideas.
1167
+ E = np.real(S) + np.log(eps) if log else eps * abs(S)
1168
+ return S, E, nfev
1169
+
1170
+
1171
def _integral_bound(f, a, b, step, args, constants):
    """Estimate the sum using the integral-test bracketing.

    A direct sum of the leading terms is combined with a tanh-sinh estimate
    of the integral of the remaining terms plus half the boundary terms.
    Returns ``(sum, error, status, nfev)``.
    """
    dtype, log, _, _, rtol, atol, maxterms = constants
    log2 = np.log(2, dtype=dtype)

    # Get a lower bound on the sum and compute effective absolute tolerance
    lb = _tanhsinh(f, a, b, args=args, atol=atol, rtol=rtol, log=log)
    tol = np.broadcast_to(atol, lb.integral.shape)
    tol = _logsumexp((tol, rtol + lb.integral)) if log else tol + rtol*lb.integral
    i_skip = lb.status < 0  # avoid unnecessary f_evals if integral is divergent
    tol[i_skip] = np.nan
    status = lb.status

    # As in `_direct`, we'll need a temporary new axis for points
    # at which to evaluate the function. Append axis at the end for
    # consistency with other elementwise algorithms.
    a2 = a[..., np.newaxis]
    step2 = step[..., np.newaxis]
    args2 = [arg[..., np.newaxis] for arg in args]

    # Find the location of a term that is less than the tolerance (if possible)
    # by probing geometrically spaced candidate term counts up to `maxterms`.
    log2maxterms = np.floor(np.log2(maxterms)) if maxterms else 0
    n_steps = np.concatenate([2**np.arange(0, log2maxterms), [maxterms]], dtype=dtype)
    nfev = len(n_steps)
    ks = a2 + n_steps * step2
    fks = f(ks, *args2)
    # Count of candidates still above tolerance; this is the index of the
    # first below-tolerance candidate assuming the terms are monotone
    # decreasing -- NOTE(review): monotonicity is not checked here.
    nt = np.minimum(np.sum(fks > tol[:, np.newaxis], axis=-1), n_steps.shape[-1]-1)
    n_steps = n_steps[nt]

    # Directly evaluate the sum up to this term
    k = a + n_steps * step
    left, left_error, left_nfev = _direct(f, a, k, step, args,
                                          constants, inclusive=False)
    i_skip |= np.isposinf(left)  # if sum is not finite, no sense in continuing
    status[np.isposinf(left)] = -3
    k[i_skip] = np.nan

    # Use integration to estimate the remaining sum
    # Possible optimization for future work: if there were no terms less than
    # the tolerance, there is no need to compute the integral to better accuracy.
    # Something like:
    # atol = np.maximum(atol, np.minimum(fk/2 - fb/2))
    # rtol = np.maximum(rtol, np.minimum((fk/2 - fb/2)/left))
    # where `fk`/`fb` are currently calculated below.
    right = _tanhsinh(f, k, b, args=args, atol=atol, rtol=rtol, log=log)

    # Calculate the full estimate and error from the pieces
    fk = fks[np.arange(len(fks)), nt]
    fb = f(b, *args)
    nfev += 1
    if log:
        # In log space, addition becomes logsumexp and the endpoint
        # corrections f(k)/2, f(b)/2 become fk - log(2), fb - log(2);
        # the pi*1j term encodes subtraction of fb/2 in the error.
        log_step = np.log(step)
        S_terms = (left, right.integral - log_step, fk - log2, fb - log2)
        S = _logsumexp(S_terms, axis=0)
        E_terms = (left_error, right.error - log_step, fk-log2, fb-log2+np.pi*1j)
        E = _logsumexp(E_terms, axis=0).real
    else:
        S = left + right.integral/step + fk/2 + fb/2
        E = left_error + right.error/step + fk/2 - fb/2
    status[~i_skip] = right.status[~i_skip]
    return S, E, status, left_nfev + right.nfev + nfev + lb.nfev
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_test_multivariate.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (16.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_test_odeint_banded.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (109 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/integrate/_vode.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (166 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/integrate/dop.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.

from scipy._lib.deprecation import _sub_module_deprecation

# Names historically exposed by `scipy.integrate.dop`.
__all__ = ['dopri5', 'dop853']  # noqa: F822


def __dir__():
    """Limit introspection to the deprecated public names."""
    return __all__


def __getattr__(name):
    """Warn about deprecation and forward `name` to the private `_dop` module."""
    return _sub_module_deprecation(
        sub_package="integrate", module="dop",
        private_modules=["_dop"], all=__all__, attribute=name,
    )
env-llmeval/lib/python3.10/site-packages/scipy/integrate/lsoda.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.

from scipy._lib.deprecation import _sub_module_deprecation

# Name historically exposed by `scipy.integrate.lsoda`.
__all__ = ['lsoda']  # noqa: F822


def __dir__():
    """Limit introspection to the deprecated public names."""
    return __all__


def __getattr__(name):
    """Warn about deprecation and forward `name` to the private `_lsoda` module."""
    return _sub_module_deprecation(
        sub_package="integrate", module="lsoda",
        private_modules=["_lsoda"], all=__all__, attribute=name,
    )
env-llmeval/lib/python3.10/site-packages/scipy/integrate/odepack.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.integrate` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

__all__ = ['odeint', 'ODEintWarning']  # noqa: F822


def __dir__():
    """Limit introspection to the deprecated public names."""
    return __all__


def __getattr__(name):
    """Warn about deprecation and forward `name` to the private module."""
    return _sub_module_deprecation(
        sub_package="integrate", module="odepack",
        private_modules=["_odepack_py"], all=__all__, attribute=name,
    )
env-llmeval/lib/python3.10/site-packages/scipy/integrate/quadpack.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.integrate` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

# Names historically exposed by `scipy.integrate.quadpack`.
__all__ = [  # noqa: F822
    "quad",
    "dblquad",
    "tplquad",
    "nquad",
    "IntegrationWarning",
    "error",
]


def __dir__():
    """Limit introspection to the deprecated public names."""
    return __all__


def __getattr__(name):
    """Warn about deprecation and forward `name` to the private module."""
    return _sub_module_deprecation(
        sub_package="integrate", module="quadpack",
        private_modules=["_quadpack_py"], all=__all__, attribute=name,
    )
env-llmeval/lib/python3.10/site-packages/scipy/integrate/vode.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.

from scipy._lib.deprecation import _sub_module_deprecation

# Names historically exposed by `scipy.integrate.vode`.
__all__ = ['dvode', 'zvode']  # noqa: F822


def __dir__():
    """Limit introspection to the deprecated public names."""
    return __all__


def __getattr__(name):
    """Warn about deprecation and forward `name` to the private `_vode` module."""
    return _sub_module_deprecation(
        sub_package="integrate", module="vode",
        private_modules=["_vode"], all=__all__, attribute=name,
    )
env-llmeval/lib/python3.10/site-packages/scipy/io/__pycache__/mmio.cpython-310.pyc ADDED
Binary file (627 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/__init__.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Public API of the Harwell-Boeing IO subpackage.
from .hb import (MalformedHeader, hb_read, hb_write, HBInfo,
                 HBFile, HBMatrixType)
from ._fortran_format_parser import (FortranFormatParser, IntFormat,
                                     ExpFormat, BadFortranFormat)

# Deprecated namespaces, to be removed in v2.0.0
from . import hb

__all__ = [
    'MalformedHeader', 'hb_read', 'hb_write', 'HBInfo',
    'HBFile', 'HBMatrixType', 'FortranFormatParser', 'IntFormat',
    'ExpFormat', 'BadFortranFormat', 'hb'
]

# Expose `test()` so this subpackage's test suite can be run directly.
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/_fortran_format_parser.py ADDED
@@ -0,0 +1,309 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Preliminary module to handle Fortran formats for IO. Does not use this outside
3
+ scipy.sparse io for now, until the API is deemed reasonable.
4
+
5
+ The *Format classes handle conversion between Fortran and Python format, and
6
+ FortranFormatParser can create *Format instances from raw Fortran format
7
+ strings (e.g. '(3I4)', '(10I3)', etc...)
8
+ """
9
+ import re
10
+
11
+ import numpy as np
12
+
13
+
14
__all__ = ["BadFortranFormat", "FortranFormatParser", "IntFormat", "ExpFormat"]


# Token names and the regex fragment each one matches; the tokenizer tries
# them at every position in this (insertion) order.
TOKENS = {
    "LPAR": r"\(",
    "RPAR": r"\)",
    "INT_ID": r"I",
    "EXP_ID": r"E",
    "INT": r"\d+",
    "DOT": r"\.",
}
25
+
26
+
27
class BadFortranFormat(SyntaxError):
    """Raised when a string cannot be parsed as a supported Fortran format."""
    pass
29
+
30
+
31
def number_digits(n):
    """Return the number of decimal digits needed to print ``abs(n)``."""
    magnitude = np.abs(n)
    return int(np.floor(np.log10(magnitude)) + 1)
33
+
34
+
35
class IntFormat:
    """Fortran ``I`` (integer) edit descriptor, e.g. ``(3I4)``."""

    @classmethod
    def from_number(cls, n, min=None):
        """Given an integer, returns a "reasonable" IntFormat instance to represent
        any number between 0 and n if n > 0, -n and n if n < 0

        Parameters
        ----------
        n : int
            max number one wants to be able to represent
        min : int
            minimum number of characters to use for the format

        Returns
        -------
        res : IntFormat
            IntFormat instance with reasonable (see Notes) computed width

        Notes
        -----
        Reasonable should be understood as the minimal string length necessary
        without losing precision. For example, IntFormat.from_number(1) will
        return an IntFormat instance of width 2, so that any 0 and 1 may be
        represented as 1-character strings without loss of information.
        """
        # One column beyond the digit count leaves a separating blank;
        # negative values need one more column for the sign.
        width = number_digits(n) + 1
        if n < 0:
            width += 1
        # As many repeats as fit on a standard 80-column line.
        repeat = 80 // width
        return cls(width, min, repeat=repeat)

    def __init__(self, width, min=None, repeat=None):
        self.width = width
        self.repeat = repeat
        self.min = min

    def _descriptor(self):
        # Descriptor core shared by __repr__ and fortran_format,
        # e.g. "3I4" or "I6.2".
        parts = []
        if self.repeat:
            parts.append("%d" % self.repeat)
        parts.append("I%d" % self.width)
        if self.min:
            parts.append(".%d" % self.min)
        return "".join(parts)

    def __repr__(self):
        return "IntFormat(" + self._descriptor() + ")"

    @property
    def fortran_format(self):
        """The Fortran string for this format, e.g. ``(3I4)``."""
        return "(" + self._descriptor() + ")"

    @property
    def python_format(self):
        """Equivalent %-style Python format string, e.g. ``%4d``."""
        return "%" + str(self.width) + "d"
93
+
94
+
95
class ExpFormat:
    """Fortran ``E`` (exponential) edit descriptor, e.g. ``(3E24.16)``."""

    @classmethod
    def from_number(cls, n, min=None):
        """Given a float number, returns a "reasonable" ExpFormat instance to
        represent any number between -n and n.

        Parameters
        ----------
        n : float
            max number one wants to be able to represent
        min : int
            minimum number of characters to use for the format

        Returns
        -------
        res : ExpFormat
            ExpFormat instance with reasonable (see Notes) computed width

        Notes
        -----
        Reasonable should be understood as the minimal string length necessary
        to avoid losing precision.
        """
        # Characters needed per value: sign + leading digit + "." +
        # fractional digits + "E" + exponent sign + exponent digits.
        finfo = np.finfo(n.dtype)
        # Number of digits for the fractional part
        n_prec = finfo.precision + 1
        # Number of digits for the exponent part
        n_exp = number_digits(np.max(np.abs([finfo.maxexp, finfo.minexp])))
        width = 1 + 1 + n_prec + 1 + n_exp + 1
        if n < 0:
            width += 1
        repeat = int(np.floor(80 / width))
        return cls(width, n_prec, min, repeat=repeat)

    def __init__(self, width, significand, min=None, repeat=None):
        """\
        Parameters
        ----------
        width : int
            number of characters taken by the string (includes space).
        """
        self.width = width
        self.significand = significand
        self.repeat = repeat
        self.min = min

    def _descriptor(self):
        # Descriptor core shared by __repr__ and fortran_format,
        # e.g. "3E15.8".
        parts = []
        if self.repeat:
            parts.append("%d" % self.repeat)
        parts.append("E%d.%d" % (self.width, self.significand))
        if self.min:
            parts.append("E%d" % self.min)
        return "".join(parts)

    def __repr__(self):
        return "ExpFormat(" + self._descriptor() + ")"

    @property
    def fortran_format(self):
        """The Fortran string for this format, e.g. ``(3E15.8)``."""
        return "(" + self._descriptor() + ")"

    @property
    def python_format(self):
        """Equivalent %-style Python format string, e.g. ``%14.8E``."""
        return "%" + str(self.width - 1) + "." + str(self.significand) + "E"
166
+
167
+
168
class Token:
    """One lexical token: its type name, the matched text, and the
    tokenizer position just after the match."""

    def __init__(self, type, value, pos):
        self.type = type
        self.value = value
        self.pos = pos

    def __str__(self):
        return "Token('%s', \"%s\")" % (self.type, self.value)

    def __repr__(self):
        return self.__str__()
179
+
180
+
181
class Tokenizer:
    """Minimal regex-driven lexer for Fortran format strings."""

    def __init__(self):
        # Keep token names and their compiled patterns in matching order.
        self.tokens = list(TOKENS.keys())
        self.res = [re.compile(TOKENS[i]) for i in self.tokens]

    def input(self, s):
        """Reset the lexer to scan the string `s` from its beginning."""
        self.data = s
        self.curpos = 0
        self.len = len(s)

    def next_token(self):
        """Return the next Token, or None once the input is exhausted.

        Raises
        ------
        SyntaxError
            If no token pattern matches at the current position.
        """
        pos = self.curpos
        if pos >= self.len:
            return None
        for name, regex in zip(self.tokens, self.res):
            m = regex.match(self.data, pos)
            if m is not None:
                self.curpos = m.end()
                return Token(name, m.group(), self.curpos)
        raise SyntaxError("Unknown character at position %d (%s)"
                          % (self.curpos, self.data[pos]))
204
+
205
+
206
+ # Grammar for fortran format:
207
+ # format : LPAR format_string RPAR
208
+ # format_string : repeated | simple
209
+ # repeated : repeat simple
210
+ # simple : int_fmt | exp_fmt
211
+ # int_fmt : INT_ID width
212
+ # exp_fmt : simple_exp_fmt
213
+ # simple_exp_fmt : EXP_ID width DOT significand
214
+ # extended_exp_fmt : EXP_ID width DOT significand EXP_ID ndigits
215
+ # repeat : INT
216
+ # width : INT
217
+ # significand : INT
218
+ # ndigits : INT
219
+
220
# Naive fortran formatter - parser is hand-made
class FortranFormatParser:
    """Parser for Fortran format strings. The parse method returns a *Format
    instance.

    Notes
    -----
    Only ExpFormat (exponential format for floating values) and IntFormat
    (integer format) for now.
    """
    def __init__(self):
        self.tokenizer = Tokenizer()

    def parse(self, s):
        """Parse format string `s`; return an IntFormat or ExpFormat.

        Raises BadFortranFormat (wrapping the underlying SyntaxError) on
        strings not matching the supported grammar.
        """
        self.tokenizer.input(s)

        tokens = []

        try:
            # Tokenize the whole string up front; next_token() returns None
            # at end of input.
            while True:
                t = self.tokenizer.next_token()
                if t is None:
                    break
                else:
                    tokens.append(t)
            return self._parse_format(tokens)
        except SyntaxError as e:
            raise BadFortranFormat(str(e)) from e

    def _get_min(self, tokens):
        # Consume ".<INT>" and return the INT's text (the minimum digits).
        next = tokens.pop(0)
        if not next.type == "DOT":
            raise SyntaxError()
        next = tokens.pop(0)
        return next.value

    def _expect(self, token, tp):
        # Raise if `token` is not of the expected type `tp`.
        if not token.type == tp:
            raise SyntaxError()

    def _parse_format(self, tokens):
        # Parse "LPAR [repeat] (int_fmt | exp_fmt) RPAR" from the token list.
        if not tokens[0].type == "LPAR":
            raise SyntaxError("Expected left parenthesis at position "
                              "%d (got '%s')" % (0, tokens[0].value))
        elif not tokens[-1].type == "RPAR":
            raise SyntaxError("Expected right parenthesis at position "
                              "%d (got '%s')" % (len(tokens), tokens[-1].value))

        tokens = tokens[1:-1]
        types = [t.type for t in tokens]
        # A leading INT is the repeat count (e.g. the 3 in "(3I4)").
        if types[0] == "INT":
            repeat = int(tokens.pop(0).value)
        else:
            repeat = None

        next = tokens.pop(0)
        if next.type == "INT_ID":
            # int_fmt : INT_ID width [DOT min]
            next = self._next(tokens, "INT")
            width = int(next.value)
            if tokens:
                min = int(self._get_min(tokens))
            else:
                min = None
            return IntFormat(width, min, repeat)
        elif next.type == "EXP_ID":
            # exp_fmt : EXP_ID width DOT significand [EXP_ID ndigits]
            next = self._next(tokens, "INT")
            width = int(next.value)

            next = self._next(tokens, "DOT")

            next = self._next(tokens, "INT")
            significand = int(next.value)

            if tokens:
                next = self._next(tokens, "EXP_ID")

                next = self._next(tokens, "INT")
                min = int(next.value)
            else:
                min = None
            return ExpFormat(width, significand, min, repeat)
        else:
            raise SyntaxError("Invalid formatter type %s" % next.value)

    def _next(self, tokens, tp):
        # Pop and return the next token, checking it has type `tp`.
        if not len(tokens) > 0:
            raise SyntaxError()
        next = tokens.pop(0)
        self._expect(next, tp)
        return next
env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/hb.py ADDED
@@ -0,0 +1,571 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Implementation of Harwell-Boeing read/write.
3
+
4
+ At the moment not the full Harwell-Boeing format is supported. Supported
5
+ features are:
6
+
7
+ - assembled, non-symmetric, real matrices
8
+ - integer for pointer/indices
9
+ - exponential format for float values, and int format
10
+
11
+ """
12
+ # TODO:
13
+ # - Add more support (symmetric/complex matrices, non-assembled matrices ?)
14
+
15
+ # XXX: reading is reasonably efficient (>= 85 % is in numpy.fromstring), but
16
+ # takes a lot of memory. Being faster would require compiled code.
17
+ # write is not efficient. Although not a terribly exciting task,
18
+ # having reusable facilities to efficiently read/write fortran-formatted files
19
+ # would be useful outside this module.
20
+
21
+ import warnings
22
+
23
+ import numpy as np
24
+ from scipy.sparse import csc_matrix
25
+ from ._fortran_format_parser import FortranFormatParser, IntFormat, ExpFormat
26
+
27
+ __all__ = ["MalformedHeader", "hb_read", "hb_write", "HBInfo", "HBFile",
28
+ "HBMatrixType"]
29
+
30
+
31
class MalformedHeader(Exception):
    """Raised when a Harwell-Boeing header does not follow the specification."""
    pass
33
+
34
+
35
class LineOverflow(Warning):
    """Warned when data written to a fixed-width HB field would overflow it."""
    pass
37
+
38
+
39
+ def _nbytes_full(fmt, nlines):
40
+ """Return the number of bytes to read to get every full lines for the
41
+ given parsed fortran format."""
42
+ return (fmt.repeat * fmt.width + 1) * (nlines - 1)
43
+
44
+
45
+ class HBInfo:
46
    @classmethod
    def from_data(cls, m, title="Default title", key="0", mxtype=None, fmt=None):
        """Create a HBInfo instance from an existing sparse matrix.

        Parameters
        ----------
        m : sparse matrix
            the HBInfo instance will derive its parameters from m
        title : str
            Title to put in the HB header
        key : str
            Key
        mxtype : HBMatrixType
            type of the input matrix
        fmt : dict
            not implemented

        Returns
        -------
        hb_info : HBInfo instance
        """
        # HB data is laid out as CSC-style pointer/indices/values arrays.
        m = m.tocsc(copy=False)

        pointer = m.indptr
        indices = m.indices
        values = m.data

        nrows, ncols = m.shape
        nnon_zeros = m.nnz

        if fmt is None:
            # +1 because HB use one-based indexing (Fortran), and we will write
            # the indices /pointer as such
            pointer_fmt = IntFormat.from_number(np.max(pointer+1))
            indices_fmt = IntFormat.from_number(np.max(indices+1))

            # The negated maximum reserves room for a sign character in the
            # value format.
            if values.dtype.kind in np.typecodes["AllFloat"]:
                values_fmt = ExpFormat.from_number(-np.max(np.abs(values)))
            elif values.dtype.kind in np.typecodes["AllInteger"]:
                values_fmt = IntFormat.from_number(-np.max(np.abs(values)))
            else:
                message = f"type {values.dtype.kind} not implemented yet"
                raise NotImplementedError(message)
        else:
            raise NotImplementedError("fmt argument not supported yet.")

        if mxtype is None:
            if not np.isrealobj(values):
                raise ValueError("Complex values not supported yet")
            if values.dtype.kind in np.typecodes["AllInteger"]:
                tp = "integer"
            elif values.dtype.kind in np.typecodes["AllFloat"]:
                tp = "real"
            else:
                raise NotImplementedError("type %s for values not implemented"
                                          % values.dtype)
            mxtype = HBMatrixType(tp, "unsymmetric", "assembled")
        else:
            raise ValueError("mxtype argument not handled yet.")

        def _nlines(fmt, size):
            # Number of output lines needed for `size` fields, rounding up.
            nlines = size // fmt.repeat
            if nlines * fmt.repeat != size:
                nlines += 1
            return nlines

        pointer_nlines = _nlines(pointer_fmt, pointer.size)
        indices_nlines = _nlines(indices_fmt, indices.size)
        values_nlines = _nlines(values_fmt, values.size)

        total_nlines = pointer_nlines + indices_nlines + values_nlines

        return cls(title, key,
                   total_nlines, pointer_nlines, indices_nlines, values_nlines,
                   mxtype, nrows, ncols, nnon_zeros,
                   pointer_fmt.fortran_format, indices_fmt.fortran_format,
                   values_fmt.fortran_format)
123
+
124
+ @classmethod
125
+ def from_file(cls, fid):
126
+ """Create a HBInfo instance from a file object containing a matrix in the
127
+ HB format.
128
+
129
+ Parameters
130
+ ----------
131
+ fid : file-like matrix
132
+ File or file-like object containing a matrix in the HB format.
133
+
134
+ Returns
135
+ -------
136
+ hb_info : HBInfo instance
137
+ """
138
+ # First line
139
+ line = fid.readline().strip("\n")
140
+ if not len(line) > 72:
141
+ raise ValueError("Expected at least 72 characters for first line, "
142
+ "got: \n%s" % line)
143
+ title = line[:72]
144
+ key = line[72:]
145
+
146
+ # Second line
147
+ line = fid.readline().strip("\n")
148
+ if not len(line.rstrip()) >= 56:
149
+ raise ValueError("Expected at least 56 characters for second line, "
150
+ "got: \n%s" % line)
151
+ total_nlines = _expect_int(line[:14])
152
+ pointer_nlines = _expect_int(line[14:28])
153
+ indices_nlines = _expect_int(line[28:42])
154
+ values_nlines = _expect_int(line[42:56])
155
+
156
+ rhs_nlines = line[56:72].strip()
157
+ if rhs_nlines == '':
158
+ rhs_nlines = 0
159
+ else:
160
+ rhs_nlines = _expect_int(rhs_nlines)
161
+ if not rhs_nlines == 0:
162
+ raise ValueError("Only files without right hand side supported for "
163
+ "now.")
164
+
165
+ # Third line
166
+ line = fid.readline().strip("\n")
167
+ if not len(line) >= 70:
168
+ raise ValueError("Expected at least 72 character for third line, got:\n"
169
+ "%s" % line)
170
+
171
+ mxtype_s = line[:3].upper()
172
+ if not len(mxtype_s) == 3:
173
+ raise ValueError("mxtype expected to be 3 characters long")
174
+
175
+ mxtype = HBMatrixType.from_fortran(mxtype_s)
176
+ if mxtype.value_type not in ["real", "integer"]:
177
+ raise ValueError("Only real or integer matrices supported for "
178
+ "now (detected %s)" % mxtype)
179
+ if not mxtype.structure == "unsymmetric":
180
+ raise ValueError("Only unsymmetric matrices supported for "
181
+ "now (detected %s)" % mxtype)
182
+ if not mxtype.storage == "assembled":
183
+ raise ValueError("Only assembled matrices supported for now")
184
+
185
+ if not line[3:14] == " " * 11:
186
+ raise ValueError("Malformed data for third line: %s" % line)
187
+
188
+ nrows = _expect_int(line[14:28])
189
+ ncols = _expect_int(line[28:42])
190
+ nnon_zeros = _expect_int(line[42:56])
191
+ nelementals = _expect_int(line[56:70])
192
+ if not nelementals == 0:
193
+ raise ValueError("Unexpected value %d for nltvl (last entry of line 3)"
194
+ % nelementals)
195
+
196
+ # Fourth line
197
+ line = fid.readline().strip("\n")
198
+
199
+ ct = line.split()
200
+ if not len(ct) == 3:
201
+ raise ValueError("Expected 3 formats, got %s" % ct)
202
+
203
+ return cls(title, key,
204
+ total_nlines, pointer_nlines, indices_nlines, values_nlines,
205
+ mxtype, nrows, ncols, nnon_zeros,
206
+ ct[0], ct[1], ct[2],
207
+ rhs_nlines, nelementals)
208
+
209
+ def __init__(self, title, key,
210
+ total_nlines, pointer_nlines, indices_nlines, values_nlines,
211
+ mxtype, nrows, ncols, nnon_zeros,
212
+ pointer_format_str, indices_format_str, values_format_str,
213
+ right_hand_sides_nlines=0, nelementals=0):
214
+ """Do not use this directly, but the class ctrs (from_* functions)."""
215
+ self.title = title
216
+ self.key = key
217
+ if title is None:
218
+ title = "No Title"
219
+ if len(title) > 72:
220
+ raise ValueError("title cannot be > 72 characters")
221
+
222
+ if key is None:
223
+ key = "|No Key"
224
+ if len(key) > 8:
225
+ warnings.warn("key is > 8 characters (key is %s)" % key,
226
+ LineOverflow, stacklevel=3)
227
+
228
+ self.total_nlines = total_nlines
229
+ self.pointer_nlines = pointer_nlines
230
+ self.indices_nlines = indices_nlines
231
+ self.values_nlines = values_nlines
232
+
233
+ parser = FortranFormatParser()
234
+ pointer_format = parser.parse(pointer_format_str)
235
+ if not isinstance(pointer_format, IntFormat):
236
+ raise ValueError("Expected int format for pointer format, got %s"
237
+ % pointer_format)
238
+
239
+ indices_format = parser.parse(indices_format_str)
240
+ if not isinstance(indices_format, IntFormat):
241
+ raise ValueError("Expected int format for indices format, got %s" %
242
+ indices_format)
243
+
244
+ values_format = parser.parse(values_format_str)
245
+ if isinstance(values_format, ExpFormat):
246
+ if mxtype.value_type not in ["real", "complex"]:
247
+ raise ValueError(f"Inconsistency between matrix type {mxtype} and "
248
+ f"value type {values_format}")
249
+ values_dtype = np.float64
250
+ elif isinstance(values_format, IntFormat):
251
+ if mxtype.value_type not in ["integer"]:
252
+ raise ValueError(f"Inconsistency between matrix type {mxtype} and "
253
+ f"value type {values_format}")
254
+ # XXX: fortran int -> dtype association ?
255
+ values_dtype = int
256
+ else:
257
+ raise ValueError(f"Unsupported format for values {values_format!r}")
258
+
259
+ self.pointer_format = pointer_format
260
+ self.indices_format = indices_format
261
+ self.values_format = values_format
262
+
263
+ self.pointer_dtype = np.int32
264
+ self.indices_dtype = np.int32
265
+ self.values_dtype = values_dtype
266
+
267
+ self.pointer_nlines = pointer_nlines
268
+ self.pointer_nbytes_full = _nbytes_full(pointer_format, pointer_nlines)
269
+
270
+ self.indices_nlines = indices_nlines
271
+ self.indices_nbytes_full = _nbytes_full(indices_format, indices_nlines)
272
+
273
+ self.values_nlines = values_nlines
274
+ self.values_nbytes_full = _nbytes_full(values_format, values_nlines)
275
+
276
+ self.nrows = nrows
277
+ self.ncols = ncols
278
+ self.nnon_zeros = nnon_zeros
279
+ self.nelementals = nelementals
280
+ self.mxtype = mxtype
281
+
282
+ def dump(self):
283
+ """Gives the header corresponding to this instance as a string."""
284
+ header = [self.title.ljust(72) + self.key.ljust(8)]
285
+
286
+ header.append("%14d%14d%14d%14d" %
287
+ (self.total_nlines, self.pointer_nlines,
288
+ self.indices_nlines, self.values_nlines))
289
+ header.append("%14s%14d%14d%14d%14d" %
290
+ (self.mxtype.fortran_format.ljust(14), self.nrows,
291
+ self.ncols, self.nnon_zeros, 0))
292
+
293
+ pffmt = self.pointer_format.fortran_format
294
+ iffmt = self.indices_format.fortran_format
295
+ vffmt = self.values_format.fortran_format
296
+ header.append("%16s%16s%20s" %
297
+ (pffmt.ljust(16), iffmt.ljust(16), vffmt.ljust(20)))
298
+ return "\n".join(header)
299
+
300
+
301
+ def _expect_int(value, msg=None):
302
+ try:
303
+ return int(value)
304
+ except ValueError as e:
305
+ if msg is None:
306
+ msg = "Expected an int, got %s"
307
+ raise ValueError(msg % value) from e
308
+
309
+
310
+ def _read_hb_data(content, header):
311
+ # XXX: look at a way to reduce memory here (big string creation)
312
+ ptr_string = "".join([content.read(header.pointer_nbytes_full),
313
+ content.readline()])
314
+ ptr = np.fromstring(ptr_string,
315
+ dtype=int, sep=' ')
316
+
317
+ ind_string = "".join([content.read(header.indices_nbytes_full),
318
+ content.readline()])
319
+ ind = np.fromstring(ind_string,
320
+ dtype=int, sep=' ')
321
+
322
+ val_string = "".join([content.read(header.values_nbytes_full),
323
+ content.readline()])
324
+ val = np.fromstring(val_string,
325
+ dtype=header.values_dtype, sep=' ')
326
+
327
+ try:
328
+ return csc_matrix((val, ind-1, ptr-1),
329
+ shape=(header.nrows, header.ncols))
330
+ except ValueError as e:
331
+ raise e
332
+
333
+
334
+ def _write_data(m, fid, header):
335
+ m = m.tocsc(copy=False)
336
+
337
+ def write_array(f, ar, nlines, fmt):
338
+ # ar_nlines is the number of full lines, n is the number of items per
339
+ # line, ffmt the fortran format
340
+ pyfmt = fmt.python_format
341
+ pyfmt_full = pyfmt * fmt.repeat
342
+
343
+ # for each array to write, we first write the full lines, and special
344
+ # case for partial line
345
+ full = ar[:(nlines - 1) * fmt.repeat]
346
+ for row in full.reshape((nlines-1, fmt.repeat)):
347
+ f.write(pyfmt_full % tuple(row) + "\n")
348
+ nremain = ar.size - full.size
349
+ if nremain > 0:
350
+ f.write((pyfmt * nremain) % tuple(ar[ar.size - nremain:]) + "\n")
351
+
352
+ fid.write(header.dump())
353
+ fid.write("\n")
354
+ # +1 is for Fortran one-based indexing
355
+ write_array(fid, m.indptr+1, header.pointer_nlines,
356
+ header.pointer_format)
357
+ write_array(fid, m.indices+1, header.indices_nlines,
358
+ header.indices_format)
359
+ write_array(fid, m.data, header.values_nlines,
360
+ header.values_format)
361
+
362
+
363
class HBMatrixType:
    """Class to hold the matrix type."""
    # q2f* translates qualified names to Fortran character
    _q2f_type = {
        "real": "R", "complex": "C", "pattern": "P", "integer": "I",
    }
    _q2f_structure = {
        "symmetric": "S", "unsymmetric": "U", "hermitian": "H",
        "skewsymmetric": "Z", "rectangular": "R",
    }
    _q2f_storage = {
        "assembled": "A", "elemental": "E",
    }

    # Reverse mappings: Fortran character -> qualified name.
    _f2q_type = {v: k for k, v in _q2f_type.items()}
    _f2q_structure = {v: k for k, v in _q2f_structure.items()}
    _f2q_storage = {v: k for k, v in _q2f_storage.items()}

    @classmethod
    def from_fortran(cls, fmt):
        """Build an instance from a 3-character Fortran type code (e.g. 'RUA')."""
        if len(fmt) != 3:
            raise ValueError("Fortran format for matrix type should be 3 "
                             "characters long")
        try:
            return cls(cls._f2q_type[fmt[0]],
                       cls._f2q_structure[fmt[1]],
                       cls._f2q_storage[fmt[2]])
        except KeyError as e:
            raise ValueError("Unrecognized format %s" % fmt) from e

    def __init__(self, value_type, structure, storage="assembled"):
        self.value_type = value_type
        self.structure = structure
        self.storage = storage

        if value_type not in self._q2f_type:
            raise ValueError("Unrecognized type %s" % value_type)
        if structure not in self._q2f_structure:
            raise ValueError("Unrecognized structure %s" % structure)
        if storage not in self._q2f_storage:
            raise ValueError("Unrecognized storage %s" % storage)

    @property
    def fortran_format(self):
        """3-character Fortran code (type, structure, storage)."""
        parts = (self._q2f_type[self.value_type],
                 self._q2f_structure[self.structure],
                 self._q2f_storage[self.storage])
        return "".join(parts)

    def __repr__(self):
        return f"HBMatrixType({self.value_type}, {self.structure}, {self.storage})"
class HBFile:
    def __init__(self, file, hb_info=None):
        """Create a HBFile instance.

        Parameters
        ----------
        file : file-object
            StringIO work as well.
        hb_info : HBInfo, optional
            Should be given as an argument for writing, in which case the file
            should be writable.
        """
        self._fid = file
        if hb_info is not None:
            self._hb_info = hb_info
        else:
            # No metadata supplied: we are reading, so parse the header now.
            self._hb_info = HBInfo.from_file(file)

    @property
    def title(self):
        """Title recorded in the file header."""
        return self._hb_info.title

    @property
    def key(self):
        """Key recorded in the file header."""
        return self._hb_info.key

    @property
    def type(self):
        """Value type of the matrix ('real', 'integer', ...)."""
        return self._hb_info.mxtype.value_type

    @property
    def structure(self):
        """Structure of the matrix ('unsymmetric', 'symmetric', ...)."""
        return self._hb_info.mxtype.structure

    @property
    def storage(self):
        """Storage scheme of the matrix ('assembled' or 'elemental')."""
        return self._hb_info.mxtype.storage

    def read_matrix(self):
        """Read the matrix data and return it as a csc_matrix."""
        return _read_hb_data(self._fid, self._hb_info)

    def write_matrix(self, m):
        """Write sparse matrix *m* to the underlying file."""
        return _write_data(m, self._fid, self._hb_info)
def hb_read(path_or_open_file):
    """Read HB-format file.

    Parameters
    ----------
    path_or_open_file : path-like or file-like
        If a file-like object, it is used as-is. Otherwise, it is opened
        before reading.

    Returns
    -------
    data : scipy.sparse.csc_matrix instance
        The data read from the HB file as a sparse matrix.

    Notes
    -----
    At the moment not the full Harwell-Boeing format is supported. Supported
    features are:

        - assembled, non-symmetric, real matrices
        - integer for pointer/indices
        - exponential format for float values, and int format

    Examples
    --------
    We can read and write a harwell-boeing format file:

    >>> from scipy.io import hb_read, hb_write
    >>> from scipy.sparse import csr_matrix, eye
    >>> data = csr_matrix(eye(3))  # create a sparse matrix
    >>> hb_write("data.hb", data)  # write a hb file
    >>> print(hb_read("data.hb"))  # read a hb file
      (0, 0)	1.0
      (1, 1)	1.0
      (2, 2)	1.0

    """
    # A 'read' attribute is the duck-typed marker for an open file.
    if hasattr(path_or_open_file, 'read'):
        return HBFile(path_or_open_file).read_matrix()
    with open(path_or_open_file) as fid:
        return HBFile(fid).read_matrix()
def hb_write(path_or_open_file, m, hb_info=None):
    """Write HB-format file.

    Parameters
    ----------
    path_or_open_file : path-like or file-like
        If a file-like object, it is used as-is. Otherwise, it is opened
        before writing.
    m : sparse-matrix
        the sparse matrix to write
    hb_info : HBInfo
        contains the meta-data for write

    Returns
    -------
    None

    Notes
    -----
    At the moment not the full Harwell-Boeing format is supported. Supported
    features are:

        - assembled, non-symmetric, real matrices
        - integer for pointer/indices
        - exponential format for float values, and int format

    Examples
    --------
    We can read and write a harwell-boeing format file:

    >>> from scipy.io import hb_read, hb_write
    >>> from scipy.sparse import csr_matrix, eye
    >>> data = csr_matrix(eye(3))  # create a sparse matrix
    >>> hb_write("data.hb", data)  # write a hb file
    >>> print(hb_read("data.hb"))  # read a hb file
      (0, 0)	1.0
      (1, 1)	1.0
      (2, 2)	1.0

    """
    mat = m.tocsc(copy=False)

    # Derive the header from the data when the caller did not provide one.
    header = HBInfo.from_data(mat) if hb_info is None else hb_info

    def _dump(fid):
        return HBFile(fid, header).write_matrix(mat)

    # A 'write' attribute is the duck-typed marker for an open file.
    if hasattr(path_or_open_file, 'write'):
        return _dump(path_or_open_file)
    with open(path_or_open_file, 'w') as fid:
        return _dump(fid)
env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (195 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/tests/__pycache__/test_fortran_format.cpython-310.pyc ADDED
Binary file (4.26 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/tests/__pycache__/test_hb.cpython-310.pyc ADDED
Binary file (2.92 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/tests/test_fortran_format.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+ from numpy.testing import assert_equal
4
+ from pytest import raises as assert_raises
5
+
6
+ from scipy.io._harwell_boeing import (
7
+ FortranFormatParser, IntFormat, ExpFormat, BadFortranFormat)
8
+
9
+
10
+ class TestFortranFormatParser:
11
+ def setup_method(self):
12
+ self.parser = FortranFormatParser()
13
+
14
+ def _test_equal(self, format, ref):
15
+ ret = self.parser.parse(format)
16
+ assert_equal(ret.__dict__, ref.__dict__)
17
+
18
+ def test_simple_int(self):
19
+ self._test_equal("(I4)", IntFormat(4))
20
+
21
+ def test_simple_repeated_int(self):
22
+ self._test_equal("(3I4)", IntFormat(4, repeat=3))
23
+
24
+ def test_simple_exp(self):
25
+ self._test_equal("(E4.3)", ExpFormat(4, 3))
26
+
27
+ def test_exp_exp(self):
28
+ self._test_equal("(E8.3E3)", ExpFormat(8, 3, 3))
29
+
30
+ def test_repeat_exp(self):
31
+ self._test_equal("(2E4.3)", ExpFormat(4, 3, repeat=2))
32
+
33
+ def test_repeat_exp_exp(self):
34
+ self._test_equal("(2E8.3E3)", ExpFormat(8, 3, 3, repeat=2))
35
+
36
+ def test_wrong_formats(self):
37
+ def _test_invalid(bad_format):
38
+ assert_raises(BadFortranFormat, lambda: self.parser.parse(bad_format))
39
+ _test_invalid("I4")
40
+ _test_invalid("(E4)")
41
+ _test_invalid("(E4.)")
42
+ _test_invalid("(E4.E3)")
43
+
44
+
45
class TestIntFormat:
    def test_to_fortran(self):
        # (format object, expected Fortran descriptor) pairs.
        cases = [(IntFormat(10), "(I10)"),
                 (IntFormat(12, 10), "(I12.10)"),
                 (IntFormat(12, 10, 3), "(3I12.10)")]
        for fmt, expected in cases:
            assert_equal(fmt.fortran_format, expected)

    def test_from_number(self):
        # Width/repeat are derived from the magnitude (and sign) of the value.
        cases = [(10, IntFormat(3, repeat=26)),
                 (-12, IntFormat(4, repeat=20)),
                 (123456789, IntFormat(10, repeat=8))]
        for number, expected in cases:
            assert_equal(IntFormat.from_number(number).__dict__,
                         expected.__dict__)
class TestExpFormat:
    def test_to_fortran(self):
        # (format object, expected Fortran descriptor) pairs.
        cases = [(ExpFormat(10, 5), "(E10.5)"),
                 (ExpFormat(12, 10), "(E12.10)"),
                 (ExpFormat(12, 10, min=3), "(E12.10E3)"),
                 (ExpFormat(10, 5, repeat=3), "(3E10.5)")]
        for fmt, expected in cases:
            assert_equal(fmt.fortran_format, expected)

    def test_from_number(self):
        # A negative value needs one extra column for the minus sign.
        numbers = np.array([1.0, -1.2])
        expected = [ExpFormat(24, 16, repeat=3), ExpFormat(25, 16, repeat=3)]
        for number, ref in zip(numbers, expected):
            assert_equal(ExpFormat.from_number(number).__dict__, ref.__dict__)
env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/tests/test_hb.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from io import StringIO
2
+ import tempfile
3
+
4
+ import numpy as np
5
+
6
+ from numpy.testing import assert_equal, \
7
+ assert_array_almost_equal_nulp
8
+
9
+ from scipy.sparse import coo_matrix, csc_matrix, rand
10
+
11
+ from scipy.io import hb_read, hb_write
12
+
13
+
14
+ SIMPLE = """\
15
+ No Title |No Key
16
+ 9 4 1 4
17
+ RUA 100 100 10 0
18
+ (26I3) (26I3) (3E23.15)
19
+ 1 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
20
+ 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
21
+ 3 3 3 3 3 3 3 4 4 4 6 6 6 6 6 6 6 6 6 6 6 8 9 9 9 9
22
+ 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 11
23
+ 37 71 89 18 30 45 70 19 25 52
24
+ 2.971243799687726e-01 3.662366682877375e-01 4.786962174699534e-01
25
+ 6.490068647991184e-01 6.617490424831662e-02 8.870370343191623e-01
26
+ 4.196478590163001e-01 5.649603072111251e-01 9.934423887087086e-01
27
+ 6.912334991524289e-01
28
+ """
29
+
30
+ SIMPLE_MATRIX = coo_matrix(
31
+ ((0.297124379969, 0.366236668288, 0.47869621747, 0.649006864799,
32
+ 0.0661749042483, 0.887037034319, 0.419647859016,
33
+ 0.564960307211, 0.993442388709, 0.691233499152,),
34
+ (np.array([[36, 70, 88, 17, 29, 44, 69, 18, 24, 51],
35
+ [0, 4, 58, 61, 61, 72, 72, 73, 99, 99]]))))
36
+
37
+
38
def assert_csc_almost_equal(r, l):
    """Assert two matrices agree in CSC form: structure exactly, values
    to within 10000 ulp."""
    left = csc_matrix(r)
    right = csc_matrix(l)
    assert_equal(left.indptr, right.indptr)
    assert_equal(left.indices, right.indices)
    assert_array_almost_equal_nulp(left.data, right.data, 10000)
class TestHBReader:
    def test_simple(self):
        # Parse the embedded HB fixture and compare it against the
        # hand-built reference matrix.
        parsed = hb_read(StringIO(SIMPLE))
        assert_csc_almost_equal(parsed, SIMPLE_MATRIX)
+
54
+ def check_save_load(self, value):
55
+ with tempfile.NamedTemporaryFile(mode='w+t') as file:
56
+ hb_write(file, value)
57
+ file.file.seek(0)
58
+ value_loaded = hb_read(file)
59
+ assert_csc_almost_equal(value, value_loaded)
60
+
61
+ def test_simple(self):
62
+ random_matrix = rand(10, 100, 0.1)
63
+ for matrix_format in ('coo', 'csc', 'csr', 'bsr', 'dia', 'dok', 'lil'):
64
+ matrix = random_matrix.asformat(matrix_format, copy=False)
65
+ self.check_save_load(matrix)
env-llmeval/lib/python3.10/site-packages/scipy/ndimage/__init__.py ADDED
@@ -0,0 +1,169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ =========================================================
3
+ Multidimensional image processing (:mod:`scipy.ndimage`)
4
+ =========================================================
5
+
6
+ .. currentmodule:: scipy.ndimage
7
+
8
+ This package contains various functions for multidimensional image
9
+ processing.
10
+
11
+
12
+ Filters
13
+ =======
14
+
15
+ .. autosummary::
16
+ :toctree: generated/
17
+
18
+ convolve - Multidimensional convolution
19
+ convolve1d - 1-D convolution along the given axis
20
+ correlate - Multidimensional correlation
21
+ correlate1d - 1-D correlation along the given axis
22
+ gaussian_filter
23
+ gaussian_filter1d
24
+ gaussian_gradient_magnitude
25
+ gaussian_laplace
26
+ generic_filter - Multidimensional filter using a given function
27
+ generic_filter1d - 1-D generic filter along the given axis
28
+ generic_gradient_magnitude
29
+ generic_laplace
30
+ laplace - N-D Laplace filter based on approximate second derivatives
31
+ maximum_filter
32
+ maximum_filter1d
33
+ median_filter - Calculates a multidimensional median filter
34
+ minimum_filter
35
+ minimum_filter1d
36
+ percentile_filter - Calculates a multidimensional percentile filter
37
+ prewitt
38
+ rank_filter - Calculates a multidimensional rank filter
39
+ sobel
40
+ uniform_filter - Multidimensional uniform filter
41
+ uniform_filter1d - 1-D uniform filter along the given axis
42
+
43
+ Fourier filters
44
+ ===============
45
+
46
+ .. autosummary::
47
+ :toctree: generated/
48
+
49
+ fourier_ellipsoid
50
+ fourier_gaussian
51
+ fourier_shift
52
+ fourier_uniform
53
+
54
+ Interpolation
55
+ =============
56
+
57
+ .. autosummary::
58
+ :toctree: generated/
59
+
60
+ affine_transform - Apply an affine transformation
61
+ geometric_transform - Apply an arbitrary geometric transform
62
+ map_coordinates - Map input array to new coordinates by interpolation
63
+ rotate - Rotate an array
64
+ shift - Shift an array
65
+ spline_filter
66
+ spline_filter1d
67
+ zoom - Zoom an array
68
+
69
+ Measurements
70
+ ============
71
+
72
+ .. autosummary::
73
+ :toctree: generated/
74
+
75
+ center_of_mass - The center of mass of the values of an array at labels
76
+ extrema - Min's and max's of an array at labels, with their positions
77
+ find_objects - Find objects in a labeled array
78
+ histogram - Histogram of the values of an array, optionally at labels
79
+ label - Label features in an array
80
+ labeled_comprehension
81
+ maximum
82
+ maximum_position
83
+ mean - Mean of the values of an array at labels
84
+ median
85
+ minimum
86
+ minimum_position
87
+ standard_deviation - Standard deviation of an N-D image array
88
+ sum_labels - Sum of the values of the array
89
+ value_indices - Find indices of each distinct value in given array
90
+ variance - Variance of the values of an N-D image array
91
+ watershed_ift
92
+
93
+ Morphology
94
+ ==========
95
+
96
+ .. autosummary::
97
+ :toctree: generated/
98
+
99
+ binary_closing
100
+ binary_dilation
101
+ binary_erosion
102
+ binary_fill_holes
103
+ binary_hit_or_miss
104
+ binary_opening
105
+ binary_propagation
106
+ black_tophat
107
+ distance_transform_bf
108
+ distance_transform_cdt
109
+ distance_transform_edt
110
+ generate_binary_structure
111
+ grey_closing
112
+ grey_dilation
113
+ grey_erosion
114
+ grey_opening
115
+ iterate_structure
116
+ morphological_gradient
117
+ morphological_laplace
118
+ white_tophat
119
+
120
+ """
121
+
122
+ # Copyright (C) 2003-2005 Peter J. Verveer
123
+ #
124
+ # Redistribution and use in source and binary forms, with or without
125
+ # modification, are permitted provided that the following conditions
126
+ # are met:
127
+ #
128
+ # 1. Redistributions of source code must retain the above copyright
129
+ # notice, this list of conditions and the following disclaimer.
130
+ #
131
+ # 2. Redistributions in binary form must reproduce the above
132
+ # copyright notice, this list of conditions and the following
133
+ # disclaimer in the documentation and/or other materials provided
134
+ # with the distribution.
135
+ #
136
+ # 3. The name of the author may not be used to endorse or promote
137
+ # products derived from this software without specific prior
138
+ # written permission.
139
+ #
140
+ # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
141
+ # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
142
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
143
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
144
+ # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
145
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
146
+ # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
147
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
148
+ # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
149
+ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
150
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
151
+
152
+ from ._filters import *
153
+ from ._fourier import *
154
+ from ._interpolation import *
155
+ from ._measurements import *
156
+ from ._morphology import *
157
+
158
+ # Deprecated namespaces, to be removed in v2.0.0
159
+ from . import filters
160
+ from . import fourier
161
+ from . import interpolation
162
+ from . import measurements
163
+ from . import morphology
164
+
165
+ __all__ = [s for s in dir() if not s.startswith('_')]
166
+
167
+ from scipy._lib._testutils import PytestTester
168
+ test = PytestTester(__name__)
169
+ del PytestTester
env-llmeval/lib/python3.10/site-packages/scipy/ndimage/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.82 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_filters.cpython-310.pyc ADDED
Binary file (52.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_fourier.cpython-310.pyc ADDED
Binary file (8.73 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_interpolation.cpython-310.pyc ADDED
Binary file (28.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_measurements.cpython-310.pyc ADDED
Binary file (47.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_morphology.cpython-310.pyc ADDED
Binary file (83.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_ni_docstrings.cpython-310.pyc ADDED
Binary file (8.32 kB). View file