applied-ai-018 committed on
Commit c1dec76 · verified · 1 Parent(s): ecb8b1c

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step40/zero/1.word_embeddings.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step40/zero/1.word_embeddings.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step40/zero/6.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step40/zero/6.attention.query_key_value.weight/fp32.pt +3 -0
  5. venv/lib/python3.10/site-packages/scipy/integrate/__init__.py +110 -0
  6. venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/__init__.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_bvp.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_ode.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_odepack_py.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadpack_py.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadrature.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_tanhsinh.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/lsoda.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/odepack.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/quadpack.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/vode.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/scipy/integrate/_bvp.py +1155 -0
  20. venv/lib/python3.10/site-packages/scipy/integrate/_dop.cpython-310-x86_64-linux-gnu.so +0 -0
  21. venv/lib/python3.10/site-packages/scipy/integrate/_lsoda.cpython-310-x86_64-linux-gnu.so +0 -0
  22. venv/lib/python3.10/site-packages/scipy/integrate/_ode.py +1376 -0
  23. venv/lib/python3.10/site-packages/scipy/integrate/_odepack.cpython-310-x86_64-linux-gnu.so +0 -0
  24. venv/lib/python3.10/site-packages/scipy/integrate/_odepack_py.py +262 -0
  25. venv/lib/python3.10/site-packages/scipy/integrate/_quad_vec.py +656 -0
  26. venv/lib/python3.10/site-packages/scipy/integrate/_quadpack.cpython-310-x86_64-linux-gnu.so +0 -0
  27. venv/lib/python3.10/site-packages/scipy/integrate/_quadpack_py.py +1291 -0
  28. venv/lib/python3.10/site-packages/scipy/integrate/_quadrature.py +1830 -0
  29. venv/lib/python3.10/site-packages/scipy/integrate/_tanhsinh.py +1231 -0
  30. venv/lib/python3.10/site-packages/scipy/integrate/_test_multivariate.cpython-310-x86_64-linux-gnu.so +0 -0
  31. venv/lib/python3.10/site-packages/scipy/integrate/_test_odeint_banded.cpython-310-x86_64-linux-gnu.so +0 -0
  32. venv/lib/python3.10/site-packages/scipy/integrate/_vode.cpython-310-x86_64-linux-gnu.so +0 -0
  33. venv/lib/python3.10/site-packages/scipy/integrate/dop.py +18 -0
  34. venv/lib/python3.10/site-packages/scipy/integrate/lsoda.py +15 -0
  35. venv/lib/python3.10/site-packages/scipy/integrate/odepack.py +17 -0
  36. venv/lib/python3.10/site-packages/scipy/integrate/quadpack.py +24 -0
  37. venv/lib/python3.10/site-packages/scipy/integrate/tests/__init__.py +0 -0
  38. venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_banded_ode_solvers.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadpack.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_tanhsinh.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/scipy/integrate/tests/test__quad_vec.py +209 -0
  45. venv/lib/python3.10/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py +218 -0
  46. venv/lib/python3.10/site-packages/scipy/integrate/tests/test_bvp.py +711 -0
  47. venv/lib/python3.10/site-packages/scipy/integrate/tests/test_integrate.py +834 -0
  48. venv/lib/python3.10/site-packages/scipy/integrate/tests/test_odeint_jac.py +74 -0
  49. venv/lib/python3.10/site-packages/scipy/integrate/tests/test_quadpack.py +677 -0
  50. venv/lib/python3.10/site-packages/scipy/integrate/tests/test_quadrature.py +766 -0
ckpts/universal/global_step40/zero/1.word_embeddings.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05a19096a43e9d4f87d7f4eb3617c860d117ffcb3abb1020ac53684462e37447
+ size 415237404
ckpts/universal/global_step40/zero/1.word_embeddings.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24280c624bfa5363e724734bcaf1cfc8574475d95b056b9e4b02a9554ce13e7d
+ size 415237419
ckpts/universal/global_step40/zero/6.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b96142d625f42d60001837ea2d0b09bf70d2fd3b0a0acc7d6d2c6095eab421a2
+ size 50332843
ckpts/universal/global_step40/zero/6.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2532de191ce0e634cff5eef68ef1e2a43afad8a69f604524a4ca9f870c2f905d
+ size 50332749
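
These four checkpoint entries are Git LFS pointer files (version/oid/size stanzas) standing in for PyTorch optimizer-state tensors; the blobs themselves are not in this diff. As a rough, illustrative sketch only (the path is taken from the listing above, everything else is an assumption), one way to inspect such a shard after fetching the real objects with `git lfs pull`:

    # Hypothetical inspection of one optimizer-state shard; assumes the LFS
    # blobs have been pulled and PyTorch is installed.
    import torch

    path = "ckpts/universal/global_step40/zero/1.word_embeddings.weight/exp_avg.pt"
    obj = torch.load(path, map_location="cpu")  # load on CPU, no GPU needed

    # The stored object may be a bare tensor or a small dict of tensors,
    # depending on how the checkpoint was written.
    if isinstance(obj, torch.Tensor):
        print(obj.shape, obj.dtype)
    else:
        print(type(obj))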
venv/lib/python3.10/site-packages/scipy/integrate/__init__.py ADDED
@@ -0,0 +1,110 @@
+ """
+ =============================================
+ Integration and ODEs (:mod:`scipy.integrate`)
+ =============================================
+
+ .. currentmodule:: scipy.integrate
+
+ Integrating functions, given function object
+ ============================================
+
+ .. autosummary::
+ :toctree: generated/
+
+ quad -- General purpose integration
+ quad_vec -- General purpose integration of vector-valued functions
+ dblquad -- General purpose double integration
+ tplquad -- General purpose triple integration
+ nquad -- General purpose N-D integration
+ fixed_quad -- Integrate func(x) using Gaussian quadrature of order n
+ quadrature -- Integrate with given tolerance using Gaussian quadrature
+ romberg -- Integrate func using Romberg integration
+ newton_cotes -- Weights and error coefficient for Newton-Cotes integration
+ qmc_quad -- N-D integration using Quasi-Monte Carlo quadrature
+ IntegrationWarning -- Warning on issues during integration
+ AccuracyWarning -- Warning on issues during quadrature integration
+
+ Integrating functions, given fixed samples
+ ==========================================
+
+ .. autosummary::
+ :toctree: generated/
+
+ trapezoid -- Use trapezoidal rule to compute integral.
+ cumulative_trapezoid -- Use trapezoidal rule to cumulatively compute integral.
+ simpson -- Use Simpson's rule to compute integral from samples.
+ cumulative_simpson -- Use Simpson's rule to cumulatively compute integral from samples.
+ romb -- Use Romberg Integration to compute integral from
+ -- (2**k + 1) evenly-spaced samples.
+
+ .. seealso::
+
+ :mod:`scipy.special` for orthogonal polynomials (special) for Gaussian
+ quadrature roots and weights for other weighting factors and regions.
+
+ Solving initial value problems for ODE systems
+ ==============================================
+
+ The solvers are implemented as individual classes, which can be used directly
+ (low-level usage) or through a convenience function.
+
+ .. autosummary::
+ :toctree: generated/
+
+ solve_ivp -- Convenient function for ODE integration.
+ RK23 -- Explicit Runge-Kutta solver of order 3(2).
+ RK45 -- Explicit Runge-Kutta solver of order 5(4).
+ DOP853 -- Explicit Runge-Kutta solver of order 8.
+ Radau -- Implicit Runge-Kutta solver of order 5.
+ BDF -- Implicit multi-step variable order (1 to 5) solver.
+ LSODA -- LSODA solver from ODEPACK Fortran package.
+ OdeSolver -- Base class for ODE solvers.
+ DenseOutput -- Local interpolant for computing a dense output.
+ OdeSolution -- Class which represents a continuous ODE solution.
+
+
+ Old API
+ -------
+
+ These are the routines developed earlier for SciPy. They wrap older solvers
+ implemented in Fortran (mostly ODEPACK). While the interface to them is not
+ particularly convenient and certain features are missing compared to the new
+ API, the solvers themselves are of good quality and work fast as compiled
+ Fortran code. In some cases, it might be worth using this old API.
+
+ .. autosummary::
+ :toctree: generated/
+
+ odeint -- General integration of ordinary differential equations.
+ ode -- Integrate ODE using VODE and ZVODE routines.
+ complex_ode -- Convert a complex-valued ODE to real-valued and integrate.
+ ODEintWarning -- Warning raised during the execution of `odeint`.
+
+
+ Solving boundary value problems for ODE systems
+ ===============================================
+
+ .. autosummary::
+ :toctree: generated/
+
+ solve_bvp -- Solve a boundary value problem for a system of ODEs.
+ """ # noqa: E501
+
+
+ from ._quadrature import *
+ from ._odepack_py import *
+ from ._quadpack_py import *
+ from ._ode import *
+ from ._bvp import solve_bvp
+ from ._ivp import (solve_ivp, OdeSolution, DenseOutput,
+ OdeSolver, RK23, RK45, DOP853, Radau, BDF, LSODA)
+ from ._quad_vec import quad_vec
+
+ # Deprecated namespaces, to be removed in v2.0.0
+ from . import dop, lsoda, vode, odepack, quadpack
+
+ __all__ = [s for s in dir() if not s.startswith('_')]
+
+ from scipy._lib._testutils import PytestTester
+ test = PytestTester(__name__)
+ del PytestTester
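
The docstring in this file is primarily an API index. For orientation, a minimal usage sketch of two of the listed entry points, `quad` and `solve_ivp` (illustrative only, not part of the committed file):

    # Definite integral of sin(x) over [0, pi]: quad returns (value, abs_error).
    import numpy as np
    from scipy.integrate import quad, solve_ivp

    value, err = quad(np.sin, 0.0, np.pi)   # value is ~2.0

    # Initial value problem dy/dt = -y, y(0) = 1, integrated up to t = 5.
    sol = solve_ivp(lambda t, y: -y, (0.0, 5.0), [1.0], rtol=1e-8)
    print(value, sol.y[0, -1])              # ~2.0 and ~exp(-5)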
venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.59 kB)
venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_bvp.cpython-310.pyc ADDED
Binary file (35.8 kB)
venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_ode.cpython-310.pyc ADDED
Binary file (38.3 kB)
venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_odepack_py.cpython-310.pyc ADDED
Binary file (10.7 kB)
venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc ADDED
Binary file (15.4 kB)
venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadpack_py.cpython-310.pyc ADDED
Binary file (49.4 kB)
venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadrature.cpython-310.pyc ADDED
Binary file (54.3 kB)
venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_tanhsinh.cpython-310.pyc ADDED
Binary file (35.3 kB)
venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc ADDED
Binary file (607 Bytes)
venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/lsoda.cpython-310.pyc ADDED
Binary file (596 Bytes)
venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/odepack.cpython-310.pyc ADDED
Binary file (625 Bytes)
venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/quadpack.cpython-310.pyc ADDED
Binary file (665 Bytes)
venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/vode.cpython-310.pyc ADDED
Binary file (608 Bytes)
venv/lib/python3.10/site-packages/scipy/integrate/_bvp.py ADDED
@@ -0,0 +1,1155 @@
1
+ """Boundary value problem solver."""
2
+ from warnings import warn
3
+
4
+ import numpy as np
5
+ from numpy.linalg import pinv
6
+
7
+ from scipy.sparse import coo_matrix, csc_matrix
8
+ from scipy.sparse.linalg import splu
9
+ from scipy.optimize import OptimizeResult
10
+
11
+
12
+ EPS = np.finfo(float).eps
13
+
14
+
15
+ def estimate_fun_jac(fun, x, y, p, f0=None):
16
+ """Estimate derivatives of an ODE system rhs with forward differences.
17
+
18
+ Returns
19
+ -------
20
+ df_dy : ndarray, shape (n, n, m)
21
+ Derivatives with respect to y. An element (i, j, q) corresponds to
22
+ d f_i(x_q, y_q) / d (y_q)_j.
23
+ df_dp : ndarray with shape (n, k, m) or None
24
+ Derivatives with respect to p. An element (i, j, q) corresponds to
25
+ d f_i(x_q, y_q, p) / d p_j. If `p` is empty, None is returned.
26
+ """
27
+ n, m = y.shape
28
+ if f0 is None:
29
+ f0 = fun(x, y, p)
30
+
31
+ dtype = y.dtype
32
+
33
+ df_dy = np.empty((n, n, m), dtype=dtype)
34
+ h = EPS**0.5 * (1 + np.abs(y))
35
+ for i in range(n):
36
+ y_new = y.copy()
37
+ y_new[i] += h[i]
38
+ hi = y_new[i] - y[i]
39
+ f_new = fun(x, y_new, p)
40
+ df_dy[:, i, :] = (f_new - f0) / hi
41
+
42
+ k = p.shape[0]
43
+ if k == 0:
44
+ df_dp = None
45
+ else:
46
+ df_dp = np.empty((n, k, m), dtype=dtype)
47
+ h = EPS**0.5 * (1 + np.abs(p))
48
+ for i in range(k):
49
+ p_new = p.copy()
50
+ p_new[i] += h[i]
51
+ hi = p_new[i] - p[i]
52
+ f_new = fun(x, y, p_new)
53
+ df_dp[:, i, :] = (f_new - f0) / hi
54
+
55
+ return df_dy, df_dp
56
+
57
+
58
+ def estimate_bc_jac(bc, ya, yb, p, bc0=None):
59
+ """Estimate derivatives of boundary conditions with forward differences.
60
+
61
+ Returns
62
+ -------
63
+ dbc_dya : ndarray, shape (n + k, n)
64
+ Derivatives with respect to ya. An element (i, j) corresponds to
65
+ d bc_i / d ya_j.
66
+ dbc_dyb : ndarray, shape (n + k, n)
67
+ Derivatives with respect to yb. An element (i, j) corresponds to
68
+ d bc_i / d ya_j.
69
+ dbc_dp : ndarray with shape (n + k, k) or None
70
+ Derivatives with respect to p. An element (i, j) corresponds to
71
+ d bc_i / d p_j. If `p` is empty, None is returned.
72
+ """
73
+ n = ya.shape[0]
74
+ k = p.shape[0]
75
+
76
+ if bc0 is None:
77
+ bc0 = bc(ya, yb, p)
78
+
79
+ dtype = ya.dtype
80
+
81
+ dbc_dya = np.empty((n, n + k), dtype=dtype)
82
+ h = EPS**0.5 * (1 + np.abs(ya))
83
+ for i in range(n):
84
+ ya_new = ya.copy()
85
+ ya_new[i] += h[i]
86
+ hi = ya_new[i] - ya[i]
87
+ bc_new = bc(ya_new, yb, p)
88
+ dbc_dya[i] = (bc_new - bc0) / hi
89
+ dbc_dya = dbc_dya.T
90
+
91
+ h = EPS**0.5 * (1 + np.abs(yb))
92
+ dbc_dyb = np.empty((n, n + k), dtype=dtype)
93
+ for i in range(n):
94
+ yb_new = yb.copy()
95
+ yb_new[i] += h[i]
96
+ hi = yb_new[i] - yb[i]
97
+ bc_new = bc(ya, yb_new, p)
98
+ dbc_dyb[i] = (bc_new - bc0) / hi
99
+ dbc_dyb = dbc_dyb.T
100
+
101
+ if k == 0:
102
+ dbc_dp = None
103
+ else:
104
+ h = EPS**0.5 * (1 + np.abs(p))
105
+ dbc_dp = np.empty((k, n + k), dtype=dtype)
106
+ for i in range(k):
107
+ p_new = p.copy()
108
+ p_new[i] += h[i]
109
+ hi = p_new[i] - p[i]
110
+ bc_new = bc(ya, yb, p_new)
111
+ dbc_dp[i] = (bc_new - bc0) / hi
112
+ dbc_dp = dbc_dp.T
113
+
114
+ return dbc_dya, dbc_dyb, dbc_dp
115
+
116
+
117
+ def compute_jac_indices(n, m, k):
118
+ """Compute indices for the collocation system Jacobian construction.
119
+
120
+ See `construct_global_jac` for the explanation.
121
+ """
122
+ i_col = np.repeat(np.arange((m - 1) * n), n)
123
+ j_col = (np.tile(np.arange(n), n * (m - 1)) +
124
+ np.repeat(np.arange(m - 1) * n, n**2))
125
+
126
+ i_bc = np.repeat(np.arange((m - 1) * n, m * n + k), n)
127
+ j_bc = np.tile(np.arange(n), n + k)
128
+
129
+ i_p_col = np.repeat(np.arange((m - 1) * n), k)
130
+ j_p_col = np.tile(np.arange(m * n, m * n + k), (m - 1) * n)
131
+
132
+ i_p_bc = np.repeat(np.arange((m - 1) * n, m * n + k), k)
133
+ j_p_bc = np.tile(np.arange(m * n, m * n + k), n + k)
134
+
135
+ i = np.hstack((i_col, i_col, i_bc, i_bc, i_p_col, i_p_bc))
136
+ j = np.hstack((j_col, j_col + n,
137
+ j_bc, j_bc + (m - 1) * n,
138
+ j_p_col, j_p_bc))
139
+
140
+ return i, j
141
+
142
+
143
+ def stacked_matmul(a, b):
144
+ """Stacked matrix multiply: out[i,:,:] = np.dot(a[i,:,:], b[i,:,:]).
145
+
146
+ Empirical optimization. Use outer Python loop and BLAS for large
147
+ matrices, otherwise use a single einsum call.
148
+ """
149
+ if a.shape[1] > 50:
150
+ out = np.empty((a.shape[0], a.shape[1], b.shape[2]))
151
+ for i in range(a.shape[0]):
152
+ out[i] = np.dot(a[i], b[i])
153
+ return out
154
+ else:
155
+ return np.einsum('...ij,...jk->...ik', a, b)
156
+
157
+
158
+ def construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, df_dp,
159
+ df_dp_middle, dbc_dya, dbc_dyb, dbc_dp):
160
+ """Construct the Jacobian of the collocation system.
161
+
162
+ There are n * m + k functions: m - 1 collocations residuals, each
163
+ containing n components, followed by n + k boundary condition residuals.
164
+
165
+ There are n * m + k variables: m vectors of y, each containing n
166
+ components, followed by k values of vector p.
167
+
168
+ For example, let m = 4, n = 2 and k = 1, then the Jacobian will have
169
+ the following sparsity structure:
170
+
171
+ 1 1 2 2 0 0 0 0 5
172
+ 1 1 2 2 0 0 0 0 5
173
+ 0 0 1 1 2 2 0 0 5
174
+ 0 0 1 1 2 2 0 0 5
175
+ 0 0 0 0 1 1 2 2 5
176
+ 0 0 0 0 1 1 2 2 5
177
+
178
+ 3 3 0 0 0 0 4 4 6
179
+ 3 3 0 0 0 0 4 4 6
180
+ 3 3 0 0 0 0 4 4 6
181
+
182
+ Zeros denote identically zero values, other values denote different kinds
183
+ of blocks in the matrix (see below). The blank row indicates the separation
184
+ of collocation residuals from boundary conditions. And the blank column
185
+ indicates the separation of y values from p values.
186
+
187
+ Refer to [1]_ (p. 306) for the formula of n x n blocks for derivatives
188
+ of collocation residuals with respect to y.
189
+
190
+ Parameters
191
+ ----------
192
+ n : int
193
+ Number of equations in the ODE system.
194
+ m : int
195
+ Number of nodes in the mesh.
196
+ k : int
197
+ Number of the unknown parameters.
198
+ i_jac, j_jac : ndarray
199
+ Row and column indices returned by `compute_jac_indices`. They
200
+ represent different blocks in the Jacobian matrix in the following
201
+ order (see the scheme above):
202
+
203
+ * 1: m - 1 diagonal n x n blocks for the collocation residuals.
204
+ * 2: m - 1 off-diagonal n x n blocks for the collocation residuals.
205
+ * 3 : (n + k) x n block for the dependency of the boundary
206
+ conditions on ya.
207
+ * 4: (n + k) x n block for the dependency of the boundary
208
+ conditions on yb.
209
+ * 5: (m - 1) * n x k block for the dependency of the collocation
210
+ residuals on p.
211
+ * 6: (n + k) x k block for the dependency of the boundary
212
+ conditions on p.
213
+
214
+ df_dy : ndarray, shape (n, n, m)
215
+ Jacobian of f with respect to y computed at the mesh nodes.
216
+ df_dy_middle : ndarray, shape (n, n, m - 1)
217
+ Jacobian of f with respect to y computed at the middle between the
218
+ mesh nodes.
219
+ df_dp : ndarray with shape (n, k, m) or None
220
+ Jacobian of f with respect to p computed at the mesh nodes.
221
+ df_dp_middle : ndarray with shape (n, k, m - 1) or None
222
+ Jacobian of f with respect to p computed at the middle between the
223
+ mesh nodes.
224
+ dbc_dya, dbc_dyb : ndarray, shape (n, n)
225
+ Jacobian of bc with respect to ya and yb.
226
+ dbc_dp : ndarray with shape (n, k) or None
227
+ Jacobian of bc with respect to p.
228
+
229
+ Returns
230
+ -------
231
+ J : csc_matrix, shape (n * m + k, n * m + k)
232
+ Jacobian of the collocation system in a sparse form.
233
+
234
+ References
235
+ ----------
236
+ .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
237
+ Control and the Maltab PSE", ACM Trans. Math. Softw., Vol. 27,
238
+ Number 3, pp. 299-316, 2001.
239
+ """
240
+ df_dy = np.transpose(df_dy, (2, 0, 1))
241
+ df_dy_middle = np.transpose(df_dy_middle, (2, 0, 1))
242
+
243
+ h = h[:, np.newaxis, np.newaxis]
244
+
245
+ dtype = df_dy.dtype
246
+
247
+ # Computing diagonal n x n blocks.
248
+ dPhi_dy_0 = np.empty((m - 1, n, n), dtype=dtype)
249
+ dPhi_dy_0[:] = -np.identity(n)
250
+ dPhi_dy_0 -= h / 6 * (df_dy[:-1] + 2 * df_dy_middle)
251
+ T = stacked_matmul(df_dy_middle, df_dy[:-1])
252
+ dPhi_dy_0 -= h**2 / 12 * T
253
+
254
+ # Computing off-diagonal n x n blocks.
255
+ dPhi_dy_1 = np.empty((m - 1, n, n), dtype=dtype)
256
+ dPhi_dy_1[:] = np.identity(n)
257
+ dPhi_dy_1 -= h / 6 * (df_dy[1:] + 2 * df_dy_middle)
258
+ T = stacked_matmul(df_dy_middle, df_dy[1:])
259
+ dPhi_dy_1 += h**2 / 12 * T
260
+
261
+ values = np.hstack((dPhi_dy_0.ravel(), dPhi_dy_1.ravel(), dbc_dya.ravel(),
262
+ dbc_dyb.ravel()))
263
+
264
+ if k > 0:
265
+ df_dp = np.transpose(df_dp, (2, 0, 1))
266
+ df_dp_middle = np.transpose(df_dp_middle, (2, 0, 1))
267
+ T = stacked_matmul(df_dy_middle, df_dp[:-1] - df_dp[1:])
268
+ df_dp_middle += 0.125 * h * T
269
+ dPhi_dp = -h/6 * (df_dp[:-1] + df_dp[1:] + 4 * df_dp_middle)
270
+ values = np.hstack((values, dPhi_dp.ravel(), dbc_dp.ravel()))
271
+
272
+ J = coo_matrix((values, (i_jac, j_jac)))
273
+ return csc_matrix(J)
274
+
275
+
276
+ def collocation_fun(fun, y, p, x, h):
277
+ """Evaluate collocation residuals.
278
+
279
+ This function lies in the core of the method. The solution is sought
280
+ as a cubic C1 continuous spline with derivatives matching the ODE rhs
281
+ at given nodes `x`. Collocation conditions are formed from the equality
282
+ of the spline derivatives and rhs of the ODE system in the middle points
283
+ between nodes.
284
+
285
+ Such method is classified to Lobbato IIIA family in ODE literature.
286
+ Refer to [1]_ for the formula and some discussion.
287
+
288
+ Returns
289
+ -------
290
+ col_res : ndarray, shape (n, m - 1)
291
+ Collocation residuals at the middle points of the mesh intervals.
292
+ y_middle : ndarray, shape (n, m - 1)
293
+ Values of the cubic spline evaluated at the middle points of the mesh
294
+ intervals.
295
+ f : ndarray, shape (n, m)
296
+ RHS of the ODE system evaluated at the mesh nodes.
297
+ f_middle : ndarray, shape (n, m - 1)
298
+ RHS of the ODE system evaluated at the middle points of the mesh
299
+ intervals (and using `y_middle`).
300
+
301
+ References
302
+ ----------
303
+ .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
304
+ Control and the Maltab PSE", ACM Trans. Math. Softw., Vol. 27,
305
+ Number 3, pp. 299-316, 2001.
306
+ """
307
+ f = fun(x, y, p)
308
+ y_middle = (0.5 * (y[:, 1:] + y[:, :-1]) -
309
+ 0.125 * h * (f[:, 1:] - f[:, :-1]))
310
+ f_middle = fun(x[:-1] + 0.5 * h, y_middle, p)
311
+ col_res = y[:, 1:] - y[:, :-1] - h / 6 * (f[:, :-1] + f[:, 1:] +
312
+ 4 * f_middle)
313
+
314
+ return col_res, y_middle, f, f_middle
315
+
316
+
317
+ def prepare_sys(n, m, k, fun, bc, fun_jac, bc_jac, x, h):
318
+ """Create the function and the Jacobian for the collocation system."""
319
+ x_middle = x[:-1] + 0.5 * h
320
+ i_jac, j_jac = compute_jac_indices(n, m, k)
321
+
322
+ def col_fun(y, p):
323
+ return collocation_fun(fun, y, p, x, h)
324
+
325
+ def sys_jac(y, p, y_middle, f, f_middle, bc0):
326
+ if fun_jac is None:
327
+ df_dy, df_dp = estimate_fun_jac(fun, x, y, p, f)
328
+ df_dy_middle, df_dp_middle = estimate_fun_jac(
329
+ fun, x_middle, y_middle, p, f_middle)
330
+ else:
331
+ df_dy, df_dp = fun_jac(x, y, p)
332
+ df_dy_middle, df_dp_middle = fun_jac(x_middle, y_middle, p)
333
+
334
+ if bc_jac is None:
335
+ dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(bc, y[:, 0], y[:, -1],
336
+ p, bc0)
337
+ else:
338
+ dbc_dya, dbc_dyb, dbc_dp = bc_jac(y[:, 0], y[:, -1], p)
339
+
340
+ return construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy,
341
+ df_dy_middle, df_dp, df_dp_middle, dbc_dya,
342
+ dbc_dyb, dbc_dp)
343
+
344
+ return col_fun, sys_jac
345
+
346
+
347
+ def solve_newton(n, m, h, col_fun, bc, jac, y, p, B, bvp_tol, bc_tol):
348
+ """Solve the nonlinear collocation system by a Newton method.
349
+
350
+ This is a simple Newton method with a backtracking line search. As
351
+ advised in [1]_, an affine-invariant criterion function F = ||J^-1 r||^2
352
+ is used, where J is the Jacobian matrix at the current iteration and r is
353
+ the vector or collocation residuals (values of the system lhs).
354
+
355
+ The method alters between full Newton iterations and the fixed-Jacobian
356
+ iterations based
357
+
358
+ There are other tricks proposed in [1]_, but they are not used as they
359
+ don't seem to improve anything significantly, and even break the
360
+ convergence on some test problems I tried.
361
+
362
+ All important parameters of the algorithm are defined inside the function.
363
+
364
+ Parameters
365
+ ----------
366
+ n : int
367
+ Number of equations in the ODE system.
368
+ m : int
369
+ Number of nodes in the mesh.
370
+ h : ndarray, shape (m-1,)
371
+ Mesh intervals.
372
+ col_fun : callable
373
+ Function computing collocation residuals.
374
+ bc : callable
375
+ Function computing boundary condition residuals.
376
+ jac : callable
377
+ Function computing the Jacobian of the whole system (including
378
+ collocation and boundary condition residuals). It is supposed to
379
+ return csc_matrix.
380
+ y : ndarray, shape (n, m)
381
+ Initial guess for the function values at the mesh nodes.
382
+ p : ndarray, shape (k,)
383
+ Initial guess for the unknown parameters.
384
+ B : ndarray with shape (n, n) or None
385
+ Matrix to force the S y(a) = 0 condition for a problems with the
386
+ singular term. If None, the singular term is assumed to be absent.
387
+ bvp_tol : float
388
+ Tolerance to which we want to solve a BVP.
389
+ bc_tol : float
390
+ Tolerance to which we want to satisfy the boundary conditions.
391
+
392
+ Returns
393
+ -------
394
+ y : ndarray, shape (n, m)
395
+ Final iterate for the function values at the mesh nodes.
396
+ p : ndarray, shape (k,)
397
+ Final iterate for the unknown parameters.
398
+ singular : bool
399
+ True, if the LU decomposition failed because Jacobian turned out
400
+ to be singular.
401
+
402
+ References
403
+ ----------
404
+ .. [1] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
405
+ Boundary Value Problems for Ordinary Differential Equations"
406
+ """
407
+ # We know that the solution residuals at the middle points of the mesh
408
+ # are connected with collocation residuals r_middle = 1.5 * col_res / h.
409
+ # As our BVP solver tries to decrease relative residuals below a certain
410
+ # tolerance, it seems reasonable to terminated Newton iterations by
411
+ # comparison of r_middle / (1 + np.abs(f_middle)) with a certain threshold,
412
+ # which we choose to be 1.5 orders lower than the BVP tolerance. We rewrite
413
+ # the condition as col_res < tol_r * (1 + np.abs(f_middle)), then tol_r
414
+ # should be computed as follows:
415
+ tol_r = 2/3 * h * 5e-2 * bvp_tol
416
+
417
+ # Maximum allowed number of Jacobian evaluation and factorization, in
418
+ # other words, the maximum number of full Newton iterations. A small value
419
+ # is recommended in the literature.
420
+ max_njev = 4
421
+
422
+ # Maximum number of iterations, considering that some of them can be
423
+ # performed with the fixed Jacobian. In theory, such iterations are cheap,
424
+ # but it's not that simple in Python.
425
+ max_iter = 8
426
+
427
+ # Minimum relative improvement of the criterion function to accept the
428
+ # step (Armijo constant).
429
+ sigma = 0.2
430
+
431
+ # Step size decrease factor for backtracking.
432
+ tau = 0.5
433
+
434
+ # Maximum number of backtracking steps, the minimum step is then
435
+ # tau ** n_trial.
436
+ n_trial = 4
437
+
438
+ col_res, y_middle, f, f_middle = col_fun(y, p)
439
+ bc_res = bc(y[:, 0], y[:, -1], p)
440
+ res = np.hstack((col_res.ravel(order='F'), bc_res))
441
+
442
+ njev = 0
443
+ singular = False
444
+ recompute_jac = True
445
+ for iteration in range(max_iter):
446
+ if recompute_jac:
447
+ J = jac(y, p, y_middle, f, f_middle, bc_res)
448
+ njev += 1
449
+ try:
450
+ LU = splu(J)
451
+ except RuntimeError:
452
+ singular = True
453
+ break
454
+
455
+ step = LU.solve(res)
456
+ cost = np.dot(step, step)
457
+
458
+ y_step = step[:m * n].reshape((n, m), order='F')
459
+ p_step = step[m * n:]
460
+
461
+ alpha = 1
462
+ for trial in range(n_trial + 1):
463
+ y_new = y - alpha * y_step
464
+ if B is not None:
465
+ y_new[:, 0] = np.dot(B, y_new[:, 0])
466
+ p_new = p - alpha * p_step
467
+
468
+ col_res, y_middle, f, f_middle = col_fun(y_new, p_new)
469
+ bc_res = bc(y_new[:, 0], y_new[:, -1], p_new)
470
+ res = np.hstack((col_res.ravel(order='F'), bc_res))
471
+
472
+ step_new = LU.solve(res)
473
+ cost_new = np.dot(step_new, step_new)
474
+ if cost_new < (1 - 2 * alpha * sigma) * cost:
475
+ break
476
+
477
+ if trial < n_trial:
478
+ alpha *= tau
479
+
480
+ y = y_new
481
+ p = p_new
482
+
483
+ if njev == max_njev:
484
+ break
485
+
486
+ if (np.all(np.abs(col_res) < tol_r * (1 + np.abs(f_middle))) and
487
+ np.all(np.abs(bc_res) < bc_tol)):
488
+ break
489
+
490
+ # If the full step was taken, then we are going to continue with
491
+ # the same Jacobian. This is the approach of BVP_SOLVER.
492
+ if alpha == 1:
493
+ step = step_new
494
+ cost = cost_new
495
+ recompute_jac = False
496
+ else:
497
+ recompute_jac = True
498
+
499
+ return y, p, singular
500
+
501
+
502
+ def print_iteration_header():
503
+ print("{:^15}{:^15}{:^15}{:^15}{:^15}".format(
504
+ "Iteration", "Max residual", "Max BC residual", "Total nodes",
505
+ "Nodes added"))
506
+
507
+
508
+ def print_iteration_progress(iteration, residual, bc_residual, total_nodes,
509
+ nodes_added):
510
+ print("{:^15}{:^15.2e}{:^15.2e}{:^15}{:^15}".format(
511
+ iteration, residual, bc_residual, total_nodes, nodes_added))
512
+
513
+
514
+ class BVPResult(OptimizeResult):
515
+ pass
516
+
517
+
518
+ TERMINATION_MESSAGES = {
519
+ 0: "The algorithm converged to the desired accuracy.",
520
+ 1: "The maximum number of mesh nodes is exceeded.",
521
+ 2: "A singular Jacobian encountered when solving the collocation system.",
522
+ 3: "The solver was unable to satisfy boundary conditions tolerance on iteration 10."
523
+ }
524
+
525
+
526
+ def estimate_rms_residuals(fun, sol, x, h, p, r_middle, f_middle):
527
+ """Estimate rms values of collocation residuals using Lobatto quadrature.
528
+
529
+ The residuals are defined as the difference between the derivatives of
530
+ our solution and rhs of the ODE system. We use relative residuals, i.e.,
531
+ normalized by 1 + np.abs(f). RMS values are computed as sqrt from the
532
+ normalized integrals of the squared relative residuals over each interval.
533
+ Integrals are estimated using 5-point Lobatto quadrature [1]_, we use the
534
+ fact that residuals at the mesh nodes are identically zero.
535
+
536
+ In [2] they don't normalize integrals by interval lengths, which gives
537
+ a higher rate of convergence of the residuals by the factor of h**0.5.
538
+ I chose to do such normalization for an ease of interpretation of return
539
+ values as RMS estimates.
540
+
541
+ Returns
542
+ -------
543
+ rms_res : ndarray, shape (m - 1,)
544
+ Estimated rms values of the relative residuals over each interval.
545
+
546
+ References
547
+ ----------
548
+ .. [1] http://mathworld.wolfram.com/LobattoQuadrature.html
549
+ .. [2] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
550
+ Control and the Maltab PSE", ACM Trans. Math. Softw., Vol. 27,
551
+ Number 3, pp. 299-316, 2001.
552
+ """
553
+ x_middle = x[:-1] + 0.5 * h
554
+ s = 0.5 * h * (3/7)**0.5
555
+ x1 = x_middle + s
556
+ x2 = x_middle - s
557
+ y1 = sol(x1)
558
+ y2 = sol(x2)
559
+ y1_prime = sol(x1, 1)
560
+ y2_prime = sol(x2, 1)
561
+ f1 = fun(x1, y1, p)
562
+ f2 = fun(x2, y2, p)
563
+ r1 = y1_prime - f1
564
+ r2 = y2_prime - f2
565
+
566
+ r_middle /= 1 + np.abs(f_middle)
567
+ r1 /= 1 + np.abs(f1)
568
+ r2 /= 1 + np.abs(f2)
569
+
570
+ r1 = np.sum(np.real(r1 * np.conj(r1)), axis=0)
571
+ r2 = np.sum(np.real(r2 * np.conj(r2)), axis=0)
572
+ r_middle = np.sum(np.real(r_middle * np.conj(r_middle)), axis=0)
573
+
574
+ return (0.5 * (32 / 45 * r_middle + 49 / 90 * (r1 + r2))) ** 0.5
575
+
576
+
577
+ def create_spline(y, yp, x, h):
578
+ """Create a cubic spline given values and derivatives.
579
+
580
+ Formulas for the coefficients are taken from interpolate.CubicSpline.
581
+
582
+ Returns
583
+ -------
584
+ sol : PPoly
585
+ Constructed spline as a PPoly instance.
586
+ """
587
+ from scipy.interpolate import PPoly
588
+
589
+ n, m = y.shape
590
+ c = np.empty((4, n, m - 1), dtype=y.dtype)
591
+ slope = (y[:, 1:] - y[:, :-1]) / h
592
+ t = (yp[:, :-1] + yp[:, 1:] - 2 * slope) / h
593
+ c[0] = t / h
594
+ c[1] = (slope - yp[:, :-1]) / h - t
595
+ c[2] = yp[:, :-1]
596
+ c[3] = y[:, :-1]
597
+ c = np.moveaxis(c, 1, 0)
598
+
599
+ return PPoly(c, x, extrapolate=True, axis=1)
600
+
601
+
602
+ def modify_mesh(x, insert_1, insert_2):
603
+ """Insert nodes into a mesh.
604
+
605
+ Nodes removal logic is not established, its impact on the solver is
606
+ presumably negligible. So, only insertion is done in this function.
607
+
608
+ Parameters
609
+ ----------
610
+ x : ndarray, shape (m,)
611
+ Mesh nodes.
612
+ insert_1 : ndarray
613
+ Intervals to each insert 1 new node in the middle.
614
+ insert_2 : ndarray
615
+ Intervals to each insert 2 new nodes, such that divide an interval
616
+ into 3 equal parts.
617
+
618
+ Returns
619
+ -------
620
+ x_new : ndarray
621
+ New mesh nodes.
622
+
623
+ Notes
624
+ -----
625
+ `insert_1` and `insert_2` should not have common values.
626
+ """
627
+ # Because np.insert implementation apparently varies with a version of
628
+ # NumPy, we use a simple and reliable approach with sorting.
629
+ return np.sort(np.hstack((
630
+ x,
631
+ 0.5 * (x[insert_1] + x[insert_1 + 1]),
632
+ (2 * x[insert_2] + x[insert_2 + 1]) / 3,
633
+ (x[insert_2] + 2 * x[insert_2 + 1]) / 3
634
+ )))
635
+
636
+
637
+ def wrap_functions(fun, bc, fun_jac, bc_jac, k, a, S, D, dtype):
638
+ """Wrap functions for unified usage in the solver."""
639
+ if fun_jac is None:
640
+ fun_jac_wrapped = None
641
+
642
+ if bc_jac is None:
643
+ bc_jac_wrapped = None
644
+
645
+ if k == 0:
646
+ def fun_p(x, y, _):
647
+ return np.asarray(fun(x, y), dtype)
648
+
649
+ def bc_wrapped(ya, yb, _):
650
+ return np.asarray(bc(ya, yb), dtype)
651
+
652
+ if fun_jac is not None:
653
+ def fun_jac_p(x, y, _):
654
+ return np.asarray(fun_jac(x, y), dtype), None
655
+
656
+ if bc_jac is not None:
657
+ def bc_jac_wrapped(ya, yb, _):
658
+ dbc_dya, dbc_dyb = bc_jac(ya, yb)
659
+ return (np.asarray(dbc_dya, dtype),
660
+ np.asarray(dbc_dyb, dtype), None)
661
+ else:
662
+ def fun_p(x, y, p):
663
+ return np.asarray(fun(x, y, p), dtype)
664
+
665
+ def bc_wrapped(x, y, p):
666
+ return np.asarray(bc(x, y, p), dtype)
667
+
668
+ if fun_jac is not None:
669
+ def fun_jac_p(x, y, p):
670
+ df_dy, df_dp = fun_jac(x, y, p)
671
+ return np.asarray(df_dy, dtype), np.asarray(df_dp, dtype)
672
+
673
+ if bc_jac is not None:
674
+ def bc_jac_wrapped(ya, yb, p):
675
+ dbc_dya, dbc_dyb, dbc_dp = bc_jac(ya, yb, p)
676
+ return (np.asarray(dbc_dya, dtype), np.asarray(dbc_dyb, dtype),
677
+ np.asarray(dbc_dp, dtype))
678
+
679
+ if S is None:
680
+ fun_wrapped = fun_p
681
+ else:
682
+ def fun_wrapped(x, y, p):
683
+ f = fun_p(x, y, p)
684
+ if x[0] == a:
685
+ f[:, 0] = np.dot(D, f[:, 0])
686
+ f[:, 1:] += np.dot(S, y[:, 1:]) / (x[1:] - a)
687
+ else:
688
+ f += np.dot(S, y) / (x - a)
689
+ return f
690
+
691
+ if fun_jac is not None:
692
+ if S is None:
693
+ fun_jac_wrapped = fun_jac_p
694
+ else:
695
+ Sr = S[:, :, np.newaxis]
696
+
697
+ def fun_jac_wrapped(x, y, p):
698
+ df_dy, df_dp = fun_jac_p(x, y, p)
699
+ if x[0] == a:
700
+ df_dy[:, :, 0] = np.dot(D, df_dy[:, :, 0])
701
+ df_dy[:, :, 1:] += Sr / (x[1:] - a)
702
+ else:
703
+ df_dy += Sr / (x - a)
704
+
705
+ return df_dy, df_dp
706
+
707
+ return fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped
708
+
709
+
710
+ def solve_bvp(fun, bc, x, y, p=None, S=None, fun_jac=None, bc_jac=None,
711
+ tol=1e-3, max_nodes=1000, verbose=0, bc_tol=None):
712
+ """Solve a boundary value problem for a system of ODEs.
713
+
714
+ This function numerically solves a first order system of ODEs subject to
715
+ two-point boundary conditions::
716
+
717
+ dy / dx = f(x, y, p) + S * y / (x - a), a <= x <= b
718
+ bc(y(a), y(b), p) = 0
719
+
720
+ Here x is a 1-D independent variable, y(x) is an N-D
721
+ vector-valued function and p is a k-D vector of unknown
722
+ parameters which is to be found along with y(x). For the problem to be
723
+ determined, there must be n + k boundary conditions, i.e., bc must be an
724
+ (n + k)-D function.
725
+
726
+ The last singular term on the right-hand side of the system is optional.
727
+ It is defined by an n-by-n matrix S, such that the solution must satisfy
728
+ S y(a) = 0. This condition will be forced during iterations, so it must not
729
+ contradict boundary conditions. See [2]_ for the explanation how this term
730
+ is handled when solving BVPs numerically.
731
+
732
+ Problems in a complex domain can be solved as well. In this case, y and p
733
+ are considered to be complex, and f and bc are assumed to be complex-valued
734
+ functions, but x stays real. Note that f and bc must be complex
735
+ differentiable (satisfy Cauchy-Riemann equations [4]_), otherwise you
736
+ should rewrite your problem for real and imaginary parts separately. To
737
+ solve a problem in a complex domain, pass an initial guess for y with a
738
+ complex data type (see below).
739
+
740
+ Parameters
741
+ ----------
742
+ fun : callable
743
+ Right-hand side of the system. The calling signature is ``fun(x, y)``,
744
+ or ``fun(x, y, p)`` if parameters are present. All arguments are
745
+ ndarray: ``x`` with shape (m,), ``y`` with shape (n, m), meaning that
746
+ ``y[:, i]`` corresponds to ``x[i]``, and ``p`` with shape (k,). The
747
+ return value must be an array with shape (n, m) and with the same
748
+ layout as ``y``.
749
+ bc : callable
750
+ Function evaluating residuals of the boundary conditions. The calling
751
+ signature is ``bc(ya, yb)``, or ``bc(ya, yb, p)`` if parameters are
752
+ present. All arguments are ndarray: ``ya`` and ``yb`` with shape (n,),
753
+ and ``p`` with shape (k,). The return value must be an array with
754
+ shape (n + k,).
755
+ x : array_like, shape (m,)
756
+ Initial mesh. Must be a strictly increasing sequence of real numbers
757
+ with ``x[0]=a`` and ``x[-1]=b``.
758
+ y : array_like, shape (n, m)
759
+ Initial guess for the function values at the mesh nodes, ith column
760
+ corresponds to ``x[i]``. For problems in a complex domain pass `y`
761
+ with a complex data type (even if the initial guess is purely real).
762
+ p : array_like with shape (k,) or None, optional
763
+ Initial guess for the unknown parameters. If None (default), it is
764
+ assumed that the problem doesn't depend on any parameters.
765
+ S : array_like with shape (n, n) or None
766
+ Matrix defining the singular term. If None (default), the problem is
767
+ solved without the singular term.
768
+ fun_jac : callable or None, optional
769
+ Function computing derivatives of f with respect to y and p. The
770
+ calling signature is ``fun_jac(x, y)``, or ``fun_jac(x, y, p)`` if
771
+ parameters are present. The return must contain 1 or 2 elements in the
772
+ following order:
773
+
774
+ * df_dy : array_like with shape (n, n, m), where an element
775
+ (i, j, q) equals to d f_i(x_q, y_q, p) / d (y_q)_j.
776
+ * df_dp : array_like with shape (n, k, m), where an element
777
+ (i, j, q) equals to d f_i(x_q, y_q, p) / d p_j.
778
+
779
+ Here q numbers nodes at which x and y are defined, whereas i and j
780
+ number vector components. If the problem is solved without unknown
781
+ parameters, df_dp should not be returned.
782
+
783
+ If `fun_jac` is None (default), the derivatives will be estimated
784
+ by the forward finite differences.
785
+ bc_jac : callable or None, optional
786
+ Function computing derivatives of bc with respect to ya, yb, and p.
787
+ The calling signature is ``bc_jac(ya, yb)``, or ``bc_jac(ya, yb, p)``
788
+ if parameters are present. The return must contain 2 or 3 elements in
789
+ the following order:
790
+
791
+ * dbc_dya : array_like with shape (n, n), where an element (i, j)
792
+ equals to d bc_i(ya, yb, p) / d ya_j.
793
+ * dbc_dyb : array_like with shape (n, n), where an element (i, j)
794
+ equals to d bc_i(ya, yb, p) / d yb_j.
795
+ * dbc_dp : array_like with shape (n, k), where an element (i, j)
796
+ equals to d bc_i(ya, yb, p) / d p_j.
797
+
798
+ If the problem is solved without unknown parameters, dbc_dp should not
799
+ be returned.
800
+
801
+ If `bc_jac` is None (default), the derivatives will be estimated by
802
+ the forward finite differences.
803
+ tol : float, optional
804
+ Desired tolerance of the solution. If we define ``r = y' - f(x, y)``,
805
+ where y is the found solution, then the solver tries to achieve on each
806
+ mesh interval ``norm(r / (1 + abs(f)) < tol``, where ``norm`` is
807
+ estimated in a root mean squared sense (using a numerical quadrature
808
+ formula). Default is 1e-3.
809
+ max_nodes : int, optional
810
+ Maximum allowed number of the mesh nodes. If exceeded, the algorithm
811
+ terminates. Default is 1000.
812
+ verbose : {0, 1, 2}, optional
813
+ Level of algorithm's verbosity:
814
+
815
+ * 0 (default) : work silently.
816
+ * 1 : display a termination report.
817
+ * 2 : display progress during iterations.
818
+ bc_tol : float, optional
819
+ Desired absolute tolerance for the boundary condition residuals: `bc`
820
+ value should satisfy ``abs(bc) < bc_tol`` component-wise.
821
+ Equals to `tol` by default. Up to 10 iterations are allowed to achieve this
822
+ tolerance.
823
+
824
+ Returns
825
+ -------
826
+ Bunch object with the following fields defined:
827
+ sol : PPoly
828
+ Found solution for y as `scipy.interpolate.PPoly` instance, a C1
829
+ continuous cubic spline.
830
+ p : ndarray or None, shape (k,)
831
+ Found parameters. None, if the parameters were not present in the
832
+ problem.
833
+ x : ndarray, shape (m,)
834
+ Nodes of the final mesh.
835
+ y : ndarray, shape (n, m)
836
+ Solution values at the mesh nodes.
837
+ yp : ndarray, shape (n, m)
838
+ Solution derivatives at the mesh nodes.
839
+ rms_residuals : ndarray, shape (m - 1,)
840
+ RMS values of the relative residuals over each mesh interval (see the
841
+ description of `tol` parameter).
842
+ niter : int
843
+ Number of completed iterations.
844
+ status : int
845
+ Reason for algorithm termination:
846
+
847
+ * 0: The algorithm converged to the desired accuracy.
848
+ * 1: The maximum number of mesh nodes is exceeded.
849
+ * 2: A singular Jacobian encountered when solving the collocation
850
+ system.
851
+
852
+ message : string
853
+ Verbal description of the termination reason.
854
+ success : bool
855
+ True if the algorithm converged to the desired accuracy (``status=0``).
856
+
857
+ Notes
858
+ -----
859
+ This function implements a 4th order collocation algorithm with the
860
+ control of residuals similar to [1]_. A collocation system is solved
861
+ by a damped Newton method with an affine-invariant criterion function as
862
+ described in [3]_.
863
+
864
+ Note that in [1]_ integral residuals are defined without normalization
865
+ by interval lengths. So, their definition is different by a multiplier of
866
+ h**0.5 (h is an interval length) from the definition used here.
867
+
868
+ .. versionadded:: 0.18.0
869
+
870
+ References
871
+ ----------
872
+ .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
873
+ Control and the Maltab PSE", ACM Trans. Math. Softw., Vol. 27,
874
+ Number 3, pp. 299-316, 2001.
875
+ .. [2] L.F. Shampine, P. H. Muir and H. Xu, "A User-Friendly Fortran BVP
876
+ Solver".
877
+ .. [3] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
878
+ Boundary Value Problems for Ordinary Differential Equations".
879
+ .. [4] `Cauchy-Riemann equations
880
+ <https://en.wikipedia.org/wiki/Cauchy-Riemann_equations>`_ on
881
+ Wikipedia.
882
+
883
+ Examples
884
+ --------
885
+ In the first example, we solve Bratu's problem::
886
+
887
+ y'' + k * exp(y) = 0
888
+ y(0) = y(1) = 0
889
+
890
+ for k = 1.
891
+
892
+ We rewrite the equation as a first-order system and implement its
893
+ right-hand side evaluation::
894
+
895
+ y1' = y2
896
+ y2' = -exp(y1)
897
+
898
+ >>> import numpy as np
899
+ >>> def fun(x, y):
900
+ ... return np.vstack((y[1], -np.exp(y[0])))
901
+
902
+ Implement evaluation of the boundary condition residuals:
903
+
904
+ >>> def bc(ya, yb):
905
+ ... return np.array([ya[0], yb[0]])
906
+
907
+ Define the initial mesh with 5 nodes:
908
+
909
+ >>> x = np.linspace(0, 1, 5)
910
+
911
+ This problem is known to have two solutions. To obtain both of them, we
912
+ use two different initial guesses for y. We denote them by subscripts
913
+ a and b.
914
+
915
+ >>> y_a = np.zeros((2, x.size))
916
+ >>> y_b = np.zeros((2, x.size))
917
+ >>> y_b[0] = 3
918
+
919
+ Now we are ready to run the solver.
920
+
921
+ >>> from scipy.integrate import solve_bvp
922
+ >>> res_a = solve_bvp(fun, bc, x, y_a)
923
+ >>> res_b = solve_bvp(fun, bc, x, y_b)
924
+
925
+ Let's plot the two found solutions. We take an advantage of having the
926
+ solution in a spline form to produce a smooth plot.
927
+
928
+ >>> x_plot = np.linspace(0, 1, 100)
929
+ >>> y_plot_a = res_a.sol(x_plot)[0]
930
+ >>> y_plot_b = res_b.sol(x_plot)[0]
931
+ >>> import matplotlib.pyplot as plt
932
+ >>> plt.plot(x_plot, y_plot_a, label='y_a')
933
+ >>> plt.plot(x_plot, y_plot_b, label='y_b')
934
+ >>> plt.legend()
935
+ >>> plt.xlabel("x")
936
+ >>> plt.ylabel("y")
937
+ >>> plt.show()
938
+
939
+ We see that the two solutions have similar shape, but differ in scale
940
+ significantly.
941
+
942
+ In the second example, we solve a simple Sturm-Liouville problem::
943
+
944
+ y'' + k**2 * y = 0
945
+ y(0) = y(1) = 0
946
+
947
+ It is known that a non-trivial solution y = A * sin(k * x) is possible for
948
+ k = pi * n, where n is an integer. To establish the normalization constant
949
+ A = 1 we add a boundary condition::
950
+
951
+ y'(0) = k
952
+
953
+ Again, we rewrite our equation as a first-order system and implement its
954
+ right-hand side evaluation::
955
+
956
+ y1' = y2
957
+ y2' = -k**2 * y1
958
+
959
+ >>> def fun(x, y, p):
960
+ ... k = p[0]
961
+ ... return np.vstack((y[1], -k**2 * y[0]))
962
+
963
+ Note that parameters p are passed as a vector (with one element in our
964
+ case).
965
+
966
+ Implement the boundary conditions:
967
+
968
+ >>> def bc(ya, yb, p):
969
+ ... k = p[0]
970
+ ... return np.array([ya[0], yb[0], ya[1] - k])
971
+
972
+ Set up the initial mesh and guess for y. We aim to find the solution for
973
+ k = 2 * pi, to achieve that we set values of y to approximately follow
974
+ sin(2 * pi * x):
975
+
976
+ >>> x = np.linspace(0, 1, 5)
977
+ >>> y = np.zeros((2, x.size))
978
+ >>> y[0, 1] = 1
979
+ >>> y[0, 3] = -1
980
+
981
+ Run the solver with 6 as an initial guess for k.
982
+
983
+ >>> sol = solve_bvp(fun, bc, x, y, p=[6])
984
+
985
+ We see that the found k is approximately correct:
986
+
987
+ >>> sol.p[0]
988
+ 6.28329460046
989
+
990
+ And, finally, plot the solution to see the anticipated sinusoid:
991
+
992
+ >>> x_plot = np.linspace(0, 1, 100)
993
+ >>> y_plot = sol.sol(x_plot)[0]
994
+ >>> plt.plot(x_plot, y_plot)
995
+ >>> plt.xlabel("x")
996
+ >>> plt.ylabel("y")
997
+ >>> plt.show()
998
+ """
999
+ x = np.asarray(x, dtype=float)
1000
+ if x.ndim != 1:
1001
+ raise ValueError("`x` must be 1 dimensional.")
1002
+ h = np.diff(x)
1003
+ if np.any(h <= 0):
1004
+ raise ValueError("`x` must be strictly increasing.")
1005
+ a = x[0]
1006
+
1007
+ y = np.asarray(y)
1008
+ if np.issubdtype(y.dtype, np.complexfloating):
1009
+ dtype = complex
1010
+ else:
1011
+ dtype = float
1012
+ y = y.astype(dtype, copy=False)
1013
+
1014
+ if y.ndim != 2:
1015
+ raise ValueError("`y` must be 2 dimensional.")
1016
+ if y.shape[1] != x.shape[0]:
1017
+ raise ValueError(f"`y` is expected to have {x.shape[0]} columns, but actually "
1018
+ f"has {y.shape[1]}.")
1019
+
1020
+ if p is None:
1021
+ p = np.array([])
1022
+ else:
1023
+ p = np.asarray(p, dtype=dtype)
1024
+ if p.ndim != 1:
1025
+ raise ValueError("`p` must be 1 dimensional.")
1026
+
1027
+ if tol < 100 * EPS:
1028
+ warn(f"`tol` is too low, setting to {100 * EPS:.2e}", stacklevel=2)
1029
+ tol = 100 * EPS
1030
+
1031
+ if verbose not in [0, 1, 2]:
1032
+ raise ValueError("`verbose` must be in [0, 1, 2].")
1033
+
1034
+ n = y.shape[0]
1035
+ k = p.shape[0]
1036
+
1037
+ if S is not None:
1038
+ S = np.asarray(S, dtype=dtype)
1039
+ if S.shape != (n, n):
1040
+ raise ValueError(f"`S` is expected to have shape {(n, n)}, "
1041
+ f"but actually has {S.shape}")
1042
+
1043
+ # Compute I - S^+ S to impose necessary boundary conditions.
1044
+ B = np.identity(n) - np.dot(pinv(S), S)
1045
+
1046
+ y[:, 0] = np.dot(B, y[:, 0])
1047
+
1048
+ # Compute (I - S)^+ to correct derivatives at x=a.
1049
+ D = pinv(np.identity(n) - S)
1050
+ else:
1051
+ B = None
1052
+ D = None
1053
+
1054
+ if bc_tol is None:
1055
+ bc_tol = tol
1056
+
1057
+ # Maximum number of iterations
1058
+ max_iteration = 10
1059
+
1060
+ fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped = wrap_functions(
1061
+ fun, bc, fun_jac, bc_jac, k, a, S, D, dtype)
1062
+
1063
+ f = fun_wrapped(x, y, p)
1064
+ if f.shape != y.shape:
1065
+ raise ValueError(f"`fun` return is expected to have shape {y.shape}, "
1066
+ f"but actually has {f.shape}.")
1067
+
1068
+ bc_res = bc_wrapped(y[:, 0], y[:, -1], p)
1069
+ if bc_res.shape != (n + k,):
1070
+ raise ValueError(f"`bc` return is expected to have shape {(n + k,)}, "
1071
+ f"but actually has {bc_res.shape}.")
1072
+
1073
+ status = 0
1074
+ iteration = 0
1075
+ if verbose == 2:
1076
+ print_iteration_header()
1077
+
1078
+ while True:
1079
+ m = x.shape[0]
1080
+
1081
+ col_fun, jac_sys = prepare_sys(n, m, k, fun_wrapped, bc_wrapped,
1082
+ fun_jac_wrapped, bc_jac_wrapped, x, h)
1083
+ y, p, singular = solve_newton(n, m, h, col_fun, bc_wrapped, jac_sys,
1084
+ y, p, B, tol, bc_tol)
1085
+ iteration += 1
1086
+
1087
+ col_res, y_middle, f, f_middle = collocation_fun(fun_wrapped, y,
1088
+ p, x, h)
1089
+ bc_res = bc_wrapped(y[:, 0], y[:, -1], p)
1090
+ max_bc_res = np.max(abs(bc_res))
1091
+
1092
+ # This relation is not trivial, but can be verified.
1093
+ r_middle = 1.5 * col_res / h
1094
+ sol = create_spline(y, f, x, h)
1095
+ rms_res = estimate_rms_residuals(fun_wrapped, sol, x, h, p,
1096
+ r_middle, f_middle)
1097
+ max_rms_res = np.max(rms_res)
1098
+
1099
+ if singular:
1100
+ status = 2
1101
+ break
1102
+
1103
+ insert_1, = np.nonzero((rms_res > tol) & (rms_res < 100 * tol))
1104
+ insert_2, = np.nonzero(rms_res >= 100 * tol)
1105
+ nodes_added = insert_1.shape[0] + 2 * insert_2.shape[0]
1106
+
1107
+ if m + nodes_added > max_nodes:
1108
+ status = 1
1109
+ if verbose == 2:
1110
+ nodes_added = f"({nodes_added})"
1111
+ print_iteration_progress(iteration, max_rms_res, max_bc_res,
1112
+ m, nodes_added)
1113
+ break
1114
+
1115
+ if verbose == 2:
1116
+ print_iteration_progress(iteration, max_rms_res, max_bc_res, m,
1117
+ nodes_added)
1118
+
1119
+ if nodes_added > 0:
1120
+ x = modify_mesh(x, insert_1, insert_2)
1121
+ h = np.diff(x)
1122
+ y = sol(x)
1123
+ elif max_bc_res <= bc_tol:
1124
+ status = 0
1125
+ break
1126
+ elif iteration >= max_iteration:
1127
+ status = 3
1128
+ break
1129
+
1130
+ if verbose > 0:
1131
+ if status == 0:
1132
+ print(f"Solved in {iteration} iterations, number of nodes {x.shape[0]}. \n"
1133
+ f"Maximum relative residual: {max_rms_res:.2e} \n"
1134
+ f"Maximum boundary residual: {max_bc_res:.2e}")
1135
+ elif status == 1:
1136
+ print(f"Number of nodes is exceeded after iteration {iteration}. \n"
1137
+ f"Maximum relative residual: {max_rms_res:.2e} \n"
1138
+ f"Maximum boundary residual: {max_bc_res:.2e}")
1139
+ elif status == 2:
1140
+ print("Singular Jacobian encountered when solving the collocation "
1141
+ f"system on iteration {iteration}. \n"
1142
+ f"Maximum relative residual: {max_rms_res:.2e} \n"
1143
+ f"Maximum boundary residual: {max_bc_res:.2e}")
1144
+ elif status == 3:
1145
+ print("The solver was unable to satisfy boundary conditions "
1146
+ f"tolerance on iteration {iteration}. \n"
1147
+ f"Maximum relative residual: {max_rms_res:.2e} \n"
1148
+ f"Maximum boundary residual: {max_bc_res:.2e}")
1149
+
1150
+ if p.size == 0:
1151
+ p = None
1152
+
1153
+ return BVPResult(sol=sol, p=p, x=x, y=y, yp=f, rms_residuals=rms_res,
1154
+ niter=iteration, status=status,
1155
+ message=TERMINATION_MESSAGES[status], success=status == 0)
venv/lib/python3.10/site-packages/scipy/integrate/_dop.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (117 kB)
venv/lib/python3.10/site-packages/scipy/integrate/_lsoda.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (113 kB)
venv/lib/python3.10/site-packages/scipy/integrate/_ode.py ADDED
@@ -0,0 +1,1376 @@
1
+ # Authors: Pearu Peterson, Pauli Virtanen, John Travers
2
+ """
3
+ First-order ODE integrators.
4
+
5
+ User-friendly interface to various numerical integrators for solving a
6
+ system of first order ODEs with prescribed initial conditions::
7
+
8
+ d y(t)[i]
9
+ --------- = f(t,y(t))[i],
10
+ d t
11
+
12
+ y(t=0)[i] = y0[i],
13
+
14
+ where::
15
+
16
+ i = 0, ..., len(y0) - 1
17
+
18
+ class ode
19
+ ---------
20
+
21
+ A generic interface class to numeric integrators. It has the following
22
+ methods::
23
+
24
+ integrator = ode(f, jac=None)
25
+ integrator = integrator.set_integrator(name, **params)
26
+ integrator = integrator.set_initial_value(y0, t0=0.0)
27
+ integrator = integrator.set_f_params(*args)
28
+ integrator = integrator.set_jac_params(*args)
29
+ y1 = integrator.integrate(t1, step=False, relax=False)
30
+ flag = integrator.successful()
31
+
32
+ class complex_ode
33
+ -----------------
34
+
35
+ This class has the same generic interface as ode, except it can handle complex
36
+ f, y and Jacobians by transparently translating them into the equivalent
37
+ real-valued system. It supports the real-valued solvers (i.e., not zvode) and is
38
+ an alternative to ode with the zvode solver, sometimes performing better.
39
+ """
40
+ # XXX: Integrators must have:
41
+ # ===========================
42
+ # cvode - C version of vode and vodpk with many improvements.
43
+ # Get it from http://www.netlib.org/ode/cvode.tar.gz.
44
+ # To wrap cvode to Python, one must write the extension module by
45
+ # hand. Its interface is so much 'advanced C' that using f2py
46
+ # would be too complicated (or impossible).
47
+ #
48
+ # How to define a new integrator:
49
+ # ===============================
50
+ #
51
+ # class myodeint(IntegratorBase):
52
+ #
53
+ # runner = <odeint function> or None
54
+ #
55
+ # def __init__(self,...): # required
56
+ # <initialize>
57
+ #
58
+ # def reset(self,n,has_jac): # optional
59
+ # # n - the size of the problem (number of equations)
60
+ # # has_jac - whether user has supplied its own routine for Jacobian
61
+ # <allocate memory,initialize further>
62
+ #
63
+ # def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required
64
+ # # this method is called to integrate from t=t0 to t=t1
65
+ # # with initial condition y0. f and jac are user-supplied functions
66
+ # # that define the problem. f_params,jac_params are additional
67
+ # # arguments
68
+ # # to these functions.
69
+ # <calculate y1>
70
+ # if <calculation was unsuccessful>:
71
+ # self.success = 0
72
+ # return y1, t1
73
+ #
74
+ # # In addition, one can define step() and run_relax() methods (they
75
+ # # take the same arguments as run()) if the integrator can support
76
+ # # these features (see IntegratorBase doc strings).
77
+ #
78
+ # if myodeint.runner:
79
+ # IntegratorBase.integrator_classes.append(myodeint)
80
+
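The comment block above is the template for adding a new integrator. A hedged sketch of that template follows: a fixed-step explicit Euler integrator. The class name euler and its dt keyword are illustrative only, and run() returns (y1, t1) because ode.integrate unpacks the result in that order.

import numpy as np
from scipy.integrate import ode
from scipy.integrate._ode import IntegratorBase  # private module, imported here only for illustration

class euler(IntegratorBase):
    def __init__(self, dt=1e-3):
        self.dt = dt
        self.success = 1

    def reset(self, n, has_jac):
        self.success = 1

    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
        # Fixed-step forward Euler; ode.integrate unpacks the return value as (y, t).
        n_steps = max(1, int(np.ceil((t1 - t0) / self.dt)))
        h = (t1 - t0) / n_steps
        y = np.asarray(y0, dtype=float)
        t = t0
        for _ in range(n_steps):
            y = y + h * np.asarray(f(t, y, *f_params))
            t += h
        return y, t1

IntegratorBase.integrator_classes.append(euler)

r = ode(lambda t, y: -y).set_integrator('euler', dt=1e-3).set_initial_value([1.0], 0.0)
print(r.integrate(2.0))  # approximately exp(-2)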
81
+ __all__ = ['ode', 'complex_ode']
82
+
83
+ import re
84
+ import warnings
85
+
86
+ from numpy import asarray, array, zeros, isscalar, real, imag, vstack
87
+
88
+ from . import _vode
89
+ from . import _dop
90
+ from . import _lsoda
91
+
92
+
93
+ _dop_int_dtype = _dop.types.intvar.dtype
94
+ _vode_int_dtype = _vode.types.intvar.dtype
95
+ _lsoda_int_dtype = _lsoda.types.intvar.dtype
96
+
97
+
98
+ # ------------------------------------------------------------------------------
99
+ # User interface
100
+ # ------------------------------------------------------------------------------
101
+
102
+
103
+ class ode:
104
+ """
105
+ A generic interface class to numeric integrators.
106
+
107
+ Solve an equation system :math:`y'(t) = f(t,y)` with (optional) ``jac = df/dy``.
108
+
109
+ *Note*: The first two arguments of ``f(t, y, ...)`` are in the
110
+ opposite order of the arguments in the system definition function used
111
+ by `scipy.integrate.odeint`.
112
+
113
+ Parameters
114
+ ----------
115
+ f : callable ``f(t, y, *f_args)``
116
+ Right-hand side of the differential equation. t is a scalar,
117
+ ``y.shape == (n,)``.
118
+ ``f_args`` is set by calling ``set_f_params(*args)``.
119
+ `f` should return a scalar, array or list (not a tuple).
120
+ jac : callable ``jac(t, y, *jac_args)``, optional
121
+ Jacobian of the right-hand side, ``jac[i,j] = d f[i] / d y[j]``.
122
+ ``jac_args`` is set by calling ``set_jac_params(*args)``.
123
+
124
+ Attributes
125
+ ----------
126
+ t : float
127
+ Current time.
128
+ y : ndarray
129
+ Current variable values.
130
+
131
+ See also
132
+ --------
133
+ odeint : an integrator with a simpler interface based on lsoda from ODEPACK
134
+ quad : for finding the area under a curve
135
+
136
+ Notes
137
+ -----
138
+ Available integrators are listed below. They can be selected using
139
+ the `set_integrator` method.
140
+
141
+ "vode"
142
+
143
+ Real-valued Variable-coefficient Ordinary Differential Equation
144
+ solver, with fixed-leading-coefficient implementation. It provides
145
+ implicit Adams method (for non-stiff problems) and a method based on
146
+ backward differentiation formulas (BDF) (for stiff problems).
147
+
148
+ Source: http://www.netlib.org/ode/vode.f
149
+
150
+ .. warning::
151
+
152
+ This integrator is not re-entrant. You cannot have two `ode`
153
+ instances using the "vode" integrator at the same time.
154
+
155
+ This integrator accepts the following parameters in `set_integrator`
156
+ method of the `ode` class:
157
+
158
+ - atol : float or sequence
159
+ absolute tolerance for solution
160
+ - rtol : float or sequence
161
+ relative tolerance for solution
162
+ - lband : None or int
163
+ - uband : None or int
164
+ Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
165
+ Setting these requires your jac routine to return the jacobian
166
+ in packed format, jac_packed[i-j+uband, j] = jac[i,j]. The
167
+ dimension of the matrix must be (lband+uband+1, len(y)).
168
+ - method: 'adams' or 'bdf'
169
+ Which solver to use, Adams (non-stiff) or BDF (stiff)
170
+ - with_jacobian : bool
171
+ This option is only considered when the user has not supplied a
172
+ Jacobian function and has not indicated (by setting either band)
173
+ that the Jacobian is banded. In this case, `with_jacobian` specifies
174
+ whether the iteration method of the ODE solver's correction step is
175
+ chord iteration with an internally generated full Jacobian or
176
+ functional iteration with no Jacobian.
177
+ - nsteps : int
178
+ Maximum number of (internally defined) steps allowed during one
179
+ call to the solver.
180
+ - first_step : float
181
+ - min_step : float
182
+ - max_step : float
183
+ Limits for the step sizes used by the integrator.
184
+ - order : int
185
+ Maximum order used by the integrator,
186
+ order <= 12 for Adams, <= 5 for BDF.
187
+
188
+ "zvode"
189
+
190
+ Complex-valued Variable-coefficient Ordinary Differential Equation
191
+ solver, with fixed-leading-coefficient implementation. It provides
192
+ implicit Adams method (for non-stiff problems) and a method based on
193
+ backward differentiation formulas (BDF) (for stiff problems).
194
+
195
+ Source: http://www.netlib.org/ode/zvode.f
196
+
197
+ .. warning::
198
+
199
+ This integrator is not re-entrant. You cannot have two `ode`
200
+ instances using the "zvode" integrator at the same time.
201
+
202
+ This integrator accepts the same parameters in `set_integrator`
203
+ as the "vode" solver.
204
+
205
+ .. note::
206
+
207
+ When using ZVODE for a stiff system, it should only be used for
208
+ the case in which the function f is analytic, that is, when each f(i)
209
+ is an analytic function of each y(j). Analyticity means that the
210
+ partial derivative df(i)/dy(j) is a unique complex number, and this
211
+ fact is critical in the way ZVODE solves the dense or banded linear
212
+ systems that arise in the stiff case. For a complex stiff ODE system
213
+ in which f is not analytic, ZVODE is likely to have convergence
214
+ failures, and for this problem one should instead use DVODE on the
215
+ equivalent real system (in the real and imaginary parts of y).
216
+
217
+ "lsoda"
218
+
219
+ Real-valued Variable-coefficient Ordinary Differential Equation
220
+ solver, with fixed-leading-coefficient implementation. It provides
221
+ automatic method switching between implicit Adams method (for non-stiff
222
+ problems) and a method based on backward differentiation formulas (BDF)
223
+ (for stiff problems).
224
+
225
+ Source: http://www.netlib.org/odepack
226
+
227
+ .. warning::
228
+
229
+ This integrator is not re-entrant. You cannot have two `ode`
230
+ instances using the "lsoda" integrator at the same time.
231
+
232
+ This integrator accepts the following parameters in `set_integrator`
233
+ method of the `ode` class:
234
+
235
+ - atol : float or sequence
236
+ absolute tolerance for solution
237
+ - rtol : float or sequence
238
+ relative tolerance for solution
239
+ - lband : None or int
240
+ - uband : None or int
241
+ Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
242
+ Setting these requires your jac routine to return the jacobian
243
+ in packed format, jac_packed[i-j+uband, j] = jac[i,j].
244
+ - with_jacobian : bool
245
+ *Not used.*
246
+ - nsteps : int
247
+ Maximum number of (internally defined) steps allowed during one
248
+ call to the solver.
249
+ - first_step : float
250
+ - min_step : float
251
+ - max_step : float
252
+ Limits for the step sizes used by the integrator.
253
+ - max_order_ns : int
254
+ Maximum order used in the nonstiff case (default 12).
255
+ - max_order_s : int
256
+ Maximum order used in the stiff case (default 5).
257
+ - max_hnil : int
258
+ Maximum number of messages reporting too small step size (t + h = t)
259
+ (default 0)
260
+ - ixpr : int
261
+ Whether to generate extra printing at method switches (default False).
262
+
263
+ "dopri5"
264
+
265
+ This is an explicit runge-kutta method of order (4)5 due to Dormand &
266
+ Prince (with stepsize control and dense output).
267
+
268
+ Authors:
269
+
270
+ E. Hairer and G. Wanner
271
+ Universite de Geneve, Dept. de Mathematiques
272
+ CH-1211 Geneve 24, Switzerland
273
274
+
275
+ This code is described in [HNW93]_.
276
+
277
+ This integrator accepts the following parameters in set_integrator()
278
+ method of the ode class:
279
+
280
+ - atol : float or sequence
281
+ absolute tolerance for solution
282
+ - rtol : float or sequence
283
+ relative tolerance for solution
284
+ - nsteps : int
285
+ Maximum number of (internally defined) steps allowed during one
286
+ call to the solver.
287
+ - first_step : float
288
+ - max_step : float
289
+ - safety : float
290
+ Safety factor on new step selection (default 0.9)
291
+ - ifactor : float
292
+ - dfactor : float
293
+ Maximum factor to increase/decrease step size by in one step
294
+ - beta : float
295
+ Beta parameter for stabilised step size control.
296
+ - verbosity : int
297
+ Switch for printing messages (< 0 for no messages).
298
+
299
+ "dop853"
300
+
301
+ This is an explicit runge-kutta method of order 8(5,3) due to Dormand
302
+ & Prince (with stepsize control and dense output).
303
+
304
+ Options and references the same as "dopri5".
305
+
306
+ Examples
307
+ --------
308
+
309
+ A problem to integrate and the corresponding jacobian:
310
+
311
+ >>> from scipy.integrate import ode
312
+ >>>
313
+ >>> y0, t0 = [1.0j, 2.0], 0
314
+ >>>
315
+ >>> def f(t, y, arg1):
316
+ ... return [1j*arg1*y[0] + y[1], -arg1*y[1]**2]
317
+ >>> def jac(t, y, arg1):
318
+ ... return [[1j*arg1, 1], [0, -arg1*2*y[1]]]
319
+
320
+ The integration:
321
+
322
+ >>> r = ode(f, jac).set_integrator('zvode', method='bdf')
323
+ >>> r.set_initial_value(y0, t0).set_f_params(2.0).set_jac_params(2.0)
324
+ >>> t1 = 10
325
+ >>> dt = 1
326
+ >>> while r.successful() and r.t < t1:
327
+ ... print(r.t+dt, r.integrate(r.t+dt))
328
+ 1 [-0.71038232+0.23749653j 0.40000271+0.j ]
329
+ 2.0 [0.19098503-0.52359246j 0.22222356+0.j ]
330
+ 3.0 [0.47153208+0.52701229j 0.15384681+0.j ]
331
+ 4.0 [-0.61905937+0.30726255j 0.11764744+0.j ]
332
+ 5.0 [0.02340997-0.61418799j 0.09523835+0.j ]
333
+ 6.0 [0.58643071+0.339819j 0.08000018+0.j ]
334
+ 7.0 [-0.52070105+0.44525141j 0.06896565+0.j ]
335
+ 8.0 [-0.15986733-0.61234476j 0.06060616+0.j ]
336
+ 9.0 [0.64850462+0.15048982j 0.05405414+0.j ]
337
+ 10.0 [-0.38404699+0.56382299j 0.04878055+0.j ]
338
+
339
+ References
340
+ ----------
341
+ .. [HNW93] E. Hairer, S.P. Norsett and G. Wanner, Solving Ordinary
342
+ Differential Equations i. Nonstiff Problems. 2nd edition.
343
+ Springer Series in Computational Mathematics,
344
+ Springer-Verlag (1993)
345
+
346
+ """
347
+
348
+ def __init__(self, f, jac=None):
349
+ self.stiff = 0
350
+ self.f = f
351
+ self.jac = jac
352
+ self.f_params = ()
353
+ self.jac_params = ()
354
+ self._y = []
355
+
356
+ @property
357
+ def y(self):
358
+ return self._y
359
+
360
+ def set_initial_value(self, y, t=0.0):
361
+ """Set initial conditions y(t) = y."""
362
+ if isscalar(y):
363
+ y = [y]
364
+ n_prev = len(self._y)
365
+ if not n_prev:
366
+ self.set_integrator('') # find first available integrator
367
+ self._y = asarray(y, self._integrator.scalar)
368
+ self.t = t
369
+ self._integrator.reset(len(self._y), self.jac is not None)
370
+ return self
371
+
372
+ def set_integrator(self, name, **integrator_params):
373
+ """
374
+ Set integrator by name.
375
+
376
+ Parameters
377
+ ----------
378
+ name : str
379
+ Name of the integrator.
380
+ **integrator_params
381
+ Additional parameters for the integrator.
382
+ """
383
+ integrator = find_integrator(name)
384
+ if integrator is None:
385
+ # FIXME: this really should raise an exception. Will that break
386
+ # any code?
387
+ message = f'No integrator name match with {name!r} or is not available.'
388
+ warnings.warn(message, stacklevel=2)
389
+ else:
390
+ self._integrator = integrator(**integrator_params)
391
+ if not len(self._y):
392
+ self.t = 0.0
393
+ self._y = array([0.0], self._integrator.scalar)
394
+ self._integrator.reset(len(self._y), self.jac is not None)
395
+ return self
396
+
397
+ def integrate(self, t, step=False, relax=False):
398
+ """Find y=y(t), set y as an initial condition, and return y.
399
+
400
+ Parameters
401
+ ----------
402
+ t : float
403
+ The endpoint of the integration step.
404
+ step : bool
405
+ If True, and if the integrator supports the step method,
406
+ then perform a single integration step and return.
407
+ This parameter is provided in order to expose internals of
408
+ the implementation, and should not be changed from its default
409
+ value in most cases.
410
+ relax : bool
411
+ If True and if the integrator supports the run_relax method,
412
+ then integrate until t_1 >= t and return. ``relax`` is not
413
+ referenced if ``step=True``.
414
+ This parameter is provided in order to expose internals of
415
+ the implementation, and should not be changed from its default
416
+ value in most cases.
417
+
418
+ Returns
419
+ -------
420
+ y : float
421
+ The integrated value at t
422
+ """
423
+ if step and self._integrator.supports_step:
424
+ mth = self._integrator.step
425
+ elif relax and self._integrator.supports_run_relax:
426
+ mth = self._integrator.run_relax
427
+ else:
428
+ mth = self._integrator.run
429
+
430
+ try:
431
+ self._y, self.t = mth(self.f, self.jac or (lambda: None),
432
+ self._y, self.t, t,
433
+ self.f_params, self.jac_params)
434
+ except SystemError as e:
435
+ # f2py issue with tuple returns, see ticket 1187.
436
+ raise ValueError(
437
+ 'Function to integrate must not return a tuple.'
438
+ ) from e
439
+
440
+ return self._y
441
+
442
+ def successful(self):
443
+ """Check if integration was successful."""
444
+ try:
445
+ self._integrator
446
+ except AttributeError:
447
+ self.set_integrator('')
448
+ return self._integrator.success == 1
449
+
450
+ def get_return_code(self):
451
+ """Extracts the return code for the integration to enable better control
452
+ if the integration fails.
453
+
454
+ In general, a return code > 0 implies success, while a return code < 0
455
+ implies failure.
456
+
457
+ Notes
458
+ -----
459
+ This section describes possible return codes and their meaning, for available
460
+ integrators that can be selected by `set_integrator` method.
461
+
462
+ "vode"
463
+
464
+ =========== =======
465
+ Return Code Message
466
+ =========== =======
467
+ 2 Integration successful.
468
+ -1 Excess work done on this call. (Perhaps wrong MF.)
469
+ -2 Excess accuracy requested. (Tolerances too small.)
470
+ -3 Illegal input detected. (See printed message.)
471
+ -4 Repeated error test failures. (Check all input.)
472
+ -5 Repeated convergence failures. (Perhaps bad Jacobian
473
+ supplied or wrong choice of MF or tolerances.)
474
+ -6 Error weight became zero during problem. (Solution
475
+ component i vanished, and ATOL or ATOL(i) = 0.)
476
+ =========== =======
477
+
478
+ "zvode"
479
+
480
+ =========== =======
481
+ Return Code Message
482
+ =========== =======
483
+ 2 Integration successful.
484
+ -1 Excess work done on this call. (Perhaps wrong MF.)
485
+ -2 Excess accuracy requested. (Tolerances too small.)
486
+ -3 Illegal input detected. (See printed message.)
487
+ -4 Repeated error test failures. (Check all input.)
488
+ -5 Repeated convergence failures. (Perhaps bad Jacobian
489
+ supplied or wrong choice of MF or tolerances.)
490
+ -6 Error weight became zero during problem. (Solution
491
+ component i vanished, and ATOL or ATOL(i) = 0.)
492
+ =========== =======
493
+
494
+ "dopri5"
495
+
496
+ =========== =======
497
+ Return Code Message
498
+ =========== =======
499
+ 1 Integration successful.
500
+ 2 Integration successful (interrupted by solout).
501
+ -1 Input is not consistent.
502
+ -2 Larger nsteps is needed.
503
+ -3 Step size becomes too small.
504
+ -4 Problem is probably stiff (interrupted).
505
+ =========== =======
506
+
507
+ "dop853"
508
+
509
+ =========== =======
510
+ Return Code Message
511
+ =========== =======
512
+ 1 Integration successful.
513
+ 2 Integration successful (interrupted by solout).
514
+ -1 Input is not consistent.
515
+ -2 Larger nsteps is needed.
516
+ -3 Step size becomes too small.
517
+ -4 Problem is probably stiff (interrupted).
518
+ =========== =======
519
+
520
+ "lsoda"
521
+
522
+ =========== =======
523
+ Return Code Message
524
+ =========== =======
525
+ 2 Integration successful.
526
+ -1 Excess work done on this call (perhaps wrong Dfun type).
527
+ -2 Excess accuracy requested (tolerances too small).
528
+ -3 Illegal input detected (internal error).
529
+ -4 Repeated error test failures (internal error).
530
+ -5 Repeated convergence failures (perhaps bad Jacobian or tolerances).
531
+ -6 Error weight became zero during problem.
532
+ -7 Internal workspace insufficient to finish (internal error).
533
+ =========== =======
534
+ """
535
+ try:
536
+ self._integrator
537
+ except AttributeError:
538
+ self.set_integrator('')
539
+ return self._integrator.istate
540
+
541
+ def set_f_params(self, *args):
542
+ """Set extra parameters for user-supplied function f."""
543
+ self.f_params = args
544
+ return self
545
+
546
+ def set_jac_params(self, *args):
547
+ """Set extra parameters for user-supplied function jac."""
548
+ self.jac_params = args
549
+ return self
550
+
551
+ def set_solout(self, solout):
552
+ """
553
+ Set callable to be called at every successful integration step.
554
+
555
+ Parameters
556
+ ----------
557
+ solout : callable
558
+ ``solout(t, y)`` is called at each internal integrator step,
559
+ t is a scalar providing the current independent position
560
+ y is the current solution ``y.shape == (n,)``
561
+ solout should return -1 to stop integration
562
+ otherwise it should return None or 0
563
+
564
+ """
565
+ if self._integrator.supports_solout:
566
+ self._integrator.set_solout(solout)
567
+ if self._y is not None:
568
+ self._integrator.reset(len(self._y), self.jac is not None)
569
+ else:
570
+ raise ValueError("selected integrator does not support solout,"
571
+ " choose another one")
572
+
573
+
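set_solout above is only honoured by the dopri5/dop853 solvers (the ones defined below with supports_solout = True). A sketch of using it to record internal steps and stop early; the right-hand side and the stopping threshold are illustrative.

from scipy.integrate import ode

steps = []

def solout(t, y):
    steps.append((t, y.copy()))
    return -1 if y[0] > 10.0 else 0  # return -1 to stop the integration, 0 to continue

r = ode(lambda t, y: y)  # y' = y, so the solution grows like exp(t)
r.set_integrator('dopri5', rtol=1e-8, atol=1e-10, nsteps=1000)
r.set_solout(solout)     # must be called after set_integrator
r.set_initial_value([1.0], 0.0)
r.integrate(5.0)
print(len(steps), r.t, r.get_return_code())  # return code 2: interrupted by solout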
574
+ def _transform_banded_jac(bjac):
575
+ """
576
+ Convert a real matrix of the form (for example)
577
+
578
+ [0 0 A B] [0 0 0 B]
579
+ [0 0 C D] [0 0 A D]
580
+ [E F G H] to [0 F C H]
581
+ [I J K L] [E J G L]
582
+ [I 0 K 0]
583
+
584
+ That is, every other column is shifted up one.
585
+ """
586
+ # Shift every other column.
587
+ newjac = zeros((bjac.shape[0] + 1, bjac.shape[1]))
588
+ newjac[1:, ::2] = bjac[:, ::2]
589
+ newjac[:-1, 1::2] = bjac[:, 1::2]
590
+ return newjac
591
+
592
+
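A quick numeric check of the column shift that _transform_banded_jac performs, with the letters of the docstring diagram encoded as the integers 1..12; the helper is private and is imported here only to illustrate it.

import numpy as np
from scipy.integrate._ode import _transform_banded_jac

bjac = np.array([[0, 0, 1, 2],
                 [0, 0, 3, 4],
                 [5, 6, 7, 8],
                 [9, 10, 11, 12]], dtype=float)
print(_transform_banded_jac(bjac))
# [[ 0.  0.  0.  2.]
#  [ 0.  0.  1.  4.]
#  [ 0.  6.  3.  8.]
#  [ 5. 10.  7. 12.]
#  [ 9.  0. 11.  0.]]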
593
+ class complex_ode(ode):
594
+ """
595
+ A wrapper of ode for complex systems.
596
+
597
+ This functions similarly as `ode`, but re-maps a complex-valued
598
+ equation system to a real-valued one before using the integrators.
599
+
600
+ Parameters
601
+ ----------
602
+ f : callable ``f(t, y, *f_args)``
603
+ Rhs of the equation. t is a scalar, ``y.shape == (n,)``.
604
+ ``f_args`` is set by calling ``set_f_params(*args)``.
605
+ jac : callable ``jac(t, y, *jac_args)``
606
+ Jacobian of the rhs, ``jac[i,j] = d f[i] / d y[j]``.
607
+ ``jac_args`` is set by calling ``set_f_params(*args)``.
608
+
609
+ Attributes
610
+ ----------
611
+ t : float
612
+ Current time.
613
+ y : ndarray
614
+ Current variable values.
615
+
616
+ Examples
617
+ --------
618
+ For usage examples, see `ode`.
619
+
620
+ """
621
+
622
+ def __init__(self, f, jac=None):
623
+ self.cf = f
624
+ self.cjac = jac
625
+ if jac is None:
626
+ ode.__init__(self, self._wrap, None)
627
+ else:
628
+ ode.__init__(self, self._wrap, self._wrap_jac)
629
+
630
+ def _wrap(self, t, y, *f_args):
631
+ f = self.cf(*((t, y[::2] + 1j * y[1::2]) + f_args))
632
+ # self.tmp is a real-valued array containing the interleaved
633
+ # real and imaginary parts of f.
634
+ self.tmp[::2] = real(f)
635
+ self.tmp[1::2] = imag(f)
636
+ return self.tmp
637
+
638
+ def _wrap_jac(self, t, y, *jac_args):
639
+ # jac is the complex Jacobian computed by the user-defined function.
640
+ jac = self.cjac(*((t, y[::2] + 1j * y[1::2]) + jac_args))
641
+
642
+ # jac_tmp is the real version of the complex Jacobian. Each complex
643
+ # entry in jac, say 2+3j, becomes a 2x2 block of the form
644
+ # [2 -3]
645
+ # [3 2]
646
+ jac_tmp = zeros((2 * jac.shape[0], 2 * jac.shape[1]))
647
+ jac_tmp[1::2, 1::2] = jac_tmp[::2, ::2] = real(jac)
648
+ jac_tmp[1::2, ::2] = imag(jac)
649
+ jac_tmp[::2, 1::2] = -jac_tmp[1::2, ::2]
650
+
651
+ ml = getattr(self._integrator, 'ml', None)
652
+ mu = getattr(self._integrator, 'mu', None)
653
+ if ml is not None or mu is not None:
654
+ # Jacobian is banded. The user's Jacobian function has computed
655
+ # the complex Jacobian in packed format. The corresponding
656
+ # real-valued version has every other column shifted up.
657
+ jac_tmp = _transform_banded_jac(jac_tmp)
658
+
659
+ return jac_tmp
660
+
661
+ @property
662
+ def y(self):
663
+ return self._y[::2] + 1j * self._y[1::2]
664
+
665
+ def set_integrator(self, name, **integrator_params):
666
+ """
667
+ Set integrator by name.
668
+
669
+ Parameters
670
+ ----------
671
+ name : str
672
+ Name of the integrator
673
+ **integrator_params
674
+ Additional parameters for the integrator.
675
+ """
676
+ if name == 'zvode':
677
+ raise ValueError("zvode must be used with ode, not complex_ode")
678
+
679
+ lband = integrator_params.get('lband')
680
+ uband = integrator_params.get('uband')
681
+ if lband is not None or uband is not None:
682
+ # The Jacobian is banded. Override the user-supplied bandwidths
683
+ # (which are for the complex Jacobian) with the bandwidths of
684
+ # the corresponding real-valued Jacobian wrapper of the complex
685
+ # Jacobian.
686
+ integrator_params['lband'] = 2 * (lband or 0) + 1
687
+ integrator_params['uband'] = 2 * (uband or 0) + 1
688
+
689
+ return ode.set_integrator(self, name, **integrator_params)
690
+
691
+ def set_initial_value(self, y, t=0.0):
692
+ """Set initial conditions y(t) = y."""
693
+ y = asarray(y)
694
+ self.tmp = zeros(y.size * 2, 'float')
695
+ self.tmp[::2] = real(y)
696
+ self.tmp[1::2] = imag(y)
697
+ return ode.set_initial_value(self, self.tmp, t)
698
+
699
+ def integrate(self, t, step=False, relax=False):
700
+ """Find y=y(t), set y as an initial condition, and return y.
701
+
702
+ Parameters
703
+ ----------
704
+ t : float
705
+ The endpoint of the integration step.
706
+ step : bool
707
+ If True, and if the integrator supports the step method,
708
+ then perform a single integration step and return.
709
+ This parameter is provided in order to expose internals of
710
+ the implementation, and should not be changed from its default
711
+ value in most cases.
712
+ relax : bool
713
+ If True and if the integrator supports the run_relax method,
714
+ then integrate until t_1 >= t and return. ``relax`` is not
715
+ referenced if ``step=True``.
716
+ This parameter is provided in order to expose internals of
717
+ the implementation, and should not be changed from its default
718
+ value in most cases.
719
+
720
+ Returns
721
+ -------
722
+ y : float
723
+ The integrated value at t
724
+ """
725
+ y = ode.integrate(self, t, step, relax)
726
+ return y[::2] + 1j * y[1::2]
727
+
728
+ def set_solout(self, solout):
729
+ """
730
+ Set callable to be called at every successful integration step.
731
+
732
+ Parameters
733
+ ----------
734
+ solout : callable
735
+ ``solout(t, y)`` is called at each internal integrator step,
736
+ t is a scalar providing the current independent position
737
+ y is the current solution ``y.shape == (n,)``
738
+ solout should return -1 to stop integration
739
+ otherwise it should return None or 0
740
+
741
+ """
742
+ if self._integrator.supports_solout:
743
+ self._integrator.set_solout(solout, complex=True)
744
+ else:
745
+ raise TypeError("selected integrator does not support solout, "
746
+ + "choose another one")
747
+
748
+
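A brief usage sketch for complex_ode: the scalar problem y' = 1j*y (so y(t) = exp(1j*t)) is an illustrative choice, and dopri5 is used because the class rejects zvode. Internally, each complex Jacobian entry would be expanded into the 2x2 real block described in _wrap_jac.

from scipy.integrate import complex_ode

r = complex_ode(lambda t, y: 1j * y)
r.set_integrator('dopri5', rtol=1e-10, atol=1e-12)
r.set_initial_value([1.0 + 0.0j], 0.0)
print(r.integrate(3.141592653589793), r.successful())  # close to exp(1j*pi) = -1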
749
+ # ------------------------------------------------------------------------------
750
+ # ODE integrators
751
+ # ------------------------------------------------------------------------------
752
+
753
+ def find_integrator(name):
754
+ for cl in IntegratorBase.integrator_classes:
755
+ if re.match(name, cl.__name__, re.I):
756
+ return cl
757
+ return None
758
+
759
+
760
+ class IntegratorConcurrencyError(RuntimeError):
761
+ """
762
+ Failure due to concurrent usage of an integrator that can be used
763
+ only for a single problem at a time.
764
+
765
+ """
766
+
767
+ def __init__(self, name):
768
+ msg = ("Integrator `%s` can be used to solve only a single problem "
769
+ "at a time. If you want to integrate multiple problems, "
770
+ "consider using a different integrator "
771
+ "(see `ode.set_integrator`)") % name
772
+ RuntimeError.__init__(self, msg)
773
+
774
+
775
+ class IntegratorBase:
776
+ runner = None # runner is None => integrator is not available
777
+ success = None # success==1 if integrator was called successfully
778
+ istate = None # istate > 0 means success, istate < 0 means failure
779
+ supports_run_relax = None
780
+ supports_step = None
781
+ supports_solout = False
782
+ integrator_classes = []
783
+ scalar = float
784
+
785
+ def acquire_new_handle(self):
786
+ # Some of the integrators have internal state (ancient
787
+ # Fortran...), and so only one instance can use them at a time.
788
+ # We keep track of this, and fail when concurrent usage is tried.
789
+ self.__class__.active_global_handle += 1
790
+ self.handle = self.__class__.active_global_handle
791
+
792
+ def check_handle(self):
793
+ if self.handle is not self.__class__.active_global_handle:
794
+ raise IntegratorConcurrencyError(self.__class__.__name__)
795
+
796
+ def reset(self, n, has_jac):
797
+ """Prepare integrator for call: allocate memory, set flags, etc.
798
+ n - number of equations.
799
+ has_jac - if user has supplied function for evaluating Jacobian.
800
+ """
801
+
802
+ def run(self, f, jac, y0, t0, t1, f_params, jac_params):
803
+ """Integrate from t=t0 to t=t1 using y0 as an initial condition.
804
+ Return 2-tuple (y1,t1) where y1 is the result and t=t1
805
+ defines the stoppage coordinate of the result.
806
+ """
807
+ raise NotImplementedError('all integrators must define '
808
+ 'run(f, jac, t0, t1, y0, f_params, jac_params)')
809
+
810
+ def step(self, f, jac, y0, t0, t1, f_params, jac_params):
811
+ """Make one integration step and return (y1,t1)."""
812
+ raise NotImplementedError('%s does not support step() method' %
813
+ self.__class__.__name__)
814
+
815
+ def run_relax(self, f, jac, y0, t0, t1, f_params, jac_params):
816
+ """Integrate from t=t0 to t>=t1 and return (y1,t)."""
817
+ raise NotImplementedError('%s does not support run_relax() method' %
818
+ self.__class__.__name__)
819
+
820
+ # XXX: __str__ method for getting visual state of the integrator
821
+
822
+
823
+ def _vode_banded_jac_wrapper(jacfunc, ml, jac_params):
824
+ """
825
+ Wrap a banded Jacobian function with a function that pads
826
+ the Jacobian with `ml` rows of zeros.
827
+ """
828
+
829
+ def jac_wrapper(t, y):
830
+ jac = asarray(jacfunc(t, y, *jac_params))
831
+ padded_jac = vstack((jac, zeros((ml, jac.shape[1]))))
832
+ return padded_jac
833
+
834
+ return jac_wrapper
835
+
836
+
837
+ class vode(IntegratorBase):
838
+ runner = getattr(_vode, 'dvode', None)
839
+
840
+ messages = {-1: 'Excess work done on this call. (Perhaps wrong MF.)',
841
+ -2: 'Excess accuracy requested. (Tolerances too small.)',
842
+ -3: 'Illegal input detected. (See printed message.)',
843
+ -4: 'Repeated error test failures. (Check all input.)',
844
+ -5: 'Repeated convergence failures. (Perhaps bad'
845
+ ' Jacobian supplied or wrong choice of MF or tolerances.)',
846
+ -6: 'Error weight became zero during problem. (Solution'
847
+ ' component i vanished, and ATOL or ATOL(i) = 0.)'
848
+ }
849
+ supports_run_relax = 1
850
+ supports_step = 1
851
+ active_global_handle = 0
852
+
853
+ def __init__(self,
854
+ method='adams',
855
+ with_jacobian=False,
856
+ rtol=1e-6, atol=1e-12,
857
+ lband=None, uband=None,
858
+ order=12,
859
+ nsteps=500,
860
+ max_step=0.0, # corresponds to infinite
861
+ min_step=0.0,
862
+ first_step=0.0, # determined by solver
863
+ ):
864
+
865
+ if re.match(method, r'adams', re.I):
866
+ self.meth = 1
867
+ elif re.match(method, r'bdf', re.I):
868
+ self.meth = 2
869
+ else:
870
+ raise ValueError('Unknown integration method %s' % method)
871
+ self.with_jacobian = with_jacobian
872
+ self.rtol = rtol
873
+ self.atol = atol
874
+ self.mu = uband
875
+ self.ml = lband
876
+
877
+ self.order = order
878
+ self.nsteps = nsteps
879
+ self.max_step = max_step
880
+ self.min_step = min_step
881
+ self.first_step = first_step
882
+ self.success = 1
883
+
884
+ self.initialized = False
885
+
886
+ def _determine_mf_and_set_bands(self, has_jac):
887
+ """
888
+ Determine the `MF` parameter (Method Flag) for the Fortran subroutine `dvode`.
889
+
890
+ In the Fortran code, the legal values of `MF` are:
891
+ 10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 24, 25,
892
+ -11, -12, -14, -15, -21, -22, -24, -25
893
+ but this Python wrapper does not use negative values.
894
+
895
+ Returns
896
+
897
+ mf = 10*self.meth + miter
898
+
899
+ self.meth is the linear multistep method:
900
+ self.meth == 1: method="adams"
901
+ self.meth == 2: method="bdf"
902
+
903
+ miter is the correction iteration method:
904
+ miter == 0: Functional iteration; no Jacobian involved.
905
+ miter == 1: Chord iteration with user-supplied full Jacobian.
906
+ miter == 2: Chord iteration with internally computed full Jacobian.
907
+ miter == 3: Chord iteration with internally computed diagonal Jacobian.
908
+ miter == 4: Chord iteration with user-supplied banded Jacobian.
909
+ miter == 5: Chord iteration with internally computed banded Jacobian.
910
+
911
+ Side effects: If either self.mu or self.ml is not None and the other is None,
912
+ then the one that is None is set to 0.
913
+ """
914
+
915
+ jac_is_banded = self.mu is not None or self.ml is not None
916
+ if jac_is_banded:
917
+ if self.mu is None:
918
+ self.mu = 0
919
+ if self.ml is None:
920
+ self.ml = 0
921
+
922
+ # has_jac is True if the user provided a Jacobian function.
923
+ if has_jac:
924
+ if jac_is_banded:
925
+ miter = 4
926
+ else:
927
+ miter = 1
928
+ else:
929
+ if jac_is_banded:
930
+ if self.ml == self.mu == 0:
931
+ miter = 3 # Chord iteration with internal diagonal Jacobian.
932
+ else:
933
+ miter = 5 # Chord iteration with internal banded Jacobian.
934
+ else:
935
+ # self.with_jacobian is set by the user in
936
+ # the call to ode.set_integrator.
937
+ if self.with_jacobian:
938
+ miter = 2 # Chord iteration with internal full Jacobian.
939
+ else:
940
+ miter = 0 # Functional iteration; no Jacobian involved.
941
+
942
+ mf = 10 * self.meth + miter
943
+ return mf
944
+
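For reference, the branch logic above can be checked directly; with method='bdf', a banded structure and no user-supplied Jacobian the method flag is 10*2 + 5 = 25 (the bandwidths of 1 are an illustrative choice, and the class is imported from the private module only for this check).

from scipy.integrate._ode import vode

v = vode(method='bdf', lband=1, uband=1)
print(v._determine_mf_and_set_bands(has_jac=False))  # 25: BDF with internally computed banded Jacobian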
945
+ def reset(self, n, has_jac):
946
+ mf = self._determine_mf_and_set_bands(has_jac)
947
+
948
+ if mf == 10:
949
+ lrw = 20 + 16 * n
950
+ elif mf in [11, 12]:
951
+ lrw = 22 + 16 * n + 2 * n * n
952
+ elif mf == 13:
953
+ lrw = 22 + 17 * n
954
+ elif mf in [14, 15]:
955
+ lrw = 22 + 18 * n + (3 * self.ml + 2 * self.mu) * n
956
+ elif mf == 20:
957
+ lrw = 20 + 9 * n
958
+ elif mf in [21, 22]:
959
+ lrw = 22 + 9 * n + 2 * n * n
960
+ elif mf == 23:
961
+ lrw = 22 + 10 * n
962
+ elif mf in [24, 25]:
963
+ lrw = 22 + 11 * n + (3 * self.ml + 2 * self.mu) * n
964
+ else:
965
+ raise ValueError('Unexpected mf=%s' % mf)
966
+
967
+ if mf % 10 in [0, 3]:
968
+ liw = 30
969
+ else:
970
+ liw = 30 + n
971
+
972
+ rwork = zeros((lrw,), float)
973
+ rwork[4] = self.first_step
974
+ rwork[5] = self.max_step
975
+ rwork[6] = self.min_step
976
+ self.rwork = rwork
977
+
978
+ iwork = zeros((liw,), _vode_int_dtype)
979
+ if self.ml is not None:
980
+ iwork[0] = self.ml
981
+ if self.mu is not None:
982
+ iwork[1] = self.mu
983
+ iwork[4] = self.order
984
+ iwork[5] = self.nsteps
985
+ iwork[6] = 2 # mxhnil
986
+ self.iwork = iwork
987
+
988
+ self.call_args = [self.rtol, self.atol, 1, 1,
989
+ self.rwork, self.iwork, mf]
990
+ self.success = 1
991
+ self.initialized = False
992
+
993
+ def run(self, f, jac, y0, t0, t1, f_params, jac_params):
994
+ if self.initialized:
995
+ self.check_handle()
996
+ else:
997
+ self.initialized = True
998
+ self.acquire_new_handle()
999
+
1000
+ if self.ml is not None and self.ml > 0:
1001
+ # Banded Jacobian. Wrap the user-provided function with one
1002
+ # that pads the Jacobian array with the extra `self.ml` rows
1003
+ # required by the f2py-generated wrapper.
1004
+ jac = _vode_banded_jac_wrapper(jac, self.ml, jac_params)
1005
+
1006
+ args = ((f, jac, y0, t0, t1) + tuple(self.call_args) +
1007
+ (f_params, jac_params))
1008
+ y1, t, istate = self.runner(*args)
1009
+ self.istate = istate
1010
+ if istate < 0:
1011
+ unexpected_istate_msg = f'Unexpected istate={istate:d}'
1012
+ warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
1013
+ self.messages.get(istate, unexpected_istate_msg)),
1014
+ stacklevel=2)
1015
+ self.success = 0
1016
+ else:
1017
+ self.call_args[3] = 2 # upgrade istate from 1 to 2
1018
+ self.istate = 2
1019
+ return y1, t
1020
+
1021
+ def step(self, *args):
1022
+ itask = self.call_args[2]
1023
+ self.call_args[2] = 2
1024
+ r = self.run(*args)
1025
+ self.call_args[2] = itask
1026
+ return r
1027
+
1028
+ def run_relax(self, *args):
1029
+ itask = self.call_args[2]
1030
+ self.call_args[2] = 3
1031
+ r = self.run(*args)
1032
+ self.call_args[2] = itask
1033
+ return r
1034
+
1035
+
1036
+ if vode.runner is not None:
1037
+ IntegratorBase.integrator_classes.append(vode)
1038
+
1039
+
1040
+ class zvode(vode):
1041
+ runner = getattr(_vode, 'zvode', None)
1042
+
1043
+ supports_run_relax = 1
1044
+ supports_step = 1
1045
+ scalar = complex
1046
+ active_global_handle = 0
1047
+
1048
+ def reset(self, n, has_jac):
1049
+ mf = self._determine_mf_and_set_bands(has_jac)
1050
+
1051
+ if mf in (10,):
1052
+ lzw = 15 * n
1053
+ elif mf in (11, 12):
1054
+ lzw = 15 * n + 2 * n ** 2
1055
+ elif mf in (-11, -12):
1056
+ lzw = 15 * n + n ** 2
1057
+ elif mf in (13,):
1058
+ lzw = 16 * n
1059
+ elif mf in (14, 15):
1060
+ lzw = 17 * n + (3 * self.ml + 2 * self.mu) * n
1061
+ elif mf in (-14, -15):
1062
+ lzw = 16 * n + (2 * self.ml + self.mu) * n
1063
+ elif mf in (20,):
1064
+ lzw = 8 * n
1065
+ elif mf in (21, 22):
1066
+ lzw = 8 * n + 2 * n ** 2
1067
+ elif mf in (-21, -22):
1068
+ lzw = 8 * n + n ** 2
1069
+ elif mf in (23,):
1070
+ lzw = 9 * n
1071
+ elif mf in (24, 25):
1072
+ lzw = 10 * n + (3 * self.ml + 2 * self.mu) * n
1073
+ elif mf in (-24, -25):
1074
+ lzw = 9 * n + (2 * self.ml + self.mu) * n
1075
+
1076
+ lrw = 20 + n
1077
+
1078
+ if mf % 10 in (0, 3):
1079
+ liw = 30
1080
+ else:
1081
+ liw = 30 + n
1082
+
1083
+ zwork = zeros((lzw,), complex)
1084
+ self.zwork = zwork
1085
+
1086
+ rwork = zeros((lrw,), float)
1087
+ rwork[4] = self.first_step
1088
+ rwork[5] = self.max_step
1089
+ rwork[6] = self.min_step
1090
+ self.rwork = rwork
1091
+
1092
+ iwork = zeros((liw,), _vode_int_dtype)
1093
+ if self.ml is not None:
1094
+ iwork[0] = self.ml
1095
+ if self.mu is not None:
1096
+ iwork[1] = self.mu
1097
+ iwork[4] = self.order
1098
+ iwork[5] = self.nsteps
1099
+ iwork[6] = 2 # mxhnil
1100
+ self.iwork = iwork
1101
+
1102
+ self.call_args = [self.rtol, self.atol, 1, 1,
1103
+ self.zwork, self.rwork, self.iwork, mf]
1104
+ self.success = 1
1105
+ self.initialized = False
1106
+
1107
+
1108
+ if zvode.runner is not None:
1109
+ IntegratorBase.integrator_classes.append(zvode)
1110
+
1111
+
1112
+ class dopri5(IntegratorBase):
1113
+ runner = getattr(_dop, 'dopri5', None)
1114
+ name = 'dopri5'
1115
+ supports_solout = True
1116
+
1117
+ messages = {1: 'computation successful',
1118
+ 2: 'computation successful (interrupted by solout)',
1119
+ -1: 'input is not consistent',
1120
+ -2: 'larger nsteps is needed',
1121
+ -3: 'step size becomes too small',
1122
+ -4: 'problem is probably stiff (interrupted)',
1123
+ }
1124
+
1125
+ def __init__(self,
1126
+ rtol=1e-6, atol=1e-12,
1127
+ nsteps=500,
1128
+ max_step=0.0,
1129
+ first_step=0.0, # determined by solver
1130
+ safety=0.9,
1131
+ ifactor=10.0,
1132
+ dfactor=0.2,
1133
+ beta=0.0,
1134
+ method=None,
1135
+ verbosity=-1, # no messages if negative
1136
+ ):
1137
+ self.rtol = rtol
1138
+ self.atol = atol
1139
+ self.nsteps = nsteps
1140
+ self.max_step = max_step
1141
+ self.first_step = first_step
1142
+ self.safety = safety
1143
+ self.ifactor = ifactor
1144
+ self.dfactor = dfactor
1145
+ self.beta = beta
1146
+ self.verbosity = verbosity
1147
+ self.success = 1
1148
+ self.set_solout(None)
1149
+
1150
+ def set_solout(self, solout, complex=False):
1151
+ self.solout = solout
1152
+ self.solout_cmplx = complex
1153
+ if solout is None:
1154
+ self.iout = 0
1155
+ else:
1156
+ self.iout = 1
1157
+
1158
+ def reset(self, n, has_jac):
1159
+ work = zeros((8 * n + 21,), float)
1160
+ work[1] = self.safety
1161
+ work[2] = self.dfactor
1162
+ work[3] = self.ifactor
1163
+ work[4] = self.beta
1164
+ work[5] = self.max_step
1165
+ work[6] = self.first_step
1166
+ self.work = work
1167
+ iwork = zeros((21,), _dop_int_dtype)
1168
+ iwork[0] = self.nsteps
1169
+ iwork[2] = self.verbosity
1170
+ self.iwork = iwork
1171
+ self.call_args = [self.rtol, self.atol, self._solout,
1172
+ self.iout, self.work, self.iwork]
1173
+ self.success = 1
1174
+
1175
+ def run(self, f, jac, y0, t0, t1, f_params, jac_params):
1176
+ x, y, iwork, istate = self.runner(*((f, t0, y0, t1) +
1177
+ tuple(self.call_args) + (f_params,)))
1178
+ self.istate = istate
1179
+ if istate < 0:
1180
+ unexpected_istate_msg = f'Unexpected istate={istate:d}'
1181
+ warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
1182
+ self.messages.get(istate, unexpected_istate_msg)),
1183
+ stacklevel=2)
1184
+ self.success = 0
1185
+ return y, x
1186
+
1187
+ def _solout(self, nr, xold, x, y, nd, icomp, con):
1188
+ if self.solout is not None:
1189
+ if self.solout_cmplx:
1190
+ y = y[::2] + 1j * y[1::2]
1191
+ return self.solout(x, y)
1192
+ else:
1193
+ return 1
1194
+
1195
+
1196
+ if dopri5.runner is not None:
1197
+ IntegratorBase.integrator_classes.append(dopri5)
1198
+
1199
+
1200
+ class dop853(dopri5):
1201
+ runner = getattr(_dop, 'dop853', None)
1202
+ name = 'dop853'
1203
+
1204
+ def __init__(self,
1205
+ rtol=1e-6, atol=1e-12,
1206
+ nsteps=500,
1207
+ max_step=0.0,
1208
+ first_step=0.0, # determined by solver
1209
+ safety=0.9,
1210
+ ifactor=6.0,
1211
+ dfactor=0.3,
1212
+ beta=0.0,
1213
+ method=None,
1214
+ verbosity=-1, # no messages if negative
1215
+ ):
1216
+ super().__init__(rtol, atol, nsteps, max_step, first_step, safety,
1217
+ ifactor, dfactor, beta, method, verbosity)
1218
+
1219
+ def reset(self, n, has_jac):
1220
+ work = zeros((11 * n + 21,), float)
1221
+ work[1] = self.safety
1222
+ work[2] = self.dfactor
1223
+ work[3] = self.ifactor
1224
+ work[4] = self.beta
1225
+ work[5] = self.max_step
1226
+ work[6] = self.first_step
1227
+ self.work = work
1228
+ iwork = zeros((21,), _dop_int_dtype)
1229
+ iwork[0] = self.nsteps
1230
+ iwork[2] = self.verbosity
1231
+ self.iwork = iwork
1232
+ self.call_args = [self.rtol, self.atol, self._solout,
1233
+ self.iout, self.work, self.iwork]
1234
+ self.success = 1
1235
+
1236
+
1237
+ if dop853.runner is not None:
1238
+ IntegratorBase.integrator_classes.append(dop853)
1239
+
1240
+
1241
+ class lsoda(IntegratorBase):
1242
+ runner = getattr(_lsoda, 'lsoda', None)
1243
+ active_global_handle = 0
1244
+
1245
+ messages = {
1246
+ 2: "Integration successful.",
1247
+ -1: "Excess work done on this call (perhaps wrong Dfun type).",
1248
+ -2: "Excess accuracy requested (tolerances too small).",
1249
+ -3: "Illegal input detected (internal error).",
1250
+ -4: "Repeated error test failures (internal error).",
1251
+ -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
1252
+ -6: "Error weight became zero during problem.",
1253
+ -7: "Internal workspace insufficient to finish (internal error)."
1254
+ }
1255
+
1256
+ def __init__(self,
1257
+ with_jacobian=False,
1258
+ rtol=1e-6, atol=1e-12,
1259
+ lband=None, uband=None,
1260
+ nsteps=500,
1261
+ max_step=0.0, # corresponds to infinite
1262
+ min_step=0.0,
1263
+ first_step=0.0, # determined by solver
1264
+ ixpr=0,
1265
+ max_hnil=0,
1266
+ max_order_ns=12,
1267
+ max_order_s=5,
1268
+ method=None
1269
+ ):
1270
+
1271
+ self.with_jacobian = with_jacobian
1272
+ self.rtol = rtol
1273
+ self.atol = atol
1274
+ self.mu = uband
1275
+ self.ml = lband
1276
+
1277
+ self.max_order_ns = max_order_ns
1278
+ self.max_order_s = max_order_s
1279
+ self.nsteps = nsteps
1280
+ self.max_step = max_step
1281
+ self.min_step = min_step
1282
+ self.first_step = first_step
1283
+ self.ixpr = ixpr
1284
+ self.max_hnil = max_hnil
1285
+ self.success = 1
1286
+
1287
+ self.initialized = False
1288
+
1289
+ def reset(self, n, has_jac):
1290
+ # Calculate parameters for Fortran subroutine dvode.
1291
+ if has_jac:
1292
+ if self.mu is None and self.ml is None:
1293
+ jt = 1
1294
+ else:
1295
+ if self.mu is None:
1296
+ self.mu = 0
1297
+ if self.ml is None:
1298
+ self.ml = 0
1299
+ jt = 4
1300
+ else:
1301
+ if self.mu is None and self.ml is None:
1302
+ jt = 2
1303
+ else:
1304
+ if self.mu is None:
1305
+ self.mu = 0
1306
+ if self.ml is None:
1307
+ self.ml = 0
1308
+ jt = 5
1309
+ lrn = 20 + (self.max_order_ns + 4) * n
1310
+ if jt in [1, 2]:
1311
+ lrs = 22 + (self.max_order_s + 4) * n + n * n
1312
+ elif jt in [4, 5]:
1313
+ lrs = 22 + (self.max_order_s + 5 + 2 * self.ml + self.mu) * n
1314
+ else:
1315
+ raise ValueError('Unexpected jt=%s' % jt)
1316
+ lrw = max(lrn, lrs)
1317
+ liw = 20 + n
1318
+ rwork = zeros((lrw,), float)
1319
+ rwork[4] = self.first_step
1320
+ rwork[5] = self.max_step
1321
+ rwork[6] = self.min_step
1322
+ self.rwork = rwork
1323
+ iwork = zeros((liw,), _lsoda_int_dtype)
1324
+ if self.ml is not None:
1325
+ iwork[0] = self.ml
1326
+ if self.mu is not None:
1327
+ iwork[1] = self.mu
1328
+ iwork[4] = self.ixpr
1329
+ iwork[5] = self.nsteps
1330
+ iwork[6] = self.max_hnil
1331
+ iwork[7] = self.max_order_ns
1332
+ iwork[8] = self.max_order_s
1333
+ self.iwork = iwork
1334
+ self.call_args = [self.rtol, self.atol, 1, 1,
1335
+ self.rwork, self.iwork, jt]
1336
+ self.success = 1
1337
+ self.initialized = False
1338
+
1339
+ def run(self, f, jac, y0, t0, t1, f_params, jac_params):
1340
+ if self.initialized:
1341
+ self.check_handle()
1342
+ else:
1343
+ self.initialized = True
1344
+ self.acquire_new_handle()
1345
+ args = [f, y0, t0, t1] + self.call_args[:-1] + \
1346
+ [jac, self.call_args[-1], f_params, 0, jac_params]
1347
+ y1, t, istate = self.runner(*args)
1348
+ self.istate = istate
1349
+ if istate < 0:
1350
+ unexpected_istate_msg = f'Unexpected istate={istate:d}'
1351
+ warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
1352
+ self.messages.get(istate, unexpected_istate_msg)),
1353
+ stacklevel=2)
1354
+ self.success = 0
1355
+ else:
1356
+ self.call_args[3] = 2 # upgrade istate from 1 to 2
1357
+ self.istate = 2
1358
+ return y1, t
1359
+
1360
+ def step(self, *args):
1361
+ itask = self.call_args[2]
1362
+ self.call_args[2] = 2
1363
+ r = self.run(*args)
1364
+ self.call_args[2] = itask
1365
+ return r
1366
+
1367
+ def run_relax(self, *args):
1368
+ itask = self.call_args[2]
1369
+ self.call_args[2] = 3
1370
+ r = self.run(*args)
1371
+ self.call_args[2] = itask
1372
+ return r
1373
+
1374
+
1375
+ if lsoda.runner:
1376
+ IntegratorBase.integrator_classes.append(lsoda)
venv/lib/python3.10/site-packages/scipy/integrate/_odepack.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (83.6 kB). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/_odepack_py.py ADDED
@@ -0,0 +1,262 @@
1
+ # Author: Travis Oliphant
2
+
3
+ __all__ = ['odeint', 'ODEintWarning']
4
+
5
+ import numpy as np
6
+ from . import _odepack
7
+ from copy import copy
8
+ import warnings
9
+
10
+
11
+ class ODEintWarning(Warning):
12
+ """Warning raised during the execution of `odeint`."""
13
+ pass
14
+
15
+
16
+ _msgs = {2: "Integration successful.",
17
+ 1: "Nothing was done; the integration time was 0.",
18
+ -1: "Excess work done on this call (perhaps wrong Dfun type).",
19
+ -2: "Excess accuracy requested (tolerances too small).",
20
+ -3: "Illegal input detected (internal error).",
21
+ -4: "Repeated error test failures (internal error).",
22
+ -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
23
+ -6: "Error weight became zero during problem.",
24
+ -7: "Internal workspace insufficient to finish (internal error).",
25
+ -8: "Run terminated (internal error)."
26
+ }
27
+
28
+
29
+ def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0,
30
+ ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0,
31
+ hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12,
32
+ mxords=5, printmessg=0, tfirst=False):
33
+ """
34
+ Integrate a system of ordinary differential equations.
35
+
36
+ .. note:: For new code, use `scipy.integrate.solve_ivp` to solve a
37
+ differential equation.
38
+
39
+ Solve a system of ordinary differential equations using lsoda from the
40
+ FORTRAN library odepack.
41
+
42
+ Solves the initial value problem for stiff or non-stiff systems
43
+ of first order ode-s::
44
+
45
+ dy/dt = func(y, t, ...) [or func(t, y, ...)]
46
+
47
+ where y can be a vector.
48
+
49
+ .. note:: By default, the required order of the first two arguments of
50
+ `func` are in the opposite order of the arguments in the system
51
+ definition function used by the `scipy.integrate.ode` class and
52
+ the function `scipy.integrate.solve_ivp`. To use a function with
53
+ the signature ``func(t, y, ...)``, the argument `tfirst` must be
54
+ set to ``True``.
55
+
56
+ Parameters
57
+ ----------
58
+ func : callable(y, t, ...) or callable(t, y, ...)
59
+ Computes the derivative of y at t.
60
+ If the signature is ``callable(t, y, ...)``, then the argument
61
+ `tfirst` must be set ``True``.
62
+ y0 : array
63
+ Initial condition on y (can be a vector).
64
+ t : array
65
+ A sequence of time points for which to solve for y. The initial
66
+ value point should be the first element of this sequence.
67
+ This sequence must be monotonically increasing or monotonically
68
+ decreasing; repeated values are allowed.
69
+ args : tuple, optional
70
+ Extra arguments to pass to function.
71
+ Dfun : callable(y, t, ...) or callable(t, y, ...)
72
+ Gradient (Jacobian) of `func`.
73
+ If the signature is ``callable(t, y, ...)``, then the argument
74
+ `tfirst` must be set ``True``.
75
+ col_deriv : bool, optional
76
+ True if `Dfun` defines derivatives down columns (faster),
77
+ otherwise `Dfun` should define derivatives across rows.
78
+ full_output : bool, optional
79
+ True if to return a dictionary of optional outputs as the second output
80
+ printmessg : bool, optional
81
+ Whether to print the convergence message
82
+ tfirst : bool, optional
83
+ If True, the first two arguments of `func` (and `Dfun`, if given)
84
+ must be ``t, y`` instead of the default ``y, t``.
85
+
86
+ .. versionadded:: 1.1.0
87
+
88
+ Returns
89
+ -------
90
+ y : array, shape (len(t), len(y0))
91
+ Array containing the value of y for each desired time in t,
92
+ with the initial value `y0` in the first row.
93
+ infodict : dict, only returned if full_output == True
94
+ Dictionary containing additional output information
95
+
96
+ ======= ============================================================
97
+ key meaning
98
+ ======= ============================================================
99
+ 'hu' vector of step sizes successfully used for each time step
100
+ 'tcur' vector with the value of t reached for each time step
101
+ (will always be at least as large as the input times)
102
+ 'tolsf' vector of tolerance scale factors, greater than 1.0,
103
+ computed when a request for too much accuracy was detected
104
+ 'tsw' value of t at the time of the last method switch
105
+ (given for each time step)
106
+ 'nst' cumulative number of time steps
107
+ 'nfe' cumulative number of function evaluations for each time step
108
+ 'nje' cumulative number of jacobian evaluations for each time step
109
+ 'nqu' a vector of method orders for each successful step
110
+ 'imxer' index of the component of largest magnitude in the
111
+ weighted local error vector (e / ewt) on an error return, -1
112
+ otherwise
113
+ 'lenrw' the length of the double work array required
114
+ 'leniw' the length of integer work array required
115
+ 'mused' a vector of method indicators for each successful time step:
116
+ 1: adams (nonstiff), 2: bdf (stiff)
117
+ ======= ============================================================
118
+
119
+ Other Parameters
120
+ ----------------
121
+ ml, mu : int, optional
122
+ If either of these is not None (i.e., a non-negative integer is given), then the
123
+ Jacobian is assumed to be banded. These give the number of
124
+ lower and upper non-zero diagonals in this banded matrix.
125
+ For the banded case, `Dfun` should return a matrix whose
126
+ rows contain the non-zero bands (starting with the lowest diagonal).
127
+ Thus, the return matrix `jac` from `Dfun` should have shape
128
+ ``(ml + mu + 1, len(y0))`` when ``ml >=0`` or ``mu >=0``.
129
+ The data in `jac` must be stored such that ``jac[i - j + mu, j]``
130
+ holds the derivative of the ``i``\\ th equation with respect to the
131
+ ``j``\\ th state variable. If `col_deriv` is True, the transpose of
132
+ this `jac` must be returned.
133
+ rtol, atol : float, optional
134
+ The input parameters `rtol` and `atol` determine the error
135
+ control performed by the solver. The solver will control the
136
+ vector, e, of estimated local errors in y, according to an
137
+ inequality of the form ``max-norm of (e / ewt) <= 1``,
138
+ where ewt is a vector of positive error weights computed as
139
+ ``ewt = rtol * abs(y) + atol``.
140
+ rtol and atol can be either vectors the same length as y or scalars.
141
+ Defaults to 1.49012e-8.
142
+ tcrit : ndarray, optional
143
+ Vector of critical points (e.g., singularities) where integration
144
+ care should be taken.
145
+ h0 : float, (0: solver-determined), optional
146
+ The step size to be attempted on the first step.
147
+ hmax : float, (0: solver-determined), optional
148
+ The maximum absolute step size allowed.
149
+ hmin : float, (0: solver-determined), optional
150
+ The minimum absolute step size allowed.
151
+ ixpr : bool, optional
152
+ Whether to generate extra printing at method switches.
153
+ mxstep : int, (0: solver-determined), optional
154
+ Maximum number of (internally defined) steps allowed for each
155
+ integration point in t.
156
+ mxhnil : int, (0: solver-determined), optional
157
+ Maximum number of messages printed.
158
+ mxordn : int, (0: solver-determined), optional
159
+ Maximum order to be allowed for the non-stiff (Adams) method.
160
+ mxords : int, (0: solver-determined), optional
161
+ Maximum order to be allowed for the stiff (BDF) method.
162
+
163
+ See Also
164
+ --------
165
+ solve_ivp : solve an initial value problem for a system of ODEs
166
+ ode : a more object-oriented integrator based on VODE
167
+ quad : for finding the area under a curve
168
+
169
+ Examples
170
+ --------
171
+ The second order differential equation for the angle `theta` of a
172
+ pendulum acted on by gravity with friction can be written::
173
+
174
+ theta''(t) + b*theta'(t) + c*sin(theta(t)) = 0
175
+
176
+ where `b` and `c` are positive constants, and a prime (') denotes a
177
+ derivative. To solve this equation with `odeint`, we must first convert
178
+ it to a system of first order equations. By defining the angular
179
+ velocity ``omega(t) = theta'(t)``, we obtain the system::
180
+
181
+ theta'(t) = omega(t)
182
+ omega'(t) = -b*omega(t) - c*sin(theta(t))
183
+
184
+ Let `y` be the vector [`theta`, `omega`]. We implement this system
185
+ in Python as:
186
+
187
+ >>> import numpy as np
188
+ >>> def pend(y, t, b, c):
189
+ ... theta, omega = y
190
+ ... dydt = [omega, -b*omega - c*np.sin(theta)]
191
+ ... return dydt
192
+ ...
193
+
194
+ We assume the constants are `b` = 0.25 and `c` = 5.0:
195
+
196
+ >>> b = 0.25
197
+ >>> c = 5.0
198
+
199
+ For initial conditions, we assume the pendulum is nearly vertical
200
+ with `theta(0)` = `pi` - 0.1, and is initially at rest, so
201
+ `omega(0)` = 0. Then the vector of initial conditions is
202
+
203
+ >>> y0 = [np.pi - 0.1, 0.0]
204
+
205
+ We will generate a solution at 101 evenly spaced samples in the interval
206
+ 0 <= `t` <= 10. So our array of times is:
207
+
208
+ >>> t = np.linspace(0, 10, 101)
209
+
210
+ Call `odeint` to generate the solution. To pass the parameters
211
+ `b` and `c` to `pend`, we give them to `odeint` using the `args`
212
+ argument.
213
+
214
+ >>> from scipy.integrate import odeint
215
+ >>> sol = odeint(pend, y0, t, args=(b, c))
216
+
217
+ The solution is an array with shape (101, 2). The first column
218
+ is `theta(t)`, and the second is `omega(t)`. The following code
219
+ plots both components.
220
+
221
+ >>> import matplotlib.pyplot as plt
222
+ >>> plt.plot(t, sol[:, 0], 'b', label='theta(t)')
223
+ >>> plt.plot(t, sol[:, 1], 'g', label='omega(t)')
224
+ >>> plt.legend(loc='best')
225
+ >>> plt.xlabel('t')
226
+ >>> plt.grid()
227
+ >>> plt.show()
228
+ """
229
+
230
+ if ml is None:
231
+ ml = -1 # changed to zero inside function call
232
+ if mu is None:
233
+ mu = -1 # changed to zero inside function call
234
+
235
+ dt = np.diff(t)
236
+ if not ((dt >= 0).all() or (dt <= 0).all()):
237
+ raise ValueError("The values in t must be monotonically increasing "
238
+ "or monotonically decreasing; repeated values are "
239
+ "allowed.")
240
+
241
+ t = copy(t)
242
+ y0 = copy(y0)
243
+ output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu,
244
+ full_output, rtol, atol, tcrit, h0, hmax, hmin,
245
+ ixpr, mxstep, mxhnil, mxordn, mxords,
246
+ int(bool(tfirst)))
247
+ if output[-1] < 0:
248
+ warning_msg = (f"{_msgs[output[-1]]} Run with full_output = 1 to "
249
+ f"get quantitative information.")
250
+ warnings.warn(warning_msg, ODEintWarning, stacklevel=2)
251
+ elif printmessg:
252
+ warning_msg = _msgs[output[-1]]
253
+ warnings.warn(warning_msg, ODEintWarning, stacklevel=2)
254
+
255
+ if full_output:
256
+ output[1]['message'] = _msgs[output[-1]]
257
+
258
+ output = output[:-1]
259
+ if len(output) == 1:
260
+ return output[0]
261
+ else:
262
+ return output
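Note: the banded-Jacobian parameters (`Dfun`, `ml`, `mu`) documented above are easy to get wrong. The following is a minimal illustrative sketch, not part of the file in this diff; the `rhs` and `banded_jac` helpers and the diffusion system are assumptions made only for this example, while the `odeint` keyword names come from the signature shown above::

    import numpy as np
    from scipy.integrate import odeint

    def rhs(y, t):
        # simple diffusion on a chain with fixed zero boundary values
        dydt = np.empty_like(y)
        dydt[1:-1] = y[:-2] - 2*y[1:-1] + y[2:]
        dydt[0] = -2*y[0] + y[1]
        dydt[-1] = y[-2] - 2*y[-1]
        return dydt

    def banded_jac(y, t):
        # tridiagonal Jacobian in banded storage, shape (ml + mu + 1, n)
        # with ml = mu = 1; jac[i - j + mu, j] holds d f_i / d y_j,
        # as described in the docstring above
        n = len(y)
        jac = np.zeros((3, n))
        jac[0, 1:] = 1.0    # superdiagonal (j = i + 1)
        jac[1, :] = -2.0    # main diagonal (j = i)
        jac[2, :-1] = 1.0   # subdiagonal (j = i - 1)
        return jac

    y0 = np.zeros(50)
    y0[25] = 1.0
    t = np.linspace(0.0, 1.0, 11)
    sol = odeint(rhs, y0, t, Dfun=banded_jac, ml=1, mu=1)

Passing `ml`/`mu` lets the solver factor only the band instead of a full dense Jacobian, which is the main reason to use this interface for large banded systems.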
venv/lib/python3.10/site-packages/scipy/integrate/_quad_vec.py ADDED
@@ -0,0 +1,656 @@
1
+ import sys
2
+ import copy
3
+ import heapq
4
+ import collections
5
+ import functools
6
+
7
+ import numpy as np
8
+
9
+ from scipy._lib._util import MapWrapper, _FunctionWrapper
10
+
11
+
12
+ class LRUDict(collections.OrderedDict):
13
+ def __init__(self, max_size):
14
+ self.__max_size = max_size
15
+
16
+ def __setitem__(self, key, value):
17
+ existing_key = (key in self)
18
+ super().__setitem__(key, value)
19
+ if existing_key:
20
+ self.move_to_end(key)
21
+ elif len(self) > self.__max_size:
22
+ self.popitem(last=False)
23
+
24
+ def update(self, other):
25
+ # Not needed below
26
+ raise NotImplementedError()
27
+
28
+
29
+ class SemiInfiniteFunc:
30
+ """
31
+ Argument transform from (start, +-oo) to (0, 1)
32
+ """
33
+ def __init__(self, func, start, infty):
34
+ self._func = func
35
+ self._start = start
36
+ self._sgn = -1 if infty < 0 else 1
37
+
38
+ # Overflow threshold for the 1/t**2 factor
39
+ self._tmin = sys.float_info.min**0.5
40
+
41
+ def get_t(self, x):
42
+ z = self._sgn * (x - self._start) + 1
43
+ if z == 0:
44
+ # Can happen only if point not in range
45
+ return np.inf
46
+ return 1 / z
47
+
48
+ def __call__(self, t):
49
+ if t < self._tmin:
50
+ return 0.0
51
+ else:
52
+ x = self._start + self._sgn * (1 - t) / t
53
+ f = self._func(x)
54
+ return self._sgn * (f / t) / t
55
+
56
+
57
+ class DoubleInfiniteFunc:
58
+ """
59
+ Argument transform from (-oo, oo) to (-1, 1)
60
+ """
61
+ def __init__(self, func):
62
+ self._func = func
63
+
64
+ # Overflow threshold for the 1/t**2 factor
65
+ self._tmin = sys.float_info.min**0.5
66
+
67
+ def get_t(self, x):
68
+ s = -1 if x < 0 else 1
69
+ return s / (abs(x) + 1)
70
+
71
+ def __call__(self, t):
72
+ if abs(t) < self._tmin:
73
+ return 0.0
74
+ else:
75
+ x = (1 - abs(t)) / t
76
+ f = self._func(x)
77
+ return (f / t) / t
78
+
79
+
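Note: `SemiInfiniteFunc` and `DoubleInfiniteFunc` implement the standard substitutions that map infinite integration ranges to finite ones; the ``1/t**2`` factor returned by ``__call__`` is the Jacobian of the change of variables. A sketch of the identities being used (with :math:`\sigma = \pm 1` giving the direction of the infinite endpoint):

.. math::
    \int_{a}^{\sigma\infty} f(x)\,dx = \int_{0}^{1}
        \frac{\sigma\, f\!\left(a + \sigma\,\frac{1-t}{t}\right)}{t^{2}}\,dt,
    \qquad
    \int_{-\infty}^{\infty} f(x)\,dx = \int_{-1}^{1}
        \frac{f\!\left(\frac{1-|t|}{t}\right)}{t^{2}}\,dt,

which is why `quad_vec` below re-invokes itself on ``(0, 1)`` or ``(-1, 1)``, with a breakpoint at ``t = 0`` in the doubly infinite case.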
80
+ def _max_norm(x):
81
+ return np.amax(abs(x))
82
+
83
+
84
+ def _get_sizeof(obj):
85
+ try:
86
+ return sys.getsizeof(obj)
87
+ except TypeError:
88
+ # occurs on pypy
89
+ if hasattr(obj, '__sizeof__'):
90
+ return int(obj.__sizeof__())
91
+ return 64
92
+
93
+
94
+ class _Bunch:
95
+ def __init__(self, **kwargs):
96
+ self.__keys = kwargs.keys()
97
+ self.__dict__.update(**kwargs)
98
+
99
+ def __repr__(self):
100
+ return "_Bunch({})".format(", ".join(f"{k}={repr(self.__dict__[k])}"
101
+ for k in self.__keys))
102
+
103
+
104
+ def quad_vec(f, a, b, epsabs=1e-200, epsrel=1e-8, norm='2', cache_size=100e6,
105
+ limit=10000, workers=1, points=None, quadrature=None, full_output=False,
106
+ *, args=()):
107
+ r"""Adaptive integration of a vector-valued function.
108
+
109
+ Parameters
110
+ ----------
111
+ f : callable
112
+ Vector-valued function f(x) to integrate.
113
+ a : float
114
+ Initial point.
115
+ b : float
116
+ Final point.
117
+ epsabs : float, optional
118
+ Absolute tolerance.
119
+ epsrel : float, optional
120
+ Relative tolerance.
121
+ norm : {'max', '2'}, optional
122
+ Vector norm to use for error estimation.
123
+ cache_size : int, optional
124
+ Number of bytes to use for memoization.
125
+ limit : float or int, optional
126
+ An upper bound on the number of subintervals used in the adaptive
127
+ algorithm.
128
+ workers : int or map-like callable, optional
129
+ If `workers` is an integer, part of the computation is done in
130
+ parallel subdivided to this many tasks (using
131
+ :class:`python:multiprocessing.pool.Pool`).
132
+ Supply `-1` to use all cores available to the Process.
133
+ Alternatively, supply a map-like callable, such as
134
+ :meth:`python:multiprocessing.pool.Pool.map` for evaluating the
135
+ population in parallel.
136
+ This evaluation is carried out as ``workers(func, iterable)``.
137
+ points : list, optional
138
+ List of additional breakpoints.
139
+ quadrature : {'gk21', 'gk15', 'trapezoid'}, optional
140
+ Quadrature rule to use on subintervals.
141
+ Options: 'gk21' (Gauss-Kronrod 21-point rule),
142
+ 'gk15' (Gauss-Kronrod 15-point rule),
143
+ 'trapezoid' (composite trapezoid rule).
144
+         Default: 'gk21' for finite intervals and 'gk15' for (semi-)infinite intervals.
145
+ full_output : bool, optional
146
+ Return an additional ``info`` dictionary.
147
+ args : tuple, optional
148
+ Extra arguments to pass to function, if any.
149
+
150
+ .. versionadded:: 1.8.0
151
+
152
+ Returns
153
+ -------
154
+ res : {float, array-like}
155
+ Estimate for the result
156
+ err : float
157
+ Error estimate for the result in the given norm
158
+ info : dict
159
+ Returned only when ``full_output=True``.
160
+         Info dictionary. It is an object with the attributes:
161
+
162
+ success : bool
163
+ Whether integration reached target precision.
164
+ status : int
165
+ Indicator for convergence, success (0),
166
+ failure (1), and failure due to rounding error (2).
167
+ neval : int
168
+ Number of function evaluations.
169
+ intervals : ndarray, shape (num_intervals, 2)
170
+ Start and end points of subdivision intervals.
171
+ integrals : ndarray, shape (num_intervals, ...)
172
+ Integral for each interval.
173
+ Note that at most ``cache_size`` values are recorded,
174
+             and the array may contain *nan* for missing items.
175
+ errors : ndarray, shape (num_intervals,)
176
+ Estimated integration error for each interval.
177
+
178
+ Notes
179
+ -----
180
+ The algorithm mainly follows the implementation of QUADPACK's
181
+ DQAG* algorithms, implementing global error control and adaptive
182
+ subdivision.
183
+
184
+     The algorithm here has some differences from the QUADPACK approach:
185
+
186
+ Instead of subdividing one interval at a time, the algorithm
187
+ subdivides N intervals with largest errors at once. This enables
188
+ (partial) parallelization of the integration.
189
+
190
+ The logic of subdividing "next largest" intervals first is then
191
+ not implemented, and we rely on the above extension to avoid
192
+ concentrating on "small" intervals only.
193
+
194
+ The Wynn epsilon table extrapolation is not used (QUADPACK uses it
195
+ for infinite intervals). This is because the algorithm here is
196
+     supposed to work on vector-valued functions, in a user-specified
197
+ norm, and the extension of the epsilon algorithm to this case does
198
+ not appear to be widely agreed. For max-norm, using elementwise
199
+ Wynn epsilon could be possible, but we do not do this here with
200
+ the hope that the epsilon extrapolation is mainly useful in
201
+ special cases.
202
+
203
+ References
204
+ ----------
205
+ [1] R. Piessens, E. de Doncker, QUADPACK (1983).
206
+
207
+ Examples
208
+ --------
209
+     We can compute the integral of a vector-valued function:
210
+
211
+ >>> from scipy.integrate import quad_vec
212
+ >>> import numpy as np
213
+ >>> import matplotlib.pyplot as plt
214
+ >>> alpha = np.linspace(0.0, 2.0, num=30)
215
+ >>> f = lambda x: x**alpha
216
+ >>> x0, x1 = 0, 2
217
+ >>> y, err = quad_vec(f, x0, x1)
218
+ >>> plt.plot(alpha, y)
219
+ >>> plt.xlabel(r"$\alpha$")
220
+ >>> plt.ylabel(r"$\int_{0}^{2} x^\alpha dx$")
221
+ >>> plt.show()
222
+
223
+ """
224
+ a = float(a)
225
+ b = float(b)
226
+
227
+ if args:
228
+ if not isinstance(args, tuple):
229
+ args = (args,)
230
+
231
+ # create a wrapped function to allow the use of map and Pool.map
232
+ f = _FunctionWrapper(f, args)
233
+
234
+ # Use simple transformations to deal with integrals over infinite
235
+ # intervals.
236
+ kwargs = dict(epsabs=epsabs,
237
+ epsrel=epsrel,
238
+ norm=norm,
239
+ cache_size=cache_size,
240
+ limit=limit,
241
+ workers=workers,
242
+ points=points,
243
+ quadrature='gk15' if quadrature is None else quadrature,
244
+ full_output=full_output)
245
+ if np.isfinite(a) and np.isinf(b):
246
+ f2 = SemiInfiniteFunc(f, start=a, infty=b)
247
+ if points is not None:
248
+ kwargs['points'] = tuple(f2.get_t(xp) for xp in points)
249
+ return quad_vec(f2, 0, 1, **kwargs)
250
+ elif np.isfinite(b) and np.isinf(a):
251
+ f2 = SemiInfiniteFunc(f, start=b, infty=a)
252
+ if points is not None:
253
+ kwargs['points'] = tuple(f2.get_t(xp) for xp in points)
254
+ res = quad_vec(f2, 0, 1, **kwargs)
255
+ return (-res[0],) + res[1:]
256
+ elif np.isinf(a) and np.isinf(b):
257
+ sgn = -1 if b < a else 1
258
+
259
+ # NB. explicitly split integral at t=0, which separates
260
+ # the positive and negative sides
261
+ f2 = DoubleInfiniteFunc(f)
262
+ if points is not None:
263
+ kwargs['points'] = (0,) + tuple(f2.get_t(xp) for xp in points)
264
+ else:
265
+ kwargs['points'] = (0,)
266
+
267
+ if a != b:
268
+ res = quad_vec(f2, -1, 1, **kwargs)
269
+ else:
270
+ res = quad_vec(f2, 1, 1, **kwargs)
271
+
272
+ return (res[0]*sgn,) + res[1:]
273
+ elif not (np.isfinite(a) and np.isfinite(b)):
274
+ raise ValueError(f"invalid integration bounds a={a}, b={b}")
275
+
276
+ norm_funcs = {
277
+ None: _max_norm,
278
+ 'max': _max_norm,
279
+ '2': np.linalg.norm
280
+ }
281
+ if callable(norm):
282
+ norm_func = norm
283
+ else:
284
+ norm_func = norm_funcs[norm]
285
+
286
+ parallel_count = 128
287
+ min_intervals = 2
288
+
289
+ try:
290
+ _quadrature = {None: _quadrature_gk21,
291
+ 'gk21': _quadrature_gk21,
292
+ 'gk15': _quadrature_gk15,
293
+ 'trapz': _quadrature_trapezoid, # alias for backcompat
294
+ 'trapezoid': _quadrature_trapezoid}[quadrature]
295
+ except KeyError as e:
296
+ raise ValueError(f"unknown quadrature {quadrature!r}") from e
297
+
298
+ # Initial interval set
299
+ if points is None:
300
+ initial_intervals = [(a, b)]
301
+ else:
302
+ prev = a
303
+ initial_intervals = []
304
+ for p in sorted(points):
305
+ p = float(p)
306
+ if not (a < p < b) or p == prev:
307
+ continue
308
+ initial_intervals.append((prev, p))
309
+ prev = p
310
+ initial_intervals.append((prev, b))
311
+
312
+ global_integral = None
313
+ global_error = None
314
+ rounding_error = None
315
+ interval_cache = None
316
+ intervals = []
317
+ neval = 0
318
+
319
+ for x1, x2 in initial_intervals:
320
+ ig, err, rnd = _quadrature(x1, x2, f, norm_func)
321
+ neval += _quadrature.num_eval
322
+
323
+ if global_integral is None:
324
+ if isinstance(ig, (float, complex)):
325
+ # Specialize for scalars
326
+ if norm_func in (_max_norm, np.linalg.norm):
327
+ norm_func = abs
328
+
329
+ global_integral = ig
330
+ global_error = float(err)
331
+ rounding_error = float(rnd)
332
+
333
+ cache_count = cache_size // _get_sizeof(ig)
334
+ interval_cache = LRUDict(cache_count)
335
+ else:
336
+ global_integral += ig
337
+ global_error += err
338
+ rounding_error += rnd
339
+
340
+ interval_cache[(x1, x2)] = copy.copy(ig)
341
+ intervals.append((-err, x1, x2))
342
+
343
+ heapq.heapify(intervals)
344
+
345
+ CONVERGED = 0
346
+ NOT_CONVERGED = 1
347
+ ROUNDING_ERROR = 2
348
+ NOT_A_NUMBER = 3
349
+
350
+ status_msg = {
351
+ CONVERGED: "Target precision reached.",
352
+ NOT_CONVERGED: "Target precision not reached.",
353
+ ROUNDING_ERROR: "Target precision could not be reached due to rounding error.",
354
+ NOT_A_NUMBER: "Non-finite values encountered."
355
+ }
356
+
357
+ # Process intervals
358
+ with MapWrapper(workers) as mapwrapper:
359
+ ier = NOT_CONVERGED
360
+
361
+ while intervals and len(intervals) < limit:
362
+ # Select intervals with largest errors for subdivision
363
+ tol = max(epsabs, epsrel*norm_func(global_integral))
364
+
365
+ to_process = []
366
+ err_sum = 0
367
+
368
+ for j in range(parallel_count):
369
+ if not intervals:
370
+ break
371
+
372
+ if j > 0 and err_sum > global_error - tol/8:
373
+ # avoid unnecessary parallel splitting
374
+ break
375
+
376
+ interval = heapq.heappop(intervals)
377
+
378
+ neg_old_err, a, b = interval
379
+ old_int = interval_cache.pop((a, b), None)
380
+ to_process.append(
381
+ ((-neg_old_err, a, b, old_int), f, norm_func, _quadrature)
382
+ )
383
+ err_sum += -neg_old_err
384
+
385
+ # Subdivide intervals
386
+ for parts in mapwrapper(_subdivide_interval, to_process):
387
+ dint, derr, dround_err, subint, dneval = parts
388
+ neval += dneval
389
+ global_integral += dint
390
+ global_error += derr
391
+ rounding_error += dround_err
392
+ for x in subint:
393
+ x1, x2, ig, err = x
394
+ interval_cache[(x1, x2)] = ig
395
+ heapq.heappush(intervals, (-err, x1, x2))
396
+
397
+ # Termination check
398
+ if len(intervals) >= min_intervals:
399
+ tol = max(epsabs, epsrel*norm_func(global_integral))
400
+ if global_error < tol/8:
401
+ ier = CONVERGED
402
+ break
403
+ if global_error < rounding_error:
404
+ ier = ROUNDING_ERROR
405
+ break
406
+
407
+ if not (np.isfinite(global_error) and np.isfinite(rounding_error)):
408
+ ier = NOT_A_NUMBER
409
+ break
410
+
411
+ res = global_integral
412
+ err = global_error + rounding_error
413
+
414
+ if full_output:
415
+ res_arr = np.asarray(res)
416
+ dummy = np.full(res_arr.shape, np.nan, dtype=res_arr.dtype)
417
+ integrals = np.array([interval_cache.get((z[1], z[2]), dummy)
418
+ for z in intervals], dtype=res_arr.dtype)
419
+ errors = np.array([-z[0] for z in intervals])
420
+ intervals = np.array([[z[1], z[2]] for z in intervals])
421
+
422
+ info = _Bunch(neval=neval,
423
+ success=(ier == CONVERGED),
424
+ status=ier,
425
+ message=status_msg[ier],
426
+ intervals=intervals,
427
+ integrals=integrals,
428
+ errors=errors)
429
+ return (res, err, info)
430
+ else:
431
+ return (res, err)
432
+
433
+
434
+ def _subdivide_interval(args):
435
+ interval, f, norm_func, _quadrature = args
436
+ old_err, a, b, old_int = interval
437
+
438
+ c = 0.5 * (a + b)
439
+
440
+ # Left-hand side
441
+ if getattr(_quadrature, 'cache_size', 0) > 0:
442
+ f = functools.lru_cache(_quadrature.cache_size)(f)
443
+
444
+ s1, err1, round1 = _quadrature(a, c, f, norm_func)
445
+ dneval = _quadrature.num_eval
446
+ s2, err2, round2 = _quadrature(c, b, f, norm_func)
447
+ dneval += _quadrature.num_eval
448
+ if old_int is None:
449
+ old_int, _, _ = _quadrature(a, b, f, norm_func)
450
+ dneval += _quadrature.num_eval
451
+
452
+ if getattr(_quadrature, 'cache_size', 0) > 0:
453
+ dneval = f.cache_info().misses
454
+
455
+ dint = s1 + s2 - old_int
456
+ derr = err1 + err2 - old_err
457
+ dround_err = round1 + round2
458
+
459
+ subintervals = ((a, c, s1, err1), (c, b, s2, err2))
460
+ return dint, derr, dround_err, subintervals, dneval
461
+
462
+
463
+ def _quadrature_trapezoid(x1, x2, f, norm_func):
464
+ """
465
+ Composite trapezoid quadrature
466
+ """
467
+ x3 = 0.5*(x1 + x2)
468
+ f1 = f(x1)
469
+ f2 = f(x2)
470
+ f3 = f(x3)
471
+
472
+ s2 = 0.25 * (x2 - x1) * (f1 + 2*f3 + f2)
473
+
474
+ round_err = 0.25 * abs(x2 - x1) * (float(norm_func(f1))
475
+ + 2*float(norm_func(f3))
476
+ + float(norm_func(f2))) * 2e-16
477
+
478
+ s1 = 0.5 * (x2 - x1) * (f1 + f2)
479
+ err = 1/3 * float(norm_func(s1 - s2))
480
+ return s2, err, round_err
481
+
482
+
483
+ _quadrature_trapezoid.cache_size = 3 * 3
484
+ _quadrature_trapezoid.num_eval = 3
485
+
486
+
487
+ def _quadrature_gk(a, b, f, norm_func, x, w, v):
488
+ """
489
+ Generic Gauss-Kronrod quadrature
490
+ """
491
+
492
+ fv = [0.0]*len(x)
493
+
494
+ c = 0.5 * (a + b)
495
+ h = 0.5 * (b - a)
496
+
497
+ # Gauss-Kronrod
498
+ s_k = 0.0
499
+ s_k_abs = 0.0
500
+ for i in range(len(x)):
501
+ ff = f(c + h*x[i])
502
+ fv[i] = ff
503
+
504
+ vv = v[i]
505
+
506
+ # \int f(x)
507
+ s_k += vv * ff
508
+ # \int |f(x)|
509
+ s_k_abs += vv * abs(ff)
510
+
511
+ # Gauss
512
+ s_g = 0.0
513
+ for i in range(len(w)):
514
+ s_g += w[i] * fv[2*i + 1]
515
+
516
+ # Quadrature of abs-deviation from average
517
+ s_k_dabs = 0.0
518
+ y0 = s_k / 2.0
519
+ for i in range(len(x)):
520
+ # \int |f(x) - y0|
521
+ s_k_dabs += v[i] * abs(fv[i] - y0)
522
+
523
+ # Use similar error estimation as quadpack
524
+ err = float(norm_func((s_k - s_g) * h))
525
+ dabs = float(norm_func(s_k_dabs * h))
526
+ if dabs != 0 and err != 0:
527
+ err = dabs * min(1.0, (200 * err / dabs)**1.5)
528
+
529
+ eps = sys.float_info.epsilon
530
+ round_err = float(norm_func(50 * eps * h * s_k_abs))
531
+
532
+ if round_err > sys.float_info.min:
533
+ err = max(err, round_err)
534
+
535
+ return h * s_k, err, round_err
536
+
537
+
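Note: the error estimate in ``_quadrature_gk`` above follows the QUADPACK heuristic. Writing :math:`K` and :math:`G` for the Kronrod and embedded Gauss estimates of the integral, and :math:`D` for the quadrature estimate of :math:`\int |f(x) - \bar f|\,dx` (``s_k_dabs * h`` in the code, with :math:`\bar f` the interval mean of :math:`f`), the returned error is approximately

.. math::
    E \approx D \, \min\!\left(1,
        \left(\frac{200\,\lVert K - G \rVert}{D}\right)^{3/2}\right),

floored by the roundoff term ``50 * eps * h * s_k_abs``. This damps the raw Gauss-Kronrod difference on smooth integrands while remaining conservative near difficult points.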
538
+ def _quadrature_gk21(a, b, f, norm_func):
539
+ """
540
+ Gauss-Kronrod 21 quadrature with error estimate
541
+ """
542
+ # Gauss-Kronrod points
543
+ x = (0.995657163025808080735527280689003,
544
+ 0.973906528517171720077964012084452,
545
+ 0.930157491355708226001207180059508,
546
+ 0.865063366688984510732096688423493,
547
+ 0.780817726586416897063717578345042,
548
+ 0.679409568299024406234327365114874,
549
+ 0.562757134668604683339000099272694,
550
+ 0.433395394129247190799265943165784,
551
+ 0.294392862701460198131126603103866,
552
+ 0.148874338981631210884826001129720,
553
+ 0,
554
+ -0.148874338981631210884826001129720,
555
+ -0.294392862701460198131126603103866,
556
+ -0.433395394129247190799265943165784,
557
+ -0.562757134668604683339000099272694,
558
+ -0.679409568299024406234327365114874,
559
+ -0.780817726586416897063717578345042,
560
+ -0.865063366688984510732096688423493,
561
+ -0.930157491355708226001207180059508,
562
+ -0.973906528517171720077964012084452,
563
+ -0.995657163025808080735527280689003)
564
+
565
+ # 10-point weights
566
+ w = (0.066671344308688137593568809893332,
567
+ 0.149451349150580593145776339657697,
568
+ 0.219086362515982043995534934228163,
569
+ 0.269266719309996355091226921569469,
570
+ 0.295524224714752870173892994651338,
571
+ 0.295524224714752870173892994651338,
572
+ 0.269266719309996355091226921569469,
573
+ 0.219086362515982043995534934228163,
574
+ 0.149451349150580593145776339657697,
575
+ 0.066671344308688137593568809893332)
576
+
577
+ # 21-point weights
578
+ v = (0.011694638867371874278064396062192,
579
+ 0.032558162307964727478818972459390,
580
+ 0.054755896574351996031381300244580,
581
+ 0.075039674810919952767043140916190,
582
+ 0.093125454583697605535065465083366,
583
+ 0.109387158802297641899210590325805,
584
+ 0.123491976262065851077958109831074,
585
+ 0.134709217311473325928054001771707,
586
+ 0.142775938577060080797094273138717,
587
+ 0.147739104901338491374841515972068,
588
+ 0.149445554002916905664936468389821,
589
+ 0.147739104901338491374841515972068,
590
+ 0.142775938577060080797094273138717,
591
+ 0.134709217311473325928054001771707,
592
+ 0.123491976262065851077958109831074,
593
+ 0.109387158802297641899210590325805,
594
+ 0.093125454583697605535065465083366,
595
+ 0.075039674810919952767043140916190,
596
+ 0.054755896574351996031381300244580,
597
+ 0.032558162307964727478818972459390,
598
+ 0.011694638867371874278064396062192)
599
+
600
+ return _quadrature_gk(a, b, f, norm_func, x, w, v)
601
+
602
+
603
+ _quadrature_gk21.num_eval = 21
604
+
605
+
606
+ def _quadrature_gk15(a, b, f, norm_func):
607
+ """
608
+ Gauss-Kronrod 15 quadrature with error estimate
609
+ """
610
+ # Gauss-Kronrod points
611
+ x = (0.991455371120812639206854697526329,
612
+ 0.949107912342758524526189684047851,
613
+ 0.864864423359769072789712788640926,
614
+ 0.741531185599394439863864773280788,
615
+ 0.586087235467691130294144838258730,
616
+ 0.405845151377397166906606412076961,
617
+ 0.207784955007898467600689403773245,
618
+ 0.000000000000000000000000000000000,
619
+ -0.207784955007898467600689403773245,
620
+ -0.405845151377397166906606412076961,
621
+ -0.586087235467691130294144838258730,
622
+ -0.741531185599394439863864773280788,
623
+ -0.864864423359769072789712788640926,
624
+ -0.949107912342758524526189684047851,
625
+ -0.991455371120812639206854697526329)
626
+
627
+ # 7-point weights
628
+ w = (0.129484966168869693270611432679082,
629
+ 0.279705391489276667901467771423780,
630
+ 0.381830050505118944950369775488975,
631
+ 0.417959183673469387755102040816327,
632
+ 0.381830050505118944950369775488975,
633
+ 0.279705391489276667901467771423780,
634
+ 0.129484966168869693270611432679082)
635
+
636
+ # 15-point weights
637
+ v = (0.022935322010529224963732008058970,
638
+ 0.063092092629978553290700663189204,
639
+ 0.104790010322250183839876322541518,
640
+ 0.140653259715525918745189590510238,
641
+ 0.169004726639267902826583426598550,
642
+ 0.190350578064785409913256402421014,
643
+ 0.204432940075298892414161999234649,
644
+ 0.209482141084727828012999174891714,
645
+ 0.204432940075298892414161999234649,
646
+ 0.190350578064785409913256402421014,
647
+ 0.169004726639267902826583426598550,
648
+ 0.140653259715525918745189590510238,
649
+ 0.104790010322250183839876322541518,
650
+ 0.063092092629978553290700663189204,
651
+ 0.022935322010529224963732008058970)
652
+
653
+ return _quadrature_gk(a, b, f, norm_func, x, w, v)
654
+
655
+
656
+ _quadrature_gk15.num_eval = 15
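Note: a minimal usage sketch for `quad_vec`, not part of the file above; the integrand and the breakpoint value are assumptions made only for illustration. It exercises the `points`, `norm` and `full_output` options documented in the docstring::

    import numpy as np
    from scipy.integrate import quad_vec

    # vector-valued integrand with an integrable kink at x = 1.0
    exponents = np.linspace(0.1, 0.9, 5)
    f = lambda x: np.abs(x - 1.0) ** exponents

    res, err, info = quad_vec(f, 0.0, 2.0, points=[1.0], norm='max',
                              full_output=True)
    print(res.shape, info.success, info.neval)

Passing the kink location through `points` seeds the initial interval set, so the adaptive subdivision does not have to discover the difficulty on its own.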
venv/lib/python3.10/site-packages/scipy/integrate/_quadpack.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (116 kB). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/_quadpack_py.py ADDED
@@ -0,0 +1,1291 @@
1
+ # Author: Travis Oliphant 2001
2
+ # Author: Nathan Woods 2013 (nquad &c)
3
+ import sys
4
+ import warnings
5
+ from functools import partial
6
+
7
+ from . import _quadpack
8
+ import numpy as np
9
+
10
+ __all__ = ["quad", "dblquad", "tplquad", "nquad", "IntegrationWarning"]
11
+
12
+
13
+ error = _quadpack.error
14
+
15
+ class IntegrationWarning(UserWarning):
16
+ """
17
+ Warning on issues during integration.
18
+ """
19
+ pass
20
+
21
+
22
+ def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8,
23
+ limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50,
24
+ limlst=50, complex_func=False):
25
+ """
26
+ Compute a definite integral.
27
+
28
+ Integrate func from `a` to `b` (possibly infinite interval) using a
29
+ technique from the Fortran library QUADPACK.
30
+
31
+ Parameters
32
+ ----------
33
+ func : {function, scipy.LowLevelCallable}
34
+ A Python function or method to integrate. If `func` takes many
35
+ arguments, it is integrated along the axis corresponding to the
36
+ first argument.
37
+
38
+         If the user desires improved integration performance, then `func` may
39
+ be a `scipy.LowLevelCallable` with one of the signatures::
40
+
41
+ double func(double x)
42
+ double func(double x, void *user_data)
43
+ double func(int n, double *xx)
44
+ double func(int n, double *xx, void *user_data)
45
+
46
+ The ``user_data`` is the data contained in the `scipy.LowLevelCallable`.
47
+ In the call forms with ``xx``, ``n`` is the length of the ``xx``
48
+ array which contains ``xx[0] == x`` and the rest of the items are
49
+ numbers contained in the ``args`` argument of quad.
50
+
51
+ In addition, certain ctypes call signatures are supported for
52
+ backward compatibility, but those should not be used in new code.
53
+ a : float
54
+ Lower limit of integration (use -numpy.inf for -infinity).
55
+ b : float
56
+ Upper limit of integration (use numpy.inf for +infinity).
57
+ args : tuple, optional
58
+ Extra arguments to pass to `func`.
59
+ full_output : int, optional
60
+ Non-zero to return a dictionary of integration information.
61
+ If non-zero, warning messages are also suppressed and the
62
+ message is appended to the output tuple.
63
+ complex_func : bool, optional
64
+ Indicate if the function's (`func`) return type is real
65
+ (``complex_func=False``: default) or complex (``complex_func=True``).
66
+ In both cases, the function's argument is real.
67
+ If full_output is also non-zero, the `infodict`, `message`, and
68
+ `explain` for the real and complex components are returned in
69
+ a dictionary with keys "real output" and "imag output".
70
+
71
+ Returns
72
+ -------
73
+ y : float
74
+ The integral of func from `a` to `b`.
75
+ abserr : float
76
+ An estimate of the absolute error in the result.
77
+ infodict : dict
78
+ A dictionary containing additional information.
79
+ message
80
+ A convergence message.
81
+ explain
82
+ Appended only with 'cos' or 'sin' weighting and infinite
83
+ integration limits, it contains an explanation of the codes in
84
+ infodict['ierlst']
85
+
86
+ Other Parameters
87
+ ----------------
88
+ epsabs : float or int, optional
89
+ Absolute error tolerance. Default is 1.49e-8. `quad` tries to obtain
90
+ an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
91
+ where ``i`` = integral of `func` from `a` to `b`, and ``result`` is the
92
+ numerical approximation. See `epsrel` below.
93
+ epsrel : float or int, optional
94
+ Relative error tolerance. Default is 1.49e-8.
95
+ If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
96
+ and ``50 * (machine epsilon)``. See `epsabs` above.
97
+ limit : float or int, optional
98
+ An upper bound on the number of subintervals used in the adaptive
99
+ algorithm.
100
+     points : (sequence of floats, ints), optional
101
+ A sequence of break points in the bounded integration interval
102
+ where local difficulties of the integrand may occur (e.g.,
103
+ singularities, discontinuities). The sequence does not have
104
+ to be sorted. Note that this option cannot be used in conjunction
105
+ with ``weight``.
106
+     weight : str, optional
107
+ String indicating weighting function. Full explanation for this
108
+ and the remaining arguments can be found below.
109
+ wvar : optional
110
+ Variables for use with weighting functions.
111
+ wopts : optional
112
+ Optional input for reusing Chebyshev moments.
113
+ maxp1 : float or int, optional
114
+ An upper bound on the number of Chebyshev moments.
115
+ limlst : int, optional
116
+ Upper bound on the number of cycles (>=3) for use with a sinusoidal
117
+ weighting and an infinite end-point.
118
+
119
+ See Also
120
+ --------
121
+ dblquad : double integral
122
+ tplquad : triple integral
123
+ nquad : n-dimensional integrals (uses `quad` recursively)
124
+ fixed_quad : fixed-order Gaussian quadrature
125
+ quadrature : adaptive Gaussian quadrature
126
+ odeint : ODE integrator
127
+ ode : ODE integrator
128
+ simpson : integrator for sampled data
129
+ romb : integrator for sampled data
130
+ scipy.special : for coefficients and roots of orthogonal polynomials
131
+
132
+ Notes
133
+ -----
134
+ For valid results, the integral must converge; behavior for divergent
135
+ integrals is not guaranteed.
136
+
137
+ **Extra information for quad() inputs and outputs**
138
+
139
+ If full_output is non-zero, then the third output argument
140
+ (infodict) is a dictionary with entries as tabulated below. For
141
+ infinite limits, the range is transformed to (0,1) and the
142
+ optional outputs are given with respect to this transformed range.
143
+ Let M be the input argument limit and let K be infodict['last'].
144
+ The entries are:
145
+
146
+ 'neval'
147
+ The number of function evaluations.
148
+ 'last'
149
+ The number, K, of subintervals produced in the subdivision process.
150
+ 'alist'
151
+ A rank-1 array of length M, the first K elements of which are the
152
+ left end points of the subintervals in the partition of the
153
+ integration range.
154
+ 'blist'
155
+ A rank-1 array of length M, the first K elements of which are the
156
+ right end points of the subintervals.
157
+ 'rlist'
158
+ A rank-1 array of length M, the first K elements of which are the
159
+ integral approximations on the subintervals.
160
+ 'elist'
161
+ A rank-1 array of length M, the first K elements of which are the
162
+ moduli of the absolute error estimates on the subintervals.
163
+ 'iord'
164
+ A rank-1 integer array of length M, the first L elements of
165
+ which are pointers to the error estimates over the subintervals
166
+ with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the
167
+ sequence ``infodict['iord']`` and let E be the sequence
168
+ ``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a
169
+ decreasing sequence.
170
+
171
+ If the input argument points is provided (i.e., it is not None),
172
+ the following additional outputs are placed in the output
173
+ dictionary. Assume the points sequence is of length P.
174
+
175
+ 'pts'
176
+ A rank-1 array of length P+2 containing the integration limits
177
+ and the break points of the intervals in ascending order.
178
+ This is an array giving the subintervals over which integration
179
+ will occur.
180
+ 'level'
181
+ A rank-1 integer array of length M (=limit), containing the
182
+ subdivision levels of the subintervals, i.e., if (aa,bb) is a
183
+ subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]``
184
+ are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l
185
+ if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``.
186
+ 'ndin'
187
+ A rank-1 integer array of length P+2. After the first integration
188
+ over the intervals (pts[1], pts[2]), the error estimates over some
189
+ of the intervals may have been increased artificially in order to
190
+ put their subdivision forward. This array has ones in slots
191
+ corresponding to the subintervals for which this happens.
192
+
193
+ **Weighting the integrand**
194
+
195
+ The input variables, *weight* and *wvar*, are used to weight the
196
+ integrand by a select list of functions. Different integration
197
+ methods are used to compute the integral with these weighting
198
+ functions, and these do not support specifying break points. The
199
+ possible values of weight and the corresponding weighting functions are.
200
+
201
+ ========== =================================== =====================
202
+ ``weight`` Weight function used ``wvar``
203
+ ========== =================================== =====================
204
+ 'cos' cos(w*x) wvar = w
205
+ 'sin' sin(w*x) wvar = w
206
+ 'alg' g(x) = ((x-a)**alpha)*((b-x)**beta) wvar = (alpha, beta)
207
+ 'alg-loga' g(x)*log(x-a) wvar = (alpha, beta)
208
+ 'alg-logb' g(x)*log(b-x) wvar = (alpha, beta)
209
+ 'alg-log' g(x)*log(x-a)*log(b-x) wvar = (alpha, beta)
210
+ 'cauchy' 1/(x-c) wvar = c
211
+ ========== =================================== =====================
212
+
213
+ wvar holds the parameter w, (alpha, beta), or c depending on the weight
214
+ selected. In these expressions, a and b are the integration limits.
215
+
216
+ For the 'cos' and 'sin' weighting, additional inputs and outputs are
217
+ available.
218
+
219
+ For finite integration limits, the integration is performed using a
220
+ Clenshaw-Curtis method which uses Chebyshev moments. For repeated
221
+ calculations, these moments are saved in the output dictionary:
222
+
223
+ 'momcom'
224
+ The maximum level of Chebyshev moments that have been computed,
225
+ i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been
226
+ computed for intervals of length ``|b-a| * 2**(-l)``,
227
+ ``l=0,1,...,M_c``.
228
+ 'nnlog'
229
+ A rank-1 integer array of length M(=limit), containing the
230
+ subdivision levels of the subintervals, i.e., an element of this
231
+ array is equal to l if the corresponding subinterval is
232
+ ``|b-a|* 2**(-l)``.
233
+ 'chebmo'
234
+ A rank-2 array of shape (25, maxp1) containing the computed
235
+ Chebyshev moments. These can be passed on to an integration
236
+ over the same interval by passing this array as the second
237
+ element of the sequence wopts and passing infodict['momcom'] as
238
+ the first element.
239
+
240
+ If one of the integration limits is infinite, then a Fourier integral is
241
+ computed (assuming w neq 0). If full_output is 1 and a numerical error
242
+ is encountered, besides the error message attached to the output tuple,
243
+ a dictionary is also appended to the output tuple which translates the
244
+ error codes in the array ``info['ierlst']`` to English messages. The
245
+ output information dictionary contains the following entries instead of
246
+ 'last', 'alist', 'blist', 'rlist', and 'elist':
247
+
248
+ 'lst'
249
+ The number of subintervals needed for the integration (call it ``K_f``).
250
+ 'rslst'
251
+ A rank-1 array of length M_f=limlst, whose first ``K_f`` elements
252
+ contain the integral contribution over the interval
253
+ ``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|``
254
+ and ``k=1,2,...,K_f``.
255
+ 'erlst'
256
+ A rank-1 array of length ``M_f`` containing the error estimate
257
+ corresponding to the interval in the same position in
258
+ ``infodict['rslist']``.
259
+ 'ierlst'
260
+ A rank-1 integer array of length ``M_f`` containing an error flag
261
+ corresponding to the interval in the same position in
262
+ ``infodict['rslist']``. See the explanation dictionary (last entry
263
+ in the output tuple) for the meaning of the codes.
264
+
265
+
266
+ **Details of QUADPACK level routines**
267
+
268
+ `quad` calls routines from the FORTRAN library QUADPACK. This section
269
+ provides details on the conditions for each routine to be called and a
270
+ short description of each routine. The routine called depends on
271
+ `weight`, `points` and the integration limits `a` and `b`.
272
+
273
+ ================ ============== ========== =====================
274
+ QUADPACK routine `weight` `points` infinite bounds
275
+ ================ ============== ========== =====================
276
+ qagse None No No
277
+ qagie None No Yes
278
+ qagpe None Yes No
279
+ qawoe 'sin', 'cos' No No
280
+ qawfe 'sin', 'cos' No either `a` or `b`
281
+ qawse 'alg*' No No
282
+ qawce 'cauchy' No No
283
+ ================ ============== ========== =====================
284
+
285
+ The following provides a short description from [1]_ for each
286
+ routine.
287
+
288
+ qagse
289
+ is an integrator based on globally adaptive interval
290
+ subdivision in connection with extrapolation, which will
291
+ eliminate the effects of integrand singularities of
292
+ several types.
293
+ qagie
294
+ handles integration over infinite intervals. The infinite range is
295
+ mapped onto a finite interval and subsequently the same strategy as
296
+ in ``QAGS`` is applied.
297
+ qagpe
298
+ serves the same purposes as QAGS, but also allows the
299
+ user to provide explicit information about the location
300
+ and type of trouble-spots i.e. the abscissae of internal
301
+ singularities, discontinuities and other difficulties of
302
+ the integrand function.
303
+ qawoe
304
+ is an integrator for the evaluation of
305
+ :math:`\\int^b_a \\cos(\\omega x)f(x)dx` or
306
+ :math:`\\int^b_a \\sin(\\omega x)f(x)dx`
307
+ over a finite interval [a,b], where :math:`\\omega` and :math:`f`
308
+ are specified by the user. The rule evaluation component is based
309
+         on the modified Clenshaw-Curtis technique.
310
+
311
+ An adaptive subdivision scheme is used in connection
312
+ with an extrapolation procedure, which is a modification
313
+ of that in ``QAGS`` and allows the algorithm to deal with
314
+ singularities in :math:`f(x)`.
315
+ qawfe
316
+ calculates the Fourier transform
317
+ :math:`\\int^\\infty_a \\cos(\\omega x)f(x)dx` or
318
+ :math:`\\int^\\infty_a \\sin(\\omega x)f(x)dx`
319
+ for user-provided :math:`\\omega` and :math:`f`. The procedure of
320
+ ``QAWO`` is applied on successive finite intervals, and convergence
321
+ acceleration by means of the :math:`\\varepsilon`-algorithm is applied
322
+ to the series of integral approximations.
323
+ qawse
324
+ approximate :math:`\\int^b_a w(x)f(x)dx`, with :math:`a < b` where
325
+ :math:`w(x) = (x-a)^{\\alpha}(b-x)^{\\beta}v(x)` with
326
+ :math:`\\alpha,\\beta > -1`, where :math:`v(x)` may be one of the
327
+ following functions: :math:`1`, :math:`\\log(x-a)`, :math:`\\log(b-x)`,
328
+ :math:`\\log(x-a)\\log(b-x)`.
329
+
330
+ The user specifies :math:`\\alpha`, :math:`\\beta` and the type of the
331
+ function :math:`v`. A globally adaptive subdivision strategy is
332
+ applied, with modified Clenshaw-Curtis integration on those
333
+ subintervals which contain `a` or `b`.
334
+ qawce
335
+ compute :math:`\\int^b_a f(x) / (x-c)dx` where the integral must be
336
+ interpreted as a Cauchy principal value integral, for user specified
337
+ :math:`c` and :math:`f`. The strategy is globally adaptive. Modified
338
+ Clenshaw-Curtis integration is used on those intervals containing the
339
+ point :math:`x = c`.
340
+
341
+ **Integration of Complex Function of a Real Variable**
342
+
343
+ A complex valued function, :math:`f`, of a real variable can be written as
344
+ :math:`f = g + ih`. Similarly, the integral of :math:`f` can be
345
+ written as
346
+
347
+ .. math::
348
+ \\int_a^b f(x) dx = \\int_a^b g(x) dx + i\\int_a^b h(x) dx
349
+
350
+ assuming that the integrals of :math:`g` and :math:`h` exist
351
+ over the interval :math:`[a,b]` [2]_. Therefore, ``quad`` integrates
352
+ complex-valued functions by integrating the real and imaginary components
353
+ separately.
354
+
355
+
356
+ References
357
+ ----------
358
+
359
+ .. [1] Piessens, Robert; de Doncker-Kapenga, Elise;
360
+ Überhuber, Christoph W.; Kahaner, David (1983).
361
+ QUADPACK: A subroutine package for automatic integration.
362
+ Springer-Verlag.
363
+ ISBN 978-3-540-12553-2.
364
+
365
+ .. [2] McCullough, Thomas; Phillips, Keith (1973).
366
+ Foundations of Analysis in the Complex Plane.
367
+ Holt Rinehart Winston.
368
+ ISBN 0-03-086370-8
369
+
370
+ Examples
371
+ --------
372
+ Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result
373
+
374
+ >>> from scipy import integrate
375
+ >>> import numpy as np
376
+ >>> x2 = lambda x: x**2
377
+ >>> integrate.quad(x2, 0, 4)
378
+ (21.333333333333332, 2.3684757858670003e-13)
379
+ >>> print(4**3 / 3.) # analytical result
380
+ 21.3333333333
381
+
382
+ Calculate :math:`\\int^\\infty_0 e^{-x} dx`
383
+
384
+ >>> invexp = lambda x: np.exp(-x)
385
+ >>> integrate.quad(invexp, 0, np.inf)
386
+ (1.0, 5.842605999138044e-11)
387
+
388
+ Calculate :math:`\\int^1_0 a x \\,dx` for :math:`a = 1, 3`
389
+
390
+ >>> f = lambda x, a: a*x
391
+ >>> y, err = integrate.quad(f, 0, 1, args=(1,))
392
+ >>> y
393
+ 0.5
394
+ >>> y, err = integrate.quad(f, 0, 1, args=(3,))
395
+ >>> y
396
+ 1.5
397
+
398
+ Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding
399
+ y parameter as 1::
400
+
401
+ testlib.c =>
402
+ double func(int n, double args[n]){
403
+ return args[0]*args[0] + args[1]*args[1];}
404
+ compile to library testlib.*
405
+
406
+ ::
407
+
408
+ from scipy import integrate
409
+ import ctypes
410
+ lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path
411
+ lib.func.restype = ctypes.c_double
412
+ lib.func.argtypes = (ctypes.c_int,ctypes.c_double)
413
+ integrate.quad(lib.func,0,1,(1))
414
+ #(1.3333333333333333, 1.4802973661668752e-14)
415
+ print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result
416
+ # 1.3333333333333333
417
+
418
+ Be aware that pulse shapes and other sharp features as compared to the
419
+ size of the integration interval may not be integrated correctly using
420
+ this method. A simplified example of this limitation is integrating a
421
+ y-axis reflected step function with many zero values within the integrals
422
+ bounds.
423
+
424
+ >>> y = lambda x: 1 if x<=0 else 0
425
+ >>> integrate.quad(y, -1, 1)
426
+ (1.0, 1.1102230246251565e-14)
427
+ >>> integrate.quad(y, -1, 100)
428
+ (1.0000000002199108, 1.0189464580163188e-08)
429
+ >>> integrate.quad(y, -1, 10000)
430
+ (0.0, 0.0)
431
+
432
+ """
433
+ if not isinstance(args, tuple):
434
+ args = (args,)
435
+
436
+ # check the limits of integration: \int_a^b, expect a < b
437
+ flip, a, b = b < a, min(a, b), max(a, b)
438
+
439
+ if complex_func:
440
+ def imfunc(x, *args):
441
+ return func(x, *args).imag
442
+
443
+ def refunc(x, *args):
444
+ return func(x, *args).real
445
+
446
+ re_retval = quad(refunc, a, b, args, full_output, epsabs,
447
+ epsrel, limit, points, weight, wvar, wopts,
448
+ maxp1, limlst, complex_func=False)
449
+ im_retval = quad(imfunc, a, b, args, full_output, epsabs,
450
+ epsrel, limit, points, weight, wvar, wopts,
451
+ maxp1, limlst, complex_func=False)
452
+ integral = re_retval[0] + 1j*im_retval[0]
453
+ error_estimate = re_retval[1] + 1j*im_retval[1]
454
+ retval = integral, error_estimate
455
+ if full_output:
456
+ msgexp = {}
457
+ msgexp["real"] = re_retval[2:]
458
+ msgexp["imag"] = im_retval[2:]
459
+ retval = retval + (msgexp,)
460
+
461
+ return retval
462
+
463
+ if weight is None:
464
+ retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
465
+ points)
466
+ else:
467
+ if points is not None:
468
+ msg = ("Break points cannot be specified when using weighted integrand.\n"
469
+ "Continuing, ignoring specified points.")
470
+ warnings.warn(msg, IntegrationWarning, stacklevel=2)
471
+ retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
472
+ limlst, limit, maxp1, weight, wvar, wopts)
473
+
474
+ if flip:
475
+ retval = (-retval[0],) + retval[1:]
476
+
477
+ ier = retval[-1]
478
+ if ier == 0:
479
+ return retval[:-1]
480
+
481
+ msgs = {80: "A Python error occurred possibly while calling the function.",
482
+ 1: f"The maximum number of subdivisions ({limit}) has been achieved.\n "
483
+ f"If increasing the limit yields no improvement it is advised to "
484
+ f"analyze \n the integrand in order to determine the difficulties. "
485
+ f"If the position of a \n local difficulty can be determined "
486
+ f"(singularity, discontinuity) one will \n probably gain from "
487
+ f"splitting up the interval and calling the integrator \n on the "
488
+ f"subranges. Perhaps a special-purpose integrator should be used.",
489
+ 2: "The occurrence of roundoff error is detected, which prevents \n "
490
+ "the requested tolerance from being achieved. "
491
+ "The error may be \n underestimated.",
492
+ 3: "Extremely bad integrand behavior occurs at some points of the\n "
493
+ "integration interval.",
494
+ 4: "The algorithm does not converge. Roundoff error is detected\n "
495
+ "in the extrapolation table. It is assumed that the requested "
496
+ "tolerance\n cannot be achieved, and that the returned result "
497
+ "(if full_output = 1) is \n the best which can be obtained.",
498
+ 5: "The integral is probably divergent, or slowly convergent.",
499
+ 6: "The input is invalid.",
500
+ 7: "Abnormal termination of the routine. The estimates for result\n "
501
+ "and error are less reliable. It is assumed that the requested "
502
+ "accuracy\n has not been achieved.",
503
+ 'unknown': "Unknown error."}
504
+
505
+ if weight in ['cos','sin'] and (b == np.inf or a == -np.inf):
506
+ msgs[1] = (
507
+ "The maximum number of cycles allowed has been achieved., e.e.\n of "
508
+ "subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n "
509
+ "*pi/abs(omega), for k = 1, 2, ..., lst. "
510
+ "One can allow more cycles by increasing the value of limlst. "
511
+ "Look at info['ierlst'] with full_output=1."
512
+ )
513
+ msgs[4] = (
514
+ "The extrapolation table constructed for convergence acceleration\n of "
515
+ "the series formed by the integral contributions over the cycles, \n does "
516
+ "not converge to within the requested accuracy. "
517
+ "Look at \n info['ierlst'] with full_output=1."
518
+ )
519
+ msgs[7] = (
520
+ "Bad integrand behavior occurs within one or more of the cycles.\n "
521
+ "Location and type of the difficulty involved can be determined from \n "
522
+ "the vector info['ierlist'] obtained with full_output=1."
523
+ )
524
+ explain = {1: "The maximum number of subdivisions (= limit) has been \n "
525
+ "achieved on this cycle.",
526
+ 2: "The occurrence of roundoff error is detected and prevents\n "
527
+ "the tolerance imposed on this cycle from being achieved.",
528
+ 3: "Extremely bad integrand behavior occurs at some points of\n "
529
+ "this cycle.",
530
+ 4: "The integral over this cycle does not converge (to within the "
531
+ "required accuracy) due to roundoff in the extrapolation "
532
+ "procedure invoked on this cycle. It is assumed that the result "
533
+ "on this interval is the best which can be obtained.",
534
+ 5: "The integral over this cycle is probably divergent or "
535
+ "slowly convergent."}
536
+
537
+ try:
538
+ msg = msgs[ier]
539
+ except KeyError:
540
+ msg = msgs['unknown']
541
+
542
+ if ier in [1,2,3,4,5,7]:
543
+ if full_output:
544
+ if weight in ['cos', 'sin'] and (b == np.inf or a == -np.inf):
545
+ return retval[:-1] + (msg, explain)
546
+ else:
547
+ return retval[:-1] + (msg,)
548
+ else:
549
+ warnings.warn(msg, IntegrationWarning, stacklevel=2)
550
+ return retval[:-1]
551
+
552
+ elif ier == 6: # Forensic decision tree when QUADPACK throws ier=6
553
+ if epsabs <= 0: # Small error tolerance - applies to all methods
554
+ if epsrel < max(50 * sys.float_info.epsilon, 5e-29):
555
+ msg = ("If 'epsabs'<=0, 'epsrel' must be greater than both"
556
+ " 5e-29 and 50*(machine epsilon).")
557
+ elif weight in ['sin', 'cos'] and (abs(a) + abs(b) == np.inf):
558
+ msg = ("Sine or cosine weighted integrals with infinite domain"
559
+ " must have 'epsabs'>0.")
560
+
561
+ elif weight is None:
562
+ if points is None: # QAGSE/QAGIE
563
+ msg = ("Invalid 'limit' argument. There must be"
564
+ " at least one subinterval")
565
+ else: # QAGPE
566
+ if not (min(a, b) <= min(points) <= max(points) <= max(a, b)):
567
+ msg = ("All break points in 'points' must lie within the"
568
+ " integration limits.")
569
+ elif len(points) >= limit:
570
+ msg = (f"Number of break points ({len(points):d}) "
571
+ f"must be less than subinterval limit ({limit:d})")
572
+
573
+ else:
574
+ if maxp1 < 1:
575
+ msg = "Chebyshev moment limit maxp1 must be >=1."
576
+
577
+ elif weight in ('cos', 'sin') and abs(a+b) == np.inf: # QAWFE
578
+ msg = "Cycle limit limlst must be >=3."
579
+
580
+ elif weight.startswith('alg'): # QAWSE
581
+ if min(wvar) < -1:
582
+ msg = "wvar parameters (alpha, beta) must both be >= -1."
583
+ if b < a:
584
+ msg = "Integration limits a, b must satistfy a<b."
585
+
586
+ elif weight == 'cauchy' and wvar in (a, b):
587
+ msg = ("Parameter 'wvar' must not equal"
588
+ " integration limits 'a' or 'b'.")
589
+
590
+ raise ValueError(msg)
591
+
592
+
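Note: the ``complex_func`` branch above simply integrates the real and imaginary parts separately, as the docstring states. A minimal sketch, with the integrand an assumption made only for illustration::

    import numpy as np
    from scipy.integrate import quad

    # integral of exp(i*x) over [0, pi] is 2i
    val, err = quad(lambda x: np.exp(1j * x), 0, np.pi, complex_func=True)
    print(val)   # approximately 0 + 2j

Both the value and the returned error estimate are complex, combining the estimates of the two real-valued sub-integrations.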
593
+ def _quad(func,a,b,args,full_output,epsabs,epsrel,limit,points):
594
+ infbounds = 0
595
+ if (b != np.inf and a != -np.inf):
596
+ pass # standard integration
597
+ elif (b == np.inf and a != -np.inf):
598
+ infbounds = 1
599
+ bound = a
600
+ elif (b == np.inf and a == -np.inf):
601
+ infbounds = 2
602
+ bound = 0 # ignored
603
+ elif (b != np.inf and a == -np.inf):
604
+ infbounds = -1
605
+ bound = b
606
+ else:
607
+ raise RuntimeError("Infinity comparisons don't work for you.")
608
+
609
+ if points is None:
610
+ if infbounds == 0:
611
+ return _quadpack._qagse(func,a,b,args,full_output,epsabs,epsrel,limit)
612
+ else:
613
+ return _quadpack._qagie(func, bound, infbounds, args, full_output,
614
+ epsabs, epsrel, limit)
615
+ else:
616
+ if infbounds != 0:
617
+ raise ValueError("Infinity inputs cannot be used with break points.")
618
+ else:
619
+ #Duplicates force function evaluation at singular points
620
+ the_points = np.unique(points)
621
+ the_points = the_points[a < the_points]
622
+ the_points = the_points[the_points < b]
623
+ the_points = np.concatenate((the_points, (0., 0.)))
624
+ return _quadpack._qagpe(func, a, b, the_points, args, full_output,
625
+ epsabs, epsrel, limit)
626
+
627
+
628
+ def _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
629
+ limlst, limit, maxp1,weight, wvar, wopts):
630
+ if weight not in ['cos','sin','alg','alg-loga','alg-logb','alg-log','cauchy']:
631
+ raise ValueError("%s not a recognized weighting function." % weight)
632
+
633
+ strdict = {'cos':1,'sin':2,'alg':1,'alg-loga':2,'alg-logb':3,'alg-log':4}
634
+
635
+ if weight in ['cos','sin']:
636
+ integr = strdict[weight]
637
+ if (b != np.inf and a != -np.inf): # finite limits
638
+ if wopts is None: # no precomputed Chebyshev moments
639
+ return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
640
+ epsabs, epsrel, limit, maxp1,1)
641
+ else: # precomputed Chebyshev moments
642
+ momcom = wopts[0]
643
+ chebcom = wopts[1]
644
+ return _quadpack._qawoe(func, a, b, wvar, integr, args,
645
+ full_output,epsabs, epsrel, limit, maxp1, 2,
646
+ momcom, chebcom)
647
+
648
+ elif (b == np.inf and a != -np.inf):
649
+ return _quadpack._qawfe(func, a, wvar, integr, args, full_output,
650
+ epsabs, limlst, limit, maxp1)
651
+ elif (b != np.inf and a == -np.inf): # remap function and interval
652
+ if weight == 'cos':
653
+ def thefunc(x,*myargs):
654
+ y = -x
655
+ func = myargs[0]
656
+ myargs = (y,) + myargs[1:]
657
+ return func(*myargs)
658
+ else:
659
+ def thefunc(x,*myargs):
660
+ y = -x
661
+ func = myargs[0]
662
+ myargs = (y,) + myargs[1:]
663
+ return -func(*myargs)
664
+ args = (func,) + args
665
+ return _quadpack._qawfe(thefunc, -b, wvar, integr, args,
666
+ full_output, epsabs, limlst, limit, maxp1)
667
+ else:
668
+ raise ValueError("Cannot integrate with this weight from -Inf to +Inf.")
669
+ else:
670
+ if a in [-np.inf, np.inf] or b in [-np.inf, np.inf]:
671
+ message = "Cannot integrate with this weight over an infinite interval."
672
+ raise ValueError(message)
673
+
674
+ if weight.startswith('alg'):
675
+ integr = strdict[weight]
676
+ return _quadpack._qawse(func, a, b, wvar, integr, args,
677
+ full_output, epsabs, epsrel, limit)
678
+ else: # weight == 'cauchy'
679
+ return _quadpack._qawce(func, a, b, wvar, args, full_output,
680
+ epsabs, epsrel, limit)
681
+
682
+
683
+ def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):
684
+ """
685
+ Compute a double integral.
686
+
687
+ Return the double (definite) integral of ``func(y, x)`` from ``x = a..b``
688
+ and ``y = gfun(x)..hfun(x)``.
689
+
690
+ Parameters
691
+ ----------
692
+ func : callable
693
+ A Python function or method of at least two variables: y must be the
694
+ first argument and x the second argument.
695
+ a, b : float
696
+ The limits of integration in x: `a` < `b`
697
+ gfun : callable or float
698
+ The lower boundary curve in y which is a function taking a single
699
+ floating point argument (x) and returning a floating point result
700
+ or a float indicating a constant boundary curve.
701
+ hfun : callable or float
702
+ The upper boundary curve in y (same requirements as `gfun`).
703
+ args : sequence, optional
704
+ Extra arguments to pass to `func`.
705
+ epsabs : float, optional
706
+ Absolute tolerance passed directly to the inner 1-D quadrature
707
+ integration. Default is 1.49e-8. ``dblquad`` tries to obtain
708
+ an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
709
+ where ``i`` = inner integral of ``func(y, x)`` from ``gfun(x)``
710
+ to ``hfun(x)``, and ``result`` is the numerical approximation.
711
+ See `epsrel` below.
712
+ epsrel : float, optional
713
+ Relative tolerance of the inner 1-D integrals. Default is 1.49e-8.
714
+ If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
715
+ and ``50 * (machine epsilon)``. See `epsabs` above.
716
+
717
+ Returns
718
+ -------
719
+ y : float
720
+ The resultant integral.
721
+ abserr : float
722
+ An estimate of the error.
723
+
724
+ See Also
725
+ --------
726
+ quad : single integral
727
+ tplquad : triple integral
728
+ nquad : N-dimensional integrals
729
+ fixed_quad : fixed-order Gaussian quadrature
730
+ quadrature : adaptive Gaussian quadrature
731
+ odeint : ODE integrator
732
+ ode : ODE integrator
733
+ simpson : integrator for sampled data
734
+ romb : integrator for sampled data
735
+ scipy.special : for coefficients and roots of orthogonal polynomials
736
+
737
+
738
+ Notes
739
+ -----
740
+ For valid results, the integral must converge; behavior for divergent
741
+ integrals is not guaranteed.
742
+
743
+ **Details of QUADPACK level routines**
744
+
745
+ `quad` calls routines from the FORTRAN library QUADPACK. This section
746
+ provides details on the conditions for each routine to be called and a
747
+ short description of each routine. For each level of integration, ``qagse``
748
+ is used for finite limits or ``qagie`` is used if either limit (or both!)
749
+ are infinite. The following provides a short description from [1]_ for each
750
+ routine.
751
+
752
+ qagse
753
+ is an integrator based on globally adaptive interval
754
+ subdivision in connection with extrapolation, which will
755
+ eliminate the effects of integrand singularities of
756
+ several types.
757
+ qagie
758
+ handles integration over infinite intervals. The infinite range is
759
+ mapped onto a finite interval and subsequently the same strategy as
760
+ in ``QAGS`` is applied.
761
+
762
+ References
763
+ ----------
764
+
765
+ .. [1] Piessens, Robert; de Doncker-Kapenga, Elise;
766
+ Überhuber, Christoph W.; Kahaner, David (1983).
767
+ QUADPACK: A subroutine package for automatic integration.
768
+ Springer-Verlag.
769
+ ISBN 978-3-540-12553-2.
770
+
771
+ Examples
772
+ --------
773
+ Compute the double integral of ``x * y**2`` over the box
774
+ ``x`` ranging from 0 to 2 and ``y`` ranging from 0 to 1.
775
+ That is, :math:`\\int^{x=2}_{x=0} \\int^{y=1}_{y=0} x y^2 \\,dy \\,dx`.
776
+
777
+ >>> import numpy as np
778
+ >>> from scipy import integrate
779
+ >>> f = lambda y, x: x*y**2
780
+ >>> integrate.dblquad(f, 0, 2, 0, 1)
781
+ (0.6666666666666667, 7.401486830834377e-15)
782
+
783
+ Calculate :math:`\\int^{x=\\pi/4}_{x=0} \\int^{y=\\cos(x)}_{y=\\sin(x)} 1
784
+ \\,dy \\,dx`.
785
+
786
+ >>> f = lambda y, x: 1
787
+ >>> integrate.dblquad(f, 0, np.pi/4, np.sin, np.cos)
788
+ (0.41421356237309503, 1.1083280054755938e-14)
789
+
790
+ Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=2-x}_{y=x} a x y \\,dy \\,dx`
791
+ for :math:`a=1, 3`.
792
+
793
+ >>> f = lambda y, x, a: a*x*y
794
+ >>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(1,))
795
+ (0.33333333333333337, 5.551115123125783e-15)
796
+ >>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(3,))
797
+ (0.9999999999999999, 1.6653345369377348e-14)
798
+
799
+ Compute the two-dimensional Gaussian Integral, which is the integral of the
800
+ Gaussian function :math:`f(x,y) = e^{-(x^{2} + y^{2})}`, over
801
+ :math:`(-\\infty,+\\infty)`. That is, compute the integral
802
+ :math:`\\iint^{+\\infty}_{-\\infty} e^{-(x^{2} + y^{2})} \\,dy\\,dx`.
803
+
804
+ >>> f = lambda x, y: np.exp(-(x ** 2 + y ** 2))
805
+ >>> integrate.dblquad(f, -np.inf, np.inf, -np.inf, np.inf)
806
+ (3.141592653589777, 2.5173086737433208e-08)
807
+
808
+ """
809
+
810
+ def temp_ranges(*args):
811
+ return [gfun(args[0]) if callable(gfun) else gfun,
812
+ hfun(args[0]) if callable(hfun) else hfun]
813
+
814
+ return nquad(func, [temp_ranges, [a, b]], args=args,
815
+ opts={"epsabs": epsabs, "epsrel": epsrel})
816
+
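Since `dblquad` merely wraps `nquad` through ``temp_ranges``, an equivalent call can be written against `nquad` directly; the constant bounds below are an illustrative assumption mirroring the docstring example.

from scipy import integrate

f = lambda y, x: x * y**2
res_dbl = integrate.dblquad(f, 0, 2, 0, 1)
# the callable plays the role of temp_ranges: it returns the y-bounds for a
# given x (constant here)
res_nq = integrate.nquad(f, [lambda x: [0.0, 1.0], [0, 2]])
# both evaluate to roughly (0.6666666666666667, ...)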
817
+
818
+ def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8,
819
+ epsrel=1.49e-8):
820
+ """
821
+ Compute a triple (definite) integral.
822
+
823
+ Return the triple integral of ``func(z, y, x)`` from ``x = a..b``,
824
+ ``y = gfun(x)..hfun(x)``, and ``z = qfun(x,y)..rfun(x,y)``.
825
+
826
+ Parameters
827
+ ----------
828
+ func : function
829
+ A Python function or method of at least three variables in the
830
+ order (z, y, x).
831
+ a, b : float
832
+ The limits of integration in x: `a` < `b`
833
+ gfun : function or float
834
+ The lower boundary curve in y which is a function taking a single
835
+ floating point argument (x) and returning a floating point result
836
+ or a float indicating a constant boundary curve.
837
+ hfun : function or float
838
+ The upper boundary curve in y (same requirements as `gfun`).
839
+ qfun : function or float
840
+ The lower boundary surface in z. It must be a function that takes
841
+ two floats in the order (x, y) and returns a float or a float
842
+ indicating a constant boundary surface.
843
+ rfun : function or float
844
+ The upper boundary surface in z. (Same requirements as `qfun`.)
845
+ args : tuple, optional
846
+ Extra arguments to pass to `func`.
847
+ epsabs : float, optional
848
+ Absolute tolerance passed directly to the innermost 1-D quadrature
849
+ integration. Default is 1.49e-8.
850
+ epsrel : float, optional
851
+ Relative tolerance of the innermost 1-D integrals. Default is 1.49e-8.
852
+
853
+ Returns
854
+ -------
855
+ y : float
856
+ The resultant integral.
857
+ abserr : float
858
+ An estimate of the error.
859
+
860
+ See Also
861
+ --------
862
+ quad : Adaptive quadrature using QUADPACK
863
+ quadrature : Adaptive Gaussian quadrature
864
+ fixed_quad : Fixed-order Gaussian quadrature
865
+ dblquad : Double integrals
866
+ nquad : N-dimensional integrals
867
+ romb : Integrators for sampled data
868
+ simpson : Integrators for sampled data
869
+ ode : ODE integrators
870
+ odeint : ODE integrators
871
+ scipy.special : For coefficients and roots of orthogonal polynomials
872
+
873
+ Notes
874
+ -----
875
+ For valid results, the integral must converge; behavior for divergent
876
+ integrals is not guaranteed.
877
+
878
+ **Details of QUADPACK level routines**
879
+
880
+ `quad` calls routines from the FORTRAN library QUADPACK. This section
881
+ provides details on the conditions for each routine to be called and a
882
+ short description of each routine. For each level of integration, ``qagse``
883
+ is used for finite limits or ``qagie`` is used, if either limit (or both!)
884
+ are infinite. The following provides a short description from [1]_ for each
885
+ routine.
886
+
887
+ qagse
888
+ is an integrator based on globally adaptive interval
889
+ subdivision in connection with extrapolation, which will
890
+ eliminate the effects of integrand singularities of
891
+ several types.
892
+ qagie
893
+ handles integration over infinite intervals. The infinite range is
894
+ mapped onto a finite interval and subsequently the same strategy as
895
+ in ``QAGS`` is applied.
896
+
897
+ References
898
+ ----------
899
+
900
+ .. [1] Piessens, Robert; de Doncker-Kapenga, Elise;
901
+ Überhuber, Christoph W.; Kahaner, David (1983).
902
+ QUADPACK: A subroutine package for automatic integration.
903
+ Springer-Verlag.
904
+ ISBN 978-3-540-12553-2.
905
+
906
+ Examples
907
+ --------
908
+ Compute the triple integral of ``x * y * z``, over ``x`` ranging
909
+ from 1 to 2, ``y`` ranging from 2 to 3, ``z`` ranging from 0 to 1.
910
+ That is, :math:`\\int^{x=2}_{x=1} \\int^{y=3}_{y=2} \\int^{z=1}_{z=0} x y z
911
+ \\,dz \\,dy \\,dx`.
912
+
913
+ >>> import numpy as np
914
+ >>> from scipy import integrate
915
+ >>> f = lambda z, y, x: x*y*z
916
+ >>> integrate.tplquad(f, 1, 2, 2, 3, 0, 1)
917
+ (1.8749999999999998, 3.3246447942574074e-14)
918
+
919
+ Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=1-2x}_{y=0}
920
+ \\int^{z=1-x-2y}_{z=0} x y z \\,dz \\,dy \\,dx`.
921
+ Note: `qfun`/`rfun` takes arguments in the order (x, y), even though ``f``
922
+ takes arguments in the order (z, y, x).
923
+
924
+ >>> f = lambda z, y, x: x*y*z
925
+ >>> integrate.tplquad(f, 0, 1, 0, lambda x: 1-2*x, 0, lambda x, y: 1-x-2*y)
926
+ (0.05416666666666668, 2.1774196738157757e-14)
927
+
928
+ Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=1}_{y=0} \\int^{z=1}_{z=0}
929
+ a x y z \\,dz \\,dy \\,dx` for :math:`a=1, 3`.
930
+
931
+ >>> f = lambda z, y, x, a: a*x*y*z
932
+ >>> integrate.tplquad(f, 0, 1, 0, 1, 0, 1, args=(1,))
933
+ (0.125, 5.527033708952211e-15)
934
+ >>> integrate.tplquad(f, 0, 1, 0, 1, 0, 1, args=(3,))
935
+ (0.375, 1.6581101126856635e-14)
936
+
937
+ Compute the three-dimensional Gaussian Integral, which is the integral of
938
+ the Gaussian function :math:`f(x,y,z) = e^{-(x^{2} + y^{2} + z^{2})}`, over
939
+ :math:`(-\\infty,+\\infty)`. That is, compute the integral
940
+ :math:`\\iiint^{+\\infty}_{-\\infty} e^{-(x^{2} + y^{2} + z^{2})} \\,dz
941
+ \\,dy\\,dx`.
942
+
943
+ >>> f = lambda x, y, z: np.exp(-(x ** 2 + y ** 2 + z ** 2))
944
+ >>> integrate.tplquad(f, -np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf)
945
+ (5.568327996830833, 4.4619078828029765e-08)
946
+
947
+ """
948
+ # f(z, y, x)
949
+ # qfun/rfun(x, y)
950
+ # gfun/hfun(x)
951
+ # nquad will hand (y, x, t0, ...) to ranges0
952
+ # nquad will hand (x, t0, ...) to ranges1
953
+ # Only qfun / rfun is different API...
954
+
955
+ def ranges0(*args):
956
+ return [qfun(args[1], args[0]) if callable(qfun) else qfun,
957
+ rfun(args[1], args[0]) if callable(rfun) else rfun]
958
+
959
+ def ranges1(*args):
960
+ return [gfun(args[0]) if callable(gfun) else gfun,
961
+ hfun(args[0]) if callable(hfun) else hfun]
962
+
963
+ ranges = [ranges0, ranges1, [a, b]]
964
+ return nquad(func, ranges, args=args,
965
+ opts={"epsabs": epsabs, "epsrel": epsrel})
966
+
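The comments above describe the argument-order swap between ``qfun``/``rfun`` (which take ``(x, y)``) and the innermost `nquad` range callable (which receives ``(y, x)``). A sketch of the same integral written both ways, reusing the docstring example:

from scipy import integrate

f = lambda z, y, x: x * y * z
res_tpl = integrate.tplquad(f, 0, 1, 0, lambda x: 1 - 2*x,
                            0, lambda x, y: 1 - x - 2*y)
# nquad hands (y, x) to the innermost range callable, hence the swapped
# argument order relative to rfun(x, y) above
res_nq = integrate.nquad(f, [lambda y, x: [0, 1 - x - 2*y],
                             lambda x: [0, 1 - 2*x],
                             [0, 1]])
# both give approximately 0.0541666...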
967
+
968
+ def nquad(func, ranges, args=None, opts=None, full_output=False):
969
+ r"""
970
+ Integration over multiple variables.
971
+
972
+ Wraps `quad` to enable integration over multiple variables.
973
+ Various options allow improved integration of discontinuous functions, as
974
+ well as the use of weighted integration, and generally finer control of the
975
+ integration process.
976
+
977
+ Parameters
978
+ ----------
979
+ func : {callable, scipy.LowLevelCallable}
980
+ The function to be integrated. Has arguments of ``x0, ... xn``,
981
+ ``t0, ... tm``, where integration is carried out over ``x0, ... xn``,
982
+ which must be floats, while ``t0, ... tm`` are extra arguments
983
+ passed in `args`.
984
+ Function signature should be ``func(x0, x1, ..., xn, t0, t1, ..., tm)``.
985
+ Integration is carried out in order. That is, integration over ``x0``
986
+ is the innermost integral, and ``xn`` is the outermost.
987
+
988
+ If the user desires improved integration performance, then `func` may
989
+ be a `scipy.LowLevelCallable` with one of the signatures::
990
+
991
+ double func(int n, double *xx)
992
+ double func(int n, double *xx, void *user_data)
993
+
994
+ where ``n`` is the number of variables and args. The ``xx`` array
995
+ contains the coordinates and extra arguments. ``user_data`` is the data
996
+ contained in the `scipy.LowLevelCallable`.
997
+ ranges : iterable object
998
+ Each element of ranges may be either a sequence of 2 numbers, or else
999
+ a callable that returns such a sequence. ``ranges[0]`` corresponds to
1000
+ integration over x0, and so on. If an element of ranges is a callable,
1001
+ then it will be called with all of the integration arguments available,
1002
+ as well as any parametric arguments. e.g., if
1003
+ ``func = f(x0, x1, x2, t0, t1)``, then ``ranges[0]`` may be defined as
1004
+ either ``(a, b)`` or else as ``(a, b) = range0(x1, x2, t0, t1)``.
1005
+ args : iterable object, optional
1006
+ Additional arguments ``t0, ... tn``, required by ``func``, ``ranges``,
1007
+ and ``opts``.
1008
+ opts : iterable object or dict, optional
1009
+ Options to be passed to `quad`. May be empty, a dict, or
1010
+ a sequence of dicts or functions that return a dict. If empty, the
1011
+ default options from scipy.integrate.quad are used. If a dict, the same
1012
+ options are used for all levels of integration. If a sequence, then each
1013
+ element of the sequence corresponds to a particular integration. e.g.,
1014
+ ``opts[0]`` corresponds to integration over ``x0``, and so on. If a
1015
+ callable, the signature must be the same as for ``ranges``. The
1016
+ available options together with their default values are:
1017
+
1018
+ - epsabs = 1.49e-08
1019
+ - epsrel = 1.49e-08
1020
+ - limit = 50
1021
+ - points = None
1022
+ - weight = None
1023
+ - wvar = None
1024
+ - wopts = None
1025
+
1026
+ For more information on these options, see `quad`.
1027
+
1028
+ full_output : bool, optional
1029
+ Partial implementation of ``full_output`` from scipy.integrate.quad.
1030
+ The number of integrand function evaluations ``neval`` can be obtained
1031
+ by setting ``full_output=True`` when calling nquad.
1032
+
1033
+ Returns
1034
+ -------
1035
+ result : float
1036
+ The result of the integration.
1037
+ abserr : float
1038
+ The maximum of the estimates of the absolute error in the various
1039
+ integration results.
1040
+ out_dict : dict, optional
1041
+ A dict containing additional information on the integration.
1042
+
1043
+ See Also
1044
+ --------
1045
+ quad : 1-D numerical integration
1046
+ dblquad, tplquad : double and triple integrals
1047
+ fixed_quad : fixed-order Gaussian quadrature
1048
+ quadrature : adaptive Gaussian quadrature
1049
+
1050
+ Notes
1051
+ -----
1052
+ For valid results, the integral must converge; behavior for divergent
1053
+ integrals is not guaranteed.
1054
+
1055
+ **Details of QUADPACK level routines**
1056
+
1057
+ `nquad` calls routines from the FORTRAN library QUADPACK. This section
1058
+ provides details on the conditions for each routine to be called and a
1059
+ short description of each routine. The routine called depends on
1060
+ `weight`, `points` and the integration limits `a` and `b`.
1061
+
1062
+ ================ ============== ========== =====================
1063
+ QUADPACK routine `weight` `points` infinite bounds
1064
+ ================ ============== ========== =====================
1065
+ qagse None No No
1066
+ qagie None No Yes
1067
+ qagpe None Yes No
1068
+ qawoe 'sin', 'cos' No No
1069
+ qawfe 'sin', 'cos' No either `a` or `b`
1070
+ qawse 'alg*' No No
1071
+ qawce 'cauchy' No No
1072
+ ================ ============== ========== =====================
1073
+
1074
+ The following provides a short description from [1]_ for each
1075
+ routine.
1076
+
1077
+ qagse
1078
+ is an integrator based on globally adaptive interval
1079
+ subdivision in connection with extrapolation, which will
1080
+ eliminate the effects of integrand singularities of
1081
+ several types.
1082
+ qagie
1083
+ handles integration over infinite intervals. The infinite range is
1084
+ mapped onto a finite interval and subsequently the same strategy as
1085
+ in ``QAGS`` is applied.
1086
+ qagpe
1087
+ serves the same purposes as QAGS, but also allows the
1088
+ user to provide explicit information about the location
1089
+ and type of trouble-spots i.e. the abscissae of internal
1090
+ singularities, discontinuities and other difficulties of
1091
+ the integrand function.
1092
+ qawoe
1093
+ is an integrator for the evaluation of
1094
+ :math:`\int^b_a \cos(\omega x)f(x)dx` or
1095
+ :math:`\int^b_a \sin(\omega x)f(x)dx`
1096
+ over a finite interval [a,b], where :math:`\omega` and :math:`f`
1097
+ are specified by the user. The rule evaluation component is based
1098
+ on the modified Clenshaw-Curtis technique
1099
+
1100
+ An adaptive subdivision scheme is used in connection
1101
+ with an extrapolation procedure, which is a modification
1102
+ of that in ``QAGS`` and allows the algorithm to deal with
1103
+ singularities in :math:`f(x)`.
1104
+ qawfe
1105
+ calculates the Fourier transform
1106
+ :math:`\int^\infty_a \cos(\omega x)f(x)dx` or
1107
+ :math:`\int^\infty_a \sin(\omega x)f(x)dx`
1108
+ for user-provided :math:`\omega` and :math:`f`. The procedure of
1109
+ ``QAWO`` is applied on successive finite intervals, and convergence
1110
+ acceleration by means of the :math:`\varepsilon`-algorithm is applied
1111
+ to the series of integral approximations.
1112
+ qawse
1113
+ approximate :math:`\int^b_a w(x)f(x)dx`, with :math:`a < b` where
1114
+ :math:`w(x) = (x-a)^{\alpha}(b-x)^{\beta}v(x)` with
1115
+ :math:`\alpha,\beta > -1`, where :math:`v(x)` may be one of the
1116
+ following functions: :math:`1`, :math:`\log(x-a)`, :math:`\log(b-x)`,
1117
+ :math:`\log(x-a)\log(b-x)`.
1118
+
1119
+ The user specifies :math:`\alpha`, :math:`\beta` and the type of the
1120
+ function :math:`v`. A globally adaptive subdivision strategy is
1121
+ applied, with modified Clenshaw-Curtis integration on those
1122
+ subintervals which contain `a` or `b`.
1123
+ qawce
1124
+ compute :math:`\int^b_a f(x) / (x-c)dx` where the integral must be
1125
+ interpreted as a Cauchy principal value integral, for user specified
1126
+ :math:`c` and :math:`f`. The strategy is globally adaptive. Modified
1127
+ Clenshaw-Curtis integration is used on those intervals containing the
1128
+ point :math:`x = c`.
1129
+
1130
+ References
1131
+ ----------
1132
+
1133
+ .. [1] Piessens, Robert; de Doncker-Kapenga, Elise;
1134
+ Überhuber, Christoph W.; Kahaner, David (1983).
1135
+ QUADPACK: A subroutine package for automatic integration.
1136
+ Springer-Verlag.
1137
+ ISBN 978-3-540-12553-2.
1138
+
1139
+ Examples
1140
+ --------
1141
+ Calculate
1142
+
1143
+ .. math::
1144
+
1145
+ \int^{1}_{-0.15} \int^{0.8}_{0.13} \int^{1}_{-1} \int^{1}_{0}
1146
+ f(x_0, x_1, x_2, x_3) \,dx_0 \,dx_1 \,dx_2 \,dx_3 ,
1147
+
1148
+ where
1149
+
1150
+ .. math::
1151
+
1152
+ f(x_0, x_1, x_2, x_3) = \begin{cases}
1153
+ x_0^2+x_1 x_2-x_3^3+ \sin{x_0}+1 & (x_0-0.2 x_3-0.5-0.25 x_1 > 0) \\
1154
+ x_0^2+x_1 x_2-x_3^3+ \sin{x_0}+0 & (x_0-0.2 x_3-0.5-0.25 x_1 \leq 0)
1155
+ \end{cases} .
1156
+
1157
+ >>> import numpy as np
1158
+ >>> from scipy import integrate
1159
+ >>> func = lambda x0,x1,x2,x3 : x0**2 + x1*x2 - x3**3 + np.sin(x0) + (
1160
+ ... 1 if (x0-.2*x3-.5-.25*x1>0) else 0)
1161
+ >>> def opts0(*args, **kwargs):
1162
+ ... return {'points':[0.2*args[2] + 0.5 + 0.25*args[0]]}
1163
+ >>> integrate.nquad(func, [[0,1], [-1,1], [.13,.8], [-.15,1]],
1164
+ ... opts=[opts0,{},{},{}], full_output=True)
1165
+ (1.5267454070738633, 2.9437360001402324e-14, {'neval': 388962})
1166
+
1167
+ Calculate
1168
+
1169
+ .. math::
1170
+
1171
+ \int^{t_0+t_1+1}_{t_0+t_1-1}
1172
+ \int^{x_2+t_0^2 t_1^3+1}_{x_2+t_0^2 t_1^3-1}
1173
+ \int^{t_0 x_1+t_1 x_2+1}_{t_0 x_1+t_1 x_2-1}
1174
+ f(x_0,x_1, x_2,t_0,t_1)
1175
+ \,dx_0 \,dx_1 \,dx_2,
1176
+
1177
+ where
1178
+
1179
+ .. math::
1180
+
1181
+ f(x_0, x_1, x_2, t_0, t_1) = \begin{cases}
1182
+ x_0 x_2^2 + \sin{x_1}+2 & (x_0+t_1 x_1-t_0 > 0) \\
1183
+ x_0 x_2^2 +\sin{x_1}+1 & (x_0+t_1 x_1-t_0 \leq 0)
1184
+ \end{cases}
1185
+
1186
+ and :math:`(t_0, t_1) = (0, 1)` .
1187
+
1188
+ >>> def func2(x0, x1, x2, t0, t1):
1189
+ ... return x0*x2**2 + np.sin(x1) + 1 + (1 if x0+t1*x1-t0>0 else 0)
1190
+ >>> def lim0(x1, x2, t0, t1):
1191
+ ... return [t0*x1 + t1*x2 - 1, t0*x1 + t1*x2 + 1]
1192
+ >>> def lim1(x2, t0, t1):
1193
+ ... return [x2 + t0**2*t1**3 - 1, x2 + t0**2*t1**3 + 1]
1194
+ >>> def lim2(t0, t1):
1195
+ ... return [t0 + t1 - 1, t0 + t1 + 1]
1196
+ >>> def opts0(x1, x2, t0, t1):
1197
+ ... return {'points' : [t0 - t1*x1]}
1198
+ >>> def opts1(x2, t0, t1):
1199
+ ... return {}
1200
+ >>> def opts2(t0, t1):
1201
+ ... return {}
1202
+ >>> integrate.nquad(func2, [lim0, lim1, lim2], args=(0,1),
1203
+ ... opts=[opts0, opts1, opts2])
1204
+ (36.099919226771625, 1.8546948553373528e-07)
1205
+
1206
+ """
1207
+ depth = len(ranges)
1208
+ ranges = [rng if callable(rng) else _RangeFunc(rng) for rng in ranges]
1209
+ if args is None:
1210
+ args = ()
1211
+ if opts is None:
1212
+ opts = [dict([])] * depth
1213
+
1214
+ if isinstance(opts, dict):
1215
+ opts = [_OptFunc(opts)] * depth
1216
+ else:
1217
+ opts = [opt if callable(opt) else _OptFunc(opt) for opt in opts]
1218
+ return _NQuad(func, ranges, opts, full_output).integrate(*args)
1219
+
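When `opts` is a plain dict it is wrapped in ``_OptFunc`` and replicated for every integration level, as the ``isinstance(opts, dict)`` branch above shows. A small sketch (the tolerance value is an arbitrary illustration):

import numpy as np
from scipy import integrate

f = lambda y, x: np.exp(-(x**2 + y**2))
# the single dict applies to both the inner and the outer integral
res, err = integrate.nquad(f, [[-np.inf, np.inf], [-np.inf, np.inf]],
                           opts={'epsabs': 1e-10})
# res is approximately pi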
1220
+
1221
+ class _RangeFunc:
1222
+ def __init__(self, range_):
1223
+ self.range_ = range_
1224
+
1225
+ def __call__(self, *args):
1226
+ """Return stored value.
1227
+
1228
+ *args needed because range_ can be float or func, and is called with
1229
+ variable number of parameters.
1230
+ """
1231
+ return self.range_
1232
+
1233
+
1234
+ class _OptFunc:
1235
+ def __init__(self, opt):
1236
+ self.opt = opt
1237
+
1238
+ def __call__(self, *args):
1239
+ """Return stored dict."""
1240
+ return self.opt
1241
+
1242
+
1243
+ class _NQuad:
1244
+ def __init__(self, func, ranges, opts, full_output):
1245
+ self.abserr = 0
1246
+ self.func = func
1247
+ self.ranges = ranges
1248
+ self.opts = opts
1249
+ self.maxdepth = len(ranges)
1250
+ self.full_output = full_output
1251
+ if self.full_output:
1252
+ self.out_dict = {'neval': 0}
1253
+
1254
+ def integrate(self, *args, **kwargs):
1255
+ depth = kwargs.pop('depth', 0)
1256
+ if kwargs:
1257
+ raise ValueError('unexpected kwargs')
1258
+
1259
+ # Get the integration range and options for this depth.
1260
+ ind = -(depth + 1)
1261
+ fn_range = self.ranges[ind]
1262
+ low, high = fn_range(*args)
1263
+ fn_opt = self.opts[ind]
1264
+ opt = dict(fn_opt(*args))
1265
+
1266
+ if 'points' in opt:
1267
+ opt['points'] = [x for x in opt['points'] if low <= x <= high]
1268
+ if depth + 1 == self.maxdepth:
1269
+ f = self.func
1270
+ else:
1271
+ f = partial(self.integrate, depth=depth+1)
1272
+ quad_r = quad(f, low, high, args=args, full_output=self.full_output,
1273
+ **opt)
1274
+ value = quad_r[0]
1275
+ abserr = quad_r[1]
1276
+ if self.full_output:
1277
+ infodict = quad_r[2]
1278
+ # The 'neval' parameter in full_output returns the total
1279
+ # number of times the integrand function was evaluated.
1280
+ # Therefore, only the innermost integration loop counts.
1281
+ if depth + 1 == self.maxdepth:
1282
+ self.out_dict['neval'] += infodict['neval']
1283
+ self.abserr = max(self.abserr, abserr)
1284
+ if depth > 0:
1285
+ return value
1286
+ else:
1287
+ # Final result of N-D integration with error
1288
+ if self.full_output:
1289
+ return value, self.abserr, self.out_dict
1290
+ else:
1291
+ return value, self.abserr
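As the bookkeeping in ``_NQuad.integrate`` shows, ``neval`` is accumulated only at the innermost depth, so it reports integrand evaluations rather than calls at every nesting level. A usage sketch:

from scipy import integrate

f = lambda y, x: x * y
res, err, info = integrate.nquad(f, [[0, 1], [0, 1]], full_output=True)
print(res, info['neval'])   # ~0.25 and the innermost evaluation count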
venv/lib/python3.10/site-packages/scipy/integrate/_quadrature.py ADDED
@@ -0,0 +1,1830 @@
1
+ from __future__ import annotations
2
+ from typing import TYPE_CHECKING, Callable, Any, cast
3
+ import numpy as np
4
+ import numpy.typing as npt
5
+ import math
6
+ import warnings
7
+ from collections import namedtuple
8
+
9
+ from scipy.special import roots_legendre
10
+ from scipy.special import gammaln, logsumexp
11
+ from scipy._lib._util import _rng_spawn
12
+ from scipy._lib.deprecation import (_NoValue, _deprecate_positional_args,
13
+ _deprecated)
14
+
15
+
16
+ __all__ = ['fixed_quad', 'quadrature', 'romberg', 'romb',
17
+ 'trapezoid', 'trapz', 'simps', 'simpson',
18
+ 'cumulative_trapezoid', 'cumtrapz', 'newton_cotes',
19
+ 'qmc_quad', 'AccuracyWarning', 'cumulative_simpson']
20
+
21
+
22
+ def trapezoid(y, x=None, dx=1.0, axis=-1):
23
+ r"""
24
+ Integrate along the given axis using the composite trapezoidal rule.
25
+
26
+ If `x` is provided, the integration happens in sequence along its
27
+ elements - they are not sorted.
28
+
29
+ Integrate `y` (`x`) along each 1d slice on the given axis, compute
30
+ :math:`\int y(x) dx`.
31
+ When `x` is specified, this integrates along the parametric curve,
32
+ computing :math:`\int_t y(t) dt =
33
+ \int_t y(t) \left.\frac{dx}{dt}\right|_{x=x(t)} dt`.
34
+
35
+ Parameters
36
+ ----------
37
+ y : array_like
38
+ Input array to integrate.
39
+ x : array_like, optional
40
+ The sample points corresponding to the `y` values. If `x` is None,
41
+ the sample points are assumed to be evenly spaced `dx` apart. The
42
+ default is None.
43
+ dx : scalar, optional
44
+ The spacing between sample points when `x` is None. The default is 1.
45
+ axis : int, optional
46
+ The axis along which to integrate.
47
+
48
+ Returns
49
+ -------
50
+ trapezoid : float or ndarray
51
+ Definite integral of `y` = n-dimensional array as approximated along
52
+ a single axis by the trapezoidal rule. If `y` is a 1-dimensional array,
53
+ then the result is a float. If `n` is greater than 1, then the result
54
+ is an `n`-1 dimensional array.
55
+
56
+ See Also
57
+ --------
58
+ cumulative_trapezoid, simpson, romb
59
+
60
+ Notes
61
+ -----
62
+ Image [2]_ illustrates the trapezoidal rule -- y-axis locations of points
63
+ are taken from the `y` array; by default the x-axis distances between
64
+ points are 1.0, but they can alternatively be provided with the `x` array
65
+ or the `dx` scalar. The return value equals the combined area under
66
+ the red lines.
67
+
68
+ References
69
+ ----------
70
+ .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule
71
+
72
+ .. [2] Illustration image:
73
+ https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
74
+
75
+ Examples
76
+ --------
77
+ Use the trapezoidal rule on evenly spaced points:
78
+
79
+ >>> import numpy as np
80
+ >>> from scipy import integrate
81
+ >>> integrate.trapezoid([1, 2, 3])
82
+ 4.0
83
+
84
+ The spacing between sample points can be selected by either the
85
+ ``x`` or ``dx`` arguments:
86
+
87
+ >>> integrate.trapezoid([1, 2, 3], x=[4, 6, 8])
88
+ 8.0
89
+ >>> integrate.trapezoid([1, 2, 3], dx=2)
90
+ 8.0
91
+
92
+ Using a decreasing ``x`` corresponds to integrating in reverse:
93
+
94
+ >>> integrate.trapezoid([1, 2, 3], x=[8, 6, 4])
95
+ -8.0
96
+
97
+ More generally ``x`` is used to integrate along a parametric curve. We can
98
+ estimate the integral :math:`\int_0^1 x^2 = 1/3` using:
99
+
100
+ >>> x = np.linspace(0, 1, num=50)
101
+ >>> y = x**2
102
+ >>> integrate.trapezoid(y, x)
103
+ 0.33340274885464394
104
+
105
+ Or estimate the area of a circle, noting we repeat the sample which closes
106
+ the curve:
107
+
108
+ >>> theta = np.linspace(0, 2 * np.pi, num=1000, endpoint=True)
109
+ >>> integrate.trapezoid(np.cos(theta), x=np.sin(theta))
110
+ 3.141571941375841
111
+
112
+ ``trapezoid`` can be applied along a specified axis to do multiple
113
+ computations in one call:
114
+
115
+ >>> a = np.arange(6).reshape(2, 3)
116
+ >>> a
117
+ array([[0, 1, 2],
118
+ [3, 4, 5]])
119
+ >>> integrate.trapezoid(a, axis=0)
120
+ array([1.5, 2.5, 3.5])
121
+ >>> integrate.trapezoid(a, axis=1)
122
+ array([2., 8.])
123
+ """
124
+ y = np.asanyarray(y)
125
+ if x is None:
126
+ d = dx
127
+ else:
128
+ x = np.asanyarray(x)
129
+ if x.ndim == 1:
130
+ d = np.diff(x)
131
+ # reshape to correct shape
132
+ shape = [1]*y.ndim
133
+ shape[axis] = d.shape[0]
134
+ d = d.reshape(shape)
135
+ else:
136
+ d = np.diff(x, axis=axis)
137
+ nd = y.ndim
138
+ slice1 = [slice(None)]*nd
139
+ slice2 = [slice(None)]*nd
140
+ slice1[axis] = slice(1, None)
141
+ slice2[axis] = slice(None, -1)
142
+ try:
143
+ ret = (d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0).sum(axis)
144
+ except ValueError:
145
+ # Operations didn't work, cast to ndarray
146
+ d = np.asarray(d)
147
+ y = np.asarray(y)
148
+ ret = np.add.reduce(d * (y[tuple(slice1)]+y[tuple(slice2)])/2.0, axis)
149
+ return ret
150
+
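When `x` has the same number of dimensions as `y`, the ``else`` branch above differences it along `axis` directly, so each slice is integrated along its own parametric coordinate. A short sketch with assumed sample values:

import numpy as np
from scipy import integrate

x = np.array([[0.0, 1.0, 3.0],
              [0.0, 2.0, 4.0]])
y = x**2
# row-wise trapezoid integrals against each row's own x
integrate.trapezoid(y, x=x, axis=1)   # -> array([10.5, 24. ])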
151
+
152
+ # Note: alias kept for backwards compatibility. Rename was done
153
+ # because trapz is a slur in colloquial English (see gh-12924).
154
+ def trapz(y, x=None, dx=1.0, axis=-1):
155
+ """An alias of `trapezoid`.
156
+
157
+ `trapz` is kept for backwards compatibility. For new code, prefer
158
+ `trapezoid` instead.
159
+ """
160
+ msg = ("'scipy.integrate.trapz' is deprecated in favour of "
161
+ "'scipy.integrate.trapezoid' and will be removed in SciPy 1.14.0")
162
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
163
+ return trapezoid(y, x=x, dx=dx, axis=axis)
164
+
165
+
166
+ class AccuracyWarning(Warning):
167
+ pass
168
+
169
+
170
+ if TYPE_CHECKING:
171
+ # workaround for mypy function attributes see:
172
+ # https://github.com/python/mypy/issues/2087#issuecomment-462726600
173
+ from typing import Protocol
174
+
175
+ class CacheAttributes(Protocol):
176
+ cache: dict[int, tuple[Any, Any]]
177
+ else:
178
+ CacheAttributes = Callable
179
+
180
+
181
+ def cache_decorator(func: Callable) -> CacheAttributes:
182
+ return cast(CacheAttributes, func)
183
+
184
+
185
+ @cache_decorator
186
+ def _cached_roots_legendre(n):
187
+ """
188
+ Cache roots_legendre results to speed up calls of the fixed_quad
189
+ function.
190
+ """
191
+ if n in _cached_roots_legendre.cache:
192
+ return _cached_roots_legendre.cache[n]
193
+
194
+ _cached_roots_legendre.cache[n] = roots_legendre(n)
195
+ return _cached_roots_legendre.cache[n]
196
+
197
+
198
+ _cached_roots_legendre.cache = dict()
199
+
200
+
201
+ def fixed_quad(func, a, b, args=(), n=5):
202
+ """
203
+ Compute a definite integral using fixed-order Gaussian quadrature.
204
+
205
+ Integrate `func` from `a` to `b` using Gaussian quadrature of
206
+ order `n`.
207
+
208
+ Parameters
209
+ ----------
210
+ func : callable
211
+ A Python function or method to integrate (must accept vector inputs).
212
+ If integrating a vector-valued function, the returned array must have
213
+ shape ``(..., len(x))``.
214
+ a : float
215
+ Lower limit of integration.
216
+ b : float
217
+ Upper limit of integration.
218
+ args : tuple, optional
219
+ Extra arguments to pass to function, if any.
220
+ n : int, optional
221
+ Order of quadrature integration. Default is 5.
222
+
223
+ Returns
224
+ -------
225
+ val : float
226
+ Gaussian quadrature approximation to the integral
227
+ none : None
228
+ Statically returned value of None
229
+
230
+ See Also
231
+ --------
232
+ quad : adaptive quadrature using QUADPACK
233
+ dblquad : double integrals
234
+ tplquad : triple integrals
235
+ romberg : adaptive Romberg quadrature
236
+ quadrature : adaptive Gaussian quadrature
237
+ romb : integrators for sampled data
238
+ simpson : integrators for sampled data
239
+ cumulative_trapezoid : cumulative integration for sampled data
240
+ ode : ODE integrator
241
+ odeint : ODE integrator
242
+
243
+ Examples
244
+ --------
245
+ >>> from scipy import integrate
246
+ >>> import numpy as np
247
+ >>> f = lambda x: x**8
248
+ >>> integrate.fixed_quad(f, 0.0, 1.0, n=4)
249
+ (0.1110884353741496, None)
250
+ >>> integrate.fixed_quad(f, 0.0, 1.0, n=5)
251
+ (0.11111111111111102, None)
252
+ >>> print(1/9.0) # analytical result
253
+ 0.1111111111111111
254
+
255
+ >>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=4)
256
+ (0.9999999771971152, None)
257
+ >>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=5)
258
+ (1.000000000039565, None)
259
+ >>> np.sin(np.pi/2)-np.sin(0) # analytical result
260
+ 1.0
261
+
262
+ """
263
+ x, w = _cached_roots_legendre(n)
264
+ x = np.real(x)
265
+ if np.isinf(a) or np.isinf(b):
266
+ raise ValueError("Gaussian quadrature is only available for "
267
+ "finite limits.")
268
+ y = (b-a)*(x+1)/2.0 + a
269
+ return (b-a)/2.0 * np.sum(w*func(y, *args), axis=-1), None
270
+
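The node mapping ``y = (b-a)*(x+1)/2 + a`` above rescales the Gauss-Legendre nodes from ``[-1, 1]`` to ``[a, b]``; doing the same by hand reproduces `fixed_quad`:

import numpy as np
from scipy.special import roots_legendre
from scipy import integrate

a, b, n = 0.0, 1.0, 5
x, w = roots_legendre(n)
y = (b - a) * (x + 1) / 2.0 + a                  # nodes mapped to [a, b]
manual = (b - a) / 2.0 * np.sum(w * y**8)        # weighted sum, as above
ref, _ = integrate.fixed_quad(lambda t: t**8, a, b, n=n)
assert np.isclose(manual, ref)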
271
+
272
+ def vectorize1(func, args=(), vec_func=False):
273
+ """Vectorize the call to a function.
274
+
275
+ This is an internal utility function used by `romberg` and
276
+ `quadrature` to create a vectorized version of a function.
277
+
278
+ If `vec_func` is True, the function `func` is assumed to take vector
279
+ arguments.
280
+
281
+ Parameters
282
+ ----------
283
+ func : callable
284
+ User defined function.
285
+ args : tuple, optional
286
+ Extra arguments for the function.
287
+ vec_func : bool, optional
288
+ True if the function func takes vector arguments.
289
+
290
+ Returns
291
+ -------
292
+ vfunc : callable
293
+ A function that will take a vector argument and return the
294
+ result.
295
+
296
+ """
297
+ if vec_func:
298
+ def vfunc(x):
299
+ return func(x, *args)
300
+ else:
301
+ def vfunc(x):
302
+ if np.isscalar(x):
303
+ return func(x, *args)
304
+ x = np.asarray(x)
305
+ # call with first point to get output type
306
+ y0 = func(x[0], *args)
307
+ n = len(x)
308
+ dtype = getattr(y0, 'dtype', type(y0))
309
+ output = np.empty((n,), dtype=dtype)
310
+ output[0] = y0
311
+ for i in range(1, n):
312
+ output[i] = func(x[i], *args)
313
+ return output
314
+ return vfunc
315
+
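A sketch of the ``vec_func=False`` fallback, assuming the `vectorize1` defined above is in scope (the function and values below are purely illustrative):

import numpy as np

def scalar_only(x):
    # pretend this function cannot accept array arguments
    return float(x)**2

vf = vectorize1(scalar_only, vec_func=False)
vf(np.array([1.0, 2.0, 3.0]))   # evaluated point by point -> array([1., 4., 9.])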
316
+
317
+ @_deprecated("`scipy.integrate.quadrature` is deprecated as of SciPy 1.12.0 "
318
+ "and will be removed in SciPy 1.15.0. Please use "
319
+ "`scipy.integrate.quad` instead.")
320
+ def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
321
+ vec_func=True, miniter=1):
322
+ """
323
+ Compute a definite integral using fixed-tolerance Gaussian quadrature.
324
+
325
+ .. deprecated:: 1.12.0
326
+
327
+ This function is deprecated as of SciPy 1.12.0 and will be removed
328
+ in SciPy 1.15.0. Please use `scipy.integrate.quad` instead.
329
+
330
+ Integrate `func` from `a` to `b` using Gaussian quadrature
331
+ with absolute tolerance `tol`.
332
+
333
+ Parameters
334
+ ----------
335
+ func : function
336
+ A Python function or method to integrate.
337
+ a : float
338
+ Lower limit of integration.
339
+ b : float
340
+ Upper limit of integration.
341
+ args : tuple, optional
342
+ Extra arguments to pass to function.
343
+ tol, rtol : float, optional
344
+ Iteration stops when error between last two iterates is less than
345
+ `tol` OR the relative change is less than `rtol`.
346
+ maxiter : int, optional
347
+ Maximum order of Gaussian quadrature.
348
+ vec_func : bool, optional
349
+ True or False if func handles arrays as arguments (is
350
+ a "vector" function). Default is True.
351
+ miniter : int, optional
352
+ Minimum order of Gaussian quadrature.
353
+
354
+ Returns
355
+ -------
356
+ val : float
357
+ Gaussian quadrature approximation (within tolerance) to integral.
358
+ err : float
359
+ Difference between last two estimates of the integral.
360
+
361
+ See Also
362
+ --------
363
+ romberg : adaptive Romberg quadrature
364
+ fixed_quad : fixed-order Gaussian quadrature
365
+ quad : adaptive quadrature using QUADPACK
366
+ dblquad : double integrals
367
+ tplquad : triple integrals
368
+ romb : integrator for sampled data
369
+ simpson : integrator for sampled data
370
+ cumulative_trapezoid : cumulative integration for sampled data
371
+ ode : ODE integrator
372
+ odeint : ODE integrator
373
+
374
+ Examples
375
+ --------
376
+ >>> from scipy import integrate
377
+ >>> import numpy as np
378
+ >>> f = lambda x: x**8
379
+ >>> integrate.quadrature(f, 0.0, 1.0)
380
+ (0.11111111111111106, 4.163336342344337e-17)
381
+ >>> print(1/9.0) # analytical result
382
+ 0.1111111111111111
383
+
384
+ >>> integrate.quadrature(np.cos, 0.0, np.pi/2)
385
+ (0.9999999999999536, 3.9611425250996035e-11)
386
+ >>> np.sin(np.pi/2)-np.sin(0) # analytical result
387
+ 1.0
388
+
389
+ """
390
+ if not isinstance(args, tuple):
391
+ args = (args,)
392
+ vfunc = vectorize1(func, args, vec_func=vec_func)
393
+ val = np.inf
394
+ err = np.inf
395
+ maxiter = max(miniter+1, maxiter)
396
+ for n in range(miniter, maxiter+1):
397
+ newval = fixed_quad(vfunc, a, b, (), n)[0]
398
+ err = abs(newval-val)
399
+ val = newval
400
+
401
+ if err < tol or err < rtol*abs(val):
402
+ break
403
+ else:
404
+ warnings.warn(
405
+ "maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
406
+ AccuracyWarning, stacklevel=2
407
+ )
408
+ return val, err
409
+
410
+
411
+ def tupleset(t, i, value):
412
+ l = list(t)
413
+ l[i] = value
414
+ return tuple(l)
415
+
416
+
417
+ # Note: alias kept for backwards compatibility. Rename was done
418
+ # because cumtrapz is a slur in colloquial English (see gh-12924).
419
+ def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
420
+ """An alias of `cumulative_trapezoid`.
421
+
422
+ `cumtrapz` is kept for backwards compatibility. For new code, prefer
423
+ `cumulative_trapezoid` instead.
424
+ """
425
+ msg = ("'scipy.integrate.cumtrapz' is deprecated in favour of "
426
+ "'scipy.integrate.cumulative_trapezoid' and will be removed "
427
+ "in SciPy 1.14.0")
428
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
429
+ return cumulative_trapezoid(y, x=x, dx=dx, axis=axis, initial=initial)
430
+
431
+
432
+ def cumulative_trapezoid(y, x=None, dx=1.0, axis=-1, initial=None):
433
+ """
434
+ Cumulatively integrate y(x) using the composite trapezoidal rule.
435
+
436
+ Parameters
437
+ ----------
438
+ y : array_like
439
+ Values to integrate.
440
+ x : array_like, optional
441
+ The coordinate to integrate along. If None (default), use spacing `dx`
442
+ between consecutive elements in `y`.
443
+ dx : float, optional
444
+ Spacing between elements of `y`. Only used if `x` is None.
445
+ axis : int, optional
446
+ Specifies the axis to cumulate. Default is -1 (last axis).
447
+ initial : scalar, optional
448
+ If given, insert this value at the beginning of the returned result.
449
+ 0 or None are the only values accepted. Default is None, which means
450
+ `res` has one element less than `y` along the axis of integration.
451
+
452
+ .. deprecated:: 1.12.0
453
+ The option for non-zero inputs for `initial` will be deprecated in
454
+ SciPy 1.15.0. After this time, a ValueError will be raised if
455
+ `initial` is not None or 0.
456
+
457
+ Returns
458
+ -------
459
+ res : ndarray
460
+ The result of cumulative integration of `y` along `axis`.
461
+ If `initial` is None, the shape is such that the axis of integration
462
+ has one less value than `y`. If `initial` is given, the shape is equal
463
+ to that of `y`.
464
+
465
+ See Also
466
+ --------
467
+ numpy.cumsum, numpy.cumprod
468
+ cumulative_simpson : cumulative integration using Simpson's 1/3 rule
469
+ quad : adaptive quadrature using QUADPACK
470
+ romberg : adaptive Romberg quadrature
471
+ quadrature : adaptive Gaussian quadrature
472
+ fixed_quad : fixed-order Gaussian quadrature
473
+ dblquad : double integrals
474
+ tplquad : triple integrals
475
+ romb : integrators for sampled data
476
+ ode : ODE integrators
477
+ odeint : ODE integrators
478
+
479
+ Examples
480
+ --------
481
+ >>> from scipy import integrate
482
+ >>> import numpy as np
483
+ >>> import matplotlib.pyplot as plt
484
+
485
+ >>> x = np.linspace(-2, 2, num=20)
486
+ >>> y = x
487
+ >>> y_int = integrate.cumulative_trapezoid(y, x, initial=0)
488
+ >>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
489
+ >>> plt.show()
490
+
491
+ """
492
+ y = np.asarray(y)
493
+ if y.shape[axis] == 0:
494
+ raise ValueError("At least one point is required along `axis`.")
495
+ if x is None:
496
+ d = dx
497
+ else:
498
+ x = np.asarray(x)
499
+ if x.ndim == 1:
500
+ d = np.diff(x)
501
+ # reshape to correct shape
502
+ shape = [1] * y.ndim
503
+ shape[axis] = -1
504
+ d = d.reshape(shape)
505
+ elif len(x.shape) != len(y.shape):
506
+ raise ValueError("If given, shape of x must be 1-D or the "
507
+ "same as y.")
508
+ else:
509
+ d = np.diff(x, axis=axis)
510
+
511
+ if d.shape[axis] != y.shape[axis] - 1:
512
+ raise ValueError("If given, length of x along axis must be the "
513
+ "same as y.")
514
+
515
+ nd = len(y.shape)
516
+ slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
517
+ slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
518
+ res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis)
519
+
520
+ if initial is not None:
521
+ if initial != 0:
522
+ warnings.warn(
523
+ "The option for values for `initial` other than None or 0 is "
524
+ "deprecated as of SciPy 1.12.0 and will raise a value error in"
525
+ " SciPy 1.15.0.",
526
+ DeprecationWarning, stacklevel=2
527
+ )
528
+ if not np.isscalar(initial):
529
+ raise ValueError("`initial` parameter should be a scalar.")
530
+
531
+ shape = list(res.shape)
532
+ shape[axis] = 1
533
+ res = np.concatenate([np.full(shape, initial, dtype=res.dtype), res],
534
+ axis=axis)
535
+
536
+ return res
537
+
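Because the cumulative sum above ends with the full composite sum, the last entry of `cumulative_trapezoid` matches `trapezoid` on the same samples:

import numpy as np
from scipy import integrate

x = np.linspace(0, 2, 21)
y = x**2
c = integrate.cumulative_trapezoid(y, x, initial=0)
assert np.isclose(c[-1], integrate.trapezoid(y, x))   # running total ends at the full integral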
538
+
539
+ def _basic_simpson(y, start, stop, x, dx, axis):
540
+ nd = len(y.shape)
541
+ if start is None:
542
+ start = 0
543
+ step = 2
544
+ slice_all = (slice(None),)*nd
545
+ slice0 = tupleset(slice_all, axis, slice(start, stop, step))
546
+ slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
547
+ slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))
548
+
549
+ if x is None: # Even-spaced Simpson's rule.
550
+ result = np.sum(y[slice0] + 4.0*y[slice1] + y[slice2], axis=axis)
551
+ result *= dx / 3.0
552
+ else:
553
+ # Account for possibly different spacings.
554
+ # Simpson's rule changes a bit.
555
+ h = np.diff(x, axis=axis)
556
+ sl0 = tupleset(slice_all, axis, slice(start, stop, step))
557
+ sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
558
+ h0 = h[sl0].astype(float, copy=False)
559
+ h1 = h[sl1].astype(float, copy=False)
560
+ hsum = h0 + h1
561
+ hprod = h0 * h1
562
+ h0divh1 = np.true_divide(h0, h1, out=np.zeros_like(h0), where=h1 != 0)
563
+ tmp = hsum/6.0 * (y[slice0] *
564
+ (2.0 - np.true_divide(1.0, h0divh1,
565
+ out=np.zeros_like(h0divh1),
566
+ where=h0divh1 != 0)) +
567
+ y[slice1] * (hsum *
568
+ np.true_divide(hsum, hprod,
569
+ out=np.zeros_like(hsum),
570
+ where=hprod != 0)) +
571
+ y[slice2] * (2.0 - h0divh1))
572
+ result = np.sum(tmp, axis=axis)
573
+ return result
574
+
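The unequal-spacing branch above fits a parabola through each triple of points, so `simpson` remains exact for quadratics even on non-uniform grids; a quick check with assumed sample points:

import numpy as np
from scipy import integrate

x = np.array([0.0, 0.5, 2.0])    # deliberately uneven spacing
y = x**2
integrate.simpson(y, x=x)        # -> 2.666..., the exact value 8/3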
575
+
576
+ # Note: alias kept for backwards compatibility. simps was renamed to simpson
577
+ # because the former is a slur in colloquial English (see gh-12924).
578
+ def simps(y, x=None, dx=1.0, axis=-1, even=_NoValue):
579
+ """An alias of `simpson`.
580
+
581
+ `simps` is kept for backwards compatibility. For new code, prefer
582
+ `simpson` instead.
583
+ """
584
+ msg = ("'scipy.integrate.simps' is deprecated in favour of "
585
+ "'scipy.integrate.simpson' and will be removed in SciPy 1.14.0")
586
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
587
+ # we don't deprecate positional use as the wrapper is going away completely
588
+ return simpson(y, x=x, dx=dx, axis=axis, even=even)
589
+
590
+
591
+ @_deprecate_positional_args(version="1.14")
592
+ def simpson(y, *, x=None, dx=1.0, axis=-1, even=_NoValue):
593
+ """
594
+ Integrate y(x) using samples along the given axis and the composite
595
+ Simpson's rule. If x is None, spacing of dx is assumed.
596
+
597
+ If there are an even number of samples, N, then there are an odd
598
+ number of intervals (N-1), but Simpson's rule requires an even number
599
+ of intervals. The parameter 'even' controls how this is handled.
600
+
601
+ Parameters
602
+ ----------
603
+ y : array_like
604
+ Array to be integrated.
605
+ x : array_like, optional
606
+ If given, the points at which `y` is sampled.
607
+ dx : float, optional
608
+ Spacing of integration points along axis of `x`. Only used when
609
+ `x` is None. Default is 1.
610
+ axis : int, optional
611
+ Axis along which to integrate. Default is the last axis.
612
+ even : {None, 'simpson', 'avg', 'first', 'last'}, optional
613
+ 'avg' : Average two results:
614
+ 1) use the first N-2 intervals with
615
+ a trapezoidal rule on the last interval and
616
+ 2) use the last
617
+ N-2 intervals with a trapezoidal rule on the first interval.
618
+
619
+ 'first' : Use Simpson's rule for the first N-2 intervals with
620
+ a trapezoidal rule on the last interval.
621
+
622
+ 'last' : Use Simpson's rule for the last N-2 intervals with a
623
+ trapezoidal rule on the first interval.
624
+
625
+ None : equivalent to 'simpson' (default)
626
+
627
+ 'simpson' : Use Simpson's rule for the first N-2 intervals with the
628
+ addition of a 3-point parabolic segment for the last
629
+ interval using equations outlined by Cartwright [1]_.
630
+ If the axis to be integrated over only has two points then
631
+ the integration falls back to a trapezoidal integration.
632
+
633
+ .. versionadded:: 1.11.0
634
+
635
+ .. versionchanged:: 1.11.0
636
+ The newly added 'simpson' option is now the default as it is more
637
+ accurate in most situations.
638
+
639
+ .. deprecated:: 1.11.0
640
+ Parameter `even` is deprecated and will be removed in SciPy
641
+ 1.14.0. After this time the behaviour for an even number of
642
+ points will follow that of `even='simpson'`.
643
+
644
+ Returns
645
+ -------
646
+ float
647
+ The estimated integral computed with the composite Simpson's rule.
648
+
649
+ See Also
650
+ --------
651
+ quad : adaptive quadrature using QUADPACK
652
+ romberg : adaptive Romberg quadrature
653
+ quadrature : adaptive Gaussian quadrature
654
+ fixed_quad : fixed-order Gaussian quadrature
655
+ dblquad : double integrals
656
+ tplquad : triple integrals
657
+ romb : integrators for sampled data
658
+ cumulative_trapezoid : cumulative integration for sampled data
659
+ cumulative_simpson : cumulative integration using Simpson's 1/3 rule
660
+ ode : ODE integrators
661
+ odeint : ODE integrators
662
+
663
+ Notes
664
+ -----
665
+ For an odd number of samples that are equally spaced the result is
666
+ exact if the function is a polynomial of order 3 or less. If
667
+ the samples are not equally spaced, then the result is exact only
668
+ if the function is a polynomial of order 2 or less.
669
+
670
+ References
671
+ ----------
672
+ .. [1] Cartwright, Kenneth V. Simpson's Rule Cumulative Integration with
673
+ MS Excel and Irregularly-spaced Data. Journal of Mathematical
674
+ Sciences and Mathematics Education. 12 (2): 1-9
675
+
676
+ Examples
677
+ --------
678
+ >>> from scipy import integrate
679
+ >>> import numpy as np
680
+ >>> x = np.arange(0, 10)
681
+ >>> y = np.arange(0, 10)
682
+
683
+ >>> integrate.simpson(y, x=x)
684
+ 40.5
685
+
686
+ >>> y = np.power(x, 3)
687
+ >>> integrate.simpson(y, x=x)
688
+ 1640.5
689
+ >>> integrate.quad(lambda x: x**3, 0, 9)[0]
690
+ 1640.25
691
+
692
+ >>> integrate.simpson(y, x=x, even='first')
693
+ 1644.5
694
+
695
+ """
696
+ y = np.asarray(y)
697
+ nd = len(y.shape)
698
+ N = y.shape[axis]
699
+ last_dx = dx
700
+ first_dx = dx
701
+ returnshape = 0
702
+ if x is not None:
703
+ x = np.asarray(x)
704
+ if len(x.shape) == 1:
705
+ shapex = [1] * nd
706
+ shapex[axis] = x.shape[0]
707
+ saveshape = x.shape
708
+ returnshape = 1
709
+ x = x.reshape(tuple(shapex))
710
+ elif len(x.shape) != len(y.shape):
711
+ raise ValueError("If given, shape of x must be 1-D or the "
712
+ "same as y.")
713
+ if x.shape[axis] != N:
714
+ raise ValueError("If given, length of x along axis must be the "
715
+ "same as y.")
716
+
717
+ # even keyword parameter is deprecated
718
+ if even is not _NoValue:
719
+ warnings.warn(
720
+ "The 'even' keyword is deprecated as of SciPy 1.11.0 and will be "
721
+ "removed in SciPy 1.14.0",
722
+ DeprecationWarning, stacklevel=2
723
+ )
724
+
725
+ if N % 2 == 0:
726
+ val = 0.0
727
+ result = 0.0
728
+ slice_all = (slice(None),) * nd
729
+
730
+ # default is 'simpson'
731
+ even = even if even not in (_NoValue, None) else "simpson"
732
+
733
+ if even not in ['avg', 'last', 'first', 'simpson']:
734
+ raise ValueError(
735
+ "Parameter 'even' must be 'simpson', "
736
+ "'avg', 'last', or 'first'."
737
+ )
738
+
739
+ if N == 2:
740
+ # need at least 3 points in integration axis to form parabolic
741
+ # segment. If there are two points then any of 'avg', 'first',
742
+ # 'last' should give the same result.
743
+ slice1 = tupleset(slice_all, axis, -1)
744
+ slice2 = tupleset(slice_all, axis, -2)
745
+ if x is not None:
746
+ last_dx = x[slice1] - x[slice2]
747
+ val += 0.5 * last_dx * (y[slice1] + y[slice2])
748
+
749
+ # calculation is finished. Set `even` to None to skip other
750
+ # scenarios
751
+ even = None
752
+
753
+ if even == 'simpson':
754
+ # use Simpson's rule on first intervals
755
+ result = _basic_simpson(y, 0, N-3, x, dx, axis)
756
+
757
+ slice1 = tupleset(slice_all, axis, -1)
758
+ slice2 = tupleset(slice_all, axis, -2)
759
+ slice3 = tupleset(slice_all, axis, -3)
760
+
761
+ h = np.asarray([dx, dx], dtype=np.float64)
762
+ if x is not None:
763
+ # grab the last two spacings from the appropriate axis
764
+ hm2 = tupleset(slice_all, axis, slice(-2, -1, 1))
765
+ hm1 = tupleset(slice_all, axis, slice(-1, None, 1))
766
+
767
+ diffs = np.float64(np.diff(x, axis=axis))
768
+ h = [np.squeeze(diffs[hm2], axis=axis),
769
+ np.squeeze(diffs[hm1], axis=axis)]
770
+
771
+ # This is the correction for the last interval according to
772
+ # Cartwright.
773
+ # However, I used the equations given at
774
+ # https://en.wikipedia.org/wiki/Simpson%27s_rule#Composite_Simpson's_rule_for_irregularly_spaced_data
775
+ # A footnote on Wikipedia says:
776
+ # Cartwright 2017, Equation 8. The equation in Cartwright is
777
+ # calculating the first interval whereas the equations in the
778
+ # Wikipedia article are adjusting for the last integral. If the
779
+ # proper algebraic substitutions are made, the equation results in
780
+ # the values shown.
781
+ num = 2 * h[1] ** 2 + 3 * h[0] * h[1]
782
+ den = 6 * (h[1] + h[0])
783
+ alpha = np.true_divide(
784
+ num,
785
+ den,
786
+ out=np.zeros_like(den),
787
+ where=den != 0
788
+ )
789
+
790
+ num = h[1] ** 2 + 3.0 * h[0] * h[1]
791
+ den = 6 * h[0]
792
+ beta = np.true_divide(
793
+ num,
794
+ den,
795
+ out=np.zeros_like(den),
796
+ where=den != 0
797
+ )
798
+
799
+ num = 1 * h[1] ** 3
800
+ den = 6 * h[0] * (h[0] + h[1])
801
+ eta = np.true_divide(
802
+ num,
803
+ den,
804
+ out=np.zeros_like(den),
805
+ where=den != 0
806
+ )
807
+
808
+ result += alpha*y[slice1] + beta*y[slice2] - eta*y[slice3]
809
+
810
+ # The following code (down to result=result+val) can be removed
811
+ # once the 'even' keyword is removed.
812
+
813
+ # Compute using Simpson's rule on first intervals
814
+ if even in ['avg', 'first']:
815
+ slice1 = tupleset(slice_all, axis, -1)
816
+ slice2 = tupleset(slice_all, axis, -2)
817
+ if x is not None:
818
+ last_dx = x[slice1] - x[slice2]
819
+ val += 0.5*last_dx*(y[slice1]+y[slice2])
820
+ result = _basic_simpson(y, 0, N-3, x, dx, axis)
821
+ # Compute using Simpson's rule on last set of intervals
822
+ if even in ['avg', 'last']:
823
+ slice1 = tupleset(slice_all, axis, 0)
824
+ slice2 = tupleset(slice_all, axis, 1)
825
+ if x is not None:
826
+ first_dx = x[tuple(slice2)] - x[tuple(slice1)]
827
+ val += 0.5*first_dx*(y[slice2]+y[slice1])
828
+ result += _basic_simpson(y, 1, N-2, x, dx, axis)
829
+ if even == 'avg':
830
+ val /= 2.0
831
+ result /= 2.0
832
+ result = result + val
833
+ else:
834
+ result = _basic_simpson(y, 0, N-2, x, dx, axis)
835
+ if returnshape:
836
+ x = x.reshape(saveshape)
837
+ return result
838
+
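As the ``N == 2`` branch above makes explicit, with only two samples the estimate degenerates to the trapezoidal rule; a small check with assumed values:

import numpy as np
from scipy import integrate

y = np.array([1.0, 3.0])
integrate.simpson(y, dx=2.0)     # -> 4.0
integrate.trapezoid(y, dx=2.0)   # -> 4.0, identical by construction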
839
+
840
+ def _cumulatively_sum_simpson_integrals(
841
+ y: np.ndarray,
842
+ dx: np.ndarray,
843
+ integration_func: Callable[[np.ndarray, np.ndarray], np.ndarray],
844
+ ) -> np.ndarray:
845
+ """Calculate cumulative sum of Simpson integrals.
846
+ Takes as input the integration function to be used.
847
+ The integration_func is assumed to return the cumulative sum using
848
+ composite Simpson's rule. Assumes the axis of summation is -1.
849
+ """
850
+ sub_integrals_h1 = integration_func(y, dx)
851
+ sub_integrals_h2 = integration_func(y[..., ::-1], dx[..., ::-1])[..., ::-1]
852
+
853
+ shape = list(sub_integrals_h1.shape)
854
+ shape[-1] += 1
855
+ sub_integrals = np.empty(shape)
856
+ sub_integrals[..., :-1:2] = sub_integrals_h1[..., ::2]
857
+ sub_integrals[..., 1::2] = sub_integrals_h2[..., ::2]
858
+ # Integral over last subinterval can only be calculated from
859
+ # formula for h2
860
+ sub_integrals[..., -1] = sub_integrals_h2[..., -1]
861
+ res = np.cumsum(sub_integrals, axis=-1)
862
+ return res
863
+
864
+
865
+ def _cumulative_simpson_equal_intervals(y: np.ndarray, dx: np.ndarray) -> np.ndarray:
866
+ """Calculate the Simpson integrals for all h1 intervals assuming equal interval
867
+ widths. The function can also be used to calculate the integral for all
868
+ h2 intervals by reversing the inputs, `y` and `dx`.
869
+ """
870
+ d = dx[..., :-1]
871
+ f1 = y[..., :-2]
872
+ f2 = y[..., 1:-1]
873
+ f3 = y[..., 2:]
874
+
875
+ # Calculate integral over the subintervals (eqn (10) of Reference [2])
876
+ return d / 3 * (5 * f1 / 4 + 2 * f2 - f3 / 4)
877
+
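A quick numeric check of the ``h1`` formula above on assumed samples of ``y = x**2`` at ``x = 0, 1, 2`` (so ``d = 1``): the exact integral over ``[0, 1]`` is 1/3, and the formula reproduces it because a parabola through three points is integrated exactly.

d, f1, f2, f3 = 1.0, 0.0, 1.0, 4.0
print(d / 3 * (5 * f1 / 4 + 2 * f2 - f3 / 4))   # 0.3333...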
878
+
879
+ def _cumulative_simpson_unequal_intervals(y: np.ndarray, dx: np.ndarray) -> np.ndarray:
880
+ """Calculate the Simpson integrals for all h1 intervals assuming unequal interval
881
+ widths. The function can also be used to calculate the integral for all
882
+ h2 intervals by reversing the inputs, `y` and `dx`.
883
+ """
884
+ x21 = dx[..., :-1]
885
+ x32 = dx[..., 1:]
886
+ f1 = y[..., :-2]
887
+ f2 = y[..., 1:-1]
888
+ f3 = y[..., 2:]
889
+
890
+ x31 = x21 + x32
891
+ x21_x31 = x21/x31
892
+ x21_x32 = x21/x32
893
+ x21x21_x31x32 = x21_x31 * x21_x32
894
+
895
+ # Calculate integral over the subintervals (eqn (8) of Reference [2])
896
+ coeff1 = 3 - x21_x31
897
+ coeff2 = 3 + x21x21_x31x32 + x21_x31
898
+ coeff3 = -x21x21_x31x32
899
+
900
+ return x21/6 * (coeff1*f1 + coeff2*f2 + coeff3*f3)
901
+
902
+
903
+ def _ensure_float_array(arr: npt.ArrayLike) -> np.ndarray:
904
+ arr = np.asarray(arr)
905
+ if np.issubdtype(arr.dtype, np.integer):
906
+ arr = arr.astype(float, copy=False)
907
+ return arr
908
+
909
+
910
+ def cumulative_simpson(y, *, x=None, dx=1.0, axis=-1, initial=None):
911
+ r"""
912
+ Cumulatively integrate y(x) using the composite Simpson's 1/3 rule.
913
+ The integral of the samples at every point is calculated by assuming a
914
+ quadratic relationship between each point and the two adjacent points.
915
+
916
+ Parameters
917
+ ----------
918
+ y : array_like
919
+ Values to integrate. Requires at least one point along `axis`. If two or fewer
920
+ points are provided along `axis`, Simpson's integration is not possible and the
921
+ result is calculated with `cumulative_trapezoid`.
922
+ x : array_like, optional
923
+ The coordinate to integrate along. Must have the same shape as `y` or
924
+ must be 1D with the same length as `y` along `axis`. `x` must also be
925
+ strictly increasing along `axis`.
926
+ If `x` is None (default), integration is performed using spacing `dx`
927
+ between consecutive elements in `y`.
928
+ dx : scalar or array_like, optional
929
+ Spacing between elements of `y`. Only used if `x` is None. Can either
930
+ be a float, or an array with the same shape as `y`, but of length one along
931
+ `axis`. Default is 1.0.
932
+ axis : int, optional
933
+ Specifies the axis to integrate along. Default is -1 (last axis).
934
+ initial : scalar or array_like, optional
935
+ If given, insert this value at the beginning of the returned result,
936
+ and add it to the rest of the result. Default is None, which means no
937
+ value at ``x[0]`` is returned and `res` has one element less than `y`
938
+ along the axis of integration. Can either be a float, or an array with
939
+ the same shape as `y`, but of length one along `axis`.
940
+
941
+ Returns
942
+ -------
943
+ res : ndarray
944
+ The result of cumulative integration of `y` along `axis`.
945
+ If `initial` is None, the shape is such that the axis of integration
946
+ has one less value than `y`. If `initial` is given, the shape is equal
947
+ to that of `y`.
948
+
949
+ See Also
950
+ --------
951
+ numpy.cumsum
952
+ cumulative_trapezoid : cumulative integration using the composite
953
+ trapezoidal rule
954
+ simpson : integrator for sampled data using the Composite Simpson's Rule
955
+
956
+ Notes
957
+ -----
958
+
959
+ .. versionadded:: 1.12.0
960
+
961
+ The composite Simpson's 1/3 method can be used to approximate the definite
962
+ integral of a sampled input function :math:`y(x)` [1]_. The method assumes
963
+ a quadratic relationship over the interval containing any three consecutive
964
+ sampled points.
965
+
966
+ Consider three consecutive points:
967
+ :math:`(x_1, y_1), (x_2, y_2), (x_3, y_3)`.
968
+
969
+ Assuming a quadratic relationship over the three points, the integral over
970
+ the subinterval between :math:`x_1` and :math:`x_2` is given by formula
971
+ (8) of [2]_:
972
+
973
+ .. math::
974
+ \int_{x_1}^{x_2} y(x) dx\ &= \frac{x_2-x_1}{6}\left[\
975
+ \left\{3-\frac{x_2-x_1}{x_3-x_1}\right\} y_1 + \
976
+ \left\{3 + \frac{(x_2-x_1)^2}{(x_3-x_2)(x_3-x_1)} + \
977
+ \frac{x_2-x_1}{x_3-x_1}\right\} y_2\\
978
+ - \frac{(x_2-x_1)^2}{(x_3-x_2)(x_3-x_1)} y_3\right]
979
+
980
+ The integral between :math:`x_2` and :math:`x_3` is given by swapping
981
+ appearances of :math:`x_1` and :math:`x_3`. The integral is estimated
982
+ separately for each subinterval and then cumulatively summed to obtain
983
+ the final result.
984
+
985
+ For samples that are equally spaced, the result is exact if the function
986
+ is a polynomial of order three or less [1]_ and the number of subintervals
987
+ is even. Otherwise, the integral is exact for polynomials of order two or
988
+ less.
989
+
990
+ References
991
+ ----------
992
+ .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Simpson's_rule
993
+ .. [2] Cartwright, Kenneth V. Simpson's Rule Cumulative Integration with
994
+ MS Excel and Irregularly-spaced Data. Journal of Mathematical
995
+ Sciences and Mathematics Education. 12 (2): 1-9
996
+
997
+ Examples
998
+ --------
999
+ >>> from scipy import integrate
1000
+ >>> import numpy as np
1001
+ >>> import matplotlib.pyplot as plt
1002
+ >>> x = np.linspace(-2, 2, num=20)
1003
+ >>> y = x**2
1004
+ >>> y_int = integrate.cumulative_simpson(y, x=x, initial=0)
1005
+ >>> fig, ax = plt.subplots()
1006
+ >>> ax.plot(x, y_int, 'ro', x, x**3/3 - (x[0])**3/3, 'b-')
1007
+ >>> ax.grid()
1008
+ >>> plt.show()
1009
+
1010
+ The output of `cumulative_simpson` is similar to that of iteratively
1011
+ calling `simpson` with successively higher upper limits of integration, but
1012
+ not identical.
1013
+
1014
+ >>> def cumulative_simpson_reference(y, x):
1015
+ ... return np.asarray([integrate.simpson(y[:i], x=x[:i])
1016
+ ... for i in range(2, len(y) + 1)])
1017
+ >>>
1018
+ >>> rng = np.random.default_rng(354673834679465)
1019
+ >>> x, y = rng.random(size=(2, 10))
1020
+ >>> x.sort()
1021
+ >>>
1022
+ >>> res = integrate.cumulative_simpson(y, x=x)
1023
+ >>> ref = cumulative_simpson_reference(y, x)
1024
+ >>> equal = np.abs(res - ref) < 1e-15
1025
+ >>> equal # not equal when `simpson` has even number of subintervals
1026
+ array([False, True, False, True, False, True, False, True, True])
1027
+
1028
+ This is expected: because `cumulative_simpson` has access to more
1029
+ information than `simpson`, it can typically produce more accurate
1030
+ estimates of the underlying integral over subintervals.
1031
+
1032
+ """
1033
+ y = _ensure_float_array(y)
1034
+
1035
+ # validate `axis` and standardize to work along the last axis
1036
+ original_y = y
1037
+ original_shape = y.shape
1038
+ try:
1039
+ y = np.swapaxes(y, axis, -1)
1040
+ except IndexError as e:
1041
+ message = f"`axis={axis}` is not valid for `y` with `y.ndim={y.ndim}`."
1042
+ raise ValueError(message) from e
1043
+ if y.shape[-1] < 3:
1044
+ res = cumulative_trapezoid(original_y, x, dx=dx, axis=axis, initial=None)
1045
+ res = np.swapaxes(res, axis, -1)
1046
+
1047
+ elif x is not None:
1048
+ x = _ensure_float_array(x)
1049
+ message = ("If given, shape of `x` must be the same as `y` or 1-D with "
1050
+ "the same length as `y` along `axis`.")
1051
+ if not (x.shape == original_shape
1052
+ or (x.ndim == 1 and len(x) == original_shape[axis])):
1053
+ raise ValueError(message)
1054
+
1055
+ x = np.broadcast_to(x, y.shape) if x.ndim == 1 else np.swapaxes(x, axis, -1)
1056
+ dx = np.diff(x, axis=-1)
1057
+ if np.any(dx <= 0):
1058
+ raise ValueError("Input x must be strictly increasing.")
1059
+ res = _cumulatively_sum_simpson_integrals(
1060
+ y, dx, _cumulative_simpson_unequal_intervals
1061
+ )
1062
+
1063
+ else:
1064
+ dx = _ensure_float_array(dx)
1065
+ final_dx_shape = tupleset(original_shape, axis, original_shape[axis] - 1)
1066
+ alt_input_dx_shape = tupleset(original_shape, axis, 1)
1067
+ message = ("If provided, `dx` must either be a scalar or have the same "
1068
+ "shape as `y` but with only 1 point along `axis`.")
1069
+ if not (dx.ndim == 0 or dx.shape == alt_input_dx_shape):
1070
+ raise ValueError(message)
1071
+ dx = np.broadcast_to(dx, final_dx_shape)
1072
+ dx = np.swapaxes(dx, axis, -1)
1073
+ res = _cumulatively_sum_simpson_integrals(
1074
+ y, dx, _cumulative_simpson_equal_intervals
1075
+ )
1076
+
1077
+ if initial is not None:
1078
+ initial = _ensure_float_array(initial)
1079
+ alt_initial_input_shape = tupleset(original_shape, axis, 1)
1080
+ message = ("If provided, `initial` must either be a scalar or have the "
1081
+ "same shape as `y` but with only 1 point along `axis`.")
1082
+ if not (initial.ndim == 0 or initial.shape == alt_initial_input_shape):
1083
+ raise ValueError(message)
1084
+ initial = np.broadcast_to(initial, alt_initial_input_shape)
1085
+ initial = np.swapaxes(initial, axis, -1)
1086
+
1087
+ res += initial
1088
+ res = np.concatenate((initial, res), axis=-1)
1089
+
1090
+ res = np.swapaxes(res, -1, axis)
1091
+ return res
1092
+
1093
+
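+ # Illustrative sketch (hypothetical helper, not part of SciPy): because each
+ # sub-interval estimate assumes a local quadratic, `cumulative_simpson` is
+ # exact at every sample for a quadratic integrand, in line with the Notes above.
+ def _cumulative_simpson_quadratic_demo():
+     x = np.linspace(0.0, 2.0, 9)
+     y = x**2
+     res = cumulative_simpson(y, x=x, initial=0.0)
+     return np.allclose(res, x**3 / 3)
+
+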
1094
+ def romb(y, dx=1.0, axis=-1, show=False):
1095
+ """
1096
+ Romberg integration using samples of a function.
1097
+
1098
+ Parameters
1099
+ ----------
1100
+ y : array_like
1101
+ A vector of ``2**k + 1`` equally-spaced samples of a function.
1102
+ dx : float, optional
1103
+ The sample spacing. Default is 1.
1104
+ axis : int, optional
1105
+ The axis along which to integrate. Default is -1 (last axis).
1106
+ show : bool, optional
1107
+ When `y` is a single 1-D array, then if this argument is True
1108
+ print the table showing Richardson extrapolation from the
1109
+ samples. Default is False.
1110
+
1111
+ Returns
1112
+ -------
1113
+ romb : ndarray
1114
+ The integrated result for `axis`.
1115
+
1116
+ See Also
1117
+ --------
1118
+ quad : adaptive quadrature using QUADPACK
1119
+ romberg : adaptive Romberg quadrature
1120
+ quadrature : adaptive Gaussian quadrature
1121
+ fixed_quad : fixed-order Gaussian quadrature
1122
+ dblquad : double integrals
1123
+ tplquad : triple integrals
1124
+ simpson : integrators for sampled data
1125
+ cumulative_trapezoid : cumulative integration for sampled data
1126
+ ode : ODE integrators
1127
+ odeint : ODE integrators
1128
+
1129
+ Examples
1130
+ --------
1131
+ >>> from scipy import integrate
1132
+ >>> import numpy as np
1133
+ >>> x = np.arange(10, 14.25, 0.25)
1134
+ >>> y = np.arange(3, 12)
1135
+
1136
+ >>> integrate.romb(y)
1137
+ 56.0
1138
+
1139
+ >>> y = np.sin(np.power(x, 2.5))
1140
+ >>> integrate.romb(y)
1141
+ -0.742561336672229
1142
+
1143
+ >>> integrate.romb(y, show=True)
1144
+ Richardson Extrapolation Table for Romberg Integration
1145
+ ======================================================
1146
+ -0.81576
1147
+ 4.63862 6.45674
1148
+ -1.10581 -3.02062 -3.65245
1149
+ -2.57379 -3.06311 -3.06595 -3.05664
1150
+ -1.34093 -0.92997 -0.78776 -0.75160 -0.74256
1151
+ ======================================================
1152
+ -0.742561336672229 # may vary
1153
+
1154
+ """
1155
+ y = np.asarray(y)
1156
+ nd = len(y.shape)
1157
+ Nsamps = y.shape[axis]
1158
+ Ninterv = Nsamps-1
1159
+ n = 1
1160
+ k = 0
1161
+ while n < Ninterv:
1162
+ n <<= 1
1163
+ k += 1
1164
+ if n != Ninterv:
1165
+ raise ValueError("Number of samples must be one plus a "
1166
+ "non-negative power of 2.")
1167
+
1168
+ R = {}
1169
+ slice_all = (slice(None),) * nd
1170
+ slice0 = tupleset(slice_all, axis, 0)
1171
+ slicem1 = tupleset(slice_all, axis, -1)
1172
+ h = Ninterv * np.asarray(dx, dtype=float)
1173
+ R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
1174
+ slice_R = slice_all
1175
+ start = stop = step = Ninterv
1176
+ for i in range(1, k+1):
1177
+ start >>= 1
1178
+ slice_R = tupleset(slice_R, axis, slice(start, stop, step))
1179
+ step >>= 1
1180
+ R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
1181
+ for j in range(1, i+1):
1182
+ prev = R[(i, j-1)]
1183
+ R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
1184
+ h /= 2.0
1185
+
1186
+ if show:
1187
+ if not np.isscalar(R[(0, 0)]):
1188
+ print("*** Printing table only supported for integrals" +
1189
+ " of a single data set.")
1190
+ else:
1191
+ try:
1192
+ precis = show[0]
1193
+ except (TypeError, IndexError):
1194
+ precis = 5
1195
+ try:
1196
+ width = show[1]
1197
+ except (TypeError, IndexError):
1198
+ width = 8
1199
+ formstr = "%%%d.%df" % (width, precis)
1200
+
1201
+ title = "Richardson Extrapolation Table for Romberg Integration"
1202
+ print(title, "=" * len(title), sep="\n", end="\n")
1203
+ for i in range(k+1):
1204
+ for j in range(i+1):
1205
+ print(formstr % R[(i, j)], end=" ")
1206
+ print()
1207
+ print("=" * len(title))
1208
+
1209
+ return R[(k, k)]
1210
+
1211
+ # Romberg quadratures for numeric integration.
1212
+ #
1213
+ # Written by Scott M. Ransom <[email protected]>
1214
+ # last revision: 14 Nov 98
1215
+ #
1216
+ # Cosmetic changes by Konrad Hinsen <[email protected]>
1217
+ # last revision: 1999-7-21
1218
+ #
1219
+ # Adapted to SciPy by Travis Oliphant <[email protected]>
1220
+ # last revision: Dec 2001
1221
+
1222
+
1223
+ def _difftrap(function, interval, numtraps):
1224
+ """
1225
+ Perform part of the trapezoidal rule to integrate a function.
1226
+ Assume that we had called difftrap with all lower powers-of-2
1227
+ starting with 1. Calling difftrap only returns the summation
1228
+ of the new ordinates. It does _not_ multiply by the width
1229
+ of the trapezoids. This must be performed by the caller.
1230
+ 'function' is the function to evaluate (must accept vector arguments).
1231
+ 'interval' is a sequence with lower and upper limits
1232
+ of integration.
1233
+ 'numtraps' is the number of trapezoids to use (must be a
1234
+ power-of-2).
1235
+ """
1236
+ if numtraps <= 0:
1237
+ raise ValueError("numtraps must be > 0 in difftrap().")
1238
+ elif numtraps == 1:
1239
+ return 0.5*(function(interval[0])+function(interval[1]))
1240
+ else:
1241
+ numtosum = numtraps/2
1242
+ h = float(interval[1]-interval[0])/numtosum
1243
+ lox = interval[0] + 0.5 * h
1244
+ points = lox + h * np.arange(numtosum)
1245
+ s = np.sum(function(points), axis=0)
1246
+ return s
1247
+
1248
+
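+ # Illustrative sketch (hypothetical helper, not part of SciPy): accumulating
+ # `_difftrap` contributions and scaling by width/n reproduces the ordinary
+ # trapezoid estimates, here for f(x) = x**2 on [0, 1].
+ def _difftrap_demo():
+     def f(x):
+         return np.asarray(x)**2
+     interval = [0.0, 1.0]
+     ordsum = _difftrap(f, interval, 1)             # 0.5*(f(0) + f(1)) = 0.5
+     t1 = (interval[1] - interval[0]) * ordsum      # 1-interval trapezoid: 0.5
+     ordsum += _difftrap(f, interval, 2)            # adds the new ordinate f(0.5)
+     t2 = (interval[1] - interval[0]) * ordsum / 2  # 2-interval trapezoid: 0.375
+     return t1, t2
+
+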
1249
+ def _romberg_diff(b, c, k):
1250
+ """
1251
+ Compute the differences for the Romberg quadrature corrections.
1252
+ See Forman Acton's "Real Computing Made Real," p 143.
1253
+ """
1254
+ tmp = 4.0**k
1255
+ return (tmp * c - b)/(tmp - 1.0)
1256
+
1257
+
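+ # Illustrative check (hypothetical helper, not part of SciPy): one Richardson
+ # step applied to the 1- and 2-interval trapezoid estimates of the integral of
+ # x**2 over [0, 1] removes the O(h**2) error and recovers the exact value 1/3.
+ def _romberg_diff_demo():
+     b = 0.5    # trapezoid estimate with 1 interval
+     c = 0.375  # trapezoid estimate with 2 intervals
+     return _romberg_diff(b, c, 1)  # (4*c - b)/3 == 1/3
+
+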
1258
+ def _printresmat(function, interval, resmat):
1259
+ # Print the Romberg result matrix.
1260
+ i = j = 0
1261
+ print('Romberg integration of', repr(function), end=' ')
1262
+ print('from', interval)
1263
+ print('')
1264
+ print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
1265
+ for i in range(len(resmat)):
1266
+ print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
1267
+ for j in range(i+1):
1268
+ print('%9f' % (resmat[i][j]), end=' ')
1269
+ print('')
1270
+ print('')
1271
+ print('The final result is', resmat[i][j], end=' ')
1272
+ print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
1273
+
1274
+
1275
+ @_deprecated("`scipy.integrate.romberg` is deprecated as of SciPy 1.12.0 "
1276
+ "and will be removed in SciPy 1.15.0. Please use "
1277
+ "`scipy.integrate.quad` instead.")
1278
+ def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
1279
+ divmax=10, vec_func=False):
1280
+ """
1281
+ Romberg integration of a callable function or method.
1282
+
1283
+ .. deprecated:: 1.12.0
1284
+
1285
+ This function is deprecated as of SciPy 1.12.0 and will be removed
1286
+ in SciPy 1.15.0. Please use `scipy.integrate.quad` instead.
1287
+
1288
+ Returns the integral of `function` (a function of one variable)
1289
+ over the interval (`a`, `b`).
1290
+
1291
+ If `show` is 1, the triangular array of the intermediate results
1292
+ will be printed. If `vec_func` is True (default is False), then
1293
+ `function` is assumed to support vector arguments.
1294
+
1295
+ Parameters
1296
+ ----------
1297
+ function : callable
1298
+ Function to be integrated.
1299
+ a : float
1300
+ Lower limit of integration.
1301
+ b : float
1302
+ Upper limit of integration.
1303
+
1304
+ Returns
1305
+ -------
1306
+ results : float
1307
+ Result of the integration.
1308
+
1309
+ Other Parameters
1310
+ ----------------
1311
+ args : tuple, optional
1312
+ Extra arguments to pass to function. Each element of `args` will
1313
+ be passed as a single argument to `func`. Default is to pass no
1314
+ extra arguments.
1315
+ tol, rtol : float, optional
1316
+ The desired absolute and relative tolerances. Defaults are 1.48e-8.
1317
+ show : bool, optional
1318
+ Whether to print the results. Default is False.
1319
+ divmax : int, optional
1320
+ Maximum order of extrapolation. Default is 10.
1321
+ vec_func : bool, optional
1322
+ Whether `func` handles arrays as arguments (i.e., whether it is a
1323
+ "vector" function). Default is False.
1324
+
1325
+ See Also
1326
+ --------
1327
+ fixed_quad : Fixed-order Gaussian quadrature.
1328
+ quad : Adaptive quadrature using QUADPACK.
1329
+ dblquad : Double integrals.
1330
+ tplquad : Triple integrals.
1331
+ romb : Integrators for sampled data.
1332
+ simpson : Integrators for sampled data.
1333
+ cumulative_trapezoid : Cumulative integration for sampled data.
1334
+ ode : ODE integrator.
1335
+ odeint : ODE integrator.
1336
+
1337
+ References
1338
+ ----------
1339
+ .. [1] 'Romberg's method' https://en.wikipedia.org/wiki/Romberg%27s_method
1340
+
1341
+ Examples
1342
+ --------
1343
+ Integrate a gaussian from 0 to 1 and compare to the error function.
1344
+
1345
+ >>> from scipy import integrate
1346
+ >>> from scipy.special import erf
1347
+ >>> import numpy as np
1348
+ >>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
1349
+ >>> result = integrate.romberg(gaussian, 0, 1, show=True)
1350
+ Romberg integration of <function vfunc at ...> from [0, 1]
1351
+
1352
+ ::
1353
+
1354
+ Steps StepSize Results
1355
+ 1 1.000000 0.385872
1356
+ 2 0.500000 0.412631 0.421551
1357
+ 4 0.250000 0.419184 0.421368 0.421356
1358
+ 8 0.125000 0.420810 0.421352 0.421350 0.421350
1359
+ 16 0.062500 0.421215 0.421350 0.421350 0.421350 0.421350
1360
+ 32 0.031250 0.421317 0.421350 0.421350 0.421350 0.421350 0.421350
1361
+
1362
+ The final result is 0.421350396475 after 33 function evaluations.
1363
+
1364
+ >>> print("%g %g" % (2*result, erf(1)))
1365
+ 0.842701 0.842701
1366
+
1367
+ """
1368
+ if np.isinf(a) or np.isinf(b):
1369
+ raise ValueError("Romberg integration only available "
1370
+ "for finite limits.")
1371
+ vfunc = vectorize1(function, args, vec_func=vec_func)
1372
+ n = 1
1373
+ interval = [a, b]
1374
+ intrange = b - a
1375
+ ordsum = _difftrap(vfunc, interval, n)
1376
+ result = intrange * ordsum
1377
+ resmat = [[result]]
1378
+ err = np.inf
1379
+ last_row = resmat[0]
1380
+ for i in range(1, divmax+1):
1381
+ n *= 2
1382
+ ordsum += _difftrap(vfunc, interval, n)
1383
+ row = [intrange * ordsum / n]
1384
+ for k in range(i):
1385
+ row.append(_romberg_diff(last_row[k], row[k], k+1))
1386
+ result = row[i]
1387
+ lastresult = last_row[i-1]
1388
+ if show:
1389
+ resmat.append(row)
1390
+ err = abs(result - lastresult)
1391
+ if err < tol or err < rtol * abs(result):
1392
+ break
1393
+ last_row = row
1394
+ else:
1395
+ warnings.warn(
1396
+ "divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
1397
+ AccuracyWarning, stacklevel=2)
1398
+
1399
+ if show:
1400
+ _printresmat(vfunc, interval, resmat)
1401
+ return result
1402
+
1403
+
1404
+ # Coefficients for Newton-Cotes quadrature
1405
+ #
1406
+ # These are the points being used
1407
+ # to construct the local interpolating polynomial
1408
+ # a are the weights for Newton-Cotes integration
1409
+ # B is the error coefficient.
1410
+ # The error in these coefficients grows as N gets larger
1411
+ # or as the samples are spaced closer and closer together.
1412
+
1413
+ # You can use maxima to find these rational coefficients
1414
+ # for equally spaced data using the commands
1415
+ # a(i,N) := (integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N)
1416
+ # / ((N-i)! * i!) * (-1)^(N-i));
1417
+ # Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
1418
+ # Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
1419
+ # B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
1420
+ #
1421
+ # pre-computed for equally-spaced weights
1422
+ #
1423
+ # num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
1424
+ #
1425
+ # a = num_a*array(int_a)/den_a
1426
+ # B = num_B*1.0 / den_B
1427
+ #
1428
+ # integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
1429
+ # where k = N // 2
1430
+ #
1431
+ _builtincoeffs = {
1432
+ 1: (1,2,[1,1],-1,12),
1433
+ 2: (1,3,[1,4,1],-1,90),
1434
+ 3: (3,8,[1,3,3,1],-3,80),
1435
+ 4: (2,45,[7,32,12,32,7],-8,945),
1436
+ 5: (5,288,[19,75,50,50,75,19],-275,12096),
1437
+ 6: (1,140,[41,216,27,272,27,216,41],-9,1400),
1438
+ 7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
1439
+ 8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
1440
+ -2368,467775),
1441
+ 9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
1442
+ 15741,2857], -4671, 394240),
1443
+ 10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
1444
+ -260550,272400,-48525,106300,16067],
1445
+ -673175, 163459296),
1446
+ 11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
1447
+ 15493566,15493566,-9595542,25226685,-3237113,
1448
+ 13486539,2171465], -2224234463, 237758976000),
1449
+ 12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
1450
+ 87516288,-87797136,87516288,-51491295,35725120,
1451
+ -7587864,9903168,1364651], -3012, 875875),
1452
+ 13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
1453
+ 156074417954,-151659573325,206683437987,
1454
+ -43111992612,-43111992612,206683437987,
1455
+ -151659573325,156074417954,-31268252574,
1456
+ 56280729661,8181904909], -2639651053,
1457
+ 344881152000),
1458
+ 14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
1459
+ -6625093363,12630121616,-16802270373,19534438464,
1460
+ -16802270373,12630121616,-6625093363,3501442784,
1461
+ -770720657,710986864,90241897], -3740727473,
1462
+ 1275983280000)
1463
+ }
1464
+
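+ # Illustrative decoding of one `_builtincoeffs` entry (hypothetical helper, not
+ # part of SciPy): for N=2 this yields the classic Simpson weights
+ # a = [1/3, 4/3, 1/3] and error coefficient B = -1/90.
+ def _decode_builtincoeffs_demo(N=2):
+     num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
+     a = num_a * np.array(int_a, dtype=float) / den_a
+     B = num_B / den_B
+     return a, B
+
+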
1465
+
1466
+ def newton_cotes(rn, equal=0):
1467
+ r"""
1468
+ Return weights and error coefficient for Newton-Cotes integration.
1469
+
1470
+ Suppose we have (N+1) samples of f at the positions
1471
+ x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
1472
+ integral between x_0 and x_N is:
1473
+
1474
+ :math:`\int_{x_0}^{x_N} f(x)dx = \Delta x \sum_{i=0}^{N} a_i f(x_i)
1475
+ + B_N (\Delta x)^{N+2} f^{N+1} (\xi)`
1476
+
1477
+ where :math:`\xi \in [x_0,x_N]`
1478
+ and :math:`\Delta x = \frac{x_N-x_0}{N}` is the average samples spacing.
1479
+
1480
+ If the samples are equally-spaced and N is even, then the error
1481
+ term is :math:`B_N (\Delta x)^{N+3} f^{N+2}(\xi)`.
1482
+
1483
+ Parameters
1484
+ ----------
1485
+ rn : int
1486
+ The integer order for equally-spaced data or the relative positions of
1487
+ the samples with the first sample at 0 and the last at N, where N+1 is
1488
+ the length of `rn`. N is the order of the Newton-Cotes integration.
1489
+ equal : int, optional
1490
+ Set to 1 to enforce equally spaced data.
1491
+
1492
+ Returns
1493
+ -------
1494
+ an : ndarray
1495
+ 1-D array of weights to apply to the function at the provided sample
1496
+ positions.
1497
+ B : float
1498
+ Error coefficient.
1499
+
1500
+ Notes
1501
+ -----
1502
+ Normally, the Newton-Cotes rules are used on smaller integration
1503
+ regions and a composite rule is used to return the total integral.
1504
+
1505
+ Examples
1506
+ --------
1507
+ Compute the integral of sin(x) in [0, :math:`\pi`]:
1508
+
1509
+ >>> from scipy.integrate import newton_cotes
1510
+ >>> import numpy as np
1511
+ >>> def f(x):
1512
+ ... return np.sin(x)
1513
+ >>> a = 0
1514
+ >>> b = np.pi
1515
+ >>> exact = 2
1516
+ >>> for N in [2, 4, 6, 8, 10]:
1517
+ ... x = np.linspace(a, b, N + 1)
1518
+ ... an, B = newton_cotes(N, 1)
1519
+ ... dx = (b - a) / N
1520
+ ... quad = dx * np.sum(an * f(x))
1521
+ ... error = abs(quad - exact)
1522
+ ... print('{:2d} {:10.9f} {:.5e}'.format(N, quad, error))
1523
+ ...
1524
+ 2 2.094395102 9.43951e-02
1525
+ 4 1.998570732 1.42927e-03
1526
+ 6 2.000017814 1.78136e-05
1527
+ 8 1.999999835 1.64725e-07
1528
+ 10 2.000000001 1.14677e-09
1529
+
1530
+ """
1531
+ try:
1532
+ N = len(rn)-1
1533
+ if equal:
1534
+ rn = np.arange(N+1)
1535
+ elif np.all(np.diff(rn) == 1):
1536
+ equal = 1
1537
+ except Exception:
1538
+ N = rn
1539
+ rn = np.arange(N+1)
1540
+ equal = 1
1541
+
1542
+ if equal and N in _builtincoeffs:
1543
+ na, da, vi, nb, db = _builtincoeffs[N]
1544
+ an = na * np.array(vi, dtype=float) / da
1545
+ return an, float(nb)/db
1546
+
1547
+ if (rn[0] != 0) or (rn[-1] != N):
1548
+ raise ValueError("The sample positions must start at 0"
1549
+ " and end at N")
1550
+ yi = rn / float(N)
1551
+ ti = 2 * yi - 1
1552
+ nvec = np.arange(N+1)
1553
+ C = ti ** nvec[:, np.newaxis]
1554
+ Cinv = np.linalg.inv(C)
1555
+ # improve precision of the result with two Newton-Schulz refinement steps for the inverse
1556
+ for i in range(2):
1557
+ Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
1558
+ vec = 2.0 / (nvec[::2]+1)
1559
+ ai = Cinv[:, ::2].dot(vec) * (N / 2.)
1560
+
1561
+ if (N % 2 == 0) and equal:
1562
+ BN = N/(N+3.)
1563
+ power = N+2
1564
+ else:
1565
+ BN = N/(N+2.)
1566
+ power = N+1
1567
+
1568
+ BN = BN - np.dot(yi**power, ai)
1569
+ p1 = power+1
1570
+ fac = power*math.log(N) - gammaln(p1)
1571
+ fac = math.exp(fac)
1572
+ return ai, BN*fac
1573
+
1574
+
1575
+ def _qmc_quad_iv(func, a, b, n_points, n_estimates, qrng, log):
1576
+
1577
+ # lazy import to avoid issues with partially-initialized submodule
1578
+ if not hasattr(qmc_quad, 'qmc'):
1579
+ from scipy import stats
1580
+ qmc_quad.stats = stats
1581
+ else:
1582
+ stats = qmc_quad.stats
1583
+
1584
+ if not callable(func):
1585
+ message = "`func` must be callable."
1586
+ raise TypeError(message)
1587
+
1588
+ # a, b will be modified, so copy. Oh well if it's copied twice.
1589
+ a = np.atleast_1d(a).copy()
1590
+ b = np.atleast_1d(b).copy()
1591
+ a, b = np.broadcast_arrays(a, b)
1592
+ dim = a.shape[0]
1593
+
1594
+ try:
1595
+ func((a + b) / 2)
1596
+ except Exception as e:
1597
+ message = ("`func` must evaluate the integrand at points within "
1598
+ "the integration range; e.g. `func( (a + b) / 2)` "
1599
+ "must return the integrand at the centroid of the "
1600
+ "integration volume.")
1601
+ raise ValueError(message) from e
1602
+
1603
+ try:
1604
+ func(np.array([a, b]).T)
1605
+ vfunc = func
1606
+ except Exception as e:
1607
+ message = ("Exception encountered when attempting vectorized call to "
1608
+ f"`func`: {e}. For better performance, `func` should "
1609
+ "accept two-dimensional array `x` with shape `(len(a), "
1610
+ "n_points)` and return an array of the integrand value at "
1611
+ "each of the `n_points`.")
1612
+ warnings.warn(message, stacklevel=3)
1613
+
1614
+ def vfunc(x):
1615
+ return np.apply_along_axis(func, axis=-1, arr=x)
1616
+
1617
+ n_points_int = np.int64(n_points)
1618
+ if n_points != n_points_int:
1619
+ message = "`n_points` must be an integer."
1620
+ raise TypeError(message)
1621
+
1622
+ n_estimates_int = np.int64(n_estimates)
1623
+ if n_estimates != n_estimates_int:
1624
+ message = "`n_estimates` must be an integer."
1625
+ raise TypeError(message)
1626
+
1627
+ if qrng is None:
1628
+ qrng = stats.qmc.Halton(dim)
1629
+ elif not isinstance(qrng, stats.qmc.QMCEngine):
1630
+ message = "`qrng` must be an instance of scipy.stats.qmc.QMCEngine."
1631
+ raise TypeError(message)
1632
+
1633
+ if qrng.d != a.shape[0]:
1634
+ message = ("`qrng` must be initialized with dimensionality equal to "
1635
+ "the number of variables in `a`, i.e., "
1636
+ "`qrng.random().shape[-1]` must equal `a.shape[0]`.")
1637
+ raise ValueError(message)
1638
+
1639
+ rng_seed = getattr(qrng, 'rng_seed', None)
1640
+ rng = stats._qmc.check_random_state(rng_seed)
1641
+
1642
+ if log not in {True, False}:
1643
+ message = "`log` must be boolean (`True` or `False`)."
1644
+ raise TypeError(message)
1645
+
1646
+ return (vfunc, a, b, n_points_int, n_estimates_int, qrng, rng, log, stats)
1647
+
1648
+
1649
+ QMCQuadResult = namedtuple('QMCQuadResult', ['integral', 'standard_error'])
1650
+
1651
+
1652
+ def qmc_quad(func, a, b, *, n_estimates=8, n_points=1024, qrng=None,
1653
+ log=False):
1654
+ """
1655
+ Compute an integral in N-dimensions using Quasi-Monte Carlo quadrature.
1656
+
1657
+ Parameters
1658
+ ----------
1659
+ func : callable
1660
+ The integrand. Must accept a single argument ``x``, an array which
1661
+ specifies the point(s) at which to evaluate the scalar-valued
1662
+ integrand, and return the value(s) of the integrand.
1663
+ For efficiency, the function should be vectorized to accept an array of
1664
+ shape ``(d, n_points)``, where ``d`` is the number of variables (i.e.
1665
+ the dimensionality of the function domain) and `n_points` is the number
1666
+ of quadrature points, and return an array of shape ``(n_points,)``,
1667
+ the integrand at each quadrature point.
1668
+ a, b : array-like
1669
+ One-dimensional arrays specifying the lower and upper integration
1670
+ limits, respectively, of each of the ``d`` variables.
1671
+ n_estimates, n_points : int, optional
1672
+ `n_estimates` (default: 8) statistically independent QMC samples, each
1673
+ of `n_points` (default: 1024) points, will be generated by `qrng`.
1674
+ The total number of points at which the integrand `func` will be
1675
+ evaluated is ``n_points * n_estimates``. See Notes for details.
1676
+ qrng : `~scipy.stats.qmc.QMCEngine`, optional
1677
+ An instance of the QMCEngine from which to sample QMC points.
1678
+ The QMCEngine must be initialized to a number of dimensions ``d``
1679
+ corresponding with the number of variables ``x1, ..., xd`` passed to
1680
+ `func`.
1681
+ The provided QMCEngine is used to produce the first integral estimate.
1682
+ If `n_estimates` is greater than one, additional QMCEngines are
1683
+ spawned from the first (with scrambling enabled, if it is an option.)
1684
+ If a QMCEngine is not provided, the default `scipy.stats.qmc.Halton`
1685
+ will be initialized with the number of dimensions determined from
1686
+ the length of `a`.
1687
+ log : boolean, default: False
1688
+ When set to True, `func` returns the log of the integrand, and
1689
+ the result object contains the log of the integral.
1690
+
1691
+ Returns
1692
+ -------
1693
+ result : object
1694
+ A result object with attributes:
1695
+
1696
+ integral : float
1697
+ The estimate of the integral.
1698
+ standard_error : float
1699
+ The error estimate. See Notes for interpretation.
1700
+
1701
+ Notes
1702
+ -----
1703
+ Values of the integrand at each of the `n_points` points of a QMC sample
1704
+ are used to produce an estimate of the integral. This estimate is drawn
1705
+ from a population of possible estimates of the integral, the value of
1706
+ which we obtain depends on the particular points at which the integral
1707
+ was evaluated. We perform this process `n_estimates` times, each time
1708
+ evaluating the integrand at different scrambled QMC points, effectively
1709
+ drawing i.i.d. random samples from the population of integral estimates.
1710
+ The sample mean :math:`m` of these integral estimates is an
1711
+ unbiased estimator of the true value of the integral, and the standard
1712
+ error of the mean :math:`s` of these estimates may be used to generate
1713
+ confidence intervals using the t distribution with ``n_estimates - 1``
1714
+ degrees of freedom. Perhaps counter-intuitively, increasing `n_points`
1715
+ while keeping the total number of function evaluation points
1716
+ ``n_points * n_estimates`` fixed tends to reduce the actual error, whereas
1717
+ increasing `n_estimates` tends to decrease the error estimate.
1718
+
1719
+ Examples
1720
+ --------
1721
+ QMC quadrature is particularly useful for computing integrals in higher
1722
+ dimensions. An example integrand is the probability density function
1723
+ of a multivariate normal distribution.
1724
+
1725
+ >>> import numpy as np
1726
+ >>> from scipy import stats
1727
+ >>> dim = 8
1728
+ >>> mean = np.zeros(dim)
1729
+ >>> cov = np.eye(dim)
1730
+ >>> def func(x):
1731
+ ... # `multivariate_normal` expects the _last_ axis to correspond with
1732
+ ... # the dimensionality of the space, so `x` must be transposed
1733
+ ... return stats.multivariate_normal.pdf(x.T, mean, cov)
1734
+
1735
+ To compute the integral over the unit hypercube:
1736
+
1737
+ >>> from scipy.integrate import qmc_quad
1738
+ >>> a = np.zeros(dim)
1739
+ >>> b = np.ones(dim)
1740
+ >>> rng = np.random.default_rng()
1741
+ >>> qrng = stats.qmc.Halton(d=dim, seed=rng)
1742
+ >>> n_estimates = 8
1743
+ >>> res = qmc_quad(func, a, b, n_estimates=n_estimates, qrng=qrng)
1744
+ >>> res.integral, res.standard_error
1745
+ (0.00018429555666024108, 1.0389431116001344e-07)
1746
+
1747
+ A two-sided, 99% confidence interval for the integral may be estimated
1748
+ as:
1749
+
1750
+ >>> t = stats.t(df=n_estimates-1, loc=res.integral,
1751
+ ... scale=res.standard_error)
1752
+ >>> t.interval(0.99)
1753
+ (0.0001839319802536469, 0.00018465913306683527)
1754
+
1755
+ Indeed, the value reported by `scipy.stats.multivariate_normal` is
1756
+ within this range.
1757
+
1758
+ >>> stats.multivariate_normal.cdf(b, mean, cov, lower_limit=a)
1759
+ 0.00018430867675187443
1760
+
1761
+ """
1762
+ args = _qmc_quad_iv(func, a, b, n_points, n_estimates, qrng, log)
1763
+ func, a, b, n_points, n_estimates, qrng, rng, log, stats = args
1764
+
1765
+ def sum_product(integrands, dA, log=False):
1766
+ if log:
1767
+ return logsumexp(integrands) + np.log(dA)
1768
+ else:
1769
+ return np.sum(integrands * dA)
1770
+
1771
+ def mean(estimates, log=False):
1772
+ if log:
1773
+ return logsumexp(estimates) - np.log(n_estimates)
1774
+ else:
1775
+ return np.mean(estimates)
1776
+
1777
+ def std(estimates, m=None, ddof=0, log=False):
1778
+ m = m or mean(estimates, log)
1779
+ if log:
1780
+ estimates, m = np.broadcast_arrays(estimates, m)
1781
+ temp = np.vstack((estimates, m + np.pi * 1j))
1782
+ diff = logsumexp(temp, axis=0)
1783
+ return np.real(0.5 * (logsumexp(2 * diff)
1784
+ - np.log(n_estimates - ddof)))
1785
+ else:
1786
+ return np.std(estimates, ddof=ddof)
1787
+
1788
+ def sem(estimates, m=None, s=None, log=False):
1789
+ m = m or mean(estimates, log)
1790
+ s = s or std(estimates, m, ddof=1, log=log)
1791
+ if log:
1792
+ return s - 0.5*np.log(n_estimates)
1793
+ else:
1794
+ return s / np.sqrt(n_estimates)
1795
+
1796
+ # The sign of the integral depends on the order of the limits. Fix this by
1797
+ # ensuring that lower bounds are indeed lower and setting sign of resulting
1798
+ # integral manually
1799
+ if np.any(a == b):
1800
+ message = ("A lower limit was equal to an upper limit, so the value "
1801
+ "of the integral is zero by definition.")
1802
+ warnings.warn(message, stacklevel=2)
1803
+ return QMCQuadResult(-np.inf if log else 0, 0)
1804
+
1805
+ i_swap = b < a
1806
+ sign = (-1)**(i_swap.sum(axis=-1)) # odd # of swaps -> negative
1807
+ a[i_swap], b[i_swap] = b[i_swap], a[i_swap]
1808
+
1809
+ A = np.prod(b - a)
1810
+ dA = A / n_points
1811
+
1812
+ estimates = np.zeros(n_estimates)
1813
+ rngs = _rng_spawn(qrng.rng, n_estimates)
1814
+ for i in range(n_estimates):
1815
+ # Generate integral estimate
1816
+ sample = qrng.random(n_points)
1817
+ # The rationale for transposing is that this allows users to easily
1818
+ # unpack `x` into separate variables, if desired. This is consistent
1819
+ # with the `xx` array passed into the `scipy.integrate.nquad` `func`.
1820
+ x = stats.qmc.scale(sample, a, b).T # (n_dim, n_points)
1821
+ integrands = func(x)
1822
+ estimates[i] = sum_product(integrands, dA, log)
1823
+
1824
+ # Get a new, independently-scrambled QRNG for next time
1825
+ qrng = type(qrng)(seed=rngs[i], **qrng._init_quad)
1826
+
1827
+ integral = mean(estimates, log)
1828
+ standard_error = sem(estimates, m=integral, log=log)
1829
+ integral = integral + np.pi*1j if (log and sign < 0) else integral*sign
1830
+ return QMCQuadResult(integral, standard_error)
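+
+
+ # Illustrative sketch (hypothetical helper, not part of SciPy): the core of a
+ # single QMC estimate in `qmc_quad` is simply "average integrand value times
+ # volume", i.e. sum(f(x_i)) * dA with dA = volume / n_points.
+ def _single_qmc_estimate_demo(n_points=1024):
+     from scipy import stats
+     a, b = np.zeros(2), np.ones(2)
+     qrng = stats.qmc.Halton(d=2, seed=0)
+     x = stats.qmc.scale(qrng.random(n_points), a, b).T  # shape (d, n_points)
+     integrands = np.sum(x**2, axis=0)                   # integrand x0**2 + x1**2
+     dA = np.prod(b - a) / n_points
+     return np.sum(integrands * dA)                       # approximately 2/3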
venv/lib/python3.10/site-packages/scipy/integrate/_tanhsinh.py ADDED
@@ -0,0 +1,1231 @@
1
+ # mypy: disable-error-code="attr-defined"
2
+ import numpy as np
3
+ from scipy import special
4
+ import scipy._lib._elementwise_iterative_method as eim
5
+ from scipy._lib._util import _RichResult
6
+
7
+ # todo:
8
+ # figure out warning situation
9
+ # address https://github.com/scipy/scipy/pull/18650#discussion_r1233032521
10
+ # without `minweight`, we are also suppressing infinities within the interval.
11
+ # Is that OK? If so, we can probably get rid of `status=3`.
12
+ # Add heuristic to stop when improvement is too slow / antithrashing
13
+ # support singularities? interval subdivision? this feature will be added
14
+ # eventually, but do we adjust the interface now?
15
+ # When doing log-integration, should the tolerances control the error of the
16
+ # log-integral or the error of the integral? The trouble is that `log`
17
+ # inherently loses some precision so it may not be possible to refine
18
+ # the integral further. Example: 7th moment of stats.f(15, 20)
19
+ # respect function evaluation limit?
20
+ # make public?
21
+
22
+
23
+ def _tanhsinh(f, a, b, *, args=(), log=False, maxfun=None, maxlevel=None,
24
+ minlevel=2, atol=None, rtol=None, preserve_shape=False,
25
+ callback=None):
26
+ """Evaluate a convergent integral numerically using tanh-sinh quadrature.
27
+
28
+ In practice, tanh-sinh quadrature achieves quadratic convergence for
29
+ many integrands: the number of accurate *digits* scales roughly linearly
30
+ with the number of function evaluations [1]_.
31
+
32
+ Either or both of the limits of integration may be infinite, and
33
+ singularities at the endpoints are acceptable. Divergent integrals and
34
+ integrands with non-finite derivatives or singularities within an interval
35
+ are out of scope, but the latter may be evaluated by calling `_tanhsinh` on
36
+ each sub-interval separately.
37
+
38
+ Parameters
39
+ ----------
40
+ f : callable
41
+ The function to be integrated. The signature must be::
42
+ func(x: ndarray, *fargs) -> ndarray
43
+ where each element of ``x`` is a finite real and ``fargs`` is a tuple,
44
+ which may contain an arbitrary number of arrays that are broadcastable
45
+ with `x`. ``func`` must be an elementwise-scalar function; see
46
+ documentation of parameter `preserve_shape` for details.
47
+ If ``func`` returns a value with complex dtype when evaluated at
48
+ either endpoint, subsequent arguments ``x`` will have complex dtype
49
+ (but zero imaginary part).
50
+ a, b : array_like
51
+ Real lower and upper limits of integration. Must be broadcastable.
52
+ Elements may be infinite.
53
+ args : tuple, optional
54
+ Additional positional arguments to be passed to `func`. Must be arrays
55
+ broadcastable with `a` and `b`. If the callable to be integrated
56
+ requires arguments that are not broadcastable with `a` and `b`, wrap
57
+ that callable with `f`. See Examples.
58
+ log : bool, default: False
59
+ Setting to True indicates that `f` returns the log of the integrand
60
+ and that `atol` and `rtol` are expressed as the logs of the absolute
61
+ and relative errors. In this case, the result object will contain the
62
+ log of the integral and error. This is useful for integrands for which
63
+ numerical underflow or overflow would lead to inaccuracies.
64
+ When ``log=True``, the integrand (the exponential of `f`) must be real,
65
+ but it may be negative, in which case the log of the integrand is a
66
+ complex number with an imaginary part that is an odd multiple of π.
67
+ maxlevel : int, default: 10
68
+ The maximum refinement level of the algorithm.
69
+
70
+ At the zeroth level, `f` is called once, performing 16 function
71
+ evaluations. At each subsequent level, `f` is called once more,
72
+ approximately doubling the number of function evaluations that have
73
+ been performed. Accordingly, for many integrands, each successive level
74
+ will double the number of accurate digits in the result (up to the
75
+ limits of floating point precision).
76
+
77
+ The algorithm will terminate after completing level `maxlevel` or after
78
+ another termination condition is satisfied, whichever comes first.
79
+ minlevel : int, default: 2
80
+ The level at which to begin iteration (default: 2). This does not
81
+ change the total number of function evaluations or the abscissae at
82
+ which the function is evaluated; it changes only the *number of times*
83
+ `f` is called. If ``minlevel=k``, then the integrand is evaluated at
84
+ all abscissae from levels ``0`` through ``k`` in a single call.
85
+ Note that if `minlevel` exceeds `maxlevel`, the provided `minlevel` is
86
+ ignored, and `minlevel` is set equal to `maxlevel`.
87
+ atol, rtol : float, optional
88
+ Absolute termination tolerance (default: 0) and relative termination
89
+ tolerance (default: ``eps**0.75``, where ``eps`` is the precision of
90
+ the result dtype), respectively. The error estimate is as
91
+ described in [1]_ Section 5. While not theoretically rigorous or
92
+ conservative, it is said to work well in practice. Must be non-negative
93
+ and finite if `log` is False, and must be expressed as the log of a
94
+ non-negative and finite number if `log` is True.
95
+ preserve_shape : bool, default: False
96
+ In the following, "arguments of `f`" refers to the array ``x`` and
97
+ any arrays within ``fargs``. Let ``shape`` be the broadcasted shape
98
+ of `a`, `b`, and all elements of `args` (which is conceptually
99
+ distinct from ``fargs`` passed into `f`).
100
+
101
+ - When ``preserve_shape=False`` (default), `f` must accept arguments
102
+ of *any* broadcastable shapes.
103
+
104
+ - When ``preserve_shape=True``, `f` must accept arguments of shape
105
+ ``shape`` *or* ``shape + (n,)``, where ``(n,)`` is the number of
106
+ abscissae at which the function is being evaluated.
107
+
108
+ In either case, for each scalar element ``xi`` within `x`, the array
109
+ returned by `f` must include the scalar ``f(xi)`` at the same index.
110
+ Consequently, the shape of the output is always the shape of the input
111
+ ``x``.
112
+
113
+ See Examples.
114
+
115
+ callback : callable, optional
116
+ An optional user-supplied function to be called before the first
117
+ iteration and after each iteration.
118
+ Called as ``callback(res)``, where ``res`` is a ``_RichResult``
119
+ similar to that returned by `_differentiate` (but containing the
120
+ current iterate's values of all variables). If `callback` raises a
121
+ ``StopIteration``, the algorithm will terminate immediately and
122
+ `_tanhsinh` will return a result object.
123
+
124
+ Returns
125
+ -------
126
+ res : _RichResult
127
+ An instance of `scipy._lib._util._RichResult` with the following
128
+ attributes. (The descriptions are written as though the values will be
129
+ scalars; however, if `func` returns an array, the outputs will be
130
+ arrays of the same shape.)
131
+ success : bool
132
+ ``True`` when the algorithm terminated successfully (status ``0``).
133
+ status : int
134
+ An integer representing the exit status of the algorithm.
135
+ ``0`` : The algorithm converged to the specified tolerances.
136
+ ``-1`` : (unused)
137
+ ``-2`` : The maximum number of iterations was reached.
138
+ ``-3`` : A non-finite value was encountered.
139
+ ``-4`` : Iteration was terminated by `callback`.
140
+ ``1`` : The algorithm is proceeding normally (in `callback` only).
141
+ integral : float
142
+ An estimate of the integral
143
+ error : float
144
+ An estimate of the error. Only available if level two or higher
145
+ has been completed; otherwise NaN.
146
+ maxlevel : int
147
+ The maximum refinement level used.
148
+ nfev : int
149
+ The number of points at which `func` was evaluated.
150
+
151
+ See Also
152
+ --------
153
+ quad, quadrature
154
+
155
+ Notes
156
+ -----
157
+ Implements the algorithm as described in [1]_ with minor adaptations for
158
+ finite-precision arithmetic, including some described by [2]_ and [3]_. The
159
+ tanh-sinh scheme was originally introduced in [4]_.
160
+
161
+ Due to floating-point error in the abscissae, the function may be evaluated
162
+ at the endpoints of the interval during iterations. The values returned by
163
+ the function at the endpoints will be ignored.
164
+
165
+ References
166
+ ----------
167
+ [1] Bailey, David H., Karthik Jeyabalan, and Xiaoye S. Li. "A comparison of
168
+ three high-precision quadrature schemes." Experimental Mathematics 14.3
169
+ (2005): 317-329.
170
+ [2] Vanherck, Joren, Bart Sorée, and Wim Magnus. "Tanh-sinh quadrature for
171
+ single and multiple integration using floating-point arithmetic."
172
+ arXiv preprint arXiv:2007.15057 (2020).
173
+ [3] van Engelen, Robert A. "Improving the Double Exponential Quadrature
174
+ Tanh-Sinh, Sinh-Sinh and Exp-Sinh Formulas."
175
+ https://www.genivia.com/files/qthsh.pdf
176
+ [4] Takahasi, Hidetosi, and Masatake Mori. "Double exponential formulas for
177
+ numerical integration." Publications of the Research Institute for
178
+ Mathematical Sciences 9.3 (1974): 721-741.
179
+
180
+ Example
181
+ -------
182
+ Evaluate the Gaussian integral:
183
+
184
+ >>> import numpy as np
185
+ >>> from scipy.integrate._tanhsinh import _tanhsinh
186
+ >>> def f(x):
187
+ ... return np.exp(-x**2)
188
+ >>> res = _tanhsinh(f, -np.inf, np.inf)
189
+ >>> res.integral # true value is np.sqrt(np.pi), 1.7724538509055159
190
+ 1.7724538509055159
191
+ >>> res.error # actual error is 0
192
+ 4.0007963937534104e-16
193
+
194
+ The value of the Gaussian function (bell curve) is nearly zero for
195
+ arguments sufficiently far from zero, so the value of the integral
196
+ over a finite interval is nearly the same.
197
+
198
+ >>> _tanhsinh(f, -20, 20).integral
199
+ 1.772453850905518
200
+
201
+ However, with unfavorable integration limits, the integration scheme
202
+ may not be able to find the important region.
203
+
204
+ >>> _tanhsinh(f, -np.inf, 1000).integral
205
+ 4.500490856620352
206
+
207
+ In such cases, or when there are singularities within the interval,
208
+ break the integral into parts with endpoints at the important points.
209
+
210
+ >>> _tanhsinh(f, -np.inf, 0).integral + _tanhsinh(f, 0, 1000).integral
211
+ 1.772453850905404
212
+
213
+ For integration involving very large or very small magnitudes, use
214
+ log-integration. (For illustrative purposes, the following example shows a
215
+ case in which both regular and log-integration work, but for more extreme
216
+ limits of integration, log-integration would avoid the underflow
217
+ experienced when evaluating the integral normally.)
218
+
219
+ >>> res = _tanhsinh(f, 20, 30, rtol=1e-10)
220
+ >>> res.integral, res.error
221
+ 4.7819613911309014e-176, 4.670364401645202e-187
222
+ >>> def log_f(x):
223
+ ... return -x**2
224
+ >>> res = _tanhsinh(log_f, 20, 30, log=True, rtol=np.log(1e-10))
+ >>> np.exp(res.integral), np.exp(res.error)
225
+ 4.7819613911306924e-176, 4.670364401645093e-187
226
+
227
+ The limits of integration and elements of `args` may be broadcastable
228
+ arrays, and integration is performed elementwise.
229
+
230
+ >>> from scipy import stats
231
+ >>> dist = stats.gausshyper(13.8, 3.12, 2.51, 5.18)
232
+ >>> a, b = dist.support()
233
+ >>> x = np.linspace(a, b, 100)
234
+ >>> res = _tanhsinh(dist.pdf, a, x)
235
+ >>> ref = dist.cdf(x)
236
+ >>> np.allclose(res.integral, ref)
+ True
237
+
238
+ By default, `preserve_shape` is False, and therefore the callable
239
+ `f` may be called with arrays of any broadcastable shapes.
240
+ For example:
241
+
242
+ >>> shapes = []
243
+ >>> def f(x, c):
244
+ ... shape = np.broadcast_shapes(x.shape, c.shape)
245
+ ... shapes.append(shape)
246
+ ... return np.sin(c*x)
247
+ >>>
248
+ >>> c = [1, 10, 30, 100]
249
+ >>> res = _tanhsinh(f, 0, 1, args=(c,), minlevel=1)
250
+ >>> shapes
251
+ [(4,), (4, 66), (3, 64), (2, 128), (1, 256)]
252
+
253
+ To understand where these shapes are coming from - and to better
254
+ understand how `_tanhsinh` computes accurate results - note that
255
+ higher values of ``c`` correspond with higher frequency sinusoids.
256
+ The higher frequency sinusoids make the integrand more complicated,
257
+ so more function evaluations are required to achieve the target
258
+ accuracy:
259
+
260
+ >>> res.nfev
261
+ array([ 67, 131, 259, 515])
262
+
263
+ The initial ``shape``, ``(4,)``, corresponds with evaluating the
264
+ integrand at a single abscissa and all four frequencies; this is used
265
+ for input validation and to determine the size and dtype of the arrays
266
+ that store results. The next shape corresponds with evaluating the
267
+ integrand at an initial grid of abscissae and all four frequencies.
268
+ Successive calls to the function double the total number of abscissae at
269
+ which the function has been evaluated. However, in later function
270
+ evaluations, the integrand is evaluated at fewer frequencies because
271
+ the corresponding integral has already converged to the required
272
+ tolerance. This saves function evaluations to improve performance, but
273
+ it requires the function to accept arguments of any shape.
274
+
275
+ "Vector-valued" integrands, such as those written for use with
276
+ `scipy.integrate.quad_vec`, are unlikely to satisfy this requirement.
277
+ For example, consider
278
+
279
+ >>> def f(x):
280
+ ... return [x, np.sin(10*x), np.cos(30*x), x*np.sin(100*x)**2]
281
+
282
+ This integrand is not compatible with `_tanhsinh` as written; for instance,
283
+ the shape of the output will not be the same as the shape of ``x``. Such a
284
+ function *could* be converted to a compatible form with the introduction of
285
+ additional parameters, but this would be inconvenient. In such cases,
286
+ a simpler solution would be to use `preserve_shape`.
287
+
288
+ >>> shapes = []
289
+ >>> def f(x):
290
+ ... shapes.append(x.shape)
291
+ ... x0, x1, x2, x3 = x
292
+ ... return [x0, np.sin(10*x1), np.cos(30*x2), x3*np.sin(100*x3)]
293
+ >>>
294
+ >>> a = np.zeros(4)
295
+ >>> res = _tanhsinh(f, a, 1, preserve_shape=True)
296
+ >>> shapes
297
+ [(4,), (4, 66), (4, 64), (4, 128), (4, 256)]
298
+
299
+ Here, the broadcasted shape of `a` and `b` is ``(4,)``. With
300
+ ``preserve_shape=True``, the function may be called with argument
301
+ ``x`` of shape ``(4,)`` or ``(4, n)``, and this is what we observe.
302
+
303
+ """
304
+ (f, a, b, log, maxfun, maxlevel, minlevel,
305
+ atol, rtol, args, preserve_shape, callback) = _tanhsinh_iv(
306
+ f, a, b, log, maxfun, maxlevel, minlevel, atol,
307
+ rtol, args, preserve_shape, callback)
308
+
309
+ # Initialization
310
+ # `eim._initialize` does several important jobs, including
311
+ # ensuring that limits, each of the `args`, and the output of `f`
312
+ # broadcast correctly and are of consistent types. To save a function
313
+ # evaluation, I pass the midpoint of the integration interval. This comes
314
+ # at a cost of some gymnastics to ensure that the midpoint has the right
315
+ # shape and dtype. Did you know that 0d and >0d arrays follow different
316
+ # type promotion rules?
317
+ with np.errstate(over='ignore', invalid='ignore', divide='ignore'):
318
+ c = ((a.ravel() + b.ravel())/2).reshape(a.shape)
319
+ inf_a, inf_b = np.isinf(a), np.isinf(b)
320
+ c[inf_a] = b[inf_a] - 1 # takes care of infinite a
321
+ c[inf_b] = a[inf_b] + 1 # takes care of infinite b
322
+ c[inf_a & inf_b] = 0 # takes care of infinite a and b
323
+ temp = eim._initialize(f, (c,), args, complex_ok=True,
324
+ preserve_shape=preserve_shape)
325
+ f, xs, fs, args, shape, dtype = temp
326
+ a = np.broadcast_to(a, shape).astype(dtype).ravel()
327
+ b = np.broadcast_to(b, shape).astype(dtype).ravel()
328
+
329
+ # Transform improper integrals
330
+ a, b, a0, negative, abinf, ainf, binf = _transform_integrals(a, b)
331
+
332
+ # Define variables we'll need
333
+ nit, nfev = 0, 1 # one function evaluation performed above
334
+ zero = -np.inf if log else 0
335
+ pi = dtype.type(np.pi)
336
+ maxiter = maxlevel - minlevel + 1
337
+ eps = np.finfo(dtype).eps
338
+ if rtol is None:
339
+ rtol = 0.75*np.log(eps) if log else eps**0.75
340
+
341
+ Sn = np.full(shape, zero, dtype=dtype).ravel() # latest integral estimate
342
+ Sn[np.isnan(a) | np.isnan(b) | np.isnan(fs[0])] = np.nan
343
+ Sk = np.empty_like(Sn).reshape(-1, 1)[:, 0:0] # all integral estimates
344
+ aerr = np.full(shape, np.nan, dtype=dtype).ravel() # absolute error
345
+ status = np.full(shape, eim._EINPROGRESS, dtype=int).ravel()
346
+ h0 = np.real(_get_base_step(dtype=dtype)) # base step
347
+
348
+ # For term `d4` of error estimate ([1] Section 5), we need to keep the
349
+ # most extreme abscissae and corresponding `fj`s, `wj`s in Euler-Maclaurin
350
+ # sum. Here, we initialize these variables.
351
+ xr0 = np.full(shape, -np.inf, dtype=dtype).ravel()
352
+ fr0 = np.full(shape, np.nan, dtype=dtype).ravel()
353
+ wr0 = np.zeros(shape, dtype=dtype).ravel()
354
+ xl0 = np.full(shape, np.inf, dtype=dtype).ravel()
355
+ fl0 = np.full(shape, np.nan, dtype=dtype).ravel()
356
+ wl0 = np.zeros(shape, dtype=dtype).ravel()
357
+ d4 = np.zeros(shape, dtype=dtype).ravel()
358
+
359
+ work = _RichResult(
360
+ Sn=Sn, Sk=Sk, aerr=aerr, h=h0, log=log, dtype=dtype, pi=pi, eps=eps,
361
+ a=a.reshape(-1, 1), b=b.reshape(-1, 1), # integration limits
362
+ n=minlevel, nit=nit, nfev=nfev, status=status, # iter/eval counts
363
+ xr0=xr0, fr0=fr0, wr0=wr0, xl0=xl0, fl0=fl0, wl0=wl0, d4=d4, # err est
364
+ ainf=ainf, binf=binf, abinf=abinf, a0=a0.reshape(-1, 1)) # transforms
365
+ # Constant scalars don't need to be put in `work` unless they need to be
366
+ # passed outside `tanhsinh`. Examples: atol, rtol, h0, minlevel.
367
+
368
+ # Correspondence between terms in the `work` object and the result
369
+ res_work_pairs = [('status', 'status'), ('integral', 'Sn'),
370
+ ('error', 'aerr'), ('nit', 'nit'), ('nfev', 'nfev')]
371
+
372
+ def pre_func_eval(work):
373
+ # Determine abscissae at which to evaluate `f`
374
+ work.h = h0 / 2**work.n
375
+ xjc, wj = _get_pairs(work.n, h0, dtype=work.dtype,
376
+ inclusive=(work.n == minlevel))
377
+ work.xj, work.wj = _transform_to_limits(xjc, wj, work.a, work.b)
378
+
379
+ # Perform abscissae substitutions for infinite limits of integration
380
+ xj = work.xj.copy()
381
+ xj[work.abinf] = xj[work.abinf] / (1 - xj[work.abinf]**2)
382
+ xj[work.binf] = 1/xj[work.binf] - 1 + work.a0[work.binf]
383
+ xj[work.ainf] *= -1
384
+ return xj
385
+
386
+ def post_func_eval(x, fj, work):
387
+ # Weight integrand as required by substitutions for infinite limits
388
+ if work.log:
389
+ fj[work.abinf] += (np.log(1 + work.xj[work.abinf] ** 2)
390
+ - 2*np.log(1 - work.xj[work.abinf] ** 2))
391
+ fj[work.binf] -= 2 * np.log(work.xj[work.binf])
392
+ else:
393
+ fj[work.abinf] *= ((1 + work.xj[work.abinf]**2) /
394
+ (1 - work.xj[work.abinf]**2)**2)
395
+ fj[work.binf] *= work.xj[work.binf]**-2.
396
+
397
+ # Estimate integral with Euler-Maclaurin Sum
398
+ fjwj, Sn = _euler_maclaurin_sum(fj, work)
399
+ if work.Sk.shape[-1]:
400
+ Snm1 = work.Sk[:, -1]
401
+ Sn = (special.logsumexp([Snm1 - np.log(2), Sn], axis=0) if log
402
+ else Snm1 / 2 + Sn)
403
+
404
+ work.fjwj = fjwj
405
+ work.Sn = Sn
406
+
407
+ def check_termination(work):
408
+ """Terminate due to convergence or encountering non-finite values"""
409
+ stop = np.zeros(work.Sn.shape, dtype=bool)
410
+
411
+ # Terminate before first iteration if integration limits are equal
412
+ if work.nit == 0:
413
+ i = (work.a == work.b).ravel() # ravel singleton dimension
414
+ zero = -np.inf if log else 0
415
+ work.Sn[i] = zero
416
+ work.aerr[i] = zero
417
+ work.status[i] = eim._ECONVERGED
418
+ stop[i] = True
419
+ else:
420
+ # Terminate if convergence criterion is met
421
+ work.rerr, work.aerr = _estimate_error(work)
422
+ i = ((work.rerr < rtol) | (work.rerr + np.real(work.Sn) < atol) if log
423
+ else (work.rerr < rtol) | (work.rerr * abs(work.Sn) < atol))
424
+ work.status[i] = eim._ECONVERGED
425
+ stop[i] = True
426
+
427
+ # Terminate if integral estimate becomes invalid
428
+ if log:
429
+ i = (np.isposinf(np.real(work.Sn)) | np.isnan(work.Sn)) & ~stop
430
+ else:
431
+ i = ~np.isfinite(work.Sn) & ~stop
432
+ work.status[i] = eim._EVALUEERR
433
+ stop[i] = True
434
+
435
+ return stop
436
+
437
+ def post_termination_check(work):
438
+ work.n += 1
439
+ work.Sk = np.concatenate((work.Sk, work.Sn[:, np.newaxis]), axis=-1)
440
+ return
441
+
442
+ def customize_result(res, shape):
443
+ # If the integration limits were such that b < a, we reversed them
444
+ # to perform the calculation, and the final result needs to be negated.
445
+ if log and np.any(negative):
446
+ pi = res['integral'].dtype.type(np.pi)
447
+ j = np.complex64(1j) # minimum complex type
448
+ res['integral'] = res['integral'] + negative*pi*j
449
+ else:
450
+ res['integral'][negative] *= -1
451
+
452
+ # For this algorithm, it seems more appropriate to report the maximum
453
+ # level rather than the number of iterations in which it was performed.
454
+ res['maxlevel'] = minlevel + res['nit'] - 1
455
+ res['maxlevel'][res['nit'] == 0] = -1
456
+ del res['nit']
457
+ return shape
458
+
459
+ # Suppress all warnings initially, since there are many places in the code
460
+ # for which this is expected behavior.
461
+ with np.errstate(over='ignore', invalid='ignore', divide='ignore'):
462
+ res = eim._loop(work, callback, shape, maxiter, f, args, dtype, pre_func_eval,
463
+ post_func_eval, check_termination, post_termination_check,
464
+ customize_result, res_work_pairs, preserve_shape)
465
+ return res
466
+
467
+
468
+ def _get_base_step(dtype=np.float64):
469
+ # Compute the base step length for the provided dtype. Theoretically, the
470
+ # Euler-Maclaurin sum is infinite, but it gets cut off when either the
471
+ # weights underflow or the abscissae cannot be distinguished from the
472
+ # limits of integration. The latter happens to occur first for float32 and
473
+ # float64, and it occurs when `xjc` (the abscissa complement)
474
+ # in `_compute_pair` underflows. We can solve for the argument `tmax` at
475
+ # which it will underflow using [2] Eq. 13.
476
+ fmin = 4*np.finfo(dtype).tiny # stay a little away from the limit
477
+ tmax = np.arcsinh(np.log(2/fmin - 1) / np.pi)
478
+
479
+ # Based on this, we can choose a base step size `h` for level 0.
480
+ # The number of function evaluations will be `2 + m*2^(k+1)`, where `k` is
481
+ # the level and `m` is an integer we get to choose. I choose
482
+ # m = _N_BASE_STEPS = `8` somewhat arbitrarily, but a rationale is that a
483
+ # power of 2 makes floating point arithmetic more predictable. It also
484
+ # results in a base step size close to `1`, which is what [1] uses (and I
485
+ # used here until I found [2] and these ideas settled).
486
+ h0 = tmax / _N_BASE_STEPS
487
+ return h0.astype(dtype)
488
+
489
+
490
+ _N_BASE_STEPS = 8
491
+
492
+
493
+ def _compute_pair(k, h0):
494
+ # Compute the abscissa-weight pairs for each level k. See [1] page 9.
495
+
496
+ # For now, we compute and store in 64-bit precision. If higher-precision
497
+ # data types become better supported, it would be good to compute these
498
+ # using the highest precision available. Or, once there is an Array API-
499
+ # compatible arbitrary precision array, we can compute at the required
500
+ # precision.
501
+
502
+ # "....each level k of abscissa-weight pairs uses h = 2 **-k"
503
+ # We adapt to floating point arithmetic using ideas of [2].
504
+ h = h0 / 2**k
505
+ max = _N_BASE_STEPS * 2**k
506
+
507
+ # For iterations after the first, "....the integrand function needs to be
508
+ # evaluated only at the odd-indexed abscissas at each level."
509
+ j = np.arange(max+1) if k == 0 else np.arange(1, max+1, 2)
510
+ jh = j * h
511
+
512
+ # "In this case... the weights wj = u1/cosh(u2)^2, where..."
513
+ pi_2 = np.pi / 2
514
+ u1 = pi_2*np.cosh(jh)
515
+ u2 = pi_2*np.sinh(jh)
516
+ # Denominators get big here. Overflow then underflow doesn't need warning.
517
+ # with np.errstate(under='ignore', over='ignore'):
518
+ wj = u1 / np.cosh(u2)**2
519
+ # "We actually store 1-xj = 1/(...)."
520
+ xjc = 1 / (np.exp(u2) * np.cosh(u2)) # complement of xj = np.tanh(u2)
521
+
522
+ # When level k == 0, the zeroth xj corresponds with xj = 0. To simplify
523
+ # code, the function will be evaluated there twice; each gets half weight.
524
+ wj[0] = wj[0] / 2 if k == 0 else wj[0]
525
+
526
+ return xjc, wj # store at full precision
527
+
528
+
529
+ def _pair_cache(k, h0):
530
+ # Cache the abscissa-weight pairs up to a specified level.
531
+ # Abscissae and weights of consecutive levels are concatenated.
532
+ # `index` records the indices that correspond with each level:
533
+ # `xjc[index[k]:index[k+1]` extracts the level `k` abscissae.
534
+ if h0 != _pair_cache.h0:
535
+ _pair_cache.xjc = np.empty(0)
536
+ _pair_cache.wj = np.empty(0)
537
+ _pair_cache.indices = [0]
538
+
539
+ xjcs = [_pair_cache.xjc]
540
+ wjs = [_pair_cache.wj]
541
+
542
+ for i in range(len(_pair_cache.indices)-1, k + 1):
543
+ xjc, wj = _compute_pair(i, h0)
544
+ xjcs.append(xjc)
545
+ wjs.append(wj)
546
+ _pair_cache.indices.append(_pair_cache.indices[-1] + len(xjc))
547
+
548
+ _pair_cache.xjc = np.concatenate(xjcs)
549
+ _pair_cache.wj = np.concatenate(wjs)
550
+ _pair_cache.h0 = h0
551
+
552
+ _pair_cache.xjc = np.empty(0)
553
+ _pair_cache.wj = np.empty(0)
554
+ _pair_cache.indices = [0]
555
+ _pair_cache.h0 = None
556
+
557
+
558
+ def _get_pairs(k, h0, inclusive=False, dtype=np.float64):
559
+ # Retrieve the specified abscissa-weight pairs from the cache
560
+ # If `inclusive`, return all up to and including the specified level
561
+ if len(_pair_cache.indices) <= k+2 or h0 != _pair_cache.h0:
562
+ _pair_cache(k, h0)
563
+
564
+ xjc = _pair_cache.xjc
565
+ wj = _pair_cache.wj
566
+ indices = _pair_cache.indices
567
+
568
+ start = 0 if inclusive else indices[k]
569
+ end = indices[k+1]
570
+
571
+ return xjc[start:end].astype(dtype), wj[start:end].astype(dtype)
572
+
573
+
574
+ def _transform_to_limits(xjc, wj, a, b):
575
+ # Transform integral according to user-specified limits. This is just
576
+ # math that follows from the fact that the standard limits are (-1, 1).
577
+ # Note: If we had stored xj instead of xjc, we would have
578
+ # xj = alpha * xj + beta, where beta = (a + b)/2
579
+ alpha = (b - a) / 2
580
+ xj = np.concatenate((-alpha * xjc + b, alpha * xjc + a), axis=-1)
581
+ wj = wj*alpha # arguments get broadcasted, so we can't use *=
582
+ wj = np.concatenate((wj, wj), axis=-1)
583
+
584
+ # Points at the boundaries can be generated due to finite precision
585
+ # arithmetic, but these function values aren't supposed to be included in
586
+ # the Euler-Maclaurin sum. Ideally we wouldn't evaluate the function at
587
+ # these points; however, we can't easily filter out points since this
588
+ # function is vectorized. Instead, zero the weights.
589
+ invalid = (xj <= a) | (xj >= b)
590
+ wj[invalid] = 0
591
+ return xj, wj
592
+
593
+
594
+ def _euler_maclaurin_sum(fj, work):
595
+ # Perform the Euler-Maclaurin Sum, [1] Section 4
596
+
597
+ # The error estimate needs to know the magnitude of the last term
598
+ # omitted from the Euler-Maclaurin sum. This is a bit involved because
599
+ # it may have been computed at a previous level. I sure hope it's worth
600
+ # all the trouble.
601
+ xr0, fr0, wr0 = work.xr0, work.fr0, work.wr0
602
+ xl0, fl0, wl0 = work.xl0, work.fl0, work.wl0
603
+
604
+ # It is much more convenient to work with the transposes of our work
605
+ # variables here.
606
+ xj, fj, wj = work.xj.T, fj.T, work.wj.T
607
+ n_x, n_active = xj.shape # number of abscissae, number of active elements
608
+
609
+ # We'll work with the left and right sides separately
610
+ xr, xl = xj.reshape(2, n_x // 2, n_active).copy() # this gets modified
611
+ fr, fl = fj.reshape(2, n_x // 2, n_active)
612
+ wr, wl = wj.reshape(2, n_x // 2, n_active)
613
+
614
+ invalid_r = ~np.isfinite(fr) | (wr == 0)
615
+ invalid_l = ~np.isfinite(fl) | (wl == 0)
616
+
617
+ # integer index of the maximum abscissa at this level
618
+ xr[invalid_r] = -np.inf
619
+ ir = np.argmax(xr, axis=0, keepdims=True)
620
+ # abscissa, function value, and weight at this index
621
+ xr_max = np.take_along_axis(xr, ir, axis=0)[0]
622
+ fr_max = np.take_along_axis(fr, ir, axis=0)[0]
623
+ wr_max = np.take_along_axis(wr, ir, axis=0)[0]
624
+ # boolean indices at which maximum abscissa at this level exceeds
625
+ # the incumbent maximum abscissa (from all previous levels)
626
+ j = xr_max > xr0
627
+ # Update record of the incumbent abscissa, function value, and weight
628
+ xr0[j] = xr_max[j]
629
+ fr0[j] = fr_max[j]
630
+ wr0[j] = wr_max[j]
631
+
632
+ # integer index of the minimum abscissa at this level
633
+ xl[invalid_l] = np.inf
634
+ il = np.argmin(xl, axis=0, keepdims=True)
635
+ # abscissa, function value, and weight at this index
636
+ xl_min = np.take_along_axis(xl, il, axis=0)[0]
637
+ fl_min = np.take_along_axis(fl, il, axis=0)[0]
638
+ wl_min = np.take_along_axis(wl, il, axis=0)[0]
639
+ # boolean indices at which minimum abscissa at this level is less than
640
+ # the incumbent minimum abscissa (from all previous levels)
641
+ j = xl_min < xl0
642
+ # Update record of the incumbent abscissa, function value, and weight
643
+ xl0[j] = xl_min[j]
644
+ fl0[j] = fl_min[j]
645
+ wl0[j] = wl_min[j]
646
+ fj = fj.T
647
+
648
+ # Compute the error estimate `d4` - the magnitude of the leftmost or
649
+ # rightmost term, whichever is greater.
650
+ flwl0 = fl0 + np.log(wl0) if work.log else fl0 * wl0 # leftmost term
651
+ frwr0 = fr0 + np.log(wr0) if work.log else fr0 * wr0 # rightmost term
652
+ magnitude = np.real if work.log else np.abs
653
+ work.d4 = np.maximum(magnitude(flwl0), magnitude(frwr0))
654
+
655
+ # There are two approaches to dealing with function values that are
656
+ # numerically infinite due to approaching a singularity - zero them, or
657
+ # replace them with the function value at the nearest non-infinite point.
658
+ # [3] pg. 22 suggests the latter, so let's do that given that we have the
659
+ # information.
660
+ fr0b = np.broadcast_to(fr0[np.newaxis, :], fr.shape)
661
+ fl0b = np.broadcast_to(fl0[np.newaxis, :], fl.shape)
662
+ fr[invalid_r] = fr0b[invalid_r]
663
+ fl[invalid_l] = fl0b[invalid_l]
664
+
665
+ # When wj is zero, log emits a warning
666
+ # with np.errstate(divide='ignore'):
667
+ fjwj = fj + np.log(work.wj) if work.log else fj * work.wj
668
+
669
+ # update integral estimate
670
+ Sn = (special.logsumexp(fjwj + np.log(work.h), axis=-1) if work.log
671
+ else np.sum(fjwj, axis=-1) * work.h)
672
+
673
+ work.xr0, work.fr0, work.wr0 = xr0, fr0, wr0
674
+ work.xl0, work.fl0, work.wl0 = xl0, fl0, wl0
675
+
676
+ return fjwj, Sn
677
+
678
+
679
+ def _estimate_error(work):
680
+ # Estimate the error according to [1] Section 5
681
+
682
+ if work.n == 0 or work.nit == 0:
683
+ # The paper says to use "one" as the error before it can be calculated.
684
+ # NaN seems to be more appropriate.
685
+ nan = np.full_like(work.Sn, np.nan)
686
+ return nan, nan
687
+
688
+ indices = _pair_cache.indices
689
+
690
+ n_active = len(work.Sn) # number of active elements
691
+ axis_kwargs = dict(axis=-1, keepdims=True)
692
+
693
+ # With a jump start (starting at level higher than 0), we haven't
694
+ # explicitly calculated the integral estimate at lower levels. But we have
695
+ # all the function value-weight products, so we can compute the
696
+ # lower-level estimates.
697
+ if work.Sk.shape[-1] == 0:
698
+ h = 2 * work.h # step size at this level
699
+ n_x = indices[work.n] # number of abscissae up to this level
700
+ # The right and left fjwj terms from all levels are concatenated along
701
+ # the last axis. Get out only the terms up to this level.
702
+ fjwj_rl = work.fjwj.reshape(n_active, 2, -1)
703
+ fjwj = fjwj_rl[:, :, :n_x].reshape(n_active, 2*n_x)
704
+ # Compute the Euler-Maclaurin sum at this level
705
+ Snm1 = (special.logsumexp(fjwj, **axis_kwargs) + np.log(h) if work.log
706
+ else np.sum(fjwj, **axis_kwargs) * h)
707
+ work.Sk = np.concatenate((Snm1, work.Sk), axis=-1)
708
+
709
+ if work.n == 1:
710
+ nan = np.full_like(work.Sn, np.nan)
711
+ return nan, nan
712
+
713
+ # The paper says not to calculate the error for n<=2, but it's not clear
714
+ # about whether it starts at level 0 or level 1. We start at level 0, so
715
+ # why not compute the error beginning in level 2?
716
+ if work.Sk.shape[-1] < 2:
717
+ h = 4 * work.h # step size at this level
718
+ n_x = indices[work.n-1] # number of abscissae up to this level
719
+ # The right and left fjwj terms from all levels are concatenated along
720
+ # the last axis. Get out only the terms up to this level.
721
+ fjwj_rl = work.fjwj.reshape(len(work.Sn), 2, -1)
722
+ fjwj = fjwj_rl[..., :n_x].reshape(n_active, 2*n_x)
723
+ # Compute the Euler-Maclaurin sum at this level
724
+ Snm2 = (special.logsumexp(fjwj, **axis_kwargs) + np.log(h) if work.log
725
+ else np.sum(fjwj, **axis_kwargs) * h)
726
+ work.Sk = np.concatenate((Snm2, work.Sk), axis=-1)
727
+
728
+ Snm2 = work.Sk[..., -2]
729
+ Snm1 = work.Sk[..., -1]
730
+
731
+ e1 = work.eps
732
+
733
+ if work.log:
734
+ log_e1 = np.log(e1)
735
+ # Currently, only real integrals are supported in log-scale. All
736
+ # complex values have imaginary part in increments of pi*j, which just
737
+ # carries sign information of the original integral, so use of
738
+ # `np.real` here is equivalent to absolute value in real scale.
739
+ d1 = np.real(special.logsumexp([work.Sn, Snm1 + work.pi*1j], axis=0))
740
+ d2 = np.real(special.logsumexp([work.Sn, Snm2 + work.pi*1j], axis=0))
741
+ d3 = log_e1 + np.max(np.real(work.fjwj), axis=-1)
742
+ d4 = work.d4
743
+ aerr = np.max([d1 ** 2 / d2, 2 * d1, d3, d4], axis=0)
744
+ rerr = np.maximum(log_e1, aerr - np.real(work.Sn))
745
+ else:
746
+ # Note: explicit computation of log10 of each of these is unnecessary.
747
+ d1 = np.abs(work.Sn - Snm1)
748
+ d2 = np.abs(work.Sn - Snm2)
749
+ d3 = e1 * np.max(np.abs(work.fjwj), axis=-1)
750
+ d4 = work.d4
751
+ # If `d1` is 0, no need to warn. This does the right thing.
752
+ # with np.errstate(divide='ignore'):
753
+ aerr = np.max([d1**(np.log(d1)/np.log(d2)), d1**2, d3, d4], axis=0)
754
+ rerr = np.maximum(e1, aerr/np.abs(work.Sn))
755
+ return rerr, aerr.reshape(work.Sn.shape)
756
+
757
+
758
+ def _transform_integrals(a, b):
759
+ # Transform integrals to a form with finite a < b
760
+ # For b < a, we reverse the limits and will multiply the final result by -1
761
+ # For infinite limit on the right, we use the substitution x = 1/t - 1 + a
762
+ # For infinite limit on the left, we substitute x = -x and treat as above
763
+ # For infinite limits, we substitute x = t / (1-t**2)
764
+
765
+ negative = b < a
766
+ a[negative], b[negative] = b[negative], a[negative]
767
+
768
+ abinf = np.isinf(a) & np.isinf(b)
769
+ a[abinf], b[abinf] = -1, 1
770
+
771
+ ainf = np.isinf(a)
772
+ a[ainf], b[ainf] = -b[ainf], -a[ainf]
773
+
774
+ binf = np.isinf(b)
775
+ a0 = a.copy()
776
+ a[binf], b[binf] = 0, 1
777
+
778
+ return a, b, a0, negative, abinf, ainf, binf
779
+
780
+
781
+ def _tanhsinh_iv(f, a, b, log, maxfun, maxlevel, minlevel,
782
+ atol, rtol, args, preserve_shape, callback):
783
+ # Input validation and standardization
784
+
785
+ message = '`f` must be callable.'
786
+ if not callable(f):
787
+ raise ValueError(message)
788
+
789
+ message = 'All elements of `a` and `b` must be real numbers.'
790
+ a, b = np.broadcast_arrays(a, b)
791
+ if np.any(np.iscomplex(a)) or np.any(np.iscomplex(b)):
792
+ raise ValueError(message)
793
+
794
+ message = '`log` must be True or False.'
795
+ if log not in {True, False}:
796
+ raise ValueError(message)
797
+ log = bool(log)
798
+
799
+ if atol is None:
800
+ atol = -np.inf if log else 0
801
+
802
+ rtol_temp = rtol if rtol is not None else 0.
803
+
804
+ params = np.asarray([atol, rtol_temp, 0.])
805
+ message = "`atol` and `rtol` must be real numbers."
806
+ if not np.issubdtype(params.dtype, np.floating):
807
+ raise ValueError(message)
808
+
809
+ if log:
810
+ message = '`atol` and `rtol` may not be positive infinity.'
811
+ if np.any(np.isposinf(params)):
812
+ raise ValueError(message)
813
+ else:
814
+ message = '`atol` and `rtol` must be non-negative and finite.'
815
+ if np.any(params < 0) or np.any(np.isinf(params)):
816
+ raise ValueError(message)
817
+ atol = params[0]
818
+ rtol = rtol if rtol is None else params[1]
819
+
820
+ BIGINT = float(2**62)
821
+ if maxfun is None and maxlevel is None:
822
+ maxlevel = 10
823
+
824
+ maxfun = BIGINT if maxfun is None else maxfun
825
+ maxlevel = BIGINT if maxlevel is None else maxlevel
826
+
827
+ message = '`maxfun`, `maxlevel`, and `minlevel` must be integers.'
828
+ params = np.asarray([maxfun, maxlevel, minlevel])
829
+ if not (np.issubdtype(params.dtype, np.number)
830
+ and np.all(np.isreal(params))
831
+ and np.all(params.astype(np.int64) == params)):
832
+ raise ValueError(message)
833
+ message = '`maxfun`, `maxlevel`, and `minlevel` must be non-negative.'
834
+ if np.any(params < 0):
835
+ raise ValueError(message)
836
+ maxfun, maxlevel, minlevel = params.astype(np.int64)
837
+ minlevel = min(minlevel, maxlevel)
838
+
839
+ if not np.iterable(args):
840
+ args = (args,)
841
+
842
+ message = '`preserve_shape` must be True or False.'
843
+ if preserve_shape not in {True, False}:
844
+ raise ValueError(message)
845
+
846
+ if callback is not None and not callable(callback):
847
+ raise ValueError('`callback` must be callable.')
848
+
849
+ return (f, a, b, log, maxfun, maxlevel, minlevel,
850
+ atol, rtol, args, preserve_shape, callback)
851
+
852
+
853
+ def _logsumexp(x, axis=0):
854
+ # logsumexp raises with empty array
855
+ x = np.asarray(x)
856
+ shape = list(x.shape)
857
+ if shape[axis] == 0:
858
+ shape.pop(axis)
859
+ return np.full(shape, fill_value=-np.inf, dtype=x.dtype)
860
+ else:
861
+ return special.logsumexp(x, axis=axis)
862
+
863
+
864
+ def _nsum_iv(f, a, b, step, args, log, maxterms, atol, rtol):
865
+ # Input validation and standardization
866
+
867
+ message = '`f` must be callable.'
868
+ if not callable(f):
869
+ raise ValueError(message)
870
+
871
+ message = 'All elements of `a`, `b`, and `step` must be real numbers.'
872
+ a, b, step = np.broadcast_arrays(a, b, step)
873
+ dtype = np.result_type(a.dtype, b.dtype, step.dtype)
874
+ if not np.issubdtype(dtype, np.number) or np.issubdtype(dtype, np.complexfloating):
875
+ raise ValueError(message)
876
+
877
+ valid_a = np.isfinite(a)
878
+ valid_b = b >= a # NaNs will be False
879
+ valid_step = np.isfinite(step) & (step > 0)
880
+ valid_abstep = valid_a & valid_b & valid_step
881
+
882
+ message = '`log` must be True or False.'
883
+ if log not in {True, False}:
884
+ raise ValueError(message)
885
+
886
+ if atol is None:
887
+ atol = -np.inf if log else 0
888
+
889
+ rtol_temp = rtol if rtol is not None else 0.
890
+
891
+ params = np.asarray([atol, rtol_temp, 0.])
892
+ message = "`atol` and `rtol` must be real numbers."
893
+ if not np.issubdtype(params.dtype, np.floating):
894
+ raise ValueError(message)
895
+
896
+ if log:
897
+ message = '`atol`, `rtol` may not be positive infinity or NaN.'
898
+ if np.any(np.isposinf(params) | np.isnan(params)):
899
+ raise ValueError(message)
900
+ else:
901
+ message = '`atol`, and `rtol` must be non-negative and finite.'
902
+ if np.any((params < 0) | (~np.isfinite(params))):
903
+ raise ValueError(message)
904
+ atol = params[0]
905
+ rtol = rtol if rtol is None else params[1]
906
+
907
+ maxterms_int = int(maxterms)
908
+ if maxterms_int != maxterms or maxterms < 0:
909
+ message = "`maxterms` must be a non-negative integer."
910
+ raise ValueError(message)
911
+
912
+ if not np.iterable(args):
913
+ args = (args,)
914
+
915
+ return f, a, b, step, valid_abstep, args, log, maxterms_int, atol, rtol
916
+
917
+
918
+ def _nsum(f, a, b, step=1, args=(), log=False, maxterms=int(2**20), atol=None,
919
+ rtol=None):
920
+ r"""Evaluate a convergent sum.
921
+
922
+ For finite `b`, this evaluates::
923
+
924
+ f(a + np.arange(n)*step).sum()
925
+
926
+ where ``n = int((b - a) / step) + 1``. If `f` is smooth, positive, and
927
+ monotone decreasing, `b` may be infinite, in which case the infinite sum
928
+ is approximated using integration.
929
+
930
+ Parameters
931
+ ----------
932
+ f : callable
933
+ The function that evaluates terms to be summed. The signature must be::
934
+
935
+ f(x: ndarray, *args) -> ndarray
936
+
937
+ where each element of ``x`` is a finite real and ``args`` is a tuple,
938
+ which may contain an arbitrary number of arrays that are broadcastable
939
+ with `x`. `f` must represent a smooth, positive, and monotone decreasing
940
+ function of `x`; `_nsum` performs no checks to verify that these conditions
941
+ are met and may return erroneous results if they are violated.
942
+ a, b : array_like
943
+ Real lower and upper limits of summed terms. Must be broadcastable.
944
+ Each element of `a` must be finite and less than the corresponding
945
+ element in `b`, but elements of `b` may be infinite.
946
+ step : array_like
947
+ Finite, positive, real step between summed terms. Must be broadcastable
948
+ with `a` and `b`.
949
+ args : tuple, optional
950
+ Additional positional arguments to be passed to `f`. Must be arrays
951
+ broadcastable with `a`, `b`, and `step`. If the callable to be summed
952
+ requires arguments that are not broadcastable with `a`, `b`, and `step`,
953
+ wrap that callable with `f`. See Examples.
954
+ log : bool, default: False
955
+ Setting to True indicates that `f` returns the log of the terms
956
+ and that `atol` and `rtol` are expressed as the logs of the absolute
957
+ and relative errors. In this case, the result object will contain the
958
+ log of the sum and error. This is useful for summands for which
959
+ numerical underflow or overflow would lead to inaccuracies.
960
+ maxterms : int, default: 2**20
961
+ The maximum number of terms to evaluate when summing directly.
962
+ Additional function evaluations may be performed for input
963
+ validation and integral evaluation.
964
+ atol, rtol : float, optional
965
+ Absolute termination tolerance (default: 0) and relative termination
966
+ tolerance (default: ``eps**0.5``, where ``eps`` is the precision of
967
+ the result dtype), respectively. Must be non-negative
968
+ and finite if `log` is False, and must be expressed as the log of a
969
+ non-negative and finite number if `log` is True.
970
+
971
+ Returns
972
+ -------
973
+ res : _RichResult
974
+ An instance of `scipy._lib._util._RichResult` with the following
975
+ attributes. (The descriptions are written as though the values will be
976
+ scalars; however, if `func` returns an array, the outputs will be
977
+ arrays of the same shape.)
979
+ success : bool
980
+ ``True`` when the algorithm terminated successfully (status ``0``).
981
+ status : int
982
+ An integer representing the exit status of the algorithm.
983
+ ``0`` : The algorithm converged to the specified tolerances.
984
+ ``-1`` : Element(s) of `a`, `b`, or `step` are invalid
985
+ ``-2`` : Numerical integration reached its iteration limit; the sum may be divergent.
986
+ ``-3`` : A non-finite value was encountered.
987
+ sum : float
988
+ An estimate of the sum.
989
+ error : float
990
+ An estimate of the absolute error, assuming all terms are non-negative.
991
+ nfev : int
992
+ The number of points at which `func` was evaluated.
993
+
994
+ See Also
995
+ --------
996
+ tanhsinh
997
+
998
+ Notes
999
+ -----
1000
+ The method implemented for infinite summation is related to the integral
1001
+ test for convergence of an infinite series: assuming `step` size 1 for
1002
+ simplicity of exposition, the sum of a monotone decreasing function is bounded by
1003
+
1004
+ .. math::
1005
+
1006
+ \int_u^\infty f(x) dx \leq \sum_{k=u}^\infty f(k) \leq \int_u^\infty f(x) dx + f(u)
1007
+
1008
+ Let :math:`a` represent `a`, :math:`n` represent `maxterms`, :math:`\epsilon_a`
1009
+ represent `atol`, and :math:`\epsilon_r` represent `rtol`.
1010
+ The implementation first evaluates the integral :math:`S_l=\int_a^\infty f(x) dx`
1011
+ as a lower bound of the infinite sum. Then, it seeks a value :math:`c > a` such
1012
+ that :math:`f(c) < \epsilon_a + S_l \epsilon_r`, if it exists; otherwise,
1013
+ let :math:`c = a + n`. Then the infinite sum is approximated as
1014
+
1015
+ .. math::
1016
+
1017
+ \sum_{k=a}^{c-1} f(k) + \int_c^\infty f(x) dx + f(c)/2
1018
+
1019
+ and the reported error is :math:`f(c)/2` plus the error estimate of
1020
+ numerical integration. The approach described above is generalized for non-unit
1021
+ `step` and finite `b` that is too large for direct evaluation of the sum,
1022
+ i.e. ``b - a + 1 > maxterms``.
1023
+
1024
+ References
1025
+ ----------
1026
+ [1] Wikipedia. "Integral test for convergence."
1027
+ https://en.wikipedia.org/wiki/Integral_test_for_convergence
1028
+
1029
+ Examples
1030
+ --------
1031
+ Compute the infinite sum of the reciprocals of squared integers.
1032
+
1033
+ >>> import numpy as np
1034
+ >>> from scipy.integrate._tanhsinh import _nsum
1035
+ >>> res = _nsum(lambda k: 1/k**2, 1, np.inf, maxterms=1e3)
1036
+ >>> ref = np.pi**2/6 # true value
1037
+ >>> res.error # estimated error
1038
+ 4.990014980029223e-07
1039
+ >>> (res.sum - ref)/ref # true error
1040
+ -1.0101760641302586e-10
1041
+ >>> res.nfev # number of points at which callable was evaluated
1042
+ 1142
1043
+
1044
+ Compute the infinite sums of the reciprocals of integers raised to powers ``p``.
1045
+
1046
+ >>> from scipy import special
1047
+ >>> p = np.arange(2, 10)
1048
+ >>> res = _nsum(lambda k, p: 1/k**p, 1, np.inf, maxterms=1e3, args=(p,))
1049
+ >>> ref = special.zeta(p, 1)
1050
+ >>> np.allclose(res.sum, ref)
1051
+ True
1052
+
1053
+ """ # noqa: E501
1054
+ # Potential future work:
1055
+ # - more careful testing of when `b` is slightly less than `a` plus an
1056
+ # integer multiple of step (needed before this is public)
1057
+ # - improve error estimate of `_direct` sum
1058
+ # - add other methods for convergence acceleration (Richardson, epsilon)
1059
+ # - support infinite lower limit?
1060
+ # - support negative monotone increasing functions?
1061
+ # - b < a / negative step?
1062
+ # - complex-valued function?
1063
+ # - check for violations of monotonicity?
1064
+
1065
+ # Function-specific input validation / standardization
1066
+ tmp = _nsum_iv(f, a, b, step, args, log, maxterms, atol, rtol)
1067
+ f, a, b, step, valid_abstep, args, log, maxterms, atol, rtol = tmp
1068
+
1069
+ # Additional elementwise algorithm input validation / standardization
1070
+ tmp = eim._initialize(f, (a,), args, complex_ok=False)
1071
+ f, xs, fs, args, shape, dtype = tmp
1072
+
1073
+ # Finish preparing `a`, `b`, and `step` arrays
1074
+ a = xs[0]
1075
+ b = np.broadcast_to(b, shape).ravel().astype(dtype)
1076
+ step = np.broadcast_to(step, shape).ravel().astype(dtype)
1077
+ valid_abstep = np.broadcast_to(valid_abstep, shape).ravel()
1078
+ nterms = np.floor((b - a) / step)
1079
+ b = a + nterms*step
1080
+
1081
+ # Define constants
1082
+ eps = np.finfo(dtype).eps
1083
+ zero = np.asarray(-np.inf if log else 0, dtype=dtype)[()]
1084
+ if rtol is None:
1085
+ rtol = 0.5*np.log(eps) if log else eps**0.5
1086
+ constants = (dtype, log, eps, zero, rtol, atol, maxterms)
1087
+
1088
+ # Prepare result arrays
1089
+ S = np.empty_like(a)
1090
+ E = np.empty_like(a)
1091
+ status = np.zeros(len(a), dtype=int)
1092
+ nfev = np.ones(len(a), dtype=int) # one function evaluation above
1093
+
1094
+ # Branch for direct sum evaluation / integral approximation / invalid input
1095
+ i1 = (nterms + 1 <= maxterms) & valid_abstep
1096
+ i2 = (nterms + 1 > maxterms) & valid_abstep
1097
+ i3 = ~valid_abstep
1098
+
1099
+ if np.any(i1):
1100
+ args_direct = [arg[i1] for arg in args]
1101
+ tmp = _direct(f, a[i1], b[i1], step[i1], args_direct, constants)
1102
+ S[i1], E[i1] = tmp[:-1]
1103
+ nfev[i1] += tmp[-1]
1104
+ status[i1] = -3 * (~np.isfinite(S[i1]))
1105
+
1106
+ if np.any(i2):
1107
+ args_indirect = [arg[i2] for arg in args]
1108
+ tmp = _integral_bound(f, a[i2], b[i2], step[i2], args_indirect, constants)
1109
+ S[i2], E[i2], status[i2] = tmp[:-1]
1110
+ nfev[i2] += tmp[-1]
1111
+
1112
+ if np.any(i3):
1113
+ S[i3], E[i3] = np.nan, np.nan
1114
+ status[i3] = -1
1115
+
1116
+ # Return results
1117
+ S, E = S.reshape(shape)[()], E.reshape(shape)[()]
1118
+ status, nfev = status.reshape(shape)[()], nfev.reshape(shape)[()]
1119
+ return _RichResult(sum=S, error=E, status=status, success=status == 0,
1120
+ nfev=nfev)
1121
+
1122
+
1123
+ def _direct(f, a, b, step, args, constants, inclusive=True):
1124
+ # Directly evaluate the sum.
1125
+
1126
+ # When used in the context of distributions, `args` would contain the
1127
+ # distribution parameters. We have broadcasted for simplicity, but we could
1128
+ # reduce function evaluations when distribution parameters are the same but
1129
+ # sum limits differ. Roughly:
1130
+ # - compute the function at all points between min(a) and max(b),
1131
+ # - compute the cumulative sum,
1132
+ # - take the difference between elements of the cumulative sum
1133
+ # corresponding with b and a.
1134
+ # This is left to future enhancement
1135
+
1136
+ dtype, log, eps, zero, _, _, _ = constants
1137
+
1138
+ # To allow computation in a single vectorized call, find the maximum number
1139
+ # of points (over all slices) at which the function needs to be evaluated.
1140
+ # Note: if `inclusive` is `True`, then we want `1` more term in the sum.
1141
+ # I didn't think it was great style to use `True` as `1` in Python, so I
1142
+ # explicitly converted it to an `int` before using it.
1143
+ inclusive_adjustment = int(inclusive)
1144
+ steps = np.round((b - a) / step) + inclusive_adjustment
1145
+ # Equivalently, steps = np.round((b - a) / step) + inclusive
1146
+ max_steps = int(np.max(steps))
1147
+
1148
+ # In each slice, the function will be evaluated at the same number of points,
1149
+ # but excessive points (those beyond the right sum limit `b`) are replaced
1150
+ # with NaN to (potentially) reduce the time of these unnecessary calculations.
1151
+ # Use a new last axis for these calculations for consistency with other
1152
+ # elementwise algorithms.
1153
+ a2, b2, step2 = a[:, np.newaxis], b[:, np.newaxis], step[:, np.newaxis]
1154
+ args2 = [arg[:, np.newaxis] for arg in args]
1155
+ ks = a2 + np.arange(max_steps, dtype=dtype) * step2
1156
+ i_nan = ks >= (b2 + inclusive_adjustment*step2/2)
1157
+ ks[i_nan] = np.nan
1158
+ fs = f(ks, *args2)
1159
+
1160
+ # The function evaluated at NaN is NaN, and NaNs are zeroed in the sum.
1161
+ # In some cases it may be faster to loop over slices than to vectorize
1162
+ # like this. This is an optimization that can be added later.
1163
+ fs[i_nan] = zero
1164
+ nfev = max_steps - i_nan.sum(axis=-1)
1165
+ S = _logsumexp(fs, axis=-1) if log else np.sum(fs, axis=-1)
1166
+ # Rough, non-conservative error estimate. See gh-19667 for improvement ideas.
1167
+ E = np.real(S) + np.log(eps) if log else eps * abs(S)
1168
+ return S, E, nfev
1169
+
1170
+
1171
+ def _integral_bound(f, a, b, step, args, constants):
1172
+ # Estimate the sum with integral approximation
1173
+ dtype, log, _, _, rtol, atol, maxterms = constants
1174
+ log2 = np.log(2, dtype=dtype)
1175
+
1176
+ # Get a lower bound on the sum and compute effective absolute tolerance
1177
+ lb = _tanhsinh(f, a, b, args=args, atol=atol, rtol=rtol, log=log)
1178
+ tol = np.broadcast_to(atol, lb.integral.shape)
1179
+ tol = _logsumexp((tol, rtol + lb.integral)) if log else tol + rtol*lb.integral
1180
+ i_skip = lb.status < 0 # avoid unnecessary f_evals if integral is divergent
1181
+ tol[i_skip] = np.nan
1182
+ status = lb.status
1183
+
1184
+ # As in `_direct`, we'll need a temporary new axis for points
1185
+ # at which to evaluate the function. Append axis at the end for
1186
+ # consistency with other elementwise algorithms.
1187
+ a2 = a[..., np.newaxis]
1188
+ step2 = step[..., np.newaxis]
1189
+ args2 = [arg[..., np.newaxis] for arg in args]
1190
+
1191
+ # Find the location of a term that is less than the tolerance (if possible)
1192
+ log2maxterms = np.floor(np.log2(maxterms)) if maxterms else 0
1193
+ n_steps = np.concatenate([2**np.arange(0, log2maxterms), [maxterms]], dtype=dtype)
1194
+ nfev = len(n_steps)
1195
+ ks = a2 + n_steps * step2
1196
+ fks = f(ks, *args2)
1197
+ nt = np.minimum(np.sum(fks > tol[:, np.newaxis], axis=-1), n_steps.shape[-1]-1)
1198
+ n_steps = n_steps[nt]
1199
+
1200
+ # Directly evaluate the sum up to this term
1201
+ k = a + n_steps * step
1202
+ left, left_error, left_nfev = _direct(f, a, k, step, args,
1203
+ constants, inclusive=False)
1204
+ i_skip |= np.isposinf(left) # if sum is not finite, no sense in continuing
1205
+ status[np.isposinf(left)] = -3
1206
+ k[i_skip] = np.nan
1207
+
1208
+ # Use integration to estimate the remaining sum
1209
+ # Possible optimization for future work: if there were no terms less than
1210
+ # the tolerance, there is no need to compute the integral to better accuracy.
1211
+ # Something like:
1212
+ # atol = np.maximum(atol, np.minimum(fk/2 - fb/2))
1213
+ # rtol = np.maximum(rtol, np.minimum((fk/2 - fb/2)/left))
1214
+ # where `fk`/`fb` are currently calculated below.
1215
+ right = _tanhsinh(f, k, b, args=args, atol=atol, rtol=rtol, log=log)
1216
+
1217
+ # Calculate the full estimate and error from the pieces
1218
+ fk = fks[np.arange(len(fks)), nt]
1219
+ fb = f(b, *args)
1220
+ nfev += 1
1221
+ if log:
1222
+ log_step = np.log(step)
1223
+ S_terms = (left, right.integral - log_step, fk - log2, fb - log2)
1224
+ S = _logsumexp(S_terms, axis=0)
1225
+ E_terms = (left_error, right.error - log_step, fk-log2, fb-log2+np.pi*1j)
1226
+ E = _logsumexp(E_terms, axis=0).real
1227
+ else:
1228
+ S = left + right.integral/step + fk/2 + fb/2
1229
+ E = left_error + right.error/step + fk/2 - fb/2
1230
+ status[~i_skip] = right.status[~i_skip]
1231
+ return S, E, status, left_nfev + right.nfev + nfev + lb.nfev
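The `log=True` mode described in the `_nsum` docstring above has no example of its own there. The following is a minimal sketch, not part of the file: it uses the same private import path as the docstring's own examples (so it is not a supported public API) and checks that exponentiating the reported log-sum agrees with the ordinary linear-scale call.

```python
import numpy as np
from scipy.integrate._tanhsinh import _nsum  # private helper, as in the docstring examples

# Basel sum, once with ordinary terms and once with log-terms. With log=True,
# `f` returns the log of each term and the result holds the log of the sum.
res_lin = _nsum(lambda k: 1/k**2, 1, np.inf)
res_log = _nsum(lambda k: -2*np.log(k), 1, np.inf, log=True)

print(res_lin.sum)           # ~ np.pi**2 / 6
print(np.exp(res_log.sum))   # should agree with the linear-scale estimate
```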
venv/lib/python3.10/site-packages/scipy/integrate/_test_multivariate.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (16.9 kB). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/_test_odeint_banded.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (109 kB). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/_vode.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (166 kB). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/dop.py ADDED
@@ -0,0 +1,18 @@
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+
3
+ from scipy._lib.deprecation import _sub_module_deprecation
4
+
5
+ __all__ = [ # noqa: F822
6
+ 'dopri5',
7
+ 'dop853'
8
+ ]
9
+
10
+
11
+ def __dir__():
12
+ return __all__
13
+
14
+
15
+ def __getattr__(name):
16
+ return _sub_module_deprecation(sub_package="integrate", module="dop",
17
+ private_modules=["_dop"], all=__all__,
18
+ attribute=name)
venv/lib/python3.10/site-packages/scipy/integrate/lsoda.py ADDED
@@ -0,0 +1,15 @@
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+
3
+ from scipy._lib.deprecation import _sub_module_deprecation
4
+
5
+ __all__ = ['lsoda'] # noqa: F822
6
+
7
+
8
+ def __dir__():
9
+ return __all__
10
+
11
+
12
+ def __getattr__(name):
13
+ return _sub_module_deprecation(sub_package="integrate", module="lsoda",
14
+ private_modules=["_lsoda"], all=__all__,
15
+ attribute=name)
venv/lib/python3.10/site-packages/scipy/integrate/odepack.py ADDED
@@ -0,0 +1,17 @@
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.integrate` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+ __all__ = ['odeint', 'ODEintWarning'] # noqa: F822
8
+
9
+
10
+ def __dir__():
11
+ return __all__
12
+
13
+
14
+ def __getattr__(name):
15
+ return _sub_module_deprecation(sub_package="integrate", module="odepack",
16
+ private_modules=["_odepack_py"], all=__all__,
17
+ attribute=name)
venv/lib/python3.10/site-packages/scipy/integrate/quadpack.py ADDED
@@ -0,0 +1,24 @@
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.integrate` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+ __all__ = [ # noqa: F822
8
+ "quad",
9
+ "dblquad",
10
+ "tplquad",
11
+ "nquad",
12
+ "IntegrationWarning",
13
+ "error",
14
+ ]
15
+
16
+
17
+ def __dir__():
18
+ return __all__
19
+
20
+
21
+ def __getattr__(name):
22
+ return _sub_module_deprecation(sub_package="integrate", module="quadpack",
23
+ private_modules=["_quadpack_py"], all=__all__,
24
+ attribute=name)
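Like the `dop.py`, `lsoda.py`, and `odepack.py` shims above, `quadpack.py` only redirects attribute access through `_sub_module_deprecation`. A short sketch of what that means for callers follows; the exact warning text comes from SciPy's deprecation machinery, not from these files.

```python
import warnings
from scipy.integrate import quadpack  # deprecated shim shown above

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    quad_via_shim = quadpack.quad  # resolved by __getattr__ -> _sub_module_deprecation
# Accessing the attribute is expected to emit a DeprecationWarning...
assert any(issubclass(w.category, DeprecationWarning) for w in caught)

# ...and the supported spelling is the scipy.integrate namespace itself:
from scipy.integrate import quad
```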
venv/lib/python3.10/site-packages/scipy/integrate/tests/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (189 Bytes). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc ADDED
Binary file (6.55 kB). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_banded_ode_solvers.cpython-310.pyc ADDED
Binary file (5.27 kB). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadpack.cpython-310.pyc ADDED
Binary file (25.8 kB). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc ADDED
Binary file (29.1 kB). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_tanhsinh.cpython-310.pyc ADDED
Binary file (29.7 kB). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/tests/test__quad_vec.py ADDED
@@ -0,0 +1,209 @@
1
+ import pytest
2
+
3
+ import numpy as np
4
+ from numpy.testing import assert_allclose
5
+
6
+ from scipy.integrate import quad_vec
7
+
8
+ from multiprocessing.dummy import Pool
9
+
10
+
11
+ quadrature_params = pytest.mark.parametrize(
12
+ 'quadrature', [None, "gk15", "gk21", "trapezoid"])
13
+
14
+
15
+ @quadrature_params
16
+ def test_quad_vec_simple(quadrature):
17
+ n = np.arange(10)
18
+ def f(x):
19
+ return x ** n
20
+ for epsabs in [0.1, 1e-3, 1e-6]:
21
+ if quadrature == 'trapezoid' and epsabs < 1e-4:
22
+ # slow: skip
23
+ continue
24
+
25
+ kwargs = dict(epsabs=epsabs, quadrature=quadrature)
26
+
27
+ exact = 2**(n+1)/(n + 1)
28
+
29
+ res, err = quad_vec(f, 0, 2, norm='max', **kwargs)
30
+ assert_allclose(res, exact, rtol=0, atol=epsabs)
31
+
32
+ res, err = quad_vec(f, 0, 2, norm='2', **kwargs)
33
+ assert np.linalg.norm(res - exact) < epsabs
34
+
35
+ res, err = quad_vec(f, 0, 2, norm='max', points=(0.5, 1.0), **kwargs)
36
+ assert_allclose(res, exact, rtol=0, atol=epsabs)
37
+
38
+ res, err, *rest = quad_vec(f, 0, 2, norm='max',
39
+ epsrel=1e-8,
40
+ full_output=True,
41
+ limit=10000,
42
+ **kwargs)
43
+ assert_allclose(res, exact, rtol=0, atol=epsabs)
44
+
45
+
46
+ @quadrature_params
47
+ def test_quad_vec_simple_inf(quadrature):
48
+ def f(x):
49
+ return 1 / (1 + np.float64(x) ** 2)
50
+
51
+ for epsabs in [0.1, 1e-3, 1e-6]:
52
+ if quadrature == 'trapezoid' and epsabs < 1e-4:
53
+ # slow: skip
54
+ continue
55
+
56
+ kwargs = dict(norm='max', epsabs=epsabs, quadrature=quadrature)
57
+
58
+ res, err = quad_vec(f, 0, np.inf, **kwargs)
59
+ assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err))
60
+
61
+ res, err = quad_vec(f, 0, -np.inf, **kwargs)
62
+ assert_allclose(res, -np.pi/2, rtol=0, atol=max(epsabs, err))
63
+
64
+ res, err = quad_vec(f, -np.inf, 0, **kwargs)
65
+ assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err))
66
+
67
+ res, err = quad_vec(f, np.inf, 0, **kwargs)
68
+ assert_allclose(res, -np.pi/2, rtol=0, atol=max(epsabs, err))
69
+
70
+ res, err = quad_vec(f, -np.inf, np.inf, **kwargs)
71
+ assert_allclose(res, np.pi, rtol=0, atol=max(epsabs, err))
72
+
73
+ res, err = quad_vec(f, np.inf, -np.inf, **kwargs)
74
+ assert_allclose(res, -np.pi, rtol=0, atol=max(epsabs, err))
75
+
76
+ res, err = quad_vec(f, np.inf, np.inf, **kwargs)
77
+ assert_allclose(res, 0, rtol=0, atol=max(epsabs, err))
78
+
79
+ res, err = quad_vec(f, -np.inf, -np.inf, **kwargs)
80
+ assert_allclose(res, 0, rtol=0, atol=max(epsabs, err))
81
+
82
+ res, err = quad_vec(f, 0, np.inf, points=(1.0, 2.0), **kwargs)
83
+ assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err))
84
+
85
+ def f(x):
86
+ return np.sin(x + 2) / (1 + x ** 2)
87
+ exact = np.pi / np.e * np.sin(2)
88
+ epsabs = 1e-5
89
+
90
+ res, err, info = quad_vec(f, -np.inf, np.inf, limit=1000, norm='max', epsabs=epsabs,
91
+ quadrature=quadrature, full_output=True)
92
+ assert info.status == 1
93
+ assert_allclose(res, exact, rtol=0, atol=max(epsabs, 1.5 * err))
94
+
95
+
96
+ def test_quad_vec_args():
97
+ def f(x, a):
98
+ return x * (x + a) * np.arange(3)
99
+ a = 2
100
+ exact = np.array([0, 4/3, 8/3])
101
+
102
+ res, err = quad_vec(f, 0, 1, args=(a,))
103
+ assert_allclose(res, exact, rtol=0, atol=1e-4)
104
+
105
+
106
+ def _lorenzian(x):
107
+ return 1 / (1 + x**2)
108
+
109
+
110
+ def test_quad_vec_pool():
111
+ f = _lorenzian
112
+ res, err = quad_vec(f, -np.inf, np.inf, norm='max', epsabs=1e-4, workers=4)
113
+ assert_allclose(res, np.pi, rtol=0, atol=1e-4)
114
+
115
+ with Pool(10) as pool:
116
+ def f(x):
117
+ return 1 / (1 + x ** 2)
118
+ res, _ = quad_vec(f, -np.inf, np.inf, norm='max', epsabs=1e-4, workers=pool.map)
119
+ assert_allclose(res, np.pi, rtol=0, atol=1e-4)
120
+
121
+
122
+ def _func_with_args(x, a):
123
+ return x * (x + a) * np.arange(3)
124
+
125
+
126
+ @pytest.mark.parametrize('extra_args', [2, (2,)])
127
+ @pytest.mark.parametrize('workers', [1, 10])
128
+ def test_quad_vec_pool_args(extra_args, workers):
129
+ f = _func_with_args
130
+ exact = np.array([0, 4/3, 8/3])
131
+
132
+ res, err = quad_vec(f, 0, 1, args=extra_args, workers=workers)
133
+ assert_allclose(res, exact, rtol=0, atol=1e-4)
134
+
135
+ with Pool(workers) as pool:
136
+ res, err = quad_vec(f, 0, 1, args=extra_args, workers=pool.map)
137
+ assert_allclose(res, exact, rtol=0, atol=1e-4)
138
+
139
+
140
+ @quadrature_params
141
+ def test_num_eval(quadrature):
142
+ def f(x):
143
+ count[0] += 1
144
+ return x**5
145
+
146
+ count = [0]
147
+ res = quad_vec(f, 0, 1, norm='max', full_output=True, quadrature=quadrature)
148
+ assert res[2].neval == count[0]
149
+
150
+
151
+ def test_info():
152
+ def f(x):
153
+ return np.ones((3, 2, 1))
154
+
155
+ res, err, info = quad_vec(f, 0, 1, norm='max', full_output=True)
156
+
157
+ assert info.success is True
158
+ assert info.status == 0
159
+ assert info.message == 'Target precision reached.'
160
+ assert info.neval > 0
161
+ assert info.intervals.shape[1] == 2
162
+ assert info.integrals.shape == (info.intervals.shape[0], 3, 2, 1)
163
+ assert info.errors.shape == (info.intervals.shape[0],)
164
+
165
+
166
+ def test_nan_inf():
167
+ def f_nan(x):
168
+ return np.nan
169
+
170
+ def f_inf(x):
171
+ return np.inf if x < 0.1 else 1/x
172
+
173
+ res, err, info = quad_vec(f_nan, 0, 1, full_output=True)
174
+ assert info.status == 3
175
+
176
+ res, err, info = quad_vec(f_inf, 0, 1, full_output=True)
177
+ assert info.status == 3
178
+
179
+
180
+ @pytest.mark.parametrize('a,b', [(0, 1), (0, np.inf), (np.inf, 0),
181
+ (-np.inf, np.inf), (np.inf, -np.inf)])
182
+ def test_points(a, b):
183
+ # Check that initial interval splitting is done according to
184
+ # `points`, by checking that consecutive sets of 15 point (for
185
+ # gk15) function evaluations lie between `points`
186
+
187
+ points = (0, 0.25, 0.5, 0.75, 1.0)
188
+ points += tuple(-x for x in points)
189
+
190
+ quadrature_points = 15
191
+ interval_sets = []
192
+ count = 0
193
+
194
+ def f(x):
195
+ nonlocal count
196
+
197
+ if count % quadrature_points == 0:
198
+ interval_sets.append(set())
199
+
200
+ count += 1
201
+ interval_sets[-1].add(float(x))
202
+ return 0.0
203
+
204
+ quad_vec(f, a, b, points=points, quadrature='gk15', limit=0)
205
+
206
+ # Check that all point sets lie in a single `points` interval
207
+ for p in interval_sets:
208
+ j = np.searchsorted(sorted(points), tuple(p))
209
+ assert np.all(j == j[0])
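`test_points` above checks that `points` drives the initial interval splitting. As a companion, here is a small usage sketch of the public `quad_vec` API with a kinked, vector-valued integrand; the integrand and tolerances are chosen only for illustration.

```python
import numpy as np
from scipy.integrate import quad_vec

def f(x):
    # Vector-valued integrand with a kink at x = 0.5.
    return np.array([abs(x - 0.5), (x - 0.5)**2])

# Passing the kink location via `points` makes the subdivision start there.
res, err = quad_vec(f, 0, 1, points=(0.5,), epsabs=1e-10)
print(res)  # ~ [0.25, 1/12]
```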
venv/lib/python3.10/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py ADDED
@@ -0,0 +1,218 @@
1
+ import itertools
2
+ import numpy as np
3
+ from numpy.testing import assert_allclose
4
+ from scipy.integrate import ode
5
+
6
+
7
+ def _band_count(a):
8
+ """Returns ml and mu, the lower and upper band sizes of a."""
9
+ nrows, ncols = a.shape
10
+ ml = 0
11
+ for k in range(-nrows+1, 0):
12
+ if np.diag(a, k).any():
13
+ ml = -k
14
+ break
15
+ mu = 0
16
+ for k in range(nrows-1, 0, -1):
17
+ if np.diag(a, k).any():
18
+ mu = k
19
+ break
20
+ return ml, mu
21
+
22
+
23
+ def _linear_func(t, y, a):
24
+ """Linear system dy/dt = a * y"""
25
+ return a.dot(y)
26
+
27
+
28
+ def _linear_jac(t, y, a):
29
+ """Jacobian of a * y is a."""
30
+ return a
31
+
32
+
33
+ def _linear_banded_jac(t, y, a):
34
+ """Banded Jacobian."""
35
+ ml, mu = _band_count(a)
36
+ bjac = [np.r_[[0] * k, np.diag(a, k)] for k in range(mu, 0, -1)]
37
+ bjac.append(np.diag(a))
38
+ for k in range(-1, -ml-1, -1):
39
+ bjac.append(np.r_[np.diag(a, k), [0] * (-k)])
40
+ return bjac
41
+
42
+
43
+ def _solve_linear_sys(a, y0, tend=1, dt=0.1,
44
+ solver=None, method='bdf', use_jac=True,
45
+ with_jacobian=False, banded=False):
46
+ """Use scipy.integrate.ode to solve a linear system of ODEs.
47
+
48
+ a : square ndarray
49
+ Matrix of the linear system to be solved.
50
+ y0 : ndarray
51
+ Initial condition
52
+ tend : float
53
+ Stop time.
54
+ dt : float
55
+ Step size of the output.
56
+ solver : str
57
+ If not None, this must be "vode", "lsoda" or "zvode".
58
+ method : str
59
+ Either "bdf" or "adams".
60
+ use_jac : bool
61
+ Determines if the jacobian function is passed to ode().
62
+ with_jacobian : bool
63
+ Passed to ode.set_integrator().
64
+ banded : bool
65
+ Determines whether a banded or full jacobian is used.
66
+ If `banded` is True, `lband` and `uband` are determined by the
67
+ values in `a`.
68
+ """
69
+ if banded:
70
+ lband, uband = _band_count(a)
71
+ else:
72
+ lband = None
73
+ uband = None
74
+
75
+ if use_jac:
76
+ if banded:
77
+ r = ode(_linear_func, _linear_banded_jac)
78
+ else:
79
+ r = ode(_linear_func, _linear_jac)
80
+ else:
81
+ r = ode(_linear_func)
82
+
83
+ if solver is None:
84
+ if np.iscomplexobj(a):
85
+ solver = "zvode"
86
+ else:
87
+ solver = "vode"
88
+
89
+ r.set_integrator(solver,
90
+ with_jacobian=with_jacobian,
91
+ method=method,
92
+ lband=lband, uband=uband,
93
+ rtol=1e-9, atol=1e-10,
94
+ )
95
+ t0 = 0
96
+ r.set_initial_value(y0, t0)
97
+ r.set_f_params(a)
98
+ r.set_jac_params(a)
99
+
100
+ t = [t0]
101
+ y = [y0]
102
+ while r.successful() and r.t < tend:
103
+ r.integrate(r.t + dt)
104
+ t.append(r.t)
105
+ y.append(r.y)
106
+
107
+ t = np.array(t)
108
+ y = np.array(y)
109
+ return t, y
110
+
111
+
112
+ def _analytical_solution(a, y0, t):
113
+ """
114
+ Analytical solution to the linear differential equations dy/dt = a*y.
115
+
116
+ The solution is only valid if `a` is diagonalizable.
117
+
118
+ Returns a 2-D array with shape (len(t), len(y0)).
119
+ """
120
+ lam, v = np.linalg.eig(a)
121
+ c = np.linalg.solve(v, y0)
122
+ e = c * np.exp(lam * t.reshape(-1, 1))
123
+ sol = e.dot(v.T)
124
+ return sol
125
+
126
+
127
+ def test_banded_ode_solvers():
128
+ # Test the "lsoda", "vode" and "zvode" solvers of the `ode` class
129
+ # with a system that has a banded Jacobian matrix.
130
+
131
+ t_exact = np.linspace(0, 1.0, 5)
132
+
133
+ # --- Real arrays for testing the "lsoda" and "vode" solvers ---
134
+
135
+ # lband = 2, uband = 1:
136
+ a_real = np.array([[-0.6, 0.1, 0.0, 0.0, 0.0],
137
+ [0.2, -0.5, 0.9, 0.0, 0.0],
138
+ [0.1, 0.1, -0.4, 0.1, 0.0],
139
+ [0.0, 0.3, -0.1, -0.9, -0.3],
140
+ [0.0, 0.0, 0.1, 0.1, -0.7]])
141
+
142
+ # lband = 0, uband = 1:
143
+ a_real_upper = np.triu(a_real)
144
+
145
+ # lband = 2, uband = 0:
146
+ a_real_lower = np.tril(a_real)
147
+
148
+ # lband = 0, uband = 0:
149
+ a_real_diag = np.triu(a_real_lower)
150
+
151
+ real_matrices = [a_real, a_real_upper, a_real_lower, a_real_diag]
152
+ real_solutions = []
153
+
154
+ for a in real_matrices:
155
+ y0 = np.arange(1, a.shape[0] + 1)
156
+ y_exact = _analytical_solution(a, y0, t_exact)
157
+ real_solutions.append((y0, t_exact, y_exact))
158
+
159
+ def check_real(idx, solver, meth, use_jac, with_jac, banded):
160
+ a = real_matrices[idx]
161
+ y0, t_exact, y_exact = real_solutions[idx]
162
+ t, y = _solve_linear_sys(a, y0,
163
+ tend=t_exact[-1],
164
+ dt=t_exact[1] - t_exact[0],
165
+ solver=solver,
166
+ method=meth,
167
+ use_jac=use_jac,
168
+ with_jacobian=with_jac,
169
+ banded=banded)
170
+ assert_allclose(t, t_exact)
171
+ assert_allclose(y, y_exact)
172
+
173
+ for idx in range(len(real_matrices)):
174
+ p = [['vode', 'lsoda'], # solver
175
+ ['bdf', 'adams'], # method
176
+ [False, True], # use_jac
177
+ [False, True], # with_jacobian
178
+ [False, True]] # banded
179
+ for solver, meth, use_jac, with_jac, banded in itertools.product(*p):
180
+ check_real(idx, solver, meth, use_jac, with_jac, banded)
181
+
182
+ # --- Complex arrays for testing the "zvode" solver ---
183
+
184
+ # complex, lband = 2, uband = 1:
185
+ a_complex = a_real - 0.5j * a_real
186
+
187
+ # complex, lband = 0, uband = 0:
188
+ a_complex_diag = np.diag(np.diag(a_complex))
189
+
190
+ complex_matrices = [a_complex, a_complex_diag]
191
+ complex_solutions = []
192
+
193
+ for a in complex_matrices:
194
+ y0 = np.arange(1, a.shape[0] + 1) + 1j
195
+ y_exact = _analytical_solution(a, y0, t_exact)
196
+ complex_solutions.append((y0, t_exact, y_exact))
197
+
198
+ def check_complex(idx, solver, meth, use_jac, with_jac, banded):
199
+ a = complex_matrices[idx]
200
+ y0, t_exact, y_exact = complex_solutions[idx]
201
+ t, y = _solve_linear_sys(a, y0,
202
+ tend=t_exact[-1],
203
+ dt=t_exact[1] - t_exact[0],
204
+ solver=solver,
205
+ method=meth,
206
+ use_jac=use_jac,
207
+ with_jacobian=with_jac,
208
+ banded=banded)
209
+ assert_allclose(t, t_exact)
210
+ assert_allclose(y, y_exact)
211
+
212
+ for idx in range(len(complex_matrices)):
213
+ p = [['bdf', 'adams'], # method
214
+ [False, True], # use_jac
215
+ [False, True], # with_jacobian
216
+ [False, True]] # banded
217
+ for meth, use_jac, with_jac, banded in itertools.product(*p):
218
+ check_complex(idx, "zvode", meth, use_jac, with_jac, banded)
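The helper `_linear_banded_jac` above packs the Jacobian diagonals in the row order that `ode(...).set_integrator(..., lband=..., uband=...)` expects. The sketch below illustrates that packing; it imports the private test helpers, so it only runs where SciPy's test suite is installed alongside the package.

```python
import numpy as np
from scipy.integrate.tests.test_banded_ode_solvers import (
    _band_count, _linear_banded_jac)

a = np.array([[-0.6, 0.1, 0.0],
              [0.2, -0.5, 0.9],
              [0.1, 0.1, -0.4]])

ml, mu = _band_count(a)                  # lower/upper bandwidths -> (2, 1)
bjac = _linear_banded_jac(0.0, None, a)  # superdiagonal, diagonal, subdiagonals
print(ml, mu, np.array(bjac).shape)      # 2 1 (4, 3): mu + ml + 1 packed rows
```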
venv/lib/python3.10/site-packages/scipy/integrate/tests/test_bvp.py ADDED
@@ -0,0 +1,711 @@
1
+ import sys
2
+
3
+ try:
4
+ from StringIO import StringIO
5
+ except ImportError:
6
+ from io import StringIO
7
+
8
+ import numpy as np
9
+ from numpy.testing import (assert_, assert_array_equal, assert_allclose,
10
+ assert_equal)
11
+ from pytest import raises as assert_raises
12
+
13
+ from scipy.sparse import coo_matrix
14
+ from scipy.special import erf
15
+ from scipy.integrate._bvp import (modify_mesh, estimate_fun_jac,
16
+ estimate_bc_jac, compute_jac_indices,
17
+ construct_global_jac, solve_bvp)
18
+
19
+
20
+ def exp_fun(x, y):
21
+ return np.vstack((y[1], y[0]))
22
+
23
+
24
+ def exp_fun_jac(x, y):
25
+ df_dy = np.empty((2, 2, x.shape[0]))
26
+ df_dy[0, 0] = 0
27
+ df_dy[0, 1] = 1
28
+ df_dy[1, 0] = 1
29
+ df_dy[1, 1] = 0
30
+ return df_dy
31
+
32
+
33
+ def exp_bc(ya, yb):
34
+ return np.hstack((ya[0] - 1, yb[0]))
35
+
36
+
37
+ def exp_bc_complex(ya, yb):
38
+ return np.hstack((ya[0] - 1 - 1j, yb[0]))
39
+
40
+
41
+ def exp_bc_jac(ya, yb):
42
+ dbc_dya = np.array([
43
+ [1, 0],
44
+ [0, 0]
45
+ ])
46
+ dbc_dyb = np.array([
47
+ [0, 0],
48
+ [1, 0]
49
+ ])
50
+ return dbc_dya, dbc_dyb
51
+
52
+
53
+ def exp_sol(x):
54
+ return (np.exp(-x) - np.exp(x - 2)) / (1 - np.exp(-2))
55
+
56
+
57
+ def sl_fun(x, y, p):
58
+ return np.vstack((y[1], -p[0]**2 * y[0]))
59
+
60
+
61
+ def sl_fun_jac(x, y, p):
62
+ n, m = y.shape
63
+ df_dy = np.empty((n, 2, m))
64
+ df_dy[0, 0] = 0
65
+ df_dy[0, 1] = 1
66
+ df_dy[1, 0] = -p[0]**2
67
+ df_dy[1, 1] = 0
68
+
69
+ df_dp = np.empty((n, 1, m))
70
+ df_dp[0, 0] = 0
71
+ df_dp[1, 0] = -2 * p[0] * y[0]
72
+
73
+ return df_dy, df_dp
74
+
75
+
76
+ def sl_bc(ya, yb, p):
77
+ return np.hstack((ya[0], yb[0], ya[1] - p[0]))
78
+
79
+
80
+ def sl_bc_jac(ya, yb, p):
81
+ dbc_dya = np.zeros((3, 2))
82
+ dbc_dya[0, 0] = 1
83
+ dbc_dya[2, 1] = 1
84
+
85
+ dbc_dyb = np.zeros((3, 2))
86
+ dbc_dyb[1, 0] = 1
87
+
88
+ dbc_dp = np.zeros((3, 1))
89
+ dbc_dp[2, 0] = -1
90
+
91
+ return dbc_dya, dbc_dyb, dbc_dp
92
+
93
+
94
+ def sl_sol(x, p):
95
+ return np.sin(p[0] * x)
96
+
97
+
98
+ def emden_fun(x, y):
99
+ return np.vstack((y[1], -y[0]**5))
100
+
101
+
102
+ def emden_fun_jac(x, y):
103
+ df_dy = np.empty((2, 2, x.shape[0]))
104
+ df_dy[0, 0] = 0
105
+ df_dy[0, 1] = 1
106
+ df_dy[1, 0] = -5 * y[0]**4
107
+ df_dy[1, 1] = 0
108
+ return df_dy
109
+
110
+
111
+ def emden_bc(ya, yb):
112
+ return np.array([ya[1], yb[0] - (3/4)**0.5])
113
+
114
+
115
+ def emden_bc_jac(ya, yb):
116
+ dbc_dya = np.array([
117
+ [0, 1],
118
+ [0, 0]
119
+ ])
120
+ dbc_dyb = np.array([
121
+ [0, 0],
122
+ [1, 0]
123
+ ])
124
+ return dbc_dya, dbc_dyb
125
+
126
+
127
+ def emden_sol(x):
128
+ return (1 + x**2/3)**-0.5
129
+
130
+
131
+ def undefined_fun(x, y):
132
+ return np.zeros_like(y)
133
+
134
+
135
+ def undefined_bc(ya, yb):
136
+ return np.array([ya[0], yb[0] - 1])
137
+
138
+
139
+ def big_fun(x, y):
140
+ f = np.zeros_like(y)
141
+ f[::2] = y[1::2]
142
+ return f
143
+
144
+
145
+ def big_bc(ya, yb):
146
+ return np.hstack((ya[::2], yb[::2] - 1))
147
+
148
+
149
+ def big_sol(x, n):
150
+ y = np.ones((2 * n, x.size))
151
+ y[::2] = x
152
+ return x
153
+
154
+
155
+ def big_fun_with_parameters(x, y, p):
156
+ """ Big version of sl_fun, with two parameters.
157
+
158
+ The two differential equations represented by sl_fun are broadcast to the
159
+ number of rows of y, rotating between the parameters p[0] and p[1].
160
+ Here are the differential equations:
161
+
162
+ dy[0]/dt = y[1]
163
+ dy[1]/dt = -p[0]**2 * y[0]
164
+ dy[2]/dt = y[3]
165
+ dy[3]/dt = -p[1]**2 * y[2]
166
+ dy[4]/dt = y[5]
167
+ dy[5]/dt = -p[0]**2 * y[4]
168
+ dy[6]/dt = y[7]
169
+ dy[7]/dt = -p[1]**2 * y[6]
170
+ .
171
+ .
172
+ .
173
+
174
+ """
175
+ f = np.zeros_like(y)
176
+ f[::2] = y[1::2]
177
+ f[1::4] = -p[0]**2 * y[::4]
178
+ f[3::4] = -p[1]**2 * y[2::4]
179
+ return f
180
+
181
+
182
+ def big_fun_with_parameters_jac(x, y, p):
183
+ # big version of sl_fun_jac, with two parameters
184
+ n, m = y.shape
185
+ df_dy = np.zeros((n, n, m))
186
+ df_dy[range(0, n, 2), range(1, n, 2)] = 1
187
+ df_dy[range(1, n, 4), range(0, n, 4)] = -p[0]**2
188
+ df_dy[range(3, n, 4), range(2, n, 4)] = -p[1]**2
189
+
190
+ df_dp = np.zeros((n, 2, m))
191
+ df_dp[range(1, n, 4), 0] = -2 * p[0] * y[range(0, n, 4)]
192
+ df_dp[range(3, n, 4), 1] = -2 * p[1] * y[range(2, n, 4)]
193
+
194
+ return df_dy, df_dp
195
+
196
+
197
+ def big_bc_with_parameters(ya, yb, p):
198
+ # big version of sl_bc, with two parameters
199
+ return np.hstack((ya[::2], yb[::2], ya[1] - p[0], ya[3] - p[1]))
200
+
201
+
202
+ def big_bc_with_parameters_jac(ya, yb, p):
203
+ # big version of sl_bc_jac, with two parameters
204
+ n = ya.shape[0]
205
+ dbc_dya = np.zeros((n + 2, n))
206
+ dbc_dyb = np.zeros((n + 2, n))
207
+
208
+ dbc_dya[range(n // 2), range(0, n, 2)] = 1
209
+ dbc_dyb[range(n // 2, n), range(0, n, 2)] = 1
210
+
211
+ dbc_dp = np.zeros((n + 2, 2))
212
+ dbc_dp[n, 0] = -1
213
+ dbc_dya[n, 1] = 1
214
+ dbc_dp[n + 1, 1] = -1
215
+ dbc_dya[n + 1, 3] = 1
216
+
217
+ return dbc_dya, dbc_dyb, dbc_dp
218
+
219
+
220
+ def big_sol_with_parameters(x, p):
221
+ # big version of sl_sol, with two parameters
222
+ return np.vstack((np.sin(p[0] * x), np.sin(p[1] * x)))
223
+
224
+
225
+ def shock_fun(x, y):
226
+ eps = 1e-3
227
+ return np.vstack((
228
+ y[1],
229
+ -(x * y[1] + eps * np.pi**2 * np.cos(np.pi * x) +
230
+ np.pi * x * np.sin(np.pi * x)) / eps
231
+ ))
232
+
233
+
234
+ def shock_bc(ya, yb):
235
+ return np.array([ya[0] + 2, yb[0]])
236
+
237
+
238
+ def shock_sol(x):
239
+ eps = 1e-3
240
+ k = np.sqrt(2 * eps)
241
+ return np.cos(np.pi * x) + erf(x / k) / erf(1 / k)
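shock_sol is the boundary-layer solution used by test_shock_layer further down; a quick solver-free check is that it reproduces the boundary values u(-1) = -2 and u(1) = 0 enforced by shock_bc:

import numpy as np

print(shock_sol(np.array([-1.0, 1.0])))   # approximately [-2, 0]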
242
+
243
+
244
+ def nonlin_bc_fun(x, y):
245
+ # Laplace equation
246
+ return np.stack([y[1], np.zeros_like(x)])
247
+
248
+
249
+ def nonlin_bc_bc(ya, yb):
250
+ phiA, phipA = ya
251
+ phiC, phipC = yb
252
+
253
+ kappa, ioA, ioC, V, f = 1.64, 0.01, 1.0e-4, 0.5, 38.9
254
+
255
+ # Butler-Volmer Kinetics at Anode
256
+ hA = 0.0-phiA-0.0
257
+ iA = ioA * (np.exp(f*hA) - np.exp(-f*hA))
258
+ res0 = iA + kappa * phipA
259
+
260
+ # Butler-Volmer Kinetics at Cathode
261
+ hC = V - phiC - 1.0
262
+ iC = ioC * (np.exp(f*hC) - np.exp(-f*hC))
263
+ res1 = iC - kappa*phipC
264
+
265
+ return np.array([res0, res1])
266
+
267
+
268
+ def nonlin_bc_sol(x):
269
+ return -0.13426436116763119 - 1.1308709 * x
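nonlin_bc_sol is a numerically fitted straight line for the Laplace problem with Butler-Volmer boundary conditions above. Substituting it back into nonlin_bc_bc (taking the -1.1308709 coefficient as its slope) should give residuals close to zero, only approximately because the fitted constants are truncated:

import numpy as np

x = np.array([0.0, 0.1])        # domain used by test_nonlin_bc below
phi = nonlin_bc_sol(x)
slope = -1.1308709              # derivative of the linear solution
ya = np.array([phi[0], slope])
yb = np.array([phi[1], slope])
print(nonlin_bc_bc(ya, yb))     # both residuals should be small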
270
+
271
+
272
+ def test_modify_mesh():
273
+ x = np.array([0, 1, 3, 9], dtype=float)
274
+ x_new = modify_mesh(x, np.array([0]), np.array([2]))
275
+ assert_array_equal(x_new, np.array([0, 0.5, 1, 3, 5, 7, 9]))
276
+
277
+ x = np.array([-6, -3, 0, 3, 6], dtype=float)
278
+ x_new = modify_mesh(x, np.array([1], dtype=int), np.array([0, 2, 3]))
279
+ assert_array_equal(x_new, [-6, -5, -4, -3, -1.5, 0, 1, 2, 3, 4, 5, 6])
280
+
281
+
282
+ def test_compute_fun_jac():
283
+ x = np.linspace(0, 1, 5)
284
+ y = np.empty((2, x.shape[0]))
285
+ y[0] = 0.01
286
+ y[1] = 0.02
287
+ p = np.array([])
288
+ df_dy, df_dp = estimate_fun_jac(lambda x, y, p: exp_fun(x, y), x, y, p)
289
+ df_dy_an = exp_fun_jac(x, y)
290
+ assert_allclose(df_dy, df_dy_an)
291
+ assert_(df_dp is None)
292
+
293
+ x = np.linspace(0, np.pi, 5)
294
+ y = np.empty((2, x.shape[0]))
295
+ y[0] = np.sin(x)
296
+ y[1] = np.cos(x)
297
+ p = np.array([1.0])
298
+ df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p)
299
+ df_dy_an, df_dp_an = sl_fun_jac(x, y, p)
300
+ assert_allclose(df_dy, df_dy_an)
301
+ assert_allclose(df_dp, df_dp_an)
302
+
303
+ x = np.linspace(0, 1, 10)
304
+ y = np.empty((2, x.shape[0]))
305
+ y[0] = (3/4)**0.5
306
+ y[1] = 1e-4
307
+ p = np.array([])
308
+ df_dy, df_dp = estimate_fun_jac(lambda x, y, p: emden_fun(x, y), x, y, p)
309
+ df_dy_an = emden_fun_jac(x, y)
310
+ assert_allclose(df_dy, df_dy_an)
311
+ assert_(df_dp is None)
312
+
313
+
314
+ def test_compute_bc_jac():
315
+ ya = np.array([-1.0, 2])
316
+ yb = np.array([0.5, 3])
317
+ p = np.array([])
318
+ dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(
319
+ lambda ya, yb, p: exp_bc(ya, yb), ya, yb, p)
320
+ dbc_dya_an, dbc_dyb_an = exp_bc_jac(ya, yb)
321
+ assert_allclose(dbc_dya, dbc_dya_an)
322
+ assert_allclose(dbc_dyb, dbc_dyb_an)
323
+ assert_(dbc_dp is None)
324
+
325
+ ya = np.array([0.0, 1])
326
+ yb = np.array([0.0, -1])
327
+ p = np.array([0.5])
328
+ dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, ya, yb, p)
329
+ dbc_dya_an, dbc_dyb_an, dbc_dp_an = sl_bc_jac(ya, yb, p)
330
+ assert_allclose(dbc_dya, dbc_dya_an)
331
+ assert_allclose(dbc_dyb, dbc_dyb_an)
332
+ assert_allclose(dbc_dp, dbc_dp_an)
333
+
334
+ ya = np.array([0.5, 100])
335
+ yb = np.array([-1000, 10.5])
336
+ p = np.array([])
337
+ dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(
338
+ lambda ya, yb, p: emden_bc(ya, yb), ya, yb, p)
339
+ dbc_dya_an, dbc_dyb_an = emden_bc_jac(ya, yb)
340
+ assert_allclose(dbc_dya, dbc_dya_an)
341
+ assert_allclose(dbc_dyb, dbc_dyb_an)
342
+ assert_(dbc_dp is None)
343
+
344
+
345
+ def test_compute_jac_indices():
346
+ n = 2
347
+ m = 4
348
+ k = 2
349
+ i, j = compute_jac_indices(n, m, k)
350
+ s = coo_matrix((np.ones_like(i), (i, j))).toarray()
351
+ s_true = np.array([
352
+ [1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
353
+ [1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
354
+ [0, 0, 1, 1, 1, 1, 0, 0, 1, 1],
355
+ [0, 0, 1, 1, 1, 1, 0, 0, 1, 1],
356
+ [0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
357
+ [0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
358
+ [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
359
+ [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
360
+ [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
361
+ [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
362
+ ])
363
+ assert_array_equal(s, s_true)
364
+
365
+
366
+ def test_compute_global_jac():
367
+ n = 2
368
+ m = 5
369
+ k = 1
370
+ i_jac, j_jac = compute_jac_indices(2, 5, 1)
371
+ x = np.linspace(0, 1, 5)
372
+ h = np.diff(x)
373
+ y = np.vstack((np.sin(np.pi * x), np.pi * np.cos(np.pi * x)))
374
+ p = np.array([3.0])
375
+
376
+ f = sl_fun(x, y, p)
377
+
378
+ x_middle = x[:-1] + 0.5 * h
379
+ y_middle = 0.5 * (y[:, :-1] + y[:, 1:]) - h/8 * (f[:, 1:] - f[:, :-1])
380
+
381
+ df_dy, df_dp = sl_fun_jac(x, y, p)
382
+ df_dy_middle, df_dp_middle = sl_fun_jac(x_middle, y_middle, p)
383
+ dbc_dya, dbc_dyb, dbc_dp = sl_bc_jac(y[:, 0], y[:, -1], p)
384
+
385
+ J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle,
386
+ df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp)
387
+ J = J.toarray()
388
+
389
+ def J_block(h, p):
390
+ return np.array([
391
+ [h**2*p**2/12 - 1, -0.5*h, -h**2*p**2/12 + 1, -0.5*h],
392
+ [0.5*h*p**2, h**2*p**2/12 - 1, 0.5*h*p**2, 1 - h**2*p**2/12]
393
+ ])
394
+
395
+ J_true = np.zeros((m * n + k, m * n + k))
396
+ for i in range(m - 1):
397
+ J_true[i * n: (i + 1) * n, i * n: (i + 2) * n] = J_block(h[i], p[0])
398
+
399
+ J_true[:(m - 1) * n:2, -1] = p * h**2/6 * (y[0, :-1] - y[0, 1:])
400
+ J_true[1:(m - 1) * n:2, -1] = p * (h * (y[0, :-1] + y[0, 1:]) +
401
+ h**2/6 * (y[1, :-1] - y[1, 1:]))
402
+
403
+ J_true[8, 0] = 1
404
+ J_true[9, 8] = 1
405
+ J_true[10, 1] = 1
406
+ J_true[10, 10] = -1
407
+
408
+ assert_allclose(J, J_true, rtol=1e-10)
409
+
410
+ df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p)
411
+ df_dy_middle, df_dp_middle = estimate_fun_jac(sl_fun, x_middle, y_middle, p)
412
+ dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, y[:, 0], y[:, -1], p)
413
+ J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle,
414
+ df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp)
415
+ J = J.toarray()
416
+ assert_allclose(J, J_true, rtol=2e-8, atol=2e-8)
417
+
418
+
419
+ def test_parameter_validation():
420
+ x = [0, 1, 0.5]
421
+ y = np.zeros((2, 3))
422
+ assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y)
423
+
424
+ x = np.linspace(0, 1, 5)
425
+ y = np.zeros((2, 4))
426
+ assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y)
427
+
428
+ def fun(x, y, p):
429
+ return exp_fun(x, y)
430
+ def bc(ya, yb, p):
431
+ return exp_bc(ya, yb)
432
+
433
+ y = np.zeros((2, x.shape[0]))
434
+ assert_raises(ValueError, solve_bvp, fun, bc, x, y, p=[1])
435
+
436
+ def wrong_shape_fun(x, y):
437
+ return np.zeros(3)
438
+
439
+ assert_raises(ValueError, solve_bvp, wrong_shape_fun, bc, x, y)
440
+
441
+ S = np.array([[0, 0]])
442
+ assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y, S=S)
443
+
444
+
445
+ def test_no_params():
446
+ x = np.linspace(0, 1, 5)
447
+ x_test = np.linspace(0, 1, 100)
448
+ y = np.zeros((2, x.shape[0]))
449
+ for fun_jac in [None, exp_fun_jac]:
450
+ for bc_jac in [None, exp_bc_jac]:
451
+ sol = solve_bvp(exp_fun, exp_bc, x, y, fun_jac=fun_jac,
452
+ bc_jac=bc_jac)
453
+
454
+ assert_equal(sol.status, 0)
455
+ assert_(sol.success)
456
+
457
+ assert_equal(sol.x.size, 5)
458
+
459
+ sol_test = sol.sol(x_test)
460
+
461
+ assert_allclose(sol_test[0], exp_sol(x_test), atol=1e-5)
462
+
463
+ f_test = exp_fun(x_test, sol_test)
464
+ r = sol.sol(x_test, 1) - f_test
465
+ rel_res = r / (1 + np.abs(f_test))
466
+ norm_res = np.sum(rel_res**2, axis=0)**0.5
467
+ assert_(np.all(norm_res < 1e-3))
468
+
469
+ assert_(np.all(sol.rms_residuals < 1e-3))
470
+ assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
471
+ assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
472
+
473
+
474
+ def test_with_params():
475
+ x = np.linspace(0, np.pi, 5)
476
+ x_test = np.linspace(0, np.pi, 100)
477
+ y = np.ones((2, x.shape[0]))
478
+
479
+ for fun_jac in [None, sl_fun_jac]:
480
+ for bc_jac in [None, sl_bc_jac]:
481
+ sol = solve_bvp(sl_fun, sl_bc, x, y, p=[0.5], fun_jac=fun_jac,
482
+ bc_jac=bc_jac)
483
+
484
+ assert_equal(sol.status, 0)
485
+ assert_(sol.success)
486
+
487
+ assert_(sol.x.size < 10)
488
+
489
+ assert_allclose(sol.p, [1], rtol=1e-4)
490
+
491
+ sol_test = sol.sol(x_test)
492
+
493
+ assert_allclose(sol_test[0], sl_sol(x_test, [1]),
494
+ rtol=1e-4, atol=1e-4)
495
+
496
+ f_test = sl_fun(x_test, sol_test, [1])
497
+ r = sol.sol(x_test, 1) - f_test
498
+ rel_res = r / (1 + np.abs(f_test))
499
+ norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
500
+ assert_(np.all(norm_res < 1e-3))
501
+
502
+ assert_(np.all(sol.rms_residuals < 1e-3))
503
+ assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
504
+ assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
505
+
506
+
507
+ def test_singular_term():
508
+ x = np.linspace(0, 1, 10)
509
+ x_test = np.linspace(0.05, 1, 100)
510
+ y = np.empty((2, 10))
511
+ y[0] = (3/4)**0.5
512
+ y[1] = 1e-4
513
+ S = np.array([[0, 0], [0, -2]])
514
+
515
+ for fun_jac in [None, emden_fun_jac]:
516
+ for bc_jac in [None, emden_bc_jac]:
517
+ sol = solve_bvp(emden_fun, emden_bc, x, y, S=S, fun_jac=fun_jac,
518
+ bc_jac=bc_jac)
519
+
520
+ assert_equal(sol.status, 0)
521
+ assert_(sol.success)
522
+
523
+ assert_equal(sol.x.size, 10)
524
+
525
+ sol_test = sol.sol(x_test)
526
+ assert_allclose(sol_test[0], emden_sol(x_test), atol=1e-5)
527
+
528
+ f_test = emden_fun(x_test, sol_test) + S.dot(sol_test) / x_test
529
+ r = sol.sol(x_test, 1) - f_test
530
+ rel_res = r / (1 + np.abs(f_test))
531
+ norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
532
+
533
+ assert_(np.all(norm_res < 1e-3))
534
+ assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
535
+ assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
536
+
537
+
538
+ def test_complex():
539
+ # The test is essentially the same as test_no_params, but boundary
540
+ # conditions are turned into complex.
541
+ x = np.linspace(0, 1, 5)
542
+ x_test = np.linspace(0, 1, 100)
543
+ y = np.zeros((2, x.shape[0]), dtype=complex)
544
+ for fun_jac in [None, exp_fun_jac]:
545
+ for bc_jac in [None, exp_bc_jac]:
546
+ sol = solve_bvp(exp_fun, exp_bc_complex, x, y, fun_jac=fun_jac,
547
+ bc_jac=bc_jac)
548
+
549
+ assert_equal(sol.status, 0)
550
+ assert_(sol.success)
551
+
552
+ sol_test = sol.sol(x_test)
553
+
554
+ assert_allclose(sol_test[0].real, exp_sol(x_test), atol=1e-5)
555
+ assert_allclose(sol_test[0].imag, exp_sol(x_test), atol=1e-5)
556
+
557
+ f_test = exp_fun(x_test, sol_test)
558
+ r = sol.sol(x_test, 1) - f_test
559
+ rel_res = r / (1 + np.abs(f_test))
560
+ norm_res = np.sum(np.real(rel_res * np.conj(rel_res)),
561
+ axis=0) ** 0.5
562
+ assert_(np.all(norm_res < 1e-3))
563
+
564
+ assert_(np.all(sol.rms_residuals < 1e-3))
565
+ assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
566
+ assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
567
+
568
+
569
+ def test_failures():
570
+ x = np.linspace(0, 1, 2)
571
+ y = np.zeros((2, x.size))
572
+ res = solve_bvp(exp_fun, exp_bc, x, y, tol=1e-5, max_nodes=5)
573
+ assert_equal(res.status, 1)
574
+ assert_(not res.success)
575
+
576
+ x = np.linspace(0, 1, 5)
577
+ y = np.zeros((2, x.size))
578
+ res = solve_bvp(undefined_fun, undefined_bc, x, y)
579
+ assert_equal(res.status, 2)
580
+ assert_(not res.success)
581
+
582
+
583
+ def test_big_problem():
584
+ n = 30
585
+ x = np.linspace(0, 1, 5)
586
+ y = np.zeros((2 * n, x.size))
587
+ sol = solve_bvp(big_fun, big_bc, x, y)
588
+
589
+ assert_equal(sol.status, 0)
590
+ assert_(sol.success)
591
+
592
+ sol_test = sol.sol(x)
593
+
594
+ assert_allclose(sol_test[0], big_sol(x, n))
595
+
596
+ f_test = big_fun(x, sol_test)
597
+ r = sol.sol(x, 1) - f_test
598
+ rel_res = r / (1 + np.abs(f_test))
599
+ norm_res = np.sum(np.real(rel_res * np.conj(rel_res)), axis=0) ** 0.5
600
+ assert_(np.all(norm_res < 1e-3))
601
+
602
+ assert_(np.all(sol.rms_residuals < 1e-3))
603
+ assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
604
+ assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
605
+
606
+
607
+ def test_big_problem_with_parameters():
608
+ n = 30
609
+ x = np.linspace(0, np.pi, 5)
610
+ x_test = np.linspace(0, np.pi, 100)
611
+ y = np.ones((2 * n, x.size))
612
+
613
+ for fun_jac in [None, big_fun_with_parameters_jac]:
614
+ for bc_jac in [None, big_bc_with_parameters_jac]:
615
+ sol = solve_bvp(big_fun_with_parameters, big_bc_with_parameters, x,
616
+ y, p=[0.5, 0.5], fun_jac=fun_jac, bc_jac=bc_jac)
617
+
618
+ assert_equal(sol.status, 0)
619
+ assert_(sol.success)
620
+
621
+ assert_allclose(sol.p, [1, 1], rtol=1e-4)
622
+
623
+ sol_test = sol.sol(x_test)
624
+
625
+ for isol in range(0, n, 4):
626
+ assert_allclose(sol_test[isol],
627
+ big_sol_with_parameters(x_test, [1, 1])[0],
628
+ rtol=1e-4, atol=1e-4)
629
+ assert_allclose(sol_test[isol + 2],
630
+ big_sol_with_parameters(x_test, [1, 1])[1],
631
+ rtol=1e-4, atol=1e-4)
632
+
633
+ f_test = big_fun_with_parameters(x_test, sol_test, [1, 1])
634
+ r = sol.sol(x_test, 1) - f_test
635
+ rel_res = r / (1 + np.abs(f_test))
636
+ norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
637
+ assert_(np.all(norm_res < 1e-3))
638
+
639
+ assert_(np.all(sol.rms_residuals < 1e-3))
640
+ assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
641
+ assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
642
+
643
+
644
+ def test_shock_layer():
645
+ x = np.linspace(-1, 1, 5)
646
+ x_test = np.linspace(-1, 1, 100)
647
+ y = np.zeros((2, x.size))
648
+ sol = solve_bvp(shock_fun, shock_bc, x, y)
649
+
650
+ assert_equal(sol.status, 0)
651
+ assert_(sol.success)
652
+
653
+ assert_(sol.x.size < 110)
654
+
655
+ sol_test = sol.sol(x_test)
656
+ assert_allclose(sol_test[0], shock_sol(x_test), rtol=1e-5, atol=1e-5)
657
+
658
+ f_test = shock_fun(x_test, sol_test)
659
+ r = sol.sol(x_test, 1) - f_test
660
+ rel_res = r / (1 + np.abs(f_test))
661
+ norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
662
+
663
+ assert_(np.all(norm_res < 1e-3))
664
+ assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
665
+ assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
666
+
667
+
668
+ def test_nonlin_bc():
669
+ x = np.linspace(0, 0.1, 5)
670
+ x_test = x
671
+ y = np.zeros([2, x.size])
672
+ sol = solve_bvp(nonlin_bc_fun, nonlin_bc_bc, x, y)
673
+
674
+ assert_equal(sol.status, 0)
675
+ assert_(sol.success)
676
+
677
+ assert_(sol.x.size < 8)
678
+
679
+ sol_test = sol.sol(x_test)
680
+ assert_allclose(sol_test[0], nonlin_bc_sol(x_test), rtol=1e-5, atol=1e-5)
681
+
682
+ f_test = nonlin_bc_fun(x_test, sol_test)
683
+ r = sol.sol(x_test, 1) - f_test
684
+ rel_res = r / (1 + np.abs(f_test))
685
+ norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
686
+
687
+ assert_(np.all(norm_res < 1e-3))
688
+ assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
689
+ assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
690
+
691
+
692
+ def test_verbose():
693
+ # Smoke test that checks the printing does something and does not crash
694
+ x = np.linspace(0, 1, 5)
695
+ y = np.zeros((2, x.shape[0]))
696
+ for verbose in [0, 1, 2]:
697
+ old_stdout = sys.stdout
698
+ sys.stdout = StringIO()
699
+ try:
700
+ sol = solve_bvp(exp_fun, exp_bc, x, y, verbose=verbose)
701
+ text = sys.stdout.getvalue()
702
+ finally:
703
+ sys.stdout = old_stdout
704
+
705
+ assert_(sol.success)
706
+ if verbose == 0:
707
+ assert_(not text, text)
708
+ if verbose >= 1:
709
+ assert_("Solved in" in text, text)
710
+ if verbose >= 2:
711
+ assert_("Max residual" in text, text)
venv/lib/python3.10/site-packages/scipy/integrate/tests/test_integrate.py ADDED
@@ -0,0 +1,834 @@
1
+ # Authors: Nils Wagner, Ed Schofield, Pauli Virtanen, John Travers
2
+ """
3
+ Tests for numerical integration.
4
+ """
5
+ import numpy as np
6
+ from numpy import (arange, zeros, array, dot, sqrt, cos, sin, eye, pi, exp,
7
+ allclose)
8
+
9
+ from numpy.testing import (
10
+ assert_, assert_array_almost_equal,
11
+ assert_allclose, assert_array_equal, assert_equal, assert_warns)
12
+ from pytest import raises as assert_raises
13
+ from scipy.integrate import odeint, ode, complex_ode
14
+
15
+ #------------------------------------------------------------------------------
16
+ # Test ODE integrators
17
+ #------------------------------------------------------------------------------
18
+
19
+
20
+ class TestOdeint:
21
+ # Check integrate.odeint
22
+
23
+ def _do_problem(self, problem):
24
+ t = arange(0.0, problem.stop_t, 0.05)
25
+
26
+ # Basic case
27
+ z, infodict = odeint(problem.f, problem.z0, t, full_output=True)
28
+ assert_(problem.verify(z, t))
29
+
30
+ # Use tfirst=True
31
+ z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t,
32
+ full_output=True, tfirst=True)
33
+ assert_(problem.verify(z, t))
34
+
35
+ if hasattr(problem, 'jac'):
36
+ # Use Dfun
37
+ z, infodict = odeint(problem.f, problem.z0, t, Dfun=problem.jac,
38
+ full_output=True)
39
+ assert_(problem.verify(z, t))
40
+
41
+ # Use Dfun and tfirst=True
42
+ z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t,
43
+ Dfun=lambda t, y: problem.jac(y, t),
44
+ full_output=True, tfirst=True)
45
+ assert_(problem.verify(z, t))
46
+
47
+ def test_odeint(self):
48
+ for problem_cls in PROBLEMS:
49
+ problem = problem_cls()
50
+ if problem.cmplx:
51
+ continue
52
+ self._do_problem(problem)
53
+
54
+
55
+ class TestODEClass:
56
+
57
+ ode_class = None # Set in subclass.
58
+
59
+ def _do_problem(self, problem, integrator, method='adams'):
60
+
61
+ # ode has callback arguments in different order than odeint
62
+ def f(t, z):
63
+ return problem.f(z, t)
64
+ jac = None
65
+ if hasattr(problem, 'jac'):
66
+ def jac(t, z):
67
+ return problem.jac(z, t)
68
+
69
+ integrator_params = {}
70
+ if problem.lband is not None or problem.uband is not None:
71
+ integrator_params['uband'] = problem.uband
72
+ integrator_params['lband'] = problem.lband
73
+
74
+ ig = self.ode_class(f, jac)
75
+ ig.set_integrator(integrator,
76
+ atol=problem.atol/10,
77
+ rtol=problem.rtol/10,
78
+ method=method,
79
+ **integrator_params)
80
+
81
+ ig.set_initial_value(problem.z0, t=0.0)
82
+ z = ig.integrate(problem.stop_t)
83
+
84
+ assert_array_equal(z, ig.y)
85
+ assert_(ig.successful(), (problem, method))
86
+ assert_(ig.get_return_code() > 0, (problem, method))
87
+ assert_(problem.verify(array([z]), problem.stop_t), (problem, method))
88
+
89
+
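The comment in _do_problem notes that the ode class and odeint take their callback arguments in opposite order. A minimal illustration with the trivial decay y' = -y:

import numpy as np
from scipy.integrate import odeint, ode

# odeint expects f(y, t) by default ...
sol1 = odeint(lambda y, t: -y, [1.0], [0.0, 1.0])

# ... while the ode class expects f(t, y).
r = ode(lambda t, y: -y).set_integrator('vode')
r.set_initial_value([1.0], 0.0)
sol2 = r.integrate(1.0)
print(sol1[-1], sol2)           # both approximately exp(-1)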
90
+ class TestOde(TestODEClass):
91
+
92
+ ode_class = ode
93
+
94
+ def test_vode(self):
95
+ # Check the vode solver
96
+ for problem_cls in PROBLEMS:
97
+ problem = problem_cls()
98
+ if problem.cmplx:
99
+ continue
100
+ if not problem.stiff:
101
+ self._do_problem(problem, 'vode', 'adams')
102
+ self._do_problem(problem, 'vode', 'bdf')
103
+
104
+ def test_zvode(self):
105
+ # Check the zvode solver
106
+ for problem_cls in PROBLEMS:
107
+ problem = problem_cls()
108
+ if not problem.stiff:
109
+ self._do_problem(problem, 'zvode', 'adams')
110
+ self._do_problem(problem, 'zvode', 'bdf')
111
+
112
+ def test_lsoda(self):
113
+ # Check the lsoda solver
114
+ for problem_cls in PROBLEMS:
115
+ problem = problem_cls()
116
+ if problem.cmplx:
117
+ continue
118
+ self._do_problem(problem, 'lsoda')
119
+
120
+ def test_dopri5(self):
121
+ # Check the dopri5 solver
122
+ for problem_cls in PROBLEMS:
123
+ problem = problem_cls()
124
+ if problem.cmplx:
125
+ continue
126
+ if problem.stiff:
127
+ continue
128
+ if hasattr(problem, 'jac'):
129
+ continue
130
+ self._do_problem(problem, 'dopri5')
131
+
132
+ def test_dop853(self):
133
+ # Check the dop853 solver
134
+ for problem_cls in PROBLEMS:
135
+ problem = problem_cls()
136
+ if problem.cmplx:
137
+ continue
138
+ if problem.stiff:
139
+ continue
140
+ if hasattr(problem, 'jac'):
141
+ continue
142
+ self._do_problem(problem, 'dop853')
143
+
144
+ def test_concurrent_fail(self):
145
+ for sol in ('vode', 'zvode', 'lsoda'):
146
+ def f(t, y):
147
+ return 1.0
148
+
149
+ r = ode(f).set_integrator(sol)
150
+ r.set_initial_value(0, 0)
151
+
152
+ r2 = ode(f).set_integrator(sol)
153
+ r2.set_initial_value(0, 0)
154
+
155
+ r.integrate(r.t + 0.1)
156
+ r2.integrate(r2.t + 0.1)
157
+
158
+ assert_raises(RuntimeError, r.integrate, r.t + 0.1)
159
+
160
+ def test_concurrent_ok(self):
161
+ def f(t, y):
162
+ return 1.0
163
+
164
+ for k in range(3):
165
+ for sol in ('vode', 'zvode', 'lsoda', 'dopri5', 'dop853'):
166
+ r = ode(f).set_integrator(sol)
167
+ r.set_initial_value(0, 0)
168
+
169
+ r2 = ode(f).set_integrator(sol)
170
+ r2.set_initial_value(0, 0)
171
+
172
+ r.integrate(r.t + 0.1)
173
+ r2.integrate(r2.t + 0.1)
174
+ r2.integrate(r2.t + 0.1)
175
+
176
+ assert_allclose(r.y, 0.1)
177
+ assert_allclose(r2.y, 0.2)
178
+
179
+ for sol in ('dopri5', 'dop853'):
180
+ r = ode(f).set_integrator(sol)
181
+ r.set_initial_value(0, 0)
182
+
183
+ r2 = ode(f).set_integrator(sol)
184
+ r2.set_initial_value(0, 0)
185
+
186
+ r.integrate(r.t + 0.1)
187
+ r.integrate(r.t + 0.1)
188
+ r2.integrate(r2.t + 0.1)
189
+ r.integrate(r.t + 0.1)
190
+ r2.integrate(r2.t + 0.1)
191
+
192
+ assert_allclose(r.y, 0.3)
193
+ assert_allclose(r2.y, 0.2)
194
+
195
+
196
+ class TestComplexOde(TestODEClass):
197
+
198
+ ode_class = complex_ode
199
+
200
+ def test_vode(self):
201
+ # Check the vode solver
202
+ for problem_cls in PROBLEMS:
203
+ problem = problem_cls()
204
+ if not problem.stiff:
205
+ self._do_problem(problem, 'vode', 'adams')
206
+ else:
207
+ self._do_problem(problem, 'vode', 'bdf')
208
+
209
+ def test_lsoda(self):
210
+ # Check the lsoda solver
211
+ for problem_cls in PROBLEMS:
212
+ problem = problem_cls()
213
+ self._do_problem(problem, 'lsoda')
214
+
215
+ def test_dopri5(self):
216
+ # Check the dopri5 solver
217
+ for problem_cls in PROBLEMS:
218
+ problem = problem_cls()
219
+ if problem.stiff:
220
+ continue
221
+ if hasattr(problem, 'jac'):
222
+ continue
223
+ self._do_problem(problem, 'dopri5')
224
+
225
+ def test_dop853(self):
226
+ # Check the dop853 solver
227
+ for problem_cls in PROBLEMS:
228
+ problem = problem_cls()
229
+ if problem.stiff:
230
+ continue
231
+ if hasattr(problem, 'jac'):
232
+ continue
233
+ self._do_problem(problem, 'dop853')
234
+
235
+
236
+ class TestSolout:
237
+ # Check integrate.ode correctly handles solout for dopri5 and dop853
238
+ def _run_solout_test(self, integrator):
239
+ # Check correct usage of solout
240
+ ts = []
241
+ ys = []
242
+ t0 = 0.0
243
+ tend = 10.0
244
+ y0 = [1.0, 2.0]
245
+
246
+ def solout(t, y):
247
+ ts.append(t)
248
+ ys.append(y.copy())
249
+
250
+ def rhs(t, y):
251
+ return [y[0] + y[1], -y[1]**2]
252
+
253
+ ig = ode(rhs).set_integrator(integrator)
254
+ ig.set_solout(solout)
255
+ ig.set_initial_value(y0, t0)
256
+ ret = ig.integrate(tend)
257
+ assert_array_equal(ys[0], y0)
258
+ assert_array_equal(ys[-1], ret)
259
+ assert_equal(ts[0], t0)
260
+ assert_equal(ts[-1], tend)
261
+
262
+ def test_solout(self):
263
+ for integrator in ('dopri5', 'dop853'):
264
+ self._run_solout_test(integrator)
265
+
266
+ def _run_solout_after_initial_test(self, integrator):
267
+ # Check if solout works even if it is set after the initial value.
268
+ ts = []
269
+ ys = []
270
+ t0 = 0.0
271
+ tend = 10.0
272
+ y0 = [1.0, 2.0]
273
+
274
+ def solout(t, y):
275
+ ts.append(t)
276
+ ys.append(y.copy())
277
+
278
+ def rhs(t, y):
279
+ return [y[0] + y[1], -y[1]**2]
280
+
281
+ ig = ode(rhs).set_integrator(integrator)
282
+ ig.set_initial_value(y0, t0)
283
+ ig.set_solout(solout)
284
+ ret = ig.integrate(tend)
285
+ assert_array_equal(ys[0], y0)
286
+ assert_array_equal(ys[-1], ret)
287
+ assert_equal(ts[0], t0)
288
+ assert_equal(ts[-1], tend)
289
+
290
+ def test_solout_after_initial(self):
291
+ for integrator in ('dopri5', 'dop853'):
292
+ self._run_solout_after_initial_test(integrator)
293
+
294
+ def _run_solout_break_test(self, integrator):
295
+ # Check correct usage of stopping via solout
296
+ ts = []
297
+ ys = []
298
+ t0 = 0.0
299
+ tend = 10.0
300
+ y0 = [1.0, 2.0]
301
+
302
+ def solout(t, y):
303
+ ts.append(t)
304
+ ys.append(y.copy())
305
+ if t > tend/2.0:
306
+ return -1
307
+
308
+ def rhs(t, y):
309
+ return [y[0] + y[1], -y[1]**2]
310
+
311
+ ig = ode(rhs).set_integrator(integrator)
312
+ ig.set_solout(solout)
313
+ ig.set_initial_value(y0, t0)
314
+ ret = ig.integrate(tend)
315
+ assert_array_equal(ys[0], y0)
316
+ assert_array_equal(ys[-1], ret)
317
+ assert_equal(ts[0], t0)
318
+ assert_(ts[-1] > tend/2.0)
319
+ assert_(ts[-1] < tend)
320
+
321
+ def test_solout_break(self):
322
+ for integrator in ('dopri5', 'dop853'):
323
+ self._run_solout_break_test(integrator)
324
+
325
+
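Outside the test classes, the solout mechanism they exercise can be sketched in isolation: dopri5 and dop853 call the registered function after every accepted step, and returning -1 from it stops the integration early.

import numpy as np
from scipy.integrate import ode

accepted = []

def solout(t, y):
    accepted.append((t, y.copy()))

r = ode(lambda t, y: -y).set_integrator('dopri5')
r.set_solout(solout)
r.set_initial_value([1.0], 0.0)
r.integrate(5.0)
print(len(accepted), accepted[-1][0])   # number of accepted steps, final time 5.0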
326
+ class TestComplexSolout:
327
+ # Check integrate.ode correctly handles solout for dopri5 and dop853
328
+ def _run_solout_test(self, integrator):
329
+ # Check correct usage of solout
330
+ ts = []
331
+ ys = []
332
+ t0 = 0.0
333
+ tend = 20.0
334
+ y0 = [0.0]
335
+
336
+ def solout(t, y):
337
+ ts.append(t)
338
+ ys.append(y.copy())
339
+
340
+ def rhs(t, y):
341
+ return [1.0/(t - 10.0 - 1j)]
342
+
343
+ ig = complex_ode(rhs).set_integrator(integrator)
344
+ ig.set_solout(solout)
345
+ ig.set_initial_value(y0, t0)
346
+ ret = ig.integrate(tend)
347
+ assert_array_equal(ys[0], y0)
348
+ assert_array_equal(ys[-1], ret)
349
+ assert_equal(ts[0], t0)
350
+ assert_equal(ts[-1], tend)
351
+
352
+ def test_solout(self):
353
+ for integrator in ('dopri5', 'dop853'):
354
+ self._run_solout_test(integrator)
355
+
356
+ def _run_solout_break_test(self, integrator):
357
+ # Check correct usage of stopping via solout
358
+ ts = []
359
+ ys = []
360
+ t0 = 0.0
361
+ tend = 20.0
362
+ y0 = [0.0]
363
+
364
+ def solout(t, y):
365
+ ts.append(t)
366
+ ys.append(y.copy())
367
+ if t > tend/2.0:
368
+ return -1
369
+
370
+ def rhs(t, y):
371
+ return [1.0/(t - 10.0 - 1j)]
372
+
373
+ ig = complex_ode(rhs).set_integrator(integrator)
374
+ ig.set_solout(solout)
375
+ ig.set_initial_value(y0, t0)
376
+ ret = ig.integrate(tend)
377
+ assert_array_equal(ys[0], y0)
378
+ assert_array_equal(ys[-1], ret)
379
+ assert_equal(ts[0], t0)
380
+ assert_(ts[-1] > tend/2.0)
381
+ assert_(ts[-1] < tend)
382
+
383
+ def test_solout_break(self):
384
+ for integrator in ('dopri5', 'dop853'):
385
+ self._run_solout_break_test(integrator)
386
+
387
+
388
+ #------------------------------------------------------------------------------
389
+ # Test problems
390
+ #------------------------------------------------------------------------------
391
+
392
+
393
+ class ODE:
394
+ """
395
+ ODE problem
396
+ """
397
+ stiff = False
398
+ cmplx = False
399
+ stop_t = 1
400
+ z0 = []
401
+
402
+ lband = None
403
+ uband = None
404
+
405
+ atol = 1e-6
406
+ rtol = 1e-5
407
+
408
+
409
+ class SimpleOscillator(ODE):
410
+ r"""
411
+ Free vibration of a simple oscillator::
412
+ m \ddot{u} + k u = 0, u(0) = u_0 \dot{u}(0) \dot{u}_0
413
+ Solution::
414
+ u(t) = u_0*cos(sqrt(k/m)*t)+\dot{u}_0*sin(sqrt(k/m)*t)/sqrt(k/m)
415
+ """
416
+ stop_t = 1 + 0.09
417
+ z0 = array([1.0, 0.1], float)
418
+
419
+ k = 4.0
420
+ m = 1.0
421
+
422
+ def f(self, z, t):
423
+ tmp = zeros((2, 2), float)
424
+ tmp[0, 1] = 1.0
425
+ tmp[1, 0] = -self.k / self.m
426
+ return dot(tmp, z)
427
+
428
+ def verify(self, zs, t):
429
+ omega = sqrt(self.k / self.m)
430
+ u = self.z0[0]*cos(omega*t) + self.z0[1]*sin(omega*t)/omega
431
+ return allclose(u, zs[:, 0], atol=self.atol, rtol=self.rtol)
432
+
433
+
434
+ class ComplexExp(ODE):
435
+ r"""The equation :math:`\dot u = i u`"""
436
+ stop_t = 1.23*pi
437
+ z0 = exp([1j, 2j, 3j, 4j, 5j])
438
+ cmplx = True
439
+
440
+ def f(self, z, t):
441
+ return 1j*z
442
+
443
+ def jac(self, z, t):
444
+ return 1j*eye(5)
445
+
446
+ def verify(self, zs, t):
447
+ u = self.z0 * exp(1j*t)
448
+ return allclose(u, zs, atol=self.atol, rtol=self.rtol)
449
+
450
+
451
+ class Pi(ODE):
452
+ r"""Integrate 1/(t + 1j) from t=-10 to t=10"""
453
+ stop_t = 20
454
+ z0 = [0]
455
+ cmplx = True
456
+
457
+ def f(self, z, t):
458
+ return array([1./(t - 10 + 1j)])
459
+
460
+ def verify(self, zs, t):
461
+ u = -2j * np.arctan(10)
462
+ return allclose(u, zs[-1, :], atol=self.atol, rtol=self.rtol)
463
+
464
+
465
+ class CoupledDecay(ODE):
466
+ r"""
467
+ 3 coupled decays suited for banded treatment
468
+ (banded treatment only becomes necessary when N >> 3)
469
+ """
470
+
471
+ stiff = True
472
+ stop_t = 0.5
473
+ z0 = [5.0, 7.0, 13.0]
474
+ lband = 1
475
+ uband = 0
476
+
477
+ lmbd = [0.17, 0.23, 0.29] # fictitious decay constants
478
+
479
+ def f(self, z, t):
480
+ lmbd = self.lmbd
481
+ return np.array([-lmbd[0]*z[0],
482
+ -lmbd[1]*z[1] + lmbd[0]*z[0],
483
+ -lmbd[2]*z[2] + lmbd[1]*z[1]])
484
+
485
+ def jac(self, z, t):
486
+ # The full Jacobian is
487
+ #
488
+ # [-lmbd[0] 0 0 ]
489
+ # [ lmbd[0] -lmbd[1] 0 ]
490
+ # [ 0 lmbd[1] -lmbd[2]]
491
+ #
492
+ # The lower and upper bandwidths are lband=1 and uband=0, resp.
493
+ # The representation of this array in packed format is
494
+ #
495
+ # [-lmbd[0] -lmbd[1] -lmbd[2]]
496
+ # [ lmbd[0] lmbd[1] 0 ]
497
+
498
+ lmbd = self.lmbd
499
+ j = np.zeros((self.lband + self.uband + 1, 3), order='F')
500
+
501
+ def set_j(ri, ci, val):
502
+ j[self.uband + ri - ci, ci] = val
503
+ set_j(0, 0, -lmbd[0])
504
+ set_j(1, 0, lmbd[0])
505
+ set_j(1, 1, -lmbd[1])
506
+ set_j(2, 1, lmbd[1])
507
+ set_j(2, 2, -lmbd[2])
508
+ return j
509
+
510
+ def verify(self, zs, t):
511
+ # Formulae derived by hand
512
+ lmbd = np.array(self.lmbd)
513
+ d10 = lmbd[1] - lmbd[0]
514
+ d21 = lmbd[2] - lmbd[1]
515
+ d20 = lmbd[2] - lmbd[0]
516
+ e0 = np.exp(-lmbd[0] * t)
517
+ e1 = np.exp(-lmbd[1] * t)
518
+ e2 = np.exp(-lmbd[2] * t)
519
+ u = np.vstack((
520
+ self.z0[0] * e0,
521
+ self.z0[1] * e1 + self.z0[0] * lmbd[0] / d10 * (e0 - e1),
522
+ self.z0[2] * e2 + self.z0[1] * lmbd[1] / d21 * (e1 - e2) +
523
+ lmbd[1] * lmbd[0] * self.z0[0] / d10 *
524
+ (1 / d20 * (e0 - e2) - 1 / d21 * (e1 - e2)))).transpose()
525
+ return allclose(u, zs, atol=self.atol, rtol=self.rtol)
526
+
527
+
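The comment block inside CoupledDecay.jac describes the packed banded storage expected by VODE/LSODA; the mapping it applies by hand is packed[uband + i - j, j] = full[i, j]. A self-contained sketch of that packing (pack_banded is a hypothetical helper written for illustration, not a scipy function):

import numpy as np

def pack_banded(full, lband, uband):
    # Pack the band of a square matrix into the (uband + lband + 1, n)
    # layout, row uband + i - j holding entry (i, j).
    n = full.shape[0]
    packed = np.zeros((uband + lband + 1, n))
    for i in range(n):
        for j in range(max(0, i - lband), min(n, i + uband + 1)):
            packed[uband + i - j, j] = full[i, j]
    return packed

lmbd = [0.17, 0.23, 0.29]
full = np.array([[-lmbd[0], 0.0, 0.0],
                 [lmbd[0], -lmbd[1], 0.0],
                 [0.0, lmbd[1], -lmbd[2]]])
print(pack_banded(full, lband=1, uband=0))
# row 0: [-lmbd[0], -lmbd[1], -lmbd[2]]   (diagonal)
# row 1: [ lmbd[0],  lmbd[1],  0.0    ]   (first subdiagonal)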
528
+ PROBLEMS = [SimpleOscillator, ComplexExp, Pi, CoupledDecay]
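As a standalone illustration of how these problem classes are consumed (compare TestOdeint._do_problem above), one of them can be integrated directly with odeint and checked through its verify method:

import numpy as np
from scipy.integrate import odeint

problem = SimpleOscillator()
t = np.arange(0.0, problem.stop_t, 0.05)
z = odeint(problem.f, problem.z0, t)
print(problem.verify(z, t))     # expected: True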
529
+
530
+ #------------------------------------------------------------------------------
531
+
532
+
533
+ def f(t, x):
534
+ dxdt = [x[1], -x[0]]
535
+ return dxdt
536
+
537
+
538
+ def jac(t, x):
539
+ j = array([[0.0, 1.0],
540
+ [-1.0, 0.0]])
541
+ return j
542
+
543
+
544
+ def f1(t, x, omega):
545
+ dxdt = [omega*x[1], -omega*x[0]]
546
+ return dxdt
547
+
548
+
549
+ def jac1(t, x, omega):
550
+ j = array([[0.0, omega],
551
+ [-omega, 0.0]])
552
+ return j
553
+
554
+
555
+ def f2(t, x, omega1, omega2):
556
+ dxdt = [omega1*x[1], -omega2*x[0]]
557
+ return dxdt
558
+
559
+
560
+ def jac2(t, x, omega1, omega2):
561
+ j = array([[0.0, omega1],
562
+ [-omega2, 0.0]])
563
+ return j
564
+
565
+
566
+ def fv(t, x, omega):
567
+ dxdt = [omega[0]*x[1], -omega[1]*x[0]]
568
+ return dxdt
569
+
570
+
571
+ def jacv(t, x, omega):
572
+ j = array([[0.0, omega[0]],
573
+ [-omega[1], 0.0]])
574
+ return j
575
+
576
+
577
+ class ODECheckParameterUse:
578
+ """Call an ode-class solver with several cases of parameter use."""
579
+
580
+ # solver_name must be set before tests can be run with this class.
581
+
582
+ # Set these in subclasses.
583
+ solver_name = ''
584
+ solver_uses_jac = False
585
+
586
+ def _get_solver(self, f, jac):
587
+ solver = ode(f, jac)
588
+ if self.solver_uses_jac:
589
+ solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7,
590
+ with_jacobian=self.solver_uses_jac)
591
+ else:
592
+ # XXX Shouldn't set_integrator *always* accept the keyword arg
593
+ # 'with_jacobian', and perhaps raise an exception if it is set
594
+ # to True if the solver can't actually use it?
595
+ solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7)
596
+ return solver
597
+
598
+ def _check_solver(self, solver):
599
+ ic = [1.0, 0.0]
600
+ solver.set_initial_value(ic, 0.0)
601
+ solver.integrate(pi)
602
+ assert_array_almost_equal(solver.y, [-1.0, 0.0])
603
+
604
+ def test_no_params(self):
605
+ solver = self._get_solver(f, jac)
606
+ self._check_solver(solver)
607
+
608
+ def test_one_scalar_param(self):
609
+ solver = self._get_solver(f1, jac1)
610
+ omega = 1.0
611
+ solver.set_f_params(omega)
612
+ if self.solver_uses_jac:
613
+ solver.set_jac_params(omega)
614
+ self._check_solver(solver)
615
+
616
+ def test_two_scalar_params(self):
617
+ solver = self._get_solver(f2, jac2)
618
+ omega1 = 1.0
619
+ omega2 = 1.0
620
+ solver.set_f_params(omega1, omega2)
621
+ if self.solver_uses_jac:
622
+ solver.set_jac_params(omega1, omega2)
623
+ self._check_solver(solver)
624
+
625
+ def test_vector_param(self):
626
+ solver = self._get_solver(fv, jacv)
627
+ omega = [1.0, 1.0]
628
+ solver.set_f_params(omega)
629
+ if self.solver_uses_jac:
630
+ solver.set_jac_params(omega)
631
+ self._check_solver(solver)
632
+
633
+ def test_warns_on_failure(self):
634
+ # Set nsteps small to ensure failure
635
+ solver = self._get_solver(f, jac)
636
+ solver.set_integrator(self.solver_name, nsteps=1)
637
+ ic = [1.0, 0.0]
638
+ solver.set_initial_value(ic, 0.0)
639
+ assert_warns(UserWarning, solver.integrate, pi)
640
+
641
+
642
+ class TestDOPRI5CheckParameterUse(ODECheckParameterUse):
643
+ solver_name = 'dopri5'
644
+ solver_uses_jac = False
645
+
646
+
647
+ class TestDOP853CheckParameterUse(ODECheckParameterUse):
648
+ solver_name = 'dop853'
649
+ solver_uses_jac = False
650
+
651
+
652
+ class TestVODECheckParameterUse(ODECheckParameterUse):
653
+ solver_name = 'vode'
654
+ solver_uses_jac = True
655
+
656
+
657
+ class TestZVODECheckParameterUse(ODECheckParameterUse):
658
+ solver_name = 'zvode'
659
+ solver_uses_jac = True
660
+
661
+
662
+ class TestLSODACheckParameterUse(ODECheckParameterUse):
663
+ solver_name = 'lsoda'
664
+ solver_uses_jac = True
665
+
666
+
667
+ def test_odeint_trivial_time():
668
+ # Test that odeint succeeds when given a single time point
669
+ # and full_output=True. This is a regression test for gh-4282.
670
+ y0 = 1
671
+ t = [0]
672
+ y, info = odeint(lambda y, t: -y, y0, t, full_output=True)
673
+ assert_array_equal(y, np.array([[y0]]))
674
+
675
+
676
+ def test_odeint_banded_jacobian():
677
+ # Test the use of the `Dfun`, `ml` and `mu` options of odeint.
678
+
679
+ def func(y, t, c):
680
+ return c.dot(y)
681
+
682
+ def jac(y, t, c):
683
+ return c
684
+
685
+ def jac_transpose(y, t, c):
686
+ return c.T.copy(order='C')
687
+
688
+ def bjac_rows(y, t, c):
689
+ jac = np.vstack((np.r_[0, np.diag(c, 1)],
690
+ np.diag(c),
691
+ np.r_[np.diag(c, -1), 0],
692
+ np.r_[np.diag(c, -2), 0, 0]))
693
+ return jac
694
+
695
+ def bjac_cols(y, t, c):
696
+ return bjac_rows(y, t, c).T.copy(order='C')
697
+
698
+ c = array([[-205, 0.01, 0.00, 0.0],
699
+ [0.1, -2.50, 0.02, 0.0],
700
+ [1e-3, 0.01, -2.0, 0.01],
701
+ [0.00, 0.00, 0.1, -1.0]])
702
+
703
+ y0 = np.ones(4)
704
+ t = np.array([0, 5, 10, 100])
705
+
706
+ # Use the full Jacobian.
707
+ sol1, info1 = odeint(func, y0, t, args=(c,), full_output=True,
708
+ atol=1e-13, rtol=1e-11, mxstep=10000,
709
+ Dfun=jac)
710
+
711
+ # Use the transposed full Jacobian, with col_deriv=True.
712
+ sol2, info2 = odeint(func, y0, t, args=(c,), full_output=True,
713
+ atol=1e-13, rtol=1e-11, mxstep=10000,
714
+ Dfun=jac_transpose, col_deriv=True)
715
+
716
+ # Use the banded Jacobian.
717
+ sol3, info3 = odeint(func, y0, t, args=(c,), full_output=True,
718
+ atol=1e-13, rtol=1e-11, mxstep=10000,
719
+ Dfun=bjac_rows, ml=2, mu=1)
720
+
721
+ # Use the transposed banded Jacobian, with col_deriv=True.
722
+ sol4, info4 = odeint(func, y0, t, args=(c,), full_output=True,
723
+ atol=1e-13, rtol=1e-11, mxstep=10000,
724
+ Dfun=bjac_cols, ml=2, mu=1, col_deriv=True)
725
+
726
+ assert_allclose(sol1, sol2, err_msg="sol1 != sol2")
727
+ assert_allclose(sol1, sol3, atol=1e-12, err_msg="sol1 != sol3")
728
+ assert_allclose(sol3, sol4, err_msg="sol3 != sol4")
729
+
730
+ # Verify that the number of jacobian evaluations was the same for the
731
+ # calls of odeint with a full jacobian and with a banded jacobian. This is
732
+ # a regression test--there was a bug in the handling of banded jacobians
733
+ # that resulted in an incorrect jacobian matrix being passed to the LSODA
734
+ # code. That would cause errors or excessive jacobian evaluations.
735
+ assert_array_equal(info1['nje'], info2['nje'])
736
+ assert_array_equal(info3['nje'], info4['nje'])
737
+
738
+ # Test the use of tfirst
739
+ sol1ty, info1ty = odeint(lambda t, y, c: func(y, t, c), y0, t, args=(c,),
740
+ full_output=True, atol=1e-13, rtol=1e-11,
741
+ mxstep=10000,
742
+ Dfun=lambda t, y, c: jac(y, t, c), tfirst=True)
743
+ # The code should execute the exact same sequence of floating point
744
+ # calculations, so these should be exactly equal. We'll be safe and use
745
+ # a small tolerance.
746
+ assert_allclose(sol1, sol1ty, rtol=1e-12, err_msg="sol1 != sol1ty")
747
+
748
+
749
+ def test_odeint_errors():
750
+ def sys1d(x, t):
751
+ return -100*x
752
+
753
+ def bad1(x, t):
754
+ return 1.0/0
755
+
756
+ def bad2(x, t):
757
+ return "foo"
758
+
759
+ def bad_jac1(x, t):
760
+ return 1.0/0
761
+
762
+ def bad_jac2(x, t):
763
+ return [["foo"]]
764
+
765
+ def sys2d(x, t):
766
+ return [-100*x[0], -0.1*x[1]]
767
+
768
+ def sys2d_bad_jac(x, t):
769
+ return [[1.0/0, 0], [0, -0.1]]
770
+
771
+ assert_raises(ZeroDivisionError, odeint, bad1, 1.0, [0, 1])
772
+ assert_raises(ValueError, odeint, bad2, 1.0, [0, 1])
773
+
774
+ assert_raises(ZeroDivisionError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac1)
775
+ assert_raises(ValueError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac2)
776
+
777
+ assert_raises(ZeroDivisionError, odeint, sys2d, [1.0, 1.0], [0, 1],
778
+ Dfun=sys2d_bad_jac)
779
+
780
+
781
+ def test_odeint_bad_shapes():
782
+ # Tests of some errors that can occur with odeint.
783
+
784
+ def badrhs(x, t):
785
+ return [1, -1]
786
+
787
+ def sys1(x, t):
788
+ return -100*x
789
+
790
+ def badjac(x, t):
791
+ return [[0, 0, 0]]
792
+
793
+ # y0 must be at most 1-d.
794
+ bad_y0 = [[0, 0], [0, 0]]
795
+ assert_raises(ValueError, odeint, sys1, bad_y0, [0, 1])
796
+
797
+ # t must be at most 1-d.
798
+ bad_t = [[0, 1], [2, 3]]
799
+ assert_raises(ValueError, odeint, sys1, [10.0], bad_t)
800
+
801
+ # y0 is 10, but badrhs(x, t) returns [1, -1].
802
+ assert_raises(RuntimeError, odeint, badrhs, 10, [0, 1])
803
+
804
+ # shape of array returned by badjac(x, t) is not correct.
805
+ assert_raises(RuntimeError, odeint, sys1, [10, 10], [0, 1], Dfun=badjac)
806
+
807
+
808
+ def test_repeated_t_values():
809
+ """Regression test for gh-8217."""
810
+
811
+ def func(x, t):
812
+ return -0.25*x
813
+
814
+ t = np.zeros(10)
815
+ sol = odeint(func, [1.], t)
816
+ assert_array_equal(sol, np.ones((len(t), 1)))
817
+
818
+ tau = 4*np.log(2)
819
+ t = [0]*9 + [tau, 2*tau, 2*tau, 3*tau]
820
+ sol = odeint(func, [1, 2], t, rtol=1e-12, atol=1e-12)
821
+ expected_sol = np.array([[1.0, 2.0]]*9 +
822
+ [[0.5, 1.0],
823
+ [0.25, 0.5],
824
+ [0.25, 0.5],
825
+ [0.125, 0.25]])
826
+ assert_allclose(sol, expected_sol)
827
+
828
+ # Edge case: empty t sequence.
829
+ sol = odeint(func, [1.], [])
830
+ assert_array_equal(sol, np.array([], dtype=np.float64).reshape((0, 1)))
831
+
832
+ # t values are not monotonic.
833
+ assert_raises(ValueError, odeint, func, [1.], [0, 1, 0.5, 0])
834
+ assert_raises(ValueError, odeint, func, [1, 2, 3], [0, -1, -2, 3])
venv/lib/python3.10/site-packages/scipy/integrate/tests/test_odeint_jac.py ADDED
@@ -0,0 +1,74 @@
1
+ import numpy as np
2
+ from numpy.testing import assert_equal, assert_allclose
3
+ from scipy.integrate import odeint
4
+ import scipy.integrate._test_odeint_banded as banded5x5
5
+
6
+
7
+ def rhs(y, t):
8
+ dydt = np.zeros_like(y)
9
+ banded5x5.banded5x5(t, y, dydt)
10
+ return dydt
11
+
12
+
13
+ def jac(y, t):
14
+ n = len(y)
15
+ jac = np.zeros((n, n), order='F')
16
+ banded5x5.banded5x5_jac(t, y, 1, 1, jac)
17
+ return jac
18
+
19
+
20
+ def bjac(y, t):
21
+ n = len(y)
22
+ bjac = np.zeros((4, n), order='F')
23
+ banded5x5.banded5x5_bjac(t, y, 1, 1, bjac)
24
+ return bjac
25
+
26
+
27
+ JACTYPE_FULL = 1
28
+ JACTYPE_BANDED = 4
29
+
30
+
31
+ def check_odeint(jactype):
32
+ if jactype == JACTYPE_FULL:
33
+ ml = None
34
+ mu = None
35
+ jacobian = jac
36
+ elif jactype == JACTYPE_BANDED:
37
+ ml = 2
38
+ mu = 1
39
+ jacobian = bjac
40
+ else:
41
+ raise ValueError(f"invalid jactype: {jactype!r}")
42
+
43
+ y0 = np.arange(1.0, 6.0)
44
+ # These tolerances must match the tolerances used in banded5x5.f.
45
+ rtol = 1e-11
46
+ atol = 1e-13
47
+ dt = 0.125
48
+ nsteps = 64
49
+ t = dt * np.arange(nsteps+1)
50
+
51
+ sol, info = odeint(rhs, y0, t,
52
+ Dfun=jacobian, ml=ml, mu=mu,
53
+ atol=atol, rtol=rtol, full_output=True)
54
+ yfinal = sol[-1]
55
+ odeint_nst = info['nst'][-1]
56
+ odeint_nfe = info['nfe'][-1]
57
+ odeint_nje = info['nje'][-1]
58
+
59
+ y1 = y0.copy()
60
+ # Pure Fortran solution. y1 is modified in-place.
61
+ nst, nfe, nje = banded5x5.banded5x5_solve(y1, nsteps, dt, jactype)
62
+
63
+ # It is likely that yfinal and y1 are *exactly* the same, but
64
+ # we'll be cautious and use assert_allclose.
65
+ assert_allclose(yfinal, y1, rtol=1e-12)
66
+ assert_equal((odeint_nst, odeint_nfe, odeint_nje), (nst, nfe, nje))
67
+
68
+
69
+ def test_odeint_full_jac():
70
+ check_odeint(JACTYPE_FULL)
71
+
72
+
73
+ def test_odeint_banded_jac():
74
+ check_odeint(JACTYPE_BANDED)
venv/lib/python3.10/site-packages/scipy/integrate/tests/test_quadpack.py ADDED
@@ -0,0 +1,677 @@
1
+ import sys
2
+ import math
3
+ import numpy as np
4
+ from numpy import sqrt, cos, sin, arctan, exp, log, pi
5
+ from numpy.testing import (assert_,
6
+ assert_allclose, assert_array_less, assert_almost_equal)
7
+ import pytest
8
+
9
+ from scipy.integrate import quad, dblquad, tplquad, nquad
10
+ from scipy.special import erf, erfc
11
+ from scipy._lib._ccallback import LowLevelCallable
12
+
13
+ import ctypes
14
+ import ctypes.util
15
+ from scipy._lib._ccallback_c import sine_ctypes
16
+
17
+ import scipy.integrate._test_multivariate as clib_test
18
+
19
+
20
+ def assert_quad(value_and_err, tabled_value, error_tolerance=1.5e-8):
21
+ value, err = value_and_err
22
+ assert_allclose(value, tabled_value, atol=err, rtol=0)
23
+ if error_tolerance is not None:
24
+ assert_array_less(err, error_tolerance)
25
+
26
+
27
+ def get_clib_test_routine(name, restype, *argtypes):
28
+ ptr = getattr(clib_test, name)
29
+ return ctypes.cast(ptr, ctypes.CFUNCTYPE(restype, *argtypes))
30
+
31
+
32
+ class TestCtypesQuad:
33
+ def setup_method(self):
34
+ if sys.platform == 'win32':
35
+ files = ['api-ms-win-crt-math-l1-1-0.dll']
36
+ elif sys.platform == 'darwin':
37
+ files = ['libm.dylib']
38
+ else:
39
+ files = ['libm.so', 'libm.so.6']
40
+
41
+ for file in files:
42
+ try:
43
+ self.lib = ctypes.CDLL(file)
44
+ break
45
+ except OSError:
46
+ pass
47
+ else:
48
+ # This test doesn't work on some Linux platforms (Fedora for
49
+ # example) that put an ld script in libm.so - see gh-5370
50
+ pytest.skip("Ctypes can't import libm.so")
51
+
52
+ restype = ctypes.c_double
53
+ argtypes = (ctypes.c_double,)
54
+ for name in ['sin', 'cos', 'tan']:
55
+ func = getattr(self.lib, name)
56
+ func.restype = restype
57
+ func.argtypes = argtypes
58
+
59
+ def test_typical(self):
60
+ assert_quad(quad(self.lib.sin, 0, 5), quad(math.sin, 0, 5)[0])
61
+ assert_quad(quad(self.lib.cos, 0, 5), quad(math.cos, 0, 5)[0])
62
+ assert_quad(quad(self.lib.tan, 0, 1), quad(math.tan, 0, 1)[0])
63
+
64
+ def test_ctypes_sine(self):
65
+ quad(LowLevelCallable(sine_ctypes), 0, 1)
66
+
67
+ def test_ctypes_variants(self):
68
+ sin_0 = get_clib_test_routine('_sin_0', ctypes.c_double,
69
+ ctypes.c_double, ctypes.c_void_p)
70
+
71
+ sin_1 = get_clib_test_routine('_sin_1', ctypes.c_double,
72
+ ctypes.c_int, ctypes.POINTER(ctypes.c_double),
73
+ ctypes.c_void_p)
74
+
75
+ sin_2 = get_clib_test_routine('_sin_2', ctypes.c_double,
76
+ ctypes.c_double)
77
+
78
+ sin_3 = get_clib_test_routine('_sin_3', ctypes.c_double,
79
+ ctypes.c_int, ctypes.POINTER(ctypes.c_double))
80
+
81
+ sin_4 = get_clib_test_routine('_sin_3', ctypes.c_double,
82
+ ctypes.c_int, ctypes.c_double)
83
+
84
+ all_sigs = [sin_0, sin_1, sin_2, sin_3, sin_4]
85
+ legacy_sigs = [sin_2, sin_4]
86
+ legacy_only_sigs = [sin_4]
87
+
88
+ # LowLevelCallables work for new signatures
89
+ for j, func in enumerate(all_sigs):
90
+ callback = LowLevelCallable(func)
91
+ if func in legacy_only_sigs:
92
+ pytest.raises(ValueError, quad, callback, 0, pi)
93
+ else:
94
+ assert_allclose(quad(callback, 0, pi)[0], 2.0)
95
+
96
+ # Plain ctypes items work only for legacy signatures
97
+ for j, func in enumerate(legacy_sigs):
98
+ if func in legacy_sigs:
99
+ assert_allclose(quad(func, 0, pi)[0], 2.0)
100
+ else:
101
+ pytest.raises(ValueError, quad, func, 0, pi)
102
+
103
+
104
+ class TestMultivariateCtypesQuad:
105
+ def setup_method(self):
106
+ restype = ctypes.c_double
107
+ argtypes = (ctypes.c_int, ctypes.c_double)
108
+ for name in ['_multivariate_typical', '_multivariate_indefinite',
109
+ '_multivariate_sin']:
110
+ func = get_clib_test_routine(name, restype, *argtypes)
111
+ setattr(self, name, func)
112
+
113
+ def test_typical(self):
114
+ # 1) Typical function with two extra arguments:
115
+ assert_quad(quad(self._multivariate_typical, 0, pi, (2, 1.8)),
116
+ 0.30614353532540296487)
117
+
118
+ def test_indefinite(self):
119
+ # 2) Infinite integration limits --- Euler's constant
120
+ assert_quad(quad(self._multivariate_indefinite, 0, np.inf),
121
+ 0.577215664901532860606512)
122
+
123
+ def test_threadsafety(self):
124
+ # Ensure multivariate ctypes are threadsafe
125
+ def threadsafety(y):
126
+ return y + quad(self._multivariate_sin, 0, 1)[0]
127
+ assert_quad(quad(threadsafety, 0, 1), 0.9596976941318602)
128
+
129
+
130
+ class TestQuad:
131
+ def test_typical(self):
132
+ # 1) Typical function with two extra arguments:
133
+ def myfunc(x, n, z): # Bessel function integrand
134
+ return cos(n*x-z*sin(x))/pi
135
+ assert_quad(quad(myfunc, 0, pi, (2, 1.8)), 0.30614353532540296487)
136
+
137
+ def test_indefinite(self):
138
+ # 2) Infinite integration limits --- Euler's constant
139
+ def myfunc(x): # Euler's constant integrand
140
+ return -exp(-x)*log(x)
141
+ assert_quad(quad(myfunc, 0, np.inf), 0.577215664901532860606512)
142
+
143
+ def test_singular(self):
144
+ # 3) Singular points in region of integration.
145
+ def myfunc(x):
146
+ if 0 < x < 2.5:
147
+ return sin(x)
148
+ elif 2.5 <= x <= 5.0:
149
+ return exp(-x)
150
+ else:
151
+ return 0.0
152
+
153
+ assert_quad(quad(myfunc, 0, 10, points=[2.5, 5.0]),
154
+ 1 - cos(2.5) + exp(-2.5) - exp(-5.0))
155
+
156
+ def test_sine_weighted_finite(self):
157
+ # 4) Sine weighted integral (finite limits)
158
+ def myfunc(x, a):
159
+ return exp(a*(x-1))
160
+
161
+ ome = 2.0**3.4
162
+ assert_quad(quad(myfunc, 0, 1, args=20, weight='sin', wvar=ome),
163
+ (20*sin(ome)-ome*cos(ome)+ome*exp(-20))/(20**2 + ome**2))
164
+
165
+ def test_sine_weighted_infinite(self):
166
+ # 5) Sine weighted integral (infinite limits)
167
+ def myfunc(x, a):
168
+ return exp(-x*a)
169
+
170
+ a = 4.0
171
+ ome = 3.0
172
+ assert_quad(quad(myfunc, 0, np.inf, args=a, weight='sin', wvar=ome),
173
+ ome/(a**2 + ome**2))
174
+
175
+ def test_cosine_weighted_infinite(self):
176
+ # 6) Cosine weighted integral (negative infinite limits)
177
+ def myfunc(x, a):
178
+ return exp(x*a)
179
+
180
+ a = 2.5
181
+ ome = 2.3
182
+ assert_quad(quad(myfunc, -np.inf, 0, args=a, weight='cos', wvar=ome),
183
+ a/(a**2 + ome**2))
184
+
185
+ def test_algebraic_log_weight(self):
186
+ # 6) Algebraic-logarithmic weight.
187
+ def myfunc(x, a):
188
+ return 1/(1+x+2**(-a))
189
+
190
+ a = 1.5
191
+ assert_quad(quad(myfunc, -1, 1, args=a, weight='alg',
192
+ wvar=(-0.5, -0.5)),
193
+ pi/sqrt((1+2**(-a))**2 - 1))
194
+
195
+ def test_cauchypv_weight(self):
196
+ # 7) Cauchy principal value weighting w(x) = 1/(x-c)
197
+ def myfunc(x, a):
198
+ return 2.0**(-a)/((x-1)**2+4.0**(-a))
199
+
200
+ a = 0.4
201
+ tabledValue = ((2.0**(-0.4)*log(1.5) -
202
+ 2.0**(-1.4)*log((4.0**(-a)+16) / (4.0**(-a)+1)) -
203
+ arctan(2.0**(a+2)) -
204
+ arctan(2.0**a)) /
205
+ (4.0**(-a) + 1))
206
+ assert_quad(quad(myfunc, 0, 5, args=0.4, weight='cauchy', wvar=2.0),
207
+ tabledValue, error_tolerance=1.9e-8)
208
+
209
+ def test_b_less_than_a(self):
210
+ def f(x, p, q):
211
+ return p * np.exp(-q*x)
212
+
213
+ val_1, err_1 = quad(f, 0, np.inf, args=(2, 3))
214
+ val_2, err_2 = quad(f, np.inf, 0, args=(2, 3))
215
+ assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
216
+
217
+ def test_b_less_than_a_2(self):
218
+ def f(x, s):
219
+ return np.exp(-x**2 / 2 / s) / np.sqrt(2.*s)
220
+
221
+ val_1, err_1 = quad(f, -np.inf, np.inf, args=(2,))
222
+ val_2, err_2 = quad(f, np.inf, -np.inf, args=(2,))
223
+ assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
224
+
225
+ def test_b_less_than_a_3(self):
226
+ def f(x):
227
+ return 1.0
228
+
229
+ val_1, err_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0))
230
+ val_2, err_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0))
231
+ assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
232
+
233
+ def test_b_less_than_a_full_output(self):
234
+ def f(x):
235
+ return 1.0
236
+
237
+ res_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0), full_output=True)
238
+ res_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0), full_output=True)
239
+ err = max(res_1[1], res_2[1])
240
+ assert_allclose(res_1[0], -res_2[0], atol=err)
241
+
242
+ def test_double_integral(self):
243
+ # 8) Double Integral test
244
+ def simpfunc(y, x): # Note order of arguments.
245
+ return x+y
246
+
247
+ a, b = 1.0, 2.0
248
+ assert_quad(dblquad(simpfunc, a, b, lambda x: x, lambda x: 2*x),
249
+ 5/6.0 * (b**3.0-a**3.0))
250
+
251
+ def test_double_integral2(self):
252
+ def func(x0, x1, t0, t1):
253
+ return x0 + x1 + t0 + t1
254
+ def g(x):
255
+ return x
256
+ def h(x):
257
+ return 2 * x
258
+ args = 1, 2
259
+ assert_quad(dblquad(func, 1, 2, g, h, args=args),35./6 + 9*.5)
260
+
261
+ def test_double_integral3(self):
262
+ def func(x0, x1):
263
+ return x0 + x1 + 1 + 2
264
+ assert_quad(dblquad(func, 1, 2, 1, 2),6.)
265
+
266
+ @pytest.mark.parametrize(
267
+ "x_lower, x_upper, y_lower, y_upper, expected",
268
+ [
269
+ # Multiple integration of a function in n = 2 variables: f(x, y, z)
270
+ # over domain D = [-inf, 0] for all n.
271
+ (-np.inf, 0, -np.inf, 0, np.pi / 4),
272
+ # Multiple integration of a function in n = 2 variables: f(x, y, z)
273
+ # over domain D = [-inf, -1] for each n (one at a time).
274
+ (-np.inf, -1, -np.inf, 0, np.pi / 4 * erfc(1)),
275
+ (-np.inf, 0, -np.inf, -1, np.pi / 4 * erfc(1)),
276
+ # Multiple integration of a function in n = 2 variables: f(x, y, z)
277
+ # over domain D = [-inf, -1] for all n.
278
+ (-np.inf, -1, -np.inf, -1, np.pi / 4 * (erfc(1) ** 2)),
279
+ # Multiple integration of a function in n = 2 variables: f(x, y, z)
280
+ # over domain D = [-inf, 1] for each n (one at a time).
281
+ (-np.inf, 1, -np.inf, 0, np.pi / 4 * (erf(1) + 1)),
282
+ (-np.inf, 0, -np.inf, 1, np.pi / 4 * (erf(1) + 1)),
283
+ # Multiple integration of a function in n = 2 variables: f(x, y, z)
284
+ # over domain D = [-inf, 1] for all n.
285
+ (-np.inf, 1, -np.inf, 1, np.pi / 4 * ((erf(1) + 1) ** 2)),
286
+ # Multiple integration of a function in n = 2 variables: f(x, y, z)
287
+ # over domain Dx = [-inf, -1] and Dy = [-inf, 1].
288
+ (-np.inf, -1, -np.inf, 1, np.pi / 4 * ((erf(1) + 1) * erfc(1))),
289
+ # Multiple integration of a function in n = 2 variables: f(x, y)
290
+ # over domain Dx = [-inf, 1] and Dy = [-inf, -1].
291
+ (-np.inf, 1, -np.inf, -1, np.pi / 4 * ((erf(1) + 1) * erfc(1))),
292
+ # Multiple integration of a function in n = 2 variables: f(x, y)
293
+ # over domain D = [0, inf] for all n.
294
+ (0, np.inf, 0, np.inf, np.pi / 4),
295
+ # Multiple integration of a function in n = 2 variables: f(x, y)
296
+ # over domain D = [1, inf] for each n (one at a time).
297
+ (1, np.inf, 0, np.inf, np.pi / 4 * erfc(1)),
298
+ (0, np.inf, 1, np.inf, np.pi / 4 * erfc(1)),
299
+ # Multiple integration of a function in n = 2 variables: f(x, y)
300
+ # over domain D = [1, inf] for all n.
301
+ (1, np.inf, 1, np.inf, np.pi / 4 * (erfc(1) ** 2)),
302
+ # Multiple integration of a function in n = 2 variables: f(x, y)
303
+ # over domain D = [-1, inf] for each n (one at a time).
304
+ (-1, np.inf, 0, np.inf, np.pi / 4 * (erf(1) + 1)),
305
+ (0, np.inf, -1, np.inf, np.pi / 4 * (erf(1) + 1)),
306
+ # Multiple integration of a function in n = 2 variables: f(x, y)
307
+ # over domain D = [-1, inf] for all n.
308
+ (-1, np.inf, -1, np.inf, np.pi / 4 * ((erf(1) + 1) ** 2)),
309
+ # Multiple integration of a function in n = 2 variables: f(x, y)
310
+ # over domain Dx = [-1, inf] and Dy = [1, inf].
311
+ (-1, np.inf, 1, np.inf, np.pi / 4 * ((erf(1) + 1) * erfc(1))),
312
+ # Multiple integration of a function in n = 2 variables: f(x, y)
313
+ # over domain Dx = [1, inf] and Dy = [-1, inf].
314
+ (1, np.inf, -1, np.inf, np.pi / 4 * ((erf(1) + 1) * erfc(1))),
315
+ # Multiple integration of a function in n = 2 variables: f(x, y)
316
+ # over domain D = [-inf, inf] for all n.
317
+ (-np.inf, np.inf, -np.inf, np.inf, np.pi)
318
+ ]
319
+ )
320
+ def test_double_integral_improper(
321
+ self, x_lower, x_upper, y_lower, y_upper, expected
322
+ ):
323
+ # The Gaussian Integral.
324
+ def f(x, y):
325
+ return np.exp(-x ** 2 - y ** 2)
326
+
327
+ assert_quad(
328
+ dblquad(f, x_lower, x_upper, y_lower, y_upper),
329
+ expected,
330
+ error_tolerance=3e-8
331
+ )
332
+
333
+ def test_triple_integral(self):
334
+ # 9) Triple Integral test
335
+ def simpfunc(z, y, x, t): # Note order of arguments.
336
+ return (x+y+z)*t
337
+
338
+ a, b = 1.0, 2.0
339
+ assert_quad(tplquad(simpfunc, a, b,
340
+ lambda x: x, lambda x: 2*x,
341
+ lambda x, y: x - y, lambda x, y: x + y,
342
+ (2.,)),
343
+ 2*8/3.0 * (b**4.0 - a**4.0))
344
+
345
+ @pytest.mark.parametrize(
346
+ "x_lower, x_upper, y_lower, y_upper, z_lower, z_upper, expected",
347
+ [
348
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
349
+ # over domain D = [-inf, 0] for all n.
350
+ (-np.inf, 0, -np.inf, 0, -np.inf, 0, (np.pi ** (3 / 2)) / 8),
351
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
352
+ # over domain D = [-inf, -1] for each n (one at a time).
353
+ (-np.inf, -1, -np.inf, 0, -np.inf, 0,
354
+ (np.pi ** (3 / 2)) / 8 * erfc(1)),
355
+ (-np.inf, 0, -np.inf, -1, -np.inf, 0,
356
+ (np.pi ** (3 / 2)) / 8 * erfc(1)),
357
+ (-np.inf, 0, -np.inf, 0, -np.inf, -1,
358
+ (np.pi ** (3 / 2)) / 8 * erfc(1)),
359
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
360
+ # over domain D = [-inf, -1] for each n (two at a time).
361
+ (-np.inf, -1, -np.inf, -1, -np.inf, 0,
362
+ (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
363
+ (-np.inf, -1, -np.inf, 0, -np.inf, -1,
364
+ (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
365
+ (-np.inf, 0, -np.inf, -1, -np.inf, -1,
366
+ (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
367
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
368
+ # over domain D = [-inf, -1] for all n.
369
+ (-np.inf, -1, -np.inf, -1, -np.inf, -1,
370
+ (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 3)),
371
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
372
+ # over domain Dx = [-inf, -1] and Dy = Dz = [-inf, 1].
373
+ (-np.inf, -1, -np.inf, 1, -np.inf, 1,
374
+ (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
375
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
376
+ # over domain Dx = Dy = [-inf, -1] and Dz = [-inf, 1].
377
+ (-np.inf, -1, -np.inf, -1, -np.inf, 1,
378
+ (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
379
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
380
+ # over domain Dx = Dz = [-inf, -1] and Dy = [-inf, 1].
381
+ (-np.inf, -1, -np.inf, 1, -np.inf, -1,
382
+ (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
383
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
384
+ # over domain Dx = [-inf, 1] and Dy = Dz = [-inf, -1].
385
+ (-np.inf, 1, -np.inf, -1, -np.inf, -1,
386
+ (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
387
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
388
+ # over domain Dx = Dy = [-inf, 1] and Dz = [-inf, -1].
389
+ (-np.inf, 1, -np.inf, 1, -np.inf, -1,
390
+ (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
391
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
392
+ # over domain Dx = Dz = [-inf, 1] and Dy = [-inf, -1].
393
+ (-np.inf, 1, -np.inf, -1, -np.inf, 1,
394
+ (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
395
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
396
+ # over domain D = [-inf, 1] for each n (one at a time).
397
+ (-np.inf, 1, -np.inf, 0, -np.inf, 0,
398
+ (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
399
+ (-np.inf, 0, -np.inf, 1, -np.inf, 0,
400
+ (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
401
+ (-np.inf, 0, -np.inf, 0, -np.inf, 1,
402
+ (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
403
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
404
+ # over domain D = [-inf, 1] for each n (two at a time).
405
+ (-np.inf, 1, -np.inf, 1, -np.inf, 0,
406
+ (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
407
+ (-np.inf, 1, -np.inf, 0, -np.inf, 1,
408
+ (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
409
+ (-np.inf, 0, -np.inf, 1, -np.inf, 1,
410
+ (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
411
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
412
+ # over domain D = [-inf, 1] for all n.
413
+ (-np.inf, 1, -np.inf, 1, -np.inf, 1,
414
+ (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 3)),
415
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
416
+ # over domain D = [0, inf] for all n.
417
+ (0, np.inf, 0, np.inf, 0, np.inf, (np.pi ** (3 / 2)) / 8),
418
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
419
+ # over domain D = [1, inf] for each n (one at a time).
420
+ (1, np.inf, 0, np.inf, 0, np.inf,
421
+ (np.pi ** (3 / 2)) / 8 * erfc(1)),
422
+ (0, np.inf, 1, np.inf, 0, np.inf,
423
+ (np.pi ** (3 / 2)) / 8 * erfc(1)),
424
+ (0, np.inf, 0, np.inf, 1, np.inf,
425
+ (np.pi ** (3 / 2)) / 8 * erfc(1)),
426
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
427
+ # over domain D = [1, inf] for each n (two at a time).
428
+ (1, np.inf, 1, np.inf, 0, np.inf,
429
+ (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
430
+ (1, np.inf, 0, np.inf, 1, np.inf,
431
+ (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
432
+ (0, np.inf, 1, np.inf, 1, np.inf,
433
+ (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
434
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
435
+ # over domain D = [1, inf] for all n.
436
+ (1, np.inf, 1, np.inf, 1, np.inf,
437
+ (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 3)),
438
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
439
+ # over domain D = [-1, inf] for each n (one at a time).
440
+ (-1, np.inf, 0, np.inf, 0, np.inf,
441
+ (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
442
+ (0, np.inf, -1, np.inf, 0, np.inf,
443
+ (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
444
+ (0, np.inf, 0, np.inf, -1, np.inf,
445
+ (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
446
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
447
+ # over domain D = [-1, inf] for each n (two at a time).
448
+ (-1, np.inf, -1, np.inf, 0, np.inf,
449
+ (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
450
+ (-1, np.inf, 0, np.inf, -1, np.inf,
451
+ (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
452
+ (0, np.inf, -1, np.inf, -1, np.inf,
453
+ (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
454
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
455
+ # over domain D = [-1, inf] for all n.
456
+ (-1, np.inf, -1, np.inf, -1, np.inf,
457
+ (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 3)),
458
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
459
+ # over domain Dx = [1, inf] and Dy = Dz = [-1, inf].
460
+ (1, np.inf, -1, np.inf, -1, np.inf,
461
+ (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
462
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
463
+ # over domain Dx = Dy = [1, inf] and Dz = [-1, inf].
464
+ (1, np.inf, 1, np.inf, -1, np.inf,
465
+ (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
466
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
467
+ # over domain Dx = Dz = [1, inf] and Dy = [-1, inf].
468
+ (1, np.inf, -1, np.inf, 1, np.inf,
469
+ (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
470
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
471
+ # over domain Dx = [-1, inf] and Dy = Dz = [1, inf].
472
+ (-1, np.inf, 1, np.inf, 1, np.inf,
473
+ (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
474
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
475
+ # over domain Dx = Dy = [-1, inf] and Dz = [1, inf].
476
+ (-1, np.inf, -1, np.inf, 1, np.inf,
477
+ (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
478
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
479
+ # over domain Dx = Dz = [-1, inf] and Dy = [1, inf].
480
+ (-1, np.inf, 1, np.inf, -1, np.inf,
481
+ (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
482
+ # Multiple integration of a function in n = 3 variables: f(x, y, z)
483
+ # over domain D = [-inf, inf] for all n.
484
+ (-np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf,
485
+ np.pi ** (3 / 2)),
486
+ ],
487
+ )
488
+ def test_triple_integral_improper(
489
+ self,
490
+ x_lower,
491
+ x_upper,
492
+ y_lower,
493
+ y_upper,
494
+ z_lower,
495
+ z_upper,
496
+ expected
497
+ ):
498
+ # The Gaussian Integral.
499
+ def f(x, y, z):
500
+ return np.exp(-x ** 2 - y ** 2 - z ** 2)
501
+
502
+ assert_quad(
503
+ tplquad(f, x_lower, x_upper, y_lower, y_upper, z_lower, z_upper),
504
+ expected,
505
+ error_tolerance=6e-8
506
+ )
507
+
508
+ def test_complex(self):
509
+ def tfunc(x):
510
+ return np.exp(1j*x)
511
+
512
+ assert np.allclose(
513
+ quad(tfunc, 0, np.pi/2, complex_func=True)[0],
514
+ 1+1j)
515
+
516
+ # We consider a divergent case in order to force quadpack
517
+ # to return an error message. The output is compared
518
+ # against what is returned by explicit integration
519
+ # of the parts.
520
+ kwargs = {'a': 0, 'b': np.inf, 'full_output': True,
521
+ 'weight': 'cos', 'wvar': 1}
522
+ res_c = quad(tfunc, complex_func=True, **kwargs)
523
+ res_r = quad(lambda x: np.real(np.exp(1j*x)),
524
+ complex_func=False,
525
+ **kwargs)
526
+ res_i = quad(lambda x: np.imag(np.exp(1j*x)),
527
+ complex_func=False,
528
+ **kwargs)
529
+
530
+ np.testing.assert_equal(res_c[0], res_r[0] + 1j*res_i[0])
531
+ np.testing.assert_equal(res_c[1], res_r[1] + 1j*res_i[1])
532
+
533
+ assert len(res_c[2]['real']) == len(res_r[2:]) == 3
534
+ assert res_c[2]['real'][2] == res_r[4]
535
+ assert res_c[2]['real'][1] == res_r[3]
536
+ assert res_c[2]['real'][0]['lst'] == res_r[2]['lst']
537
+
538
+ assert len(res_c[2]['imag']) == len(res_i[2:]) == 1
539
+ assert res_c[2]['imag'][0]['lst'] == res_i[2]['lst']
540
+
541
+
542
+ class TestNQuad:
543
+ def test_fixed_limits(self):
544
+ def func1(x0, x1, x2, x3):
545
+ val = (x0**2 + x1*x2 - x3**3 + np.sin(x0) +
546
+ (1 if (x0 - 0.2*x3 - 0.5 - 0.25*x1 > 0) else 0))
547
+ return val
548
+
549
+ def opts_basic(*args):
550
+ return {'points': [0.2*args[2] + 0.5 + 0.25*args[0]]}
551
+
552
+ res = nquad(func1, [[0, 1], [-1, 1], [.13, .8], [-.15, 1]],
553
+ opts=[opts_basic, {}, {}, {}], full_output=True)
554
+ assert_quad(res[:-1], 1.5267454070738635)
555
+ assert_(res[-1]['neval'] > 0 and res[-1]['neval'] < 4e5)
556
+
557
+ def test_variable_limits(self):
558
+ scale = .1
559
+
560
+ def func2(x0, x1, x2, x3, t0, t1):
561
+ val = (x0*x1*x3**2 + np.sin(x2) + 1 +
562
+ (1 if x0 + t1*x1 - t0 > 0 else 0))
563
+ return val
564
+
565
+ def lim0(x1, x2, x3, t0, t1):
566
+ return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1,
567
+ scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1]
568
+
569
+ def lim1(x2, x3, t0, t1):
570
+ return [scale * (t0*x2 + t1*x3) - 1,
571
+ scale * (t0*x2 + t1*x3) + 1]
572
+
573
+ def lim2(x3, t0, t1):
574
+ return [scale * (x3 + t0**2*t1**3) - 1,
575
+ scale * (x3 + t0**2*t1**3) + 1]
576
+
577
+ def lim3(t0, t1):
578
+ return [scale * (t0 + t1) - 1, scale * (t0 + t1) + 1]
579
+
580
+ def opts0(x1, x2, x3, t0, t1):
581
+ return {'points': [t0 - t1*x1]}
582
+
583
+ def opts1(x2, x3, t0, t1):
584
+ return {}
585
+
586
+ def opts2(x3, t0, t1):
587
+ return {}
588
+
589
+ def opts3(t0, t1):
590
+ return {}
591
+
592
+ res = nquad(func2, [lim0, lim1, lim2, lim3], args=(0, 0),
593
+ opts=[opts0, opts1, opts2, opts3])
594
+ assert_quad(res, 25.066666666666663)
595
+
596
+ def test_square_separate_ranges_and_opts(self):
597
+ def f(y, x):
598
+ return 1.0
599
+
600
+ assert_quad(nquad(f, [[-1, 1], [-1, 1]], opts=[{}, {}]), 4.0)
601
+
602
+ def test_square_aliased_ranges_and_opts(self):
603
+ def f(y, x):
604
+ return 1.0
605
+
606
+ r = [-1, 1]
607
+ opt = {}
608
+ assert_quad(nquad(f, [r, r], opts=[opt, opt]), 4.0)
609
+
610
+ def test_square_separate_fn_ranges_and_opts(self):
611
+ def f(y, x):
612
+ return 1.0
613
+
614
+ def fn_range0(*args):
615
+ return (-1, 1)
616
+
617
+ def fn_range1(*args):
618
+ return (-1, 1)
619
+
620
+ def fn_opt0(*args):
621
+ return {}
622
+
623
+ def fn_opt1(*args):
624
+ return {}
625
+
626
+ ranges = [fn_range0, fn_range1]
627
+ opts = [fn_opt0, fn_opt1]
628
+ assert_quad(nquad(f, ranges, opts=opts), 4.0)
629
+
630
+ def test_square_aliased_fn_ranges_and_opts(self):
631
+ def f(y, x):
632
+ return 1.0
633
+
634
+ def fn_range(*args):
635
+ return (-1, 1)
636
+
637
+ def fn_opt(*args):
638
+ return {}
639
+
640
+ ranges = [fn_range, fn_range]
641
+ opts = [fn_opt, fn_opt]
642
+ assert_quad(nquad(f, ranges, opts=opts), 4.0)
643
+
644
+ def test_matching_quad(self):
645
+ def func(x):
646
+ return x**2 + 1
647
+
648
+ res, reserr = quad(func, 0, 4)
649
+ res2, reserr2 = nquad(func, ranges=[[0, 4]])
650
+ assert_almost_equal(res, res2)
651
+ assert_almost_equal(reserr, reserr2)
652
+
653
+ def test_matching_dblquad(self):
654
+ def func2d(x0, x1):
655
+ return x0**2 + x1**3 - x0 * x1 + 1
656
+
657
+ res, reserr = dblquad(func2d, -2, 2, lambda x: -3, lambda x: 3)
658
+ res2, reserr2 = nquad(func2d, [[-3, 3], (-2, 2)])
659
+ assert_almost_equal(res, res2)
660
+ assert_almost_equal(reserr, reserr2)
661
+
662
+ def test_matching_tplquad(self):
663
+ def func3d(x0, x1, x2, c0, c1):
664
+ return x0**2 + c0 * x1**3 - x0 * x1 + 1 + c1 * np.sin(x2)
665
+
666
+ res = tplquad(func3d, -1, 2, lambda x: -2, lambda x: 2,
667
+ lambda x, y: -np.pi, lambda x, y: np.pi,
668
+ args=(2, 3))
669
+ res2 = nquad(func3d, [[-np.pi, np.pi], [-2, 2], (-1, 2)], args=(2, 3))
670
+ assert_almost_equal(res, res2)
671
+
672
+ def test_dict_as_opts(self):
673
+ try:
674
+ nquad(lambda x, y: x * y, [[0, 1], [0, 1]], opts={'epsrel': 0.0001})
675
+ except TypeError:
676
+ assert False
677
+
venv/lib/python3.10/site-packages/scipy/integrate/tests/test_quadrature.py ADDED
@@ -0,0 +1,766 @@
1
+ # mypy: disable-error-code="attr-defined"
2
+ import pytest
3
+ import numpy as np
4
+ from numpy import cos, sin, pi
5
+ from numpy.testing import (assert_equal, assert_almost_equal, assert_allclose,
6
+ assert_, suppress_warnings)
7
+ from hypothesis import given
8
+ import hypothesis.strategies as st
9
+ import hypothesis.extra.numpy as hyp_num
10
+
11
+ from scipy.integrate import (quadrature, romberg, romb, newton_cotes,
12
+ cumulative_trapezoid, cumtrapz, trapz, trapezoid,
13
+ quad, simpson, simps, fixed_quad, AccuracyWarning,
14
+ qmc_quad, cumulative_simpson)
15
+ from scipy.integrate._quadrature import _cumulative_simpson_unequal_intervals
16
+ from scipy import stats, special
17
+
18
+
19
+ class TestFixedQuad:
20
+ def test_scalar(self):
21
+ n = 4
22
+ expected = 1/(2*n)
23
+ got, _ = fixed_quad(lambda x: x**(2*n - 1), 0, 1, n=n)
24
+ # quadrature exact for this input
25
+ assert_allclose(got, expected, rtol=1e-12)
26
+
27
+ def test_vector(self):
28
+ n = 4
29
+ p = np.arange(1, 2*n)
30
+ expected = 1/(p + 1)
31
+ got, _ = fixed_quad(lambda x: x**p[:, None], 0, 1, n=n)
32
+ assert_allclose(got, expected, rtol=1e-12)
33
+
34
+
35
+ @pytest.mark.filterwarnings('ignore::DeprecationWarning')
36
+ class TestQuadrature:
37
+ def quad(self, x, a, b, args):
38
+ raise NotImplementedError
39
+
40
+ def test_quadrature(self):
41
+ # Typical function with two extra arguments:
42
+ def myfunc(x, n, z): # Bessel function integrand
43
+ return cos(n*x-z*sin(x))/pi
44
+ val, err = quadrature(myfunc, 0, pi, (2, 1.8))
45
+ table_val = 0.30614353532540296487
46
+ assert_almost_equal(val, table_val, decimal=7)
47
+
48
+ def test_quadrature_rtol(self):
49
+ def myfunc(x, n, z): # Bessel function integrand
50
+ return 1e90 * cos(n*x-z*sin(x))/pi
51
+ val, err = quadrature(myfunc, 0, pi, (2, 1.8), rtol=1e-10)
52
+ table_val = 1e90 * 0.30614353532540296487
53
+ assert_allclose(val, table_val, rtol=1e-10)
54
+
55
+ def test_quadrature_miniter(self):
56
+ # Typical function with two extra arguments:
57
+ def myfunc(x, n, z): # Bessel function integrand
58
+ return cos(n*x-z*sin(x))/pi
59
+ table_val = 0.30614353532540296487
60
+ for miniter in [5, 52]:
61
+ val, err = quadrature(myfunc, 0, pi, (2, 1.8), miniter=miniter)
62
+ assert_almost_equal(val, table_val, decimal=7)
63
+ assert_(err < 1.0)
64
+
65
+ def test_quadrature_single_args(self):
66
+ def myfunc(x, n):
67
+ return 1e90 * cos(n*x-1.8*sin(x))/pi
68
+ val, err = quadrature(myfunc, 0, pi, args=2, rtol=1e-10)
69
+ table_val = 1e90 * 0.30614353532540296487
70
+ assert_allclose(val, table_val, rtol=1e-10)
71
+
72
+ def test_romberg(self):
73
+ # Typical function with two extra arguments:
74
+ def myfunc(x, n, z): # Bessel function integrand
75
+ return cos(n*x-z*sin(x))/pi
76
+ val = romberg(myfunc, 0, pi, args=(2, 1.8))
77
+ table_val = 0.30614353532540296487
78
+ assert_almost_equal(val, table_val, decimal=7)
79
+
80
+ def test_romberg_rtol(self):
81
+ # Typical function with two extra arguments:
82
+ def myfunc(x, n, z): # Bessel function integrand
83
+ return 1e19*cos(n*x-z*sin(x))/pi
84
+ val = romberg(myfunc, 0, pi, args=(2, 1.8), rtol=1e-10)
85
+ table_val = 1e19*0.30614353532540296487
86
+ assert_allclose(val, table_val, rtol=1e-10)
87
+
88
+ def test_romb(self):
89
+ assert_equal(romb(np.arange(17)), 128)
90
+
91
+ def test_romb_gh_3731(self):
92
+ # Check that romb makes maximal use of data points
93
+ x = np.arange(2**4+1)
94
+ y = np.cos(0.2*x)
95
+ val = romb(y)
96
+ val2, err = quad(lambda x: np.cos(0.2*x), x.min(), x.max())
97
+ assert_allclose(val, val2, rtol=1e-8, atol=0)
98
+
99
+ # should be equal to romb with 2**k+1 samples
100
+ with suppress_warnings() as sup:
101
+ sup.filter(AccuracyWarning, "divmax .4. exceeded")
102
+ val3 = romberg(lambda x: np.cos(0.2*x), x.min(), x.max(), divmax=4)
103
+ assert_allclose(val, val3, rtol=1e-12, atol=0)
104
+
105
+ def test_non_dtype(self):
106
+ # Check that we work fine with functions returning float
107
+ import math
108
+ valmath = romberg(math.sin, 0, 1)
109
+ expected_val = 0.45969769413185085
110
+ assert_almost_equal(valmath, expected_val, decimal=7)
111
+
112
+ def test_newton_cotes(self):
113
+ """Test the first few degrees, for evenly spaced points."""
114
+ n = 1
115
+ wts, errcoff = newton_cotes(n, 1)
116
+ assert_equal(wts, n*np.array([0.5, 0.5]))
117
+ assert_almost_equal(errcoff, -n**3/12.0)
118
+
119
+ n = 2
120
+ wts, errcoff = newton_cotes(n, 1)
121
+ assert_almost_equal(wts, n*np.array([1.0, 4.0, 1.0])/6.0)
122
+ assert_almost_equal(errcoff, -n**5/2880.0)
123
+
124
+ n = 3
125
+ wts, errcoff = newton_cotes(n, 1)
126
+ assert_almost_equal(wts, n*np.array([1.0, 3.0, 3.0, 1.0])/8.0)
127
+ assert_almost_equal(errcoff, -n**5/6480.0)
128
+
129
+ n = 4
130
+ wts, errcoff = newton_cotes(n, 1)
131
+ assert_almost_equal(wts, n*np.array([7.0, 32.0, 12.0, 32.0, 7.0])/90.0)
132
+ assert_almost_equal(errcoff, -n**7/1935360.0)
133
+
134
+ def test_newton_cotes2(self):
135
+ """Test newton_cotes with points that are not evenly spaced."""
136
+
137
+ x = np.array([0.0, 1.5, 2.0])
138
+ y = x**2
139
+ wts, errcoff = newton_cotes(x)
140
+ exact_integral = 8.0/3
141
+ numeric_integral = np.dot(wts, y)
142
+ assert_almost_equal(numeric_integral, exact_integral)
143
+
144
+ x = np.array([0.0, 1.4, 2.1, 3.0])
145
+ y = x**2
146
+ wts, errcoff = newton_cotes(x)
147
+ exact_integral = 9.0
148
+ numeric_integral = np.dot(wts, y)
149
+ assert_almost_equal(numeric_integral, exact_integral)
150
+
151
+ # ignore the DeprecationWarning emitted by the even kwd
152
+ @pytest.mark.filterwarnings('ignore::DeprecationWarning')
153
+ def test_simpson(self):
154
+ y = np.arange(17)
155
+ assert_equal(simpson(y), 128)
156
+ assert_equal(simpson(y, dx=0.5), 64)
157
+ assert_equal(simpson(y, x=np.linspace(0, 4, 17)), 32)
158
+
159
+ y = np.arange(4)
160
+ x = 2**y
161
+ assert_equal(simpson(y, x=x, even='avg'), 13.875)
162
+ assert_equal(simpson(y, x=x, even='first'), 13.75)
163
+ assert_equal(simpson(y, x=x, even='last'), 14)
164
+
165
+ # `even='simpson'`
166
+ # integral should be exactly 21
167
+ x = np.linspace(1, 4, 4)
168
+ def f(x):
169
+ return x**2
170
+
171
+ assert_allclose(simpson(f(x), x=x, even='simpson'), 21.0)
172
+ assert_allclose(simpson(f(x), x=x, even='avg'), 21 + 1/6)
173
+
174
+ # integral should be exactly 114
175
+ x = np.linspace(1, 7, 4)
176
+ assert_allclose(simpson(f(x), dx=2.0, even='simpson'), 114)
177
+ assert_allclose(simpson(f(x), dx=2.0, even='avg'), 115 + 1/3)
178
+
179
+ # `even='simpson'`, test multi-axis behaviour
180
+ a = np.arange(16).reshape(4, 4)
181
+ x = np.arange(64.).reshape(4, 4, 4)
182
+ y = f(x)
183
+ for i in range(3):
184
+ r = simpson(y, x=x, even='simpson', axis=i)
185
+ it = np.nditer(a, flags=['multi_index'])
186
+ for _ in it:
187
+ idx = list(it.multi_index)
188
+ idx.insert(i, slice(None))
189
+ integral = x[tuple(idx)][-1]**3 / 3 - x[tuple(idx)][0]**3 / 3
190
+ assert_allclose(r[it.multi_index], integral)
191
+
192
+ # test when integration axis only has two points
193
+ x = np.arange(16).reshape(8, 2)
194
+ y = f(x)
195
+ for even in ['simpson', 'avg', 'first', 'last']:
196
+ r = simpson(y, x=x, even=even, axis=-1)
197
+
198
+ integral = 0.5 * (y[:, 1] + y[:, 0]) * (x[:, 1] - x[:, 0])
199
+ assert_allclose(r, integral)
200
+
201
+ # odd points, test multi-axis behaviour
202
+ a = np.arange(25).reshape(5, 5)
203
+ x = np.arange(125).reshape(5, 5, 5)
204
+ y = f(x)
205
+ for i in range(3):
206
+ r = simpson(y, x=x, axis=i)
207
+ it = np.nditer(a, flags=['multi_index'])
208
+ for _ in it:
209
+ idx = list(it.multi_index)
210
+ idx.insert(i, slice(None))
211
+ integral = x[tuple(idx)][-1]**3 / 3 - x[tuple(idx)][0]**3 / 3
212
+ assert_allclose(r[it.multi_index], integral)
213
+
214
+ # Tests for checking base case
215
+ x = np.array([3])
216
+ y = np.power(x, 2)
217
+ assert_allclose(simpson(y, x=x, axis=0), 0.0)
218
+ assert_allclose(simpson(y, x=x, axis=-1), 0.0)
219
+
220
+ x = np.array([3, 3, 3, 3])
221
+ y = np.power(x, 2)
222
+ assert_allclose(simpson(y, x=x, axis=0), 0.0)
223
+ assert_allclose(simpson(y, x=x, axis=-1), 0.0)
224
+
225
+ x = np.array([[1, 2, 4, 8], [1, 2, 4, 8], [1, 2, 4, 8]])
226
+ y = np.power(x, 2)
227
+ zero_axis = [0.0, 0.0, 0.0, 0.0]
228
+ default_axis = [170 + 1/3] * 3 # 8**3 / 3 - 1/3
229
+ assert_allclose(simpson(y, x=x, axis=0), zero_axis)
230
+ # the following should be exact for even='simpson'
231
+ assert_allclose(simpson(y, x=x, axis=-1), default_axis)
232
+
233
+ x = np.array([[1, 2, 4, 8], [1, 2, 4, 8], [1, 8, 16, 32]])
234
+ y = np.power(x, 2)
235
+ zero_axis = [0.0, 136.0, 1088.0, 8704.0]
236
+ default_axis = [170 + 1/3, 170 + 1/3, 32**3 / 3 - 1/3]
237
+ assert_allclose(simpson(y, x=x, axis=0), zero_axis)
238
+ assert_allclose(simpson(y, x=x, axis=-1), default_axis)
239
+
240
+ def test_simpson_deprecations(self):
241
+ x = np.linspace(0, 3, 4)
242
+ y = x**2
243
+ with pytest.deprecated_call(match="The 'even' keyword is deprecated"):
244
+ simpson(y, x=x, even='first')
245
+ with pytest.deprecated_call(match="use keyword arguments"):
246
+ simpson(y, x)
247
+
248
+ @pytest.mark.parametrize('droplast', [False, True])
249
+ def test_simpson_2d_integer_no_x(self, droplast):
250
+ # The inputs are 2d integer arrays. The results should be
251
+ # identical to the results when the inputs are floating point.
252
+ y = np.array([[2, 2, 4, 4, 8, 8, -4, 5],
253
+ [4, 4, 2, -4, 10, 22, -2, 10]])
254
+ if droplast:
255
+ y = y[:, :-1]
256
+ result = simpson(y, axis=-1)
257
+ expected = simpson(np.array(y, dtype=np.float64), axis=-1)
258
+ assert_equal(result, expected)
259
+
260
+ def test_simps(self):
261
+ # Basic coverage test for the alias
262
+ y = np.arange(5)
263
+ x = 2**y
264
+ with pytest.deprecated_call(match="simpson"):
265
+ assert_allclose(
266
+ simpson(y, x=x, dx=0.5),
267
+ simps(y, x=x, dx=0.5)
268
+ )
269
+
270
+
271
+ @pytest.mark.parametrize('func', [romberg, quadrature])
272
+ def test_deprecate_integrator(func):
273
+ message = f"`scipy.integrate.{func.__name__}` is deprecated..."
274
+ with pytest.deprecated_call(match=message):
275
+ func(np.exp, 0, 1)
276
+
277
+
278
+ class TestCumulative_trapezoid:
279
+ def test_1d(self):
280
+ x = np.linspace(-2, 2, num=5)
281
+ y = x
282
+ y_int = cumulative_trapezoid(y, x, initial=0)
283
+ y_expected = [0., -1.5, -2., -1.5, 0.]
284
+ assert_allclose(y_int, y_expected)
285
+
286
+ y_int = cumulative_trapezoid(y, x, initial=None)
287
+ assert_allclose(y_int, y_expected[1:])
288
+
289
+ def test_y_nd_x_nd(self):
290
+ x = np.arange(3 * 2 * 4).reshape(3, 2, 4)
291
+ y = x
292
+ y_int = cumulative_trapezoid(y, x, initial=0)
293
+ y_expected = np.array([[[0., 0.5, 2., 4.5],
294
+ [0., 4.5, 10., 16.5]],
295
+ [[0., 8.5, 18., 28.5],
296
+ [0., 12.5, 26., 40.5]],
297
+ [[0., 16.5, 34., 52.5],
298
+ [0., 20.5, 42., 64.5]]])
299
+
300
+ assert_allclose(y_int, y_expected)
301
+
302
+ # Try with all axes
303
+ shapes = [(2, 2, 4), (3, 1, 4), (3, 2, 3)]
304
+ for axis, shape in zip([0, 1, 2], shapes):
305
+ y_int = cumulative_trapezoid(y, x, initial=0, axis=axis)
306
+ assert_equal(y_int.shape, (3, 2, 4))
307
+ y_int = cumulative_trapezoid(y, x, initial=None, axis=axis)
308
+ assert_equal(y_int.shape, shape)
309
+
310
+ def test_y_nd_x_1d(self):
311
+ y = np.arange(3 * 2 * 4).reshape(3, 2, 4)
312
+ x = np.arange(4)**2
313
+ # Try with all axes
314
+ ys_expected = (
315
+ np.array([[[4., 5., 6., 7.],
316
+ [8., 9., 10., 11.]],
317
+ [[40., 44., 48., 52.],
318
+ [56., 60., 64., 68.]]]),
319
+ np.array([[[2., 3., 4., 5.]],
320
+ [[10., 11., 12., 13.]],
321
+ [[18., 19., 20., 21.]]]),
322
+ np.array([[[0.5, 5., 17.5],
323
+ [4.5, 21., 53.5]],
324
+ [[8.5, 37., 89.5],
325
+ [12.5, 53., 125.5]],
326
+ [[16.5, 69., 161.5],
327
+ [20.5, 85., 197.5]]]))
328
+
329
+ for axis, y_expected in zip([0, 1, 2], ys_expected):
330
+ y_int = cumulative_trapezoid(y, x=x[:y.shape[axis]], axis=axis,
331
+ initial=None)
332
+ assert_allclose(y_int, y_expected)
333
+
334
+ def test_x_none(self):
335
+ y = np.linspace(-2, 2, num=5)
336
+
337
+ y_int = cumulative_trapezoid(y)
338
+ y_expected = [-1.5, -2., -1.5, 0.]
339
+ assert_allclose(y_int, y_expected)
340
+
341
+ y_int = cumulative_trapezoid(y, initial=0)
342
+ y_expected = [0, -1.5, -2., -1.5, 0.]
343
+ assert_allclose(y_int, y_expected)
344
+
345
+ y_int = cumulative_trapezoid(y, dx=3)
346
+ y_expected = [-4.5, -6., -4.5, 0.]
347
+ assert_allclose(y_int, y_expected)
348
+
349
+ y_int = cumulative_trapezoid(y, dx=3, initial=0)
350
+ y_expected = [0, -4.5, -6., -4.5, 0.]
351
+ assert_allclose(y_int, y_expected)
352
+
353
+ @pytest.mark.parametrize(
354
+ "initial", [1, 0.5]
355
+ )
356
+ def test_initial_warning(self, initial):
357
+ """If initial is not None or 0, a ValueError is raised."""
358
+ y = np.linspace(0, 10, num=10)
359
+ with pytest.deprecated_call(match="`initial`"):
360
+ res = cumulative_trapezoid(y, initial=initial)
361
+ assert_allclose(res, [initial, *np.cumsum(y[1:] + y[:-1])/2])
362
+
363
+ def test_zero_len_y(self):
364
+ with pytest.raises(ValueError, match="At least one point is required"):
365
+ cumulative_trapezoid(y=[])
366
+
367
+ def test_cumtrapz(self):
368
+ # Basic coverage test for the alias
369
+ x = np.arange(3 * 2 * 4).reshape(3, 2, 4)
370
+ y = x
371
+ with pytest.deprecated_call(match="cumulative_trapezoid"):
372
+ assert_allclose(cumulative_trapezoid(y, x, dx=0.5, axis=0, initial=0),
373
+ cumtrapz(y, x, dx=0.5, axis=0, initial=0),
374
+ rtol=1e-14)
375
+
376
+
377
+ class TestTrapezoid:
378
+ def test_simple(self):
379
+ x = np.arange(-10, 10, .1)
380
+ r = trapezoid(np.exp(-.5 * x ** 2) / np.sqrt(2 * np.pi), dx=0.1)
381
+ # check integral of normal equals 1
382
+ assert_allclose(r, 1)
383
+
384
+ def test_ndim(self):
385
+ x = np.linspace(0, 1, 3)
386
+ y = np.linspace(0, 2, 8)
387
+ z = np.linspace(0, 3, 13)
388
+
389
+ wx = np.ones_like(x) * (x[1] - x[0])
390
+ wx[0] /= 2
391
+ wx[-1] /= 2
392
+ wy = np.ones_like(y) * (y[1] - y[0])
393
+ wy[0] /= 2
394
+ wy[-1] /= 2
395
+ wz = np.ones_like(z) * (z[1] - z[0])
396
+ wz[0] /= 2
397
+ wz[-1] /= 2
398
+
399
+ q = x[:, None, None] + y[None,:, None] + z[None, None,:]
400
+
401
+ qx = (q * wx[:, None, None]).sum(axis=0)
402
+ qy = (q * wy[None, :, None]).sum(axis=1)
403
+ qz = (q * wz[None, None, :]).sum(axis=2)
404
+
405
+ # n-d `x`
406
+ r = trapezoid(q, x=x[:, None, None], axis=0)
407
+ assert_allclose(r, qx)
408
+ r = trapezoid(q, x=y[None,:, None], axis=1)
409
+ assert_allclose(r, qy)
410
+ r = trapezoid(q, x=z[None, None,:], axis=2)
411
+ assert_allclose(r, qz)
412
+
413
+ # 1-d `x`
414
+ r = trapezoid(q, x=x, axis=0)
415
+ assert_allclose(r, qx)
416
+ r = trapezoid(q, x=y, axis=1)
417
+ assert_allclose(r, qy)
418
+ r = trapezoid(q, x=z, axis=2)
419
+ assert_allclose(r, qz)
420
+
421
+ def test_masked(self):
422
+ # Testing that masked arrays behave as if the function is 0 where
423
+ # masked
424
+ x = np.arange(5)
425
+ y = x * x
426
+ mask = x == 2
427
+ ym = np.ma.array(y, mask=mask)
428
+ r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16))
429
+ assert_allclose(trapezoid(ym, x), r)
430
+
431
+ xm = np.ma.array(x, mask=mask)
432
+ assert_allclose(trapezoid(ym, xm), r)
433
+
434
+ xm = np.ma.array(x, mask=mask)
435
+ assert_allclose(trapezoid(y, xm), r)
436
+
437
+ def test_trapz_alias(self):
438
+ # Basic coverage test for the alias
439
+ y = np.arange(4)
440
+ x = 2**y
441
+ with pytest.deprecated_call(match="trapezoid"):
442
+ assert_equal(trapezoid(y, x=x, dx=0.5, axis=0),
443
+ trapz(y, x=x, dx=0.5, axis=0))
444
+
445
+
446
+ class TestQMCQuad:
447
+ def test_input_validation(self):
448
+ message = "`func` must be callable."
449
+ with pytest.raises(TypeError, match=message):
450
+ qmc_quad("a duck", [0, 0], [1, 1])
451
+
452
+ message = "`func` must evaluate the integrand at points..."
453
+ with pytest.raises(ValueError, match=message):
454
+ qmc_quad(lambda: 1, [0, 0], [1, 1])
455
+
456
+ def func(x):
457
+ assert x.ndim == 1
458
+ return np.sum(x)
459
+ message = "Exception encountered when attempting vectorized call..."
460
+ with pytest.warns(UserWarning, match=message):
461
+ qmc_quad(func, [0, 0], [1, 1])
462
+
463
+ message = "`n_points` must be an integer."
464
+ with pytest.raises(TypeError, match=message):
465
+ qmc_quad(lambda x: 1, [0, 0], [1, 1], n_points=1024.5)
466
+
467
+ message = "`n_estimates` must be an integer."
468
+ with pytest.raises(TypeError, match=message):
469
+ qmc_quad(lambda x: 1, [0, 0], [1, 1], n_estimates=8.5)
470
+
471
+ message = "`qrng` must be an instance of scipy.stats.qmc.QMCEngine."
472
+ with pytest.raises(TypeError, match=message):
473
+ qmc_quad(lambda x: 1, [0, 0], [1, 1], qrng="a duck")
474
+
475
+ message = "`qrng` must be initialized with dimensionality equal to "
476
+ with pytest.raises(ValueError, match=message):
477
+ qmc_quad(lambda x: 1, [0, 0], [1, 1], qrng=stats.qmc.Sobol(1))
478
+
479
+ message = r"`log` must be boolean \(`True` or `False`\)."
480
+ with pytest.raises(TypeError, match=message):
481
+ qmc_quad(lambda x: 1, [0, 0], [1, 1], log=10)
482
+
483
+ def basic_test(self, n_points=2**8, n_estimates=8, signs=np.ones(2)):
484
+
485
+ ndim = 2
486
+ mean = np.zeros(ndim)
487
+ cov = np.eye(ndim)
488
+
489
+ def func(x):
490
+ return stats.multivariate_normal.pdf(x.T, mean, cov)
491
+
492
+ rng = np.random.default_rng(2879434385674690281)
493
+ qrng = stats.qmc.Sobol(ndim, seed=rng)
494
+ a = np.zeros(ndim)
495
+ b = np.ones(ndim) * signs
496
+ res = qmc_quad(func, a, b, n_points=n_points,
497
+ n_estimates=n_estimates, qrng=qrng)
498
+ ref = stats.multivariate_normal.cdf(b, mean, cov, lower_limit=a)
499
+ atol = special.stdtrit(n_estimates-1, 0.995) * res.standard_error # 99% CI
500
+ assert_allclose(res.integral, ref, atol=atol)
501
+ assert np.prod(signs)*res.integral > 0
502
+
503
+ rng = np.random.default_rng(2879434385674690281)
504
+ qrng = stats.qmc.Sobol(ndim, seed=rng)
505
+ logres = qmc_quad(lambda *args: np.log(func(*args)), a, b,
506
+ n_points=n_points, n_estimates=n_estimates,
507
+ log=True, qrng=qrng)
508
+ assert_allclose(np.exp(logres.integral), res.integral, rtol=1e-14)
509
+ assert np.imag(logres.integral) == (np.pi if np.prod(signs) < 0 else 0)
510
+ assert_allclose(np.exp(logres.standard_error),
511
+ res.standard_error, rtol=1e-14, atol=1e-16)
512
+
513
+ @pytest.mark.parametrize("n_points", [2**8, 2**12])
514
+ @pytest.mark.parametrize("n_estimates", [8, 16])
515
+ def test_basic(self, n_points, n_estimates):
516
+ self.basic_test(n_points, n_estimates)
517
+
518
+ @pytest.mark.parametrize("signs", [[1, 1], [-1, -1], [-1, 1], [1, -1]])
519
+ def test_sign(self, signs):
520
+ self.basic_test(signs=signs)
521
+
522
+ @pytest.mark.parametrize("log", [False, True])
523
+ def test_zero(self, log):
524
+ message = "A lower limit was equal to an upper limit, so"
525
+ with pytest.warns(UserWarning, match=message):
526
+ res = qmc_quad(lambda x: 1, [0, 0], [0, 1], log=log)
527
+ assert res.integral == (-np.inf if log else 0)
528
+ assert res.standard_error == 0
529
+
530
+ def test_flexible_input(self):
531
+ # check that qrng is not required
532
+ # also checks that for 1d problems, a and b can be scalars
533
+ def func(x):
534
+ return stats.norm.pdf(x, scale=2)
535
+
536
+ res = qmc_quad(func, 0, 1)
537
+ ref = stats.norm.cdf(1, scale=2) - stats.norm.cdf(0, scale=2)
538
+ assert_allclose(res.integral, ref, 1e-2)
539
+
540
+
541
+ def cumulative_simpson_nd_reference(y, *, x=None, dx=None, initial=None, axis=-1):
542
+ # Use cumulative_trapezoid if length of y < 3
543
+ if y.shape[axis] < 3:
544
+ if initial is None:
545
+ return cumulative_trapezoid(y, x=x, dx=dx, axis=axis, initial=None)
546
+ else:
547
+ return initial + cumulative_trapezoid(y, x=x, dx=dx, axis=axis, initial=0)
548
+
549
+ # Ensure that working axis is last axis
550
+ y = np.moveaxis(y, axis, -1)
551
+ x = np.moveaxis(x, axis, -1) if np.ndim(x) > 1 else x
552
+ dx = np.moveaxis(dx, axis, -1) if np.ndim(dx) > 1 else dx
553
+ initial = np.moveaxis(initial, axis, -1) if np.ndim(initial) > 1 else initial
554
+
555
+ # If `x` is not present, create it from `dx`
556
+ n = y.shape[-1]
557
+ x = dx * np.arange(n) if dx is not None else x
558
+ # Similarly, if `initial` is not present, set it to 0
559
+ initial_was_none = initial is None
560
+ initial = 0 if initial_was_none else initial
561
+
562
+ # `np.apply_along_axis` accepts only one array, so concatenate arguments
563
+ x = np.broadcast_to(x, y.shape)
564
+ initial = np.broadcast_to(initial, y.shape[:-1] + (1,))
565
+ z = np.concatenate((y, x, initial), axis=-1)
566
+
567
+ # Use `np.apply_along_axis` to compute result
568
+ def f(z):
569
+ return cumulative_simpson(z[:n], x=z[n:2*n], initial=z[2*n:])
570
+ res = np.apply_along_axis(f, -1, z)
571
+
572
+ # Remove `initial` and undo axis move as needed
573
+ res = res[..., 1:] if initial_was_none else res
574
+ res = np.moveaxis(res, -1, axis)
575
+ return res
576
+
577
+
578
+ class TestCumulativeSimpson:
579
+ x0 = np.arange(4)
580
+ y0 = x0**2
581
+
582
+ @pytest.mark.parametrize('use_dx', (False, True))
583
+ @pytest.mark.parametrize('use_initial', (False, True))
584
+ def test_1d(self, use_dx, use_initial):
585
+ # Test for exact agreement with polynomial of highest
586
+ # possible order (3 if `dx` is constant, 2 otherwise).
587
+ rng = np.random.default_rng(82456839535679456794)
588
+ n = 10
589
+
590
+ # Generate random polynomials and ground truth
591
+ # integral of appropriate order
592
+ order = 3 if use_dx else 2
593
+ dx = rng.random()
594
+ x = (np.sort(rng.random(n)) if order == 2
595
+ else np.arange(n)*dx + rng.random())
596
+ i = np.arange(order + 1)[:, np.newaxis]
597
+ c = rng.random(order + 1)[:, np.newaxis]
598
+ y = np.sum(c*x**i, axis=0)
599
+ Y = np.sum(c*x**(i + 1)/(i + 1), axis=0)
600
+ ref = Y if use_initial else (Y-Y[0])[1:]
601
+
602
+ # Integrate with `cumulative_simpson`
603
+ initial = Y[0] if use_initial else None
604
+ kwarg = {'dx': dx} if use_dx else {'x': x}
605
+ res = cumulative_simpson(y, **kwarg, initial=initial)
606
+
607
+ # Compare result against reference
608
+ if not use_dx:
609
+ assert_allclose(res, ref, rtol=2e-15)
610
+ else:
611
+ i0 = 0 if use_initial else 1
612
+ # all terms are "close"
613
+ assert_allclose(res, ref, rtol=0.0025)
614
+ # only even-interval terms are "exact"
615
+ assert_allclose(res[i0::2], ref[i0::2], rtol=2e-15)
616
+
617
+ @pytest.mark.parametrize('axis', np.arange(-3, 3))
618
+ @pytest.mark.parametrize('x_ndim', (1, 3))
619
+ @pytest.mark.parametrize('x_len', (1, 2, 7))
620
+ @pytest.mark.parametrize('i_ndim', (None, 0, 3,))
621
+ @pytest.mark.parametrize('dx', (None, True))
622
+ def test_nd(self, axis, x_ndim, x_len, i_ndim, dx):
623
+ # Test behavior of `cumulative_simpson` with N-D `y`
624
+ rng = np.random.default_rng(82456839535679456794)
625
+
626
+ # determine shapes
627
+ shape = [5, 6, x_len]
628
+ shape[axis], shape[-1] = shape[-1], shape[axis]
629
+ shape_len_1 = shape.copy()
630
+ shape_len_1[axis] = 1
631
+ i_shape = shape_len_1 if i_ndim == 3 else ()
632
+
633
+ # initialize arguments
634
+ y = rng.random(size=shape)
635
+ x = None  # keep the parametrized `dx` so both branches below are exercised
636
+ if dx:
637
+ dx = rng.random(size=shape_len_1) if x_ndim > 1 else rng.random()
638
+ else:
639
+ x = (np.sort(rng.random(size=shape), axis=axis) if x_ndim > 1
640
+ else np.sort(rng.random(size=shape[axis])))
641
+ initial = None if i_ndim is None else rng.random(size=i_shape)
642
+
643
+ # compare results
644
+ res = cumulative_simpson(y, x=x, dx=dx, initial=initial, axis=axis)
645
+ ref = cumulative_simpson_nd_reference(y, x=x, dx=dx, initial=initial, axis=axis)
646
+ np.testing.assert_allclose(res, ref, rtol=1e-15)
647
+
648
+ @pytest.mark.parametrize(('message', 'kwarg_update'), [
649
+ ("x must be strictly increasing", dict(x=[2, 2, 3, 4])),
650
+ ("x must be strictly increasing", dict(x=[x0, [2, 2, 4, 8]], y=[y0, y0])),
651
+ ("x must be strictly increasing", dict(x=[x0, x0, x0], y=[y0, y0, y0], axis=0)),
652
+ ("At least one point is required", dict(x=[], y=[])),
653
+ ("`axis=4` is not valid for `y` with `y.ndim=1`", dict(axis=4)),
654
+ ("shape of `x` must be the same as `y` or 1-D", dict(x=np.arange(5))),
655
+ ("`initial` must either be a scalar or...", dict(initial=np.arange(5))),
656
+ ("`dx` must either be a scalar or...", dict(x=None, dx=np.arange(5))),
657
+ ])
658
+ def test_simpson_exceptions(self, message, kwarg_update):
659
+ kwargs0 = dict(y=self.y0, x=self.x0, dx=None, initial=None, axis=-1)
660
+ with pytest.raises(ValueError, match=message):
661
+ cumulative_simpson(**dict(kwargs0, **kwarg_update))
662
+
663
+ def test_special_cases(self):
664
+ # Test special cases not checked elsewhere
665
+ rng = np.random.default_rng(82456839535679456794)
666
+ y = rng.random(size=10)
667
+ res = cumulative_simpson(y, dx=0)
668
+ assert_equal(res, 0)
669
+
670
+ # Should add tests of:
671
+ # - all elements of `x` identical
672
+ # These should work as they do for `simpson`
673
+
674
+ def _get_theoretical_diff_between_simps_and_cum_simps(self, y, x):
675
+ """`cumulative_simpson` and `simpson` can be tested against other to verify
676
+ they give consistent results. `simpson` will iteratively be called with
677
+ successively higher upper limits of integration. This function calculates
678
+ the theoretical correction required to `simpson` at even intervals to match
679
+ with `cumulative_simpson`.
680
+ """
681
+ d = np.diff(x, axis=-1)
682
+ sub_integrals_h1 = _cumulative_simpson_unequal_intervals(y, d)
683
+ sub_integrals_h2 = _cumulative_simpson_unequal_intervals(
684
+ y[..., ::-1], d[..., ::-1]
685
+ )[..., ::-1]
686
+
687
+ # Concatenate to build difference array
688
+ zeros_shape = (*y.shape[:-1], 1)
689
+ theoretical_difference = np.concatenate(
690
+ [
691
+ np.zeros(zeros_shape),
692
+ (sub_integrals_h1[..., 1:] - sub_integrals_h2[..., :-1]),
693
+ np.zeros(zeros_shape),
694
+ ],
695
+ axis=-1,
696
+ )
697
+ # Differences only expected at even intervals. Odd intervals will
698
+ # match exactly so there is no correction
699
+ theoretical_difference[..., 1::2] = 0.0
700
+ # Note: the first interval will not match even with this correction, as
701
+ # `simpson` uses the trapezoidal rule
702
+ return theoretical_difference
703
+
704
+ @given(
705
+ y=hyp_num.arrays(
706
+ np.float64,
707
+ hyp_num.array_shapes(max_dims=4, min_side=3, max_side=10),
708
+ elements=st.floats(-10, 10, allow_nan=False).filter(lambda x: abs(x) > 1e-7)
709
+ )
710
+ )
711
+ def test_cumulative_simpson_against_simpson_with_default_dx(
712
+ self, y
713
+ ):
714
+ """Theoretically, the output of `cumulative_simpson` will be identical
715
+ to `simpson` at all even indices and in the last index. The first index
716
+ will not match as `simpson` uses the trapezoidal rule when there are only two
717
+ data points. Odd indices after the first index are shown to match with
718
+ a mathematically-derived correction."""
719
+ def simpson_reference(y):
720
+ return np.stack(
721
+ [simpson(y[..., :i], dx=1.0) for i in range(2, y.shape[-1]+1)], axis=-1,
722
+ )
723
+
724
+ res = cumulative_simpson(y, dx=1.0)
725
+ ref = simpson_reference(y)
726
+ theoretical_difference = self._get_theoretical_diff_between_simps_and_cum_simps(
727
+ y, x=np.arange(y.shape[-1])
728
+ )
729
+ np.testing.assert_allclose(
730
+ res[..., 1:], ref[..., 1:] + theoretical_difference[..., 1:]
731
+ )
732
+
733
+
734
+ @given(
735
+ y=hyp_num.arrays(
736
+ np.float64,
737
+ hyp_num.array_shapes(max_dims=4, min_side=3, max_side=10),
738
+ elements=st.floats(-10, 10, allow_nan=False).filter(lambda x: abs(x) > 1e-7)
739
+ )
740
+ )
741
+ def test_cumulative_simpson_against_simpson(
742
+ self, y
743
+ ):
744
+ """Theoretically, the output of `cumulative_simpson` will be identical
745
+ to `simpson` at all even indices and in the last index. The first index
746
+ will not match as `simpson` uses the trapezoidal rule when there are only two
747
+ data points. Odd indices after the first index are shown to match with
748
+ a mathematically-derived correction."""
749
+ interval = 10/(y.shape[-1] - 1)
750
+ x = np.linspace(0, 10, num=y.shape[-1])
751
+ x[1:] = x[1:] + 0.2*interval*np.random.uniform(-1, 1, len(x) - 1)
752
+
753
+ def simpson_reference(y, x):
754
+ return np.stack(
755
+ [simpson(y[..., :i], x=x[..., :i]) for i in range(2, y.shape[-1]+1)],
756
+ axis=-1,
757
+ )
758
+
759
+ res = cumulative_simpson(y, x=x)
760
+ ref = simpson_reference(y, x)
761
+ theoretical_difference = self._get_theoretical_diff_between_simps_and_cum_simps(
762
+ y, x
763
+ )
764
+ np.testing.assert_allclose(
765
+ res[..., 1:], ref[..., 1:] + theoretical_difference[..., 1:]
766
+ )