Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/canonical_constraint.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/equality_constrained_sqp.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/minimize_trustregion_constr.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/projections.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/qp_subproblem.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/report.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/tr_interior_point.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_canonical_constraint.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_projections.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_qp_subproblem.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_report.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_projections.py +214 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py +645 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_report.py +32 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize/_zeros.cpython-310-x86_64-linux-gnu.so +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize/_zeros.pxd +33 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize/c_zeros.pxd +26 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__linprog_clean_inputs.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__remove_redundancy.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__root.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__shgo.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_constraint_conversion.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cython_optimize.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_direct.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lbfgsb_setulb.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_least_squares.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linear_assignment.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lsq_linear.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_nnls.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__basinhopping.py +525 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__differential_evolution.py +1677 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__dual_annealing.py +379 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__linprog_clean_inputs.py +310 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__numdiff.py +815 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__remove_redundancy.py +228 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__root.py +123 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__shgo.py +1159 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__spectral.py +226 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_bracket.py +780 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_chandrupatla.py +827 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_cobyla.py +166 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_constraint_conversion.py +274 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_constraints.py +255 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_cython_optimize.py +92 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiable_functions.py +758 -0
- llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiate.py +396 -0
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (383 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/canonical_constraint.cpython-310.pyc
ADDED
Binary file (12.7 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/equality_constrained_sqp.cpython-310.pyc
ADDED
Binary file (4.19 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/minimize_trustregion_constr.cpython-310.pyc
ADDED
Binary file (20.3 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/projections.cpython-310.pyc
ADDED
Binary file (10.3 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/qp_subproblem.cpython-310.pyc
ADDED
Binary file (15.7 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/report.cpython-310.pyc
ADDED
Binary file (2.53 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/tr_interior_point.cpython-310.pyc
ADDED
Binary file (9.66 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__init__.py
ADDED
File without changes
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (213 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_canonical_constraint.cpython-310.pyc
ADDED
Binary file (7.02 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_projections.cpython-310.pyc
ADDED
Binary file (6.58 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_qp_subproblem.cpython-310.pyc
ADDED
Binary file (15.6 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_report.cpython-310.pyc
ADDED
Binary file (1.45 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_projections.py
ADDED
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import scipy.linalg
|
3 |
+
from scipy.sparse import csc_matrix
|
4 |
+
from scipy.optimize._trustregion_constr.projections \
|
5 |
+
import projections, orthogonality
|
6 |
+
from numpy.testing import (TestCase, assert_array_almost_equal,
|
7 |
+
assert_equal, assert_allclose)
|
8 |
+
|
9 |
+
try:
|
10 |
+
from sksparse.cholmod import cholesky_AAt # noqa: F401
|
11 |
+
sksparse_available = True
|
12 |
+
available_sparse_methods = ("NormalEquation", "AugmentedSystem")
|
13 |
+
except ImportError:
|
14 |
+
sksparse_available = False
|
15 |
+
available_sparse_methods = ("AugmentedSystem",)
|
16 |
+
available_dense_methods = ('QRFactorization', 'SVDFactorization')
|
17 |
+
|
18 |
+
|
19 |
+
class TestProjections(TestCase):
|
20 |
+
|
21 |
+
def test_nullspace_and_least_squares_sparse(self):
|
22 |
+
A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
|
23 |
+
[0, 8, 7, 0, 1, 5, 9, 0],
|
24 |
+
[1, 0, 0, 0, 0, 1, 2, 3]])
|
25 |
+
At_dense = A_dense.T
|
26 |
+
A = csc_matrix(A_dense)
|
27 |
+
test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
|
28 |
+
[1, 10, 3, 0, 1, 6, 7, 8],
|
29 |
+
[1.12, 10, 0, 0, 100000, 6, 0.7, 8])
|
30 |
+
|
31 |
+
for method in available_sparse_methods:
|
32 |
+
Z, LS, _ = projections(A, method)
|
33 |
+
for z in test_points:
|
34 |
+
# Test if x is in the null_space
|
35 |
+
x = Z.matvec(z)
|
36 |
+
assert_array_almost_equal(A.dot(x), 0)
|
37 |
+
# Test orthogonality
|
38 |
+
assert_array_almost_equal(orthogonality(A, x), 0)
|
39 |
+
# Test if x is the least square solution
|
40 |
+
x = LS.matvec(z)
|
41 |
+
x2 = scipy.linalg.lstsq(At_dense, z)[0]
|
42 |
+
assert_array_almost_equal(x, x2)
|
43 |
+
|
44 |
+
def test_iterative_refinements_sparse(self):
|
45 |
+
A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
|
46 |
+
[0, 8, 7, 0, 1, 5, 9, 0],
|
47 |
+
[1, 0, 0, 0, 0, 1, 2, 3]])
|
48 |
+
A = csc_matrix(A_dense)
|
49 |
+
test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
|
50 |
+
[1, 10, 3, 0, 1, 6, 7, 8],
|
51 |
+
[1.12, 10, 0, 0, 100000, 6, 0.7, 8],
|
52 |
+
[1, 0, 0, 0, 0, 1, 2, 3+1e-10])
|
53 |
+
|
54 |
+
for method in available_sparse_methods:
|
55 |
+
Z, LS, _ = projections(A, method, orth_tol=1e-18, max_refin=100)
|
56 |
+
for z in test_points:
|
57 |
+
# Test if x is in the null_space
|
58 |
+
x = Z.matvec(z)
|
59 |
+
atol = 1e-13 * abs(x).max()
|
60 |
+
assert_allclose(A.dot(x), 0, atol=atol)
|
61 |
+
# Test orthogonality
|
62 |
+
assert_allclose(orthogonality(A, x), 0, atol=1e-13)
|
63 |
+
|
64 |
+
def test_rowspace_sparse(self):
|
65 |
+
A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
|
66 |
+
[0, 8, 7, 0, 1, 5, 9, 0],
|
67 |
+
[1, 0, 0, 0, 0, 1, 2, 3]])
|
68 |
+
A = csc_matrix(A_dense)
|
69 |
+
test_points = ([1, 2, 3],
|
70 |
+
[1, 10, 3],
|
71 |
+
[1.12, 10, 0])
|
72 |
+
|
73 |
+
for method in available_sparse_methods:
|
74 |
+
_, _, Y = projections(A, method)
|
75 |
+
for z in test_points:
|
76 |
+
# Test if x is solution of A x = z
|
77 |
+
x = Y.matvec(z)
|
78 |
+
assert_array_almost_equal(A.dot(x), z)
|
79 |
+
# Test if x is in the return row space of A
|
80 |
+
A_ext = np.vstack((A_dense, x))
|
81 |
+
assert_equal(np.linalg.matrix_rank(A_dense),
|
82 |
+
np.linalg.matrix_rank(A_ext))
|
83 |
+
|
84 |
+
def test_nullspace_and_least_squares_dense(self):
|
85 |
+
A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
|
86 |
+
[0, 8, 7, 0, 1, 5, 9, 0],
|
87 |
+
[1, 0, 0, 0, 0, 1, 2, 3]])
|
88 |
+
At = A.T
|
89 |
+
test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
|
90 |
+
[1, 10, 3, 0, 1, 6, 7, 8],
|
91 |
+
[1.12, 10, 0, 0, 100000, 6, 0.7, 8])
|
92 |
+
|
93 |
+
for method in available_dense_methods:
|
94 |
+
Z, LS, _ = projections(A, method)
|
95 |
+
for z in test_points:
|
96 |
+
# Test if x is in the null_space
|
97 |
+
x = Z.matvec(z)
|
98 |
+
assert_array_almost_equal(A.dot(x), 0)
|
99 |
+
# Test orthogonality
|
100 |
+
assert_array_almost_equal(orthogonality(A, x), 0)
|
101 |
+
# Test if x is the least square solution
|
102 |
+
x = LS.matvec(z)
|
103 |
+
x2 = scipy.linalg.lstsq(At, z)[0]
|
104 |
+
assert_array_almost_equal(x, x2)
|
105 |
+
|
106 |
+
def test_compare_dense_and_sparse(self):
|
107 |
+
D = np.diag(range(1, 101))
|
108 |
+
A = np.hstack([D, D, D, D])
|
109 |
+
A_sparse = csc_matrix(A)
|
110 |
+
np.random.seed(0)
|
111 |
+
|
112 |
+
Z, LS, Y = projections(A)
|
113 |
+
Z_sparse, LS_sparse, Y_sparse = projections(A_sparse)
|
114 |
+
for k in range(20):
|
115 |
+
z = np.random.normal(size=(400,))
|
116 |
+
assert_array_almost_equal(Z.dot(z), Z_sparse.dot(z))
|
117 |
+
assert_array_almost_equal(LS.dot(z), LS_sparse.dot(z))
|
118 |
+
x = np.random.normal(size=(100,))
|
119 |
+
assert_array_almost_equal(Y.dot(x), Y_sparse.dot(x))
|
120 |
+
|
121 |
+
def test_compare_dense_and_sparse2(self):
|
122 |
+
D1 = np.diag([-1.7, 1, 0.5])
|
123 |
+
D2 = np.diag([1, -0.6, -0.3])
|
124 |
+
D3 = np.diag([-0.3, -1.5, 2])
|
125 |
+
A = np.hstack([D1, D2, D3])
|
126 |
+
A_sparse = csc_matrix(A)
|
127 |
+
np.random.seed(0)
|
128 |
+
|
129 |
+
Z, LS, Y = projections(A)
|
130 |
+
Z_sparse, LS_sparse, Y_sparse = projections(A_sparse)
|
131 |
+
for k in range(1):
|
132 |
+
z = np.random.normal(size=(9,))
|
133 |
+
assert_array_almost_equal(Z.dot(z), Z_sparse.dot(z))
|
134 |
+
assert_array_almost_equal(LS.dot(z), LS_sparse.dot(z))
|
135 |
+
x = np.random.normal(size=(3,))
|
136 |
+
assert_array_almost_equal(Y.dot(x), Y_sparse.dot(x))
|
137 |
+
|
138 |
+
def test_iterative_refinements_dense(self):
|
139 |
+
A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
|
140 |
+
[0, 8, 7, 0, 1, 5, 9, 0],
|
141 |
+
[1, 0, 0, 0, 0, 1, 2, 3]])
|
142 |
+
test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
|
143 |
+
[1, 10, 3, 0, 1, 6, 7, 8],
|
144 |
+
[1, 0, 0, 0, 0, 1, 2, 3+1e-10])
|
145 |
+
|
146 |
+
for method in available_dense_methods:
|
147 |
+
Z, LS, _ = projections(A, method, orth_tol=1e-18, max_refin=10)
|
148 |
+
for z in test_points:
|
149 |
+
# Test if x is in the null_space
|
150 |
+
x = Z.matvec(z)
|
151 |
+
assert_allclose(A.dot(x), 0, rtol=0, atol=2.5e-14)
|
152 |
+
# Test orthogonality
|
153 |
+
assert_allclose(orthogonality(A, x), 0, rtol=0, atol=5e-16)
|
154 |
+
|
155 |
+
def test_rowspace_dense(self):
|
156 |
+
A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
|
157 |
+
[0, 8, 7, 0, 1, 5, 9, 0],
|
158 |
+
[1, 0, 0, 0, 0, 1, 2, 3]])
|
159 |
+
test_points = ([1, 2, 3],
|
160 |
+
[1, 10, 3],
|
161 |
+
[1.12, 10, 0])
|
162 |
+
|
163 |
+
for method in available_dense_methods:
|
164 |
+
_, _, Y = projections(A, method)
|
165 |
+
for z in test_points:
|
166 |
+
# Test if x is solution of A x = z
|
167 |
+
x = Y.matvec(z)
|
168 |
+
assert_array_almost_equal(A.dot(x), z)
|
169 |
+
# Test if x is in the return row space of A
|
170 |
+
A_ext = np.vstack((A, x))
|
171 |
+
assert_equal(np.linalg.matrix_rank(A),
|
172 |
+
np.linalg.matrix_rank(A_ext))
|
173 |
+
|
174 |
+
|
175 |
+
class TestOrthogonality(TestCase):
|
176 |
+
|
177 |
+
def test_dense_matrix(self):
|
178 |
+
A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
|
179 |
+
[0, 8, 7, 0, 1, 5, 9, 0],
|
180 |
+
[1, 0, 0, 0, 0, 1, 2, 3]])
|
181 |
+
test_vectors = ([-1.98931144, -1.56363389,
|
182 |
+
-0.84115584, 2.2864762,
|
183 |
+
5.599141, 0.09286976,
|
184 |
+
1.37040802, -0.28145812],
|
185 |
+
[697.92794044, -4091.65114008,
|
186 |
+
-3327.42316335, 836.86906951,
|
187 |
+
99434.98929065, -1285.37653682,
|
188 |
+
-4109.21503806, 2935.29289083])
|
189 |
+
test_expected_orth = (0, 0)
|
190 |
+
|
191 |
+
for i in range(len(test_vectors)):
|
192 |
+
x = test_vectors[i]
|
193 |
+
orth = test_expected_orth[i]
|
194 |
+
assert_array_almost_equal(orthogonality(A, x), orth)
|
195 |
+
|
196 |
+
def test_sparse_matrix(self):
|
197 |
+
A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
|
198 |
+
[0, 8, 7, 0, 1, 5, 9, 0],
|
199 |
+
[1, 0, 0, 0, 0, 1, 2, 3]])
|
200 |
+
A = csc_matrix(A)
|
201 |
+
test_vectors = ([-1.98931144, -1.56363389,
|
202 |
+
-0.84115584, 2.2864762,
|
203 |
+
5.599141, 0.09286976,
|
204 |
+
1.37040802, -0.28145812],
|
205 |
+
[697.92794044, -4091.65114008,
|
206 |
+
-3327.42316335, 836.86906951,
|
207 |
+
99434.98929065, -1285.37653682,
|
208 |
+
-4109.21503806, 2935.29289083])
|
209 |
+
test_expected_orth = (0, 0)
|
210 |
+
|
211 |
+
for i in range(len(test_vectors)):
|
212 |
+
x = test_vectors[i]
|
213 |
+
orth = test_expected_orth[i]
|
214 |
+
assert_array_almost_equal(orthogonality(A, x), orth)
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py
ADDED
@@ -0,0 +1,645 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
from scipy.sparse import csc_matrix
|
3 |
+
from scipy.optimize._trustregion_constr.qp_subproblem \
|
4 |
+
import (eqp_kktfact,
|
5 |
+
projected_cg,
|
6 |
+
box_intersections,
|
7 |
+
sphere_intersections,
|
8 |
+
box_sphere_intersections,
|
9 |
+
modified_dogleg)
|
10 |
+
from scipy.optimize._trustregion_constr.projections \
|
11 |
+
import projections
|
12 |
+
from numpy.testing import TestCase, assert_array_almost_equal, assert_equal
|
13 |
+
import pytest
|
14 |
+
|
15 |
+
|
16 |
+
class TestEQPDirectFactorization(TestCase):
|
17 |
+
|
18 |
+
# From Example 16.2 Nocedal/Wright "Numerical
|
19 |
+
# Optimization" p.452.
|
20 |
+
def test_nocedal_example(self):
|
21 |
+
H = csc_matrix([[6, 2, 1],
|
22 |
+
[2, 5, 2],
|
23 |
+
[1, 2, 4]])
|
24 |
+
A = csc_matrix([[1, 0, 1],
|
25 |
+
[0, 1, 1]])
|
26 |
+
c = np.array([-8, -3, -3])
|
27 |
+
b = -np.array([3, 0])
|
28 |
+
x, lagrange_multipliers = eqp_kktfact(H, c, A, b)
|
29 |
+
assert_array_almost_equal(x, [2, -1, 1])
|
30 |
+
assert_array_almost_equal(lagrange_multipliers, [3, -2])
|
31 |
+
|
32 |
+
|
33 |
+
class TestSphericalBoundariesIntersections(TestCase):
|
34 |
+
|
35 |
+
def test_2d_sphere_constraints(self):
|
36 |
+
# Interior inicial point
|
37 |
+
ta, tb, intersect = sphere_intersections([0, 0],
|
38 |
+
[1, 0], 0.5)
|
39 |
+
assert_array_almost_equal([ta, tb], [0, 0.5])
|
40 |
+
assert_equal(intersect, True)
|
41 |
+
|
42 |
+
# No intersection between line and circle
|
43 |
+
ta, tb, intersect = sphere_intersections([2, 0],
|
44 |
+
[0, 1], 1)
|
45 |
+
assert_equal(intersect, False)
|
46 |
+
|
47 |
+
# Outside initial point pointing toward outside the circle
|
48 |
+
ta, tb, intersect = sphere_intersections([2, 0],
|
49 |
+
[1, 0], 1)
|
50 |
+
assert_equal(intersect, False)
|
51 |
+
|
52 |
+
# Outside initial point pointing toward inside the circle
|
53 |
+
ta, tb, intersect = sphere_intersections([2, 0],
|
54 |
+
[-1, 0], 1.5)
|
55 |
+
assert_array_almost_equal([ta, tb], [0.5, 1])
|
56 |
+
assert_equal(intersect, True)
|
57 |
+
|
58 |
+
# Initial point on the boundary
|
59 |
+
ta, tb, intersect = sphere_intersections([2, 0],
|
60 |
+
[1, 0], 2)
|
61 |
+
assert_array_almost_equal([ta, tb], [0, 0])
|
62 |
+
assert_equal(intersect, True)
|
63 |
+
|
64 |
+
def test_2d_sphere_constraints_line_intersections(self):
|
65 |
+
# Interior initial point
|
66 |
+
ta, tb, intersect = sphere_intersections([0, 0],
|
67 |
+
[1, 0], 0.5,
|
68 |
+
entire_line=True)
|
69 |
+
assert_array_almost_equal([ta, tb], [-0.5, 0.5])
|
70 |
+
assert_equal(intersect, True)
|
71 |
+
|
72 |
+
# No intersection between line and circle
|
73 |
+
ta, tb, intersect = sphere_intersections([2, 0],
|
74 |
+
[0, 1], 1,
|
75 |
+
entire_line=True)
|
76 |
+
assert_equal(intersect, False)
|
77 |
+
|
78 |
+
# Outside initial point pointing toward outside the circle
|
79 |
+
ta, tb, intersect = sphere_intersections([2, 0],
|
80 |
+
[1, 0], 1,
|
81 |
+
entire_line=True)
|
82 |
+
assert_array_almost_equal([ta, tb], [-3, -1])
|
83 |
+
assert_equal(intersect, True)
|
84 |
+
|
85 |
+
# Outside initial point pointing toward inside the circle
|
86 |
+
ta, tb, intersect = sphere_intersections([2, 0],
|
87 |
+
[-1, 0], 1.5,
|
88 |
+
entire_line=True)
|
89 |
+
assert_array_almost_equal([ta, tb], [0.5, 3.5])
|
90 |
+
assert_equal(intersect, True)
|
91 |
+
|
92 |
+
# Initial point on the boundary
|
93 |
+
ta, tb, intersect = sphere_intersections([2, 0],
|
94 |
+
[1, 0], 2,
|
95 |
+
entire_line=True)
|
96 |
+
assert_array_almost_equal([ta, tb], [-4, 0])
|
97 |
+
assert_equal(intersect, True)
|
98 |
+
|
99 |
+
|
100 |
+
class TestBoxBoundariesIntersections(TestCase):
|
101 |
+
|
102 |
+
def test_2d_box_constraints(self):
|
103 |
+
# Box constraint in the direction of vector d
|
104 |
+
ta, tb, intersect = box_intersections([2, 0], [0, 2],
|
105 |
+
[1, 1], [3, 3])
|
106 |
+
assert_array_almost_equal([ta, tb], [0.5, 1])
|
107 |
+
assert_equal(intersect, True)
|
108 |
+
|
109 |
+
# Negative direction
|
110 |
+
ta, tb, intersect = box_intersections([2, 0], [0, 2],
|
111 |
+
[1, -3], [3, -1])
|
112 |
+
assert_equal(intersect, False)
|
113 |
+
|
114 |
+
# Some constraints are absent (set to +/- inf)
|
115 |
+
ta, tb, intersect = box_intersections([2, 0], [0, 2],
|
116 |
+
[-np.inf, 1],
|
117 |
+
[np.inf, np.inf])
|
118 |
+
assert_array_almost_equal([ta, tb], [0.5, 1])
|
119 |
+
assert_equal(intersect, True)
|
120 |
+
|
121 |
+
# Intersect on the face of the box
|
122 |
+
ta, tb, intersect = box_intersections([1, 0], [0, 1],
|
123 |
+
[1, 1], [3, 3])
|
124 |
+
assert_array_almost_equal([ta, tb], [1, 1])
|
125 |
+
assert_equal(intersect, True)
|
126 |
+
|
127 |
+
# Interior initial point
|
128 |
+
ta, tb, intersect = box_intersections([0, 0], [4, 4],
|
129 |
+
[-2, -3], [3, 2])
|
130 |
+
assert_array_almost_equal([ta, tb], [0, 0.5])
|
131 |
+
assert_equal(intersect, True)
|
132 |
+
|
133 |
+
# No intersection between line and box constraints
|
134 |
+
ta, tb, intersect = box_intersections([2, 0], [0, 2],
|
135 |
+
[-3, -3], [-1, -1])
|
136 |
+
assert_equal(intersect, False)
|
137 |
+
ta, tb, intersect = box_intersections([2, 0], [0, 2],
|
138 |
+
[-3, 3], [-1, 1])
|
139 |
+
assert_equal(intersect, False)
|
140 |
+
ta, tb, intersect = box_intersections([2, 0], [0, 2],
|
141 |
+
[-3, -np.inf],
|
142 |
+
[-1, np.inf])
|
143 |
+
assert_equal(intersect, False)
|
144 |
+
ta, tb, intersect = box_intersections([0, 0], [1, 100],
|
145 |
+
[1, 1], [3, 3])
|
146 |
+
assert_equal(intersect, False)
|
147 |
+
ta, tb, intersect = box_intersections([0.99, 0], [0, 2],
|
148 |
+
[1, 1], [3, 3])
|
149 |
+
assert_equal(intersect, False)
|
150 |
+
|
151 |
+
# Initial point on the boundary
|
152 |
+
ta, tb, intersect = box_intersections([2, 2], [0, 1],
|
153 |
+
[-2, -2], [2, 2])
|
154 |
+
assert_array_almost_equal([ta, tb], [0, 0])
|
155 |
+
assert_equal(intersect, True)
|
156 |
+
|
157 |
+
def test_2d_box_constraints_entire_line(self):
|
158 |
+
# Box constraint in the direction of vector d
|
159 |
+
ta, tb, intersect = box_intersections([2, 0], [0, 2],
|
160 |
+
[1, 1], [3, 3],
|
161 |
+
entire_line=True)
|
162 |
+
assert_array_almost_equal([ta, tb], [0.5, 1.5])
|
163 |
+
assert_equal(intersect, True)
|
164 |
+
|
165 |
+
# Negative direction
|
166 |
+
ta, tb, intersect = box_intersections([2, 0], [0, 2],
|
167 |
+
[1, -3], [3, -1],
|
168 |
+
entire_line=True)
|
169 |
+
assert_array_almost_equal([ta, tb], [-1.5, -0.5])
|
170 |
+
assert_equal(intersect, True)
|
171 |
+
|
172 |
+
# Some constraints are absent (set to +/- inf)
|
173 |
+
ta, tb, intersect = box_intersections([2, 0], [0, 2],
|
174 |
+
[-np.inf, 1],
|
175 |
+
[np.inf, np.inf],
|
176 |
+
entire_line=True)
|
177 |
+
assert_array_almost_equal([ta, tb], [0.5, np.inf])
|
178 |
+
assert_equal(intersect, True)
|
179 |
+
|
180 |
+
# Intersect on the face of the box
|
181 |
+
ta, tb, intersect = box_intersections([1, 0], [0, 1],
|
182 |
+
[1, 1], [3, 3],
|
183 |
+
entire_line=True)
|
184 |
+
assert_array_almost_equal([ta, tb], [1, 3])
|
185 |
+
assert_equal(intersect, True)
|
186 |
+
|
187 |
+
# Interior initial pointoint
|
188 |
+
ta, tb, intersect = box_intersections([0, 0], [4, 4],
|
189 |
+
[-2, -3], [3, 2],
|
190 |
+
entire_line=True)
|
191 |
+
assert_array_almost_equal([ta, tb], [-0.5, 0.5])
|
192 |
+
assert_equal(intersect, True)
|
193 |
+
|
194 |
+
# No intersection between line and box constraints
|
195 |
+
ta, tb, intersect = box_intersections([2, 0], [0, 2],
|
196 |
+
[-3, -3], [-1, -1],
|
197 |
+
entire_line=True)
|
198 |
+
assert_equal(intersect, False)
|
199 |
+
ta, tb, intersect = box_intersections([2, 0], [0, 2],
|
200 |
+
[-3, 3], [-1, 1],
|
201 |
+
entire_line=True)
|
202 |
+
assert_equal(intersect, False)
|
203 |
+
ta, tb, intersect = box_intersections([2, 0], [0, 2],
|
204 |
+
[-3, -np.inf],
|
205 |
+
[-1, np.inf],
|
206 |
+
entire_line=True)
|
207 |
+
assert_equal(intersect, False)
|
208 |
+
ta, tb, intersect = box_intersections([0, 0], [1, 100],
|
209 |
+
[1, 1], [3, 3],
|
210 |
+
entire_line=True)
|
211 |
+
assert_equal(intersect, False)
|
212 |
+
ta, tb, intersect = box_intersections([0.99, 0], [0, 2],
|
213 |
+
[1, 1], [3, 3],
|
214 |
+
entire_line=True)
|
215 |
+
assert_equal(intersect, False)
|
216 |
+
|
217 |
+
# Initial point on the boundary
|
218 |
+
ta, tb, intersect = box_intersections([2, 2], [0, 1],
|
219 |
+
[-2, -2], [2, 2],
|
220 |
+
entire_line=True)
|
221 |
+
assert_array_almost_equal([ta, tb], [-4, 0])
|
222 |
+
assert_equal(intersect, True)
|
223 |
+
|
224 |
+
def test_3d_box_constraints(self):
|
225 |
+
# Simple case
|
226 |
+
ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, 1],
|
227 |
+
[1, 1, 1], [3, 3, 3])
|
228 |
+
assert_array_almost_equal([ta, tb], [1, 1])
|
229 |
+
assert_equal(intersect, True)
|
230 |
+
|
231 |
+
# Negative direction
|
232 |
+
ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, -1],
|
233 |
+
[1, 1, 1], [3, 3, 3])
|
234 |
+
assert_equal(intersect, False)
|
235 |
+
|
236 |
+
# Interior point
|
237 |
+
ta, tb, intersect = box_intersections([2, 2, 2], [0, -1, 1],
|
238 |
+
[1, 1, 1], [3, 3, 3])
|
239 |
+
assert_array_almost_equal([ta, tb], [0, 1])
|
240 |
+
assert_equal(intersect, True)
|
241 |
+
|
242 |
+
def test_3d_box_constraints_entire_line(self):
|
243 |
+
# Simple case
|
244 |
+
ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, 1],
|
245 |
+
[1, 1, 1], [3, 3, 3],
|
246 |
+
entire_line=True)
|
247 |
+
assert_array_almost_equal([ta, tb], [1, 3])
|
248 |
+
assert_equal(intersect, True)
|
249 |
+
|
250 |
+
# Negative direction
|
251 |
+
ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, -1],
|
252 |
+
[1, 1, 1], [3, 3, 3],
|
253 |
+
entire_line=True)
|
254 |
+
assert_array_almost_equal([ta, tb], [-3, -1])
|
255 |
+
assert_equal(intersect, True)
|
256 |
+
|
257 |
+
# Interior point
|
258 |
+
ta, tb, intersect = box_intersections([2, 2, 2], [0, -1, 1],
|
259 |
+
[1, 1, 1], [3, 3, 3],
|
260 |
+
entire_line=True)
|
261 |
+
assert_array_almost_equal([ta, tb], [-1, 1])
|
262 |
+
assert_equal(intersect, True)
|
263 |
+
|
264 |
+
|
265 |
+
class TestBoxSphereBoundariesIntersections(TestCase):
|
266 |
+
|
267 |
+
def test_2d_box_constraints(self):
|
268 |
+
# Both constraints are active
|
269 |
+
ta, tb, intersect = box_sphere_intersections([1, 1], [-2, 2],
|
270 |
+
[-1, -2], [1, 2], 2,
|
271 |
+
entire_line=False)
|
272 |
+
assert_array_almost_equal([ta, tb], [0, 0.5])
|
273 |
+
assert_equal(intersect, True)
|
274 |
+
|
275 |
+
# None of the constraints are active
|
276 |
+
ta, tb, intersect = box_sphere_intersections([1, 1], [-1, 1],
|
277 |
+
[-1, -3], [1, 3], 10,
|
278 |
+
entire_line=False)
|
279 |
+
assert_array_almost_equal([ta, tb], [0, 1])
|
280 |
+
assert_equal(intersect, True)
|
281 |
+
|
282 |
+
# Box constraints are active
|
283 |
+
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
|
284 |
+
[-1, -3], [1, 3], 10,
|
285 |
+
entire_line=False)
|
286 |
+
assert_array_almost_equal([ta, tb], [0, 0.5])
|
287 |
+
assert_equal(intersect, True)
|
288 |
+
|
289 |
+
# Spherical constraints are active
|
290 |
+
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
|
291 |
+
[-1, -3], [1, 3], 2,
|
292 |
+
entire_line=False)
|
293 |
+
assert_array_almost_equal([ta, tb], [0, 0.25])
|
294 |
+
assert_equal(intersect, True)
|
295 |
+
|
296 |
+
# Infeasible problems
|
297 |
+
ta, tb, intersect = box_sphere_intersections([2, 2], [-4, 4],
|
298 |
+
[-1, -3], [1, 3], 2,
|
299 |
+
entire_line=False)
|
300 |
+
assert_equal(intersect, False)
|
301 |
+
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
|
302 |
+
[2, 4], [2, 4], 2,
|
303 |
+
entire_line=False)
|
304 |
+
assert_equal(intersect, False)
|
305 |
+
|
306 |
+
def test_2d_box_constraints_entire_line(self):
|
307 |
+
# Both constraints are active
|
308 |
+
ta, tb, intersect = box_sphere_intersections([1, 1], [-2, 2],
|
309 |
+
[-1, -2], [1, 2], 2,
|
310 |
+
entire_line=True)
|
311 |
+
assert_array_almost_equal([ta, tb], [0, 0.5])
|
312 |
+
assert_equal(intersect, True)
|
313 |
+
|
314 |
+
# None of the constraints are active
|
315 |
+
ta, tb, intersect = box_sphere_intersections([1, 1], [-1, 1],
|
316 |
+
[-1, -3], [1, 3], 10,
|
317 |
+
entire_line=True)
|
318 |
+
assert_array_almost_equal([ta, tb], [0, 2])
|
319 |
+
assert_equal(intersect, True)
|
320 |
+
|
321 |
+
# Box constraints are active
|
322 |
+
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
|
323 |
+
[-1, -3], [1, 3], 10,
|
324 |
+
entire_line=True)
|
325 |
+
assert_array_almost_equal([ta, tb], [0, 0.5])
|
326 |
+
assert_equal(intersect, True)
|
327 |
+
|
328 |
+
# Spherical constraints are active
|
329 |
+
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
|
330 |
+
[-1, -3], [1, 3], 2,
|
331 |
+
entire_line=True)
|
332 |
+
assert_array_almost_equal([ta, tb], [0, 0.25])
|
333 |
+
assert_equal(intersect, True)
|
334 |
+
|
335 |
+
# Infeasible problems
|
336 |
+
ta, tb, intersect = box_sphere_intersections([2, 2], [-4, 4],
|
337 |
+
[-1, -3], [1, 3], 2,
|
338 |
+
entire_line=True)
|
339 |
+
assert_equal(intersect, False)
|
340 |
+
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
|
341 |
+
[2, 4], [2, 4], 2,
|
342 |
+
entire_line=True)
|
343 |
+
assert_equal(intersect, False)
|
344 |
+
|
345 |
+
|
346 |
+
class TestModifiedDogleg(TestCase):
|
347 |
+
|
348 |
+
def test_cauchypoint_equalsto_newtonpoint(self):
|
349 |
+
A = np.array([[1, 8]])
|
350 |
+
b = np.array([-16])
|
351 |
+
_, _, Y = projections(A)
|
352 |
+
newton_point = np.array([0.24615385, 1.96923077])
|
353 |
+
|
354 |
+
# Newton point inside boundaries
|
355 |
+
x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf], [np.inf, np.inf])
|
356 |
+
assert_array_almost_equal(x, newton_point)
|
357 |
+
|
358 |
+
# Spherical constraint active
|
359 |
+
x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf], [np.inf, np.inf])
|
360 |
+
assert_array_almost_equal(x, newton_point/np.linalg.norm(newton_point))
|
361 |
+
|
362 |
+
# Box constraints active
|
363 |
+
x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf], [0.1, np.inf])
|
364 |
+
assert_array_almost_equal(x, (newton_point/newton_point[0]) * 0.1)
|
365 |
+
|
366 |
+
def test_3d_example(self):
|
367 |
+
A = np.array([[1, 8, 1],
|
368 |
+
[4, 2, 2]])
|
369 |
+
b = np.array([-16, 2])
|
370 |
+
Z, LS, Y = projections(A)
|
371 |
+
|
372 |
+
newton_point = np.array([-1.37090909, 2.23272727, -0.49090909])
|
373 |
+
cauchy_point = np.array([0.11165723, 1.73068711, 0.16748585])
|
374 |
+
origin = np.zeros_like(newton_point)
|
375 |
+
|
376 |
+
# newton_point inside boundaries
|
377 |
+
x = modified_dogleg(A, Y, b, 3, [-np.inf, -np.inf, -np.inf],
|
378 |
+
[np.inf, np.inf, np.inf])
|
379 |
+
assert_array_almost_equal(x, newton_point)
|
380 |
+
|
381 |
+
# line between cauchy_point and newton_point contains best point
|
382 |
+
# (spherical constraint is active).
|
383 |
+
x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf],
|
384 |
+
[np.inf, np.inf, np.inf])
|
385 |
+
z = cauchy_point
|
386 |
+
d = newton_point-cauchy_point
|
387 |
+
t = ((x-z)/(d))
|
388 |
+
assert_array_almost_equal(t, np.full(3, 0.40807330))
|
389 |
+
assert_array_almost_equal(np.linalg.norm(x), 2)
|
390 |
+
|
391 |
+
# line between cauchy_point and newton_point contains best point
|
392 |
+
# (box constraint is active).
|
393 |
+
x = modified_dogleg(A, Y, b, 5, [-1, -np.inf, -np.inf],
|
394 |
+
[np.inf, np.inf, np.inf])
|
395 |
+
z = cauchy_point
|
396 |
+
d = newton_point-cauchy_point
|
397 |
+
t = ((x-z)/(d))
|
398 |
+
assert_array_almost_equal(t, np.full(3, 0.7498195))
|
399 |
+
assert_array_almost_equal(x[0], -1)
|
400 |
+
|
401 |
+
# line between origin and cauchy_point contains best point
|
402 |
+
# (spherical constraint is active).
|
403 |
+
x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf, -np.inf],
|
404 |
+
[np.inf, np.inf, np.inf])
|
405 |
+
z = origin
|
406 |
+
d = cauchy_point
|
407 |
+
t = ((x-z)/(d))
|
408 |
+
assert_array_almost_equal(t, np.full(3, 0.573936265))
|
409 |
+
assert_array_almost_equal(np.linalg.norm(x), 1)
|
410 |
+
|
411 |
+
# line between origin and newton_point contains best point
|
412 |
+
# (box constraint is active).
|
413 |
+
x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf],
|
414 |
+
[np.inf, 1, np.inf])
|
415 |
+
z = origin
|
416 |
+
d = newton_point
|
417 |
+
t = ((x-z)/(d))
|
418 |
+
assert_array_almost_equal(t, np.full(3, 0.4478827364))
|
419 |
+
assert_array_almost_equal(x[1], 1)
|
420 |
+
|
421 |
+
|
422 |
+
class TestProjectCG(TestCase):
|
423 |
+
|
424 |
+
# From Example 16.2 Nocedal/Wright "Numerical
|
425 |
+
# Optimization" p.452.
|
426 |
+
def test_nocedal_example(self):
|
427 |
+
H = csc_matrix([[6, 2, 1],
|
428 |
+
[2, 5, 2],
|
429 |
+
[1, 2, 4]])
|
430 |
+
A = csc_matrix([[1, 0, 1],
|
431 |
+
[0, 1, 1]])
|
432 |
+
c = np.array([-8, -3, -3])
|
433 |
+
b = -np.array([3, 0])
|
434 |
+
Z, _, Y = projections(A)
|
435 |
+
x, info = projected_cg(H, c, Z, Y, b)
|
436 |
+
assert_equal(info["stop_cond"], 4)
|
437 |
+
assert_equal(info["hits_boundary"], False)
|
438 |
+
assert_array_almost_equal(x, [2, -1, 1])
|
439 |
+
|
440 |
+
def test_compare_with_direct_fact(self):
|
441 |
+
H = csc_matrix([[6, 2, 1, 3],
|
442 |
+
[2, 5, 2, 4],
|
443 |
+
[1, 2, 4, 5],
|
444 |
+
[3, 4, 5, 7]])
|
445 |
+
A = csc_matrix([[1, 0, 1, 0],
|
446 |
+
[0, 1, 1, 1]])
|
447 |
+
c = np.array([-2, -3, -3, 1])
|
448 |
+
b = -np.array([3, 0])
|
449 |
+
Z, _, Y = projections(A)
|
450 |
+
x, info = projected_cg(H, c, Z, Y, b, tol=0)
|
451 |
+
x_kkt, _ = eqp_kktfact(H, c, A, b)
|
452 |
+
assert_equal(info["stop_cond"], 1)
|
453 |
+
assert_equal(info["hits_boundary"], False)
|
454 |
+
assert_array_almost_equal(x, x_kkt)
|
455 |
+
|
456 |
+
def test_trust_region_infeasible(self):
|
457 |
+
H = csc_matrix([[6, 2, 1, 3],
|
458 |
+
[2, 5, 2, 4],
|
459 |
+
[1, 2, 4, 5],
|
460 |
+
[3, 4, 5, 7]])
|
461 |
+
A = csc_matrix([[1, 0, 1, 0],
|
462 |
+
[0, 1, 1, 1]])
|
463 |
+
c = np.array([-2, -3, -3, 1])
|
464 |
+
b = -np.array([3, 0])
|
465 |
+
trust_radius = 1
|
466 |
+
Z, _, Y = projections(A)
|
467 |
+
with pytest.raises(ValueError):
|
468 |
+
projected_cg(H, c, Z, Y, b, trust_radius=trust_radius)
|
469 |
+
|
470 |
+
def test_trust_region_barely_feasible(self):
|
471 |
+
H = csc_matrix([[6, 2, 1, 3],
|
472 |
+
[2, 5, 2, 4],
|
473 |
+
[1, 2, 4, 5],
|
474 |
+
[3, 4, 5, 7]])
|
475 |
+
A = csc_matrix([[1, 0, 1, 0],
|
476 |
+
[0, 1, 1, 1]])
|
477 |
+
c = np.array([-2, -3, -3, 1])
|
478 |
+
b = -np.array([3, 0])
|
479 |
+
trust_radius = 2.32379000772445021283
|
480 |
+
Z, _, Y = projections(A)
|
481 |
+
x, info = projected_cg(H, c, Z, Y, b,
|
482 |
+
tol=0,
|
483 |
+
trust_radius=trust_radius)
|
484 |
+
assert_equal(info["stop_cond"], 2)
|
485 |
+
assert_equal(info["hits_boundary"], True)
|
486 |
+
assert_array_almost_equal(np.linalg.norm(x), trust_radius)
|
487 |
+
assert_array_almost_equal(x, -Y.dot(b))
|
488 |
+
|
489 |
+
def test_hits_boundary(self):
|
490 |
+
H = csc_matrix([[6, 2, 1, 3],
|
491 |
+
[2, 5, 2, 4],
|
492 |
+
[1, 2, 4, 5],
|
493 |
+
[3, 4, 5, 7]])
|
494 |
+
A = csc_matrix([[1, 0, 1, 0],
|
495 |
+
[0, 1, 1, 1]])
|
496 |
+
c = np.array([-2, -3, -3, 1])
|
497 |
+
b = -np.array([3, 0])
|
498 |
+
trust_radius = 3
|
499 |
+
Z, _, Y = projections(A)
|
500 |
+
x, info = projected_cg(H, c, Z, Y, b,
|
501 |
+
tol=0,
|
502 |
+
trust_radius=trust_radius)
|
503 |
+
assert_equal(info["stop_cond"], 2)
|
504 |
+
assert_equal(info["hits_boundary"], True)
|
505 |
+
assert_array_almost_equal(np.linalg.norm(x), trust_radius)
|
506 |
+
|
507 |
+
def test_negative_curvature_unconstrained(self):
|
508 |
+
H = csc_matrix([[1, 2, 1, 3],
|
509 |
+
[2, 0, 2, 4],
|
510 |
+
[1, 2, 0, 2],
|
511 |
+
[3, 4, 2, 0]])
|
512 |
+
A = csc_matrix([[1, 0, 1, 0],
|
513 |
+
[0, 1, 0, 1]])
|
514 |
+
c = np.array([-2, -3, -3, 1])
|
515 |
+
b = -np.array([3, 0])
|
516 |
+
Z, _, Y = projections(A)
|
517 |
+
with pytest.raises(ValueError):
|
518 |
+
projected_cg(H, c, Z, Y, b, tol=0)
|
519 |
+
|
520 |
+
def test_negative_curvature(self):
|
521 |
+
H = csc_matrix([[1, 2, 1, 3],
|
522 |
+
[2, 0, 2, 4],
|
523 |
+
[1, 2, 0, 2],
|
524 |
+
[3, 4, 2, 0]])
|
525 |
+
A = csc_matrix([[1, 0, 1, 0],
|
526 |
+
[0, 1, 0, 1]])
|
527 |
+
c = np.array([-2, -3, -3, 1])
|
528 |
+
b = -np.array([3, 0])
|
529 |
+
Z, _, Y = projections(A)
|
530 |
+
trust_radius = 1000
|
531 |
+
x, info = projected_cg(H, c, Z, Y, b,
|
532 |
+
tol=0,
|
533 |
+
trust_radius=trust_radius)
|
534 |
+
assert_equal(info["stop_cond"], 3)
|
535 |
+
assert_equal(info["hits_boundary"], True)
|
536 |
+
assert_array_almost_equal(np.linalg.norm(x), trust_radius)
|
537 |
+
|
538 |
+
# The box constraints are inactive at the solution but
|
539 |
+
# are active during the iterations.
|
540 |
+
def test_inactive_box_constraints(self):
|
541 |
+
H = csc_matrix([[6, 2, 1, 3],
|
542 |
+
[2, 5, 2, 4],
|
543 |
+
[1, 2, 4, 5],
|
544 |
+
[3, 4, 5, 7]])
|
545 |
+
A = csc_matrix([[1, 0, 1, 0],
|
546 |
+
[0, 1, 1, 1]])
|
547 |
+
c = np.array([-2, -3, -3, 1])
|
548 |
+
b = -np.array([3, 0])
|
549 |
+
Z, _, Y = projections(A)
|
550 |
+
x, info = projected_cg(H, c, Z, Y, b,
|
551 |
+
tol=0,
|
552 |
+
lb=[0.5, -np.inf,
|
553 |
+
-np.inf, -np.inf],
|
554 |
+
return_all=True)
|
555 |
+
x_kkt, _ = eqp_kktfact(H, c, A, b)
|
556 |
+
assert_equal(info["stop_cond"], 1)
|
557 |
+
assert_equal(info["hits_boundary"], False)
|
558 |
+
assert_array_almost_equal(x, x_kkt)
|
559 |
+
|
560 |
+
# The box constraints active and the termination is
|
561 |
+
# by maximum iterations (infeasible interaction).
|
562 |
+
def test_active_box_constraints_maximum_iterations_reached(self):
|
563 |
+
H = csc_matrix([[6, 2, 1, 3],
|
564 |
+
[2, 5, 2, 4],
|
565 |
+
[1, 2, 4, 5],
|
566 |
+
[3, 4, 5, 7]])
|
567 |
+
A = csc_matrix([[1, 0, 1, 0],
|
568 |
+
[0, 1, 1, 1]])
|
569 |
+
c = np.array([-2, -3, -3, 1])
|
570 |
+
b = -np.array([3, 0])
|
571 |
+
Z, _, Y = projections(A)
|
572 |
+
x, info = projected_cg(H, c, Z, Y, b,
|
573 |
+
tol=0,
|
574 |
+
lb=[0.8, -np.inf,
|
575 |
+
-np.inf, -np.inf],
|
576 |
+
return_all=True)
|
577 |
+
assert_equal(info["stop_cond"], 1)
|
578 |
+
assert_equal(info["hits_boundary"], True)
|
579 |
+
assert_array_almost_equal(A.dot(x), -b)
|
580 |
+
assert_array_almost_equal(x[0], 0.8)
|
581 |
+
|
582 |
+
# The box constraints are active and the termination is
|
583 |
+
# because it hits boundary (without infeasible interaction).
|
584 |
+
def test_active_box_constraints_hits_boundaries(self):
|
585 |
+
H = csc_matrix([[6, 2, 1, 3],
|
586 |
+
[2, 5, 2, 4],
|
587 |
+
[1, 2, 4, 5],
|
588 |
+
[3, 4, 5, 7]])
|
589 |
+
A = csc_matrix([[1, 0, 1, 0],
|
590 |
+
[0, 1, 1, 1]])
|
591 |
+
c = np.array([-2, -3, -3, 1])
|
592 |
+
b = -np.array([3, 0])
|
593 |
+
trust_radius = 3
|
594 |
+
Z, _, Y = projections(A)
|
595 |
+
x, info = projected_cg(H, c, Z, Y, b,
|
596 |
+
tol=0,
|
597 |
+
ub=[np.inf, np.inf, 1.6, np.inf],
|
598 |
+
trust_radius=trust_radius,
|
599 |
+
return_all=True)
|
600 |
+
assert_equal(info["stop_cond"], 2)
|
601 |
+
assert_equal(info["hits_boundary"], True)
|
602 |
+
assert_array_almost_equal(x[2], 1.6)
|
603 |
+
|
604 |
+
# The box constraints are active and the termination is
|
605 |
+
# because it hits boundary (infeasible interaction).
|
606 |
+
def test_active_box_constraints_hits_boundaries_infeasible_iter(self):
|
607 |
+
H = csc_matrix([[6, 2, 1, 3],
|
608 |
+
[2, 5, 2, 4],
|
609 |
+
[1, 2, 4, 5],
|
610 |
+
[3, 4, 5, 7]])
|
611 |
+
A = csc_matrix([[1, 0, 1, 0],
|
612 |
+
[0, 1, 1, 1]])
|
613 |
+
c = np.array([-2, -3, -3, 1])
|
614 |
+
b = -np.array([3, 0])
|
615 |
+
trust_radius = 4
|
616 |
+
Z, _, Y = projections(A)
|
617 |
+
x, info = projected_cg(H, c, Z, Y, b,
|
618 |
+
tol=0,
|
619 |
+
ub=[np.inf, 0.1, np.inf, np.inf],
|
620 |
+
trust_radius=trust_radius,
|
621 |
+
return_all=True)
|
622 |
+
assert_equal(info["stop_cond"], 2)
|
623 |
+
assert_equal(info["hits_boundary"], True)
|
624 |
+
assert_array_almost_equal(x[1], 0.1)
|
625 |
+
|
626 |
+
# The box constraints are active and the termination is
|
627 |
+
# because it hits boundary (no infeasible interaction).
|
628 |
+
def test_active_box_constraints_negative_curvature(self):
|
629 |
+
H = csc_matrix([[1, 2, 1, 3],
|
630 |
+
[2, 0, 2, 4],
|
631 |
+
[1, 2, 0, 2],
|
632 |
+
[3, 4, 2, 0]])
|
633 |
+
A = csc_matrix([[1, 0, 1, 0],
|
634 |
+
[0, 1, 0, 1]])
|
635 |
+
c = np.array([-2, -3, -3, 1])
|
636 |
+
b = -np.array([3, 0])
|
637 |
+
Z, _, Y = projections(A)
|
638 |
+
trust_radius = 1000
|
639 |
+
x, info = projected_cg(H, c, Z, Y, b,
|
640 |
+
tol=0,
|
641 |
+
ub=[np.inf, np.inf, 100, np.inf],
|
642 |
+
trust_radius=trust_radius)
|
643 |
+
assert_equal(info["stop_cond"], 3)
|
644 |
+
assert_equal(info["hits_boundary"], True)
|
645 |
+
assert_array_almost_equal(x[2], 100)
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_report.py
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
from scipy.optimize import minimize, Bounds
|
3 |
+
|
4 |
+
def test_gh10880():
|
5 |
+
# checks that verbose reporting works with trust-constr for
|
6 |
+
# bound-contrained problems
|
7 |
+
bnds = Bounds(1, 2)
|
8 |
+
opts = {'maxiter': 1000, 'verbose': 2}
|
9 |
+
minimize(lambda x: x**2, x0=2., method='trust-constr',
|
10 |
+
bounds=bnds, options=opts)
|
11 |
+
|
12 |
+
opts = {'maxiter': 1000, 'verbose': 3}
|
13 |
+
minimize(lambda x: x**2, x0=2., method='trust-constr',
|
14 |
+
bounds=bnds, options=opts)
|
15 |
+
|
16 |
+
def test_gh12922():
|
17 |
+
# checks that verbose reporting works with trust-constr for
|
18 |
+
# general constraints
|
19 |
+
def objective(x):
|
20 |
+
return np.array([(np.sum((x+1)**4))])
|
21 |
+
|
22 |
+
cons = {'type': 'ineq', 'fun': lambda x: -x[0]**2}
|
23 |
+
n = 25
|
24 |
+
x0 = np.linspace(-5, 5, n)
|
25 |
+
|
26 |
+
opts = {'maxiter': 1000, 'verbose': 2}
|
27 |
+
minimize(objective, x0=x0, method='trust-constr',
|
28 |
+
constraints=cons, options=opts)
|
29 |
+
|
30 |
+
opts = {'maxiter': 1000, 'verbose': 3}
|
31 |
+
minimize(objective, x0=x0, method='trust-constr',
|
32 |
+
constraints=cons, options=opts)
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize/_zeros.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (116 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize/_zeros.pxd
ADDED
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Legacy public Cython API declarations
|
2 |
+
#
|
3 |
+
# NOTE: due to the way Cython ABI compatibility works, **no changes
|
4 |
+
# should be made to this file** --- any API additions/changes should be
|
5 |
+
# done in `cython_optimize.pxd` (see gh-11793).
|
6 |
+
|
7 |
+
ctypedef double (*callback_type)(double, void*) noexcept
|
8 |
+
|
9 |
+
ctypedef struct zeros_parameters:
|
10 |
+
callback_type function
|
11 |
+
void* args
|
12 |
+
|
13 |
+
ctypedef struct zeros_full_output:
|
14 |
+
int funcalls
|
15 |
+
int iterations
|
16 |
+
int error_num
|
17 |
+
double root
|
18 |
+
|
19 |
+
cdef double bisect(callback_type f, double xa, double xb, void* args,
|
20 |
+
double xtol, double rtol, int iter,
|
21 |
+
zeros_full_output *full_output) noexcept nogil
|
22 |
+
|
23 |
+
cdef double ridder(callback_type f, double xa, double xb, void* args,
|
24 |
+
double xtol, double rtol, int iter,
|
25 |
+
zeros_full_output *full_output) noexcept nogil
|
26 |
+
|
27 |
+
cdef double brenth(callback_type f, double xa, double xb, void* args,
|
28 |
+
double xtol, double rtol, int iter,
|
29 |
+
zeros_full_output *full_output) noexcept nogil
|
30 |
+
|
31 |
+
cdef double brentq(callback_type f, double xa, double xb, void* args,
|
32 |
+
double xtol, double rtol, int iter,
|
33 |
+
zeros_full_output *full_output) noexcept nogil
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize/c_zeros.pxd
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
cdef extern from "../Zeros/zeros.h":
|
2 |
+
ctypedef double (*callback_type)(double, void*) noexcept
|
3 |
+
ctypedef struct scipy_zeros_info:
|
4 |
+
int funcalls
|
5 |
+
int iterations
|
6 |
+
int error_num
|
7 |
+
|
8 |
+
cdef extern from "../Zeros/bisect.c" nogil:
|
9 |
+
double bisect(callback_type f, double xa, double xb, double xtol,
|
10 |
+
double rtol, int iter, void *func_data_param,
|
11 |
+
scipy_zeros_info *solver_stats)
|
12 |
+
|
13 |
+
cdef extern from "../Zeros/ridder.c" nogil:
|
14 |
+
double ridder(callback_type f, double xa, double xb, double xtol,
|
15 |
+
double rtol, int iter, void *func_data_param,
|
16 |
+
scipy_zeros_info *solver_stats)
|
17 |
+
|
18 |
+
cdef extern from "../Zeros/brenth.c" nogil:
|
19 |
+
double brenth(callback_type f, double xa, double xb, double xtol,
|
20 |
+
double rtol, int iter, void *func_data_param,
|
21 |
+
scipy_zeros_info *solver_stats)
|
22 |
+
|
23 |
+
cdef extern from "../Zeros/brentq.c" nogil:
|
24 |
+
double brentq(callback_type f, double xa, double xb, double xtol,
|
25 |
+
double rtol, int iter, void *func_data_param,
|
26 |
+
scipy_zeros_info *solver_stats)
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__init__.py
ADDED
File without changes
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__linprog_clean_inputs.cpython-310.pyc
ADDED
Binary file (8.05 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__remove_redundancy.cpython-310.pyc
ADDED
Binary file (7.6 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__root.cpython-310.pyc
ADDED
Binary file (4.98 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__shgo.cpython-310.pyc
ADDED
Binary file (40.9 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_constraint_conversion.cpython-310.pyc
ADDED
Binary file (11.7 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cython_optimize.cpython-310.pyc
ADDED
Binary file (2.34 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_direct.cpython-310.pyc
ADDED
Binary file (11 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lbfgsb_setulb.cpython-310.pyc
ADDED
Binary file (2.79 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_least_squares.cpython-310.pyc
ADDED
Binary file (28.5 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linear_assignment.cpython-310.pyc
ADDED
Binary file (4.2 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lsq_linear.cpython-310.pyc
ADDED
Binary file (9.53 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_nnls.cpython-310.pyc
ADDED
Binary file (10.2 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__basinhopping.py
ADDED
@@ -0,0 +1,525 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Unit tests for the basin hopping global minimization algorithm.
|
3 |
+
"""
|
4 |
+
import copy
|
5 |
+
|
6 |
+
from numpy.testing import (assert_almost_equal, assert_equal, assert_,
|
7 |
+
assert_allclose)
|
8 |
+
import pytest
|
9 |
+
from pytest import raises as assert_raises
|
10 |
+
import numpy as np
|
11 |
+
from numpy import cos, sin
|
12 |
+
|
13 |
+
from scipy.optimize import basinhopping, OptimizeResult
|
14 |
+
from scipy.optimize._basinhopping import (
|
15 |
+
Storage, RandomDisplacement, Metropolis, AdaptiveStepsize)
|
16 |
+
|
17 |
+
|
18 |
+
def func1d(x):
|
19 |
+
f = cos(14.5 * x - 0.3) + (x + 0.2) * x
|
20 |
+
df = np.array(-14.5 * sin(14.5 * x - 0.3) + 2. * x + 0.2)
|
21 |
+
return f, df
|
22 |
+
|
23 |
+
|
24 |
+
def func2d_nograd(x):
|
25 |
+
f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]
|
26 |
+
return f
|
27 |
+
|
28 |
+
|
29 |
+
def func2d(x):
|
30 |
+
f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]
|
31 |
+
df = np.zeros(2)
|
32 |
+
df[0] = -14.5 * sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2
|
33 |
+
df[1] = 2. * x[1] + 0.2
|
34 |
+
return f, df
|
35 |
+
|
36 |
+
|
37 |
+
def func2d_easyderiv(x):
|
38 |
+
f = 2.0*x[0]**2 + 2.0*x[0]*x[1] + 2.0*x[1]**2 - 6.0*x[0]
|
39 |
+
df = np.zeros(2)
|
40 |
+
df[0] = 4.0*x[0] + 2.0*x[1] - 6.0
|
41 |
+
df[1] = 2.0*x[0] + 4.0*x[1]
|
42 |
+
|
43 |
+
return f, df
|
44 |
+
|
45 |
+
|
46 |
+
class MyTakeStep1(RandomDisplacement):
|
47 |
+
"""use a copy of displace, but have it set a special parameter to
|
48 |
+
make sure it's actually being used."""
|
49 |
+
def __init__(self):
|
50 |
+
self.been_called = False
|
51 |
+
super().__init__()
|
52 |
+
|
53 |
+
def __call__(self, x):
|
54 |
+
self.been_called = True
|
55 |
+
return super().__call__(x)
|
56 |
+
|
57 |
+
|
58 |
+
def myTakeStep2(x):
|
59 |
+
"""redo RandomDisplacement in function form without the attribute stepsize
|
60 |
+
to make sure everything still works ok
|
61 |
+
"""
|
62 |
+
s = 0.5
|
63 |
+
x += np.random.uniform(-s, s, np.shape(x))
|
64 |
+
return x
|
65 |
+
|
66 |
+
|
67 |
+
class MyAcceptTest:
|
68 |
+
"""pass a custom accept test
|
69 |
+
|
70 |
+
This does nothing but make sure it's being used and ensure all the
|
71 |
+
possible return values are accepted
|
72 |
+
"""
|
73 |
+
def __init__(self):
|
74 |
+
self.been_called = False
|
75 |
+
self.ncalls = 0
|
76 |
+
self.testres = [False, 'force accept', True, np.bool_(True),
|
77 |
+
np.bool_(False), [], {}, 0, 1]
|
78 |
+
|
79 |
+
def __call__(self, **kwargs):
|
80 |
+
self.been_called = True
|
81 |
+
self.ncalls += 1
|
82 |
+
if self.ncalls - 1 < len(self.testres):
|
83 |
+
return self.testres[self.ncalls - 1]
|
84 |
+
else:
|
85 |
+
return True
|
86 |
+
|
87 |
+
|
88 |
+
class MyCallBack:
|
89 |
+
"""pass a custom callback function
|
90 |
+
|
91 |
+
This makes sure it's being used. It also returns True after 10
|
92 |
+
steps to ensure that it's stopping early.
|
93 |
+
|
94 |
+
"""
|
95 |
+
def __init__(self):
|
96 |
+
self.been_called = False
|
97 |
+
self.ncalls = 0
|
98 |
+
|
99 |
+
def __call__(self, x, f, accepted):
|
100 |
+
self.been_called = True
|
101 |
+
self.ncalls += 1
|
102 |
+
if self.ncalls == 10:
|
103 |
+
return True
|
104 |
+
|
105 |
+
|
106 |
+
class TestBasinHopping:
|
107 |
+
|
108 |
+
def setup_method(self):
|
109 |
+
""" Tests setup.
|
110 |
+
|
111 |
+
Run tests based on the 1-D and 2-D functions described above.
|
112 |
+
"""
|
113 |
+
self.x0 = (1.0, [1.0, 1.0])
|
114 |
+
self.sol = (-0.195, np.array([-0.195, -0.1]))
|
115 |
+
|
116 |
+
self.tol = 3 # number of decimal places
|
117 |
+
|
118 |
+
self.niter = 100
|
119 |
+
self.disp = False
|
120 |
+
|
121 |
+
# fix random seed
|
122 |
+
np.random.seed(1234)
|
123 |
+
|
124 |
+
self.kwargs = {"method": "L-BFGS-B", "jac": True}
|
125 |
+
self.kwargs_nograd = {"method": "L-BFGS-B"}
|
126 |
+
|
127 |
+
def test_TypeError(self):
|
128 |
+
# test the TypeErrors are raised on bad input
|
129 |
+
i = 1
|
130 |
+
# if take_step is passed, it must be callable
|
131 |
+
assert_raises(TypeError, basinhopping, func2d, self.x0[i],
|
132 |
+
take_step=1)
|
133 |
+
# if accept_test is passed, it must be callable
|
134 |
+
assert_raises(TypeError, basinhopping, func2d, self.x0[i],
|
135 |
+
accept_test=1)
|
136 |
+
|
137 |
+
def test_input_validation(self):
|
138 |
+
msg = 'target_accept_rate has to be in range \\(0, 1\\)'
|
139 |
+
with assert_raises(ValueError, match=msg):
|
140 |
+
basinhopping(func1d, self.x0[0], target_accept_rate=0.)
|
141 |
+
with assert_raises(ValueError, match=msg):
|
142 |
+
basinhopping(func1d, self.x0[0], target_accept_rate=1.)
|
143 |
+
|
144 |
+
msg = 'stepwise_factor has to be in range \\(0, 1\\)'
|
145 |
+
with assert_raises(ValueError, match=msg):
|
146 |
+
basinhopping(func1d, self.x0[0], stepwise_factor=0.)
|
147 |
+
with assert_raises(ValueError, match=msg):
|
148 |
+
basinhopping(func1d, self.x0[0], stepwise_factor=1.)
|
149 |
+
|
150 |
+
def test_1d_grad(self):
|
151 |
+
# test 1-D minimizations with gradient
|
152 |
+
i = 0
|
153 |
+
res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
|
154 |
+
niter=self.niter, disp=self.disp)
|
155 |
+
assert_almost_equal(res.x, self.sol[i], self.tol)
|
156 |
+
|
157 |
+
def test_2d(self):
|
158 |
+
# test 2d minimizations with gradient
|
159 |
+
i = 1
|
160 |
+
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
|
161 |
+
niter=self.niter, disp=self.disp)
|
162 |
+
assert_almost_equal(res.x, self.sol[i], self.tol)
|
163 |
+
assert_(res.nfev > 0)
|
164 |
+
|
165 |
+
def test_njev(self):
|
166 |
+
# test njev is returned correctly
|
167 |
+
i = 1
|
168 |
+
minimizer_kwargs = self.kwargs.copy()
|
169 |
+
# L-BFGS-B doesn't use njev, but BFGS does
|
170 |
+
minimizer_kwargs["method"] = "BFGS"
|
171 |
+
res = basinhopping(func2d, self.x0[i],
|
172 |
+
minimizer_kwargs=minimizer_kwargs, niter=self.niter,
|
173 |
+
disp=self.disp)
|
174 |
+
assert_(res.nfev > 0)
|
175 |
+
assert_equal(res.nfev, res.njev)
|
176 |
+
|
177 |
+
def test_jac(self):
|
178 |
+
# test Jacobian returned
|
179 |
+
minimizer_kwargs = self.kwargs.copy()
|
180 |
+
# BFGS returns a Jacobian
|
181 |
+
minimizer_kwargs["method"] = "BFGS"
|
182 |
+
|
183 |
+
res = basinhopping(func2d_easyderiv, [0.0, 0.0],
|
184 |
+
minimizer_kwargs=minimizer_kwargs, niter=self.niter,
|
185 |
+
disp=self.disp)
|
186 |
+
|
187 |
+
assert_(hasattr(res.lowest_optimization_result, "jac"))
|
188 |
+
|
189 |
+
# in this case, the Jacobian is just [df/dx, df/dy]
|
190 |
+
_, jacobian = func2d_easyderiv(res.x)
|
191 |
+
assert_almost_equal(res.lowest_optimization_result.jac, jacobian,
|
192 |
+
self.tol)
|
193 |
+
|
194 |
+
def test_2d_nograd(self):
|
195 |
+
# test 2-D minimizations without gradient
|
196 |
+
i = 1
|
197 |
+
res = basinhopping(func2d_nograd, self.x0[i],
|
198 |
+
minimizer_kwargs=self.kwargs_nograd,
|
199 |
+
niter=self.niter, disp=self.disp)
|
200 |
+
assert_almost_equal(res.x, self.sol[i], self.tol)
|
201 |
+
|
202 |
+
def test_all_minimizers(self):
|
203 |
+
# Test 2-D minimizations with gradient. Nelder-Mead, Powell, and COBYLA
|
204 |
+
# don't accept jac=True, so aren't included here.
|
205 |
+
i = 1
|
206 |
+
methods = ['CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'SLSQP']
|
207 |
+
minimizer_kwargs = copy.copy(self.kwargs)
|
208 |
+
for method in methods:
|
209 |
+
minimizer_kwargs["method"] = method
|
210 |
+
res = basinhopping(func2d, self.x0[i],
|
211 |
+
minimizer_kwargs=minimizer_kwargs,
|
212 |
+
niter=self.niter, disp=self.disp)
|
213 |
+
assert_almost_equal(res.x, self.sol[i], self.tol)
|
214 |
+
|
215 |
+
def test_all_nograd_minimizers(self):
|
216 |
+
# Test 2-D minimizations without gradient. Newton-CG requires jac=True,
|
217 |
+
# so not included here.
|
218 |
+
i = 1
|
219 |
+
methods = ['CG', 'BFGS', 'L-BFGS-B', 'TNC', 'SLSQP',
|
220 |
+
'Nelder-Mead', 'Powell', 'COBYLA']
|
221 |
+
minimizer_kwargs = copy.copy(self.kwargs_nograd)
|
222 |
+
for method in methods:
|
223 |
+
minimizer_kwargs["method"] = method
|
224 |
+
res = basinhopping(func2d_nograd, self.x0[i],
|
225 |
+
minimizer_kwargs=minimizer_kwargs,
|
226 |
+
niter=self.niter, disp=self.disp)
|
227 |
+
tol = self.tol
|
228 |
+
if method == 'COBYLA':
|
229 |
+
tol = 2
|
230 |
+
assert_almost_equal(res.x, self.sol[i], decimal=tol)
|
231 |
+
|
232 |
+
def test_pass_takestep(self):
|
233 |
+
# test that passing a custom takestep works
|
234 |
+
# also test that the stepsize is being adjusted
|
235 |
+
takestep = MyTakeStep1()
|
236 |
+
initial_step_size = takestep.stepsize
|
237 |
+
i = 1
|
238 |
+
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
|
239 |
+
niter=self.niter, disp=self.disp,
|
240 |
+
take_step=takestep)
|
241 |
+
assert_almost_equal(res.x, self.sol[i], self.tol)
|
242 |
+
assert_(takestep.been_called)
|
243 |
+
# make sure that the build in adaptive step size has been used
|
244 |
+
assert_(initial_step_size != takestep.stepsize)
|
245 |
+
|
246 |
+
def test_pass_simple_takestep(self):
|
247 |
+
# test that passing a custom takestep without attribute stepsize
|
248 |
+
takestep = myTakeStep2
|
249 |
+
i = 1
|
250 |
+
res = basinhopping(func2d_nograd, self.x0[i],
|
251 |
+
minimizer_kwargs=self.kwargs_nograd,
|
252 |
+
niter=self.niter, disp=self.disp,
|
253 |
+
take_step=takestep)
|
254 |
+
assert_almost_equal(res.x, self.sol[i], self.tol)
|
255 |
+
|
256 |
+
def test_pass_accept_test(self):
|
257 |
+
# test passing a custom accept test
|
258 |
+
# makes sure it's being used and ensures all the possible return values
|
259 |
+
# are accepted.
|
260 |
+
accept_test = MyAcceptTest()
|
261 |
+
i = 1
|
262 |
+
# there's no point in running it more than a few steps.
|
263 |
+
basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
|
264 |
+
niter=10, disp=self.disp, accept_test=accept_test)
|
265 |
+
assert_(accept_test.been_called)
|
266 |
+
|
267 |
+
    def test_pass_callback(self):
        # test passing a custom callback function
        # This makes sure it's being used. It also returns True after 10 steps
        # to ensure that it's stopping early.
        callback = MyCallBack()
        i = 1
        # there's no point in running it more than a few steps.
        res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
                           niter=30, disp=self.disp, callback=callback)
        assert_(callback.been_called)
        assert_("callback" in res.message[0])
        # One of the calls of MyCallBack is during BasinHoppingRunner
        # construction, so there are only 9 remaining before MyCallBack stops
        # the minimization.
        assert_equal(res.nit, 9)
|
282 |
+
|
283 |
+
    def test_minimizer_fail(self):
        # test if a minimizer fails
        i = 1
        # maxiter=0 guarantees that every local minimization fails
        self.kwargs["options"] = dict(maxiter=0)
        self.niter = 10
        res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
                           niter=self.niter, disp=self.disp)
        # the number of failed minimizations should be the number of
        # iterations + 1
        assert_equal(res.nit + 1, res.minimization_failures)
|
293 |
+
|
294 |
+
    def test_niter_zero(self):
        # gh5915, what happens if you call basinhopping with niter=0
        # (it should not raise)
        i = 0
        basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
                     niter=0, disp=self.disp)
|
299 |
+
|
300 |
+
def test_seed_reproducibility(self):
|
301 |
+
# seed should ensure reproducibility between runs
|
302 |
+
minimizer_kwargs = {"method": "L-BFGS-B", "jac": True}
|
303 |
+
|
304 |
+
f_1 = []
|
305 |
+
|
306 |
+
def callback(x, f, accepted):
|
307 |
+
f_1.append(f)
|
308 |
+
|
309 |
+
basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
|
310 |
+
niter=10, callback=callback, seed=10)
|
311 |
+
|
312 |
+
f_2 = []
|
313 |
+
|
314 |
+
def callback2(x, f, accepted):
|
315 |
+
f_2.append(f)
|
316 |
+
|
317 |
+
basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
|
318 |
+
niter=10, callback=callback2, seed=10)
|
319 |
+
assert_equal(np.array(f_1), np.array(f_2))
|
320 |
+
|
321 |
+
    def test_random_gen(self):
        # check that np.random.Generator can be used (numpy >= 1.17)
        # two runs seeded from identical Generator states must match exactly
        rng = np.random.default_rng(1)

        minimizer_kwargs = {"method": "L-BFGS-B", "jac": True}

        res1 = basinhopping(func2d, [1.0, 1.0],
                            minimizer_kwargs=minimizer_kwargs,
                            niter=10, seed=rng)

        rng = np.random.default_rng(1)
        res2 = basinhopping(func2d, [1.0, 1.0],
                            minimizer_kwargs=minimizer_kwargs,
                            niter=10, seed=rng)
        assert_equal(res1.x, res2.x)
|
336 |
+
|
337 |
+
    def test_monotonic_basin_hopping(self):
        # test 1-D minimizations with gradient and T=0
        # (with T=0 only downhill steps are ever accepted)
        i = 0
        res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
                           niter=self.niter, disp=self.disp, T=0)
        assert_almost_equal(res.x, self.sol[i], self.tol)
|
343 |
+
|
344 |
+
|
345 |
+
class Test_Storage:
|
346 |
+
def setup_method(self):
|
347 |
+
self.x0 = np.array(1)
|
348 |
+
self.f0 = 0
|
349 |
+
|
350 |
+
minres = OptimizeResult(success=True)
|
351 |
+
minres.x = self.x0
|
352 |
+
minres.fun = self.f0
|
353 |
+
|
354 |
+
self.storage = Storage(minres)
|
355 |
+
|
356 |
+
def test_higher_f_rejected(self):
|
357 |
+
new_minres = OptimizeResult(success=True)
|
358 |
+
new_minres.x = self.x0 + 1
|
359 |
+
new_minres.fun = self.f0 + 1
|
360 |
+
|
361 |
+
ret = self.storage.update(new_minres)
|
362 |
+
minres = self.storage.get_lowest()
|
363 |
+
assert_equal(self.x0, minres.x)
|
364 |
+
assert_equal(self.f0, minres.fun)
|
365 |
+
assert_(not ret)
|
366 |
+
|
367 |
+
@pytest.mark.parametrize('success', [True, False])
|
368 |
+
def test_lower_f_accepted(self, success):
|
369 |
+
new_minres = OptimizeResult(success=success)
|
370 |
+
new_minres.x = self.x0 + 1
|
371 |
+
new_minres.fun = self.f0 - 1
|
372 |
+
|
373 |
+
ret = self.storage.update(new_minres)
|
374 |
+
minres = self.storage.get_lowest()
|
375 |
+
assert (self.x0 != minres.x) == success # can't use `is`
|
376 |
+
assert (self.f0 != minres.fun) == success # left side is NumPy bool
|
377 |
+
assert ret is success
|
378 |
+
|
379 |
+
|
380 |
+
class Test_RandomDisplacement:
    """Statistical sanity checks for the RandomDisplacement take-step."""

    def setup_method(self):
        self.stepsize = 1.0
        self.displace = RandomDisplacement(stepsize=self.stepsize)
        self.N = 300000
        self.x0 = np.zeros([self.N])

    def test_random(self):
        # Displacements should be uniform on (-stepsize, stepsize): mean ~0
        # and variance ~ (2*stepsize)**2 / 12.  These checks are statistical
        # and will fail from time to time.
        displaced = self.displace(self.x0)
        expected_var = (2. * self.stepsize) ** 2 / 12
        assert_almost_equal(np.mean(displaced), 0., 1)
        assert_almost_equal(np.var(displaced), expected_var, 1)
|
395 |
+
|
396 |
+
|
397 |
+
class Test_Metropolis:
    """Tests for the Metropolis acceptance criterion used by basinhopping."""

    def setup_method(self):
        self.T = 2.
        self.met = Metropolis(self.T)
        # res_new has lower energy than res_old
        self.res_new = OptimizeResult(success=True, fun=0.)
        self.res_old = OptimizeResult(success=True, fun=1.)

    def test_boolean_return(self):
        # the return must be a bool, else an error will be raised in
        # basinhopping
        ret = self.met(res_new=self.res_new, res_old=self.res_old)
        assert isinstance(ret, bool)

    def test_lower_f_accepted(self):
        # a lower-energy new result must always be accepted
        assert_(self.met(res_new=self.res_new, res_old=self.res_old))

    def test_accept(self):
        # test that steps are randomly accepted for f_new > f_old
        one_accept = False
        one_reject = False
        for i in range(1000):
            # stop as soon as both outcomes have been observed
            if one_accept and one_reject:
                break
            res_new = OptimizeResult(success=True, fun=1.)
            res_old = OptimizeResult(success=True, fun=0.5)
            ret = self.met(res_new=res_new, res_old=res_old)
            if ret:
                one_accept = True
            else:
                one_reject = True
        assert_(one_accept)
        assert_(one_reject)

    def test_GH7495(self):
        # an overflow in exp was producing a RuntimeWarning
        # create own object here in case someone changes self.T
        met = Metropolis(2)
        res_new = OptimizeResult(success=True, fun=0.)
        res_old = OptimizeResult(success=True, fun=2000)
        # errstate(over='raise') turns any overflow into a hard error
        with np.errstate(over='raise'):
            met.accept_reject(res_new=res_new, res_old=res_old)

    def test_gh7799(self):
        # gh-7799 reported a problem in which local search was successful but
        # basinhopping returned an invalid solution. Show that this is fixed.
        def func(x):
            return (x**2-8)**2+(x+2)**2

        x0 = -4
        limit = 50  # Constrain to func value >= 50
        # NOTE: the trailing comma makes `con` a 1-tuple of constraint dicts
        con = {'type': 'ineq', 'fun': lambda x: func(x) - limit},
        res = basinhopping(func, x0, 30, minimizer_kwargs={'constraints': con})
        assert res.success
        assert_allclose(res.fun, limit, rtol=1e-6)

    def test_accept_gh7799(self):
        # Metropolis should not accept the result of an unsuccessful new local
        # search if the old local search was successful

        met = Metropolis(0)  # monotonic basin hopping
        res_new = OptimizeResult(success=True, fun=0.)
        res_old = OptimizeResult(success=True, fun=1.)

        # if new local search was successful and energy is lower, accept
        assert met(res_new=res_new, res_old=res_old)
        # if new res is unsuccessful, don't accept - even if energy is lower
        res_new.success = False
        assert not met(res_new=res_new, res_old=res_old)
        # ...unless the old res was unsuccessful, too. In that case, why not?
        res_old.success = False
        assert met(res_new=res_new, res_old=res_old)

    def test_reject_all_gh7799(self):
        # Test the behavior when there is no feasible solution
        def fun(x):
            return x@x

        def constraint(x):
            return x + 1

        # the equality constraint (x == -1) conflicts with bounds [0, 1],
        # so every local minimization must fail
        kwargs = {'constraints': {'type': 'eq', 'fun': constraint},
                  'bounds': [(0, 1), (0, 1)], 'method': 'slsqp'}
        res = basinhopping(fun, x0=[2, 3], niter=10, minimizer_kwargs=kwargs)
        assert not res.success
|
481 |
+
|
482 |
+
|
483 |
+
class Test_AdaptiveStepsize:
    """Tests that AdaptiveStepsize grows/shrinks the underlying step size
    depending on the observed acceptance rate."""

    def setup_method(self):
        self.stepsize = 1.
        self.ts = RandomDisplacement(stepsize=self.stepsize)
        self.target_accept_rate = 0.5
        self.takestep = AdaptiveStepsize(takestep=self.ts, verbose=False,
                                         accept_rate=self.target_accept_rate)

    def _step_and_report(self, accepted):
        # Take one step and report whether it was accepted.
        self.takestep(0.)
        self.takestep.report(accepted)

    def test_adaptive_increase(self):
        # if few steps are rejected, the stepsize should increase
        self._step_and_report(False)
        for _ in range(self.takestep.interval):
            self._step_and_report(True)
        assert_(self.ts.stepsize > self.stepsize)

    def test_adaptive_decrease(self):
        # if few steps are accepted, the stepsize should decrease
        self._step_and_report(True)
        for _ in range(self.takestep.interval):
            self._step_and_report(False)
        assert_(self.ts.stepsize < self.stepsize)

    def test_all_accepted(self):
        # test that everything works OK if all steps were accepted
        for _ in range(self.takestep.interval + 1):
            self._step_and_report(True)
        assert_(self.ts.stepsize > self.stepsize)

    def test_all_rejected(self):
        # test that everything works OK if all steps were rejected
        for _ in range(self.takestep.interval + 1):
            self._step_and_report(False)
        assert_(self.ts.stepsize < self.stepsize)
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__differential_evolution.py
ADDED
@@ -0,0 +1,1677 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Unit tests for the differential global minimization algorithm.
|
3 |
+
"""
|
4 |
+
import multiprocessing
|
5 |
+
import platform
|
6 |
+
|
7 |
+
from scipy.optimize._differentialevolution import (DifferentialEvolutionSolver,
|
8 |
+
_ConstraintWrapper)
|
9 |
+
from scipy.optimize import differential_evolution, OptimizeResult
|
10 |
+
from scipy.optimize._constraints import (Bounds, NonlinearConstraint,
|
11 |
+
LinearConstraint)
|
12 |
+
from scipy.optimize import rosen, minimize
|
13 |
+
from scipy.sparse import csr_matrix
|
14 |
+
from scipy import stats
|
15 |
+
|
16 |
+
import numpy as np
|
17 |
+
from numpy.testing import (assert_equal, assert_allclose, assert_almost_equal,
|
18 |
+
assert_string_equal, assert_, suppress_warnings)
|
19 |
+
from pytest import raises as assert_raises, warns
|
20 |
+
import pytest
|
21 |
+
|
22 |
+
|
23 |
+
class TestDifferentialEvolutionSolver:
|
24 |
+
|
25 |
+
    def setup_method(self):
        # make invalid float operations raise so latent numerical problems
        # in the solver surface as test failures; restored in teardown_method
        self.old_seterr = np.seterr(invalid='raise')
        self.limits = np.array([[0., 0.],
                                [2., 2.]])
        self.bounds = [(0., 2.), (0., 2.)]

        self.dummy_solver = DifferentialEvolutionSolver(self.quadratic,
                                                        [(0, 100)])

        # dummy_solver2 will be used to test mutation strategies
        self.dummy_solver2 = DifferentialEvolutionSolver(self.quadratic,
                                                         [(0, 1)],
                                                         popsize=7,
                                                         mutation=0.5)
        # create a population that's only 7 members long
        # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
        population = np.atleast_2d(np.arange(0.1, 0.8, 0.1)).T
        self.dummy_solver2.population = population
|
43 |
+
|
44 |
+
    def teardown_method(self):
        # restore the caller's floating-point error state
        np.seterr(**self.old_seterr)
|
46 |
+
|
47 |
+
    def quadratic(self, x):
        # simple convex objective used by several tests
        return x[0]**2
|
49 |
+
|
50 |
+
def test__strategy_resolves(self):
|
51 |
+
# test that the correct mutation function is resolved by
|
52 |
+
# different requested strategy arguments
|
53 |
+
solver = DifferentialEvolutionSolver(rosen,
|
54 |
+
self.bounds,
|
55 |
+
strategy='best1exp')
|
56 |
+
assert_equal(solver.strategy, 'best1exp')
|
57 |
+
assert_equal(solver.mutation_func.__name__, '_best1')
|
58 |
+
|
59 |
+
solver = DifferentialEvolutionSolver(rosen,
|
60 |
+
self.bounds,
|
61 |
+
strategy='best1bin')
|
62 |
+
assert_equal(solver.strategy, 'best1bin')
|
63 |
+
assert_equal(solver.mutation_func.__name__, '_best1')
|
64 |
+
|
65 |
+
solver = DifferentialEvolutionSolver(rosen,
|
66 |
+
self.bounds,
|
67 |
+
strategy='rand1bin')
|
68 |
+
assert_equal(solver.strategy, 'rand1bin')
|
69 |
+
assert_equal(solver.mutation_func.__name__, '_rand1')
|
70 |
+
|
71 |
+
solver = DifferentialEvolutionSolver(rosen,
|
72 |
+
self.bounds,
|
73 |
+
strategy='rand1exp')
|
74 |
+
assert_equal(solver.strategy, 'rand1exp')
|
75 |
+
assert_equal(solver.mutation_func.__name__, '_rand1')
|
76 |
+
|
77 |
+
solver = DifferentialEvolutionSolver(rosen,
|
78 |
+
self.bounds,
|
79 |
+
strategy='rand2exp')
|
80 |
+
assert_equal(solver.strategy, 'rand2exp')
|
81 |
+
assert_equal(solver.mutation_func.__name__, '_rand2')
|
82 |
+
|
83 |
+
solver = DifferentialEvolutionSolver(rosen,
|
84 |
+
self.bounds,
|
85 |
+
strategy='best2bin')
|
86 |
+
assert_equal(solver.strategy, 'best2bin')
|
87 |
+
assert_equal(solver.mutation_func.__name__, '_best2')
|
88 |
+
|
89 |
+
solver = DifferentialEvolutionSolver(rosen,
|
90 |
+
self.bounds,
|
91 |
+
strategy='rand2bin')
|
92 |
+
assert_equal(solver.strategy, 'rand2bin')
|
93 |
+
assert_equal(solver.mutation_func.__name__, '_rand2')
|
94 |
+
|
95 |
+
solver = DifferentialEvolutionSolver(rosen,
|
96 |
+
self.bounds,
|
97 |
+
strategy='rand2exp')
|
98 |
+
assert_equal(solver.strategy, 'rand2exp')
|
99 |
+
assert_equal(solver.mutation_func.__name__, '_rand2')
|
100 |
+
|
101 |
+
solver = DifferentialEvolutionSolver(rosen,
|
102 |
+
self.bounds,
|
103 |
+
strategy='randtobest1bin')
|
104 |
+
assert_equal(solver.strategy, 'randtobest1bin')
|
105 |
+
assert_equal(solver.mutation_func.__name__, '_randtobest1')
|
106 |
+
|
107 |
+
solver = DifferentialEvolutionSolver(rosen,
|
108 |
+
self.bounds,
|
109 |
+
strategy='randtobest1exp')
|
110 |
+
assert_equal(solver.strategy, 'randtobest1exp')
|
111 |
+
assert_equal(solver.mutation_func.__name__, '_randtobest1')
|
112 |
+
|
113 |
+
solver = DifferentialEvolutionSolver(rosen,
|
114 |
+
self.bounds,
|
115 |
+
strategy='currenttobest1bin')
|
116 |
+
assert_equal(solver.strategy, 'currenttobest1bin')
|
117 |
+
assert_equal(solver.mutation_func.__name__, '_currenttobest1')
|
118 |
+
|
119 |
+
solver = DifferentialEvolutionSolver(rosen,
|
120 |
+
self.bounds,
|
121 |
+
strategy='currenttobest1exp')
|
122 |
+
assert_equal(solver.strategy, 'currenttobest1exp')
|
123 |
+
assert_equal(solver.mutation_func.__name__, '_currenttobest1')
|
124 |
+
|
125 |
+
    def test__mutate1(self):
        # strategies */1/*, i.e. rand/1/bin, best/1/exp, etc.
        # population is [0.1, ..., 0.7]; best1 = 0.1 + 0.5*(0.3 - 0.4)
        result = np.array([0.05])
        trial = self.dummy_solver2._best1((2, 3, 4, 5, 6))
        assert_allclose(trial, result)

        # rand1 = 0.3 + 0.5*(0.4 - 0.5)
        result = np.array([0.25])
        trial = self.dummy_solver2._rand1((2, 3, 4, 5, 6))
        assert_allclose(trial, result)
|
134 |
+
|
135 |
+
    def test__mutate2(self):
        # strategies */2/*, i.e. rand/2/bin, best/2/exp, etc.
        # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]

        # best2 = 0.1 + 0.5*(0.3 + 0.4 - 0.5 - 0.6)
        result = np.array([-0.1])
        trial = self.dummy_solver2._best2((2, 3, 4, 5, 6))
        assert_allclose(trial, result)

        # rand2 = 0.3 + 0.5*(0.4 + 0.5 - 0.6 - 0.7)
        result = np.array([0.1])
        trial = self.dummy_solver2._rand2((2, 3, 4, 5, 6))
        assert_allclose(trial, result)
|
146 |
+
|
147 |
+
    def test__randtobest1(self):
        # strategies randtobest/1/*
        # expected: 0.3 + 0.5*(0.1 - 0.3) + 0.5*(0.4 - 0.5) = 0.15
        result = np.array([0.15])
        trial = self.dummy_solver2._randtobest1((2, 3, 4, 5, 6))
        assert_allclose(trial, result)
|
152 |
+
|
153 |
+
    def test__currenttobest1(self):
        # strategies currenttobest/1/*
        # expected: 0.2 + 0.5*(0.1 - 0.2) + 0.5*(0.3 - 0.4) = 0.1
        result = np.array([0.1])
        trial = self.dummy_solver2._currenttobest1(1, (2, 3, 4, 5, 6))
        assert_allclose(trial, result)
|
158 |
+
|
159 |
+
    def test_can_init_with_dithering(self):
        # a (low, high) mutation tuple enables dithering
        mutation = (0.5, 1)
        solver = DifferentialEvolutionSolver(self.quadratic,
                                             self.bounds,
                                             mutation=mutation)

        assert_equal(solver.dither, list(mutation))
|
166 |
+
|
167 |
+
def test_invalid_mutation_values_arent_accepted(self):
|
168 |
+
func = rosen
|
169 |
+
mutation = (0.5, 3)
|
170 |
+
assert_raises(ValueError,
|
171 |
+
DifferentialEvolutionSolver,
|
172 |
+
func,
|
173 |
+
self.bounds,
|
174 |
+
mutation=mutation)
|
175 |
+
|
176 |
+
mutation = (-1, 1)
|
177 |
+
assert_raises(ValueError,
|
178 |
+
DifferentialEvolutionSolver,
|
179 |
+
func,
|
180 |
+
self.bounds,
|
181 |
+
mutation=mutation)
|
182 |
+
|
183 |
+
mutation = (0.1, np.nan)
|
184 |
+
assert_raises(ValueError,
|
185 |
+
DifferentialEvolutionSolver,
|
186 |
+
func,
|
187 |
+
self.bounds,
|
188 |
+
mutation=mutation)
|
189 |
+
|
190 |
+
mutation = 0.5
|
191 |
+
solver = DifferentialEvolutionSolver(func,
|
192 |
+
self.bounds,
|
193 |
+
mutation=mutation)
|
194 |
+
assert_equal(0.5, solver.scale)
|
195 |
+
assert_equal(None, solver.dither)
|
196 |
+
|
197 |
+
    def test_invalid_functional(self):
        # an objective that returns a vector (not a scalar) must raise
        def func(x):
            return np.array([np.sum(x ** 2), np.sum(x)])

        with assert_raises(
                RuntimeError,
                match=r"func\(x, \*args\) must return a scalar value"):
            differential_evolution(func, [(-2, 2), (-2, 2)])
|
205 |
+
|
206 |
+
    def test__scale_parameters(self):
        # 0.3 on the unit interval maps to 30 on the [0, 100] bounds
        trial = np.array([0.3])
        assert_equal(30, self.dummy_solver._scale_parameters(trial))

        # it should also work with the limits reversed
        self.dummy_solver.limits = np.array([[100], [0.]])
        assert_equal(30, self.dummy_solver._scale_parameters(trial))
|
213 |
+
|
214 |
+
    def test__unscale_parameters(self):
        # 30 on the [0, 100] bounds maps back to 0.3 on the unit interval
        trial = np.array([30])
        assert_equal(0.3, self.dummy_solver._unscale_parameters(trial))

        # it should also work with the limits reversed
        self.dummy_solver.limits = np.array([[100], [0.]])
        assert_equal(0.3, self.dummy_solver._unscale_parameters(trial))
|
221 |
+
|
222 |
+
    def test_equal_bounds(self):
        # degenerate (lower == upper) bounds must not cause an invalid
        # floating-point operation (division by a zero range)
        with np.errstate(invalid='raise'):
            solver = DifferentialEvolutionSolver(
                self.quadratic,
                bounds=[(2.0, 2.0), (1.0, 3.0)]
            )
            v = solver._unscale_parameters([2.0, 2.0])
            assert_allclose(v, 0.5)

        # fully degenerate bounds pin the solution to the only feasible point
        res = differential_evolution(self.quadratic, [(2.0, 2.0), (3.0, 3.0)])
        assert_equal(res.x, [2.0, 3.0])
|
233 |
+
|
234 |
+
    def test__ensure_constraint(self):
        # entries outside [0, 1] are brought back into range in place
        trial = np.array([1.1, -100, 0.9, 2., 300., -0.00001])
        self.dummy_solver._ensure_constraint(trial)

        # in-range entries must be left untouched
        assert_equal(trial[2], 0.9)
        assert_(np.logical_and(trial >= 0, trial <= 1).all())
|
240 |
+
|
241 |
+
def test_differential_evolution(self):
|
242 |
+
# test that the Jmin of DifferentialEvolutionSolver
|
243 |
+
# is the same as the function evaluation
|
244 |
+
solver = DifferentialEvolutionSolver(
|
245 |
+
self.quadratic, [(-2, 2)], maxiter=1, polish=False
|
246 |
+
)
|
247 |
+
result = solver.solve()
|
248 |
+
assert_equal(result.fun, self.quadratic(result.x))
|
249 |
+
|
250 |
+
solver = DifferentialEvolutionSolver(
|
251 |
+
self.quadratic, [(-2, 2)], maxiter=1, polish=True
|
252 |
+
)
|
253 |
+
result = solver.solve()
|
254 |
+
assert_equal(result.fun, self.quadratic(result.x))
|
255 |
+
|
256 |
+
    def test_best_solution_retrieval(self):
        # test that the getter property method for the best solution works.
        solver = DifferentialEvolutionSolver(self.quadratic, [(-2, 2)])
        result = solver.solve()
        assert_equal(result.x, solver.x)
|
261 |
+
|
262 |
+
    def test_intermediate_result(self):
        # Check that intermediate result object passed into the callback
        # function contains the expected information and that raising
        # `StopIteration` causes the expected behavior.
        maxiter = 10

        def func(x):
            # objective that records the best (x, f) it has seen so far
            val = rosen(x)
            if val < func.val:
                func.x = x
                func.val = val
            return val
        func.x = None
        func.val = np.inf

        def callback(intermediate_result):
            callback.nit += 1
            callback.intermediate_result = intermediate_result
            assert intermediate_result.population.ndim == 2
            assert intermediate_result.population.shape[1] == 2
            assert intermediate_result.nit == callback.nit

            # Check that `x` and `fun` attributes are the best found so far
            assert_equal(intermediate_result.x, callback.func.x)
            assert_equal(intermediate_result.fun, callback.func.val)

            # Check for consistency between `fun`, `population_energies`,
            # `x`, and `population`
            assert_equal(intermediate_result.fun, rosen(intermediate_result.x))
            for i in range(len(intermediate_result.population_energies)):
                res = intermediate_result.population_energies[i]
                ref = rosen(intermediate_result.population[i])
                assert_equal(res, ref)
            assert_equal(intermediate_result.x,
                         intermediate_result.population[0])
            assert_equal(intermediate_result.fun,
                         intermediate_result.population_energies[0])

            assert intermediate_result.message == 'in progress'
            assert intermediate_result.success is True
            assert isinstance(intermediate_result, OptimizeResult)
            if callback.nit == maxiter:
                raise StopIteration
        callback.nit = 0
        callback.intermediate_result = None
        callback.func = func

        bounds = [(0, 2), (0, 2)]
        kwargs = dict(func=func, bounds=bounds, seed=838245, polish=False)
        res = differential_evolution(**kwargs, callback=callback)
        ref = differential_evolution(**kwargs, maxiter=maxiter)

        # Check that final `intermediate_result` is equivalent to returned
        # result object and that terminating with callback `StopIteration`
        # after `maxiter` iterations is equivalent to terminating with
        # `maxiter` parameter.
        assert res.success is ref.success is False
        assert callback.nit == res.nit == maxiter
        assert res.message == 'callback function requested stop early'
        assert ref.message == 'Maximum number of iterations has been exceeded.'
        for field, val in ref.items():
            if field in {'message', 'success'}:  # checked separately
                continue
            assert_equal(callback.intermediate_result[field], val)
            assert_equal(res[field], val)

        # Check that polish occurs after `StopIteration` as advertised
        callback.nit = 0
        func.val = np.inf
        kwargs['polish'] = True
        res = differential_evolution(**kwargs, callback=callback)
        assert res.fun < ref.fun
|
334 |
+
|
335 |
+
    def test_callback_terminates(self):
        # test that if the callback returns true, then the minimization halts
        bounds = [(0, 2), (0, 2)]
        expected_msg = 'callback function requested stop early'

        def callback_python_true(param, convergence=0.):
            return True

        result = differential_evolution(
            rosen, bounds, callback=callback_python_true
        )
        assert_string_equal(result.message, expected_msg)

        # if callback raises StopIteration then solve should be interrupted
        def callback_stop(intermediate_result):
            raise StopIteration

        result = differential_evolution(rosen, bounds, callback=callback_stop)
        assert not result.success

        def callback_evaluates_true(param, convergence=0.):
            # DE should stop if bool(self.callback) is True
            return [10]

        result = differential_evolution(rosen, bounds, callback=callback_evaluates_true)
        assert_string_equal(result.message, expected_msg)
        assert not result.success

        # a falsy return value must NOT stop the minimization
        def callback_evaluates_false(param, convergence=0.):
            return []

        result = differential_evolution(rosen, bounds,
                                        callback=callback_evaluates_false)
        assert result.success
|
368 |
+
|
369 |
+
def test_args_tuple_is_passed(self):
|
370 |
+
# test that the args tuple is passed to the cost function properly.
|
371 |
+
bounds = [(-10, 10)]
|
372 |
+
args = (1., 2., 3.)
|
373 |
+
|
374 |
+
def quadratic(x, *args):
|
375 |
+
if type(args) != tuple:
|
376 |
+
raise ValueError('args should be a tuple')
|
377 |
+
return args[0] + args[1] * x + args[2] * x**2.
|
378 |
+
|
379 |
+
result = differential_evolution(quadratic,
|
380 |
+
bounds,
|
381 |
+
args=args,
|
382 |
+
polish=True)
|
383 |
+
assert_almost_equal(result.fun, 2 / 3.)
|
384 |
+
|
385 |
+
def test_init_with_invalid_strategy(self):
|
386 |
+
# test that passing an invalid strategy raises ValueError
|
387 |
+
func = rosen
|
388 |
+
bounds = [(-3, 3)]
|
389 |
+
assert_raises(ValueError,
|
390 |
+
differential_evolution,
|
391 |
+
func,
|
392 |
+
bounds,
|
393 |
+
strategy='abc')
|
394 |
+
|
395 |
+
def test_bounds_checking(self):
    """Malformed bounds must raise; a Bounds object must be accepted."""
    # test that the bounds checking works
    func = rosen
    # a bare scalar instead of a (min, max) pair is invalid
    bounds = [(-3)]
    assert_raises(ValueError,
                  differential_evolution,
                  func,
                  bounds)
    # a 3-tuple entry is also invalid
    bounds = [(-3, 3), (3, 4, 5)]
    assert_raises(ValueError,
                  differential_evolution,
                  func,
                  bounds)

    # test that we can use a new-type Bounds object
    result = differential_evolution(rosen, Bounds([0, 0], [2, 2]))
    assert_almost_equal(result.x, (1., 1.))
|
412 |
+
|
413 |
+
def test_select_samples(self):
|
414 |
+
# select_samples should return 5 separate random numbers.
|
415 |
+
limits = np.arange(12., dtype='float64').reshape(2, 6)
|
416 |
+
bounds = list(zip(limits[0, :], limits[1, :]))
|
417 |
+
solver = DifferentialEvolutionSolver(None, bounds, popsize=1)
|
418 |
+
candidate = 0
|
419 |
+
r1, r2, r3, r4, r5 = solver._select_samples(candidate, 5)
|
420 |
+
assert_equal(
|
421 |
+
len(np.unique(np.array([candidate, r1, r2, r3, r4, r5]))), 6)
|
422 |
+
|
423 |
+
def test_maxiter_stops_solve(self):
    """Exceeding maxiter must terminate the solve unsuccessfully."""
    # test that if the maximum number of iterations is exceeded
    # the solver stops.
    solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=1)
    result = solver.solve()
    assert_equal(result.success, False)
    assert_equal(result.message,
                 'Maximum number of iterations has been exceeded.')
|
431 |
+
|
432 |
+
def test_maxfun_stops_solve(self):
    """Exceeding maxfun must stop the solver, in all updating modes."""
    # test that if the maximum number of function evaluations is exceeded
    # during initialisation the solver stops
    solver = DifferentialEvolutionSolver(rosen, self.bounds, maxfun=1,
                                         polish=False)
    result = solver.solve()

    assert_equal(result.nfev, 2)
    assert_equal(result.success, False)
    assert_equal(result.message,
                 'Maximum number of function evaluations has '
                 'been exceeded.')

    # test that if the maximum number of function evaluations is exceeded
    # during the actual minimisation, then the solver stops.
    # Have to turn polishing off, as this will still occur even if maxfun
    # is reached. For popsize=5 and len(bounds)=2, then there are only 10
    # function evaluations during initialisation.
    solver = DifferentialEvolutionSolver(rosen,
                                         self.bounds,
                                         popsize=5,
                                         polish=False,
                                         maxfun=40)
    result = solver.solve()

    assert_equal(result.nfev, 41)
    assert_equal(result.success, False)
    assert_equal(result.message,
                 'Maximum number of function evaluations has '
                 'been exceeded.')

    # now repeat for the updating='deferred' version.
    # 47 function evaluations is not a multiple of the population size,
    # so maxfun is reached partway through a population evaluation.
    solver = DifferentialEvolutionSolver(rosen,
                                         self.bounds,
                                         popsize=5,
                                         polish=False,
                                         maxfun=47,
                                         updating='deferred')
    result = solver.solve()

    assert_equal(result.nfev, 47)
    assert_equal(result.success, False)
    assert_equal(result.message,
                 'Maximum number of function evaluations has '
                 'been reached.')
|
479 |
+
|
480 |
+
def test_quadratic(self):
    """Solver run via the object interface on the quadratic fixture."""
    # test the quadratic function from object
    solver = DifferentialEvolutionSolver(self.quadratic,
                                         [(-100, 100)],
                                         tol=0.02)
    solver.solve()
    # the best solution is promoted to index 0 of the population
    assert_equal(np.argmin(solver.population_energies), 0)
|
487 |
+
|
488 |
+
def test_quadratic_from_diff_ev(self):
    """Smoke test: quadratic fixture via the functional interface."""
    # test the quadratic function from differential_evolution function
    differential_evolution(self.quadratic,
                           [(-100, 100)],
                           tol=0.02)
|
493 |
+
|
494 |
+
def test_seed_gives_repeatability(self):
    """Identical seeds must give identical solutions and nfev counts."""
    result = differential_evolution(self.quadratic,
                                    [(-100, 100)],
                                    polish=False,
                                    seed=1,
                                    tol=0.5)
    result2 = differential_evolution(self.quadratic,
                                     [(-100, 100)],
                                     polish=False,
                                     seed=1,
                                     tol=0.5)
    assert_equal(result.x, result2.x)
    assert_equal(result.nfev, result2.nfev)
|
507 |
+
|
508 |
+
def test_random_generator(self):
    """A np.random.Generator must be usable as the seed for every init."""
    # check that np.random.Generator can be used (numpy >= 1.17)
    # obtain a np.random.Generator object
    rng = np.random.default_rng()

    inits = ['random', 'latinhypercube', 'sobol', 'halton']
    for init in inits:
        differential_evolution(self.quadratic,
                               [(-100, 100)],
                               polish=False,
                               seed=rng,
                               tol=0.5,
                               init=init)
|
521 |
+
|
522 |
+
def test_exp_runs(self):
    """Smoke test that the exponential mutation loop executes."""
    # test whether exponential mutation loop runs
    solver = DifferentialEvolutionSolver(rosen,
                                         self.bounds,
                                         strategy='best1exp',
                                         maxiter=1)

    solver.solve()
|
530 |
+
|
531 |
+
def test_gh_4511_regression(self):
|
532 |
+
# This modification of the differential evolution docstring example
|
533 |
+
# uses a custom popsize that had triggered an off-by-one error.
|
534 |
+
# Because we do not care about solving the optimization problem in
|
535 |
+
# this test, we use maxiter=1 to reduce the testing time.
|
536 |
+
bounds = [(-5, 5), (-5, 5)]
|
537 |
+
# result = differential_evolution(rosen, bounds, popsize=1815,
|
538 |
+
# maxiter=1)
|
539 |
+
|
540 |
+
# the original issue arose because of rounding error in arange, with
|
541 |
+
# linspace being a much better solution. 1815 is quite a large popsize
|
542 |
+
# to use and results in a long test time (~13s). I used the original
|
543 |
+
# issue to figure out the lowest number of samples that would cause
|
544 |
+
# this rounding error to occur, 49.
|
545 |
+
differential_evolution(rosen, bounds, popsize=49, maxiter=1)
|
546 |
+
|
547 |
+
def test_calculate_population_energies(self):
    """Energy evaluation promotes the best member and counts nfev."""
    # if popsize is 3, then the overall generation has size (6,)
    solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3)
    solver._calculate_population_energies(solver.population)
    solver._promote_lowest_energy()
    assert_equal(np.argmin(solver.population_energies), 0)

    # initial calculation of the energies should require 6 nfev.
    assert_equal(solver._nfev, 6)
|
556 |
+
|
557 |
+
def test_iteration(self):
    """The solver object must be iterable, one generation per step."""
    # test that DifferentialEvolutionSolver is iterable
    # if popsize is 3, then the overall generation has size (6,)
    solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3,
                                         maxfun=12)
    x, fun = next(solver)
    assert_equal(np.size(x, 0), 2)

    # 6 nfev are required for initial calculation of energies, 6 nfev are
    # required for the evolution of the 6 population members.
    assert_equal(solver._nfev, 12)

    # the next generation should halt because it exceeds maxfun
    assert_raises(StopIteration, next, solver)

    # check a proper minimisation can be done by an iterable solver
    solver = DifferentialEvolutionSolver(rosen, self.bounds)
    _, fun_prev = next(solver)
    for i, soln in enumerate(solver):
        x_current, fun_current = soln
        # best energy must be monotonically non-increasing
        assert fun_prev >= fun_current
        _, fun_prev = x_current, fun_current
        # need to have this otherwise the solver would never stop.
        if i == 50:
            break
|
582 |
+
|
583 |
+
def test_convergence(self):
    """After solving, the convergence measure must be below tol."""
    solver = DifferentialEvolutionSolver(rosen, self.bounds, tol=0.2,
                                         polish=False)
    solver.solve()
    assert_(solver.convergence < 0.2)
|
588 |
+
|
589 |
+
def test_maxiter_none_GH5731(self):
    """maxiter=None / maxfun=None (pre-0.17 defaults) must still work."""
    # Pre 0.17 the previous default for maxiter and maxfun was None.
    # the numerical defaults are now 1000 and np.inf. However, some scripts
    # will still supply None for both of those, this will raise a TypeError
    # in the solve method.
    solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=None,
                                         maxfun=None)
    solver.solve()
|
597 |
+
|
598 |
+
def test_population_initiation(self):
    """Exercise every supported mode of population initiation."""
    # test the different modes of population initiation

    # init must name a recognised initialisation scheme ('latinhypercube',
    # 'random', 'sobol', 'halton') or be an array; a ValueError is raised
    # if something else is passed in
    assert_raises(ValueError,
                  DifferentialEvolutionSolver,
                  *(rosen, self.bounds),
                  **{'init': 'rubbish'})

    solver = DifferentialEvolutionSolver(rosen, self.bounds)

    # check that population initiation:
    # 1) resets _nfev to 0
    # 2) all population energies are np.inf
    solver.init_population_random()
    assert_equal(solver._nfev, 0)
    assert_(np.all(np.isinf(solver.population_energies)))

    solver.init_population_lhs()
    assert_equal(solver._nfev, 0)
    assert_(np.all(np.isinf(solver.population_energies)))

    solver.init_population_qmc(qmc_engine='halton')
    assert_equal(solver._nfev, 0)
    assert_(np.all(np.isinf(solver.population_energies)))

    solver = DifferentialEvolutionSolver(rosen, self.bounds, init='sobol')
    solver.init_population_qmc(qmc_engine='sobol')
    assert_equal(solver._nfev, 0)
    assert_(np.all(np.isinf(solver.population_energies)))

    # we should be able to initialize with our own array
    population = np.linspace(-1, 3, 10).reshape(5, 2)
    solver = DifferentialEvolutionSolver(rosen, self.bounds,
                                         init=population,
                                         strategy='best2bin',
                                         atol=0.01, seed=1, popsize=5)

    assert_equal(solver._nfev, 0)
    assert_(np.all(np.isinf(solver.population_energies)))
    assert_(solver.num_population_members == 5)
    assert_(solver.population_shape == (5, 2))

    # check that the population was initialized correctly
    unscaled_population = np.clip(solver._unscale_parameters(population),
                                  0, 1)
    assert_almost_equal(solver.population[:5], unscaled_population)

    # population values need to be clipped to bounds
    assert_almost_equal(np.min(solver.population[:5]), 0)
    assert_almost_equal(np.max(solver.population[:5]), 1)

    # shouldn't be able to initialize with an array if it's the wrong shape
    # this would have too many parameters
    population = np.linspace(-1, 3, 15).reshape(5, 3)
    assert_raises(ValueError,
                  DifferentialEvolutionSolver,
                  *(rosen, self.bounds),
                  **{'init': population})

    # provide an initial solution
    # bounds are [(0, 2), (0, 2)]
    x0 = np.random.uniform(low=0.0, high=2.0, size=2)
    solver = DifferentialEvolutionSolver(
        rosen, self.bounds, x0=x0
    )
    # parameters are scaled to unit interval
    assert_allclose(solver.population[0], x0 / 2.0)
|
667 |
+
|
668 |
+
def test_x0(self):
    """x0 must be accepted in-bounds and rejected out-of-bounds."""
    # smoke test that checks that x0 is usable.
    res = differential_evolution(rosen, self.bounds, x0=[0.2, 0.8])
    assert res.success

    # check what happens if some of the x0 lay outside the bounds
    with assert_raises(ValueError):
        differential_evolution(rosen, self.bounds, x0=[0.2, 2.1])
|
676 |
+
|
677 |
+
def test_infinite_objective_function(self):
|
678 |
+
# Test that there are no problems if the objective function
|
679 |
+
# returns inf on some runs
|
680 |
+
def sometimes_inf(x):
|
681 |
+
if x[0] < .5:
|
682 |
+
return np.inf
|
683 |
+
return x[1]
|
684 |
+
bounds = [(0, 1), (0, 1)]
|
685 |
+
differential_evolution(sometimes_inf, bounds=bounds, disp=False)
|
686 |
+
|
687 |
+
def test_deferred_updating(self):
    """updating='deferred' with default workers uses the builtin map."""
    # check setting of deferred updating, with default workers
    bounds = [(0., 2.), (0., 2.)]
    solver = DifferentialEvolutionSolver(rosen, bounds, updating='deferred')
    assert_(solver._updating == 'deferred')
    assert_(solver._mapwrapper._mapfunc is map)
    solver.solve()
|
694 |
+
|
695 |
+
def test_immediate_updating(self):
    """Default updating is 'immediate'; workers override it with a warning."""
    # check setting of immediate updating, with default workers
    bounds = [(0., 2.), (0., 2.)]
    solver = DifferentialEvolutionSolver(rosen, bounds)
    assert_(solver._updating == 'immediate')

    # Safely forking from a multithreaded process is
    # problematic, and deprecated in Python 3.12, so
    # we use a slower but portable alternative
    # see gh-19848
    ctx = multiprocessing.get_context("spawn")
    with ctx.Pool(2) as p:
        # should raise a UserWarning because the updating='immediate'
        # is being overridden by the workers keyword
        with warns(UserWarning):
            with DifferentialEvolutionSolver(rosen, bounds, workers=p.map) as s:
                pass
        assert s._updating == 'deferred'
|
713 |
+
|
714 |
+
def test_parallel(self):
    """Smoke test for parallelization with deferred updating."""
    # smoke test for parallelization with deferred updating
    bounds = [(0., 2.), (0., 2.)]
    # explicit Pool.map as the workers callable
    with multiprocessing.Pool(2) as p, DifferentialEvolutionSolver(
            rosen, bounds, updating='deferred', workers=p.map) as solver:
        assert_(solver._mapwrapper.pool is not None)
        assert_(solver._updating == 'deferred')
        solver.solve()

    # integer workers creates an internal pool
    with DifferentialEvolutionSolver(rosen, bounds, updating='deferred',
                                     workers=2) as solver:
        assert_(solver._mapwrapper.pool is not None)
        assert_(solver._updating == 'deferred')
        solver.solve()
|
728 |
+
|
729 |
+
def test_converged(self):
    """converged() must report True after a successful solve."""
    solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)])
    solver.solve()
    assert_(solver.converged())
|
733 |
+
|
734 |
+
def test_constraint_violation_fn(self):
    """_constraint_violation_fn for single and vectorised solutions."""
    def constr_f(x):
        return [x[0] + x[1]]

    def constr_f2(x):
        return np.array([x[0]**2 + x[1], x[0] - x[1]])

    nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)

    solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
                                         constraints=(nlc))

    # x = [1, 1] gives constr_f = 2.0, i.e. 0.1 above the upper bound
    cv = solver._constraint_violation_fn(np.array([1.0, 1.0]))
    assert_almost_equal(cv, 0.1)

    nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8)
    solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
                                         constraints=(nlc, nlc2))

    # for multiple constraints the constraint violations should
    # be concatenated.
    xs = [(1.2, 1), (2.0, 2.0), (0.5, 0.5)]
    vs = [(0.3, 0.64, 0.0), (2.1, 4.2, 0.0), (0, 0, 0)]

    for x, v in zip(xs, vs):
        cv = solver._constraint_violation_fn(np.array(x))
        assert_allclose(cv, np.atleast_2d(v))

    # vectorized calculation of a series of solutions
    assert_allclose(
        solver._constraint_violation_fn(np.array(xs)), np.array(vs)
    )

    # the following line is used in _calculate_population_feasibilities.
    # _constraint_violation_fn returns an (1, M) array when
    # x.shape == (N,), i.e. a single solution. Therefore this list
    # comprehension should generate (S, 1, M) array.
    constraint_violation = np.array([solver._constraint_violation_fn(x)
                                     for x in np.array(xs)])
    assert constraint_violation.shape == (3, 1, 3)

    # we need reasonable error messages if the constraint function doesn't
    # return the right thing
    def constr_f3(x):
        # returns (S, M), rather than (M, S)
        return constr_f2(x).T

    nlc2 = NonlinearConstraint(constr_f3, -np.inf, 1.8)
    solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
                                         constraints=(nlc, nlc2),
                                         vectorized=False)
    solver.vectorized = True
    with pytest.raises(
            RuntimeError, match="An array returned from a Constraint"
    ):
        solver._constraint_violation_fn(np.array(xs))
|
790 |
+
|
791 |
+
def test_constraint_population_feasibilities(self):
    """Feasibility flags and violation arrays for whole populations."""
    def constr_f(x):
        return [x[0] + x[1]]

    def constr_f2(x):
        return [x[0]**2 + x[1], x[0] - x[1]]

    nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)

    solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
                                         constraints=(nlc))

    # are population feasibilities correct
    # [0.5, 0.5] corresponds to scaled values of [1., 1.]
    feas, cv = solver._calculate_population_feasibilities(
        np.array([[0.5, 0.5], [1., 1.]]))
    assert_equal(feas, [False, False])
    assert_almost_equal(cv, np.array([[0.1], [2.1]]))
    assert cv.shape == (2, 1)

    nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8)

    # the result must not depend on whether evaluation is vectorized
    for vectorize in [False, True]:
        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
                                             constraints=(nlc, nlc2),
                                             vectorized=vectorize,
                                             updating='deferred')

        feas, cv = solver._calculate_population_feasibilities(
            np.array([[0.5, 0.5], [0.6, 0.5]]))
        assert_equal(feas, [False, False])
        assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [0.3, 0.64, 0]]))

        feas, cv = solver._calculate_population_feasibilities(
            np.array([[0.5, 0.5], [1., 1.]]))
        assert_equal(feas, [False, False])
        assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [2.1, 4.2, 0]]))
        assert cv.shape == (2, 3)

        feas, cv = solver._calculate_population_feasibilities(
            np.array([[0.25, 0.25], [1., 1.]]))
        assert_equal(feas, [True, False])
        assert_almost_equal(cv, np.array([[0.0, 0.0, 0.], [2.1, 4.2, 0]]))
        assert cv.shape == (2, 3)
|
835 |
+
|
836 |
+
def test_constraint_solve(self):
    """A constrained solve must respect the constraint and succeed."""
    def constr_f(x):
        return np.array([x[0] + x[1]])

    nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)

    solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
                                         constraints=(nlc))

    # trust-constr warns if the constraint function is linear
    with warns(UserWarning):
        res = solver.solve()

    assert constr_f(res.x) <= 1.9
    assert res.success
|
851 |
+
|
852 |
+
def test_impossible_constraint(self):
    """An unsatisfiable constraint must yield an unsuccessful result."""
    def constr_f(x):
        # x >= 0 within the bounds, so x[0] + x[1] <= -1 is impossible
        return np.array([x[0] + x[1]])

    nlc = NonlinearConstraint(constr_f, -np.inf, -1)

    solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
                                         constraints=(nlc), popsize=3,
                                         seed=1)

    # a UserWarning is issued because the 'trust-constr' polishing is
    # attempted on the least infeasible solution found.
    with warns(UserWarning):
        res = solver.solve()

    assert res.maxcv > 0
    assert not res.success

    # test _promote_lowest_energy works when none of the population is
    # feasible. In this case, the solution with the lowest constraint
    # violation should be promoted.
    solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
                                         constraints=(nlc), polish=False)
    next(solver)
    assert not solver.feasible.all()
    assert not np.isfinite(solver.population_energies).all()

    # now swap two of the entries in the population
    # (NOTE: `l` is an E741-ambiguous name; kept for byte-compatibility)
    l = 20
    cv = solver.constraint_violation[0]

    solver.population_energies[[0, l]] = solver.population_energies[[l, 0]]
    solver.population[[0, l], :] = solver.population[[l, 0], :]
    solver.constraint_violation[[0, l], :] = (
        solver.constraint_violation[[l, 0], :])

    solver._promote_lowest_energy()
    # the entry with the lowest violation must be back at index 0
    assert_equal(solver.constraint_violation[0], cv)
|
890 |
+
|
891 |
+
def test_accept_trial(self):
    """Selection rules of _accept_trial for feasible/infeasible pairs."""
    # _accept_trial(self, energy_trial, feasible_trial, cv_trial,
    # energy_orig, feasible_orig, cv_orig)
    def constr_f(x):
        return [x[0] + x[1]]
    nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)
    solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
                                         constraints=(nlc))
    fn = solver._accept_trial
    # both solutions are feasible, select lower energy
    assert fn(0.1, True, np.array([0.]), 1.0, True, np.array([0.]))
    assert (fn(1.0, True, np.array([0.0]), 0.1, True, np.array([0.0])) is False)
    assert fn(0.1, True, np.array([0.]), 0.1, True, np.array([0.]))

    # trial is feasible, original is not
    assert fn(9.9, True, np.array([0.]), 1.0, False, np.array([1.]))

    # trial and original are infeasible
    # cv_trial have to be <= cv_original to be better
    assert (fn(0.1, False, np.array([0.5, 0.5]),
               1.0, False, np.array([1., 1.0])))
    assert (fn(0.1, False, np.array([0.5, 0.5]),
               1.0, False, np.array([1., 0.50])))
    assert not (fn(1.0, False, np.array([0.5, 0.5]),
                   1.0, False, np.array([1.0, 0.4])))
|
916 |
+
|
917 |
+
def test_constraint_wrapper(self):
    """_ConstraintWrapper.violation for Bounds/Linear/Nonlinear inputs."""
    lb = np.array([0, 20, 30])
    ub = np.array([0.5, np.inf, 70])
    x0 = np.array([1, 2, 3])
    pc = _ConstraintWrapper(Bounds(lb, ub), x0)
    assert (pc.violation(x0) > 0).any()
    assert (pc.violation([0.25, 21, 31]) == 0).all()

    # check vectorized Bounds constraint
    xs = np.arange(1, 16).reshape(5, 3)
    violations = []
    for x in xs:
        violations.append(pc.violation(x))
    np.testing.assert_allclose(pc.violation(xs.T), np.array(violations).T)

    x0 = np.array([1, 2, 3, 4])
    A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]])
    pc = _ConstraintWrapper(LinearConstraint(A, -np.inf, 0), x0)
    assert (pc.violation(x0) > 0).any()
    assert (pc.violation([-10, 2, -10, 4]) == 0).all()

    # check vectorized LinearConstraint, for 7 lots of parameter vectors
    # with each parameter vector being 4 long, with 3 constraints
    # xs is the same shape as stored in the differential evolution
    # population, but it's sent to the violation function as (len(x), M)
    xs = np.arange(1, 29).reshape(7, 4)
    violations = []
    for x in xs:
        violations.append(pc.violation(x))
    np.testing.assert_allclose(pc.violation(xs.T), np.array(violations).T)

    # a sparse constraint matrix must behave identically
    pc = _ConstraintWrapper(LinearConstraint(csr_matrix(A), -np.inf, 0),
                            x0)
    assert (pc.violation(x0) > 0).any()
    assert (pc.violation([-10, 2, -10, 4]) == 0).all()

    def fun(x):
        return A.dot(x)

    # same constraint expressed through a NonlinearConstraint
    nonlinear = NonlinearConstraint(fun, -np.inf, 0)
    pc = _ConstraintWrapper(nonlinear, [-10, 2, -10, 4])
    assert (pc.violation(x0) > 0).any()
    assert (pc.violation([-10, 2, -10, 4]) == 0).all()
|
960 |
+
|
961 |
+
def test_constraint_wrapper_violation(self):
    """Violation values and shapes for a vectorised nonlinear constraint."""
    def cons_f(x):
        # written in vectorised form to accept an array of (N, S)
        # returning (M, S)
        # where N is the number of parameters,
        # S is the number of solution vectors to be examined,
        # and M is the number of constraint components
        return np.array([x[0] ** 2 + x[1],
                         x[0] ** 2 - x[1]])

    nlc = NonlinearConstraint(cons_f, [-1, -0.8500], [2, 2])
    pc = _ConstraintWrapper(nlc, [0.5, 1])
    assert np.size(pc.bounds[0]) == 2

    xs = [(0.5, 1), (0.5, 1.2), (1.2, 1.2), (0.1, -1.2), (0.1, 2.0)]
    vs = [(0, 0), (0, 0.1), (0.64, 0), (0.19, 0), (0.01, 1.14)]

    for x, v in zip(xs, vs):
        assert_allclose(pc.violation(x), v)

    # now check that we can vectorize the constraint wrapper
    assert_allclose(pc.violation(np.array(xs).T),
                    np.array(vs).T)
    assert pc.fun(np.array(xs).T).shape == (2, len(xs))
    assert pc.violation(np.array(xs).T).shape == (2, len(xs))
    assert pc.num_constr == 2
    assert pc.parameter_count == 2
|
988 |
+
|
989 |
+
def test_matrix_linear_constraint(self):
    """np.matrix-based LinearConstraint must give correctly-shaped cv."""
    # gh20041 supplying an np.matrix to construct a LinearConstraint caused
    # _ConstraintWrapper to start returning constraint violations of the
    # wrong shape.
    with suppress_warnings() as sup:
        # np.matrix construction emits a PendingDeprecationWarning
        sup.filter(PendingDeprecationWarning)
        matrix = np.matrix([[1, 1, 1, 1.],
                            [2, 2, 2, 2.]])
    lc = LinearConstraint(matrix, 0, 1)
    x0 = np.ones(4)
    cw = _ConstraintWrapper(lc, x0)
    # the shape of the constraint violation should be the same as the number
    # of constraints applied.
    assert cw.violation(x0).shape == (2,)

    # let's try a vectorised violation call.
    xtrial = np.arange(4 * 5).reshape(4, 5)
    assert cw.violation(xtrial).shape == (2, 5)
|
1007 |
+
|
1008 |
+
|
1009 |
+
def test_L1(self):
    """Lampinen ([5]) test problem 1, solved under linear constraints
    supplied as a dense matrix, a sparse matrix, and a mix of
    LinearConstraint/NonlinearConstraint objects.
    """

    def f(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        fun = np.sum(5*x[1:5]) - 5*x[1:5]@x[1:5] - np.sum(x[5:])
        return fun

    A = np.zeros((10, 14))  # 1-indexed to match reference
    A[1, [1, 2, 10, 11]] = 2, 2, 1, 1
    A[2, [1, 10]] = -8, 1
    A[3, [4, 5, 10]] = -2, -1, 1
    A[4, [1, 3, 10, 11]] = 2, 2, 1, 1
    A[5, [2, 11]] = -8, 1
    A[6, [6, 7, 11]] = -2, -1, 1
    A[7, [2, 3, 11, 12]] = 2, 2, 1, 1
    A[8, [3, 12]] = -8, 1
    A[9, [8, 9, 12]] = -2, -1, 1
    A = A[1:, 1:]

    b = np.array([10, 0, 0, 10, 0, 0, 10, 0, 0])

    L = LinearConstraint(A, -np.inf, b)

    bounds = [(0, 1)]*9 + [(0, 100)]*3 + [(0, 1)]

    # using a lower popsize to speed the test up
    res = differential_evolution(f, bounds, strategy='best1bin', seed=1234,
                                 constraints=(L), popsize=2)

    x_opt = (1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 1)
    f_opt = -15

    assert_allclose(f(x_opt), f_opt, atol=6e-4)
    assert res.success
    assert_allclose(res.x, x_opt, atol=6e-4)
    assert_allclose(res.fun, f_opt, atol=5e-3)
    # NOTE(review): "A @ res.x" restored here and below; the source text
    # had been garbled into an email-obfuscated placeholder.
    assert_(np.all(A @ res.x <= b))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))

    # now repeat the same solve, using the same overall constraints,
    # but using a sparse matrix for the LinearConstraint instead of an
    # array

    L = LinearConstraint(csr_matrix(A), -np.inf, b)

    # using a lower popsize to speed the test up
    res = differential_evolution(f, bounds, strategy='best1bin', seed=1234,
                                 constraints=(L), popsize=2)

    assert_allclose(f(x_opt), f_opt)
    assert res.success
    assert_allclose(res.x, x_opt, atol=5e-4)
    assert_allclose(res.fun, f_opt, atol=5e-3)
    assert_(np.all(A @ res.x <= b))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))

    # now repeat the same solve, using the same overall constraints,
    # but specify half the constraints in terms of LinearConstraint,
    # and the other half by NonlinearConstraint
    def c1(x):
        x = np.hstack(([0], x))
        return [2*x[2] + 2*x[3] + x[11] + x[12],
                -8*x[3] + x[12]]

    def c2(x):
        x = np.hstack(([0], x))
        return -2*x[8] - x[9] + x[12]

    L = LinearConstraint(A[:5, :], -np.inf, b[:5])
    L2 = LinearConstraint(A[5:6, :], -np.inf, b[5:6])
    N = NonlinearConstraint(c1, -np.inf, b[6:8])
    N2 = NonlinearConstraint(c2, -np.inf, b[8:9])
    constraints = (L, N, L2, N2)

    with suppress_warnings() as sup:
        sup.filter(UserWarning)
        res = differential_evolution(f, bounds, strategy='rand1bin',
                                     seed=1234, constraints=constraints,
                                     popsize=2)

    assert_allclose(res.x, x_opt, atol=6e-4)
    assert_allclose(res.fun, f_opt, atol=5e-3)
    assert_(np.all(A @ res.x <= b))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
1097 |
+
|
1098 |
+
def test_L2(self):
    """Lampinen ([5]) test problem 2: nonlinear inequality constraints."""
    # Lampinen ([5]) test problem 2

    def f(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        fun = ((x[1]-10)**2 + 5*(x[2]-12)**2 + x[3]**4 + 3*(x[4]-11)**2 +
               10*x[5]**6 + 7*x[6]**2 + x[7]**4 - 4*x[6]*x[7] - 10*x[6] -
               8*x[7])
        return fun

    def c1(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        return [127 - 2*x[1]**2 - 3*x[2]**4 - x[3] - 4*x[4]**2 - 5*x[5],
                196 - 23*x[1] - x[2]**2 - 6*x[6]**2 + 8*x[7],
                282 - 7*x[1] - 3*x[2] - 10*x[3]**2 - x[4] + x[5],
                -4*x[1]**2 - x[2]**2 + 3*x[1]*x[2] - 2*x[3]**2 -
                5*x[6] + 11*x[7]]

    N = NonlinearConstraint(c1, 0, np.inf)
    bounds = [(-10, 10)]*7
    constraints = (N)

    with suppress_warnings() as sup:
        sup.filter(UserWarning)
        res = differential_evolution(f, bounds, strategy='rand1bin',
                                     seed=1234, constraints=constraints)

    # reference optimum from the cited paper
    f_opt = 680.6300599487869
    x_opt = (2.330499, 1.951372, -0.4775414, 4.365726,
             -0.6244870, 1.038131, 1.594227)

    assert_allclose(f(x_opt), f_opt)
    assert_allclose(res.fun, f_opt)
    assert_allclose(res.x, x_opt, atol=1e-5)
    assert res.success
    assert_(np.all(np.array(c1(res.x)) >= 0))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
1136 |
+
|
1137 |
+
def test_L3(self):
    """Lampinen ([5]) test problem 3: mixed linear/nonlinear constraints."""
    # Lampinen ([5]) test problem 3

    def f(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        fun = (x[1]**2 + x[2]**2 + x[1]*x[2] - 14*x[1] - 16*x[2] +
               (x[3]-10)**2 + 4*(x[4]-5)**2 + (x[5]-3)**2 + 2*(x[6]-1)**2 +
               5*x[7]**2 + 7*(x[8]-11)**2 + 2*(x[9]-10)**2 +
               (x[10] - 7)**2 + 45
               )
        return fun  # maximize

    A = np.zeros((4, 11))
    A[1, [1, 2, 7, 8]] = -4, -5, 3, -9
    A[2, [1, 2, 7, 8]] = -10, 8, 17, -2
    A[3, [1, 2, 9, 10]] = 8, -2, -5, 2
    A = A[1:, 1:]
    b = np.array([-105, 0, -12])

    def c1(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        return [3*x[1] - 6*x[2] - 12*(x[9]-8)**2 + 7*x[10],
                -3*(x[1]-2)**2 - 4*(x[2]-3)**2 - 2*x[3]**2 + 7*x[4] + 120,
                -x[1]**2 - 2*(x[2]-2)**2 + 2*x[1]*x[2] - 14*x[5] + 6*x[6],
                -5*x[1]**2 - 8*x[2] - (x[3]-6)**2 + 2*x[4] + 40,
                -0.5*(x[1]-8)**2 - 2*(x[2]-4)**2 - 3*x[5]**2 + x[6] + 30]

    L = LinearConstraint(A, b, np.inf)
    N = NonlinearConstraint(c1, 0, np.inf)
    bounds = [(-10, 10)]*10
    constraints = (L, N)

    with suppress_warnings() as sup:
        sup.filter(UserWarning)
        res = differential_evolution(f, bounds, seed=1234,
                                     constraints=constraints, popsize=3)

    # reference optimum from the cited paper
    x_opt = (2.171996, 2.363683, 8.773926, 5.095984, 0.9906548,
             1.430574, 1.321644, 9.828726, 8.280092, 8.375927)
    f_opt = 24.3062091

    assert_allclose(f(x_opt), f_opt, atol=1e-5)
    assert_allclose(res.x, x_opt, atol=1e-6)
    assert_allclose(res.fun, f_opt, atol=1e-5)
    assert res.success
    assert_(np.all(A @ res.x >= b))
    assert_(np.all(np.array(c1(res.x)) >= 0))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
1186 |
+
|
1187 |
+
    def test_L4(self):
        """Constrained DE on Lampinen test problem 4 (8-D linear objective,
        three linear and three nonlinear inequality constraints)."""
        # Lampinen ([5]) test problem 4
        def f(x):
            return np.sum(x[:3])

        # linear constraint matrix assembled with 1-based rows/cols, then the
        # padding row/column is stripped off
        A = np.zeros((4, 9))
        A[1, [4, 6]] = 0.0025, 0.0025
        A[2, [5, 7, 4]] = 0.0025, 0.0025, -0.0025
        A[3, [8, 5]] = 0.01, -0.01
        A = A[1:, 1:]
        b = np.array([1, 1, 1])

        def c1(x):
            x = np.hstack(([0], x))  # 1-indexed to match reference
            return [x[1]*x[6] - 833.33252*x[4] - 100*x[1] + 83333.333,
                    x[2]*x[7] - 1250*x[5] - x[2]*x[4] + 1250*x[4],
                    x[3]*x[8] - 1250000 - x[3]*x[5] + 2500*x[5]]

        L = LinearConstraint(A, -np.inf, 1)
        N = NonlinearConstraint(c1, 0, np.inf)

        bounds = [(100, 10000)] + [(1000, 10000)]*2 + [(10, 1000)]*5
        constraints = (L, N)

        with suppress_warnings() as sup:
            sup.filter(UserWarning)
            res = differential_evolution(f, bounds, strategy='rand1bin',
                                         seed=1234, constraints=constraints,
                                         popsize=3)

        # reference optimum from the Lampinen paper
        f_opt = 7049.248

        x_opt = [579.306692, 1359.97063, 5109.9707, 182.0177, 295.601172,
                 217.9823, 286.416528, 395.601172]

        assert_allclose(f(x_opt), f_opt, atol=0.001)
        assert_allclose(res.fun, f_opt, atol=0.001)

        # use higher tol here for 32-bit Windows, see gh-11693
        if (platform.system() == 'Windows' and np.dtype(np.intp).itemsize < 8):
            assert_allclose(res.x, x_opt, rtol=2.4e-6, atol=0.0035)
        else:
            # tolerance determined from macOS + MKL failure, see gh-12701
            assert_allclose(res.x, x_opt, rtol=5e-6, atol=0.0024)

        assert res.success
        # solution must be feasible w.r.t. both constraint sets and bounds
        assert_(np.all(A @ res.x <= b))
        assert_(np.all(np.array(c1(res.x)) >= 0))
        assert_(np.all(res.x >= np.array(bounds)[:, 0]))
        assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
1237 |
+
|
1238 |
+
    def test_L5(self):
        """Constrained DE on Lampinen test problem 5 (2-D trigonometric
        maximization posed as minimization of -f)."""
        # Lampinen ([5]) test problem 5

        def f(x):
            x = np.hstack(([0], x))  # 1-indexed to match reference
            fun = (np.sin(2*np.pi*x[1])**3*np.sin(2*np.pi*x[2]) /
                   (x[1]**3*(x[1]+x[2])))
            return -fun  # maximize

        def c1(x):
            x = np.hstack(([0], x))  # 1-indexed to match reference
            return [x[1]**2 - x[2] + 1,
                    1 - x[1] + (x[2]-4)**2]

        # constraints must be non-positive (<= 0 formulation)
        N = NonlinearConstraint(c1, -np.inf, 0)
        bounds = [(0, 10)]*2
        constraints = (N)

        res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
                                     constraints=constraints)

        # reference optimum from the Lampinen paper
        x_opt = (1.22797135, 4.24537337)
        f_opt = -0.095825
        assert_allclose(f(x_opt), f_opt, atol=2e-5)
        assert_allclose(res.fun, f_opt, atol=1e-4)
        assert res.success
        # solution must be feasible w.r.t. constraints and bounds
        assert_(np.all(np.array(c1(res.x)) <= 0))
        assert_(np.all(res.x >= np.array(bounds)[:, 0]))
        assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
1267 |
+
|
1268 |
+
    def test_L6(self):
        """Constrained DE on Lampinen test problem 6 (2-D cubic objective,
        two nonlinear inequality constraints)."""
        # Lampinen ([5]) test problem 6
        def f(x):
            x = np.hstack(([0], x))  # 1-indexed to match reference
            fun = (x[1]-10)**3 + (x[2] - 20)**3
            return fun

        def c1(x):
            x = np.hstack(([0], x))  # 1-indexed to match reference
            return [(x[1]-5)**2 + (x[2] - 5)**2 - 100,
                    -(x[1]-6)**2 - (x[2] - 5)**2 + 82.81]

        N = NonlinearConstraint(c1, 0, np.inf)
        bounds = [(13, 100), (0, 100)]
        constraints = (N)
        res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
                                     constraints=constraints, tol=1e-7)
        # reference optimum from the Lampinen paper
        x_opt = (14.095, 0.84296)
        f_opt = -6961.814744

        assert_allclose(f(x_opt), f_opt, atol=1e-6)
        assert_allclose(res.fun, f_opt, atol=0.001)
        assert_allclose(res.x, x_opt, atol=1e-4)
        assert res.success
        # solution must be feasible w.r.t. constraints and bounds
        assert_(np.all(np.array(c1(res.x)) >= 0))
        assert_(np.all(res.x >= np.array(bounds)[:, 0]))
        assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
1295 |
+
|
1296 |
+
    def test_L7(self):
        """Constrained DE on Lampinen test problem 7 (5-D quadratic with
        three two-sided nonlinear constraints)."""
        # Lampinen ([5]) test problem 7
        def f(x):
            x = np.hstack(([0], x))  # 1-indexed to match reference
            fun = (5.3578547*x[3]**2 + 0.8356891*x[1]*x[5] +
                   37.293239*x[1] - 40792.141)
            return fun

        def c1(x):
            x = np.hstack(([0], x))  # 1-indexed to match reference
            return [
                85.334407 + 0.0056858*x[2]*x[5] + 0.0006262*x[1]*x[4] -
                0.0022053*x[3]*x[5],

                80.51249 + 0.0071317*x[2]*x[5] + 0.0029955*x[1]*x[2] +
                0.0021813*x[3]**2,

                9.300961 + 0.0047026*x[3]*x[5] + 0.0012547*x[1]*x[3] +
                0.0019085*x[3]*x[4]
                ]

        # each constraint has both a lower and an upper limit
        N = NonlinearConstraint(c1, [0, 90, 20], [92, 110, 25])

        bounds = [(78, 102), (33, 45)] + [(27, 45)]*3
        constraints = (N)

        res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
                                     constraints=constraints)

        # using our best solution, rather than Lampinen/Koziel. Koziel solution
        # doesn't satisfy constraints, Lampinen f_opt just plain wrong.
        x_opt = [78.00000686, 33.00000362, 29.99526064, 44.99999971,
                 36.77579979]

        f_opt = -30665.537578

        assert_allclose(f(x_opt), f_opt)
        assert_allclose(res.x, x_opt, atol=1e-3)
        assert_allclose(res.fun, f_opt, atol=1e-3)

        assert res.success
        # solution must lie within both constraint limits and the bounds
        assert_(np.all(np.array(c1(res.x)) >= np.array([0, 90, 20])))
        assert_(np.all(np.array(c1(res.x)) <= np.array([92, 110, 25])))
        assert_(np.all(res.x >= np.array(bounds)[:, 0]))
        assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
1341 |
+
|
1342 |
+
    @pytest.mark.slow
    @pytest.mark.xfail(platform.machine() == 'ppc64le',
                       reason="fails on ppc64le")
    def test_L8(self):
        """Constrained DE on Lampinen test problem 8 (4-D, mixed linear and
        near-equality nonlinear constraints expressed as tight two-sided
        bounds)."""
        def f(x):
            x = np.hstack(([0], x))  # 1-indexed to match reference
            fun = 3*x[1] + 0.000001*x[1]**3 + 2*x[2] + 0.000002/3*x[2]**3
            return fun

        # linear constraint matrix assembled with 1-based rows/cols, then the
        # padding row/column is stripped off
        A = np.zeros((3, 5))
        A[1, [4, 3]] = 1, -1
        A[2, [3, 4]] = 1, -1
        A = A[1:, 1:]
        b = np.array([-.55, -.55])

        def c1(x):
            x = np.hstack(([0], x))  # 1-indexed to match reference
            return [
                1000*np.sin(-x[3]-0.25) + 1000*np.sin(-x[4]-0.25) +
                894.8 - x[1],
                1000*np.sin(x[3]-0.25) + 1000*np.sin(x[3]-x[4]-0.25) +
                894.8 - x[2],
                1000*np.sin(x[4]-0.25) + 1000*np.sin(x[4]-x[3]-0.25) +
                1294.8
                ]
        L = LinearConstraint(A, b, np.inf)
        # equality constraints approximated by a +/- 0.001 band
        N = NonlinearConstraint(c1, np.full(3, -0.001), np.full(3, 0.001))

        bounds = [(0, 1200)]*2+[(-.55, .55)]*2
        constraints = (L, N)

        with suppress_warnings() as sup:
            sup.filter(UserWarning)
            # original Lampinen test was with rand1bin, but that takes a
            # huge amount of CPU time. Changing strategy to best1bin speeds
            # things up a lot
            res = differential_evolution(f, bounds, strategy='best1bin',
                                         seed=1234, constraints=constraints,
                                         maxiter=5000)

        # reference optimum from the Lampinen paper
        x_opt = (679.9453, 1026.067, 0.1188764, -0.3962336)
        f_opt = 5126.4981

        assert_allclose(f(x_opt), f_opt, atol=1e-3)
        assert_allclose(res.x[:2], x_opt[:2], atol=2e-3)
        assert_allclose(res.x[2:], x_opt[2:], atol=2e-3)
        assert_allclose(res.fun, f_opt, atol=2e-2)
        assert res.success
        # solution must be feasible w.r.t. both constraint sets and bounds
        assert_(np.all(A @ res.x >= b))
        assert_(np.all(np.array(c1(res.x)) >= -0.001))
        assert_(np.all(np.array(c1(res.x)) <= 0.001))
        assert_(np.all(res.x >= np.array(bounds)[:, 0]))
        assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
1395 |
+
|
1396 |
+
    def test_L9(self):
        """Constrained DE on Lampinen test problem 9 (2-D, one near-equality
        nonlinear constraint expressed as a +/- 0.001 band)."""
        # Lampinen ([5]) test problem 9

        def f(x):
            x = np.hstack(([0], x))  # 1-indexed to match reference
            return x[1]**2 + (x[2]-1)**2

        def c1(x):
            x = np.hstack(([0], x))  # 1-indexed to match reference
            return [x[2] - x[1]**2]

        N = NonlinearConstraint(c1, [-.001], [0.001])

        bounds = [(-1, 1)]*2
        constraints = (N)
        res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
                                     constraints=constraints)

        # the problem is symmetric in x[0], so only |res.x| is compared below
        x_opt = [np.sqrt(2)/2, 0.5]
        f_opt = 0.75

        assert_allclose(f(x_opt), f_opt)
        assert_allclose(np.abs(res.x), x_opt, atol=1e-3)
        assert_allclose(res.fun, f_opt, atol=1e-3)
        assert res.success
        # solution must stay within the constraint band and the bounds
        assert_(np.all(np.array(c1(res.x)) >= -0.001))
        assert_(np.all(np.array(c1(res.x)) <= 0.001))
        assert_(np.all(res.x >= np.array(bounds)[:, 0]))
        assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
1425 |
+
|
1426 |
+
    def test_integrality(self):
        """DE with an integrality mask: fit a negative-binomial distribution
        whose first parameter must be an integer, with and without polishing."""
        # test fitting discrete distribution to data
        rng = np.random.default_rng(6519843218105)
        dist = stats.nbinom
        shapes = (5, 0.5)
        x = dist.rvs(*shapes, size=10000, random_state=rng)

        def func(p, *args):
            dist, x = args
            # negative log-likelihood function
            ll = -np.log(dist.pmf(x, *p)).sum(axis=-1)
            if np.isnan(ll):  # occurs when x is outside of support
                ll = np.inf  # we don't want that
            return ll

        # only the first parameter is constrained to integer values
        integrality = [True, False]
        bounds = [(1, 18), (0, 0.95)]

        res = differential_evolution(func, bounds, args=(dist, x),
                                     integrality=integrality, polish=False,
                                     seed=rng)
        # tolerance has to be fairly relaxed for the second parameter
        # because we're fitting a distribution to random variates.
        assert res.x[0] == 5
        assert_allclose(res.x, shapes, rtol=0.025)

        # check that we can still use integrality constraints with polishing
        res2 = differential_evolution(func, bounds, args=(dist, x),
                                      integrality=integrality, polish=True,
                                      seed=rng)

        def func2(p, *args):
            # same objective with the integer parameter fixed at n
            n, dist, x = args
            return func(np.array([n, p[0]]), dist, x)

        # compare the DE derived solution to an LBFGSB solution (that doesn't
        # have to find the integral values). Note we're setting x0 to be the
        # output from the first DE result, thereby making the polishing step
        # and this minimisation pretty much equivalent.
        LBFGSB = minimize(func2, res2.x[1], args=(5, dist, x),
                          bounds=[(0, 0.95)])
        assert_allclose(res2.x[1], LBFGSB.x)
        assert res2.fun <= res.fun
|
1469 |
+
|
1470 |
+
    def test_integrality_limits(self):
        """Check how DifferentialEvolutionSolver converts bounds when an
        integrality mask is supplied, and that impossible integer bounds
        raise ValueError."""
        def f(x):
            return x

        integrality = [True, False, True]
        bounds = [(0.2, 1.1), (0.9, 2.2), (3.3, 4.9)]

        # no integrality constraints
        solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
                                             integrality=False)
        assert_allclose(solver.limits[0], [0.2, 0.9, 3.3])
        assert_allclose(solver.limits[1], [1.1, 2.2, 4.9])

        # with integrality constraints
        solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
                                             integrality=integrality)
        # integer dimensions are snapped to half-open +/- 0.5 windows
        assert_allclose(solver.limits[0], [0.5, 0.9, 3.5])
        assert_allclose(solver.limits[1], [1.5, 2.2, 4.5])
        assert_equal(solver.integrality, [True, False, True])
        assert solver.polish is False

        bounds = [(-1.2, -0.9), (0.9, 2.2), (-10.3, 4.1)]
        solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
                                             integrality=integrality)
        assert_allclose(solver.limits[0], [-1.5, 0.9, -10.5])
        assert_allclose(solver.limits[1], [-0.5, 2.2, 4.5])

        # A lower bound of -1.2 is converted to
        # np.nextafter(np.ceil(-1.2) - 0.5, np.inf)
        # with a similar process to the upper bound. Check that the
        # conversions work
        assert_allclose(np.round(solver.limits[0]), [-1.0, 1.0, -10.0])
        assert_allclose(np.round(solver.limits[1]), [-1.0, 2.0, 4.0])

        bounds = [(-10.2, -8.1), (0.9, 2.2), (-10.9, -9.9999)]
        solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
                                             integrality=integrality)
        assert_allclose(solver.limits[0], [-10.5, 0.9, -10.5])
        assert_allclose(solver.limits[1], [-8.5, 2.2, -9.5])

        # bounds that contain no integer at all must be rejected
        bounds = [(-10.2, -10.1), (0.9, 2.2), (-10.9, -9.9999)]
        with pytest.raises(ValueError, match='One of the integrality'):
            DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
                                        integrality=integrality)
|
1514 |
+
|
1515 |
+
    def test_vectorized(self):
        """Check the `vectorized` keyword: shape validation, interaction
        warnings with `updating`/`workers`, and equivalence with the
        non-vectorized path."""
        def quadratic(x):
            return np.sum(x**2)

        def quadratic_vec(x):
            return np.sum(x**2, axis=0)

        # A vectorized function needs to accept (len(x), S) and return (S,)
        with pytest.raises(RuntimeError, match='The vectorized function'):
            differential_evolution(quadratic, self.bounds,
                                   vectorized=True, updating='deferred')

        # vectorized overrides the updating keyword, check for warning
        with warns(UserWarning, match="differential_evolution: the 'vector"):
            differential_evolution(quadratic_vec, self.bounds,
                                   vectorized=True)

        # vectorized defers to the workers keyword, check for warning
        with warns(UserWarning, match="differential_evolution: the 'workers"):
            differential_evolution(quadratic_vec, self.bounds,
                                   vectorized=True, workers=map,
                                   updating='deferred')

        # count objective evaluations to compare against res.nfev
        ncalls = [0]

        def rosen_vec(x):
            ncalls[0] += 1
            return rosen(x)

        bounds = [(0, 10), (0, 10)]
        res1 = differential_evolution(rosen, bounds, updating='deferred',
                                      seed=1)
        res2 = differential_evolution(rosen_vec, bounds, vectorized=True,
                                      updating='deferred', seed=1)

        # the two minimisation runs should be functionally equivalent
        assert_allclose(res1.x, res2.x)
        assert ncalls[0] == res2.nfev
        assert res1.nit == res2.nit
|
1554 |
+
|
1555 |
+
    def test_vectorized_constraints(self):
        """Vectorized objective with nonlinear constraints should give the
        same result as the scalar objective path."""
        def constr_f(x):
            return np.array([x[0] + x[1]])

        def constr_f2(x):
            return np.array([x[0]**2 + x[1], x[0] - x[1]])

        nlc1 = NonlinearConstraint(constr_f, -np.inf, 1.9)
        nlc2 = NonlinearConstraint(constr_f2, (0.9, 0.5), (2.0, 2.0))

        def rosen_vec(x):
            # accept an (len(x0), S) array, returning a (S,) array
            v = 100 * (x[1:] - x[:-1]**2.0)**2.0
            v += (1 - x[:-1])**2.0
            return np.squeeze(v)

        bounds = [(0, 10), (0, 10)]

        res1 = differential_evolution(rosen, bounds, updating='deferred',
                                      seed=1, constraints=[nlc1, nlc2],
                                      polish=False)
        res2 = differential_evolution(rosen_vec, bounds, vectorized=True,
                                      updating='deferred', seed=1,
                                      constraints=[nlc1, nlc2],
                                      polish=False)
        # the two minimisation runs should be functionally equivalent
        assert_allclose(res1.x, res2.x)
|
1582 |
+
|
1583 |
+
    def test_constraint_violation_error_message(self):
        """An infeasible constrained problem should fail with a message that
        reports the maximum constraint violation (MAXCV)."""

        def func(x):
            return np.cos(x[0]) + np.sin(x[1])

        # Intentionally infeasible constraints.
        c0 = NonlinearConstraint(lambda x: x[1] - (x[0]-1)**2, 0, np.inf)
        c1 = NonlinearConstraint(lambda x: x[1] + x[0]**2, -np.inf, 0)

        result = differential_evolution(func,
                                        bounds=[(-1, 2), (-1, 1)],
                                        constraints=[c0, c1],
                                        maxiter=10,
                                        polish=False,
                                        seed=864197532)
        assert result.success is False
        # The numerical value in the error message might be sensitive to
        # changes in the implementation. It can be updated if the code is
        # changed. The essential part of the test is that there is a number
        # after the '=', so if necessary, the text could be reduced to, say,
        # "MAXCV = 0.".
        assert "MAXCV = 0.414" in result.message
|
1605 |
+
|
1606 |
+
    def test_strategy_fn(self):
        """A callable `strategy` mimicking 'best1bin' must reproduce the
        built-in strategy exactly; a strategy returning the wrong trial shape
        must raise RuntimeError."""
        # examines ability to customize strategy by mimicking one of the
        # in-built strategies and comparing to the actual in-built strategy.
        parameter_count = 4
        popsize = 10
        bounds = [(0, 10.)] * parameter_count
        total_popsize = parameter_count * popsize
        mutation = 0.8
        recombination = 0.7

        def custom_strategy_fn(candidate, population, rng=None):
            # re-implementation of 'best1bin' using the supplied rng
            trial = np.copy(population[candidate])
            fill_point = rng.choice(parameter_count)

            pool = np.arange(total_popsize)
            rng.shuffle(pool)

            # pick two distinct members that aren't the candidate itself
            idxs = []
            while len(idxs) < 2 and len(pool) > 0:
                idx = pool[0]
                pool = pool[1:]
                if idx != candidate:
                    idxs.append(idx)

            r0, r1 = idxs[:2]

            # population[0] is the current best member ('best1' mutation)
            bprime = (population[0] + mutation *
                      (population[r0] - population[r1]))

            # binomial crossover; fill_point guarantees at least one swap
            crossovers = rng.uniform(size=parameter_count)
            crossovers = crossovers < recombination
            crossovers[fill_point] = True
            trial = np.where(crossovers, bprime, trial)
            return trial

        solver = DifferentialEvolutionSolver(
            rosen,
            bounds,
            popsize=popsize,
            recombination=recombination,
            mutation=mutation,
            maxiter=2,
            strategy=custom_strategy_fn,
            seed=10,
            polish=False
        )
        assert solver.strategy is custom_strategy_fn
        res = solver.solve()

        res2 = differential_evolution(
            rosen,
            bounds,
            mutation=mutation,
            popsize=popsize,
            recombination=recombination,
            maxiter=2,
            strategy='best1bin',
            polish=False,
            seed=10
        )
        # identical seeds + identical strategy logic => identical evolution
        assert_allclose(res.population, res2.population)
        assert_allclose(res.x, res2.x)

        def custom_strategy_fn(candidate, population, rng=None):
            # deliberately returns the wrong trial shape
            return np.array([1.0, 2.0])

        with pytest.raises(RuntimeError, match="strategy*"):
            differential_evolution(
                rosen,
                bounds,
                strategy=custom_strategy_fn
            )
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__dual_annealing.py
ADDED
@@ -0,0 +1,379 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Dual annealing unit tests implementation.
|
2 |
+
# Copyright (c) 2018 Sylvain Gubian <[email protected]>,
|
3 |
+
# Yang Xiang <[email protected]>
|
4 |
+
# Author: Sylvain Gubian, PMP S.A.
|
5 |
+
"""
|
6 |
+
Unit tests for the dual annealing global optimizer
|
7 |
+
"""
|
8 |
+
from scipy.optimize import dual_annealing, Bounds
|
9 |
+
|
10 |
+
from scipy.optimize._dual_annealing import EnergyState
|
11 |
+
from scipy.optimize._dual_annealing import LocalSearchWrapper
|
12 |
+
from scipy.optimize._dual_annealing import ObjectiveFunWrapper
|
13 |
+
from scipy.optimize._dual_annealing import StrategyChain
|
14 |
+
from scipy.optimize._dual_annealing import VisitingDistribution
|
15 |
+
from scipy.optimize import rosen, rosen_der
|
16 |
+
import pytest
|
17 |
+
import numpy as np
|
18 |
+
from numpy.testing import assert_equal, assert_allclose, assert_array_less
|
19 |
+
from pytest import raises as assert_raises
|
20 |
+
from scipy._lib._util import check_random_state
|
21 |
+
|
22 |
+
|
23 |
+
class TestDualAnnealing:
|
24 |
+
|
25 |
+
    def setup_method(self):
        """Per-test fixture: bounds, temperatures, RNG and call counters."""
        # A function that returns always infinity for initialization tests
        self.weirdfunc = lambda x: np.inf
        # 2-D bounds for testing function
        self.ld_bounds = [(-5.12, 5.12)] * 2
        # 4-D bounds for testing function
        self.hd_bounds = self.ld_bounds * 4
        # Number of values to be generated for testing visit function
        self.nbtestvalues = 5000
        self.high_temperature = 5230
        self.low_temperature = 0.1
        # default visiting distribution parameter
        self.qv = 2.62
        self.seed = 1234
        self.rs = check_random_state(self.seed)
        # counters updated by self.func / self.rosen_der_wrapper
        self.nb_fun_call = 0
        self.ngev = 0
|
41 |
+
|
42 |
+
def callback(self, x, f, context):
|
43 |
+
# For testing callback mechanism. Should stop for e <= 1 as
|
44 |
+
# the callback function returns True
|
45 |
+
if f <= 1.0:
|
46 |
+
return True
|
47 |
+
|
48 |
+
def func(self, x, args=()):
|
49 |
+
# Using Rastrigin function for performing tests
|
50 |
+
if args:
|
51 |
+
shift = args
|
52 |
+
else:
|
53 |
+
shift = 0
|
54 |
+
y = np.sum((x - shift) ** 2 - 10 * np.cos(2 * np.pi * (
|
55 |
+
x - shift))) + 10 * np.size(x) + shift
|
56 |
+
self.nb_fun_call += 1
|
57 |
+
return y
|
58 |
+
|
59 |
+
def rosen_der_wrapper(self, x, args=()):
|
60 |
+
self.ngev += 1
|
61 |
+
return rosen_der(x, *args)
|
62 |
+
|
63 |
+
    # FIXME: there are some discontinuities in behaviour as a function of `qv`,
    # this needs investigating - see gh-12384
    @pytest.mark.parametrize('qv', [1.1, 1.41, 2, 2.62, 2.9])
    def test_visiting_stepping(self, qv):
        """Check which components VisitingDistribution.visiting perturbs for
        low vs high values of the step index."""
        lu = list(zip(*self.ld_bounds))
        lower = np.array(lu[0])
        upper = np.array(lu[1])
        dim = lower.size
        vd = VisitingDistribution(lower, upper, qv, self.rs)
        values = np.zeros(dim)
        x_step_low = vd.visiting(values, 0, self.high_temperature)
        # Make sure that only the first component is changed
        assert_equal(np.not_equal(x_step_low, 0), True)
        values = np.zeros(dim)
        x_step_high = vd.visiting(values, dim, self.high_temperature)
        # Make sure that component other than at dim has changed
        assert_equal(np.not_equal(x_step_high[0], 0), True)
|
80 |
+
|
81 |
+
    @pytest.mark.parametrize('qv', [2.25, 2.62, 2.9])
    def test_visiting_dist_high_temperature(self, qv):
        """Samples from the visiting distribution at high temperature must
        exhibit very heavy tails (values spanning many orders of magnitude)."""
        lu = list(zip(*self.ld_bounds))
        lower = np.array(lu[0])
        upper = np.array(lu[1])
        vd = VisitingDistribution(lower, upper, qv, self.rs)
        # values = np.zeros(self.nbtestvalues)
        # for i in np.arange(self.nbtestvalues):
        #     values[i] = vd.visit_fn(self.high_temperature)
        values = vd.visit_fn(self.high_temperature, self.nbtestvalues)

        # Visiting distribution is a distorted version of Cauchy-Lorentz
        # distribution, and as no 1st and higher moments (no mean defined,
        # no variance defined).
        # Check that big tails values are generated
        assert_array_less(np.min(values), 1e-10)
        assert_array_less(1e+10, np.max(values))
|
98 |
+
|
99 |
+
    def test_reset(self):
        """EnergyState.reset must raise ValueError when the objective never
        returns a finite value (here: always np.inf)."""
        owf = ObjectiveFunWrapper(self.weirdfunc)
        lu = list(zip(*self.ld_bounds))
        lower = np.array(lu[0])
        upper = np.array(lu[1])
        es = EnergyState(lower, upper)
        assert_raises(ValueError, es.reset, owf, check_random_state(None))
|
106 |
+
|
107 |
+
    def test_low_dim(self):
        """dual_annealing finds the Rastrigin global minimum in 2-D."""
        ret = dual_annealing(
            self.func, self.ld_bounds, seed=self.seed)
        assert_allclose(ret.fun, 0., atol=1e-12)
        assert ret.success

    def test_high_dim(self):
        """dual_annealing finds the Rastrigin global minimum in 8-D."""
        ret = dual_annealing(self.func, self.hd_bounds, seed=self.seed)
        assert_allclose(ret.fun, 0., atol=1e-12)
        assert ret.success

    def test_low_dim_no_ls(self):
        """Pure annealing (no local search) still gets close in 2-D;
        tolerance is looser without the local refinement step."""
        ret = dual_annealing(self.func, self.ld_bounds,
                             no_local_search=True, seed=self.seed)
        assert_allclose(ret.fun, 0., atol=1e-4)

    def test_high_dim_no_ls(self):
        """Pure annealing (no local search) still gets close in 8-D."""
        ret = dual_annealing(self.func, self.hd_bounds,
                             no_local_search=True, seed=self.seed)
        assert_allclose(ret.fun, 0., atol=1e-4)
|
127 |
+
|
128 |
+
    def test_nb_fun_call(self):
        """ret.nfev must match the actual number of objective evaluations
        counted by self.func."""
        ret = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
        assert_equal(self.nb_fun_call, ret.nfev)

    def test_nb_fun_call_no_ls(self):
        """Same nfev bookkeeping check for the no-local-search path."""
        ret = dual_annealing(self.func, self.ld_bounds,
                             no_local_search=True, seed=self.seed)
        assert_equal(self.nb_fun_call, ret.nfev)
|
136 |
+
|
137 |
+
    def test_max_reinit(self):
        """An objective that is always infinite exhausts the re-initialisation
        attempts and must raise ValueError."""
        assert_raises(ValueError, dual_annealing, self.weirdfunc,
                      self.ld_bounds)

    def test_reproduce(self):
        """Identical integer seeds must produce byte-identical solutions."""
        res1 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
        res2 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
        res3 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
        # If we have reproducible results, x components found has to
        # be exactly the same, which is not the case with no seeding
        assert_equal(res1.x, res2.x)
        assert_equal(res1.x, res3.x)

    def test_rand_gen(self):
        """seed may also be a np.random.Generator; re-seeding the generator
        must reproduce the run exactly."""
        # check that np.random.Generator can be used (numpy >= 1.17)
        # obtain a np.random.Generator object
        rng = np.random.default_rng(1)

        res1 = dual_annealing(self.func, self.ld_bounds, seed=rng)
        # seed again
        rng = np.random.default_rng(1)
        res2 = dual_annealing(self.func, self.ld_bounds, seed=rng)
        # If we have reproducible results, x components found has to
        # be exactly the same, which is not the case with no seeding
        assert_equal(res1.x, res2.x)
|
162 |
+
|
163 |
+
    def test_bounds_integrity(self):
        """Bounds with lower > upper must be rejected with ValueError."""
        wrong_bounds = [(-5.12, 5.12), (1, 0), (5.12, 5.12)]
        assert_raises(ValueError, dual_annealing, self.func,
                      wrong_bounds)

    def test_bound_validity(self):
        """Non-finite (inf/nan) bounds must be rejected with ValueError."""
        invalid_bounds = [(-5, 5), (-np.inf, 0), (-5, 5)]
        assert_raises(ValueError, dual_annealing, self.func,
                      invalid_bounds)
        invalid_bounds = [(-5, 5), (0, np.inf), (-5, 5)]
        assert_raises(ValueError, dual_annealing, self.func,
                      invalid_bounds)
        invalid_bounds = [(-5, 5), (0, np.nan), (-5, 5)]
        assert_raises(ValueError, dual_annealing, self.func,
                      invalid_bounds)
|
178 |
+
|
179 |
+
    def test_deprecated_local_search_options_bounds(self):
        """Bounds passed via minimizer_kwargs reach the local minimizer; a
        method that cannot handle bounds (CG) emits a RuntimeWarning."""
        def func(x):
            return np.sum((x - 5) * (x - 1))
        bounds = list(zip([-6, -5], [6, 5]))
        # Test bounds can be passed (see gh-10831)

        with pytest.warns(RuntimeWarning, match=r"Method CG cannot handle "):
            dual_annealing(
                func,
                bounds=bounds,
                minimizer_kwargs={"method": "CG", "bounds": bounds})

    def test_minimizer_kwargs_bounds(self):
        """SLSQP accepts bounds through minimizer_kwargs silently; CG warns
        that it cannot handle them."""
        def func(x):
            return np.sum((x - 5) * (x - 1))
        bounds = list(zip([-6, -5], [6, 5]))
        # Test bounds can be passed (see gh-10831)
        dual_annealing(
            func,
            bounds=bounds,
            minimizer_kwargs={"method": "SLSQP", "bounds": bounds})

        with pytest.warns(RuntimeWarning, match=r"Method CG cannot handle "):
            dual_annealing(
                func,
                bounds=bounds,
                minimizer_kwargs={"method": "CG", "bounds": bounds})
|
206 |
+
|
207 |
+
def test_max_fun_ls(self):
|
208 |
+
ret = dual_annealing(self.func, self.ld_bounds, maxfun=100,
|
209 |
+
seed=self.seed)
|
210 |
+
|
211 |
+
ls_max_iter = min(max(
|
212 |
+
len(self.ld_bounds) * LocalSearchWrapper.LS_MAXITER_RATIO,
|
213 |
+
LocalSearchWrapper.LS_MAXITER_MIN),
|
214 |
+
LocalSearchWrapper.LS_MAXITER_MAX)
|
215 |
+
assert ret.nfev <= 100 + ls_max_iter
|
216 |
+
assert not ret.success
|
217 |
+
|
218 |
+
def test_max_fun_no_ls(self):
|
219 |
+
ret = dual_annealing(self.func, self.ld_bounds,
|
220 |
+
no_local_search=True, maxfun=500, seed=self.seed)
|
221 |
+
assert ret.nfev <= 500
|
222 |
+
assert not ret.success
|
223 |
+
|
224 |
+
def test_maxiter(self):
|
225 |
+
ret = dual_annealing(self.func, self.ld_bounds, maxiter=700,
|
226 |
+
seed=self.seed)
|
227 |
+
assert ret.nit <= 700
|
228 |
+
|
229 |
+
# Testing that args are passed correctly for dual_annealing
|
230 |
+
def test_fun_args_ls(self):
|
231 |
+
ret = dual_annealing(self.func, self.ld_bounds,
|
232 |
+
args=((3.14159,)), seed=self.seed)
|
233 |
+
assert_allclose(ret.fun, 3.14159, atol=1e-6)
|
234 |
+
|
235 |
+
# Testing that args are passed correctly for pure simulated annealing
|
236 |
+
def test_fun_args_no_ls(self):
|
237 |
+
ret = dual_annealing(self.func, self.ld_bounds,
|
238 |
+
args=((3.14159, )), no_local_search=True,
|
239 |
+
seed=self.seed)
|
240 |
+
assert_allclose(ret.fun, 3.14159, atol=1e-4)
|
241 |
+
|
242 |
+
def test_callback_stop(self):
|
243 |
+
# Testing that callback make the algorithm stop for
|
244 |
+
# fun value <= 1.0 (see callback method)
|
245 |
+
ret = dual_annealing(self.func, self.ld_bounds,
|
246 |
+
callback=self.callback, seed=self.seed)
|
247 |
+
assert ret.fun <= 1.0
|
248 |
+
assert 'stop early' in ret.message[0]
|
249 |
+
assert not ret.success
|
250 |
+
|
251 |
+
@pytest.mark.parametrize('method, atol', [
|
252 |
+
('Nelder-Mead', 2e-5),
|
253 |
+
('COBYLA', 1e-5),
|
254 |
+
('Powell', 1e-8),
|
255 |
+
('CG', 1e-8),
|
256 |
+
('BFGS', 1e-8),
|
257 |
+
('TNC', 1e-8),
|
258 |
+
('SLSQP', 2e-7),
|
259 |
+
])
|
260 |
+
def test_multi_ls_minimizer(self, method, atol):
|
261 |
+
ret = dual_annealing(self.func, self.ld_bounds,
|
262 |
+
minimizer_kwargs=dict(method=method),
|
263 |
+
seed=self.seed)
|
264 |
+
assert_allclose(ret.fun, 0., atol=atol)
|
265 |
+
|
266 |
+
def test_wrong_restart_temp(self):
|
267 |
+
assert_raises(ValueError, dual_annealing, self.func,
|
268 |
+
self.ld_bounds, restart_temp_ratio=1)
|
269 |
+
assert_raises(ValueError, dual_annealing, self.func,
|
270 |
+
self.ld_bounds, restart_temp_ratio=0)
|
271 |
+
|
272 |
+
def test_gradient_gnev(self):
|
273 |
+
minimizer_opts = {
|
274 |
+
'jac': self.rosen_der_wrapper,
|
275 |
+
}
|
276 |
+
ret = dual_annealing(rosen, self.ld_bounds,
|
277 |
+
minimizer_kwargs=minimizer_opts,
|
278 |
+
seed=self.seed)
|
279 |
+
assert ret.njev == self.ngev
|
280 |
+
|
281 |
+
def test_from_docstring(self):
|
282 |
+
def func(x):
|
283 |
+
return np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x)
|
284 |
+
lw = [-5.12] * 10
|
285 |
+
up = [5.12] * 10
|
286 |
+
ret = dual_annealing(func, bounds=list(zip(lw, up)), seed=1234)
|
287 |
+
assert_allclose(ret.x,
|
288 |
+
[-4.26437714e-09, -3.91699361e-09, -1.86149218e-09,
|
289 |
+
-3.97165720e-09, -6.29151648e-09, -6.53145322e-09,
|
290 |
+
-3.93616815e-09, -6.55623025e-09, -6.05775280e-09,
|
291 |
+
-5.00668935e-09], atol=4e-8)
|
292 |
+
assert_allclose(ret.fun, 0.000000, atol=5e-13)
|
293 |
+
|
294 |
+
@pytest.mark.parametrize('new_e, temp_step, accepted, accept_rate', [
|
295 |
+
(0, 100, 1000, 1.0097587941791923),
|
296 |
+
(0, 2, 1000, 1.2599210498948732),
|
297 |
+
(10, 100, 878, 0.8786035869128718),
|
298 |
+
(10, 60, 695, 0.6812920690579612),
|
299 |
+
(2, 100, 990, 0.9897404249173424),
|
300 |
+
])
|
301 |
+
def test_accept_reject_probabilistic(
|
302 |
+
self, new_e, temp_step, accepted, accept_rate):
|
303 |
+
# Test accepts unconditionally with e < current_energy and
|
304 |
+
# probabilistically with e > current_energy
|
305 |
+
|
306 |
+
rs = check_random_state(123)
|
307 |
+
|
308 |
+
count_accepted = 0
|
309 |
+
iterations = 1000
|
310 |
+
|
311 |
+
accept_param = -5
|
312 |
+
current_energy = 1
|
313 |
+
for _ in range(iterations):
|
314 |
+
energy_state = EnergyState(lower=None, upper=None)
|
315 |
+
# Set energy state with current_energy, any location.
|
316 |
+
energy_state.update_current(current_energy, [0])
|
317 |
+
|
318 |
+
chain = StrategyChain(
|
319 |
+
accept_param, None, None, None, rs, energy_state)
|
320 |
+
# Normally this is set in run()
|
321 |
+
chain.temperature_step = temp_step
|
322 |
+
|
323 |
+
# Check if update is accepted.
|
324 |
+
chain.accept_reject(j=1, e=new_e, x_visit=[2])
|
325 |
+
if energy_state.current_energy == new_e:
|
326 |
+
count_accepted += 1
|
327 |
+
|
328 |
+
assert count_accepted == accepted
|
329 |
+
|
330 |
+
# Check accept rate
|
331 |
+
pqv = 1 - (1 - accept_param) * (new_e - current_energy) / temp_step
|
332 |
+
rate = 0 if pqv <= 0 else np.exp(np.log(pqv) / (1 - accept_param))
|
333 |
+
|
334 |
+
assert_allclose(rate, accept_rate)
|
335 |
+
|
336 |
+
def test_bounds_class(self):
|
337 |
+
# test that result does not depend on the bounds type
|
338 |
+
def func(x):
|
339 |
+
f = np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x)
|
340 |
+
return f
|
341 |
+
lw = [-5.12] * 5
|
342 |
+
up = [5.12] * 5
|
343 |
+
|
344 |
+
# Unbounded global minimum is all zeros. Most bounds below will force
|
345 |
+
# a DV away from unbounded minimum and be active at solution.
|
346 |
+
up[0] = -2.0
|
347 |
+
up[1] = -1.0
|
348 |
+
lw[3] = 1.0
|
349 |
+
lw[4] = 2.0
|
350 |
+
|
351 |
+
# run optimizations
|
352 |
+
bounds = Bounds(lw, up)
|
353 |
+
ret_bounds_class = dual_annealing(func, bounds=bounds, seed=1234)
|
354 |
+
|
355 |
+
bounds_old = list(zip(lw, up))
|
356 |
+
ret_bounds_list = dual_annealing(func, bounds=bounds_old, seed=1234)
|
357 |
+
|
358 |
+
# test that found minima, function evaluations and iterations match
|
359 |
+
assert_allclose(ret_bounds_class.x, ret_bounds_list.x, atol=1e-8)
|
360 |
+
assert_allclose(ret_bounds_class.x, np.arange(-2, 3), atol=1e-7)
|
361 |
+
assert_allclose(ret_bounds_list.fun, ret_bounds_class.fun, atol=1e-9)
|
362 |
+
assert ret_bounds_list.nfev == ret_bounds_class.nfev
|
363 |
+
|
364 |
+
def test_callable_jac_with_args_gh11052(self):
|
365 |
+
# dual_annealing used to fail when `jac` was callable and `args` were
|
366 |
+
# used; check that this is resolved. Example is from gh-11052.
|
367 |
+
rng = np.random.default_rng(94253637693657847462)
|
368 |
+
def f(x, power):
|
369 |
+
return np.sum(np.exp(x ** power))
|
370 |
+
|
371 |
+
def jac(x, power):
|
372 |
+
return np.exp(x ** power) * power * x ** (power - 1)
|
373 |
+
|
374 |
+
res1 = dual_annealing(f, args=(2, ), bounds=[[0, 1], [0, 1]], seed=rng,
|
375 |
+
minimizer_kwargs=dict(method='L-BFGS-B'))
|
376 |
+
res2 = dual_annealing(f, args=(2, ), bounds=[[0, 1], [0, 1]], seed=rng,
|
377 |
+
minimizer_kwargs=dict(method='L-BFGS-B',
|
378 |
+
jac=jac))
|
379 |
+
assert_allclose(res1.fun, res2.fun, rtol=1e-6)
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__linprog_clean_inputs.py
ADDED
@@ -0,0 +1,310 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Unit test for Linear Programming via Simplex Algorithm.
|
3 |
+
"""
|
4 |
+
import numpy as np
|
5 |
+
from numpy.testing import assert_, assert_allclose, assert_equal
|
6 |
+
from pytest import raises as assert_raises
|
7 |
+
from scipy.optimize._linprog_util import _clean_inputs, _LPProblem
|
8 |
+
from scipy._lib._util import VisibleDeprecationWarning
|
9 |
+
from copy import deepcopy
|
10 |
+
from datetime import date
|
11 |
+
|
12 |
+
|
13 |
+
def test_aliasing():
|
14 |
+
"""
|
15 |
+
Test for ensuring that no objects referred to by `lp` attributes,
|
16 |
+
`c`, `A_ub`, `b_ub`, `A_eq`, `b_eq`, `bounds`, have been modified
|
17 |
+
by `_clean_inputs` as a side effect.
|
18 |
+
"""
|
19 |
+
lp = _LPProblem(
|
20 |
+
c=1,
|
21 |
+
A_ub=[[1]],
|
22 |
+
b_ub=[1],
|
23 |
+
A_eq=[[1]],
|
24 |
+
b_eq=[1],
|
25 |
+
bounds=(-np.inf, np.inf)
|
26 |
+
)
|
27 |
+
lp_copy = deepcopy(lp)
|
28 |
+
|
29 |
+
_clean_inputs(lp)
|
30 |
+
|
31 |
+
assert_(lp.c == lp_copy.c, "c modified by _clean_inputs")
|
32 |
+
assert_(lp.A_ub == lp_copy.A_ub, "A_ub modified by _clean_inputs")
|
33 |
+
assert_(lp.b_ub == lp_copy.b_ub, "b_ub modified by _clean_inputs")
|
34 |
+
assert_(lp.A_eq == lp_copy.A_eq, "A_eq modified by _clean_inputs")
|
35 |
+
assert_(lp.b_eq == lp_copy.b_eq, "b_eq modified by _clean_inputs")
|
36 |
+
assert_(lp.bounds == lp_copy.bounds, "bounds modified by _clean_inputs")
|
37 |
+
|
38 |
+
|
39 |
+
def test_aliasing2():
|
40 |
+
"""
|
41 |
+
Similar purpose as `test_aliasing` above.
|
42 |
+
"""
|
43 |
+
lp = _LPProblem(
|
44 |
+
c=np.array([1, 1]),
|
45 |
+
A_ub=np.array([[1, 1], [2, 2]]),
|
46 |
+
b_ub=np.array([[1], [1]]),
|
47 |
+
A_eq=np.array([[1, 1]]),
|
48 |
+
b_eq=np.array([1]),
|
49 |
+
bounds=[(-np.inf, np.inf), (None, 1)]
|
50 |
+
)
|
51 |
+
lp_copy = deepcopy(lp)
|
52 |
+
|
53 |
+
_clean_inputs(lp)
|
54 |
+
|
55 |
+
assert_allclose(lp.c, lp_copy.c, err_msg="c modified by _clean_inputs")
|
56 |
+
assert_allclose(lp.A_ub, lp_copy.A_ub, err_msg="A_ub modified by _clean_inputs")
|
57 |
+
assert_allclose(lp.b_ub, lp_copy.b_ub, err_msg="b_ub modified by _clean_inputs")
|
58 |
+
assert_allclose(lp.A_eq, lp_copy.A_eq, err_msg="A_eq modified by _clean_inputs")
|
59 |
+
assert_allclose(lp.b_eq, lp_copy.b_eq, err_msg="b_eq modified by _clean_inputs")
|
60 |
+
assert_(lp.bounds == lp_copy.bounds, "bounds modified by _clean_inputs")
|
61 |
+
|
62 |
+
|
63 |
+
def test_missing_inputs():
|
64 |
+
c = [1, 2]
|
65 |
+
A_ub = np.array([[1, 1], [2, 2]])
|
66 |
+
b_ub = np.array([1, 1])
|
67 |
+
A_eq = np.array([[1, 1], [2, 2]])
|
68 |
+
b_eq = np.array([1, 1])
|
69 |
+
|
70 |
+
assert_raises(TypeError, _clean_inputs)
|
71 |
+
assert_raises(TypeError, _clean_inputs, _LPProblem(c=None))
|
72 |
+
assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=A_ub))
|
73 |
+
assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=A_ub, b_ub=None))
|
74 |
+
assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, b_ub=b_ub))
|
75 |
+
assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=None, b_ub=b_ub))
|
76 |
+
assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=A_eq))
|
77 |
+
assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=A_eq, b_eq=None))
|
78 |
+
assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, b_eq=b_eq))
|
79 |
+
assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=None, b_eq=b_eq))
|
80 |
+
|
81 |
+
|
82 |
+
def test_too_many_dimensions():
|
83 |
+
cb = [1, 2, 3, 4]
|
84 |
+
A = np.random.rand(4, 4)
|
85 |
+
bad2D = [[1, 2], [3, 4]]
|
86 |
+
bad3D = np.random.rand(4, 4, 4)
|
87 |
+
assert_raises(ValueError, _clean_inputs, _LPProblem(c=bad2D, A_ub=A, b_ub=cb))
|
88 |
+
assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=bad3D, b_ub=cb))
|
89 |
+
assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=A, b_ub=bad2D))
|
90 |
+
assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=bad3D, b_eq=cb))
|
91 |
+
assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=A, b_eq=bad2D))
|
92 |
+
|
93 |
+
|
94 |
+
def test_too_few_dimensions():
|
95 |
+
bad = np.random.rand(4, 4).ravel()
|
96 |
+
cb = np.random.rand(4)
|
97 |
+
assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=bad, b_ub=cb))
|
98 |
+
assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=bad, b_eq=cb))
|
99 |
+
|
100 |
+
|
101 |
+
def test_inconsistent_dimensions():
|
102 |
+
m = 2
|
103 |
+
n = 4
|
104 |
+
c = [1, 2, 3, 4]
|
105 |
+
|
106 |
+
Agood = np.random.rand(m, n)
|
107 |
+
Abad = np.random.rand(m, n + 1)
|
108 |
+
bgood = np.random.rand(m)
|
109 |
+
bbad = np.random.rand(m + 1)
|
110 |
+
boundsbad = [(0, 1)] * (n + 1)
|
111 |
+
assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=Abad, b_ub=bgood))
|
112 |
+
assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=Agood, b_ub=bbad))
|
113 |
+
assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=Abad, b_eq=bgood))
|
114 |
+
assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=Agood, b_eq=bbad))
|
115 |
+
assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, bounds=boundsbad))
|
116 |
+
with np.testing.suppress_warnings() as sup:
|
117 |
+
sup.filter(VisibleDeprecationWarning, "Creating an ndarray from ragged")
|
118 |
+
assert_raises(ValueError, _clean_inputs,
|
119 |
+
_LPProblem(c=c, bounds=[[1, 2], [2, 3], [3, 4], [4, 5, 6]]))
|
120 |
+
|
121 |
+
|
122 |
+
def test_type_errors():
|
123 |
+
lp = _LPProblem(
|
124 |
+
c=[1, 2],
|
125 |
+
A_ub=np.array([[1, 1], [2, 2]]),
|
126 |
+
b_ub=np.array([1, 1]),
|
127 |
+
A_eq=np.array([[1, 1], [2, 2]]),
|
128 |
+
b_eq=np.array([1, 1]),
|
129 |
+
bounds=[(0, 1)]
|
130 |
+
)
|
131 |
+
bad = "hello"
|
132 |
+
|
133 |
+
assert_raises(TypeError, _clean_inputs, lp._replace(c=bad))
|
134 |
+
assert_raises(TypeError, _clean_inputs, lp._replace(A_ub=bad))
|
135 |
+
assert_raises(TypeError, _clean_inputs, lp._replace(b_ub=bad))
|
136 |
+
assert_raises(TypeError, _clean_inputs, lp._replace(A_eq=bad))
|
137 |
+
assert_raises(TypeError, _clean_inputs, lp._replace(b_eq=bad))
|
138 |
+
|
139 |
+
assert_raises(ValueError, _clean_inputs, lp._replace(bounds=bad))
|
140 |
+
assert_raises(ValueError, _clean_inputs, lp._replace(bounds="hi"))
|
141 |
+
assert_raises(ValueError, _clean_inputs, lp._replace(bounds=["hi"]))
|
142 |
+
assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[("hi")]))
|
143 |
+
assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, "")]))
|
144 |
+
assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2), (1, "")]))
|
145 |
+
assert_raises(TypeError, _clean_inputs,
|
146 |
+
lp._replace(bounds=[(1, date(2020, 2, 29))]))
|
147 |
+
assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[[[1, 2]]]))
|
148 |
+
|
149 |
+
|
150 |
+
def test_non_finite_errors():
|
151 |
+
lp = _LPProblem(
|
152 |
+
c=[1, 2],
|
153 |
+
A_ub=np.array([[1, 1], [2, 2]]),
|
154 |
+
b_ub=np.array([1, 1]),
|
155 |
+
A_eq=np.array([[1, 1], [2, 2]]),
|
156 |
+
b_eq=np.array([1, 1]),
|
157 |
+
bounds=[(0, 1)]
|
158 |
+
)
|
159 |
+
assert_raises(ValueError, _clean_inputs, lp._replace(c=[0, None]))
|
160 |
+
assert_raises(ValueError, _clean_inputs, lp._replace(c=[np.inf, 0]))
|
161 |
+
assert_raises(ValueError, _clean_inputs, lp._replace(c=[0, -np.inf]))
|
162 |
+
assert_raises(ValueError, _clean_inputs, lp._replace(c=[np.nan, 0]))
|
163 |
+
|
164 |
+
assert_raises(ValueError, _clean_inputs, lp._replace(A_ub=[[1, 2], [None, 1]]))
|
165 |
+
assert_raises(ValueError, _clean_inputs, lp._replace(b_ub=[np.inf, 1]))
|
166 |
+
assert_raises(ValueError, _clean_inputs, lp._replace(A_eq=[[1, 2], [1, -np.inf]]))
|
167 |
+
assert_raises(ValueError, _clean_inputs, lp._replace(b_eq=[1, np.nan]))
|
168 |
+
|
169 |
+
|
170 |
+
def test__clean_inputs1():
|
171 |
+
lp = _LPProblem(
|
172 |
+
c=[1, 2],
|
173 |
+
A_ub=[[1, 1], [2, 2]],
|
174 |
+
b_ub=[1, 1],
|
175 |
+
A_eq=[[1, 1], [2, 2]],
|
176 |
+
b_eq=[1, 1],
|
177 |
+
bounds=None
|
178 |
+
)
|
179 |
+
|
180 |
+
lp_cleaned = _clean_inputs(lp)
|
181 |
+
|
182 |
+
assert_allclose(lp_cleaned.c, np.array(lp.c))
|
183 |
+
assert_allclose(lp_cleaned.A_ub, np.array(lp.A_ub))
|
184 |
+
assert_allclose(lp_cleaned.b_ub, np.array(lp.b_ub))
|
185 |
+
assert_allclose(lp_cleaned.A_eq, np.array(lp.A_eq))
|
186 |
+
assert_allclose(lp_cleaned.b_eq, np.array(lp.b_eq))
|
187 |
+
assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2)
|
188 |
+
|
189 |
+
assert_(lp_cleaned.c.shape == (2,), "")
|
190 |
+
assert_(lp_cleaned.A_ub.shape == (2, 2), "")
|
191 |
+
assert_(lp_cleaned.b_ub.shape == (2,), "")
|
192 |
+
assert_(lp_cleaned.A_eq.shape == (2, 2), "")
|
193 |
+
assert_(lp_cleaned.b_eq.shape == (2,), "")
|
194 |
+
|
195 |
+
|
196 |
+
def test__clean_inputs2():
|
197 |
+
lp = _LPProblem(
|
198 |
+
c=1,
|
199 |
+
A_ub=[[1]],
|
200 |
+
b_ub=1,
|
201 |
+
A_eq=[[1]],
|
202 |
+
b_eq=1,
|
203 |
+
bounds=(0, 1)
|
204 |
+
)
|
205 |
+
|
206 |
+
lp_cleaned = _clean_inputs(lp)
|
207 |
+
|
208 |
+
assert_allclose(lp_cleaned.c, np.array(lp.c))
|
209 |
+
assert_allclose(lp_cleaned.A_ub, np.array(lp.A_ub))
|
210 |
+
assert_allclose(lp_cleaned.b_ub, np.array(lp.b_ub))
|
211 |
+
assert_allclose(lp_cleaned.A_eq, np.array(lp.A_eq))
|
212 |
+
assert_allclose(lp_cleaned.b_eq, np.array(lp.b_eq))
|
213 |
+
assert_equal(lp_cleaned.bounds, [(0, 1)])
|
214 |
+
|
215 |
+
assert_(lp_cleaned.c.shape == (1,), "")
|
216 |
+
assert_(lp_cleaned.A_ub.shape == (1, 1), "")
|
217 |
+
assert_(lp_cleaned.b_ub.shape == (1,), "")
|
218 |
+
assert_(lp_cleaned.A_eq.shape == (1, 1), "")
|
219 |
+
assert_(lp_cleaned.b_eq.shape == (1,), "")
|
220 |
+
|
221 |
+
|
222 |
+
def test__clean_inputs3():
|
223 |
+
lp = _LPProblem(
|
224 |
+
c=[[1, 2]],
|
225 |
+
A_ub=np.random.rand(2, 2),
|
226 |
+
b_ub=[[1], [2]],
|
227 |
+
A_eq=np.random.rand(2, 2),
|
228 |
+
b_eq=[[1], [2]],
|
229 |
+
bounds=[(0, 1)]
|
230 |
+
)
|
231 |
+
|
232 |
+
lp_cleaned = _clean_inputs(lp)
|
233 |
+
|
234 |
+
assert_allclose(lp_cleaned.c, np.array([1, 2]))
|
235 |
+
assert_allclose(lp_cleaned.b_ub, np.array([1, 2]))
|
236 |
+
assert_allclose(lp_cleaned.b_eq, np.array([1, 2]))
|
237 |
+
assert_equal(lp_cleaned.bounds, [(0, 1)] * 2)
|
238 |
+
|
239 |
+
assert_(lp_cleaned.c.shape == (2,), "")
|
240 |
+
assert_(lp_cleaned.b_ub.shape == (2,), "")
|
241 |
+
assert_(lp_cleaned.b_eq.shape == (2,), "")
|
242 |
+
|
243 |
+
|
244 |
+
def test_bad_bounds():
|
245 |
+
lp = _LPProblem(c=[1, 2])
|
246 |
+
|
247 |
+
assert_raises(ValueError, _clean_inputs, lp._replace(bounds=(1, 2, 2)))
|
248 |
+
assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2, 2)]))
|
249 |
+
with np.testing.suppress_warnings() as sup:
|
250 |
+
sup.filter(VisibleDeprecationWarning, "Creating an ndarray from ragged")
|
251 |
+
assert_raises(ValueError, _clean_inputs,
|
252 |
+
lp._replace(bounds=[(1, 2), (1, 2, 2)]))
|
253 |
+
assert_raises(ValueError, _clean_inputs,
|
254 |
+
lp._replace(bounds=[(1, 2), (1, 2), (1, 2)]))
|
255 |
+
|
256 |
+
lp = _LPProblem(c=[1, 2, 3, 4])
|
257 |
+
|
258 |
+
assert_raises(ValueError, _clean_inputs,
|
259 |
+
lp._replace(bounds=[(1, 2, 3, 4), (1, 2, 3, 4)]))
|
260 |
+
|
261 |
+
|
262 |
+
def test_good_bounds():
|
263 |
+
lp = _LPProblem(c=[1, 2])
|
264 |
+
|
265 |
+
lp_cleaned = _clean_inputs(lp) # lp.bounds is None by default
|
266 |
+
assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2)
|
267 |
+
|
268 |
+
lp_cleaned = _clean_inputs(lp._replace(bounds=[]))
|
269 |
+
assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2)
|
270 |
+
|
271 |
+
lp_cleaned = _clean_inputs(lp._replace(bounds=[[]]))
|
272 |
+
assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2)
|
273 |
+
|
274 |
+
lp_cleaned = _clean_inputs(lp._replace(bounds=(1, 2)))
|
275 |
+
assert_equal(lp_cleaned.bounds, [(1, 2)] * 2)
|
276 |
+
|
277 |
+
lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, 2)]))
|
278 |
+
assert_equal(lp_cleaned.bounds, [(1, 2)] * 2)
|
279 |
+
|
280 |
+
lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, None)]))
|
281 |
+
assert_equal(lp_cleaned.bounds, [(1, np.inf)] * 2)
|
282 |
+
|
283 |
+
lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, 1)]))
|
284 |
+
assert_equal(lp_cleaned.bounds, [(-np.inf, 1)] * 2)
|
285 |
+
|
286 |
+
lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, None), (-np.inf, None)]))
|
287 |
+
assert_equal(lp_cleaned.bounds, [(-np.inf, np.inf)] * 2)
|
288 |
+
|
289 |
+
lp = _LPProblem(c=[1, 2, 3, 4])
|
290 |
+
|
291 |
+
lp_cleaned = _clean_inputs(lp) # lp.bounds is None by default
|
292 |
+
assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 4)
|
293 |
+
|
294 |
+
lp_cleaned = _clean_inputs(lp._replace(bounds=(1, 2)))
|
295 |
+
assert_equal(lp_cleaned.bounds, [(1, 2)] * 4)
|
296 |
+
|
297 |
+
lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, 2)]))
|
298 |
+
assert_equal(lp_cleaned.bounds, [(1, 2)] * 4)
|
299 |
+
|
300 |
+
lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, None)]))
|
301 |
+
assert_equal(lp_cleaned.bounds, [(1, np.inf)] * 4)
|
302 |
+
|
303 |
+
lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, 1)]))
|
304 |
+
assert_equal(lp_cleaned.bounds, [(-np.inf, 1)] * 4)
|
305 |
+
|
306 |
+
lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, None),
|
307 |
+
(-np.inf, None),
|
308 |
+
(None, np.inf),
|
309 |
+
(-np.inf, np.inf)]))
|
310 |
+
assert_equal(lp_cleaned.bounds, [(-np.inf, np.inf)] * 4)
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__numdiff.py
ADDED
@@ -0,0 +1,815 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import math
|
2 |
+
from itertools import product
|
3 |
+
|
4 |
+
import numpy as np
|
5 |
+
from numpy.testing import assert_allclose, assert_equal, assert_
|
6 |
+
from pytest import raises as assert_raises
|
7 |
+
|
8 |
+
from scipy.sparse import csr_matrix, csc_matrix, lil_matrix
|
9 |
+
|
10 |
+
from scipy.optimize._numdiff import (
|
11 |
+
_adjust_scheme_to_bounds, approx_derivative, check_derivative,
|
12 |
+
group_columns, _eps_for_method, _compute_absolute_step)
|
13 |
+
|
14 |
+
|
15 |
+
def test_group_columns():
|
16 |
+
structure = [
|
17 |
+
[1, 1, 0, 0, 0, 0],
|
18 |
+
[1, 1, 1, 0, 0, 0],
|
19 |
+
[0, 1, 1, 1, 0, 0],
|
20 |
+
[0, 0, 1, 1, 1, 0],
|
21 |
+
[0, 0, 0, 1, 1, 1],
|
22 |
+
[0, 0, 0, 0, 1, 1],
|
23 |
+
[0, 0, 0, 0, 0, 0]
|
24 |
+
]
|
25 |
+
for transform in [np.asarray, csr_matrix, csc_matrix, lil_matrix]:
|
26 |
+
A = transform(structure)
|
27 |
+
order = np.arange(6)
|
28 |
+
groups_true = np.array([0, 1, 2, 0, 1, 2])
|
29 |
+
groups = group_columns(A, order)
|
30 |
+
assert_equal(groups, groups_true)
|
31 |
+
|
32 |
+
order = [1, 2, 4, 3, 5, 0]
|
33 |
+
groups_true = np.array([2, 0, 1, 2, 0, 1])
|
34 |
+
groups = group_columns(A, order)
|
35 |
+
assert_equal(groups, groups_true)
|
36 |
+
|
37 |
+
# Test repeatability.
|
38 |
+
groups_1 = group_columns(A)
|
39 |
+
groups_2 = group_columns(A)
|
40 |
+
assert_equal(groups_1, groups_2)
|
41 |
+
|
42 |
+
|
43 |
+
def test_correct_fp_eps():
|
44 |
+
# check that relative step size is correct for FP size
|
45 |
+
EPS = np.finfo(np.float64).eps
|
46 |
+
relative_step = {"2-point": EPS**0.5,
|
47 |
+
"3-point": EPS**(1/3),
|
48 |
+
"cs": EPS**0.5}
|
49 |
+
for method in ['2-point', '3-point', 'cs']:
|
50 |
+
assert_allclose(
|
51 |
+
_eps_for_method(np.float64, np.float64, method),
|
52 |
+
relative_step[method])
|
53 |
+
assert_allclose(
|
54 |
+
_eps_for_method(np.complex128, np.complex128, method),
|
55 |
+
relative_step[method]
|
56 |
+
)
|
57 |
+
|
58 |
+
# check another FP size
|
59 |
+
EPS = np.finfo(np.float32).eps
|
60 |
+
relative_step = {"2-point": EPS**0.5,
|
61 |
+
"3-point": EPS**(1/3),
|
62 |
+
"cs": EPS**0.5}
|
63 |
+
|
64 |
+
for method in ['2-point', '3-point', 'cs']:
|
65 |
+
assert_allclose(
|
66 |
+
_eps_for_method(np.float64, np.float32, method),
|
67 |
+
relative_step[method]
|
68 |
+
)
|
69 |
+
assert_allclose(
|
70 |
+
_eps_for_method(np.float32, np.float64, method),
|
71 |
+
relative_step[method]
|
72 |
+
)
|
73 |
+
assert_allclose(
|
74 |
+
_eps_for_method(np.float32, np.float32, method),
|
75 |
+
relative_step[method]
|
76 |
+
)
|
77 |
+
|
78 |
+
|
79 |
+
class TestAdjustSchemeToBounds:
|
80 |
+
def test_no_bounds(self):
|
81 |
+
x0 = np.zeros(3)
|
82 |
+
h = np.full(3, 1e-2)
|
83 |
+
inf_lower = np.empty_like(x0)
|
84 |
+
inf_upper = np.empty_like(x0)
|
85 |
+
inf_lower.fill(-np.inf)
|
86 |
+
inf_upper.fill(np.inf)
|
87 |
+
|
88 |
+
h_adjusted, one_sided = _adjust_scheme_to_bounds(
|
89 |
+
x0, h, 1, '1-sided', inf_lower, inf_upper)
|
90 |
+
assert_allclose(h_adjusted, h)
|
91 |
+
assert_(np.all(one_sided))
|
92 |
+
|
93 |
+
h_adjusted, one_sided = _adjust_scheme_to_bounds(
|
94 |
+
x0, h, 2, '1-sided', inf_lower, inf_upper)
|
95 |
+
assert_allclose(h_adjusted, h)
|
96 |
+
assert_(np.all(one_sided))
|
97 |
+
|
98 |
+
h_adjusted, one_sided = _adjust_scheme_to_bounds(
|
99 |
+
x0, h, 1, '2-sided', inf_lower, inf_upper)
|
100 |
+
assert_allclose(h_adjusted, h)
|
101 |
+
assert_(np.all(~one_sided))
|
102 |
+
|
103 |
+
h_adjusted, one_sided = _adjust_scheme_to_bounds(
|
104 |
+
x0, h, 2, '2-sided', inf_lower, inf_upper)
|
105 |
+
assert_allclose(h_adjusted, h)
|
106 |
+
assert_(np.all(~one_sided))
|
107 |
+
|
108 |
+
def test_with_bound(self):
|
109 |
+
x0 = np.array([0.0, 0.85, -0.85])
|
110 |
+
lb = -np.ones(3)
|
111 |
+
ub = np.ones(3)
|
112 |
+
h = np.array([1, 1, -1]) * 1e-1
|
113 |
+
|
114 |
+
h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub)
|
115 |
+
assert_allclose(h_adjusted, h)
|
116 |
+
|
117 |
+
h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, ub)
|
118 |
+
assert_allclose(h_adjusted, np.array([1, -1, 1]) * 1e-1)
|
119 |
+
|
120 |
+
h_adjusted, one_sided = _adjust_scheme_to_bounds(
|
121 |
+
x0, h, 1, '2-sided', lb, ub)
|
122 |
+
assert_allclose(h_adjusted, np.abs(h))
|
123 |
+
assert_(np.all(~one_sided))
|
124 |
+
|
125 |
+
h_adjusted, one_sided = _adjust_scheme_to_bounds(
|
126 |
+
x0, h, 2, '2-sided', lb, ub)
|
127 |
+
assert_allclose(h_adjusted, np.array([1, -1, 1]) * 1e-1)
|
128 |
+
assert_equal(one_sided, np.array([False, True, True]))
|
129 |
+
|
130 |
+
def test_tight_bounds(self):
|
131 |
+
lb = np.array([-0.03, -0.03])
|
132 |
+
ub = np.array([0.05, 0.05])
|
133 |
+
x0 = np.array([0.0, 0.03])
|
134 |
+
h = np.array([-0.1, -0.1])
|
135 |
+
|
136 |
+
h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub)
|
137 |
+
assert_allclose(h_adjusted, np.array([0.05, -0.06]))
|
138 |
+
|
139 |
+
h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, ub)
|
140 |
+
assert_allclose(h_adjusted, np.array([0.025, -0.03]))
|
141 |
+
|
142 |
+
h_adjusted, one_sided = _adjust_scheme_to_bounds(
|
143 |
+
x0, h, 1, '2-sided', lb, ub)
|
144 |
+
assert_allclose(h_adjusted, np.array([0.03, -0.03]))
|
145 |
+
assert_equal(one_sided, np.array([False, True]))
|
146 |
+
|
147 |
+
h_adjusted, one_sided = _adjust_scheme_to_bounds(
|
148 |
+
x0, h, 2, '2-sided', lb, ub)
|
149 |
+
assert_allclose(h_adjusted, np.array([0.015, -0.015]))
|
150 |
+
assert_equal(one_sided, np.array([False, True]))
|
151 |
+
|
152 |
+
|
153 |
+
class TestApproxDerivativesDense:
|
154 |
+
def fun_scalar_scalar(self, x):
|
155 |
+
return np.sinh(x)
|
156 |
+
|
157 |
+
def jac_scalar_scalar(self, x):
|
158 |
+
return np.cosh(x)
|
159 |
+
|
160 |
+
def fun_scalar_vector(self, x):
|
161 |
+
return np.array([x[0]**2, np.tan(x[0]), np.exp(x[0])])
|
162 |
+
|
163 |
+
def jac_scalar_vector(self, x):
|
164 |
+
return np.array(
|
165 |
+
[2 * x[0], np.cos(x[0]) ** -2, np.exp(x[0])]).reshape(-1, 1)
|
166 |
+
|
167 |
+
def fun_vector_scalar(self, x):
|
168 |
+
return np.sin(x[0] * x[1]) * np.log(x[0])
|
169 |
+
|
170 |
+
def wrong_dimensions_fun(self, x):
|
171 |
+
return np.array([x**2, np.tan(x), np.exp(x)])
|
172 |
+
|
173 |
+
def jac_vector_scalar(self, x):
|
174 |
+
return np.array([
|
175 |
+
x[1] * np.cos(x[0] * x[1]) * np.log(x[0]) +
|
176 |
+
np.sin(x[0] * x[1]) / x[0],
|
177 |
+
x[0] * np.cos(x[0] * x[1]) * np.log(x[0])
|
178 |
+
])
|
179 |
+
|
180 |
+
def fun_vector_vector(self, x):
|
181 |
+
return np.array([
|
182 |
+
x[0] * np.sin(x[1]),
|
183 |
+
x[1] * np.cos(x[0]),
|
184 |
+
x[0] ** 3 * x[1] ** -0.5
|
185 |
+
])
|
186 |
+
|
187 |
+
def jac_vector_vector(self, x):
|
188 |
+
return np.array([
|
189 |
+
[np.sin(x[1]), x[0] * np.cos(x[1])],
|
190 |
+
[-x[1] * np.sin(x[0]), np.cos(x[0])],
|
191 |
+
[3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5]
|
192 |
+
])
|
193 |
+
|
194 |
+
def fun_parametrized(self, x, c0, c1=1.0):
|
195 |
+
return np.array([np.exp(c0 * x[0]), np.exp(c1 * x[1])])
|
196 |
+
|
197 |
+
def jac_parametrized(self, x, c0, c1=0.1):
|
198 |
+
return np.array([
|
199 |
+
[c0 * np.exp(c0 * x[0]), 0],
|
200 |
+
[0, c1 * np.exp(c1 * x[1])]
|
201 |
+
])
|
202 |
+
|
203 |
+
def fun_with_nan(self, x):
|
204 |
+
return x if np.abs(x) <= 1e-8 else np.nan
|
205 |
+
|
206 |
+
def jac_with_nan(self, x):
|
207 |
+
return 1.0 if np.abs(x) <= 1e-8 else np.nan
|
208 |
+
|
209 |
+
def fun_zero_jacobian(self, x):
|
210 |
+
return np.array([x[0] * x[1], np.cos(x[0] * x[1])])
|
211 |
+
|
212 |
+
def jac_zero_jacobian(self, x):
|
213 |
+
return np.array([
|
214 |
+
[x[1], x[0]],
|
215 |
+
[-x[1] * np.sin(x[0] * x[1]), -x[0] * np.sin(x[0] * x[1])]
|
216 |
+
])
|
217 |
+
|
218 |
+
def jac_non_numpy(self, x):
|
219 |
+
# x can be a scalar or an array [val].
|
220 |
+
# Cast to true scalar before handing over to math.exp
|
221 |
+
xp = np.asarray(x).item()
|
222 |
+
return math.exp(xp)
|
223 |
+
|
224 |
+
def test_scalar_scalar(self):
|
225 |
+
x0 = 1.0
|
226 |
+
jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0,
|
227 |
+
method='2-point')
|
228 |
+
jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0)
|
229 |
+
jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0,
|
230 |
+
method='cs')
|
231 |
+
jac_true = self.jac_scalar_scalar(x0)
|
232 |
+
assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
|
233 |
+
assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
|
234 |
+
assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
|
235 |
+
|
236 |
+
def test_scalar_scalar_abs_step(self):
|
237 |
+
# can approx_derivative use abs_step?
|
238 |
+
x0 = 1.0
|
239 |
+
jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0,
|
240 |
+
method='2-point', abs_step=1.49e-8)
|
241 |
+
jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0,
|
242 |
+
abs_step=1.49e-8)
|
243 |
+
jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0,
|
244 |
+
method='cs', abs_step=1.49e-8)
|
245 |
+
jac_true = self.jac_scalar_scalar(x0)
|
246 |
+
assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
|
247 |
+
assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
|
248 |
+
assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
|
249 |
+
|
250 |
+
def test_scalar_vector(self):
|
251 |
+
x0 = 0.5
|
252 |
+
jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0,
|
253 |
+
method='2-point')
|
254 |
+
jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0)
|
255 |
+
jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0,
|
256 |
+
method='cs')
|
257 |
+
jac_true = self.jac_scalar_vector(np.atleast_1d(x0))
|
258 |
+
assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
|
259 |
+
assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
|
260 |
+
assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
|
261 |
+
|
262 |
+
def test_vector_scalar(self):
|
263 |
+
x0 = np.array([100.0, -0.5])
|
264 |
+
jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0,
|
265 |
+
method='2-point')
|
266 |
+
jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0)
|
267 |
+
jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0,
|
268 |
+
method='cs')
|
269 |
+
jac_true = self.jac_vector_scalar(x0)
|
270 |
+
assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
|
271 |
+
assert_allclose(jac_diff_3, jac_true, rtol=1e-7)
|
272 |
+
assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
|
273 |
+
|
274 |
+
def test_vector_scalar_abs_step(self):
|
275 |
+
# can approx_derivative use abs_step?
|
276 |
+
x0 = np.array([100.0, -0.5])
|
277 |
+
jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0,
|
278 |
+
method='2-point', abs_step=1.49e-8)
|
279 |
+
jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0,
|
280 |
+
abs_step=1.49e-8, rel_step=np.inf)
|
281 |
+
jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0,
|
282 |
+
method='cs', abs_step=1.49e-8)
|
283 |
+
jac_true = self.jac_vector_scalar(x0)
|
284 |
+
assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
|
285 |
+
assert_allclose(jac_diff_3, jac_true, rtol=3e-9)
|
286 |
+
assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
|
287 |
+
|
288 |
+
def test_vector_vector(self):
|
289 |
+
x0 = np.array([-100.0, 0.2])
|
290 |
+
jac_diff_2 = approx_derivative(self.fun_vector_vector, x0,
|
291 |
+
method='2-point')
|
292 |
+
jac_diff_3 = approx_derivative(self.fun_vector_vector, x0)
|
293 |
+
jac_diff_4 = approx_derivative(self.fun_vector_vector, x0,
|
294 |
+
method='cs')
|
295 |
+
jac_true = self.jac_vector_vector(x0)
|
296 |
+
assert_allclose(jac_diff_2, jac_true, rtol=1e-5)
|
297 |
+
assert_allclose(jac_diff_3, jac_true, rtol=1e-6)
|
298 |
+
assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
|
299 |
+
|
300 |
+
def test_wrong_dimensions(self):
|
301 |
+
x0 = 1.0
|
302 |
+
assert_raises(RuntimeError, approx_derivative,
|
303 |
+
self.wrong_dimensions_fun, x0)
|
304 |
+
f0 = self.wrong_dimensions_fun(np.atleast_1d(x0))
|
305 |
+
assert_raises(ValueError, approx_derivative,
|
306 |
+
self.wrong_dimensions_fun, x0, f0=f0)
|
307 |
+
|
308 |
+
def test_custom_rel_step(self):
|
309 |
+
x0 = np.array([-0.1, 0.1])
|
310 |
+
jac_diff_2 = approx_derivative(self.fun_vector_vector, x0,
|
311 |
+
method='2-point', rel_step=1e-4)
|
312 |
+
jac_diff_3 = approx_derivative(self.fun_vector_vector, x0,
|
313 |
+
rel_step=1e-4)
|
314 |
+
jac_true = self.jac_vector_vector(x0)
|
315 |
+
assert_allclose(jac_diff_2, jac_true, rtol=1e-2)
|
316 |
+
assert_allclose(jac_diff_3, jac_true, rtol=1e-4)
|
317 |
+
|
318 |
+
def test_options(self):
|
319 |
+
x0 = np.array([1.0, 1.0])
|
320 |
+
c0 = -1.0
|
321 |
+
c1 = 1.0
|
322 |
+
lb = 0.0
|
323 |
+
ub = 2.0
|
324 |
+
f0 = self.fun_parametrized(x0, c0, c1=c1)
|
325 |
+
rel_step = np.array([-1e-6, 1e-7])
|
326 |
+
jac_true = self.jac_parametrized(x0, c0, c1)
|
327 |
+
jac_diff_2 = approx_derivative(
|
328 |
+
self.fun_parametrized, x0, method='2-point', rel_step=rel_step,
|
329 |
+
f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub))
|
330 |
+
jac_diff_3 = approx_derivative(
|
331 |
+
self.fun_parametrized, x0, rel_step=rel_step,
|
332 |
+
f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub))
|
333 |
+
assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
|
334 |
+
assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
|
335 |
+
|
336 |
+
def test_with_bounds_2_point(self):
|
337 |
+
lb = -np.ones(2)
|
338 |
+
ub = np.ones(2)
|
339 |
+
|
340 |
+
x0 = np.array([-2.0, 0.2])
|
341 |
+
assert_raises(ValueError, approx_derivative,
|
342 |
+
self.fun_vector_vector, x0, bounds=(lb, ub))
|
343 |
+
|
344 |
+
x0 = np.array([-1.0, 1.0])
|
345 |
+
jac_diff = approx_derivative(self.fun_vector_vector, x0,
|
346 |
+
method='2-point', bounds=(lb, ub))
|
347 |
+
jac_true = self.jac_vector_vector(x0)
|
348 |
+
assert_allclose(jac_diff, jac_true, rtol=1e-6)
|
349 |
+
|
350 |
+
def test_with_bounds_3_point(self):
|
351 |
+
lb = np.array([1.0, 1.0])
|
352 |
+
ub = np.array([2.0, 2.0])
|
353 |
+
|
354 |
+
x0 = np.array([1.0, 2.0])
|
355 |
+
jac_true = self.jac_vector_vector(x0)
|
356 |
+
|
357 |
+
jac_diff = approx_derivative(self.fun_vector_vector, x0)
|
358 |
+
assert_allclose(jac_diff, jac_true, rtol=1e-9)
|
359 |
+
|
360 |
+
jac_diff = approx_derivative(self.fun_vector_vector, x0,
|
361 |
+
bounds=(lb, np.inf))
|
362 |
+
assert_allclose(jac_diff, jac_true, rtol=1e-9)
|
363 |
+
|
364 |
+
jac_diff = approx_derivative(self.fun_vector_vector, x0,
|
365 |
+
bounds=(-np.inf, ub))
|
366 |
+
assert_allclose(jac_diff, jac_true, rtol=1e-9)
|
367 |
+
|
368 |
+
jac_diff = approx_derivative(self.fun_vector_vector, x0,
|
369 |
+
bounds=(lb, ub))
|
370 |
+
assert_allclose(jac_diff, jac_true, rtol=1e-9)
|
371 |
+
|
372 |
+
def test_tight_bounds(self):
|
373 |
+
x0 = np.array([10.0, 10.0])
|
374 |
+
lb = x0 - 3e-9
|
375 |
+
ub = x0 + 2e-9
|
376 |
+
jac_true = self.jac_vector_vector(x0)
|
377 |
+
jac_diff = approx_derivative(
|
378 |
+
self.fun_vector_vector, x0, method='2-point', bounds=(lb, ub))
|
379 |
+
assert_allclose(jac_diff, jac_true, rtol=1e-6)
|
380 |
+
jac_diff = approx_derivative(
|
381 |
+
self.fun_vector_vector, x0, method='2-point',
|
382 |
+
rel_step=1e-6, bounds=(lb, ub))
|
383 |
+
assert_allclose(jac_diff, jac_true, rtol=1e-6)
|
384 |
+
|
385 |
+
jac_diff = approx_derivative(
|
386 |
+
self.fun_vector_vector, x0, bounds=(lb, ub))
|
387 |
+
assert_allclose(jac_diff, jac_true, rtol=1e-6)
|
388 |
+
jac_diff = approx_derivative(
|
389 |
+
self.fun_vector_vector, x0, rel_step=1e-6, bounds=(lb, ub))
|
390 |
+
assert_allclose(jac_true, jac_diff, rtol=1e-6)
|
391 |
+
|
392 |
+
def test_bound_switches(self):
|
393 |
+
lb = -1e-8
|
394 |
+
ub = 1e-8
|
395 |
+
x0 = 0.0
|
396 |
+
jac_true = self.jac_with_nan(x0)
|
397 |
+
jac_diff_2 = approx_derivative(
|
398 |
+
self.fun_with_nan, x0, method='2-point', rel_step=1e-6,
|
399 |
+
bounds=(lb, ub))
|
400 |
+
jac_diff_3 = approx_derivative(
|
401 |
+
self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub))
|
402 |
+
assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
|
403 |
+
assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
|
404 |
+
|
405 |
+
x0 = 1e-8
|
406 |
+
jac_true = self.jac_with_nan(x0)
|
407 |
+
jac_diff_2 = approx_derivative(
|
408 |
+
self.fun_with_nan, x0, method='2-point', rel_step=1e-6,
|
409 |
+
bounds=(lb, ub))
|
410 |
+
jac_diff_3 = approx_derivative(
|
411 |
+
self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub))
|
412 |
+
assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
|
413 |
+
assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
|
414 |
+
|
415 |
+
def test_non_numpy(self):
|
416 |
+
x0 = 1.0
|
417 |
+
jac_true = self.jac_non_numpy(x0)
|
418 |
+
jac_diff_2 = approx_derivative(self.jac_non_numpy, x0,
|
419 |
+
method='2-point')
|
420 |
+
jac_diff_3 = approx_derivative(self.jac_non_numpy, x0)
|
421 |
+
assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
|
422 |
+
assert_allclose(jac_diff_3, jac_true, rtol=1e-8)
|
423 |
+
|
424 |
+
# math.exp cannot handle complex arguments, hence this raises
|
425 |
+
assert_raises(TypeError, approx_derivative, self.jac_non_numpy, x0,
|
426 |
+
**dict(method='cs'))
|
427 |
+
|
428 |
+
def test_fp(self):
|
429 |
+
# checks that approx_derivative works for FP size other than 64.
|
430 |
+
# Example is derived from the minimal working example in gh12991.
|
431 |
+
np.random.seed(1)
|
432 |
+
|
433 |
+
def func(p, x):
|
434 |
+
return p[0] + p[1] * x
|
435 |
+
|
436 |
+
def err(p, x, y):
|
437 |
+
return func(p, x) - y
|
438 |
+
|
439 |
+
x = np.linspace(0, 1, 100, dtype=np.float64)
|
440 |
+
y = np.random.random(100).astype(np.float64)
|
441 |
+
p0 = np.array([-1.0, -1.0])
|
442 |
+
|
443 |
+
jac_fp64 = approx_derivative(err, p0, method='2-point', args=(x, y))
|
444 |
+
|
445 |
+
# parameter vector is float32, func output is float64
|
446 |
+
jac_fp = approx_derivative(err, p0.astype(np.float32),
|
447 |
+
method='2-point', args=(x, y))
|
448 |
+
assert err(p0, x, y).dtype == np.float64
|
449 |
+
assert_allclose(jac_fp, jac_fp64, atol=1e-3)
|
450 |
+
|
451 |
+
# parameter vector is float64, func output is float32
|
452 |
+
def err_fp32(p):
|
453 |
+
assert p.dtype == np.float32
|
454 |
+
return err(p, x, y).astype(np.float32)
|
455 |
+
|
456 |
+
jac_fp = approx_derivative(err_fp32, p0.astype(np.float32),
|
457 |
+
method='2-point')
|
458 |
+
assert_allclose(jac_fp, jac_fp64, atol=1e-3)
|
459 |
+
|
460 |
+
# check upper bound of error on the derivative for 2-point
|
461 |
+
def f(x):
|
462 |
+
return np.sin(x)
|
463 |
+
def g(x):
|
464 |
+
return np.cos(x)
|
465 |
+
def hess(x):
|
466 |
+
return -np.sin(x)
|
467 |
+
|
468 |
+
def calc_atol(h, x0, f, hess, EPS):
|
469 |
+
# truncation error
|
470 |
+
t0 = h / 2 * max(np.abs(hess(x0)), np.abs(hess(x0 + h)))
|
471 |
+
# roundoff error. There may be a divisor (>1) missing from
|
472 |
+
# the following line, so this contribution is possibly
|
473 |
+
# overestimated
|
474 |
+
t1 = EPS / h * max(np.abs(f(x0)), np.abs(f(x0 + h)))
|
475 |
+
return t0 + t1
|
476 |
+
|
477 |
+
for dtype in [np.float16, np.float32, np.float64]:
|
478 |
+
EPS = np.finfo(dtype).eps
|
479 |
+
x0 = np.array(1.0).astype(dtype)
|
480 |
+
h = _compute_absolute_step(None, x0, f(x0), '2-point')
|
481 |
+
atol = calc_atol(h, x0, f, hess, EPS)
|
482 |
+
err = approx_derivative(f, x0, method='2-point',
|
483 |
+
abs_step=h) - g(x0)
|
484 |
+
assert abs(err) < atol
|
485 |
+
|
486 |
+
def test_check_derivative(self):
|
487 |
+
x0 = np.array([-10.0, 10])
|
488 |
+
accuracy = check_derivative(self.fun_vector_vector,
|
489 |
+
self.jac_vector_vector, x0)
|
490 |
+
assert_(accuracy < 1e-9)
|
491 |
+
accuracy = check_derivative(self.fun_vector_vector,
|
492 |
+
self.jac_vector_vector, x0)
|
493 |
+
assert_(accuracy < 1e-6)
|
494 |
+
|
495 |
+
x0 = np.array([0.0, 0.0])
|
496 |
+
accuracy = check_derivative(self.fun_zero_jacobian,
|
497 |
+
self.jac_zero_jacobian, x0)
|
498 |
+
assert_(accuracy == 0)
|
499 |
+
accuracy = check_derivative(self.fun_zero_jacobian,
|
500 |
+
self.jac_zero_jacobian, x0)
|
501 |
+
assert_(accuracy == 0)
|
502 |
+
|
503 |
+
|
504 |
+
class TestApproxDerivativeSparse:
|
505 |
+
# Example from Numerical Optimization 2nd edition, p. 198.
|
506 |
+
def setup_method(self):
|
507 |
+
np.random.seed(0)
|
508 |
+
self.n = 50
|
509 |
+
self.lb = -0.1 * (1 + np.arange(self.n))
|
510 |
+
self.ub = 0.1 * (1 + np.arange(self.n))
|
511 |
+
self.x0 = np.empty(self.n)
|
512 |
+
self.x0[::2] = (1 - 1e-7) * self.lb[::2]
|
513 |
+
self.x0[1::2] = (1 - 1e-7) * self.ub[1::2]
|
514 |
+
|
515 |
+
self.J_true = self.jac(self.x0)
|
516 |
+
|
517 |
+
def fun(self, x):
|
518 |
+
e = x[1:]**3 - x[:-1]**2
|
519 |
+
return np.hstack((0, 3 * e)) + np.hstack((2 * e, 0))
|
520 |
+
|
521 |
+
def jac(self, x):
|
522 |
+
n = x.size
|
523 |
+
J = np.zeros((n, n))
|
524 |
+
J[0, 0] = -4 * x[0]
|
525 |
+
J[0, 1] = 6 * x[1]**2
|
526 |
+
for i in range(1, n - 1):
|
527 |
+
J[i, i - 1] = -6 * x[i-1]
|
528 |
+
J[i, i] = 9 * x[i]**2 - 4 * x[i]
|
529 |
+
J[i, i + 1] = 6 * x[i+1]**2
|
530 |
+
J[-1, -1] = 9 * x[-1]**2
|
531 |
+
J[-1, -2] = -6 * x[-2]
|
532 |
+
|
533 |
+
return J
|
534 |
+
|
535 |
+
def structure(self, n):
|
536 |
+
A = np.zeros((n, n), dtype=int)
|
537 |
+
A[0, 0] = 1
|
538 |
+
A[0, 1] = 1
|
539 |
+
for i in range(1, n - 1):
|
540 |
+
A[i, i - 1: i + 2] = 1
|
541 |
+
A[-1, -1] = 1
|
542 |
+
A[-1, -2] = 1
|
543 |
+
|
544 |
+
return A
|
545 |
+
|
546 |
+
def test_all(self):
|
547 |
+
A = self.structure(self.n)
|
548 |
+
order = np.arange(self.n)
|
549 |
+
groups_1 = group_columns(A, order)
|
550 |
+
np.random.shuffle(order)
|
551 |
+
groups_2 = group_columns(A, order)
|
552 |
+
|
553 |
+
for method, groups, l, u in product(
|
554 |
+
['2-point', '3-point', 'cs'], [groups_1, groups_2],
|
555 |
+
[-np.inf, self.lb], [np.inf, self.ub]):
|
556 |
+
J = approx_derivative(self.fun, self.x0, method=method,
|
557 |
+
bounds=(l, u), sparsity=(A, groups))
|
558 |
+
assert_(isinstance(J, csr_matrix))
|
559 |
+
assert_allclose(J.toarray(), self.J_true, rtol=1e-6)
|
560 |
+
|
561 |
+
rel_step = np.full_like(self.x0, 1e-8)
|
562 |
+
rel_step[::2] *= -1
|
563 |
+
J = approx_derivative(self.fun, self.x0, method=method,
|
564 |
+
rel_step=rel_step, sparsity=(A, groups))
|
565 |
+
assert_allclose(J.toarray(), self.J_true, rtol=1e-5)
|
566 |
+
|
567 |
+
def test_no_precomputed_groups(self):
|
568 |
+
A = self.structure(self.n)
|
569 |
+
J = approx_derivative(self.fun, self.x0, sparsity=A)
|
570 |
+
assert_allclose(J.toarray(), self.J_true, rtol=1e-6)
|
571 |
+
|
572 |
+
def test_equivalence(self):
|
573 |
+
structure = np.ones((self.n, self.n), dtype=int)
|
574 |
+
groups = np.arange(self.n)
|
575 |
+
for method in ['2-point', '3-point', 'cs']:
|
576 |
+
J_dense = approx_derivative(self.fun, self.x0, method=method)
|
577 |
+
J_sparse = approx_derivative(
|
578 |
+
self.fun, self.x0, sparsity=(structure, groups), method=method)
|
579 |
+
assert_allclose(J_dense, J_sparse.toarray(),
|
580 |
+
rtol=5e-16, atol=7e-15)
|
581 |
+
|
582 |
+
def test_check_derivative(self):
|
583 |
+
def jac(x):
|
584 |
+
return csr_matrix(self.jac(x))
|
585 |
+
|
586 |
+
accuracy = check_derivative(self.fun, jac, self.x0,
|
587 |
+
bounds=(self.lb, self.ub))
|
588 |
+
assert_(accuracy < 1e-9)
|
589 |
+
|
590 |
+
accuracy = check_derivative(self.fun, jac, self.x0,
|
591 |
+
bounds=(self.lb, self.ub))
|
592 |
+
assert_(accuracy < 1e-9)
|
593 |
+
|
594 |
+
|
595 |
+
class TestApproxDerivativeLinearOperator:
|
596 |
+
|
597 |
+
def fun_scalar_scalar(self, x):
|
598 |
+
return np.sinh(x)
|
599 |
+
|
600 |
+
def jac_scalar_scalar(self, x):
|
601 |
+
return np.cosh(x)
|
602 |
+
|
603 |
+
def fun_scalar_vector(self, x):
|
604 |
+
return np.array([x[0]**2, np.tan(x[0]), np.exp(x[0])])
|
605 |
+
|
606 |
+
def jac_scalar_vector(self, x):
|
607 |
+
return np.array(
|
608 |
+
[2 * x[0], np.cos(x[0]) ** -2, np.exp(x[0])]).reshape(-1, 1)
|
609 |
+
|
610 |
+
def fun_vector_scalar(self, x):
|
611 |
+
return np.sin(x[0] * x[1]) * np.log(x[0])
|
612 |
+
|
613 |
+
def jac_vector_scalar(self, x):
|
614 |
+
return np.array([
|
615 |
+
x[1] * np.cos(x[0] * x[1]) * np.log(x[0]) +
|
616 |
+
np.sin(x[0] * x[1]) / x[0],
|
617 |
+
x[0] * np.cos(x[0] * x[1]) * np.log(x[0])
|
618 |
+
])
|
619 |
+
|
620 |
+
def fun_vector_vector(self, x):
|
621 |
+
return np.array([
|
622 |
+
x[0] * np.sin(x[1]),
|
623 |
+
x[1] * np.cos(x[0]),
|
624 |
+
x[0] ** 3 * x[1] ** -0.5
|
625 |
+
])
|
626 |
+
|
627 |
+
def jac_vector_vector(self, x):
|
628 |
+
return np.array([
|
629 |
+
[np.sin(x[1]), x[0] * np.cos(x[1])],
|
630 |
+
[-x[1] * np.sin(x[0]), np.cos(x[0])],
|
631 |
+
[3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5]
|
632 |
+
])
|
633 |
+
|
634 |
+
def test_scalar_scalar(self):
|
635 |
+
x0 = 1.0
|
636 |
+
jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0,
|
637 |
+
method='2-point',
|
638 |
+
as_linear_operator=True)
|
639 |
+
jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0,
|
640 |
+
as_linear_operator=True)
|
641 |
+
jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0,
|
642 |
+
method='cs',
|
643 |
+
as_linear_operator=True)
|
644 |
+
jac_true = self.jac_scalar_scalar(x0)
|
645 |
+
np.random.seed(1)
|
646 |
+
for i in range(10):
|
647 |
+
p = np.random.uniform(-10, 10, size=(1,))
|
648 |
+
assert_allclose(jac_diff_2.dot(p), jac_true*p,
|
649 |
+
rtol=1e-5)
|
650 |
+
assert_allclose(jac_diff_3.dot(p), jac_true*p,
|
651 |
+
rtol=5e-6)
|
652 |
+
assert_allclose(jac_diff_4.dot(p), jac_true*p,
|
653 |
+
rtol=5e-6)
|
654 |
+
|
655 |
+
def test_scalar_vector(self):
|
656 |
+
x0 = 0.5
|
657 |
+
jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0,
|
658 |
+
method='2-point',
|
659 |
+
as_linear_operator=True)
|
660 |
+
jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0,
|
661 |
+
as_linear_operator=True)
|
662 |
+
jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0,
|
663 |
+
method='cs',
|
664 |
+
as_linear_operator=True)
|
665 |
+
jac_true = self.jac_scalar_vector(np.atleast_1d(x0))
|
666 |
+
np.random.seed(1)
|
667 |
+
for i in range(10):
|
668 |
+
p = np.random.uniform(-10, 10, size=(1,))
|
669 |
+
assert_allclose(jac_diff_2.dot(p), jac_true.dot(p),
|
670 |
+
rtol=1e-5)
|
671 |
+
assert_allclose(jac_diff_3.dot(p), jac_true.dot(p),
|
672 |
+
rtol=5e-6)
|
673 |
+
assert_allclose(jac_diff_4.dot(p), jac_true.dot(p),
|
674 |
+
rtol=5e-6)
|
675 |
+
|
676 |
+
def test_vector_scalar(self):
|
677 |
+
x0 = np.array([100.0, -0.5])
|
678 |
+
jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0,
|
679 |
+
method='2-point',
|
680 |
+
as_linear_operator=True)
|
681 |
+
jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0,
|
682 |
+
as_linear_operator=True)
|
683 |
+
jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0,
|
684 |
+
method='cs',
|
685 |
+
as_linear_operator=True)
|
686 |
+
jac_true = self.jac_vector_scalar(x0)
|
687 |
+
np.random.seed(1)
|
688 |
+
for i in range(10):
|
689 |
+
p = np.random.uniform(-10, 10, size=x0.shape)
|
690 |
+
assert_allclose(jac_diff_2.dot(p), np.atleast_1d(jac_true.dot(p)),
|
691 |
+
rtol=1e-5)
|
692 |
+
assert_allclose(jac_diff_3.dot(p), np.atleast_1d(jac_true.dot(p)),
|
693 |
+
rtol=5e-6)
|
694 |
+
assert_allclose(jac_diff_4.dot(p), np.atleast_1d(jac_true.dot(p)),
|
695 |
+
rtol=1e-7)
|
696 |
+
|
697 |
+
def test_vector_vector(self):
|
698 |
+
x0 = np.array([-100.0, 0.2])
|
699 |
+
jac_diff_2 = approx_derivative(self.fun_vector_vector, x0,
|
700 |
+
method='2-point',
|
701 |
+
as_linear_operator=True)
|
702 |
+
jac_diff_3 = approx_derivative(self.fun_vector_vector, x0,
|
703 |
+
as_linear_operator=True)
|
704 |
+
jac_diff_4 = approx_derivative(self.fun_vector_vector, x0,
|
705 |
+
method='cs',
|
706 |
+
as_linear_operator=True)
|
707 |
+
jac_true = self.jac_vector_vector(x0)
|
708 |
+
np.random.seed(1)
|
709 |
+
for i in range(10):
|
710 |
+
p = np.random.uniform(-10, 10, size=x0.shape)
|
711 |
+
assert_allclose(jac_diff_2.dot(p), jac_true.dot(p), rtol=1e-5)
|
712 |
+
assert_allclose(jac_diff_3.dot(p), jac_true.dot(p), rtol=1e-6)
|
713 |
+
assert_allclose(jac_diff_4.dot(p), jac_true.dot(p), rtol=1e-7)
|
714 |
+
|
715 |
+
def test_exception(self):
|
716 |
+
x0 = np.array([-100.0, 0.2])
|
717 |
+
assert_raises(ValueError, approx_derivative,
|
718 |
+
self.fun_vector_vector, x0,
|
719 |
+
method='2-point', bounds=(1, np.inf))
|
720 |
+
|
721 |
+
|
722 |
+
def test_absolute_step_sign():
|
723 |
+
# test for gh12487
|
724 |
+
# if an absolute step is specified for 2-point differences make sure that
|
725 |
+
# the side corresponds to the step. i.e. if step is positive then forward
|
726 |
+
# differences should be used, if step is negative then backwards
|
727 |
+
# differences should be used.
|
728 |
+
|
729 |
+
# function has double discontinuity at x = [-1, -1]
|
730 |
+
# first component is \/, second component is /\
|
731 |
+
def f(x):
|
732 |
+
return -np.abs(x[0] + 1) + np.abs(x[1] + 1)
|
733 |
+
|
734 |
+
# check that the forward difference is used
|
735 |
+
grad = approx_derivative(f, [-1, -1], method='2-point', abs_step=1e-8)
|
736 |
+
assert_allclose(grad, [-1.0, 1.0])
|
737 |
+
|
738 |
+
# check that the backwards difference is used
|
739 |
+
grad = approx_derivative(f, [-1, -1], method='2-point', abs_step=-1e-8)
|
740 |
+
assert_allclose(grad, [1.0, -1.0])
|
741 |
+
|
742 |
+
# check that the forwards difference is used with a step for both
|
743 |
+
# parameters
|
744 |
+
grad = approx_derivative(
|
745 |
+
f, [-1, -1], method='2-point', abs_step=[1e-8, 1e-8]
|
746 |
+
)
|
747 |
+
assert_allclose(grad, [-1.0, 1.0])
|
748 |
+
|
749 |
+
# check that we can mix forward/backwards steps.
|
750 |
+
grad = approx_derivative(
|
751 |
+
f, [-1, -1], method='2-point', abs_step=[1e-8, -1e-8]
|
752 |
+
)
|
753 |
+
assert_allclose(grad, [-1.0, -1.0])
|
754 |
+
grad = approx_derivative(
|
755 |
+
f, [-1, -1], method='2-point', abs_step=[-1e-8, 1e-8]
|
756 |
+
)
|
757 |
+
assert_allclose(grad, [1.0, 1.0])
|
758 |
+
|
759 |
+
# the forward step should reverse to a backwards step if it runs into a
|
760 |
+
# bound
|
761 |
+
# This is kind of tested in TestAdjustSchemeToBounds, but only for a lower level
|
762 |
+
# function.
|
763 |
+
grad = approx_derivative(
|
764 |
+
f, [-1, -1], method='2-point', abs_step=1e-8,
|
765 |
+
bounds=(-np.inf, -1)
|
766 |
+
)
|
767 |
+
assert_allclose(grad, [1.0, -1.0])
|
768 |
+
|
769 |
+
grad = approx_derivative(
|
770 |
+
f, [-1, -1], method='2-point', abs_step=-1e-8, bounds=(-1, np.inf)
|
771 |
+
)
|
772 |
+
assert_allclose(grad, [-1.0, 1.0])
|
773 |
+
|
774 |
+
|
775 |
+
def test__compute_absolute_step():
|
776 |
+
# tests calculation of absolute step from rel_step
|
777 |
+
methods = ['2-point', '3-point', 'cs']
|
778 |
+
|
779 |
+
x0 = np.array([1e-5, 0, 1, 1e5])
|
780 |
+
|
781 |
+
EPS = np.finfo(np.float64).eps
|
782 |
+
relative_step = {
|
783 |
+
"2-point": EPS**0.5,
|
784 |
+
"3-point": EPS**(1/3),
|
785 |
+
"cs": EPS**0.5
|
786 |
+
}
|
787 |
+
f0 = np.array(1.0)
|
788 |
+
|
789 |
+
for method in methods:
|
790 |
+
rel_step = relative_step[method]
|
791 |
+
correct_step = np.array([rel_step,
|
792 |
+
rel_step * 1.,
|
793 |
+
rel_step * 1.,
|
794 |
+
rel_step * np.abs(x0[3])])
|
795 |
+
|
796 |
+
abs_step = _compute_absolute_step(None, x0, f0, method)
|
797 |
+
assert_allclose(abs_step, correct_step)
|
798 |
+
|
799 |
+
sign_x0 = (-x0 >= 0).astype(float) * 2 - 1
|
800 |
+
abs_step = _compute_absolute_step(None, -x0, f0, method)
|
801 |
+
assert_allclose(abs_step, sign_x0 * correct_step)
|
802 |
+
|
803 |
+
# if a relative step is provided it should be used
|
804 |
+
rel_step = np.array([0.1, 1, 10, 100])
|
805 |
+
correct_step = np.array([rel_step[0] * x0[0],
|
806 |
+
relative_step['2-point'],
|
807 |
+
rel_step[2] * 1.,
|
808 |
+
rel_step[3] * np.abs(x0[3])])
|
809 |
+
|
810 |
+
abs_step = _compute_absolute_step(rel_step, x0, f0, '2-point')
|
811 |
+
assert_allclose(abs_step, correct_step)
|
812 |
+
|
813 |
+
sign_x0 = (-x0 >= 0).astype(float) * 2 - 1
|
814 |
+
abs_step = _compute_absolute_step(rel_step, -x0, f0, '2-point')
|
815 |
+
assert_allclose(abs_step, sign_x0 * correct_step)
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__remove_redundancy.py
ADDED
@@ -0,0 +1,228 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Unit test for Linear Programming via Simplex Algorithm.
|
3 |
+
"""
|
4 |
+
|
5 |
+
# TODO: add tests for:
|
6 |
+
# https://github.com/scipy/scipy/issues/5400
|
7 |
+
# https://github.com/scipy/scipy/issues/6690
|
8 |
+
|
9 |
+
import numpy as np
|
10 |
+
from numpy.testing import (
|
11 |
+
assert_,
|
12 |
+
assert_allclose,
|
13 |
+
assert_equal)
|
14 |
+
|
15 |
+
from .test_linprog import magic_square
|
16 |
+
from scipy.optimize._remove_redundancy import _remove_redundancy_svd
|
17 |
+
from scipy.optimize._remove_redundancy import _remove_redundancy_pivot_dense
|
18 |
+
from scipy.optimize._remove_redundancy import _remove_redundancy_pivot_sparse
|
19 |
+
from scipy.optimize._remove_redundancy import _remove_redundancy_id
|
20 |
+
|
21 |
+
from scipy.sparse import csc_matrix
|
22 |
+
|
23 |
+
|
24 |
+
def setup_module():
|
25 |
+
np.random.seed(2017)
|
26 |
+
|
27 |
+
|
28 |
+
def redundancy_removed(A, B):
|
29 |
+
"""Checks whether a matrix contains only independent rows of another"""
|
30 |
+
for rowA in A:
|
31 |
+
# `rowA in B` is not a reliable check
|
32 |
+
for rowB in B:
|
33 |
+
if np.all(rowA == rowB):
|
34 |
+
break
|
35 |
+
else:
|
36 |
+
return False
|
37 |
+
return A.shape[0] == np.linalg.matrix_rank(A) == np.linalg.matrix_rank(B)
|
38 |
+
|
39 |
+
|
40 |
+
class RRCommonTests:
|
41 |
+
def test_no_redundancy(self):
|
42 |
+
m, n = 10, 10
|
43 |
+
A0 = np.random.rand(m, n)
|
44 |
+
b0 = np.random.rand(m)
|
45 |
+
A1, b1, status, message = self.rr(A0, b0)
|
46 |
+
assert_allclose(A0, A1)
|
47 |
+
assert_allclose(b0, b1)
|
48 |
+
assert_equal(status, 0)
|
49 |
+
|
50 |
+
def test_infeasible_zero_row(self):
|
51 |
+
A = np.eye(3)
|
52 |
+
A[1, :] = 0
|
53 |
+
b = np.random.rand(3)
|
54 |
+
A1, b1, status, message = self.rr(A, b)
|
55 |
+
assert_equal(status, 2)
|
56 |
+
|
57 |
+
def test_remove_zero_row(self):
|
58 |
+
A = np.eye(3)
|
59 |
+
A[1, :] = 0
|
60 |
+
b = np.random.rand(3)
|
61 |
+
b[1] = 0
|
62 |
+
A1, b1, status, message = self.rr(A, b)
|
63 |
+
assert_equal(status, 0)
|
64 |
+
assert_allclose(A1, A[[0, 2], :])
|
65 |
+
assert_allclose(b1, b[[0, 2]])
|
66 |
+
|
67 |
+
def test_infeasible_m_gt_n(self):
|
68 |
+
m, n = 20, 10
|
69 |
+
A0 = np.random.rand(m, n)
|
70 |
+
b0 = np.random.rand(m)
|
71 |
+
A1, b1, status, message = self.rr(A0, b0)
|
72 |
+
assert_equal(status, 2)
|
73 |
+
|
74 |
+
def test_infeasible_m_eq_n(self):
|
75 |
+
m, n = 10, 10
|
76 |
+
A0 = np.random.rand(m, n)
|
77 |
+
b0 = np.random.rand(m)
|
78 |
+
A0[-1, :] = 2 * A0[-2, :]
|
79 |
+
A1, b1, status, message = self.rr(A0, b0)
|
80 |
+
assert_equal(status, 2)
|
81 |
+
|
82 |
+
def test_infeasible_m_lt_n(self):
|
83 |
+
m, n = 9, 10
|
84 |
+
A0 = np.random.rand(m, n)
|
85 |
+
b0 = np.random.rand(m)
|
86 |
+
A0[-1, :] = np.arange(m - 1).dot(A0[:-1])
|
87 |
+
A1, b1, status, message = self.rr(A0, b0)
|
88 |
+
assert_equal(status, 2)
|
89 |
+
|
90 |
+
def test_m_gt_n(self):
|
91 |
+
np.random.seed(2032)
|
92 |
+
m, n = 20, 10
|
93 |
+
A0 = np.random.rand(m, n)
|
94 |
+
b0 = np.random.rand(m)
|
95 |
+
x = np.linalg.solve(A0[:n, :], b0[:n])
|
96 |
+
b0[n:] = A0[n:, :].dot(x)
|
97 |
+
A1, b1, status, message = self.rr(A0, b0)
|
98 |
+
assert_equal(status, 0)
|
99 |
+
assert_equal(A1.shape[0], n)
|
100 |
+
assert_equal(np.linalg.matrix_rank(A1), n)
|
101 |
+
|
102 |
+
def test_m_gt_n_rank_deficient(self):
|
103 |
+
m, n = 20, 10
|
104 |
+
A0 = np.zeros((m, n))
|
105 |
+
A0[:, 0] = 1
|
106 |
+
b0 = np.ones(m)
|
107 |
+
A1, b1, status, message = self.rr(A0, b0)
|
108 |
+
assert_equal(status, 0)
|
109 |
+
assert_allclose(A1, A0[0:1, :])
|
110 |
+
assert_allclose(b1, b0[0])
|
111 |
+
|
112 |
+
def test_m_lt_n_rank_deficient(self):
|
113 |
+
m, n = 9, 10
|
114 |
+
A0 = np.random.rand(m, n)
|
115 |
+
b0 = np.random.rand(m)
|
116 |
+
A0[-1, :] = np.arange(m - 1).dot(A0[:-1])
|
117 |
+
b0[-1] = np.arange(m - 1).dot(b0[:-1])
|
118 |
+
A1, b1, status, message = self.rr(A0, b0)
|
119 |
+
assert_equal(status, 0)
|
120 |
+
assert_equal(A1.shape[0], 8)
|
121 |
+
assert_equal(np.linalg.matrix_rank(A1), 8)
|
122 |
+
|
123 |
+
def test_dense1(self):
|
124 |
+
A = np.ones((6, 6))
|
125 |
+
A[0, :3] = 0
|
126 |
+
A[1, 3:] = 0
|
127 |
+
A[3:, ::2] = -1
|
128 |
+
A[3, :2] = 0
|
129 |
+
A[4, 2:] = 0
|
130 |
+
b = np.zeros(A.shape[0])
|
131 |
+
|
132 |
+
A1, b1, status, message = self.rr(A, b)
|
133 |
+
assert_(redundancy_removed(A1, A))
|
134 |
+
assert_equal(status, 0)
|
135 |
+
|
136 |
+
def test_dense2(self):
|
137 |
+
A = np.eye(6)
|
138 |
+
A[-2, -1] = 1
|
139 |
+
A[-1, :] = 1
|
140 |
+
b = np.zeros(A.shape[0])
|
141 |
+
A1, b1, status, message = self.rr(A, b)
|
142 |
+
assert_(redundancy_removed(A1, A))
|
143 |
+
assert_equal(status, 0)
|
144 |
+
|
145 |
+
def test_dense3(self):
|
146 |
+
A = np.eye(6)
|
147 |
+
A[-2, -1] = 1
|
148 |
+
A[-1, :] = 1
|
149 |
+
b = np.random.rand(A.shape[0])
|
150 |
+
b[-1] = np.sum(b[:-1])
|
151 |
+
A1, b1, status, message = self.rr(A, b)
|
152 |
+
assert_(redundancy_removed(A1, A))
|
153 |
+
assert_equal(status, 0)
|
154 |
+
|
155 |
+
def test_m_gt_n_sparse(self):
|
156 |
+
np.random.seed(2013)
|
157 |
+
m, n = 20, 5
|
158 |
+
p = 0.1
|
159 |
+
A = np.random.rand(m, n)
|
160 |
+
A[np.random.rand(m, n) > p] = 0
|
161 |
+
rank = np.linalg.matrix_rank(A)
|
162 |
+
b = np.zeros(A.shape[0])
|
163 |
+
A1, b1, status, message = self.rr(A, b)
|
164 |
+
assert_equal(status, 0)
|
165 |
+
assert_equal(A1.shape[0], rank)
|
166 |
+
assert_equal(np.linalg.matrix_rank(A1), rank)
|
167 |
+
|
168 |
+
def test_m_lt_n_sparse(self):
|
169 |
+
np.random.seed(2017)
|
170 |
+
m, n = 20, 50
|
171 |
+
p = 0.05
|
172 |
+
A = np.random.rand(m, n)
|
173 |
+
A[np.random.rand(m, n) > p] = 0
|
174 |
+
rank = np.linalg.matrix_rank(A)
|
175 |
+
b = np.zeros(A.shape[0])
|
176 |
+
A1, b1, status, message = self.rr(A, b)
|
177 |
+
assert_equal(status, 0)
|
178 |
+
assert_equal(A1.shape[0], rank)
|
179 |
+
assert_equal(np.linalg.matrix_rank(A1), rank)
|
180 |
+
|
181 |
+
def test_m_eq_n_sparse(self):
|
182 |
+
np.random.seed(2017)
|
183 |
+
m, n = 100, 100
|
184 |
+
p = 0.01
|
185 |
+
A = np.random.rand(m, n)
|
186 |
+
A[np.random.rand(m, n) > p] = 0
|
187 |
+
rank = np.linalg.matrix_rank(A)
|
188 |
+
b = np.zeros(A.shape[0])
|
189 |
+
A1, b1, status, message = self.rr(A, b)
|
190 |
+
assert_equal(status, 0)
|
191 |
+
assert_equal(A1.shape[0], rank)
|
192 |
+
assert_equal(np.linalg.matrix_rank(A1), rank)
|
193 |
+
|
194 |
+
def test_magic_square(self):
|
195 |
+
A, b, c, numbers, _ = magic_square(3)
|
196 |
+
A1, b1, status, message = self.rr(A, b)
|
197 |
+
assert_equal(status, 0)
|
198 |
+
assert_equal(A1.shape[0], 23)
|
199 |
+
assert_equal(np.linalg.matrix_rank(A1), 23)
|
200 |
+
|
201 |
+
def test_magic_square2(self):
|
202 |
+
A, b, c, numbers, _ = magic_square(4)
|
203 |
+
A1, b1, status, message = self.rr(A, b)
|
204 |
+
assert_equal(status, 0)
|
205 |
+
assert_equal(A1.shape[0], 39)
|
206 |
+
assert_equal(np.linalg.matrix_rank(A1), 39)
|
207 |
+
|
208 |
+
|
209 |
+
class TestRRSVD(RRCommonTests):
|
210 |
+
def rr(self, A, b):
|
211 |
+
return _remove_redundancy_svd(A, b)
|
212 |
+
|
213 |
+
|
214 |
+
class TestRRPivotDense(RRCommonTests):
|
215 |
+
def rr(self, A, b):
|
216 |
+
return _remove_redundancy_pivot_dense(A, b)
|
217 |
+
|
218 |
+
|
219 |
+
class TestRRID(RRCommonTests):
|
220 |
+
def rr(self, A, b):
|
221 |
+
return _remove_redundancy_id(A, b)
|
222 |
+
|
223 |
+
|
224 |
+
class TestRRPivotSparse(RRCommonTests):
|
225 |
+
def rr(self, A, b):
|
226 |
+
rr_res = _remove_redundancy_pivot_sparse(csc_matrix(A), b)
|
227 |
+
A1, b1, status, message = rr_res
|
228 |
+
return A1.toarray(), b1, status, message
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__root.py
ADDED
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Unit tests for optimization routines from _root.py.
|
3 |
+
"""
|
4 |
+
from numpy.testing import assert_, assert_equal
|
5 |
+
import pytest
|
6 |
+
from pytest import raises as assert_raises, warns as assert_warns
|
7 |
+
import numpy as np
|
8 |
+
|
9 |
+
from scipy.optimize import root
|
10 |
+
|
11 |
+
|
12 |
+
class TestRoot:
|
13 |
+
def test_tol_parameter(self):
|
14 |
+
# Check that the minimize() tol= argument does something
|
15 |
+
def func(z):
|
16 |
+
x, y = z
|
17 |
+
return np.array([x**3 - 1, y**3 - 1])
|
18 |
+
|
19 |
+
def dfunc(z):
|
20 |
+
x, y = z
|
21 |
+
return np.array([[3*x**2, 0], [0, 3*y**2]])
|
22 |
+
|
23 |
+
for method in ['hybr', 'lm', 'broyden1', 'broyden2', 'anderson',
|
24 |
+
'diagbroyden', 'krylov']:
|
25 |
+
if method in ('linearmixing', 'excitingmixing'):
|
26 |
+
# doesn't converge
|
27 |
+
continue
|
28 |
+
|
29 |
+
if method in ('hybr', 'lm'):
|
30 |
+
jac = dfunc
|
31 |
+
else:
|
32 |
+
jac = None
|
33 |
+
|
34 |
+
sol1 = root(func, [1.1,1.1], jac=jac, tol=1e-4, method=method)
|
35 |
+
sol2 = root(func, [1.1,1.1], jac=jac, tol=0.5, method=method)
|
36 |
+
msg = f"{method}: {func(sol1.x)} vs. {func(sol2.x)}"
|
37 |
+
assert_(sol1.success, msg)
|
38 |
+
assert_(sol2.success, msg)
|
39 |
+
assert_(abs(func(sol1.x)).max() < abs(func(sol2.x)).max(),
|
40 |
+
msg)
|
41 |
+
|
42 |
+
def test_tol_norm(self):
|
43 |
+
|
44 |
+
def norm(x):
|
45 |
+
return abs(x[0])
|
46 |
+
|
47 |
+
for method in ['excitingmixing',
|
48 |
+
'diagbroyden',
|
49 |
+
'linearmixing',
|
50 |
+
'anderson',
|
51 |
+
'broyden1',
|
52 |
+
'broyden2',
|
53 |
+
'krylov']:
|
54 |
+
|
55 |
+
root(np.zeros_like, np.zeros(2), method=method,
|
56 |
+
options={"tol_norm": norm})
|
57 |
+
|
58 |
+
def test_minimize_scalar_coerce_args_param(self):
|
59 |
+
# github issue #3503
|
60 |
+
def func(z, f=1):
|
61 |
+
x, y = z
|
62 |
+
return np.array([x**3 - 1, y**3 - f])
|
63 |
+
root(func, [1.1, 1.1], args=1.5)
|
64 |
+
|
65 |
+
def test_f_size(self):
|
66 |
+
# gh8320
|
67 |
+
# check that decreasing the size of the returned array raises an error
|
68 |
+
# and doesn't segfault
|
69 |
+
class fun:
|
70 |
+
def __init__(self):
|
71 |
+
self.count = 0
|
72 |
+
|
73 |
+
def __call__(self, x):
|
74 |
+
self.count += 1
|
75 |
+
|
76 |
+
if not (self.count % 5):
|
77 |
+
ret = x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0
|
78 |
+
else:
|
79 |
+
ret = ([x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0,
|
80 |
+
0.5 * (x[1] - x[0]) ** 3 + x[1]])
|
81 |
+
|
82 |
+
return ret
|
83 |
+
|
84 |
+
F = fun()
|
85 |
+
with assert_raises(ValueError):
|
86 |
+
root(F, [0.1, 0.0], method='lm')
|
87 |
+
|
88 |
+
def test_gh_10370(self):
|
89 |
+
# gh-10370 reported that passing both `args` and `jac` to `root` with
|
90 |
+
# `method='krylov'` caused a failure. Ensure that this is fixed whether
|
91 |
+
# the gradient is passed via `jac` or as a second output of `fun`.
|
92 |
+
def fun(x, ignored):
|
93 |
+
return [3*x[0] - 0.25*x[1]**2 + 10, 0.1*x[0]**2 + 5*x[1] - 2]
|
94 |
+
|
95 |
+
def grad(x, ignored):
|
96 |
+
return [[3, 0.5 * x[1]], [0.2 * x[0], 5]]
|
97 |
+
|
98 |
+
def fun_grad(x, ignored):
|
99 |
+
return fun(x, ignored), grad(x, ignored)
|
100 |
+
|
101 |
+
x0 = np.zeros(2)
|
102 |
+
|
103 |
+
ref = root(fun, x0, args=(1,), method='krylov')
|
104 |
+
message = 'Method krylov does not use the jacobian'
|
105 |
+
with assert_warns(RuntimeWarning, match=message):
|
106 |
+
res1 = root(fun, x0, args=(1,), method='krylov', jac=grad)
|
107 |
+
with assert_warns(RuntimeWarning, match=message):
|
108 |
+
res2 = root(fun_grad, x0, args=(1,), method='krylov', jac=True)
|
109 |
+
|
110 |
+
assert_equal(res1.x, ref.x)
|
111 |
+
assert_equal(res2.x, ref.x)
|
112 |
+
assert res1.success is res2.success is ref.success is True
|
113 |
+
|
114 |
+
@pytest.mark.parametrize("method", ["hybr", "lm", "broyden1", "broyden2",
|
115 |
+
"anderson", "linearmixing",
|
116 |
+
"diagbroyden", "excitingmixing",
|
117 |
+
"krylov", "df-sane"])
|
118 |
+
def test_method_in_result(self, method):
|
119 |
+
def func(x):
|
120 |
+
return x - 1
|
121 |
+
|
122 |
+
res = root(func, x0=[1], method=method)
|
123 |
+
assert res.method == method
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__shgo.py
ADDED
@@ -0,0 +1,1159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import logging
|
2 |
+
import sys
|
3 |
+
|
4 |
+
import numpy
|
5 |
+
import numpy as np
|
6 |
+
import time
|
7 |
+
from multiprocessing import Pool
|
8 |
+
from numpy.testing import assert_allclose, IS_PYPY
|
9 |
+
import pytest
|
10 |
+
from pytest import raises as assert_raises, warns
|
11 |
+
from scipy.optimize import (shgo, Bounds, minimize_scalar, minimize, rosen,
|
12 |
+
rosen_der, rosen_hess, NonlinearConstraint)
|
13 |
+
from scipy.optimize._constraints import new_constraint_to_old
|
14 |
+
from scipy.optimize._shgo import SHGO
|
15 |
+
|
16 |
+
|
17 |
+
class StructTestFunction:
|
18 |
+
def __init__(self, bounds, expected_x, expected_fun=None,
|
19 |
+
expected_xl=None, expected_funl=None):
|
20 |
+
self.bounds = bounds
|
21 |
+
self.expected_x = expected_x
|
22 |
+
self.expected_fun = expected_fun
|
23 |
+
self.expected_xl = expected_xl
|
24 |
+
self.expected_funl = expected_funl
|
25 |
+
|
26 |
+
|
27 |
+
def wrap_constraints(g):
|
28 |
+
cons = []
|
29 |
+
if g is not None:
|
30 |
+
if not isinstance(g, (tuple, list)):
|
31 |
+
g = (g,)
|
32 |
+
else:
|
33 |
+
pass
|
34 |
+
for g in g:
|
35 |
+
cons.append({'type': 'ineq',
|
36 |
+
'fun': g})
|
37 |
+
cons = tuple(cons)
|
38 |
+
else:
|
39 |
+
cons = None
|
40 |
+
return cons
|
41 |
+
|
42 |
+
|
43 |
+
class StructTest1(StructTestFunction):
|
44 |
+
def f(self, x):
|
45 |
+
return x[0] ** 2 + x[1] ** 2
|
46 |
+
|
47 |
+
def g(x):
|
48 |
+
return -(numpy.sum(x, axis=0) - 6.0)
|
49 |
+
|
50 |
+
cons = wrap_constraints(g)
|
51 |
+
|
52 |
+
|
53 |
+
test1_1 = StructTest1(bounds=[(-1, 6), (-1, 6)],
|
54 |
+
expected_x=[0, 0])
|
55 |
+
test1_2 = StructTest1(bounds=[(0, 1), (0, 1)],
|
56 |
+
expected_x=[0, 0])
|
57 |
+
test1_3 = StructTest1(bounds=[(None, None), (None, None)],
|
58 |
+
expected_x=[0, 0])
|
59 |
+
|
60 |
+
|
61 |
+
class StructTest2(StructTestFunction):
|
62 |
+
"""
|
63 |
+
Scalar function with several minima to test all minimiser retrievals
|
64 |
+
"""
|
65 |
+
|
66 |
+
def f(self, x):
|
67 |
+
return (x - 30) * numpy.sin(x)
|
68 |
+
|
69 |
+
def g(x):
|
70 |
+
return 58 - numpy.sum(x, axis=0)
|
71 |
+
|
72 |
+
cons = wrap_constraints(g)
|
73 |
+
|
74 |
+
|
75 |
+
test2_1 = StructTest2(bounds=[(0, 60)],
|
76 |
+
expected_x=[1.53567906],
|
77 |
+
expected_fun=-28.44677132,
|
78 |
+
# Important: test that funl return is in the correct
|
79 |
+
# order
|
80 |
+
expected_xl=numpy.array([[1.53567906],
|
81 |
+
[55.01782167],
|
82 |
+
[7.80894889],
|
83 |
+
[48.74797493],
|
84 |
+
[14.07445705],
|
85 |
+
[42.4913859],
|
86 |
+
[20.31743841],
|
87 |
+
[36.28607535],
|
88 |
+
[26.43039605],
|
89 |
+
[30.76371366]]),
|
90 |
+
|
91 |
+
expected_funl=numpy.array([-28.44677132, -24.99785984,
|
92 |
+
-22.16855376, -18.72136195,
|
93 |
+
-15.89423937, -12.45154942,
|
94 |
+
-9.63133158, -6.20801301,
|
95 |
+
-3.43727232, -0.46353338])
|
96 |
+
)
|
97 |
+
|
98 |
+
test2_2 = StructTest2(bounds=[(0, 4.5)],
|
99 |
+
expected_x=[1.53567906],
|
100 |
+
expected_fun=[-28.44677132],
|
101 |
+
expected_xl=numpy.array([[1.53567906]]),
|
102 |
+
expected_funl=numpy.array([-28.44677132])
|
103 |
+
)
|
104 |
+
|
105 |
+
|
106 |
+
class StructTest3(StructTestFunction):
|
107 |
+
"""
|
108 |
+
Hock and Schittkowski 18 problem (HS18). Hoch and Schittkowski (1981)
|
109 |
+
http://www.ai7.uni-bayreuth.de/test_problem_coll.pdf
|
110 |
+
Minimize: f = 0.01 * (x_1)**2 + (x_2)**2
|
111 |
+
|
112 |
+
Subject to: x_1 * x_2 - 25.0 >= 0,
|
113 |
+
(x_1)**2 + (x_2)**2 - 25.0 >= 0,
|
114 |
+
2 <= x_1 <= 50,
|
115 |
+
0 <= x_2 <= 50.
|
116 |
+
|
117 |
+
Approx. Answer:
|
118 |
+
f([(250)**0.5 , (2.5)**0.5]) = 5.0
|
119 |
+
|
120 |
+
|
121 |
+
"""
|
122 |
+
|
123 |
+
# amended to test vectorisation of constraints
|
124 |
+
def f(self, x):
|
125 |
+
return 0.01 * (x[0]) ** 2 + (x[1]) ** 2
|
126 |
+
|
127 |
+
def g1(x):
|
128 |
+
return x[0] * x[1] - 25.0
|
129 |
+
|
130 |
+
def g2(x):
|
131 |
+
return x[0] ** 2 + x[1] ** 2 - 25.0
|
132 |
+
|
133 |
+
# g = (g1, g2)
|
134 |
+
# cons = wrap_constraints(g)
|
135 |
+
|
136 |
+
def g(x):
|
137 |
+
return x[0] * x[1] - 25.0, x[0] ** 2 + x[1] ** 2 - 25.0
|
138 |
+
|
139 |
+
# this checks that shgo can be sent new-style constraints
|
140 |
+
__nlc = NonlinearConstraint(g, 0, np.inf)
|
141 |
+
cons = (__nlc,)
|
142 |
+
|
143 |
+
test3_1 = StructTest3(bounds=[(2, 50), (0, 50)],
|
144 |
+
expected_x=[250 ** 0.5, 2.5 ** 0.5],
|
145 |
+
expected_fun=5.0
|
146 |
+
)
|
147 |
+
|
148 |
+
|
149 |
+
class StructTest4(StructTestFunction):
|
150 |
+
"""
|
151 |
+
Hock and Schittkowski 11 problem (HS11). Hoch and Schittkowski (1981)
|
152 |
+
|
153 |
+
NOTE: Did not find in original reference to HS collection, refer to
|
154 |
+
Henderson (2015) problem 7 instead. 02.03.2016
|
155 |
+
"""
|
156 |
+
|
157 |
+
def f(self, x):
|
158 |
+
return ((x[0] - 10) ** 2 + 5 * (x[1] - 12) ** 2 + x[2] ** 4
|
159 |
+
+ 3 * (x[3] - 11) ** 2 + 10 * x[4] ** 6 + 7 * x[5] ** 2 + x[
|
160 |
+
6] ** 4
|
161 |
+
- 4 * x[5] * x[6] - 10 * x[5] - 8 * x[6]
|
162 |
+
)
|
163 |
+
|
164 |
+
def g1(x):
|
165 |
+
return -(2 * x[0] ** 2 + 3 * x[1] ** 4 + x[2] + 4 * x[3] ** 2
|
166 |
+
+ 5 * x[4] - 127)
|
167 |
+
|
168 |
+
def g2(x):
|
169 |
+
return -(7 * x[0] + 3 * x[1] + 10 * x[2] ** 2 + x[3] - x[4] - 282.0)
|
170 |
+
|
171 |
+
def g3(x):
|
172 |
+
return -(23 * x[0] + x[1] ** 2 + 6 * x[5] ** 2 - 8 * x[6] - 196)
|
173 |
+
|
174 |
+
def g4(x):
|
175 |
+
return -(4 * x[0] ** 2 + x[1] ** 2 - 3 * x[0] * x[1] + 2 * x[2] ** 2
|
176 |
+
+ 5 * x[5] - 11 * x[6])
|
177 |
+
|
178 |
+
g = (g1, g2, g3, g4)
|
179 |
+
|
180 |
+
cons = wrap_constraints(g)
|
181 |
+
|
182 |
+
|
183 |
+
test4_1 = StructTest4(bounds=[(-10, 10), ] * 7,
|
184 |
+
expected_x=[2.330499, 1.951372, -0.4775414,
|
185 |
+
4.365726, -0.6244870, 1.038131, 1.594227],
|
186 |
+
expected_fun=680.6300573
|
187 |
+
)
|
188 |
+
|
189 |
+
|
190 |
+
class StructTest5(StructTestFunction):
|
191 |
+
def f(self, x):
|
192 |
+
return (-(x[1] + 47.0)
|
193 |
+
* numpy.sin(numpy.sqrt(abs(x[0] / 2.0 + (x[1] + 47.0))))
|
194 |
+
- x[0] * numpy.sin(numpy.sqrt(abs(x[0] - (x[1] + 47.0))))
|
195 |
+
)
|
196 |
+
|
197 |
+
g = None
|
198 |
+
cons = wrap_constraints(g)
|
199 |
+
|
200 |
+
|
201 |
+
test5_1 = StructTest5(bounds=[(-512, 512), (-512, 512)],
|
202 |
+
expected_fun=[-959.64066272085051],
|
203 |
+
expected_x=[512., 404.23180542])
|
204 |
+
|
205 |
+
|
206 |
+
class StructTestLJ(StructTestFunction):
|
207 |
+
"""
|
208 |
+
LennardJones objective function. Used to test symmetry constraints
|
209 |
+
settings.
|
210 |
+
"""
|
211 |
+
|
212 |
+
def f(self, x, *args):
|
213 |
+
print(f'x = {x}')
|
214 |
+
self.N = args[0]
|
215 |
+
k = int(self.N / 3)
|
216 |
+
s = 0.0
|
217 |
+
|
218 |
+
for i in range(k - 1):
|
219 |
+
for j in range(i + 1, k):
|
220 |
+
a = 3 * i
|
221 |
+
b = 3 * j
|
222 |
+
xd = x[a] - x[b]
|
223 |
+
yd = x[a + 1] - x[b + 1]
|
224 |
+
zd = x[a + 2] - x[b + 2]
|
225 |
+
ed = xd * xd + yd * yd + zd * zd
|
226 |
+
ud = ed * ed * ed
|
227 |
+
if ed > 0.0:
|
228 |
+
s += (1.0 / ud - 2.0) / ud
|
229 |
+
|
230 |
+
return s
|
231 |
+
|
232 |
+
g = None
|
233 |
+
cons = wrap_constraints(g)
|
234 |
+
|
235 |
+
|
236 |
+
N = 6
|
237 |
+
boundsLJ = list(zip([-4.0] * 6, [4.0] * 6))
|
238 |
+
|
239 |
+
testLJ = StructTestLJ(bounds=boundsLJ,
|
240 |
+
expected_fun=[-1.0],
|
241 |
+
expected_x=None,
|
242 |
+
# expected_x=[-2.71247337e-08,
|
243 |
+
# -2.71247337e-08,
|
244 |
+
# -2.50000222e+00,
|
245 |
+
# -2.71247337e-08,
|
246 |
+
# -2.71247337e-08,
|
247 |
+
# -1.50000222e+00]
|
248 |
+
)
|
249 |
+
|
250 |
+
|
251 |
+
class StructTestS(StructTestFunction):
|
252 |
+
def f(self, x):
|
253 |
+
return ((x[0] - 0.5) ** 2 + (x[1] - 0.5) ** 2
|
254 |
+
+ (x[2] - 0.5) ** 2 + (x[3] - 0.5) ** 2)
|
255 |
+
|
256 |
+
g = None
|
257 |
+
cons = wrap_constraints(g)
|
258 |
+
|
259 |
+
|
260 |
+
test_s = StructTestS(bounds=[(0, 2.0), ] * 4,
|
261 |
+
expected_fun=0.0,
|
262 |
+
expected_x=numpy.ones(4) - 0.5
|
263 |
+
)
|
264 |
+
|
265 |
+
|
266 |
+
class StructTestTable(StructTestFunction):
|
267 |
+
def f(self, x):
|
268 |
+
if x[0] == 3.0 and x[1] == 3.0:
|
269 |
+
return 50
|
270 |
+
else:
|
271 |
+
return 100
|
272 |
+
|
273 |
+
g = None
|
274 |
+
cons = wrap_constraints(g)
|
275 |
+
|
276 |
+
|
277 |
+
test_table = StructTestTable(bounds=[(-10, 10), (-10, 10)],
|
278 |
+
expected_fun=[50],
|
279 |
+
expected_x=[3.0, 3.0])
|
280 |
+
|
281 |
+
|
282 |
+
class StructTestInfeasible(StructTestFunction):
|
283 |
+
"""
|
284 |
+
Test function with no feasible domain.
|
285 |
+
"""
|
286 |
+
|
287 |
+
def f(self, x, *args):
|
288 |
+
return x[0] ** 2 + x[1] ** 2
|
289 |
+
|
290 |
+
def g1(x):
|
291 |
+
return x[0] + x[1] - 1
|
292 |
+
|
293 |
+
def g2(x):
|
294 |
+
return -(x[0] + x[1] - 1)
|
295 |
+
|
296 |
+
def g3(x):
|
297 |
+
return -x[0] + x[1] - 1
|
298 |
+
|
299 |
+
def g4(x):
|
300 |
+
return -(-x[0] + x[1] - 1)
|
301 |
+
|
302 |
+
g = (g1, g2, g3, g4)
|
303 |
+
cons = wrap_constraints(g)
|
304 |
+
|
305 |
+
|
306 |
+
test_infeasible = StructTestInfeasible(bounds=[(2, 50), (-1, 1)],
|
307 |
+
expected_fun=None,
|
308 |
+
expected_x=None
|
309 |
+
)
|
310 |
+
|
311 |
+
|
312 |
+
@pytest.mark.skip("Not a test")
|
313 |
+
def run_test(test, args=(), test_atol=1e-5, n=100, iters=None,
|
314 |
+
callback=None, minimizer_kwargs=None, options=None,
|
315 |
+
sampling_method='sobol', workers=1):
|
316 |
+
res = shgo(test.f, test.bounds, args=args, constraints=test.cons,
|
317 |
+
n=n, iters=iters, callback=callback,
|
318 |
+
minimizer_kwargs=minimizer_kwargs, options=options,
|
319 |
+
sampling_method=sampling_method, workers=workers)
|
320 |
+
|
321 |
+
print(f'res = {res}')
|
322 |
+
logging.info(f'res = {res}')
|
323 |
+
if test.expected_x is not None:
|
324 |
+
numpy.testing.assert_allclose(res.x, test.expected_x,
|
325 |
+
rtol=test_atol,
|
326 |
+
atol=test_atol)
|
327 |
+
|
328 |
+
# (Optional tests)
|
329 |
+
if test.expected_fun is not None:
|
330 |
+
numpy.testing.assert_allclose(res.fun,
|
331 |
+
test.expected_fun,
|
332 |
+
atol=test_atol)
|
333 |
+
|
334 |
+
if test.expected_xl is not None:
|
335 |
+
numpy.testing.assert_allclose(res.xl,
|
336 |
+
test.expected_xl,
|
337 |
+
atol=test_atol)
|
338 |
+
|
339 |
+
if test.expected_funl is not None:
|
340 |
+
numpy.testing.assert_allclose(res.funl,
|
341 |
+
test.expected_funl,
|
342 |
+
atol=test_atol)
|
343 |
+
return
|
344 |
+
|
345 |
+
|
346 |
+
# Base test functions:
|
347 |
+
class TestShgoSobolTestFunctions:
|
348 |
+
"""
|
349 |
+
Global optimisation tests with Sobol sampling:
|
350 |
+
"""
|
351 |
+
|
352 |
+
# Sobol algorithm
|
353 |
+
def test_f1_1_sobol(self):
|
354 |
+
"""Multivariate test function 1:
|
355 |
+
x[0]**2 + x[1]**2 with bounds=[(-1, 6), (-1, 6)]"""
|
356 |
+
run_test(test1_1)
|
357 |
+
|
358 |
+
def test_f1_2_sobol(self):
|
359 |
+
"""Multivariate test function 1:
|
360 |
+
x[0]**2 + x[1]**2 with bounds=[(0, 1), (0, 1)]"""
|
361 |
+
run_test(test1_2)
|
362 |
+
|
363 |
+
def test_f1_3_sobol(self):
|
364 |
+
"""Multivariate test function 1:
|
365 |
+
x[0]**2 + x[1]**2 with bounds=[(None, None),(None, None)]"""
|
366 |
+
options = {'disp': True}
|
367 |
+
run_test(test1_3, options=options)
|
368 |
+
|
369 |
+
def test_f2_1_sobol(self):
|
370 |
+
"""Univariate test function on
|
371 |
+
f(x) = (x - 30) * sin(x) with bounds=[(0, 60)]"""
|
372 |
+
run_test(test2_1)
|
373 |
+
|
374 |
+
def test_f2_2_sobol(self):
|
375 |
+
"""Univariate test function on
|
376 |
+
f(x) = (x - 30) * sin(x) bounds=[(0, 4.5)]"""
|
377 |
+
run_test(test2_2)
|
378 |
+
|
379 |
+
def test_f3_sobol(self):
|
380 |
+
"""NLP: Hock and Schittkowski problem 18"""
|
381 |
+
run_test(test3_1)
|
382 |
+
|
383 |
+
@pytest.mark.slow
|
384 |
+
def test_f4_sobol(self):
|
385 |
+
"""NLP: (High dimensional) Hock and Schittkowski 11 problem (HS11)"""
|
386 |
+
options = {'infty_constraints': False}
|
387 |
+
# run_test(test4_1, n=990, options=options)
|
388 |
+
run_test(test4_1, n=990 * 2, options=options)
|
389 |
+
|
390 |
+
def test_f5_1_sobol(self):
|
391 |
+
"""NLP: Eggholder, multimodal"""
|
392 |
+
# run_test(test5_1, n=30)
|
393 |
+
run_test(test5_1, n=60)
|
394 |
+
|
395 |
+
def test_f5_2_sobol(self):
|
396 |
+
"""NLP: Eggholder, multimodal"""
|
397 |
+
# run_test(test5_1, n=60, iters=5)
|
398 |
+
run_test(test5_1, n=60, iters=5)
|
399 |
+
|
400 |
+
# def test_t911(self):
|
401 |
+
# """1D tabletop function"""
|
402 |
+
# run_test(test11_1)
|
403 |
+
|
404 |
+
|
405 |
+
class TestShgoSimplicialTestFunctions:
|
406 |
+
"""
|
407 |
+
Global optimisation tests with Simplicial sampling:
|
408 |
+
"""
|
409 |
+
|
410 |
+
def test_f1_1_simplicial(self):
|
411 |
+
"""Multivariate test function 1:
|
412 |
+
x[0]**2 + x[1]**2 with bounds=[(-1, 6), (-1, 6)]"""
|
413 |
+
run_test(test1_1, n=1, sampling_method='simplicial')
|
414 |
+
|
415 |
+
def test_f1_2_simplicial(self):
|
416 |
+
"""Multivariate test function 1:
|
417 |
+
x[0]**2 + x[1]**2 with bounds=[(0, 1), (0, 1)]"""
|
418 |
+
run_test(test1_2, n=1, sampling_method='simplicial')
|
419 |
+
|
420 |
+
def test_f1_3_simplicial(self):
|
421 |
+
"""Multivariate test function 1: x[0]**2 + x[1]**2
|
422 |
+
with bounds=[(None, None),(None, None)]"""
|
423 |
+
run_test(test1_3, n=5, sampling_method='simplicial')
|
424 |
+
|
425 |
+
def test_f2_1_simplicial(self):
|
426 |
+
"""Univariate test function on
|
427 |
+
f(x) = (x - 30) * sin(x) with bounds=[(0, 60)]"""
|
428 |
+
options = {'minimize_every_iter': False}
|
429 |
+
run_test(test2_1, n=200, iters=7, options=options,
|
430 |
+
sampling_method='simplicial')
|
431 |
+
|
432 |
+
def test_f2_2_simplicial(self):
|
433 |
+
"""Univariate test function on
|
434 |
+
f(x) = (x - 30) * sin(x) bounds=[(0, 4.5)]"""
|
435 |
+
run_test(test2_2, n=1, sampling_method='simplicial')
|
436 |
+
|
437 |
+
def test_f3_simplicial(self):
|
438 |
+
"""NLP: Hock and Schittkowski problem 18"""
|
439 |
+
run_test(test3_1, n=1, sampling_method='simplicial')
|
440 |
+
|
441 |
+
@pytest.mark.slow
|
442 |
+
def test_f4_simplicial(self):
|
443 |
+
"""NLP: (High dimensional) Hock and Schittkowski 11 problem (HS11)"""
|
444 |
+
run_test(test4_1, n=1, sampling_method='simplicial')
|
445 |
+
|
446 |
+
def test_lj_symmetry_old(self):
|
447 |
+
"""LJ: Symmetry-constrained test function"""
|
448 |
+
options = {'symmetry': True,
|
449 |
+
'disp': True}
|
450 |
+
args = (6,) # Number of atoms
|
451 |
+
run_test(testLJ, args=args, n=300,
|
452 |
+
options=options, iters=1,
|
453 |
+
sampling_method='simplicial')
|
454 |
+
|
455 |
+
def test_f5_1_lj_symmetry(self):
|
456 |
+
"""LJ: Symmetry constrained test function"""
|
457 |
+
options = {'symmetry': [0, ] * 6,
|
458 |
+
'disp': True}
|
459 |
+
args = (6,) # No. of atoms
|
460 |
+
|
461 |
+
run_test(testLJ, args=args, n=300,
|
462 |
+
options=options, iters=1,
|
463 |
+
sampling_method='simplicial')
|
464 |
+
|
465 |
+
def test_f5_2_cons_symmetry(self):
|
466 |
+
"""Symmetry constrained test function"""
|
467 |
+
options = {'symmetry': [0, 0],
|
468 |
+
'disp': True}
|
469 |
+
|
470 |
+
run_test(test1_1, n=200,
|
471 |
+
options=options, iters=1,
|
472 |
+
sampling_method='simplicial')
|
473 |
+
|
474 |
+
def test_f5_3_cons_symmetry(self):
|
475 |
+
"""Assymmetrically constrained test function"""
|
476 |
+
options = {'symmetry': [0, 0, 0, 3],
|
477 |
+
'disp': True}
|
478 |
+
|
479 |
+
run_test(test_s, n=10000,
|
480 |
+
options=options,
|
481 |
+
iters=1,
|
482 |
+
sampling_method='simplicial')
|
483 |
+
|
484 |
+
@pytest.mark.skip("Not a test")
|
485 |
+
def test_f0_min_variance(self):
|
486 |
+
"""Return a minimum on a perfectly symmetric problem, based on
|
487 |
+
gh10429"""
|
488 |
+
avg = 0.5 # Given average value of x
|
489 |
+
cons = {'type': 'eq', 'fun': lambda x: numpy.mean(x) - avg}
|
490 |
+
|
491 |
+
# Minimize the variance of x under the given constraint
|
492 |
+
res = shgo(numpy.var, bounds=6 * [(0, 1)], constraints=cons)
|
493 |
+
assert res.success
|
494 |
+
assert_allclose(res.fun, 0, atol=1e-15)
|
495 |
+
assert_allclose(res.x, 0.5)
|
496 |
+
|
497 |
+
@pytest.mark.skip("Not a test")
|
498 |
+
def test_f0_min_variance_1D(self):
|
499 |
+
"""Return a minimum on a perfectly symmetric 1D problem, based on
|
500 |
+
gh10538"""
|
501 |
+
|
502 |
+
def fun(x):
|
503 |
+
return x * (x - 1.0) * (x - 0.5)
|
504 |
+
|
505 |
+
bounds = [(0, 1)]
|
506 |
+
res = shgo(fun, bounds=bounds)
|
507 |
+
ref = minimize_scalar(fun, bounds=bounds[0])
|
508 |
+
assert res.success
|
509 |
+
assert_allclose(res.fun, ref.fun)
|
510 |
+
assert_allclose(res.x, ref.x, rtol=1e-6)
|
511 |
+
|
512 |
+
# Argument test functions
|
513 |
+
class TestShgoArguments:
|
514 |
+
def test_1_1_simpl_iter(self):
|
515 |
+
"""Iterative simplicial sampling on TestFunction 1 (multivariate)"""
|
516 |
+
run_test(test1_2, n=None, iters=2, sampling_method='simplicial')
|
517 |
+
|
518 |
+
def test_1_2_simpl_iter(self):
|
519 |
+
"""Iterative simplicial on TestFunction 2 (univariate)"""
|
520 |
+
options = {'minimize_every_iter': False}
|
521 |
+
run_test(test2_1, n=None, iters=9, options=options,
|
522 |
+
sampling_method='simplicial')
|
523 |
+
|
524 |
+
def test_2_1_sobol_iter(self):
|
525 |
+
"""Iterative Sobol sampling on TestFunction 1 (multivariate)"""
|
526 |
+
run_test(test1_2, n=None, iters=1, sampling_method='sobol')
|
527 |
+
|
528 |
+
def test_2_2_sobol_iter(self):
|
529 |
+
"""Iterative Sobol sampling on TestFunction 2 (univariate)"""
|
530 |
+
res = shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons,
|
531 |
+
n=None, iters=1, sampling_method='sobol')
|
532 |
+
|
533 |
+
numpy.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5,
|
534 |
+
atol=1e-5)
|
535 |
+
numpy.testing.assert_allclose(res.fun, test2_1.expected_fun, atol=1e-5)
|
536 |
+
|
537 |
+
def test_3_1_disp_simplicial(self):
|
538 |
+
"""Iterative sampling on TestFunction 1 and 2 (multi and univariate)
|
539 |
+
"""
|
540 |
+
|
541 |
+
def callback_func(x):
|
542 |
+
print("Local minimization callback test")
|
543 |
+
|
544 |
+
for test in [test1_1, test2_1]:
|
545 |
+
shgo(test.f, test.bounds, iters=1,
|
546 |
+
sampling_method='simplicial',
|
547 |
+
callback=callback_func, options={'disp': True})
|
548 |
+
shgo(test.f, test.bounds, n=1, sampling_method='simplicial',
|
549 |
+
callback=callback_func, options={'disp': True})
|
550 |
+
|
551 |
+
def test_3_2_disp_sobol(self):
|
552 |
+
"""Iterative sampling on TestFunction 1 and 2 (multi and univariate)"""
|
553 |
+
|
554 |
+
def callback_func(x):
|
555 |
+
print("Local minimization callback test")
|
556 |
+
|
557 |
+
for test in [test1_1, test2_1]:
|
558 |
+
shgo(test.f, test.bounds, iters=1, sampling_method='sobol',
|
559 |
+
callback=callback_func, options={'disp': True})
|
560 |
+
|
561 |
+
shgo(test.f, test.bounds, n=1, sampling_method='simplicial',
|
562 |
+
callback=callback_func, options={'disp': True})
|
563 |
+
|
564 |
+
def test_args_gh14589(self):
|
565 |
+
"""Using `args` used to cause `shgo` to fail; see #14589, #15986,
|
566 |
+
#16506"""
|
567 |
+
res = shgo(func=lambda x, y, z: x * z + y, bounds=[(0, 3)], args=(1, 2)
|
568 |
+
)
|
569 |
+
ref = shgo(func=lambda x: 2 * x + 1, bounds=[(0, 3)])
|
570 |
+
assert_allclose(res.fun, ref.fun)
|
571 |
+
assert_allclose(res.x, ref.x)
|
572 |
+
|
573 |
+
@pytest.mark.slow
|
574 |
+
def test_4_1_known_f_min(self):
|
575 |
+
"""Test known function minima stopping criteria"""
|
576 |
+
# Specify known function value
|
577 |
+
options = {'f_min': test4_1.expected_fun,
|
578 |
+
'f_tol': 1e-6,
|
579 |
+
'minimize_every_iter': True}
|
580 |
+
# TODO: Make default n higher for faster tests
|
581 |
+
run_test(test4_1, n=None, test_atol=1e-5, options=options,
|
582 |
+
sampling_method='simplicial')
|
583 |
+
|
584 |
+
@pytest.mark.slow
|
585 |
+
def test_4_2_known_f_min(self):
|
586 |
+
"""Test Global mode limiting local evaluations"""
|
587 |
+
options = { # Specify known function value
|
588 |
+
'f_min': test4_1.expected_fun,
|
589 |
+
'f_tol': 1e-6,
|
590 |
+
# Specify number of local iterations to perform
|
591 |
+
'minimize_every_iter': True,
|
592 |
+
'local_iter': 1}
|
593 |
+
|
594 |
+
run_test(test4_1, n=None, test_atol=1e-5, options=options,
|
595 |
+
sampling_method='simplicial')
|
596 |
+
|
597 |
+
def test_4_4_known_f_min(self):
|
598 |
+
"""Test Global mode limiting local evaluations for 1D funcs"""
|
599 |
+
options = { # Specify known function value
|
600 |
+
'f_min': test2_1.expected_fun,
|
601 |
+
'f_tol': 1e-6,
|
602 |
+
# Specify number of local iterations to perform+
|
603 |
+
'minimize_every_iter': True,
|
604 |
+
'local_iter': 1,
|
605 |
+
'infty_constraints': False}
|
606 |
+
|
607 |
+
res = shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons,
|
608 |
+
n=None, iters=None, options=options,
|
609 |
+
sampling_method='sobol')
|
610 |
+
numpy.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5,
|
611 |
+
atol=1e-5)
|
612 |
+
|
613 |
+
def test_5_1_simplicial_argless(self):
|
614 |
+
"""Test Default simplicial sampling settings on TestFunction 1"""
|
615 |
+
res = shgo(test1_1.f, test1_1.bounds, constraints=test1_1.cons)
|
616 |
+
numpy.testing.assert_allclose(res.x, test1_1.expected_x, rtol=1e-5,
|
617 |
+
atol=1e-5)
|
618 |
+
|
619 |
+
def test_5_2_sobol_argless(self):
|
620 |
+
"""Test Default sobol sampling settings on TestFunction 1"""
|
621 |
+
res = shgo(test1_1.f, test1_1.bounds, constraints=test1_1.cons,
|
622 |
+
sampling_method='sobol')
|
623 |
+
numpy.testing.assert_allclose(res.x, test1_1.expected_x, rtol=1e-5,
|
624 |
+
atol=1e-5)
|
625 |
+
|
626 |
+
def test_6_1_simplicial_max_iter(self):
|
627 |
+
"""Test that maximum iteration option works on TestFunction 3"""
|
628 |
+
options = {'max_iter': 2}
|
629 |
+
res = shgo(test3_1.f, test3_1.bounds, constraints=test3_1.cons,
|
630 |
+
options=options, sampling_method='simplicial')
|
631 |
+
numpy.testing.assert_allclose(res.x, test3_1.expected_x, rtol=1e-5,
|
632 |
+
atol=1e-5)
|
633 |
+
numpy.testing.assert_allclose(res.fun, test3_1.expected_fun, atol=1e-5)
|
634 |
+
|
635 |
+
def test_6_2_simplicial_min_iter(self):
|
636 |
+
"""Test that maximum iteration option works on TestFunction 3"""
|
637 |
+
options = {'min_iter': 2}
|
638 |
+
res = shgo(test3_1.f, test3_1.bounds, constraints=test3_1.cons,
|
639 |
+
options=options, sampling_method='simplicial')
|
640 |
+
numpy.testing.assert_allclose(res.x, test3_1.expected_x, rtol=1e-5,
|
641 |
+
atol=1e-5)
|
642 |
+
numpy.testing.assert_allclose(res.fun, test3_1.expected_fun, atol=1e-5)
|
643 |
+
|
644 |
+
def test_7_1_minkwargs(self):
|
645 |
+
"""Test the minimizer_kwargs arguments for solvers with constraints"""
|
646 |
+
# Test solvers
|
647 |
+
for solver in ['COBYLA', 'SLSQP']:
|
648 |
+
# Note that passing global constraints to SLSQP is tested in other
|
649 |
+
# unittests which run test4_1 normally
|
650 |
+
minimizer_kwargs = {'method': solver,
|
651 |
+
'constraints': test3_1.cons}
|
652 |
+
run_test(test3_1, n=100, test_atol=1e-3,
|
653 |
+
minimizer_kwargs=minimizer_kwargs,
|
654 |
+
sampling_method='sobol')
|
655 |
+
|
656 |
+
def test_7_2_minkwargs(self):
|
657 |
+
"""Test the minimizer_kwargs default inits"""
|
658 |
+
minimizer_kwargs = {'ftol': 1e-5}
|
659 |
+
options = {'disp': True} # For coverage purposes
|
660 |
+
SHGO(test3_1.f, test3_1.bounds, constraints=test3_1.cons[0],
|
661 |
+
minimizer_kwargs=minimizer_kwargs, options=options)
|
662 |
+
|
663 |
+
def test_7_3_minkwargs(self):
|
664 |
+
"""Test minimizer_kwargs arguments for solvers without constraints"""
|
665 |
+
for solver in ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG',
|
666 |
+
'L-BFGS-B', 'TNC', 'dogleg', 'trust-ncg', 'trust-exact',
|
667 |
+
'trust-krylov']:
|
668 |
+
def jac(x):
|
669 |
+
return numpy.array([2 * x[0], 2 * x[1]]).T
|
670 |
+
|
671 |
+
def hess(x):
|
672 |
+
return numpy.array([[2, 0], [0, 2]])
|
673 |
+
|
674 |
+
minimizer_kwargs = {'method': solver,
|
675 |
+
'jac': jac,
|
676 |
+
'hess': hess}
|
677 |
+
logging.info(f"Solver = {solver}")
|
678 |
+
logging.info("=" * 100)
|
679 |
+
run_test(test1_1, n=100, test_atol=1e-3,
|
680 |
+
minimizer_kwargs=minimizer_kwargs,
|
681 |
+
sampling_method='sobol')
|
682 |
+
|
683 |
+
def test_8_homology_group_diff(self):
|
684 |
+
options = {'minhgrd': 1,
|
685 |
+
'minimize_every_iter': True}
|
686 |
+
|
687 |
+
run_test(test1_1, n=None, iters=None, options=options,
|
688 |
+
sampling_method='simplicial')
|
689 |
+
|
690 |
+
def test_9_cons_g(self):
|
691 |
+
"""Test single function constraint passing"""
|
692 |
+
SHGO(test3_1.f, test3_1.bounds, constraints=test3_1.cons[0])
|
693 |
+
|
694 |
+
@pytest.mark.xfail(IS_PYPY and sys.platform == 'win32',
|
695 |
+
reason="Failing and fix in PyPy not planned (see gh-18632)")
|
696 |
+
def test_10_finite_time(self):
|
697 |
+
"""Test single function constraint passing"""
|
698 |
+
options = {'maxtime': 1e-15}
|
699 |
+
|
700 |
+
def f(x):
|
701 |
+
time.sleep(1e-14)
|
702 |
+
return 0.0
|
703 |
+
|
704 |
+
res = shgo(f, test1_1.bounds, iters=5, options=options)
|
705 |
+
# Assert that only 1 rather than 5 requested iterations ran:
|
706 |
+
assert res.nit == 1
|
707 |
+
|
708 |
+
def test_11_f_min_0(self):
|
709 |
+
"""Test to cover the case where f_lowest == 0"""
|
710 |
+
options = {'f_min': 0.0,
|
711 |
+
'disp': True}
|
712 |
+
res = shgo(test1_2.f, test1_2.bounds, n=10, iters=None,
|
713 |
+
options=options, sampling_method='sobol')
|
714 |
+
numpy.testing.assert_equal(0, res.x[0])
|
715 |
+
numpy.testing.assert_equal(0, res.x[1])
|
716 |
+
|
717 |
+
# @nottest
|
718 |
+
@pytest.mark.skip(reason="no way of currently testing this")
|
719 |
+
def test_12_sobol_inf_cons(self):
|
720 |
+
"""Test to cover the case where f_lowest == 0"""
|
721 |
+
# TODO: This test doesn't cover anything new, it is unknown what the
|
722 |
+
# original test was intended for as it was never complete. Delete or
|
723 |
+
# replace in the future.
|
724 |
+
options = {'maxtime': 1e-15,
|
725 |
+
'f_min': 0.0}
|
726 |
+
res = shgo(test1_2.f, test1_2.bounds, n=1, iters=None,
|
727 |
+
options=options, sampling_method='sobol')
|
728 |
+
numpy.testing.assert_equal(0.0, res.fun)
|
729 |
+
|
730 |
+
def test_13_high_sobol(self):
|
731 |
+
"""Test init of high-dimensional sobol sequences"""
|
732 |
+
|
733 |
+
def f(x):
|
734 |
+
return 0
|
735 |
+
|
736 |
+
bounds = [(None, None), ] * 41
|
737 |
+
SHGOc = SHGO(f, bounds, sampling_method='sobol')
|
738 |
+
# SHGOc.sobol_points(2, 50)
|
739 |
+
SHGOc.sampling_function(2, 50)
|
740 |
+
|
741 |
+
def test_14_local_iter(self):
|
742 |
+
"""Test limited local iterations for a pseudo-global mode"""
|
743 |
+
options = {'local_iter': 4}
|
744 |
+
run_test(test5_1, n=60, options=options)
|
745 |
+
|
746 |
+
def test_15_min_every_iter(self):
|
747 |
+
"""Test minimize every iter options and cover function cache"""
|
748 |
+
options = {'minimize_every_iter': True}
|
749 |
+
run_test(test1_1, n=1, iters=7, options=options,
|
750 |
+
sampling_method='sobol')
|
751 |
+
|
752 |
+
def test_16_disp_bounds_minimizer(self, capsys):
|
753 |
+
"""Test disp=True with minimizers that do not support bounds """
|
754 |
+
options = {'disp': True}
|
755 |
+
minimizer_kwargs = {'method': 'nelder-mead'}
|
756 |
+
run_test(test1_2, sampling_method='simplicial',
|
757 |
+
options=options, minimizer_kwargs=minimizer_kwargs)
|
758 |
+
|
759 |
+
def test_17_custom_sampling(self):
|
760 |
+
"""Test the functionality to add custom sampling methods to shgo"""
|
761 |
+
|
762 |
+
def sample(n, d):
|
763 |
+
return numpy.random.uniform(size=(n, d))
|
764 |
+
|
765 |
+
run_test(test1_1, n=30, sampling_method=sample)
|
766 |
+
|
767 |
+
def test_18_bounds_class(self):
|
768 |
+
# test that new and old bounds yield same result
|
769 |
+
def f(x):
|
770 |
+
return numpy.square(x).sum()
|
771 |
+
|
772 |
+
lb = [-6., 1., -5.]
|
773 |
+
ub = [-1., 3., 5.]
|
774 |
+
bounds_old = list(zip(lb, ub))
|
775 |
+
bounds_new = Bounds(lb, ub)
|
776 |
+
|
777 |
+
res_old_bounds = shgo(f, bounds_old)
|
778 |
+
res_new_bounds = shgo(f, bounds_new)
|
779 |
+
|
780 |
+
assert res_new_bounds.nfev == res_old_bounds.nfev
|
781 |
+
assert res_new_bounds.message == res_old_bounds.message
|
782 |
+
assert res_new_bounds.success == res_old_bounds.success
|
783 |
+
x_opt = numpy.array([-1., 1., 0.])
|
784 |
+
numpy.testing.assert_allclose(res_new_bounds.x, x_opt)
|
785 |
+
numpy.testing.assert_allclose(res_new_bounds.x,
|
786 |
+
res_old_bounds.x)
|
787 |
+
|
788 |
+
def test_19_parallelization(self):
|
789 |
+
"""Test the functionality to add custom sampling methods to shgo"""
|
790 |
+
|
791 |
+
with Pool(2) as p:
|
792 |
+
run_test(test1_1, n=30, workers=p.map) # Constrained
|
793 |
+
run_test(test1_1, n=30, workers=map) # Constrained
|
794 |
+
with Pool(2) as p:
|
795 |
+
run_test(test_s, n=30, workers=p.map) # Unconstrained
|
796 |
+
run_test(test_s, n=30, workers=map) # Unconstrained
|
797 |
+
|
798 |
+
def test_20_constrained_args(self):
|
799 |
+
"""Test that constraints can be passed to arguments"""
|
800 |
+
|
801 |
+
def eggholder(x):
|
802 |
+
return (-(x[1] + 47.0)
|
803 |
+
* numpy.sin(numpy.sqrt(abs(x[0] / 2.0 + (x[1] + 47.0))))
|
804 |
+
- x[0] * numpy.sin(numpy.sqrt(abs(x[0] - (x[1] + 47.0))))
|
805 |
+
)
|
806 |
+
|
807 |
+
def f(x): # (cattle-feed)
|
808 |
+
return 24.55 * x[0] + 26.75 * x[1] + 39 * x[2] + 40.50 * x[3]
|
809 |
+
|
810 |
+
bounds = [(0, 1.0), ] * 4
|
811 |
+
|
812 |
+
def g1_modified(x, i):
|
813 |
+
return i * 2.3 * x[0] + i * 5.6 * x[1] + 11.1 * x[2] + 1.3 * x[
|
814 |
+
3] - 5 # >=0
|
815 |
+
|
816 |
+
def g2(x):
|
817 |
+
return (12 * x[0] + 11.9 * x[1] + 41.8 * x[2] + 52.1 * x[3] - 21
|
818 |
+
- 1.645 * numpy.sqrt(0.28 * x[0] ** 2 + 0.19 * x[1] ** 2
|
819 |
+
+ 20.5 * x[2] ** 2 + 0.62 * x[3] ** 2)
|
820 |
+
) # >=0
|
821 |
+
|
822 |
+
def h1(x):
|
823 |
+
return x[0] + x[1] + x[2] + x[3] - 1 # == 0
|
824 |
+
|
825 |
+
cons = ({'type': 'ineq', 'fun': g1_modified, "args": (0,)},
|
826 |
+
{'type': 'ineq', 'fun': g2},
|
827 |
+
{'type': 'eq', 'fun': h1})
|
828 |
+
|
829 |
+
shgo(f, bounds, n=300, iters=1, constraints=cons)
|
830 |
+
# using constrain with arguments AND sampling method sobol
|
831 |
+
shgo(f, bounds, n=300, iters=1, constraints=cons,
|
832 |
+
sampling_method='sobol')
|
833 |
+
|
834 |
+
def test_21_1_jac_true(self):
|
835 |
+
"""Test that shgo can handle objective functions that return the
|
836 |
+
gradient alongside the objective value. Fixes gh-13547"""
|
837 |
+
# previous
|
838 |
+
def func(x):
|
839 |
+
return numpy.sum(numpy.power(x, 2)), 2 * x
|
840 |
+
|
841 |
+
shgo(
|
842 |
+
func,
|
843 |
+
bounds=[[-1, 1], [1, 2]],
|
844 |
+
n=100, iters=5,
|
845 |
+
sampling_method="sobol",
|
846 |
+
minimizer_kwargs={'method': 'SLSQP', 'jac': True}
|
847 |
+
)
|
848 |
+
|
849 |
+
# new
|
850 |
+
def func(x):
|
851 |
+
return numpy.sum(x ** 2), 2 * x
|
852 |
+
|
853 |
+
bounds = [[-1, 1], [1, 2], [-1, 1], [1, 2], [0, 3]]
|
854 |
+
|
855 |
+
res = shgo(func, bounds=bounds, sampling_method="sobol",
|
856 |
+
minimizer_kwargs={'method': 'SLSQP', 'jac': True})
|
857 |
+
ref = minimize(func, x0=[1, 1, 1, 1, 1], bounds=bounds,
|
858 |
+
jac=True)
|
859 |
+
assert res.success
|
860 |
+
assert_allclose(res.fun, ref.fun)
|
861 |
+
assert_allclose(res.x, ref.x, atol=1e-15)
|
862 |
+
|
863 |
+
@pytest.mark.parametrize('derivative', ['jac', 'hess', 'hessp'])
|
864 |
+
def test_21_2_derivative_options(self, derivative):
|
865 |
+
"""shgo used to raise an error when passing `options` with 'jac'
|
866 |
+
# see gh-12963. check that this is resolved
|
867 |
+
"""
|
868 |
+
|
869 |
+
def objective(x):
|
870 |
+
return 3 * x[0] * x[0] + 2 * x[0] + 5
|
871 |
+
|
872 |
+
def gradient(x):
|
873 |
+
return 6 * x[0] + 2
|
874 |
+
|
875 |
+
def hess(x):
|
876 |
+
return 6
|
877 |
+
|
878 |
+
def hessp(x, p):
|
879 |
+
return 6 * p
|
880 |
+
|
881 |
+
derivative_funcs = {'jac': gradient, 'hess': hess, 'hessp': hessp}
|
882 |
+
options = {derivative: derivative_funcs[derivative]}
|
883 |
+
minimizer_kwargs = {'method': 'trust-constr'}
|
884 |
+
|
885 |
+
bounds = [(-100, 100)]
|
886 |
+
res = shgo(objective, bounds, minimizer_kwargs=minimizer_kwargs,
|
887 |
+
options=options)
|
888 |
+
ref = minimize(objective, x0=[0], bounds=bounds, **minimizer_kwargs,
|
889 |
+
**options)
|
890 |
+
|
891 |
+
assert res.success
|
892 |
+
numpy.testing.assert_allclose(res.fun, ref.fun)
|
893 |
+
numpy.testing.assert_allclose(res.x, ref.x)
|
894 |
+
|
895 |
+
def test_21_3_hess_options_rosen(self):
|
896 |
+
"""Ensure the Hessian gets passed correctly to the local minimizer
|
897 |
+
routine. Previous report gh-14533.
|
898 |
+
"""
|
899 |
+
bounds = [(0, 1.6), (0, 1.6), (0, 1.4), (0, 1.4), (0, 1.4)]
|
900 |
+
options = {'jac': rosen_der, 'hess': rosen_hess}
|
901 |
+
minimizer_kwargs = {'method': 'Newton-CG'}
|
902 |
+
res = shgo(rosen, bounds, minimizer_kwargs=minimizer_kwargs,
|
903 |
+
options=options)
|
904 |
+
ref = minimize(rosen, numpy.zeros(5), method='Newton-CG',
|
905 |
+
**options)
|
906 |
+
assert res.success
|
907 |
+
assert_allclose(res.fun, ref.fun)
|
908 |
+
assert_allclose(res.x, ref.x, atol=1e-15)
|
909 |
+
|
910 |
+
def test_21_arg_tuple_sobol(self):
|
911 |
+
"""shgo used to raise an error when passing `args` with Sobol sampling
|
912 |
+
# see gh-12114. check that this is resolved"""
|
913 |
+
|
914 |
+
def fun(x, k):
|
915 |
+
return x[0] ** k
|
916 |
+
|
917 |
+
constraints = ({'type': 'ineq', 'fun': lambda x: x[0] - 1})
|
918 |
+
|
919 |
+
bounds = [(0, 10)]
|
920 |
+
res = shgo(fun, bounds, args=(1,), constraints=constraints,
|
921 |
+
sampling_method='sobol')
|
922 |
+
ref = minimize(fun, numpy.zeros(1), bounds=bounds, args=(1,),
|
923 |
+
constraints=constraints)
|
924 |
+
assert res.success
|
925 |
+
assert_allclose(res.fun, ref.fun)
|
926 |
+
assert_allclose(res.x, ref.x)
|
927 |
+
|
928 |
+
|
929 |
+
# Failure test functions
|
930 |
+
class TestShgoFailures:
    """Exercise the failure / early-termination paths of `shgo`."""

    def test_1_maxiter(self):
        """Test failure on insufficient iterations"""
        opts = {'maxiter': 2}
        res = shgo(test4_1.f, test4_1.bounds, n=2, iters=None,
                   options=opts, sampling_method='sobol')

        numpy.testing.assert_equal(False, res.success)
        # numpy.testing.assert_equal(4, res.nfev)
        numpy.testing.assert_equal(4, res.tnev)

    def test_2_sampling(self):
        """Rejection of unknown sampling method"""
        assert_raises(ValueError, shgo, test1_1.f, test1_1.bounds,
                      sampling_method='not_Sobol')

    def test_3_1_no_min_pool_sobol(self):
        """The routine must stop when no minimiser has been found after
        the maximum specified number of function evaluations."""
        opts = {'maxfev': 10,
                # 'maxev': 10,
                'disp': True}
        res = shgo(test_table.f, test_table.bounds, n=3, options=opts,
                   sampling_method='sobol')
        numpy.testing.assert_equal(False, res.success)
        # numpy.testing.assert_equal(9, res.nfev)
        numpy.testing.assert_equal(12, res.nfev)

    def test_3_2_no_min_pool_simplicial(self):
        """The routine must stop when no minimiser has been found after
        the maximum specified number of sampling evaluations."""
        opts = {'maxev': 10,
                'disp': True}
        res = shgo(test_table.f, test_table.bounds, n=3, options=opts,
                   sampling_method='simplicial')
        numpy.testing.assert_equal(False, res.success)

    def test_4_1_bound_err(self):
        """Specified bounds ub > lb"""
        bad_bounds = [(6, 3), (3, 5)]
        assert_raises(ValueError, shgo, test1_1.f, bad_bounds)

    def test_4_2_bound_err(self):
        """Specified bounds are of the form (lb, ub)"""
        bad_bounds = [(3, 5, 5), (3, 5)]
        assert_raises(ValueError, shgo, test1_1.f, bad_bounds)

    def test_5_1_1_infeasible_sobol(self):
        """An infeasible problem must terminate once maxev is exceeded,
        using the (default) infty constraints option."""
        opts = {'maxev': 100,
                'disp': True}

        res = shgo(test_infeasible.f, test_infeasible.bounds,
                   constraints=test_infeasible.cons, n=100, options=opts,
                   sampling_method='sobol')

        numpy.testing.assert_equal(False, res.success)

    def test_5_1_2_infeasible_sobol(self):
        """An infeasible problem must terminate once maxev is exceeded,
        with the infty constraints option disabled."""
        opts = {'maxev': 100,
                'disp': True,
                'infty_constraints': False}

        res = shgo(test_infeasible.f, test_infeasible.bounds,
                   constraints=test_infeasible.cons, n=100, options=opts,
                   sampling_method='sobol')

        numpy.testing.assert_equal(False, res.success)

    def test_5_2_infeasible_simplicial(self):
        """An infeasible problem must terminate once maxev is exceeded
        (simplicial sampling)."""
        opts = {'maxev': 1000,
                'disp': False}

        res = shgo(test_infeasible.f, test_infeasible.bounds,
                   constraints=test_infeasible.cons, n=100, options=opts,
                   sampling_method='simplicial')

        numpy.testing.assert_equal(False, res.success)

    def test_6_1_lower_known_f_min(self):
        """Test Global mode limiting local evaluations with f* too high"""
        opts = {  # Specify known function value (deliberately unreachable)
            'f_min': test2_1.expected_fun + 2.0,
            'f_tol': 1e-6,
            # Specify number of local iterations to perform+
            'minimize_every_iter': True,
            'local_iter': 1,
            'infty_constraints': False}
        pos_args = (test2_1.f, test2_1.bounds)
        kw_args = {'constraints': test2_1.cons,
                   'n': None,
                   'iters': None,
                   'options': opts,
                   'sampling_method': 'sobol'}
        # Unreachable f_min must surface as a UserWarning.
        warns(UserWarning, shgo, *pos_args, **kw_args)

    def test(self):
        from scipy.optimize import rosen, shgo
        box = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)]

        def counted(x):
            counted.nfev += 1
            return rosen(x)

        counted.nfev = 0

        result = shgo(counted, box)
        print(result.x, result.fun, counted.nfev)  # 50
|
1044 |
+
|
1045 |
+
|
1046 |
+
# Returns
|
1047 |
+
class TestShgoReturns:
    """Check that the OptimizeResult returned by shgo is self-consistent."""

    def test_1_nfev_simplicial(self):
        box = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)]

        def counted(x):
            counted.nfev += 1
            return rosen(x)

        counted.nfev = 0

        result = shgo(counted, box)
        # Reported nfev must equal the actual number of objective calls.
        numpy.testing.assert_equal(counted.nfev, result.nfev)

    def test_1_nfev_sobol(self):
        box = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)]

        def counted(x):
            counted.nfev += 1
            return rosen(x)

        counted.nfev = 0

        result = shgo(counted, box, sampling_method='sobol')
        numpy.testing.assert_equal(counted.nfev, result.nfev)
|
1071 |
+
|
1072 |
+
|
1073 |
+
def test_vector_constraint():
|
1074 |
+
# gh15514
|
1075 |
+
def quad(x):
|
1076 |
+
x = np.asarray(x)
|
1077 |
+
return [np.sum(x ** 2)]
|
1078 |
+
|
1079 |
+
nlc = NonlinearConstraint(quad, [2.2], [3])
|
1080 |
+
oldc = new_constraint_to_old(nlc, np.array([1.0, 1.0]))
|
1081 |
+
|
1082 |
+
res = shgo(rosen, [(0, 10), (0, 10)], constraints=oldc, sampling_method='sobol')
|
1083 |
+
assert np.all(np.sum((res.x)**2) >= 2.2)
|
1084 |
+
assert np.all(np.sum((res.x) ** 2) <= 3.0)
|
1085 |
+
assert res.success
|
1086 |
+
|
1087 |
+
|
1088 |
+
@pytest.mark.filterwarnings("ignore:delta_grad")
def test_trust_constr():
    # shgo must forward NonlinearConstraint objects to a 'trust-constr'
    # local minimizer.
    def sq_norm(x):
        x = np.asarray(x)
        return [np.sum(x ** 2)]

    nlc = NonlinearConstraint(sq_norm, [2.6], [3])
    # note that we don't supply the constraints in minimizer_kwargs,
    # so if the final result obeys the constraints we know that shgo
    # passed them on to 'trust-constr'
    res = shgo(
        rosen,
        [(0, 10), (0, 10)],
        constraints=nlc,
        sampling_method='sobol',
        minimizer_kwargs={'method': 'trust-constr'}
    )
    assert np.all(np.sum(res.x ** 2) >= 2.6)
    assert np.all(np.sum(res.x ** 2) <= 3.0)
    assert res.success
|
1109 |
+
|
1110 |
+
|
1111 |
+
def test_equality_constraints():
|
1112 |
+
# gh16260
|
1113 |
+
bounds = [(0.9, 4.0)] * 2 # Constrain probabilities to 0 and 1.
|
1114 |
+
|
1115 |
+
def faulty(x):
|
1116 |
+
return x[0] + x[1]
|
1117 |
+
|
1118 |
+
nlc = NonlinearConstraint(faulty, 3.9, 3.9)
|
1119 |
+
res = shgo(rosen, bounds=bounds, constraints=nlc)
|
1120 |
+
assert_allclose(np.sum(res.x), 3.9)
|
1121 |
+
|
1122 |
+
def faulty(x):
|
1123 |
+
return x[0] + x[1] - 3.9
|
1124 |
+
|
1125 |
+
constraints = {'type': 'eq', 'fun': faulty}
|
1126 |
+
res = shgo(rosen, bounds=bounds, constraints=constraints)
|
1127 |
+
assert_allclose(np.sum(res.x), 3.9)
|
1128 |
+
|
1129 |
+
bounds = [(0, 1.0)] * 4
|
1130 |
+
# sum of variable should equal 1.
|
1131 |
+
def faulty(x):
|
1132 |
+
return x[0] + x[1] + x[2] + x[3] - 1
|
1133 |
+
|
1134 |
+
# options = {'minimize_every_iter': True, 'local_iter':10}
|
1135 |
+
constraints = {'type': 'eq', 'fun': faulty}
|
1136 |
+
res = shgo(
|
1137 |
+
lambda x: - np.prod(x),
|
1138 |
+
bounds=bounds,
|
1139 |
+
constraints=constraints,
|
1140 |
+
sampling_method='sobol'
|
1141 |
+
)
|
1142 |
+
assert_allclose(np.sum(res.x), 1.0)
|
1143 |
+
|
1144 |
+
def test_gh16971():
|
1145 |
+
def cons(x):
|
1146 |
+
return np.sum(x**2) - 0
|
1147 |
+
|
1148 |
+
c = {'fun': cons, 'type': 'ineq'}
|
1149 |
+
minimizer_kwargs = {
|
1150 |
+
'method': 'COBYLA',
|
1151 |
+
'options': {'rhobeg': 5, 'tol': 5e-1, 'catol': 0.05}
|
1152 |
+
}
|
1153 |
+
|
1154 |
+
s = SHGO(
|
1155 |
+
rosen, [(0, 10)]*2, constraints=c, minimizer_kwargs=minimizer_kwargs
|
1156 |
+
)
|
1157 |
+
|
1158 |
+
assert s.minimizer_kwargs['method'].lower() == 'cobyla'
|
1159 |
+
assert s.minimizer_kwargs['options']['catol'] == 0.05
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__spectral.py
ADDED
@@ -0,0 +1,226 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import itertools
|
2 |
+
|
3 |
+
import numpy as np
|
4 |
+
from numpy import exp
|
5 |
+
from numpy.testing import assert_, assert_equal
|
6 |
+
|
7 |
+
from scipy.optimize import root
|
8 |
+
|
9 |
+
|
10 |
+
def test_performance():
    # Compare performance results to those listed in
    # [Cheng & Li, IMA J. Num. An. 29, 814 (2008)]
    # and
    # [W. La Cruz, J.M. Martinez, M. Raydan, Math. Comp. 75, 1429 (2006)].
    # and those produced by dfsane.f from M. Raydan's website.
    #
    # Where the results disagree, the largest limits are taken.

    e_a = 1e-5
    e_r = 1e-4

    table_1 = [
        dict(F=F_1, x0=x0_1, n=1000, nit=5, nfev=5),
        dict(F=F_1, x0=x0_1, n=10000, nit=2, nfev=2),
        dict(F=F_2, x0=x0_2, n=500, nit=11, nfev=11),
        dict(F=F_2, x0=x0_2, n=2000, nit=11, nfev=11),
        # dict(F=F_4, x0=x0_4, n=999, nit=243, nfev=1188) removed:
        # too sensitive to rounding errors
        # Results from dfsane.f; papers list nit=3, nfev=3
        dict(F=F_6, x0=x0_6, n=100, nit=6, nfev=6),
        # Must have n%3==0, typo in papers?
        dict(F=F_7, x0=x0_7, n=99, nit=23, nfev=29),
        # Must have n%3==0, typo in papers?
        dict(F=F_7, x0=x0_7, n=999, nit=23, nfev=29),
        # Results from dfsane.f; papers list nit=nfev=6?
        dict(F=F_9, x0=x0_9, n=100, nit=12, nfev=18),
        dict(F=F_9, x0=x0_9, n=1000, nit=12, nfev=18),
        # Results from dfsane.f; papers list nit=2, nfev=12
        dict(F=F_10, x0=x0_10, n=1000, nit=5, nfev=5),
    ]

    # Check also scaling invariance
    scales = [1.0, 1e-10, 1e10]
    for xscale, yscale, line_search in itertools.product(
            scales, scales, ['cruz', 'cheng']):
        for prob in table_1:
            n = prob['n']

            def func(x, n):
                # Rescale both the domain and range of the raw residual.
                return yscale * prob['F'](x / xscale, n)

            x0 = prob['x0'](n) * xscale

            # Absolute tolerance: per-component accuracy plus a relative
            # reduction of the initial residual norm.
            fatol = np.sqrt(n) * e_a * yscale + e_r * np.linalg.norm(func(x0, n))

            sigma_eps = 1e-10 * min(yscale / xscale, xscale / yscale)
            sigma_0 = xscale / yscale

            with np.errstate(over='ignore'):
                sol = root(func, x0, args=(n,),
                           options=dict(ftol=0, fatol=fatol,
                                        maxfev=prob['nfev'] + 1,
                                        sigma_0=sigma_0, sigma_eps=sigma_eps,
                                        line_search=line_search),
                           method='DF-SANE')

            err_msg = repr(
                [xscale, yscale, line_search, prob,
                 np.linalg.norm(func(sol.x, n)),
                 fatol, sol.success, sol.nit, sol.nfev]
            )
            assert sol.success, err_msg
            # nfev+1: dfsane.f doesn't count first eval
            assert sol.nfev <= prob['nfev'] + 1, err_msg
            assert sol.nit <= prob['nit'], err_msg
            assert np.linalg.norm(func(sol.x, n)) <= fatol, err_msg
|
74 |
+
|
75 |
+
|
76 |
+
def test_complex():
    # DF-SANE should accept complex-valued residuals and iterates.
    def residual(z):
        return z**2 - 1 + 2j

    x0 = 2.0j
    ftol = 1e-4
    sol = root(residual, x0, tol=ftol, method='DF-SANE')

    assert_(sol.success)

    # The residual norm must shrink by at least the requested factor.
    f0 = np.linalg.norm(residual(x0))
    fx = np.linalg.norm(residual(sol.x))
    assert_(fx <= ftol * f0)
|
89 |
+
|
90 |
+
|
91 |
+
def test_linear_definite():
    # The DF-SANE paper proves convergence for "strongly isolated"
    # solutions.
    #
    # For linear systems F(x) = A x - b = 0, with A positive or
    # negative definite, the solution is strongly isolated.

    def check_solvability(A, b, line_search='cruz'):
        def residual(x):
            return A.dot(x) - b

        # Anchor the tolerance to the residual at the exact solution.
        exact = np.linalg.solve(A, b)
        eps = np.linalg.norm(residual(exact)) * 1e3
        sol = root(
            residual, b,
            options=dict(fatol=eps, ftol=0, maxfev=17523,
                         line_search=line_search),
            method='DF-SANE',
        )
        assert_(sol.success)
        assert_(np.linalg.norm(residual(sol.x)) <= eps)

    n = 90

    # Test linear pos.def. system
    np.random.seed(1234)
    A = np.arange(n * n).reshape(n, n)
    A = A + n * n * np.diag(1 + np.arange(n))
    assert_(np.linalg.eigvals(A).min() > 0)
    b = np.arange(n) * 1.0
    check_solvability(A, b, 'cruz')
    check_solvability(A, b, 'cheng')

    # Test linear neg.def. system
    check_solvability(-A, b, 'cruz')
    check_solvability(-A, b, 'cheng')
|
125 |
+
|
126 |
+
|
127 |
+
def test_shape():
    # The returned solution must keep the shape of the initial guess,
    # for both real and complex data.
    def f(x, arg):
        return x - arg

    for dt in [float, complex]:
        guess = np.zeros([2, 2])
        target = np.ones([2, 2], dtype=dt)

        sol = root(f, guess, args=(target,), method='DF-SANE')
        assert_(sol.success)
        assert_equal(sol.x.shape, guess.shape)
|
138 |
+
|
139 |
+
|
140 |
+
# Some of the test functions and initial guesses listed in
|
141 |
+
# [W. La Cruz, M. Raydan. Optimization Methods and Software, 18, 583 (2003)]
|
142 |
+
|
143 |
+
def F_1(x, n):
    """Exponential benchmark residual 1 (La Cruz & Raydan); zero at x = 1."""
    out = np.zeros([n])
    idx = np.arange(2, n + 1)
    out[0] = exp(x[0] - 1) - 1
    out[1:] = idx * (exp(x[1:] - 1) - x[1:])
    return out
|
149 |
+
|
150 |
+
def x0_1(n):
    """Standard starting point for F_1: every component equals n/(n-1)."""
    return np.full([n], n / (n - 1))
|
154 |
+
|
155 |
+
def F_2(x, n):
    """Exponential benchmark residual 2 (La Cruz & Raydan)."""
    out = np.zeros([n])
    idx = np.arange(2, n + 1)
    out[0] = exp(x[0]) - 1
    # Each tail component couples to its left neighbour.
    out[1:] = 0.1 * idx * (exp(x[1:]) + x[:-1] - 1)
    return out
|
161 |
+
|
162 |
+
def x0_2(n):
    """Standard starting point for F_2: every component equals 1/n**2."""
    return np.full([n], 1 / n**2)
|
166 |
+
|
167 |
+
|
168 |
+
def F_4(x, n):  # skip name check
    """Piecewise-cubic benchmark residual; requires n to be a multiple of 3."""
    assert_equal(n % 3, 0)
    out = np.zeros([n])
    # Note: the first line is typoed in some of the references;
    # correct in original [Gasparo, Optimization Meth. 13, 79 (2000)]
    a, b, c = x[::3], x[1::3], x[2::3]
    out[::3] = 0.6 * a + 1.6 * b**3 - 7.2 * b**2 + 9.6 * b - 4.8
    out[1::3] = (0.48 * a - 0.72 * b**3 + 3.24 * b**2 - 4.32 * b
                 - c + 0.2 * c**3 + 2.16)
    out[2::3] = 1.25 * c - 0.25 * c**3
    return out
|
178 |
+
|
179 |
+
|
180 |
+
def x0_4(n):  # skip name check
    """Starting point for F_4; requires n to be a multiple of 3."""
    assert_equal(n % 3, 0)
    return np.array([-1, 1/2, -1] * (n // 3))
|
184 |
+
|
185 |
+
def F_6(x, n):
    """Discretised H-equation-style residual with coupling constant c = 0.9."""
    c = 0.9
    mu = (np.arange(1, n + 1) - 0.5) / n
    kernel = mu[:, None] * x / (mu[:, None] + mu)
    return x - 1 / (1 - c / (2 * n) * kernel.sum(axis=1))
|
189 |
+
|
190 |
+
def x0_6(n):
    """Starting point for F_6: the all-ones vector."""
    return np.ones([n])
|
192 |
+
|
193 |
+
def F_7(x, n):
    """Piecewise benchmark residual; requires n to be a multiple of 3."""
    assert_equal(n % 3, 0)

    def phi(t):
        # Piecewise-smooth scalar map applied elementwise; later
        # assignments overwrite earlier ones on overlapping ranges.
        v = 0.5 * t - 2
        v[t > -1] = ((-592 * t**3 + 888 * t**2 + 4551 * t - 1924) / 1998)[t > -1]
        v[t >= 2] = (0.5 * t + 2)[t >= 2]
        return v

    out = np.zeros([n])
    out[::3] = 1e4 * x[1::3]**2 - 1
    out[1::3] = exp(-x[::3]) + exp(-x[1::3]) - 1.0001
    out[2::3] = phi(x[2::3])
    return out
|
206 |
+
|
207 |
+
def x0_7(n):
    """Starting point for F_7; requires n to be a multiple of 3."""
    assert_equal(n % 3, 0)
    return np.array([1e-3, 18, 1] * (n // 3))
|
210 |
+
|
211 |
+
def F_9(x, n):
    """Tridiagonal cubic benchmark residual."""
    out = np.zeros([n])
    idx = np.arange(2, n)
    out[0] = x[0]**3 / 3 + x[1]**2 / 2
    out[1:-1] = -x[1:-1]**2 / 2 + idx * x[1:-1]**3 / 3 + x[2:]**2 / 2
    out[-1] = -x[-1]**2 / 2 + n * x[-1]**3 / 3
    return out
|
218 |
+
|
219 |
+
def x0_9(n):
    """Starting point for F_9: the all-ones vector."""
    return np.ones([n])
|
221 |
+
|
222 |
+
def F_10(x, n):
    """Logarithmic benchmark residual: log(1 + x) - x/n (zero at x = 0)."""
    return np.log(1 + x) - x / n
|
224 |
+
|
225 |
+
def x0_10(n):
    """Starting point for F_10: the all-ones vector."""
    return np.ones([n])
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_bracket.py
ADDED
@@ -0,0 +1,780 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
|
3 |
+
import numpy as np
|
4 |
+
from numpy.testing import assert_array_less, assert_allclose, assert_equal
|
5 |
+
|
6 |
+
from scipy.optimize._bracket import _bracket_root, _bracket_minimum, _ELIMITS
|
7 |
+
import scipy._lib._elementwise_iterative_method as eim
|
8 |
+
from scipy import stats
|
9 |
+
|
10 |
+
class TestBracketRoot:
|
11 |
+
@pytest.mark.parametrize("seed", (615655101, 3141866013, 238075752))
|
12 |
+
@pytest.mark.parametrize("use_xmin", (False, True))
|
13 |
+
@pytest.mark.parametrize("other_side", (False, True))
|
14 |
+
@pytest.mark.parametrize("fix_one_side", (False, True))
|
15 |
+
def test_nfev_expected(self, seed, use_xmin, other_side, fix_one_side):
|
16 |
+
# Property-based test to confirm that _bracket_root is behaving as
|
17 |
+
# expected. The basic case is when root < a < b.
|
18 |
+
# The number of times bracket expands (per side) can be found by
|
19 |
+
# setting the expression for the left endpoint of the bracket to the
|
20 |
+
# root of f (x=0), solving for i, and rounding up. The corresponding
|
21 |
+
# lower and upper ends of the bracket are found by plugging this back
|
22 |
+
# into the expression for the ends of the bracket.
|
23 |
+
# `other_side=True` is the case that a < b < root
|
24 |
+
# Special cases like a < root < b are tested separately
|
25 |
+
|
26 |
+
rng = np.random.default_rng(seed)
|
27 |
+
xl0, d, factor = rng.random(size=3) * [1e5, 10, 5]
|
28 |
+
factor = 1 + factor # factor must be greater than 1
|
29 |
+
xr0 = xl0 + d # xr0 must be greater than a in basic case
|
30 |
+
|
31 |
+
def f(x):
|
32 |
+
f.count += 1
|
33 |
+
return x # root is 0
|
34 |
+
|
35 |
+
if use_xmin:
|
36 |
+
xmin = -rng.random()
|
37 |
+
n = np.ceil(np.log(-(xl0 - xmin) / xmin) / np.log(factor))
|
38 |
+
l, u = xmin + (xl0 - xmin)*factor**-n, xmin + (xl0 - xmin)*factor**-(n - 1)
|
39 |
+
kwargs = dict(xl0=xl0, xr0=xr0, factor=factor, xmin=xmin)
|
40 |
+
else:
|
41 |
+
n = np.ceil(np.log(xr0/d) / np.log(factor))
|
42 |
+
l, u = xr0 - d*factor**n, xr0 - d*factor**(n-1)
|
43 |
+
kwargs = dict(xl0=xl0, xr0=xr0, factor=factor)
|
44 |
+
|
45 |
+
if other_side:
|
46 |
+
kwargs['xl0'], kwargs['xr0'] = -kwargs['xr0'], -kwargs['xl0']
|
47 |
+
l, u = -u, -l
|
48 |
+
if 'xmin' in kwargs:
|
49 |
+
kwargs['xmax'] = -kwargs.pop('xmin')
|
50 |
+
|
51 |
+
if fix_one_side:
|
52 |
+
if other_side:
|
53 |
+
kwargs['xmin'] = -xr0
|
54 |
+
else:
|
55 |
+
kwargs['xmax'] = xr0
|
56 |
+
|
57 |
+
f.count = 0
|
58 |
+
res = _bracket_root(f, **kwargs)
|
59 |
+
|
60 |
+
# Compare reported number of function evaluations `nfev` against
|
61 |
+
# reported `nit`, actual function call count `f.count`, and theoretical
|
62 |
+
# number of expansions `n`.
|
63 |
+
# When both sides are free, these get multiplied by 2 because function
|
64 |
+
# is evaluated on the left and the right each iteration.
|
65 |
+
# When one side is fixed, however, we add one: on the right side, the
|
66 |
+
# function gets evaluated once at b.
|
67 |
+
# Add 1 to `n` and `res.nit` because function evaluations occur at
|
68 |
+
# iterations *0*, 1, ..., `n`. Subtract 1 from `f.count` because
|
69 |
+
# function is called separately for left and right in iteration 0.
|
70 |
+
if not fix_one_side:
|
71 |
+
assert res.nfev == 2*(res.nit+1) == 2*(f.count-1) == 2*(n + 1)
|
72 |
+
else:
|
73 |
+
assert res.nfev == (res.nit+1)+1 == (f.count-1)+1 == (n+1)+1
|
74 |
+
|
75 |
+
# Compare reported bracket to theoretical bracket and reported function
|
76 |
+
# values to function evaluated at bracket.
|
77 |
+
bracket = np.asarray([res.xl, res.xr])
|
78 |
+
assert_allclose(bracket, (l, u))
|
79 |
+
f_bracket = np.asarray([res.fl, res.fr])
|
80 |
+
assert_allclose(f_bracket, f(bracket))
|
81 |
+
|
82 |
+
# Check that bracket is valid and that status and success are correct
|
83 |
+
assert res.xr > res.xl
|
84 |
+
signs = np.sign(f_bracket)
|
85 |
+
assert signs[0] == -signs[1]
|
86 |
+
assert res.status == 0
|
87 |
+
assert res.success
|
88 |
+
|
89 |
+
def f(self, q, p):
|
90 |
+
return stats.norm.cdf(q) - p
|
91 |
+
|
92 |
+
@pytest.mark.parametrize('p', [0.6, np.linspace(0.05, 0.95, 10)])
|
93 |
+
@pytest.mark.parametrize('xmin', [-5, None])
|
94 |
+
@pytest.mark.parametrize('xmax', [5, None])
|
95 |
+
@pytest.mark.parametrize('factor', [1.2, 2])
|
96 |
+
def test_basic(self, p, xmin, xmax, factor):
|
97 |
+
# Test basic functionality to bracket root (distribution PPF)
|
98 |
+
res = _bracket_root(self.f, -0.01, 0.01, xmin=xmin, xmax=xmax,
|
99 |
+
factor=factor, args=(p,))
|
100 |
+
assert_equal(-np.sign(res.fl), np.sign(res.fr))
|
101 |
+
|
102 |
+
@pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)])
|
103 |
+
def test_vectorization(self, shape):
|
104 |
+
# Test for correct functionality, output shapes, and dtypes for various
|
105 |
+
# input shapes.
|
106 |
+
p = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6
|
107 |
+
args = (p,)
|
108 |
+
maxiter = 10
|
109 |
+
|
110 |
+
@np.vectorize
|
111 |
+
def bracket_root_single(xl0, xr0, xmin, xmax, factor, p):
|
112 |
+
return _bracket_root(self.f, xl0, xr0, xmin=xmin, xmax=xmax,
|
113 |
+
factor=factor, args=(p,),
|
114 |
+
maxiter=maxiter)
|
115 |
+
|
116 |
+
def f(*args, **kwargs):
|
117 |
+
f.f_evals += 1
|
118 |
+
return self.f(*args, **kwargs)
|
119 |
+
f.f_evals = 0
|
120 |
+
|
121 |
+
rng = np.random.default_rng(2348234)
|
122 |
+
xl0 = -rng.random(size=shape)
|
123 |
+
xr0 = rng.random(size=shape)
|
124 |
+
xmin, xmax = 1e3*xl0, 1e3*xr0
|
125 |
+
if shape: # make some elements un
|
126 |
+
i = rng.random(size=shape) > 0.5
|
127 |
+
xmin[i], xmax[i] = -np.inf, np.inf
|
128 |
+
factor = rng.random(size=shape) + 1.5
|
129 |
+
res = _bracket_root(f, xl0, xr0, xmin=xmin, xmax=xmax, factor=factor,
|
130 |
+
args=args, maxiter=maxiter)
|
131 |
+
refs = bracket_root_single(xl0, xr0, xmin, xmax, factor, p).ravel()
|
132 |
+
|
133 |
+
attrs = ['xl', 'xr', 'fl', 'fr', 'success', 'nfev', 'nit']
|
134 |
+
for attr in attrs:
|
135 |
+
ref_attr = [getattr(ref, attr) for ref in refs]
|
136 |
+
res_attr = getattr(res, attr)
|
137 |
+
assert_allclose(res_attr.ravel(), ref_attr)
|
138 |
+
assert_equal(res_attr.shape, shape)
|
139 |
+
|
140 |
+
assert np.issubdtype(res.success.dtype, np.bool_)
|
141 |
+
if shape:
|
142 |
+
assert np.all(res.success[1:-1])
|
143 |
+
assert np.issubdtype(res.status.dtype, np.integer)
|
144 |
+
assert np.issubdtype(res.nfev.dtype, np.integer)
|
145 |
+
assert np.issubdtype(res.nit.dtype, np.integer)
|
146 |
+
assert_equal(np.max(res.nit), f.f_evals - 2)
|
147 |
+
assert_array_less(res.xl, res.xr)
|
148 |
+
assert_allclose(res.fl, self.f(res.xl, *args))
|
149 |
+
assert_allclose(res.fr, self.f(res.xr, *args))
|
150 |
+
|
151 |
+
def test_flags(self):
|
152 |
+
# Test cases that should produce different status flags; show that all
|
153 |
+
# can be produced simultaneously.
|
154 |
+
def f(xs, js):
|
155 |
+
funcs = [lambda x: x - 1.5,
|
156 |
+
lambda x: x - 1000,
|
157 |
+
lambda x: x - 1000,
|
158 |
+
lambda x: np.nan]
|
159 |
+
|
160 |
+
return [funcs[j](x) for x, j in zip(xs, js)]
|
161 |
+
|
162 |
+
args = (np.arange(4, dtype=np.int64),)
|
163 |
+
res = _bracket_root(f, xl0=[-1, -1, -1, -1], xr0=[1, 1, 1, 1],
|
164 |
+
xmin=[-np.inf, -1, -np.inf, -np.inf],
|
165 |
+
xmax=[np.inf, 1, np.inf, np.inf],
|
166 |
+
args=args, maxiter=3)
|
167 |
+
|
168 |
+
ref_flags = np.array([eim._ECONVERGED,
|
169 |
+
_ELIMITS,
|
170 |
+
eim._ECONVERR,
|
171 |
+
eim._EVALUEERR])
|
172 |
+
assert_equal(res.status, ref_flags)
|
173 |
+
|
174 |
+
@pytest.mark.parametrize("root", (0.622, [0.622, 0.623]))
|
175 |
+
@pytest.mark.parametrize('xmin', [-5, None])
|
176 |
+
@pytest.mark.parametrize('xmax', [5, None])
|
177 |
+
@pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64))
|
178 |
+
def test_dtype(self, root, xmin, xmax, dtype):
|
179 |
+
# Test that dtypes are preserved
|
180 |
+
|
181 |
+
xmin = xmin if xmin is None else dtype(xmin)
|
182 |
+
xmax = xmax if xmax is None else dtype(xmax)
|
183 |
+
root = dtype(root)
|
184 |
+
def f(x, root):
|
185 |
+
return ((x - root) ** 3).astype(dtype)
|
186 |
+
|
187 |
+
bracket = np.asarray([-0.01, 0.01], dtype=dtype)
|
188 |
+
res = _bracket_root(f, *bracket, xmin=xmin, xmax=xmax, args=(root,))
|
189 |
+
assert np.all(res.success)
|
190 |
+
assert res.xl.dtype == res.xr.dtype == dtype
|
191 |
+
assert res.fl.dtype == res.fr.dtype == dtype
|
192 |
+
|
193 |
+
def test_input_validation(self):
|
194 |
+
# Test input validation for appropriate error messages
|
195 |
+
|
196 |
+
message = '`func` must be callable.'
|
197 |
+
with pytest.raises(ValueError, match=message):
|
198 |
+
_bracket_root(None, -4, 4)
|
199 |
+
|
200 |
+
message = '...must be numeric and real.'
|
201 |
+
with pytest.raises(ValueError, match=message):
|
202 |
+
_bracket_root(lambda x: x, -4+1j, 4)
|
203 |
+
with pytest.raises(ValueError, match=message):
|
204 |
+
_bracket_root(lambda x: x, -4, 'hello')
|
205 |
+
with pytest.raises(ValueError, match=message):
|
206 |
+
_bracket_root(lambda x: x, -4, 4, xmin=np)
|
207 |
+
with pytest.raises(ValueError, match=message):
|
208 |
+
_bracket_root(lambda x: x, -4, 4, xmax=object())
|
209 |
+
with pytest.raises(ValueError, match=message):
|
210 |
+
_bracket_root(lambda x: x, -4, 4, factor=sum)
|
211 |
+
|
212 |
+
message = "All elements of `factor` must be greater than 1."
|
213 |
+
with pytest.raises(ValueError, match=message):
|
214 |
+
_bracket_root(lambda x: x, -4, 4, factor=0.5)
|
215 |
+
|
216 |
+
message = '`xmin <= xl0 < xr0 <= xmax` must be True'
|
217 |
+
with pytest.raises(ValueError, match=message):
|
218 |
+
_bracket_root(lambda x: x, 4, -4)
|
219 |
+
with pytest.raises(ValueError, match=message):
|
220 |
+
_bracket_root(lambda x: x, -4, 4, xmax=np.nan)
|
221 |
+
with pytest.raises(ValueError, match=message):
|
222 |
+
_bracket_root(lambda x: x, -4, 4, xmin=10)
|
223 |
+
|
224 |
+
message = "shape mismatch: objects cannot be broadcast"
|
225 |
+
# raised by `np.broadcast, but the traceback is readable IMO
|
226 |
+
with pytest.raises(ValueError, match=message):
|
227 |
+
_bracket_root(lambda x: x, [-2, -3], [3, 4, 5])
|
228 |
+
# Consider making this give a more readable error message
|
229 |
+
# with pytest.raises(ValueError, match=message):
|
230 |
+
# _bracket_root(lambda x: [x[0], x[1], x[1]], [-3, -3], [5, 5])
|
231 |
+
|
232 |
+
message = '`maxiter` must be a non-negative integer.'
|
233 |
+
with pytest.raises(ValueError, match=message):
|
234 |
+
_bracket_root(lambda x: x, -4, 4, maxiter=1.5)
|
235 |
+
with pytest.raises(ValueError, match=message):
|
236 |
+
_bracket_root(lambda x: x, -4, 4, maxiter=-1)
|
237 |
+
|
238 |
+
def test_special_cases(self):
|
239 |
+
# Test edge cases and other special cases
|
240 |
+
|
241 |
+
# Test that integers are not passed to `f`
|
242 |
+
# (otherwise this would overflow)
|
243 |
+
def f(x):
|
244 |
+
assert np.issubdtype(x.dtype, np.floating)
|
245 |
+
return x ** 99 - 1
|
246 |
+
|
247 |
+
res = _bracket_root(f, -7, 5)
|
248 |
+
assert res.success
|
249 |
+
|
250 |
+
# Test maxiter = 0. Should do nothing to bracket.
|
251 |
+
def f(x):
|
252 |
+
return x - 10
|
253 |
+
|
254 |
+
bracket = (-3, 5)
|
255 |
+
res = _bracket_root(f, *bracket, maxiter=0)
|
256 |
+
assert res.xl, res.xr == bracket
|
257 |
+
assert res.nit == 0
|
258 |
+
assert res.nfev == 2
|
259 |
+
assert res.status == -2
|
260 |
+
|
261 |
+
# Test scalar `args` (not in tuple)
|
262 |
+
def f(x, c):
|
263 |
+
return c*x - 1
|
264 |
+
|
265 |
+
res = _bracket_root(f, -1, 1, args=3)
|
266 |
+
assert res.success
|
267 |
+
assert_allclose(res.fl, f(res.xl, 3))
|
268 |
+
|
269 |
+
# Test other edge cases
|
270 |
+
|
271 |
+
def f(x):
|
272 |
+
f.count += 1
|
273 |
+
return x
|
274 |
+
|
275 |
+
# 1. root lies within guess of bracket
|
276 |
+
f.count = 0
|
277 |
+
_bracket_root(f, -10, 20)
|
278 |
+
assert_equal(f.count, 2)
|
279 |
+
|
280 |
+
# 2. bracket endpoint hits root exactly
|
281 |
+
f.count = 0
|
282 |
+
res = _bracket_root(f, 5, 10, factor=2)
|
283 |
+
bracket = (res.xl, res.xr)
|
284 |
+
assert_equal(res.nfev, 4)
|
285 |
+
assert_allclose(bracket, (0, 5), atol=1e-15)
|
286 |
+
|
287 |
+
# 3. bracket limit hits root exactly
|
288 |
+
with np.errstate(over='ignore'):
|
289 |
+
res = _bracket_root(f, 5, 10, xmin=0)
|
290 |
+
bracket = (res.xl, res.xr)
|
291 |
+
assert_allclose(bracket[0], 0, atol=1e-15)
|
292 |
+
with np.errstate(over='ignore'):
|
293 |
+
res = _bracket_root(f, -10, -5, xmax=0)
|
294 |
+
bracket = (res.xl, res.xr)
|
295 |
+
assert_allclose(bracket[1], 0, atol=1e-15)
|
296 |
+
|
297 |
+
# 4. bracket not within min, max
|
298 |
+
with np.errstate(over='ignore'):
|
299 |
+
res = _bracket_root(f, 5, 10, xmin=1)
|
300 |
+
assert not res.success
|
301 |
+
|
302 |
+
|
303 |
+
class TestBracketMinimum:
|
304 |
+
def init_f(self):
|
305 |
+
def f(x, a, b):
|
306 |
+
f.count += 1
|
307 |
+
return (x - a)**2 + b
|
308 |
+
f.count = 0
|
309 |
+
return f
|
310 |
+
|
311 |
+
def assert_valid_bracket(self, result):
|
312 |
+
assert np.all(
|
313 |
+
(result.xl < result.xm) & (result.xm < result.xr)
|
314 |
+
)
|
315 |
+
assert np.all(
|
316 |
+
(result.fl >= result.fm) & (result.fr > result.fm)
|
317 |
+
| (result.fl > result.fm) & (result.fr > result.fm)
|
318 |
+
)
|
319 |
+
|
320 |
+
def get_kwargs(
|
321 |
+
self, *, xl0=None, xr0=None, factor=None, xmin=None, xmax=None, args=()
|
322 |
+
):
|
323 |
+
names = ("xl0", "xr0", "xmin", "xmax", "factor", "args")
|
324 |
+
return {
|
325 |
+
name: val for name, val in zip(names, (xl0, xr0, xmin, xmax, factor, args))
|
326 |
+
if isinstance(val, np.ndarray) or np.isscalar(val)
|
327 |
+
or val not in [None, ()]
|
328 |
+
}
|
329 |
+
|
330 |
+
@pytest.mark.parametrize(
|
331 |
+
"seed",
|
332 |
+
(
|
333 |
+
307448016549685229886351382450158984917,
|
334 |
+
11650702770735516532954347931959000479,
|
335 |
+
113767103358505514764278732330028568336,
|
336 |
+
)
|
337 |
+
)
|
338 |
+
@pytest.mark.parametrize("use_xmin", (False, True))
|
339 |
+
@pytest.mark.parametrize("other_side", (False, True))
|
340 |
+
def test_nfev_expected(self, seed, use_xmin, other_side):
|
341 |
+
rng = np.random.default_rng(seed)
|
342 |
+
args = (0, 0) # f(x) = x^2 with minimum at 0
|
343 |
+
# xl0, xm0, xr0 are chosen such that the initial bracket is to
|
344 |
+
# the right of the minimum, and the bracket will expand
|
345 |
+
# downhill towards zero.
|
346 |
+
xl0, d1, d2, factor = rng.random(size=4) * [1e5, 10, 10, 5]
|
347 |
+
xm0 = xl0 + d1
|
348 |
+
xr0 = xm0 + d2
|
349 |
+
# Factor should be greater than one.
|
350 |
+
factor += 1
|
351 |
+
|
352 |
+
if use_xmin:
|
353 |
+
xmin = -rng.random() * 5
|
354 |
+
n = int(np.ceil(np.log(-(xl0 - xmin) / xmin) / np.log(factor)))
|
355 |
+
lower = xmin + (xl0 - xmin)*factor**-n
|
356 |
+
middle = xmin + (xl0 - xmin)*factor**-(n-1)
|
357 |
+
upper = xmin + (xl0 - xmin)*factor**-(n-2) if n > 1 else xm0
|
358 |
+
# It may be the case the lower is below the minimum, but we still
|
359 |
+
# don't have a valid bracket.
|
360 |
+
if middle**2 > lower**2:
|
361 |
+
n += 1
|
362 |
+
lower, middle, upper = (
|
363 |
+
xmin + (xl0 - xmin)*factor**-n, lower, middle
|
364 |
+
)
|
365 |
+
else:
|
366 |
+
xmin = None
|
367 |
+
n = int(np.ceil(np.log(xl0 / d1) / np.log(factor)))
|
368 |
+
lower = xl0 - d1*factor**n
|
369 |
+
middle = xl0 - d1*factor**(n-1) if n > 1 else xl0
|
370 |
+
upper = xl0 - d1*factor**(n-2) if n > 1 else xm0
|
371 |
+
# It may be the case the lower is below the minimum, but we still
|
372 |
+
# don't have a valid bracket.
|
373 |
+
if middle**2 > lower**2:
|
374 |
+
n += 1
|
375 |
+
lower, middle, upper = (
|
376 |
+
xl0 - d1*factor**n, lower, middle
|
377 |
+
)
|
378 |
+
f = self.init_f()
|
379 |
+
|
380 |
+
xmax = None
|
381 |
+
if other_side:
|
382 |
+
xl0, xm0, xr0 = -xr0, -xm0, -xl0
|
383 |
+
xmin, xmax = None, -xmin if xmin is not None else None
|
384 |
+
lower, middle, upper = -upper, -middle, -lower
|
385 |
+
|
386 |
+
kwargs = self.get_kwargs(
|
387 |
+
xl0=xl0, xr0=xr0, xmin=xmin, xmax=xmax, factor=factor, args=args
|
388 |
+
)
|
389 |
+
result = _bracket_minimum(f, xm0, **kwargs)
|
390 |
+
|
391 |
+
# Check that `nfev` and `nit` have the correct relationship
|
392 |
+
assert result.nfev == result.nit + 3
|
393 |
+
# Check that `nfev` reports the correct number of function evaluations.
|
394 |
+
assert result.nfev == f.count
|
395 |
+
# Check that the number of iterations matches the theoretical value.
|
396 |
+
assert result.nit == n
|
397 |
+
|
398 |
+
# Compare reported bracket to theoretical bracket and reported function
|
399 |
+
# values to function evaluated at bracket.
|
400 |
+
bracket = np.asarray([result.xl, result.xm, result.xr])
|
401 |
+
assert_allclose(bracket, (lower, middle, upper))
|
402 |
+
f_bracket = np.asarray([result.fl, result.fm, result.fr])
|
403 |
+
assert_allclose(f_bracket, f(bracket, *args))
|
404 |
+
|
405 |
+
self.assert_valid_bracket(result)
|
406 |
+
assert result.status == 0
|
407 |
+
assert result.success
|
408 |
+
|
409 |
+
def test_flags(self):
|
410 |
+
# Test cases that should produce different status flags; show that all
|
411 |
+
# can be produced simultaneously
|
412 |
+
def f(xs, js):
|
413 |
+
funcs = [lambda x: (x - 1.5)**2,
|
414 |
+
lambda x: x,
|
415 |
+
lambda x: x,
|
416 |
+
lambda x: np.nan]
|
417 |
+
return [funcs[j](x) for x, j in zip(xs, js)]
|
418 |
+
|
419 |
+
args = (np.arange(4, dtype=np.int64),)
|
420 |
+
xl0, xm0, xr0 = np.full(4, -1.0), np.full(4, 0.0), np.full(4, 1.0)
|
421 |
+
result = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0,
|
422 |
+
xmin=[-np.inf, -1.0, -np.inf, -np.inf],
|
423 |
+
args=args, maxiter=3)
|
424 |
+
|
425 |
+
reference_flags = np.array([eim._ECONVERGED, _ELIMITS,
|
426 |
+
eim._ECONVERR, eim._EVALUEERR])
|
427 |
+
assert_equal(result.status, reference_flags)
|
428 |
+
|
429 |
+
@pytest.mark.parametrize("minimum", (0.622, [0.622, 0.623]))
|
430 |
+
@pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64))
|
431 |
+
@pytest.mark.parametrize("xmin", [-5, None])
|
432 |
+
@pytest.mark.parametrize("xmax", [5, None])
|
433 |
+
def test_dtypes(self, minimum, xmin, xmax, dtype):
|
434 |
+
xmin = xmin if xmin is None else dtype(xmin)
|
435 |
+
xmax = xmax if xmax is None else dtype(xmax)
|
436 |
+
minimum = dtype(minimum)
|
437 |
+
|
438 |
+
def f(x, minimum):
|
439 |
+
return ((x - minimum)**2).astype(dtype)
|
440 |
+
|
441 |
+
xl0, xm0, xr0 = np.array([-0.01, 0.0, 0.01], dtype=dtype)
|
442 |
+
result = _bracket_minimum(
|
443 |
+
f, xm0, xl0=xl0, xr0=xr0, xmin=xmin, xmax=xmax, args=(minimum, )
|
444 |
+
)
|
445 |
+
assert np.all(result.success)
|
446 |
+
assert result.xl.dtype == result.xm.dtype == result.xr.dtype == dtype
|
447 |
+
assert result.fl.dtype == result.fm.dtype == result.fr.dtype == dtype
|
448 |
+
|
449 |
+
def test_input_validation(self):
|
450 |
+
# Test input validation for appropriate error messages
|
451 |
+
|
452 |
+
message = '`func` must be callable.'
|
453 |
+
with pytest.raises(ValueError, match=message):
|
454 |
+
_bracket_minimum(None, -4, xl0=4)
|
455 |
+
|
456 |
+
message = '...must be numeric and real.'
|
457 |
+
with pytest.raises(ValueError, match=message):
|
458 |
+
_bracket_minimum(lambda x: x**2, 4+1j)
|
459 |
+
with pytest.raises(ValueError, match=message):
|
460 |
+
_bracket_minimum(lambda x: x**2, -4, xl0='hello')
|
461 |
+
with pytest.raises(ValueError, match=message):
|
462 |
+
_bracket_minimum(lambda x: x**2, -4, xmin=np)
|
463 |
+
with pytest.raises(ValueError, match=message):
|
464 |
+
_bracket_minimum(lambda x: x**2, -4, xmax=object())
|
465 |
+
with pytest.raises(ValueError, match=message):
|
466 |
+
_bracket_minimum(lambda x: x**2, -4, factor=sum)
|
467 |
+
|
468 |
+
message = "All elements of `factor` must be greater than 1."
|
469 |
+
with pytest.raises(ValueError, match=message):
|
470 |
+
_bracket_minimum(lambda x: x, -4, factor=0.5)
|
471 |
+
|
472 |
+
message = '`xmin <= xl0 < xm0 < xr0 <= xmax` must be True'
|
473 |
+
with pytest.raises(ValueError, match=message):
|
474 |
+
_bracket_minimum(lambda x: x**2, 4, xl0=6)
|
475 |
+
with pytest.raises(ValueError, match=message):
|
476 |
+
_bracket_minimum(lambda x: x**2, -4, xr0=-6)
|
477 |
+
with pytest.raises(ValueError, match=message):
|
478 |
+
_bracket_minimum(lambda x: x**2, -4, xl0=-3, xr0=-2)
|
479 |
+
with pytest.raises(ValueError, match=message):
|
480 |
+
_bracket_minimum(lambda x: x**2, -4, xl0=-6, xr0=-5)
|
481 |
+
with pytest.raises(ValueError, match=message):
|
482 |
+
_bracket_minimum(lambda x: x**2, -4, xl0=-np.nan)
|
483 |
+
with pytest.raises(ValueError, match=message):
|
484 |
+
_bracket_minimum(lambda x: x**2, -4, xr0=np.nan)
|
485 |
+
|
486 |
+
message = "shape mismatch: objects cannot be broadcast"
|
487 |
+
# raised by `np.broadcast, but the traceback is readable IMO
|
488 |
+
with pytest.raises(ValueError, match=message):
|
489 |
+
_bracket_minimum(lambda x: x**2, [-2, -3], xl0=[-3, -4, -5])
|
490 |
+
|
491 |
+
message = '`maxiter` must be a non-negative integer.'
|
492 |
+
with pytest.raises(ValueError, match=message):
|
493 |
+
_bracket_minimum(lambda x: x**2, -4, xr0=4, maxiter=1.5)
|
494 |
+
with pytest.raises(ValueError, match=message):
|
495 |
+
_bracket_minimum(lambda x: x**2, -4, xr0=4, maxiter=-1)
|
496 |
+
|
497 |
+
@pytest.mark.parametrize("xl0", [0.0, None])
|
498 |
+
@pytest.mark.parametrize("xm0", (0.05, 0.1, 0.15))
|
499 |
+
@pytest.mark.parametrize("xr0", (0.2, 0.4, 0.6, None))
|
500 |
+
# Minimum is ``a`` for each tuple ``(a, b)`` below. Tests cases where minimum
|
501 |
+
# is within, or at varying disances to the left or right of the initial
|
502 |
+
# bracket.
|
503 |
+
@pytest.mark.parametrize(
|
504 |
+
"args",
|
505 |
+
(
|
506 |
+
(1.2, 0), (-0.5, 0), (0.1, 0), (0.2, 0), (3.6, 0), (21.4, 0),
|
507 |
+
(121.6, 0), (5764.1, 0), (-6.4, 0), (-12.9, 0), (-146.2, 0)
|
508 |
+
)
|
509 |
+
)
|
510 |
+
def test_scalar_no_limits(self, xl0, xm0, xr0, args):
|
511 |
+
f = self.init_f()
|
512 |
+
kwargs = self.get_kwargs(xl0=xl0, xr0=xr0, args=args)
|
513 |
+
result = _bracket_minimum(f, xm0, **kwargs)
|
514 |
+
self.assert_valid_bracket(result)
|
515 |
+
assert result.status == 0
|
516 |
+
assert result.success
|
517 |
+
assert result.nfev == f.count
|
518 |
+
|
519 |
+
@pytest.mark.parametrize(
|
520 |
+
# xmin is set at 0.0 in all cases.
|
521 |
+
"xl0,xm0,xr0,xmin",
|
522 |
+
(
|
523 |
+
# Initial bracket at varying distances from the xmin.
|
524 |
+
(0.5, 0.75, 1.0, 0.0),
|
525 |
+
(1.0, 2.5, 4.0, 0.0),
|
526 |
+
(2.0, 4.0, 6.0, 0.0),
|
527 |
+
(12.0, 16.0, 20.0, 0.0),
|
528 |
+
# Test default initial left endpoint selection. It should not
|
529 |
+
# be below xmin.
|
530 |
+
(None, 0.75, 1.0, 0.0),
|
531 |
+
(None, 2.5, 4.0, 0.0),
|
532 |
+
(None, 4.0, 6.0, 0.0),
|
533 |
+
(None, 16.0, 20.0, 0.0),
|
534 |
+
)
|
535 |
+
)
|
536 |
+
@pytest.mark.parametrize(
|
537 |
+
"args", (
|
538 |
+
(0.0, 0.0), # Minimum is directly at xmin.
|
539 |
+
(1e-300, 0.0), # Minimum is extremely close to xmin.
|
540 |
+
(1e-20, 0.0), # Minimum is very close to xmin.
|
541 |
+
# Minimum at varying distances from xmin.
|
542 |
+
(0.1, 0.0),
|
543 |
+
(0.2, 0.0),
|
544 |
+
(0.4, 0.0)
|
545 |
+
)
|
546 |
+
)
|
547 |
+
def test_scalar_with_limit_left(self, xl0, xm0, xr0, xmin, args):
|
548 |
+
f = self.init_f()
|
549 |
+
kwargs = self.get_kwargs(xl0=xl0, xr0=xr0, xmin=xmin, args=args)
|
550 |
+
result = _bracket_minimum(f, xm0, **kwargs)
|
551 |
+
self.assert_valid_bracket(result)
|
552 |
+
assert result.status == 0
|
553 |
+
assert result.success
|
554 |
+
assert result.nfev == f.count
|
555 |
+
|
556 |
+
@pytest.mark.parametrize(
|
557 |
+
#xmax is set to 1.0 in all cases.
|
558 |
+
"xl0,xm0,xr0,xmax",
|
559 |
+
(
|
560 |
+
# Bracket at varying distances from xmax.
|
561 |
+
(0.2, 0.3, 0.4, 1.0),
|
562 |
+
(0.05, 0.075, 0.1, 1.0),
|
563 |
+
(-0.2, -0.1, 0.0, 1.0),
|
564 |
+
(-21.2, -17.7, -14.2, 1.0),
|
565 |
+
# Test default right endpoint selection. It should not exceed xmax.
|
566 |
+
(0.2, 0.3, None, 1.0),
|
567 |
+
(0.05, 0.075, None, 1.0),
|
568 |
+
(-0.2, -0.1, None, 1.0),
|
569 |
+
(-21.2, -17.7, None, 1.0),
|
570 |
+
)
|
571 |
+
)
|
572 |
+
@pytest.mark.parametrize(
|
573 |
+
"args", (
|
574 |
+
(0.9999999999999999, 0.0), # Minimum very close to xmax.
|
575 |
+
# Minimum at varying distances from xmax.
|
576 |
+
(0.9, 0.0),
|
577 |
+
(0.7, 0.0),
|
578 |
+
(0.5, 0.0)
|
579 |
+
)
|
580 |
+
)
|
581 |
+
def test_scalar_with_limit_right(self, xl0, xm0, xr0, xmax, args):
|
582 |
+
f = self.init_f()
|
583 |
+
kwargs = self.get_kwargs(xl0=xl0, xr0=xr0, xmax=xmax, args=args)
|
584 |
+
result = _bracket_minimum(f, xm0, **kwargs)
|
585 |
+
self.assert_valid_bracket(result)
|
586 |
+
assert result.status == 0
|
587 |
+
assert result.success
|
588 |
+
assert result.nfev == f.count
|
589 |
+
|
590 |
+
@pytest.mark.parametrize(
|
591 |
+
"xl0,xm0,xr0,xmin,xmax,args",
|
592 |
+
(
|
593 |
+
( # Case 1:
|
594 |
+
# Initial bracket.
|
595 |
+
0.2,
|
596 |
+
0.3,
|
597 |
+
0.4,
|
598 |
+
# Function slopes down to the right from the bracket to a minimum
|
599 |
+
# at 1.0. xmax is also at 1.0
|
600 |
+
None,
|
601 |
+
1.0,
|
602 |
+
(1.0, 0.0)
|
603 |
+
),
|
604 |
+
( # Case 2:
|
605 |
+
# Initial bracket.
|
606 |
+
1.4,
|
607 |
+
1.95,
|
608 |
+
2.5,
|
609 |
+
# Function slopes down to the left from the bracket to a minimum at
|
610 |
+
# 0.3 with xmin set to 0.3.
|
611 |
+
0.3,
|
612 |
+
None,
|
613 |
+
(0.3, 0.0)
|
614 |
+
),
|
615 |
+
(
|
616 |
+
# Case 3:
|
617 |
+
# Initial bracket.
|
618 |
+
2.6,
|
619 |
+
3.25,
|
620 |
+
3.9,
|
621 |
+
# Function slopes down and to the right to a minimum at 99.4 with xmax
|
622 |
+
# at 99.4. Tests case where minimum is at xmax relatively further from
|
623 |
+
# the bracket.
|
624 |
+
None,
|
625 |
+
99.4,
|
626 |
+
(99.4, 0)
|
627 |
+
),
|
628 |
+
(
|
629 |
+
# Case 4:
|
630 |
+
# Initial bracket.
|
631 |
+
4,
|
632 |
+
4.5,
|
633 |
+
5,
|
634 |
+
# Function slopes down and to the left away from the bracket with a
|
635 |
+
# minimum at -26.3 with xmin set to -26.3. Tests case where minimum is
|
636 |
+
# at xmin relatively far from the bracket.
|
637 |
+
-26.3,
|
638 |
+
None,
|
639 |
+
(-26.3, 0)
|
640 |
+
),
|
641 |
+
(
|
642 |
+
# Case 5:
|
643 |
+
# Similar to Case 1 above, but tests default values of xl0 and xr0.
|
644 |
+
None,
|
645 |
+
0.3,
|
646 |
+
None,
|
647 |
+
None,
|
648 |
+
1.0,
|
649 |
+
(1.0, 0.0)
|
650 |
+
),
|
651 |
+
( # Case 6:
|
652 |
+
# Similar to Case 2 above, but tests default values of xl0 and xr0.
|
653 |
+
None,
|
654 |
+
1.95,
|
655 |
+
None,
|
656 |
+
0.3,
|
657 |
+
None,
|
658 |
+
(0.3, 0.0)
|
659 |
+
),
|
660 |
+
(
|
661 |
+
# Case 7:
|
662 |
+
# Similar to Case 3 above, but tests default values of xl0 and xr0.
|
663 |
+
None,
|
664 |
+
3.25,
|
665 |
+
None,
|
666 |
+
None,
|
667 |
+
99.4,
|
668 |
+
(99.4, 0)
|
669 |
+
),
|
670 |
+
(
|
671 |
+
# Case 8:
|
672 |
+
# Similar to Case 4 above, but tests default values of xl0 and xr0.
|
673 |
+
None,
|
674 |
+
4.5,
|
675 |
+
None,
|
676 |
+
-26.3,
|
677 |
+
None,
|
678 |
+
(-26.3, 0)
|
679 |
+
),
|
680 |
+
)
|
681 |
+
)
|
682 |
+
def test_minimum_at_boundary_point(self, xl0, xm0, xr0, xmin, xmax, args):
|
683 |
+
f = self.init_f()
|
684 |
+
kwargs = self.get_kwargs(xr0=xr0, xmin=xmin, xmax=xmax, args=args)
|
685 |
+
result = _bracket_minimum(f, xm0, **kwargs)
|
686 |
+
assert result.status == -1
|
687 |
+
assert args[0] in (result.xl, result.xr)
|
688 |
+
assert result.nfev == f.count
|
689 |
+
|
690 |
+
@pytest.mark.parametrize('shape', [tuple(), (12, ), (3, 4), (3, 2, 2)])
|
691 |
+
def test_vectorization(self, shape):
|
692 |
+
# Test for correct functionality, output shapes, and dtypes for
|
693 |
+
# various input shapes.
|
694 |
+
a = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6
|
695 |
+
args = (a, 0.0)
|
696 |
+
maxiter = 10
|
697 |
+
|
698 |
+
@np.vectorize
|
699 |
+
def bracket_minimum_single(xm0, xl0, xr0, xmin, xmax, factor, a):
|
700 |
+
return _bracket_minimum(self.init_f(), xm0, xl0=xl0, xr0=xr0, xmin=xmin,
|
701 |
+
xmax=xmax, factor=factor, maxiter=maxiter,
|
702 |
+
args=(a, 0.0))
|
703 |
+
|
704 |
+
f = self.init_f()
|
705 |
+
|
706 |
+
rng = np.random.default_rng(2348234)
|
707 |
+
xl0 = -rng.random(size=shape)
|
708 |
+
xr0 = rng.random(size=shape)
|
709 |
+
xm0 = xl0 + rng.random(size=shape) * (xr0 - xl0)
|
710 |
+
xmin, xmax = 1e3*xl0, 1e3*xr0
|
711 |
+
if shape: # make some elements un
|
712 |
+
i = rng.random(size=shape) > 0.5
|
713 |
+
xmin[i], xmax[i] = -np.inf, np.inf
|
714 |
+
factor = rng.random(size=shape) + 1.5
|
715 |
+
res = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, xmin=xmin, xmax=xmax,
|
716 |
+
factor=factor, args=args, maxiter=maxiter)
|
717 |
+
refs = bracket_minimum_single(xm0, xl0, xr0, xmin, xmax, factor, a).ravel()
|
718 |
+
|
719 |
+
attrs = ['xl', 'xm', 'xr', 'fl', 'fm', 'fr', 'success', 'nfev', 'nit']
|
720 |
+
for attr in attrs:
|
721 |
+
ref_attr = [getattr(ref, attr) for ref in refs]
|
722 |
+
res_attr = getattr(res, attr)
|
723 |
+
assert_allclose(res_attr.ravel(), ref_attr)
|
724 |
+
assert_equal(res_attr.shape, shape)
|
725 |
+
|
726 |
+
assert np.issubdtype(res.success.dtype, np.bool_)
|
727 |
+
if shape:
|
728 |
+
assert np.all(res.success[1:-1])
|
729 |
+
assert np.issubdtype(res.status.dtype, np.integer)
|
730 |
+
assert np.issubdtype(res.nfev.dtype, np.integer)
|
731 |
+
assert np.issubdtype(res.nit.dtype, np.integer)
|
732 |
+
assert_equal(np.max(res.nit), f.count - 3)
|
733 |
+
self.assert_valid_bracket(res)
|
734 |
+
assert_allclose(res.fl, f(res.xl, *args))
|
735 |
+
assert_allclose(res.fm, f(res.xm, *args))
|
736 |
+
assert_allclose(res.fr, f(res.xr, *args))
|
737 |
+
|
738 |
+
def test_special_cases(self):
|
739 |
+
# Test edge cases and other special cases.
|
740 |
+
|
741 |
+
# Test that integers are not passed to `f`
|
742 |
+
# (otherwise this would overflow)
|
743 |
+
def f(x):
|
744 |
+
assert np.issubdtype(x.dtype, np.floating)
|
745 |
+
return x ** 98 - 1
|
746 |
+
|
747 |
+
result = _bracket_minimum(f, -7, xr0=5)
|
748 |
+
assert result.success
|
749 |
+
|
750 |
+
# Test maxiter = 0. Should do nothing to bracket.
|
751 |
+
def f(x):
|
752 |
+
return x**2 - 10
|
753 |
+
|
754 |
+
xl0, xm0, xr0 = -3, -1, 2
|
755 |
+
result = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, maxiter=0)
|
756 |
+
assert_equal([result.xl, result.xm, result.xr], [xl0, xm0, xr0])
|
757 |
+
|
758 |
+
# Test scalar `args` (not in tuple)
|
759 |
+
def f(x, c):
|
760 |
+
return c*x**2 - 1
|
761 |
+
|
762 |
+
result = _bracket_minimum(f, -1, args=3)
|
763 |
+
assert result.success
|
764 |
+
assert_allclose(result.fl, f(result.xl, 3))
|
765 |
+
|
766 |
+
# Initial bracket is valid.
|
767 |
+
f = self.init_f()
|
768 |
+
xl0, xm0, xr0 = [-1.0, -0.2, 1.0]
|
769 |
+
args = (0, 0)
|
770 |
+
result = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, args=args)
|
771 |
+
assert f.count == 3
|
772 |
+
|
773 |
+
assert_equal(
|
774 |
+
[result.xl, result.xm, result.xr],
|
775 |
+
[xl0, xm0, xr0],
|
776 |
+
)
|
777 |
+
assert_equal(
|
778 |
+
[result.fl, result.fm, result.fr],
|
779 |
+
[f(xl0, *args), f(xm0, *args), f(xr0, *args)],
|
780 |
+
)
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_chandrupatla.py
ADDED
@@ -0,0 +1,827 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
import numpy as np
|
3 |
+
from numpy.testing import assert_allclose, assert_equal, assert_array_less
|
4 |
+
|
5 |
+
from scipy import stats
|
6 |
+
import scipy._lib._elementwise_iterative_method as eim
|
7 |
+
|
8 |
+
from scipy.optimize._chandrupatla import (_chandrupatla_minimize,
|
9 |
+
_chandrupatla as _chandrupatla_root)
|
10 |
+
from scipy.optimize._tstutils import _CHANDRUPATLA_TESTS
|
11 |
+
|
12 |
+
from itertools import permutations
|
13 |
+
from .test_zeros import TestScalarRootFinders
|
14 |
+
|
15 |
+
def f1(x):
|
16 |
+
return 100*(1 - x**3.)**2 + (1-x**2.) + 2*(1-x)**2.
|
17 |
+
|
18 |
+
|
19 |
+
def f2(x):
|
20 |
+
return 5 + (x - 2.)**6
|
21 |
+
|
22 |
+
|
23 |
+
def f3(x):
|
24 |
+
return np.exp(x) - 5*x
|
25 |
+
|
26 |
+
|
27 |
+
def f4(x):
|
28 |
+
return x**5. - 5*x**3. - 20.*x + 5.
|
29 |
+
|
30 |
+
|
31 |
+
def f5(x):
|
32 |
+
return 8*x**3 - 2*x**2 - 7*x + 3
|
33 |
+
|
34 |
+
|
35 |
+
def _bracket_minimum(func, x1, x2):
|
36 |
+
phi = 1.61803398875
|
37 |
+
maxiter = 100
|
38 |
+
f1 = func(x1)
|
39 |
+
f2 = func(x2)
|
40 |
+
step = x2 - x1
|
41 |
+
x1, x2, f1, f2, step = ((x2, x1, f2, f1, -step) if f2 > f1
|
42 |
+
else (x1, x2, f1, f2, step))
|
43 |
+
|
44 |
+
for i in range(maxiter):
|
45 |
+
step *= phi
|
46 |
+
x3 = x2 + step
|
47 |
+
f3 = func(x3)
|
48 |
+
if f3 < f2:
|
49 |
+
x1, x2, f1, f2 = x2, x3, f2, f3
|
50 |
+
else:
|
51 |
+
break
|
52 |
+
return x1, x2, x3, f1, f2, f3
|
53 |
+
|
54 |
+
|
55 |
+
cases = [
|
56 |
+
(f1, -1, 11),
|
57 |
+
(f1, -2, 13),
|
58 |
+
(f1, -4, 13),
|
59 |
+
(f1, -8, 15),
|
60 |
+
(f1, -16, 16),
|
61 |
+
(f1, -32, 19),
|
62 |
+
(f1, -64, 20),
|
63 |
+
(f1, -128, 21),
|
64 |
+
(f1, -256, 21),
|
65 |
+
(f1, -512, 19),
|
66 |
+
(f1, -1024, 24),
|
67 |
+
(f2, -1, 8),
|
68 |
+
(f2, -2, 6),
|
69 |
+
(f2, -4, 6),
|
70 |
+
(f2, -8, 7),
|
71 |
+
(f2, -16, 8),
|
72 |
+
(f2, -32, 8),
|
73 |
+
(f2, -64, 9),
|
74 |
+
(f2, -128, 11),
|
75 |
+
(f2, -256, 13),
|
76 |
+
(f2, -512, 12),
|
77 |
+
(f2, -1024, 13),
|
78 |
+
(f3, -1, 11),
|
79 |
+
(f3, -2, 11),
|
80 |
+
(f3, -4, 11),
|
81 |
+
(f3, -8, 10),
|
82 |
+
(f3, -16, 14),
|
83 |
+
(f3, -32, 12),
|
84 |
+
(f3, -64, 15),
|
85 |
+
(f3, -128, 18),
|
86 |
+
(f3, -256, 18),
|
87 |
+
(f3, -512, 19),
|
88 |
+
(f3, -1024, 19),
|
89 |
+
(f4, -0.05, 9),
|
90 |
+
(f4, -0.10, 11),
|
91 |
+
(f4, -0.15, 11),
|
92 |
+
(f4, -0.20, 11),
|
93 |
+
(f4, -0.25, 11),
|
94 |
+
(f4, -0.30, 9),
|
95 |
+
(f4, -0.35, 9),
|
96 |
+
(f4, -0.40, 9),
|
97 |
+
(f4, -0.45, 10),
|
98 |
+
(f4, -0.50, 10),
|
99 |
+
(f4, -0.55, 10),
|
100 |
+
(f5, -0.05, 6),
|
101 |
+
(f5, -0.10, 7),
|
102 |
+
(f5, -0.15, 8),
|
103 |
+
(f5, -0.20, 10),
|
104 |
+
(f5, -0.25, 9),
|
105 |
+
(f5, -0.30, 8),
|
106 |
+
(f5, -0.35, 7),
|
107 |
+
(f5, -0.40, 7),
|
108 |
+
(f5, -0.45, 9),
|
109 |
+
(f5, -0.50, 9),
|
110 |
+
(f5, -0.55, 8)
|
111 |
+
]
|
112 |
+
|
113 |
+
|
114 |
+
class TestChandrupatlaMinimize:
|
115 |
+
|
116 |
+
def f(self, x, loc):
|
117 |
+
dist = stats.norm()
|
118 |
+
return -dist.pdf(x - loc)
|
119 |
+
|
120 |
+
@pytest.mark.parametrize('loc', [0.6, np.linspace(-1.05, 1.05, 10)])
|
121 |
+
def test_basic(self, loc):
|
122 |
+
# Find mode of normal distribution. Compare mode against location
|
123 |
+
# parameter and value of pdf at mode against expected pdf.
|
124 |
+
res = _chandrupatla_minimize(self.f, -5, 0, 5, args=(loc,))
|
125 |
+
ref = loc
|
126 |
+
np.testing.assert_allclose(res.x, ref, rtol=1e-6)
|
127 |
+
np.testing.assert_allclose(res.fun, -stats.norm.pdf(0), atol=0, rtol=0)
|
128 |
+
assert res.x.shape == np.shape(ref)
|
129 |
+
|
130 |
+
@pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)])
|
131 |
+
def test_vectorization(self, shape):
|
132 |
+
# Test for correct functionality, output shapes, and dtypes for various
|
133 |
+
# input shapes.
|
134 |
+
loc = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6
|
135 |
+
args = (loc,)
|
136 |
+
|
137 |
+
@np.vectorize
|
138 |
+
def chandrupatla_single(loc_single):
|
139 |
+
return _chandrupatla_minimize(self.f, -5, 0, 5, args=(loc_single,))
|
140 |
+
|
141 |
+
def f(*args, **kwargs):
|
142 |
+
f.f_evals += 1
|
143 |
+
return self.f(*args, **kwargs)
|
144 |
+
f.f_evals = 0
|
145 |
+
|
146 |
+
res = _chandrupatla_minimize(f, -5, 0, 5, args=args)
|
147 |
+
refs = chandrupatla_single(loc).ravel()
|
148 |
+
|
149 |
+
ref_x = [ref.x for ref in refs]
|
150 |
+
assert_allclose(res.x.ravel(), ref_x)
|
151 |
+
assert_equal(res.x.shape, shape)
|
152 |
+
|
153 |
+
ref_fun = [ref.fun for ref in refs]
|
154 |
+
assert_allclose(res.fun.ravel(), ref_fun)
|
155 |
+
assert_equal(res.fun.shape, shape)
|
156 |
+
assert_equal(res.fun, self.f(res.x, *args))
|
157 |
+
|
158 |
+
ref_success = [ref.success for ref in refs]
|
159 |
+
assert_equal(res.success.ravel(), ref_success)
|
160 |
+
assert_equal(res.success.shape, shape)
|
161 |
+
assert np.issubdtype(res.success.dtype, np.bool_)
|
162 |
+
|
163 |
+
ref_flag = [ref.status for ref in refs]
|
164 |
+
assert_equal(res.status.ravel(), ref_flag)
|
165 |
+
assert_equal(res.status.shape, shape)
|
166 |
+
assert np.issubdtype(res.status.dtype, np.integer)
|
167 |
+
|
168 |
+
ref_nfev = [ref.nfev for ref in refs]
|
169 |
+
assert_equal(res.nfev.ravel(), ref_nfev)
|
170 |
+
assert_equal(np.max(res.nfev), f.f_evals)
|
171 |
+
assert_equal(res.nfev.shape, res.fun.shape)
|
172 |
+
assert np.issubdtype(res.nfev.dtype, np.integer)
|
173 |
+
|
174 |
+
ref_nit = [ref.nit for ref in refs]
|
175 |
+
assert_equal(res.nit.ravel(), ref_nit)
|
176 |
+
assert_equal(np.max(res.nit), f.f_evals-3)
|
177 |
+
assert_equal(res.nit.shape, res.fun.shape)
|
178 |
+
assert np.issubdtype(res.nit.dtype, np.integer)
|
179 |
+
|
180 |
+
ref_xl = [ref.xl for ref in refs]
|
181 |
+
assert_allclose(res.xl.ravel(), ref_xl)
|
182 |
+
assert_equal(res.xl.shape, shape)
|
183 |
+
|
184 |
+
ref_xm = [ref.xm for ref in refs]
|
185 |
+
assert_allclose(res.xm.ravel(), ref_xm)
|
186 |
+
assert_equal(res.xm.shape, shape)
|
187 |
+
|
188 |
+
ref_xr = [ref.xr for ref in refs]
|
189 |
+
assert_allclose(res.xr.ravel(), ref_xr)
|
190 |
+
assert_equal(res.xr.shape, shape)
|
191 |
+
|
192 |
+
ref_fl = [ref.fl for ref in refs]
|
193 |
+
assert_allclose(res.fl.ravel(), ref_fl)
|
194 |
+
assert_equal(res.fl.shape, shape)
|
195 |
+
assert_allclose(res.fl, self.f(res.xl, *args))
|
196 |
+
|
197 |
+
ref_fm = [ref.fm for ref in refs]
|
198 |
+
assert_allclose(res.fm.ravel(), ref_fm)
|
199 |
+
assert_equal(res.fm.shape, shape)
|
200 |
+
assert_allclose(res.fm, self.f(res.xm, *args))
|
201 |
+
|
202 |
+
ref_fr = [ref.fr for ref in refs]
|
203 |
+
assert_allclose(res.fr.ravel(), ref_fr)
|
204 |
+
assert_equal(res.fr.shape, shape)
|
205 |
+
assert_allclose(res.fr, self.f(res.xr, *args))
|
206 |
+
|
207 |
+
def test_flags(self):
|
208 |
+
# Test cases that should produce different status flags; show that all
|
209 |
+
# can be produced simultaneously.
|
210 |
+
def f(xs, js):
|
211 |
+
funcs = [lambda x: (x - 2.5) ** 2,
|
212 |
+
lambda x: x - 10,
|
213 |
+
lambda x: (x - 2.5) ** 4,
|
214 |
+
lambda x: np.nan]
|
215 |
+
|
216 |
+
return [funcs[j](x) for x, j in zip(xs, js)]
|
217 |
+
|
218 |
+
args = (np.arange(4, dtype=np.int64),)
|
219 |
+
|
220 |
+
res = _chandrupatla_minimize(f, [0]*4, [2]*4, [np.pi]*4, args=args,
|
221 |
+
maxiter=10)
|
222 |
+
|
223 |
+
ref_flags = np.array([eim._ECONVERGED,
|
224 |
+
eim._ESIGNERR,
|
225 |
+
eim._ECONVERR,
|
226 |
+
eim._EVALUEERR])
|
227 |
+
assert_equal(res.status, ref_flags)
|
228 |
+
|
229 |
+
def test_convergence(self):
|
230 |
+
# Test that the convergence tolerances behave as expected
|
231 |
+
rng = np.random.default_rng(2585255913088665241)
|
232 |
+
p = rng.random(size=3)
|
233 |
+
bracket = (-5, 0, 5)
|
234 |
+
args = (p,)
|
235 |
+
kwargs0 = dict(args=args, xatol=0, xrtol=0, fatol=0, frtol=0)
|
236 |
+
|
237 |
+
kwargs = kwargs0.copy()
|
238 |
+
kwargs['xatol'] = 1e-3
|
239 |
+
res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
|
240 |
+
j1 = abs(res1.xr - res1.xl)
|
241 |
+
assert_array_less(j1, 4*kwargs['xatol'])
|
242 |
+
kwargs['xatol'] = 1e-6
|
243 |
+
res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
|
244 |
+
j2 = abs(res2.xr - res2.xl)
|
245 |
+
assert_array_less(j2, 4*kwargs['xatol'])
|
246 |
+
assert_array_less(j2, j1)
|
247 |
+
|
248 |
+
kwargs = kwargs0.copy()
|
249 |
+
kwargs['xrtol'] = 1e-3
|
250 |
+
res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
|
251 |
+
j1 = abs(res1.xr - res1.xl)
|
252 |
+
assert_array_less(j1, 4*kwargs['xrtol']*abs(res1.x))
|
253 |
+
kwargs['xrtol'] = 1e-6
|
254 |
+
res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
|
255 |
+
j2 = abs(res2.xr - res2.xl)
|
256 |
+
assert_array_less(j2, 4*kwargs['xrtol']*abs(res2.x))
|
257 |
+
assert_array_less(j2, j1)
|
258 |
+
|
259 |
+
kwargs = kwargs0.copy()
|
260 |
+
kwargs['fatol'] = 1e-3
|
261 |
+
res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
|
262 |
+
h1 = abs(res1.fl - 2 * res1.fm + res1.fr)
|
263 |
+
assert_array_less(h1, 2*kwargs['fatol'])
|
264 |
+
kwargs['fatol'] = 1e-6
|
265 |
+
res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
|
266 |
+
h2 = abs(res2.fl - 2 * res2.fm + res2.fr)
|
267 |
+
assert_array_less(h2, 2*kwargs['fatol'])
|
268 |
+
assert_array_less(h2, h1)
|
269 |
+
|
270 |
+
kwargs = kwargs0.copy()
|
271 |
+
kwargs['frtol'] = 1e-3
|
272 |
+
res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
|
273 |
+
h1 = abs(res1.fl - 2 * res1.fm + res1.fr)
|
274 |
+
assert_array_less(h1, 2*kwargs['frtol']*abs(res1.fun))
|
275 |
+
kwargs['frtol'] = 1e-6
|
276 |
+
res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
|
277 |
+
h2 = abs(res2.fl - 2 * res2.fm + res2.fr)
|
278 |
+
assert_array_less(h2, 2*kwargs['frtol']*abs(res2.fun))
|
279 |
+
assert_array_less(h2, h1)
|
280 |
+
|
281 |
+
def test_maxiter_callback(self):
|
282 |
+
# Test behavior of `maxiter` parameter and `callback` interface
|
283 |
+
loc = 0.612814
|
284 |
+
bracket = (-5, 0, 5)
|
285 |
+
maxiter = 5
|
286 |
+
|
287 |
+
res = _chandrupatla_minimize(self.f, *bracket, args=(loc,),
|
288 |
+
maxiter=maxiter)
|
289 |
+
assert not np.any(res.success)
|
290 |
+
assert np.all(res.nfev == maxiter+3)
|
291 |
+
assert np.all(res.nit == maxiter)
|
292 |
+
|
293 |
+
def callback(res):
|
294 |
+
callback.iter += 1
|
295 |
+
callback.res = res
|
296 |
+
assert hasattr(res, 'x')
|
297 |
+
if callback.iter == 0:
|
298 |
+
# callback is called once with initial bracket
|
299 |
+
assert (res.xl, res.xm, res.xr) == bracket
|
300 |
+
else:
|
301 |
+
changed_xr = (res.xl == callback.xl) & (res.xr != callback.xr)
|
302 |
+
changed_xl = (res.xl != callback.xl) & (res.xr == callback.xr)
|
303 |
+
assert np.all(changed_xr | changed_xl)
|
304 |
+
|
305 |
+
callback.xl = res.xl
|
306 |
+
callback.xr = res.xr
|
307 |
+
assert res.status == eim._EINPROGRESS
|
308 |
+
assert_equal(self.f(res.xl, loc), res.fl)
|
309 |
+
assert_equal(self.f(res.xm, loc), res.fm)
|
310 |
+
assert_equal(self.f(res.xr, loc), res.fr)
|
311 |
+
assert_equal(self.f(res.x, loc), res.fun)
|
312 |
+
if callback.iter == maxiter:
|
313 |
+
raise StopIteration
|
314 |
+
|
315 |
+
callback.xl = np.nan
|
316 |
+
callback.xr = np.nan
|
317 |
+
callback.iter = -1 # callback called once before first iteration
|
318 |
+
callback.res = None
|
319 |
+
|
320 |
+
res2 = _chandrupatla_minimize(self.f, *bracket, args=(loc,),
|
321 |
+
callback=callback)
|
322 |
+
|
323 |
+
# terminating with callback is identical to terminating due to maxiter
|
324 |
+
# (except for `status`)
|
325 |
+
for key in res.keys():
|
326 |
+
if key == 'status':
|
327 |
+
assert res[key] == eim._ECONVERR
|
328 |
+
assert callback.res[key] == eim._EINPROGRESS
|
329 |
+
assert res2[key] == eim._ECALLBACK
|
330 |
+
else:
|
331 |
+
assert res2[key] == callback.res[key] == res[key]
|
332 |
+
|
333 |
+
@pytest.mark.parametrize('case', cases)
|
334 |
+
def test_nit_expected(self, case):
|
335 |
+
# Test that `_chandrupatla` implements Chandrupatla's algorithm:
|
336 |
+
# in all 55 test cases, the number of iterations performed
|
337 |
+
# matches the number reported in the original paper.
|
338 |
+
func, x1, nit = case
|
339 |
+
|
340 |
+
# Find bracket using the algorithm in the paper
|
341 |
+
step = 0.2
|
342 |
+
x2 = x1 + step
|
343 |
+
x1, x2, x3, f1, f2, f3 = _bracket_minimum(func, x1, x2)
|
344 |
+
|
345 |
+
# Use tolerances from original paper
|
346 |
+
xatol = 0.0001
|
347 |
+
fatol = 0.000001
|
348 |
+
xrtol = 1e-16
|
349 |
+
frtol = 1e-16
|
350 |
+
|
351 |
+
res = _chandrupatla_minimize(func, x1, x2, x3, xatol=xatol,
|
352 |
+
fatol=fatol, xrtol=xrtol, frtol=frtol)
|
353 |
+
assert_equal(res.nit, nit)
|
354 |
+
|
355 |
+
@pytest.mark.parametrize("loc", (0.65, [0.65, 0.7]))
|
356 |
+
@pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64))
|
357 |
+
def test_dtype(self, loc, dtype):
|
358 |
+
# Test that dtypes are preserved
|
359 |
+
|
360 |
+
loc = dtype(loc)
|
361 |
+
|
362 |
+
def f(x, loc):
|
363 |
+
assert x.dtype == dtype
|
364 |
+
return ((x - loc) ** 2).astype(dtype)
|
365 |
+
|
366 |
+
res = _chandrupatla_minimize(f, dtype(-3), dtype(1), dtype(5),
|
367 |
+
args=(loc,))
|
368 |
+
assert res.x.dtype == dtype
|
369 |
+
assert_allclose(res.x, loc, rtol=np.sqrt(np.finfo(dtype).eps))
|
370 |
+
|
371 |
+
def test_input_validation(self):
|
372 |
+
# Test input validation for appropriate error messages
|
373 |
+
|
374 |
+
message = '`func` must be callable.'
|
375 |
+
with pytest.raises(ValueError, match=message):
|
376 |
+
_chandrupatla_minimize(None, -4, 0, 4)
|
377 |
+
|
378 |
+
message = 'Abscissae and function output must be real numbers.'
|
379 |
+
with pytest.raises(ValueError, match=message):
|
380 |
+
_chandrupatla_minimize(lambda x: x, -4+1j, 0, 4)
|
381 |
+
|
382 |
+
message = "shape mismatch: objects cannot be broadcast"
|
383 |
+
# raised by `np.broadcast, but the traceback is readable IMO
|
384 |
+
with pytest.raises(ValueError, match=message):
|
385 |
+
_chandrupatla_minimize(lambda x: x, [-2, -3], [0, 0], [3, 4, 5])
|
386 |
+
|
387 |
+
message = "The shape of the array returned by `func` must be the same"
|
388 |
+
with pytest.raises(ValueError, match=message):
|
389 |
+
_chandrupatla_minimize(lambda x: [x[0], x[1], x[1]], [-3, -3],
|
390 |
+
[0, 0], [5, 5])
|
391 |
+
|
392 |
+
message = 'Tolerances must be non-negative scalars.'
|
393 |
+
with pytest.raises(ValueError, match=message):
|
394 |
+
_chandrupatla_minimize(lambda x: x, -4, 0, 4, xatol=-1)
|
395 |
+
with pytest.raises(ValueError, match=message):
|
396 |
+
_chandrupatla_minimize(lambda x: x, -4, 0, 4, xrtol=np.nan)
|
397 |
+
with pytest.raises(ValueError, match=message):
|
398 |
+
_chandrupatla_minimize(lambda x: x, -4, 0, 4, fatol='ekki')
|
399 |
+
with pytest.raises(ValueError, match=message):
|
400 |
+
_chandrupatla_minimize(lambda x: x, -4, 0, 4, frtol=np.nan)
|
401 |
+
|
402 |
+
message = '`maxiter` must be a non-negative integer.'
|
403 |
+
with pytest.raises(ValueError, match=message):
|
404 |
+
_chandrupatla_minimize(lambda x: x, -4, 0, 4, maxiter=1.5)
|
405 |
+
with pytest.raises(ValueError, match=message):
|
406 |
+
_chandrupatla_minimize(lambda x: x, -4, 0, 4, maxiter=-1)
|
407 |
+
|
408 |
+
message = '`callback` must be callable.'
|
409 |
+
with pytest.raises(ValueError, match=message):
|
410 |
+
_chandrupatla_minimize(lambda x: x, -4, 0, 4, callback='shrubbery')
|
411 |
+
|
412 |
+
def test_bracket_order(self):
|
413 |
+
# Confirm that order of points in bracket doesn't matter
|
414 |
+
loc = np.linspace(-1, 1, 6)[:, np.newaxis]
|
415 |
+
brackets = np.array(list(permutations([-5, 0, 5]))).T
|
416 |
+
res = _chandrupatla_minimize(self.f, *brackets, args=(loc,))
|
417 |
+
assert np.all(np.isclose(res.x, loc) | (res.fun == self.f(loc, loc)))
|
418 |
+
ref = res.x[:, 0] # all columns should be the same
|
419 |
+
assert_allclose(*np.broadcast_arrays(res.x.T, ref), rtol=1e-15)
|
420 |
+
|
421 |
+
def test_special_cases(self):
|
422 |
+
# Test edge cases and other special cases
|
423 |
+
|
424 |
+
# Test that integers are not passed to `f`
|
425 |
+
# (otherwise this would overflow)
|
426 |
+
def f(x):
|
427 |
+
assert np.issubdtype(x.dtype, np.floating)
|
428 |
+
return (x-1) ** 100
|
429 |
+
|
430 |
+
with np.errstate(invalid='ignore'):
|
431 |
+
res = _chandrupatla_minimize(f, -7, 0, 8, fatol=0, frtol=0)
|
432 |
+
assert res.success
|
433 |
+
assert_allclose(res.x, 1, rtol=1e-3)
|
434 |
+
assert_equal(res.fun, 0)
|
435 |
+
|
436 |
+
# Test that if all elements of bracket equal minimizer, algorithm
|
437 |
+
# reports convergence
|
438 |
+
def f(x):
|
439 |
+
return (x-1)**2
|
440 |
+
|
441 |
+
res = _chandrupatla_minimize(f, 1, 1, 1)
|
442 |
+
assert res.success
|
443 |
+
assert_equal(res.x, 1)
|
444 |
+
|
445 |
+
# Test maxiter = 0. Should do nothing to bracket.
|
446 |
+
def f(x):
|
447 |
+
return (x-1)**2
|
448 |
+
|
449 |
+
bracket = (-3, 1.1, 5)
|
450 |
+
res = _chandrupatla_minimize(f, *bracket, maxiter=0)
|
451 |
+
assert res.xl, res.xr == bracket
|
452 |
+
assert res.nit == 0
|
453 |
+
assert res.nfev == 3
|
454 |
+
assert res.status == -2
|
455 |
+
assert res.x == 1.1 # best so far
|
456 |
+
|
457 |
+
# Test scalar `args` (not in tuple)
|
458 |
+
def f(x, c):
|
459 |
+
return (x-c)**2 - 1
|
460 |
+
|
461 |
+
res = _chandrupatla_minimize(f, -1, 0, 1, args=1/3)
|
462 |
+
assert_allclose(res.x, 1/3)
|
463 |
+
|
464 |
+
# Test zero tolerances
|
465 |
+
# TODO: fatol/frtol = 0?
|
466 |
+
def f(x):
|
467 |
+
return -np.sin(x)
|
468 |
+
|
469 |
+
res = _chandrupatla_minimize(f, 0, 1, np.pi, xatol=0, xrtol=0,
|
470 |
+
fatol=0, frtol=0)
|
471 |
+
assert res.success
|
472 |
+
# found a minimum exactly (according to floating point arithmetic)
|
473 |
+
assert res.xl < res.xm < res.xr
|
474 |
+
assert f(res.xl) == f(res.xm) == f(res.xr)
|
475 |
+
|
476 |
+
|
477 |
+
class TestChandrupatla(TestScalarRootFinders):
|
478 |
+
|
479 |
+
def f(self, q, p):
|
480 |
+
return stats.norm.cdf(q) - p
|
481 |
+
|
482 |
+
@pytest.mark.parametrize('p', [0.6, np.linspace(-0.05, 1.05, 10)])
|
483 |
+
def test_basic(self, p):
|
484 |
+
# Invert distribution CDF and compare against distrtibution `ppf`
|
485 |
+
res = _chandrupatla_root(self.f, -5, 5, args=(p,))
|
486 |
+
ref = stats.norm().ppf(p)
|
487 |
+
np.testing.assert_allclose(res.x, ref)
|
488 |
+
assert res.x.shape == ref.shape
|
489 |
+
|
490 |
+
@pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)])
|
491 |
+
def test_vectorization(self, shape):
|
492 |
+
# Test for correct functionality, output shapes, and dtypes for various
|
493 |
+
# input shapes.
|
494 |
+
p = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6
|
495 |
+
args = (p,)
|
496 |
+
|
497 |
+
@np.vectorize
|
498 |
+
def chandrupatla_single(p):
|
499 |
+
return _chandrupatla_root(self.f, -5, 5, args=(p,))
|
500 |
+
|
501 |
+
def f(*args, **kwargs):
|
502 |
+
f.f_evals += 1
|
503 |
+
return self.f(*args, **kwargs)
|
504 |
+
f.f_evals = 0
|
505 |
+
|
506 |
+
res = _chandrupatla_root(f, -5, 5, args=args)
|
507 |
+
refs = chandrupatla_single(p).ravel()
|
508 |
+
|
509 |
+
ref_x = [ref.x for ref in refs]
|
510 |
+
assert_allclose(res.x.ravel(), ref_x)
|
511 |
+
assert_equal(res.x.shape, shape)
|
512 |
+
|
513 |
+
ref_fun = [ref.fun for ref in refs]
|
514 |
+
assert_allclose(res.fun.ravel(), ref_fun)
|
515 |
+
assert_equal(res.fun.shape, shape)
|
516 |
+
assert_equal(res.fun, self.f(res.x, *args))
|
517 |
+
|
518 |
+
ref_success = [ref.success for ref in refs]
|
519 |
+
assert_equal(res.success.ravel(), ref_success)
|
520 |
+
assert_equal(res.success.shape, shape)
|
521 |
+
assert np.issubdtype(res.success.dtype, np.bool_)
|
522 |
+
|
523 |
+
ref_flag = [ref.status for ref in refs]
|
524 |
+
assert_equal(res.status.ravel(), ref_flag)
|
525 |
+
assert_equal(res.status.shape, shape)
|
526 |
+
assert np.issubdtype(res.status.dtype, np.integer)
|
527 |
+
|
528 |
+
ref_nfev = [ref.nfev for ref in refs]
|
529 |
+
assert_equal(res.nfev.ravel(), ref_nfev)
|
530 |
+
assert_equal(np.max(res.nfev), f.f_evals)
|
531 |
+
assert_equal(res.nfev.shape, res.fun.shape)
|
532 |
+
assert np.issubdtype(res.nfev.dtype, np.integer)
|
533 |
+
|
534 |
+
ref_nit = [ref.nit for ref in refs]
|
535 |
+
assert_equal(res.nit.ravel(), ref_nit)
|
536 |
+
assert_equal(np.max(res.nit), f.f_evals-2)
|
537 |
+
assert_equal(res.nit.shape, res.fun.shape)
|
538 |
+
assert np.issubdtype(res.nit.dtype, np.integer)
|
539 |
+
|
540 |
+
ref_xl = [ref.xl for ref in refs]
|
541 |
+
assert_allclose(res.xl.ravel(), ref_xl)
|
542 |
+
assert_equal(res.xl.shape, shape)
|
543 |
+
|
544 |
+
ref_xr = [ref.xr for ref in refs]
|
545 |
+
assert_allclose(res.xr.ravel(), ref_xr)
|
546 |
+
assert_equal(res.xr.shape, shape)
|
547 |
+
|
548 |
+
assert_array_less(res.xl, res.xr)
|
549 |
+
finite = np.isfinite(res.x)
|
550 |
+
assert np.all((res.x[finite] == res.xl[finite])
|
551 |
+
| (res.x[finite] == res.xr[finite]))
|
552 |
+
|
553 |
+
ref_fl = [ref.fl for ref in refs]
|
554 |
+
assert_allclose(res.fl.ravel(), ref_fl)
|
555 |
+
assert_equal(res.fl.shape, shape)
|
556 |
+
assert_allclose(res.fl, self.f(res.xl, *args))
|
557 |
+
|
558 |
+
ref_fr = [ref.fr for ref in refs]
|
559 |
+
assert_allclose(res.fr.ravel(), ref_fr)
|
560 |
+
assert_equal(res.fr.shape, shape)
|
561 |
+
assert_allclose(res.fr, self.f(res.xr, *args))
|
562 |
+
|
563 |
+
assert np.all(np.abs(res.fun[finite]) ==
|
564 |
+
np.minimum(np.abs(res.fl[finite]),
|
565 |
+
np.abs(res.fr[finite])))
|
566 |
+
|
567 |
+
def test_flags(self):
|
568 |
+
# Test cases that should produce different status flags; show that all
|
569 |
+
# can be produced simultaneously.
|
570 |
+
def f(xs, js):
|
571 |
+
funcs = [lambda x: x - 2.5,
|
572 |
+
lambda x: x - 10,
|
573 |
+
lambda x: (x - 0.1)**3,
|
574 |
+
lambda x: np.nan]
|
575 |
+
return [funcs[j](x) for x, j in zip(xs, js)]
|
576 |
+
|
577 |
+
args = (np.arange(4, dtype=np.int64),)
|
578 |
+
res = _chandrupatla_root(f, [0]*4, [np.pi]*4, args=args, maxiter=2)
|
579 |
+
|
580 |
+
ref_flags = np.array([eim._ECONVERGED,
|
581 |
+
eim._ESIGNERR,
|
582 |
+
eim._ECONVERR,
|
583 |
+
eim._EVALUEERR])
|
584 |
+
assert_equal(res.status, ref_flags)
|
585 |
+
|
586 |
+
def test_convergence(self):
|
587 |
+
# Test that the convergence tolerances behave as expected
|
588 |
+
rng = np.random.default_rng(2585255913088665241)
|
589 |
+
p = rng.random(size=3)
|
590 |
+
bracket = (-5, 5)
|
591 |
+
args = (p,)
|
592 |
+
kwargs0 = dict(args=args, xatol=0, xrtol=0, fatol=0, frtol=0)
|
593 |
+
|
594 |
+
kwargs = kwargs0.copy()
|
595 |
+
kwargs['xatol'] = 1e-3
|
596 |
+
res1 = _chandrupatla_root(self.f, *bracket, **kwargs)
|
597 |
+
assert_array_less(res1.xr - res1.xl, 1e-3)
|
598 |
+
kwargs['xatol'] = 1e-6
|
599 |
+
res2 = _chandrupatla_root(self.f, *bracket, **kwargs)
|
600 |
+
assert_array_less(res2.xr - res2.xl, 1e-6)
|
601 |
+
assert_array_less(res2.xr - res2.xl, res1.xr - res1.xl)
|
602 |
+
|
603 |
+
kwargs = kwargs0.copy()
|
604 |
+
kwargs['xrtol'] = 1e-3
|
605 |
+
res1 = _chandrupatla_root(self.f, *bracket, **kwargs)
|
606 |
+
assert_array_less(res1.xr - res1.xl, 1e-3 * np.abs(res1.x))
|
607 |
+
kwargs['xrtol'] = 1e-6
|
608 |
+
res2 = _chandrupatla_root(self.f, *bracket, **kwargs)
|
609 |
+
assert_array_less(res2.xr - res2.xl, 1e-6 * np.abs(res2.x))
|
610 |
+
assert_array_less(res2.xr - res2.xl, res1.xr - res1.xl)
|
611 |
+
|
612 |
+
kwargs = kwargs0.copy()
|
613 |
+
kwargs['fatol'] = 1e-3
|
614 |
+
res1 = _chandrupatla_root(self.f, *bracket, **kwargs)
|
615 |
+
assert_array_less(np.abs(res1.fun), 1e-3)
|
616 |
+
kwargs['fatol'] = 1e-6
|
617 |
+
res2 = _chandrupatla_root(self.f, *bracket, **kwargs)
|
618 |
+
assert_array_less(np.abs(res2.fun), 1e-6)
|
619 |
+
assert_array_less(np.abs(res2.fun), np.abs(res1.fun))
|
620 |
+
|
621 |
+
kwargs = kwargs0.copy()
|
622 |
+
kwargs['frtol'] = 1e-3
|
623 |
+
x1, x2 = bracket
|
624 |
+
f0 = np.minimum(abs(self.f(x1, *args)), abs(self.f(x2, *args)))
|
625 |
+
res1 = _chandrupatla_root(self.f, *bracket, **kwargs)
|
626 |
+
assert_array_less(np.abs(res1.fun), 1e-3*f0)
|
627 |
+
kwargs['frtol'] = 1e-6
|
628 |
+
res2 = _chandrupatla_root(self.f, *bracket, **kwargs)
|
629 |
+
assert_array_less(np.abs(res2.fun), 1e-6*f0)
|
630 |
+
assert_array_less(np.abs(res2.fun), np.abs(res1.fun))
|
631 |
+
|
632 |
+
def test_maxiter_callback(self):
|
633 |
+
# Test behavior of `maxiter` parameter and `callback` interface
|
634 |
+
p = 0.612814
|
635 |
+
bracket = (-5, 5)
|
636 |
+
maxiter = 5
|
637 |
+
|
638 |
+
def f(q, p):
|
639 |
+
res = stats.norm().cdf(q) - p
|
640 |
+
f.x = q
|
641 |
+
f.fun = res
|
642 |
+
return res
|
643 |
+
f.x = None
|
644 |
+
f.fun = None
|
645 |
+
|
646 |
+
res = _chandrupatla_root(f, *bracket, args=(p,),
|
647 |
+
maxiter=maxiter)
|
648 |
+
assert not np.any(res.success)
|
649 |
+
assert np.all(res.nfev == maxiter+2)
|
650 |
+
assert np.all(res.nit == maxiter)
|
651 |
+
|
652 |
+
def callback(res):
|
653 |
+
callback.iter += 1
|
654 |
+
callback.res = res
|
655 |
+
assert hasattr(res, 'x')
|
656 |
+
if callback.iter == 0:
|
657 |
+
# callback is called once with initial bracket
|
658 |
+
assert (res.xl, res.xr) == bracket
|
659 |
+
else:
|
660 |
+
changed = (((res.xl == callback.xl) & (res.xr != callback.xr))
|
661 |
+
| ((res.xl != callback.xl) & (res.xr == callback.xr)))
|
662 |
+
assert np.all(changed)
|
663 |
+
|
664 |
+
callback.xl = res.xl
|
665 |
+
callback.xr = res.xr
|
666 |
+
assert res.status == eim._EINPROGRESS
|
667 |
+
assert_equal(self.f(res.xl, p), res.fl)
|
668 |
+
assert_equal(self.f(res.xr, p), res.fr)
|
669 |
+
assert_equal(self.f(res.x, p), res.fun)
|
670 |
+
if callback.iter == maxiter:
|
671 |
+
raise StopIteration
|
672 |
+
callback.iter = -1 # callback called once before first iteration
|
673 |
+
callback.res = None
|
674 |
+
callback.xl = None
|
675 |
+
callback.xr = None
|
676 |
+
|
677 |
+
res2 = _chandrupatla_root(f, *bracket, args=(p,),
|
678 |
+
callback=callback)
|
679 |
+
|
680 |
+
# terminating with callback is identical to terminating due to maxiter
|
681 |
+
# (except for `status`)
|
682 |
+
for key in res.keys():
|
683 |
+
if key == 'status':
|
684 |
+
assert res[key] == eim._ECONVERR
|
685 |
+
assert callback.res[key] == eim._EINPROGRESS
|
686 |
+
assert res2[key] == eim._ECALLBACK
|
687 |
+
else:
|
688 |
+
assert res2[key] == callback.res[key] == res[key]
|
689 |
+
|
690 |
+
@pytest.mark.parametrize('case', _CHANDRUPATLA_TESTS)
|
691 |
+
def test_nit_expected(self, case):
|
692 |
+
# Test that `_chandrupatla` implements Chandrupatla's algorithm:
|
693 |
+
# in all 40 test cases, the number of iterations performed
|
694 |
+
# matches the number reported in the original paper.
|
695 |
+
f, bracket, root, nfeval, id = case
|
696 |
+
# Chandrupatla's criterion is equivalent to
|
697 |
+
# abs(x2-x1) < 4*abs(xmin)*xrtol + xatol, but we use the more standard
|
698 |
+
# abs(x2-x1) < abs(xmin)*xrtol + xatol. Therefore, set xrtol to 4x
|
699 |
+
# that used by Chandrupatla in tests.
|
700 |
+
res = _chandrupatla_root(f, *bracket, xrtol=4e-10, xatol=1e-5)
|
701 |
+
assert_allclose(res.fun, f(root), rtol=1e-8, atol=2e-3)
|
702 |
+
assert_equal(res.nfev, nfeval)
|
703 |
+
|
704 |
+
@pytest.mark.parametrize("root", (0.622, [0.622, 0.623]))
|
705 |
+
@pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64))
|
706 |
+
def test_dtype(self, root, dtype):
|
707 |
+
# Test that dtypes are preserved
|
708 |
+
|
709 |
+
root = dtype(root)
|
710 |
+
def f(x, root):
|
711 |
+
return ((x - root) ** 3).astype(dtype)
|
712 |
+
|
713 |
+
res = _chandrupatla_root(f, dtype(-3), dtype(5),
|
714 |
+
args=(root,), xatol=1e-3)
|
715 |
+
assert res.x.dtype == dtype
|
716 |
+
assert np.allclose(res.x, root, atol=1e-3) or np.all(res.fun == 0)
|
717 |
+
|
718 |
+
def test_input_validation(self):
|
719 |
+
# Test input validation for appropriate error messages
|
720 |
+
|
721 |
+
message = '`func` must be callable.'
|
722 |
+
with pytest.raises(ValueError, match=message):
|
723 |
+
_chandrupatla_root(None, -4, 4)
|
724 |
+
|
725 |
+
message = 'Abscissae and function output must be real numbers.'
|
726 |
+
with pytest.raises(ValueError, match=message):
|
727 |
+
_chandrupatla_root(lambda x: x, -4+1j, 4)
|
728 |
+
|
729 |
+
message = "shape mismatch: objects cannot be broadcast"
|
730 |
+
# raised by `np.broadcast, but the traceback is readable IMO
|
731 |
+
with pytest.raises(ValueError, match=message):
|
732 |
+
_chandrupatla_root(lambda x: x, [-2, -3], [3, 4, 5])
|
733 |
+
|
734 |
+
message = "The shape of the array returned by `func`..."
|
735 |
+
with pytest.raises(ValueError, match=message):
|
736 |
+
_chandrupatla_root(lambda x: [x[0], x[1], x[1]], [-3, -3], [5, 5])
|
737 |
+
|
738 |
+
message = 'Tolerances must be non-negative scalars.'
|
739 |
+
with pytest.raises(ValueError, match=message):
|
740 |
+
_chandrupatla_root(lambda x: x, -4, 4, xatol=-1)
|
741 |
+
with pytest.raises(ValueError, match=message):
|
742 |
+
_chandrupatla_root(lambda x: x, -4, 4, xrtol=np.nan)
|
743 |
+
with pytest.raises(ValueError, match=message):
|
744 |
+
_chandrupatla_root(lambda x: x, -4, 4, fatol='ekki')
|
745 |
+
with pytest.raises(ValueError, match=message):
|
746 |
+
_chandrupatla_root(lambda x: x, -4, 4, frtol=np.nan)
|
747 |
+
|
748 |
+
message = '`maxiter` must be a non-negative integer.'
|
749 |
+
with pytest.raises(ValueError, match=message):
|
750 |
+
_chandrupatla_root(lambda x: x, -4, 4, maxiter=1.5)
|
751 |
+
with pytest.raises(ValueError, match=message):
|
752 |
+
_chandrupatla_root(lambda x: x, -4, 4, maxiter=-1)
|
753 |
+
|
754 |
+
message = '`callback` must be callable.'
|
755 |
+
with pytest.raises(ValueError, match=message):
|
756 |
+
_chandrupatla_root(lambda x: x, -4, 4, callback='shrubbery')
|
757 |
+
|
758 |
+
def test_special_cases(self):
|
759 |
+
# Test edge cases and other special cases
|
760 |
+
|
761 |
+
# Test that integers are not passed to `f`
|
762 |
+
# (otherwise this would overflow)
|
763 |
+
def f(x):
|
764 |
+
assert np.issubdtype(x.dtype, np.floating)
|
765 |
+
return x ** 99 - 1
|
766 |
+
|
767 |
+
res = _chandrupatla_root(f, -7, 5)
|
768 |
+
assert res.success
|
769 |
+
assert_allclose(res.x, 1)
|
770 |
+
|
771 |
+
# Test that if both ends of bracket equal root, algorithm reports
|
772 |
+
# convergence
|
773 |
+
def f(x):
|
774 |
+
return x**2 - 1
|
775 |
+
|
776 |
+
res = _chandrupatla_root(f, 1, 1)
|
777 |
+
assert res.success
|
778 |
+
assert_equal(res.x, 1)
|
779 |
+
|
780 |
+
def f(x):
|
781 |
+
return 1/x
|
782 |
+
|
783 |
+
with np.errstate(invalid='ignore'):
|
784 |
+
res = _chandrupatla_root(f, np.inf, np.inf)
|
785 |
+
assert res.success
|
786 |
+
assert_equal(res.x, np.inf)
|
787 |
+
|
788 |
+
# Test maxiter = 0. Should do nothing to bracket.
|
789 |
+
def f(x):
|
790 |
+
return x**3 - 1
|
791 |
+
|
792 |
+
bracket = (-3, 5)
|
793 |
+
res = _chandrupatla_root(f, *bracket, maxiter=0)
|
794 |
+
assert res.xl, res.xr == bracket
|
795 |
+
assert res.nit == 0
|
796 |
+
assert res.nfev == 2
|
797 |
+
assert res.status == -2
|
798 |
+
assert res.x == -3 # best so far
|
799 |
+
|
800 |
+
# Test maxiter = 1
|
801 |
+
res = _chandrupatla_root(f, *bracket, maxiter=1)
|
802 |
+
assert res.success
|
803 |
+
assert res.status == 0
|
804 |
+
assert res.nit == 1
|
805 |
+
assert res.nfev == 3
|
806 |
+
assert_allclose(res.x, 1)
|
807 |
+
|
808 |
+
# Test scalar `args` (not in tuple)
|
809 |
+
def f(x, c):
|
810 |
+
return c*x - 1
|
811 |
+
|
812 |
+
res = _chandrupatla_root(f, -1, 1, args=3)
|
813 |
+
assert_allclose(res.x, 1/3)
|
814 |
+
|
815 |
+
# # TODO: Test zero tolerance
|
816 |
+
# # ~~What's going on here - why are iterations repeated?~~
|
817 |
+
# # tl goes to zero when xatol=xrtol=0. When function is nearly linear,
|
818 |
+
# # this causes convergence issues.
|
819 |
+
# def f(x):
|
820 |
+
# return np.cos(x)
|
821 |
+
#
|
822 |
+
# res = _chandrupatla_root(f, 0, np.pi, xatol=0, xrtol=0)
|
823 |
+
# assert res.nit < 100
|
824 |
+
# xp = np.nextafter(res.x, np.inf)
|
825 |
+
# xm = np.nextafter(res.x, -np.inf)
|
826 |
+
# assert np.abs(res.fun) < np.abs(f(xp))
|
827 |
+
# assert np.abs(res.fun) < np.abs(f(xm))
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_cobyla.py
ADDED
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import math
|
2 |
+
|
3 |
+
import numpy as np
|
4 |
+
from numpy.testing import assert_allclose, assert_, assert_array_equal
|
5 |
+
import pytest
|
6 |
+
|
7 |
+
from scipy.optimize import fmin_cobyla, minimize, Bounds
|
8 |
+
|
9 |
+
|
10 |
+
class TestCobyla:
    """Tests for the COBYLA optimizer via both `fmin_cobyla` and `minimize`.

    The reference problem minimizes x0^2 + |x1|^3 on the circle of radius 5
    (enforced by a pair of opposing inequality constraints).
    """

    def setup_method(self):
        # Starting point, known optimum, and shared `minimize` options.
        self.x0 = [4.95, 0.66]
        self.solution = [math.sqrt(25 - (2.0/3)**2), 2.0/3]
        self.opts = {'disp': False, 'rhobeg': 1, 'tol': 1e-5,
                     'maxiter': 100}

    def fun(self, x):
        # Objective: x0^2 + |x1|^3.
        return x[0]**2 + abs(x[1])**3

    def con1(self, x):
        # >= 0 outside (or on) the circle of radius 5.
        return x[0]**2 + x[1]**2 - 25

    def con2(self, x):
        # >= 0 inside (or on) the circle; with con1 this pins the iterate
        # onto the circle itself.
        return -self.con1(x)

    @pytest.mark.xslow(True, reason='not slow, but noisy so only run rarely')
    def test_simple(self, capfd):
        # use disp=True as smoke test for gh-8118
        x = fmin_cobyla(self.fun, self.x0, [self.con1, self.con2], rhobeg=1,
                        rhoend=1e-5, maxfun=100, disp=True)
        assert_allclose(x, self.solution, atol=1e-4)

    def test_minimize_simple(self):
        # Exercise COBYLA via `minimize` and verify the callback contract.
        class Callback:
            def __init__(self):
                self.n_calls = 0
                self.last_x = None

            def __call__(self, x):
                self.n_calls += 1
                self.last_x = x

        cb = Callback()

        # Minimize with method='COBYLA'
        constraints = ({'type': 'ineq', 'fun': self.con1},
                       {'type': 'ineq', 'fun': self.con2})
        sol = minimize(self.fun, self.x0, method='cobyla',
                       constraints=constraints, callback=cb,
                       options=self.opts)
        assert_allclose(sol.x, self.solution, atol=1e-4)
        assert_(sol.success, sol.message)
        assert_(sol.maxcv < 1e-5, sol)
        assert_(sol.nfev < 70, sol)
        assert_(sol.fun < self.fun(self.solution) + 1e-3, sol)
        assert_(sol.nfev == cb.n_calls,
                "Callback is not called exactly once for every function eval.")
        assert_array_equal(
            sol.x,
            cb.last_x,
            "Last design vector sent to the callback is not equal to returned value.",
        )

    def test_minimize_constraint_violation(self):
        # A deliberately infeasible random problem must report failure and a
        # constraint violation exceeding `catol`.
        np.random.seed(1234)
        pb = np.random.rand(10, 10)
        spread = np.random.rand(10)

        def p(w):
            return pb.dot(w)

        def f(w):
            return -(w * spread).sum()

        def c1(w):
            return 500 - abs(p(w)).sum()

        def c2(w):
            return 5 - abs(p(w).sum())

        def c3(w):
            return 5 - abs(p(w)).max()

        cons = ({'type': 'ineq', 'fun': c1},
                {'type': 'ineq', 'fun': c2},
                {'type': 'ineq', 'fun': c3})
        w0 = np.zeros((10,))
        sol = minimize(f, w0, method='cobyla', constraints=cons,
                       options={'catol': 1e-6})
        assert_(sol.maxcv > 1e-6)
        assert_(not sol.success)
91 |
+
|
92 |
+
|
93 |
+
def test_vector_constraints():
    # test that fmin_cobyla and minimize can take a combination
    # of constraints, some returning a number and others an array
    def fun(x):
        return (x[0] - 1)**2 + (x[1] - 2.5)**2

    def fmin(x):
        return fun(x) - 1

    def cons1(x):
        # one affine inequality per row of `a`
        a = np.array([[1, -2, 2], [-1, -2, 6], [-1, 2, 2]])
        return np.array([row[0] * x[0] + row[1] * x[1] + row[2] for row in a])

    def cons2(x):
        return x  # identity, acts as bounds x > 0

    x0 = np.array([2, 0])
    cons_list = [fun, cons1, cons2]

    xsol = [1.4, 1.7]
    fsol = 0.8

    # testing fmin_cobyla
    sol = fmin_cobyla(fun, x0, cons_list, rhoend=1e-5)
    assert_allclose(sol, xsol, atol=1e-4)

    sol = fmin_cobyla(fun, x0, fmin, rhoend=1e-5)
    assert_allclose(fun(sol), 1, atol=1e-4)

    # testing minimize
    constraints = [{'type': 'ineq', 'fun': c} for c in cons_list]
    sol = minimize(fun, x0, constraints=constraints, tol=1e-5)
    assert_allclose(sol.x, xsol, atol=1e-4)
    assert_(sol.success, sol.message)
    assert_allclose(sol.fun, fsol, atol=1e-4)

    constraints = {'type': 'ineq', 'fun': fmin}
    sol = minimize(fun, x0, constraints=constraints, tol=1e-5)
    assert_allclose(sol.fun, 1, atol=1e-4)
+
|
135 |
+
class TestBounds:
|
136 |
+
# Test cobyla support for bounds (only when used via `minimize`)
|
137 |
+
# Invalid bounds is tested in
|
138 |
+
# test_optimize.TestOptimizeSimple.test_minimize_invalid_bounds
|
139 |
+
|
140 |
+
def test_basic(self):
|
141 |
+
def f(x):
|
142 |
+
return np.sum(x**2)
|
143 |
+
|
144 |
+
lb = [-1, None, 1, None, -0.5]
|
145 |
+
ub = [-0.5, -0.5, None, None, -0.5]
|
146 |
+
bounds = [(a, b) for a, b in zip(lb, ub)]
|
147 |
+
# these are converted to Bounds internally
|
148 |
+
|
149 |
+
res = minimize(f, x0=[1, 2, 3, 4, 5], method='cobyla', bounds=bounds)
|
150 |
+
ref = [-0.5, -0.5, 1, 0, -0.5]
|
151 |
+
assert res.success
|
152 |
+
assert_allclose(res.x, ref, atol=1e-3)
|
153 |
+
|
154 |
+
def test_unbounded(self):
|
155 |
+
def f(x):
|
156 |
+
return np.sum(x**2)
|
157 |
+
|
158 |
+
bounds = Bounds([-np.inf, -np.inf], [np.inf, np.inf])
|
159 |
+
res = minimize(f, x0=[1, 2], method='cobyla', bounds=bounds)
|
160 |
+
assert res.success
|
161 |
+
assert_allclose(res.x, 0, atol=1e-3)
|
162 |
+
|
163 |
+
bounds = Bounds([1, -np.inf], [np.inf, np.inf])
|
164 |
+
res = minimize(f, x0=[1, 2], method='cobyla', bounds=bounds)
|
165 |
+
assert res.success
|
166 |
+
assert_allclose(res.x, [1, 0], atol=1e-3)
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_constraint_conversion.py
ADDED
@@ -0,0 +1,274 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Unit test for constraint conversion
|
3 |
+
"""
|
4 |
+
|
5 |
+
import numpy as np
|
6 |
+
from numpy.testing import (assert_array_almost_equal,
|
7 |
+
assert_allclose, assert_warns, suppress_warnings)
|
8 |
+
import pytest
|
9 |
+
from scipy.optimize import (NonlinearConstraint, LinearConstraint,
|
10 |
+
OptimizeWarning, minimize, BFGS)
|
11 |
+
from .test_minimize_constrained import (Maratos, HyperbolicIneq, Rosenbrock,
|
12 |
+
IneqRosenbrock, EqIneqRosenbrock,
|
13 |
+
BoundedRosenbrock, Elec)
|
14 |
+
|
15 |
+
|
16 |
+
class TestOldToNew:
    """Old-style (dict) constraints must be converted for trust-constr."""
    x0 = (2, 0)
    bnds = ((0, None), (0, None))
    method = "trust-constr"

    def test_constraint_dictionary_1(self):
        def objective(x):
            return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2
        cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
                {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
                {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2})

        with suppress_warnings() as sup:
            sup.filter(UserWarning, "delta_grad == 0.0")
            result = minimize(objective, self.x0, method=self.method,
                              bounds=self.bnds, constraints=cons)
        assert_allclose(result.x, [1.4, 1.7], rtol=1e-4)
        assert_allclose(result.fun, 0.8, rtol=1e-4)

    def test_constraint_dictionary_2(self):
        def objective(x):
            return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2
        # equality constraint with extra args and an explicit Jacobian
        cons = {'type': 'eq',
                'fun': lambda x, p1, p2: p1*x[0] - p2*x[1],
                'args': (1, 1.1),
                'jac': lambda x, p1, p2: np.array([[p1, -p2]])}
        with suppress_warnings() as sup:
            sup.filter(UserWarning, "delta_grad == 0.0")
            result = minimize(objective, self.x0, method=self.method,
                              bounds=self.bnds, constraints=cons)
        assert_allclose(result.x, [1.7918552, 1.62895927])
        assert_allclose(result.fun, 1.3857466063348418)

    def test_constraint_dictionary_3(self):
        def objective(x):
            return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2
        # a mixture of a dict constraint and a new-style constraint object
        cons = [{'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
                NonlinearConstraint(lambda x: x[0] - x[1], 0, 0)]

        with suppress_warnings() as sup:
            sup.filter(UserWarning, "delta_grad == 0.0")
            result = minimize(objective, self.x0, method=self.method,
                              bounds=self.bnds, constraints=cons)
        assert_allclose(result.x, [1.75, 1.75], rtol=1e-4)
        assert_allclose(result.fun, 1.125, rtol=1e-4)
+
|
63 |
+
class TestNewToOld:
    """New-style constraint objects must be converted for legacy solvers."""

    def test_multiple_constraint_objects(self):
        def fun(x):
            return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2
        x0 = [2, 0, 1]
        methods = ["slsqp", "cobyla", "trust-constr"]

        # Equivalent expressions of the same pair of inequality constraints:
        # mixed old/new, linear+nonlinear, and purely nonlinear.
        coni = [  # only inequality constraints (can use cobyla)
            [{'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
             NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)],
            [LinearConstraint([1, -2, 0], -2, np.inf),
             NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)],
            [NonlinearConstraint(lambda x: x[0] - 2 * x[1] + 2, 0, np.inf),
             NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)],
        ]

        for con in coni:
            funs = {}
            for method in methods:
                with suppress_warnings() as sup:
                    sup.filter(UserWarning)
                    result = minimize(fun, x0, method=method, constraints=con)
                    funs[method] = result.fun
            # every solver should reach (essentially) the same optimum
            assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-4)
            assert_allclose(funs['cobyla'], funs['trust-constr'], rtol=1e-4)

    def test_individual_constraint_objects(self):
        def fun(x):
            return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2
        x0 = [2, 0, 1]

        cone = []  # with equality constraints (can't use cobyla)
        coni = []  # only inequality constraints (can use cobyla)
        methods = ["slsqp", "cobyla", "trust-constr"]

        # nonstandard data types for constraint equality bounds
        cone.extend([
            NonlinearConstraint(lambda x: x[0] - x[1], 1, 1),
            NonlinearConstraint(lambda x: x[0] - x[1], [1.21], [1.21]),
            NonlinearConstraint(lambda x: x[0] - x[1],
                                1.21, np.array([1.21])),
        ])

        # multiple equalities
        cone.extend([
            NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                1.21, 1.21),  # two same equalities
            NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                [1.21, 1.4],
                                [1.21, 1.4]),  # two different equalities
            NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                [1.21, 1.21],
                                1.21),  # equality specified two ways
            NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                [1.21, -np.inf],
                                [1.21, np.inf]),  # equality + unbounded
        ])

        # nonstandard data types for constraint inequality bounds
        coni.extend([
            NonlinearConstraint(lambda x: x[0] - x[1], 1.21, np.inf),
            NonlinearConstraint(lambda x: x[0] - x[1], [1.21], np.inf),
            NonlinearConstraint(lambda x: x[0] - x[1],
                                1.21, np.array([np.inf])),
            NonlinearConstraint(lambda x: x[0] - x[1], -np.inf, -3),
            NonlinearConstraint(lambda x: x[0] - x[1],
                                np.array(-np.inf), -3),
        ])

        # multiple inequalities/equalities
        coni.append(NonlinearConstraint(
            lambda x: [x[0] - x[1], x[1] - x[2]],
            1.21, np.inf))  # two same inequalities
        cone.append(NonlinearConstraint(
            lambda x: [x[0] - x[1], x[1] - x[2]],
            [1.21, -np.inf], [1.21, 1.4]))  # mixed equality/inequality
        coni.extend([
            NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                [1.1, .8],
                                [1.2, 1.4]),  # bounded above and below
            NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                [-1.2, -1.4],
                                [-1.1, -.8]),  # - bounded above and below
        ])

        # quick check of LinearConstraint class (very little new code to test)
        cone.extend([
            LinearConstraint([1, -1, 0], 1.21, 1.21),
            LinearConstraint([[1, -1, 0], [0, 1, -1]], 1.21, 1.21),
            LinearConstraint([[1, -1, 0], [0, 1, -1]],
                             [1.21, -np.inf], [1.21, 1.4]),
        ])

        for con in coni:
            funs = {}
            for method in methods:
                with suppress_warnings() as sup:
                    sup.filter(UserWarning)
                    result = minimize(fun, x0, method=method, constraints=con)
                    funs[method] = result.fun
            assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-3)
            assert_allclose(funs['cobyla'], funs['trust-constr'], rtol=1e-3)

        for con in cone:
            funs = {}
            for method in methods[::2]:  # skip cobyla
                with suppress_warnings() as sup:
                    sup.filter(UserWarning)
                    result = minimize(fun, x0, method=method, constraints=con)
                    funs[method] = result.fun
            assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-3)
|
170 |
+
class TestNewToOldSLSQP:
    """Check conversion of new-style constraints for the SLSQP solver."""
    method = 'slsqp'
    elec = Elec(n_electrons=2)
    elec.x_opt = np.array([-0.58438468, 0.58438466, 0.73597047,
                           -0.73597044, 0.34180668, -0.34180667])
    brock = BoundedRosenbrock()
    brock.x_opt = [0, 0]
    list_of_problems = [Maratos(),
                        HyperbolicIneq(),
                        Rosenbrock(),
                        IneqRosenbrock(),
                        EqIneqRosenbrock(),
                        elec,
                        brock
                        ]

    def test_list_of_problems(self):
        # Every reference problem should be solved to its known optimum.
        for prob in self.list_of_problems:

            with suppress_warnings() as sup:
                sup.filter(UserWarning)
                result = minimize(prob.fun, prob.x0,
                                  method=self.method,
                                  bounds=prob.bounds,
                                  constraints=prob.constr)

            assert_array_almost_equal(result.x, prob.x_opt, decimal=3)

    def test_warn_mixed_constraints(self):
        # warns about inefficiency of mixed equality/inequality constraints
        def fun(x):
            return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2
        cons = NonlinearConstraint(lambda x: [x[0]**2 - x[1], x[1] - x[2]],
                                   [1.1, .8], [1.1, 1.4])
        bnds = ((0, None), (0, None), (0, None))
        with suppress_warnings() as sup:
            sup.filter(UserWarning, "delta_grad == 0.0")
            assert_warns(OptimizeWarning, minimize, fun, (2, 0, 1),
                         method=self.method, bounds=bnds, constraints=cons)

    def test_warn_ignored_options(self):
        # warns about constraint options being ignored
        def fun(x):
            return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2
        x0 = (2, 0, 1)

        if self.method == "slsqp":
            bnds = ((0, None), (0, None), (0, None))
        else:
            bnds = None

        cons = NonlinearConstraint(lambda x: x[0], 2, np.inf)
        res = minimize(fun, x0, method=self.method,
                       bounds=bnds, constraints=cons)
        # no warnings without constraint options
        assert_allclose(res.fun, 1)

        cons = LinearConstraint([1, 0, 0], 2, np.inf)
        res = minimize(fun, x0, method=self.method,
                       bounds=bnds, constraints=cons)
        # no warnings without constraint options
        assert_allclose(res.fun, 1)

        cons = []
        cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf,
                                        keep_feasible=True))
        cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf,
                                        hess=BFGS()))
        cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf,
                                        finite_diff_jac_sparsity=42))
        cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf,
                                        finite_diff_rel_step=42))
        cons.append(LinearConstraint([1, 0, 0], 2, np.inf,
                                     keep_feasible=True))
        for con in cons:
            # BUG FIX: the loop previously passed `constraints=cons` (the
            # whole list) instead of the loop variable `con`, so each
            # unsupported option was never exercised in isolation.
            assert_warns(OptimizeWarning, minimize, fun, x0,
                         method=self.method, bounds=bnds, constraints=con)
|
250 |
+
class TestNewToOldCobyla:
    """COBYLA should agree with trust-constr on the Elec reference problems."""
    method = 'cobyla'

    list_of_problems = [
        Elec(n_electrons=2),
        Elec(n_electrons=4),
    ]

    @pytest.mark.slow
    def test_list_of_problems(self):
        for prob in self.list_of_problems:

            with suppress_warnings() as sup:
                sup.filter(UserWarning)
                # reference solution from trust-constr
                truth = minimize(prob.fun, prob.x0,
                                 method='trust-constr',
                                 bounds=prob.bounds,
                                 constraints=prob.constr)
                result = minimize(prob.fun, prob.x0,
                                  method=self.method,
                                  bounds=prob.bounds,
                                  constraints=prob.constr)

            assert_allclose(result.fun, truth.fun, rtol=1e-3)
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_constraints.py
ADDED
@@ -0,0 +1,255 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
import numpy as np
|
3 |
+
from numpy.testing import TestCase, assert_array_equal
|
4 |
+
import scipy.sparse as sps
|
5 |
+
from scipy.optimize._constraints import (
|
6 |
+
Bounds, LinearConstraint, NonlinearConstraint, PreparedConstraint,
|
7 |
+
new_bounds_to_old, old_bound_to_new, strict_bounds)
|
8 |
+
|
9 |
+
|
10 |
+
class TestStrictBounds(TestCase):
    """Tests for `strict_bounds`, which expands bounds to length `m` and
    relaxes any component whose feasibility is not enforced to +/-inf."""

    def test_scalarvalue_unique_enforce_feasibility(self):
        m = 3
        # scalar bounds, one shared enforce_feasibility flag
        lo, hi = strict_bounds(2, 4, False, m)
        assert_array_equal(lo, [-np.inf, -np.inf, -np.inf])
        assert_array_equal(hi, [np.inf, np.inf, np.inf])

        lo, hi = strict_bounds(2, 4, True, m)
        assert_array_equal(lo, [2, 2, 2])
        assert_array_equal(hi, [4, 4, 4])

    def test_vectorvalue_unique_enforce_feasibility(self):
        m = 3
        # vector bounds, one shared enforce_feasibility flag
        lo, hi = strict_bounds([1, 2, 3], [4, 5, 6], False, m)
        assert_array_equal(lo, [-np.inf, -np.inf, -np.inf])
        assert_array_equal(hi, [np.inf, np.inf, np.inf])

        lo, hi = strict_bounds([1, 2, 3], [4, 5, 6], True, m)
        assert_array_equal(lo, [1, 2, 3])
        assert_array_equal(hi, [4, 5, 6])

    def test_scalarvalue_vector_enforce_feasibility(self):
        m = 3
        # scalar bounds, per-component enforce_feasibility flags
        lo, hi = strict_bounds(2, 4, [False, True, False], m)
        assert_array_equal(lo, [-np.inf, 2, -np.inf])
        assert_array_equal(hi, [np.inf, 4, np.inf])

    def test_vectorvalue_vector_enforce_feasibility(self):
        m = 3
        # vector bounds, per-component enforce_feasibility flags
        lo, hi = strict_bounds([1, 2, 3], [4, 6, np.inf],
                               [True, False, True], m)
        assert_array_equal(lo, [1, -np.inf, 3])
        assert_array_equal(hi, [4, np.inf, np.inf])
69 |
+
|
70 |
+
def test_prepare_constraint_infeasible_x0():
    # PreparedConstraint must reject a starting point that violates a
    # keep-feasible constraint, and report violations correctly otherwise.
    lb = np.array([0, 20, 30])
    ub = np.array([0.5, np.inf, 70])
    x0 = np.array([1, 2, 3])
    enforce_feasibility = np.array([False, True, True], dtype=bool)
    bounds = Bounds(lb, ub, enforce_feasibility)
    with pytest.raises(ValueError):
        PreparedConstraint(bounds, x0)

    pc = PreparedConstraint(Bounds(lb, ub), [1, 2, 3])
    assert (pc.violation([1, 2, 3]) > 0).any()
    assert (pc.violation([0.25, 21, 31]) == 0).all()

    # same checks for a linear constraint A @ x <= 0
    x0 = np.array([1, 2, 3, 4])
    A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]])
    enforce_feasibility = np.array([True, True, True], dtype=bool)
    linear = LinearConstraint(A, -np.inf, 0, enforce_feasibility)
    with pytest.raises(ValueError):
        PreparedConstraint(linear, x0)

    pc = PreparedConstraint(LinearConstraint(A, -np.inf, 0),
                            [1, 2, 3, 4])
    assert (pc.violation([1, 2, 3, 4]) > 0).any()
    assert (pc.violation([-10, 2, -10, 4]) == 0).all()

    # and for an equivalent nonlinear constraint
    def fun(x):
        return A.dot(x)

    def jac(x):
        return A

    def hess(x, v):
        return sps.csr_matrix((4, 4))

    nonlinear = NonlinearConstraint(fun, -np.inf, 0, jac, hess,
                                    enforce_feasibility)
    with pytest.raises(ValueError):
        PreparedConstraint(nonlinear, x0)

    pc = PreparedConstraint(nonlinear, [-10, 2, -10, 4])
    assert (pc.violation([1, 2, 3, 4]) > 0).any()
    assert (pc.violation([-10, 2, -10, 4]) == 0).all()
110 |
+
|
111 |
+
def test_violation():
    # `PreparedConstraint.violation` reports how far each constraint
    # component lies outside its [lb, ub] interval.
    def cons_f(x):
        return np.array([x[0] ** 2 + x[1], x[0] ** 2 - x[1]])

    nlc = NonlinearConstraint(cons_f, [-1, -0.8500], [2, 2])
    pc = PreparedConstraint(nlc, [0.5, 1])

    # feasible point: zero violation exactly
    assert_array_equal(pc.violation([0.5, 1]), [0., 0.])

    cases = [([0.5, 1.2], [0., 0.1]),
             ([1.2, 1.2], [0.64, 0]),
             ([0.1, -1.2], [0.19, 0]),
             ([0.1, 2], [0.01, 1.14])]
    for point, expected in cases:
        np.testing.assert_almost_equal(pc.violation(point), expected)
128 |
+
|
129 |
+
def test_new_bounds_to_old():
    # Convert (lb, ub) arrays to the legacy list-of-pairs format, with
    # None standing in for infinite bounds; scalars broadcast to length n.
    lb = np.array([-np.inf, 2, 3])
    ub = np.array([3, np.inf, 10])

    assert_array_equal(new_bounds_to_old(lb, ub, 3),
                       [(None, 3), (2, None), (3, 10)])

    # scalar lower bound broadcasts
    assert_array_equal(new_bounds_to_old(-1, ub, 3),
                       [(-1, 3), (-1, None), (-1, 10)])
    assert_array_equal(new_bounds_to_old(-np.inf, ub, 3),
                       [(None, 3), (None, None), (None, 10)])

    # scalar upper bound broadcasts
    assert_array_equal(new_bounds_to_old(lb, 20, 3),
                       [(None, 20), (2, 20), (3, 20)])
    assert_array_equal(new_bounds_to_old(lb, np.inf, 3),
                       [(None, None), (2, None), (3, None)])

    # both bounds scalar
    assert_array_equal(new_bounds_to_old(1, 2, 3),
                       [(1, 2), (1, 2), (1, 2)])
    assert_array_equal(new_bounds_to_old(-np.inf, np.inf, 3),
                       [(None, None), (None, None), (None, None)])
154 |
+
|
155 |
+
def test_old_bounds_to_new():
    # Convert legacy list-of-pairs bounds into (lb, ub) arrays, mapping
    # None to the appropriate infinity.
    lb, ub = old_bound_to_new(([1, 2], (None, 3), (-1, None)))
    assert_array_equal(lb, np.array([1, -np.inf, -1]))
    assert_array_equal(ub, np.array([2, 3, np.inf]))

    # array-valued pair entries are accepted as well
    lb, ub = old_bound_to_new([(-np.inf, np.inf),
                               (np.array([1]), np.array([1]))])
    assert_array_equal(lb, [-np.inf, 1])
    assert_array_equal(ub, [np.inf, 1])
170 |
+
|
171 |
+
class TestBounds:
|
172 |
+
def test_repr(self):
|
173 |
+
# so that eval works
|
174 |
+
from numpy import array, inf # noqa: F401
|
175 |
+
for args in (
|
176 |
+
(-1.0, 5.0),
|
177 |
+
(-1.0, np.inf, True),
|
178 |
+
(np.array([1.0, -np.inf]), np.array([2.0, np.inf])),
|
179 |
+
(np.array([1.0, -np.inf]), np.array([2.0, np.inf]),
|
180 |
+
np.array([True, False])),
|
181 |
+
):
|
182 |
+
bounds = Bounds(*args)
|
183 |
+
bounds2 = eval(repr(Bounds(*args)))
|
184 |
+
assert_array_equal(bounds.lb, bounds2.lb)
|
185 |
+
assert_array_equal(bounds.ub, bounds2.ub)
|
186 |
+
assert_array_equal(bounds.keep_feasible, bounds2.keep_feasible)
|
187 |
+
|
188 |
+
def test_array(self):
|
189 |
+
# gh13501
|
190 |
+
b = Bounds(lb=[0.0, 0.0], ub=[1.0, 1.0])
|
191 |
+
assert isinstance(b.lb, np.ndarray)
|
192 |
+
assert isinstance(b.ub, np.ndarray)
|
193 |
+
|
194 |
+
def test_defaults(self):
|
195 |
+
b1 = Bounds()
|
196 |
+
b2 = Bounds(np.asarray(-np.inf), np.asarray(np.inf))
|
197 |
+
assert b1.lb == b2.lb
|
198 |
+
assert b1.ub == b2.ub
|
199 |
+
|
200 |
+
def test_input_validation(self):
|
201 |
+
message = "Lower and upper bounds must be dense arrays."
|
202 |
+
with pytest.raises(ValueError, match=message):
|
203 |
+
Bounds(sps.coo_array([1, 2]), [1, 2])
|
204 |
+
with pytest.raises(ValueError, match=message):
|
205 |
+
Bounds([1, 2], sps.coo_array([1, 2]))
|
206 |
+
|
207 |
+
message = "`keep_feasible` must be a dense array."
|
208 |
+
with pytest.raises(ValueError, match=message):
|
209 |
+
Bounds([1, 2], [1, 2], keep_feasible=sps.coo_array([True, True]))
|
210 |
+
|
211 |
+
message = "`lb`, `ub`, and `keep_feasible` must be broadcastable."
|
212 |
+
with pytest.raises(ValueError, match=message):
|
213 |
+
Bounds([1, 2], [1, 2, 3])
|
214 |
+
|
215 |
+
def test_residual(self):
|
216 |
+
bounds = Bounds(-2, 4)
|
217 |
+
x0 = [-1, 2]
|
218 |
+
np.testing.assert_allclose(bounds.residual(x0), ([1, 4], [5, 2]))
|
219 |
+
|
220 |
+
|
221 |
+
class TestLinearConstraint:
|
222 |
+
def test_defaults(self):
|
223 |
+
A = np.eye(4)
|
224 |
+
lc = LinearConstraint(A)
|
225 |
+
lc2 = LinearConstraint(A, -np.inf, np.inf)
|
226 |
+
assert_array_equal(lc.lb, lc2.lb)
|
227 |
+
assert_array_equal(lc.ub, lc2.ub)
|
228 |
+
|
229 |
+
def test_input_validation(self):
|
230 |
+
A = np.eye(4)
|
231 |
+
message = "`lb`, `ub`, and `keep_feasible` must be broadcastable"
|
232 |
+
with pytest.raises(ValueError, match=message):
|
233 |
+
LinearConstraint(A, [1, 2], [1, 2, 3])
|
234 |
+
|
235 |
+
message = "Constraint limits must be dense arrays"
|
236 |
+
with pytest.raises(ValueError, match=message):
|
237 |
+
LinearConstraint(A, sps.coo_array([1, 2]), [2, 3])
|
238 |
+
with pytest.raises(ValueError, match=message):
|
239 |
+
LinearConstraint(A, [1, 2], sps.coo_array([2, 3]))
|
240 |
+
|
241 |
+
message = "`keep_feasible` must be a dense array"
|
242 |
+
with pytest.raises(ValueError, match=message):
|
243 |
+
keep_feasible = sps.coo_array([True, True])
|
244 |
+
LinearConstraint(A, [1, 2], [2, 3], keep_feasible=keep_feasible)
|
245 |
+
|
246 |
+
A = np.empty((4, 3, 5))
|
247 |
+
message = "`A` must have exactly two dimensions."
|
248 |
+
with pytest.raises(ValueError, match=message):
|
249 |
+
LinearConstraint(A)
|
250 |
+
|
251 |
+
def test_residual(self):
|
252 |
+
A = np.eye(2)
|
253 |
+
lc = LinearConstraint(A, -2, 4)
|
254 |
+
x0 = [-1, 2]
|
255 |
+
np.testing.assert_allclose(lc.residual(x0), ([1, 4], [5, 2]))
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_cython_optimize.py
ADDED
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Test Cython optimize zeros API functions: ``bisect``, ``ridder``, ``brenth``,
|
3 |
+
and ``brentq`` in `scipy.optimize.cython_optimize`, by finding the roots of a
|
4 |
+
3rd order polynomial given a sequence of constant terms, ``a0``, and fixed 1st,
|
5 |
+
2nd, and 3rd order terms in ``args``.
|
6 |
+
|
7 |
+
.. math::
|
8 |
+
|
9 |
+
f(x, a0, args) = ((args[2]*x + args[1])*x + args[0])*x + a0
|
10 |
+
|
11 |
+
The 3rd order polynomial function is written in Cython and called in a Python
|
12 |
+
wrapper named after the zero function. See the private ``_zeros`` Cython module
|
13 |
+
in `scipy.optimize.cython_optimize` for more information.
|
14 |
+
"""
|
15 |
+
|
16 |
+
import numpy.testing as npt
|
17 |
+
from scipy.optimize.cython_optimize import _zeros
|
18 |
+
|
19 |
+
# CONSTANTS
# Solve x**3 - A0 = 0 for A0 = [2.0, 2.1, ..., 2.9].
# The ARGS have 3 elements just to show how this could be done for any cubic
# polynomial.
A0 = tuple(-2.0 - x/10.0 for x in range(10))  # constant term
ARGS = (0.0, 0.0, 1.0)  # 1st, 2nd, and 3rd order terms
XLO, XHI = 0.0, 2.0  # first and second bounds of zeros functions
# absolute and relative tolerances and max iterations for zeros functions
XTOL, RTOL, MITR = 0.001, 0.001, 10
# expected roots: the real cube roots of -A0, i.e. of 2.0 ... 2.9
EXPECTED = [(-a0) ** (1.0/3.0) for a0 in A0]
# = [1.2599210498948732,
#    1.2805791649874942,
#    1.300591446851387,
#    1.3200061217959123,
#    1.338865900164339,
#    1.3572088082974532,
#    1.375068867074141,
#    1.3924766500838337,
#    1.4094597464129783,
#    1.4260431471424087]
|
39 |
+
|
40 |
+
|
41 |
+
# test bisect
def test_bisect():
    """Roots found by the 'bisect' loop example must match EXPECTED."""
    roots = _zeros.loop_example('bisect', A0, ARGS, XLO, XHI, XTOL, RTOL,
                                MITR)
    npt.assert_allclose(EXPECTED, list(roots), rtol=RTOL, atol=XTOL)
|
50 |
+
|
51 |
+
|
52 |
+
# test ridder
def test_ridder():
    """Roots found by the 'ridder' loop example must match EXPECTED."""
    roots = _zeros.loop_example('ridder', A0, ARGS, XLO, XHI, XTOL, RTOL,
                                MITR)
    npt.assert_allclose(EXPECTED, list(roots), rtol=RTOL, atol=XTOL)
|
61 |
+
|
62 |
+
|
63 |
+
# test brenth
def test_brenth():
    """Roots found by the 'brenth' loop example must match EXPECTED."""
    roots = _zeros.loop_example('brenth', A0, ARGS, XLO, XHI, XTOL, RTOL,
                                MITR)
    npt.assert_allclose(EXPECTED, list(roots), rtol=RTOL, atol=XTOL)
|
72 |
+
|
73 |
+
|
74 |
+
# test brentq
def test_brentq():
    """Roots found by the 'brentq' loop example must match EXPECTED."""
    roots = _zeros.loop_example('brentq', A0, ARGS, XLO, XHI, XTOL, RTOL,
                                MITR)
    npt.assert_allclose(EXPECTED, list(roots), rtol=RTOL, atol=XTOL)
|
83 |
+
|
84 |
+
|
85 |
+
# test brentq with full output
def test_brentq_full_output():
    """'brentq' full output reports root, evaluation counts and error_num."""
    output = _zeros.full_output_example(
        (A0[0],) + ARGS, XLO, XHI, XTOL, RTOL, MITR)
    npt.assert_allclose(EXPECTED[0], output['root'], rtol=RTOL, atol=XTOL)
    # counts for this particular cubic and tolerance settings
    for key, expected in (('iterations', 6), ('funcalls', 7),
                          ('error_num', 0)):
        npt.assert_equal(expected, output[key])
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiable_functions.py
ADDED
@@ -0,0 +1,758 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
import numpy as np
|
3 |
+
from numpy.testing import (TestCase, assert_array_almost_equal,
|
4 |
+
assert_array_equal, assert_, assert_allclose,
|
5 |
+
assert_equal)
|
6 |
+
from scipy.sparse import csr_matrix
|
7 |
+
from scipy.sparse.linalg import LinearOperator
|
8 |
+
from scipy.optimize._differentiable_functions import (ScalarFunction,
|
9 |
+
VectorFunction,
|
10 |
+
LinearVectorFunction,
|
11 |
+
IdentityVectorFunction)
|
12 |
+
from scipy.optimize import rosen, rosen_der, rosen_hess
|
13 |
+
from scipy.optimize._hessian_update_strategy import BFGS
|
14 |
+
|
15 |
+
|
16 |
+
class ExScalarFunction:
    """Scalar test function with analytic derivatives and call counters.

    f(x) = 2*(x0**2 + x1**2 - 1) - x0; nfev/ngev/nhev count the number of
    function, gradient and Hessian evaluations respectively.
    """

    def __init__(self):
        # evaluation counters inspected by the tests
        self.nfev = 0
        self.ngev = 0
        self.nhev = 0

    def fun(self, x):
        self.nfev += 1
        x0, x1 = x[0], x[1]
        return 2*(x0**2 + x1**2 - 1) - x0

    def grad(self, x):
        self.ngev += 1
        return np.array([4*x[0] - 1, 4*x[1]])

    def hess(self, x):
        # Hessian is constant: 4*I
        self.nhev += 1
        return 4*np.eye(2)
|
34 |
+
|
35 |
+
|
36 |
+
class TestScalarFunction(TestCase):
|
37 |
+
|
38 |
+
def test_finite_difference_grad(self):
|
39 |
+
ex = ExScalarFunction()
|
40 |
+
nfev = 0
|
41 |
+
ngev = 0
|
42 |
+
|
43 |
+
x0 = [1.0, 0.0]
|
44 |
+
analit = ScalarFunction(ex.fun, x0, (), ex.grad,
|
45 |
+
ex.hess, None, (-np.inf, np.inf))
|
46 |
+
nfev += 1
|
47 |
+
ngev += 1
|
48 |
+
assert_array_equal(ex.nfev, nfev)
|
49 |
+
assert_array_equal(analit.nfev, nfev)
|
50 |
+
assert_array_equal(ex.ngev, ngev)
|
51 |
+
assert_array_equal(analit.ngev, nfev)
|
52 |
+
approx = ScalarFunction(ex.fun, x0, (), '2-point',
|
53 |
+
ex.hess, None, (-np.inf, np.inf))
|
54 |
+
nfev += 3
|
55 |
+
ngev += 1
|
56 |
+
assert_array_equal(ex.nfev, nfev)
|
57 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
58 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
59 |
+
assert_array_equal(analit.f, approx.f)
|
60 |
+
assert_array_almost_equal(analit.g, approx.g)
|
61 |
+
|
62 |
+
x = [10, 0.3]
|
63 |
+
f_analit = analit.fun(x)
|
64 |
+
g_analit = analit.grad(x)
|
65 |
+
nfev += 1
|
66 |
+
ngev += 1
|
67 |
+
assert_array_equal(ex.nfev, nfev)
|
68 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
69 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
70 |
+
f_approx = approx.fun(x)
|
71 |
+
g_approx = approx.grad(x)
|
72 |
+
nfev += 3
|
73 |
+
ngev += 1
|
74 |
+
assert_array_equal(ex.nfev, nfev)
|
75 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
76 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
77 |
+
assert_array_almost_equal(f_analit, f_approx)
|
78 |
+
assert_array_almost_equal(g_analit, g_approx)
|
79 |
+
|
80 |
+
x = [2.0, 1.0]
|
81 |
+
g_analit = analit.grad(x)
|
82 |
+
ngev += 1
|
83 |
+
assert_array_equal(ex.nfev, nfev)
|
84 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
85 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
86 |
+
|
87 |
+
g_approx = approx.grad(x)
|
88 |
+
nfev += 3
|
89 |
+
ngev += 1
|
90 |
+
assert_array_equal(ex.nfev, nfev)
|
91 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
92 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
93 |
+
assert_array_almost_equal(g_analit, g_approx)
|
94 |
+
|
95 |
+
x = [2.5, 0.3]
|
96 |
+
f_analit = analit.fun(x)
|
97 |
+
g_analit = analit.grad(x)
|
98 |
+
nfev += 1
|
99 |
+
ngev += 1
|
100 |
+
assert_array_equal(ex.nfev, nfev)
|
101 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
102 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
103 |
+
f_approx = approx.fun(x)
|
104 |
+
g_approx = approx.grad(x)
|
105 |
+
nfev += 3
|
106 |
+
ngev += 1
|
107 |
+
assert_array_equal(ex.nfev, nfev)
|
108 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
109 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
110 |
+
assert_array_almost_equal(f_analit, f_approx)
|
111 |
+
assert_array_almost_equal(g_analit, g_approx)
|
112 |
+
|
113 |
+
x = [2, 0.3]
|
114 |
+
f_analit = analit.fun(x)
|
115 |
+
g_analit = analit.grad(x)
|
116 |
+
nfev += 1
|
117 |
+
ngev += 1
|
118 |
+
assert_array_equal(ex.nfev, nfev)
|
119 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
120 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
121 |
+
f_approx = approx.fun(x)
|
122 |
+
g_approx = approx.grad(x)
|
123 |
+
nfev += 3
|
124 |
+
ngev += 1
|
125 |
+
assert_array_equal(ex.nfev, nfev)
|
126 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
127 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
128 |
+
assert_array_almost_equal(f_analit, f_approx)
|
129 |
+
assert_array_almost_equal(g_analit, g_approx)
|
130 |
+
|
131 |
+
def test_fun_and_grad(self):
|
132 |
+
ex = ExScalarFunction()
|
133 |
+
|
134 |
+
def fg_allclose(x, y):
|
135 |
+
assert_allclose(x[0], y[0])
|
136 |
+
assert_allclose(x[1], y[1])
|
137 |
+
|
138 |
+
# with analytic gradient
|
139 |
+
x0 = [2.0, 0.3]
|
140 |
+
analit = ScalarFunction(ex.fun, x0, (), ex.grad,
|
141 |
+
ex.hess, None, (-np.inf, np.inf))
|
142 |
+
|
143 |
+
fg = ex.fun(x0), ex.grad(x0)
|
144 |
+
fg_allclose(analit.fun_and_grad(x0), fg)
|
145 |
+
assert analit.ngev == 1
|
146 |
+
|
147 |
+
x0[1] = 1.
|
148 |
+
fg = ex.fun(x0), ex.grad(x0)
|
149 |
+
fg_allclose(analit.fun_and_grad(x0), fg)
|
150 |
+
|
151 |
+
# with finite difference gradient
|
152 |
+
x0 = [2.0, 0.3]
|
153 |
+
sf = ScalarFunction(ex.fun, x0, (), '3-point',
|
154 |
+
ex.hess, None, (-np.inf, np.inf))
|
155 |
+
assert sf.ngev == 1
|
156 |
+
fg = ex.fun(x0), ex.grad(x0)
|
157 |
+
fg_allclose(sf.fun_and_grad(x0), fg)
|
158 |
+
assert sf.ngev == 1
|
159 |
+
|
160 |
+
x0[1] = 1.
|
161 |
+
fg = ex.fun(x0), ex.grad(x0)
|
162 |
+
fg_allclose(sf.fun_and_grad(x0), fg)
|
163 |
+
|
164 |
+
def test_finite_difference_hess_linear_operator(self):
|
165 |
+
ex = ExScalarFunction()
|
166 |
+
nfev = 0
|
167 |
+
ngev = 0
|
168 |
+
nhev = 0
|
169 |
+
|
170 |
+
x0 = [1.0, 0.0]
|
171 |
+
analit = ScalarFunction(ex.fun, x0, (), ex.grad,
|
172 |
+
ex.hess, None, (-np.inf, np.inf))
|
173 |
+
nfev += 1
|
174 |
+
ngev += 1
|
175 |
+
nhev += 1
|
176 |
+
assert_array_equal(ex.nfev, nfev)
|
177 |
+
assert_array_equal(analit.nfev, nfev)
|
178 |
+
assert_array_equal(ex.ngev, ngev)
|
179 |
+
assert_array_equal(analit.ngev, ngev)
|
180 |
+
assert_array_equal(ex.nhev, nhev)
|
181 |
+
assert_array_equal(analit.nhev, nhev)
|
182 |
+
approx = ScalarFunction(ex.fun, x0, (), ex.grad,
|
183 |
+
'2-point', None, (-np.inf, np.inf))
|
184 |
+
assert_(isinstance(approx.H, LinearOperator))
|
185 |
+
for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
|
186 |
+
assert_array_equal(analit.f, approx.f)
|
187 |
+
assert_array_almost_equal(analit.g, approx.g)
|
188 |
+
assert_array_almost_equal(analit.H.dot(v), approx.H.dot(v))
|
189 |
+
nfev += 1
|
190 |
+
ngev += 4
|
191 |
+
assert_array_equal(ex.nfev, nfev)
|
192 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
193 |
+
assert_array_equal(ex.ngev, ngev)
|
194 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
195 |
+
assert_array_equal(ex.nhev, nhev)
|
196 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
197 |
+
|
198 |
+
x = [2.0, 1.0]
|
199 |
+
H_analit = analit.hess(x)
|
200 |
+
nhev += 1
|
201 |
+
assert_array_equal(ex.nfev, nfev)
|
202 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
203 |
+
assert_array_equal(ex.ngev, ngev)
|
204 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
205 |
+
assert_array_equal(ex.nhev, nhev)
|
206 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
207 |
+
H_approx = approx.hess(x)
|
208 |
+
assert_(isinstance(H_approx, LinearOperator))
|
209 |
+
for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
|
210 |
+
assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
|
211 |
+
ngev += 4
|
212 |
+
assert_array_equal(ex.nfev, nfev)
|
213 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
214 |
+
assert_array_equal(ex.ngev, ngev)
|
215 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
216 |
+
assert_array_equal(ex.nhev, nhev)
|
217 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
218 |
+
|
219 |
+
x = [2.1, 1.2]
|
220 |
+
H_analit = analit.hess(x)
|
221 |
+
nhev += 1
|
222 |
+
assert_array_equal(ex.nfev, nfev)
|
223 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
224 |
+
assert_array_equal(ex.ngev, ngev)
|
225 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
226 |
+
assert_array_equal(ex.nhev, nhev)
|
227 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
228 |
+
H_approx = approx.hess(x)
|
229 |
+
assert_(isinstance(H_approx, LinearOperator))
|
230 |
+
for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
|
231 |
+
assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
|
232 |
+
ngev += 4
|
233 |
+
assert_array_equal(ex.nfev, nfev)
|
234 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
235 |
+
assert_array_equal(ex.ngev, ngev)
|
236 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
237 |
+
assert_array_equal(ex.nhev, nhev)
|
238 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
239 |
+
|
240 |
+
x = [2.5, 0.3]
|
241 |
+
_ = analit.grad(x)
|
242 |
+
H_analit = analit.hess(x)
|
243 |
+
ngev += 1
|
244 |
+
nhev += 1
|
245 |
+
assert_array_equal(ex.nfev, nfev)
|
246 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
247 |
+
assert_array_equal(ex.ngev, ngev)
|
248 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
249 |
+
assert_array_equal(ex.nhev, nhev)
|
250 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
251 |
+
_ = approx.grad(x)
|
252 |
+
H_approx = approx.hess(x)
|
253 |
+
assert_(isinstance(H_approx, LinearOperator))
|
254 |
+
for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
|
255 |
+
assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
|
256 |
+
ngev += 4
|
257 |
+
assert_array_equal(ex.nfev, nfev)
|
258 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
259 |
+
assert_array_equal(ex.ngev, ngev)
|
260 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
261 |
+
assert_array_equal(ex.nhev, nhev)
|
262 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
263 |
+
|
264 |
+
x = [5.2, 2.3]
|
265 |
+
_ = analit.grad(x)
|
266 |
+
H_analit = analit.hess(x)
|
267 |
+
ngev += 1
|
268 |
+
nhev += 1
|
269 |
+
assert_array_equal(ex.nfev, nfev)
|
270 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
271 |
+
assert_array_equal(ex.ngev, ngev)
|
272 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
273 |
+
assert_array_equal(ex.nhev, nhev)
|
274 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
275 |
+
_ = approx.grad(x)
|
276 |
+
H_approx = approx.hess(x)
|
277 |
+
assert_(isinstance(H_approx, LinearOperator))
|
278 |
+
for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
|
279 |
+
assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
|
280 |
+
ngev += 4
|
281 |
+
assert_array_equal(ex.nfev, nfev)
|
282 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
283 |
+
assert_array_equal(ex.ngev, ngev)
|
284 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
285 |
+
assert_array_equal(ex.nhev, nhev)
|
286 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
287 |
+
|
288 |
+
def test_x_storage_overlap(self):
|
289 |
+
# Scalar_Function should not store references to arrays, it should
|
290 |
+
# store copies - this checks that updating an array in-place causes
|
291 |
+
# Scalar_Function.x to be updated.
|
292 |
+
|
293 |
+
def f(x):
|
294 |
+
return np.sum(np.asarray(x) ** 2)
|
295 |
+
|
296 |
+
x = np.array([1., 2., 3.])
|
297 |
+
sf = ScalarFunction(f, x, (), '3-point', lambda x: x, None, (-np.inf, np.inf))
|
298 |
+
|
299 |
+
assert x is not sf.x
|
300 |
+
assert_equal(sf.fun(x), 14.0)
|
301 |
+
assert x is not sf.x
|
302 |
+
|
303 |
+
x[0] = 0.
|
304 |
+
f1 = sf.fun(x)
|
305 |
+
assert_equal(f1, 13.0)
|
306 |
+
|
307 |
+
x[0] = 1
|
308 |
+
f2 = sf.fun(x)
|
309 |
+
assert_equal(f2, 14.0)
|
310 |
+
assert x is not sf.x
|
311 |
+
|
312 |
+
# now test with a HessianUpdate strategy specified
|
313 |
+
hess = BFGS()
|
314 |
+
x = np.array([1., 2., 3.])
|
315 |
+
sf = ScalarFunction(f, x, (), '3-point', hess, None, (-np.inf, np.inf))
|
316 |
+
|
317 |
+
assert x is not sf.x
|
318 |
+
assert_equal(sf.fun(x), 14.0)
|
319 |
+
assert x is not sf.x
|
320 |
+
|
321 |
+
x[0] = 0.
|
322 |
+
f1 = sf.fun(x)
|
323 |
+
assert_equal(f1, 13.0)
|
324 |
+
|
325 |
+
x[0] = 1
|
326 |
+
f2 = sf.fun(x)
|
327 |
+
assert_equal(f2, 14.0)
|
328 |
+
assert x is not sf.x
|
329 |
+
|
330 |
+
# gh13740 x is changed in user function
|
331 |
+
def ff(x):
|
332 |
+
x *= x # overwrite x
|
333 |
+
return np.sum(x)
|
334 |
+
|
335 |
+
x = np.array([1., 2., 3.])
|
336 |
+
sf = ScalarFunction(
|
337 |
+
ff, x, (), '3-point', lambda x: x, None, (-np.inf, np.inf)
|
338 |
+
)
|
339 |
+
assert x is not sf.x
|
340 |
+
assert_equal(sf.fun(x), 14.0)
|
341 |
+
assert_equal(sf.x, np.array([1., 2., 3.]))
|
342 |
+
assert x is not sf.x
|
343 |
+
|
344 |
+
def test_lowest_x(self):
|
345 |
+
# ScalarFunction should remember the lowest func(x) visited.
|
346 |
+
x0 = np.array([2, 3, 4])
|
347 |
+
sf = ScalarFunction(rosen, x0, (), rosen_der, rosen_hess,
|
348 |
+
None, None)
|
349 |
+
sf.fun([1, 1, 1])
|
350 |
+
sf.fun(x0)
|
351 |
+
sf.fun([1.01, 1, 1.0])
|
352 |
+
sf.grad([1.01, 1, 1.0])
|
353 |
+
assert_equal(sf._lowest_f, 0.0)
|
354 |
+
assert_equal(sf._lowest_x, [1.0, 1.0, 1.0])
|
355 |
+
|
356 |
+
sf = ScalarFunction(rosen, x0, (), '2-point', rosen_hess,
|
357 |
+
None, (-np.inf, np.inf))
|
358 |
+
sf.fun([1, 1, 1])
|
359 |
+
sf.fun(x0)
|
360 |
+
sf.fun([1.01, 1, 1.0])
|
361 |
+
sf.grad([1.01, 1, 1.0])
|
362 |
+
assert_equal(sf._lowest_f, 0.0)
|
363 |
+
assert_equal(sf._lowest_x, [1.0, 1.0, 1.0])
|
364 |
+
|
365 |
+
def test_float_size(self):
|
366 |
+
x0 = np.array([2, 3, 4]).astype(np.float32)
|
367 |
+
|
368 |
+
# check that ScalarFunction/approx_derivative always send the correct
|
369 |
+
# float width
|
370 |
+
def rosen_(x):
|
371 |
+
assert x.dtype == np.float32
|
372 |
+
return rosen(x)
|
373 |
+
|
374 |
+
sf = ScalarFunction(rosen_, x0, (), '2-point', rosen_hess,
|
375 |
+
None, (-np.inf, np.inf))
|
376 |
+
res = sf.fun(x0)
|
377 |
+
assert res.dtype == np.float32
|
378 |
+
|
379 |
+
|
380 |
+
class ExVectorialFunction:
    """Vector-valued test function (R^2 -> R^2) with analytic Jacobian and
    Hessian-vector product, plus evaluation counters nfev/njev/nhev."""

    def __init__(self):
        # evaluation counters inspected by the tests
        self.nfev = 0
        self.njev = 0
        self.nhev = 0

    def fun(self, x):
        self.nfev += 1
        x0, x1 = x[0], x[1]
        return np.array([2*(x0**2 + x1**2 - 1) - x0,
                         4*(x0**3 + x1**2 - 4) - 3*x0], dtype=x.dtype)

    def jac(self, x):
        self.njev += 1
        x0, x1 = x[0], x[1]
        return np.array([[4*x0 - 1, 4*x1],
                         [12*x0**2 - 3, 8*x1]], dtype=x.dtype)

    def hess(self, x, v):
        # weighted sum of the component Hessians, weights v
        self.nhev += 1
        return v[0]*4*np.eye(2) + v[1]*np.array([[24*x[0], 0],
                                                 [0, 8]])
|
401 |
+
|
402 |
+
|
403 |
+
class TestVectorialFunction(TestCase):
    """Finite-difference derivatives of VectorFunction must agree with the
    analytic ones, and evaluation counters must be exact."""

    def test_finite_difference_jac(self):
        ex = ExVectorialFunction()
        counts = {'nfev': 0, 'njev': 0}

        def bump_and_check(d_f, d_j, *vfs):
            # advance the expected counters and verify the bookkeeping of
            # both the wrapped callable and the VectorFunction objects
            counts['nfev'] += d_f
            counts['njev'] += d_j
            assert_array_equal(ex.nfev, counts['nfev'])
            assert_array_equal(sum(vf.nfev for vf in vfs), counts['nfev'])
            assert_array_equal(ex.njev, counts['njev'])
            assert_array_equal(sum(vf.njev for vf in vfs), counts['njev'])

        x0 = [1.0, 0.0]
        analit = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None,
                                (-np.inf, np.inf), None)
        bump_and_check(1, 1, analit)

        approx = VectorFunction(ex.fun, x0, '2-point', ex.hess, None, None,
                                (-np.inf, np.inf), None)
        bump_and_check(3, 0, analit, approx)
        assert_array_equal(analit.f, approx.f)
        assert_array_almost_equal(analit.J, approx.J)

        # function+Jacobian comparison; the FD Jacobian at x=[10, 0.3] is
        # compared with a looser tolerance
        for x, dec in (([10, 0.3], 4), ([2.5, 0.3], 6), ([2, 0.3], 6)):
            f_ref = analit.fun(x)
            J_ref = analit.jac(x)
            bump_and_check(1, 1, analit, approx)
            f_fd = approx.fun(x)
            J_fd = approx.jac(x)
            bump_and_check(3, 0, analit, approx)
            assert_array_almost_equal(f_ref, f_fd)
            assert_array_almost_equal(J_ref, J_fd, decimal=dec)

        # Jacobian-only comparison
        x = [2.0, 1.0]
        J_ref = analit.jac(x)
        bump_and_check(0, 1, analit, approx)
        J_fd = approx.jac(x)
        bump_and_check(3, 0, analit, approx)
        assert_array_almost_equal(J_ref, J_fd)

    def test_finite_difference_hess_linear_operator(self):
        ex = ExVectorialFunction()
        counts = {'nfev': 0, 'njev': 0, 'nhev': 0}

        def bump_and_check(d_f, d_j, d_h, *vfs):
            counts['nfev'] += d_f
            counts['njev'] += d_j
            counts['nhev'] += d_h
            assert_array_equal(ex.nfev, counts['nfev'])
            assert_array_equal(sum(vf.nfev for vf in vfs), counts['nfev'])
            assert_array_equal(ex.njev, counts['njev'])
            assert_array_equal(sum(vf.njev for vf in vfs), counts['njev'])
            assert_array_equal(ex.nhev, counts['nhev'])
            assert_array_equal(sum(vf.nhev for vf in vfs), counts['nhev'])

        probe_vectors = ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0])

        x0 = [1.0, 0.0]
        v0 = [1.0, 2.0]
        analit = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None,
                                (-np.inf, np.inf), None)
        bump_and_check(1, 1, 1, analit)

        approx = VectorFunction(ex.fun, x0, ex.jac, '2-point', None, None,
                                (-np.inf, np.inf), None)
        assert_(isinstance(approx.H, LinearOperator))
        for p in probe_vectors:
            assert_array_equal(analit.f, approx.f)
            assert_array_almost_equal(analit.J, approx.J)
            assert_array_almost_equal(analit.H.dot(p), approx.H.dot(p))
        bump_and_check(1, 4, 0, analit, approx)

        # Hessian-only comparison; tolerances vary by point
        for x, w, dec in (([2.0, 1.0], v0, 5), ([2.1, 1.2], [1.0, 1.0], 6)):
            H_ref = analit.hess(x, w)
            bump_and_check(0, 0, 1, analit, approx)
            H_fd = approx.hess(x, w)
            assert_(isinstance(H_fd, LinearOperator))
            for p in probe_vectors:
                assert_array_almost_equal(H_ref.dot(p), H_fd.dot(p),
                                          decimal=dec)
            bump_and_check(0, 4, 0, analit, approx)

        # Jacobian followed by Hessian at fresh points
        for x, w in (([2.5, 0.3], v0), ([5.2, 2.3], [2.3, 5.2])):
            _ = analit.jac(x)
            H_ref = analit.hess(x, w)
            bump_and_check(0, 1, 1, analit, approx)
            _ = approx.jac(x)
            H_fd = approx.hess(x, w)
            assert_(isinstance(H_fd, LinearOperator))
            for p in probe_vectors:
                assert_array_almost_equal(H_ref.dot(p), H_fd.dot(p),
                                          decimal=4)
            bump_and_check(0, 4, 0, analit, approx)

    def test_x_storage_overlap(self):
        # VectorFunction should not store references to arrays, it should
        # store copies - mutating the caller's array in place must not
        # silently alias VectorFunction.x.
        ex = ExVectorialFunction()

        def mutate_and_compare(vf, x):
            assert x is not vf.x
            assert_equal(vf.fun(x), ex.fun(x))
            assert x is not vf.x

            x[0] = 2.
            assert_equal(vf.fun(x), ex.fun(x))
            assert x is not vf.x

            x[0] = 1.
            assert_equal(vf.fun(x), ex.fun(x))
            assert x is not vf.x

        x0 = np.array([1.0, 0.0])
        vf = VectorFunction(ex.fun, x0, '3-point', ex.hess, None, None,
                            (-np.inf, np.inf), None)
        mutate_and_compare(vf, x0)

        # now test with a HessianUpdate strategy specified
        hess = BFGS()
        x0 = np.array([1.0, 0.0])
        vf = VectorFunction(ex.fun, x0, '3-point', hess, None, None,
                            (-np.inf, np.inf), None)

        with pytest.warns(UserWarning):
            # filter UserWarning because ExVectorialFunction is linear and
            # a quasi-Newton approximation is used for the Hessian.
            mutate_and_compare(vf, x0)

    def test_float_size(self):
        # fun and jac must preserve a float32 input dtype
        ex = ExVectorialFunction()
        x0 = np.array([1.0, 0.0]).astype(np.float32)

        vf = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None,
                            (-np.inf, np.inf), None)

        assert vf.fun(x0).dtype == np.float32
        assert vf.jac(x0).dtype == np.float32
|
684 |
+
|
685 |
+
|
686 |
+
def test_LinearVectorFunction():
|
687 |
+
A_dense = np.array([
|
688 |
+
[-1, 2, 0],
|
689 |
+
[0, 4, 2]
|
690 |
+
])
|
691 |
+
x0 = np.zeros(3)
|
692 |
+
A_sparse = csr_matrix(A_dense)
|
693 |
+
x = np.array([1, -1, 0])
|
694 |
+
v = np.array([-1, 1])
|
695 |
+
Ax = np.array([-3, -4])
|
696 |
+
|
697 |
+
f1 = LinearVectorFunction(A_dense, x0, None)
|
698 |
+
assert_(not f1.sparse_jacobian)
|
699 |
+
|
700 |
+
f2 = LinearVectorFunction(A_dense, x0, True)
|
701 |
+
assert_(f2.sparse_jacobian)
|
702 |
+
|
703 |
+
f3 = LinearVectorFunction(A_dense, x0, False)
|
704 |
+
assert_(not f3.sparse_jacobian)
|
705 |
+
|
706 |
+
f4 = LinearVectorFunction(A_sparse, x0, None)
|
707 |
+
assert_(f4.sparse_jacobian)
|
708 |
+
|
709 |
+
f5 = LinearVectorFunction(A_sparse, x0, True)
|
710 |
+
assert_(f5.sparse_jacobian)
|
711 |
+
|
712 |
+
f6 = LinearVectorFunction(A_sparse, x0, False)
|
713 |
+
assert_(not f6.sparse_jacobian)
|
714 |
+
|
715 |
+
assert_array_equal(f1.fun(x), Ax)
|
716 |
+
assert_array_equal(f2.fun(x), Ax)
|
717 |
+
assert_array_equal(f1.jac(x), A_dense)
|
718 |
+
assert_array_equal(f2.jac(x).toarray(), A_sparse.toarray())
|
719 |
+
assert_array_equal(f1.hess(x, v).toarray(), np.zeros((3, 3)))
|
720 |
+
|
721 |
+
|
722 |
+
def test_LinearVectorFunction_memoization():
|
723 |
+
A = np.array([[-1, 2, 0], [0, 4, 2]])
|
724 |
+
x0 = np.array([1, 2, -1])
|
725 |
+
fun = LinearVectorFunction(A, x0, False)
|
726 |
+
|
727 |
+
assert_array_equal(x0, fun.x)
|
728 |
+
assert_array_equal(A.dot(x0), fun.f)
|
729 |
+
|
730 |
+
x1 = np.array([-1, 3, 10])
|
731 |
+
assert_array_equal(A, fun.jac(x1))
|
732 |
+
assert_array_equal(x1, fun.x)
|
733 |
+
assert_array_equal(A.dot(x0), fun.f)
|
734 |
+
assert_array_equal(A.dot(x1), fun.fun(x1))
|
735 |
+
assert_array_equal(A.dot(x1), fun.f)
|
736 |
+
|
737 |
+
|
738 |
+
def test_IdentityVectorFunction():
|
739 |
+
x0 = np.zeros(3)
|
740 |
+
|
741 |
+
f1 = IdentityVectorFunction(x0, None)
|
742 |
+
f2 = IdentityVectorFunction(x0, False)
|
743 |
+
f3 = IdentityVectorFunction(x0, True)
|
744 |
+
|
745 |
+
assert_(f1.sparse_jacobian)
|
746 |
+
assert_(not f2.sparse_jacobian)
|
747 |
+
assert_(f3.sparse_jacobian)
|
748 |
+
|
749 |
+
x = np.array([-1, 2, 1])
|
750 |
+
v = np.array([-2, 3, 0])
|
751 |
+
|
752 |
+
assert_array_equal(f1.fun(x), x)
|
753 |
+
assert_array_equal(f2.fun(x), x)
|
754 |
+
|
755 |
+
assert_array_equal(f1.jac(x).toarray(), np.eye(3))
|
756 |
+
assert_array_equal(f2.jac(x), np.eye(3))
|
757 |
+
|
758 |
+
assert_array_equal(f1.hess(x, v).toarray(), np.zeros((3, 3)))
|
llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiate.py
ADDED
@@ -0,0 +1,396 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
|
3 |
+
import numpy as np
|
4 |
+
from numpy.testing import assert_array_less, assert_allclose, assert_equal
|
5 |
+
|
6 |
+
import scipy._lib._elementwise_iterative_method as eim
|
7 |
+
from scipy import stats
|
8 |
+
from scipy.optimize._differentiate import (_differentiate as differentiate,
|
9 |
+
_EERRORINCREASE)
|
10 |
+
|
11 |
+
class TestDifferentiate:
|
12 |
+
|
13 |
+
def f(self, x):
|
14 |
+
return stats.norm().cdf(x)
|
15 |
+
|
16 |
+
@pytest.mark.parametrize('x', [0.6, np.linspace(-0.05, 1.05, 10)])
|
17 |
+
def test_basic(self, x):
|
18 |
+
# Invert distribution CDF and compare against distribution `ppf`
|
19 |
+
res = differentiate(self.f, x)
|
20 |
+
ref = stats.norm().pdf(x)
|
21 |
+
np.testing.assert_allclose(res.df, ref)
|
22 |
+
# This would be nice, but doesn't always work out. `error` is an
|
23 |
+
# estimate, not a bound.
|
24 |
+
assert_array_less(abs(res.df - ref), res.error)
|
25 |
+
assert res.x.shape == ref.shape
|
26 |
+
|
27 |
+
@pytest.mark.parametrize('case', stats._distr_params.distcont)
|
28 |
+
def test_accuracy(self, case):
|
29 |
+
distname, params = case
|
30 |
+
dist = getattr(stats, distname)(*params)
|
31 |
+
x = dist.median() + 0.1
|
32 |
+
res = differentiate(dist.cdf, x)
|
33 |
+
ref = dist.pdf(x)
|
34 |
+
assert_allclose(res.df, ref, atol=1e-10)
|
35 |
+
|
36 |
+
@pytest.mark.parametrize('order', [1, 6])
|
37 |
+
@pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)])
|
38 |
+
def test_vectorization(self, order, shape):
|
39 |
+
# Test for correct functionality, output shapes, and dtypes for various
|
40 |
+
# input shapes.
|
41 |
+
x = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6
|
42 |
+
n = np.size(x)
|
43 |
+
|
44 |
+
@np.vectorize
|
45 |
+
def _differentiate_single(x):
|
46 |
+
return differentiate(self.f, x, order=order)
|
47 |
+
|
48 |
+
def f(x, *args, **kwargs):
|
49 |
+
f.nit += 1
|
50 |
+
f.feval += 1 if (x.size == n or x.ndim <=1) else x.shape[-1]
|
51 |
+
return self.f(x, *args, **kwargs)
|
52 |
+
f.nit = -1
|
53 |
+
f.feval = 0
|
54 |
+
|
55 |
+
res = differentiate(f, x, order=order)
|
56 |
+
refs = _differentiate_single(x).ravel()
|
57 |
+
|
58 |
+
ref_x = [ref.x for ref in refs]
|
59 |
+
assert_allclose(res.x.ravel(), ref_x)
|
60 |
+
assert_equal(res.x.shape, shape)
|
61 |
+
|
62 |
+
ref_df = [ref.df for ref in refs]
|
63 |
+
assert_allclose(res.df.ravel(), ref_df)
|
64 |
+
assert_equal(res.df.shape, shape)
|
65 |
+
|
66 |
+
ref_error = [ref.error for ref in refs]
|
67 |
+
assert_allclose(res.error.ravel(), ref_error, atol=5e-15)
|
68 |
+
assert_equal(res.error.shape, shape)
|
69 |
+
|
70 |
+
ref_success = [ref.success for ref in refs]
|
71 |
+
assert_equal(res.success.ravel(), ref_success)
|
72 |
+
assert_equal(res.success.shape, shape)
|
73 |
+
assert np.issubdtype(res.success.dtype, np.bool_)
|
74 |
+
|
75 |
+
ref_flag = [ref.status for ref in refs]
|
76 |
+
assert_equal(res.status.ravel(), ref_flag)
|
77 |
+
assert_equal(res.status.shape, shape)
|
78 |
+
assert np.issubdtype(res.status.dtype, np.integer)
|
79 |
+
|
80 |
+
ref_nfev = [ref.nfev for ref in refs]
|
81 |
+
assert_equal(res.nfev.ravel(), ref_nfev)
|
82 |
+
assert_equal(np.max(res.nfev), f.feval)
|
83 |
+
assert_equal(res.nfev.shape, res.x.shape)
|
84 |
+
assert np.issubdtype(res.nfev.dtype, np.integer)
|
85 |
+
|
86 |
+
ref_nit = [ref.nit for ref in refs]
|
87 |
+
assert_equal(res.nit.ravel(), ref_nit)
|
88 |
+
assert_equal(np.max(res.nit), f.nit)
|
89 |
+
assert_equal(res.nit.shape, res.x.shape)
|
90 |
+
assert np.issubdtype(res.nit.dtype, np.integer)
|
91 |
+
|
92 |
+
def test_flags(self):
|
93 |
+
# Test cases that should produce different status flags; show that all
|
94 |
+
# can be produced simultaneously.
|
95 |
+
rng = np.random.default_rng(5651219684984213)
|
96 |
+
def f(xs, js):
|
97 |
+
f.nit += 1
|
98 |
+
funcs = [lambda x: x - 2.5, # converges
|
99 |
+
lambda x: np.exp(x)*rng.random(), # error increases
|
100 |
+
lambda x: np.exp(x), # reaches maxiter due to order=2
|
101 |
+
lambda x: np.full_like(x, np.nan)[()]] # stops due to NaN
|
102 |
+
res = [funcs[j](x) for x, j in zip(xs, js.ravel())]
|
103 |
+
return res
|
104 |
+
f.nit = 0
|
105 |
+
|
106 |
+
args = (np.arange(4, dtype=np.int64),)
|
107 |
+
res = differentiate(f, [1]*4, rtol=1e-14, order=2, args=args)
|
108 |
+
|
109 |
+
ref_flags = np.array([eim._ECONVERGED,
|
110 |
+
_EERRORINCREASE,
|
111 |
+
eim._ECONVERR,
|
112 |
+
eim._EVALUEERR])
|
113 |
+
assert_equal(res.status, ref_flags)
|
114 |
+
|
115 |
+
def test_flags_preserve_shape(self):
|
116 |
+
# Same test as above but using `preserve_shape` option to simplify.
|
117 |
+
rng = np.random.default_rng(5651219684984213)
|
118 |
+
def f(x):
|
119 |
+
return [x - 2.5, # converges
|
120 |
+
np.exp(x)*rng.random(), # error increases
|
121 |
+
np.exp(x), # reaches maxiter due to order=2
|
122 |
+
np.full_like(x, np.nan)[()]] # stops due to NaN
|
123 |
+
|
124 |
+
res = differentiate(f, 1, rtol=1e-14, order=2, preserve_shape=True)
|
125 |
+
|
126 |
+
ref_flags = np.array([eim._ECONVERGED,
|
127 |
+
_EERRORINCREASE,
|
128 |
+
eim._ECONVERR,
|
129 |
+
eim._EVALUEERR])
|
130 |
+
assert_equal(res.status, ref_flags)
|
131 |
+
|
132 |
+
def test_preserve_shape(self):
|
133 |
+
# Test `preserve_shape` option
|
134 |
+
def f(x):
|
135 |
+
return [x, np.sin(3*x), x+np.sin(10*x), np.sin(20*x)*(x-1)**2]
|
136 |
+
|
137 |
+
x = 0
|
138 |
+
ref = [1, 3*np.cos(3*x), 1+10*np.cos(10*x),
|
139 |
+
20*np.cos(20*x)*(x-1)**2 + 2*np.sin(20*x)*(x-1)]
|
140 |
+
res = differentiate(f, x, preserve_shape=True)
|
141 |
+
assert_allclose(res.df, ref)
|
142 |
+
|
143 |
+
def test_convergence(self):
|
144 |
+
# Test that the convergence tolerances behave as expected
|
145 |
+
dist = stats.norm()
|
146 |
+
x = 1
|
147 |
+
f = dist.cdf
|
148 |
+
ref = dist.pdf(x)
|
149 |
+
kwargs0 = dict(atol=0, rtol=0, order=4)
|
150 |
+
|
151 |
+
kwargs = kwargs0.copy()
|
152 |
+
kwargs['atol'] = 1e-3
|
153 |
+
res1 = differentiate(f, x, **kwargs)
|
154 |
+
assert_array_less(abs(res1.df - ref), 1e-3)
|
155 |
+
kwargs['atol'] = 1e-6
|
156 |
+
res2 = differentiate(f, x, **kwargs)
|
157 |
+
assert_array_less(abs(res2.df - ref), 1e-6)
|
158 |
+
assert_array_less(abs(res2.df - ref), abs(res1.df - ref))
|
159 |
+
|
160 |
+
kwargs = kwargs0.copy()
|
161 |
+
kwargs['rtol'] = 1e-3
|
162 |
+
res1 = differentiate(f, x, **kwargs)
|
163 |
+
assert_array_less(abs(res1.df - ref), 1e-3 * np.abs(ref))
|
164 |
+
kwargs['rtol'] = 1e-6
|
165 |
+
res2 = differentiate(f, x, **kwargs)
|
166 |
+
assert_array_less(abs(res2.df - ref), 1e-6 * np.abs(ref))
|
167 |
+
assert_array_less(abs(res2.df - ref), abs(res1.df - ref))
|
168 |
+
|
169 |
+
def test_step_parameters(self):
|
170 |
+
# Test that step factors have the expected effect on accuracy
|
171 |
+
dist = stats.norm()
|
172 |
+
x = 1
|
173 |
+
f = dist.cdf
|
174 |
+
ref = dist.pdf(x)
|
175 |
+
|
176 |
+
res1 = differentiate(f, x, initial_step=0.5, maxiter=1)
|
177 |
+
res2 = differentiate(f, x, initial_step=0.05, maxiter=1)
|
178 |
+
assert abs(res2.df - ref) < abs(res1.df - ref)
|
179 |
+
|
180 |
+
res1 = differentiate(f, x, step_factor=2, maxiter=1)
|
181 |
+
res2 = differentiate(f, x, step_factor=20, maxiter=1)
|
182 |
+
assert abs(res2.df - ref) < abs(res1.df - ref)
|
183 |
+
|
184 |
+
# `step_factor` can be less than 1: `initial_step` is the minimum step
|
185 |
+
kwargs = dict(order=4, maxiter=1, step_direction=0)
|
186 |
+
res = differentiate(f, x, initial_step=0.5, step_factor=0.5, **kwargs)
|
187 |
+
ref = differentiate(f, x, initial_step=1, step_factor=2, **kwargs)
|
188 |
+
assert_allclose(res.df, ref.df, rtol=5e-15)
|
189 |
+
|
190 |
+
# This is a similar test for one-sided difference
|
191 |
+
kwargs = dict(order=2, maxiter=1, step_direction=1)
|
192 |
+
res = differentiate(f, x, initial_step=1, step_factor=2, **kwargs)
|
193 |
+
ref = differentiate(f, x, initial_step=1/np.sqrt(2), step_factor=0.5,
|
194 |
+
**kwargs)
|
195 |
+
assert_allclose(res.df, ref.df, rtol=5e-15)
|
196 |
+
|
197 |
+
kwargs['step_direction'] = -1
|
198 |
+
res = differentiate(f, x, initial_step=1, step_factor=2, **kwargs)
|
199 |
+
ref = differentiate(f, x, initial_step=1/np.sqrt(2), step_factor=0.5,
|
200 |
+
**kwargs)
|
201 |
+
assert_allclose(res.df, ref.df, rtol=5e-15)
|
202 |
+
|
203 |
+
def test_step_direction(self):
|
204 |
+
# test that `step_direction` works as expected
|
205 |
+
def f(x):
|
206 |
+
y = np.exp(x)
|
207 |
+
y[(x < 0) + (x > 2)] = np.nan
|
208 |
+
return y
|
209 |
+
|
210 |
+
x = np.linspace(0, 2, 10)
|
211 |
+
step_direction = np.zeros_like(x)
|
212 |
+
step_direction[x < 0.6], step_direction[x > 1.4] = 1, -1
|
213 |
+
res = differentiate(f, x, step_direction=step_direction)
|
214 |
+
assert_allclose(res.df, np.exp(x))
|
215 |
+
assert np.all(res.success)
|
216 |
+
|
217 |
+
def test_vectorized_step_direction_args(self):
|
218 |
+
# test that `step_direction` and `args` are vectorized properly
|
219 |
+
def f(x, p):
|
220 |
+
return x ** p
|
221 |
+
|
222 |
+
def df(x, p):
|
223 |
+
return p * x ** (p - 1)
|
224 |
+
|
225 |
+
x = np.array([1, 2, 3, 4]).reshape(-1, 1, 1)
|
226 |
+
hdir = np.array([-1, 0, 1]).reshape(1, -1, 1)
|
227 |
+
p = np.array([2, 3]).reshape(1, 1, -1)
|
228 |
+
res = differentiate(f, x, step_direction=hdir, args=(p,))
|
229 |
+
ref = np.broadcast_to(df(x, p), res.df.shape)
|
230 |
+
assert_allclose(res.df, ref)
|
231 |
+
|
232 |
+
def test_maxiter_callback(self):
|
233 |
+
# Test behavior of `maxiter` parameter and `callback` interface
|
234 |
+
x = 0.612814
|
235 |
+
dist = stats.norm()
|
236 |
+
maxiter = 3
|
237 |
+
|
238 |
+
def f(x):
|
239 |
+
res = dist.cdf(x)
|
240 |
+
return res
|
241 |
+
|
242 |
+
default_order = 8
|
243 |
+
res = differentiate(f, x, maxiter=maxiter, rtol=1e-15)
|
244 |
+
assert not np.any(res.success)
|
245 |
+
assert np.all(res.nfev == default_order + 1 + (maxiter - 1)*2)
|
246 |
+
assert np.all(res.nit == maxiter)
|
247 |
+
|
248 |
+
def callback(res):
|
249 |
+
callback.iter += 1
|
250 |
+
callback.res = res
|
251 |
+
assert hasattr(res, 'x')
|
252 |
+
assert res.df not in callback.dfs
|
253 |
+
callback.dfs.add(res.df)
|
254 |
+
assert res.status == eim._EINPROGRESS
|
255 |
+
if callback.iter == maxiter:
|
256 |
+
raise StopIteration
|
257 |
+
callback.iter = -1 # callback called once before first iteration
|
258 |
+
callback.res = None
|
259 |
+
callback.dfs = set()
|
260 |
+
|
261 |
+
res2 = differentiate(f, x, callback=callback, rtol=1e-15)
|
262 |
+
# terminating with callback is identical to terminating due to maxiter
|
263 |
+
# (except for `status`)
|
264 |
+
for key in res.keys():
|
265 |
+
if key == 'status':
|
266 |
+
assert res[key] == eim._ECONVERR
|
267 |
+
assert callback.res[key] == eim._EINPROGRESS
|
268 |
+
assert res2[key] == eim._ECALLBACK
|
269 |
+
else:
|
270 |
+
assert res2[key] == callback.res[key] == res[key]
|
271 |
+
|
272 |
+
@pytest.mark.parametrize("hdir", (-1, 0, 1))
|
273 |
+
@pytest.mark.parametrize("x", (0.65, [0.65, 0.7]))
|
274 |
+
@pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64))
|
275 |
+
def test_dtype(self, hdir, x, dtype):
|
276 |
+
# Test that dtypes are preserved
|
277 |
+
x = np.asarray(x, dtype=dtype)[()]
|
278 |
+
|
279 |
+
def f(x):
|
280 |
+
assert x.dtype == dtype
|
281 |
+
return np.exp(x)
|
282 |
+
|
283 |
+
def callback(res):
|
284 |
+
assert res.x.dtype == dtype
|
285 |
+
assert res.df.dtype == dtype
|
286 |
+
assert res.error.dtype == dtype
|
287 |
+
|
288 |
+
res = differentiate(f, x, order=4, step_direction=hdir,
|
289 |
+
callback=callback)
|
290 |
+
assert res.x.dtype == dtype
|
291 |
+
assert res.df.dtype == dtype
|
292 |
+
assert res.error.dtype == dtype
|
293 |
+
eps = np.finfo(dtype).eps
|
294 |
+
assert_allclose(res.df, np.exp(res.x), rtol=np.sqrt(eps))
|
295 |
+
|
296 |
+
def test_input_validation(self):
|
297 |
+
# Test input validation for appropriate error messages
|
298 |
+
|
299 |
+
message = '`func` must be callable.'
|
300 |
+
with pytest.raises(ValueError, match=message):
|
301 |
+
differentiate(None, 1)
|
302 |
+
|
303 |
+
message = 'Abscissae and function output must be real numbers.'
|
304 |
+
with pytest.raises(ValueError, match=message):
|
305 |
+
differentiate(lambda x: x, -4+1j)
|
306 |
+
|
307 |
+
message = "When `preserve_shape=False`, the shape of the array..."
|
308 |
+
with pytest.raises(ValueError, match=message):
|
309 |
+
differentiate(lambda x: [1, 2, 3], [-2, -3])
|
310 |
+
|
311 |
+
message = 'Tolerances and step parameters must be non-negative...'
|
312 |
+
with pytest.raises(ValueError, match=message):
|
313 |
+
differentiate(lambda x: x, 1, atol=-1)
|
314 |
+
with pytest.raises(ValueError, match=message):
|
315 |
+
differentiate(lambda x: x, 1, rtol='ekki')
|
316 |
+
with pytest.raises(ValueError, match=message):
|
317 |
+
differentiate(lambda x: x, 1, initial_step=None)
|
318 |
+
with pytest.raises(ValueError, match=message):
|
319 |
+
differentiate(lambda x: x, 1, step_factor=object())
|
320 |
+
|
321 |
+
message = '`maxiter` must be a positive integer.'
|
322 |
+
with pytest.raises(ValueError, match=message):
|
323 |
+
differentiate(lambda x: x, 1, maxiter=1.5)
|
324 |
+
with pytest.raises(ValueError, match=message):
|
325 |
+
differentiate(lambda x: x, 1, maxiter=0)
|
326 |
+
|
327 |
+
message = '`order` must be a positive integer'
|
328 |
+
with pytest.raises(ValueError, match=message):
|
329 |
+
differentiate(lambda x: x, 1, order=1.5)
|
330 |
+
with pytest.raises(ValueError, match=message):
|
331 |
+
differentiate(lambda x: x, 1, order=0)
|
332 |
+
|
333 |
+
message = '`preserve_shape` must be True or False.'
|
334 |
+
with pytest.raises(ValueError, match=message):
|
335 |
+
differentiate(lambda x: x, 1, preserve_shape='herring')
|
336 |
+
|
337 |
+
message = '`callback` must be callable.'
|
338 |
+
with pytest.raises(ValueError, match=message):
|
339 |
+
differentiate(lambda x: x, 1, callback='shrubbery')
|
340 |
+
|
341 |
+
def test_special_cases(self):
|
342 |
+
# Test edge cases and other special cases
|
343 |
+
|
344 |
+
# Test that integers are not passed to `f`
|
345 |
+
# (otherwise this would overflow)
|
346 |
+
def f(x):
|
347 |
+
assert np.issubdtype(x.dtype, np.floating)
|
348 |
+
return x ** 99 - 1
|
349 |
+
|
350 |
+
res = differentiate(f, 7, rtol=1e-10)
|
351 |
+
assert res.success
|
352 |
+
assert_allclose(res.df, 99*7.**98)
|
353 |
+
|
354 |
+
# Test that if success is achieved in the correct number
|
355 |
+
# of iterations if function is a polynomial. Ideally, all polynomials
|
356 |
+
# of order 0-2 would get exact result with 0 refinement iterations,
|
357 |
+
# all polynomials of order 3-4 would be differentiated exactly after
|
358 |
+
# 1 iteration, etc. However, it seems that _differentiate needs an
|
359 |
+
# extra iteration to detect convergence based on the error estimate.
|
360 |
+
|
361 |
+
for n in range(6):
|
362 |
+
x = 1.5
|
363 |
+
def f(x):
|
364 |
+
return 2*x**n
|
365 |
+
|
366 |
+
ref = 2*n*x**(n-1)
|
367 |
+
|
368 |
+
res = differentiate(f, x, maxiter=1, order=max(1, n))
|
369 |
+
assert_allclose(res.df, ref, rtol=1e-15)
|
370 |
+
assert_equal(res.error, np.nan)
|
371 |
+
|
372 |
+
res = differentiate(f, x, order=max(1, n))
|
373 |
+
assert res.success
|
374 |
+
assert res.nit == 2
|
375 |
+
assert_allclose(res.df, ref, rtol=1e-15)
|
376 |
+
|
377 |
+
# Test scalar `args` (not in tuple)
|
378 |
+
def f(x, c):
|
379 |
+
return c*x - 1
|
380 |
+
|
381 |
+
res = differentiate(f, 2, args=3)
|
382 |
+
assert_allclose(res.df, 3)
|
383 |
+
|
384 |
+
@pytest.mark.xfail
|
385 |
+
@pytest.mark.parametrize("case", ( # function, evaluation point
|
386 |
+
(lambda x: (x - 1) ** 3, 1),
|
387 |
+
(lambda x: np.where(x > 1, (x - 1) ** 5, (x - 1) ** 3), 1)
|
388 |
+
))
|
389 |
+
def test_saddle_gh18811(self, case):
|
390 |
+
# With default settings, _differentiate will not always converge when
|
391 |
+
# the true derivative is exactly zero. This tests that specifying a
|
392 |
+
# (tight) `atol` alleviates the problem. See discussion in gh-18811.
|
393 |
+
atol = 1e-16
|
394 |
+
res = differentiate(*case, step_direction=[-1, 0, 1], atol=atol)
|
395 |
+
assert np.all(res.success)
|
396 |
+
assert_allclose(res.df, 0, atol=atol)
|