applied-ai-018 committed on

Commit 6fc8360 · verified · 1 Parent(s): ea4d17f

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.

Files changed (50)
  1. .gitattributes +1 -0
  2. env-llmeval/lib/python3.10/site-packages/scipy/fft/_pocketfft/pypocketfft.cpython-310-x86_64-linux-gnu.so +3 -0
  3. env-llmeval/lib/python3.10/site-packages/scipy/optimize/README +76 -0
  4. env-llmeval/lib/python3.10/site-packages/scipy/optimize/__init__.py +451 -0
  5. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_differentialevolution.py +1897 -0
  6. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/__init__.py +0 -0
  7. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/__pycache__/__init__.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HConst.pxd +106 -0
  9. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/Highs.pxd +56 -0
  10. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsIO.pxd +20 -0
  11. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsInfo.pxd +22 -0
  12. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLpUtils.pxd +9 -0
  13. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsModelUtils.pxd +10 -0
  14. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsOptions.pxd +110 -0
  15. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsRuntimeOptions.pxd +9 -0
  16. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsStatus.pxd +12 -0
  17. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/SimplexConst.pxd +95 -0
  18. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/highs_c_api.pxd +7 -0
  19. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_isotonic.py +158 -0
  20. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog_doc.py +1434 -0
  21. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog_highs.py +440 -0
  22. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog_rs.py +572 -0
  23. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog_simplex.py +661 -0
  24. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/common.py +733 -0
  25. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/dogbox.py +331 -0
  26. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/least_squares.py +967 -0
  27. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/lsq_linear.py +362 -0
  28. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/trf_linear.py +249 -0
  29. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_minimize.py +1094 -0
  30. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_minpack.cpython-310-x86_64-linux-gnu.so +0 -0
  31. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_qap.py +731 -0
  32. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_slsqp.cpython-310-x86_64-linux-gnu.so +0 -0
  33. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_krylov.py +65 -0
  34. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__init__.py +0 -0
  35. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__basinhopping.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test__basinhopping.py +525 -0
  37. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test__differential_evolution.py +1677 -0
  38. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test__dual_annealing.py +379 -0
  39. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test__linprog_clean_inputs.py +310 -0
  40. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test__numdiff.py +815 -0
  41. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test__remove_redundancy.py +228 -0
  42. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test__root.py +123 -0
  43. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test__shgo.py +1159 -0
  44. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test__spectral.py +226 -0
  45. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test_bracket.py +780 -0
  46. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test_chandrupatla.py +827 -0
  47. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test_cobyla.py +166 -0
  48. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test_constraint_conversion.py +274 -0
  49. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test_constraints.py +255 -0
  50. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test_cython_optimize.py +92 -0
.gitattributes CHANGED
@@ -166,3 +166,4 @@ env-llmeval/lib/python3.10/site-packages/scipy/stats/_unuran/unuran_wrapper.cpyt
  env-llmeval/lib/python3.10/site-packages/scipy/special/_ufuncs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  env-llmeval/lib/python3.10/site-packages/scipy/spatial/_ckdtree.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  env-llmeval/lib/python3.10/site-packages/scipy/spatial/_qhull.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ env-llmeval/lib/python3.10/site-packages/scipy/fft/_pocketfft/pypocketfft.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
env-llmeval/lib/python3.10/site-packages/scipy/fft/_pocketfft/pypocketfft.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9faaa2f0339885c57273254ce48313ce6a25143417c9618a63a1ea97df886369
+ size 1197600
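
The three lines above are a Git LFS pointer: the repository stores only this stub and fetches the real 1.2 MB shared object by its `oid` at checkout. As an illustrative aside (the helper below is my own sketch, not part of the commit), the `key value` layout parses trivially:

# Minimal sketch: parse a git-lfs pointer file like the one added above.
# Assumes the standard three-line "key value" layout (version, oid, size).

def parse_lfs_pointer(text: str) -> dict:
    """Return the pointer's fields, e.g. {'version': ..., 'oid': ..., 'size': ...}."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    # 'size' is the byte count of the real object held in LFS storage
    fields["size"] = int(fields["size"])
    return fields

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:9faaa2f0339885c57273254ce48313ce6a25143417c9618a63a1ea97df886369
size 1197600
"""
print(parse_lfs_pointer(pointer)["size"])  # -> 1197600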
env-llmeval/lib/python3.10/site-packages/scipy/optimize/README ADDED
@@ -0,0 +1,76 @@
+ From the website for the L-BFGS-B code (from
+ http://www.ece.northwestern.edu/~nocedal/lbfgsb.html):
+
+ """
+ L-BFGS-B is a limited-memory quasi-Newton code for bound-constrained
+ optimization, i.e. for problems where the only constraints are of the
+ form l <= x <= u.
+ """
+
+ This is a Python wrapper (using F2PY) written by David M. Cooke
+ <[email protected]> and released as version 0.9 on April 9, 2004.
+ The wrapper was slightly modified by Joonas Paalasmaa for the 3.0 version
+ in March 2012.
+
+ License of L-BFGS-B (Fortran code)
+ ==================================
+
+ The version included here (in lbfgsb.f) is 3.0 (released April 25, 2011). It was
+ written by Ciyou Zhu, Richard Byrd, and Jorge Nocedal <[email protected]>. It
+ carries the following condition for use:
+
+ """
+ This software is freely available, but we expect that all publications
+ describing work using this software, or all commercial products using it,
+ quote at least one of the references given below. This software is released
+ under the BSD License.
+
+ References
+ * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound
+   Constrained Optimization, (1995), SIAM Journal on Scientific and
+   Statistical Computing, 16, 5, pp. 1190-1208.
+ * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
+   FORTRAN routines for large scale bound constrained optimization (1997),
+   ACM Transactions on Mathematical Software, 23, 4, pp. 550-560.
+ * J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B,
+   FORTRAN routines for large scale bound constrained optimization (2011),
+   ACM Transactions on Mathematical Software, 38, 1.
+ """
+
+ The Python wrapper
+ ==================
+
+ This code uses F2PY (http://cens.ioc.ee/projects/f2py2e/) to generate
+ the wrapper around the Fortran code.
+
+ The Python code and wrapper are copyrighted 2004 by David M. Cooke.
+
+ Example usage
+ =============
+
+ An example of the usage is given at the bottom of the lbfgsb.py file.
+ Run it with 'python lbfgsb.py'.
+
+ License for the Python wrapper
+ ==============================
+
+ Copyright (c) 2004 David M. Cooke <[email protected]>
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
+ this software and associated documentation files (the "Software"), to deal in
+ the Software without restriction, including without limitation the rights to
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is furnished to do
+ so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
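
The README defers to a demo at the bottom of lbfgsb.py, which is outside this 50-file view. As a stand-in, here is a minimal hedged sketch that exercises the same wrapper through its public SciPy entry point, scipy.optimize.fmin_l_bfgs_b; the quadratic objective and the bounds are illustrative assumptions, not taken from the commit:

# Minimal sketch of the L-BFGS-B wrapper via its public SciPy entry point.
# Only fmin_l_bfgs_b itself comes from the package being vendored here;
# the objective is a made-up bound-constrained quadratic.
import numpy as np
from scipy.optimize import fmin_l_bfgs_b

def objective(x):
    # f(x) = (x0 - 1)^2 + (x1 + 2)^2, unconstrained minimum at (1, -2)
    return (x[0] - 1.0) ** 2 + (x[1] + 2.0) ** 2

def gradient(x):
    return np.array([2.0 * (x[0] - 1.0), 2.0 * (x[1] + 2.0)])

# Bound constraints of the form l <= x <= u, as the README describes.
x_opt, f_opt, info = fmin_l_bfgs_b(objective, x0=np.zeros(2),
                                   fprime=gradient,
                                   bounds=[(0.0, 2.0), (-1.0, 1.0)])
print(x_opt, f_opt)  # the bound on x1 is active: x1 -> -1

The third return value is the info dict produced by the driver (gradient at the solution, function-call and iteration counts, and a `warnflag` status).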
env-llmeval/lib/python3.10/site-packages/scipy/optimize/__init__.py ADDED
@@ -0,0 +1,451 @@
+ """
+ =====================================================
+ Optimization and root finding (:mod:`scipy.optimize`)
+ =====================================================
+
+ .. currentmodule:: scipy.optimize
+
+ .. toctree::
+    :hidden:
+
+    optimize.cython_optimize
+
+ SciPy ``optimize`` provides functions for minimizing (or maximizing)
+ objective functions, possibly subject to constraints. It includes
+ solvers for nonlinear problems (with support for both local and global
+ optimization algorithms), linear programming, constrained
+ and nonlinear least-squares, root finding, and curve fitting.
+
+ Common functions and objects, shared across different solvers, are:
+
+ .. autosummary::
+    :toctree: generated/
+
+    show_options - Show specific options for optimization solvers.
+    OptimizeResult - The optimization result returned by some optimizers.
+    OptimizeWarning - The optimization encountered problems.
+
+
+ Optimization
+ ============
+
+ Scalar functions optimization
+ -----------------------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    minimize_scalar - Interface for minimizers of univariate functions.
+
+ The `minimize_scalar` function supports the following methods:
+
+ .. toctree::
+
+    optimize.minimize_scalar-brent
+    optimize.minimize_scalar-bounded
+    optimize.minimize_scalar-golden
+
+ Local (multivariate) optimization
+ ---------------------------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    minimize - Interface for minimizers of multivariate functions.
+
+ The `minimize` function supports the following methods:
+
+ .. toctree::
+
+    optimize.minimize-neldermead
+    optimize.minimize-powell
+    optimize.minimize-cg
+    optimize.minimize-bfgs
+    optimize.minimize-newtoncg
+    optimize.minimize-lbfgsb
+    optimize.minimize-tnc
+    optimize.minimize-cobyla
+    optimize.minimize-slsqp
+    optimize.minimize-trustconstr
+    optimize.minimize-dogleg
+    optimize.minimize-trustncg
+    optimize.minimize-trustkrylov
+    optimize.minimize-trustexact
+
+ Constraints are passed to the `minimize` function as a single object or
+ as a list of objects from the following classes:
+
+ .. autosummary::
+    :toctree: generated/
+
+    NonlinearConstraint - Class defining general nonlinear constraints.
+    LinearConstraint - Class defining general linear constraints.
+
+ Simple bound constraints are handled separately and there is a special class
+ for them:
+
+ .. autosummary::
+    :toctree: generated/
+
+    Bounds - Bound constraints.
+
+ Quasi-Newton strategies implementing the `HessianUpdateStrategy`
+ interface can be used to approximate the Hessian in the `minimize`
+ function (available only for the 'trust-constr' method). Available
+ quasi-Newton methods implementing this interface are:
+
+ .. autosummary::
+    :toctree: generated/
+
+    BFGS - Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy.
+    SR1 - Symmetric-rank-1 Hessian update strategy.
+
+ .. _global_optimization:
+
+ Global optimization
+ -------------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    basinhopping - Basinhopping stochastic optimizer.
+    brute - Brute force searching optimizer.
+    differential_evolution - Stochastic optimizer using differential evolution.
+    shgo - Simplicial homology global optimizer.
+    dual_annealing - Dual annealing stochastic optimizer.
+    direct - DIRECT (Dividing Rectangles) optimizer.
+
+ Least-squares and curve fitting
+ ===============================
+
+ Nonlinear least-squares
+ -----------------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    least_squares - Solve a nonlinear least-squares problem with bounds on the variables.
+
+ Linear least-squares
+ --------------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    nnls - Linear least-squares problem with non-negativity constraint.
+    lsq_linear - Linear least-squares problem with bound constraints.
+    isotonic_regression - Least squares problem of isotonic regression via PAVA.
+
+ Curve fitting
+ -------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    curve_fit -- Fit curve to a set of points.
+
+ Root finding
+ ============
+
+ Scalar functions
+ ----------------
+ .. autosummary::
+    :toctree: generated/
+
+    root_scalar - Unified interface for nonlinear solvers of scalar functions.
+    brentq - Brent's method with inverse quadratic interpolation.
+    brenth - Brent's method, modified by Harris with hyperbolic extrapolation.
+    ridder - Ridder's method.
+    bisect - Bisection method.
+    newton - Newton's method (also Secant and Halley's methods).
+    toms748 - Alefeld, Potra & Shi Algorithm 748.
+    RootResults - The root finding result returned by some root finders.
+
+ The `root_scalar` function supports the following methods:
+
+ .. toctree::
+
+    optimize.root_scalar-brentq
+    optimize.root_scalar-brenth
+    optimize.root_scalar-bisect
+    optimize.root_scalar-ridder
+    optimize.root_scalar-newton
+    optimize.root_scalar-toms748
+    optimize.root_scalar-secant
+    optimize.root_scalar-halley
+
+ The table below lists situations and appropriate methods, along with
+ *asymptotic* convergence rates per iteration (and per function evaluation)
+ for successful convergence to a simple root(*).
+ Bisection is the slowest of them all, adding one bit of accuracy for each
+ function evaluation, but is guaranteed to converge.
+ The other bracketing methods all (eventually) increase the number of accurate
+ bits by about 50% for every function evaluation.
+ The derivative-based methods, all built on `newton`, can converge quite quickly
+ if the initial value is close to the root. They can also be applied to
+ functions defined on (a subset of) the complex plane.
+
+ +-------------+----------+----------+-----------+-------------+-------------+----------------+
+ | Domain of f | Bracket? |    Derivatives?      |   Solvers   |         Convergence          |
+ +             +          +----------+-----------+             +-------------+----------------+
+ |             |          | `fprime` | `fprime2` |             | Guaranteed? |  Rate(s)(*)    |
+ +=============+==========+==========+===========+=============+=============+================+
+ | `R`         | Yes      | N/A      | N/A       | - bisection | - Yes       | - 1 "Linear"   |
+ |             |          |          |           | - brentq    | - Yes       | - >=1, <= 1.62 |
+ |             |          |          |           | - brenth    | - Yes       | - >=1, <= 1.62 |
+ |             |          |          |           | - ridder    | - Yes       | - 2.0 (1.41)   |
+ |             |          |          |           | - toms748   | - Yes       | - 2.7 (1.65)   |
+ +-------------+----------+----------+-----------+-------------+-------------+----------------+
+ | `R` or `C`  | No       | No       | No        | secant      | No          | 1.62 (1.62)    |
+ +-------------+----------+----------+-----------+-------------+-------------+----------------+
+ | `R` or `C`  | No       | Yes      | No        | newton      | No          | 2.00 (1.41)    |
+ +-------------+----------+----------+-----------+-------------+-------------+----------------+
+ | `R` or `C`  | No       | Yes      | Yes       | halley      | No          | 3.00 (1.44)    |
+ +-------------+----------+----------+-----------+-------------+-------------+----------------+
+
+ .. seealso::
+
+    `scipy.optimize.cython_optimize` -- Typed Cython versions of root finding functions
+
+ Fixed point finding:
+
+ .. autosummary::
+    :toctree: generated/
+
+    fixed_point - Single-variable fixed-point solver.
+
+ Multidimensional
+ ----------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    root - Unified interface for nonlinear solvers of multivariate functions.
+
+ The `root` function supports the following methods:
+
+ .. toctree::
+
+    optimize.root-hybr
+    optimize.root-lm
+    optimize.root-broyden1
+    optimize.root-broyden2
+    optimize.root-anderson
+    optimize.root-linearmixing
+    optimize.root-diagbroyden
+    optimize.root-excitingmixing
+    optimize.root-krylov
+    optimize.root-dfsane
+
+ Linear programming / MILP
+ =========================
+
+ .. autosummary::
+    :toctree: generated/
+
+    milp -- Mixed integer linear programming.
+    linprog -- Unified interface for solvers of linear programming problems.
+
+ The `linprog` function supports the following methods:
+
+ .. toctree::
+
+    optimize.linprog-simplex
+    optimize.linprog-interior-point
+    optimize.linprog-revised_simplex
+    optimize.linprog-highs-ipm
+    optimize.linprog-highs-ds
+    optimize.linprog-highs
+
+ The simplex, interior-point, and revised simplex methods support callback
+ functions, such as:
+
+ .. autosummary::
+    :toctree: generated/
+
+    linprog_verbose_callback -- Sample callback function for linprog (simplex).
+
+ Assignment problems
+ ===================
+
+ .. autosummary::
+    :toctree: generated/
+
+    linear_sum_assignment -- Solves the linear-sum assignment problem.
+    quadratic_assignment -- Solves the quadratic assignment problem.
+
+ The `quadratic_assignment` function supports the following methods:
+
+ .. toctree::
+
+    optimize.qap-faq
+    optimize.qap-2opt
+
+ Utilities
+ =========
+
+ Finite-difference approximation
+ -------------------------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    approx_fprime - Approximate the gradient of a scalar function.
+    check_grad - Check the supplied derivative using finite differences.
+
+
+ Line search
+ -----------
+
+ .. autosummary::
+    :toctree: generated/
+
+    bracket - Bracket a minimum, given two starting points.
+    line_search - Return a step that satisfies the strong Wolfe conditions.
+
+ Hessian approximation
+ ---------------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    LbfgsInvHessProduct - Linear operator for L-BFGS approximate inverse Hessian.
+    HessianUpdateStrategy - Interface for implementing Hessian update strategies.
+
+ Benchmark problems
+ ------------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    rosen - The Rosenbrock function.
+    rosen_der - The derivative of the Rosenbrock function.
+    rosen_hess - The Hessian matrix of the Rosenbrock function.
+    rosen_hess_prod - Product of the Rosenbrock Hessian with a vector.
+
+ Legacy functions
+ ================
+
+ The functions below are not recommended for use in new scripts;
+ all of them are accessible via the newer, more consistent
+ interfaces provided above.
+
+ Optimization
+ ------------
+
+ General-purpose multivariate methods:
+
+ .. autosummary::
+    :toctree: generated/
+
+    fmin - Nelder-Mead Simplex algorithm.
+    fmin_powell - Powell's (modified) conjugate direction method.
+    fmin_cg - Non-linear (Polak-Ribiere) conjugate gradient algorithm.
+    fmin_bfgs - Quasi-Newton method (Broyden-Fletcher-Goldfarb-Shanno).
+    fmin_ncg - Line-search Newton Conjugate Gradient.
+
+ Constrained multivariate methods:
+
+ .. autosummary::
+    :toctree: generated/
+
+    fmin_l_bfgs_b - Zhu, Byrd, and Nocedal's constrained optimizer.
+    fmin_tnc - Truncated Newton code.
+    fmin_cobyla - Constrained optimization by linear approximation.
+    fmin_slsqp - Minimization using sequential least-squares programming.
+
+ Univariate (scalar) minimization methods:
+
+ .. autosummary::
+    :toctree: generated/
+
+    fminbound - Bounded minimization of a scalar function.
+    brent - 1-D function minimization using Brent method.
+    golden - 1-D function minimization using Golden Section method.
+
+ Least-squares
+ -------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    leastsq - Minimize the sum of squares of M equations in N unknowns.
+
+ Root finding
+ ------------
+
+ General nonlinear solvers:
+
+ .. autosummary::
+    :toctree: generated/
+
+    fsolve - Non-linear multivariable equation solver.
+    broyden1 - Broyden's first method.
+    broyden2 - Broyden's second method.
+    NoConvergence - Exception raised when nonlinear solver does not converge.
+
+ Large-scale nonlinear solvers:
+
+ .. autosummary::
+    :toctree: generated/
+
+    newton_krylov
+    anderson
+
+    BroydenFirst
+    InverseJacobian
+    KrylovJacobian
+
+ Simple iteration solvers:
+
+ .. autosummary::
+    :toctree: generated/
+
+    excitingmixing
+    linearmixing
+    diagbroyden
+
+ """ # noqa: E501
+
+ from ._optimize import *
+ from ._minimize import *
+ from ._root import *
+ from ._root_scalar import *
+ from ._minpack_py import *
+ from ._zeros_py import *
+ from ._lbfgsb_py import fmin_l_bfgs_b, LbfgsInvHessProduct
+ from ._tnc import fmin_tnc
+ from ._cobyla_py import fmin_cobyla
+ from ._nonlin import *
+ from ._slsqp_py import fmin_slsqp
+ from ._nnls import nnls
+ from ._basinhopping import basinhopping
+ from ._linprog import linprog, linprog_verbose_callback
+ from ._lsap import linear_sum_assignment
+ from ._differentialevolution import differential_evolution
+ from ._lsq import least_squares, lsq_linear
+ from ._isotonic import isotonic_regression
+ from ._constraints import (NonlinearConstraint,
+                            LinearConstraint,
+                            Bounds)
+ from ._hessian_update_strategy import HessianUpdateStrategy, BFGS, SR1
+ from ._shgo import shgo
+ from ._dual_annealing import dual_annealing
+ from ._qap import quadratic_assignment
+ from ._direct_py import direct
+ from ._milp import milp
+
+ # Deprecated namespaces, to be removed in v2.0.0
+ from . import (
+     cobyla, lbfgsb, linesearch, minpack, minpack2, moduleTNC, nonlin, optimize,
+     slsqp, tnc, zeros
+ )
+
+ __all__ = [s for s in dir() if not s.startswith('_')]
+
+ from scipy._lib._testutils import PytestTester
+ test = PytestTester(__name__)
+ del PytestTester
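
The docstring above is a catalog rather than a tutorial. As a minimal hedged sketch of the two unified interfaces it leads with, `minimize` and `root_scalar` (the cubic whose root is found is an illustrative choice, not from the diff):

# Minimal sketch of the unified interfaces catalogued in the docstring above.
import numpy as np
from scipy.optimize import minimize, root_scalar, rosen, rosen_der

# Local multivariate minimization with a method from the `minimize` list.
res = minimize(rosen, x0=np.array([1.3, 0.7, 0.8, 1.9, 1.2]),
               jac=rosen_der, method='L-BFGS-B')
print(res.x)        # ~ [1, 1, 1, 1, 1]

# Scalar root finding with a bracketing method from the convergence table.
sol = root_scalar(lambda x: x**3 - 2.0, bracket=[1.0, 2.0], method='brentq')
print(sol.root)     # ~ 2**(1/3)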
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_differentialevolution.py ADDED
@@ -0,0 +1,1897 @@
1
+ """
2
+ differential_evolution: The differential evolution global optimization algorithm
3
+ Added by Andrew Nelson 2014
4
+ """
5
+ import warnings
6
+
7
+ import numpy as np
8
+ from scipy.optimize import OptimizeResult, minimize
9
+ from scipy.optimize._optimize import _status_message, _wrap_callback
10
+ from scipy._lib._util import check_random_state, MapWrapper, _FunctionWrapper
11
+
12
+ from scipy.optimize._constraints import (Bounds, new_bounds_to_old,
13
+ NonlinearConstraint, LinearConstraint)
14
+ from scipy.sparse import issparse
15
+
16
+ __all__ = ['differential_evolution']
17
+
18
+
19
+ _MACHEPS = np.finfo(np.float64).eps
20
+
21
+
22
+ def differential_evolution(func, bounds, args=(), strategy='best1bin',
23
+ maxiter=1000, popsize=15, tol=0.01,
24
+ mutation=(0.5, 1), recombination=0.7, seed=None,
25
+ callback=None, disp=False, polish=True,
26
+ init='latinhypercube', atol=0, updating='immediate',
27
+ workers=1, constraints=(), x0=None, *,
28
+ integrality=None, vectorized=False):
29
+ """Finds the global minimum of a multivariate function.
30
+
31
+ The differential evolution method [1]_ is stochastic in nature. It does
32
+ not use gradient methods to find the minimum, and can search large areas
33
+ of candidate space, but often requires larger numbers of function
34
+ evaluations than conventional gradient-based techniques.
35
+
36
+ The algorithm is due to Storn and Price [2]_.
37
+
38
+ Parameters
39
+ ----------
40
+ func : callable
41
+ The objective function to be minimized. Must be in the form
42
+ ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
43
+ and ``args`` is a tuple of any additional fixed parameters needed to
44
+ completely specify the function. The number of parameters, N, is equal
45
+ to ``len(x)``.
46
+ bounds : sequence or `Bounds`
47
+ Bounds for variables. There are two ways to specify the bounds:
48
+
49
+ 1. Instance of `Bounds` class.
50
+ 2. ``(min, max)`` pairs for each element in ``x``, defining the
51
+ finite lower and upper bounds for the optimizing argument of
52
+ `func`.
53
+
54
+ The total number of bounds is used to determine the number of
55
+ parameters, N. If there are parameters whose bounds are equal the total
56
+ number of free parameters is ``N - N_equal``.
57
+
58
+ args : tuple, optional
59
+ Any additional fixed parameters needed to
60
+ completely specify the objective function.
61
+ strategy : {str, callable}, optional
62
+ The differential evolution strategy to use. Should be one of:
63
+
64
+ - 'best1bin'
65
+ - 'best1exp'
66
+ - 'rand1bin'
67
+ - 'rand1exp'
68
+ - 'rand2bin'
69
+ - 'rand2exp'
70
+ - 'randtobest1bin'
71
+ - 'randtobest1exp'
72
+ - 'currenttobest1bin'
73
+ - 'currenttobest1exp'
74
+ - 'best2exp'
75
+ - 'best2bin'
76
+
77
+ The default is 'best1bin'. Strategies that may be implemented are
78
+ outlined in 'Notes'.
79
+ Alternatively the differential evolution strategy can be customized by
80
+ providing a callable that constructs a trial vector. The callable must
81
+ have the form ``strategy(candidate: int, population: np.ndarray, rng=None)``,
82
+ where ``candidate`` is an integer specifying which entry of the
83
+ population is being evolved, ``population`` is an array of shape
84
+ ``(S, N)`` containing all the population members (where S is the
85
+ total population size), and ``rng`` is the random number generator
86
+ being used within the solver.
87
+ ``candidate`` will be in the range ``[0, S)``.
88
+ ``strategy`` must return a trial vector with shape `(N,)`. The
89
+ fitness of this trial vector is compared against the fitness of
90
+ ``population[candidate]``.
91
+
92
+ .. versionchanged:: 1.12.0
93
+ Customization of evolution strategy via a callable.
94
+
95
+ maxiter : int, optional
96
+ The maximum number of generations over which the entire population is
97
+ evolved. The maximum number of function evaluations (with no polishing)
98
+ is: ``(maxiter + 1) * popsize * (N - N_equal)``
99
+ popsize : int, optional
100
+ A multiplier for setting the total population size. The population has
101
+ ``popsize * (N - N_equal)`` individuals. This keyword is overridden if
102
+ an initial population is supplied via the `init` keyword. When using
103
+ ``init='sobol'`` the population size is calculated as the next power
104
+ of 2 after ``popsize * (N - N_equal)``.
105
+ tol : float, optional
106
+ Relative tolerance for convergence, the solving stops when
107
+ ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
108
+ where and `atol` and `tol` are the absolute and relative tolerance
109
+ respectively.
110
+ mutation : float or tuple(float, float), optional
111
+ The mutation constant. In the literature this is also known as
112
+ differential weight, being denoted by F.
113
+ If specified as a float it should be in the range [0, 2].
114
+ If specified as a tuple ``(min, max)`` dithering is employed. Dithering
115
+ randomly changes the mutation constant on a generation by generation
116
+ basis. The mutation constant for that generation is taken from
117
+ ``U[min, max)``. Dithering can help speed convergence significantly.
118
+ Increasing the mutation constant increases the search radius, but will
119
+ slow down convergence.
120
+ recombination : float, optional
121
+ The recombination constant, should be in the range [0, 1]. In the
122
+ literature this is also known as the crossover probability, being
123
+ denoted by CR. Increasing this value allows a larger number of mutants
124
+ to progress into the next generation, but at the risk of population
125
+ stability.
126
+ seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
127
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
128
+ singleton is used.
129
+ If `seed` is an int, a new ``RandomState`` instance is used,
130
+ seeded with `seed`.
131
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
132
+ that instance is used.
133
+ Specify `seed` for repeatable minimizations.
134
+ disp : bool, optional
135
+ Prints the evaluated `func` at every iteration.
136
+ callback : callable, optional
137
+ A callable called after each iteration. Has the signature:
138
+
139
+ ``callback(intermediate_result: OptimizeResult)``
140
+
141
+ where ``intermediate_result`` is a keyword parameter containing an
142
+ `OptimizeResult` with attributes ``x`` and ``fun``, the best solution
143
+ found so far and the objective function. Note that the name
144
+ of the parameter must be ``intermediate_result`` for the callback
145
+ to be passed an `OptimizeResult`.
146
+
147
+ The callback also supports a signature like:
148
+
149
+ ``callback(x, convergence: float=val)``
150
+
151
+ ``val`` represents the fractional value of the population convergence.
152
+ When ``val`` is greater than ``1.0``, the function halts.
153
+
154
+ Introspection is used to determine which of the signatures is invoked.
155
+
156
+ Global minimization will halt if the callback raises ``StopIteration``
157
+ or returns ``True``; any polishing is still carried out.
158
+
159
+ .. versionchanged:: 1.12.0
160
+ callback accepts the ``intermediate_result`` keyword.
161
+
162
+ polish : bool, optional
163
+ If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`
164
+ method is used to polish the best population member at the end, which
165
+ can improve the minimization slightly. If a constrained problem is
166
+ being studied then the `trust-constr` method is used instead. For large
167
+ problems with many constraints, polishing can take a long time due to
168
+ the Jacobian computations.
169
+ init : str or array-like, optional
170
+ Specify which type of population initialization is performed. Should be
171
+ one of:
172
+
173
+ - 'latinhypercube'
174
+ - 'sobol'
175
+ - 'halton'
176
+ - 'random'
177
+ - array specifying the initial population. The array should have
178
+ shape ``(S, N)``, where S is the total population size and N is
179
+ the number of parameters.
180
+ `init` is clipped to `bounds` before use.
181
+
182
+ The default is 'latinhypercube'. Latin Hypercube sampling tries to
183
+ maximize coverage of the available parameter space.
184
+
185
+ 'sobol' and 'halton' are superior alternatives and maximize even more
186
+ the parameter space. 'sobol' will enforce an initial population
187
+ size which is calculated as the next power of 2 after
188
+ ``popsize * (N - N_equal)``. 'halton' has no requirements but is a bit
189
+ less efficient. See `scipy.stats.qmc` for more details.
190
+
191
+ 'random' initializes the population randomly - this has the drawback
192
+ that clustering can occur, preventing the whole of parameter space
193
+ being covered. Use of an array to specify a population could be used,
194
+ for example, to create a tight bunch of initial guesses in an location
195
+ where the solution is known to exist, thereby reducing time for
196
+ convergence.
197
+ atol : float, optional
198
+ Absolute tolerance for convergence, the solving stops when
199
+ ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
200
+ where and `atol` and `tol` are the absolute and relative tolerance
201
+ respectively.
202
+ updating : {'immediate', 'deferred'}, optional
203
+ If ``'immediate'``, the best solution vector is continuously updated
204
+ within a single generation [4]_. This can lead to faster convergence as
205
+ trial vectors can take advantage of continuous improvements in the best
206
+ solution.
207
+ With ``'deferred'``, the best solution vector is updated once per
208
+ generation. Only ``'deferred'`` is compatible with parallelization or
209
+ vectorization, and the `workers` and `vectorized` keywords can
210
+ over-ride this option.
211
+
212
+ .. versionadded:: 1.2.0
213
+
214
+ workers : int or map-like callable, optional
215
+ If `workers` is an int the population is subdivided into `workers`
216
+ sections and evaluated in parallel
217
+ (uses `multiprocessing.Pool <multiprocessing>`).
218
+ Supply -1 to use all available CPU cores.
219
+ Alternatively supply a map-like callable, such as
220
+ `multiprocessing.Pool.map` for evaluating the population in parallel.
221
+ This evaluation is carried out as ``workers(func, iterable)``.
222
+ This option will override the `updating` keyword to
223
+ ``updating='deferred'`` if ``workers != 1``.
224
+ This option overrides the `vectorized` keyword if ``workers != 1``.
225
+ Requires that `func` be pickleable.
226
+
227
+ .. versionadded:: 1.2.0
228
+
229
+ constraints : {NonLinearConstraint, LinearConstraint, Bounds}
230
+ Constraints on the solver, over and above those applied by the `bounds`
231
+ kwd. Uses the approach by Lampinen [5]_.
232
+
233
+ .. versionadded:: 1.4.0
234
+
235
+ x0 : None or array-like, optional
236
+ Provides an initial guess to the minimization. Once the population has
237
+ been initialized this vector replaces the first (best) member. This
238
+ replacement is done even if `init` is given an initial population.
239
+ ``x0.shape == (N,)``.
240
+
241
+ .. versionadded:: 1.7.0
242
+
243
+ integrality : 1-D array, optional
244
+ For each decision variable, a boolean value indicating whether the
245
+ decision variable is constrained to integer values. The array is
246
+ broadcast to ``(N,)``.
247
+ If any decision variables are constrained to be integral, they will not
248
+ be changed during polishing.
249
+ Only integer values lying between the lower and upper bounds are used.
250
+ If there are no integer values lying between the bounds then a
251
+ `ValueError` is raised.
252
+
253
+ .. versionadded:: 1.9.0
254
+
255
+ vectorized : bool, optional
256
+ If ``vectorized is True``, `func` is sent an `x` array with
257
+ ``x.shape == (N, S)``, and is expected to return an array of shape
258
+ ``(S,)``, where `S` is the number of solution vectors to be calculated.
259
+ If constraints are applied, each of the functions used to construct
260
+ a `Constraint` object should accept an `x` array with
261
+ ``x.shape == (N, S)``, and return an array of shape ``(M, S)``, where
262
+ `M` is the number of constraint components.
263
+ This option is an alternative to the parallelization offered by
264
+ `workers`, and may help in optimization speed by reducing interpreter
265
+ overhead from multiple function calls. This keyword is ignored if
266
+ ``workers != 1``.
267
+ This option will override the `updating` keyword to
268
+ ``updating='deferred'``.
269
+ See the notes section for further discussion on when to use
270
+ ``'vectorized'``, and when to use ``'workers'``.
271
+
272
+ .. versionadded:: 1.9.0
273
+
274
+ Returns
275
+ -------
276
+ res : OptimizeResult
277
+ The optimization result represented as a `OptimizeResult` object.
278
+ Important attributes are: ``x`` the solution array, ``success`` a
279
+ Boolean flag indicating if the optimizer exited successfully,
280
+ ``message`` which describes the cause of the termination,
281
+ ``population`` the solution vectors present in the population, and
282
+ ``population_energies`` the value of the objective function for each
283
+ entry in ``population``.
284
+ See `OptimizeResult` for a description of other attributes. If `polish`
285
+ was employed, and a lower minimum was obtained by the polishing, then
286
+ OptimizeResult also contains the ``jac`` attribute.
287
+ If the eventual solution does not satisfy the applied constraints
288
+ ``success`` will be `False`.
289
+
290
+ Notes
291
+ -----
292
+ Differential evolution is a stochastic population based method that is
293
+ useful for global optimization problems. At each pass through the
294
+ population the algorithm mutates each candidate solution by mixing with
295
+ other candidate solutions to create a trial candidate. There are several
296
+ strategies [3]_ for creating trial candidates, which suit some problems
297
+ more than others. The 'best1bin' strategy is a good starting point for
298
+ many systems. In this strategy two members of the population are randomly
299
+ chosen. Their difference is used to mutate the best member (the 'best' in
300
+ 'best1bin'), :math:`x_0`, so far:
301
+
302
+ .. math::
303
+
304
+ b' = x_0 + mutation * (x_{r_0} - x_{r_1})
305
+
306
+ A trial vector is then constructed. Starting with a randomly chosen ith
307
+ parameter the trial is sequentially filled (in modulo) with parameters
308
+ from ``b'`` or the original candidate. The choice of whether to use ``b'``
309
+ or the original candidate is made with a binomial distribution (the 'bin'
310
+ in 'best1bin') - a random number in [0, 1) is generated. If this number is
311
+ less than the `recombination` constant then the parameter is loaded from
312
+ ``b'``, otherwise it is loaded from the original candidate. The final
313
+ parameter is always loaded from ``b'``. Once the trial candidate is built
314
+ its fitness is assessed. If the trial is better than the original candidate
315
+ then it takes its place. If it is also better than the best overall
316
+ candidate it also replaces that.
317
+
318
+ The other strategies available are outlined in Qiang and
319
+ Mitchell (2014) [3]_.
320
+
321
+ .. math::
322
+ rand1* : b' = x_{r_0} + mutation*(x_{r_1} - x_{r_2})
323
+
324
+ rand2* : b' = x_{r_0} + mutation*(x_{r_1} + x_{r_2}
325
+ - x_{r_3} - x_{r_4})
326
+
327
+ best1* : b' = x_0 + mutation*(x_{r_0} - x_{r_1})
328
+
329
+ best2* : b' = x_0 + mutation*(x_{r_0} + x_{r_1}
330
+ - x_{r_2} - x_{r_3})
331
+
332
+ currenttobest1* : b' = x_i + mutation*(x_0 - x_i
333
+ + x_{r_0} - x_{r_1})
334
+
335
+ randtobest1* : b' = x_{r_0} + mutation*(x_0 - x_{r_0}
336
+ + x_{r_1} - x_{r_2})
337
+
338
+ where the integers :math:`r_0, r_1, r_2, r_3, r_4` are chosen randomly
339
+ from the interval [0, NP) with `NP` being the total population size and
340
+ the original candidate having index `i`. The user can fully customize the
341
+ generation of the trial candidates by supplying a callable to ``strategy``.
342
+
343
+ To improve your chances of finding a global minimum use higher `popsize`
344
+ values, with higher `mutation` and (dithering), but lower `recombination`
345
+ values. This has the effect of widening the search radius, but slowing
346
+ convergence.
347
+
348
+ By default the best solution vector is updated continuously within a single
349
+ iteration (``updating='immediate'``). This is a modification [4]_ of the
350
+ original differential evolution algorithm which can lead to faster
351
+ convergence as trial vectors can immediately benefit from improved
352
+ solutions. To use the original Storn and Price behaviour, updating the best
353
+ solution once per iteration, set ``updating='deferred'``.
354
+ The ``'deferred'`` approach is compatible with both parallelization and
355
+ vectorization (``'workers'`` and ``'vectorized'`` keywords). These may
356
+ improve minimization speed by using computer resources more efficiently.
357
+ The ``'workers'`` distribute calculations over multiple processors. By
358
+ default the Python `multiprocessing` module is used, but other approaches
359
+ are also possible, such as the Message Passing Interface (MPI) used on
360
+ clusters [6]_ [7]_. The overhead from these approaches (creating new
361
+ Processes, etc) may be significant, meaning that computational speed
362
+ doesn't necessarily scale with the number of processors used.
363
+ Parallelization is best suited to computationally expensive objective
364
+ functions. If the objective function is less expensive, then
365
+ ``'vectorized'`` may aid by only calling the objective function once per
366
+ iteration, rather than multiple times for all the population members; the
367
+ interpreter overhead is reduced.
368
+
369
+ .. versionadded:: 0.15.0
370
+
371
+ References
372
+ ----------
373
+ .. [1] Differential evolution, Wikipedia,
374
+ http://en.wikipedia.org/wiki/Differential_evolution
375
+ .. [2] Storn, R and Price, K, Differential Evolution - a Simple and
376
+ Efficient Heuristic for Global Optimization over Continuous Spaces,
377
+ Journal of Global Optimization, 1997, 11, 341 - 359.
378
+ .. [3] Qiang, J., Mitchell, C., A Unified Differential Evolution Algorithm
379
+ for Global Optimization, 2014, https://www.osti.gov/servlets/purl/1163659
380
+ .. [4] Wormington, M., Panaccione, C., Matney, K. M., Bowen, D. K., -
381
+ Characterization of structures from X-ray scattering data using
382
+ genetic algorithms, Phil. Trans. R. Soc. Lond. A, 1999, 357,
383
+ 2827-2848
384
+ .. [5] Lampinen, J., A constraint handling approach for the differential
385
+ evolution algorithm. Proceedings of the 2002 Congress on
386
+ Evolutionary Computation. CEC'02 (Cat. No. 02TH8600). Vol. 2. IEEE,
387
+ 2002.
388
+ .. [6] https://mpi4py.readthedocs.io/en/stable/
389
+ .. [7] https://schwimmbad.readthedocs.io/en/latest/
390
+
391
+
392
+ Examples
393
+ --------
394
+ Let us consider the problem of minimizing the Rosenbrock function. This
395
+ function is implemented in `rosen` in `scipy.optimize`.
396
+
397
+ >>> import numpy as np
398
+ >>> from scipy.optimize import rosen, differential_evolution
399
+ >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)]
400
+ >>> result = differential_evolution(rosen, bounds)
401
+ >>> result.x, result.fun
402
+ (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)
403
+
404
+ Now repeat, but with parallelization.
405
+
406
+ >>> result = differential_evolution(rosen, bounds, updating='deferred',
407
+ ... workers=2)
408
+ >>> result.x, result.fun
409
+ (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)
410
+
411
+ Let's do a constrained minimization.
412
+
413
+ >>> from scipy.optimize import LinearConstraint, Bounds
414
+
415
+ We add the constraint that the sum of ``x[0]`` and ``x[1]`` must be less
416
+ than or equal to 1.9. This is a linear constraint, which may be written
417
+ ``A @ x <= 1.9``, where ``A = array([[1, 1]])``. This can be encoded as
418
+ a `LinearConstraint` instance:
419
+
420
+ >>> lc = LinearConstraint([[1, 1]], -np.inf, 1.9)
421
+
422
+ Specify limits using a `Bounds` object.
423
+
424
+ >>> bounds = Bounds([0., 0.], [2., 2.])
425
+ >>> result = differential_evolution(rosen, bounds, constraints=lc,
426
+ ... seed=1)
427
+ >>> result.x, result.fun
428
+ (array([0.96632622, 0.93367155]), 0.0011352416852625719)
429
+
430
+ Next find the minimum of the Ackley function
431
+ (https://en.wikipedia.org/wiki/Test_functions_for_optimization).
432
+
433
+ >>> def ackley(x):
434
+ ... arg1 = -0.2 * np.sqrt(0.5 * (x[0] ** 2 + x[1] ** 2))
435
+ ... arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi * x[1]))
436
+ ... return -20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e
437
+ >>> bounds = [(-5, 5), (-5, 5)]
438
+ >>> result = differential_evolution(ackley, bounds, seed=1)
439
+ >>> result.x, result.fun
440
+ (array([0., 0.]), 4.440892098500626e-16)
441
+
442
+ The Ackley function is written in a vectorized manner, so the
443
+ ``'vectorized'`` keyword can be employed. Note the reduced number of
444
+ function evaluations.
445
+
446
+ >>> result = differential_evolution(
447
+ ... ackley, bounds, vectorized=True, updating='deferred', seed=1
448
+ ... )
449
+ >>> result.x, result.fun
450
+ (array([0., 0.]), 4.440892098500626e-16)
451
+
452
+ The following custom strategy function mimics 'best1bin':
453
+
454
+ >>> def custom_strategy_fn(candidate, population, rng=None):
455
+ ... parameter_count = population.shape(-1)
456
+ ... mutation, recombination = 0.7, 0.9
457
+ ... trial = np.copy(population[candidate])
458
+ ... fill_point = rng.choice(parameter_count)
459
+ ...
460
+ ... pool = np.arange(len(population))
461
+ ... rng.shuffle(pool)
462
+ ...
463
+ ... # two unique random numbers that aren't the same, and
464
+ ... # aren't equal to candidate.
465
+ ... idxs = []
466
+ ... while len(idxs) < 2 and len(pool) > 0:
467
+ ... idx = pool[0]
468
+ ... pool = pool[1:]
469
+ ... if idx != candidate:
470
+ ... idxs.append(idx)
471
+ ...
472
+ ... r0, r1 = idxs[:2]
473
+ ...
474
+ ... bprime = (population[0] + mutation *
475
+ ... (population[r0] - population[r1]))
476
+ ...
477
+ ... crossovers = rng.uniform(size=parameter_count)
478
+ ... crossovers = crossovers < recombination
479
+ ... crossovers[fill_point] = True
480
+ ... trial = np.where(crossovers, bprime, trial)
481
+ ... return trial
482
+
483
+ """
484
+
485
+ # using a context manager means that any created Pool objects are
486
+ # cleared up.
487
+ with DifferentialEvolutionSolver(func, bounds, args=args,
488
+ strategy=strategy,
489
+ maxiter=maxiter,
490
+ popsize=popsize, tol=tol,
491
+ mutation=mutation,
492
+ recombination=recombination,
493
+ seed=seed, polish=polish,
494
+ callback=callback,
495
+ disp=disp, init=init, atol=atol,
496
+ updating=updating,
497
+ workers=workers,
498
+ constraints=constraints,
499
+ x0=x0,
500
+ integrality=integrality,
501
+ vectorized=vectorized) as solver:
502
+ ret = solver.solve()
503
+
504
+ return ret
505
+
506
+
507
+ class DifferentialEvolutionSolver:
508
+
509
+ """This class implements the differential evolution solver
510
+
511
+ Parameters
512
+ ----------
513
+ func : callable
514
+ The objective function to be minimized. Must be in the form
515
+ ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
516
+ and ``args`` is a tuple of any additional fixed parameters needed to
517
+ completely specify the function. The number of parameters, N, is equal
518
+ to ``len(x)``.
519
+ bounds : sequence or `Bounds`
520
+ Bounds for variables. There are two ways to specify the bounds:
521
+
522
+ 1. Instance of `Bounds` class.
523
+ 2. ``(min, max)`` pairs for each element in ``x``, defining the
524
+ finite lower and upper bounds for the optimizing argument of
525
+ `func`.
526
+
527
+ The total number of bounds is used to determine the number of
528
+ parameters, N. If there are parameters whose bounds are equal the total
529
+ number of free parameters is ``N - N_equal``.
530
+ args : tuple, optional
531
+ Any additional fixed parameters needed to
532
+ completely specify the objective function.
533
+ strategy : {str, callable}, optional
534
+ The differential evolution strategy to use. Should be one of:
535
+
536
+ - 'best1bin'
537
+ - 'best1exp'
538
+ - 'rand1bin'
539
+ - 'rand1exp'
540
+ - 'rand2bin'
541
+ - 'rand2exp'
542
+ - 'randtobest1bin'
543
+ - 'randtobest1exp'
544
+ - 'currenttobest1bin'
545
+ - 'currenttobest1exp'
546
+ - 'best2exp'
547
+ - 'best2bin'
548
+
549
+ The default is 'best1bin'. Strategies that may be
550
+ implemented are outlined in 'Notes'.
551
+
552
+ Alternatively the differential evolution strategy can be customized
553
+ by providing a callable that constructs a trial vector. The callable
554
+ must have the form
555
+ ``strategy(candidate: int, population: np.ndarray, rng=None)``,
556
+ where ``candidate`` is an integer specifying which entry of the
557
+ population is being evolved, ``population`` is an array of shape
558
+ ``(S, N)`` containing all the population members (where S is the
559
+ total population size), and ``rng`` is the random number generator
560
+ being used within the solver.
561
+ ``candidate`` will be in the range ``[0, S)``.
562
+ ``strategy`` must return a trial vector with shape `(N,)`. The
563
+ fitness of this trial vector is compared against the fitness of
564
+ ``population[candidate]``.
565
+ maxiter : int, optional
566
+ The maximum number of generations over which the entire population is
567
+ evolved. The maximum number of function evaluations (with no polishing)
568
+ is: ``(maxiter + 1) * popsize * (N - N_equal)``
569
+ popsize : int, optional
570
+ A multiplier for setting the total population size. The population has
571
+ ``popsize * (N - N_equal)`` individuals. This keyword is overridden if
572
+ an initial population is supplied via the `init` keyword. When using
573
+ ``init='sobol'`` the population size is calculated as the next power
574
+ of 2 after ``popsize * (N - N_equal)``.
575
+ tol : float, optional
576
+ Relative tolerance for convergence, the solving stops when
577
+ ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
578
+ where and `atol` and `tol` are the absolute and relative tolerance
579
+ respectively.
580
+ mutation : float or tuple(float, float), optional
581
+ The mutation constant. In the literature this is also known as
582
+ differential weight, being denoted by F.
583
+ If specified as a float it should be in the range [0, 2].
584
+ If specified as a tuple ``(min, max)`` dithering is employed. Dithering
585
+ randomly changes the mutation constant on a generation by generation
586
+ basis. The mutation constant for that generation is taken from
587
+ U[min, max). Dithering can help speed convergence significantly.
588
+ Increasing the mutation constant increases the search radius, but will
589
+ slow down convergence.
590
+ recombination : float, optional
591
+ The recombination constant, should be in the range [0, 1]. In the
592
+ literature this is also known as the crossover probability, being
593
+ denoted by CR. Increasing this value allows a larger number of mutants
594
+ to progress into the next generation, but at the risk of population
595
+ stability.
596
+ seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
597
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
598
+ singleton is used.
599
+ If `seed` is an int, a new ``RandomState`` instance is used,
600
+ seeded with `seed`.
601
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
602
+ that instance is used.
603
+ Specify `seed` for repeatable minimizations.
604
+ disp : bool, optional
605
+ Prints the evaluated `func` at every iteration.
606
+ callback : callable, optional
607
+ A callable called after each iteration. Has the signature:
608
+
609
+ ``callback(intermediate_result: OptimizeResult)``
610
+
611
+ where ``intermediate_result`` is a keyword parameter containing an
612
+ `OptimizeResult` with attributes ``x`` and ``fun``, the best solution
613
+ found so far and its objective function value. Note that the name
614
+ of the parameter must be ``intermediate_result`` for the callback
615
+ to be passed an `OptimizeResult`.
616
+
617
+ The callback also supports a signature like:
618
+
619
+ ``callback(x, convergence: float=val)``
620
+
621
+ ``val`` represents the fractional value of the population convergence.
622
+ When ``val`` is greater than ``1.0``, the function halts.
623
+
624
+ Introspection is used to determine which of the signatures is invoked.
625
+
626
+ Global minimization will halt if the callback raises ``StopIteration``
627
+ or returns ``True``; any polishing is still carried out.
628
+
629
+ .. versionchanged:: 1.12.0
630
+ callback accepts the ``intermediate_result`` keyword.
631
+
632
+ polish : bool, optional
633
+ If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`
634
+ method is used to polish the best population member at the end, which
635
+ can improve the minimization slightly. If a constrained problem is
636
+ being studied then the `trust-constr` method is used instead. For large
637
+ problems with many constraints, polishing can take a long time due to
638
+ the Jacobian computations.
639
+ maxfun : int, optional
640
+ Set the maximum number of function evaluations. However, it probably
641
+ makes more sense to set `maxiter` instead.
642
+ init : str or array-like, optional
643
+ Specify which type of population initialization is performed. Should be
644
+ one of:
645
+
646
+ - 'latinhypercube'
647
+ - 'sobol'
648
+ - 'halton'
649
+ - 'random'
650
+ - array specifying the initial population. The array should have
651
+ shape ``(S, N)``, where S is the total population size and
652
+ N is the number of parameters.
653
+ `init` is clipped to `bounds` before use.
654
+
655
+ The default is 'latinhypercube'. Latin Hypercube sampling tries to
656
+ maximize coverage of the available parameter space.
657
+
658
+ 'sobol' and 'halton' are superior alternatives that maximize coverage
659
+ of the parameter space even further. 'sobol' will enforce an initial population
660
+ size which is calculated as the next power of 2 after
661
+ ``popsize * (N - N_equal)``. 'halton' has no requirements but is a bit
662
+ less efficient. See `scipy.stats.qmc` for more details.
663
+
664
+ 'random' initializes the population randomly - this has the drawback
665
+ that clustering can occur, preventing the whole of parameter space
666
+ being covered. An array can be used to specify a population,
667
+ for example, to create a tight bunch of initial guesses in a location
668
+ where the solution is known to exist, thereby reducing time for
669
+ convergence.
670
+ atol : float, optional
671
+ Absolute tolerance for convergence; the solving stops when
672
+ ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
673
+ where `atol` and `tol` are the absolute and relative tolerances,
674
+ respectively.
675
+ updating : {'immediate', 'deferred'}, optional
676
+ If ``'immediate'``, the best solution vector is continuously updated
677
+ within a single generation [4]_. This can lead to faster convergence as
678
+ trial vectors can take advantage of continuous improvements in the best
679
+ solution.
680
+ With ``'deferred'``, the best solution vector is updated once per
681
+ generation. Only ``'deferred'`` is compatible with parallelization or
682
+ vectorization, and the `workers` and `vectorized` keywords can
683
+ override this option.
684
+ workers : int or map-like callable, optional
685
+ If `workers` is an int the population is subdivided into `workers`
686
+ sections and evaluated in parallel
687
+ (uses `multiprocessing.Pool <multiprocessing>`).
688
+ Supply `-1` to use all cores available to the Process.
689
+ Alternatively supply a map-like callable, such as
690
+ `multiprocessing.Pool.map` for evaluating the population in parallel.
691
+ This evaluation is carried out as ``workers(func, iterable)``.
692
+ This option will override the `updating` keyword to
693
+ `updating='deferred'` if `workers != 1`.
694
+ Requires that `func` be pickleable.
695
+ constraints : {NonlinearConstraint, LinearConstraint, Bounds}
696
+ Constraints on the solver, over and above those applied by the `bounds`
697
+ kwd. Uses the approach by Lampinen.
698
+ x0 : None or array-like, optional
699
+ Provides an initial guess to the minimization. Once the population has
700
+ been initialized this vector replaces the first (best) member. This
701
+ replacement is done even if `init` is given an initial population.
702
+ ``x0.shape == (N,)``.
703
+ integrality : 1-D array, optional
704
+ For each decision variable, a boolean value indicating whether the
705
+ decision variable is constrained to integer values. The array is
706
+ broadcast to ``(N,)``.
707
+ If any decision variables are constrained to be integral, they will not
708
+ be changed during polishing.
709
+ Only integer values lying between the lower and upper bounds are used.
710
+ If there are no integer values lying between the bounds then a
711
+ `ValueError` is raised.
712
+ vectorized : bool, optional
713
+ If ``vectorized is True``, `func` is sent an `x` array with
714
+ ``x.shape == (N, S)``, and is expected to return an array of shape
715
+ ``(S,)``, where `S` is the number of solution vectors to be calculated.
716
+ If constraints are applied, each of the functions used to construct
717
+ a `Constraint` object should accept an `x` array with
718
+ ``x.shape == (N, S)``, and return an array of shape ``(M, S)``, where
719
+ `M` is the number of constraint components.
720
+ This option is an alternative to the parallelization offered by
721
+ `workers`, and may help in optimization speed. This keyword is
722
+ ignored if ``workers != 1``.
723
+ This option will override the `updating` keyword to
724
+ ``updating='deferred'``.
725
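+
+ Examples
+ --------
+ A minimal sketch of the custom ``strategy`` callable described above,
+ reimplementing a 'best1bin'-like trial vector. The objective used
+ here, `rosen`, is the Rosenbrock function from `scipy.optimize`; the
+ constants (mutation=0.7, recombination=0.9) are illustrative choices,
+ not requirements.
+
+ >>> import numpy as np
+ >>> from scipy.optimize import differential_evolution, rosen
+ >>> def custom_strategy_fn(candidate, population, rng=None):
+ ...     parameter_count = population.shape[-1]
+ ...     mutation, recombination = 0.7, 0.9
+ ...     trial = np.copy(population[candidate])
+ ...     fill_point = rng.choice(parameter_count)
+ ...     # pick two distinct members, excluding the candidate itself
+ ...     pool = np.arange(len(population))
+ ...     rng.shuffle(pool)
+ ...     r0, r1 = [i for i in pool if i != candidate][:2]
+ ...     # best1 mutant: best member plus scaled difference of two others
+ ...     bprime = (population[0] + mutation *
+ ...               (population[r0] - population[r1]))
+ ...     # binomial crossover; at least one parameter from the mutant
+ ...     crossovers = rng.uniform(size=parameter_count) < recombination
+ ...     crossovers[fill_point] = True
+ ...     return np.where(crossovers, bprime, trial)
+ >>> bounds = [(0, 2), (0, 2)]
+ >>> result = differential_evolution(rosen, bounds,
+ ...                                 strategy=custom_strategy_fn)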
+ """
726
+
727
+ # Dispatch of mutation strategy method (binomial or exponential).
728
+ _binomial = {'best1bin': '_best1',
729
+ 'randtobest1bin': '_randtobest1',
730
+ 'currenttobest1bin': '_currenttobest1',
731
+ 'best2bin': '_best2',
732
+ 'rand2bin': '_rand2',
733
+ 'rand1bin': '_rand1'}
734
+ _exponential = {'best1exp': '_best1',
735
+ 'rand1exp': '_rand1',
736
+ 'randtobest1exp': '_randtobest1',
737
+ 'currenttobest1exp': '_currenttobest1',
738
+ 'best2exp': '_best2',
739
+ 'rand2exp': '_rand2'}
740
+
741
+ __init_error_msg = ("The population initialization method must be one of "
742
+ "'latinhypercube' or 'random', or an array of shape "
743
+ "(S, N) where N is the number of parameters and S>5")
744
+
745
+ def __init__(self, func, bounds, args=(),
746
+ strategy='best1bin', maxiter=1000, popsize=15,
747
+ tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None,
748
+ maxfun=np.inf, callback=None, disp=False, polish=True,
749
+ init='latinhypercube', atol=0, updating='immediate',
750
+ workers=1, constraints=(), x0=None, *, integrality=None,
751
+ vectorized=False):
752
+
753
+ if callable(strategy):
754
+ # a callable strategy is going to be stored in self.strategy anyway
755
+ pass
756
+ elif strategy in self._binomial:
757
+ self.mutation_func = getattr(self, self._binomial[strategy])
758
+ elif strategy in self._exponential:
759
+ self.mutation_func = getattr(self, self._exponential[strategy])
760
+ else:
761
+ raise ValueError("Please select a valid mutation strategy")
762
+ self.strategy = strategy
763
+
764
+ self.callback = _wrap_callback(callback, "differential_evolution")
765
+ self.polish = polish
766
+
767
+ # set the updating / parallelisation options
768
+ if updating in ['immediate', 'deferred']:
769
+ self._updating = updating
770
+
771
+ self.vectorized = vectorized
772
+
773
+ # want to use parallelisation, but updating is immediate
774
+ if workers != 1 and updating == 'immediate':
775
+ warnings.warn("differential_evolution: the 'workers' keyword has"
776
+ " overridden updating='immediate' to"
777
+ " updating='deferred'", UserWarning, stacklevel=2)
778
+ self._updating = 'deferred'
779
+
780
+ if vectorized and workers != 1:
781
+ warnings.warn("differential_evolution: the 'workers' keyword"
782
+ " overrides the 'vectorized' keyword", stacklevel=2)
783
+ self.vectorized = vectorized = False
784
+
785
+ if vectorized and updating == 'immediate':
786
+ warnings.warn("differential_evolution: the 'vectorized' keyword"
787
+ " has overridden updating='immediate' to updating"
788
+ "='deferred'", UserWarning, stacklevel=2)
789
+ self._updating = 'deferred'
790
+
791
+ # an object with a map method.
792
+ if vectorized:
793
+ def maplike_for_vectorized_func(func, x):
794
+ # send an array (N, S) to the user func,
795
+ # expect to receive (S,). Transposition is required because
796
+ # internally the population is held as (S, N)
797
+ return np.atleast_1d(func(x.T))
798
+ workers = maplike_for_vectorized_func
799
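+ # For example (sketch, not from the original source): a vectorized
+ # objective such as
+ # def f(x): return np.sum(x**2, axis=0)
+ # receives x with shape (N, S) and returns shape (S,), matching the
+ # transposition performed above.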
+
800
+ self._mapwrapper = MapWrapper(workers)
801
+
802
+ # relative and absolute tolerances for convergence
803
+ self.tol, self.atol = tol, atol
804
+
805
+ # Mutation constant should be in [0, 2). If specified as a sequence
806
+ # then dithering is performed.
807
+ self.scale = mutation
808
+ if (not np.all(np.isfinite(mutation)) or
809
+ np.any(np.array(mutation) >= 2) or
810
+ np.any(np.array(mutation) < 0)):
811
+ raise ValueError('The mutation constant must be a float in '
812
+ 'the range [0, 2), or specified as a tuple(min, max)'
813
+ ' where min < max and min, max are in [0, 2).')
814
+
815
+ self.dither = None
816
+ if hasattr(mutation, '__iter__') and len(mutation) > 1:
817
+ self.dither = [mutation[0], mutation[1]]
818
+ self.dither.sort()
819
+
820
+ self.cross_over_probability = recombination
821
+
822
+ # we create a wrapped function to allow the use of map (and Pool.map
823
+ # in the future)
824
+ self.func = _FunctionWrapper(func, args)
825
+ self.args = args
826
+
827
+ # convert tuple of lower and upper bounds to limits
828
+ # [(low_0, high_0), ..., (low_n, high_n)]
829
+ # -> [[low_0, ..., low_n], [high_0, ..., high_n]]
830
+ if isinstance(bounds, Bounds):
831
+ self.limits = np.array(new_bounds_to_old(bounds.lb,
832
+ bounds.ub,
833
+ len(bounds.lb)),
834
+ dtype=float).T
835
+ else:
836
+ self.limits = np.array(bounds, dtype='float').T
837
+
838
+ if (np.size(self.limits, 0) != 2 or not
839
+ np.all(np.isfinite(self.limits))):
840
+ raise ValueError('bounds should be a sequence containing finite '
841
+ 'real valued (min, max) pairs for each value'
842
+ ' in x')
843
+
844
+ if maxiter is None: # the default used to be None
845
+ maxiter = 1000
846
+ self.maxiter = maxiter
847
+ if maxfun is None: # the default used to be None
848
+ maxfun = np.inf
849
+ self.maxfun = maxfun
850
+
851
+ # population is scaled to between [0, 1].
852
+ # We have to scale between parameter <-> population
853
+ # save these arguments for _scale_parameter and
854
+ # _unscale_parameter. This is an optimization
855
+ self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1])
856
+ self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1])
857
+ with np.errstate(divide='ignore'):
858
+ # if lb == ub then the following line will be 1/0, which is why
859
+ # we ignore the divide by zero warning. The result from 1/0 is
860
+ # inf, so replace those values by 0.
861
+ self.__recip_scale_arg2 = 1 / self.__scale_arg2
862
+ self.__recip_scale_arg2[~np.isfinite(self.__recip_scale_arg2)] = 0
863
+
864
+ self.parameter_count = np.size(self.limits, 1)
865
+
866
+ self.random_number_generator = check_random_state(seed)
867
+
868
+ # Which parameters are going to be integers?
869
+ if np.any(integrality):
870
+ # user has provided a truth value for integer constraints
871
+ integrality = np.broadcast_to(
872
+ integrality,
873
+ self.parameter_count
874
+ )
875
+ integrality = np.asarray(integrality, bool)
876
+ # For integrality parameters change the limits to only allow
877
+ # integer values lying between the limits.
878
+ lb, ub = np.copy(self.limits)
879
+
880
+ lb = np.ceil(lb)
881
+ ub = np.floor(ub)
882
+ if not (lb[integrality] <= ub[integrality]).all():
883
+ # there's a parameter that doesn't have an integer value
884
+ # lying between the limits
885
+ raise ValueError("One of the integrality constraints does not"
886
+ " have any possible integer values between"
887
+ " the lower/upper bounds.")
888
+ nlb = np.nextafter(lb[integrality] - 0.5, np.inf)
889
+ nub = np.nextafter(ub[integrality] + 0.5, -np.inf)
890
+
891
+ self.integrality = integrality
892
+ self.limits[0, self.integrality] = nlb
893
+ self.limits[1, self.integrality] = nub
894
+ else:
895
+ self.integrality = False
896
+
897
+ # check for equal bounds
898
+ eb = self.limits[0] == self.limits[1]
899
+ eb_count = np.count_nonzero(eb)
900
+
901
+ # default population initialization is a latin hypercube design, but
902
+ # there are other population initializations possible.
903
+ # the minimum is 5 because 'best2bin' requires a population that's at
904
+ # least 5 long
905
+ # 202301 - reduced population size to account for parameters with
906
+ # equal bounds. If there are no varying parameters set N to at least 1
907
+ self.num_population_members = max(
908
+ 5,
909
+ popsize * max(1, self.parameter_count - eb_count)
910
+ )
911
+ self.population_shape = (self.num_population_members,
912
+ self.parameter_count)
913
+
914
+ self._nfev = 0
915
+ # check first str otherwise will fail to compare str with array
916
+ if isinstance(init, str):
917
+ if init == 'latinhypercube':
918
+ self.init_population_lhs()
919
+ elif init == 'sobol':
920
+ # must be Ns = 2**m for Sobol'
921
+ n_s = int(2 ** np.ceil(np.log2(self.num_population_members)))
922
+ self.num_population_members = n_s
923
+ self.population_shape = (self.num_population_members,
924
+ self.parameter_count)
925
+ self.init_population_qmc(qmc_engine='sobol')
926
+ elif init == 'halton':
927
+ self.init_population_qmc(qmc_engine='halton')
928
+ elif init == 'random':
929
+ self.init_population_random()
930
+ else:
931
+ raise ValueError(self.__init_error_msg)
932
+ else:
933
+ self.init_population_array(init)
934
+
935
+ if x0 is not None:
936
+ # scale to within unit interval and
937
+ # ensure parameters are within bounds.
938
+ x0_scaled = self._unscale_parameters(np.asarray(x0))
939
+ if ((x0_scaled > 1.0) | (x0_scaled < 0.0)).any():
940
+ raise ValueError(
941
+ "Some entries in x0 lay outside the specified bounds"
942
+ )
943
+ self.population[0] = x0_scaled
944
+
945
+ # infrastructure for constraints
946
+ self.constraints = constraints
947
+ self._wrapped_constraints = []
948
+
949
+ if hasattr(constraints, '__len__'):
950
+ # sequence of constraints, this will also deal with default
951
+ # keyword parameter
952
+ for c in constraints:
953
+ self._wrapped_constraints.append(
954
+ _ConstraintWrapper(c, self.x)
955
+ )
956
+ else:
957
+ self._wrapped_constraints = [
958
+ _ConstraintWrapper(constraints, self.x)
959
+ ]
960
+ self.total_constraints = np.sum(
961
+ [c.num_constr for c in self._wrapped_constraints]
962
+ )
963
+ self.constraint_violation = np.zeros((self.num_population_members, 1))
964
+ self.feasible = np.ones(self.num_population_members, bool)
965
+
966
+ self.disp = disp
967
+
968
+ def init_population_lhs(self):
969
+ """
970
+ Initializes the population with Latin Hypercube Sampling.
971
+ Latin Hypercube Sampling ensures that each parameter is uniformly
972
+ sampled over its range.
973
+ """
974
+ rng = self.random_number_generator
975
+
976
+ # Each parameter range needs to be sampled uniformly. The scaled
977
+ # parameter range ([0, 1)) needs to be split into
978
+ # `self.num_population_members` segments, each of which has the following
979
+ # size:
980
+ segsize = 1.0 / self.num_population_members
981
+
982
+ # Within each segment we sample from a uniform random distribution.
983
+ # We need to do this sampling for each parameter.
984
+ samples = (segsize * rng.uniform(size=self.population_shape)
985
+
986
+ # Offset each segment to cover the entire parameter range [0, 1)
987
+ + np.linspace(0., 1., self.num_population_members,
988
+ endpoint=False)[:, np.newaxis])
989
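+
+ # Worked example (illustrative numbers): with 4 population members,
+ # segsize is 0.25, so for each parameter one sample is drawn from each
+ # of the segments [0, 0.25), [0.25, 0.5), [0.5, 0.75) and [0.75, 1);
+ # the per-column permutation below then decouples the segment ordering
+ # between parameters.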
+
990
+ # Create an array for population of candidate solutions.
991
+ self.population = np.zeros_like(samples)
992
+
993
+ # Initialize population of candidate solutions by permutation of the
994
+ # random samples.
995
+ for j in range(self.parameter_count):
996
+ order = rng.permutation(range(self.num_population_members))
997
+ self.population[:, j] = samples[order, j]
998
+
999
+ # reset population energies
1000
+ self.population_energies = np.full(self.num_population_members,
1001
+ np.inf)
1002
+
1003
+ # reset number of function evaluations counter
1004
+ self._nfev = 0
1005
+
1006
+ def init_population_qmc(self, qmc_engine):
1007
+ """Initializes the population with a QMC method.
1008
+
1009
+ QMC methods ensure that each parameter is uniformly
1010
+ sampled over its range.
1011
+
1012
+ Parameters
1013
+ ----------
1014
+ qmc_engine : str
1015
+ The QMC method to use for initialization. Can be one of
1016
+ ``latinhypercube``, ``sobol`` or ``halton``.
1017
+
1018
+ """
1019
+ from scipy.stats import qmc
1020
+
1021
+ rng = self.random_number_generator
1022
+
1023
+ # Create an array for population of candidate solutions.
1024
+ if qmc_engine == 'latinhypercube':
1025
+ sampler = qmc.LatinHypercube(d=self.parameter_count, seed=rng)
1026
+ elif qmc_engine == 'sobol':
1027
+ sampler = qmc.Sobol(d=self.parameter_count, seed=rng)
1028
+ elif qmc_engine == 'halton':
1029
+ sampler = qmc.Halton(d=self.parameter_count, seed=rng)
1030
+ else:
1031
+ raise ValueError(self.__init_error_msg)
1032
+
1033
+ self.population = sampler.random(n=self.num_population_members)
1034
+
1035
+ # reset population energies
1036
+ self.population_energies = np.full(self.num_population_members,
1037
+ np.inf)
1038
+
1039
+ # reset number of function evaluations counter
1040
+ self._nfev = 0
1041
+
1042
+ def init_population_random(self):
1043
+ """
1044
+ Initializes the population at random. This type of initialization
1045
+ can exhibit clustering; Latin Hypercube sampling is generally better.
1046
+ """
1047
+ rng = self.random_number_generator
1048
+ self.population = rng.uniform(size=self.population_shape)
1049
+
1050
+ # reset population energies
1051
+ self.population_energies = np.full(self.num_population_members,
1052
+ np.inf)
1053
+
1054
+ # reset number of function evaluations counter
1055
+ self._nfev = 0
1056
+
1057
+ def init_population_array(self, init):
1058
+ """
1059
+ Initializes the population with a user specified population.
1060
+
1061
+ Parameters
1062
+ ----------
1063
+ init : np.ndarray
1064
+ Array specifying a subset of the initial population. The array should
1065
+ have shape (S, N), where N is the number of parameters.
1066
+ The population is clipped to the lower and upper bounds.
1067
+ """
1068
+ # make sure you're using a float array
1069
+ popn = np.asarray(init, dtype=np.float64)
1070
+
1071
+ if (np.size(popn, 0) < 5 or
1072
+ popn.shape[1] != self.parameter_count or
1073
+ len(popn.shape) != 2):
1074
+ raise ValueError("The population supplied needs to have shape"
1075
+ " (S, len(x)), where S > 4.")
1076
+
1077
+ # scale values and clip to bounds, assigning to population
1078
+ self.population = np.clip(self._unscale_parameters(popn), 0, 1)
1079
+
1080
+ self.num_population_members = np.size(self.population, 0)
1081
+
1082
+ self.population_shape = (self.num_population_members,
1083
+ self.parameter_count)
1084
+
1085
+ # reset population energies
1086
+ self.population_energies = np.full(self.num_population_members,
1087
+ np.inf)
1088
+
1089
+ # reset number of function evaluations counter
1090
+ self._nfev = 0
1091
+
1092
+ @property
1093
+ def x(self):
1094
+ """
1095
+ The best solution from the solver
1096
+ """
1097
+ return self._scale_parameters(self.population[0])
1098
+
1099
+ @property
1100
+ def convergence(self):
1101
+ """
1102
+ The standard deviation of the population energies divided by their
1103
+ mean.
1104
+ """
1105
+ if np.any(np.isinf(self.population_energies)):
1106
+ return np.inf
1107
+ return (np.std(self.population_energies) /
1108
+ (np.abs(np.mean(self.population_energies)) + _MACHEPS))
1109
+
1110
+ def converged(self):
1111
+ """
1112
+ Return True if the solver has converged.
1113
+ """
1114
+ if np.any(np.isinf(self.population_energies)):
1115
+ return False
1116
+
1117
+ return (np.std(self.population_energies) <=
1118
+ self.atol +
1119
+ self.tol * np.abs(np.mean(self.population_energies)))
1120
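+ # For example (illustrative numbers): with tol=0.01, atol=0 and
+ # population energies [1.00, 1.01, 0.99], np.std(...) is ~0.0082 while
+ # 0.01 * |mean| is 0.01, so the solver would be deemed converged.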
+
1121
+ def solve(self):
1122
+ """
1123
+ Runs the DifferentialEvolutionSolver.
1124
+
1125
+ Returns
1126
+ -------
1127
+ res : OptimizeResult
1128
+ The optimization result represented as a `OptimizeResult` object.
1129
+ Important attributes are: ``x`` the solution array, ``success`` a
1130
+ Boolean flag indicating if the optimizer exited successfully,
1131
+ ``message`` which describes the cause of the termination,
1132
+ ``population`` the solution vectors present in the population, and
1133
+ ``population_energies`` the value of the objective function for
1134
+ each entry in ``population``.
1135
+ See `OptimizeResult` for a description of other attributes. If
1136
+ `polish` was employed, and a lower minimum was obtained by the
1137
+ polishing, then OptimizeResult also contains the ``jac`` attribute.
1138
+ If the eventual solution does not satisfy the applied constraints
1139
+ ``success`` will be `False`.
1140
+ """
1141
+ nit, warning_flag = 0, False
1142
+ status_message = _status_message['success']
1143
+
1144
+ # The population may have just been initialized (all entries are
1145
+ # np.inf). If it has you have to calculate the initial energies.
1146
+ # Although this is also done in the evolve generator it's possible
1147
+ # that someone can set maxiter=0, at which point we still want the
1148
+ # initial energies to be calculated (the following loop isn't run).
1149
+ if np.all(np.isinf(self.population_energies)):
1150
+ self.feasible, self.constraint_violation = (
1151
+ self._calculate_population_feasibilities(self.population))
1152
+
1153
+ # only work out population energies for feasible solutions
1154
+ self.population_energies[self.feasible] = (
1155
+ self._calculate_population_energies(
1156
+ self.population[self.feasible]))
1157
+
1158
+ self._promote_lowest_energy()
1159
+
1160
+ # do the optimization.
1161
+ for nit in range(1, self.maxiter + 1):
1162
+ # evolve the population by a generation
1163
+ try:
1164
+ next(self)
1165
+ except StopIteration:
1166
+ warning_flag = True
1167
+ if self._nfev > self.maxfun:
1168
+ status_message = _status_message['maxfev']
1169
+ elif self._nfev == self.maxfun:
1170
+ status_message = ('Maximum number of function evaluations'
1171
+ ' has been reached.')
1172
+ break
1173
+
1174
+ if self.disp:
1175
+ print(f"differential_evolution step {nit}: f(x)="
1176
+ f" {self.population_energies[0]}"
1177
+ )
1178
+
1179
+ if self.callback:
1180
+ c = self.tol / (self.convergence + _MACHEPS)
1181
+ res = self._result(nit=nit, message="in progress")
1182
+ res.convergence = c
1183
+ try:
1184
+ warning_flag = bool(self.callback(res))
1185
+ except StopIteration:
1186
+ warning_flag = True
1187
+
1188
+ if warning_flag:
1189
+ status_message = 'callback function requested stop early'
1190
+
1191
+ # should the solver terminate?
1192
+ if warning_flag or self.converged():
1193
+ break
1194
+
1195
+ else:
1196
+ status_message = _status_message['maxiter']
1197
+ warning_flag = True
1198
+
1199
+ DE_result = self._result(
1200
+ nit=nit, message=status_message, warning_flag=warning_flag
1201
+ )
1202
+
1203
+ if self.polish and not np.all(self.integrality):
1204
+ # can't polish if all the parameters are integers
1205
+ if np.any(self.integrality):
1206
+ # set the lower/upper bounds equal so that any integrality
1207
+ # constraints work.
1208
+ limits, integrality = self.limits, self.integrality
1209
+ limits[0, integrality] = DE_result.x[integrality]
1210
+ limits[1, integrality] = DE_result.x[integrality]
1211
+
1212
+ polish_method = 'L-BFGS-B'
1213
+
1214
+ if self._wrapped_constraints:
1215
+ polish_method = 'trust-constr'
1216
+
1217
+ constr_violation = self._constraint_violation_fn(DE_result.x)
1218
+ if np.any(constr_violation > 0.):
1219
+ warnings.warn("differential evolution didn't find a "
1220
+ "solution satisfying the constraints, "
1221
+ "attempting to polish from the least "
1222
+ "infeasible solution",
1223
+ UserWarning, stacklevel=2)
1224
+ if self.disp:
1225
+ print(f"Polishing solution with '{polish_method}'")
1226
+ result = minimize(self.func,
1227
+ np.copy(DE_result.x),
1228
+ method=polish_method,
1229
+ bounds=self.limits.T,
1230
+ constraints=self.constraints)
1231
+
1232
+ self._nfev += result.nfev
1233
+ DE_result.nfev = self._nfev
1234
+
1235
+ # Polishing solution is only accepted if there is an improvement in
1236
+ # cost function, the polishing was successful and the solution lies
1237
+ # within the bounds.
1238
+ if (result.fun < DE_result.fun and
1239
+ result.success and
1240
+ np.all(result.x <= self.limits[1]) and
1241
+ np.all(self.limits[0] <= result.x)):
1242
+ DE_result.fun = result.fun
1243
+ DE_result.x = result.x
1244
+ DE_result.jac = result.jac
1245
+ # to keep internal state consistent
1246
+ self.population_energies[0] = result.fun
1247
+ self.population[0] = self._unscale_parameters(result.x)
1248
+
1249
+ if self._wrapped_constraints:
1250
+ DE_result.constr = [c.violation(DE_result.x) for
1251
+ c in self._wrapped_constraints]
1252
+ DE_result.constr_violation = np.max(
1253
+ np.concatenate(DE_result.constr))
1254
+ DE_result.maxcv = DE_result.constr_violation
1255
+ if DE_result.maxcv > 0:
1256
+ # if the result is infeasible then success must be False
1257
+ DE_result.success = False
1258
+ DE_result.message = ("The solution does not satisfy the "
1259
+ f"constraints, MAXCV = {DE_result.maxcv}")
1260
+
1261
+ return DE_result
1262
+
1263
+ def _result(self, **kwds):
1264
+ # form an intermediate OptimizeResult
1265
+ nit = kwds.get('nit', None)
1266
+ message = kwds.get('message', None)
1267
+ warning_flag = kwds.get('warning_flag', False)
1268
+ result = OptimizeResult(
1269
+ x=self.x,
1270
+ fun=self.population_energies[0],
1271
+ nfev=self._nfev,
1272
+ nit=nit,
1273
+ message=message,
1274
+ success=(warning_flag is not True),
1275
+ population=self._scale_parameters(self.population),
1276
+ population_energies=self.population_energies
1277
+ )
1278
+ if self._wrapped_constraints:
1279
+ result.constr = [c.violation(result.x)
1280
+ for c in self._wrapped_constraints]
1281
+ result.constr_violation = np.max(np.concatenate(result.constr))
1282
+ result.maxcv = result.constr_violation
1283
+ if result.maxcv > 0:
1284
+ result.success = False
1285
+
1286
+ return result
1287
+
1288
+ def _calculate_population_energies(self, population):
1289
+ """
1290
+ Calculate the energies of a population.
1291
+
1292
+ Parameters
1293
+ ----------
1294
+ population : ndarray
1295
+ An array of parameter vectors normalised to [0, 1] using lower
1296
+ and upper limits. Has shape ``(np.size(population, 0), N)``.
1297
+
1298
+ Returns
1299
+ -------
1300
+ energies : ndarray
1301
+ An array of energies corresponding to each population member. If
1302
+ maxfun will be exceeded during this call, then the number of
1303
+ function evaluations will be reduced and energies will be
1304
+ right-padded with np.inf. Has shape ``(np.size(population, 0),)``
1305
+ """
1306
+ num_members = np.size(population, 0)
1307
+ # S is the number of function evals left to stay under the
1308
+ # maxfun budget
1309
+ S = min(num_members, self.maxfun - self._nfev)
1310
+
1311
+ energies = np.full(num_members, np.inf)
1312
+
1313
+ parameters_pop = self._scale_parameters(population)
1314
+ try:
1315
+ calc_energies = list(
1316
+ self._mapwrapper(self.func, parameters_pop[0:S])
1317
+ )
1318
+ calc_energies = np.squeeze(calc_energies)
1319
+ except (TypeError, ValueError) as e:
1320
+ # wrong number of arguments for _mapwrapper
1321
+ # or wrong length returned from the mapper
1322
+ raise RuntimeError(
1323
+ "The map-like callable must be of the form f(func, iterable), "
1324
+ "returning a sequence of numbers the same length as 'iterable'"
1325
+ ) from e
1326
+
1327
+ if calc_energies.size != S:
1328
+ if self.vectorized:
1329
+ raise RuntimeError("The vectorized function must return an"
1330
+ " array of shape (S,) when given an array"
1331
+ " of shape (len(x), S)")
1332
+ raise RuntimeError("func(x, *args) must return a scalar value")
1333
+
1334
+ energies[0:S] = calc_energies
1335
+
1336
+ if self.vectorized:
1337
+ self._nfev += 1
1338
+ else:
1339
+ self._nfev += S
1340
+
1341
+ return energies
1342
+
1343
+ def _promote_lowest_energy(self):
1344
+ # swaps 'best solution' into first population entry
1345
+
1346
+ idx = np.arange(self.num_population_members)
1347
+ feasible_solutions = idx[self.feasible]
1348
+ if feasible_solutions.size:
1349
+ # find the best feasible solution
1350
+ idx_t = np.argmin(self.population_energies[feasible_solutions])
1351
+ l = feasible_solutions[idx_t]
1352
+ else:
1353
+ # no solution was feasible, use 'best' infeasible solution, which
1354
+ # will violate constraints the least
1355
+ l = np.argmin(np.sum(self.constraint_violation, axis=1))
1356
+
1357
+ self.population_energies[[0, l]] = self.population_energies[[l, 0]]
1358
+ self.population[[0, l], :] = self.population[[l, 0], :]
1359
+ self.feasible[[0, l]] = self.feasible[[l, 0]]
1360
+ self.constraint_violation[[0, l], :] = (
1361
+ self.constraint_violation[[l, 0], :])
1362
+
1363
+ def _constraint_violation_fn(self, x):
1364
+ """
1365
+ Calculates total constraint violation for all the constraints, for a
1366
+ set of solutions.
1367
+
1368
+ Parameters
1369
+ ----------
1370
+ x : ndarray
1371
+ Solution vector(s). Has shape (S, N), or (N,), where S is the
1372
+ number of solutions to investigate and N is the number of
1373
+ parameters.
1374
+
1375
+ Returns
1376
+ -------
1377
+ cv : ndarray
1378
+ Total violation of constraints. Has shape ``(S, M)``, where M is
1379
+ the total number of constraint components (which is not necessarily
1380
+ equal to len(self._wrapped_constraints)).
1381
+ """
1382
+ # how many solution vectors you're calculating constraint violations
1383
+ # for
1384
+ S = np.size(x) // self.parameter_count
1385
+ _out = np.zeros((S, self.total_constraints))
1386
+ offset = 0
1387
+ for con in self._wrapped_constraints:
1388
+ # the input/output of the (vectorized) constraint function is
1389
+ # {(N, S), (N,)} --> (M, S)
1390
+ # The input to _constraint_violation_fn is (S, N) or (N,), so
1391
+ # transpose to pass it to the constraint. The output is transposed
1392
+ # from (M, S) to (S, M) for further use.
1393
+ c = con.violation(x.T).T
1394
+
1395
+ # The shape of c should be (M,), (1, M), or (S, M). Check for
1396
+ # those shapes, as an incorrect shape indicates that the
1397
+ # user constraint function didn't return the right thing, and
1398
+ # the reshape operation will fail. Intercept the wrong shape
1399
+ # to give a reasonable error message. I'm not sure what failure
1400
+ # modes an inventive user will come up with.
1401
+ if c.shape[-1] != con.num_constr or (S > 1 and c.shape[0] != S):
1402
+ raise RuntimeError("An array returned from a Constraint has"
1403
+ " the wrong shape. If `vectorized is False`"
1404
+ " the Constraint should return an array of"
1405
+ " shape (M,). If `vectorized is True` then"
1406
+ " the Constraint must return an array of"
1407
+ " shape (M, S), where S is the number of"
1408
+ " solution vectors and M is the number of"
1409
+ " constraint components in a given"
1410
+ " Constraint object.")
1411
+
1412
+ # the violation function may return a 1D array, but is it a
1413
+ # sequence of constraints for one solution (S=1, M>=1), or the
1414
+ # value of a single constraint for a sequence of solutions
1415
+ # (S>=1, M=1)
1416
+ c = np.reshape(c, (S, con.num_constr))
1417
+ _out[:, offset:offset + con.num_constr] = c
1418
+ offset += con.num_constr
1419
+
1420
+ return _out
1421
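+ # For example (sketch): two wrapped constraints with num_constr equal
+ # to 2 and 3 give total_constraints == 5, so for S solution vectors
+ # _out has shape (S, 5): columns 0:2 hold the first constraint's
+ # violations and columns 2:5 the second's.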
+
1422
+ def _calculate_population_feasibilities(self, population):
1423
+ """
1424
+ Calculate the feasibilities of a population.
1425
+
1426
+ Parameters
1427
+ ----------
1428
+ population : ndarray
1429
+ An array of parameter vectors normalised to [0, 1] using lower
1430
+ and upper limits. Has shape ``(np.size(population, 0), N)``.
1431
+
1432
+ Returns
1433
+ -------
1434
+ feasible, constraint_violation : ndarray, ndarray
1435
+ Boolean array of feasibility for each population member, and an
1436
+ array of the constraint violation for each population member.
1437
+ constraint_violation has shape ``(np.size(population, 0), M)``,
1438
+ where M is the number of constraints.
1439
+ """
1440
+ num_members = np.size(population, 0)
1441
+ if not self._wrapped_constraints:
1442
+ # shortcut for no constraints
1443
+ return np.ones(num_members, bool), np.zeros((num_members, 1))
1444
+
1445
+ # (S, N)
1446
+ parameters_pop = self._scale_parameters(population)
1447
+
1448
+ if self.vectorized:
1449
+ # (S, M)
1450
+ constraint_violation = np.array(
1451
+ self._constraint_violation_fn(parameters_pop)
1452
+ )
1453
+ else:
1454
+ # (S, 1, M)
1455
+ constraint_violation = np.array([self._constraint_violation_fn(x)
1456
+ for x in parameters_pop])
1457
+ # if you use the list comprehension in the line above it will
1458
+ # create an array of shape (S, 1, M), because each iteration
1459
+ # generates an array of (1, M). In comparison the vectorized
1460
+ # version returns (S, M). It's therefore necessary to remove axis 1
1461
+ constraint_violation = constraint_violation[:, 0]
1462
+
1463
+ feasible = ~(np.sum(constraint_violation, axis=1) > 0)
1464
+
1465
+ return feasible, constraint_violation
1466
+
1467
+ def __iter__(self):
1468
+ return self
1469
+
1470
+ def __enter__(self):
1471
+ return self
1472
+
1473
+ def __exit__(self, *args):
1474
+ return self._mapwrapper.__exit__(*args)
1475
+
1476
+ def _accept_trial(self, energy_trial, feasible_trial, cv_trial,
1477
+ energy_orig, feasible_orig, cv_orig):
1478
+ """
1479
+ Trial is accepted if:
1480
+ * it satisfies all constraints and provides a lower or equal objective
1481
+ function value, while both the compared solutions are feasible
1482
+ - or -
1483
+ * it is feasible while the original solution is infeasible,
1484
+ - or -
1485
+ * it is infeasible, but provides a lower or equal constraint violation
1486
+ for all constraint functions.
1487
+
1488
+ This test corresponds to section III of Lampinen [1]_.
1489
+
1490
+ Parameters
1491
+ ----------
1492
+ energy_trial : float
1493
+ Energy of the trial solution
1494
+ feasible_trial : float
1495
+ Feasibility of trial solution
1496
+ cv_trial : array-like
1497
+ Excess constraint violation for the trial solution
1498
+ energy_orig : float
1499
+ Energy of the original solution
1500
+ feasible_orig : float
1501
+ Feasibility of original solution
1502
+ cv_orig : array-like
1503
+ Excess constraint violation for the original solution
1504
+
1505
+ Returns
1506
+ -------
1507
+ accepted : bool
1508
+
1509
+ """
1510
+ if feasible_orig and feasible_trial:
1511
+ return energy_trial <= energy_orig
1512
+ elif feasible_trial and not feasible_orig:
1513
+ return True
1514
+ elif not feasible_trial and (cv_trial <= cv_orig).all():
1515
+ # cv_trial < cv_orig would imply that both trial and orig are not
1516
+ # feasible
1517
+ return True
1518
+
1519
+ return False
1520
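+ # For example (illustrative): when both solutions are feasible a trial
+ # with energy 3.0 replaces an original with energy 3.5. An infeasible
+ # trial with cv_trial=[0.1, 0.0] is accepted over cv_orig=[0.2, 0.0]
+ # because it does not worsen any constraint violation.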
+
1521
+ def __next__(self):
1522
+ """
1523
+ Evolve the population by a single generation
1524
+
1525
+ Returns
1526
+ -------
1527
+ x : ndarray
1528
+ The best solution from the solver.
1529
+ fun : float
1530
+ Value of objective function obtained from the best solution.
1531
+ """
1532
+ # the population may have just been initialized (all entries are
1533
+ # np.inf). If it has you have to calculate the initial energies
1534
+ if np.all(np.isinf(self.population_energies)):
1535
+ self.feasible, self.constraint_violation = (
1536
+ self._calculate_population_feasibilities(self.population))
1537
+
1538
+ # only need to work out population energies for those that are
1539
+ # feasible
1540
+ self.population_energies[self.feasible] = (
1541
+ self._calculate_population_energies(
1542
+ self.population[self.feasible]))
1543
+
1544
+ self._promote_lowest_energy()
1545
+
1546
+ if self.dither is not None:
1547
+ self.scale = self.random_number_generator.uniform(self.dither[0],
1548
+ self.dither[1])
1549
+
1550
+ if self._updating == 'immediate':
1551
+ # update best solution immediately
1552
+ for candidate in range(self.num_population_members):
1553
+ if self._nfev > self.maxfun:
1554
+ raise StopIteration
1555
+
1556
+ # create a trial solution
1557
+ trial = self._mutate(candidate)
1558
+
1559
+ # ensuring that it's in the range [0, 1)
1560
+ self._ensure_constraint(trial)
1561
+
1562
+ # scale from [0, 1) to the actual parameter value
1563
+ parameters = self._scale_parameters(trial)
1564
+
1565
+ # determine the energy of the objective function
1566
+ if self._wrapped_constraints:
1567
+ cv = self._constraint_violation_fn(parameters)
1568
+ feasible = False
1569
+ energy = np.inf
1570
+ if not np.sum(cv) > 0:
1571
+ # solution is feasible
1572
+ feasible = True
1573
+ energy = self.func(parameters)
1574
+ self._nfev += 1
1575
+ else:
1576
+ feasible = True
1577
+ cv = np.atleast_2d([0.])
1578
+ energy = self.func(parameters)
1579
+ self._nfev += 1
1580
+
1581
+ # compare trial and population member
1582
+ if self._accept_trial(energy, feasible, cv,
1583
+ self.population_energies[candidate],
1584
+ self.feasible[candidate],
1585
+ self.constraint_violation[candidate]):
1586
+ self.population[candidate] = trial
1587
+ self.population_energies[candidate] = np.squeeze(energy)
1588
+ self.feasible[candidate] = feasible
1589
+ self.constraint_violation[candidate] = cv
1590
+
1591
+ # if the trial candidate is also better than the best
1592
+ # solution then promote it.
1593
+ if self._accept_trial(energy, feasible, cv,
1594
+ self.population_energies[0],
1595
+ self.feasible[0],
1596
+ self.constraint_violation[0]):
1597
+ self._promote_lowest_energy()
1598
+
1599
+ elif self._updating == 'deferred':
1600
+ # update best solution once per generation
1601
+ if self._nfev >= self.maxfun:
1602
+ raise StopIteration
1603
+
1604
+ # 'deferred' approach, vectorised form.
1605
+ # create trial solutions
1606
+ trial_pop = np.array(
1607
+ [self._mutate(i) for i in range(self.num_population_members)])
1608
+
1609
+ # enforce bounds
1610
+ self._ensure_constraint(trial_pop)
1611
+
1612
+ # determine the energies of the objective function, but only for
1613
+ # feasible trials
1614
+ feasible, cv = self._calculate_population_feasibilities(trial_pop)
1615
+ trial_energies = np.full(self.num_population_members, np.inf)
1616
+
1617
+ # only calculate for feasible entries
1618
+ trial_energies[feasible] = self._calculate_population_energies(
1619
+ trial_pop[feasible])
1620
+
1621
+ # which solutions are 'improved'?
1622
+ loc = [self._accept_trial(*val) for val in
1623
+ zip(trial_energies, feasible, cv, self.population_energies,
1624
+ self.feasible, self.constraint_violation)]
1625
+ loc = np.array(loc)
1626
+ self.population = np.where(loc[:, np.newaxis],
1627
+ trial_pop,
1628
+ self.population)
1629
+ self.population_energies = np.where(loc,
1630
+ trial_energies,
1631
+ self.population_energies)
1632
+ self.feasible = np.where(loc,
1633
+ feasible,
1634
+ self.feasible)
1635
+ self.constraint_violation = np.where(loc[:, np.newaxis],
1636
+ cv,
1637
+ self.constraint_violation)
1638
+
1639
+ # make sure the best solution is updated if updating='deferred'.
1640
+ # put the lowest energy into the best solution position.
1641
+ self._promote_lowest_energy()
1642
+
1643
+ return self.x, self.population_energies[0]
1644
+
1645
+ def _scale_parameters(self, trial):
1646
+ """Scale from a number between 0 and 1 to parameters."""
1647
+ # trial either has shape (N, ) or (L, N), where L is the number of
1648
+ # solutions being scaled
1649
+ scaled = self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2
1650
+ if np.any(self.integrality):
1651
+ i = np.broadcast_to(self.integrality, scaled.shape)
1652
+ scaled[i] = np.round(scaled[i])
1653
+ return scaled
1654
+
1655
+ def _unscale_parameters(self, parameters):
1656
+ """Scale from parameters to a number between 0 and 1."""
1657
+ return (parameters - self.__scale_arg1) * self.__recip_scale_arg2 + 0.5
1658
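+ # Worked example (illustrative): for bounds (-2, 2), __scale_arg1 = 0
+ # and __scale_arg2 = 4, so the unit-interval value 0.25 scales to
+ # 0 + (0.25 - 0.5) * 4 = -1.0; unscaling -1.0 gives
+ # (-1.0 - 0) * 0.25 + 0.5 = 0.25 again.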
+
1659
+ def _ensure_constraint(self, trial):
1660
+ """Make sure the parameters lie between the limits."""
1661
+ mask = np.where((trial > 1) | (trial < 0))
1662
+ trial[mask] = self.random_number_generator.uniform(size=mask[0].shape)
1663
+
1664
+ def _mutate(self, candidate):
1665
+ """Create a trial vector based on a mutation strategy."""
1666
+ rng = self.random_number_generator
1667
+
1668
+ if callable(self.strategy):
1669
+ _population = self._scale_parameters(self.population)
1670
+ trial = np.array(
1671
+ self.strategy(candidate, _population, rng=rng), dtype=float
1672
+ )
1673
+ if trial.shape != (self.parameter_count,):
1674
+ raise RuntimeError(
1675
+ "strategy must have signature"
1676
+ " f(candidate: int, population: np.ndarray, rng=None)"
1677
+ " returning an array of shape (N,)"
1678
+ )
1679
+ return self._unscale_parameters(trial)
1680
+
1681
+ trial = np.copy(self.population[candidate])
1682
+ fill_point = rng.choice(self.parameter_count)
1683
+
1684
+ if self.strategy in ['currenttobest1exp', 'currenttobest1bin']:
1685
+ bprime = self.mutation_func(candidate,
1686
+ self._select_samples(candidate, 5))
1687
+ else:
1688
+ bprime = self.mutation_func(self._select_samples(candidate, 5))
1689
+
1690
+ if self.strategy in self._binomial:
1691
+ crossovers = rng.uniform(size=self.parameter_count)
1692
+ crossovers = crossovers < self.cross_over_probability
1693
+ # the last one is always from the bprime vector for binomial
1694
+ # If you fill in modulo with a loop you have to set the last one to
1695
+ # true. If you don't use a loop then you can have any random entry
1696
+ # be True.
1697
+ crossovers[fill_point] = True
1698
+ trial = np.where(crossovers, bprime, trial)
1699
+ return trial
1700
+
1701
+ elif self.strategy in self._exponential:
1702
+ i = 0
1703
+ crossovers = rng.uniform(size=self.parameter_count)
1704
+ crossovers = crossovers < self.cross_over_probability
1705
+ crossovers[0] = True
1706
+ while (i < self.parameter_count and crossovers[i]):
1707
+ trial[fill_point] = bprime[fill_point]
1708
+ fill_point = (fill_point + 1) % self.parameter_count
1709
+ i += 1
1710
+
1711
+ return trial
1712
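+ # For example (illustrative): with parameter_count=4, fill_point=1 and
+ # a binomial crossover mask of [False, True, True, False], the trial
+ # keeps parameters 0 and 3 from the original candidate and takes
+ # parameters 1 and 2 from the mutant vector bprime.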
+
1713
+ def _best1(self, samples):
1714
+ """best1bin, best1exp"""
1715
+ r0, r1 = samples[:2]
1716
+ return (self.population[0] + self.scale *
1717
+ (self.population[r0] - self.population[r1]))
1718
+
1719
+ def _rand1(self, samples):
1720
+ """rand1bin, rand1exp"""
1721
+ r0, r1, r2 = samples[:3]
1722
+ return (self.population[r0] + self.scale *
1723
+ (self.population[r1] - self.population[r2]))
1724
+
1725
+ def _randtobest1(self, samples):
1726
+ """randtobest1bin, randtobest1exp"""
1727
+ r0, r1, r2 = samples[:3]
1728
+ bprime = np.copy(self.population[r0])
1729
+ bprime += self.scale * (self.population[0] - bprime)
1730
+ bprime += self.scale * (self.population[r1] -
1731
+ self.population[r2])
1732
+ return bprime
1733
+
1734
+ def _currenttobest1(self, candidate, samples):
1735
+ """currenttobest1bin, currenttobest1exp"""
1736
+ r0, r1 = samples[:2]
1737
+ bprime = (self.population[candidate] + self.scale *
1738
+ (self.population[0] - self.population[candidate] +
1739
+ self.population[r0] - self.population[r1]))
1740
+ return bprime
1741
+
1742
+ def _best2(self, samples):
1743
+ """best2bin, best2exp"""
1744
+ r0, r1, r2, r3 = samples[:4]
1745
+ bprime = (self.population[0] + self.scale *
1746
+ (self.population[r0] + self.population[r1] -
1747
+ self.population[r2] - self.population[r3]))
1748
+
1749
+ return bprime
1750
+
1751
+ def _rand2(self, samples):
1752
+ """rand2bin, rand2exp"""
1753
+ r0, r1, r2, r3, r4 = samples
1754
+ bprime = (self.population[r0] + self.scale *
1755
+ (self.population[r1] + self.population[r2] -
1756
+ self.population[r3] - self.population[r4]))
1757
+
1758
+ return bprime
1759
+
1760
+ def _select_samples(self, candidate, number_samples):
1761
+ """
1762
+ Obtain random integers from range(self.num_population_members),
1763
+ without replacement. The original candidate is excluded.
1764
+ """
1765
+ pool = np.arange(self.num_population_members)
1766
+ self.random_number_generator.shuffle(pool)
1767
+
1768
+ idxs = []
1769
+ while len(idxs) < number_samples and len(pool) > 0:
1770
+ idx = pool[0]
1771
+ pool = pool[1:]
1772
+ if idx != candidate:
1773
+ idxs.append(idx)
1774
+
1775
+ return idxs
1776
+
1777
+
1778
+ class _ConstraintWrapper:
1779
+ """Object to wrap/evaluate user defined constraints.
1780
+
1781
+ Very similar in practice to `PreparedConstraint`, except that no evaluation
1782
+ of jac/hess is performed (explicit or implicit).
1783
+
1784
+ If created successfully, it will contain the attributes listed below.
1785
+
1786
+ Parameters
1787
+ ----------
1788
+ constraint : {`NonlinearConstraint`, `LinearConstraint`, `Bounds`}
1789
+ Constraint to check and prepare.
1790
+ x0 : array_like
1791
+ Initial vector of independent variables, shape (N,)
1792
+
1793
+ Attributes
1794
+ ----------
1795
+ fun : callable
1796
+ Function defining the constraint wrapped by one of the convenience
1797
+ classes.
1798
+ bounds : 2-tuple
1799
+ Contains lower and upper bounds for the constraints --- lb and ub.
1800
+ These are converted to ndarray and have a size equal to the number of
1801
+ the constraints.
1802
+
1803
+ Notes
1804
+ -----
1805
+ _ConstraintWrapper.fun and _ConstraintWrapper.violation can get sent
1806
+ arrays of shape (N, S) or (N,), where S is the number of vectors of shape
1807
+ (N,) to consider constraints for.
1808
+ """
1809
+ def __init__(self, constraint, x0):
1810
+ self.constraint = constraint
1811
+
1812
+ if isinstance(constraint, NonlinearConstraint):
1813
+ def fun(x):
1814
+ x = np.asarray(x)
1815
+ return np.atleast_1d(constraint.fun(x))
1816
+ elif isinstance(constraint, LinearConstraint):
1817
+ def fun(x):
1818
+ if issparse(constraint.A):
1819
+ A = constraint.A
1820
+ else:
1821
+ A = np.atleast_2d(constraint.A)
1822
+
1823
+ res = A.dot(x)
1824
+ # x either has shape (N, S) or (N)
1825
+ # (M, N) x (N, S) --> (M, S)
1826
+ # (M, N) x (N,) --> (M,)
1827
+ # However, if (M, N) is a matrix then:
1828
+ # (M, N) * (N,) --> (M, 1), we need this to be (M,)
1829
+ if x.ndim == 1 and res.ndim == 2:
1830
+ # deal with case that constraint.A is an np.matrix
1831
+ # see gh20041
1832
+ res = np.asarray(res)[:, 0]
1833
+
1834
+ return res
1835
+ elif isinstance(constraint, Bounds):
1836
+ def fun(x):
1837
+ return np.asarray(x)
1838
+ else:
1839
+ raise ValueError("`constraint` is of an unknown type.")
1840
+
1841
+ self.fun = fun
1842
+
1843
+ lb = np.asarray(constraint.lb, dtype=float)
1844
+ ub = np.asarray(constraint.ub, dtype=float)
1845
+
1846
+ x0 = np.asarray(x0)
1847
+
1848
+ # find out the number of constraints
1849
+ f0 = fun(x0)
1850
+ self.num_constr = m = f0.size
1851
+ self.parameter_count = x0.size
1852
+
1853
+ if lb.ndim == 0:
1854
+ lb = np.resize(lb, m)
1855
+ if ub.ndim == 0:
1856
+ ub = np.resize(ub, m)
1857
+
1858
+ self.bounds = (lb, ub)
1859
+
1860
+ def __call__(self, x):
1861
+ return np.atleast_1d(self.fun(x))
1862
+
1863
+ def violation(self, x):
1864
+ """How much the constraint is exceeded by.
1865
+
1866
+ Parameters
1867
+ ----------
1868
+ x : array-like
1869
+ Vector of independent variables, (N, S), where N is number of
1870
+ parameters and S is the number of solutions to be investigated.
1871
+
1872
+ Returns
1873
+ -------
1874
+ excess : array-like
1875
+ How much the constraint is exceeded by, for each of the
1876
+ constraints specified by `_ConstraintWrapper.fun`.
1877
+ Has shape (M, S) where M is the number of constraint components.
1878
+ """
1879
+ # expect ev to have shape (num_constr, S) or (num_constr,)
1880
+ ev = self.fun(np.asarray(x))
1881
+
1882
+ try:
1883
+ excess_lb = np.maximum(self.bounds[0] - ev.T, 0)
1884
+ excess_ub = np.maximum(ev.T - self.bounds[1], 0)
1885
+ except ValueError as e:
1886
+ raise RuntimeError("An array returned from a Constraint has"
1887
+ " the wrong shape. If `vectorized is False`"
1888
+ " the Constraint should return an array of"
1889
+ " shape (M,). If `vectorized is True` then"
1890
+ " the Constraint must return an array of"
1891
+ " shape (M, S), where S is the number of"
1892
+ " solution vectors and M is the number of"
1893
+ " constraint components in a given"
1894
+ " Constraint object.") from e
1895
+
1896
+ v = (excess_lb + excess_ub).T
1897
+ return v
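+
+ # For example (sketch): with bounds lb=[0.], ub=[1.] and a constraint
+ # function returning ev=[1.5], excess_lb = max(0 - 1.5, 0) = 0 and
+ # excess_ub = max(1.5 - 1, 0) = 0.5, so violation(x) returns [0.5].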
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (186 Bytes).
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HConst.pxd ADDED
@@ -0,0 +1,106 @@
1
+ # cython: language_level=3
2
+
3
+ from libcpp cimport bool
4
+ from libcpp.string cimport string
5
+
6
+ cdef extern from "HConst.h" nogil:
7
+
8
+ const int HIGHS_CONST_I_INF "kHighsIInf"
9
+ const double HIGHS_CONST_INF "kHighsInf"
10
+ const double kHighsTiny
11
+ const double kHighsZero
12
+ const int kHighsThreadLimit
13
+
14
+ cdef enum HighsDebugLevel:
15
+ HighsDebugLevel_kHighsDebugLevelNone "kHighsDebugLevelNone" = 0
16
+ HighsDebugLevel_kHighsDebugLevelCheap "kHighsDebugLevelCheap"
17
+ HighsDebugLevel_kHighsDebugLevelCostly "kHighsDebugLevelCostly"
18
+ HighsDebugLevel_kHighsDebugLevelExpensive "kHighsDebugLevelExpensive"
19
+ HighsDebugLevel_kHighsDebugLevelMin "kHighsDebugLevelMin" = HighsDebugLevel_kHighsDebugLevelNone
20
+ HighsDebugLevel_kHighsDebugLevelMax "kHighsDebugLevelMax" = HighsDebugLevel_kHighsDebugLevelExpensive
21
+
22
+ ctypedef enum HighsModelStatus:
23
+ HighsModelStatusNOTSET "HighsModelStatus::kNotset" = 0
24
+ HighsModelStatusLOAD_ERROR "HighsModelStatus::kLoadError"
25
+ HighsModelStatusMODEL_ERROR "HighsModelStatus::kModelError"
26
+ HighsModelStatusPRESOLVE_ERROR "HighsModelStatus::kPresolveError"
27
+ HighsModelStatusSOLVE_ERROR "HighsModelStatus::kSolveError"
28
+ HighsModelStatusPOSTSOLVE_ERROR "HighsModelStatus::kPostsolveError"
29
+ HighsModelStatusMODEL_EMPTY "HighsModelStatus::kModelEmpty"
30
+ HighsModelStatusOPTIMAL "HighsModelStatus::kOptimal"
31
+ HighsModelStatusINFEASIBLE "HighsModelStatus::kInfeasible"
32
+ HighsModelStatus_UNBOUNDED_OR_INFEASIBLE "HighsModelStatus::kUnboundedOrInfeasible"
33
+ HighsModelStatusUNBOUNDED "HighsModelStatus::kUnbounded"
34
+ HighsModelStatusREACHED_DUAL_OBJECTIVE_VALUE_UPPER_BOUND "HighsModelStatus::kObjectiveBound"
35
+ HighsModelStatusREACHED_OBJECTIVE_TARGET "HighsModelStatus::kObjectiveTarget"
36
+ HighsModelStatusREACHED_TIME_LIMIT "HighsModelStatus::kTimeLimit"
37
+ HighsModelStatusREACHED_ITERATION_LIMIT "HighsModelStatus::kIterationLimit"
38
+ HighsModelStatusUNKNOWN "HighsModelStatus::kUnknown"
39
+ HighsModelStatusHIGHS_MODEL_STATUS_MIN "HighsModelStatus::kMin" = HighsModelStatusNOTSET
40
+ HighsModelStatusHIGHS_MODEL_STATUS_MAX "HighsModelStatus::kMax" = HighsModelStatusUNKNOWN
41
+
42
+ cdef enum HighsBasisStatus:
43
+ HighsBasisStatusLOWER "HighsBasisStatus::kLower" = 0, # (slack) variable is at its lower bound [including fixed variables]
44
+ HighsBasisStatusBASIC "HighsBasisStatus::kBasic" # (slack) variable is basic
45
+ HighsBasisStatusUPPER "HighsBasisStatus::kUpper" # (slack) variable is at its upper bound
46
+ HighsBasisStatusZERO "HighsBasisStatus::kZero" # free variable is non-basic and set to zero
47
+ HighsBasisStatusNONBASIC "HighsBasisStatus::kNonbasic" # nonbasic with no specific bound information - useful for users and postsolve
48
+
49
+ cdef enum SolverOption:
50
+ SOLVER_OPTION_SIMPLEX "SolverOption::SOLVER_OPTION_SIMPLEX" = -1
51
+ SOLVER_OPTION_CHOOSE "SolverOption::SOLVER_OPTION_CHOOSE"
52
+ SOLVER_OPTION_IPM "SolverOption::SOLVER_OPTION_IPM"
53
+
54
+ cdef enum PrimalDualStatus:
55
+ PrimalDualStatusSTATUS_NOT_SET "PrimalDualStatus::STATUS_NOT_SET" = -1
56
+ PrimalDualStatusSTATUS_MIN "PrimalDualStatus::STATUS_MIN" = PrimalDualStatusSTATUS_NOT_SET
57
+ PrimalDualStatusSTATUS_NO_SOLUTION "PrimalDualStatus::STATUS_NO_SOLUTION"
58
+ PrimalDualStatusSTATUS_UNKNOWN "PrimalDualStatus::STATUS_UNKNOWN"
59
+ PrimalDualStatusSTATUS_INFEASIBLE_POINT "PrimalDualStatus::STATUS_INFEASIBLE_POINT"
60
+ PrimalDualStatusSTATUS_FEASIBLE_POINT "PrimalDualStatus::STATUS_FEASIBLE_POINT"
61
+ PrimalDualStatusSTATUS_MAX "PrimalDualStatus::STATUS_MAX" = PrimalDualStatusSTATUS_FEASIBLE_POINT
62
+
63
+ cdef enum HighsOptionType:
64
+ HighsOptionTypeBOOL "HighsOptionType::kBool" = 0
65
+ HighsOptionTypeINT "HighsOptionType::kInt"
66
+ HighsOptionTypeDOUBLE "HighsOptionType::kDouble"
67
+ HighsOptionTypeSTRING "HighsOptionType::kString"
68
+
69
+ # workaround for lack of enum class support in Cython < 3.x
70
+ # cdef enum class ObjSense(int):
71
+ # ObjSenseMINIMIZE "ObjSense::kMinimize" = 1
72
+ # ObjSenseMAXIMIZE "ObjSense::kMaximize" = -1
73
+
74
+ cdef cppclass ObjSense:
75
+ pass
76
+
77
+ cdef ObjSense ObjSenseMINIMIZE "ObjSense::kMinimize"
78
+ cdef ObjSense ObjSenseMAXIMIZE "ObjSense::kMaximize"
79
+
80
+ # cdef enum class MatrixFormat(int):
81
+ # MatrixFormatkColwise "MatrixFormat::kColwise" = 1
82
+ # MatrixFormatkRowwise "MatrixFormat::kRowwise"
83
+ # MatrixFormatkRowwisePartitioned "MatrixFormat::kRowwisePartitioned"
84
+
85
+ cdef cppclass MatrixFormat:
86
+ pass
87
+
88
+ cdef MatrixFormat MatrixFormatkColwise "MatrixFormat::kColwise"
89
+ cdef MatrixFormat MatrixFormatkRowwise "MatrixFormat::kRowwise"
90
+ cdef MatrixFormat MatrixFormatkRowwisePartitioned "MatrixFormat::kRowwisePartitioned"
91
+
92
+ # cdef enum class HighsVarType(int):
93
+ # kContinuous "HighsVarType::kContinuous"
94
+ # kInteger "HighsVarType::kInteger"
95
+ # kSemiContinuous "HighsVarType::kSemiContinuous"
96
+ # kSemiInteger "HighsVarType::kSemiInteger"
97
+ # kImplicitInteger "HighsVarType::kImplicitInteger"
98
+
99
+ cdef cppclass HighsVarType:
100
+ pass
101
+
102
+ cdef HighsVarType kContinuous "HighsVarType::kContinuous"
103
+ cdef HighsVarType kInteger "HighsVarType::kInteger"
104
+ cdef HighsVarType kSemiContinuous "HighsVarType::kSemiContinuous"
105
+ cdef HighsVarType kSemiInteger "HighsVarType::kSemiInteger"
106
+ cdef HighsVarType kImplicitInteger "HighsVarType::kImplicitInteger"
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/Highs.pxd ADDED
@@ -0,0 +1,56 @@
1
+ # cython: language_level=3
2
+
3
+ from libc.stdio cimport FILE
4
+
5
+ from libcpp cimport bool
6
+ from libcpp.string cimport string
7
+
8
+ from .HighsStatus cimport HighsStatus
9
+ from .HighsOptions cimport HighsOptions
10
+ from .HighsInfo cimport HighsInfo
11
+ from .HighsLp cimport (
12
+ HighsLp,
13
+ HighsSolution,
14
+ HighsBasis,
15
+ ObjSense,
16
+ )
17
+ from .HConst cimport HighsModelStatus
18
+
19
+ cdef extern from "Highs.h":
20
+ # From HiGHS/src/Highs.h
21
+ cdef cppclass Highs:
22
+ HighsStatus passHighsOptions(const HighsOptions& options)
23
+ HighsStatus passModel(const HighsLp& lp)
24
+ HighsStatus run()
25
+ HighsStatus setHighsLogfile(FILE* logfile)
26
+ HighsStatus setHighsOutput(FILE* output)
27
+ HighsStatus writeHighsOptions(const string filename, const bool report_only_non_default_values = true)
28
+
29
+ # split up for cython below
30
+ #const HighsModelStatus& getModelStatus(const bool scaled_model = False) const
31
+ const HighsModelStatus & getModelStatus() const
32
+
33
+ const HighsInfo& getHighsInfo "getInfo" () const
34
+ string modelStatusToString(const HighsModelStatus model_status) const
35
+ #HighsStatus getHighsInfoValue(const string& info, int& value)
36
+ HighsStatus getHighsInfoValue(const string& info, double& value) const
37
+ const HighsOptions& getHighsOptions() const
38
+
39
+ const HighsLp& getLp() const
40
+
41
+ HighsStatus writeSolution(const string filename, const bool pretty) const
42
+
43
+ HighsStatus setBasis()
44
+ const HighsSolution& getSolution() const
45
+ const HighsBasis& getBasis() const
46
+
47
+ bool changeObjectiveSense(const ObjSense sense)
48
+
49
+ HighsStatus setHighsOptionValueBool "setOptionValue" (const string & option, const bool value)
50
+ HighsStatus setHighsOptionValueInt "setOptionValue" (const string & option, const int value)
51
+ HighsStatus setHighsOptionValueStr "setOptionValue" (const string & option, const string & value)
52
+ HighsStatus setHighsOptionValueDbl "setOptionValue" (const string & option, const double value)
53
+
54
+ string primalDualStatusToString(const int primal_dual_status)
55
+
56
+ void resetGlobalScheduler(bool blocking)
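Together with the HConst declarations, this is enough to drive a solve from Cython. A minimal sketch, assuming a .pyx module next to these .pxd files and an LP already loaded via passModel; it uses only members declared above, and "message_level" is taken from the HighsOptions fields later in this commit:

    # hypothetical driver code
    from .Highs cimport Highs
    from .HighsStatus cimport HighsStatus, HighsStatusOK
    from .HConst cimport HighsModelStatus

    cdef int run_and_report(Highs* highs) except -1:
        cdef HighsModelStatus status
        cdef bytes msg
        highs.setHighsOptionValueInt(b"message_level", 0)  # quiet the solver
        if highs.run() != HighsStatusOK:
            raise RuntimeError("HiGHS run() failed")
        status = highs.getModelStatus()
        msg = highs.modelStatusToString(status)
        print(msg.decode())
        return 0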
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsIO.pxd ADDED
@@ -0,0 +1,20 @@
1
+ # cython: language_level=3
2
+
3
+
4
+ cdef extern from "HighsIO.h" nogil:
5
+ # workaround for lack of enum class support in Cython < 3.x
6
+ # cdef enum class HighsLogType(int):
7
+ # kInfo "HighsLogType::kInfo" = 1
8
+ # kDetailed "HighsLogType::kDetailed"
9
+ # kVerbose "HighsLogType::kVerbose"
10
+ # kWarning "HighsLogType::kWarning"
11
+ # kError "HighsLogType::kError"
12
+
13
+ cdef cppclass HighsLogType:
14
+ pass
15
+
16
+ cdef HighsLogType kInfo "HighsLogType::kInfo"
17
+ cdef HighsLogType kDetailed "HighsLogType::kDetailed"
18
+ cdef HighsLogType kVerbose "HighsLogType::kVerbose"
19
+ cdef HighsLogType kWarning "HighsLogType::kWarning"
20
+ cdef HighsLogType kError "HighsLogType::kError"
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsInfo.pxd ADDED
@@ -0,0 +1,22 @@
1
+ # cython: language_level=3
2
+
3
+ cdef extern from "HighsInfo.h" nogil:
4
+ # From HiGHS/src/lp_data/HighsInfo.h
5
+ cdef cppclass HighsInfo:
6
+ # Inherited from HighsInfoStruct:
7
+ int mip_node_count
8
+ int simplex_iteration_count
9
+ int ipm_iteration_count
10
+ int crossover_iteration_count
11
+ int primal_solution_status
12
+ int dual_solution_status
13
+ int basis_validity
14
+ double objective_function_value
15
+ double mip_dual_bound
16
+ double mip_gap
17
+ int num_primal_infeasibilities
18
+ double max_primal_infeasibility
19
+ double sum_primal_infeasibilities
20
+ int num_dual_infeasibilities
21
+ double max_dual_infeasibility
22
+ double sum_dual_infeasibilities
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLpUtils.pxd ADDED
@@ -0,0 +1,9 @@
1
+ # cython: language_level=3
2
+
3
+ from .HighsStatus cimport HighsStatus
4
+ from .HighsLp cimport HighsLp
5
+ from .HighsOptions cimport HighsOptions
6
+
7
+ cdef extern from "HighsLpUtils.h" nogil:
8
+ # From HiGHS/src/lp_data/HighsLpUtils.h
9
+ HighsStatus assessLp(HighsLp& lp, const HighsOptions& options)
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsModelUtils.pxd ADDED
@@ -0,0 +1,10 @@
1
+ # cython: language_level=3
2
+
3
+ from libcpp.string cimport string
4
+
5
+ from .HConst cimport HighsModelStatus
6
+
7
+ cdef extern from "HighsModelUtils.h" nogil:
8
+ # From HiGHS/src/lp_data/HighsModelUtils.h
9
+ string utilHighsModelStatusToString(const HighsModelStatus model_status)
10
+ string utilBasisStatusToString(const int primal_dual_status)
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsOptions.pxd ADDED
@@ -0,0 +1,110 @@
1
+ # cython: language_level=3
2
+
3
+ from libc.stdio cimport FILE
4
+
5
+ from libcpp cimport bool
6
+ from libcpp.string cimport string
7
+ from libcpp.vector cimport vector
8
+
9
+ from .HConst cimport HighsOptionType
10
+
11
+ cdef extern from "HighsOptions.h" nogil:
12
+
13
+ cdef cppclass OptionRecord:
14
+ HighsOptionType type
15
+ string name
16
+ string description
17
+ bool advanced
18
+
19
+ cdef cppclass OptionRecordBool(OptionRecord):
20
+ bool* value
21
+ bool default_value
22
+
23
+ cdef cppclass OptionRecordInt(OptionRecord):
24
+ int* value
25
+ int lower_bound
26
+ int default_value
27
+ int upper_bound
28
+
29
+ cdef cppclass OptionRecordDouble(OptionRecord):
30
+ double* value
31
+ double lower_bound
32
+ double default_value
33
+ double upper_bound
34
+
35
+ cdef cppclass OptionRecordString(OptionRecord):
36
+ string* value
37
+ string default_value
38
+
39
+ cdef cppclass HighsOptions:
40
+ # From HighsOptionsStruct:
41
+
42
+ # Options read from the command line
43
+ string model_file
44
+ string presolve
45
+ string solver
46
+ string parallel
47
+ double time_limit
48
+ string options_file
49
+
50
+ # Options read from the file
51
+ double infinite_cost
52
+ double infinite_bound
53
+ double small_matrix_value
54
+ double large_matrix_value
55
+ double primal_feasibility_tolerance
56
+ double dual_feasibility_tolerance
57
+ double ipm_optimality_tolerance
58
+ double dual_objective_value_upper_bound
59
+ int highs_debug_level
60
+ int simplex_strategy
61
+ int simplex_scale_strategy
62
+ int simplex_crash_strategy
63
+ int simplex_dual_edge_weight_strategy
64
+ int simplex_primal_edge_weight_strategy
65
+ int simplex_iteration_limit
66
+ int simplex_update_limit
67
+ int ipm_iteration_limit
68
+ int highs_min_threads
69
+ int highs_max_threads
70
+ int message_level
71
+ string solution_file
72
+ bool write_solution_to_file
73
+ bool write_solution_pretty
74
+
75
+ # Advanced options
76
+ bool run_crossover
77
+ bool mps_parser_type_free
78
+ int keep_n_rows
79
+ int allowed_simplex_matrix_scale_factor
80
+ int allowed_simplex_cost_scale_factor
81
+ int simplex_dualise_strategy
82
+ int simplex_permute_strategy
83
+ int dual_simplex_cleanup_strategy
84
+ int simplex_price_strategy
85
+ int dual_chuzc_sort_strategy
86
+ bool simplex_initial_condition_check
87
+ double simplex_initial_condition_tolerance
88
+ double dual_steepest_edge_weight_log_error_threshhold
89
+ double dual_simplex_cost_perturbation_multiplier
90
+ double start_crossover_tolerance
91
+ bool less_infeasible_DSE_check
92
+ bool less_infeasible_DSE_choose_row
93
+ bool use_original_HFactor_logic
94
+
95
+ # Options for MIP solver
96
+ int mip_max_nodes
97
+ int mip_report_level
98
+
99
+ # Switch for MIP solver
100
+ bool mip
101
+
102
+ # Options for HighsPrintMessage and HighsLogMessage
103
+ FILE* logfile
104
+ FILE* output
105
+ int message_level
106
+ string solution_file
107
+ bool write_solution_to_file
108
+ bool write_solution_pretty
109
+
110
+ vector[OptionRecord*] records
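Because ``records`` stores type-erased OptionRecord pointers, consumers dispatch on the ``type`` tag and downcast to the matching subclass. A hedged Cython sketch, assuming the enum members cimport from .HConst as declared earlier:

    from .HighsOptions cimport HighsOptions, OptionRecord, OptionRecordInt
    from .HConst cimport HighsOptionType, HighsOptionTypeINT

    cdef void list_int_options(HighsOptions& opts):
        cdef OptionRecord* rec
        cdef OptionRecordInt* irec
        for rec in opts.records:
            if rec.type == HighsOptionTypeINT:
                irec = <OptionRecordInt*>rec  # safe: tag checked above
                print(irec.name, irec.default_value)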
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsRuntimeOptions.pxd ADDED
@@ -0,0 +1,9 @@
1
+ # cython: language_level=3
2
+
3
+ from libcpp cimport bool
4
+
5
+ from .HighsOptions cimport HighsOptions
6
+
7
+ cdef extern from "HighsRuntimeOptions.h" nogil:
8
+ # From HiGHS/src/lp_data/HighsRuntimeOptions.h
9
+ bool loadOptions(int argc, char** argv, HighsOptions& options)
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsStatus.pxd ADDED
@@ -0,0 +1,12 @@
1
+ # cython: language_level=3
2
+
3
+ from libcpp.string cimport string
4
+
5
+ cdef extern from "HighsStatus.h" nogil:
6
+ ctypedef enum HighsStatus:
7
+ HighsStatusError "HighsStatus::kError" = -1
8
+ HighsStatusOK "HighsStatus::kOk" = 0
9
+ HighsStatusWarning "HighsStatus::kWarning" = 1
10
+
11
+
12
+ string highsStatusToString(HighsStatus status)
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/SimplexConst.pxd ADDED
@@ -0,0 +1,95 @@
1
+ # cython: language_level=3
2
+
3
+ from libcpp cimport bool
4
+
5
+ cdef extern from "SimplexConst.h" nogil:
6
+
7
+ cdef enum SimplexAlgorithm:
8
+ PRIMAL "SimplexAlgorithm::kPrimal" = 0
9
+ DUAL "SimplexAlgorithm::kDual"
10
+
11
+ cdef enum SimplexStrategy:
12
+ SIMPLEX_STRATEGY_MIN "SimplexStrategy::kSimplexStrategyMin" = 0
13
+ SIMPLEX_STRATEGY_CHOOSE "SimplexStrategy::kSimplexStrategyChoose" = SIMPLEX_STRATEGY_MIN
14
+ SIMPLEX_STRATEGY_DUAL "SimplexStrategy::kSimplexStrategyDual"
15
+ SIMPLEX_STRATEGY_DUAL_PLAIN "SimplexStrategy::kSimplexStrategyDualPlain" = SIMPLEX_STRATEGY_DUAL
16
+ SIMPLEX_STRATEGY_DUAL_TASKS "SimplexStrategy::kSimplexStrategyDualTasks"
17
+ SIMPLEX_STRATEGY_DUAL_MULTI "SimplexStrategy::kSimplexStrategyDualMulti"
18
+ SIMPLEX_STRATEGY_PRIMAL "SimplexStrategy::kSimplexStrategyPrimal"
19
+ SIMPLEX_STRATEGY_MAX "SimplexStrategy::kSimplexStrategyMax" = SIMPLEX_STRATEGY_PRIMAL
20
+ SIMPLEX_STRATEGY_NUM "SimplexStrategy::kSimplexStrategyNum"
21
+
22
+ cdef enum SimplexCrashStrategy:
23
+ SIMPLEX_CRASH_STRATEGY_MIN "SimplexCrashStrategy::kSimplexCrashStrategyMin" = 0
24
+ SIMPLEX_CRASH_STRATEGY_OFF "SimplexCrashStrategy::kSimplexCrashStrategyOff" = SIMPLEX_CRASH_STRATEGY_MIN
25
+ SIMPLEX_CRASH_STRATEGY_LTSSF_K "SimplexCrashStrategy::kSimplexCrashStrategyLtssfK"
26
+ SIMPLEX_CRASH_STRATEGY_LTSSF "SimplexCrashStrategy::kSimplexCrashStrategyLtssf" = SIMPLEX_CRASH_STRATEGY_LTSSF_K
27
+ SIMPLEX_CRASH_STRATEGY_BIXBY "SimplexCrashStrategy::kSimplexCrashStrategyBixby"
28
+ SIMPLEX_CRASH_STRATEGY_LTSSF_PRI "SimplexCrashStrategy::kSimplexCrashStrategyLtssfPri"
29
+ SIMPLEX_CRASH_STRATEGY_LTSF_K "SimplexCrashStrategy::kSimplexCrashStrategyLtsfK"
30
+ SIMPLEX_CRASH_STRATEGY_LTSF_PRI "SimplexCrashStrategy::kSimplexCrashStrategyLtsfPri"
31
+ SIMPLEX_CRASH_STRATEGY_LTSF "SimplexCrashStrategy::kSimplexCrashStrategyLtsf"
32
+ SIMPLEX_CRASH_STRATEGY_BIXBY_NO_NONZERO_COL_COSTS "SimplexCrashStrategy::kSimplexCrashStrategyBixbyNoNonzeroColCosts"
33
+ SIMPLEX_CRASH_STRATEGY_BASIC "SimplexCrashStrategy::kSimplexCrashStrategyBasic"
34
+ SIMPLEX_CRASH_STRATEGY_TEST_SING "SimplexCrashStrategy::kSimplexCrashStrategyTestSing"
35
+ SIMPLEX_CRASH_STRATEGY_MAX "SimplexCrashStrategy::kSimplexCrashStrategyMax" = SIMPLEX_CRASH_STRATEGY_TEST_SING
36
+
37
+ cdef enum SimplexEdgeWeightStrategy:
38
+ SIMPLEX_EDGE_WEIGHT_STRATEGY_MIN "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyMin" = -1
39
+ SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyChoose" = SIMPLEX_EDGE_WEIGHT_STRATEGY_MIN
40
+ SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyDantzig"
41
+ SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyDevex"
42
+ SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategySteepestEdge"
43
+ SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE_UNIT_INITIAL "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategySteepestEdgeUnitInitial"
44
+ SIMPLEX_EDGE_WEIGHT_STRATEGY_MAX "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyMax" = SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE_UNIT_INITIAL
45
+
46
+ cdef enum SimplexPriceStrategy:
47
+ SIMPLEX_PRICE_STRATEGY_MIN = 0
48
+ SIMPLEX_PRICE_STRATEGY_COL = SIMPLEX_PRICE_STRATEGY_MIN
49
+ SIMPLEX_PRICE_STRATEGY_ROW
50
+ SIMPLEX_PRICE_STRATEGY_ROW_SWITCH
51
+ SIMPLEX_PRICE_STRATEGY_ROW_SWITCH_COL_SWITCH
52
+ SIMPLEX_PRICE_STRATEGY_MAX = SIMPLEX_PRICE_STRATEGY_ROW_SWITCH_COL_SWITCH
53
+
54
+ cdef enum SimplexDualChuzcStrategy:
55
+ SIMPLEX_DUAL_CHUZC_STRATEGY_MIN = 0
56
+ SIMPLEX_DUAL_CHUZC_STRATEGY_CHOOSE = SIMPLEX_DUAL_CHUZC_STRATEGY_MIN
57
+ SIMPLEX_DUAL_CHUZC_STRATEGY_QUAD
58
+ SIMPLEX_DUAL_CHUZC_STRATEGY_HEAP
59
+ SIMPLEX_DUAL_CHUZC_STRATEGY_BOTH
60
+ SIMPLEX_DUAL_CHUZC_STRATEGY_MAX = SIMPLEX_DUAL_CHUZC_STRATEGY_BOTH
61
+
62
+ cdef enum InvertHint:
63
+ INVERT_HINT_NO = 0
64
+ INVERT_HINT_UPDATE_LIMIT_REACHED
65
+ INVERT_HINT_SYNTHETIC_CLOCK_SAYS_INVERT
66
+ INVERT_HINT_POSSIBLY_OPTIMAL
67
+ INVERT_HINT_POSSIBLY_PRIMAL_UNBOUNDED
68
+ INVERT_HINT_POSSIBLY_DUAL_UNBOUNDED
69
+ INVERT_HINT_POSSIBLY_SINGULAR_BASIS
70
+ INVERT_HINT_PRIMAL_INFEASIBLE_IN_PRIMAL_SIMPLEX
71
+ INVERT_HINT_CHOOSE_COLUMN_FAIL
72
+ INVERT_HINT_Count
73
+
74
+ cdef enum DualEdgeWeightMode:
75
+ DANTZIG "DualEdgeWeightMode::DANTZIG" = 0
76
+ DEVEX "DualEdgeWeightMode::DEVEX"
77
+ STEEPEST_EDGE "DualEdgeWeightMode::STEEPEST_EDGE"
78
+ Count "DualEdgeWeightMode::Count"
79
+
80
+ cdef enum PriceMode:
81
+ ROW "PriceMode::ROW" = 0
82
+ COL "PriceMode::COL"
83
+
84
+ const int PARALLEL_THREADS_DEFAULT
85
+ const int DUAL_TASKS_MIN_THREADS
86
+ const int DUAL_MULTI_MIN_THREADS
87
+
88
+ const bool invert_if_row_out_negative
89
+
90
+ const int NONBASIC_FLAG_TRUE
91
+ const int NONBASIC_FLAG_FALSE
92
+
93
+ const int NONBASIC_MOVE_UP
94
+ const int NONBASIC_MOVE_DN
95
+ const int NONBASIC_MOVE_ZE
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/highs_c_api.pxd ADDED
@@ -0,0 +1,7 @@
1
+ # cython: language_level=3
2
+
3
+ cdef extern from "highs_c_api.h" nogil:
4
+ int Highs_passLp(void* highs, int numcol, int numrow, int numnz,
5
+ double* colcost, double* collower, double* colupper,
6
+ double* rowlower, double* rowupper,
7
+ int* astart, int* aindex, double* avalue)
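Highs_passLp takes the constraint matrix in compressed sparse column (CSC) layout: ``astart`` holds each column's starting offset, ``aindex`` the row index of each nonzero, and ``avalue`` the nonzero values. A sketch of producing matching arrays on the Python side with scipy.sparse (illustrative only; the glue that actually calls the C API is not shown):

    import numpy as np
    from scipy.sparse import csc_matrix

    # 2x2 constraint matrix [[1, 1], [2, 0]] in CSC form
    A = csc_matrix(np.array([[1.0, 1.0], [2.0, 0.0]]))
    astart = A.indptr.astype(np.int32)   # column start offsets, length numcol + 1
    aindex = A.indices.astype(np.int32)  # row indices of the nonzeros
    avalue = A.data                      # nonzero values, length numnz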
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_isotonic.py ADDED
@@ -0,0 +1,158 @@
1
+ from __future__ import annotations
2
+ from typing import TYPE_CHECKING
3
+
4
+ import numpy as np
5
+
6
+ from ._optimize import OptimizeResult
7
+ from ._pava_pybind import pava
8
+
9
+ if TYPE_CHECKING:
10
+ import numpy.typing as npt
11
+
12
+
13
+ __all__ = ["isotonic_regression"]
14
+
15
+
16
+ def isotonic_regression(
17
+ y: npt.ArrayLike,
18
+ *,
19
+ weights: npt.ArrayLike | None = None,
20
+ increasing: bool = True,
21
+ ) -> OptimizeResult:
22
+ r"""Nonparametric isotonic regression.
23
+
24
+ A (not strictly) monotonically increasing array `x` with the same length
25
+ as `y` is calculated by the pool adjacent violators algorithm (PAVA), see
26
+ [1]_. See the Notes section for more details.
27
+
28
+ Parameters
29
+ ----------
30
+ y : (N,) array_like
31
+ Response variable.
32
+ weights : (N,) array_like or None
33
+ Case weights.
34
+ increasing : bool
35
+ If True, fit monotonic increasing, i.e. isotonic, regression.
36
+ If False, fit a monotonic decreasing, i.e. antitonic, regression.
37
+ Default is True.
38
+
39
+ Returns
40
+ -------
41
+ res : OptimizeResult
42
+ The optimization result represented as a ``OptimizeResult`` object.
43
+ Important attributes are:
44
+
45
+ - ``x``: The isotonic regression solution, i.e. an increasing (or
46
+ decreasing) array of the same length than y, with elements in the
47
+ range from min(y) to max(y).
48
+ - ``weights`` : Array with the sum of case weights for each block
49
+ (or pool) B.
50
+ - ``blocks``: Array of length B+1 with the indices of the start
51
+ positions of each block (or pool) B. The j-th block is given by
52
+ ``x[blocks[j]:blocks[j+1]]`` for which all values are the same.
53
+
54
+ Notes
55
+ -----
56
+ Given data :math:`y` and case weights :math:`w`, the isotonic regression
57
+ solves the following optimization problem:
58
+
59
+ .. math::
60
+
61
+ \operatorname{argmin}_{x_i} \sum_i w_i (y_i - x_i)^2 \quad
62
+ \text{subject to } x_i \leq x_j \text{ whenever } i \leq j \,.
63
+
64
+ For every input value :math:`y_i`, it generates a value :math:`x_i` such
65
+ that :math:`x` is increasing (but not strictly), i.e.
66
+ :math:`x_i \leq x_{i+1}`. This is accomplished by the PAVA.
67
+ The solution consists of pools or blocks, i.e. neighboring elements of
68
+ :math:`x`, e.g. :math:`x_i` and :math:`x_{i+1}`, that all have the same
69
+ value.
70
+
71
+ Most interestingly, the solution stays the same if the squared loss is
72
+ replaced by the wide class of Bregman functions which are the unique
73
+ class of strictly consistent scoring functions for the mean, see [2]_
74
+ and references therein.
75
+
76
+ The implemented version of PAVA according to [1]_ has a computational
77
+ complexity of O(N) with input size N.
78
+
79
+ References
80
+ ----------
81
+ .. [1] Busing, F. M. T. A. (2022).
82
+ Monotone Regression: A Simple and Fast O(n) PAVA Implementation.
83
+ Journal of Statistical Software, Code Snippets, 102(1), 1-25.
84
+ :doi:`10.18637/jss.v102.c01`
85
+ .. [2] Jordan, A.I., Mühlemann, A. & Ziegel, J.F.
86
+ Characterizing the optimal solutions to the isotonic regression
87
+ problem for identifiable functionals.
88
+ Ann Inst Stat Math 74, 489-514 (2022).
89
+ :doi:`10.1007/s10463-021-00808-0`
90
+
91
+ Examples
92
+ --------
93
+ This example demonstrates that ``isotonic_regression`` really solves a
94
+ constrained optimization problem.
95
+
96
+ >>> import numpy as np
97
+ >>> from scipy.optimize import isotonic_regression, minimize
98
+ >>> y = [1.5, 1.0, 4.0, 6.0, 5.7, 5.0, 7.8, 9.0, 7.5, 9.5, 9.0]
99
+ >>> def objective(yhat, y):
100
+ ... return np.sum((yhat - y)**2)
101
+ >>> def constraint(yhat, y):
102
+ ... # This is for a monotonically increasing regression.
103
+ ... return np.diff(yhat)
104
+ >>> result = minimize(objective, x0=y, args=(y,),
105
+ ... constraints=[{'type': 'ineq',
106
+ ... 'fun': lambda x: constraint(x, y)}])
107
+ >>> result.x
108
+ array([1.25 , 1.25 , 4. , 5.56666667, 5.56666667,
109
+ 5.56666667, 7.8 , 8.25 , 8.25 , 9.25 ,
110
+ 9.25 ])
111
+ >>> result = isotonic_regression(y)
112
+ >>> result.x
113
+ array([1.25 , 1.25 , 4. , 5.56666667, 5.56666667,
114
+ 5.56666667, 7.8 , 8.25 , 8.25 , 9.25 ,
115
+ 9.25 ])
116
+
117
+ The big advantage of ``isotonic_regression`` compared to calling
118
+ ``minimize`` is that it is more user friendly, i.e. one does not need to
119
+ define objective and constraint functions, and that it is orders of
120
+ magnitudes faster. On commodity hardware (in 2023), for normal distributed
121
+ input y of length 1000, the minimizer takes about 4 seconds, while
122
+ ``isotonic_regression`` takes about 200 microseconds.
123
+ """
124
+ yarr = np.asarray(y) # Check yarr.ndim == 1 is implicit (pybind11) in pava.
125
+ if weights is None:
126
+ warr = np.ones_like(yarr)
127
+ else:
128
+ warr = np.asarray(weights)
129
+
130
+ if not (yarr.ndim == warr.ndim == 1 and yarr.shape[0] == warr.shape[0]):
131
+ raise ValueError(
132
+ "Input arrays y and w must have one dimension of equal length."
133
+ )
134
+ if np.any(warr <= 0):
135
+ raise ValueError("Weights w must be strictly positive.")
136
+
137
+ order = slice(None) if increasing else slice(None, None, -1)
138
+ x = np.array(yarr[order], order="C", dtype=np.float64, copy=True)
139
+ wx = np.array(warr[order], order="C", dtype=np.float64, copy=True)
140
+ n = x.shape[0]
141
+ r = np.full(shape=n + 1, fill_value=-1, dtype=np.intp)
142
+ x, wx, r, b = pava(x, wx, r)
143
+ # Now that we know the number of blocks b, we only keep the relevant part
144
+ # of r and wx.
145
+ # As information: Due to the pava implementation, after the last block
146
+ # index, there might be smaller numbers appended to r, e.g.
147
+ # r = [0, 10, 8, 7] which in the end should be r = [0, 10].
148
+ r = r[:b + 1]
149
+ wx = wx[:b]
150
+ if not increasing:
151
+ x = x[::-1]
152
+ wx = wx[::-1]
153
+ r = r[-1] - r[::-1]
154
+ return OptimizeResult(
155
+ x=x,
156
+ weights=wx,
157
+ blocks=r,
158
+ )
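A short usage sketch tying the three returned fields together; the commented values are what PAVA should produce for this input (the first two points are pooled):

    from scipy.optimize import isotonic_regression

    res = isotonic_regression([2.0, 1.0, 3.0, 4.0])
    print(res.x)        # [1.5 1.5 3.  4. ]
    print(res.blocks)   # [0 2 3 4]  -> block j is x[blocks[j]:blocks[j+1]]
    print(res.weights)  # [2. 1. 1.] -> case-weight sum per block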
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog_doc.py ADDED
@@ -0,0 +1,1434 @@
1
+ """
2
+ Created on Sat Aug 22 19:49:17 2020
3
+
4
+ @author: matth
5
+ """
6
+
7
+
8
+ def _linprog_highs_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
9
+ bounds=None, method='highs', callback=None,
10
+ maxiter=None, disp=False, presolve=True,
11
+ time_limit=None,
12
+ dual_feasibility_tolerance=None,
13
+ primal_feasibility_tolerance=None,
14
+ ipm_optimality_tolerance=None,
15
+ simplex_dual_edge_weight_strategy=None,
16
+ mip_rel_gap=None,
17
+ **unknown_options):
18
+ r"""
19
+ Linear programming: minimize a linear objective function subject to linear
20
+ equality and inequality constraints using one of the HiGHS solvers.
21
+
22
+ Linear programming solves problems of the following form:
23
+
24
+ .. math::
25
+
26
+ \min_x \ & c^T x \\
27
+ \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
28
+ & A_{eq} x = b_{eq},\\
29
+ & l \leq x \leq u ,
30
+
31
+ where :math:`x` is a vector of decision variables; :math:`c`,
32
+ :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
33
+ :math:`A_{ub}` and :math:`A_{eq}` are matrices.
34
+
35
+ Alternatively, that's:
36
+
37
+ minimize::
38
+
39
+ c @ x
40
+
41
+ such that::
42
+
43
+ A_ub @ x <= b_ub
44
+ A_eq @ x == b_eq
45
+ lb <= x <= ub
46
+
47
+ Note that by default ``lb = 0`` and ``ub = None`` unless specified with
48
+ ``bounds``.
49
+
50
+ Parameters
51
+ ----------
52
+ c : 1-D array
53
+ The coefficients of the linear objective function to be minimized.
54
+ A_ub : 2-D array, optional
55
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
56
+ coefficients of a linear inequality constraint on ``x``.
57
+ b_ub : 1-D array, optional
58
+ The inequality constraint vector. Each element represents an
59
+ upper bound on the corresponding value of ``A_ub @ x``.
60
+ A_eq : 2-D array, optional
61
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
62
+ coefficients of a linear equality constraint on ``x``.
63
+ b_eq : 1-D array, optional
64
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
65
+ the corresponding element of ``b_eq``.
66
+ bounds : sequence, optional
67
+ A sequence of ``(min, max)`` pairs for each element in ``x``, defining
68
+ the minimum and maximum values of that decision variable. Use ``None``
69
+ to indicate that there is no bound. By default, bounds are
70
+ ``(0, None)`` (all decision variables are non-negative).
71
+ If a single tuple ``(min, max)`` is provided, then ``min`` and
72
+ ``max`` will serve as bounds for all decision variables.
73
+ method : str
74
+
75
+ This is the method-specific documentation for 'highs', which chooses
76
+ automatically between
77
+ :ref:`'highs-ds' <optimize.linprog-highs-ds>` and
78
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
79
+ :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
80
+ :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
81
+ :ref:`'simplex' <optimize.linprog-simplex>` (legacy)
82
+ are also available.
83
+ integrality : 1-D array or int, optional
84
+ Indicates the type of integrality constraint on each decision variable.
85
+
86
+ ``0`` : Continuous variable; no integrality constraint.
87
+
88
+ ``1`` : Integer variable; decision variable must be an integer
89
+ within `bounds`.
90
+
91
+ ``2`` : Semi-continuous variable; decision variable must be within
92
+ `bounds` or take value ``0``.
93
+
94
+ ``3`` : Semi-integer variable; decision variable must be an integer
95
+ within `bounds` or take value ``0``.
96
+
97
+ By default, all variables are continuous.
98
+
99
+ For mixed integrality constraints, supply an array of shape `c.shape`.
100
+ To infer a constraint on each decision variable from shorter inputs,
101
+ the argument will be broadcasted to `c.shape` using `np.broadcast_to`.
102
+
103
+ This argument is currently used only by the ``'highs'`` method and
104
+ ignored otherwise.
105
+
106
+ Options
107
+ -------
108
+ maxiter : int
109
+ The maximum number of iterations to perform in either phase.
110
+ For :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`, this does not
111
+ include the number of crossover iterations. Default is the largest
112
+ possible value for an ``int`` on the platform.
113
+ disp : bool (default: ``False``)
114
+ Set to ``True`` if indicators of optimization status are to be
115
+ printed to the console during optimization.
116
+ presolve : bool (default: ``True``)
117
+ Presolve attempts to identify trivial infeasibilities,
118
+ identify trivial unboundedness, and simplify the problem before
119
+ sending it to the main solver. It is generally recommended
120
+ to keep the default setting ``True``; set to ``False`` if
121
+ presolve is to be disabled.
122
+ time_limit : float
123
+ The maximum time in seconds allotted to solve the problem;
124
+ default is the largest possible value for a ``double`` on the
125
+ platform.
126
+ dual_feasibility_tolerance : double (default: 1e-07)
127
+ Dual feasibility tolerance for
128
+ :ref:`'highs-ds' <optimize.linprog-highs-ds>`.
129
+ The minimum of this and ``primal_feasibility_tolerance``
130
+ is used for the feasibility tolerance of
131
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
132
+ primal_feasibility_tolerance : double (default: 1e-07)
133
+ Primal feasibility tolerance for
134
+ :ref:`'highs-ds' <optimize.linprog-highs-ds>`.
135
+ The minimum of this and ``dual_feasibility_tolerance``
136
+ is used for the feasibility tolerance of
137
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
138
+ ipm_optimality_tolerance : double (default: ``1e-08``)
139
+ Optimality tolerance for
140
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
141
+ Minimum allowable value is 1e-12.
142
+ simplex_dual_edge_weight_strategy : str (default: None)
143
+ Strategy for simplex dual edge weights. The default, ``None``,
144
+ automatically selects one of the following.
145
+
146
+ ``'dantzig'`` uses Dantzig's original strategy of choosing the most
147
+ negative reduced cost.
148
+
149
+ ``'devex'`` uses the strategy described in [15]_.
150
+
151
+ ``steepest`` uses the exact steepest edge strategy as described in
152
+ [16]_.
153
+
154
+ ``'steepest-devex'`` begins with the exact steepest edge strategy
155
+ until the computation is too costly or inexact and then switches to
156
+ the devex method.
157
+
158
+ Currently, ``None`` always selects ``'steepest-devex'``, but this
159
+ may change as new options become available.
160
+ mip_rel_gap : double (default: None)
161
+ Termination criterion for MIP solver: solver will terminate when the
162
+ gap between the primal objective value and the dual objective bound,
163
+ scaled by the primal objective value, is <= mip_rel_gap.
164
+ unknown_options : dict
165
+ Optional arguments not used by this particular solver. If
166
+ ``unknown_options`` is non-empty, a warning is issued listing
167
+ all unused options.
168
+
169
+ Returns
170
+ -------
171
+ res : OptimizeResult
172
+ A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
173
+
174
+ x : 1D array
175
+ The values of the decision variables that minimizes the
176
+ objective function while satisfying the constraints.
177
+ fun : float
178
+ The optimal value of the objective function ``c @ x``.
179
+ slack : 1D array
180
+ The (nominally positive) values of the slack,
181
+ ``b_ub - A_ub @ x``.
182
+ con : 1D array
183
+ The (nominally zero) residuals of the equality constraints,
184
+ ``b_eq - A_eq @ x``.
185
+ success : bool
186
+ ``True`` when the algorithm succeeds in finding an optimal
187
+ solution.
188
+ status : int
189
+ An integer representing the exit status of the algorithm.
190
+
191
+ ``0`` : Optimization terminated successfully.
192
+
193
+ ``1`` : Iteration or time limit reached.
194
+
195
+ ``2`` : Problem appears to be infeasible.
196
+
197
+ ``3`` : Problem appears to be unbounded.
198
+
199
+ ``4`` : The HiGHS solver ran into a problem.
200
+
201
+ message : str
202
+ A string descriptor of the exit status of the algorithm.
203
+ nit : int
204
+ The total number of iterations performed.
205
+ For the HiGHS simplex method, this includes iterations in all
206
+ phases. For the HiGHS interior-point method, this does not include
207
+ crossover iterations.
208
+ crossover_nit : int
209
+ The number of primal/dual pushes performed during the
210
+ crossover routine for the HiGHS interior-point method.
211
+ This is ``0`` for the HiGHS simplex method.
212
+ ineqlin : OptimizeResult
213
+ Solution and sensitivity information corresponding to the
214
+ inequality constraints, `b_ub`. A dictionary consisting of the
215
+ fields:
216
+
217
+ residual : np.ndarray
218
+ The (nominally positive) values of the slack variables,
219
+ ``b_ub - A_ub @ x``. This quantity is also commonly
220
+ referred to as "slack".
221
+
222
+ marginals : np.ndarray
223
+ The sensitivity (partial derivative) of the objective
224
+ function with respect to the right-hand side of the
225
+ inequality constraints, `b_ub`.
226
+
227
+ eqlin : OptimizeResult
228
+ Solution and sensitivity information corresponding to the
229
+ equality constraints, `b_eq`. A dictionary consisting of the
230
+ fields:
231
+
232
+ residual : np.ndarray
233
+ The (nominally zero) residuals of the equality constraints,
234
+ ``b_eq - A_eq @ x``.
235
+
236
+ marginals : np.ndarray
237
+ The sensitivity (partial derivative) of the objective
238
+ function with respect to the right-hand side of the
239
+ equality constraints, `b_eq`.
240
+
241
+ lower, upper : OptimizeResult
242
+ Solution and sensitivity information corresponding to the
243
+ lower and upper bounds on decision variables, `bounds`.
244
+
245
+ residual : np.ndarray
246
+ The (nominally positive) values of the quantity
247
+ ``x - lb`` (lower) or ``ub - x`` (upper).
248
+
249
+ marginals : np.ndarray
250
+ The sensitivity (partial derivative) of the objective
251
+ function with respect to the lower and upper
252
+ `bounds`.
253
+
254
+ Notes
255
+ -----
256
+
257
+ Method :ref:`'highs-ds' <optimize.linprog-highs-ds>` is a wrapper
258
+ of the C++ high performance dual revised simplex implementation (HSOL)
259
+ [13]_, [14]_. Method :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`
260
+ is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
261
+ **m**\ ethod [13]_; it features a crossover routine, so it is as accurate
262
+ as a simplex solver. Method :ref:`'highs' <optimize.linprog-highs>` chooses
263
+ between the two automatically. For new code involving `linprog`, we
264
+ recommend explicitly choosing one of these three method values instead of
265
+ :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
266
+ :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
267
+ :ref:`'simplex' <optimize.linprog-simplex>` (legacy).
268
+
269
+ The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain
270
+ `marginals`, or partial derivatives of the objective function with respect
271
+ to the right-hand side of each constraint. These partial derivatives are
272
+ also referred to as "Lagrange multipliers", "dual values", and
273
+ "shadow prices". The sign convention of `marginals` is opposite that
274
+ of Lagrange multipliers produced by many nonlinear solvers.
275
+
276
+ References
277
+ ----------
278
+ .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
279
+ "HiGHS - high performance software for linear optimization."
280
+ https://highs.dev/
281
+ .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
282
+ simplex method." Mathematical Programming Computation, 10 (1),
283
+ 119-142, 2018. DOI: 10.1007/s12532-017-0130-5
284
+ .. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code."
285
+ Mathematical programming 5.1 (1973): 1-28.
286
+ .. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge
287
+ simplex algorithm." Mathematical Programming 12.1 (1977): 361-371.
288
+ """
289
+ pass
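The docstring above has no Examples section; a small self-contained illustration of ``method='highs'`` and the result fields it describes, with arbitrarily chosen problem data:

    from scipy.optimize import linprog

    # maximize x0 + 2*x1, i.e. minimize -x0 - 2*x1
    c = [-1.0, -2.0]
    res = linprog(c, A_ub=[[1.0, 1.0]], b_ub=[4.0],   # x0 + x1 <= 4
                  bounds=[(0, 3), (0, 2)], method='highs')
    print(res.status, res.x, res.fun)  # 0 [2. 2.] -6.0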
290
+
291
+
292
+ def _linprog_highs_ds_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
293
+ bounds=None, method='highs-ds', callback=None,
294
+ maxiter=None, disp=False, presolve=True,
295
+ time_limit=None,
296
+ dual_feasibility_tolerance=None,
297
+ primal_feasibility_tolerance=None,
298
+ simplex_dual_edge_weight_strategy=None,
299
+ **unknown_options):
300
+ r"""
301
+ Linear programming: minimize a linear objective function subject to linear
302
+ equality and inequality constraints using the HiGHS dual simplex solver.
303
+
304
+ Linear programming solves problems of the following form:
305
+
306
+ .. math::
307
+
308
+ \min_x \ & c^T x \\
309
+ \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
310
+ & A_{eq} x = b_{eq},\\
311
+ & l \leq x \leq u ,
312
+
313
+ where :math:`x` is a vector of decision variables; :math:`c`,
314
+ :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
315
+ :math:`A_{ub}` and :math:`A_{eq}` are matrices.
316
+
317
+ Alternatively, that's:
318
+
319
+ minimize::
320
+
321
+ c @ x
322
+
323
+ such that::
324
+
325
+ A_ub @ x <= b_ub
326
+ A_eq @ x == b_eq
327
+ lb <= x <= ub
328
+
329
+ Note that by default ``lb = 0`` and ``ub = None`` unless specified with
330
+ ``bounds``.
331
+
332
+ Parameters
333
+ ----------
334
+ c : 1-D array
335
+ The coefficients of the linear objective function to be minimized.
336
+ A_ub : 2-D array, optional
337
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
338
+ coefficients of a linear inequality constraint on ``x``.
339
+ b_ub : 1-D array, optional
340
+ The inequality constraint vector. Each element represents an
341
+ upper bound on the corresponding value of ``A_ub @ x``.
342
+ A_eq : 2-D array, optional
343
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
344
+ coefficients of a linear equality constraint on ``x``.
345
+ b_eq : 1-D array, optional
346
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
347
+ the corresponding element of ``b_eq``.
348
+ bounds : sequence, optional
349
+ A sequence of ``(min, max)`` pairs for each element in ``x``, defining
350
+ the minimum and maximum values of that decision variable. Use ``None``
351
+ to indicate that there is no bound. By default, bounds are
352
+ ``(0, None)`` (all decision variables are non-negative).
353
+ If a single tuple ``(min, max)`` is provided, then ``min`` and
354
+ ``max`` will serve as bounds for all decision variables.
355
+ method : str
356
+
357
+ This is the method-specific documentation for 'highs-ds'.
358
+ :ref:`'highs' <optimize.linprog-highs>`,
359
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
360
+ :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
361
+ :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
362
+ :ref:`'simplex' <optimize.linprog-simplex>` (legacy)
363
+ are also available.
364
+
365
+ Options
366
+ -------
367
+ maxiter : int
368
+ The maximum number of iterations to perform in either phase.
369
+ Default is the largest possible value for an ``int`` on the platform.
370
+ disp : bool (default: ``False``)
371
+ Set to ``True`` if indicators of optimization status are to be
372
+ printed to the console during optimization.
373
+ presolve : bool (default: ``True``)
374
+ Presolve attempts to identify trivial infeasibilities,
375
+ identify trivial unboundedness, and simplify the problem before
376
+ sending it to the main solver. It is generally recommended
377
+ to keep the default setting ``True``; set to ``False`` if
378
+ presolve is to be disabled.
379
+ time_limit : float
380
+ The maximum time in seconds allotted to solve the problem;
381
+ default is the largest possible value for a ``double`` on the
382
+ platform.
383
+ dual_feasibility_tolerance : double (default: 1e-07)
384
+ Dual feasibility tolerance for
385
+ :ref:`'highs-ds' <optimize.linprog-highs-ds>`.
386
+ primal_feasibility_tolerance : double (default: 1e-07)
387
+ Primal feasibility tolerance for
388
+ :ref:`'highs-ds' <optimize.linprog-highs-ds>`.
389
+ simplex_dual_edge_weight_strategy : str (default: None)
390
+ Strategy for simplex dual edge weights. The default, ``None``,
391
+ automatically selects one of the following.
392
+
393
+ ``'dantzig'`` uses Dantzig's original strategy of choosing the most
394
+ negative reduced cost.
395
+
396
+ ``'devex'`` uses the strategy described in [15]_.
397
+
398
+ ``steepest`` uses the exact steepest edge strategy as described in
399
+ [16]_.
400
+
401
+ ``'steepest-devex'`` begins with the exact steepest edge strategy
402
+ until the computation is too costly or inexact and then switches to
403
+ the devex method.
404
+
405
+ Currently, ``None`` always selects ``'steepest-devex'``, but this
406
+ may change as new options become available.
407
+ unknown_options : dict
408
+ Optional arguments not used by this particular solver. If
409
+ ``unknown_options`` is non-empty, a warning is issued listing
410
+ all unused options.
411
+
412
+ Returns
413
+ -------
414
+ res : OptimizeResult
415
+ A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
416
+
417
+ x : 1D array
418
+ The values of the decision variables that minimizes the
419
+ objective function while satisfying the constraints.
420
+ fun : float
421
+ The optimal value of the objective function ``c @ x``.
422
+ slack : 1D array
423
+ The (nominally positive) values of the slack,
424
+ ``b_ub - A_ub @ x``.
425
+ con : 1D array
426
+ The (nominally zero) residuals of the equality constraints,
427
+ ``b_eq - A_eq @ x``.
428
+ success : bool
429
+ ``True`` when the algorithm succeeds in finding an optimal
430
+ solution.
431
+ status : int
432
+ An integer representing the exit status of the algorithm.
433
+
434
+ ``0`` : Optimization terminated successfully.
435
+
436
+ ``1`` : Iteration or time limit reached.
437
+
438
+ ``2`` : Problem appears to be infeasible.
439
+
440
+ ``3`` : Problem appears to be unbounded.
441
+
442
+ ``4`` : The HiGHS solver ran into a problem.
443
+
444
+ message : str
445
+ A string descriptor of the exit status of the algorithm.
446
+ nit : int
447
+ The total number of iterations performed. This includes iterations
448
+ in all phases.
449
+ crossover_nit : int
450
+ This is always ``0`` for the HiGHS simplex method.
451
+ For the HiGHS interior-point method, this is the number of
452
+ primal/dual pushes performed during the crossover routine.
453
+ ineqlin : OptimizeResult
454
+ Solution and sensitivity information corresponding to the
455
+ inequality constraints, `b_ub`. A dictionary consisting of the
456
+ fields:
457
+
458
+ residual : np.ndarray
459
+ The (nominally positive) values of the slack variables,
460
+ ``b_ub - A_ub @ x``. This quantity is also commonly
461
+ referred to as "slack".
462
+
463
+ marginals : np.ndarray
464
+ The sensitivity (partial derivative) of the objective
465
+ function with respect to the right-hand side of the
466
+ inequality constraints, `b_ub`.
467
+
468
+ eqlin : OptimizeResult
469
+ Solution and sensitivity information corresponding to the
470
+ equality constraints, `b_eq`. A dictionary consisting of the
471
+ fields:
472
+
473
+ residual : np.ndarray
474
+ The (nominally zero) residuals of the equality constraints,
475
+ ``b_eq - A_eq @ x``.
476
+
477
+ marginals : np.ndarray
478
+ The sensitivity (partial derivative) of the objective
479
+ function with respect to the right-hand side of the
480
+ equality constraints, `b_eq`.
481
+
482
+ lower, upper : OptimizeResult
483
+ Solution and sensitivity information corresponding to the
484
+ lower and upper bounds on decision variables, `bounds`.
485
+
486
+ residual : np.ndarray
487
+ The (nominally positive) values of the quantity
488
+ ``x - lb`` (lower) or ``ub - x`` (upper).
489
+
490
+ marginals : np.ndarray
491
+ The sensitivity (partial derivative) of the objective
492
+ function with respect to the lower and upper
493
+ `bounds`.
494
+
495
+ Notes
496
+ -----
497
+
498
+ Method :ref:`'highs-ds' <optimize.linprog-highs-ds>` is a wrapper
499
+ of the C++ high performance dual revised simplex implementation (HSOL)
500
+ [13]_, [14]_. Method :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`
501
+ is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
502
+ **m**\ ethod [13]_; it features a crossover routine, so it is as accurate
503
+ as a simplex solver. Method :ref:`'highs' <optimize.linprog-highs>` chooses
504
+ between the two automatically. For new code involving `linprog`, we
505
+ recommend explicitly choosing one of these three method values instead of
506
+ :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
507
+ :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
508
+ :ref:`'simplex' <optimize.linprog-simplex>` (legacy).
509
+
510
+ The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain
511
+ `marginals`, or partial derivatives of the objective function with respect
512
+ to the right-hand side of each constraint. These partial derivatives are
513
+ also referred to as "Lagrange multipliers", "dual values", and
514
+ "shadow prices". The sign convention of `marginals` is opposite that
515
+ of Lagrange multipliers produced by many nonlinear solvers.
516
+
517
+ References
518
+ ----------
519
+ .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
520
+ "HiGHS - high performance software for linear optimization."
521
+ https://highs.dev/
522
+ .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
523
+ simplex method." Mathematical Programming Computation, 10 (1),
524
+ 119-142, 2018. DOI: 10.1007/s12532-017-0130-5
525
+ .. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code."
526
+ Mathematical programming 5.1 (1973): 1-28.
527
+ .. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge
528
+ simplex algorithm." Mathematical Programming 12.1 (1977): 361-371.
529
+ """
530
+ pass
531
+
532
+
533
+ def _linprog_highs_ipm_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
534
+ bounds=None, method='highs-ipm', callback=None,
535
+ maxiter=None, disp=False, presolve=True,
536
+ time_limit=None,
537
+ dual_feasibility_tolerance=None,
538
+ primal_feasibility_tolerance=None,
539
+ ipm_optimality_tolerance=None,
540
+ **unknown_options):
541
+ r"""
542
+ Linear programming: minimize a linear objective function subject to linear
543
+ equality and inequality constraints using the HiGHS interior point solver.
544
+
545
+ Linear programming solves problems of the following form:
546
+
547
+ .. math::
548
+
549
+ \min_x \ & c^T x \\
550
+ \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
551
+ & A_{eq} x = b_{eq},\\
552
+ & l \leq x \leq u ,
553
+
554
+ where :math:`x` is a vector of decision variables; :math:`c`,
555
+ :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
556
+ :math:`A_{ub}` and :math:`A_{eq}` are matrices.
557
+
558
+ Alternatively, that's:
559
+
560
+ minimize::
561
+
562
+ c @ x
563
+
564
+ such that::
565
+
566
+ A_ub @ x <= b_ub
567
+ A_eq @ x == b_eq
568
+ lb <= x <= ub
569
+
570
+ Note that by default ``lb = 0`` and ``ub = None`` unless specified with
571
+ ``bounds``.
572
+
573
+ Parameters
574
+ ----------
575
+ c : 1-D array
576
+ The coefficients of the linear objective function to be minimized.
577
+ A_ub : 2-D array, optional
578
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
579
+ coefficients of a linear inequality constraint on ``x``.
580
+ b_ub : 1-D array, optional
581
+ The inequality constraint vector. Each element represents an
582
+ upper bound on the corresponding value of ``A_ub @ x``.
583
+ A_eq : 2-D array, optional
584
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
585
+ coefficients of a linear equality constraint on ``x``.
586
+ b_eq : 1-D array, optional
587
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
588
+ the corresponding element of ``b_eq``.
589
+ bounds : sequence, optional
590
+ A sequence of ``(min, max)`` pairs for each element in ``x``, defining
591
+ the minimum and maximum values of that decision variable. Use ``None``
592
+ to indicate that there is no bound. By default, bounds are
593
+ ``(0, None)`` (all decision variables are non-negative).
594
+ If a single tuple ``(min, max)`` is provided, then ``min`` and
595
+ ``max`` will serve as bounds for all decision variables.
596
+ method : str
597
+
598
+ This is the method-specific documentation for 'highs-ipm'.
599
+ :ref:`'highs-ipm' <optimize.linprog-highs>`,
600
+ :ref:`'highs-ds' <optimize.linprog-highs-ds>`,
601
+ :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
602
+ :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
603
+ :ref:`'simplex' <optimize.linprog-simplex>` (legacy)
604
+ are also available.
605
+
606
+ Options
607
+ -------
608
+ maxiter : int
609
+ The maximum number of iterations to perform in either phase.
610
+ For :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`, this does not
611
+ include the number of crossover iterations. Default is the largest
612
+ possible value for an ``int`` on the platform.
613
+ disp : bool (default: ``False``)
614
+ Set to ``True`` if indicators of optimization status are to be
615
+ printed to the console during optimization.
616
+ presolve : bool (default: ``True``)
617
+ Presolve attempts to identify trivial infeasibilities,
618
+ identify trivial unboundedness, and simplify the problem before
619
+ sending it to the main solver. It is generally recommended
620
+ to keep the default setting ``True``; set to ``False`` if
621
+ presolve is to be disabled.
622
+ time_limit : float
623
+ The maximum time in seconds allotted to solve the problem;
624
+ default is the largest possible value for a ``double`` on the
625
+ platform.
626
+ dual_feasibility_tolerance : double (default: 1e-07)
627
+ The minimum of this and ``primal_feasibility_tolerance``
628
+ is used for the feasibility tolerance of
629
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
630
+ primal_feasibility_tolerance : double (default: 1e-07)
631
+ The minimum of this and ``dual_feasibility_tolerance``
632
+ is used for the feasibility tolerance of
633
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
634
+ ipm_optimality_tolerance : double (default: ``1e-08``)
635
+ Optimality tolerance for
636
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
637
+ Minimum allowable value is 1e-12.
638
+ unknown_options : dict
639
+ Optional arguments not used by this particular solver. If
640
+ ``unknown_options`` is non-empty, a warning is issued listing
641
+ all unused options.
642
+
643
+ Returns
644
+ -------
645
+ res : OptimizeResult
646
+ A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
647
+
648
+ x : 1D array
649
+ The values of the decision variables that minimizes the
650
+ objective function while satisfying the constraints.
651
+ fun : float
652
+ The optimal value of the objective function ``c @ x``.
653
+ slack : 1D array
654
+ The (nominally positive) values of the slack,
655
+ ``b_ub - A_ub @ x``.
656
+ con : 1D array
657
+ The (nominally zero) residuals of the equality constraints,
658
+ ``b_eq - A_eq @ x``.
659
+ success : bool
660
+ ``True`` when the algorithm succeeds in finding an optimal
661
+ solution.
662
+ status : int
663
+ An integer representing the exit status of the algorithm.
664
+
665
+ ``0`` : Optimization terminated successfully.
666
+
667
+ ``1`` : Iteration or time limit reached.
668
+
669
+ ``2`` : Problem appears to be infeasible.
670
+
671
+ ``3`` : Problem appears to be unbounded.
672
+
673
+ ``4`` : The HiGHS solver ran into a problem.
674
+
675
+ message : str
676
+ A string descriptor of the exit status of the algorithm.
677
+ nit : int
678
+ The total number of iterations performed.
679
+ For the HiGHS interior-point method, this does not include
680
+ crossover iterations.
681
+ crossover_nit : int
682
+ The number of primal/dual pushes performed during the
683
+ crossover routine for the HiGHS interior-point method.
684
+ ineqlin : OptimizeResult
685
+ Solution and sensitivity information corresponding to the
686
+ inequality constraints, `b_ub`. A dictionary consisting of the
687
+ fields:
688
+
689
+ residual : np.ndarray
690
+ The (nominally positive) values of the slack variables,
691
+ ``b_ub - A_ub @ x``. This quantity is also commonly
692
+ referred to as "slack".
693
+
694
+ marginals : np.ndarray
695
+ The sensitivity (partial derivative) of the objective
696
+ function with respect to the right-hand side of the
697
+ inequality constraints, `b_ub`.
698
+
699
+ eqlin : OptimizeResult
700
+ Solution and sensitivity information corresponding to the
701
+ equality constraints, `b_eq`. A dictionary consisting of the
702
+ fields:
703
+
704
+ residual : np.ndarray
705
+ The (nominally zero) residuals of the equality constraints,
706
+ ``b_eq - A_eq @ x``.
707
+
708
+ marginals : np.ndarray
709
+ The sensitivity (partial derivative) of the objective
710
+ function with respect to the right-hand side of the
711
+ equality constraints, `b_eq`.
712
+
713
+ lower, upper : OptimizeResult
714
+ Solution and sensitivity information corresponding to the
715
+ lower and upper bounds on decision variables, `bounds`.
716
+
717
+ residual : np.ndarray
718
+ The (nominally positive) values of the quantity
719
+ ``x - lb`` (lower) or ``ub - x`` (upper).
720
+
721
+ marginals : np.ndarray
722
+ The sensitivity (partial derivative) of the objective
723
+ function with respect to the lower and upper
724
+ `bounds`.
725
+
726
+ Notes
727
+ -----
728
+
729
+ Method :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`
730
+ is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
731
+ **m**\ ethod [13]_; it features a crossover routine, so it is as accurate
732
+ as a simplex solver.
733
+ Method :ref:`'highs-ds' <optimize.linprog-highs-ds>` is a wrapper
734
+ of the C++ high performance dual revised simplex implementation (HSOL)
735
+ [13]_, [14]_. Method :ref:`'highs' <optimize.linprog-highs>` chooses
736
+ between the two automatically. For new code involving `linprog`, we
737
+ recommend explicitly choosing one of these three method values instead of
738
+ :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
739
+ :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
740
+ :ref:`'simplex' <optimize.linprog-simplex>` (legacy).
741
+
742
+ The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain
743
+ `marginals`, or partial derivatives of the objective function with respect
744
+ to the right-hand side of each constraint. These partial derivatives are
745
+ also referred to as "Lagrange multipliers", "dual values", and
746
+ "shadow prices". The sign convention of `marginals` is opposite that
747
+ of Lagrange multipliers produced by many nonlinear solvers.
748
+
749
+ References
750
+ ----------
751
+ .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
752
+ "HiGHS - high performance software for linear optimization."
753
+ https://highs.dev/
754
+ .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
755
+ simplex method." Mathematical Programming Computation, 10 (1),
756
+ 119-142, 2018. DOI: 10.1007/s12532-017-0130-5
757
+ """
758
+ pass
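The marginals described in the Notes can be read straight off the result object. Continuing the small LP used after the 'highs' docstring, the shadow price of the single inequality should be -1.0 under the documented sign convention, since loosening ``x0 + x1 <= 4`` by one unit lowers the optimum by one:

    from scipy.optimize import linprog

    res = linprog([-1.0, -2.0], A_ub=[[1.0, 1.0]], b_ub=[4.0],
                  bounds=[(0, 3), (0, 2)], method='highs')
    print(res.ineqlin.marginals)  # expected: [-1.]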
759
+
760
+
761
+ def _linprog_ip_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
762
+ bounds=None, method='interior-point', callback=None,
763
+ maxiter=1000, disp=False, presolve=True,
764
+ tol=1e-8, autoscale=False, rr=True,
765
+ alpha0=.99995, beta=0.1, sparse=False,
766
+ lstsq=False, sym_pos=True, cholesky=True, pc=True,
767
+ ip=False, permc_spec='MMD_AT_PLUS_A', **unknown_options):
768
+ r"""
769
+ Linear programming: minimize a linear objective function subject to linear
770
+ equality and inequality constraints using the interior-point method of
771
+ [4]_.
772
+
773
+ .. deprecated:: 1.9.0
774
+ `method='interior-point'` will be removed in SciPy 1.11.0.
775
+ It is replaced by `method='highs'` because the latter is
776
+ faster and more robust.
777
+
778
+ Linear programming solves problems of the following form:
779
+
780
+ .. math::
781
+
782
+ \min_x \ & c^T x \\
783
+ \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
784
+ & A_{eq} x = b_{eq},\\
785
+ & l \leq x \leq u ,
786
+
787
+ where :math:`x` is a vector of decision variables; :math:`c`,
788
+ :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
789
+ :math:`A_{ub}` and :math:`A_{eq}` are matrices.
790
+
791
+ Alternatively, that's:
792
+
793
+ minimize::
794
+
795
+ c @ x
796
+
797
+ such that::
798
+
799
+ A_ub @ x <= b_ub
800
+ A_eq @ x == b_eq
801
+ lb <= x <= ub
802
+
803
+ Note that by default ``lb = 0`` and ``ub = None`` unless specified with
804
+ ``bounds``.
805
+
806
+ Parameters
807
+ ----------
808
+ c : 1-D array
809
+ The coefficients of the linear objective function to be minimized.
810
+ A_ub : 2-D array, optional
811
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
812
+ coefficients of a linear inequality constraint on ``x``.
813
+ b_ub : 1-D array, optional
814
+ The inequality constraint vector. Each element represents an
815
+ upper bound on the corresponding value of ``A_ub @ x``.
816
+ A_eq : 2-D array, optional
817
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
818
+ coefficients of a linear equality constraint on ``x``.
819
+ b_eq : 1-D array, optional
820
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
821
+ the corresponding element of ``b_eq``.
822
+ bounds : sequence, optional
823
+ A sequence of ``(min, max)`` pairs for each element in ``x``, defining
824
+ the minimum and maximum values of that decision variable. Use ``None``
825
+ to indicate that there is no bound. By default, bounds are
826
+ ``(0, None)`` (all decision variables are non-negative).
827
+ If a single tuple ``(min, max)`` is provided, then ``min`` and
828
+ ``max`` will serve as bounds for all decision variables.
829
+ method : str
830
+ This is the method-specific documentation for 'interior-point'.
831
+ :ref:`'highs' <optimize.linprog-highs>`,
832
+ :ref:`'highs-ds' <optimize.linprog-highs-ds>`,
833
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
834
+ :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
835
+ :ref:`'simplex' <optimize.linprog-simplex>` (legacy)
836
+ are also available.
837
+ callback : callable, optional
838
+ Callback function to be executed once per iteration.
839
+
840
+ Options
841
+ -------
842
+ maxiter : int (default: 1000)
843
+ The maximum number of iterations of the algorithm.
844
+ disp : bool (default: False)
845
+ Set to ``True`` if indicators of optimization status are to be printed
846
+ to the console each iteration.
847
+ presolve : bool (default: True)
848
+ Presolve attempts to identify trivial infeasibilities,
849
+ identify trivial unboundedness, and simplify the problem before
850
+ sending it to the main solver. It is generally recommended
851
+ to keep the default setting ``True``; set to ``False`` if
852
+ presolve is to be disabled.
853
+ tol : float (default: 1e-8)
854
+ Termination tolerance to be used for all termination criteria;
855
+ see [4]_ Section 4.5.
856
+ autoscale : bool (default: False)
857
+ Set to ``True`` to automatically perform equilibration.
858
+ Consider using this option if the numerical values in the
859
+ constraints are separated by several orders of magnitude.
860
+ rr : bool (default: True)
861
+ Set to ``False`` to disable automatic redundancy removal.
862
+ alpha0 : float (default: 0.99995)
863
+ The maximal step size for Mehrotra's predictor-corrector search
864
+ direction; see :math:`\beta_{3}` of [4]_ Table 8.1.
865
+ beta : float (default: 0.1)
866
+ The desired reduction of the path parameter :math:`\mu` (see [6]_)
867
+ when Mehrotra's predictor-corrector is not in use (uncommon).
868
+ sparse : bool (default: False)
869
+ Set to ``True`` if the problem is to be treated as sparse after
870
+ presolve. If either ``A_eq`` or ``A_ub`` is a sparse matrix,
871
+ this option will automatically be set ``True``, and the problem
872
+ will be treated as sparse even during presolve. If your constraint
873
+ matrices contain mostly zeros and the problem is not very small (less
874
+ than about 100 constraints or variables), consider setting ``True``
875
+ or providing ``A_eq`` and ``A_ub`` as sparse matrices.
876
+ lstsq : bool (default: False)
877
+ Set to ``True`` if the problem is expected to be very poorly
878
+ conditioned. This should always be left ``False`` unless severe
879
+ numerical difficulties are encountered. Leave this at the default
880
+ unless you receive a warning message suggesting otherwise.
881
+ sym_pos : bool (default: True)
882
+ Leave ``True`` if the problem is expected to yield a well conditioned
883
+ symmetric positive definite normal equation matrix
884
+ (almost always). Leave this at the default unless you receive
885
+ a warning message suggesting otherwise.
886
+ cholesky : bool (default: True)
887
+ Set to ``True`` if the normal equations are to be solved by explicit
888
+ Cholesky decomposition followed by explicit forward/backward
889
+ substitution. This is typically faster for problems
890
+ that are numerically well-behaved.
891
+ pc : bool (default: True)
892
+ Leave ``True`` if the predictor-corrector method of Mehrotra is to be
893
+ used. This is almost always (if not always) beneficial.
894
+ ip : bool (default: False)
895
+ Set to ``True`` if the improved initial point suggestion due to [4]_
896
+ Section 4.3 is desired. Whether this is beneficial or not
897
+ depends on the problem.
898
+ permc_spec : str (default: 'MMD_AT_PLUS_A')
899
+ (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
900
+ True``, and no SuiteSparse.)
901
+ A matrix is factorized in each iteration of the algorithm.
902
+ This option specifies how to permute the columns of the matrix for
903
+ sparsity preservation. Acceptable values are:
904
+
905
+ - ``NATURAL``: natural ordering.
906
+ - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
907
+ - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
908
+ - ``COLAMD``: approximate minimum degree column ordering.
909
+
910
+ This option can impact the convergence of the
911
+ interior point algorithm; test different values to determine which
912
+ performs best for your problem. For more information, refer to
913
+ ``scipy.sparse.linalg.splu``.
914
+ unknown_options : dict
915
+ Optional arguments not used by this particular solver. If
916
+ `unknown_options` is non-empty a warning is issued listing all
917
+ unused options.
918
+
919
+ Returns
920
+ -------
921
+ res : OptimizeResult
922
+ A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
923
+
924
+ x : 1-D array
925
+ The values of the decision variables that minimize the
926
+ objective function while satisfying the constraints.
927
+ fun : float
928
+ The optimal value of the objective function ``c @ x``.
929
+ slack : 1-D array
930
+ The (nominally positive) values of the slack variables,
931
+ ``b_ub - A_ub @ x``.
932
+ con : 1-D array
933
+ The (nominally zero) residuals of the equality constraints,
934
+ ``b_eq - A_eq @ x``.
935
+ success : bool
936
+ ``True`` when the algorithm succeeds in finding an optimal
937
+ solution.
938
+ status : int
939
+ An integer representing the exit status of the algorithm.
940
+
941
+ ``0`` : Optimization terminated successfully.
942
+
943
+ ``1`` : Iteration limit reached.
944
+
945
+ ``2`` : Problem appears to be infeasible.
946
+
947
+ ``3`` : Problem appears to be unbounded.
948
+
949
+ ``4`` : Numerical difficulties encountered.
950
+
951
+ message : str
952
+ A string descriptor of the exit status of the algorithm.
953
+ nit : int
954
+ The total number of iterations performed in all phases.
955
+
956
+
957
+ Notes
958
+ -----
959
+ This method implements the algorithm outlined in [4]_ with ideas from [8]_
960
+ and a structure inspired by the simpler methods of [6]_.
961
+
962
+ The primal-dual path following method begins with initial 'guesses' of
963
+ the primal and dual variables of the standard form problem and iteratively
964
+ attempts to solve the (nonlinear) Karush-Kuhn-Tucker conditions for the
965
+ problem with a gradually reduced logarithmic barrier term added to the
966
+ objective. This particular implementation uses a homogeneous self-dual
967
+ formulation, which provides certificates of infeasibility or unboundedness
968
+ where applicable.
969
+
970
+ The default initial point for the primal and dual variables is that
971
+ defined in [4]_ Section 4.4 Equation 8.22. Optionally (by setting initial
972
+ point option ``ip=True``), an alternate (potentially improved) starting
973
+ point can be calculated according to the additional recommendations of
974
+ [4]_ Section 4.4.
975
+
976
+ A search direction is calculated using the predictor-corrector method
977
+ (single correction) proposed by Mehrotra and detailed in [4]_ Section 4.1.
978
+ (A potential improvement would be to implement the method of multiple
979
+ corrections described in [4]_ Section 4.2.) In practice, this is
980
+ accomplished by solving the normal equations, [4]_ Section 5.1 Equations
981
+ 8.31 and 8.32, derived from the Newton equations [4]_ Section 5 Equations
982
+ 8.25 (compare to [4]_ Section 4 Equations 8.6-8.8). The advantage of
983
+ solving the normal equations rather than 8.25 directly is that the
984
+ matrices involved are symmetric positive definite, so Cholesky
985
+ decomposition can be used rather than the more expensive LU factorization.
986
+
987
+ With default options, the solver used to perform the factorization depends
988
+ on third-party software availability and the conditioning of the problem.
989
+
990
+ For dense problems, solvers are tried in the following order:
991
+
992
+ 1. ``scipy.linalg.cho_factor``
993
+
994
+ 2. ``scipy.linalg.solve`` with option ``sym_pos=True``
995
+
996
+ 3. ``scipy.linalg.solve`` with option ``sym_pos=False``
997
+
998
+ 4. ``scipy.linalg.lstsq``
999
+
1000
+ For sparse problems:
1001
+
1002
+ 1. ``sksparse.cholmod.cholesky`` (if scikit-sparse and SuiteSparse are
1003
+ installed)
1004
+
1005
+ 2. ``scipy.sparse.linalg.factorized`` (if scikit-umfpack and SuiteSparse
1006
+ are installed)
1007
+
1008
+ 3. ``scipy.sparse.linalg.splu`` (which uses SuperLU distributed with SciPy)
1009
+
1010
+ 4. ``scipy.sparse.linalg.lsqr``
1011
+
1012
+ If the solver fails for any reason, successively more robust (but slower)
1013
+ solvers are attempted in the order indicated. Attempting, failing, and
1014
+ re-starting factorization can be time consuming, so if the problem is
1015
+ numerically challenging, options can be set to bypass solvers that are
1016
+ failing. Setting ``cholesky=False`` skips to solver 2,
1017
+ ``sym_pos=False`` skips to solver 3, and ``lstsq=True`` skips
1018
+ to solver 4 for both sparse and dense problems.
1019
+
1020
+ Potential improvements for combating issues associated with dense
1021
+ columns in otherwise sparse problems are outlined in [4]_ Section 5.3 and
1022
+ [10]_ Section 4.1-4.2; the latter also discusses the alleviation of
1023
+ accuracy issues associated with the substitution approach to free
1024
+ variables.
1025
+
1026
+ After calculating the search direction, the maximum possible step size
1027
+ that does not activate the non-negativity constraints is calculated, and
1028
+ the smaller of this step size and unity is applied (as in [4]_ Section
1029
+ 4.1). [4]_ Section 4.3 suggests improvements for choosing the step size.
1030
+
1031
+ The new point is tested according to the termination conditions of [4]_
1032
+ Section 4.5. The same tolerance, which can be set using the ``tol`` option,
1033
+ is used for all checks. (A potential improvement would be to expose
1034
+ the different tolerances to be set independently.) If optimality,
1035
+ unboundedness, or infeasibility is detected, the solve procedure
1036
+ terminates; otherwise it repeats.
1037
+
1038
+ Whereas the top level ``linprog`` module expects a problem of form:
1039
+
1040
+ Minimize::
1041
+
1042
+ c @ x
1043
+
1044
+ Subject to::
1045
+
1046
+ A_ub @ x <= b_ub
1047
+ A_eq @ x == b_eq
1048
+ lb <= x <= ub
1049
+
1050
+ where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. The problem
1051
+ is automatically converted to the form:
1052
+
1053
+ Minimize::
1054
+
1055
+ c @ x
1056
+
1057
+ Subject to::
1058
+
1059
+ A @ x == b
1060
+ x >= 0
1061
+
1062
+ for solution. That is, the original problem contains equality, upper-bound
1063
+ and variable constraints whereas the method specific solver requires
1064
+ equality constraints and variable non-negativity. ``linprog`` converts the
1065
+ original problem to standard form by converting the simple bounds to upper
1066
+ bound constraints, introducing non-negative slack variables for inequality
1067
+ constraints, and expressing unbounded variables as the difference between
1068
+ two non-negative variables. The problem is converted back to the original
1069
+ form before results are reported.
1070
+
1071
+ References
1072
+ ----------
1073
+ .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
1074
+ optimizer for linear programming: an implementation of the
1075
+ homogeneous algorithm." High performance optimization. Springer US,
1076
+ 2000. 197-232.
1077
+ .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
1078
+ Programming based on Newton's Method." Unpublished Course Notes,
1079
+ March 2004. Available 2/25/2017 at
1080
+ https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
1081
+ .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
1082
+ programming." Mathematical Programming 71.2 (1995): 221-245.
1083
+ .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
1084
+ programming." Athena Scientific 1 (1997): 997.
1085
+ .. [10] Andersen, Erling D., et al. Implementation of interior point
1086
+ methods for large scale linear programming. HEC/Universite de
1087
+ Geneve, 1996.
1088
+ """
1089
+ pass
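# A hedged sketch of the dense solver fallback chain described in the Notes
# above; `solve_normal_eqs` is a hypothetical helper, not the real
# implementation, and modern SciPy spells the ``sym_pos`` option as
# ``assume_a='pos'``:
#
#   >>> import numpy as np
#   >>> import scipy.linalg
#   >>> def solve_normal_eqs(M, r):
#   ...     try:                                   # 1. Cholesky (fastest)
#   ...         return scipy.linalg.cho_solve(scipy.linalg.cho_factor(M), r)
#   ...     except scipy.linalg.LinAlgError:
#   ...         pass
#   ...     for assume in ("pos", "gen"):          # 2./3. general solves
#   ...         try:
#   ...             return scipy.linalg.solve(M, r, assume_a=assume)
#   ...         except scipy.linalg.LinAlgError:
#   ...             pass
#   ...     return scipy.linalg.lstsq(M, r)[0]     # 4. most robust, slowest
#   >>> solve_normal_eqs(np.array([[4., 1.], [1., 3.]]), np.array([1., 2.]))
#   array([0.09090909, 0.63636364])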
1090
+
1091
+
1092
+ def _linprog_rs_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
1093
+ bounds=None, method='interior-point', callback=None,
1094
+ x0=None, maxiter=5000, disp=False, presolve=True,
1095
+ tol=1e-12, autoscale=False, rr=True, maxupdate=10,
1096
+ mast=False, pivot="mrc", **unknown_options):
1097
+ r"""
1098
+ Linear programming: minimize a linear objective function subject to linear
1099
+ equality and inequality constraints using the revised simplex method.
1100
+
1101
+ .. deprecated:: 1.9.0
1102
+ `method='revised simplex'` will be removed in SciPy 1.11.0.
1103
+ It is replaced by `method='highs'` because the latter is
1104
+ faster and more robust.
1105
+
1106
+ Linear programming solves problems of the following form:
1107
+
1108
+ .. math::
1109
+
1110
+ \min_x \ & c^T x \\
1111
+ \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
1112
+ & A_{eq} x = b_{eq},\\
1113
+ & l \leq x \leq u ,
1114
+
1115
+ where :math:`x` is a vector of decision variables; :math:`c`,
1116
+ :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
1117
+ :math:`A_{ub}` and :math:`A_{eq}` are matrices.
1118
+
1119
+ Alternatively, that's:
1120
+
1121
+ minimize::
1122
+
1123
+ c @ x
1124
+
1125
+ such that::
1126
+
1127
+ A_ub @ x <= b_ub
1128
+ A_eq @ x == b_eq
1129
+ lb <= x <= ub
1130
+
1131
+ Note that by default ``lb = 0`` and ``ub = None`` unless specified with
1132
+ ``bounds``.
1133
+
1134
+ Parameters
1135
+ ----------
1136
+ c : 1-D array
1137
+ The coefficients of the linear objective function to be minimized.
1138
+ A_ub : 2-D array, optional
1139
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
1140
+ coefficients of a linear inequality constraint on ``x``.
1141
+ b_ub : 1-D array, optional
1142
+ The inequality constraint vector. Each element represents an
1143
+ upper bound on the corresponding value of ``A_ub @ x``.
1144
+ A_eq : 2-D array, optional
1145
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
1146
+ coefficients of a linear equality constraint on ``x``.
1147
+ b_eq : 1-D array, optional
1148
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
1149
+ the corresponding element of ``b_eq``.
1150
+ bounds : sequence, optional
1151
+ A sequence of ``(min, max)`` pairs for each element in ``x``, defining
1152
+ the minimum and maximum values of that decision variable. Use ``None``
1153
+ to indicate that there is no bound. By default, bounds are
1154
+ ``(0, None)`` (all decision variables are non-negative).
1155
+ If a single tuple ``(min, max)`` is provided, then ``min`` and
1156
+ ``max`` will serve as bounds for all decision variables.
1157
+ method : str
1158
+ This is the method-specific documentation for 'revised simplex'.
1159
+ :ref:`'highs' <optimize.linprog-highs>`,
1160
+ :ref:`'highs-ds' <optimize.linprog-highs-ds>`,
1161
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
1162
+ :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
1163
+ and :ref:`'simplex' <optimize.linprog-simplex>` (legacy)
1164
+ are also available.
1165
+ callback : callable, optional
1166
+ Callback function to be executed once per iteration.
1167
+ x0 : 1-D array, optional
1168
+ Guess values of the decision variables, which will be refined by
1169
+ the optimization algorithm. This argument is currently used only by the
1170
+ 'revised simplex' method, and can only be used if `x0` represents a
1171
+ basic feasible solution.
1172
+
1173
+ Options
1174
+ -------
1175
+ maxiter : int (default: 5000)
1176
+ The maximum number of iterations to perform in either phase.
1177
+ disp : bool (default: False)
1178
+ Set to ``True`` if indicators of optimization status are to be printed
1179
+ to the console each iteration.
1180
+ presolve : bool (default: True)
1181
+ Presolve attempts to identify trivial infeasibilities,
1182
+ identify trivial unboundedness, and simplify the problem before
1183
+ sending it to the main solver. It is generally recommended
1184
+ to keep the default setting ``True``; set to ``False`` if
1185
+ presolve is to be disabled.
1186
+ tol : float (default: 1e-12)
1187
+ The tolerance which determines when a solution is "close enough" to
1188
+ zero in Phase 1 to be considered a basic feasible solution or close
1189
+ enough to positive to serve as an optimal solution.
1190
+ autoscale : bool (default: False)
1191
+ Set to ``True`` to automatically perform equilibration.
1192
+ Consider using this option if the numerical values in the
1193
+ constraints are separated by several orders of magnitude.
1194
+ rr : bool (default: True)
1195
+ Set to ``False`` to disable automatic redundancy removal.
1196
+ maxupdate : int (default: 10)
1197
+ The maximum number of updates performed on the LU factorization.
1198
+ Once this many updates have been performed, the basis matrix is factorized
1199
+ from scratch.
1200
+ mast : bool (default: False)
1201
+ Minimize Amortized Solve Time. If enabled, the average time to solve
1202
+ a linear system using the basis factorization is measured. Typically,
1203
+ the average solve time will decrease with each successive solve after
1204
+ initial factorization, as factorization takes much more time than the
1205
+ solve operation (and updates). Eventually, however, the updated
1206
+ factorization becomes sufficiently complex that the average solve time
1207
+ begins to increase. When this is detected, the basis is refactorized
1208
+ from scratch. Enable this option to maximize speed at the risk of
1209
+ nondeterministic behavior. Ignored if ``maxupdate`` is 0.
1210
+ pivot : "mrc" or "bland" (default: "mrc")
1211
+ Pivot rule: Minimum Reduced Cost ("mrc") or Bland's rule ("bland").
1212
+ Choose Bland's rule if the iteration limit is reached and cycling is
1213
+ suspected.
1214
+ unknown_options : dict
1215
+ Optional arguments not used by this particular solver. If
1216
+ `unknown_options` is non-empty a warning is issued listing all
1217
+ unused options.
1218
+
1219
+ Returns
1220
+ -------
1221
+ res : OptimizeResult
1222
+ A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
1223
+
1224
+ x : 1-D array
1225
+ The values of the decision variables that minimize the
1226
+ objective function while satisfying the constraints.
1227
+ fun : float
1228
+ The optimal value of the objective function ``c @ x``.
1229
+ slack : 1-D array
1230
+ The (nominally positive) values of the slack variables,
1231
+ ``b_ub - A_ub @ x``.
1232
+ con : 1-D array
1233
+ The (nominally zero) residuals of the equality constraints,
1234
+ ``b_eq - A_eq @ x``.
1235
+ success : bool
1236
+ ``True`` when the algorithm succeeds in finding an optimal
1237
+ solution.
1238
+ status : int
1239
+ An integer representing the exit status of the algorithm.
1240
+
1241
+ ``0`` : Optimization terminated successfully.
1242
+
1243
+ ``1`` : Iteration limit reached.
1244
+
1245
+ ``2`` : Problem appears to be infeasible.
1246
+
1247
+ ``3`` : Problem appears to be unbounded.
1248
+
1249
+ ``4`` : Numerical difficulties encountered.
1250
+
1251
+ ``5`` : Problem has no constraints; turn presolve on.
1252
+
1253
+ ``6`` : Invalid guess provided.
1254
+
1255
+ message : str
1256
+ A string descriptor of the exit status of the algorithm.
1257
+ nit : int
1258
+ The total number of iterations performed in all phases.
1259
+
1260
+
1261
+ Notes
1262
+ -----
1263
+ Method *revised simplex* uses the revised simplex method as described in
1264
+ [9]_, except that a factorization [11]_ of the basis matrix, rather than
1265
+ its inverse, is efficiently maintained and used to solve the linear systems
1266
+ at each iteration of the algorithm.
1267
+
1268
+ References
1269
+ ----------
1270
+ .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
1271
+ programming." Athena Scientific 1 (1997): 997.
1272
+ .. [11] Bartels, Richard H. "A stabilization of the simplex method."
1273
+ Numerische Mathematik 16.5 (1971): 414-434.
1274
+ """
1275
+ pass
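# A minimal usage sketch of the (deprecated) 'revised simplex' method with
# the warm-start guess `x0` documented above; on SciPy versions where the
# method has been removed, substitute ``method='highs'`` and drop `x0`:
#
#   >>> import numpy as np
#   >>> from scipy.optimize import linprog
#   >>> c = np.array([1.0, 1.0])
#   >>> A_eq = np.array([[1.0, 2.0]])
#   >>> b_eq = np.array([4.0])
#   >>> x0 = np.array([0.0, 2.0])   # basic feasible: A_eq @ x0 == b_eq, x0 >= 0
#   >>> res = linprog(c, A_eq=A_eq, b_eq=b_eq, x0=x0, method="revised simplex")
#   >>> res.x
#   array([0., 2.])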
1276
+
1277
+
1278
+ def _linprog_simplex_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
1279
+ bounds=None, method='interior-point', callback=None,
1280
+ maxiter=5000, disp=False, presolve=True,
1281
+ tol=1e-12, autoscale=False, rr=True, bland=False,
1282
+ **unknown_options):
1283
+ r"""
1284
+ Linear programming: minimize a linear objective function subject to linear
1285
+ equality and inequality constraints using the tableau-based simplex method.
1286
+
1287
+ .. deprecated:: 1.9.0
1288
+ `method='simplex'` will be removed in SciPy 1.11.0.
1289
+ It is replaced by `method='highs'` because the latter is
1290
+ faster and more robust.
1291
+
1292
+ Linear programming solves problems of the following form:
1293
+
1294
+ .. math::
1295
+
1296
+ \min_x \ & c^T x \\
1297
+ \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
1298
+ & A_{eq} x = b_{eq},\\
1299
+ & l \leq x \leq u ,
1300
+
1301
+ where :math:`x` is a vector of decision variables; :math:`c`,
1302
+ :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
1303
+ :math:`A_{ub}` and :math:`A_{eq}` are matrices.
1304
+
1305
+ Alternatively, that's:
1306
+
1307
+ minimize::
1308
+
1309
+ c @ x
1310
+
1311
+ such that::
1312
+
1313
+ A_ub @ x <= b_ub
1314
+ A_eq @ x == b_eq
1315
+ lb <= x <= ub
1316
+
1317
+ Note that by default ``lb = 0`` and ``ub = None`` unless specified with
1318
+ ``bounds``.
1319
+
1320
+ Parameters
1321
+ ----------
1322
+ c : 1-D array
1323
+ The coefficients of the linear objective function to be minimized.
1324
+ A_ub : 2-D array, optional
1325
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
1326
+ coefficients of a linear inequality constraint on ``x``.
1327
+ b_ub : 1-D array, optional
1328
+ The inequality constraint vector. Each element represents an
1329
+ upper bound on the corresponding value of ``A_ub @ x``.
1330
+ A_eq : 2-D array, optional
1331
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
1332
+ coefficients of a linear equality constraint on ``x``.
1333
+ b_eq : 1-D array, optional
1334
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
1335
+ the corresponding element of ``b_eq``.
1336
+ bounds : sequence, optional
1337
+ A sequence of ``(min, max)`` pairs for each element in ``x``, defining
1338
+ the minimum and maximum values of that decision variable. Use ``None``
1339
+ to indicate that there is no bound. By default, bounds are
1340
+ ``(0, None)`` (all decision variables are non-negative).
1341
+ If a single tuple ``(min, max)`` is provided, then ``min`` and
1342
+ ``max`` will serve as bounds for all decision variables.
1343
+ method : str
1344
+ This is the method-specific documentation for 'simplex'.
1345
+ :ref:`'highs' <optimize.linprog-highs>`,
1346
+ :ref:`'highs-ds' <optimize.linprog-highs-ds>`,
1347
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
1348
+ :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
1349
+ and :ref:`'revised simplex' <optimize.linprog-revised_simplex>`
1350
+ are also available.
1351
+ callback : callable, optional
1352
+ Callback function to be executed once per iteration.
1353
+
1354
+ Options
1355
+ -------
1356
+ maxiter : int (default: 5000)
1357
+ The maximum number of iterations to perform in either phase.
1358
+ disp : bool (default: False)
1359
+ Set to ``True`` if indicators of optimization status are to be printed
1360
+ to the console each iteration.
1361
+ presolve : bool (default: True)
1362
+ Presolve attempts to identify trivial infeasibilities,
1363
+ identify trivial unboundedness, and simplify the problem before
1364
+ sending it to the main solver. It is generally recommended
1365
+ to keep the default setting ``True``; set to ``False`` if
1366
+ presolve is to be disabled.
1367
+ tol : float (default: 1e-12)
1368
+ The tolerance which determines when a solution is "close enough" to
1369
+ zero in Phase 1 to be considered a basic feasible solution or close
1370
+ enough to positive to serve as an optimal solution.
1371
+ autoscale : bool (default: False)
1372
+ Set to ``True`` to automatically perform equilibration.
1373
+ Consider using this option if the numerical values in the
1374
+ constraints are separated by several orders of magnitude.
1375
+ rr : bool (default: True)
1376
+ Set to ``False`` to disable automatic redundancy removal.
1377
+ bland : bool
1378
+ If True, use Bland's anti-cycling rule [3]_ to choose pivots to
1379
+ prevent cycling. If False, choose pivots which should lead to a
1380
+ converged solution more quickly. The latter method is subject to
1381
+ cycling (non-convergence) in rare instances.
1382
+ unknown_options : dict
1383
+ Optional arguments not used by this particular solver. If
1384
+ `unknown_options` is non-empty a warning is issued listing all
1385
+ unused options.
1386
+
1387
+ Returns
1388
+ -------
1389
+ res : OptimizeResult
1390
+ A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
1391
+
1392
+ x : 1-D array
1393
+ The values of the decision variables that minimize the
1394
+ objective function while satisfying the constraints.
1395
+ fun : float
1396
+ The optimal value of the objective function ``c @ x``.
1397
+ slack : 1-D array
1398
+ The (nominally positive) values of the slack variables,
1399
+ ``b_ub - A_ub @ x``.
1400
+ con : 1-D array
1401
+ The (nominally zero) residuals of the equality constraints,
1402
+ ``b_eq - A_eq @ x``.
1403
+ success : bool
1404
+ ``True`` when the algorithm succeeds in finding an optimal
1405
+ solution.
1406
+ status : int
1407
+ An integer representing the exit status of the algorithm.
1408
+
1409
+ ``0`` : Optimization terminated successfully.
1410
+
1411
+ ``1`` : Iteration limit reached.
1412
+
1413
+ ``2`` : Problem appears to be infeasible.
1414
+
1415
+ ``3`` : Problem appears to be unbounded.
1416
+
1417
+ ``4`` : Numerical difficulties encountered.
1418
+
1419
+ message : str
1420
+ A string descriptor of the exit status of the algorithm.
1421
+ nit : int
1422
+ The total number of iterations performed in all phases.
1423
+
1424
+ References
1425
+ ----------
1426
+ .. [1] Dantzig, George B., Linear programming and extensions. Rand
1427
+ Corporation Research Study Princeton Univ. Press, Princeton, NJ,
1428
+ 1963
1429
+ .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
1430
+ Mathematical Programming", McGraw-Hill, Chapter 4.
1431
+ .. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
1432
+ Mathematics of Operations Research (2), 1977: pp. 103-107.
1433
+ """
1434
+ pass
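# A minimal sketch of enabling Bland's anti-cycling rule [3]_ via the
# ``bland`` option documented above (the 'simplex' method itself is
# deprecated, so this is illustrative only):
#
#   >>> import numpy as np
#   >>> from scipy.optimize import linprog
#   >>> res = linprog(c=np.array([-1.0, -1.0]),
#   ...               A_ub=np.array([[1.0, 1.0]]), b_ub=np.array([1.0]),
#   ...               method="simplex", options={"bland": True})
#   >>> res.status
#   0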
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog_highs.py ADDED
@@ -0,0 +1,440 @@
1
+ """HiGHS Linear Optimization Methods
2
+
3
+ Interface to HiGHS linear optimization software.
4
+ https://highs.dev/
5
+
6
+ .. versionadded:: 1.5.0
7
+
8
+ References
9
+ ----------
10
+ .. [1] Q. Huangfu and J.A.J. Hall. "Parallelizing the dual revised simplex
11
+ method." Mathematical Programming Computation, 10 (1), 119-142,
12
+ 2018. DOI: 10.1007/s12532-017-0130-5
13
+
14
+ """
15
+
16
+ import inspect
17
+ import numpy as np
18
+ from ._optimize import OptimizeWarning, OptimizeResult
19
+ from warnings import warn
20
+ from ._highs._highs_wrapper import _highs_wrapper
21
+ from ._highs._highs_constants import (
22
+ CONST_INF,
23
+ MESSAGE_LEVEL_NONE,
24
+ HIGHS_OBJECTIVE_SENSE_MINIMIZE,
25
+
26
+ MODEL_STATUS_NOTSET,
27
+ MODEL_STATUS_LOAD_ERROR,
28
+ MODEL_STATUS_MODEL_ERROR,
29
+ MODEL_STATUS_PRESOLVE_ERROR,
30
+ MODEL_STATUS_SOLVE_ERROR,
31
+ MODEL_STATUS_POSTSOLVE_ERROR,
32
+ MODEL_STATUS_MODEL_EMPTY,
33
+ MODEL_STATUS_OPTIMAL,
34
+ MODEL_STATUS_INFEASIBLE,
35
+ MODEL_STATUS_UNBOUNDED_OR_INFEASIBLE,
36
+ MODEL_STATUS_UNBOUNDED,
37
+ MODEL_STATUS_REACHED_DUAL_OBJECTIVE_VALUE_UPPER_BOUND
38
+ as MODEL_STATUS_RDOVUB,
39
+ MODEL_STATUS_REACHED_OBJECTIVE_TARGET,
40
+ MODEL_STATUS_REACHED_TIME_LIMIT,
41
+ MODEL_STATUS_REACHED_ITERATION_LIMIT,
42
+
43
+ HIGHS_SIMPLEX_STRATEGY_DUAL,
44
+
45
+ HIGHS_SIMPLEX_CRASH_STRATEGY_OFF,
46
+
47
+ HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE,
48
+ HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG,
49
+ HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX,
50
+ HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE,
51
+ )
52
+ from scipy.sparse import csc_matrix, vstack, issparse
53
+
54
+
55
+ def _highs_to_scipy_status_message(highs_status, highs_message):
56
+ """Converts HiGHS status number/message to SciPy status number/message"""
57
+
58
+ scipy_statuses_messages = {
59
+ None: (4, "HiGHS did not provide a status code. "),
60
+ MODEL_STATUS_NOTSET: (4, ""),
61
+ MODEL_STATUS_LOAD_ERROR: (4, ""),
62
+ MODEL_STATUS_MODEL_ERROR: (2, ""),
63
+ MODEL_STATUS_PRESOLVE_ERROR: (4, ""),
64
+ MODEL_STATUS_SOLVE_ERROR: (4, ""),
65
+ MODEL_STATUS_POSTSOLVE_ERROR: (4, ""),
66
+ MODEL_STATUS_MODEL_EMPTY: (4, ""),
67
+ MODEL_STATUS_RDOVUB: (4, ""),
68
+ MODEL_STATUS_REACHED_OBJECTIVE_TARGET: (4, ""),
69
+ MODEL_STATUS_OPTIMAL: (0, "Optimization terminated successfully. "),
70
+ MODEL_STATUS_REACHED_TIME_LIMIT: (1, "Time limit reached. "),
71
+ MODEL_STATUS_REACHED_ITERATION_LIMIT: (1, "Iteration limit reached. "),
72
+ MODEL_STATUS_INFEASIBLE: (2, "The problem is infeasible. "),
73
+ MODEL_STATUS_UNBOUNDED: (3, "The problem is unbounded. "),
74
+ MODEL_STATUS_UNBOUNDED_OR_INFEASIBLE: (4, "The problem is unbounded "
75
+ "or infeasible. ")}
76
+ unrecognized = (4, "The HiGHS status code was not recognized. ")
77
+ scipy_status, scipy_message = (
78
+ scipy_statuses_messages.get(highs_status, unrecognized))
79
+ scipy_message = (f"{scipy_message}"
80
+ f"(HiGHS Status {highs_status}: {highs_message})")
81
+ return scipy_status, scipy_message
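# Illustrative sketch of the mapping above (values follow directly from the
# dictionary in _highs_to_scipy_status_message):
#
#   >>> _highs_to_scipy_status_message(None, None)
#   (4, 'HiGHS did not provide a status code. (HiGHS Status None: None)')
#   >>> _highs_to_scipy_status_message(MODEL_STATUS_OPTIMAL, "Optimal")[0]
#   0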
82
+
83
+
84
+ def _replace_inf(x):
85
+ # Replace `np.inf` with CONST_INF
86
+ infs = np.isinf(x)
87
+ with np.errstate(invalid="ignore"):
88
+ x[infs] = np.sign(x[infs])*CONST_INF
89
+ return x
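# Illustrative sketch: np.sign preserves the direction of each infinite
# entry while substituting HiGHS's finite stand-in CONST_INF, so
# _replace_inf(np.array([1.0, np.inf, -np.inf])) returns, schematically,
# array([1.0, CONST_INF, -CONST_INF]).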
90
+
91
+
92
+ def _convert_to_highs_enum(option, option_str, choices):
93
+ # If option is in the choices we can look it up, if not use
94
+ # the default value taken from function signature and warn:
95
+ try:
96
+ return choices[option.lower()]
97
+ except AttributeError:
98
+ return choices[option]
99
+ except KeyError:
100
+ sig = inspect.signature(_linprog_highs)
101
+ default_str = sig.parameters[option_str].default
102
+ warn(f"Option {option_str} is {option}, but only values in "
103
+ f"{set(choices.keys())} are allowed. Using default: "
104
+ f"{default_str}.",
105
+ OptimizeWarning, stacklevel=3)
106
+ return choices[default_str]
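# Illustrative sketch of the lookup above, with a toy choices mapping:
#
#   >>> _convert_to_highs_enum('DEVEX', 'simplex_dual_edge_weight_strategy',
#   ...                        {'devex': 1, None: None})   # lowercased hit
#   1
#   >>> _convert_to_highs_enum(None, 'simplex_dual_edge_weight_strategy',
#   ...                        {'devex': 1, None: None})   # AttributeError path
#
# An unrecognized value warns with OptimizeWarning and falls back to the
# default recorded in the signature of _linprog_highs.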
107
+
108
+
109
+ def _linprog_highs(lp, solver, time_limit=None, presolve=True,
110
+ disp=False, maxiter=None,
111
+ dual_feasibility_tolerance=None,
112
+ primal_feasibility_tolerance=None,
113
+ ipm_optimality_tolerance=None,
114
+ simplex_dual_edge_weight_strategy=None,
115
+ mip_rel_gap=None,
116
+ mip_max_nodes=None,
117
+ **unknown_options):
118
+ r"""
119
+ Solve a linear programming problem using one of the HiGHS
120
+ solvers.
121
+
122
+ User-facing documentation is in _linprog_doc.py.
123
+
124
+ Parameters
125
+ ----------
126
+ lp : _LPProblem
127
+ A ``scipy.optimize._linprog_util._LPProblem`` ``namedtuple``.
128
+ solver : "ipm" or "simplex" or None
129
+ Which HiGHS solver to use. If ``None``, "simplex" will be used.
130
+
131
+ Options
132
+ -------
133
+ maxiter : int
134
+ The maximum number of iterations to perform in either phase. For
135
+ ``solver='ipm'``, this does not include the number of crossover
136
+ iterations. Default is the largest possible value for an ``int``
137
+ on the platform.
138
+ disp : bool
139
+ Set to ``True`` if indicators of optimization status are to be printed
140
+ to the console each iteration; default ``False``.
141
+ time_limit : float
142
+ The maximum time in seconds allotted to solve the problem; default is
143
+ the largest possible value for a ``double`` on the platform.
144
+ presolve : bool
145
+ Presolve attempts to identify trivial infeasibilities,
146
+ identify trivial unboundedness, and simplify the problem before
147
+ sending it to the main solver. It is generally recommended
148
+ to keep the default setting ``True``; set to ``False`` if presolve is
149
+ to be disabled.
150
+ dual_feasibility_tolerance : double
151
+ Dual feasibility tolerance. Default is 1e-07.
152
+ The minimum of this and ``primal_feasibility_tolerance``
153
+ is used for the feasibility tolerance when ``solver='ipm'``.
154
+ primal_feasibility_tolerance : double
155
+ Primal feasibility tolerance. Default is 1e-07.
156
+ The minimum of this and ``dual_feasibility_tolerance``
157
+ is used for the feasibility tolerance when ``solver='ipm'``.
158
+ ipm_optimality_tolerance : double
159
+ Optimality tolerance for ``solver='ipm'``. Default is 1e-08.
160
+ Minimum possible value is 1e-12 and must be smaller than the largest
161
+ possible value for a ``double`` on the platform.
162
+ simplex_dual_edge_weight_strategy : str (default: None)
163
+ Strategy for simplex dual edge weights. The default, ``None``,
164
+ automatically selects one of the following.
165
+
166
+ ``'dantzig'`` uses Dantzig's original strategy of choosing the most
167
+ negative reduced cost.
168
+
169
+ ``'devex'`` uses the strategy described in [15]_.
170
+
171
+ ``'steepest'`` uses the exact steepest edge strategy as described in
172
+ [16]_.
173
+
174
+ ``'steepest-devex'`` begins with the exact steepest edge strategy
175
+ until the computation is too costly or inexact and then switches to
176
+ the devex method.
177
+
178
+ Currently, using ``None`` always selects ``'steepest-devex'``, but this
179
+ may change as new options become available.
180
+
181
+ mip_max_nodes : int
182
+ The maximum number of nodes allotted to solve the problem; default is
183
+ the largest possible value for a ``HighsInt`` on the platform.
184
+ Ignored if not using the MIP solver.
185
+ unknown_options : dict
186
+ Optional arguments not used by this particular solver. If
187
+ ``unknown_options`` is non-empty, a warning is issued listing all
188
+ unused options.
189
+
190
+ Returns
191
+ -------
192
+ sol : dict
193
+ A dictionary consisting of the fields:
194
+
195
+ x : 1D array
196
+ The values of the decision variables that minimize the
197
+ objective function while satisfying the constraints.
198
+ fun : float
199
+ The optimal value of the objective function ``c @ x``.
200
+ slack : 1D array
201
+ The (nominally positive) values of the slack,
202
+ ``b_ub - A_ub @ x``.
203
+ con : 1D array
204
+ The (nominally zero) residuals of the equality constraints,
205
+ ``b_eq - A_eq @ x``.
206
+ success : bool
207
+ ``True`` when the algorithm succeeds in finding an optimal
208
+ solution.
209
+ status : int
210
+ An integer representing the exit status of the algorithm.
211
+
212
+ ``0`` : Optimization terminated successfully.
213
+
214
+ ``1`` : Iteration or time limit reached.
215
+
216
+ ``2`` : Problem appears to be infeasible.
217
+
218
+ ``3`` : Problem appears to be unbounded.
219
+
220
+ ``4`` : The HiGHS solver ran into a problem.
221
+
222
+ message : str
223
+ A string descriptor of the exit status of the algorithm.
224
+ nit : int
225
+ The total number of iterations performed.
226
+ For ``solver='simplex'``, this includes iterations in all
227
+ phases. For ``solver='ipm'``, this does not include
228
+ crossover iterations.
229
+ crossover_nit : int
230
+ The number of primal/dual pushes performed during the
231
+ crossover routine for ``solver='ipm'``. This is ``0``
232
+ for ``solver='simplex'``.
233
+ ineqlin : OptimizeResult
234
+ Solution and sensitivity information corresponding to the
235
+ inequality constraints, `b_ub`. A dictionary consisting of the
236
+ fields:
237
+
238
+ residual : np.ndarray
239
+ The (nominally positive) values of the slack variables,
240
+ ``b_ub - A_ub @ x``. This quantity is also commonly
241
+ referred to as "slack".
242
+
243
+ marginals : np.ndarray
244
+ The sensitivity (partial derivative) of the objective
245
+ function with respect to the right-hand side of the
246
+ inequality constraints, `b_ub`.
247
+
248
+ eqlin : OptimizeResult
249
+ Solution and sensitivity information corresponding to the
250
+ equality constraints, `b_eq`. A dictionary consisting of the
251
+ fields:
252
+
253
+ residual : np.ndarray
254
+ The (nominally zero) residuals of the equality constraints,
255
+ ``b_eq - A_eq @ x``.
256
+
257
+ marginals : np.ndarray
258
+ The sensitivity (partial derivative) of the objective
259
+ function with respect to the right-hand side of the
260
+ equality constraints, `b_eq`.
261
+
262
+ lower, upper : OptimizeResult
263
+ Solution and sensitivity information corresponding to the
264
+ lower and upper bounds on decision variables, `bounds`.
265
+
266
+ residual : np.ndarray
267
+ The (nominally positive) values of the quantity
268
+ ``x - lb`` (lower) or ``ub - x`` (upper).
269
+
270
+ marginals : np.ndarray
271
+ The sensitivity (partial derivative) of the objective
272
+ function with respect to the lower and upper
273
+ `bounds`.
274
+
275
+ mip_node_count : int
276
+ The number of subproblems or "nodes" solved by the MILP
277
+ solver. Only present when `integrality` is not `None`.
278
+
279
+ mip_dual_bound : float
280
+ The MILP solver's final estimate of the lower bound on the
281
+ optimal solution. Only present when `integrality` is not
282
+ `None`.
283
+
284
+ mip_gap : float
285
+ The difference between the final objective function value
286
+ and the final dual bound, scaled by the final objective
287
+ function value. Only present when `integrality` is not
288
+ `None`.
289
+
290
+ Notes
291
+ -----
292
+ The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain
293
+ `marginals`, or partial derivatives of the objective function with respect
294
+ to the right-hand side of each constraint. These partial derivatives are
295
+ also referred to as "Lagrange multipliers", "dual values", and
296
+ "shadow prices". The sign convention of `marginals` is opposite that
297
+ of Lagrange multipliers produced by many nonlinear solvers.
298
+
299
+ References
300
+ ----------
301
+ .. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code."
302
+ Mathematical programming 5.1 (1973): 1-28.
303
+ .. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge
304
+ simplex algorithm." Mathematical Programming 12.1 (1977): 361-371.
305
+ """
306
+ if unknown_options:
307
+ message = (f"Unrecognized options detected: {unknown_options}. "
308
+ "These will be passed to HiGHS verbatim.")
309
+ warn(message, OptimizeWarning, stacklevel=3)
310
+
311
+ # Map options to HiGHS enum values
312
+ simplex_dual_edge_weight_strategy_enum = _convert_to_highs_enum(
313
+ simplex_dual_edge_weight_strategy,
314
+ 'simplex_dual_edge_weight_strategy',
315
+ choices={'dantzig': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG,
316
+ 'devex': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX,
317
+ 'steepest-devex': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE,
318
+ 'steepest':
319
+ HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE,
320
+ None: None})
321
+
322
+ c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp
323
+
324
+ lb, ub = bounds.T.copy() # separate bounds; copy -> C-contiguous
325
+ # highs_wrapper solves LHS <= A*x <= RHS, not equality constraints
326
+ with np.errstate(invalid="ignore"):
327
+ lhs_ub = -np.ones_like(b_ub)*np.inf # LHS of UB constraints is -inf
328
+ rhs_ub = b_ub # RHS of UB constraints is b_ub
329
+ lhs_eq = b_eq # Equality constraint is inequality
330
+ rhs_eq = b_eq # constraint with LHS=RHS
331
+ lhs = np.concatenate((lhs_ub, lhs_eq))
332
+ rhs = np.concatenate((rhs_ub, rhs_eq))
333
+
334
+ if issparse(A_ub) or issparse(A_eq):
335
+ A = vstack((A_ub, A_eq))
336
+ else:
337
+ A = np.vstack((A_ub, A_eq))
338
+ A = csc_matrix(A)
339
+
340
+ options = {
341
+ 'presolve': presolve,
342
+ 'sense': HIGHS_OBJECTIVE_SENSE_MINIMIZE,
343
+ 'solver': solver,
344
+ 'time_limit': time_limit,
345
+ 'highs_debug_level': MESSAGE_LEVEL_NONE,
346
+ 'dual_feasibility_tolerance': dual_feasibility_tolerance,
347
+ 'ipm_optimality_tolerance': ipm_optimality_tolerance,
348
+ 'log_to_console': disp,
349
+ 'mip_max_nodes': mip_max_nodes,
350
+ 'output_flag': disp,
351
+ 'primal_feasibility_tolerance': primal_feasibility_tolerance,
352
+ 'simplex_dual_edge_weight_strategy':
353
+ simplex_dual_edge_weight_strategy_enum,
354
+ 'simplex_strategy': HIGHS_SIMPLEX_STRATEGY_DUAL,
355
+ 'simplex_crash_strategy': HIGHS_SIMPLEX_CRASH_STRATEGY_OFF,
356
+ 'ipm_iteration_limit': maxiter,
357
+ 'simplex_iteration_limit': maxiter,
358
+ 'mip_rel_gap': mip_rel_gap,
359
+ }
360
+ options.update(unknown_options)
361
+
362
+ # np.inf doesn't work; use very large constant
363
+ rhs = _replace_inf(rhs)
364
+ lhs = _replace_inf(lhs)
365
+ lb = _replace_inf(lb)
366
+ ub = _replace_inf(ub)
367
+
368
+ if integrality is None or np.sum(integrality) == 0:
369
+ integrality = np.empty(0)
370
+ else:
371
+ integrality = np.array(integrality)
372
+
373
+ res = _highs_wrapper(c, A.indptr, A.indices, A.data, lhs, rhs,
374
+ lb, ub, integrality.astype(np.uint8), options)
375
+
376
+ # HiGHS represents constraints as lhs/rhs, so
377
+ # Ax + s = b => Ax = b - s
378
+ # and we need to split up s by A_ub and A_eq
379
+ if 'slack' in res:
380
+ slack = res['slack']
381
+ con = np.array(slack[len(b_ub):])
382
+ slack = np.array(slack[:len(b_ub)])
383
+ else:
384
+ slack, con = None, None
385
+
386
+ # lagrange multipliers for equalities/inequalities and upper/lower bounds
387
+ if 'lambda' in res:
388
+ lamda = res['lambda']
389
+ marg_ineqlin = np.array(lamda[:len(b_ub)])
390
+ marg_eqlin = np.array(lamda[len(b_ub):])
391
+ marg_upper = np.array(res['marg_bnds'][1, :])
392
+ marg_lower = np.array(res['marg_bnds'][0, :])
393
+ else:
394
+ marg_ineqlin, marg_eqlin = None, None
395
+ marg_upper, marg_lower = None, None
396
+
397
+ # this needs to be updated if we start choosing the solver intelligently
398
+
399
+ # Convert to scipy-style status and message
400
+ highs_status = res.get('status', None)
401
+ highs_message = res.get('message', None)
402
+ status, message = _highs_to_scipy_status_message(highs_status,
403
+ highs_message)
404
+
405
+ x = np.array(res['x']) if 'x' in res else None
406
+ sol = {'x': x,
407
+ 'slack': slack,
408
+ 'con': con,
409
+ 'ineqlin': OptimizeResult({
410
+ 'residual': slack,
411
+ 'marginals': marg_ineqlin,
412
+ }),
413
+ 'eqlin': OptimizeResult({
414
+ 'residual': con,
415
+ 'marginals': marg_eqlin,
416
+ }),
417
+ 'lower': OptimizeResult({
418
+ 'residual': None if x is None else x - lb,
419
+ 'marginals': marg_lower,
420
+ }),
421
+ 'upper': OptimizeResult({
422
+ 'residual': None if x is None else ub - x,
423
+ 'marginals': marg_upper
424
+ }),
425
+ 'fun': res.get('fun'),
426
+ 'status': status,
427
+ 'success': res['status'] == MODEL_STATUS_OPTIMAL,
428
+ 'message': message,
429
+ 'nit': res.get('simplex_nit', 0) or res.get('ipm_nit', 0),
430
+ 'crossover_nit': res.get('crossover_nit'),
431
+ }
432
+
433
+ if np.any(x) and integrality is not None:
434
+ sol.update({
435
+ 'mip_node_count': res.get('mip_node_count', 0),
436
+ 'mip_dual_bound': res.get('mip_dual_bound', 0.0),
437
+ 'mip_gap': res.get('mip_gap', 0.0),
438
+ })
439
+
440
+ return sol
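# Illustration (sketch) of the lhs <= A @ x <= rhs encoding built above:
# each row of A_ub with bound b contributes the pair (-inf, b), while each
# row of A_eq with value b contributes (b, b). For one constraint of each:
#
#   A_ub = [[1, 1]], b_ub = [4]   ->   lhs[0] = -inf, rhs[0] = 4
#   A_eq = [[1, 2]], b_eq = [3]   ->   lhs[1] = 3,    rhs[1] = 3
#
# HiGHS then receives the single stacked matrix A = [A_ub; A_eq] in CSC form.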
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog_rs.py ADDED
@@ -0,0 +1,572 @@
1
+ """Revised simplex method for linear programming
2
+
3
+ The *revised simplex* method uses the method described in [1]_, except
4
+ that a factorization [2]_ of the basis matrix, rather than its inverse,
5
+ is efficiently maintained and used to solve the linear systems at each
6
+ iteration of the algorithm.
7
+
8
+ .. versionadded:: 1.3.0
9
+
10
+ References
11
+ ----------
12
+ .. [1] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
13
+ programming." Athena Scientific 1 (1997): 997.
14
+ .. [2] Bartels, Richard H. "A stabilization of the simplex method."
15
+ Numerische Mathematik 16.5 (1971): 414-434.
16
+
17
+ """
18
+ # Author: Matt Haberland
19
+
20
+ import numpy as np
21
+ from numpy.linalg import LinAlgError
22
+
23
+ from scipy.linalg import solve
24
+ from ._optimize import _check_unknown_options
25
+ from ._bglu_dense import LU
26
+ from ._bglu_dense import BGLU as BGLU
27
+ from ._linprog_util import _postsolve
28
+ from ._optimize import OptimizeResult
29
+
30
+
31
+ def _phase_one(A, b, x0, callback, postsolve_args, maxiter, tol, disp,
32
+ maxupdate, mast, pivot):
33
+ """
34
+ The purpose of phase one is to find an initial basic feasible solution
35
+ (BFS) to the original problem.
36
+
37
+ Generates an auxiliary problem with a trivial BFS and an objective that
38
+ minimizes infeasibility of the original problem. Solves the auxiliary
39
+ problem using the main simplex routine (phase two). This either yields
40
+ a BFS to the original problem or determines that the original problem is
41
+ infeasible. If feasible, phase one detects redundant rows in the original
42
+ constraint matrix and removes them, then chooses additional indices as
43
+ necessary to complete a basis/BFS for the original problem.
44
+ """
45
+
46
+ m, n = A.shape
47
+ status = 0
48
+
49
+ # generate auxiliary problem to get initial BFS
50
+ A, b, c, basis, x, status = _generate_auxiliary_problem(A, b, x0, tol)
51
+
52
+ if status == 6:
53
+ residual = c.dot(x)
54
+ iter_k = 0
55
+ return x, basis, A, b, residual, status, iter_k
56
+
57
+ # solve auxiliary problem
58
+ phase_one_n = n
59
+ iter_k = 0
60
+ x, basis, status, iter_k = _phase_two(c, A, x, basis, callback,
61
+ postsolve_args,
62
+ maxiter, tol, disp,
63
+ maxupdate, mast, pivot,
64
+ iter_k, phase_one_n)
65
+
66
+ # check for infeasibility
67
+ residual = c.dot(x)
68
+ if status == 0 and residual > tol:
69
+ status = 2
70
+
71
+ # drive artificial variables out of basis
72
+ # TODO: test redundant row removal better
73
+ # TODO: make solve more efficient with BGLU? This could take a while.
74
+ keep_rows = np.ones(m, dtype=bool)
75
+ for basis_column in basis[basis >= n]:
76
+ B = A[:, basis]
77
+ try:
78
+ basis_finder = np.abs(solve(B, A)) # inefficient
79
+ pertinent_row = np.argmax(basis_finder[:, basis_column])
80
+ eligible_columns = np.ones(n, dtype=bool)
81
+ eligible_columns[basis[basis < n]] = 0
82
+ eligible_column_indices = np.where(eligible_columns)[0]
83
+ index = np.argmax(basis_finder[:, :n]
84
+ [pertinent_row, eligible_columns])
85
+ new_basis_column = eligible_column_indices[index]
86
+ if basis_finder[pertinent_row, new_basis_column] < tol:
87
+ keep_rows[pertinent_row] = False
88
+ else:
89
+ basis[basis == basis_column] = new_basis_column
90
+ except LinAlgError:
91
+ status = 4
92
+
93
+ # form solution to original problem
94
+ A = A[keep_rows, :n]
95
+ basis = basis[keep_rows]
96
+ x = x[:n]
97
+ m = A.shape[0]
98
+ return x, basis, A, b, residual, status, iter_k
99
+
100
+
101
+ def _get_more_basis_columns(A, basis):
102
+ """
103
+ Called when the auxiliary problem terminates with artificial columns in
104
+ the basis, which must be removed and replaced with non-artificial
105
+ columns. Finds additional columns that do not make the matrix singular.
106
+ """
107
+ m, n = A.shape
108
+
109
+ # options for inclusion are those that aren't already in the basis
110
+ a = np.arange(m+n)
111
+ bl = np.zeros(len(a), dtype=bool)
112
+ bl[basis] = 1
113
+ options = a[~bl]
114
+ options = options[options < n] # and they have to be non-artificial
115
+
116
+ # form basis matrix
117
+ B = np.zeros((m, m))
118
+ B[:, 0:len(basis)] = A[:, basis]
119
+
120
+ if (basis.size > 0 and
121
+ np.linalg.matrix_rank(B[:, :len(basis)]) < len(basis)):
122
+ raise Exception("Basis has dependent columns")
123
+
124
+ rank = 0 # just enter the loop
125
+ for i in range(n): # somewhat arbitrary, but we need another way out
126
+ # permute the options, and take as many as needed
127
+ new_basis = np.random.permutation(options)[:m-len(basis)]
128
+ B[:, len(basis):] = A[:, new_basis] # update the basis matrix
129
+ rank = np.linalg.matrix_rank(B) # check the rank
130
+ if rank == m:
131
+ break
132
+
133
+ return np.concatenate((basis, new_basis))
134
+
135
+
136
+ def _generate_auxiliary_problem(A, b, x0, tol):
137
+ """
138
+ Modifies original problem to create an auxiliary problem with a trivial
139
+ initial basic feasible solution and an objective that minimizes
140
+ infeasibility in the original problem.
141
+
142
+ Conceptually, this is done by stacking an identity matrix on the right of
143
+ the original constraint matrix, adding artificial variables to correspond
144
+ with each of these new columns, and generating a cost vector that is all
145
+ zeros except for ones corresponding with each of the new variables.
146
+
147
+ An initial basic feasible solution is trivial: all variables are zero
148
+ except for the artificial variables, which are set equal to the
149
+ corresponding element of the right hand side `b`.
150
+
151
+ Running the simplex method on this auxiliary problem drives all of the
152
+ artificial variables - and thus the cost - to zero if the original problem
153
+ is feasible. The original problem is declared infeasible otherwise.
154
+
155
+ Much of the complexity below is to improve efficiency by using singleton
156
+ columns in the original problem where possible, thus generating artificial
157
+ variables only as necessary, and using an initial 'guess' basic feasible
158
+ solution.
159
+ """
160
+ status = 0
161
+ m, n = A.shape
162
+
163
+ if x0 is not None:
164
+ x = x0
165
+ else:
166
+ x = np.zeros(n)
167
+
168
+ r = b - A@x # residual; this must be all zeros for feasibility
169
+
170
+ A[r < 0] = -A[r < 0] # express problem with RHS positive for trivial BFS
171
+ b[r < 0] = -b[r < 0] # to the auxiliary problem
172
+ r[r < 0] *= -1
173
+
174
+ # Rows which we will need to find a trivial way to zero.
175
+ # This should just be the rows where there is a nonzero residual.
176
+ # But then we would not necessarily have a column singleton in every row.
177
+ # This makes it difficult to find an initial basis.
178
+ if x0 is None:
179
+ nonzero_constraints = np.arange(m)
180
+ else:
181
+ nonzero_constraints = np.where(r > tol)[0]
182
+
183
+ # these are (at least some of) the initial basis columns
184
+ basis = np.where(np.abs(x) > tol)[0]
185
+
186
+ if len(nonzero_constraints) == 0 and len(basis) <= m: # already a BFS
187
+ c = np.zeros(n)
188
+ basis = _get_more_basis_columns(A, basis)
189
+ return A, b, c, basis, x, status
190
+ elif (len(nonzero_constraints) > m - len(basis) or
191
+ np.any(x < 0)): # can't get trivial BFS
192
+ c = np.zeros(n)
193
+ status = 6
194
+ return A, b, c, basis, x, status
195
+
196
+ # chooses existing columns appropriate for inclusion in initial basis
197
+ cols, rows = _select_singleton_columns(A, r)
198
+
199
+ # find the rows we need to zero that we _can_ zero with column singletons
200
+ i_tofix = np.isin(rows, nonzero_constraints)
201
+ # these columns can't already be in the basis, though
202
+ # we are going to add them to the basis and change the corresponding x val
203
+ i_notinbasis = np.logical_not(np.isin(cols, basis))
204
+ i_fix_without_aux = np.logical_and(i_tofix, i_notinbasis)
205
+ rows = rows[i_fix_without_aux]
206
+ cols = cols[i_fix_without_aux]
207
+
208
+ # indices of the rows we can only zero with auxiliary variable
209
+ # these rows will get a one in each auxiliary column
210
+ arows = nonzero_constraints[np.logical_not(
211
+ np.isin(nonzero_constraints, rows))]
212
+ n_aux = len(arows)
213
+ acols = n + np.arange(n_aux) # indices of auxiliary columns
214
+
215
+ basis_ng = np.concatenate((cols, acols)) # basis columns not from guess
216
+ basis_ng_rows = np.concatenate((rows, arows)) # rows we need to zero
217
+
218
+ # add auxiliary singleton columns
219
+ A = np.hstack((A, np.zeros((m, n_aux))))
220
+ A[arows, acols] = 1
221
+
222
+ # generate initial BFS
223
+ x = np.concatenate((x, np.zeros(n_aux)))
224
+ x[basis_ng] = r[basis_ng_rows]/A[basis_ng_rows, basis_ng]
225
+
226
+ # generate costs to minimize infeasibility
227
+ c = np.zeros(n_aux + n)
228
+ c[acols] = 1
229
+
230
+ # basis columns correspond with nonzeros in guess, those with column
231
+ # singletons we used to zero remaining constraints, and any additional
232
+ # columns to get a full set (m columns)
233
+ basis = np.concatenate((basis, basis_ng))
234
+ basis = _get_more_basis_columns(A, basis) # add columns as needed
235
+
236
+ return A, b, c, basis, x, status
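# Worked sketch of the construction above with no guess (x0 is None) and no
# usable singleton columns, so every row receives an artificial variable:
#
#   A = [[1, 2],   b = [3,   ->   aux A = [[1, 2, 1, 0],   c = [0, 0, 1, 1]
#        [4, 5]]        6]                 [4, 5, 0, 1]]
#
# The trivial BFS is x = [0, 0, 3, 6]; its cost c @ x = 9 is the total
# infeasibility, which phase one drives to zero exactly when the original
# problem is feasible.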
237
+
238
+
239
+ def _select_singleton_columns(A, b):
240
+ """
241
+ Finds singleton columns for which the singleton entry is of the same sign
242
+ as the right-hand side; these columns are eligible for inclusion in an
243
+ initial basis. Determines the rows in which the singleton entries are
244
+ located. For each of these rows, returns the indices of the one singleton
245
+ column and its corresponding row.
246
+ """
247
+ # find indices of all singleton columns and corresponding row indices
248
+ column_indices = np.nonzero(np.sum(np.abs(A) != 0, axis=0) == 1)[0]
249
+ columns = A[:, column_indices] # array of singleton columns
250
+ row_indices = np.zeros(len(column_indices), dtype=int)
251
+ nonzero_rows, nonzero_columns = np.nonzero(columns)
252
+ row_indices[nonzero_columns] = nonzero_rows # corresponding row indices
253
+
254
+ # keep only singletons with entries that have same sign as RHS
255
+ # this is necessary because all elements of BFS must be non-negative
256
+ same_sign = A[row_indices, column_indices]*b[row_indices] >= 0
257
+ column_indices = column_indices[same_sign][::-1]
258
+ row_indices = row_indices[same_sign][::-1]
259
+ # Reversing the order so that steps below select rightmost columns
260
+ # for initial basis, which will tend to be slack variables. (If the
261
+ # guess corresponds with a basic feasible solution but a constraint
262
+ # is not satisfied with the corresponding slack variable zero, the slack
263
+ # variable must be basic.)
264
+
265
+ # for each row, keep rightmost singleton column with an entry in that row
266
+ unique_row_indices, first_columns = np.unique(row_indices,
267
+ return_index=True)
268
+ return column_indices[first_columns], unique_row_indices
269
+
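+ # Illustrative sketch (added comment, not part of the original module):
+ # for A = [[1, 0, 2], [0, 3, 0]] and b = [4, -6], every column is a
+ # singleton. Column 1 is rejected because its entry (3) and b[1] (-6)
+ # differ in sign; columns 0 and 2 both sit in row 0, and the rightmost
+ # one is kept, so the function returns (array([2]), array([0])).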
270
+
271
+ def _find_nonzero_rows(A, tol):
272
+ """
273
+ Returns logical array indicating the locations of rows with at least
274
+ one nonzero element.
275
+ """
276
+ return np.any(np.abs(A) > tol, axis=1)
277
+
278
+
279
+ def _select_enter_pivot(c_hat, bl, a, rule="bland", tol=1e-12):
280
+ """
281
+ Selects a pivot to enter the basis. Currently Bland's rule - the smallest
282
+ index that has a negative reduced cost - is the default.
283
+ """
284
+ if rule.lower() == "mrc": # index with minimum reduced cost
285
+ return a[~bl][np.argmin(c_hat)]
286
+ else: # smallest index w/ negative reduced cost
287
+ return a[~bl][c_hat < -tol][0]
288
+
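+ # Illustrative sketch (added comment): with nonbasic column indices
+ # a[~bl] = [1, 3] and reduced costs c_hat = [-2, -5], Bland's rule
+ # returns column 1 (the first index with a negative reduced cost),
+ # while "mrc" returns column 3 (the most negative reduced cost).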
289
+
290
+ def _display_iter(phase, iteration, slack, con, fun):
291
+ """
292
+ Print indicators of optimization status to the console.
293
+ """
294
+ header = iteration % 20 == 0
295
+
296
+ if header:
297
+ print("Phase",
298
+ "Iteration",
299
+ "Minimum Slack ",
300
+ "Constraint Residual",
301
+ "Objective ")
302
+
303
+ # :<X.Y left aligns Y digits in X digit spaces
304
+ fmt = '{0:<6}{1:<10}{2:<20.13}{3:<20.13}{4:<20.13}'
305
+ try:
306
+ slack = np.min(slack)
307
+ except ValueError:
308
+ slack = "NA"
309
+ print(fmt.format(phase, iteration, slack, np.linalg.norm(con), fun))
310
+
311
+
312
+ def _display_and_callback(phase_one_n, x, postsolve_args, status,
313
+ iteration, disp, callback):
314
+ if phase_one_n is not None:
315
+ phase = 1
316
+ x_postsolve = x[:phase_one_n]
317
+ else:
318
+ phase = 2
319
+ x_postsolve = x
320
+ x_o, fun, slack, con = _postsolve(x_postsolve,
321
+ postsolve_args)
322
+
323
+ if callback is not None:
324
+ res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack,
325
+ 'con': con, 'nit': iteration,
326
+ 'phase': phase, 'complete': False,
327
+ 'status': status, 'message': "",
328
+ 'success': False})
329
+ callback(res)
330
+ if disp:
331
+ _display_iter(phase, iteration, slack, con, fun)
332
+
333
+
334
+ def _phase_two(c, A, x, b, callback, postsolve_args, maxiter, tol, disp,
335
+ maxupdate, mast, pivot, iteration=0, phase_one_n=None):
336
+ """
337
+ The heart of the simplex method. Beginning with a basic feasible solution,
338
+ moves to adjacent basic feasible solutions with successively lower
339
+ objective value. Terminates when no adjacent basic feasible solution
340
+ has a lower objective value, or when the problem is determined to be unbounded.
341
+
342
+ This implementation follows the revised simplex method based on LU
343
+ decomposition. Rather than maintaining a tableau or an inverse of the
344
+ basis matrix, we keep a factorization of the basis matrix that allows
345
+ efficient solution of linear systems while avoiding stability issues
346
+ associated with inverted matrices.
347
+ """
348
+ m, n = A.shape
349
+ status = 0
350
+ a = np.arange(n) # indices of columns of A
351
+ ab = np.arange(m) # indices of columns of B
352
+ if maxupdate:
353
+ # basis matrix factorization object; similar to B = A[:, b]
354
+ B = BGLU(A, b, maxupdate, mast)
355
+ else:
356
+ B = LU(A, b)
357
+
358
+ for iteration in range(iteration, maxiter):
359
+
360
+ if disp or callback is not None:
361
+ _display_and_callback(phase_one_n, x, postsolve_args, status,
362
+ iteration, disp, callback)
363
+
364
+ bl = np.zeros(len(a), dtype=bool)
365
+ bl[b] = 1
366
+
367
+ xb = x[b] # basic variables
368
+ cb = c[b] # basic costs
369
+
370
+ try:
371
+ v = B.solve(cb, transposed=True) # similar to v = solve(B.T, cb)
372
+ except LinAlgError:
373
+ status = 4
374
+ break
375
+
376
+ # TODO: cythonize?
377
+ c_hat = c - v.dot(A) # reduced cost
378
+ c_hat = c_hat[~bl]
379
+ # Above is much faster than:
380
+ # N = A[:, ~bl] # slow!
381
+ # c_hat = c[~bl] - v.T.dot(N)
382
+ # Can we perform the multiplication only on the nonbasic columns?
383
+
384
+ if np.all(c_hat >= -tol): # all reduced costs positive -> terminate
385
+ break
386
+
387
+ j = _select_enter_pivot(c_hat, bl, a, rule=pivot, tol=tol)
388
+ u = B.solve(A[:, j]) # similar to u = solve(B, A[:, j])
389
+
390
+ i = u > tol # if none of the u are positive, unbounded
391
+ if not np.any(i):
392
+ status = 3
393
+ break
394
+
395
+ th = xb[i]/u[i]
396
+ l = np.argmin(th) # implicitly selects smallest subscript
397
+ th_star = th[l] # step size
398
+
399
+ x[b] = x[b] - th_star*u # take step
400
+ x[j] = th_star
401
+ B.update(ab[i][l], j) # modify basis
402
+ b = B.b # similar to b[ab[i][l]] = j
403
+
404
+ else:
405
+ # If the end of the for loop is reached (without a break statement),
406
+ # then another step has been taken, so the iteration counter should
407
+ # increment, info should be displayed, and callback should be called.
408
+ iteration += 1
409
+ status = 1
410
+ if disp or callback is not None:
411
+ _display_and_callback(phase_one_n, x, postsolve_args, status,
412
+ iteration, disp, callback)
413
+
414
+ return x, b, status, iteration
415
+
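+ # Worked sketch of one pass (added for illustration): minimize
+ # c = [-1, 0] subject to A = [[1, 1]], b = [2], starting from the basic
+ # feasible solution x = [0, 2] with basis [1]. Then v = 0, the reduced
+ # cost of column 0 is -1, so j = 0 enters; u = [1] and th = [2/1], so
+ # th_star = 2 and column 1 leaves. The new solution is x = [2, 0] with
+ # basis [0]; on the next pass the reduced cost of column 1 is
+ # 0 - (-1)*1 = 1 >= 0, so the loop terminates at the optimum.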
416
+
417
+ def _linprog_rs(c, c0, A, b, x0, callback, postsolve_args,
418
+ maxiter=5000, tol=1e-12, disp=False,
419
+ maxupdate=10, mast=False, pivot="mrc",
420
+ **unknown_options):
421
+ """
422
+ Solve the following linear programming problem via a two-phase
423
+ revised simplex algorithm::
424
+
425
+ minimize: c @ x
426
+
427
+ subject to: A @ x == b
428
+ 0 <= x < oo
429
+
430
+ User-facing documentation is in _linprog_doc.py.
431
+
432
+ Parameters
433
+ ----------
434
+ c : 1-D array
435
+ Coefficients of the linear objective function to be minimized.
436
+ c0 : float
437
+ Constant term in objective function due to fixed (and eliminated)
438
+ variables. (Currently unused.)
439
+ A : 2-D array
440
+ 2-D array which, when matrix-multiplied by ``x``, gives the values of
441
+ the equality constraints at ``x``.
442
+ b : 1-D array
443
+ 1-D array of values representing the RHS of each equality constraint
444
+ (row) in ``A_eq``.
445
+ x0 : 1-D array, optional
446
+ Starting values of the independent variables, which will be refined by
447
+ the optimization algorithm. For the revised simplex method, these must
448
+ correspond with a basic feasible solution.
449
+ callback : callable, optional
450
+ If a callback function is provided, it will be called within each
451
+ iteration of the algorithm. The callback function must accept a single
452
+ `scipy.optimize.OptimizeResult` consisting of the following fields:
453
+
454
+ x : 1-D array
455
+ Current solution vector.
456
+ fun : float
457
+ Current value of the objective function ``c @ x``.
458
+ success : bool
459
+ True only when an algorithm has completed successfully,
460
+ so this is always False as the callback function is called
461
+ only while the algorithm is still iterating.
462
+ slack : 1-D array
463
+ The values of the slack variables. Each slack variable
464
+ corresponds to an inequality constraint. If the slack is zero,
465
+ the corresponding constraint is active.
466
+ con : 1-D array
467
+ The (nominally zero) residuals of the equality constraints,
468
+ that is, ``b - A_eq @ x``.
469
+ phase : int
470
+ The phase of the algorithm being executed.
471
+ status : int
472
+ For revised simplex, this is always 0 because if a different
473
+ status is detected, the algorithm terminates.
474
+ nit : int
475
+ The number of iterations performed.
476
+ message : str
477
+ A string descriptor of the exit status of the optimization.
478
+ postsolve_args : tuple
479
+ Data needed by _postsolve to convert the solution to the standard-form
480
+ problem into the solution to the original problem.
481
+
482
+ Options
483
+ -------
484
+ maxiter : int
485
+ The maximum number of iterations to perform in either phase.
486
+ tol : float
487
+ The tolerance which determines when a solution is "close enough" to
488
+ zero in Phase 1 to be considered a basic feasible solution or close
489
+ enough to positive to serve as an optimal solution.
490
+ disp : bool
491
+ Set to ``True`` if indicators of optimization status are to be printed
492
+ to the console each iteration.
493
+ maxupdate : int
494
+ The maximum number of updates performed on the LU factorization.
495
+ After this many updates is reached, the basis matrix is factorized
496
+ from scratch.
497
+ mast : bool
498
+ Minimize Amortized Solve Time. If enabled, the average time to solve
499
+ a linear system using the basis factorization is measured. Typically,
500
+ the average solve time will decrease with each successive solve after
501
+ initial factorization, as factorization takes much more time than the
502
+ solve operation (and updates). Eventually, however, the updated
503
+ factorization becomes sufficiently complex that the average solve time
504
+ begins to increase. When this is detected, the basis is refactorized
505
+ from scratch. Enable this option to maximize speed at the risk of
506
+ nondeterministic behavior. Ignored if ``maxupdate`` is 0.
507
+ pivot : "mrc" or "bland"
508
+ Pivot rule: Minimum Reduced Cost (default) or Bland's rule. Choose
509
+ Bland's rule if iteration limit is reached and cycling is suspected.
510
+ unknown_options : dict
511
+ Optional arguments not used by this particular solver. If
512
+ `unknown_options` is non-empty a warning is issued listing all
513
+ unused options.
514
+
515
+ Returns
516
+ -------
517
+ x : 1-D array
518
+ Solution vector.
519
+ status : int
520
+ An integer representing the exit status of the optimization::
521
+
522
+ 0 : Optimization terminated successfully
523
+ 1 : Iteration limit reached
524
+ 2 : Problem appears to be infeasible
525
+ 3 : Problem appears to be unbounded
526
+ 4 : Numerical difficulties encountered
527
+ 5 : No constraints; turn presolve on
528
+ 6 : Guess x0 cannot be converted to a basic feasible solution
529
+
530
+ message : str
531
+ A string descriptor of the exit status of the optimization.
532
+ iteration : int
533
+ The number of iterations taken to solve the problem.
534
+ """
535
+
536
+ _check_unknown_options(unknown_options)
537
+
538
+ messages = ["Optimization terminated successfully.",
539
+ "Iteration limit reached.",
540
+ "The problem appears infeasible, as the phase one auxiliary "
541
+ "problem terminated successfully with a residual of {0:.1e}, "
542
+ "greater than the tolerance {1} required for the solution to "
543
+ "be considered feasible. Consider increasing the tolerance to "
544
+ "be greater than {0:.1e}. If this tolerance is unnaceptably "
545
+ "large, the problem is likely infeasible.",
546
+ "The problem is unbounded, as the simplex algorithm found "
547
+ "a basic feasible solution from which there is a direction "
548
+ "with negative reduced cost in which all decision variables "
549
+ "increase.",
550
+ "Numerical difficulties encountered; consider trying "
551
+ "method='interior-point'.",
552
+ "Problems with no constraints are trivially solved; please "
553
+ "turn presolve on.",
554
+ "The guess x0 cannot be converted to a basic feasible "
555
+ "solution. "
556
+ ]
557
+
558
+ if A.size == 0: # address test_unbounded_below_no_presolve_corrected
559
+ return np.zeros(c.shape), 5, messages[5], 0
560
+
561
+ x, basis, A, b, residual, status, iteration = (
562
+ _phase_one(A, b, x0, callback, postsolve_args,
563
+ maxiter, tol, disp, maxupdate, mast, pivot))
564
+
565
+ if status == 0:
566
+ x, basis, status, iteration = _phase_two(c, A, x, basis, callback,
567
+ postsolve_args,
568
+ maxiter, tol, disp,
569
+ maxupdate, mast, pivot,
570
+ iteration)
571
+
572
+ return x, status, messages[status].format(residual, tol), iteration
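+ # A hedged usage sketch: this private solver is reached through the
+ # public `scipy.optimize.linprog` interface in SciPy versions where
+ # method='revised simplex' is available, e.g.:
+ #
+ #     >>> from scipy.optimize import linprog
+ #     >>> res = linprog(c=[-1, 4], A_ub=[[-3, 1], [1, 2]], b_ub=[6, 4],
+ #     ...               method='revised simplex')
+ #     >>> res.status, round(res.fun, 6)
+ #     (0, -4.0)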
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog_simplex.py ADDED
@@ -0,0 +1,661 @@
1
+ """Simplex method for linear programming
2
+
3
+ The *simplex* method uses a traditional, full-tableau implementation of
4
+ Dantzig's simplex algorithm [1]_, [2]_ (*not* the Nelder-Mead simplex).
5
+ This algorithm is included for backwards compatibility and educational
6
+ purposes.
7
+
8
+ .. versionadded:: 0.15.0
9
+
10
+ Warnings
11
+ --------
12
+
13
+ The simplex method may encounter numerical difficulties when pivot
14
+ values are close to the specified tolerance. If encountered, try
15
+ removing any redundant constraints, changing the pivot strategy to Bland's
16
+ rule, or increasing the tolerance value.
17
+
18
+ Alternatively, more robust methods may be used. See
19
+ :ref:`'interior-point' <optimize.linprog-interior-point>` and
20
+ :ref:`'revised simplex' <optimize.linprog-revised_simplex>`.
21
+
22
+ References
23
+ ----------
24
+ .. [1] Dantzig, George B., Linear programming and extensions. Rand
25
+ Corporation Research Study Princeton Univ. Press, Princeton, NJ,
26
+ 1963
27
+ .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
28
+ Mathematical Programming", McGraw-Hill, Chapter 4.
29
+ """
30
+
31
+ import numpy as np
32
+ from warnings import warn
33
+ from ._optimize import OptimizeResult, OptimizeWarning, _check_unknown_options
34
+ from ._linprog_util import _postsolve
35
+
36
+
37
+ def _pivot_col(T, tol=1e-9, bland=False):
38
+ """
39
+ Given a linear programming simplex tableau, determine the column
40
+ of the variable to enter the basis.
41
+
42
+ Parameters
43
+ ----------
44
+ T : 2-D array
45
+ A 2-D array representing the simplex tableau, T, corresponding to the
46
+ linear programming problem. It should have the form:
47
+
48
+ [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
49
+ [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
50
+ .
51
+ .
52
+ .
53
+ [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
54
+ [c[0], c[1], ..., c[n_total], 0]]
55
+
56
+ for a Phase 2 problem, or the form:
57
+
58
+ [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
59
+ [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
60
+ .
61
+ .
62
+ .
63
+ [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
64
+ [c[0], c[1], ..., c[n_total], 0],
65
+ [c'[0], c'[1], ..., c'[n_total], 0]]
66
+
67
+ for a Phase 1 problem (a problem in which a basic feasible solution is
68
+ sought prior to maximizing the actual objective). ``T`` is modified in
69
+ place by ``_solve_simplex``.
70
+ tol : float
71
+ Elements in the objective row larger than -tol will not be considered
72
+ for pivoting. Nominally this value is zero, but numerical issues
73
+ cause a tolerance about zero to be necessary.
74
+ bland : bool
75
+ If True, use Bland's rule for selection of the column (select the
76
+ first column with a negative coefficient in the objective row,
77
+ regardless of magnitude).
78
+
79
+ Returns
80
+ -------
81
+ status: bool
82
+ True if a suitable pivot column was found, otherwise False.
83
+ A return of False indicates that the linear programming simplex
84
+ algorithm is complete.
85
+ col: int
86
+ The index of the column of the pivot element.
87
+ If status is False, col will be returned as nan.
88
+ """
89
+ ma = np.ma.masked_where(T[-1, :-1] >= -tol, T[-1, :-1], copy=False)
90
+ if ma.count() == 0:
91
+ return False, np.nan
92
+ if bland:
93
+ # ma.mask is sometimes 0d
94
+ return True, np.nonzero(np.logical_not(np.atleast_1d(ma.mask)))[0][0]
95
+ return True, np.ma.nonzero(ma == ma.min())[0][0]
96
+
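+ # Illustrative sketch (added comment): if the objective row is
+ # T[-1, :-1] = [2, -1, -3], columns 1 and 2 are candidates; the default
+ # rule picks column 2 (most negative coefficient) while Bland's rule
+ # picks column 1 (lowest index with a negative coefficient).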
97
+
98
+ def _pivot_row(T, basis, pivcol, phase, tol=1e-9, bland=False):
99
+ """
100
+ Given a linear programming simplex tableau, determine the row for the
101
+ pivot operation.
102
+
103
+ Parameters
104
+ ----------
105
+ T : 2-D array
106
+ A 2-D array representing the simplex tableau, T, corresponding to the
107
+ linear programming problem. It should have the form:
108
+
109
+ [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
110
+ [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
111
+ .
112
+ .
113
+ .
114
+ [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
115
+ [c[0], c[1], ..., c[n_total], 0]]
116
+
117
+ for a Phase 2 problem, or the form:
118
+
119
+ [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
120
+ [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
121
+ .
122
+ .
123
+ .
124
+ [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
125
+ [c[0], c[1], ..., c[n_total], 0],
126
+ [c'[0], c'[1], ..., c'[n_total], 0]]
127
+
128
+ for a Phase 1 problem (a problem in which a basic feasible solution is
129
+ sought prior to maximizing the actual objective). ``T`` is modified in
130
+ place by ``_solve_simplex``.
131
+ basis : array
132
+ A list of the current basic variables.
133
+ pivcol : int
134
+ The index of the pivot column.
135
+ phase : int
136
+ The phase of the simplex algorithm (1 or 2).
137
+ tol : float
138
+ Elements in the pivot column smaller than tol will not be considered
139
+ for pivoting. Nominally this value is zero, but numerical issues
140
+ cause a tolerance about zero to be necessary.
141
+ bland : bool
142
+ If True, use Bland's rule for selection of the row (if more than one
143
+ row can be used, choose the one with the lowest variable index).
144
+
145
+ Returns
146
+ -------
147
+ status: bool
148
+ True if a suitable pivot row was found, otherwise False. A return
149
+ of False indicates that the linear programming problem is unbounded.
150
+ row: int
151
+ The index of the row of the pivot element. If status is False, row
152
+ will be returned as nan.
153
+ """
154
+ if phase == 1:
155
+ k = 2
156
+ else:
157
+ k = 1
158
+ ma = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, pivcol], copy=False)
159
+ if ma.count() == 0:
160
+ return False, np.nan
161
+ mb = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, -1], copy=False)
162
+ q = mb / ma
163
+ min_rows = np.ma.nonzero(q == q.min())[0]
164
+ if bland:
165
+ return True, min_rows[np.argmin(np.take(basis, min_rows))]
166
+ return True, min_rows[0]
167
+
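+ # Illustrative sketch (added comment): with pivot-column entries
+ # [2, 0.5, -1] and right-hand sides [4, 1, 3], the negative entry is
+ # masked out and the ratios are [4/2, 1/0.5] = [2, 2]; rows 0 and 1 tie,
+ # so the default rule returns row 0, while Bland's rule returns whichever
+ # of the two has the smaller basic-variable index.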
168
+
169
+ def _apply_pivot(T, basis, pivrow, pivcol, tol=1e-9):
170
+ """
171
+ Pivot the simplex tableau in place on the element given by (pivrow, pivcol).
172
+ The entering variable corresponds to the column given by pivcol forcing
173
+ the variable basis[pivrow] to leave the basis.
174
+
175
+ Parameters
176
+ ----------
177
+ T : 2-D array
178
+ A 2-D array representing the simplex tableau, T, corresponding to the
179
+ linear programming problem. It should have the form:
180
+
181
+ [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
182
+ [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
183
+ .
184
+ .
185
+ .
186
+ [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
187
+ [c[0], c[1], ..., c[n_total], 0]]
188
+
189
+ for a Phase 2 problem, or the form:
190
+
191
+ [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
192
+ [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
193
+ .
194
+ .
195
+ .
196
+ [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
197
+ [c[0], c[1], ..., c[n_total], 0],
198
+ [c'[0], c'[1], ..., c'[n_total], 0]]
199
+
200
+ for a Phase 1 problem (a problem in which a basic feasible solution is
201
+ sought prior to maximizing the actual objective. ``T`` is modified in
202
+ place by ``_solve_simplex``.
203
+ basis : 1-D array
204
+ An array of the indices of the basic variables, such that basis[i]
205
+ contains the column corresponding to the basic variable for row i.
206
+ Basis is modified in place by _apply_pivot.
207
+ pivrow : int
208
+ Row index of the pivot.
209
+ pivcol : int
210
+ Column index of the pivot.
211
+ """
212
+ basis[pivrow] = pivcol
213
+ pivval = T[pivrow, pivcol]
214
+ T[pivrow] = T[pivrow] / pivval
215
+ for irow in range(T.shape[0]):
216
+ if irow != pivrow:
217
+ T[irow] = T[irow] - T[pivrow] * T[irow, pivcol]
218
+
219
+ # The selected pivot should never lead to a pivot value less than the tol.
220
+ if np.isclose(pivval, tol, atol=0, rtol=1e4):
221
+ message = (
222
+ f"The pivot operation produces a pivot value of:{pivval: .1e}, "
223
+ "which is only slightly greater than the specified "
224
+ f"tolerance{tol: .1e}. This may lead to issues regarding the "
225
+ "numerical stability of the simplex method. "
226
+ "Removing redundant constraints, changing the pivot strategy "
227
+ "via Bland's rule or increasing the tolerance may "
228
+ "help reduce the issue.")
229
+ warn(message, OptimizeWarning, stacklevel=5)
230
+
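+ # Worked sketch (added comment): pivoting T = [[2, 1, 4], [1, 3, 6]] on
+ # (pivrow, pivcol) = (0, 0) first scales row 0 to [1, 0.5, 2], then
+ # eliminates column 0 from row 1, giving [0, 2.5, 4].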
231
+
232
+ def _solve_simplex(T, n, basis, callback, postsolve_args,
233
+ maxiter=1000, tol=1e-9, phase=2, bland=False, nit0=0,
234
+ ):
235
+ """
236
+ Solve a linear programming problem in "standard form" using the Simplex
237
+ Method. Linear Programming is intended to solve the following problem form:
238
+
239
+ Minimize::
240
+
241
+ c @ x
242
+
243
+ Subject to::
244
+
245
+ A @ x == b
246
+ x >= 0
247
+
248
+ Parameters
249
+ ----------
250
+ T : 2-D array
251
+ A 2-D array representing the simplex tableau, T, corresponding to the
252
+ linear programming problem. It should have the form:
253
+
254
+ [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
255
+ [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
256
+ .
257
+ .
258
+ .
259
+ [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
260
+ [c[0], c[1], ..., c[n_total], 0]]
261
+
262
+ for a Phase 2 problem, or the form:
263
+
264
+ [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
265
+ [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
266
+ .
267
+ .
268
+ .
269
+ [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
270
+ [c[0], c[1], ..., c[n_total], 0],
271
+ [c'[0], c'[1], ..., c'[n_total], 0]]
272
+
273
+ for a Phase 1 problem (a problem in which a basic feasible solution is
274
+ sought prior to maximizing the actual objective. ``T`` is modified in
275
+ place by ``_solve_simplex``.
276
+ n : int
277
+ The number of true variables in the problem.
278
+ basis : 1-D array
279
+ An array of the indices of the basic variables, such that basis[i]
280
+ contains the column corresponding to the basic variable for row i.
281
+ Basis is modified in place by _solve_simplex
282
+ callback : callable, optional
283
+ If a callback function is provided, it will be called within each
284
+ iteration of the algorithm. The callback must accept a
285
+ `scipy.optimize.OptimizeResult` consisting of the following fields:
286
+
287
+ x : 1-D array
288
+ Current solution vector
289
+ fun : float
290
+ Current value of the objective function
291
+ success : bool
292
+ True only when a phase has completed successfully. This
293
+ will be False for most iterations.
294
+ slack : 1-D array
295
+ The values of the slack variables. Each slack variable
296
+ corresponds to an inequality constraint. If the slack is zero,
297
+ the corresponding constraint is active.
298
+ con : 1-D array
299
+ The (nominally zero) residuals of the equality constraints,
300
+ that is, ``b - A_eq @ x``
301
+ phase : int
302
+ The phase of the optimization being executed. In phase 1 a basic
303
+ feasible solution is sought and the T has an additional row
304
+ representing an alternate objective function.
305
+ status : int
306
+ An integer representing the exit status of the optimization::
307
+
308
+ 0 : Optimization terminated successfully
309
+ 1 : Iteration limit reached
310
+ 2 : Problem appears to be infeasible
311
+ 3 : Problem appears to be unbounded
312
+ 4 : Serious numerical difficulties encountered
313
+
314
+ nit : int
315
+ The number of iterations performed.
316
+ message : str
317
+ A string descriptor of the exit status of the optimization.
318
+ postsolve_args : tuple
319
+ Data needed by _postsolve to convert the solution to the standard-form
320
+ problem into the solution to the original problem.
321
+ maxiter : int
322
+ The maximum number of iterations to perform before aborting the
323
+ optimization.
324
+ tol : float
325
+ The tolerance which determines when a solution is "close enough" to
326
+ zero in Phase 1 to be considered a basic feasible solution or close
327
+ enough to positive to serve as an optimal solution.
328
+ phase : int
329
+ The phase of the optimization being executed. In phase 1 a basic
330
+ feasible solution is sought and the T has an additional row
331
+ representing an alternate objective function.
332
+ bland : bool
333
+ If True, choose pivots using Bland's rule [3]_. In problems which
334
+ fail to converge due to cycling, using Bland's rule can provide
335
+ convergence at the expense of a less optimal path about the simplex.
336
+ nit0 : int
337
+ The initial iteration number used to keep an accurate iteration total
338
+ in a two-phase problem.
339
+
340
+ Returns
341
+ -------
342
+ nit : int
343
+ The number of iterations. Used to keep an accurate iteration total
344
+ in the two-phase problem.
345
+ status : int
346
+ An integer representing the exit status of the optimization::
347
+
348
+ 0 : Optimization terminated successfully
349
+ 1 : Iteration limit reached
350
+ 2 : Problem appears to be infeasible
351
+ 3 : Problem appears to be unbounded
352
+ 4 : Serious numerical difficulties encountered
353
+
354
+ """
355
+ nit = nit0
356
+ status = 0
357
+ message = ''
358
+ complete = False
359
+
360
+ if phase == 1:
361
+ m = T.shape[1]-2
362
+ elif phase == 2:
363
+ m = T.shape[1]-1
364
+ else:
365
+ raise ValueError("Argument 'phase' to _solve_simplex must be 1 or 2")
366
+
367
+ if phase == 2:
368
+ # Check if any artificial variables are still in the basis.
369
+ # If yes, check if any coefficients from this row and a column
370
+ # corresponding to one of the non-artificial variable is non-zero.
371
+ # If found, pivot at this term. If not, start phase 2.
372
+ # Do this for all artificial variables in the basis.
373
+ # Ref: "An Introduction to Linear Programming and Game Theory"
374
+ # by Paul R. Thie, Gerard E. Keough, 3rd Ed,
375
+ # Chapter 3.7 Redundant Systems (page 102)
376
+ for pivrow in [row for row in range(basis.size)
377
+ if basis[row] > T.shape[1] - 2]:
378
+ non_zero_row = [col for col in range(T.shape[1] - 1)
379
+ if abs(T[pivrow, col]) > tol]
380
+ if len(non_zero_row) > 0:
381
+ pivcol = non_zero_row[0]
382
+ _apply_pivot(T, basis, pivrow, pivcol, tol)
383
+ nit += 1
384
+
385
+ if len(basis[:m]) == 0:
386
+ solution = np.empty(T.shape[1] - 1, dtype=np.float64)
387
+ else:
388
+ solution = np.empty(max(T.shape[1] - 1, max(basis[:m]) + 1),
389
+ dtype=np.float64)
390
+
391
+ while not complete:
392
+ # Find the pivot column
393
+ pivcol_found, pivcol = _pivot_col(T, tol, bland)
394
+ if not pivcol_found:
395
+ pivcol = np.nan
396
+ pivrow = np.nan
397
+ status = 0
398
+ complete = True
399
+ else:
400
+ # Find the pivot row
401
+ pivrow_found, pivrow = _pivot_row(T, basis, pivcol, phase, tol, bland)
402
+ if not pivrow_found:
403
+ status = 3
404
+ complete = True
405
+
406
+ if callback is not None:
407
+ solution[:] = 0
408
+ solution[basis[:n]] = T[:n, -1]
409
+ x = solution[:m]
410
+ x, fun, slack, con = _postsolve(
411
+ x, postsolve_args
412
+ )
413
+ res = OptimizeResult({
414
+ 'x': x,
415
+ 'fun': fun,
416
+ 'slack': slack,
417
+ 'con': con,
418
+ 'status': status,
419
+ 'message': message,
420
+ 'nit': nit,
421
+ 'success': status == 0 and complete,
422
+ 'phase': phase,
423
+ 'complete': complete,
424
+ })
425
+ callback(res)
426
+
427
+ if not complete:
428
+ if nit >= maxiter:
429
+ # Iteration limit exceeded
430
+ status = 1
431
+ complete = True
432
+ else:
433
+ _apply_pivot(T, basis, pivrow, pivcol, tol)
434
+ nit += 1
435
+ return nit, status
436
+
437
+
438
+ def _linprog_simplex(c, c0, A, b, callback, postsolve_args,
439
+ maxiter=1000, tol=1e-9, disp=False, bland=False,
440
+ **unknown_options):
441
+ """
442
+ Minimize a linear objective function subject to linear equality and
443
+ non-negativity constraints using the two phase simplex method.
444
+ Linear programming is intended to solve problems of the following form:
445
+
446
+ Minimize::
447
+
448
+ c @ x
449
+
450
+ Subject to::
451
+
452
+ A @ x == b
453
+ x >= 0
454
+
455
+ User-facing documentation is in _linprog_doc.py.
456
+
457
+ Parameters
458
+ ----------
459
+ c : 1-D array
460
+ Coefficients of the linear objective function to be minimized.
461
+ c0 : float
462
+ Constant term in objective function due to fixed (and eliminated)
463
+ variables. (Purely for display.)
464
+ A : 2-D array
465
+ 2-D array such that ``A @ x``, gives the values of the equality
466
+ constraints at ``x``.
467
+ b : 1-D array
468
+ 1-D array of values representing the right hand side of each equality
469
+ constraint (row) in ``A``.
470
+ callback : callable, optional
471
+ If a callback function is provided, it will be called within each
472
+ iteration of the algorithm. The callback function must accept a single
473
+ `scipy.optimize.OptimizeResult` consisting of the following fields:
474
+
475
+ x : 1-D array
476
+ Current solution vector
477
+ fun : float
478
+ Current value of the objective function
479
+ success : bool
480
+ True when an algorithm has completed successfully.
481
+ slack : 1-D array
482
+ The values of the slack variables. Each slack variable
483
+ corresponds to an inequality constraint. If the slack is zero,
484
+ the corresponding constraint is active.
485
+ con : 1-D array
486
+ The (nominally zero) residuals of the equality constraints,
487
+ that is, ``b - A_eq @ x``
488
+ phase : int
489
+ The phase of the algorithm being executed.
490
+ status : int
491
+ An integer representing the status of the optimization::
492
+
493
+ 0 : Algorithm proceeding nominally
494
+ 1 : Iteration limit reached
495
+ 2 : Problem appears to be infeasible
496
+ 3 : Problem appears to be unbounded
497
+ 4 : Serious numerical difficulties encountered
498
+ nit : int
499
+ The number of iterations performed.
500
+ message : str
501
+ A string descriptor of the exit status of the optimization.
502
+ postsolve_args : tuple
503
+ Data needed by _postsolve to convert the solution to the standard-form
504
+ problem into the solution to the original problem.
505
+
506
+ Options
507
+ -------
508
+ maxiter : int
509
+ The maximum number of iterations to perform.
510
+ disp : bool
511
+ If True, print exit status message to sys.stdout
512
+ tol : float
513
+ The tolerance which determines when a solution is "close enough" to
514
+ zero in Phase 1 to be considered a basic feasible solution or close
515
+ enough to positive to serve as an optimal solution.
516
+ bland : bool
517
+ If True, use Bland's anti-cycling rule [3]_ to choose pivots to
518
+ prevent cycling. If False, choose pivots which should lead to a
519
+ converged solution more quickly. The latter method is subject to
520
+ cycling (non-convergence) in rare instances.
521
+ unknown_options : dict
522
+ Optional arguments not used by this particular solver. If
523
+ `unknown_options` is non-empty a warning is issued listing all
524
+ unused options.
525
+
526
+ Returns
527
+ -------
528
+ x : 1-D array
529
+ Solution vector.
530
+ status : int
531
+ An integer representing the exit status of the optimization::
532
+
533
+ 0 : Optimization terminated successfully
534
+ 1 : Iteration limit reached
535
+ 2 : Problem appears to be infeasible
536
+ 3 : Problem appears to be unbounded
537
+ 4 : Serious numerical difficulties encountered
538
+
539
+ message : str
540
+ A string descriptor of the exit status of the optimization.
541
+ iteration : int
542
+ The number of iterations taken to solve the problem.
543
+
544
+ References
545
+ ----------
546
+ .. [1] Dantzig, George B., Linear programming and extensions. Rand
547
+ Corporation Research Study Princeton Univ. Press, Princeton, NJ,
548
+ 1963
549
+ .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
550
+ Mathematical Programming", McGraw-Hill, Chapter 4.
551
+ .. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
552
+ Mathematics of Operations Research (2), 1977: pp. 103-107.
553
+
554
+
555
+ Notes
556
+ -----
557
+ The expected problem formulation differs between the top level ``linprog``
558
+ module and the method-specific solvers. The method-specific solvers expect a
559
+ problem in standard form:
560
+
561
+ Minimize::
562
+
563
+ c @ x
564
+
565
+ Subject to::
566
+
567
+ A @ x == b
568
+ x >= 0
569
+
570
+ Whereas the top level ``linprog`` module expects a problem of form:
571
+
572
+ Minimize::
573
+
574
+ c @ x
575
+
576
+ Subject to::
577
+
578
+ A_ub @ x <= b_ub
579
+ A_eq @ x == b_eq
580
+ lb <= x <= ub
581
+
582
+ where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.
583
+
584
+ The original problem contains equality, upper-bound and variable constraints
585
+ whereas the method-specific solver requires equality constraints and
586
+ variable non-negativity.
587
+
588
+ The ``linprog`` module converts the original problem to standard form by
589
+ converting the simple bounds to upper bound constraints, introducing
590
+ non-negative slack variables for inequality constraints, and expressing
591
+ unbounded variables as the difference between two non-negative variables.
592
+ """
593
+ _check_unknown_options(unknown_options)
594
+
595
+ status = 0
596
+ messages = {0: "Optimization terminated successfully.",
597
+ 1: "Iteration limit reached.",
598
+ 2: "Optimization failed. Unable to find a feasible"
599
+ " starting point.",
600
+ 3: "Optimization failed. The problem appears to be unbounded.",
601
+ 4: "Optimization failed. Singular matrix encountered."}
602
+
603
+ n, m = A.shape
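+ # Note: here n is the number of constraints (rows of A) and m is the
+ # number of variables (columns of A), the reverse of the usual (m, n)
+ # convention.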
604
+
605
+ # All constraints must have b >= 0.
606
+ is_negative_constraint = np.less(b, 0)
607
+ A[is_negative_constraint] *= -1
608
+ b[is_negative_constraint] *= -1
609
+
610
+ # As all constraints are equality constraints the artificial variables
611
+ # will also be basic variables.
612
+ av = np.arange(n) + m
613
+ basis = av.copy()
614
+
615
+ # Format the phase one tableau by adding artificial variables and stacking
616
+ # the constraints, the objective row and pseudo-objective row.
617
+ row_constraints = np.hstack((A, np.eye(n), b[:, np.newaxis]))
618
+ row_objective = np.hstack((c, np.zeros(n), c0))
619
+ row_pseudo_objective = -row_constraints.sum(axis=0)
620
+ row_pseudo_objective[av] = 0
621
+ T = np.vstack((row_constraints, row_objective, row_pseudo_objective))
622
+
623
+ nit1, status = _solve_simplex(T, n, basis, callback=callback,
624
+ postsolve_args=postsolve_args,
625
+ maxiter=maxiter, tol=tol, phase=1,
626
+ bland=bland
627
+ )
628
+ # if pseudo objective is zero, remove the last row from the tableau and
629
+ # proceed to phase 2
630
+ nit2 = nit1
631
+ if abs(T[-1, -1]) < tol:
632
+ # Remove the pseudo-objective row from the tableau
633
+ T = T[:-1, :]
634
+ # Remove the artificial variable columns from the tableau
635
+ T = np.delete(T, av, 1)
636
+ else:
637
+ # Failure to find a feasible starting point
638
+ status = 2
639
+ messages[status] = (
640
+ "Phase 1 of the simplex method failed to find a feasible "
641
+ "solution. The pseudo-objective function evaluates to {0:.1e} "
642
+ "which exceeds the required tolerance of {1} for a solution to be "
643
+ "considered 'close enough' to zero to be a basic solution. "
644
+ "Consider increasing the tolerance to be greater than {0:.1e}. "
645
+ "If this tolerance is unacceptably large the problem may be "
646
+ "infeasible.".format(abs(T[-1, -1]), tol)
647
+ )
648
+
649
+ if status == 0:
650
+ # Phase 2
651
+ nit2, status = _solve_simplex(T, n, basis, callback=callback,
652
+ postsolve_args=postsolve_args,
653
+ maxiter=maxiter, tol=tol, phase=2,
654
+ bland=bland, nit0=nit1
655
+ )
656
+
657
+ solution = np.zeros(n + m)
658
+ solution[basis[:n]] = T[:n, -1]
659
+ x = solution[:m]
660
+
661
+ return x, status, messages[status], int(nit2)
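+ # A hedged usage sketch: this solver is reached through the public
+ # `scipy.optimize.linprog` interface in SciPy versions where
+ # method='simplex' is available, e.g.:
+ #
+ #     >>> from scipy.optimize import linprog
+ #     >>> res = linprog(c=[1, 1], A_eq=[[1, 2]], b_eq=[4],
+ #     ...               method='simplex')
+ #     >>> round(res.fun, 6)
+ #     2.0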
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/common.py ADDED
@@ -0,0 +1,733 @@
1
+ """Functions used by least-squares algorithms."""
2
+ from math import copysign
3
+
4
+ import numpy as np
5
+ from numpy.linalg import norm
6
+
7
+ from scipy.linalg import cho_factor, cho_solve, LinAlgError
8
+ from scipy.sparse import issparse
9
+ from scipy.sparse.linalg import LinearOperator, aslinearoperator
10
+
11
+
12
+ EPS = np.finfo(float).eps
13
+
14
+
15
+ # Functions related to a trust-region problem.
16
+
17
+
18
+ def intersect_trust_region(x, s, Delta):
19
+ """Find the intersection of a line with the boundary of a trust region.
20
+
21
+ This function solves the quadratic equation with respect to t
22
+ ||(x + s*t)||**2 = Delta**2.
23
+
24
+ Returns
25
+ -------
26
+ t_neg, t_pos : tuple of float
27
+ Negative and positive roots.
28
+
29
+ Raises
30
+ ------
31
+ ValueError
32
+ If `s` is zero or `x` is not within the trust region.
33
+ """
34
+ a = np.dot(s, s)
35
+ if a == 0:
36
+ raise ValueError("`s` is zero.")
37
+
38
+ b = np.dot(x, s)
39
+
40
+ c = np.dot(x, x) - Delta**2
41
+ if c > 0:
42
+ raise ValueError("`x` is not within the trust region.")
43
+
44
+ d = np.sqrt(b*b - a*c) # Root from one fourth of the discriminant.
45
+
46
+ # Computations below avoid loss of significance, see "Numerical Recipes".
47
+ q = -(b + copysign(d, b))
48
+ t1 = q / a
49
+ t2 = c / q
50
+
51
+ if t1 < t2:
52
+ return t1, t2
53
+ else:
54
+ return t2, t1
55
+
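+ # Worked sketch (added comment): for x = [0.0], s = [1.0], Delta = 2.0
+ # the equation ||x + s*t||**2 = Delta**2 reduces to t**2 = 4, so the
+ # function returns (-2.0, 2.0).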
56
+
57
+ def solve_lsq_trust_region(n, m, uf, s, V, Delta, initial_alpha=None,
58
+ rtol=0.01, max_iter=10):
59
+ """Solve a trust-region problem arising in least-squares minimization.
60
+
61
+ This function implements a method described by J. J. More [1]_ and used
62
+ in MINPACK, but it relies on a single SVD of Jacobian instead of series
63
+ of Cholesky decompositions. Before running this function, compute:
64
+ ``U, s, VT = svd(J, full_matrices=False)``.
65
+
66
+ Parameters
67
+ ----------
68
+ n : int
69
+ Number of variables.
70
+ m : int
71
+ Number of residuals.
72
+ uf : ndarray
73
+ Computed as U.T.dot(f).
74
+ s : ndarray
75
+ Singular values of J.
76
+ V : ndarray
77
+ Transpose of VT.
78
+ Delta : float
79
+ Radius of a trust region.
80
+ initial_alpha : float, optional
81
+ Initial guess for alpha, which might be available from a previous
82
+ iteration. If None, determined automatically.
83
+ rtol : float, optional
84
+ Stopping tolerance for the root-finding procedure. Namely, the
85
+ solution ``p`` will satisfy ``abs(norm(p) - Delta) < rtol * Delta``.
86
+ max_iter : int, optional
87
+ Maximum allowed number of iterations for the root-finding procedure.
88
+
89
+ Returns
90
+ -------
91
+ p : ndarray, shape (n,)
92
+ Found solution of a trust-region problem.
93
+ alpha : float
94
+ Positive value such that (J.T*J + alpha*I)*p = -J.T*f.
95
+ Sometimes called Levenberg-Marquardt parameter.
96
+ n_iter : int
97
+ Number of iterations made by root-finding procedure. Zero means
98
+ that Gauss-Newton step was selected as the solution.
99
+
100
+ References
101
+ ----------
102
+ .. [1] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation
103
+ and Theory," Numerical Analysis, ed. G. A. Watson, Lecture Notes
104
+ in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
105
+ """
106
+ def phi_and_derivative(alpha, suf, s, Delta):
107
+ """Function of which to find zero.
108
+
109
+ It is defined as "norm of regularized (by alpha) least-squares
110
+ solution minus `Delta`". Refer to [1]_.
111
+ """
112
+ denom = s**2 + alpha
113
+ p_norm = norm(suf / denom)
114
+ phi = p_norm - Delta
115
+ phi_prime = -np.sum(suf ** 2 / denom**3) / p_norm
116
+ return phi, phi_prime
117
+
118
+ suf = s * uf
119
+
120
+ # Check if J has full rank and try Gauss-Newton step.
121
+ if m >= n:
122
+ threshold = EPS * m * s[0]
123
+ full_rank = s[-1] > threshold
124
+ else:
125
+ full_rank = False
126
+
127
+ if full_rank:
128
+ p = -V.dot(uf / s)
129
+ if norm(p) <= Delta:
130
+ return p, 0.0, 0
131
+
132
+ alpha_upper = norm(suf) / Delta
133
+
134
+ if full_rank:
135
+ phi, phi_prime = phi_and_derivative(0.0, suf, s, Delta)
136
+ alpha_lower = -phi / phi_prime
137
+ else:
138
+ alpha_lower = 0.0
139
+
140
+ if initial_alpha is None or (not full_rank and initial_alpha == 0):
141
+ alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)
142
+ else:
143
+ alpha = initial_alpha
144
+
145
+ for it in range(max_iter):
146
+ if alpha < alpha_lower or alpha > alpha_upper:
147
+ alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)
148
+
149
+ phi, phi_prime = phi_and_derivative(alpha, suf, s, Delta)
150
+
151
+ if phi < 0:
152
+ alpha_upper = alpha
153
+
154
+ ratio = phi / phi_prime
155
+ alpha_lower = max(alpha_lower, alpha - ratio)
156
+ alpha -= (phi + Delta) * ratio / Delta
157
+
158
+ if np.abs(phi) < rtol * Delta:
159
+ break
160
+
161
+ p = -V.dot(suf / (s**2 + alpha))
162
+
163
+ # Make the norm of p equal to Delta; p is changed only slightly during
164
+ # this. It is done to prevent p from lying outside the trust region
165
+ # (which can cause problems later).
166
+ p *= Delta / norm(p)
167
+
168
+ return p, alpha, it + 1
169
+
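+ # Typical call pattern, following the docstring above (J is the Jacobian,
+ # f the residual vector, Delta the trust-region radius, svd from
+ # scipy.linalg):
+ #
+ #     U, s, VT = svd(J, full_matrices=False)
+ #     p, alpha, n_iter = solve_lsq_trust_region(
+ #         J.shape[1], J.shape[0], U.T.dot(f), s, VT.T, Delta)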
170
+
171
+ def solve_trust_region_2d(B, g, Delta):
172
+ """Solve a general trust-region problem in 2 dimensions.
173
+
174
+ The problem is reformulated as a 4th order algebraic equation,
175
+ the solution of which is found by numpy.roots.
176
+
177
+ Parameters
178
+ ----------
179
+ B : ndarray, shape (2, 2)
180
+ Symmetric matrix, defines a quadratic term of the function.
181
+ g : ndarray, shape (2,)
182
+ Defines a linear term of the function.
183
+ Delta : float
184
+ Radius of a trust region.
185
+
186
+ Returns
187
+ -------
188
+ p : ndarray, shape (2,)
189
+ Found solution.
190
+ newton_step : bool
191
+ Whether the returned solution is the Newton step which lies within
192
+ the trust region.
193
+ """
194
+ try:
195
+ R, lower = cho_factor(B)
196
+ p = -cho_solve((R, lower), g)
197
+ if np.dot(p, p) <= Delta**2:
198
+ return p, True
199
+ except LinAlgError:
200
+ pass
201
+
202
+ a = B[0, 0] * Delta**2
203
+ b = B[0, 1] * Delta**2
204
+ c = B[1, 1] * Delta**2
205
+
206
+ d = g[0] * Delta
207
+ f = g[1] * Delta
208
+
209
+ coeffs = np.array(
210
+ [-b + d, 2 * (a - c + f), 6 * b, 2 * (-a + c + f), -b - d])
211
+ t = np.roots(coeffs) # Can handle leading zeros.
212
+ t = np.real(t[np.isreal(t)])
213
+
214
+ p = Delta * np.vstack((2 * t / (1 + t**2), (1 - t**2) / (1 + t**2)))
215
+ value = 0.5 * np.sum(p * B.dot(p), axis=0) + np.dot(g, p)
216
+ i = np.argmin(value)
217
+ p = p[:, i]
218
+
219
+ return p, False
220
+
221
+
222
+ def update_tr_radius(Delta, actual_reduction, predicted_reduction,
223
+ step_norm, bound_hit):
224
+ """Update the radius of a trust region based on the cost reduction.
225
+
226
+ Returns
227
+ -------
228
+ Delta : float
229
+ New radius.
230
+ ratio : float
231
+ Ratio between actual and predicted reductions.
232
+ """
233
+ if predicted_reduction > 0:
234
+ ratio = actual_reduction / predicted_reduction
235
+ elif predicted_reduction == actual_reduction == 0:
236
+ ratio = 1
237
+ else:
238
+ ratio = 0
239
+
240
+ if ratio < 0.25:
241
+ Delta = 0.25 * step_norm
242
+ elif ratio > 0.75 and bound_hit:
243
+ Delta *= 2.0
244
+
245
+ return Delta, ratio
246
+
247
+
248
+ # Construction and minimization of quadratic functions.
249
+
250
+
251
+ def build_quadratic_1d(J, g, s, diag=None, s0=None):
252
+ """Parameterize a multivariate quadratic function along a line.
253
+
254
+ The resulting univariate quadratic function is given as follows::
255
+
256
+ f(t) = 0.5 * (s0 + s*t).T * (J.T*J + diag) * (s0 + s*t) +
257
+ g.T * (s0 + s*t)
258
+
259
+ Parameters
260
+ ----------
261
+ J : ndarray, sparse matrix or LinearOperator shape (m, n)
262
+ Jacobian matrix, affects the quadratic term.
263
+ g : ndarray, shape (n,)
264
+ Gradient, defines the linear term.
265
+ s : ndarray, shape (n,)
266
+ Direction vector of a line.
267
+ diag : None or ndarray with shape (n,), optional
268
+ Addition diagonal part, affects the quadratic term.
269
+ If None, assumed to be 0.
270
+ s0 : None or ndarray with shape (n,), optional
271
+ Initial point. If None, assumed to be 0.
272
+
273
+ Returns
274
+ -------
275
+ a : float
276
+ Coefficient for t**2.
277
+ b : float
278
+ Coefficient for t.
279
+ c : float
280
+ Free term. Returned only if `s0` is provided.
281
+ """
282
+ v = J.dot(s)
283
+ a = np.dot(v, v)
284
+ if diag is not None:
285
+ a += np.dot(s * diag, s)
286
+ a *= 0.5
287
+
288
+ b = np.dot(g, s)
289
+
290
+ if s0 is not None:
291
+ u = J.dot(s0)
292
+ b += np.dot(u, v)
293
+ c = 0.5 * np.dot(u, u) + np.dot(g, s0)
294
+ if diag is not None:
295
+ b += np.dot(s0 * diag, s)
296
+ c += 0.5 * np.dot(s0 * diag, s0)
297
+ return a, b, c
298
+ else:
299
+ return a, b
300
+
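+ # Worked sketch (added comment): with J = np.eye(2), g = [1, 0],
+ # s = [1, 1] and no diag or s0, the function returns
+ # a = 0.5 * ||J s||**2 = 1.0 and b = g @ s = 1.0, i.e. the quadratic
+ # along the line is f(t) = t**2 + t.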
301
+
302
+ def minimize_quadratic_1d(a, b, lb, ub, c=0):
303
+ """Minimize a 1-D quadratic function subject to bounds.
304
+
305
+ The free term `c` is 0 by default. Bounds must be finite.
306
+
307
+ Returns
308
+ -------
309
+ t : float
310
+ Minimum point.
311
+ y : float
312
+ Minimum value.
313
+ """
314
+ t = [lb, ub]
315
+ if a != 0:
316
+ extremum = -0.5 * b / a
317
+ if lb < extremum < ub:
318
+ t.append(extremum)
319
+ t = np.asarray(t)
320
+ y = t * (a * t + b) + c
321
+ min_index = np.argmin(y)
322
+ return t[min_index], y[min_index]
323
+
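+ # Worked sketch (added comment): minimize_quadratic_1d(1.0, -2.0, 0.0, 5.0)
+ # compares the endpoints with the interior extremum t = -b/(2a) = 1.0 and
+ # returns (1.0, -1.0).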
324
+
325
+ def evaluate_quadratic(J, g, s, diag=None):
326
+ """Compute values of a quadratic function arising in least squares.
327
+
328
+ The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s.
329
+
330
+ Parameters
331
+ ----------
332
+ J : ndarray, sparse matrix or LinearOperator, shape (m, n)
333
+ Jacobian matrix, affects the quadratic term.
334
+ g : ndarray, shape (n,)
335
+ Gradient, defines the linear term.
336
+ s : ndarray, shape (k, n) or (n,)
337
+ Array containing steps as rows.
338
+ diag : ndarray, shape (n,), optional
339
+ Addition diagonal part, affects the quadratic term.
340
+ If None, assumed to be 0.
341
+
342
+ Returns
343
+ -------
344
+ values : ndarray with shape (k,) or float
345
+ Values of the function. If `s` was 2-D, then ndarray is
346
+ returned, otherwise, float is returned.
347
+ """
348
+ if s.ndim == 1:
349
+ Js = J.dot(s)
350
+ q = np.dot(Js, Js)
351
+ if diag is not None:
352
+ q += np.dot(s * diag, s)
353
+ else:
354
+ Js = J.dot(s.T)
355
+ q = np.sum(Js**2, axis=0)
356
+ if diag is not None:
357
+ q += np.sum(diag * s**2, axis=1)
358
+
359
+ l = np.dot(s, g)
360
+
361
+ return 0.5 * q + l
362
+
363
+
364
+ # Utility functions to work with bound constraints.
365
+
366
+
367
+ def in_bounds(x, lb, ub):
368
+ """Check if a point lies within bounds."""
369
+ return np.all((x >= lb) & (x <= ub))
370
+
371
+
372
+ def step_size_to_bound(x, s, lb, ub):
373
+ """Compute a min_step size required to reach a bound.
374
+
375
+ The function computes a positive scalar t, such that x + s * t is on
376
+ the bound.
377
+
378
+ Returns
379
+ -------
380
+ step : float
381
+ Computed step. Non-negative value.
382
+ hits : ndarray of int with shape of x
383
+ Each element indicates whether a corresponding variable reaches the
384
+ bound:
385
+
386
+ * 0 - the bound was not hit.
387
+ * -1 - the lower bound was hit.
388
+ * 1 - the upper bound was hit.
389
+ """
390
+ non_zero = np.nonzero(s)
391
+ s_non_zero = s[non_zero]
392
+ steps = np.empty_like(x)
393
+ steps.fill(np.inf)
394
+ with np.errstate(over='ignore'):
395
+ steps[non_zero] = np.maximum((lb - x)[non_zero] / s_non_zero,
396
+ (ub - x)[non_zero] / s_non_zero)
397
+ min_step = np.min(steps)
398
+ return min_step, np.equal(steps, min_step) * np.sign(s).astype(int)
399
+
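+ # Worked sketch (added comment): for x = [0.5], s = [1.0], lb = [0.0],
+ # ub = [1.0] the step to the boundary is (1.0 - 0.5) / 1.0 = 0.5 and the
+ # upper bound is hit, so the function returns (0.5, array([1])).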
400
+
401
+ def find_active_constraints(x, lb, ub, rtol=1e-10):
402
+ """Determine which constraints are active in a given point.
403
+
404
+ The threshold is computed using `rtol` and the absolute value of the
405
+ closest bound.
406
+
407
+ Returns
408
+ -------
409
+ active : ndarray of int with shape of x
410
+ Each component shows whether the corresponding constraint is active:
411
+
412
+ * 0 - a constraint is not active.
413
+ * -1 - a lower bound is active.
414
+ * 1 - an upper bound is active.
415
+ """
416
+ active = np.zeros_like(x, dtype=int)
417
+
418
+ if rtol == 0:
419
+ active[x <= lb] = -1
420
+ active[x >= ub] = 1
421
+ return active
422
+
423
+ lower_dist = x - lb
424
+ upper_dist = ub - x
425
+
426
+ lower_threshold = rtol * np.maximum(1, np.abs(lb))
427
+ upper_threshold = rtol * np.maximum(1, np.abs(ub))
428
+
429
+ lower_active = (np.isfinite(lb) &
430
+ (lower_dist <= np.minimum(upper_dist, lower_threshold)))
431
+ active[lower_active] = -1
432
+
433
+ upper_active = (np.isfinite(ub) &
434
+ (upper_dist <= np.minimum(lower_dist, upper_threshold)))
435
+ active[upper_active] = 1
436
+
437
+ return active
438
+
439
+
440
+ def make_strictly_feasible(x, lb, ub, rstep=1e-10):
441
+ """Shift a point to the interior of a feasible region.
442
+
443
+ Each element of the returned vector is at least at a relative distance
444
+ `rstep` from the closest bound. If ``rstep=0`` then `np.nextafter` is used.
445
+ """
446
+ x_new = x.copy()
447
+
448
+ active = find_active_constraints(x, lb, ub, rstep)
449
+ lower_mask = np.equal(active, -1)
450
+ upper_mask = np.equal(active, 1)
451
+
452
+ if rstep == 0:
453
+ x_new[lower_mask] = np.nextafter(lb[lower_mask], ub[lower_mask])
454
+ x_new[upper_mask] = np.nextafter(ub[upper_mask], lb[upper_mask])
455
+ else:
456
+ x_new[lower_mask] = (lb[lower_mask] +
457
+ rstep * np.maximum(1, np.abs(lb[lower_mask])))
458
+ x_new[upper_mask] = (ub[upper_mask] -
459
+ rstep * np.maximum(1, np.abs(ub[upper_mask])))
460
+
461
+ tight_bounds = (x_new < lb) | (x_new > ub)
462
+ x_new[tight_bounds] = 0.5 * (lb[tight_bounds] + ub[tight_bounds])
463
+
464
+ return x_new
465
+
466
+
467
+ def CL_scaling_vector(x, g, lb, ub):
468
+ """Compute Coleman-Li scaling vector and its derivatives.
469
+
470
+ Components of a vector v are defined as follows::
471
+
472
+ | ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf
473
+ v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf
474
+ | 1, otherwise
475
+
476
+ According to this definition v[i] >= 0 for all i. It differs from the
477
+ definition in paper [1]_ (eq. (2.2)), where the absolute value of v is
478
+ used. Both definitions are equivalent down the line.
479
+ Derivatives of v with respect to x take value 1, -1 or 0 depending on a
480
+ case.
481
+
482
+ Returns
483
+ -------
484
+ v : ndarray with shape of x
485
+ Scaling vector.
486
+ dv : ndarray with shape of x
487
+ Derivatives of v[i] with respect to x[i], diagonal elements of v's
488
+ Jacobian.
489
+
490
+ References
491
+ ----------
492
+ .. [1] M.A. Branch, T.F. Coleman, and Y. Li, "A Subspace, Interior,
493
+ and Conjugate Gradient Method for Large-Scale Bound-Constrained
494
+ Minimization Problems," SIAM Journal on Scientific Computing,
495
+ Vol. 21, Number 1, pp 1-23, 1999.
496
+ """
497
+ v = np.ones_like(x)
498
+ dv = np.zeros_like(x)
499
+
500
+ mask = (g < 0) & np.isfinite(ub)
501
+ v[mask] = ub[mask] - x[mask]
502
+ dv[mask] = -1
503
+
504
+ mask = (g > 0) & np.isfinite(lb)
505
+ v[mask] = x[mask] - lb[mask]
506
+ dv[mask] = 1
507
+
508
+ return v, dv
509
+
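+ # Worked sketch (added comment): for x = [1, 5], g = [-1, 2],
+ # lb = [0, 0], ub = [10, np.inf] the first component has g < 0 with a
+ # finite upper bound, giving v[0] = 10 - 1 = 9 and dv[0] = -1, while the
+ # second has g > 0 with a finite lower bound, giving v[1] = 5 - 0 = 5
+ # and dv[1] = 1.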
510
+
511
+ def reflective_transformation(y, lb, ub):
512
+ """Compute reflective transformation and its gradient."""
513
+ if in_bounds(y, lb, ub):
514
+ return y, np.ones_like(y)
515
+
516
+ lb_finite = np.isfinite(lb)
517
+ ub_finite = np.isfinite(ub)
518
+
519
+ x = y.copy()
520
+ g_negative = np.zeros_like(y, dtype=bool)
521
+
522
+ mask = lb_finite & ~ub_finite
523
+ x[mask] = np.maximum(y[mask], 2 * lb[mask] - y[mask])
524
+ g_negative[mask] = y[mask] < lb[mask]
525
+
526
+ mask = ~lb_finite & ub_finite
527
+ x[mask] = np.minimum(y[mask], 2 * ub[mask] - y[mask])
528
+ g_negative[mask] = y[mask] > ub[mask]
529
+
530
+ mask = lb_finite & ub_finite
531
+ d = ub - lb
532
+ t = np.remainder(y[mask] - lb[mask], 2 * d[mask])
533
+ x[mask] = lb[mask] + np.minimum(t, 2 * d[mask] - t)
534
+ g_negative[mask] = t > d[mask]
535
+
536
+ g = np.ones_like(y)
537
+ g[g_negative] = -1
538
+
539
+ return x, g
540
+
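+ # Worked sketch (added comment): for y = [-1.0], lb = [0.0],
+ # ub = [np.inf] the point is reflected about the lower bound, giving
+ # x = [1.0] with gradient sign g = [-1.0].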
541
+
542
+ # Functions to display algorithm's progress.
543
+
544
+
545
+ def print_header_nonlinear():
546
+ print("{:^15}{:^15}{:^15}{:^15}{:^15}{:^15}"
547
+ .format("Iteration", "Total nfev", "Cost", "Cost reduction",
548
+ "Step norm", "Optimality"))
549
+
550
+
551
+ def print_iteration_nonlinear(iteration, nfev, cost, cost_reduction,
552
+ step_norm, optimality):
553
+ if cost_reduction is None:
554
+ cost_reduction = " " * 15
555
+ else:
556
+ cost_reduction = f"{cost_reduction:^15.2e}"
557
+
558
+ if step_norm is None:
559
+ step_norm = " " * 15
560
+ else:
561
+ step_norm = f"{step_norm:^15.2e}"
562
+
563
+ print("{:^15}{:^15}{:^15.4e}{}{}{:^15.2e}"
564
+ .format(iteration, nfev, cost, cost_reduction,
565
+ step_norm, optimality))
566
+
567
+
568
+ def print_header_linear():
569
+ print("{:^15}{:^15}{:^15}{:^15}{:^15}"
570
+ .format("Iteration", "Cost", "Cost reduction", "Step norm",
571
+ "Optimality"))
572
+
573
+
574
+ def print_iteration_linear(iteration, cost, cost_reduction, step_norm,
575
+ optimality):
576
+ if cost_reduction is None:
577
+ cost_reduction = " " * 15
578
+ else:
579
+ cost_reduction = f"{cost_reduction:^15.2e}"
580
+
581
+ if step_norm is None:
582
+ step_norm = " " * 15
583
+ else:
584
+ step_norm = f"{step_norm:^15.2e}"
585
+
586
+ print(f"{iteration:^15}{cost:^15.4e}{cost_reduction}{step_norm}{optimality:^15.2e}")
587
+
588
+
589
+ # Simple helper functions.
590
+
591
+
592
+ def compute_grad(J, f):
593
+ """Compute gradient of the least-squares cost function."""
594
+ if isinstance(J, LinearOperator):
595
+ return J.rmatvec(f)
596
+ else:
597
+ return J.T.dot(f)
598
+
599
+
600
+ def compute_jac_scale(J, scale_inv_old=None):
601
+ """Compute variables scale based on the Jacobian matrix."""
602
+ if issparse(J):
603
+ scale_inv = np.asarray(J.power(2).sum(axis=0)).ravel()**0.5
604
+ else:
605
+ scale_inv = np.sum(J**2, axis=0)**0.5
606
+
607
+ if scale_inv_old is None:
608
+ scale_inv[scale_inv == 0] = 1
609
+ else:
610
+ scale_inv = np.maximum(scale_inv, scale_inv_old)
611
+
612
+ return 1 / scale_inv, scale_inv
613
+
+
+ def left_multiplied_operator(J, d):
+     """Return diag(d) J as LinearOperator."""
+     J = aslinearoperator(J)
+
+     def matvec(x):
+         return d * J.matvec(x)
+
+     def matmat(X):
+         return d[:, np.newaxis] * J.matmat(X)
+
+     def rmatvec(x):
+         return J.rmatvec(x.ravel() * d)
+
+     return LinearOperator(J.shape, matvec=matvec, matmat=matmat,
+                           rmatvec=rmatvec)
+
+
+ def right_multiplied_operator(J, d):
+     """Return J diag(d) as LinearOperator."""
+     J = aslinearoperator(J)
+
+     def matvec(x):
+         return J.matvec(np.ravel(x) * d)
+
+     def matmat(X):
+         return J.matmat(X * d[:, np.newaxis])
+
+     def rmatvec(x):
+         return d * J.rmatvec(x)
+
+     return LinearOperator(J.shape, matvec=matvec, matmat=matmat,
+                           rmatvec=rmatvec)
+
+
+ def regularized_lsq_operator(J, diag):
+     """Return a matrix arising in regularized least squares as LinearOperator.
+
+     The matrix is
+         [ J ]
+         [ D ]
+     where D is a diagonal matrix with elements from `diag`.
+     """
+     J = aslinearoperator(J)
+     m, n = J.shape
+
+     def matvec(x):
+         return np.hstack((J.matvec(x), diag * x))
+
+     def rmatvec(x):
+         x1 = x[:m]
+         x2 = x[m:]
+         return J.rmatvec(x1) + diag * x2
+
+     return LinearOperator((m + n, n), matvec=matvec, rmatvec=rmatvec)
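As an editorial sanity check (not part of the diff), the matrix-free wrappers above should agree with explicit dense products; a small sketch assuming both helpers are in scope:

    import numpy as np

    rng = np.random.default_rng(0)
    J = rng.standard_normal((4, 3))
    d_rows = rng.standard_normal(4)
    d_cols = rng.standard_normal(3)
    x = rng.standard_normal(3)

    # diag(d_rows) @ J and J @ diag(d_cols) as LinearOperators.
    assert np.allclose(left_multiplied_operator(J, d_rows).matvec(x),
                       (d_rows[:, None] * J) @ x)
    assert np.allclose(right_multiplied_operator(J, d_cols).matvec(x),
                       (J * d_cols) @ x)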
+
+
+ def right_multiply(J, d, copy=True):
+     """Compute J diag(d).
+
+     If `copy` is False, `J` is modified in place (unless it is a
+     LinearOperator).
+     """
+     if copy and not isinstance(J, LinearOperator):
+         J = J.copy()
+
+     if issparse(J):
+         J.data *= d.take(J.indices, mode='clip')  # scikit-learn recipe.
+     elif isinstance(J, LinearOperator):
+         J = right_multiplied_operator(J, d)
+     else:
+         J *= d
+
+     return J
+
+
+ def left_multiply(J, d, copy=True):
+     """Compute diag(d) J.
+
+     If `copy` is False, `J` is modified in place (unless it is a
+     LinearOperator).
+     """
+     if copy and not isinstance(J, LinearOperator):
+         J = J.copy()
+
+     if issparse(J):
+         J.data *= np.repeat(d, np.diff(J.indptr))  # scikit-learn recipe.
+     elif isinstance(J, LinearOperator):
+         J = left_multiplied_operator(J, d)
+     else:
+         J *= d[:, np.newaxis]
+
+     return J
+
+
+ def check_termination(dF, F, dx_norm, x_norm, ratio, ftol, xtol):
+     """Check termination condition for nonlinear least squares."""
+     ftol_satisfied = dF < ftol * F and ratio > 0.25
+     xtol_satisfied = dx_norm < xtol * (xtol + x_norm)
+
+     if ftol_satisfied and xtol_satisfied:
+         return 4
+     elif ftol_satisfied:
+         return 2
+     elif xtol_satisfied:
+         return 3
+     else:
+         return None
+
+
+ def scale_for_robust_loss_function(J, f, rho):
+     """Scale Jacobian and residuals for a robust loss function.
+
+     Arrays are modified in place.
+     """
+     J_scale = rho[1] + 2 * rho[2] * f**2
+     J_scale[J_scale < EPS] = EPS
+     J_scale **= 0.5
+
+     f *= rho[1] / J_scale
+
+     return left_multiply(J, J_scale, copy=False), f
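Editorial note (not part of the diff) on why this scaling is correct: for the robust cost $F(x) = \frac{1}{2}\sum_i \rho(f_i^2)$, let $s_i = \rho'(f_i^2) + 2\rho''(f_i^2) f_i^2$ (clipped at EPS above). The function builds

$$\tilde f_i = \frac{\rho'(f_i^2)\, f_i}{\sqrt{s_i}}, \qquad \tilde J = \operatorname{diag}(\sqrt{s_i})\, J,$$

so that $\tilde J^T \tilde f = J^T(\rho' f) = \nabla F$ exactly, while $\tilde J^T \tilde J = J^T \operatorname{diag}(s_i)\, J$ is the Gauss-Newton-type Hessian approximation used for robust losses (cf. the [BA]_ reference in `least_squares`), up to the EPS clipping.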
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/dogbox.py ADDED
@@ -0,0 +1,331 @@
+ """
+ Dogleg algorithm with rectangular trust regions for least-squares minimization.
+
+ The description of the algorithm can be found in [Voglis]_. The algorithm does
+ trust-region iterations, but the shape of trust regions is rectangular as
+ opposed to conventional elliptical. The intersection of a trust region and
+ the initial feasible region is again some rectangle. Thus, on each iteration
+ a bound-constrained quadratic optimization problem is solved.
+
+ A quadratic problem is solved by the well-known dogleg approach, where the
+ function is minimized along a piecewise-linear "dogleg" path [NumOpt]_,
+ Chapter 4. If the Jacobian is not rank-deficient, then the function is
+ decreasing along this path, and optimization amounts to simply following it
+ as long as a point stays within the bounds. A constrained Cauchy step (along
+ the anti-gradient) is considered for safety in rank-deficient cases, in which
+ convergence might be slow.
+
+ If during iterations some variable hits the initial bound and the component
+ of the anti-gradient points outside the feasible region, then the next dogleg
+ step won't make any progress. At this state such variables satisfy
+ first-order optimality conditions and they are excluded before the next
+ dogleg step is computed.
+
+ The Gauss-Newton step can be computed exactly by `numpy.linalg.lstsq` (for
+ dense Jacobian matrices) or by the iterative procedure
+ `scipy.sparse.linalg.lsmr` (for dense and sparse matrices, or for a Jacobian
+ given as a LinearOperator). The second option allows solving very large
+ problems (up to a couple of million residuals on a regular PC), provided the
+ Jacobian matrix is sufficiently sparse. Note, however, that dogbox is not
+ very good at solving problems with a large number of constraints, because
+ variables are excluded and included again on each iteration (the required
+ number of function evaluations might be high, or the accuracy of a solution
+ will be poor); thus its large-scale usage is probably limited to
+ unconstrained problems.
+
+ References
+ ----------
+ .. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region Dogleg
+             Approach for Unconstrained and Bound Constrained Nonlinear
+             Optimization", WSEAS International Conference on Applied
+             Mathematics, Corfu, Greece, 2004.
+ .. [NumOpt] J. Nocedal and S. J. Wright, "Numerical Optimization, 2nd edition".
+ """
+ import numpy as np
+ from numpy.linalg import lstsq, norm
+
+ from scipy.sparse.linalg import LinearOperator, aslinearoperator, lsmr
+ from scipy.optimize import OptimizeResult
+
+ from .common import (
+     step_size_to_bound, in_bounds, update_tr_radius, evaluate_quadratic,
+     build_quadratic_1d, minimize_quadratic_1d, compute_grad,
+     compute_jac_scale, check_termination, scale_for_robust_loss_function,
+     print_header_nonlinear, print_iteration_nonlinear)
+
+
+ def lsmr_operator(Jop, d, active_set):
+     """Compute LinearOperator to use in LSMR by dogbox algorithm.
+
+     The `active_set` mask is used to exclude active variables from
+     computations of matrix-vector products.
+     """
+     m, n = Jop.shape
+
+     def matvec(x):
+         # Zero the active components before the product so the operator
+         # acts only on the free variables.
+         x_free = x.ravel().copy()
+         x_free[active_set] = 0
+         return Jop.matvec(x_free * d)
+
+     def rmatvec(x):
+         r = d * Jop.rmatvec(x)
+         r[active_set] = 0
+         return r
+
+     return LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec, dtype=float)
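Editorial sanity check (not part of the diff), reflecting the corrected `matvec` above (the original computed `x_free` but then multiplied the unmasked `x`): the operator must behave as if the active columns of J were removed, without any slicing. Assuming `lsmr_operator` is in scope:

    import numpy as np
    from scipy.sparse.linalg import aslinearoperator

    rng = np.random.default_rng(0)
    J = rng.standard_normal((5, 3))
    d = np.array([1.0, 2.0, 0.5])
    active = np.array([False, True, False])

    op = lsmr_operator(aslinearoperator(J), d, active)
    x = rng.standard_normal(3)
    x_masked = x.copy()
    x_masked[active] = 0
    # matvec agrees with J @ diag(d) applied to the free components only.
    assert np.allclose(op.matvec(x), J @ (x_masked * d))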
+
+
+ def find_intersection(x, tr_bounds, lb, ub):
+     """Find intersection of trust-region bounds and initial bounds.
+
+     Returns
+     -------
+     lb_total, ub_total : ndarray with shape of x
+         Lower and upper bounds of the intersection region.
+     orig_l, orig_u : ndarray of bool with shape of x
+         True means that an original bound is taken as a corresponding bound
+         in the intersection region.
+     tr_l, tr_u : ndarray of bool with shape of x
+         True means that a trust-region bound is taken as a corresponding bound
+         in the intersection region.
+     """
+     lb_centered = lb - x
+     ub_centered = ub - x
+
+     lb_total = np.maximum(lb_centered, -tr_bounds)
+     ub_total = np.minimum(ub_centered, tr_bounds)
+
+     orig_l = np.equal(lb_total, lb_centered)
+     orig_u = np.equal(ub_total, ub_centered)
+
+     tr_l = np.equal(lb_total, -tr_bounds)
+     tr_u = np.equal(ub_total, tr_bounds)
+
+     return lb_total, ub_total, orig_l, orig_u, tr_l, tr_u
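For intuition (editorial, not part of the diff), a tiny worked example of the intersection logic, assuming `find_intersection` is in scope:

    import numpy as np

    lb_t, ub_t, orig_l, orig_u, tr_l, tr_u = find_intersection(
        np.zeros(2), np.array([1.0, 1.0]),
        np.array([-0.5, -2.0]), np.array([2.0, 0.3]))
    # Intersection of the trust region [-1, 1]^2 with the original box,
    # centered at x: the tighter bound wins componentwise.
    assert np.allclose(lb_t, [-0.5, -1.0])
    assert np.allclose(ub_t, [1.0, 0.3])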
+
+
+ def dogleg_step(x, newton_step, g, a, b, tr_bounds, lb, ub):
+     """Find dogleg step in a rectangular region.
+
+     Returns
+     -------
+     step : ndarray, shape (n,)
+         Computed dogleg step.
+     bound_hits : ndarray of int, shape (n,)
+         Each component shows whether a corresponding variable hits the
+         initial bound after the step is taken:
+             * 0 - a variable doesn't hit the bound.
+             * -1 - lower bound is hit.
+             * 1 - upper bound is hit.
+     tr_hit : bool
+         Whether the step hit the boundary of the trust region.
+     """
+     lb_total, ub_total, orig_l, orig_u, tr_l, tr_u = find_intersection(
+         x, tr_bounds, lb, ub
+     )
+     bound_hits = np.zeros_like(x, dtype=int)
+
+     if in_bounds(newton_step, lb_total, ub_total):
+         return newton_step, bound_hits, False
+
+     to_bounds, _ = step_size_to_bound(np.zeros_like(x), -g, lb_total, ub_total)
+
+     # The classical dogleg algorithm would check whether the Cauchy step
+     # fits into the bounds, and just return its constrained version if not.
+     # But in a rectangular trust region it makes sense to try to improve the
+     # constrained Cauchy step too. Thus, we don't distinguish these two cases.
+
+     cauchy_step = -minimize_quadratic_1d(a, b, 0, to_bounds)[0] * g
+
+     step_diff = newton_step - cauchy_step
+     step_size, hits = step_size_to_bound(cauchy_step, step_diff,
+                                          lb_total, ub_total)
+     bound_hits[(hits < 0) & orig_l] = -1
+     bound_hits[(hits > 0) & orig_u] = 1
+     tr_hit = np.any((hits < 0) & tr_l | (hits > 0) & tr_u)
+
+     return cauchy_step + step_size * step_diff, bound_hits, tr_hit
+
+
+ def dogbox(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
+            loss_function, tr_solver, tr_options, verbose):
+     f = f0
+     f_true = f.copy()
+     nfev = 1
+
+     J = J0
+     njev = 1
+
+     if loss_function is not None:
+         rho = loss_function(f)
+         cost = 0.5 * np.sum(rho[0])
+         J, f = scale_for_robust_loss_function(J, f, rho)
+     else:
+         cost = 0.5 * np.dot(f, f)
+
+     g = compute_grad(J, f)
+
+     jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
+     if jac_scale:
+         scale, scale_inv = compute_jac_scale(J)
+     else:
+         scale, scale_inv = x_scale, 1 / x_scale
+
+     Delta = norm(x0 * scale_inv, ord=np.inf)
+     if Delta == 0:
+         Delta = 1.0
+
+     on_bound = np.zeros_like(x0, dtype=int)
+     on_bound[np.equal(x0, lb)] = -1
+     on_bound[np.equal(x0, ub)] = 1
+
+     x = x0
+     step = np.empty_like(x0)
+
+     if max_nfev is None:
+         max_nfev = x0.size * 100
+
+     termination_status = None
+     iteration = 0
+     step_norm = None
+     actual_reduction = None
+
+     if verbose == 2:
+         print_header_nonlinear()
+
+     while True:
+         active_set = on_bound * g < 0
+         free_set = ~active_set
+
+         g_free = g[free_set]
+         g_full = g.copy()
+         g[active_set] = 0
+
+         g_norm = norm(g, ord=np.inf)
+         if g_norm < gtol:
+             termination_status = 1
+
+         if verbose == 2:
+             print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
+                                       step_norm, g_norm)
+
+         if termination_status is not None or nfev == max_nfev:
+             break
+
+         x_free = x[free_set]
+         lb_free = lb[free_set]
+         ub_free = ub[free_set]
+         scale_free = scale[free_set]
+
+         # Compute (Gauss-)Newton and build quadratic model for Cauchy step.
+         if tr_solver == 'exact':
+             J_free = J[:, free_set]
+             newton_step = lstsq(J_free, -f, rcond=-1)[0]
+
+             # Coefficients for the quadratic model along the anti-gradient.
+             a, b = build_quadratic_1d(J_free, g_free, -g_free)
+         elif tr_solver == 'lsmr':
+             Jop = aslinearoperator(J)
+
+             # We compute the lsmr step in scaled variables and then transform
+             # it back to normal variables. If lsmr gave the exact lsq
+             # solution, this would be equivalent to not doing any
+             # transformations, but from experience it's better this way.
+
+             # We pass active_set to make computations as if we selected
+             # the free subset of J columns, but without actually doing any
+             # slicing, which is expensive for sparse matrices and impossible
+             # for LinearOperator.
+
+             lsmr_op = lsmr_operator(Jop, scale, active_set)
+             newton_step = -lsmr(lsmr_op, f, **tr_options)[0][free_set]
+             newton_step *= scale_free
+
+             # Components of g for active variables were zeroed, so this call
+             # is correct and equivalent to using J_free and g_free.
+             a, b = build_quadratic_1d(Jop, g, -g)
+
+         actual_reduction = -1.0
+         while actual_reduction <= 0 and nfev < max_nfev:
+             tr_bounds = Delta * scale_free
+
+             step_free, on_bound_free, tr_hit = dogleg_step(
+                 x_free, newton_step, g_free, a, b, tr_bounds, lb_free, ub_free)
+
+             step.fill(0.0)
+             step[free_set] = step_free
+
+             if tr_solver == 'exact':
+                 predicted_reduction = -evaluate_quadratic(J_free, g_free,
+                                                           step_free)
+             elif tr_solver == 'lsmr':
+                 predicted_reduction = -evaluate_quadratic(Jop, g, step)
+
+             # gh11403 ensure that solution is fully within bounds.
+             x_new = np.clip(x + step, lb, ub)
+
+             f_new = fun(x_new)
+             nfev += 1
+
+             step_h_norm = norm(step * scale_inv, ord=np.inf)
+
+             if not np.all(np.isfinite(f_new)):
+                 Delta = 0.25 * step_h_norm
+                 continue
+
+             # Usual trust-region step quality estimation.
+             if loss_function is not None:
+                 cost_new = loss_function(f_new, cost_only=True)
+             else:
+                 cost_new = 0.5 * np.dot(f_new, f_new)
+             actual_reduction = cost - cost_new
+
+             Delta, ratio = update_tr_radius(
+                 Delta, actual_reduction, predicted_reduction,
+                 step_h_norm, tr_hit
+             )
+
+             step_norm = norm(step)
+             termination_status = check_termination(
+                 actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
+
+             if termination_status is not None:
+                 break
+
+         if actual_reduction > 0:
+             on_bound[free_set] = on_bound_free
+
+             x = x_new
+             # Set variables exactly at the boundary.
+             mask = on_bound == -1
+             x[mask] = lb[mask]
+             mask = on_bound == 1
+             x[mask] = ub[mask]
+
+             f = f_new
+             f_true = f.copy()
+
+             cost = cost_new
+
+             J = jac(x, f)
+             njev += 1
+
+             if loss_function is not None:
+                 rho = loss_function(f)
+                 J, f = scale_for_robust_loss_function(J, f, rho)
+
+             g = compute_grad(J, f)
+
+             if jac_scale:
+                 scale, scale_inv = compute_jac_scale(J, scale_inv)
+         else:
+             step_norm = 0
+             actual_reduction = 0
+
+         iteration += 1
+
+     if termination_status is None:
+         termination_status = 0
+
+     return OptimizeResult(
+         x=x, cost=cost, fun=f_true, jac=J, grad=g_full, optimality=g_norm,
+         active_mask=on_bound, nfev=nfev, njev=njev, status=termination_status)
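The public entry point for this solver is `least_squares(..., method='dogbox')`. A short editorial usage sketch (not part of the diff), mirroring the bounded Rosenbrock example from the `least_squares` docstring below:

    import numpy as np
    from scipy.optimize import least_squares

    def fun(x):
        return np.array([10 * (x[1] - x[0]**2), 1 - x[0]])

    res = least_squares(fun, np.array([2.0, 2.0]), method='dogbox',
                        bounds=([-np.inf, 1.5], np.inf))
    # The unconstrained minimum (1, 1) is infeasible, so the solution lies
    # on the bound x[1] = 1.5 and res.active_mask reports it.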
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/least_squares.py ADDED
@@ -0,0 +1,967 @@
+ """Generic interface for least-squares minimization."""
2
+ from warnings import warn
3
+
4
+ import numpy as np
5
+ from numpy.linalg import norm
6
+
7
+ from scipy.sparse import issparse
8
+ from scipy.sparse.linalg import LinearOperator
9
+ from scipy.optimize import _minpack, OptimizeResult
10
+ from scipy.optimize._numdiff import approx_derivative, group_columns
11
+ from scipy.optimize._minimize import Bounds
12
+
13
+ from .trf import trf
14
+ from .dogbox import dogbox
15
+ from .common import EPS, in_bounds, make_strictly_feasible
16
+
17
+
18
+ TERMINATION_MESSAGES = {
19
+ -1: "Improper input parameters status returned from `leastsq`",
20
+ 0: "The maximum number of function evaluations is exceeded.",
21
+ 1: "`gtol` termination condition is satisfied.",
22
+ 2: "`ftol` termination condition is satisfied.",
23
+ 3: "`xtol` termination condition is satisfied.",
24
+ 4: "Both `ftol` and `xtol` termination conditions are satisfied."
25
+ }
26
+
27
+
28
+ FROM_MINPACK_TO_COMMON = {
29
+ 0: -1, # Improper input parameters from MINPACK.
30
+ 1: 2,
31
+ 2: 3,
32
+ 3: 4,
33
+ 4: 1,
34
+ 5: 0
35
+ # There are 6, 7, 8 for too small tolerance parameters,
36
+ # but we guard against it by checking ftol, xtol, gtol beforehand.
37
+ }
38
+
39
+
40
+ def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step):
41
+ n = x0.size
42
+
43
+ if diff_step is None:
44
+ epsfcn = EPS
45
+ else:
46
+ epsfcn = diff_step**2
47
+
48
+ # Compute MINPACK's `diag`, which is inverse of our `x_scale` and
49
+ # ``x_scale='jac'`` corresponds to ``diag=None``.
50
+ if isinstance(x_scale, str) and x_scale == 'jac':
51
+ diag = None
52
+ else:
53
+ diag = 1 / x_scale
54
+
55
+ full_output = True
56
+ col_deriv = False
57
+ factor = 100.0
58
+
59
+ if jac is None:
60
+ if max_nfev is None:
61
+ # n squared to account for Jacobian evaluations.
62
+ max_nfev = 100 * n * (n + 1)
63
+ x, info, status = _minpack._lmdif(
64
+ fun, x0, (), full_output, ftol, xtol, gtol,
65
+ max_nfev, epsfcn, factor, diag)
66
+ else:
67
+ if max_nfev is None:
68
+ max_nfev = 100 * n
69
+ x, info, status = _minpack._lmder(
70
+ fun, jac, x0, (), full_output, col_deriv,
71
+ ftol, xtol, gtol, max_nfev, factor, diag)
72
+
73
+ f = info['fvec']
74
+
75
+ if callable(jac):
76
+ J = jac(x)
77
+ else:
78
+ J = np.atleast_2d(approx_derivative(fun, x))
79
+
80
+ cost = 0.5 * np.dot(f, f)
81
+ g = J.T.dot(f)
82
+ g_norm = norm(g, ord=np.inf)
83
+
84
+ nfev = info['nfev']
85
+ njev = info.get('njev', None)
86
+
87
+ status = FROM_MINPACK_TO_COMMON[status]
88
+ active_mask = np.zeros_like(x0, dtype=int)
89
+
90
+ return OptimizeResult(
91
+ x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm,
92
+ active_mask=active_mask, nfev=nfev, njev=njev, status=status)
93
+
94
+
95
+ def prepare_bounds(bounds, n):
96
+ lb, ub = (np.asarray(b, dtype=float) for b in bounds)
97
+ if lb.ndim == 0:
98
+ lb = np.resize(lb, n)
99
+
100
+ if ub.ndim == 0:
101
+ ub = np.resize(ub, n)
102
+
103
+ return lb, ub
104
+
105
+
106
+ def check_tolerance(ftol, xtol, gtol, method):
107
+ def check(tol, name):
108
+ if tol is None:
109
+ tol = 0
110
+ elif tol < EPS:
111
+ warn(f"Setting `{name}` below the machine epsilon ({EPS:.2e}) effectively "
112
+ f"disables the corresponding termination condition.",
113
+ stacklevel=3)
114
+ return tol
115
+
116
+ ftol = check(ftol, "ftol")
117
+ xtol = check(xtol, "xtol")
118
+ gtol = check(gtol, "gtol")
119
+
120
+ if method == "lm" and (ftol < EPS or xtol < EPS or gtol < EPS):
121
+ raise ValueError("All tolerances must be higher than machine epsilon "
122
+ f"({EPS:.2e}) for method 'lm'.")
123
+ elif ftol < EPS and xtol < EPS and gtol < EPS:
124
+ raise ValueError("At least one of the tolerances must be higher than "
125
+ f"machine epsilon ({EPS:.2e}).")
126
+
127
+ return ftol, xtol, gtol
128
+
129
+
130
+ def check_x_scale(x_scale, x0):
131
+ if isinstance(x_scale, str) and x_scale == 'jac':
132
+ return x_scale
133
+
134
+ try:
135
+ x_scale = np.asarray(x_scale, dtype=float)
136
+ valid = np.all(np.isfinite(x_scale)) and np.all(x_scale > 0)
137
+ except (ValueError, TypeError):
138
+ valid = False
139
+
140
+ if not valid:
141
+ raise ValueError("`x_scale` must be 'jac' or array_like with "
142
+ "positive numbers.")
143
+
144
+ if x_scale.ndim == 0:
145
+ x_scale = np.resize(x_scale, x0.shape)
146
+
147
+ if x_scale.shape != x0.shape:
148
+ raise ValueError("Inconsistent shapes between `x_scale` and `x0`.")
149
+
150
+ return x_scale
151
+
152
+
153
+ def check_jac_sparsity(jac_sparsity, m, n):
154
+ if jac_sparsity is None:
155
+ return None
156
+
157
+ if not issparse(jac_sparsity):
158
+ jac_sparsity = np.atleast_2d(jac_sparsity)
159
+
160
+ if jac_sparsity.shape != (m, n):
161
+ raise ValueError("`jac_sparsity` has wrong shape.")
162
+
163
+ return jac_sparsity, group_columns(jac_sparsity)
164
+
165
+
166
+ # Loss functions.
167
+
168
+
169
+ def huber(z, rho, cost_only):
170
+ mask = z <= 1
171
+ rho[0, mask] = z[mask]
172
+ rho[0, ~mask] = 2 * z[~mask]**0.5 - 1
173
+ if cost_only:
174
+ return
175
+ rho[1, mask] = 1
176
+ rho[1, ~mask] = z[~mask]**-0.5
177
+ rho[2, mask] = 0
178
+ rho[2, ~mask] = -0.5 * z[~mask]**-1.5
179
+
180
+
181
+ def soft_l1(z, rho, cost_only):
182
+ t = 1 + z
183
+ rho[0] = 2 * (t**0.5 - 1)
184
+ if cost_only:
185
+ return
186
+ rho[1] = t**-0.5
187
+ rho[2] = -0.5 * t**-1.5
188
+
189
+
190
+ def cauchy(z, rho, cost_only):
191
+ rho[0] = np.log1p(z)
192
+ if cost_only:
193
+ return
194
+ t = 1 + z
195
+ rho[1] = 1 / t
196
+ rho[2] = -1 / t**2
197
+
198
+
199
+ def arctan(z, rho, cost_only):
200
+ rho[0] = np.arctan(z)
201
+ if cost_only:
202
+ return
203
+ t = 1 + z**2
204
+ rho[1] = 1 / t
205
+ rho[2] = -2 * z / t**2
206
+
207
+
208
+ IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1,
209
+ cauchy=cauchy, arctan=arctan)
210
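Editorial illustration (not part of the diff) of the ``(3, m)`` convention these loss functions fill in, assuming `soft_l1` is in scope: row 0 holds rho(z), row 1 rho'(z), and row 2 rho''(z), with ``z = f**2``:

    import numpy as np

    z = np.array([0.5, 2.0, 10.0])
    rho = np.empty((3, z.size))
    soft_l1(z, rho, cost_only=False)
    assert np.allclose(rho[0], 2 * ((1 + z)**0.5 - 1))
    assert np.allclose(rho[1], (1 + z)**-0.5)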
+
211
+
212
+ def construct_loss_function(m, loss, f_scale):
213
+ if loss == 'linear':
214
+ return None
215
+
216
+ if not callable(loss):
217
+ loss = IMPLEMENTED_LOSSES[loss]
218
+ rho = np.empty((3, m))
219
+
220
+ def loss_function(f, cost_only=False):
221
+ z = (f / f_scale) ** 2
222
+ loss(z, rho, cost_only=cost_only)
223
+ if cost_only:
224
+ return 0.5 * f_scale ** 2 * np.sum(rho[0])
225
+ rho[0] *= f_scale ** 2
226
+ rho[2] /= f_scale ** 2
227
+ return rho
228
+ else:
229
+ def loss_function(f, cost_only=False):
230
+ z = (f / f_scale) ** 2
231
+ rho = loss(z)
232
+ if cost_only:
233
+ return 0.5 * f_scale ** 2 * np.sum(rho[0])
234
+ rho[0] *= f_scale ** 2
235
+ rho[2] /= f_scale ** 2
236
+ return rho
237
+
238
+ return loss_function
239
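A brief editorial check (not part of the diff) that the constructed wrapper behaves as expected, assuming `construct_loss_function` is in scope: for 'soft_l1' the robust cost never exceeds the plain quadratic cost.

    import numpy as np

    f = np.array([0.3, 3.0])
    loss_fun = construct_loss_function(2, 'soft_l1', 1.0)
    cost_robust = loss_fun(f, cost_only=True)
    assert cost_robust <= 0.5 * np.dot(f, f)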
+
240
+
241
+ def least_squares(
242
+ fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf',
243
+ ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear',
244
+ f_scale=1.0, diff_step=None, tr_solver=None, tr_options={},
245
+ jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}):
246
+ """Solve a nonlinear least-squares problem with bounds on the variables.
247
+
248
+ Given the residuals f(x) (an m-D real function of n real
249
+ variables) and the loss function rho(s) (a scalar function), `least_squares`
250
+ finds a local minimum of the cost function F(x)::
251
+
252
+ minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1)
253
+ subject to lb <= x <= ub
254
+
255
+ The purpose of the loss function rho(s) is to reduce the influence of
256
+ outliers on the solution.
257
+
258
+ Parameters
259
+ ----------
260
+ fun : callable
261
+ Function which computes the vector of residuals, with the signature
262
+ ``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with
263
+ respect to its first argument. The argument ``x`` passed to this
264
+ function is an ndarray of shape (n,) (never a scalar, even for n=1).
265
+ It must allocate and return a 1-D array_like of shape (m,) or a scalar.
266
+ If the argument ``x`` is complex or the function ``fun`` returns
267
+ complex residuals, it must be wrapped in a real function of real
268
+ arguments, as shown at the end of the Examples section.
269
+ x0 : array_like with shape (n,) or float
270
+ Initial guess on independent variables. If float, it will be treated
271
+ as a 1-D array with one element. When `method` is 'trf', the initial
272
+ guess might be slightly adjusted to lie sufficiently within the given
273
+ `bounds`.
274
+ jac : {'2-point', '3-point', 'cs', callable}, optional
275
+ Method of computing the Jacobian matrix (an m-by-n matrix, where
276
+ element (i, j) is the partial derivative of f[i] with respect to
277
+ x[j]). The keywords select a finite difference scheme for numerical
278
+ estimation. The scheme '3-point' is more accurate, but requires
279
+ twice as many operations as '2-point' (default). The scheme 'cs'
280
+ uses complex steps, and while potentially the most accurate, it is
281
+ applicable only when `fun` correctly handles complex inputs and
282
+ can be analytically continued to the complex plane. Method 'lm'
283
+ always uses the '2-point' scheme. If callable, it is used as
284
+ ``jac(x, *args, **kwargs)`` and should return a good approximation
285
+ (or the exact value) for the Jacobian as an array_like (np.atleast_2d
286
+ is applied), a sparse matrix (csr_matrix preferred for performance) or
287
+ a `scipy.sparse.linalg.LinearOperator`.
288
+ bounds : 2-tuple of array_like or `Bounds`, optional
289
+ There are two ways to specify bounds:
290
+
291
+ 1. Instance of `Bounds` class
292
+ 2. Lower and upper bounds on independent variables. Defaults to no
293
+ bounds. Each array must match the size of `x0` or be a scalar,
294
+ in the latter case a bound will be the same for all variables.
295
+ Use ``np.inf`` with an appropriate sign to disable bounds on all
296
+ or some variables.
297
+ method : {'trf', 'dogbox', 'lm'}, optional
298
+ Algorithm to perform minimization.
299
+
300
+ * 'trf' : Trust Region Reflective algorithm, particularly suitable
301
+ for large sparse problems with bounds. Generally robust method.
302
+ * 'dogbox' : dogleg algorithm with rectangular trust regions,
303
+ typical use case is small problems with bounds. Not recommended
304
+ for problems with rank-deficient Jacobian.
305
+ * 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.
306
+ Doesn't handle bounds and sparse Jacobians. Usually the most
307
+ efficient method for small unconstrained problems.
308
+
309
+ Default is 'trf'. See Notes for more information.
310
+ ftol : float or None, optional
311
+ Tolerance for termination by the change of the cost function. Default
312
+ is 1e-8. The optimization process is stopped when ``dF < ftol * F``,
313
+ and there was an adequate agreement between a local quadratic model and
314
+ the true model in the last step.
315
+
316
+ If None and 'method' is not 'lm', the termination by this condition is
317
+ disabled. If 'method' is 'lm', this tolerance must be higher than
318
+ machine epsilon.
319
+ xtol : float or None, optional
320
+ Tolerance for termination by the change of the independent variables.
321
+ Default is 1e-8. The exact condition depends on the `method` used:
322
+
323
+ * For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``.
324
+ * For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is
325
+ a trust-region radius and ``xs`` is the value of ``x``
326
+ scaled according to `x_scale` parameter (see below).
327
+
328
+ If None and 'method' is not 'lm', the termination by this condition is
329
+ disabled. If 'method' is 'lm', this tolerance must be higher than
330
+ machine epsilon.
331
+ gtol : float or None, optional
332
+ Tolerance for termination by the norm of the gradient. Default is 1e-8.
333
+ The exact condition depends on a `method` used:
334
+
335
+ * For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where
336
+ ``g_scaled`` is the value of the gradient scaled to account for
337
+ the presence of the bounds [STIR]_.
338
+ * For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where
339
+ ``g_free`` is the gradient with respect to the variables which
340
+ are not in the optimal state on the boundary.
341
+ * For 'lm' : the maximum absolute value of the cosine of angles
342
+ between columns of the Jacobian and the residual vector is less
343
+ than `gtol`, or the residual vector is zero.
344
+
345
+ If None and 'method' is not 'lm', the termination by this condition is
346
+ disabled. If 'method' is 'lm', this tolerance must be higher than
347
+ machine epsilon.
348
+ x_scale : array_like or 'jac', optional
349
+ Characteristic scale of each variable. Setting `x_scale` is equivalent
350
+ to reformulating the problem in scaled variables ``xs = x / x_scale``.
351
+ An alternative view is that the size of a trust region along the jth
352
+ dimension is proportional to ``x_scale[j]``. Improved convergence may
353
+ be achieved by setting `x_scale` such that a step of a given size
354
+ along any of the scaled variables has a similar effect on the cost
355
+ function. If set to 'jac', the scale is iteratively updated using the
356
+ inverse norms of the columns of the Jacobian matrix (as described in
357
+ [JJMore]_).
358
+ loss : str or callable, optional
359
+ Determines the loss function. The following keyword values are allowed:
360
+
361
+ * 'linear' (default) : ``rho(z) = z``. Gives a standard
362
+ least-squares problem.
363
+ * 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth
364
+ approximation of l1 (absolute value) loss. Usually a good
365
+ choice for robust least squares.
366
+ * 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works
367
+ similarly to 'soft_l1'.
368
+ * 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens outliers'
+ influence, but may cause difficulties in the optimization process.
370
+ * 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on
371
+ a single residual, has properties similar to 'cauchy'.
372
+
373
+ If callable, it must take a 1-D ndarray ``z=f**2`` and return an
374
+ array_like with shape (3, m) where row 0 contains function values,
375
+ row 1 contains first derivatives and row 2 contains second
376
+ derivatives. Method 'lm' supports only 'linear' loss.
377
+ f_scale : float, optional
378
+ Value of soft margin between inlier and outlier residuals, default
379
+ is 1.0. The loss function is evaluated as follows
380
+ ``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`,
381
+ and ``rho`` is determined by `loss` parameter. This parameter has
382
+ no effect with ``loss='linear'``, but for other `loss` values it is
383
+ of crucial importance.
384
+ max_nfev : None or int, optional
385
+ Maximum number of function evaluations before the termination.
386
+ If None (default), the value is chosen automatically:
387
+
388
+ * For 'trf' and 'dogbox' : 100 * n.
389
+ * For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1)
390
+ otherwise (because 'lm' counts function calls in Jacobian
391
+ estimation).
392
+
393
+ diff_step : None or array_like, optional
394
+ Determines the relative step size for the finite difference
395
+ approximation of the Jacobian. The actual step is computed as
396
+ ``x * diff_step``. If None (default), then `diff_step` is taken to be
397
+ a conventional "optimal" power of machine epsilon for the finite
398
+ difference scheme used [NR]_.
399
+ tr_solver : {None, 'exact', 'lsmr'}, optional
400
+ Method for solving trust-region subproblems, relevant only for 'trf'
401
+ and 'dogbox' methods.
402
+
403
+ * 'exact' is suitable for not very large problems with dense
404
+ Jacobian matrices. The computational complexity per iteration is
405
+ comparable to a singular value decomposition of the Jacobian
406
+ matrix.
407
+ * 'lsmr' is suitable for problems with sparse and large Jacobian
408
+ matrices. It uses the iterative procedure
409
+ `scipy.sparse.linalg.lsmr` for finding a solution of a linear
410
+ least-squares problem and only requires matrix-vector product
411
+ evaluations.
412
+
413
+ If None (default), the solver is chosen based on the type of Jacobian
414
+ returned on the first iteration.
415
+ tr_options : dict, optional
416
+ Keyword options passed to trust-region solver.
417
+
418
+ * ``tr_solver='exact'``: `tr_options` are ignored.
419
+ * ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.
420
+ Additionally, ``method='trf'`` supports 'regularize' option
421
+ (bool, default is True), which adds a regularization term to the
422
+ normal equation, which improves convergence if the Jacobian is
423
+ rank-deficient [Byrd]_ (eq. 3.4).
424
+
425
+ jac_sparsity : {None, array_like, sparse matrix}, optional
426
+ Defines the sparsity structure of the Jacobian matrix for finite
427
+ difference estimation, its shape must be (m, n). If the Jacobian has
428
+ only a few non-zero elements in *each* row, providing the sparsity
429
+ structure will greatly speed up the computations [Curtis]_. A zero
430
+ entry means that a corresponding element in the Jacobian is identically
431
+ zero. If provided, forces the use of 'lsmr' trust-region solver.
432
+ If None (default), then dense differencing will be used. Has no effect
433
+ for 'lm' method.
434
+ verbose : {0, 1, 2}, optional
435
+ Level of algorithm's verbosity:
436
+
437
+ * 0 (default) : work silently.
438
+ * 1 : display a termination report.
439
+ * 2 : display progress during iterations (not supported by 'lm'
440
+ method).
441
+
442
+ args, kwargs : tuple and dict, optional
443
+ Additional arguments passed to `fun` and `jac`. Both empty by default.
444
+ The calling signature is ``fun(x, *args, **kwargs)`` and the same for
445
+ `jac`.
446
+
447
+ Returns
448
+ -------
449
+ result : OptimizeResult
450
+ `OptimizeResult` with the following fields defined:
451
+
452
+ x : ndarray, shape (n,)
453
+ Solution found.
454
+ cost : float
455
+ Value of the cost function at the solution.
456
+ fun : ndarray, shape (m,)
457
+ Vector of residuals at the solution.
458
+ jac : ndarray, sparse matrix or LinearOperator, shape (m, n)
459
+ Modified Jacobian matrix at the solution, in the sense that J^T J
460
+ is a Gauss-Newton approximation of the Hessian of the cost function.
461
+ The type is the same as the one used by the algorithm.
462
+ grad : ndarray, shape (n,)
463
+ Gradient of the cost function at the solution.
464
+ optimality : float
465
+ First-order optimality measure. In unconstrained problems, it is
466
+ always the uniform norm of the gradient. In constrained problems,
467
+ it is the quantity which was compared with `gtol` during iterations.
468
+ active_mask : ndarray of int, shape (n,)
469
+ Each component shows whether a corresponding constraint is active
470
+ (that is, whether a variable is at the bound):
471
+
472
+ * 0 : a constraint is not active.
473
+ * -1 : a lower bound is active.
474
+ * 1 : an upper bound is active.
475
+
476
+ Might be somewhat arbitrary for 'trf' method as it generates a
477
+ sequence of strictly feasible iterates and `active_mask` is
478
+ determined within a tolerance threshold.
479
+ nfev : int
480
+ Number of function evaluations done. Methods 'trf' and 'dogbox' do
481
+ not count function calls for numerical Jacobian approximation, as
482
+ opposed to 'lm' method.
483
+ njev : int or None
484
+ Number of Jacobian evaluations done. If numerical Jacobian
485
+ approximation is used in 'lm' method, it is set to None.
486
+ status : int
487
+ The reason for algorithm termination:
488
+
489
+ * -1 : improper input parameters status returned from MINPACK.
490
+ * 0 : the maximum number of function evaluations is exceeded.
491
+ * 1 : `gtol` termination condition is satisfied.
492
+ * 2 : `ftol` termination condition is satisfied.
493
+ * 3 : `xtol` termination condition is satisfied.
494
+ * 4 : Both `ftol` and `xtol` termination conditions are satisfied.
495
+
496
+ message : str
497
+ Verbal description of the termination reason.
498
+ success : bool
499
+ True if one of the convergence criteria is satisfied (`status` > 0).
500
+
501
+ See Also
502
+ --------
503
+ leastsq : A legacy wrapper for the MINPACK implementation of the
504
+ Levenberg-Marquardt algorithm.
505
+ curve_fit : Least-squares minimization applied to a curve-fitting problem.
506
+
507
+ Notes
508
+ -----
509
+ Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares
510
+ algorithms implemented in MINPACK (lmder, lmdif). It runs the
511
+ Levenberg-Marquardt algorithm formulated as a trust-region type algorithm.
512
+ The implementation is based on the paper [JJMore]_; it is very robust and
+ efficient, with a lot of smart tricks. It should be your first choice
514
+ for unconstrained problems. Note that it doesn't support bounds. Also,
515
+ it doesn't work when m < n.
516
+
517
+ Method 'trf' (Trust Region Reflective) is motivated by the process of
518
+ solving a system of equations, which constitutes the first-order optimality
519
+ condition for a bound-constrained minimization problem as formulated in
520
+ [STIR]_. The algorithm iteratively solves trust-region subproblems
521
+ augmented by a special diagonal quadratic term and with trust-region shape
522
+ determined by the distance from the bounds and the direction of the
523
+ gradient. This enhancements help to avoid making steps directly into bounds
524
+ and efficiently explore the whole space of variables. To further improve
525
+ convergence, the algorithm considers search directions reflected from the
526
+ bounds. To obey theoretical requirements, the algorithm keeps iterates
527
+ strictly feasible. With dense Jacobians trust-region subproblems are
528
+ solved by an exact method very similar to the one described in [JJMore]_
529
+ (and implemented in MINPACK). The difference from the MINPACK
530
+ implementation is that a singular value decomposition of a Jacobian
531
+ matrix is done once per iteration, instead of a QR decomposition and series
532
+ of Givens rotation eliminations. For large sparse Jacobians a 2-D subspace
533
+ approach of solving trust-region subproblems is used [STIR]_, [Byrd]_.
534
+ The subspace is spanned by a scaled gradient and an approximate
535
+ Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no
536
+ constraints are imposed the algorithm is very similar to MINPACK and has
537
+ generally comparable performance. The algorithm is quite robust in
+ unbounded and bounded problems, thus it is chosen as the default algorithm.
539
+
540
+ Method 'dogbox' operates in a trust-region framework, but considers
541
+ rectangular trust regions as opposed to conventional ellipsoids [Voglis]_.
542
+ The intersection of a current trust region and initial bounds is again
543
+ rectangular, so on each iteration a quadratic minimization problem subject
544
+ to bound constraints is solved approximately by Powell's dogleg method
545
+ [NumOpt]_. The required Gauss-Newton step can be computed exactly for
546
+ dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large
547
+ sparse Jacobians. The algorithm is likely to exhibit slow convergence when
548
+ the rank of the Jacobian is less than the number of variables. The algorithm
549
+ often outperforms 'trf' in bounded problems with a small number of
550
+ variables.
551
+
552
+ Robust loss functions are implemented as described in [BA]_. The idea
553
+ is to modify a residual vector and a Jacobian matrix on each iteration
554
+ such that computed gradient and Gauss-Newton Hessian approximation match
555
+ the true gradient and Hessian approximation of the cost function. Then
556
+ the algorithm proceeds in a normal way, i.e., robust loss functions are
557
+ implemented as a simple wrapper over standard least-squares algorithms.
558
+
559
+ .. versionadded:: 0.17.0
560
+
561
+ References
562
+ ----------
563
+ .. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
564
+ and Conjugate Gradient Method for Large-Scale Bound-Constrained
565
+ Minimization Problems," SIAM Journal on Scientific Computing,
566
+ Vol. 21, Number 1, pp 1-23, 1999.
567
+ .. [NR] William H. Press et. al., "Numerical Recipes. The Art of Scientific
568
+ Computing. 3rd edition", Sec. 5.7.
569
+ .. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate
570
+ solution of the trust region problem by minimization over
571
+ two-dimensional subspaces", Math. Programming, 40, pp. 247-263,
572
+ 1988.
573
+ .. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
574
+ sparse Jacobian matrices", Journal of the Institute of
575
+ Mathematics and its Applications, 13, pp. 117-120, 1974.
576
+ .. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation
577
+ and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
578
+ Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
579
+ .. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region
580
+ Dogleg Approach for Unconstrained and Bound Constrained
581
+ Nonlinear Optimization", WSEAS International Conference on
582
+ Applied Mathematics, Corfu, Greece, 2004.
583
+ .. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization,
584
+ 2nd edition", Chapter 4.
585
+ .. [BA] B. Triggs et. al., "Bundle Adjustment - A Modern Synthesis",
586
+ Proceedings of the International Workshop on Vision Algorithms:
587
+ Theory and Practice, pp. 298-372, 1999.
588
+
589
+ Examples
590
+ --------
591
+ In this example we find a minimum of the Rosenbrock function without bounds
592
+ on independent variables.
593
+
594
+ >>> import numpy as np
595
+ >>> def fun_rosenbrock(x):
596
+ ... return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])
597
+
598
+ Notice that we only provide the vector of the residuals. The algorithm
599
+ constructs the cost function as a sum of squares of the residuals, which
600
+ gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``.
601
+
602
+ >>> from scipy.optimize import least_squares
603
+ >>> x0_rosenbrock = np.array([2, 2])
604
+ >>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock)
605
+ >>> res_1.x
606
+ array([ 1., 1.])
607
+ >>> res_1.cost
608
+ 9.8669242910846867e-30
609
+ >>> res_1.optimality
610
+ 8.8928864934219529e-14
611
+
612
+ We now constrain the variables, in such a way that the previous solution
613
+ becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, and
614
+ ``x[0]`` left unconstrained. To this end, we specify the `bounds` parameter
615
+ to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``.
616
+
617
+ We also provide the analytic Jacobian:
618
+
619
+ >>> def jac_rosenbrock(x):
620
+ ... return np.array([
621
+ ... [-20 * x[0], 10],
622
+ ... [-1, 0]])
623
+
624
+ Putting this all together, we see that the new solution lies on the bound:
625
+
626
+ >>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock,
627
+ ... bounds=([-np.inf, 1.5], np.inf))
628
+ >>> res_2.x
629
+ array([ 1.22437075, 1.5 ])
630
+ >>> res_2.cost
631
+ 0.025213093946805685
632
+ >>> res_2.optimality
633
+ 1.5885401433157753e-07
634
+
635
+ Now we solve a system of equations (i.e., the cost function should be zero
636
+ at a minimum) for a Broyden tridiagonal vector-valued function of 100000
637
+ variables:
638
+
639
+ >>> def fun_broyden(x):
640
+ ... f = (3 - x) * x + 1
641
+ ... f[1:] -= x[:-1]
642
+ ... f[:-1] -= 2 * x[1:]
643
+ ... return f
644
+
645
+ The corresponding Jacobian matrix is sparse. We tell the algorithm to
646
+ estimate it by finite differences and provide the sparsity structure of
647
+ Jacobian to significantly speed up this process.
648
+
649
+ >>> from scipy.sparse import lil_matrix
650
+ >>> def sparsity_broyden(n):
651
+ ... sparsity = lil_matrix((n, n), dtype=int)
652
+ ... i = np.arange(n)
653
+ ... sparsity[i, i] = 1
654
+ ... i = np.arange(1, n)
655
+ ... sparsity[i, i - 1] = 1
656
+ ... i = np.arange(n - 1)
657
+ ... sparsity[i, i + 1] = 1
658
+ ... return sparsity
659
+ ...
660
+ >>> n = 100000
661
+ >>> x0_broyden = -np.ones(n)
662
+ ...
663
+ >>> res_3 = least_squares(fun_broyden, x0_broyden,
664
+ ... jac_sparsity=sparsity_broyden(n))
665
+ >>> res_3.cost
666
+ 4.5687069299604613e-23
667
+ >>> res_3.optimality
668
+ 1.1650454296851518e-11
669
+
670
+ Let's also solve a curve fitting problem using a robust loss function to
671
+ take care of outliers in the data. Define the model function as
672
+ ``y = a + b * exp(c * t)``, where t is a predictor variable, y is an
673
+ observation and a, b, c are parameters to estimate.
674
+
675
+ First, define the function which generates the data with noise and
676
+ outliers, define the model parameters, and generate data:
677
+
678
+ >>> from numpy.random import default_rng
679
+ >>> rng = default_rng()
680
+ >>> def gen_data(t, a, b, c, noise=0., n_outliers=0, seed=None):
681
+ ... rng = default_rng(seed)
682
+ ...
683
+ ... y = a + b * np.exp(t * c)
684
+ ...
685
+ ... error = noise * rng.standard_normal(t.size)
686
+ ... outliers = rng.integers(0, t.size, n_outliers)
687
+ ... error[outliers] *= 10
688
+ ...
689
+ ... return y + error
690
+ ...
691
+ >>> a = 0.5
692
+ >>> b = 2.0
693
+ >>> c = -1
694
+ >>> t_min = 0
695
+ >>> t_max = 10
696
+ >>> n_points = 15
697
+ ...
698
+ >>> t_train = np.linspace(t_min, t_max, n_points)
699
+ >>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3)
700
+
701
+ Define function for computing residuals and initial estimate of
702
+ parameters.
703
+
704
+ >>> def fun(x, t, y):
705
+ ... return x[0] + x[1] * np.exp(x[2] * t) - y
706
+ ...
707
+ >>> x0 = np.array([1.0, 1.0, 0.0])
708
+
709
+ Compute a standard least-squares solution:
710
+
711
+ >>> res_lsq = least_squares(fun, x0, args=(t_train, y_train))
712
+
713
+ Now compute two solutions with two different robust loss functions. The
714
+ parameter `f_scale` is set to 0.1, meaning that inlier residuals should
715
+ not significantly exceed 0.1 (the noise level used).
716
+
717
+ >>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1,
718
+ ... args=(t_train, y_train))
719
+ >>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1,
720
+ ... args=(t_train, y_train))
721
+
722
+ And, finally, plot all the curves. We see that by selecting an appropriate
723
+ `loss` we can get estimates close to optimal even in the presence of
724
+ strong outliers. But keep in mind that generally it is recommended to try
725
+ 'soft_l1' or 'huber' losses first (if at all necessary) as the other two
726
+ options may cause difficulties in the optimization process.
727
+
728
+ >>> t_test = np.linspace(t_min, t_max, n_points * 10)
729
+ >>> y_true = gen_data(t_test, a, b, c)
730
+ >>> y_lsq = gen_data(t_test, *res_lsq.x)
731
+ >>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x)
732
+ >>> y_log = gen_data(t_test, *res_log.x)
733
+ ...
734
+ >>> import matplotlib.pyplot as plt
735
+ >>> plt.plot(t_train, y_train, 'o')
736
+ >>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true')
737
+ >>> plt.plot(t_test, y_lsq, label='linear loss')
738
+ >>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss')
739
+ >>> plt.plot(t_test, y_log, label='cauchy loss')
740
+ >>> plt.xlabel("t")
741
+ >>> plt.ylabel("y")
742
+ >>> plt.legend()
743
+ >>> plt.show()
744
+
745
+ In the next example, we show how complex-valued residual functions of
746
+ complex variables can be optimized with ``least_squares()``. Consider the
747
+ following function:
748
+
749
+ >>> def f(z):
750
+ ... return z - (0.5 + 0.5j)
751
+
752
+ We wrap it into a function of real variables that returns real residuals
753
+ by simply handling the real and imaginary parts as independent variables:
754
+
755
+ >>> def f_wrap(x):
756
+ ... fx = f(x[0] + 1j*x[1])
757
+ ... return np.array([fx.real, fx.imag])
758
+
759
+ Thus, instead of the original m-D complex function of n complex
760
+ variables we optimize a 2m-D real function of 2n real variables:
761
+
762
+ >>> from scipy.optimize import least_squares
763
+ >>> res_wrapped = least_squares(f_wrap, (0.1, 0.1), bounds=([0, 0], [1, 1]))
764
+ >>> z = res_wrapped.x[0] + res_wrapped.x[1]*1j
765
+ >>> z
766
+ (0.49999999999925893+0.49999999999925893j)
767
+
768
+ """
769
+ if method not in ['trf', 'dogbox', 'lm']:
770
+ raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.")
771
+
772
+ if jac not in ['2-point', '3-point', 'cs'] and not callable(jac):
773
+ raise ValueError("`jac` must be '2-point', '3-point', 'cs' or "
774
+ "callable.")
775
+
776
+ if tr_solver not in [None, 'exact', 'lsmr']:
777
+ raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.")
778
+
779
+ if loss not in IMPLEMENTED_LOSSES and not callable(loss):
780
+ raise ValueError("`loss` must be one of {} or a callable."
781
+ .format(IMPLEMENTED_LOSSES.keys()))
782
+
783
+ if method == 'lm' and loss != 'linear':
784
+ raise ValueError("method='lm' supports only 'linear' loss function.")
785
+
786
+ if verbose not in [0, 1, 2]:
787
+ raise ValueError("`verbose` must be in [0, 1, 2].")
788
+
789
+ if max_nfev is not None and max_nfev <= 0:
790
+ raise ValueError("`max_nfev` must be None or positive integer.")
791
+
792
+ if np.iscomplexobj(x0):
793
+ raise ValueError("`x0` must be real.")
794
+
795
+ x0 = np.atleast_1d(x0).astype(float)
796
+
797
+ if x0.ndim > 1:
798
+ raise ValueError("`x0` must have at most 1 dimension.")
799
+
800
+ if isinstance(bounds, Bounds):
801
+ lb, ub = bounds.lb, bounds.ub
802
+ bounds = (lb, ub)
803
+ else:
804
+ if len(bounds) == 2:
805
+ lb, ub = prepare_bounds(bounds, x0.shape[0])
806
+ else:
807
+ raise ValueError("`bounds` must contain 2 elements.")
808
+
809
+ if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)):
810
+ raise ValueError("Method 'lm' doesn't support bounds.")
811
+
812
+ if lb.shape != x0.shape or ub.shape != x0.shape:
813
+ raise ValueError("Inconsistent shapes between bounds and `x0`.")
814
+
815
+ if np.any(lb >= ub):
816
+ raise ValueError("Each lower bound must be strictly less than each "
817
+ "upper bound.")
818
+
819
+ if not in_bounds(x0, lb, ub):
820
+ raise ValueError("`x0` is infeasible.")
821
+
822
+ x_scale = check_x_scale(x_scale, x0)
823
+
824
+ ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol, method)
825
+
826
+ if method == 'trf':
827
+ x0 = make_strictly_feasible(x0, lb, ub)
828
+
829
+ def fun_wrapped(x):
830
+ return np.atleast_1d(fun(x, *args, **kwargs))
831
+
832
+ f0 = fun_wrapped(x0)
833
+
834
+ if f0.ndim != 1:
835
+ raise ValueError("`fun` must return at most 1-d array_like. "
836
+ f"f0.shape: {f0.shape}")
837
+
838
+ if not np.all(np.isfinite(f0)):
839
+ raise ValueError("Residuals are not finite in the initial point.")
840
+
841
+ n = x0.size
842
+ m = f0.size
843
+
844
+ if method == 'lm' and m < n:
845
+ raise ValueError("Method 'lm' doesn't work when the number of "
846
+ "residuals is less than the number of variables.")
847
+
848
+ loss_function = construct_loss_function(m, loss, f_scale)
849
+ if callable(loss):
850
+ rho = loss_function(f0)
851
+ if rho.shape != (3, m):
852
+ raise ValueError("The return value of `loss` callable has wrong "
853
+ "shape.")
854
+ initial_cost = 0.5 * np.sum(rho[0])
855
+ elif loss_function is not None:
856
+ initial_cost = loss_function(f0, cost_only=True)
857
+ else:
858
+ initial_cost = 0.5 * np.dot(f0, f0)
859
+
860
+ if callable(jac):
861
+ J0 = jac(x0, *args, **kwargs)
862
+
863
+ if issparse(J0):
864
+ J0 = J0.tocsr()
865
+
866
+ def jac_wrapped(x, _=None):
867
+ return jac(x, *args, **kwargs).tocsr()
868
+
869
+ elif isinstance(J0, LinearOperator):
870
+ def jac_wrapped(x, _=None):
871
+ return jac(x, *args, **kwargs)
872
+
873
+ else:
874
+ J0 = np.atleast_2d(J0)
875
+
876
+ def jac_wrapped(x, _=None):
877
+ return np.atleast_2d(jac(x, *args, **kwargs))
878
+
879
+ else: # Estimate Jacobian by finite differences.
880
+ if method == 'lm':
881
+ if jac_sparsity is not None:
882
+ raise ValueError("method='lm' does not support "
883
+ "`jac_sparsity`.")
884
+
885
+ if jac != '2-point':
886
+ warn(f"jac='{jac}' works equivalently to '2-point' for method='lm'.",
887
+ stacklevel=2)
888
+
889
+ J0 = jac_wrapped = None
890
+ else:
891
+ if jac_sparsity is not None and tr_solver == 'exact':
892
+ raise ValueError("tr_solver='exact' is incompatible "
893
+ "with `jac_sparsity`.")
894
+
895
+ jac_sparsity = check_jac_sparsity(jac_sparsity, m, n)
896
+
897
+ def jac_wrapped(x, f):
898
+ J = approx_derivative(fun, x, rel_step=diff_step, method=jac,
899
+ f0=f, bounds=bounds, args=args,
900
+ kwargs=kwargs, sparsity=jac_sparsity)
901
+ if J.ndim != 2: # J is guaranteed not sparse.
902
+ J = np.atleast_2d(J)
903
+
904
+ return J
905
+
906
+ J0 = jac_wrapped(x0, f0)
907
+
908
+ if J0 is not None:
909
+ if J0.shape != (m, n):
910
+ raise ValueError(
911
+ f"The return value of `jac` has wrong shape: expected {(m, n)}, "
912
+ f"actual {J0.shape}."
913
+ )
914
+
915
+ if not isinstance(J0, np.ndarray):
916
+ if method == 'lm':
917
+ raise ValueError("method='lm' works only with dense "
918
+ "Jacobian matrices.")
919
+
920
+ if tr_solver == 'exact':
921
+ raise ValueError(
922
+ "tr_solver='exact' works only with dense "
923
+ "Jacobian matrices.")
924
+
925
+ jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
926
+ if isinstance(J0, LinearOperator) and jac_scale:
927
+ raise ValueError("x_scale='jac' can't be used when `jac` "
928
+ "returns LinearOperator.")
929
+
930
+ if tr_solver is None:
931
+ if isinstance(J0, np.ndarray):
932
+ tr_solver = 'exact'
933
+ else:
934
+ tr_solver = 'lsmr'
935
+
936
+ if method == 'lm':
937
+ result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol,
938
+ max_nfev, x_scale, diff_step)
939
+
940
+ elif method == 'trf':
941
+ result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol,
942
+ gtol, max_nfev, x_scale, loss_function, tr_solver,
943
+ tr_options.copy(), verbose)
944
+
945
+ elif method == 'dogbox':
946
+ if tr_solver == 'lsmr' and 'regularize' in tr_options:
947
+ warn("The keyword 'regularize' in `tr_options` is not relevant "
948
+ "for 'dogbox' method.",
949
+ stacklevel=2)
950
+ tr_options = tr_options.copy()
951
+ del tr_options['regularize']
952
+
953
+ result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol,
954
+ xtol, gtol, max_nfev, x_scale, loss_function,
955
+ tr_solver, tr_options, verbose)
956
+
957
+ result.message = TERMINATION_MESSAGES[result.status]
958
+ result.success = result.status > 0
959
+
960
+ if verbose >= 1:
961
+ print(result.message)
962
+ print("Function evaluations {}, initial cost {:.4e}, final cost "
963
+ "{:.4e}, first-order optimality {:.2e}."
964
+ .format(result.nfev, initial_cost, result.cost,
965
+ result.optimality))
966
+
967
+ return result
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/lsq_linear.py ADDED
@@ -0,0 +1,362 @@
+"""Linear least squares with bound constraints on independent variables."""
+import numpy as np
+from numpy.linalg import norm
+from scipy.sparse import issparse, csr_matrix
+from scipy.sparse.linalg import LinearOperator, lsmr
+from scipy.optimize import OptimizeResult
+from scipy.optimize._minimize import Bounds
+
+from .common import in_bounds, compute_grad
+from .trf_linear import trf_linear
+from .bvls import bvls
+
+
+def prepare_bounds(bounds, n):
+    if len(bounds) != 2:
+        raise ValueError("`bounds` must contain 2 elements.")
+    lb, ub = (np.asarray(b, dtype=float) for b in bounds)
+
+    if lb.ndim == 0:
+        lb = np.resize(lb, n)
+
+    if ub.ndim == 0:
+        ub = np.resize(ub, n)
+
+    return lb, ub
+
+
+TERMINATION_MESSAGES = {
+    -1: "The algorithm was not able to make progress on the last iteration.",
+    0: "The maximum number of iterations is exceeded.",
+    1: "The first-order optimality measure is less than `tol`.",
+    2: "The relative change of the cost function is less than `tol`.",
+    3: "The unconstrained solution is optimal."
+}
+
+
+def lsq_linear(A, b, bounds=(-np.inf, np.inf), method='trf', tol=1e-10,
+               lsq_solver=None, lsmr_tol=None, max_iter=None,
+               verbose=0, *, lsmr_maxiter=None,):
+    r"""Solve a linear least-squares problem with bounds on the variables.
+
+    Given an m-by-n design matrix A and a target vector b with m elements,
+    `lsq_linear` solves the following optimization problem::
+
+        minimize 0.5 * ||A x - b||**2
+        subject to lb <= x <= ub
+
+    This optimization problem is convex, hence a found minimum (if iterations
+    have converged) is guaranteed to be global.
+
+    Parameters
+    ----------
+    A : array_like, sparse matrix or LinearOperator, shape (m, n)
+        Design matrix. Can be `scipy.sparse.linalg.LinearOperator`.
+    b : array_like, shape (m,)
+        Target vector.
+    bounds : 2-tuple of array_like or `Bounds`, optional
+        Lower and upper bounds on parameters. Defaults to no bounds.
+        There are two ways to specify the bounds:
+
+            - Instance of `Bounds` class.
+
+            - 2-tuple of array_like: Each element of the tuple must be either
+              an array with the length equal to the number of parameters, or a
+              scalar (in which case the bound is taken to be the same for all
+              parameters). Use ``np.inf`` with an appropriate sign to disable
+              bounds on all or some parameters.
+
+    method : 'trf' or 'bvls', optional
+        Method to perform minimization.
+
+            * 'trf' : Trust Region Reflective algorithm adapted for a linear
+              least-squares problem. This is an interior-point-like method
+              and the required number of iterations is weakly correlated with
+              the number of variables.
+            * 'bvls' : Bounded-variable least-squares algorithm. This is
+              an active set method, which requires a number of iterations
+              comparable to the number of variables. Can't be used when `A` is
+              sparse or LinearOperator.
+
+        Default is 'trf'.
+    tol : float, optional
+        Tolerance parameter. The algorithm terminates if a relative change
+        of the cost function is less than `tol` on the last iteration.
+        Additionally, the first-order optimality measure is considered:
+
+            * ``method='trf'`` terminates if the uniform norm of the gradient,
+              scaled to account for the presence of the bounds, is less than
+              `tol`.
+            * ``method='bvls'`` terminates if Karush-Kuhn-Tucker conditions
+              are satisfied within `tol` tolerance.
+
+    lsq_solver : {None, 'exact', 'lsmr'}, optional
+        Method of solving unbounded least-squares problems throughout
+        iterations:
+
+            * 'exact' : Use dense QR or SVD decomposition approach. Can't be
+              used when `A` is sparse or LinearOperator.
+            * 'lsmr' : Use `scipy.sparse.linalg.lsmr` iterative procedure
+              which requires only matrix-vector product evaluations. Can't
+              be used with ``method='bvls'``.
+
+        If None (default), the solver is chosen based on type of `A`.
+    lsmr_tol : None, float or 'auto', optional
+        Tolerance parameters 'atol' and 'btol' for `scipy.sparse.linalg.lsmr`.
+        If None (default), it is set to ``1e-2 * tol``. If 'auto', the
+        tolerance will be adjusted based on the optimality of the current
+        iterate, which can speed up the optimization process, but is not always
+        reliable.
+    max_iter : None or int, optional
+        Maximum number of iterations before termination. If None (default), it
+        is set to 100 for ``method='trf'`` or to the number of variables for
+        ``method='bvls'`` (not counting iterations for 'bvls' initialization).
+    verbose : {0, 1, 2}, optional
+        Level of algorithm's verbosity:
+
+            * 0 : work silently (default).
+            * 1 : display a termination report.
+            * 2 : display progress during iterations.
+    lsmr_maxiter : None or int, optional
+        Maximum number of iterations for the lsmr least squares solver,
+        if it is used (by setting ``lsq_solver='lsmr'``). If None (default), it
+        uses lsmr's default of ``min(m, n)`` where ``m`` and ``n`` are the
+        number of rows and columns of `A`, respectively. Has no effect if
+        ``lsq_solver='exact'``.
+
+    Returns
+    -------
+    OptimizeResult with the following fields defined:
+    x : ndarray, shape (n,)
+        Solution found.
+    cost : float
+        Value of the cost function at the solution.
+    fun : ndarray, shape (m,)
+        Vector of residuals at the solution.
+    optimality : float
+        First-order optimality measure. The exact meaning depends on `method`,
+        refer to the description of `tol` parameter.
+    active_mask : ndarray of int, shape (n,)
+        Each component shows whether a corresponding constraint is active
+        (that is, whether a variable is at the bound):
+
+            *  0 : a constraint is not active.
+            * -1 : a lower bound is active.
+            *  1 : an upper bound is active.
+
+        Might be somewhat arbitrary for the `trf` method as it generates a
+        sequence of strictly feasible iterates and active_mask is determined
+        within a tolerance threshold.
+    unbounded_sol : tuple
+        Unbounded least squares solution tuple returned by the least squares
+        solver (set with `lsq_solver` option). If `lsq_solver` is not set or is
+        set to ``'exact'``, the tuple contains an ndarray of shape (n,) with
+        the unbounded solution, an ndarray with the sum of squared residuals,
+        an int with the rank of `A`, and an ndarray with the singular values
+        of `A` (see NumPy's ``linalg.lstsq`` for more information). If
+        `lsq_solver` is set to ``'lsmr'``, the tuple contains an ndarray of
+        shape (n,) with the unbounded solution, an int with the exit code,
+        an int with the number of iterations, and five floats with
+        various norms and the condition number of `A` (see SciPy's
+        ``sparse.linalg.lsmr`` for more information). This output can be
+        useful for determining the convergence of the least squares solver,
+        particularly the iterative ``'lsmr'`` solver. The unbounded least
+        squares problem is to minimize ``0.5 * ||A x - b||**2``.
+    nit : int
+        Number of iterations. Zero if the unconstrained solution is optimal.
+    status : int
+        Reason for algorithm termination:
+
+            * -1 : the algorithm was not able to make progress on the last
+              iteration.
+            *  0 : the maximum number of iterations is exceeded.
+            *  1 : the first-order optimality measure is less than `tol`.
+            *  2 : the relative change of the cost function is less than `tol`.
+            *  3 : the unconstrained solution is optimal.
+
+    message : str
+        Verbal description of the termination reason.
+    success : bool
+        True if one of the convergence criteria is satisfied (`status` > 0).
+
+    See Also
+    --------
+    nnls : Linear least squares with non-negativity constraint.
+    least_squares : Nonlinear least squares with bounds on the variables.
+
+    Notes
+    -----
+    The algorithm first computes the unconstrained least-squares solution by
+    `numpy.linalg.lstsq` or `scipy.sparse.linalg.lsmr` depending on
+    `lsq_solver`. This solution is returned as optimal if it lies within the
+    bounds.
+
+    Method 'trf' runs the adaptation of the algorithm described in [STIR]_ for
+    a linear least-squares problem. The iterations are essentially the same as
+    in the nonlinear least-squares algorithm, but as the quadratic function
+    model is always accurate, we don't need to track or modify the radius of
+    a trust region. The line search (backtracking) is used as a safety net
+    when a selected step does not decrease the cost function. A more detailed
+    description of the algorithm is given in `scipy.optimize.least_squares`.
+
+    Method 'bvls' runs a Python implementation of the algorithm described in
+    [BVLS]_. The algorithm maintains active and free sets of variables, on
+    each iteration chooses a new variable to move from the active set to the
+    free set and then solves the unconstrained least-squares problem on free
+    variables. This algorithm is guaranteed to give an accurate solution
+    eventually, but may require up to n iterations for a problem with n
+    variables. Additionally, an ad-hoc initialization procedure is
+    implemented, that determines which variables to set free or active
+    initially. It takes some number of iterations before actual BVLS starts,
+    but can significantly reduce the number of further iterations.
+
+    References
+    ----------
+    .. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
+              and Conjugate Gradient Method for Large-Scale Bound-Constrained
+              Minimization Problems," SIAM Journal on Scientific Computing,
+              Vol. 21, Number 1, pp 1-23, 1999.
+    .. [BVLS] P. B. Stark and R. L. Parker, "Bounded-Variable Least-Squares:
+              an Algorithm and Applications", Computational Statistics, 10,
+              129-141, 1995.
+
+    Examples
+    --------
+    In this example, a problem with a large sparse matrix and bounds on the
+    variables is solved.
+
+    >>> import numpy as np
+    >>> from scipy.sparse import rand
+    >>> from scipy.optimize import lsq_linear
+    >>> rng = np.random.default_rng()
+    ...
+    >>> m = 20000
+    >>> n = 10000
+    ...
+    >>> A = rand(m, n, density=1e-4, random_state=rng)
+    >>> b = rng.standard_normal(m)
+    ...
+    >>> lb = rng.standard_normal(n)
+    >>> ub = lb + 1
+    ...
+    >>> res = lsq_linear(A, b, bounds=(lb, ub), lsmr_tol='auto', verbose=1)
+    # may vary
+    The relative change of the cost function is less than `tol`.
+    Number of iterations 16, initial cost 1.5039e+04, final cost 1.1112e+04,
+    first-order optimality 4.66e-08.
+    """
+    if method not in ['trf', 'bvls']:
+        raise ValueError("`method` must be 'trf' or 'bvls'.")
+
+    if lsq_solver not in [None, 'exact', 'lsmr']:
+        raise ValueError("`lsq_solver` must be None, 'exact' or 'lsmr'.")
+
+    if verbose not in [0, 1, 2]:
+        raise ValueError("`verbose` must be in [0, 1, 2].")
+
+    if issparse(A):
+        A = csr_matrix(A)
+    elif not isinstance(A, LinearOperator):
+        A = np.atleast_2d(np.asarray(A))
+
+    if method == 'bvls':
+        if lsq_solver == 'lsmr':
+            raise ValueError("method='bvls' can't be used with "
+                             "lsq_solver='lsmr'")
+
+        if not isinstance(A, np.ndarray):
+            raise ValueError("method='bvls' can't be used with `A` being "
+                             "sparse or LinearOperator.")
+
+    if lsq_solver is None:
+        if isinstance(A, np.ndarray):
+            lsq_solver = 'exact'
+        else:
+            lsq_solver = 'lsmr'
+    elif lsq_solver == 'exact' and not isinstance(A, np.ndarray):
+        raise ValueError("`exact` solver can't be used when `A` is "
+                         "sparse or LinearOperator.")
+
+    if len(A.shape) != 2:  # No ndim for LinearOperator.
+        raise ValueError("`A` must have at most 2 dimensions.")
+
+    if max_iter is not None and max_iter <= 0:
+        raise ValueError("`max_iter` must be None or positive integer.")
+
+    m, n = A.shape
+
+    b = np.atleast_1d(b)
+    if b.ndim != 1:
+        raise ValueError("`b` must have at most 1 dimension.")
+
+    if b.size != m:
+        raise ValueError("Inconsistent shapes between `A` and `b`.")
+
+    if isinstance(bounds, Bounds):
+        lb = bounds.lb
+        ub = bounds.ub
+    else:
+        lb, ub = prepare_bounds(bounds, n)
+
+    if lb.shape != (n,) or ub.shape != (n,):  # reject if either shape is wrong
+        raise ValueError("Bounds have wrong shape.")
+
+    if np.any(lb >= ub):
+        raise ValueError("Each lower bound must be strictly less than each "
+                         "upper bound.")
+
+    if lsmr_maxiter is not None and lsmr_maxiter < 1:
+        raise ValueError("`lsmr_maxiter` must be None or positive integer.")
+
+    if not ((isinstance(lsmr_tol, float) and lsmr_tol > 0) or
+            lsmr_tol in ('auto', None)):
+        raise ValueError("`lsmr_tol` must be None, 'auto', or positive float.")
+
+    if lsq_solver == 'exact':
+        unbd_lsq = np.linalg.lstsq(A, b, rcond=-1)
+    elif lsq_solver == 'lsmr':
+        first_lsmr_tol = lsmr_tol  # tol of first call to lsmr
+        if lsmr_tol is None or lsmr_tol == 'auto':
+            first_lsmr_tol = 1e-2 * tol  # default if lsmr_tol not defined
+        unbd_lsq = lsmr(A, b, maxiter=lsmr_maxiter,
+                        atol=first_lsmr_tol, btol=first_lsmr_tol)
+    x_lsq = unbd_lsq[0]  # extract the solution from the least squares solver
+
+    if in_bounds(x_lsq, lb, ub):
+        r = A @ x_lsq - b
+        cost = 0.5 * np.dot(r, r)
+        termination_status = 3
+        termination_message = TERMINATION_MESSAGES[termination_status]
+        g = compute_grad(A, r)
+        g_norm = norm(g, ord=np.inf)
+
+        if verbose > 0:
+            print(termination_message)
+            print(f"Final cost {cost:.4e}, first-order optimality {g_norm:.2e}")
+
+        return OptimizeResult(
+            x=x_lsq, fun=r, cost=cost, optimality=g_norm,
+            active_mask=np.zeros(n), unbounded_sol=unbd_lsq,
+            nit=0, status=termination_status,
+            message=termination_message, success=True)
+
+    if method == 'trf':
+        res = trf_linear(A, b, x_lsq, lb, ub, tol, lsq_solver, lsmr_tol,
+                         max_iter, verbose, lsmr_maxiter=lsmr_maxiter)
+    elif method == 'bvls':
+        res = bvls(A, b, x_lsq, lb, ub, tol, max_iter, verbose)
+
+    res.unbounded_sol = unbd_lsq
+    res.message = TERMINATION_MESSAGES[res.status]
+    res.success = res.status > 0
+
+    if verbose > 0:
+        print(res.message)
+        print(
+            f"Number of iterations {res.nit}, initial cost {res.initial_cost:.4e}, "
+            f"final cost {res.cost:.4e}, first-order optimality {res.optimality:.2e}."
+        )
+
+    del res.initial_cost
+
+    return res
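
As a small dense counterpart to the sparse docstring example above (the matrix, target, and bounds here are illustrative), both methods apply and should agree; 'bvls' is only valid here because `A` is a dense ndarray:

import numpy as np
from scipy.optimize import lsq_linear

A = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
b = np.array([1.0, 2.0, 2.5])

# Both solvers handle a dense A; 'bvls' would raise for sparse/LinearOperator.
res_trf = lsq_linear(A, b, bounds=(0.0, 1.0), method='trf')
res_bvls = lsq_linear(A, b, bounds=(0.0, 1.0), method='bvls')
print(res_trf.x, res_bvls.x)      # both solutions lie within [0, 1]
print(res_trf.unbounded_sol[0])   # plain lstsq solution, per the docstring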
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/trf_linear.py ADDED
@@ -0,0 +1,249 @@
+"""The adaptation of Trust Region Reflective algorithm for a linear
+least-squares problem."""
+import numpy as np
+from numpy.linalg import norm
+from scipy.linalg import qr, solve_triangular
+from scipy.sparse.linalg import lsmr
+from scipy.optimize import OptimizeResult
+
+from .givens_elimination import givens_elimination
+from .common import (
+    EPS, step_size_to_bound, find_active_constraints, in_bounds,
+    make_strictly_feasible, build_quadratic_1d, evaluate_quadratic,
+    minimize_quadratic_1d, CL_scaling_vector, reflective_transformation,
+    print_header_linear, print_iteration_linear, compute_grad,
+    regularized_lsq_operator, right_multiplied_operator)
+
+
+def regularized_lsq_with_qr(m, n, R, QTb, perm, diag, copy_R=True):
+    """Solve regularized least squares using information from QR-decomposition.
+
+    The initial problem is to solve the following system in a least-squares
+    sense::
+
+        A x = b
+        D x = 0
+
+    where D is a diagonal matrix. The method is based on QR decomposition
+    of the form A P = Q R, where P is a column permutation matrix, Q is an
+    orthogonal matrix and R is an upper triangular matrix.
+
+    Parameters
+    ----------
+    m, n : int
+        Initial shape of A.
+    R : ndarray, shape (n, n)
+        Upper triangular matrix from QR decomposition of A.
+    QTb : ndarray, shape (n,)
+        First n components of Q^T b.
+    perm : ndarray, shape (n,)
+        Array defining column permutation of A, such that ith column of
+        P is perm[i]-th column of identity matrix.
+    diag : ndarray, shape (n,)
+        Array containing diagonal elements of D.
+
+    Returns
+    -------
+    x : ndarray, shape (n,)
+        Found least-squares solution.
+    """
+    if copy_R:
+        R = R.copy()
+    v = QTb.copy()
+
+    givens_elimination(R, v, diag[perm])
+
+    abs_diag_R = np.abs(np.diag(R))
+    threshold = EPS * max(m, n) * np.max(abs_diag_R)
+    nns, = np.nonzero(abs_diag_R > threshold)
+
+    R = R[np.ix_(nns, nns)]
+    v = v[nns]
+
+    x = np.zeros(n)
+    x[perm[nns]] = solve_triangular(R, v)
+
+    return x
+
+
+def backtracking(A, g, x, p, theta, p_dot_g, lb, ub):
+    """Find an appropriate step size using backtracking line search."""
+    alpha = 1
+    while True:
+        x_new, _ = reflective_transformation(x + alpha * p, lb, ub)
+        step = x_new - x
+        cost_change = -evaluate_quadratic(A, g, step)
+        if cost_change > -0.1 * alpha * p_dot_g:
+            break
+        alpha *= 0.5
+
+    active = find_active_constraints(x_new, lb, ub)
+    if np.any(active != 0):
+        x_new, _ = reflective_transformation(x + theta * alpha * p, lb, ub)
+        x_new = make_strictly_feasible(x_new, lb, ub, rstep=0)
+        step = x_new - x
+        cost_change = -evaluate_quadratic(A, g, step)
+
+    return x, step, cost_change
+
+
+def select_step(x, A_h, g_h, c_h, p, p_h, d, lb, ub, theta):
+    """Select the best step according to Trust Region Reflective algorithm."""
+    if in_bounds(x + p, lb, ub):
+        return p
+
+    p_stride, hits = step_size_to_bound(x, p, lb, ub)
+    r_h = np.copy(p_h)
+    r_h[hits.astype(bool)] *= -1
+    r = d * r_h
+
+    # Restrict step, such that it hits the bound.
+    p *= p_stride
+    p_h *= p_stride
+    x_on_bound = x + p
+
+    # Find the step size along reflected direction.
+    r_stride_u, _ = step_size_to_bound(x_on_bound, r, lb, ub)
+
+    # Stay interior.
+    r_stride_l = (1 - theta) * r_stride_u
+    r_stride_u *= theta
+
+    if r_stride_u > 0:
+        a, b, c = build_quadratic_1d(A_h, g_h, r_h, s0=p_h, diag=c_h)
+        r_stride, r_value = minimize_quadratic_1d(
+            a, b, r_stride_l, r_stride_u, c=c)
+        r_h = p_h + r_h * r_stride
+        r = d * r_h
+    else:
+        r_value = np.inf
+
+    # Now correct p_h to make it strictly interior.
+    p_h *= theta
+    p *= theta
+    p_value = evaluate_quadratic(A_h, g_h, p_h, diag=c_h)
+
+    ag_h = -g_h
+    ag = d * ag_h
+    ag_stride_u, _ = step_size_to_bound(x, ag, lb, ub)
+    ag_stride_u *= theta
+    a, b = build_quadratic_1d(A_h, g_h, ag_h, diag=c_h)
+    ag_stride, ag_value = minimize_quadratic_1d(a, b, 0, ag_stride_u)
+    ag *= ag_stride
+
+    if p_value < r_value and p_value < ag_value:
+        return p
+    elif r_value < p_value and r_value < ag_value:
+        return r
+    else:
+        return ag
+
+
+def trf_linear(A, b, x_lsq, lb, ub, tol, lsq_solver, lsmr_tol,
+               max_iter, verbose, *, lsmr_maxiter=None):
+    m, n = A.shape
+    x, _ = reflective_transformation(x_lsq, lb, ub)
+    x = make_strictly_feasible(x, lb, ub, rstep=0.1)
+
+    if lsq_solver == 'exact':
+        QT, R, perm = qr(A, mode='economic', pivoting=True)
+        QT = QT.T
+
+        if m < n:
+            R = np.vstack((R, np.zeros((n - m, n))))
+
+        QTr = np.zeros(n)
+        k = min(m, n)
+    elif lsq_solver == 'lsmr':
+        r_aug = np.zeros(m + n)
+        auto_lsmr_tol = False
+        if lsmr_tol is None:
+            lsmr_tol = 1e-2 * tol
+        elif lsmr_tol == 'auto':
+            auto_lsmr_tol = True
+
+    r = A.dot(x) - b
+    g = compute_grad(A, r)
+    cost = 0.5 * np.dot(r, r)
+    initial_cost = cost
+
+    termination_status = None
+    step_norm = None
+    cost_change = None
+
+    if max_iter is None:
+        max_iter = 100
+
+    if verbose == 2:
+        print_header_linear()
+
+    for iteration in range(max_iter):
+        v, dv = CL_scaling_vector(x, g, lb, ub)
+        g_scaled = g * v
+        g_norm = norm(g_scaled, ord=np.inf)
+        if g_norm < tol:
+            termination_status = 1
+
+        if verbose == 2:
+            print_iteration_linear(iteration, cost, cost_change,
+                                   step_norm, g_norm)
+
+        if termination_status is not None:
+            break
+
+        diag_h = g * dv
+        diag_root_h = diag_h ** 0.5
+        d = v ** 0.5
+        g_h = d * g
+
+        A_h = right_multiplied_operator(A, d)
+        if lsq_solver == 'exact':
+            QTr[:k] = QT.dot(r)
+            p_h = -regularized_lsq_with_qr(m, n, R * d[perm], QTr, perm,
+                                           diag_root_h, copy_R=False)
+        elif lsq_solver == 'lsmr':
+            lsmr_op = regularized_lsq_operator(A_h, diag_root_h)
+            r_aug[:m] = r
+            if auto_lsmr_tol:
+                eta = 1e-2 * min(0.5, g_norm)
+                lsmr_tol = max(EPS, min(0.1, eta * g_norm))
+            p_h = -lsmr(lsmr_op, r_aug, maxiter=lsmr_maxiter,
+                        atol=lsmr_tol, btol=lsmr_tol)[0]
+
+        p = d * p_h
+
+        p_dot_g = np.dot(p, g)
+        if p_dot_g > 0:
+            termination_status = -1
+
+        theta = 1 - min(0.005, g_norm)
+        step = select_step(x, A_h, g_h, diag_h, p, p_h, d, lb, ub, theta)
+        cost_change = -evaluate_quadratic(A, g, step)
+
+        # Perhaps almost never executed; the idea is that `p` is a descent
+        # direction, so we must find an acceptable cost decrease using simple
+        # "backtracking", otherwise the algorithm's logic would break.
+        if cost_change < 0:
+            x, step, cost_change = backtracking(
+                A, g, x, p, theta, p_dot_g, lb, ub)
+        else:
+            x = make_strictly_feasible(x + step, lb, ub, rstep=0)
+
+        step_norm = norm(step)
+        r = A.dot(x) - b
+        g = compute_grad(A, r)
+
+        if cost_change < tol * cost:
+            termination_status = 2
+
+        cost = 0.5 * np.dot(r, r)
+
+    if termination_status is None:
+        termination_status = 0
+
+    active_mask = find_active_constraints(x, lb, ub, rtol=tol)
+
+    return OptimizeResult(
+        x=x, fun=r, cost=cost, optimality=g_norm, active_mask=active_mask,
+        nit=iteration + 1, status=termination_status,
+        initial_cost=initial_cost)
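
The subproblem solved on each iteration above is the regularized system ``A x = b, D x = 0``, handled by `regularized_lsq_with_qr` on the dense path and by `regularized_lsq_operator` plus `lsmr` on the iterative path. A minimal sketch of that equivalence, assuming `D` is a scalar multiple of the identity so lsmr's built-in `damp` term applies (the matrices here are illustrative, not from this file):

import numpy as np
from scipy.sparse.linalg import lsmr

rng = np.random.default_rng(0)
A = rng.standard_normal((6, 4))
b = rng.standard_normal(6)
d = 0.5  # D = d * I

# Dense route: stack D under A and zeros under b, then ordinary lstsq.
x_dense = np.linalg.lstsq(np.vstack([A, d * np.eye(4)]),
                          np.concatenate([b, np.zeros(4)]), rcond=None)[0]

# Iterative route: lsmr minimizes ||A x - b||**2 + damp**2 * ||x||**2,
# which matches ||D x||**2 when D = damp * I.
x_lsmr = lsmr(A, b, damp=d)[0]
print(np.allclose(x_dense, x_lsmr, atol=1e-4))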
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_minimize.py ADDED
@@ -0,0 +1,1094 @@
+"""
+Unified interfaces to minimization algorithms.
+
+Functions
+---------
+- minimize : minimization of a function of several variables.
+- minimize_scalar : minimization of a function of one variable.
+"""
+
+__all__ = ['minimize', 'minimize_scalar']
+
+
+from warnings import warn
+
+import numpy as np
+
+# unconstrained minimization
+from ._optimize import (_minimize_neldermead, _minimize_powell, _minimize_cg,
+                        _minimize_bfgs, _minimize_newtoncg,
+                        _minimize_scalar_brent, _minimize_scalar_bounded,
+                        _minimize_scalar_golden, MemoizeJac, OptimizeResult,
+                        _wrap_callback, _recover_from_bracket_error)
+from ._trustregion_dogleg import _minimize_dogleg
+from ._trustregion_ncg import _minimize_trust_ncg
+from ._trustregion_krylov import _minimize_trust_krylov
+from ._trustregion_exact import _minimize_trustregion_exact
+from ._trustregion_constr import _minimize_trustregion_constr
+
+# constrained minimization
+from ._lbfgsb_py import _minimize_lbfgsb
+from ._tnc import _minimize_tnc
+from ._cobyla_py import _minimize_cobyla
+from ._slsqp_py import _minimize_slsqp
+from ._constraints import (old_bound_to_new, new_bounds_to_old,
+                           old_constraint_to_new, new_constraint_to_old,
+                           NonlinearConstraint, LinearConstraint, Bounds,
+                           PreparedConstraint)
+from ._differentiable_functions import FD_METHODS
+
+MINIMIZE_METHODS = ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg',
+                    'l-bfgs-b', 'tnc', 'cobyla', 'slsqp', 'trust-constr',
+                    'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov']
+
+# These methods support the new callback interface (passed an OptimizeResult)
+MINIMIZE_METHODS_NEW_CB = ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg',
+                           'l-bfgs-b', 'trust-constr', 'dogleg', 'trust-ncg',
+                           'trust-exact', 'trust-krylov']
+
+MINIMIZE_SCALAR_METHODS = ['brent', 'bounded', 'golden']
+
+def minimize(fun, x0, args=(), method=None, jac=None, hess=None,
+             hessp=None, bounds=None, constraints=(), tol=None,
+             callback=None, options=None):
+    """Minimization of scalar function of one or more variables.
+
+    Parameters
+    ----------
+    fun : callable
+        The objective function to be minimized.
+
+            ``fun(x, *args) -> float``
+
+        where ``x`` is a 1-D array with shape (n,) and ``args``
+        is a tuple of the fixed parameters needed to completely
+        specify the function.
+    x0 : ndarray, shape (n,)
+        Initial guess. Array of real elements of size (n,),
+        where ``n`` is the number of independent variables.
+    args : tuple, optional
+        Extra arguments passed to the objective function and its
+        derivatives (`fun`, `jac` and `hess` functions).
+    method : str or callable, optional
+        Type of solver. Should be one of
+
+            - 'Nelder-Mead' :ref:`(see here) <optimize.minimize-neldermead>`
+            - 'Powell'      :ref:`(see here) <optimize.minimize-powell>`
+            - 'CG'          :ref:`(see here) <optimize.minimize-cg>`
+            - 'BFGS'        :ref:`(see here) <optimize.minimize-bfgs>`
+            - 'Newton-CG'   :ref:`(see here) <optimize.minimize-newtoncg>`
+            - 'L-BFGS-B'    :ref:`(see here) <optimize.minimize-lbfgsb>`
+            - 'TNC'         :ref:`(see here) <optimize.minimize-tnc>`
+            - 'COBYLA'      :ref:`(see here) <optimize.minimize-cobyla>`
+            - 'SLSQP'       :ref:`(see here) <optimize.minimize-slsqp>`
+            - 'trust-constr' :ref:`(see here) <optimize.minimize-trustconstr>`
+            - 'dogleg'      :ref:`(see here) <optimize.minimize-dogleg>`
+            - 'trust-ncg'   :ref:`(see here) <optimize.minimize-trustncg>`
+            - 'trust-exact' :ref:`(see here) <optimize.minimize-trustexact>`
+            - 'trust-krylov' :ref:`(see here) <optimize.minimize-trustkrylov>`
+            - custom - a callable object, see below for description.
+
+        If not given, chosen to be one of ``BFGS``, ``L-BFGS-B``, ``SLSQP``,
+        depending on whether or not the problem has constraints or bounds.
+    jac : {callable, '2-point', '3-point', 'cs', bool}, optional
+        Method for computing the gradient vector. Only for CG, BFGS,
+        Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg, trust-krylov,
+        trust-exact and trust-constr.
+        If it is a callable, it should be a function that returns the gradient
+        vector:
+
+            ``jac(x, *args) -> array_like, shape (n,)``
+
+        where ``x`` is an array with shape (n,) and ``args`` is a tuple with
+        the fixed parameters. If `jac` is a Boolean and is True, `fun` is
+        assumed to return a tuple ``(f, g)`` containing the objective
+        function and the gradient.
+        Methods 'Newton-CG', 'trust-ncg', 'dogleg', 'trust-exact', and
+        'trust-krylov' require that either a callable be supplied, or that
+        `fun` return the objective and gradient.
+        If None or False, the gradient will be estimated using 2-point finite
+        difference estimation with an absolute step size.
+        Alternatively, the keywords {'2-point', '3-point', 'cs'} can be used
+        to select a finite difference scheme for numerical estimation of the
+        gradient with a relative step size. These finite difference schemes
+        obey any specified `bounds`.
+    hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy}, optional
+        Method for computing the Hessian matrix. Only for Newton-CG, dogleg,
+        trust-ncg, trust-krylov, trust-exact and trust-constr.
+        If it is callable, it should return the Hessian matrix:
+
+            ``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)``
+
+        where ``x`` is a (n,) ndarray and ``args`` is a tuple with the fixed
+        parameters.
+        The keywords {'2-point', '3-point', 'cs'} can also be used to select
+        a finite difference scheme for numerical estimation of the hessian.
+        Alternatively, objects implementing the `HessianUpdateStrategy`
+        interface can be used to approximate the Hessian. Available
+        quasi-Newton methods implementing this interface are:
+
+            - `BFGS`;
+            - `SR1`.
+
+        Not all of the options are available for each of the methods; for
+        availability refer to the notes.
+    hessp : callable, optional
+        Hessian of objective function times an arbitrary vector p. Only for
+        Newton-CG, trust-ncg, trust-krylov, trust-constr.
+        Only one of `hessp` or `hess` needs to be given. If `hess` is
+        provided, then `hessp` will be ignored. `hessp` must compute the
+        Hessian times an arbitrary vector:
+
+            ``hessp(x, p, *args) -> ndarray shape (n,)``
+
+        where ``x`` is a (n,) ndarray, ``p`` is an arbitrary vector with
+        dimension (n,) and ``args`` is a tuple with the fixed
+        parameters.
+    bounds : sequence or `Bounds`, optional
+        Bounds on variables for Nelder-Mead, L-BFGS-B, TNC, SLSQP, Powell,
+        trust-constr, and COBYLA methods. There are two ways to specify the
+        bounds:
+
+            1. Instance of `Bounds` class.
+            2. Sequence of ``(min, max)`` pairs for each element in `x`. None
+               is used to specify no bound.
+
+    constraints : {Constraint, dict} or List of {Constraint, dict}, optional
+        Constraints definition. Only for COBYLA, SLSQP and trust-constr.
+
+        Constraints for 'trust-constr' are defined as a single object or a
+        list of objects specifying constraints to the optimization problem.
+        Available constraints are:
+
+            - `LinearConstraint`
+            - `NonlinearConstraint`
+
+        Constraints for COBYLA, SLSQP are defined as a list of dictionaries.
+        Each dictionary has the fields:
+
+            type : str
+                Constraint type: 'eq' for equality, 'ineq' for inequality.
+            fun : callable
+                The function defining the constraint.
+            jac : callable, optional
+                The Jacobian of `fun` (only for SLSQP).
+            args : sequence, optional
+                Extra arguments to be passed to the function and Jacobian.
+
+        Equality constraint means that the constraint function result is to
+        be zero whereas inequality means that it is to be non-negative.
+        Note that COBYLA only supports inequality constraints.
+    tol : float, optional
+        Tolerance for termination. When `tol` is specified, the selected
+        minimization algorithm sets some relevant solver-specific tolerance(s)
+        equal to `tol`. For detailed control, use solver-specific
+        options.
+    options : dict, optional
+        A dictionary of solver options. All methods except `TNC` accept the
+        following generic options:
+
+            maxiter : int
+                Maximum number of iterations to perform. Depending on the
+                method each iteration may use several function evaluations.
+
+                For `TNC` use `maxfun` instead of `maxiter`.
+            disp : bool
+                Set to True to print convergence messages.
+
+        For method-specific options, see :func:`show_options()`.
+    callback : callable, optional
+        A callable called after each iteration.
+
+        All methods except TNC, SLSQP, and COBYLA support a callable with
+        the signature:
+
+            ``callback(intermediate_result: OptimizeResult)``
+
+        where ``intermediate_result`` is a keyword parameter containing an
+        `OptimizeResult` with attributes ``x`` and ``fun``, the present values
+        of the parameter vector and objective function. Note that the name
+        of the parameter must be ``intermediate_result`` for the callback
+        to be passed an `OptimizeResult`. These methods will also terminate if
+        the callback raises ``StopIteration``.
+
+        All methods except trust-constr (also) support a signature like:
+
+            ``callback(xk)``
+
+        where ``xk`` is the current parameter vector.
+
+        Introspection is used to determine which of the signatures above to
+        invoke.
+
+    Returns
+    -------
+    res : OptimizeResult
+        The optimization result represented as an ``OptimizeResult`` object.
+        Important attributes are: ``x`` the solution array, ``success`` a
+        Boolean flag indicating if the optimizer exited successfully and
+        ``message`` which describes the cause of the termination. See
+        `OptimizeResult` for a description of other attributes.
+
+    See also
+    --------
+    minimize_scalar : Interface to minimization algorithms for scalar
+        univariate functions
+    show_options : Additional options accepted by the solvers
+
+    Notes
+    -----
+    This section describes the available solvers that can be selected by the
+    'method' parameter. The default method is *BFGS*.
+
+    **Unconstrained minimization**
+
+    Method :ref:`CG <optimize.minimize-cg>` uses a nonlinear conjugate
+    gradient algorithm by Polak and Ribiere, a variant of the
+    Fletcher-Reeves method described in [5]_ pp.120-122. Only the
+    first derivatives are used.
+
+    Method :ref:`BFGS <optimize.minimize-bfgs>` uses the quasi-Newton
+    method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) [5]_
+    pp. 136. It uses the first derivatives only. BFGS has proven good
+    performance even for non-smooth optimizations. This method also
+    returns an approximation of the Hessian inverse, stored as
+    `hess_inv` in the OptimizeResult object.
+
+    Method :ref:`Newton-CG <optimize.minimize-newtoncg>` uses a
+    Newton-CG algorithm [5]_ pp. 168 (also known as the truncated
+    Newton method). It uses a CG method to compute the search
+    direction. See also *TNC* method for a box-constrained
+    minimization with a similar algorithm. Suitable for large-scale
+    problems.
+
+    Method :ref:`dogleg <optimize.minimize-dogleg>` uses the dog-leg
+    trust-region algorithm [5]_ for unconstrained minimization. This
+    algorithm requires the gradient and Hessian; furthermore the
+    Hessian is required to be positive definite.
+
+    Method :ref:`trust-ncg <optimize.minimize-trustncg>` uses the
+    Newton conjugate gradient trust-region algorithm [5]_ for
+    unconstrained minimization. This algorithm requires the gradient
+    and either the Hessian or a function that computes the product of
+    the Hessian with a given vector. Suitable for large-scale problems.
+
+    Method :ref:`trust-krylov <optimize.minimize-trustkrylov>` uses
+    the Newton GLTR trust-region algorithm [14]_, [15]_ for unconstrained
+    minimization. This algorithm requires the gradient
+    and either the Hessian or a function that computes the product of
+    the Hessian with a given vector. Suitable for large-scale problems.
+    On indefinite problems it usually requires fewer iterations than the
+    `trust-ncg` method and is recommended for medium and large-scale problems.
+
+    Method :ref:`trust-exact <optimize.minimize-trustexact>`
+    is a trust-region method for unconstrained minimization in which
+    quadratic subproblems are solved almost exactly [13]_. This
+    algorithm requires the gradient and the Hessian (which is
+    *not* required to be positive definite). It is, in many situations,
+    the Newton-type method that converges in the fewest iterations and
+    is the most recommended for small and medium-size problems.
+
+    **Bound-Constrained minimization**
+
+    Method :ref:`Nelder-Mead <optimize.minimize-neldermead>` uses the
+    Simplex algorithm [1]_, [2]_. This algorithm is robust in many
+    applications. However, if numerical computation of derivatives can be
+    trusted, other algorithms using the first and/or second derivatives
+    information might be preferred for their better performance in
+    general.
+
+    Method :ref:`L-BFGS-B <optimize.minimize-lbfgsb>` uses the L-BFGS-B
+    algorithm [6]_, [7]_ for bound constrained minimization.
+
+    Method :ref:`Powell <optimize.minimize-powell>` is a modification
+    of Powell's method [3]_, [4]_ which is a conjugate direction
+    method. It performs sequential one-dimensional minimizations along
+    each vector of the directions set (`direc` field in `options` and
+    `info`), which is updated at each iteration of the main
+    minimization loop. The function need not be differentiable, and no
+    derivatives are taken. If bounds are not provided, then an
+    unbounded line search will be used. If bounds are provided and
+    the initial guess is within the bounds, then every function
+    evaluation throughout the minimization procedure will be within
+    the bounds. If bounds are provided, the initial guess is outside
+    the bounds, and `direc` is full rank (default has full rank), then
+    some function evaluations during the first iteration may be
+    outside the bounds, but every function evaluation after the first
+    iteration will be within the bounds. If `direc` is not full rank,
+    then some parameters may not be optimized and the solution is not
+    guaranteed to be within the bounds.
+
+    Method :ref:`TNC <optimize.minimize-tnc>` uses a truncated Newton
+    algorithm [5]_, [8]_ to minimize a function with variables subject
+    to bounds. This algorithm uses gradient information; it is also
+    called Newton Conjugate-Gradient. It differs from the *Newton-CG*
+    method described above as it wraps a C implementation and allows
+    each variable to be given upper and lower bounds.
+
+    **Constrained Minimization**
+
+    Method :ref:`COBYLA <optimize.minimize-cobyla>` uses the
+    Constrained Optimization BY Linear Approximation (COBYLA) method
+    [9]_, [10]_, [11]_. The algorithm is based on linear
+    approximations to the objective function and each constraint. The
+    method wraps a FORTRAN implementation of the algorithm. The
+    constraints functions 'fun' may return either a single number
+    or an array or list of numbers.
+
+    Method :ref:`SLSQP <optimize.minimize-slsqp>` uses Sequential
+    Least SQuares Programming to minimize a function of several
+    variables with any combination of bounds, equality and inequality
+    constraints. The method wraps the SLSQP Optimization subroutine
+    originally implemented by Dieter Kraft [12]_. Note that the
+    wrapper handles infinite values in bounds by converting them into
+    large floating values.
+
+    Method :ref:`trust-constr <optimize.minimize-trustconstr>` is a
+    trust-region algorithm for constrained optimization. It switches
+    between two implementations depending on the problem definition.
+    It is the most versatile constrained minimization algorithm
+    implemented in SciPy and the most appropriate for large-scale problems.
+    For equality constrained problems it is an implementation of Byrd-Omojokun
+    Trust-Region SQP method described in [17]_ and in [5]_, p. 549. When
+    inequality constraints are imposed as well, it switches to the trust-region
+    interior point method described in [16]_. This interior point algorithm,
+    in turn, solves inequality constraints by introducing slack variables
+    and solving a sequence of equality-constrained barrier problems
+    for progressively smaller values of the barrier parameter.
+    The previously described equality constrained SQP method is
+    used to solve the subproblems with increasing levels of accuracy
+    as the iterate gets closer to a solution.
+
+    **Finite-Difference Options**
+
+    For Method :ref:`trust-constr <optimize.minimize-trustconstr>`
+    the gradient and the Hessian may be approximated using
+    three finite-difference schemes: {'2-point', '3-point', 'cs'}.
+    The scheme 'cs' is, potentially, the most accurate but it
+    requires the function to correctly handle complex inputs and to
+    be differentiable in the complex plane. The scheme '3-point' is more
+    accurate than '2-point' but requires twice as many operations. If the
+    gradient is estimated via finite-differences the Hessian must be
+    estimated using one of the quasi-Newton strategies.
+
+    **Method specific options for the** `hess` **keyword**
+
+    +--------------+------+----------+-------------------------+-----+
+    | method/Hess  | None | callable | '2-point'/'3-point'/'cs'| HUS |
+    +==============+======+==========+=========================+=====+
+    | Newton-CG    | x    | (n, n)   | x                       | x   |
+    |              |      | LO       |                         |     |
+    +--------------+------+----------+-------------------------+-----+
+    | dogleg       |      | (n, n)   |                         |     |
+    +--------------+------+----------+-------------------------+-----+
+    | trust-ncg    |      | (n, n)   | x                       | x   |
+    +--------------+------+----------+-------------------------+-----+
+    | trust-krylov |      | (n, n)   | x                       | x   |
+    +--------------+------+----------+-------------------------+-----+
+    | trust-exact  |      | (n, n)   |                         |     |
+    +--------------+------+----------+-------------------------+-----+
+    | trust-constr | x    | (n, n)   | x                       | x   |
+    |              |      | LO       |                         |     |
+    |              |      | sp       |                         |     |
+    +--------------+------+----------+-------------------------+-----+
+
+    where LO=LinearOperator, sp=Sparse matrix, HUS=HessianUpdateStrategy
+
+    **Custom minimizers**
+
+    It may be useful to pass a custom minimization method, for example
+    when using a frontend to this method such as `scipy.optimize.basinhopping`
+    or a different library. You can simply pass a callable as the ``method``
+    parameter.
+
+    The callable is called as ``method(fun, x0, args, **kwargs, **options)``
+    where ``kwargs`` corresponds to any other parameters passed to `minimize`
+    (such as `callback`, `hess`, etc.), except the `options` dict, which has
+    its contents also passed as `method` parameters pair by pair. Also, if
+    `jac` has been passed as a bool type, `jac` and `fun` are mangled so that
+    `fun` returns just the function values and `jac` is converted to a function
+    returning the Jacobian. The method shall return an `OptimizeResult`
+    object.
+
+    The provided `method` callable must be able to accept (and possibly ignore)
+    arbitrary parameters; the set of parameters accepted by `minimize` may
+    expand in future versions and then these parameters will be passed to
+    the method. You can find an example in the scipy.optimize tutorial.
+
+    References
+    ----------
+    .. [1] Nelder, J A, and R Mead. 1965. A Simplex Method for Function
+        Minimization. The Computer Journal 7: 308-13.
+    .. [2] Wright M H. 1996. Direct search methods: Once scorned, now
+        respectable, in Numerical Analysis 1995: Proceedings of the 1995
+        Dundee Biennial Conference in Numerical Analysis (Eds. D F
+        Griffiths and G A Watson). Addison Wesley Longman, Harlow, UK.
+        191-208.
+    .. [3] Powell, M J D. 1964. An efficient method for finding the minimum of
+        a function of several variables without calculating derivatives. The
+        Computer Journal 7: 155-162.
+    .. [4] Press W, S A Teukolsky, W T Vetterling and B P Flannery.
+        Numerical Recipes (any edition), Cambridge University Press.
+    .. [5] Nocedal, J, and S J Wright. 2006. Numerical Optimization.
+        Springer New York.
+    .. [6] Byrd, R H and P Lu and J. Nocedal. 1995. A Limited Memory
+        Algorithm for Bound Constrained Optimization. SIAM Journal on
+        Scientific and Statistical Computing 16 (5): 1190-1208.
+    .. [7] Zhu, C and R H Byrd and J Nocedal. 1997. L-BFGS-B: Algorithm
+        778: L-BFGS-B, FORTRAN routines for large scale bound constrained
+        optimization. ACM Transactions on Mathematical Software 23 (4):
+        550-560.
+    .. [8] Nash, S G. Newton-Type Minimization Via the Lanczos Method.
+        1984. SIAM Journal of Numerical Analysis 21: 770-778.
+    .. [9] Powell, M J D. A direct search optimization method that models
+        the objective and constraint functions by linear interpolation.
+        1994. Advances in Optimization and Numerical Analysis, eds. S. Gomez
+        and J-P Hennart, Kluwer Academic (Dordrecht), 51-67.
+    .. [10] Powell M J D. Direct search algorithms for optimization
+        calculations. 1998. Acta Numerica 7: 287-336.
+    .. [11] Powell M J D. A view of algorithms for optimization without
+        derivatives. 2007. Cambridge University Technical Report DAMTP
+        2007/NA03
+    .. [12] Kraft, D. A software package for sequential quadratic
+        programming. 1988. Tech. Rep. DFVLR-FB 88-28, DLR German Aerospace
+        Center -- Institute for Flight Mechanics, Koln, Germany.
+    .. [13] Conn, A. R., Gould, N. I., and Toint, P. L.
+        Trust region methods. 2000. Siam. pp. 169-200.
+    .. [14] F. Lenders, C. Kirches, A. Potschka: "trlib: A vector-free
+        implementation of the GLTR method for iterative solution of
+        the trust region problem", :arxiv:`1611.04718`
+    .. [15] N. Gould, S. Lucidi, M. Roma, P. Toint: "Solving the
+        Trust-Region Subproblem using the Lanczos Method",
+        SIAM J. Optim., 9(2), 504--525, (1999).
+    .. [16] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. 1999.
+        An interior point algorithm for large-scale nonlinear programming.
+        SIAM Journal on Optimization 9.4: 877-900.
+    .. [17] Lalee, Marucha, Jorge Nocedal, and Todd Plantega. 1998. On the
+        implementation of an algorithm for large-scale equality constrained
+        optimization. SIAM Journal on Optimization 8.3: 682-706.
+
+    Examples
+    --------
+    Let us consider the problem of minimizing the Rosenbrock function. This
+    function (and its respective derivatives) is implemented in `rosen`
+    (resp. `rosen_der`, `rosen_hess`) in `scipy.optimize`.
+
+    >>> from scipy.optimize import minimize, rosen, rosen_der
+
+    A simple application of the *Nelder-Mead* method is:
+
+    >>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
+    >>> res = minimize(rosen, x0, method='Nelder-Mead', tol=1e-6)
+    >>> res.x
+    array([ 1.,  1.,  1.,  1.,  1.])
+
+    Now using the *BFGS* algorithm, using the first derivative and a few
+    options:
+
+    >>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der,
+    ...                options={'gtol': 1e-6, 'disp': True})
+    Optimization terminated successfully.
+             Current function value: 0.000000
+             Iterations: 26
+             Function evaluations: 31
+             Gradient evaluations: 31
+    >>> res.x
+    array([ 1.,  1.,  1.,  1.,  1.])
+    >>> print(res.message)
+    Optimization terminated successfully.
+    >>> res.hess_inv
+    array([[ 0.00749589,  0.01255155,  0.02396251,  0.04750988,  0.09495377],  # may vary
+           [ 0.01255155,  0.02510441,  0.04794055,  0.09502834,  0.18996269],
+           [ 0.02396251,  0.04794055,  0.09631614,  0.19092151,  0.38165151],
+           [ 0.04750988,  0.09502834,  0.19092151,  0.38341252,  0.7664427 ],
+           [ 0.09495377,  0.18996269,  0.38165151,  0.7664427 ,  1.53713523]])
+
+
+    Next, consider a minimization problem with several constraints (namely
+    Example 16.4 from [5]_). The objective function is:
+
+    >>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
+
+    There are three constraints defined as:
+
+    >>> cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
+    ...         {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
+    ...         {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2})
+
+    And variables must be positive, hence the following bounds:
+
+    >>> bnds = ((0, None), (0, None))
+
+    The optimization problem is solved using the SLSQP method as:
+
+    >>> res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds,
+    ...                constraints=cons)
+
+    It should converge to the theoretical solution (1.4, 1.7).
+
+    """
+    x0 = np.atleast_1d(np.asarray(x0))
+
+    if x0.ndim != 1:
+        raise ValueError("'x0' must only have one dimension.")
+
+    if x0.dtype.kind in np.typecodes["AllInteger"]:
+        x0 = np.asarray(x0, dtype=float)
+
+    if not isinstance(args, tuple):
+        args = (args,)
+
+    if method is None:
+        # Select automatically
+        if constraints:
+            method = 'SLSQP'
+        elif bounds is not None:
+            method = 'L-BFGS-B'
+        else:
+            method = 'BFGS'
+
+    if callable(method):
+        meth = "_custom"
+    else:
+        meth = method.lower()
+
+    if options is None:
+        options = {}
+    # check if optional parameters are supported by the selected method
+    # - jac
+    if meth in ('nelder-mead', 'powell', 'cobyla') and bool(jac):
+        warn('Method %s does not use gradient information (jac).' % method,
+             RuntimeWarning, stacklevel=2)
+    # - hess
+    if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr',
+                    'trust-krylov', 'trust-exact', '_custom') and hess is not None:
+        warn('Method %s does not use Hessian information (hess).' % method,
+             RuntimeWarning, stacklevel=2)
+    # - hessp
+    if meth not in ('newton-cg', 'trust-ncg', 'trust-constr',
+                    'trust-krylov', '_custom') \
+            and hessp is not None:
+        warn('Method %s does not use Hessian-vector product '
+             'information (hessp).' % method,
+             RuntimeWarning, stacklevel=2)
+    # - constraints or bounds
+    if (meth not in ('cobyla', 'slsqp', 'trust-constr', '_custom') and
+            np.any(constraints)):
+        warn('Method %s cannot handle constraints.' % method,
+             RuntimeWarning, stacklevel=2)
+    if meth not in ('nelder-mead', 'powell', 'l-bfgs-b', 'cobyla', 'slsqp',
+                    'tnc', 'trust-constr', '_custom') and bounds is not None:
+        warn('Method %s cannot handle bounds.' % method,
+             RuntimeWarning, stacklevel=2)
+    # - return_all
+    if (meth in ('l-bfgs-b', 'tnc', 'cobyla', 'slsqp') and
+            options.get('return_all', False)):
+        warn('Method %s does not support the return_all option.' % method,
+             RuntimeWarning, stacklevel=2)
+
+    # check gradient vector
+    if callable(jac):
+        pass
+    elif jac is True:
+        # fun returns func and grad
+        fun = MemoizeJac(fun)
+        jac = fun.derivative
+    elif (jac in FD_METHODS and
+          meth in ['trust-constr', 'bfgs', 'cg', 'l-bfgs-b', 'tnc', 'slsqp']):
+        # finite differences with relative step
+        pass
+    elif meth in ['trust-constr']:
+        # default jac calculation for this method
+        jac = '2-point'
+    elif jac is None or bool(jac) is False:
+        # this will cause e.g. LBFGS to use forward difference, absolute step
+        jac = None
+    else:
+        # default if jac option is not understood
+        jac = None
+
+    # set default tolerances
+    if tol is not None:
+        options = dict(options)
+        if meth == 'nelder-mead':
+            options.setdefault('xatol', tol)
+            options.setdefault('fatol', tol)
+        if meth in ('newton-cg', 'powell', 'tnc'):
+            options.setdefault('xtol', tol)
+        if meth in ('powell', 'l-bfgs-b', 'tnc', 'slsqp'):
+            options.setdefault('ftol', tol)
+        if meth in ('bfgs', 'cg', 'l-bfgs-b', 'tnc', 'dogleg',
+                    'trust-ncg', 'trust-exact', 'trust-krylov'):
+            options.setdefault('gtol', tol)
+        if meth in ('cobyla', '_custom'):
+            options.setdefault('tol', tol)
+        if meth == 'trust-constr':
+            options.setdefault('xtol', tol)
+            options.setdefault('gtol', tol)
+            options.setdefault('barrier_tol', tol)
+
+    if meth == '_custom':
+        # custom method called before bounds and constraints are 'standardised'
+        # custom method should be able to accept whatever bounds/constraints
+        # are provided to it.
+        return method(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp,
+                      bounds=bounds, constraints=constraints,
+                      callback=callback, **options)
+
+    constraints = standardize_constraints(constraints, x0, meth)
+
+    remove_vars = False
+    if bounds is not None:
+        # convert to new-style bounds so we only have to consider one case
+        bounds = standardize_bounds(bounds, x0, 'new')
+        bounds = _validate_bounds(bounds, x0, meth)
+
+        if meth in {"tnc", "slsqp", "l-bfgs-b"}:
+            # These methods can't take the finite-difference derivatives they
+            # need when a variable is fixed by the bounds. To avoid this issue,
+            # remove fixed variables from the problem.
+            # NOTE: if this list is expanded, then be sure to update the
+            # accompanying tests and test_optimize.eb_data. Consider also if
+            # default OptimizeResult will need updating.
+
+            # determine whether any variables are fixed
+            i_fixed = (bounds.lb == bounds.ub)
+
+            if np.all(i_fixed):
+                # all the parameters are fixed, a minimizer is not able to do
+                # anything
+                return _optimize_result_for_equal_bounds(
+                    fun, bounds, meth, args=args, constraints=constraints
+                )
+
+            # determine whether finite differences are needed for any grad/jac
+            fd_needed = (not callable(jac))
+            for con in constraints:
+                if not callable(con.get('jac', None)):
+                    fd_needed = True
+
+            # If finite differences are ever used, remove all fixed variables
+            # Always remove fixed variables for TNC; see gh-14565
+            remove_vars = i_fixed.any() and (fd_needed or meth == "tnc")
+            if remove_vars:
+                x_fixed = (bounds.lb)[i_fixed]
+                x0 = x0[~i_fixed]
+                bounds = _remove_from_bounds(bounds, i_fixed)
+                fun = _remove_from_func(fun, i_fixed, x_fixed)
+                if callable(callback):
+                    callback = _remove_from_func(callback, i_fixed, x_fixed)
+                if callable(jac):
+                    jac = _remove_from_func(jac, i_fixed, x_fixed, remove=1)
+
+                # make a copy of the constraints so the user's version doesn't
+                # get changed. (Shallow copy is ok)
+                constraints = [con.copy() for con in constraints]
+                for con in constraints:  # yes, guaranteed to be a list
+                    con['fun'] = _remove_from_func(con['fun'], i_fixed,
+                                                   x_fixed, min_dim=1,
+                                                   remove=0)
+                    if callable(con.get('jac', None)):
+                        con['jac'] = _remove_from_func(con['jac'], i_fixed,
+                                                       x_fixed, min_dim=2,
+                                                       remove=1)
+        bounds = standardize_bounds(bounds, x0, meth)
+
+    callback = _wrap_callback(callback, meth)
+
+    if meth == 'nelder-mead':
+        res = _minimize_neldermead(fun, x0, args, callback, bounds=bounds,
+                                   **options)
+    elif meth == 'powell':
+        res = _minimize_powell(fun, x0, args, callback, bounds, **options)
+    elif meth == 'cg':
+        res = _minimize_cg(fun, x0, args, jac, callback, **options)
+    elif meth == 'bfgs':
+        res = _minimize_bfgs(fun, x0, args, jac, callback, **options)
+    elif meth == 'newton-cg':
+        res = _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback,
+                                 **options)
+    elif meth == 'l-bfgs-b':
+        res = _minimize_lbfgsb(fun, x0, args, jac, bounds,
+                               callback=callback, **options)
+    elif meth == 'tnc':
+        res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback,
+                            **options)
+    elif meth == 'cobyla':
+        res = _minimize_cobyla(fun, x0, args, constraints, callback=callback,
+                               bounds=bounds, **options)
+    elif meth == 'slsqp':
+        res = _minimize_slsqp(fun, x0, args, jac, bounds,
+                              constraints, callback=callback, **options)
+    elif meth == 'trust-constr':
+        res = _minimize_trustregion_constr(fun, x0, args, jac, hess, hessp,
+                                           bounds, constraints,
+                                           callback=callback, **options)
+    elif meth == 'dogleg':
+        res = _minimize_dogleg(fun, x0, args, jac, hess,
+                               callback=callback, **options)
+    elif meth == 'trust-ncg':
+        res = _minimize_trust_ncg(fun, x0, args, jac, hess, hessp,
+                                  callback=callback, **options)
+    elif meth == 'trust-krylov':
+        res = _minimize_trust_krylov(fun, x0, args, jac, hess, hessp,
+                                     callback=callback, **options)
+    elif meth == 'trust-exact':
+        res = _minimize_trustregion_exact(fun, x0, args, jac, hess,
+                                          callback=callback, **options)
+    else:
+        raise ValueError('Unknown solver %s' % method)
+
+    if remove_vars:
+        res.x = _add_to_array(res.x, i_fixed, x_fixed)
+        res.jac = _add_to_array(res.jac, i_fixed, np.nan)
+        if "hess_inv" in res:
+            res.hess_inv = None  # unknown
+
+    if getattr(callback, 'stop_iteration', False):
+        res.success = False
+        res.status = 99
+        res.message = "`callback` raised `StopIteration`."
+
+    return res
+
756
+
757
+ def minimize_scalar(fun, bracket=None, bounds=None, args=(),
758
+ method=None, tol=None, options=None):
759
+ """Local minimization of scalar function of one variable.
760
+
761
+ Parameters
762
+ ----------
763
+ fun : callable
764
+ Objective function.
765
+ Scalar function, must return a scalar.
766
+ bracket : sequence, optional
767
+ For methods 'brent' and 'golden', `bracket` defines the bracketing
768
+ interval and is required.
769
+ Either a triple ``(xa, xb, xc)`` satisfying ``xa < xb < xc`` and
770
+ ``func(xb) < func(xa) and func(xb) < func(xc)``, or a pair
771
+ ``(xa, xb)`` to be used as initial points for a downhill bracket search
772
+ (see `scipy.optimize.bracket`).
773
+ The minimizer ``res.x`` will not necessarily satisfy
774
+ ``xa <= res.x <= xb``.
775
+ bounds : sequence, optional
776
+ For method 'bounded', `bounds` is mandatory and must have two finite
777
+ items corresponding to the optimization bounds.
778
+ args : tuple, optional
779
+ Extra arguments passed to the objective function.
780
+ method : str or callable, optional
781
+ Type of solver. Should be one of:
782
+
783
+ - :ref:`Brent <optimize.minimize_scalar-brent>`
784
+ - :ref:`Bounded <optimize.minimize_scalar-bounded>`
785
+ - :ref:`Golden <optimize.minimize_scalar-golden>`
786
+ - custom - a callable object (added in version 0.14.0), see below
787
+
788
+ Default is "Bounded" if bounds are provided and "Brent" otherwise.
789
+ See the 'Notes' section for details of each solver.
790
+
791
+ tol : float, optional
792
+ Tolerance for termination. For detailed control, use solver-specific
793
+ options.
794
+ options : dict, optional
795
+ A dictionary of solver options.
796
+
797
+ maxiter : int
798
+ Maximum number of iterations to perform.
799
+ disp : bool
800
+ Set to True to print convergence messages.
801
+
802
+ See :func:`show_options()` for solver-specific options.
803
+
804
+ Returns
805
+ -------
806
+ res : OptimizeResult
807
+ The optimization result represented as a ``OptimizeResult`` object.
808
+ Important attributes are: ``x`` the solution array, ``success`` a
809
+ Boolean flag indicating if the optimizer exited successfully and
810
+ ``message`` which describes the cause of the termination. See
811
+ `OptimizeResult` for a description of other attributes.
812
+
813
+ See also
814
+ --------
815
+ minimize : Interface to minimization algorithms for scalar multivariate
816
+ functions
817
+ show_options : Additional options accepted by the solvers
818
+
819
+ Notes
820
+ -----
821
+ This section describes the available solvers that can be selected by the
822
+ 'method' parameter. The default method is the ``"Bounded"`` Brent method if
823
+ `bounds` are passed and unbounded ``"Brent"`` otherwise.
824
+
825
+ Method :ref:`Brent <optimize.minimize_scalar-brent>` uses Brent's
826
+ algorithm [1]_ to find a local minimum. The algorithm uses inverse
827
+ parabolic interpolation when possible to speed up convergence of
828
+ the golden section method.
829
+
830
+ Method :ref:`Golden <optimize.minimize_scalar-golden>` uses the
831
+ golden section search technique [1]_. It uses an analog of the bisection
832
+ method to decrease the bracketed interval. It is usually
833
+ preferable to use the *Brent* method.
834
+
835
+ Method :ref:`Bounded <optimize.minimize_scalar-bounded>` can
836
+ perform bounded minimization [2]_ [3]_. It uses the Brent method to find a
837
+ local minimum in the interval x1 < xopt < x2.
838
+
839
+ Note that the Brent and Golden methods do not guarantee success unless a
840
+ valid ``bracket`` triple is provided. If a three-point bracket cannot be
841
+ found, consider `scipy.optimize.minimize`. Also, all methods are intended
842
+ only for local minimization. When the function of interest has more than
843
+ one local minimum, consider :ref:`global_optimization`.
844
+
845
+ **Custom minimizers**
846
+
847
+ It may be useful to pass a custom minimization method, for example
848
+ when using some library frontend to minimize_scalar. You can simply
849
+ pass a callable as the ``method`` parameter.
850
+
851
+ The callable is called as ``method(fun, args, **kwargs, **options)``
852
+ where ``kwargs`` corresponds to any other parameters passed to `minimize_scalar`
853
+ (such as `bracket`, `tol`, etc.), except the `options` dict, which has
854
+ its contents also passed as `method` parameters pair by pair. The method
855
+ shall return an `OptimizeResult` object.
856
+
857
+ The provided `method` callable must be able to accept (and possibly ignore)
858
+ arbitrary parameters; the set of parameters accepted by `minimize` may
859
+ expand in future versions and then these parameters will be passed to
860
+ the method. You can find an example in the scipy.optimize tutorial.
861
+
862
+ .. versionadded:: 0.11.0
863
+
864
+ References
865
+ ----------
866
+ .. [1] Press, W., S.A. Teukolsky, W.T. Vetterling, and B.P. Flannery.
867
+ Numerical Recipes in C. Cambridge University Press.
868
+ .. [2] Forsythe, G.E., M. A. Malcolm, and C. B. Moler. "Computer Methods
869
+ for Mathematical Computations." Prentice-Hall Series in Automatic
870
+ Computation 259 (1977).
871
+ .. [3] Brent, Richard P. Algorithms for Minimization Without Derivatives.
872
+ Courier Corporation, 2013.
873
+
874
+ Examples
875
+ --------
876
+ Consider the problem of minimizing the following function.
877
+
878
+ >>> def f(x):
879
+ ... return (x - 2) * x * (x + 2)**2
880
+
881
+ Using the *Brent* method, we find the local minimum as:
882
+
883
+ >>> from scipy.optimize import minimize_scalar
884
+ >>> res = minimize_scalar(f)
885
+ >>> res.fun
886
+ -9.9149495908
887
+
888
+ The minimizer is:
889
+
890
+ >>> res.x
891
+ 1.28077640403
892
+
893
+ Using the *Bounded* method, we find a local minimum with specified
894
+ bounds as:
895
+
896
+ >>> res = minimize_scalar(f, bounds=(-3, -1), method='bounded')
897
+ >>> res.fun # minimum
898
+ 3.28365179850e-13
899
+ >>> res.x # minimizer
900
+ -2.0000002026
901
+
902
+ """
903
+ if not isinstance(args, tuple):
904
+ args = (args,)
905
+
906
+ if callable(method):
907
+ meth = "_custom"
908
+ elif method is None:
909
+ meth = 'brent' if bounds is None else 'bounded'
910
+ else:
911
+ meth = method.lower()
912
+ if options is None:
913
+ options = {}
914
+
915
+ if bounds is not None and meth in {'brent', 'golden'}:
916
+ message = f"Use of `bounds` is incompatible with 'method={method}'."
917
+ raise ValueError(message)
918
+
919
+ if tol is not None:
920
+ options = dict(options)
921
+ if meth == 'bounded' and 'xatol' not in options:
922
+ warn("Method 'bounded' does not support relative tolerance in x; "
923
+ "defaulting to absolute tolerance.",
924
+ RuntimeWarning, stacklevel=2)
925
+ options['xatol'] = tol
926
+ elif meth == '_custom':
927
+ options.setdefault('tol', tol)
928
+ else:
929
+ options.setdefault('xtol', tol)
930
+
931
+ # replace boolean "disp" option, if specified, by an integer value.
932
+ disp = options.get('disp')
933
+ if isinstance(disp, bool):
934
+ options['disp'] = 2 * int(disp)
935
+
936
+ if meth == '_custom':
937
+ res = method(fun, args=args, bracket=bracket, bounds=bounds, **options)
938
+ elif meth == 'brent':
939
+ res = _recover_from_bracket_error(_minimize_scalar_brent,
940
+ fun, bracket, args, **options)
941
+ elif meth == 'bounded':
942
+ if bounds is None:
943
+ raise ValueError('The `bounds` parameter is mandatory for '
944
+ 'method `bounded`.')
945
+ res = _minimize_scalar_bounded(fun, bounds, args, **options)
946
+ elif meth == 'golden':
947
+ res = _recover_from_bracket_error(_minimize_scalar_golden,
948
+ fun, bracket, args, **options)
949
+ else:
950
+ raise ValueError('Unknown solver %s' % method)
951
+
952
+ # gh-16196 reported inconsistencies in the output shape of `res.x`. While
953
+ # fixing this, future-proof it for when the function is vectorized:
954
+ # the shape of `res.x` should match that of `res.fun`.
955
+ res.fun = np.asarray(res.fun)[()]
956
+ res.x = np.reshape(res.x, res.fun.shape)[()]
957
+ return res
958
+
959
+
960
+ def _remove_from_bounds(bounds, i_fixed):
961
+ """Removes fixed variables from a `Bounds` instance"""
962
+ lb = bounds.lb[~i_fixed]
963
+ ub = bounds.ub[~i_fixed]
964
+ return Bounds(lb, ub) # don't mutate original Bounds object
965
+
966
+
967
+ def _remove_from_func(fun_in, i_fixed, x_fixed, min_dim=None, remove=0):
968
+ """Wraps a function such that fixed variables need not be passed in"""
969
+ def fun_out(x_in, *args, **kwargs):
970
+ x_out = np.zeros_like(i_fixed, dtype=x_in.dtype)
971
+ x_out[i_fixed] = x_fixed
972
+ x_out[~i_fixed] = x_in
973
+ y_out = fun_in(x_out, *args, **kwargs)
974
+ y_out = np.array(y_out)
975
+
976
+ if min_dim == 1:
977
+ y_out = np.atleast_1d(y_out)
978
+ elif min_dim == 2:
979
+ y_out = np.atleast_2d(y_out)
980
+
981
+ if remove == 1:
982
+ y_out = y_out[..., ~i_fixed]
983
+ elif remove == 2:
984
+ y_out = y_out[~i_fixed, ~i_fixed]
985
+
986
+ return y_out
987
+ return fun_out
988
+
989
+
990
+ def _add_to_array(x_in, i_fixed, x_fixed):
991
+ """Adds fixed variables back to an array"""
992
+ i_free = ~i_fixed
993
+ if x_in.ndim == 2:
994
+ i_free = i_free[:, None] @ i_free[None, :]
995
+ x_out = np.zeros_like(i_free, dtype=x_in.dtype)
996
+ x_out[~i_free] = x_fixed
997
+ x_out[i_free] = x_in.ravel()
998
+ return x_out
999
+
1000
+
1001
+ def _validate_bounds(bounds, x0, meth):
1002
+ """Check that bounds are valid."""
1003
+
1004
+ msg = "An upper bound is less than the corresponding lower bound."
1005
+ if np.any(bounds.ub < bounds.lb):
1006
+ raise ValueError(msg)
1007
+
1008
+ msg = "The number of bounds is not compatible with the length of `x0`."
1009
+ try:
1010
+ bounds.lb = np.broadcast_to(bounds.lb, x0.shape)
1011
+ bounds.ub = np.broadcast_to(bounds.ub, x0.shape)
1012
+ except Exception as e:
1013
+ raise ValueError(msg) from e
1014
+
1015
+ return bounds
1016
+
1017
+ def standardize_bounds(bounds, x0, meth):
1018
+ """Converts bounds to the form required by the solver."""
1019
+ if meth in {'trust-constr', 'powell', 'nelder-mead', 'cobyla', 'new'}:
1020
+ if not isinstance(bounds, Bounds):
1021
+ lb, ub = old_bound_to_new(bounds)
1022
+ bounds = Bounds(lb, ub)
1023
+ elif meth in ('l-bfgs-b', 'tnc', 'slsqp', 'old'):
1024
+ if isinstance(bounds, Bounds):
1025
+ bounds = new_bounds_to_old(bounds.lb, bounds.ub, x0.shape[0])
1026
+ return bounds
1027
+
1028
+
1029
+ def standardize_constraints(constraints, x0, meth):
1030
+ """Converts constraints to the form required by the solver."""
1031
+ all_constraint_types = (NonlinearConstraint, LinearConstraint, dict)
1032
+ new_constraint_types = all_constraint_types[:-1]
1033
+ if constraints is None:
1034
+ constraints = []
1035
+ elif isinstance(constraints, all_constraint_types):
1036
+ constraints = [constraints]
1037
+ else:
1038
+ constraints = list(constraints) # ensure it's a mutable sequence
1039
+
1040
+ if meth in ['trust-constr', 'new']:
1041
+ for i, con in enumerate(constraints):
1042
+ if not isinstance(con, new_constraint_types):
1043
+ constraints[i] = old_constraint_to_new(i, con)
1044
+ else:
1045
+ # iterate over copy, changing original
1046
+ for i, con in enumerate(list(constraints)):
1047
+ if isinstance(con, new_constraint_types):
1048
+ old_constraints = new_constraint_to_old(con, x0)
1049
+ constraints[i] = old_constraints[0]
1050
+ constraints.extend(old_constraints[1:]) # appends 1 if present
1051
+
1052
+ return constraints
1053
+
1054
+
1055
+ def _optimize_result_for_equal_bounds(
1056
+ fun, bounds, method, args=(), constraints=()
1057
+ ):
1058
+ """
1059
+ Provides a default OptimizeResult for when a bounded minimization method
1060
+ has (lb == ub).all().
1061
+
1062
+ Parameters
1063
+ ----------
1064
+ fun: callable
1065
+ bounds: Bounds
1066
+ method: str
1067
+ constraints: Constraint
1068
+ """
1069
+ success = True
1070
+ message = 'All independent variables were fixed by bounds.'
1071
+
1072
+ # bounds is new-style
1073
+ x0 = bounds.lb
1074
+
1075
+ if constraints:
1076
+ message = ("All independent variables were fixed by bounds at values"
1077
+ " that satisfy the constraints.")
1078
+ constraints = standardize_constraints(constraints, x0, 'new')
1079
+
1080
+ maxcv = 0
1081
+ for c in constraints:
1082
+ pc = PreparedConstraint(c, x0)
1083
+ violation = pc.violation(x0)
1084
+ if np.sum(violation):
1085
+ maxcv = max(maxcv, np.max(violation))
1086
+ success = False
1087
+ message = (f"All independent variables were fixed by bounds, but "
1088
+ f"the independent variables do not satisfy the "
1089
+ f"constraints exactly. (Maximum violation: {maxcv}).")
1090
+
1091
+ return OptimizeResult(
1092
+ x=x0, fun=fun(x0, *args), success=success, message=message, nfev=1,
1093
+ njev=0, nhev=0,
1094
+ )
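
As a quick illustration of the fixed-variable handling above (a minimal sketch, not part of the committed file; exact values may vary by SciPy version):

    import numpy as np
    from scipy.optimize import minimize, Bounds

    def f(x):
        return np.sum((x - 1.0) ** 2)

    # x[1] is fixed at 0.5 because lb == ub; L-BFGS-B solves the reduced
    # problem in x[0] and x[2], and the fixed value is spliced back in
    bounds = Bounds([-5.0, 0.5, -5.0], [5.0, 0.5, 5.0])
    res = minimize(f, x0=[0.0, 0.5, 0.0], method='L-BFGS-B', bounds=bounds)
    print(res.x)    # approximately [1. , 0.5, 1. ]
    print(res.jac)  # the entry for the fixed variable is reported as nan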
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_minpack.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (78.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_qap.py ADDED
@@ -0,0 +1,731 @@
1
+ import numpy as np
2
+ import operator
3
+ from . import (linear_sum_assignment, OptimizeResult)
4
+ from ._optimize import _check_unknown_options
5
+
6
+ from scipy._lib._util import check_random_state
7
+ import itertools
8
+
9
+ QUADRATIC_ASSIGNMENT_METHODS = ['faq', '2opt']
10
+
11
+ def quadratic_assignment(A, B, method="faq", options=None):
12
+ r"""
13
+ Approximates solution to the quadratic assignment problem and
14
+ the graph matching problem.
15
+
16
+ Quadratic assignment solves problems of the following form:
17
+
18
+ .. math::
19
+
20
+ \min_P & \ {\ \text{trace}(A^T P B P^T)}\\
21
+ \mbox{s.t. } & {P \ \epsilon \ \mathcal{P}}\\
22
+
23
+ where :math:`\mathcal{P}` is the set of all permutation matrices,
24
+ and :math:`A` and :math:`B` are square matrices.
25
+
26
+ Graph matching tries to *maximize* the same objective function.
27
+ This algorithm can be thought of as finding the alignment of the
28
+ nodes of two graphs that minimizes the number of induced edge
29
+ disagreements, or, in the case of weighted graphs, the sum of squared
30
+ edge weight differences.
31
+
32
+ Note that the quadratic assignment problem is NP-hard. The results given
33
+ here are approximations and are not guaranteed to be optimal.
34
+
35
+
36
+ Parameters
37
+ ----------
38
+ A : 2-D array, square
39
+ The square matrix :math:`A` in the objective function above.
40
+
41
+ B : 2-D array, square
42
+ The square matrix :math:`B` in the objective function above.
43
+
44
+ method : str in {'faq', '2opt'} (default: 'faq')
45
+ The algorithm used to solve the problem.
46
+ :ref:`'faq' <optimize.qap-faq>` (default) and
47
+ :ref:`'2opt' <optimize.qap-2opt>` are available.
48
+
49
+ options : dict, optional
50
+ A dictionary of solver options. All solvers support the following:
51
+
52
+ maximize : bool (default: False)
53
+ Maximizes the objective function if ``True``.
54
+
55
+ partial_match : 2-D array of integers, optional (default: None)
56
+ Fixes part of the matching. Also known as a "seed" [2]_.
57
+
58
+ Each row of `partial_match` specifies a pair of matched nodes:
59
+ node ``partial_match[i, 0]`` of `A` is matched to node
60
+ ``partial_match[i, 1]`` of `B`. The array has shape ``(m, 2)``,
61
+ where ``m`` is not greater than the number of nodes, :math:`n`.
62
+
63
+ rng : {None, int, `numpy.random.Generator`,
64
+ `numpy.random.RandomState`}, optional
65
+
66
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
67
+ singleton is used.
68
+ If `seed` is an int, a new ``RandomState`` instance is used,
69
+ seeded with `seed`.
70
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
71
+ that instance is used.
72
+
73
+ For method-specific options, see
74
+ :func:`show_options('quadratic_assignment') <show_options>`.
75
+
76
+ Returns
77
+ -------
78
+ res : OptimizeResult
79
+ `OptimizeResult` containing the following fields.
80
+
81
+ col_ind : 1-D array
82
+ Column indices corresponding to the best permutation found of the
83
+ nodes of `B`.
84
+ fun : float
85
+ The objective value of the solution.
86
+ nit : int
87
+ The number of iterations performed during optimization.
88
+
89
+ Notes
90
+ -----
91
+ The default method :ref:`'faq' <optimize.qap-faq>` uses the Fast
92
+ Approximate QAP algorithm [1]_; it typically offers the best combination of
93
+ speed and accuracy.
94
+ Method :ref:`'2opt' <optimize.qap-2opt>` can be computationally expensive,
95
+ but may be a useful alternative, or it can be used to refine the solution
96
+ returned by another method.
97
+
98
+ References
99
+ ----------
100
+ .. [1] J.T. Vogelstein, J.M. Conroy, V. Lyzinski, L.J. Podrazik,
101
+ S.G. Kratzer, E.T. Harley, D.E. Fishkind, R.J. Vogelstein, and
102
+ C.E. Priebe, "Fast approximate quadratic programming for graph
103
+ matching," PLOS one, vol. 10, no. 4, p. e0121002, 2015,
104
+ :doi:`10.1371/journal.pone.0121002`
105
+
106
+ .. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski,
107
+ C. Priebe, "Seeded graph matching", Pattern Recognit. 87 (2019):
108
+ 203-215, :doi:`10.1016/j.patcog.2018.09.014`
109
+
110
+ .. [3] "2-opt," Wikipedia.
111
+ https://en.wikipedia.org/wiki/2-opt
112
+
113
+ Examples
114
+ --------
115
+ >>> import numpy as np
116
+ >>> from scipy.optimize import quadratic_assignment
117
+ >>> A = np.array([[0, 80, 150, 170], [80, 0, 130, 100],
118
+ ... [150, 130, 0, 120], [170, 100, 120, 0]])
119
+ >>> B = np.array([[0, 5, 2, 7], [0, 0, 3, 8],
120
+ ... [0, 0, 0, 3], [0, 0, 0, 0]])
121
+ >>> res = quadratic_assignment(A, B)
122
+ >>> print(res)
123
+ fun: 3260
124
+ col_ind: [0 3 2 1]
125
+ nit: 9
126
+
127
+ The see the relationship between the returned ``col_ind`` and ``fun``,
128
+ use ``col_ind`` to form the best permutation matrix found, then evaluate
129
+ the objective function :math:`f(P) = trace(A^T P B P^T )`.
130
+
131
+ >>> perm = res['col_ind']
132
+ >>> P = np.eye(len(A), dtype=int)[perm]
133
+ >>> fun = np.trace(A.T @ P @ B @ P.T)
134
+ >>> print(fun)
135
+ 3260
136
+
137
+ Alternatively, to avoid constructing the permutation matrix explicitly,
138
+ directly permute the rows and columns of the distance matrix.
139
+
140
+ >>> fun = np.trace(A.T @ B[perm][:, perm])
141
+ >>> print(fun)
142
+ 3260
143
+
144
+ Although not guaranteed in general, ``quadratic_assignment`` happens to
145
+ have found the globally optimal solution.
146
+
147
+ >>> from itertools import permutations
148
+ >>> perm_opt, fun_opt = None, np.inf
149
+ >>> for perm in permutations([0, 1, 2, 3]):
150
+ ... perm = np.array(perm)
151
+ ... fun = np.trace(A.T @ B[perm][:, perm])
152
+ ... if fun < fun_opt:
153
+ ... fun_opt, perm_opt = fun, perm
154
+ >>> print(np.array_equal(perm_opt, res['col_ind']))
155
+ True
156
+
157
+ Here is an example for which the default method,
158
+ :ref:`'faq' <optimize.qap-faq>`, does not find the global optimum.
159
+
160
+ >>> A = np.array([[0, 5, 8, 6], [5, 0, 5, 1],
161
+ ... [8, 5, 0, 2], [6, 1, 2, 0]])
162
+ >>> B = np.array([[0, 1, 8, 4], [1, 0, 5, 2],
163
+ ... [8, 5, 0, 5], [4, 2, 5, 0]])
164
+ >>> res = quadratic_assignment(A, B)
165
+ >>> print(res)
166
+ fun: 178
167
+ col_ind: [1 0 3 2]
168
+ nit: 13
169
+
170
+ If accuracy is important, consider using :ref:`'2opt' <optimize.qap-2opt>`
171
+ to refine the solution.
172
+
173
+ >>> guess = np.array([np.arange(len(A)), res.col_ind]).T
174
+ >>> res = quadratic_assignment(A, B, method="2opt",
175
+ ... options = {'partial_guess': guess})
176
+ >>> print(res)
177
+ fun: 176
178
+ col_ind: [1 2 3 0]
179
+ nit: 17
180
+
181
+ """
182
+
183
+ if options is None:
184
+ options = {}
185
+
186
+ method = method.lower()
187
+ methods = {"faq": _quadratic_assignment_faq,
188
+ "2opt": _quadratic_assignment_2opt}
189
+ if method not in methods:
190
+ raise ValueError(f"method {method} must be in {methods}.")
191
+ res = methods[method](A, B, **options)
192
+ return res
193
+
194
+
195
+ def _calc_score(A, B, perm):
196
+ # equivalent to objective function but avoids matmul
197
+ return np.sum(A * B[perm][:, perm])
198
+
199
+
200
+ def _common_input_validation(A, B, partial_match):
201
+ A = np.atleast_2d(A)
202
+ B = np.atleast_2d(B)
203
+
204
+ if partial_match is None:
205
+ partial_match = np.array([[], []]).T
206
+ partial_match = np.atleast_2d(partial_match).astype(int)
207
+
208
+ msg = None
209
+ if A.shape[0] != A.shape[1]:
210
+ msg = "`A` must be square"
211
+ elif B.shape[0] != B.shape[1]:
212
+ msg = "`B` must be square"
213
+ elif A.ndim != 2 or B.ndim != 2:
214
+ msg = "`A` and `B` must have exactly two dimensions"
215
+ elif A.shape != B.shape:
216
+ msg = "`A` and `B` matrices must be of equal size"
217
+ elif partial_match.shape[0] > A.shape[0]:
218
+ msg = "`partial_match` can have only as many seeds as there are nodes"
219
+ elif partial_match.shape[1] != 2:
220
+ msg = "`partial_match` must have two columns"
221
+ elif partial_match.ndim != 2:
222
+ msg = "`partial_match` must have exactly two dimensions"
223
+ elif (partial_match < 0).any():
224
+ msg = "`partial_match` must contain only positive indices"
225
+ elif (partial_match >= len(A)).any():
226
+ msg = "`partial_match` entries must be less than number of nodes"
227
+ elif (not len(set(partial_match[:, 0])) == len(partial_match[:, 0]) or
228
+ not len(set(partial_match[:, 1])) == len(partial_match[:, 1])):
229
+ msg = "`partial_match` column entries must be unique"
230
+
231
+ if msg is not None:
232
+ raise ValueError(msg)
233
+
234
+ return A, B, partial_match
235
+
236
+
237
+ def _quadratic_assignment_faq(A, B,
238
+ maximize=False, partial_match=None, rng=None,
239
+ P0="barycenter", shuffle_input=False, maxiter=30,
240
+ tol=0.03, **unknown_options):
241
+ r"""Solve the quadratic assignment problem (approximately).
242
+
243
+ This function solves the Quadratic Assignment Problem (QAP) and the
244
+ Graph Matching Problem (GMP) using the Fast Approximate QAP Algorithm
245
+ (FAQ) [1]_.
246
+
247
+ Quadratic assignment solves problems of the following form:
248
+
249
+ .. math::
250
+
251
+ \min_P & \ {\ \text{trace}(A^T P B P^T)}\\
252
+ \mbox{s.t. } & {P \ \epsilon \ \mathcal{P}}\\
253
+
254
+ where :math:`\mathcal{P}` is the set of all permutation matrices,
255
+ and :math:`A` and :math:`B` are square matrices.
256
+
257
+ Graph matching tries to *maximize* the same objective function.
258
+ This algorithm can be thought of as finding the alignment of the
259
+ nodes of two graphs that minimizes the number of induced edge
260
+ disagreements, or, in the case of weighted graphs, the sum of squared
261
+ edge weight differences.
262
+
263
+ Note that the quadratic assignment problem is NP-hard. The results given
264
+ here are approximations and are not guaranteed to be optimal.
265
+
266
+ Parameters
267
+ ----------
268
+ A : 2-D array, square
269
+ The square matrix :math:`A` in the objective function above.
270
+ B : 2-D array, square
271
+ The square matrix :math:`B` in the objective function above.
272
+ method : str in {'faq', '2opt'} (default: 'faq')
273
+ The algorithm used to solve the problem. This is the method-specific
274
+ documentation for 'faq'.
275
+ :ref:`'2opt' <optimize.qap-2opt>` is also available.
276
+
277
+ Options
278
+ -------
279
+ maximize : bool (default: False)
280
+ Maximizes the objective function if ``True``.
281
+ partial_match : 2-D array of integers, optional (default: None)
282
+ Fixes part of the matching. Also known as a "seed" [2]_.
283
+
284
+ Each row of `partial_match` specifies a pair of matched nodes:
285
+ node ``partial_match[i, 0]`` of `A` is matched to node
286
+ ``partial_match[i, 1]`` of `B`. The array has shape ``(m, 2)``, where
287
+ ``m`` is not greater than the number of nodes, :math:`n`.
288
+
289
+ rng : {None, int, `numpy.random.Generator`,
290
+ `numpy.random.RandomState`}, optional
291
+
292
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
293
+ singleton is used.
294
+ If `seed` is an int, a new ``RandomState`` instance is used,
295
+ seeded with `seed`.
296
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
297
+ that instance is used.
298
+ P0 : 2-D array, "barycenter", or "randomized" (default: "barycenter")
299
+ Initial position. Must be a doubly-stochastic matrix [3]_.
300
+
301
+ If the initial position is an array, it must be a doubly stochastic
302
+ matrix of size :math:`m' \times m'` where :math:`m' = n - m`.
303
+
304
+ If ``"barycenter"`` (default), the initial position is the barycenter
305
+ of the Birkhoff polytope (the space of doubly stochastic matrices).
306
+ This is a :math:`m' \times m'` matrix with all entries equal to
307
+ :math:`1 / m'`.
308
+
309
+ If ``"randomized"`` the initial search position is
310
+ :math:`P_0 = (J + K) / 2`, where :math:`J` is the barycenter and
311
+ :math:`K` is a random doubly stochastic matrix.
312
+ shuffle_input : bool (default: False)
313
+ Set to `True` to resolve degenerate gradients randomly. For
314
+ non-degenerate gradients this option has no effect.
315
+ maxiter : int, positive (default: 30)
316
+ Integer specifying the max number of Frank-Wolfe iterations performed.
317
+ tol : float (default: 0.03)
318
+ Tolerance for termination. Frank-Wolfe iteration terminates when
319
+ :math:`\frac{||P_{i}-P_{i+1}||_F}{\sqrt{m')}} \leq tol`,
320
+ where :math:`i` is the iteration number.
321
+
322
+ Returns
323
+ -------
324
+ res : OptimizeResult
325
+ `OptimizeResult` containing the following fields.
326
+
327
+ col_ind : 1-D array
328
+ Column indices corresponding to the best permutation found of the
329
+ nodes of `B`.
330
+ fun : float
331
+ The objective value of the solution.
332
+ nit : int
333
+ The number of Frank-Wolfe iterations performed.
334
+
335
+ Notes
336
+ -----
337
+ The algorithm may be sensitive to the initial permutation matrix (or
338
+ search "position") due to the possibility of several local minima
339
+ within the feasible region. A barycenter initialization is more likely to
340
+ result in a better solution than a single random initialization. However,
341
+ calling ``quadratic_assignment`` several times with different random
342
+ initializations may result in a better optimum at the cost of longer
343
+ total execution time.
344
+
345
+ Examples
346
+ --------
347
+ As mentioned above, a barycenter initialization often results in a better
348
+ solution than a single random initialization.
349
+
350
+ >>> from numpy.random import default_rng
351
+ >>> rng = default_rng()
352
+ >>> n = 15
353
+ >>> A = rng.random((n, n))
354
+ >>> B = rng.random((n, n))
355
+ >>> res = quadratic_assignment(A, B) # FAQ is default method
356
+ >>> print(res.fun)
357
+ 46.871483385480545 # may vary
358
+
359
+ >>> options = {"P0": "randomized"} # use randomized initialization
360
+ >>> res = quadratic_assignment(A, B, options=options)
361
+ >>> print(res.fun)
362
+ 47.224831071310625 # may vary
363
+
364
+ However, consider running from several randomized initializations and
365
+ keeping the best result.
366
+
367
+ >>> res = min([quadratic_assignment(A, B, options=options)
368
+ ... for i in range(30)], key=lambda x: x.fun)
369
+ >>> print(res.fun)
370
+ 46.671852533681516 # may vary
371
+
372
+ The '2-opt' method can be used to further refine the results.
373
+
374
+ >>> options = {"partial_guess": np.array([np.arange(n), res.col_ind]).T}
375
+ >>> res = quadratic_assignment(A, B, method="2opt", options=options)
376
+ >>> print(res.fun)
377
+ 46.47160735721583 # may vary
378
+
379
+ References
380
+ ----------
381
+ .. [1] J.T. Vogelstein, J.M. Conroy, V. Lyzinski, L.J. Podrazik,
382
+ S.G. Kratzer, E.T. Harley, D.E. Fishkind, R.J. Vogelstein, and
383
+ C.E. Priebe, "Fast approximate quadratic programming for graph
384
+ matching," PLOS one, vol. 10, no. 4, p. e0121002, 2015,
385
+ :doi:`10.1371/journal.pone.0121002`
386
+
387
+ .. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski,
388
+ C. Priebe, "Seeded graph matching", Pattern Recognit. 87 (2019):
389
+ 203-215, :doi:`10.1016/j.patcog.2018.09.014`
390
+
391
+ .. [3] "Doubly stochastic Matrix," Wikipedia.
392
+ https://en.wikipedia.org/wiki/Doubly_stochastic_matrix
393
+
394
+ """
395
+
396
+ _check_unknown_options(unknown_options)
397
+
398
+ maxiter = operator.index(maxiter)
399
+
400
+ # ValueError check
401
+ A, B, partial_match = _common_input_validation(A, B, partial_match)
402
+
403
+ msg = None
404
+ if isinstance(P0, str) and P0 not in {'barycenter', 'randomized'}:
405
+ msg = "Invalid 'P0' parameter string"
406
+ elif maxiter <= 0:
407
+ msg = "'maxiter' must be a positive integer"
408
+ elif tol <= 0:
409
+ msg = "'tol' must be a positive float"
410
+ if msg is not None:
411
+ raise ValueError(msg)
412
+
413
+ rng = check_random_state(rng)
414
+ n = len(A) # number of vertices in graphs
415
+ n_seeds = len(partial_match) # number of seeds
416
+ n_unseed = n - n_seeds
417
+
418
+ # [1] Algorithm 1 Line 1 - choose initialization
419
+ if not isinstance(P0, str):
420
+ P0 = np.atleast_2d(P0)
421
+ if P0.shape != (n_unseed, n_unseed):
422
+ msg = "`P0` matrix must have shape m' x m', where m'=n-m"
423
+ elif ((P0 < 0).any() or not np.allclose(np.sum(P0, axis=0), 1)
424
+ or not np.allclose(np.sum(P0, axis=1), 1)):
425
+ msg = "`P0` matrix must be doubly stochastic"
426
+ if msg is not None:
427
+ raise ValueError(msg)
428
+ elif P0 == 'barycenter':
429
+ P0 = np.ones((n_unseed, n_unseed)) / n_unseed
430
+ elif P0 == 'randomized':
431
+ J = np.ones((n_unseed, n_unseed)) / n_unseed
432
+ # generate a nxn matrix where each entry is a random number [0, 1]
433
+ # would use rand, but Generators don't have it
434
+ # would use random, but old mtrand.RandomStates don't have it
435
+ K = _doubly_stochastic(rng.uniform(size=(n_unseed, n_unseed)))
436
+ P0 = (J + K) / 2
437
+
438
+ # check trivial cases
439
+ if n == 0 or n_seeds == n:
440
+ score = _calc_score(A, B, partial_match[:, 1])
441
+ res = {"col_ind": partial_match[:, 1], "fun": score, "nit": 0}
442
+ return OptimizeResult(res)
443
+
444
+ obj_func_scalar = 1
445
+ if maximize:
446
+ obj_func_scalar = -1
447
+
448
+ nonseed_B = np.setdiff1d(range(n), partial_match[:, 1])
449
+ if shuffle_input:
450
+ nonseed_B = rng.permutation(nonseed_B)
451
+
452
+ nonseed_A = np.setdiff1d(range(n), partial_match[:, 0])
453
+ perm_A = np.concatenate([partial_match[:, 0], nonseed_A])
454
+ perm_B = np.concatenate([partial_match[:, 1], nonseed_B])
455
+
456
+ # definitions according to Seeded Graph Matching [2].
457
+ A11, A12, A21, A22 = _split_matrix(A[perm_A][:, perm_A], n_seeds)
458
+ B11, B12, B21, B22 = _split_matrix(B[perm_B][:, perm_B], n_seeds)
459
+ const_sum = A21 @ B21.T + A12.T @ B12
460
+
461
+ P = P0
462
+ # [1] Algorithm 1 Line 2 - loop while stopping criteria not met
463
+ for n_iter in range(1, maxiter+1):
464
+ # [1] Algorithm 1 Line 3 - compute the gradient of f(P) = -tr(APB^tP^t)
465
+ grad_fp = (const_sum + A22 @ P @ B22.T + A22.T @ P @ B22)
466
+ # [1] Algorithm 1 Line 4 - get direction Q by solving Eq. 8
467
+ _, cols = linear_sum_assignment(grad_fp, maximize=maximize)
468
+ Q = np.eye(n_unseed)[cols]
469
+
470
+ # [1] Algorithm 1 Line 5 - compute the step size
471
+ # Noting that e.g. trace(Ax) = trace(A)*x, expand and re-collect
472
+ # terms as ax**2 + bx + c. c does not affect location of minimum
473
+ # and can be ignored. Also, note that trace(A@B) = (A.T*B).sum();
474
+ # apply where possible for efficiency.
475
+ R = P - Q
476
+ b21 = ((R.T @ A21) * B21).sum()
477
+ b12 = ((R.T @ A12.T) * B12.T).sum()
478
+ AR22 = A22.T @ R
479
+ BR22 = B22 @ R.T
480
+ b22a = (AR22 * B22.T[cols]).sum()
481
+ b22b = (A22 * BR22[cols]).sum()
482
+ a = (AR22.T * BR22).sum()
483
+ b = b21 + b12 + b22a + b22b
484
+ # critical point of ax^2 + bx + c is at x = -d/(2*e)
485
+ # if a * obj_func_scalar > 0, it is a minimum
486
+ # if minimum is not in [0, 1], only endpoints need to be considered
487
+ if a*obj_func_scalar > 0 and 0 <= -b/(2*a) <= 1:
488
+ alpha = -b/(2*a)
489
+ else:
490
+ alpha = np.argmin([0, (b + a)*obj_func_scalar])
491
+
492
+ # [1] Algorithm 1 Line 6 - Update P
493
+ P_i1 = alpha * P + (1 - alpha) * Q
494
+ if np.linalg.norm(P - P_i1) / np.sqrt(n_unseed) < tol:
495
+ P = P_i1
496
+ break
497
+ P = P_i1
498
+ # [1] Algorithm 1 Line 7 - end main loop
499
+
500
+ # [1] Algorithm 1 Line 8 - project onto the set of permutation matrices
501
+ _, col = linear_sum_assignment(P, maximize=True)
502
+ perm = np.concatenate((np.arange(n_seeds), col + n_seeds))
503
+
504
+ unshuffled_perm = np.zeros(n, dtype=int)
505
+ unshuffled_perm[perm_A] = perm_B[perm]
506
+
507
+ score = _calc_score(A, B, unshuffled_perm)
508
+ res = {"col_ind": unshuffled_perm, "fun": score, "nit": n_iter}
509
+ return OptimizeResult(res)
510
+
511
+
512
+ def _split_matrix(X, n):
513
+ # definitions according to Seeded Graph Matching [2].
514
+ upper, lower = X[:n], X[n:]
515
+ return upper[:, :n], upper[:, n:], lower[:, :n], lower[:, n:]
516
+
517
+
518
+ def _doubly_stochastic(P, tol=1e-3):
519
+ # Adapted from @btaba implementation
520
+ # https://github.com/btaba/sinkhorn_knopp
521
+ # of Sinkhorn-Knopp algorithm
522
+ # https://projecteuclid.org/euclid.pjm/1102992505
523
+
524
+ max_iter = 1000
525
+ c = 1 / P.sum(axis=0)
526
+ r = 1 / (P @ c)
527
+ P_eps = P
528
+
529
+ for it in range(max_iter):
530
+ if ((np.abs(P_eps.sum(axis=1) - 1) < tol).all() and
531
+ (np.abs(P_eps.sum(axis=0) - 1) < tol).all()):
532
+ # All column/row sums ~= 1 within threshold
533
+ break
534
+
535
+ c = 1 / (r @ P)
536
+ r = 1 / (P @ c)
537
+ P_eps = r[:, None] * P * c
538
+
539
+ return P_eps
540
+
541
+
542
+ def _quadratic_assignment_2opt(A, B, maximize=False, rng=None,
543
+ partial_match=None,
544
+ partial_guess=None,
545
+ **unknown_options):
546
+ r"""Solve the quadratic assignment problem (approximately).
547
+
548
+ This function solves the Quadratic Assignment Problem (QAP) and the
549
+ Graph Matching Problem (GMP) using the 2-opt algorithm [1]_.
550
+
551
+ Quadratic assignment solves problems of the following form:
552
+
553
+ .. math::
554
+
555
+ \min_P & \ {\ \text{trace}(A^T P B P^T)}\\
556
+ \mbox{s.t. } & {P \ \epsilon \ \mathcal{P}}\\
557
+
558
+ where :math:`\mathcal{P}` is the set of all permutation matrices,
559
+ and :math:`A` and :math:`B` are square matrices.
560
+
561
+ Graph matching tries to *maximize* the same objective function.
562
+ This algorithm can be thought of as finding the alignment of the
563
+ nodes of two graphs that minimizes the number of induced edge
564
+ disagreements, or, in the case of weighted graphs, the sum of squared
565
+ edge weight differences.
566
+
567
+ Note that the quadratic assignment problem is NP-hard. The results given
568
+ here are approximations and are not guaranteed to be optimal.
569
+
570
+ Parameters
571
+ ----------
572
+ A : 2-D array, square
573
+ The square matrix :math:`A` in the objective function above.
574
+ B : 2-D array, square
575
+ The square matrix :math:`B` in the objective function above.
576
+ method : str in {'faq', '2opt'} (default: 'faq')
577
+ The algorithm used to solve the problem. This is the method-specific
578
+ documentation for '2opt'.
579
+ :ref:`'faq' <optimize.qap-faq>` is also available.
580
+
581
+ Options
582
+ -------
583
+ maximize : bool (default: False)
584
+ Maximizes the objective function if ``True``.
585
+ rng : {None, int, `numpy.random.Generator`,
586
+ `numpy.random.RandomState`}, optional
587
+
588
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
589
+ singleton is used.
590
+ If `seed` is an int, a new ``RandomState`` instance is used,
591
+ seeded with `seed`.
592
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
593
+ that instance is used.
594
+ partial_match : 2-D array of integers, optional (default: None)
595
+ Fixes part of the matching. Also known as a "seed" [2]_.
596
+
597
+ Each row of `partial_match` specifies a pair of matched nodes: node
598
+ ``partial_match[i, 0]`` of `A` is matched to node
599
+ ``partial_match[i, 1]`` of `B`. The array has shape ``(m, 2)``,
600
+ where ``m`` is not greater than the number of nodes, :math:`n`.
601
+
602
+ .. note::
603
+ `partial_match` must be sorted by the first column.
604
+
605
+ partial_guess : 2-D array of integers, optional (default: None)
606
+ A guess for the matching between the two matrices. Unlike
607
+ `partial_match`, `partial_guess` does not fix the indices; they are
608
+ still free to be optimized.
609
+
610
+ Each row of `partial_guess` specifies a pair of matched nodes: node
611
+ ``partial_guess[i, 0]`` of `A` is matched to node
612
+ ``partial_guess[i, 1]`` of `B`. The array has shape ``(m, 2)``,
613
+ where ``m`` is not greater than the number of nodes, :math:`n`.
614
+
615
+ .. note::
616
+ `partial_guess` must be sorted by the first column.
617
+
618
+ Returns
619
+ -------
620
+ res : OptimizeResult
621
+ `OptimizeResult` containing the following fields.
622
+
623
+ col_ind : 1-D array
624
+ Column indices corresponding to the best permutation found of the
625
+ nodes of `B`.
626
+ fun : float
627
+ The objective value of the solution.
628
+ nit : int
629
+ The number of iterations performed during optimization.
630
+
631
+ Notes
632
+ -----
633
+ This is a greedy algorithm that works similarly to bubble sort: beginning
634
+ with an initial permutation, it iteratively swaps pairs of indices to
635
+ improve the objective function until no such improvements are possible.
636
+
637
+ References
638
+ ----------
639
+ .. [1] "2-opt," Wikipedia.
640
+ https://en.wikipedia.org/wiki/2-opt
641
+
642
+ .. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski,
643
+ C. Priebe, "Seeded graph matching", Pattern Recognit. 87 (2019):
644
+ 203-215, https://doi.org/10.1016/j.patcog.2018.09.014
645
+
646
+ """
647
+ _check_unknown_options(unknown_options)
648
+ rng = check_random_state(rng)
649
+ A, B, partial_match = _common_input_validation(A, B, partial_match)
650
+
651
+ N = len(A)
652
+ # check trivial cases
653
+ if N == 0 or partial_match.shape[0] == N:
654
+ score = _calc_score(A, B, partial_match[:, 1])
655
+ res = {"col_ind": partial_match[:, 1], "fun": score, "nit": 0}
656
+ return OptimizeResult(res)
657
+
658
+ if partial_guess is None:
659
+ partial_guess = np.array([[], []]).T
660
+ partial_guess = np.atleast_2d(partial_guess).astype(int)
661
+
662
+ msg = None
663
+ if partial_guess.shape[0] > A.shape[0]:
664
+ msg = ("`partial_guess` can have only as "
665
+ "many entries as there are nodes")
666
+ elif partial_guess.shape[1] != 2:
667
+ msg = "`partial_guess` must have two columns"
668
+ elif partial_guess.ndim != 2:
669
+ msg = "`partial_guess` must have exactly two dimensions"
670
+ elif (partial_guess < 0).any():
671
+ msg = "`partial_guess` must contain only positive indices"
672
+ elif (partial_guess >= len(A)).any():
673
+ msg = "`partial_guess` entries must be less than number of nodes"
674
+ elif (not len(set(partial_guess[:, 0])) == len(partial_guess[:, 0]) or
675
+ not len(set(partial_guess[:, 1])) == len(partial_guess[:, 1])):
676
+ msg = "`partial_guess` column entries must be unique"
677
+ if msg is not None:
678
+ raise ValueError(msg)
679
+
680
+ fixed_rows = None
681
+ if partial_match.size or partial_guess.size:
682
+ # use partial_match and partial_guess for initial permutation,
683
+ # but randomly permute the rest.
684
+ guess_rows = np.zeros(N, dtype=bool)
685
+ guess_cols = np.zeros(N, dtype=bool)
686
+ fixed_rows = np.zeros(N, dtype=bool)
687
+ fixed_cols = np.zeros(N, dtype=bool)
688
+ perm = np.zeros(N, dtype=int)
689
+
690
+ rg, cg = partial_guess.T
691
+ guess_rows[rg] = True
692
+ guess_cols[cg] = True
693
+ perm[guess_rows] = cg
694
+
695
+ # match overrides guess
696
+ rf, cf = partial_match.T
697
+ fixed_rows[rf] = True
698
+ fixed_cols[cf] = True
699
+ perm[fixed_rows] = cf
700
+
701
+ random_rows = ~fixed_rows & ~guess_rows
702
+ random_cols = ~fixed_cols & ~guess_cols
703
+ perm[random_rows] = rng.permutation(np.arange(N)[random_cols])
704
+ else:
705
+ perm = rng.permutation(np.arange(N))
706
+
707
+ best_score = _calc_score(A, B, perm)
708
+
709
+ i_free = np.arange(N)
710
+ if fixed_rows is not None:
711
+ i_free = i_free[~fixed_rows]
712
+
713
+ better = operator.gt if maximize else operator.lt
714
+ n_iter = 0
715
+ done = False
716
+ while not done:
717
+ # equivalent to nested for loops i in range(N), j in range(i, N)
718
+ for i, j in itertools.combinations_with_replacement(i_free, 2):
719
+ n_iter += 1
720
+ perm[i], perm[j] = perm[j], perm[i]
721
+ score = _calc_score(A, B, perm)
722
+ if better(score, best_score):
723
+ best_score = score
724
+ break
725
+ # faster to swap back than to create a new list every time
726
+ perm[i], perm[j] = perm[j], perm[i]
727
+ else: # no swaps made
728
+ done = True
729
+
730
+ res = {"col_ind": perm, "fun": best_score, "nit": n_iter}
731
+ return OptimizeResult(res)
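
A small check (a sketch for illustration, not part of the committed file) of the identity that `_calc_score` relies on: for the permutation matrix ``P = np.eye(n)[perm]``, ``trace(A.T @ P @ B @ P.T)`` equals ``np.sum(A * B[perm][:, perm])``, so the objective can be evaluated without matrix products:

    import numpy as np
    from scipy.optimize import quadratic_assignment

    rng = np.random.default_rng(0)
    A = rng.random((5, 5))
    B = rng.random((5, 5))
    res = quadratic_assignment(A, B)

    # same construction as in the docstring example above
    perm = res.col_ind
    P = np.eye(5)[perm]
    assert np.isclose(np.trace(A.T @ P @ B @ P.T),
                      np.sum(A * B[perm][:, perm]))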
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_slsqp.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (86.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_krylov.py ADDED
@@ -0,0 +1,65 @@
1
+ from ._trustregion import (_minimize_trust_region)
2
+ from ._trlib import (get_trlib_quadratic_subproblem)
3
+
4
+ __all__ = ['_minimize_trust_krylov']
5
+
6
+ def _minimize_trust_krylov(fun, x0, args=(), jac=None, hess=None, hessp=None,
7
+ inexact=True, **trust_region_options):
8
+ """
9
+ Minimization of a scalar function of one or more variables using
10
+ a nearly exact trust-region algorithm that only requires matrix-vector
11
+ products with the Hessian matrix.
12
+
13
+ .. versionadded:: 1.0.0
14
+
15
+ Options
16
+ -------
17
+ inexact : bool, optional
18
+ Accuracy to solve subproblems. If True, requires fewer nonlinear
19
+ iterations, but more vector products.
20
+ """
21
+
22
+ if jac is None:
23
+ raise ValueError('Jacobian is required for trust region '
24
+ 'exact minimization.')
25
+ if hess is None and hessp is None:
26
+ raise ValueError('Either the Hessian or the Hessian-vector product '
27
+ 'is required for Krylov trust-region minimization')
28
+
29
+ # tol_rel specifies the termination tolerance relative to the initial
30
+ # gradient norm in the Krylov subspace iteration.
31
+
32
+ # - tol_rel_i specifies the tolerance for interior convergence.
33
+ # - tol_rel_b specifies the tolerance for boundary convergence.
34
+ # in nonlinear programming applications it is not necessary to solve
35
+ # the boundary case as exactly as the interior case.
36
+
37
+ # - setting tol_rel_i=-2 leads to a forcing sequence in the Krylov
38
+ # subspace iteration leading to quadratic convergence if eventually
39
+ # the trust region stays inactive.
40
+ # - setting tol_rel_b=-3 leads to a forcing sequence in the Krylov
41
+ # subspace iteration leading to superlinear convergence as long
42
+ # as the iterates hit the trust region boundary.
43
+
44
+ # For details consult the documentation of trlib_krylov_min
45
+ # in _trlib/trlib_krylov.h
46
+ #
47
+ # Optimality of this choice of parameters among a range of possibilities
48
+ # has been tested on the unconstrained subset of the CUTEst library.
49
+
50
+ if inexact:
51
+ return _minimize_trust_region(fun, x0, args=args, jac=jac,
52
+ hess=hess, hessp=hessp,
53
+ subproblem=get_trlib_quadratic_subproblem(
54
+ tol_rel_i=-2.0, tol_rel_b=-3.0,
55
+ disp=trust_region_options.get('disp', False)
56
+ ),
57
+ **trust_region_options)
58
+ else:
59
+ return _minimize_trust_region(fun, x0, args=args, jac=jac,
60
+ hess=hess, hessp=hessp,
61
+ subproblem=get_trlib_quadratic_subproblem(
62
+ tol_rel_i=1e-8, tol_rel_b=1e-6,
63
+ disp=trust_region_options.get('disp', False)
64
+ ),
65
+ **trust_region_options)
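
A short usage sketch for this solver through the public `scipy.optimize.minimize` interface (for illustration, not part of the committed file; assumes a standard SciPy build with the `_trlib` extension available):

    import numpy as np
    from scipy.optimize import minimize, rosen, rosen_der, rosen_hess_prod

    # trust-krylov needs the gradient and either hess or hessp; supplying
    # only Hessian-vector products is the intended use of this method
    x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
    res = minimize(rosen, x0, method='trust-krylov',
                   jac=rosen_der, hessp=rosen_hess_prod)
    print(res.x)  # should be close to np.ones(5)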
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__basinhopping.cpython-310.pyc ADDED
Binary file (17.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test__basinhopping.py ADDED
@@ -0,0 +1,525 @@
1
+ """
2
+ Unit tests for the basin hopping global minimization algorithm.
3
+ """
4
+ import copy
5
+
6
+ from numpy.testing import (assert_almost_equal, assert_equal, assert_,
7
+ assert_allclose)
8
+ import pytest
9
+ from pytest import raises as assert_raises
10
+ import numpy as np
11
+ from numpy import cos, sin
12
+
13
+ from scipy.optimize import basinhopping, OptimizeResult
14
+ from scipy.optimize._basinhopping import (
15
+ Storage, RandomDisplacement, Metropolis, AdaptiveStepsize)
16
+
17
+
18
+ def func1d(x):
19
+ f = cos(14.5 * x - 0.3) + (x + 0.2) * x
20
+ df = np.array(-14.5 * sin(14.5 * x - 0.3) + 2. * x + 0.2)
21
+ return f, df
22
+
23
+
24
+ def func2d_nograd(x):
25
+ f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]
26
+ return f
27
+
28
+
29
+ def func2d(x):
30
+ f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]
31
+ df = np.zeros(2)
32
+ df[0] = -14.5 * sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2
33
+ df[1] = 2. * x[1] + 0.2
34
+ return f, df
35
+
36
+
37
+ def func2d_easyderiv(x):
38
+ f = 2.0*x[0]**2 + 2.0*x[0]*x[1] + 2.0*x[1]**2 - 6.0*x[0]
39
+ df = np.zeros(2)
40
+ df[0] = 4.0*x[0] + 2.0*x[1] - 6.0
41
+ df[1] = 2.0*x[0] + 4.0*x[1]
42
+
43
+ return f, df
44
+
45
+
46
+ class MyTakeStep1(RandomDisplacement):
47
+ """use a copy of displace, but have it set a special parameter to
48
+ make sure it's actually being used."""
49
+ def __init__(self):
50
+ self.been_called = False
51
+ super().__init__()
52
+
53
+ def __call__(self, x):
54
+ self.been_called = True
55
+ return super().__call__(x)
56
+
57
+
58
+ def myTakeStep2(x):
59
+ """redo RandomDisplacement in function form without the attribute stepsize
60
+ to make sure everything still works ok
61
+ """
62
+ s = 0.5
63
+ x += np.random.uniform(-s, s, np.shape(x))
64
+ return x
65
+
66
+
67
+ class MyAcceptTest:
68
+ """pass a custom accept test
69
+
70
+ This does nothing but make sure it's being used and ensure all the
71
+ possible return values are accepted
72
+ """
73
+ def __init__(self):
74
+ self.been_called = False
75
+ self.ncalls = 0
76
+ self.testres = [False, 'force accept', True, np.bool_(True),
77
+ np.bool_(False), [], {}, 0, 1]
78
+
79
+ def __call__(self, **kwargs):
80
+ self.been_called = True
81
+ self.ncalls += 1
82
+ if self.ncalls - 1 < len(self.testres):
83
+ return self.testres[self.ncalls - 1]
84
+ else:
85
+ return True
86
+
87
+
88
+ class MyCallBack:
89
+ """pass a custom callback function
90
+
91
+ This makes sure it's being used. It also returns True after 10
92
+ steps to ensure that it's stopping early.
93
+
94
+ """
95
+ def __init__(self):
96
+ self.been_called = False
97
+ self.ncalls = 0
98
+
99
+ def __call__(self, x, f, accepted):
100
+ self.been_called = True
101
+ self.ncalls += 1
102
+ if self.ncalls == 10:
103
+ return True
104
+
105
+
106
+ class TestBasinHopping:
107
+
108
+ def setup_method(self):
109
+ """ Tests setup.
110
+
111
+ Run tests based on the 1-D and 2-D functions described above.
112
+ """
113
+ self.x0 = (1.0, [1.0, 1.0])
114
+ self.sol = (-0.195, np.array([-0.195, -0.1]))
115
+
116
+ self.tol = 3 # number of decimal places
117
+
118
+ self.niter = 100
119
+ self.disp = False
120
+
121
+ # fix random seed
122
+ np.random.seed(1234)
123
+
124
+ self.kwargs = {"method": "L-BFGS-B", "jac": True}
125
+ self.kwargs_nograd = {"method": "L-BFGS-B"}
126
+
127
+ def test_TypeError(self):
128
+ # test the TypeErrors are raised on bad input
129
+ i = 1
130
+ # if take_step is passed, it must be callable
131
+ assert_raises(TypeError, basinhopping, func2d, self.x0[i],
132
+ take_step=1)
133
+ # if accept_test is passed, it must be callable
134
+ assert_raises(TypeError, basinhopping, func2d, self.x0[i],
135
+ accept_test=1)
136
+
137
+ def test_input_validation(self):
138
+ msg = 'target_accept_rate has to be in range \\(0, 1\\)'
139
+ with assert_raises(ValueError, match=msg):
140
+ basinhopping(func1d, self.x0[0], target_accept_rate=0.)
141
+ with assert_raises(ValueError, match=msg):
142
+ basinhopping(func1d, self.x0[0], target_accept_rate=1.)
143
+
144
+ msg = 'stepwise_factor has to be in range \\(0, 1\\)'
145
+ with assert_raises(ValueError, match=msg):
146
+ basinhopping(func1d, self.x0[0], stepwise_factor=0.)
147
+ with assert_raises(ValueError, match=msg):
148
+ basinhopping(func1d, self.x0[0], stepwise_factor=1.)
149
+
150
+ def test_1d_grad(self):
151
+ # test 1-D minimizations with gradient
152
+ i = 0
153
+ res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
154
+ niter=self.niter, disp=self.disp)
155
+ assert_almost_equal(res.x, self.sol[i], self.tol)
156
+
157
+ def test_2d(self):
158
+ # test 2d minimizations with gradient
159
+ i = 1
160
+ res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
161
+ niter=self.niter, disp=self.disp)
162
+ assert_almost_equal(res.x, self.sol[i], self.tol)
163
+ assert_(res.nfev > 0)
164
+
165
+ def test_njev(self):
166
+ # test njev is returned correctly
167
+ i = 1
168
+ minimizer_kwargs = self.kwargs.copy()
169
+ # L-BFGS-B doesn't use njev, but BFGS does
170
+ minimizer_kwargs["method"] = "BFGS"
171
+ res = basinhopping(func2d, self.x0[i],
172
+ minimizer_kwargs=minimizer_kwargs, niter=self.niter,
173
+ disp=self.disp)
174
+ assert_(res.nfev > 0)
175
+ assert_equal(res.nfev, res.njev)
176
+
177
+ def test_jac(self):
178
+ # test Jacobian returned
179
+ minimizer_kwargs = self.kwargs.copy()
180
+ # BFGS returns a Jacobian
181
+ minimizer_kwargs["method"] = "BFGS"
182
+
183
+ res = basinhopping(func2d_easyderiv, [0.0, 0.0],
184
+ minimizer_kwargs=minimizer_kwargs, niter=self.niter,
185
+ disp=self.disp)
186
+
187
+ assert_(hasattr(res.lowest_optimization_result, "jac"))
188
+
189
+ # in this case, the Jacobian is just [df/dx, df/dy]
190
+ _, jacobian = func2d_easyderiv(res.x)
191
+ assert_almost_equal(res.lowest_optimization_result.jac, jacobian,
192
+ self.tol)
193
+
194
+ def test_2d_nograd(self):
195
+ # test 2-D minimizations without gradient
196
+ i = 1
197
+ res = basinhopping(func2d_nograd, self.x0[i],
198
+ minimizer_kwargs=self.kwargs_nograd,
199
+ niter=self.niter, disp=self.disp)
200
+ assert_almost_equal(res.x, self.sol[i], self.tol)
201
+
202
+ def test_all_minimizers(self):
203
+ # Test 2-D minimizations with gradient. Nelder-Mead, Powell, and COBYLA
204
+ # don't accept jac=True, so aren't included here.
205
+ i = 1
206
+ methods = ['CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'SLSQP']
207
+ minimizer_kwargs = copy.copy(self.kwargs)
208
+ for method in methods:
209
+ minimizer_kwargs["method"] = method
210
+ res = basinhopping(func2d, self.x0[i],
211
+ minimizer_kwargs=minimizer_kwargs,
212
+ niter=self.niter, disp=self.disp)
213
+ assert_almost_equal(res.x, self.sol[i], self.tol)
214
+
215
+ def test_all_nograd_minimizers(self):
216
+ # Test 2-D minimizations without gradient. Newton-CG requires jac=True,
217
+ # so not included here.
218
+ i = 1
219
+ methods = ['CG', 'BFGS', 'L-BFGS-B', 'TNC', 'SLSQP',
220
+ 'Nelder-Mead', 'Powell', 'COBYLA']
221
+ minimizer_kwargs = copy.copy(self.kwargs_nograd)
222
+ for method in methods:
223
+ minimizer_kwargs["method"] = method
224
+ res = basinhopping(func2d_nograd, self.x0[i],
225
+ minimizer_kwargs=minimizer_kwargs,
226
+ niter=self.niter, disp=self.disp)
227
+ tol = self.tol
228
+ if method == 'COBYLA':
229
+ tol = 2
230
+ assert_almost_equal(res.x, self.sol[i], decimal=tol)
231
+
+    def test_pass_takestep(self):
+        # test that passing a custom takestep works
+        # also test that the stepsize is being adjusted
+        takestep = MyTakeStep1()
+        initial_step_size = takestep.stepsize
+        i = 1
+        res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
+                           niter=self.niter, disp=self.disp,
+                           take_step=takestep)
+        assert_almost_equal(res.x, self.sol[i], self.tol)
+        assert_(takestep.been_called)
+        # make sure that the built-in adaptive step size has been used
+        assert_(initial_step_size != takestep.stepsize)
+
+    def test_pass_simple_takestep(self):
+        # test that passing a custom takestep that lacks the `stepsize`
+        # attribute still works
+        takestep = myTakeStep2
+        i = 1
+        res = basinhopping(func2d_nograd, self.x0[i],
+                           minimizer_kwargs=self.kwargs_nograd,
+                           niter=self.niter, disp=self.disp,
+                           take_step=takestep)
+        assert_almost_equal(res.x, self.sol[i], self.tol)
+
+    def test_pass_accept_test(self):
+        # test passing a custom accept test
+        # makes sure it's being used and ensures all the possible return
+        # values are accepted.
+        accept_test = MyAcceptTest()
+        i = 1
+        # there's no point in running it more than a few steps.
+        basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
+                     niter=10, disp=self.disp, accept_test=accept_test)
+        assert_(accept_test.been_called)
+
+    def test_pass_callback(self):
+        # test passing a custom callback function
+        # This makes sure it's being used. It also returns True after 10 steps
+        # to ensure that it's stopping early.
+        callback = MyCallBack()
+        i = 1
+        # there's no point in running it more than a few steps.
+        res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
+                           niter=30, disp=self.disp, callback=callback)
+        assert_(callback.been_called)
+        assert_("callback" in res.message[0])
+        # One of the calls of MyCallBack is during BasinHoppingRunner
+        # construction, so there are only 9 remaining before MyCallBack stops
+        # the minimization.
+        assert_equal(res.nit, 9)
+
+    def test_minimizer_fail(self):
+        # test if a minimizer fails
+        i = 1
+        self.kwargs["options"] = dict(maxiter=0)
+        self.niter = 10
+        res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
+                           niter=self.niter, disp=self.disp)
+        # the number of failed minimizations should be the number of
+        # iterations + 1
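+        # (the extra failure comes from the initial minimization performed
+        # before the first basin-hopping step)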
+        assert_equal(res.nit + 1, res.minimization_failures)
+
+    def test_niter_zero(self):
+        # gh5915, what happens if you call basinhopping with niter=0
+        i = 0
+        basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
+                     niter=0, disp=self.disp)
+
+    def test_seed_reproducibility(self):
+        # seed should ensure reproducibility between runs
+        minimizer_kwargs = {"method": "L-BFGS-B", "jac": True}
+
+        f_1 = []
+
+        def callback(x, f, accepted):
+            f_1.append(f)
+
+        basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
+                     niter=10, callback=callback, seed=10)
+
+        f_2 = []
+
+        def callback2(x, f, accepted):
+            f_2.append(f)
+
+        basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
+                     niter=10, callback=callback2, seed=10)
+        assert_equal(np.array(f_1), np.array(f_2))
+
+    def test_random_gen(self):
+        # check that np.random.Generator can be used (numpy >= 1.17)
+        rng = np.random.default_rng(1)
+
+        minimizer_kwargs = {"method": "L-BFGS-B", "jac": True}
+
+        res1 = basinhopping(func2d, [1.0, 1.0],
+                            minimizer_kwargs=minimizer_kwargs,
+                            niter=10, seed=rng)
+
+        rng = np.random.default_rng(1)
+        res2 = basinhopping(func2d, [1.0, 1.0],
+                            minimizer_kwargs=minimizer_kwargs,
+                            niter=10, seed=rng)
+        assert_equal(res1.x, res2.x)
+
+    def test_monotonic_basin_hopping(self):
+        # test 1-D minimizations with gradient and T=0
+        i = 0
+        res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
+                           niter=self.niter, disp=self.disp, T=0)
+        assert_almost_equal(res.x, self.sol[i], self.tol)
+
+
+class Test_Storage:
+    def setup_method(self):
+        self.x0 = np.array(1)
+        self.f0 = 0
+
+        minres = OptimizeResult(success=True)
+        minres.x = self.x0
+        minres.fun = self.f0
+
+        self.storage = Storage(minres)
+
+    def test_higher_f_rejected(self):
+        new_minres = OptimizeResult(success=True)
+        new_minres.x = self.x0 + 1
+        new_minres.fun = self.f0 + 1
+
+        ret = self.storage.update(new_minres)
+        minres = self.storage.get_lowest()
+        assert_equal(self.x0, minres.x)
+        assert_equal(self.f0, minres.fun)
+        assert_(not ret)
+
+    @pytest.mark.parametrize('success', [True, False])
+    def test_lower_f_accepted(self, success):
+        new_minres = OptimizeResult(success=success)
+        new_minres.x = self.x0 + 1
+        new_minres.fun = self.f0 - 1
+
+        ret = self.storage.update(new_minres)
+        minres = self.storage.get_lowest()
+        assert (self.x0 != minres.x) == success  # can't use `is`
+        assert (self.f0 != minres.fun) == success  # left side is NumPy bool
+        assert ret is success
+
+
+class Test_RandomDisplacement:
+    def setup_method(self):
+        self.stepsize = 1.0
+        self.displace = RandomDisplacement(stepsize=self.stepsize)
+        self.N = 300000
+        self.x0 = np.zeros([self.N])
+
+    def test_random(self):
+        # the mean should be 0
+        # the variance should be (2*stepsize)**2 / 12
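+        # (displacements are drawn uniformly from [-stepsize, stepsize];
+        # a uniform distribution on [a, b] has variance (b - a)**2 / 12)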
+        # note these tests are random; they will fail from time to time
+        x = self.displace(self.x0)
+        v = (2. * self.stepsize) ** 2 / 12
+        assert_almost_equal(np.mean(x), 0., 1)
+        assert_almost_equal(np.var(x), v, 1)
+
+
+class Test_Metropolis:
+    def setup_method(self):
+        self.T = 2.
+        self.met = Metropolis(self.T)
+        self.res_new = OptimizeResult(success=True, fun=0.)
+        self.res_old = OptimizeResult(success=True, fun=1.)
+
+    def test_boolean_return(self):
+        # the return must be a bool, else an error will be raised in
+        # basinhopping
+        ret = self.met(res_new=self.res_new, res_old=self.res_old)
+        assert isinstance(ret, bool)
+
+    def test_lower_f_accepted(self):
+        assert_(self.met(res_new=self.res_new, res_old=self.res_old))
+
+    def test_accept(self):
+        # test that steps are randomly accepted for f_new > f_old
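+        # (with T=2 and f_new - f_old = 0.5 the acceptance probability is
+        # exp(-0.5/2) ~= 0.78, so 1000 trials all but guarantee that both
+        # outcomes are observed)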
+        one_accept = False
+        one_reject = False
+        for i in range(1000):
+            if one_accept and one_reject:
+                break
+            res_new = OptimizeResult(success=True, fun=1.)
+            res_old = OptimizeResult(success=True, fun=0.5)
+            ret = self.met(res_new=res_new, res_old=res_old)
+            if ret:
+                one_accept = True
+            else:
+                one_reject = True
+        assert_(one_accept)
+        assert_(one_reject)
+
+    def test_GH7495(self):
+        # an overflow in exp was producing a RuntimeWarning
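+        # (acceptance uses exp(-(f_new - f_old)/T); here -(0 - 2000)/2 = 1000
+        # and exp(1000) overflows a float64, so overflow must be handled)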
+        # create own object here in case someone changes self.T
+        met = Metropolis(2)
+        res_new = OptimizeResult(success=True, fun=0.)
+        res_old = OptimizeResult(success=True, fun=2000)
+        with np.errstate(over='raise'):
+            met.accept_reject(res_new=res_new, res_old=res_old)
+
+    def test_gh7799(self):
+        # gh-7799 reported a problem in which local search was successful but
+        # basinhopping returned an invalid solution. Show that this is fixed.
+        def func(x):
+            return (x**2 - 8)**2 + (x + 2)**2
+
+        x0 = -4
+        limit = 50  # Constrain to func value >= 50
+        con = {'type': 'ineq', 'fun': lambda x: func(x) - limit},
+        res = basinhopping(func, x0, 30, minimizer_kwargs={'constraints': con})
+        assert res.success
+        assert_allclose(res.fun, limit, rtol=1e-6)
+
+    def test_accept_gh7799(self):
+        # Metropolis should not accept the result of an unsuccessful new local
+        # search if the old local search was successful
+
+        met = Metropolis(0)  # monotonic basin hopping
+        res_new = OptimizeResult(success=True, fun=0.)
+        res_old = OptimizeResult(success=True, fun=1.)
+
+        # if new local search was successful and energy is lower, accept
+        assert met(res_new=res_new, res_old=res_old)
+        # if new res is unsuccessful, don't accept - even if energy is lower
+        res_new.success = False
+        assert not met(res_new=res_new, res_old=res_old)
+        # ...unless the old res was unsuccessful, too. In that case, why not?
+        res_old.success = False
+        assert met(res_new=res_new, res_old=res_old)
+
+    def test_reject_all_gh7799(self):
+        # Test the behavior when there is no feasible solution
+        def fun(x):
+            return x@x
+
+        def constraint(x):
+            return x + 1
+
+        kwargs = {'constraints': {'type': 'eq', 'fun': constraint},
+                  'bounds': [(0, 1), (0, 1)], 'method': 'slsqp'}
+        res = basinhopping(fun, x0=[2, 3], niter=10, minimizer_kwargs=kwargs)
+        assert not res.success
+
+
+class Test_AdaptiveStepsize:
+    def setup_method(self):
+        self.stepsize = 1.
+        self.ts = RandomDisplacement(stepsize=self.stepsize)
+        self.target_accept_rate = 0.5
+        self.takestep = AdaptiveStepsize(takestep=self.ts, verbose=False,
+                                         accept_rate=self.target_accept_rate)
+
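+    # The tests below rely on AdaptiveStepsize periodically comparing the
+    # observed acceptance rate against the target rate and multiplying or
+    # dividing the wrapped takestep's stepsize by a constant factor to
+    # steer toward the target.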
+    def test_adaptive_increase(self):
+        # if few steps are rejected, the stepsize should increase
+        x = 0.
+        self.takestep(x)
+        self.takestep.report(False)
+        for i in range(self.takestep.interval):
+            self.takestep(x)
+            self.takestep.report(True)
+        assert_(self.ts.stepsize > self.stepsize)
+
+    def test_adaptive_decrease(self):
+        # if few steps are accepted, the stepsize should decrease
+        x = 0.
+        self.takestep(x)
+        self.takestep.report(True)
+        for i in range(self.takestep.interval):
+            self.takestep(x)
+            self.takestep.report(False)
+        assert_(self.ts.stepsize < self.stepsize)
+
+    def test_all_accepted(self):
+        # test that everything works OK if all steps were accepted
+        x = 0.
+        for i in range(self.takestep.interval + 1):
+            self.takestep(x)
+            self.takestep.report(True)
+        assert_(self.ts.stepsize > self.stepsize)
+
+    def test_all_rejected(self):
+        # test that everything works OK if all steps were rejected
+        x = 0.
+        for i in range(self.takestep.interval + 1):
+            self.takestep(x)
+            self.takestep.report(False)
+        assert_(self.ts.stepsize < self.stepsize)
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test__differential_evolution.py ADDED
@@ -0,0 +1,1677 @@
+"""
+Unit tests for the differential evolution global minimization algorithm.
+"""
+import multiprocessing
+import platform
+
+from scipy.optimize._differentialevolution import (DifferentialEvolutionSolver,
+                                                   _ConstraintWrapper)
+from scipy.optimize import differential_evolution, OptimizeResult
+from scipy.optimize._constraints import (Bounds, NonlinearConstraint,
+                                         LinearConstraint)
+from scipy.optimize import rosen, minimize
+from scipy.sparse import csr_matrix
+from scipy import stats
+
+import numpy as np
+from numpy.testing import (assert_equal, assert_allclose, assert_almost_equal,
+                           assert_string_equal, assert_, suppress_warnings)
+from pytest import raises as assert_raises, warns
+import pytest
+
+
+class TestDifferentialEvolutionSolver:
+
+    def setup_method(self):
+        self.old_seterr = np.seterr(invalid='raise')
+        self.limits = np.array([[0., 0.],
+                                [2., 2.]])
+        self.bounds = [(0., 2.), (0., 2.)]
+
+        self.dummy_solver = DifferentialEvolutionSolver(self.quadratic,
+                                                        [(0, 100)])
+
+        # dummy_solver2 will be used to test mutation strategies
+        self.dummy_solver2 = DifferentialEvolutionSolver(self.quadratic,
+                                                         [(0, 1)],
+                                                         popsize=7,
+                                                         mutation=0.5)
+        # create a population that's only 7 members long
+        # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
+        population = np.atleast_2d(np.arange(0.1, 0.8, 0.1)).T
+        self.dummy_solver2.population = population
+
+    def teardown_method(self):
+        np.seterr(**self.old_seterr)
+
+    def quadratic(self, x):
+        return x[0]**2
+
+    def test__strategy_resolves(self):
+        # test that the correct mutation function is resolved by
+        # different requested strategy arguments
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='best1exp')
+        assert_equal(solver.strategy, 'best1exp')
+        assert_equal(solver.mutation_func.__name__, '_best1')
+
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='best1bin')
+        assert_equal(solver.strategy, 'best1bin')
+        assert_equal(solver.mutation_func.__name__, '_best1')
+
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='rand1bin')
+        assert_equal(solver.strategy, 'rand1bin')
+        assert_equal(solver.mutation_func.__name__, '_rand1')
+
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='rand1exp')
+        assert_equal(solver.strategy, 'rand1exp')
+        assert_equal(solver.mutation_func.__name__, '_rand1')
+
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='rand2exp')
+        assert_equal(solver.strategy, 'rand2exp')
+        assert_equal(solver.mutation_func.__name__, '_rand2')
+
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='best2bin')
+        assert_equal(solver.strategy, 'best2bin')
+        assert_equal(solver.mutation_func.__name__, '_best2')
+
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='rand2bin')
+        assert_equal(solver.strategy, 'rand2bin')
+        assert_equal(solver.mutation_func.__name__, '_rand2')
+
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='rand2exp')
+        assert_equal(solver.strategy, 'rand2exp')
+        assert_equal(solver.mutation_func.__name__, '_rand2')
+
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='randtobest1bin')
+        assert_equal(solver.strategy, 'randtobest1bin')
+        assert_equal(solver.mutation_func.__name__, '_randtobest1')
+
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='randtobest1exp')
+        assert_equal(solver.strategy, 'randtobest1exp')
+        assert_equal(solver.mutation_func.__name__, '_randtobest1')
+
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='currenttobest1bin')
+        assert_equal(solver.strategy, 'currenttobest1bin')
+        assert_equal(solver.mutation_func.__name__, '_currenttobest1')
+
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='currenttobest1exp')
+        assert_equal(solver.strategy, 'currenttobest1exp')
+        assert_equal(solver.mutation_func.__name__, '_currenttobest1')
+
+    def test__mutate1(self):
+        # strategies */1/*, i.e. rand/1/bin, best/1/exp, etc.
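+        # (with scale F=0.5 and population [0.1, ..., 0.7]: best/1 gives
+        # 0.1 + 0.5*(0.3 - 0.4) = 0.05 and rand/1 gives
+        # 0.3 + 0.5*(0.4 - 0.5) = 0.25 for the sample indices used below)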
+        result = np.array([0.05])
+        trial = self.dummy_solver2._best1((2, 3, 4, 5, 6))
+        assert_allclose(trial, result)
+
+        result = np.array([0.25])
+        trial = self.dummy_solver2._rand1((2, 3, 4, 5, 6))
+        assert_allclose(trial, result)
+
+    def test__mutate2(self):
+        # strategies */2/*, i.e. rand/2/bin, best/2/exp, etc.
+        # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
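+        # (best/2 gives 0.1 + 0.5*(0.3 + 0.4 - 0.5 - 0.6) = -0.1 and rand/2
+        # gives 0.3 + 0.5*(0.4 + 0.5 - 0.6 - 0.7) = 0.1)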
+
+        result = np.array([-0.1])
+        trial = self.dummy_solver2._best2((2, 3, 4, 5, 6))
+        assert_allclose(trial, result)
+
+        result = np.array([0.1])
+        trial = self.dummy_solver2._rand2((2, 3, 4, 5, 6))
+        assert_allclose(trial, result)
+
+    def test__randtobest1(self):
+        # strategies randtobest/1/*
+        result = np.array([0.15])
+        trial = self.dummy_solver2._randtobest1((2, 3, 4, 5, 6))
+        assert_allclose(trial, result)
+
+    def test__currenttobest1(self):
+        # strategies currenttobest/1/*
+        result = np.array([0.1])
+        trial = self.dummy_solver2._currenttobest1(1, (2, 3, 4, 5, 6))
+        assert_allclose(trial, result)
+
+    def test_can_init_with_dithering(self):
+        mutation = (0.5, 1)
+        solver = DifferentialEvolutionSolver(self.quadratic,
+                                             self.bounds,
+                                             mutation=mutation)
+
+        assert_equal(solver.dither, list(mutation))
+
+    def test_invalid_mutation_values_arent_accepted(self):
+        func = rosen
+        mutation = (0.5, 3)
+        assert_raises(ValueError,
+                      DifferentialEvolutionSolver,
+                      func,
+                      self.bounds,
+                      mutation=mutation)
+
+        mutation = (-1, 1)
+        assert_raises(ValueError,
+                      DifferentialEvolutionSolver,
+                      func,
+                      self.bounds,
+                      mutation=mutation)
+
+        mutation = (0.1, np.nan)
+        assert_raises(ValueError,
+                      DifferentialEvolutionSolver,
+                      func,
+                      self.bounds,
+                      mutation=mutation)
+
+        mutation = 0.5
+        solver = DifferentialEvolutionSolver(func,
+                                             self.bounds,
+                                             mutation=mutation)
+        assert_equal(0.5, solver.scale)
+        assert_equal(None, solver.dither)
+
+    def test_invalid_functional(self):
+        def func(x):
+            return np.array([np.sum(x ** 2), np.sum(x)])
+
+        with assert_raises(
+                RuntimeError,
+                match=r"func\(x, \*args\) must return a scalar value"):
+            differential_evolution(func, [(-2, 2), (-2, 2)])
+
+    def test__scale_parameters(self):
+        trial = np.array([0.3])
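+        # (a unit-interval value u maps to 0.5*(lb + ub) + (u - 0.5)*(ub - lb),
+        # so for bounds (0, 100): 0.3 -> 50 + (0.3 - 0.5)*100 = 30)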
+        assert_equal(30, self.dummy_solver._scale_parameters(trial))
+
+        # it should also work with the limits reversed
+        self.dummy_solver.limits = np.array([[100], [0.]])
+        assert_equal(30, self.dummy_solver._scale_parameters(trial))
+
+    def test__unscale_parameters(self):
+        trial = np.array([30])
+        assert_equal(0.3, self.dummy_solver._unscale_parameters(trial))
+
+        # it should also work with the limits reversed
+        self.dummy_solver.limits = np.array([[100], [0.]])
+        assert_equal(0.3, self.dummy_solver._unscale_parameters(trial))
+
+    def test_equal_bounds(self):
+        with np.errstate(invalid='raise'):
+            solver = DifferentialEvolutionSolver(
+                self.quadratic,
+                bounds=[(2.0, 2.0), (1.0, 3.0)]
+            )
+            v = solver._unscale_parameters([2.0, 2.0])
+            assert_allclose(v, 0.5)
+
+        res = differential_evolution(self.quadratic, [(2.0, 2.0), (3.0, 3.0)])
+        assert_equal(res.x, [2.0, 3.0])
+
+    def test__ensure_constraint(self):
+        trial = np.array([1.1, -100, 0.9, 2., 300., -0.00001])
+        self.dummy_solver._ensure_constraint(trial)
+
+        assert_equal(trial[2], 0.9)
+        assert_(np.logical_and(trial >= 0, trial <= 1).all())
+
+    def test_differential_evolution(self):
+        # test that the Jmin of DifferentialEvolutionSolver
+        # is the same as the function evaluation
+        solver = DifferentialEvolutionSolver(
+            self.quadratic, [(-2, 2)], maxiter=1, polish=False
+        )
+        result = solver.solve()
+        assert_equal(result.fun, self.quadratic(result.x))
+
+        solver = DifferentialEvolutionSolver(
+            self.quadratic, [(-2, 2)], maxiter=1, polish=True
+        )
+        result = solver.solve()
+        assert_equal(result.fun, self.quadratic(result.x))
+
+    def test_best_solution_retrieval(self):
+        # test that the getter property method for the best solution works.
+        solver = DifferentialEvolutionSolver(self.quadratic, [(-2, 2)])
+        result = solver.solve()
+        assert_equal(result.x, solver.x)
+
+    def test_intermediate_result(self):
+        # Check that intermediate result object passed into the callback
+        # function contains the expected information and that raising
+        # `StopIteration` causes the expected behavior.
+        maxiter = 10
+
+        def func(x):
+            val = rosen(x)
+            if val < func.val:
+                func.x = x
+                func.val = val
+            return val
+        func.x = None
+        func.val = np.inf
+
+        def callback(intermediate_result):
+            callback.nit += 1
+            callback.intermediate_result = intermediate_result
+            assert intermediate_result.population.ndim == 2
+            assert intermediate_result.population.shape[1] == 2
+            assert intermediate_result.nit == callback.nit
+
+            # Check that `x` and `fun` attributes are the best found so far
+            assert_equal(intermediate_result.x, callback.func.x)
+            assert_equal(intermediate_result.fun, callback.func.val)
+
+            # Check for consistency between `fun`, `population_energies`,
+            # `x`, and `population`
+            assert_equal(intermediate_result.fun, rosen(intermediate_result.x))
+            for i in range(len(intermediate_result.population_energies)):
+                res = intermediate_result.population_energies[i]
+                ref = rosen(intermediate_result.population[i])
+                assert_equal(res, ref)
+            assert_equal(intermediate_result.x,
+                         intermediate_result.population[0])
+            assert_equal(intermediate_result.fun,
+                         intermediate_result.population_energies[0])
+
+            assert intermediate_result.message == 'in progress'
+            assert intermediate_result.success is True
+            assert isinstance(intermediate_result, OptimizeResult)
+            if callback.nit == maxiter:
+                raise StopIteration
+        callback.nit = 0
+        callback.intermediate_result = None
+        callback.func = func
+
+        bounds = [(0, 2), (0, 2)]
+        kwargs = dict(func=func, bounds=bounds, seed=838245, polish=False)
+        res = differential_evolution(**kwargs, callback=callback)
+        ref = differential_evolution(**kwargs, maxiter=maxiter)
+
+        # Check that final `intermediate_result` is equivalent to returned
+        # result object and that terminating with callback `StopIteration`
+        # after `maxiter` iterations is equivalent to terminating with
+        # `maxiter` parameter.
+        assert res.success is ref.success is False
+        assert callback.nit == res.nit == maxiter
+        assert res.message == 'callback function requested stop early'
+        assert ref.message == 'Maximum number of iterations has been exceeded.'
+        for field, val in ref.items():
+            if field in {'message', 'success'}:  # checked separately
+                continue
+            assert_equal(callback.intermediate_result[field], val)
+            assert_equal(res[field], val)
+
+        # Check that polish occurs after `StopIteration` as advertised
+        callback.nit = 0
+        func.val = np.inf
+        kwargs['polish'] = True
+        res = differential_evolution(**kwargs, callback=callback)
+        assert res.fun < ref.fun
+
+    def test_callback_terminates(self):
+        # test that if the callback returns true, then the minimization halts
+        bounds = [(0, 2), (0, 2)]
+        expected_msg = 'callback function requested stop early'
+
+        def callback_python_true(param, convergence=0.):
+            return True
+
+        result = differential_evolution(
+            rosen, bounds, callback=callback_python_true
+        )
+        assert_string_equal(result.message, expected_msg)
+
+        # if callback raises StopIteration then solve should be interrupted
+        def callback_stop(intermediate_result):
+            raise StopIteration
+
+        result = differential_evolution(rosen, bounds, callback=callback_stop)
+        assert not result.success
+
+        def callback_evaluates_true(param, convergence=0.):
+            # DE should stop if bool(self.callback) is True
+            return [10]
+
+        result = differential_evolution(rosen, bounds,
+                                        callback=callback_evaluates_true)
+        assert_string_equal(result.message, expected_msg)
+        assert not result.success
+
+        def callback_evaluates_false(param, convergence=0.):
+            return []
+
+        result = differential_evolution(rosen, bounds,
+                                        callback=callback_evaluates_false)
+        assert result.success
+
+    def test_args_tuple_is_passed(self):
+        # test that the args tuple is passed to the cost function properly.
+        bounds = [(-10, 10)]
+        args = (1., 2., 3.)
+
+        def quadratic(x, *args):
+            if type(args) != tuple:
+                raise ValueError('args should be a tuple')
+            return args[0] + args[1] * x + args[2] * x**2.
+
+        result = differential_evolution(quadratic,
+                                        bounds,
+                                        args=args,
+                                        polish=True)
+        assert_almost_equal(result.fun, 2 / 3.)
+
+    def test_init_with_invalid_strategy(self):
+        # test that passing an invalid strategy raises ValueError
+        func = rosen
+        bounds = [(-3, 3)]
+        assert_raises(ValueError,
+                      differential_evolution,
+                      func,
+                      bounds,
+                      strategy='abc')
+
+    def test_bounds_checking(self):
+        # test that the bounds checking works
+        func = rosen
+        bounds = [(-3)]
+        assert_raises(ValueError,
+                      differential_evolution,
+                      func,
+                      bounds)
+        bounds = [(-3, 3), (3, 4, 5)]
+        assert_raises(ValueError,
+                      differential_evolution,
+                      func,
+                      bounds)
+
+        # test that we can use a new-type Bounds object
+        result = differential_evolution(rosen, Bounds([0, 0], [2, 2]))
+        assert_almost_equal(result.x, (1., 1.))
+
+    def test_select_samples(self):
+        # select_samples should return 5 separate random numbers.
+        limits = np.arange(12., dtype='float64').reshape(2, 6)
+        bounds = list(zip(limits[0, :], limits[1, :]))
+        solver = DifferentialEvolutionSolver(None, bounds, popsize=1)
+        candidate = 0
+        r1, r2, r3, r4, r5 = solver._select_samples(candidate, 5)
+        assert_equal(
+            len(np.unique(np.array([candidate, r1, r2, r3, r4, r5]))), 6)
+
+    def test_maxiter_stops_solve(self):
+        # test that if the maximum number of iterations is exceeded
+        # the solver stops.
+        solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=1)
+        result = solver.solve()
+        assert_equal(result.success, False)
+        assert_equal(result.message,
+                     'Maximum number of iterations has been exceeded.')
+
+    def test_maxfun_stops_solve(self):
+        # test that if the maximum number of function evaluations is exceeded
+        # during initialisation the solver stops
+        solver = DifferentialEvolutionSolver(rosen, self.bounds, maxfun=1,
+                                             polish=False)
+        result = solver.solve()
+
+        assert_equal(result.nfev, 2)
+        assert_equal(result.success, False)
+        assert_equal(result.message,
+                     'Maximum number of function evaluations has '
+                     'been exceeded.')
+
+        # test that if the maximum number of function evaluations is exceeded
+        # during the actual minimisation, then the solver stops.
+        # Have to turn polishing off, as this will still occur even if maxfun
+        # is reached. For popsize=5 and len(bounds)=2, then there are only 10
+        # function evaluations during initialisation.
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             popsize=5,
+                                             polish=False,
+                                             maxfun=40)
+        result = solver.solve()
+
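+        # (with immediate updating the budget appears to be checked after
+        # each evaluation, so the solver stops one evaluation past maxfun=40)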
+        assert_equal(result.nfev, 41)
+        assert_equal(result.success, False)
+        assert_equal(result.message,
+                     'Maximum number of function evaluations has '
+                     'been exceeded.')
+
+        # now repeat for the updating='deferred' version
+        # 47 function evaluations is not a multiple of the population size,
+        # so maxfun is reached partway through a population evaluation.
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             popsize=5,
+                                             polish=False,
+                                             maxfun=47,
+                                             updating='deferred')
+        result = solver.solve()
+
+        assert_equal(result.nfev, 47)
+        assert_equal(result.success, False)
+        assert_equal(result.message,
+                     'Maximum number of function evaluations has '
+                     'been reached.')
+
+    def test_quadratic(self):
+        # test the quadratic function from object
+        solver = DifferentialEvolutionSolver(self.quadratic,
+                                             [(-100, 100)],
+                                             tol=0.02)
+        solver.solve()
+        assert_equal(np.argmin(solver.population_energies), 0)
+
+    def test_quadratic_from_diff_ev(self):
+        # test the quadratic function from differential_evolution function
+        differential_evolution(self.quadratic,
+                               [(-100, 100)],
+                               tol=0.02)
+
+    def test_seed_gives_repeatability(self):
+        result = differential_evolution(self.quadratic,
+                                        [(-100, 100)],
+                                        polish=False,
+                                        seed=1,
+                                        tol=0.5)
+        result2 = differential_evolution(self.quadratic,
+                                         [(-100, 100)],
+                                         polish=False,
+                                         seed=1,
+                                         tol=0.5)
+        assert_equal(result.x, result2.x)
+        assert_equal(result.nfev, result2.nfev)
+
+    def test_random_generator(self):
+        # check that np.random.Generator can be used (numpy >= 1.17)
+        # obtain a np.random.Generator object
+        rng = np.random.default_rng()
+
+        inits = ['random', 'latinhypercube', 'sobol', 'halton']
+        for init in inits:
+            differential_evolution(self.quadratic,
+                                   [(-100, 100)],
+                                   polish=False,
+                                   seed=rng,
+                                   tol=0.5,
+                                   init=init)
+
+    def test_exp_runs(self):
+        # test whether exponential mutation loop runs
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='best1exp',
+                                             maxiter=1)
+
+        solver.solve()
+
+    def test_gh_4511_regression(self):
+        # This modification of the differential evolution docstring example
+        # uses a custom popsize that had triggered an off-by-one error.
+        # Because we do not care about solving the optimization problem in
+        # this test, we use maxiter=1 to reduce the testing time.
+        bounds = [(-5, 5), (-5, 5)]
+        # result = differential_evolution(rosen, bounds, popsize=1815,
+        #                                 maxiter=1)
+
+        # the original issue arose because of rounding error in arange, with
+        # linspace being a much better solution. 1815 is quite a large popsize
+        # to use and results in a long test time (~13s). I used the original
+        # issue to figure out the lowest number of samples that would cause
+        # this rounding error to occur, 49.
+        differential_evolution(rosen, bounds, popsize=49, maxiter=1)
+
+    def test_calculate_population_energies(self):
+        # if popsize is 3, then the overall generation has size (6,)
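+        # (the population holds popsize * len(bounds) = 3 * 2 = 6 members)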
+        solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3)
+        solver._calculate_population_energies(solver.population)
+        solver._promote_lowest_energy()
+        assert_equal(np.argmin(solver.population_energies), 0)
+
+        # initial calculation of the energies should require 6 nfev.
+        assert_equal(solver._nfev, 6)
+
+    def test_iteration(self):
+        # test that DifferentialEvolutionSolver is iterable
+        # if popsize is 3, then the overall generation has size (6,)
+        solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3,
+                                             maxfun=12)
+        x, fun = next(solver)
+        assert_equal(np.size(x, 0), 2)
+
+        # 6 nfev are required for initial calculation of energies, 6 nfev are
+        # required for the evolution of the 6 population members.
+        assert_equal(solver._nfev, 12)
+
+        # the next generation should halt because it exceeds maxfun
+        assert_raises(StopIteration, next, solver)
+
+        # check a proper minimisation can be done by an iterable solver
+        solver = DifferentialEvolutionSolver(rosen, self.bounds)
+        _, fun_prev = next(solver)
+        for i, soln in enumerate(solver):
+            x_current, fun_current = soln
+            assert fun_prev >= fun_current
+            _, fun_prev = x_current, fun_current
+            # need to have this otherwise the solver would never stop.
+            if i == 50:
+                break
+
+    def test_convergence(self):
+        solver = DifferentialEvolutionSolver(rosen, self.bounds, tol=0.2,
+                                             polish=False)
+        solver.solve()
+        assert_(solver.convergence < 0.2)
+
+    def test_maxiter_none_GH5731(self):
+        # Pre 0.17 the default for maxiter and maxfun was None; the numerical
+        # defaults are now 1000 and np.inf. However, some scripts still supply
+        # None for both of those, which would raise a TypeError in the solve
+        # method if it were not handled.
+        solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=None,
+                                             maxfun=None)
+        solver.solve()
+
+    def test_population_initiation(self):
+        # test the different modes of population initiation
+
+        # init must be a recognised keyword, e.g. 'latinhypercube' or
+        # 'random'; a ValueError is raised if something else is passed in
+        assert_raises(ValueError,
+                      DifferentialEvolutionSolver,
+                      *(rosen, self.bounds),
+                      **{'init': 'rubbish'})
+
+        solver = DifferentialEvolutionSolver(rosen, self.bounds)
+
+        # check that population initiation:
+        # 1) resets _nfev to 0
+        # 2) all population energies are np.inf
+        solver.init_population_random()
+        assert_equal(solver._nfev, 0)
+        assert_(np.all(np.isinf(solver.population_energies)))
+
+        solver.init_population_lhs()
+        assert_equal(solver._nfev, 0)
+        assert_(np.all(np.isinf(solver.population_energies)))
+
+        solver.init_population_qmc(qmc_engine='halton')
+        assert_equal(solver._nfev, 0)
+        assert_(np.all(np.isinf(solver.population_energies)))
+
+        solver = DifferentialEvolutionSolver(rosen, self.bounds, init='sobol')
+        solver.init_population_qmc(qmc_engine='sobol')
+        assert_equal(solver._nfev, 0)
+        assert_(np.all(np.isinf(solver.population_energies)))
+
+        # we should be able to initialize with our own array
+        population = np.linspace(-1, 3, 10).reshape(5, 2)
+        solver = DifferentialEvolutionSolver(rosen, self.bounds,
+                                             init=population,
+                                             strategy='best2bin',
+                                             atol=0.01, seed=1, popsize=5)
+
+        assert_equal(solver._nfev, 0)
+        assert_(np.all(np.isinf(solver.population_energies)))
+        assert_(solver.num_population_members == 5)
+        assert_(solver.population_shape == (5, 2))
+
+        # check that the population was initialized correctly
+        unscaled_population = np.clip(solver._unscale_parameters(population),
+                                      0, 1)
+        assert_almost_equal(solver.population[:5], unscaled_population)
+
+        # population values need to be clipped to bounds
+        assert_almost_equal(np.min(solver.population[:5]), 0)
+        assert_almost_equal(np.max(solver.population[:5]), 1)
+
+        # shouldn't be able to initialize with an array if it's the wrong
+        # shape; this would have too many parameters
+        population = np.linspace(-1, 3, 15).reshape(5, 3)
+        assert_raises(ValueError,
+                      DifferentialEvolutionSolver,
+                      *(rosen, self.bounds),
+                      **{'init': population})
+
+        # provide an initial solution
+        # bounds are [(0, 2), (0, 2)]
+        x0 = np.random.uniform(low=0.0, high=2.0, size=2)
+        solver = DifferentialEvolutionSolver(
+            rosen, self.bounds, x0=x0
+        )
+        # parameters are scaled to unit interval
+        assert_allclose(solver.population[0], x0 / 2.0)
+
+    def test_x0(self):
+        # smoke test that checks that x0 is usable.
+        res = differential_evolution(rosen, self.bounds, x0=[0.2, 0.8])
+        assert res.success
+
+        # check what happens if some of the x0 lie outside the bounds
+        with assert_raises(ValueError):
+            differential_evolution(rosen, self.bounds, x0=[0.2, 2.1])
+
+    def test_infinite_objective_function(self):
+        # Test that there are no problems if the objective function
+        # returns inf on some runs
+        def sometimes_inf(x):
+            if x[0] < .5:
+                return np.inf
+            return x[1]
+
+        bounds = [(0, 1), (0, 1)]
+        differential_evolution(sometimes_inf, bounds=bounds, disp=False)
+
+    def test_deferred_updating(self):
+        # check setting of deferred updating, with default workers
+        bounds = [(0., 2.), (0., 2.)]
+        solver = DifferentialEvolutionSolver(rosen, bounds,
+                                             updating='deferred')
+        assert_(solver._updating == 'deferred')
+        assert_(solver._mapwrapper._mapfunc is map)
+        solver.solve()
+
+    def test_immediate_updating(self):
+        # check setting of immediate updating, with default workers
+        bounds = [(0., 2.), (0., 2.)]
+        solver = DifferentialEvolutionSolver(rosen, bounds)
+        assert_(solver._updating == 'immediate')
+
+        # Safely forking from a multithreaded process is
+        # problematic, and deprecated in Python 3.12, so
+        # we use a slower but portable alternative
+        # see gh-19848
+        ctx = multiprocessing.get_context("spawn")
+        with ctx.Pool(2) as p:
+            # should raise a UserWarning because the updating='immediate'
+            # is being overridden by the workers keyword
+            with warns(UserWarning):
+                with DifferentialEvolutionSolver(rosen, bounds,
+                                                 workers=p.map) as s:
+                    pass
+            assert s._updating == 'deferred'
+
+    def test_parallel(self):
+        # smoke test for parallelization with deferred updating
+        bounds = [(0., 2.), (0., 2.)]
+        with multiprocessing.Pool(2) as p, DifferentialEvolutionSolver(
+                rosen, bounds, updating='deferred', workers=p.map) as solver:
+            assert_(solver._mapwrapper.pool is not None)
+            assert_(solver._updating == 'deferred')
+            solver.solve()
+
+        with DifferentialEvolutionSolver(rosen, bounds, updating='deferred',
+                                         workers=2) as solver:
+            assert_(solver._mapwrapper.pool is not None)
+            assert_(solver._updating == 'deferred')
+            solver.solve()
+
+    def test_converged(self):
+        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)])
+        solver.solve()
+        assert_(solver.converged())
+
+    def test_constraint_violation_fn(self):
+        def constr_f(x):
+            return [x[0] + x[1]]
+
+        def constr_f2(x):
+            return np.array([x[0]**2 + x[1], x[0] - x[1]])
+
+        nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)
+
+        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
+                                             constraints=(nlc))
+
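+        # constr_f([1, 1]) = 2, which exceeds the upper bound 1.9 by 0.1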
+        cv = solver._constraint_violation_fn(np.array([1.0, 1.0]))
+        assert_almost_equal(cv, 0.1)
+
+        nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8)
+        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
+                                             constraints=(nlc, nlc2))
+
+        # for multiple constraints the constraint violations should
+        # be concatenated.
+        xs = [(1.2, 1), (2.0, 2.0), (0.5, 0.5)]
+        vs = [(0.3, 0.64, 0.0), (2.1, 4.2, 0.0), (0, 0, 0)]
+
+        for x, v in zip(xs, vs):
+            cv = solver._constraint_violation_fn(np.array(x))
+            assert_allclose(cv, np.atleast_2d(v))
+
+        # vectorized calculation of a series of solutions
+        assert_allclose(
+            solver._constraint_violation_fn(np.array(xs)), np.array(vs)
+        )
+
+        # the following line is used in _calculate_population_feasibilities.
+        # _constraint_violation_fn returns a (1, M) array when
+        # x.shape == (N,), i.e. a single solution. Therefore this list
+        # comprehension should generate an (S, 1, M) array.
+        constraint_violation = np.array([solver._constraint_violation_fn(x)
+                                         for x in np.array(xs)])
+        assert constraint_violation.shape == (3, 1, 3)
+
+        # we need reasonable error messages if the constraint function doesn't
+        # return the right thing
+        def constr_f3(x):
+            # returns (S, M), rather than (M, S)
+            return constr_f2(x).T
+
+        nlc2 = NonlinearConstraint(constr_f3, -np.inf, 1.8)
+        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
+                                             constraints=(nlc, nlc2),
+                                             vectorized=False)
+        solver.vectorized = True
+        with pytest.raises(
+                RuntimeError, match="An array returned from a Constraint"
+        ):
+            solver._constraint_violation_fn(np.array(xs))
+
+    def test_constraint_population_feasibilities(self):
+        def constr_f(x):
+            return [x[0] + x[1]]
+
+        def constr_f2(x):
+            return [x[0]**2 + x[1], x[0] - x[1]]
+
+        nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)
+
+        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
+                                             constraints=(nlc))
+
+        # are population feasibilities correct
+        # [0.5, 0.5] corresponds to scaled values of [1., 1.]
+        feas, cv = solver._calculate_population_feasibilities(
+            np.array([[0.5, 0.5], [1., 1.]]))
+        assert_equal(feas, [False, False])
+        assert_almost_equal(cv, np.array([[0.1], [2.1]]))
+        assert cv.shape == (2, 1)
+
+        nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8)
+
+        for vectorize in [False, True]:
+            solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
+                                                 constraints=(nlc, nlc2),
+                                                 vectorized=vectorize,
+                                                 updating='deferred')
+
+            feas, cv = solver._calculate_population_feasibilities(
+                np.array([[0.5, 0.5], [0.6, 0.5]]))
+            assert_equal(feas, [False, False])
+            assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [0.3, 0.64, 0]]))
+
+            feas, cv = solver._calculate_population_feasibilities(
+                np.array([[0.5, 0.5], [1., 1.]]))
+            assert_equal(feas, [False, False])
+            assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [2.1, 4.2, 0]]))
+            assert cv.shape == (2, 3)
+
+            feas, cv = solver._calculate_population_feasibilities(
+                np.array([[0.25, 0.25], [1., 1.]]))
+            assert_equal(feas, [True, False])
+            assert_almost_equal(cv, np.array([[0.0, 0.0, 0.], [2.1, 4.2, 0]]))
+            assert cv.shape == (2, 3)
+
+    def test_constraint_solve(self):
+        def constr_f(x):
+            return np.array([x[0] + x[1]])
+
+        nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)
+
+        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
+                                             constraints=(nlc))
+
+        # trust-constr warns if the constraint function is linear
+        with warns(UserWarning):
+            res = solver.solve()
+
+        assert constr_f(res.x) <= 1.9
+        assert res.success
+
+    def test_impossible_constraint(self):
+        def constr_f(x):
+            return np.array([x[0] + x[1]])
+
+        nlc = NonlinearConstraint(constr_f, -np.inf, -1)
+
+        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
+                                             constraints=(nlc), popsize=3,
+                                             seed=1)
+
+        # a UserWarning is issued because the 'trust-constr' polishing is
+        # attempted on the least infeasible solution found.
+        with warns(UserWarning):
+            res = solver.solve()
+
+        assert res.maxcv > 0
+        assert not res.success
+
+        # test _promote_lowest_energy works when none of the population is
+        # feasible. In this case, the solution with the lowest constraint
+        # violation should be promoted.
+        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
+                                             constraints=(nlc), polish=False)
+        next(solver)
+        assert not solver.feasible.all()
+        assert not np.isfinite(solver.population_energies).all()
+
+        # now swap two of the entries in the population
+        j = 20
+        cv = solver.constraint_violation[0]
+
+        solver.population_energies[[0, j]] = solver.population_energies[[j, 0]]
+        solver.population[[0, j], :] = solver.population[[j, 0], :]
+        solver.constraint_violation[[0, j], :] = (
+            solver.constraint_violation[[j, 0], :])
+
+        solver._promote_lowest_energy()
+        assert_equal(solver.constraint_violation[0], cv)
+
+    def test_accept_trial(self):
+        # _accept_trial(self, energy_trial, feasible_trial, cv_trial,
+        #               energy_orig, feasible_orig, cv_orig)
+        def constr_f(x):
+            return [x[0] + x[1]]
+        nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)
+        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
+                                             constraints=(nlc))
+        fn = solver._accept_trial
+        # both solutions are feasible, select the lower energy
+        assert fn(0.1, True, np.array([0.]), 1.0, True, np.array([0.]))
+        assert (fn(1.0, True, np.array([0.0]), 0.1, True, np.array([0.0]))
+                is False)
+        assert fn(0.1, True, np.array([0.]), 0.1, True, np.array([0.]))
+
+        # trial is feasible, original is not
+        assert fn(9.9, True, np.array([0.]), 1.0, False, np.array([1.]))
+
+        # trial and original are infeasible
+        # cv_trial have to be <= cv_original to be better
+        assert (fn(0.1, False, np.array([0.5, 0.5]),
+                   1.0, False, np.array([1., 1.0])))
+        assert (fn(0.1, False, np.array([0.5, 0.5]),
+                   1.0, False, np.array([1., 0.50])))
+        assert not (fn(1.0, False, np.array([0.5, 0.5]),
+                       1.0, False, np.array([1.0, 0.4])))
+
+    def test_constraint_wrapper(self):
+        lb = np.array([0, 20, 30])
+        ub = np.array([0.5, np.inf, 70])
+        x0 = np.array([1, 2, 3])
+        pc = _ConstraintWrapper(Bounds(lb, ub), x0)
+        assert (pc.violation(x0) > 0).any()
+        assert (pc.violation([0.25, 21, 31]) == 0).all()
+
+        # check vectorized Bounds constraint
+        xs = np.arange(1, 16).reshape(5, 3)
+        violations = []
+        for x in xs:
+            violations.append(pc.violation(x))
+        np.testing.assert_allclose(pc.violation(xs.T), np.array(violations).T)
+
+        x0 = np.array([1, 2, 3, 4])
+        A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]])
+        pc = _ConstraintWrapper(LinearConstraint(A, -np.inf, 0), x0)
+        assert (pc.violation(x0) > 0).any()
+        assert (pc.violation([-10, 2, -10, 4]) == 0).all()
+
+        # check vectorized LinearConstraint, for 7 lots of parameter vectors
+        # with each parameter vector being 4 long, with 3 constraints
+        # xs is the same shape as stored in the differential evolution
+        # population, but it's sent to the violation function as (len(x), M)
+        xs = np.arange(1, 29).reshape(7, 4)
+        violations = []
+        for x in xs:
+            violations.append(pc.violation(x))
+        np.testing.assert_allclose(pc.violation(xs.T), np.array(violations).T)
+
+        pc = _ConstraintWrapper(LinearConstraint(csr_matrix(A), -np.inf, 0),
+                                x0)
+        assert (pc.violation(x0) > 0).any()
+        assert (pc.violation([-10, 2, -10, 4]) == 0).all()
+
+        def fun(x):
+            return A.dot(x)
+
+        nonlinear = NonlinearConstraint(fun, -np.inf, 0)
+        pc = _ConstraintWrapper(nonlinear, [-10, 2, -10, 4])
+        assert (pc.violation(x0) > 0).any()
+        assert (pc.violation([-10, 2, -10, 4]) == 0).all()
+
+    def test_constraint_wrapper_violation(self):
+        def cons_f(x):
+            # written in vectorised form to accept an array of (N, S)
+            # returning (M, S)
+            # where N is the number of parameters,
+            # S is the number of solution vectors to be examined,
+            # and M is the number of constraint components
+            return np.array([x[0] ** 2 + x[1],
+                             x[0] ** 2 - x[1]])
+
+        nlc = NonlinearConstraint(cons_f, [-1, -0.8500], [2, 2])
+        pc = _ConstraintWrapper(nlc, [0.5, 1])
+        assert np.size(pc.bounds[0]) == 2
+
+        xs = [(0.5, 1), (0.5, 1.2), (1.2, 1.2), (0.1, -1.2), (0.1, 2.0)]
+        vs = [(0, 0), (0, 0.1), (0.64, 0), (0.19, 0), (0.01, 1.14)]
+
+        for x, v in zip(xs, vs):
+            assert_allclose(pc.violation(x), v)
+
+        # now check that we can vectorize the constraint wrapper
+        assert_allclose(pc.violation(np.array(xs).T),
+                        np.array(vs).T)
+        assert pc.fun(np.array(xs).T).shape == (2, len(xs))
+        assert pc.violation(np.array(xs).T).shape == (2, len(xs))
+        assert pc.num_constr == 2
+        assert pc.parameter_count == 2
+
+    def test_matrix_linear_constraint(self):
+        # gh20041 supplying an np.matrix to construct a LinearConstraint
+        # caused _ConstraintWrapper to start returning constraint violations
+        # of the wrong shape.
+        with suppress_warnings() as sup:
+            sup.filter(PendingDeprecationWarning)
+            matrix = np.matrix([[1, 1, 1, 1.],
+                                [2, 2, 2, 2.]])
+        lc = LinearConstraint(matrix, 0, 1)
+        x0 = np.ones(4)
+        cw = _ConstraintWrapper(lc, x0)
+        # the shape of the constraint violation should be the same as the
+        # number of constraints applied.
+        assert cw.violation(x0).shape == (2,)
+
+        # let's try a vectorised violation call.
+        xtrial = np.arange(4 * 5).reshape(4, 5)
+        assert cw.violation(xtrial).shape == (2, 5)
+
+ def test_L1(self):
1010
+ # Lampinen ([5]) test problem 1
1011
+
1012
+ def f(x):
1013
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1014
+ fun = np.sum(5*x[1:5]) - 5*x[1:5]@x[1:5] - np.sum(x[5:])
1015
+ return fun
1016
+
1017
+ A = np.zeros((10, 14)) # 1-indexed to match reference
1018
+ A[1, [1, 2, 10, 11]] = 2, 2, 1, 1
1019
+ A[2, [1, 10]] = -8, 1
1020
+ A[3, [4, 5, 10]] = -2, -1, 1
1021
+ A[4, [1, 3, 10, 11]] = 2, 2, 1, 1
1022
+ A[5, [2, 11]] = -8, 1
1023
+ A[6, [6, 7, 11]] = -2, -1, 1
1024
+ A[7, [2, 3, 11, 12]] = 2, 2, 1, 1
1025
+ A[8, [3, 12]] = -8, 1
1026
+ A[9, [8, 9, 12]] = -2, -1, 1
1027
+ A = A[1:, 1:]
1028
+
1029
+ b = np.array([10, 0, 0, 10, 0, 0, 10, 0, 0])
1030
+
1031
+ L = LinearConstraint(A, -np.inf, b)
1032
+
1033
+ bounds = [(0, 1)]*9 + [(0, 100)]*3 + [(0, 1)]
1034
+
1035
+ # using a lower popsize to speed the test up
1036
+ res = differential_evolution(f, bounds, strategy='best1bin', seed=1234,
1037
+ constraints=(L), popsize=2)
1038
+
1039
+ x_opt = (1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 1)
1040
+ f_opt = -15
1041
+
1042
+ assert_allclose(f(x_opt), f_opt, atol=6e-4)
1043
+ assert res.success
1044
+ assert_allclose(res.x, x_opt, atol=6e-4)
1045
+ assert_allclose(res.fun, f_opt, atol=5e-3)
1046
+ assert_(np.all([email protected] <= b))
1047
+ assert_(np.all(res.x >= np.array(bounds)[:, 0]))
1048
+ assert_(np.all(res.x <= np.array(bounds)[:, 1]))
1049
+
1050
+ # now repeat the same solve, using the same overall constraints,
1051
+ # but using a sparse matrix for the LinearConstraint instead of an
1052
+ # array
1053
+
1054
+ L = LinearConstraint(csr_matrix(A), -np.inf, b)
1055
+
1056
+ # using a lower popsize to speed the test up
1057
+ res = differential_evolution(f, bounds, strategy='best1bin', seed=1234,
1058
+ constraints=(L), popsize=2)
1059
+
1060
+ assert_allclose(f(x_opt), f_opt)
1061
+ assert res.success
1062
+ assert_allclose(res.x, x_opt, atol=5e-4)
1063
+ assert_allclose(res.fun, f_opt, atol=5e-3)
1064
+ assert_(np.all([email protected] <= b))
1065
+ assert_(np.all(res.x >= np.array(bounds)[:, 0]))
1066
+ assert_(np.all(res.x <= np.array(bounds)[:, 1]))
1067
+
1068
+ # now repeat the same solve, using the same overall constraints,
1069
+ # but specify half the constraints in terms of LinearConstraint,
1070
+ # and the other half by NonlinearConstraint
1071
+ def c1(x):
1072
+ x = np.hstack(([0], x))
1073
+ return [2*x[2] + 2*x[3] + x[11] + x[12],
1074
+ -8*x[3] + x[12]]
1075
+
1076
+ def c2(x):
1077
+ x = np.hstack(([0], x))
1078
+ return -2*x[8] - x[9] + x[12]
1079
+
1080
+ L = LinearConstraint(A[:5, :], -np.inf, b[:5])
1081
+ L2 = LinearConstraint(A[5:6, :], -np.inf, b[5:6])
1082
+ N = NonlinearConstraint(c1, -np.inf, b[6:8])
1083
+ N2 = NonlinearConstraint(c2, -np.inf, b[8:9])
1084
+ constraints = (L, N, L2, N2)
1085
+
1086
+ with suppress_warnings() as sup:
1087
+ sup.filter(UserWarning)
1088
+ res = differential_evolution(f, bounds, strategy='rand1bin',
1089
+ seed=1234, constraints=constraints,
1090
+ popsize=2)
1091
+
1092
+ assert_allclose(res.x, x_opt, atol=6e-4)
1093
+ assert_allclose(res.fun, f_opt, atol=5e-3)
1094
+ assert_(np.all([email protected] <= b))
1095
+ assert_(np.all(res.x >= np.array(bounds)[:, 0]))
1096
+ assert_(np.all(res.x <= np.array(bounds)[:, 1]))
1097
+
1098
+ def test_L2(self):
1099
+ # Lampinen ([5]) test problem 2
1100
+
1101
+ def f(x):
1102
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1103
+ fun = ((x[1]-10)**2 + 5*(x[2]-12)**2 + x[3]**4 + 3*(x[4]-11)**2 +
1104
+ 10*x[5]**6 + 7*x[6]**2 + x[7]**4 - 4*x[6]*x[7] - 10*x[6] -
1105
+ 8*x[7])
1106
+ return fun
1107
+
1108
+ def c1(x):
1109
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1110
+ return [127 - 2*x[1]**2 - 3*x[2]**4 - x[3] - 4*x[4]**2 - 5*x[5],
1111
+ 196 - 23*x[1] - x[2]**2 - 6*x[6]**2 + 8*x[7],
1112
+ 282 - 7*x[1] - 3*x[2] - 10*x[3]**2 - x[4] + x[5],
1113
+ -4*x[1]**2 - x[2]**2 + 3*x[1]*x[2] - 2*x[3]**2 -
1114
+ 5*x[6] + 11*x[7]]
1115
+
1116
+ N = NonlinearConstraint(c1, 0, np.inf)
1117
+ bounds = [(-10, 10)]*7
1118
+ constraints = (N)
1119
+
1120
+ with suppress_warnings() as sup:
1121
+ sup.filter(UserWarning)
1122
+ res = differential_evolution(f, bounds, strategy='rand1bin',
1123
+ seed=1234, constraints=constraints)
1124
+
1125
+ f_opt = 680.6300599487869
1126
+ x_opt = (2.330499, 1.951372, -0.4775414, 4.365726,
1127
+ -0.6244870, 1.038131, 1.594227)
1128
+
1129
+ assert_allclose(f(x_opt), f_opt)
1130
+ assert_allclose(res.fun, f_opt)
1131
+ assert_allclose(res.x, x_opt, atol=1e-5)
1132
+ assert res.success
1133
+ assert_(np.all(np.array(c1(res.x)) >= 0))
1134
+ assert_(np.all(res.x >= np.array(bounds)[:, 0]))
1135
+ assert_(np.all(res.x <= np.array(bounds)[:, 1]))
1136
+
1137
+ def test_L3(self):
1138
+ # Lampinen ([5]) test problem 3
1139
+
1140
+ def f(x):
1141
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1142
+ fun = (x[1]**2 + x[2]**2 + x[1]*x[2] - 14*x[1] - 16*x[2] +
1143
+ (x[3]-10)**2 + 4*(x[4]-5)**2 + (x[5]-3)**2 + 2*(x[6]-1)**2 +
1144
+ 5*x[7]**2 + 7*(x[8]-11)**2 + 2*(x[9]-10)**2 +
1145
+ (x[10] - 7)**2 + 45
1146
+ )
1147
+ return fun
1148
+
1149
+ A = np.zeros((4, 11))
1150
+ A[1, [1, 2, 7, 8]] = -4, -5, 3, -9
1151
+ A[2, [1, 2, 7, 8]] = -10, 8, 17, -2
1152
+ A[3, [1, 2, 9, 10]] = 8, -2, -5, 2
1153
+ A = A[1:, 1:]
1154
+ b = np.array([-105, 0, -12])
1155
+
1156
+ def c1(x):
1157
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1158
+ return [3*x[1] - 6*x[2] - 12*(x[9]-8)**2 + 7*x[10],
1159
+ -3*(x[1]-2)**2 - 4*(x[2]-3)**2 - 2*x[3]**2 + 7*x[4] + 120,
1160
+ -x[1]**2 - 2*(x[2]-2)**2 + 2*x[1]*x[2] - 14*x[5] + 6*x[6],
1161
+ -5*x[1]**2 - 8*x[2] - (x[3]-6)**2 + 2*x[4] + 40,
1162
+ -0.5*(x[1]-8)**2 - 2*(x[2]-4)**2 - 3*x[5]**2 + x[6] + 30]
1163
+
1164
+ L = LinearConstraint(A, b, np.inf)
1165
+ N = NonlinearConstraint(c1, 0, np.inf)
1166
+ bounds = [(-10, 10)]*10
1167
+ constraints = (L, N)
1168
+
1169
+ with suppress_warnings() as sup:
1170
+ sup.filter(UserWarning)
1171
+ res = differential_evolution(f, bounds, seed=1234,
1172
+ constraints=constraints, popsize=3)
1173
+
1174
+ x_opt = (2.171996, 2.363683, 8.773926, 5.095984, 0.9906548,
1175
+ 1.430574, 1.321644, 9.828726, 8.280092, 8.375927)
1176
+ f_opt = 24.3062091
1177
+
1178
+ assert_allclose(f(x_opt), f_opt, atol=1e-5)
1179
+ assert_allclose(res.x, x_opt, atol=1e-6)
1180
+ assert_allclose(res.fun, f_opt, atol=1e-5)
1181
+ assert res.success
1182
+ assert_(np.all(A @ res.x >= b))
1183
+ assert_(np.all(np.array(c1(res.x)) >= 0))
1184
+ assert_(np.all(res.x >= np.array(bounds)[:, 0]))
1185
+ assert_(np.all(res.x <= np.array(bounds)[:, 1]))
1186
+
1187
+ def test_L4(self):
1188
+ # Lampinen ([5]) test problem 4
1189
+ def f(x):
1190
+ return np.sum(x[:3])
1191
+
1192
+ A = np.zeros((4, 9))
1193
+ A[1, [4, 6]] = 0.0025, 0.0025
1194
+ A[2, [5, 7, 4]] = 0.0025, 0.0025, -0.0025
1195
+ A[3, [8, 5]] = 0.01, -0.01
1196
+ A = A[1:, 1:]
1197
+ b = np.array([1, 1, 1])
1198
+
1199
+ def c1(x):
1200
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1201
+ return [x[1]*x[6] - 833.33252*x[4] - 100*x[1] + 83333.333,
1202
+ x[2]*x[7] - 1250*x[5] - x[2]*x[4] + 1250*x[4],
1203
+ x[3]*x[8] - 1250000 - x[3]*x[5] + 2500*x[5]]
1204
+
1205
+ L = LinearConstraint(A, -np.inf, 1)
1206
+ N = NonlinearConstraint(c1, 0, np.inf)
1207
+
1208
+ bounds = [(100, 10000)] + [(1000, 10000)]*2 + [(10, 1000)]*5
1209
+ constraints = (L, N)
1210
+
1211
+ with suppress_warnings() as sup:
1212
+ sup.filter(UserWarning)
1213
+ res = differential_evolution(f, bounds, strategy='rand1bin',
1214
+ seed=1234, constraints=constraints,
1215
+ popsize=3)
1216
+
1217
+ f_opt = 7049.248
1218
+
1219
+ x_opt = [579.306692, 1359.97063, 5109.9707, 182.0177, 295.601172,
1220
+ 217.9823, 286.416528, 395.601172]
1221
+
1222
+ assert_allclose(f(x_opt), f_opt, atol=0.001)
1223
+ assert_allclose(res.fun, f_opt, atol=0.001)
1224
+
1225
+ # use higher tol here for 32-bit Windows, see gh-11693
1226
+ if (platform.system() == 'Windows' and np.dtype(np.intp).itemsize < 8):
1227
+ assert_allclose(res.x, x_opt, rtol=2.4e-6, atol=0.0035)
1228
+ else:
1229
+ # tolerance determined from macOS + MKL failure, see gh-12701
1230
+ assert_allclose(res.x, x_opt, rtol=5e-6, atol=0.0024)
1231
+
1232
+ assert res.success
1233
+ assert_(np.all(A @ res.x <= b))
1234
+ assert_(np.all(np.array(c1(res.x)) >= 0))
1235
+ assert_(np.all(res.x >= np.array(bounds)[:, 0]))
1236
+ assert_(np.all(res.x <= np.array(bounds)[:, 1]))
1237
+
1238
+ def test_L5(self):
1239
+ # Lampinen ([5]) test problem 5
1240
+
1241
+ def f(x):
1242
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1243
+ fun = (np.sin(2*np.pi*x[1])**3*np.sin(2*np.pi*x[2]) /
1244
+ (x[1]**3*(x[1]+x[2])))
1245
+ return -fun # maximize
1246
+
1247
+ def c1(x):
1248
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1249
+ return [x[1]**2 - x[2] + 1,
1250
+ 1 - x[1] + (x[2]-4)**2]
1251
+
1252
+ N = NonlinearConstraint(c1, -np.inf, 0)
1253
+ bounds = [(0, 10)]*2
1254
+ constraints = (N)
1255
+
1256
+ res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
1257
+ constraints=constraints)
1258
+
1259
+ x_opt = (1.22797135, 4.24537337)
1260
+ f_opt = -0.095825
1261
+ assert_allclose(f(x_opt), f_opt, atol=2e-5)
1262
+ assert_allclose(res.fun, f_opt, atol=1e-4)
1263
+ assert res.success
1264
+ assert_(np.all(np.array(c1(res.x)) <= 0))
1265
+ assert_(np.all(res.x >= np.array(bounds)[:, 0]))
1266
+ assert_(np.all(res.x <= np.array(bounds)[:, 1]))
1267
+
1268
+ def test_L6(self):
1269
+ # Lampinen ([5]) test problem 6
1270
+ def f(x):
1271
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1272
+ fun = (x[1]-10)**3 + (x[2] - 20)**3
1273
+ return fun
1274
+
1275
+ def c1(x):
1276
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1277
+ return [(x[1]-5)**2 + (x[2] - 5)**2 - 100,
1278
+ -(x[1]-6)**2 - (x[2] - 5)**2 + 82.81]
1279
+
1280
+ N = NonlinearConstraint(c1, 0, np.inf)
1281
+ bounds = [(13, 100), (0, 100)]
1282
+ constraints = (N)
1283
+ res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
1284
+ constraints=constraints, tol=1e-7)
1285
+ x_opt = (14.095, 0.84296)
1286
+ f_opt = -6961.814744
1287
+
1288
+ assert_allclose(f(x_opt), f_opt, atol=1e-6)
1289
+ assert_allclose(res.fun, f_opt, atol=0.001)
1290
+ assert_allclose(res.x, x_opt, atol=1e-4)
1291
+ assert res.success
1292
+ assert_(np.all(np.array(c1(res.x)) >= 0))
1293
+ assert_(np.all(res.x >= np.array(bounds)[:, 0]))
1294
+ assert_(np.all(res.x <= np.array(bounds)[:, 1]))
1295
+
1296
+ def test_L7(self):
1297
+ # Lampinen ([5]) test problem 7
1298
+ def f(x):
1299
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1300
+ fun = (5.3578547*x[3]**2 + 0.8356891*x[1]*x[5] +
1301
+ 37.293239*x[1] - 40792.141)
1302
+ return fun
1303
+
1304
+ def c1(x):
1305
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1306
+ return [
1307
+ 85.334407 + 0.0056858*x[2]*x[5] + 0.0006262*x[1]*x[4] -
1308
+ 0.0022053*x[3]*x[5],
1309
+
1310
+ 80.51249 + 0.0071317*x[2]*x[5] + 0.0029955*x[1]*x[2] +
1311
+ 0.0021813*x[3]**2,
1312
+
1313
+ 9.300961 + 0.0047026*x[3]*x[5] + 0.0012547*x[1]*x[3] +
1314
+ 0.0019085*x[3]*x[4]
1315
+ ]
1316
+
1317
+ N = NonlinearConstraint(c1, [0, 90, 20], [92, 110, 25])
1318
+
1319
+ bounds = [(78, 102), (33, 45)] + [(27, 45)]*3
1320
+ constraints = (N)
1321
+
1322
+ res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
1323
+ constraints=constraints)
1324
+
1325
+ # using our best solution, rather than Lampinen/Koziel. The Koziel solution
1326
+ # doesn't satisfy the constraints, and the Lampinen f_opt is simply wrong.
1327
+ x_opt = [78.00000686, 33.00000362, 29.99526064, 44.99999971,
1328
+ 36.77579979]
1329
+
1330
+ f_opt = -30665.537578
1331
+
1332
+ assert_allclose(f(x_opt), f_opt)
1333
+ assert_allclose(res.x, x_opt, atol=1e-3)
1334
+ assert_allclose(res.fun, f_opt, atol=1e-3)
1335
+
1336
+ assert res.success
1337
+ assert_(np.all(np.array(c1(res.x)) >= np.array([0, 90, 20])))
1338
+ assert_(np.all(np.array(c1(res.x)) <= np.array([92, 110, 25])))
1339
+ assert_(np.all(res.x >= np.array(bounds)[:, 0]))
1340
+ assert_(np.all(res.x <= np.array(bounds)[:, 1]))
1341
+
1342
+ @pytest.mark.slow
1343
+ @pytest.mark.xfail(platform.machine() == 'ppc64le',
1344
+ reason="fails on ppc64le")
1345
+ def test_L8(self):
1346
+ def f(x):
1347
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1348
+ fun = 3*x[1] + 0.000001*x[1]**3 + 2*x[2] + 0.000002/3*x[2]**3
1349
+ return fun
1350
+
1351
+ A = np.zeros((3, 5))
1352
+ A[1, [4, 3]] = 1, -1
1353
+ A[2, [3, 4]] = 1, -1
1354
+ A = A[1:, 1:]
1355
+ b = np.array([-.55, -.55])
1356
+
1357
+ def c1(x):
1358
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1359
+ return [
1360
+ 1000*np.sin(-x[3]-0.25) + 1000*np.sin(-x[4]-0.25) +
1361
+ 894.8 - x[1],
1362
+ 1000*np.sin(x[3]-0.25) + 1000*np.sin(x[3]-x[4]-0.25) +
1363
+ 894.8 - x[2],
1364
+ 1000*np.sin(x[4]-0.25) + 1000*np.sin(x[4]-x[3]-0.25) +
1365
+ 1294.8
1366
+ ]
1367
+ L = LinearConstraint(A, b, np.inf)
1368
+ N = NonlinearConstraint(c1, np.full(3, -0.001), np.full(3, 0.001))
1369
+
1370
+ bounds = [(0, 1200)]*2+[(-.55, .55)]*2
1371
+ constraints = (L, N)
1372
+
1373
+ with suppress_warnings() as sup:
1374
+ sup.filter(UserWarning)
1375
+ # original Lampinen test was with rand1bin, but that takes a
1376
+ # huge amount of CPU time. Changing strategy to best1bin speeds
1377
+ # things up a lot
1378
+ res = differential_evolution(f, bounds, strategy='best1bin',
1379
+ seed=1234, constraints=constraints,
1380
+ maxiter=5000)
1381
+
1382
+ x_opt = (679.9453, 1026.067, 0.1188764, -0.3962336)
1383
+ f_opt = 5126.4981
1384
+
1385
+ assert_allclose(f(x_opt), f_opt, atol=1e-3)
1386
+ assert_allclose(res.x[:2], x_opt[:2], atol=2e-3)
1387
+ assert_allclose(res.x[2:], x_opt[2:], atol=2e-3)
1388
+ assert_allclose(res.fun, f_opt, atol=2e-2)
1389
+ assert res.success
1390
+ assert_(np.all(A @ res.x >= b))
1391
+ assert_(np.all(np.array(c1(res.x)) >= -0.001))
1392
+ assert_(np.all(np.array(c1(res.x)) <= 0.001))
1393
+ assert_(np.all(res.x >= np.array(bounds)[:, 0]))
1394
+ assert_(np.all(res.x <= np.array(bounds)[:, 1]))
1395
+
1396
+ def test_L9(self):
1397
+ # Lampinen ([5]) test problem 9
1398
+
1399
+ def f(x):
1400
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1401
+ return x[1]**2 + (x[2]-1)**2
1402
+
1403
+ def c1(x):
1404
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1405
+ return [x[2] - x[1]**2]
1406
+
1407
+ N = NonlinearConstraint(c1, [-.001], [0.001])
1408
+
1409
+ bounds = [(-1, 1)]*2
1410
+ constraints = (N)
1411
+ res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
1412
+ constraints=constraints)
1413
+
1414
+ x_opt = [np.sqrt(2)/2, 0.5]
1415
+ f_opt = 0.75
1416
+
1417
+ assert_allclose(f(x_opt), f_opt)
1418
+ assert_allclose(np.abs(res.x), x_opt, atol=1e-3)
1419
+ assert_allclose(res.fun, f_opt, atol=1e-3)
1420
+ assert res.success
1421
+ assert_(np.all(np.array(c1(res.x)) >= -0.001))
1422
+ assert_(np.all(np.array(c1(res.x)) <= 0.001))
1423
+ assert_(np.all(res.x >= np.array(bounds)[:, 0]))
1424
+ assert_(np.all(res.x <= np.array(bounds)[:, 1]))
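For reference, the optimum asserted above can be derived by hand. Treating the narrow tolerance band on `c1` as the equality $x_2 = x_1^2$ and substituting into the objective gives

```latex
g(x_1) = x_1^2 + (x_1^2 - 1)^2, \qquad
g'(x_1) = 2x_1 + 4x_1(x_1^2 - 1) = 2x_1(2x_1^2 - 1) = 0,
```

so $x_1 = \pm 1/\sqrt{2}$, $x_2 = 1/2$ and $f^* = 1/2 + 1/4 = 3/4$, matching `f_opt`. Both signs of $x_1$ are optimal, which is why the test compares `np.abs(res.x)` rather than `res.x`.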
1425
+
1426
+ def test_integrality(self):
1427
+ # test fitting discrete distribution to data
1428
+ rng = np.random.default_rng(6519843218105)
1429
+ dist = stats.nbinom
1430
+ shapes = (5, 0.5)
1431
+ x = dist.rvs(*shapes, size=10000, random_state=rng)
1432
+
1433
+ def func(p, *args):
1434
+ dist, x = args
1435
+ # negative log-likelihood function
1436
+ ll = -np.log(dist.pmf(x, *p)).sum(axis=-1)
1437
+ if np.isnan(ll): # occurs when x is outside of support
1438
+ ll = np.inf # we don't want that
1439
+ return ll
1440
+
1441
+ integrality = [True, False]
1442
+ bounds = [(1, 18), (0, 0.95)]
1443
+
1444
+ res = differential_evolution(func, bounds, args=(dist, x),
1445
+ integrality=integrality, polish=False,
1446
+ seed=rng)
1447
+ # tolerance has to be fairly relaxed for the second parameter
1448
+ # because we're fitting a distribution to random variates.
1449
+ assert res.x[0] == 5
1450
+ assert_allclose(res.x, shapes, rtol=0.025)
1451
+
1452
+ # check that we can still use integrality constraints with polishing
1453
+ res2 = differential_evolution(func, bounds, args=(dist, x),
1454
+ integrality=integrality, polish=True,
1455
+ seed=rng)
1456
+
1457
+ def func2(p, *args):
1458
+ n, dist, x = args
1459
+ return func(np.array([n, p[0]]), dist, x)
1460
+
1461
+ # compare the DE-derived solution to an L-BFGS-B solution (which doesn't
1462
+ # have to find the integral values). Note we're setting x0 to be the
1463
+ # output from the first DE result, thereby making the polishing step
1464
+ # and this minimisation pretty much equivalent.
1465
+ LBFGSB = minimize(func2, res2.x[1], args=(5, dist, x),
1466
+ bounds=[(0, 0.95)])
1467
+ assert_allclose(res2.x[1], LBFGSB.x)
1468
+ assert res2.fun <= res.fun
1469
+
1470
+ def test_integrality_limits(self):
1471
+ def f(x):
1472
+ return x
1473
+
1474
+ integrality = [True, False, True]
1475
+ bounds = [(0.2, 1.1), (0.9, 2.2), (3.3, 4.9)]
1476
+
1477
+ # no integrality constraints
1478
+ solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
1479
+ integrality=False)
1480
+ assert_allclose(solver.limits[0], [0.2, 0.9, 3.3])
1481
+ assert_allclose(solver.limits[1], [1.1, 2.2, 4.9])
1482
+
1483
+ # with integrality constraints
1484
+ solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
1485
+ integrality=integrality)
1486
+ assert_allclose(solver.limits[0], [0.5, 0.9, 3.5])
1487
+ assert_allclose(solver.limits[1], [1.5, 2.2, 4.5])
1488
+ assert_equal(solver.integrality, [True, False, True])
1489
+ assert solver.polish is False
1490
+
1491
+ bounds = [(-1.2, -0.9), (0.9, 2.2), (-10.3, 4.1)]
1492
+ solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
1493
+ integrality=integrality)
1494
+ assert_allclose(solver.limits[0], [-1.5, 0.9, -10.5])
1495
+ assert_allclose(solver.limits[1], [-0.5, 2.2, 4.5])
1496
+
1497
+ # A lower bound of -1.2 is converted to
1498
+ # np.nextafter(np.ceil(-1.2) - 0.5, np.inf)
1499
+ # with a similar process to the upper bound. Check that the
1500
+ # conversions work
1501
+ assert_allclose(np.round(solver.limits[0]), [-1.0, 1.0, -10.0])
1502
+ assert_allclose(np.round(solver.limits[1]), [-1.0, 2.0, 4.0])
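A minimal sketch of the conversion described in the comment above; the helper name `integrality_limits` is hypothetical (the solver performs this internally). `np.nextafter` nudges each endpoint inward so that rounding an admissible value can never escape the original bounds:

```python
import numpy as np

def integrality_limits(lb, ub):
    # Recentre an integer variable's bounds on the +/-0.5 band around
    # the admissible integers, nudged inward with nextafter.
    new_lb = np.nextafter(np.ceil(lb) - 0.5, np.inf)
    new_ub = np.nextafter(np.floor(ub) + 0.5, -np.inf)
    return new_lb, new_ub

lb, ub = integrality_limits(-1.2, -0.9)
print(lb, ub)              # just inside (-1.5, -0.5)
print(np.round([lb, ub]))  # [-1., -1.]: both endpoints round to -1
```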
1503
+
1504
+ bounds = [(-10.2, -8.1), (0.9, 2.2), (-10.9, -9.9999)]
1505
+ solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
1506
+ integrality=integrality)
1507
+ assert_allclose(solver.limits[0], [-10.5, 0.9, -10.5])
1508
+ assert_allclose(solver.limits[1], [-8.5, 2.2, -9.5])
1509
+
1510
+ bounds = [(-10.2, -10.1), (0.9, 2.2), (-10.9, -9.9999)]
1511
+ with pytest.raises(ValueError, match='One of the integrality'):
1512
+ DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
1513
+ integrality=integrality)
1514
+
1515
+ def test_vectorized(self):
1516
+ def quadratic(x):
1517
+ return np.sum(x**2)
1518
+
1519
+ def quadratic_vec(x):
1520
+ return np.sum(x**2, axis=0)
1521
+
1522
+ # A vectorized function needs to accept (len(x), S) and return (S,)
1523
+ with pytest.raises(RuntimeError, match='The vectorized function'):
1524
+ differential_evolution(quadratic, self.bounds,
1525
+ vectorized=True, updating='deferred')
1526
+
1527
+ # vectorized overrides the updating keyword, check for warning
1528
+ with warns(UserWarning, match="differential_evolution: the 'vector"):
1529
+ differential_evolution(quadratic_vec, self.bounds,
1530
+ vectorized=True)
1531
+
1532
+ # vectorized defers to the workers keyword, check for warning
1533
+ with warns(UserWarning, match="differential_evolution: the 'workers"):
1534
+ differential_evolution(quadratic_vec, self.bounds,
1535
+ vectorized=True, workers=map,
1536
+ updating='deferred')
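A compact sketch of the shape contract these checks enforce: with `vectorized=True` the objective receives the whole trial population at once as a `(len(x), S)` array and must return an `(S,)` array of objective values (the function name below is hypothetical):

```python
import numpy as np
from scipy.optimize import differential_evolution

def sphere_vec(x):
    # x has shape (number_of_parameters, S); reducing over axis 0
    # yields one objective value per trial solution, shape (S,).
    return np.sum(x**2, axis=0)

res = differential_evolution(sphere_vec, [(-5, 5)] * 3,
                             vectorized=True, updating='deferred', seed=1)
print(res.x, res.fun)  # expect a minimum near the origin
```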
1537
+
1538
+ ncalls = [0]
1539
+
1540
+ def rosen_vec(x):
1541
+ ncalls[0] += 1
1542
+ return rosen(x)
1543
+
1544
+ bounds = [(0, 10), (0, 10)]
1545
+ res1 = differential_evolution(rosen, bounds, updating='deferred',
1546
+ seed=1)
1547
+ res2 = differential_evolution(rosen_vec, bounds, vectorized=True,
1548
+ updating='deferred', seed=1)
1549
+
1550
+ # the two minimisation runs should be functionally equivalent
1551
+ assert_allclose(res1.x, res2.x)
1552
+ assert ncalls[0] == res2.nfev
1553
+ assert res1.nit == res2.nit
1554
+
1555
+ def test_vectorized_constraints(self):
1556
+ def constr_f(x):
1557
+ return np.array([x[0] + x[1]])
1558
+
1559
+ def constr_f2(x):
1560
+ return np.array([x[0]**2 + x[1], x[0] - x[1]])
1561
+
1562
+ nlc1 = NonlinearConstraint(constr_f, -np.inf, 1.9)
1563
+ nlc2 = NonlinearConstraint(constr_f2, (0.9, 0.5), (2.0, 2.0))
1564
+
1565
+ def rosen_vec(x):
1566
+ # accept a (len(x0), S) array and return an (S,) array
1567
+ v = 100 * (x[1:] - x[:-1]**2.0)**2.0
1568
+ v += (1 - x[:-1])**2.0
1569
+ return np.squeeze(v)
1570
+
1571
+ bounds = [(0, 10), (0, 10)]
1572
+
1573
+ res1 = differential_evolution(rosen, bounds, updating='deferred',
1574
+ seed=1, constraints=[nlc1, nlc2],
1575
+ polish=False)
1576
+ res2 = differential_evolution(rosen_vec, bounds, vectorized=True,
1577
+ updating='deferred', seed=1,
1578
+ constraints=[nlc1, nlc2],
1579
+ polish=False)
1580
+ # the two minimisation runs should be functionally equivalent
1581
+ assert_allclose(res1.x, res2.x)
1582
+
1583
+ def test_constraint_violation_error_message(self):
1584
+
1585
+ def func(x):
1586
+ return np.cos(x[0]) + np.sin(x[1])
1587
+
1588
+ # Intentionally infeasible constraints.
1589
+ c0 = NonlinearConstraint(lambda x: x[1] - (x[0]-1)**2, 0, np.inf)
1590
+ c1 = NonlinearConstraint(lambda x: x[1] + x[0]**2, -np.inf, 0)
1591
+
1592
+ result = differential_evolution(func,
1593
+ bounds=[(-1, 2), (-1, 1)],
1594
+ constraints=[c0, c1],
1595
+ maxiter=10,
1596
+ polish=False,
1597
+ seed=864197532)
1598
+ assert result.success is False
1599
+ # The numerical value in the error message might be sensitive to
1600
+ # changes in the implementation. It can be updated if the code is
1601
+ # changed. The essential part of the test is that there is a number
1602
+ # after the '=', so if necessary, the text could be reduced to, say,
1603
+ # "MAXCV = 0.".
1604
+ assert "MAXCV = 0.414" in result.message
1605
+
1606
+ def test_strategy_fn(self):
1607
+ # examines ability to customize strategy by mimicking one of the
1608
+ # in-built strategies and comparing to the actual in-built strategy.
1609
+ parameter_count = 4
1610
+ popsize = 10
1611
+ bounds = [(0, 10.)] * parameter_count
1612
+ total_popsize = parameter_count * popsize
1613
+ mutation = 0.8
1614
+ recombination = 0.7
1615
+
1616
+ def custom_strategy_fn(candidate, population, rng=None):
1617
+ trial = np.copy(population[candidate])
1618
+ fill_point = rng.choice(parameter_count)
1619
+
1620
+ pool = np.arange(total_popsize)
1621
+ rng.shuffle(pool)
1622
+
1623
+ idxs = []
1624
+ while len(idxs) < 2 and len(pool) > 0:
1625
+ idx = pool[0]
1626
+ pool = pool[1:]
1627
+ if idx != candidate:
1628
+ idxs.append(idx)
1629
+
1630
+ r0, r1 = idxs[:2]
1631
+
1632
+ bprime = (population[0] + mutation *
1633
+ (population[r0] - population[r1]))
1634
+
1635
+ crossovers = rng.uniform(size=parameter_count)
1636
+ crossovers = crossovers < recombination
1637
+ crossovers[fill_point] = True
1638
+ trial = np.where(crossovers, bprime, trial)
1639
+ return trial
1640
+
1641
+ solver = DifferentialEvolutionSolver(
1642
+ rosen,
1643
+ bounds,
1644
+ popsize=popsize,
1645
+ recombination=recombination,
1646
+ mutation=mutation,
1647
+ maxiter=2,
1648
+ strategy=custom_strategy_fn,
1649
+ seed=10,
1650
+ polish=False
1651
+ )
1652
+ assert solver.strategy is custom_strategy_fn
1653
+ res = solver.solve()
1654
+
1655
+ res2 = differential_evolution(
1656
+ rosen,
1657
+ bounds,
1658
+ mutation=mutation,
1659
+ popsize=popsize,
1660
+ recombination=recombination,
1661
+ maxiter=2,
1662
+ strategy='best1bin',
1663
+ polish=False,
1664
+ seed=10
1665
+ )
1666
+ assert_allclose(res.population, res2.population)
1667
+ assert_allclose(res.x, res2.x)
1668
+
1669
+ def custom_strategy_fn(candidate, population, rng=None):
1670
+ return np.array([1.0, 2.0])
1671
+
1672
+ with pytest.raises(RuntimeError, match="strategy*"):
1673
+ differential_evolution(
1674
+ rosen,
1675
+ bounds,
1676
+ strategy=custom_strategy_fn
1677
+ )
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test__dual_annealing.py ADDED
@@ -0,0 +1,379 @@
1
+ # Dual annealing unit tests implementation.
2
+ # Copyright (c) 2018 Sylvain Gubian <[email protected]>,
3
+ # Yang Xiang <[email protected]>
4
+ # Author: Sylvain Gubian, PMP S.A.
5
+ """
6
+ Unit tests for the dual annealing global optimizer
7
+ """
8
+ from scipy.optimize import dual_annealing, Bounds
9
+
10
+ from scipy.optimize._dual_annealing import EnergyState
11
+ from scipy.optimize._dual_annealing import LocalSearchWrapper
12
+ from scipy.optimize._dual_annealing import ObjectiveFunWrapper
13
+ from scipy.optimize._dual_annealing import StrategyChain
14
+ from scipy.optimize._dual_annealing import VisitingDistribution
15
+ from scipy.optimize import rosen, rosen_der
16
+ import pytest
17
+ import numpy as np
18
+ from numpy.testing import assert_equal, assert_allclose, assert_array_less
19
+ from pytest import raises as assert_raises
20
+ from scipy._lib._util import check_random_state
21
+
22
+
23
+ class TestDualAnnealing:
24
+
25
+ def setup_method(self):
26
+ # A function that always returns infinity, for initialization tests
27
+ self.weirdfunc = lambda x: np.inf
28
+ # 2-D bounds for testing function
29
+ self.ld_bounds = [(-5.12, 5.12)] * 2
30
+ # 8-D bounds for testing function (the 2-D bounds repeated four times)
31
+ self.hd_bounds = self.ld_bounds * 4
32
+ # Number of values to be generated for testing visit function
33
+ self.nbtestvalues = 5000
34
+ self.high_temperature = 5230
35
+ self.low_temperature = 0.1
36
+ self.qv = 2.62
37
+ self.seed = 1234
38
+ self.rs = check_random_state(self.seed)
39
+ self.nb_fun_call = 0
40
+ self.ngev = 0
41
+
42
+ def callback(self, x, f, context):
43
+ # For testing the callback mechanism. Should stop once f <= 1.0, as
44
+ # the callback function returns True
45
+ if f <= 1.0:
46
+ return True
47
+
48
+ def func(self, x, args=()):
49
+ # Use the Rastrigin function for the tests
50
+ if args:
51
+ shift = args
52
+ else:
53
+ shift = 0
54
+ y = np.sum((x - shift) ** 2 - 10 * np.cos(2 * np.pi * (
55
+ x - shift))) + 10 * np.size(x) + shift
56
+ self.nb_fun_call += 1
57
+ return y
58
+
59
+ def rosen_der_wrapper(self, x, args=()):
60
+ self.ngev += 1
61
+ return rosen_der(x, *args)
62
+
63
+ # FIXME: there are some discontinuities in behaviour as a function of `qv`,
64
+ # this needs investigating - see gh-12384
65
+ @pytest.mark.parametrize('qv', [1.1, 1.41, 2, 2.62, 2.9])
66
+ def test_visiting_stepping(self, qv):
67
+ lu = list(zip(*self.ld_bounds))
68
+ lower = np.array(lu[0])
69
+ upper = np.array(lu[1])
70
+ dim = lower.size
71
+ vd = VisitingDistribution(lower, upper, qv, self.rs)
72
+ values = np.zeros(dim)
73
+ x_step_low = vd.visiting(values, 0, self.high_temperature)
74
+ # Make sure that all components are changed
75
+ assert_equal(np.not_equal(x_step_low, 0), True)
76
+ values = np.zeros(dim)
77
+ x_step_high = vd.visiting(values, dim, self.high_temperature)
78
+ # Make sure that the first component has changed
79
+ assert_equal(np.not_equal(x_step_high[0], 0), True)
80
+
81
+ @pytest.mark.parametrize('qv', [2.25, 2.62, 2.9])
82
+ def test_visiting_dist_high_temperature(self, qv):
83
+ lu = list(zip(*self.ld_bounds))
84
+ lower = np.array(lu[0])
85
+ upper = np.array(lu[1])
86
+ vd = VisitingDistribution(lower, upper, qv, self.rs)
87
+ # values = np.zeros(self.nbtestvalues)
88
+ # for i in np.arange(self.nbtestvalues):
89
+ # values[i] = vd.visit_fn(self.high_temperature)
90
+ values = vd.visit_fn(self.high_temperature, self.nbtestvalues)
91
+
92
+ # Visiting distribution is a distorted version of Cauchy-Lorentz
93
+ # distribution, and has no 1st or higher moments (no mean defined,
94
+ # no variance defined).
95
+ # Check that big-tail values are generated
96
+ assert_array_less(np.min(values), 1e-10)
97
+ assert_array_less(1e+10, np.max(values))
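The heavy-tail behaviour asserted above is the same qualitative property as the standard Cauchy distribution, which likewise has no defined mean or variance; a quick NumPy illustration (not the scipy visiting distribution itself):

```python
import numpy as np

rng = np.random.default_rng(0)
samples = rng.standard_cauchy(5000)
# Extreme values many orders of magnitude from the median show up,
# which is why the test checks both tails rather than any moment.
print(np.median(samples), samples.min(), samples.max())
```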
98
+
99
+ def test_reset(self):
100
+ owf = ObjectiveFunWrapper(self.weirdfunc)
101
+ lu = list(zip(*self.ld_bounds))
102
+ lower = np.array(lu[0])
103
+ upper = np.array(lu[1])
104
+ es = EnergyState(lower, upper)
105
+ assert_raises(ValueError, es.reset, owf, check_random_state(None))
106
+
107
+ def test_low_dim(self):
108
+ ret = dual_annealing(
109
+ self.func, self.ld_bounds, seed=self.seed)
110
+ assert_allclose(ret.fun, 0., atol=1e-12)
111
+ assert ret.success
112
+
113
+ def test_high_dim(self):
114
+ ret = dual_annealing(self.func, self.hd_bounds, seed=self.seed)
115
+ assert_allclose(ret.fun, 0., atol=1e-12)
116
+ assert ret.success
117
+
118
+ def test_low_dim_no_ls(self):
119
+ ret = dual_annealing(self.func, self.ld_bounds,
120
+ no_local_search=True, seed=self.seed)
121
+ assert_allclose(ret.fun, 0., atol=1e-4)
122
+
123
+ def test_high_dim_no_ls(self):
124
+ ret = dual_annealing(self.func, self.hd_bounds,
125
+ no_local_search=True, seed=self.seed)
126
+ assert_allclose(ret.fun, 0., atol=1e-4)
127
+
128
+ def test_nb_fun_call(self):
129
+ ret = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
130
+ assert_equal(self.nb_fun_call, ret.nfev)
131
+
132
+ def test_nb_fun_call_no_ls(self):
133
+ ret = dual_annealing(self.func, self.ld_bounds,
134
+ no_local_search=True, seed=self.seed)
135
+ assert_equal(self.nb_fun_call, ret.nfev)
136
+
137
+ def test_max_reinit(self):
138
+ assert_raises(ValueError, dual_annealing, self.weirdfunc,
139
+ self.ld_bounds)
140
+
141
+ def test_reproduce(self):
142
+ res1 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
143
+ res2 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
144
+ res3 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
145
+ # If we have reproducible results, the x components found have to
146
+ # be exactly the same, which is not the case with no seeding
147
+ assert_equal(res1.x, res2.x)
148
+ assert_equal(res1.x, res3.x)
149
+
150
+ def test_rand_gen(self):
151
+ # check that np.random.Generator can be used (numpy >= 1.17)
152
+ # obtain a np.random.Generator object
153
+ rng = np.random.default_rng(1)
154
+
155
+ res1 = dual_annealing(self.func, self.ld_bounds, seed=rng)
156
+ # seed again
157
+ rng = np.random.default_rng(1)
158
+ res2 = dual_annealing(self.func, self.ld_bounds, seed=rng)
159
+ # If we have reproducible results, the x components found have to
160
+ # be exactly the same, which is not the case with no seeding
161
+ assert_equal(res1.x, res2.x)
162
+
163
+ def test_bounds_integrity(self):
164
+ wrong_bounds = [(-5.12, 5.12), (1, 0), (5.12, 5.12)]
165
+ assert_raises(ValueError, dual_annealing, self.func,
166
+ wrong_bounds)
167
+
168
+ def test_bound_validity(self):
169
+ invalid_bounds = [(-5, 5), (-np.inf, 0), (-5, 5)]
170
+ assert_raises(ValueError, dual_annealing, self.func,
171
+ invalid_bounds)
172
+ invalid_bounds = [(-5, 5), (0, np.inf), (-5, 5)]
173
+ assert_raises(ValueError, dual_annealing, self.func,
174
+ invalid_bounds)
175
+ invalid_bounds = [(-5, 5), (0, np.nan), (-5, 5)]
176
+ assert_raises(ValueError, dual_annealing, self.func,
177
+ invalid_bounds)
178
+
179
+ def test_deprecated_local_search_options_bounds(self):
180
+ def func(x):
181
+ return np.sum((x - 5) * (x - 1))
182
+ bounds = list(zip([-6, -5], [6, 5]))
183
+ # Test bounds can be passed (see gh-10831)
184
+
185
+ with pytest.warns(RuntimeWarning, match=r"Method CG cannot handle "):
186
+ dual_annealing(
187
+ func,
188
+ bounds=bounds,
189
+ minimizer_kwargs={"method": "CG", "bounds": bounds})
190
+
191
+ def test_minimizer_kwargs_bounds(self):
192
+ def func(x):
193
+ return np.sum((x - 5) * (x - 1))
194
+ bounds = list(zip([-6, -5], [6, 5]))
195
+ # Test bounds can be passed (see gh-10831)
196
+ dual_annealing(
197
+ func,
198
+ bounds=bounds,
199
+ minimizer_kwargs={"method": "SLSQP", "bounds": bounds})
200
+
201
+ with pytest.warns(RuntimeWarning, match=r"Method CG cannot handle "):
202
+ dual_annealing(
203
+ func,
204
+ bounds=bounds,
205
+ minimizer_kwargs={"method": "CG", "bounds": bounds})
206
+
207
+ def test_max_fun_ls(self):
208
+ ret = dual_annealing(self.func, self.ld_bounds, maxfun=100,
209
+ seed=self.seed)
210
+
211
+ ls_max_iter = min(max(
212
+ len(self.ld_bounds) * LocalSearchWrapper.LS_MAXITER_RATIO,
213
+ LocalSearchWrapper.LS_MAXITER_MIN),
214
+ LocalSearchWrapper.LS_MAXITER_MAX)
215
+ assert ret.nfev <= 100 + ls_max_iter
216
+ assert not ret.success
217
+
218
+ def test_max_fun_no_ls(self):
219
+ ret = dual_annealing(self.func, self.ld_bounds,
220
+ no_local_search=True, maxfun=500, seed=self.seed)
221
+ assert ret.nfev <= 500
222
+ assert not ret.success
223
+
224
+ def test_maxiter(self):
225
+ ret = dual_annealing(self.func, self.ld_bounds, maxiter=700,
226
+ seed=self.seed)
227
+ assert ret.nit <= 700
228
+
229
+ # Testing that args are passed correctly for dual_annealing
230
+ def test_fun_args_ls(self):
231
+ ret = dual_annealing(self.func, self.ld_bounds,
232
+ args=((3.14159,)), seed=self.seed)
233
+ assert_allclose(ret.fun, 3.14159, atol=1e-6)
234
+
235
+ # Testing that args are passed correctly for pure simulated annealing
236
+ def test_fun_args_no_ls(self):
237
+ ret = dual_annealing(self.func, self.ld_bounds,
238
+ args=((3.14159, )), no_local_search=True,
239
+ seed=self.seed)
240
+ assert_allclose(ret.fun, 3.14159, atol=1e-4)
241
+
242
+ def test_callback_stop(self):
243
+ # Testing that the callback makes the algorithm stop for
244
+ # fun value <= 1.0 (see callback method)
245
+ ret = dual_annealing(self.func, self.ld_bounds,
246
+ callback=self.callback, seed=self.seed)
247
+ assert ret.fun <= 1.0
248
+ assert 'stop early' in ret.message[0]
249
+ assert not ret.success
250
+
251
+ @pytest.mark.parametrize('method, atol', [
252
+ ('Nelder-Mead', 2e-5),
253
+ ('COBYLA', 1e-5),
254
+ ('Powell', 1e-8),
255
+ ('CG', 1e-8),
256
+ ('BFGS', 1e-8),
257
+ ('TNC', 1e-8),
258
+ ('SLSQP', 2e-7),
259
+ ])
260
+ def test_multi_ls_minimizer(self, method, atol):
261
+ ret = dual_annealing(self.func, self.ld_bounds,
262
+ minimizer_kwargs=dict(method=method),
263
+ seed=self.seed)
264
+ assert_allclose(ret.fun, 0., atol=atol)
265
+
266
+ def test_wrong_restart_temp(self):
267
+ assert_raises(ValueError, dual_annealing, self.func,
268
+ self.ld_bounds, restart_temp_ratio=1)
269
+ assert_raises(ValueError, dual_annealing, self.func,
270
+ self.ld_bounds, restart_temp_ratio=0)
271
+
272
+ def test_gradient_gnev(self):
273
+ minimizer_opts = {
274
+ 'jac': self.rosen_der_wrapper,
275
+ }
276
+ ret = dual_annealing(rosen, self.ld_bounds,
277
+ minimizer_kwargs=minimizer_opts,
278
+ seed=self.seed)
279
+ assert ret.njev == self.ngev
280
+
281
+ def test_from_docstring(self):
282
+ def func(x):
283
+ return np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x)
284
+ lw = [-5.12] * 10
285
+ up = [5.12] * 10
286
+ ret = dual_annealing(func, bounds=list(zip(lw, up)), seed=1234)
287
+ assert_allclose(ret.x,
288
+ [-4.26437714e-09, -3.91699361e-09, -1.86149218e-09,
289
+ -3.97165720e-09, -6.29151648e-09, -6.53145322e-09,
290
+ -3.93616815e-09, -6.55623025e-09, -6.05775280e-09,
291
+ -5.00668935e-09], atol=4e-8)
292
+ assert_allclose(ret.fun, 0.000000, atol=5e-13)
293
+
294
+ @pytest.mark.parametrize('new_e, temp_step, accepted, accept_rate', [
295
+ (0, 100, 1000, 1.0097587941791923),
296
+ (0, 2, 1000, 1.2599210498948732),
297
+ (10, 100, 878, 0.8786035869128718),
298
+ (10, 60, 695, 0.6812920690579612),
299
+ (2, 100, 990, 0.9897404249173424),
300
+ ])
301
+ def test_accept_reject_probabilistic(
302
+ self, new_e, temp_step, accepted, accept_rate):
303
+ # Test that moves are accepted unconditionally when e < current_energy
304
+ # and probabilistically when e > current_energy
305
+
306
+ rs = check_random_state(123)
307
+
308
+ count_accepted = 0
309
+ iterations = 1000
310
+
311
+ accept_param = -5
312
+ current_energy = 1
313
+ for _ in range(iterations):
314
+ energy_state = EnergyState(lower=None, upper=None)
315
+ # Set energy state with current_energy, any location.
316
+ energy_state.update_current(current_energy, [0])
317
+
318
+ chain = StrategyChain(
319
+ accept_param, None, None, None, rs, energy_state)
320
+ # Normally this is set in run()
321
+ chain.temperature_step = temp_step
322
+
323
+ # Check if update is accepted.
324
+ chain.accept_reject(j=1, e=new_e, x_visit=[2])
325
+ if energy_state.current_energy == new_e:
326
+ count_accepted += 1
327
+
328
+ assert count_accepted == accepted
329
+
330
+ # Check accept rate
331
+ pqv = 1 - (1 - accept_param) * (new_e - current_energy) / temp_step
332
+ rate = 0 if pqv <= 0 else np.exp(np.log(pqv) / (1 - accept_param))
333
+
334
+ assert_allclose(rate, accept_rate)
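In symbols, the rate computed above is the generalized acceptance probability with acceptance parameter $q_a$, energy rise $\Delta e = e - e_c$ and temperature step $T$:

```latex
p_{q_a} = \Bigl[\, 1 - (1 - q_a)\,\frac{\Delta e}{T} \,\Bigr]^{1/(1-q_a)},
\qquad p_{q_a} = 0 \ \text{when the bracket is } \le 0 .
```

Values above 1, as in the first two parametrized cases, simply mean the move is always accepted; that happens here because $\Delta e < 0$.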
335
+
336
+ def test_bounds_class(self):
337
+ # test that result does not depend on the bounds type
338
+ def func(x):
339
+ f = np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x)
340
+ return f
341
+ lw = [-5.12] * 5
342
+ up = [5.12] * 5
343
+
344
+ # Unbounded global minimum is all zeros. Most bounds below will force
345
+ # a DV away from unbounded minimum and be active at solution.
346
+ up[0] = -2.0
347
+ up[1] = -1.0
348
+ lw[3] = 1.0
349
+ lw[4] = 2.0
350
+
351
+ # run optimizations
352
+ bounds = Bounds(lw, up)
353
+ ret_bounds_class = dual_annealing(func, bounds=bounds, seed=1234)
354
+
355
+ bounds_old = list(zip(lw, up))
356
+ ret_bounds_list = dual_annealing(func, bounds=bounds_old, seed=1234)
357
+
358
+ # test that found minima, function evaluations and iterations match
359
+ assert_allclose(ret_bounds_class.x, ret_bounds_list.x, atol=1e-8)
360
+ assert_allclose(ret_bounds_class.x, np.arange(-2, 3), atol=1e-7)
361
+ assert_allclose(ret_bounds_list.fun, ret_bounds_class.fun, atol=1e-9)
362
+ assert ret_bounds_list.nfev == ret_bounds_class.nfev
363
+
364
+ def test_callable_jac_with_args_gh11052(self):
365
+ # dual_annealing used to fail when `jac` was callable and `args` were
366
+ # used; check that this is resolved. Example is from gh-11052.
367
+ rng = np.random.default_rng(94253637693657847462)
368
+ def f(x, power):
369
+ return np.sum(np.exp(x ** power))
370
+
371
+ def jac(x, power):
372
+ return np.exp(x ** power) * power * x ** (power - 1)
373
+
374
+ res1 = dual_annealing(f, args=(2, ), bounds=[[0, 1], [0, 1]], seed=rng,
375
+ minimizer_kwargs=dict(method='L-BFGS-B'))
376
+ res2 = dual_annealing(f, args=(2, ), bounds=[[0, 1], [0, 1]], seed=rng,
377
+ minimizer_kwargs=dict(method='L-BFGS-B',
378
+ jac=jac))
379
+ assert_allclose(res1.fun, res2.fun, rtol=1e-6)
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test__linprog_clean_inputs.py ADDED
@@ -0,0 +1,310 @@
1
+ """
2
+ Unit tests for the cleaning of linear programming inputs (_clean_inputs).
3
+ """
4
+ import numpy as np
5
+ from numpy.testing import assert_, assert_allclose, assert_equal
6
+ from pytest import raises as assert_raises
7
+ from scipy.optimize._linprog_util import _clean_inputs, _LPProblem
8
+ from scipy._lib._util import VisibleDeprecationWarning
9
+ from copy import deepcopy
10
+ from datetime import date
11
+
12
+
13
+ def test_aliasing():
14
+ """
15
+ Test for ensuring that no objects referred to by `lp` attributes,
16
+ `c`, `A_ub`, `b_ub`, `A_eq`, `b_eq`, `bounds`, have been modified
17
+ by `_clean_inputs` as a side effect.
18
+ """
19
+ lp = _LPProblem(
20
+ c=1,
21
+ A_ub=[[1]],
22
+ b_ub=[1],
23
+ A_eq=[[1]],
24
+ b_eq=[1],
25
+ bounds=(-np.inf, np.inf)
26
+ )
27
+ lp_copy = deepcopy(lp)
28
+
29
+ _clean_inputs(lp)
30
+
31
+ assert_(lp.c == lp_copy.c, "c modified by _clean_inputs")
32
+ assert_(lp.A_ub == lp_copy.A_ub, "A_ub modified by _clean_inputs")
33
+ assert_(lp.b_ub == lp_copy.b_ub, "b_ub modified by _clean_inputs")
34
+ assert_(lp.A_eq == lp_copy.A_eq, "A_eq modified by _clean_inputs")
35
+ assert_(lp.b_eq == lp_copy.b_eq, "b_eq modified by _clean_inputs")
36
+ assert_(lp.bounds == lp_copy.bounds, "bounds modified by _clean_inputs")
37
+
38
+
39
+ def test_aliasing2():
40
+ """
41
+ Similar purpose as `test_aliasing` above.
42
+ """
43
+ lp = _LPProblem(
44
+ c=np.array([1, 1]),
45
+ A_ub=np.array([[1, 1], [2, 2]]),
46
+ b_ub=np.array([[1], [1]]),
47
+ A_eq=np.array([[1, 1]]),
48
+ b_eq=np.array([1]),
49
+ bounds=[(-np.inf, np.inf), (None, 1)]
50
+ )
51
+ lp_copy = deepcopy(lp)
52
+
53
+ _clean_inputs(lp)
54
+
55
+ assert_allclose(lp.c, lp_copy.c, err_msg="c modified by _clean_inputs")
56
+ assert_allclose(lp.A_ub, lp_copy.A_ub, err_msg="A_ub modified by _clean_inputs")
57
+ assert_allclose(lp.b_ub, lp_copy.b_ub, err_msg="b_ub modified by _clean_inputs")
58
+ assert_allclose(lp.A_eq, lp_copy.A_eq, err_msg="A_eq modified by _clean_inputs")
59
+ assert_allclose(lp.b_eq, lp_copy.b_eq, err_msg="b_eq modified by _clean_inputs")
60
+ assert_(lp.bounds == lp_copy.bounds, "bounds modified by _clean_inputs")
61
+
62
+
63
+ def test_missing_inputs():
64
+ c = [1, 2]
65
+ A_ub = np.array([[1, 1], [2, 2]])
66
+ b_ub = np.array([1, 1])
67
+ A_eq = np.array([[1, 1], [2, 2]])
68
+ b_eq = np.array([1, 1])
69
+
70
+ assert_raises(TypeError, _clean_inputs)
71
+ assert_raises(TypeError, _clean_inputs, _LPProblem(c=None))
72
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=A_ub))
73
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=A_ub, b_ub=None))
74
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, b_ub=b_ub))
75
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=None, b_ub=b_ub))
76
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=A_eq))
77
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=A_eq, b_eq=None))
78
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, b_eq=b_eq))
79
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=None, b_eq=b_eq))
80
+
81
+
82
+ def test_too_many_dimensions():
83
+ cb = [1, 2, 3, 4]
84
+ A = np.random.rand(4, 4)
85
+ bad2D = [[1, 2], [3, 4]]
86
+ bad3D = np.random.rand(4, 4, 4)
87
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=bad2D, A_ub=A, b_ub=cb))
88
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=bad3D, b_ub=cb))
89
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=A, b_ub=bad2D))
90
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=bad3D, b_eq=cb))
91
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=A, b_eq=bad2D))
92
+
93
+
94
+ def test_too_few_dimensions():
95
+ bad = np.random.rand(4, 4).ravel()
96
+ cb = np.random.rand(4)
97
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=bad, b_ub=cb))
98
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=bad, b_eq=cb))
99
+
100
+
101
+ def test_inconsistent_dimensions():
102
+ m = 2
103
+ n = 4
104
+ c = [1, 2, 3, 4]
105
+
106
+ Agood = np.random.rand(m, n)
107
+ Abad = np.random.rand(m, n + 1)
108
+ bgood = np.random.rand(m)
109
+ bbad = np.random.rand(m + 1)
110
+ boundsbad = [(0, 1)] * (n + 1)
111
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=Abad, b_ub=bgood))
112
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=Agood, b_ub=bbad))
113
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=Abad, b_eq=bgood))
114
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=Agood, b_eq=bbad))
115
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, bounds=boundsbad))
116
+ with np.testing.suppress_warnings() as sup:
117
+ sup.filter(VisibleDeprecationWarning, "Creating an ndarray from ragged")
118
+ assert_raises(ValueError, _clean_inputs,
119
+ _LPProblem(c=c, bounds=[[1, 2], [2, 3], [3, 4], [4, 5, 6]]))
120
+
121
+
122
+ def test_type_errors():
123
+ lp = _LPProblem(
124
+ c=[1, 2],
125
+ A_ub=np.array([[1, 1], [2, 2]]),
126
+ b_ub=np.array([1, 1]),
127
+ A_eq=np.array([[1, 1], [2, 2]]),
128
+ b_eq=np.array([1, 1]),
129
+ bounds=[(0, 1)]
130
+ )
131
+ bad = "hello"
132
+
133
+ assert_raises(TypeError, _clean_inputs, lp._replace(c=bad))
134
+ assert_raises(TypeError, _clean_inputs, lp._replace(A_ub=bad))
135
+ assert_raises(TypeError, _clean_inputs, lp._replace(b_ub=bad))
136
+ assert_raises(TypeError, _clean_inputs, lp._replace(A_eq=bad))
137
+ assert_raises(TypeError, _clean_inputs, lp._replace(b_eq=bad))
138
+
139
+ assert_raises(ValueError, _clean_inputs, lp._replace(bounds=bad))
140
+ assert_raises(ValueError, _clean_inputs, lp._replace(bounds="hi"))
141
+ assert_raises(ValueError, _clean_inputs, lp._replace(bounds=["hi"]))
142
+ assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[("hi")]))
143
+ assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, "")]))
144
+ assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2), (1, "")]))
145
+ assert_raises(TypeError, _clean_inputs,
146
+ lp._replace(bounds=[(1, date(2020, 2, 29))]))
147
+ assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[[[1, 2]]]))
148
+
149
+
150
+ def test_non_finite_errors():
151
+ lp = _LPProblem(
152
+ c=[1, 2],
153
+ A_ub=np.array([[1, 1], [2, 2]]),
154
+ b_ub=np.array([1, 1]),
155
+ A_eq=np.array([[1, 1], [2, 2]]),
156
+ b_eq=np.array([1, 1]),
157
+ bounds=[(0, 1)]
158
+ )
159
+ assert_raises(ValueError, _clean_inputs, lp._replace(c=[0, None]))
160
+ assert_raises(ValueError, _clean_inputs, lp._replace(c=[np.inf, 0]))
161
+ assert_raises(ValueError, _clean_inputs, lp._replace(c=[0, -np.inf]))
162
+ assert_raises(ValueError, _clean_inputs, lp._replace(c=[np.nan, 0]))
163
+
164
+ assert_raises(ValueError, _clean_inputs, lp._replace(A_ub=[[1, 2], [None, 1]]))
165
+ assert_raises(ValueError, _clean_inputs, lp._replace(b_ub=[np.inf, 1]))
166
+ assert_raises(ValueError, _clean_inputs, lp._replace(A_eq=[[1, 2], [1, -np.inf]]))
167
+ assert_raises(ValueError, _clean_inputs, lp._replace(b_eq=[1, np.nan]))
168
+
169
+
170
+ def test__clean_inputs1():
171
+ lp = _LPProblem(
172
+ c=[1, 2],
173
+ A_ub=[[1, 1], [2, 2]],
174
+ b_ub=[1, 1],
175
+ A_eq=[[1, 1], [2, 2]],
176
+ b_eq=[1, 1],
177
+ bounds=None
178
+ )
179
+
180
+ lp_cleaned = _clean_inputs(lp)
181
+
182
+ assert_allclose(lp_cleaned.c, np.array(lp.c))
183
+ assert_allclose(lp_cleaned.A_ub, np.array(lp.A_ub))
184
+ assert_allclose(lp_cleaned.b_ub, np.array(lp.b_ub))
185
+ assert_allclose(lp_cleaned.A_eq, np.array(lp.A_eq))
186
+ assert_allclose(lp_cleaned.b_eq, np.array(lp.b_eq))
187
+ assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2)
188
+
189
+ assert_(lp_cleaned.c.shape == (2,), "")
190
+ assert_(lp_cleaned.A_ub.shape == (2, 2), "")
191
+ assert_(lp_cleaned.b_ub.shape == (2,), "")
192
+ assert_(lp_cleaned.A_eq.shape == (2, 2), "")
193
+ assert_(lp_cleaned.b_eq.shape == (2,), "")
194
+
195
+
196
+ def test__clean_inputs2():
197
+ lp = _LPProblem(
198
+ c=1,
199
+ A_ub=[[1]],
200
+ b_ub=1,
201
+ A_eq=[[1]],
202
+ b_eq=1,
203
+ bounds=(0, 1)
204
+ )
205
+
206
+ lp_cleaned = _clean_inputs(lp)
207
+
208
+ assert_allclose(lp_cleaned.c, np.array(lp.c))
209
+ assert_allclose(lp_cleaned.A_ub, np.array(lp.A_ub))
210
+ assert_allclose(lp_cleaned.b_ub, np.array(lp.b_ub))
211
+ assert_allclose(lp_cleaned.A_eq, np.array(lp.A_eq))
212
+ assert_allclose(lp_cleaned.b_eq, np.array(lp.b_eq))
213
+ assert_equal(lp_cleaned.bounds, [(0, 1)])
214
+
215
+ assert_(lp_cleaned.c.shape == (1,), "")
216
+ assert_(lp_cleaned.A_ub.shape == (1, 1), "")
217
+ assert_(lp_cleaned.b_ub.shape == (1,), "")
218
+ assert_(lp_cleaned.A_eq.shape == (1, 1), "")
219
+ assert_(lp_cleaned.b_eq.shape == (1,), "")
220
+
221
+
222
+ def test__clean_inputs3():
223
+ lp = _LPProblem(
224
+ c=[[1, 2]],
225
+ A_ub=np.random.rand(2, 2),
226
+ b_ub=[[1], [2]],
227
+ A_eq=np.random.rand(2, 2),
228
+ b_eq=[[1], [2]],
229
+ bounds=[(0, 1)]
230
+ )
231
+
232
+ lp_cleaned = _clean_inputs(lp)
233
+
234
+ assert_allclose(lp_cleaned.c, np.array([1, 2]))
235
+ assert_allclose(lp_cleaned.b_ub, np.array([1, 2]))
236
+ assert_allclose(lp_cleaned.b_eq, np.array([1, 2]))
237
+ assert_equal(lp_cleaned.bounds, [(0, 1)] * 2)
238
+
239
+ assert_(lp_cleaned.c.shape == (2,), "")
240
+ assert_(lp_cleaned.b_ub.shape == (2,), "")
241
+ assert_(lp_cleaned.b_eq.shape == (2,), "")
242
+
243
+
244
+ def test_bad_bounds():
245
+ lp = _LPProblem(c=[1, 2])
246
+
247
+ assert_raises(ValueError, _clean_inputs, lp._replace(bounds=(1, 2, 2)))
248
+ assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2, 2)]))
249
+ with np.testing.suppress_warnings() as sup:
250
+ sup.filter(VisibleDeprecationWarning, "Creating an ndarray from ragged")
251
+ assert_raises(ValueError, _clean_inputs,
252
+ lp._replace(bounds=[(1, 2), (1, 2, 2)]))
253
+ assert_raises(ValueError, _clean_inputs,
254
+ lp._replace(bounds=[(1, 2), (1, 2), (1, 2)]))
255
+
256
+ lp = _LPProblem(c=[1, 2, 3, 4])
257
+
258
+ assert_raises(ValueError, _clean_inputs,
259
+ lp._replace(bounds=[(1, 2, 3, 4), (1, 2, 3, 4)]))
260
+
261
+
262
+ def test_good_bounds():
263
+ lp = _LPProblem(c=[1, 2])
264
+
265
+ lp_cleaned = _clean_inputs(lp) # lp.bounds is None by default
266
+ assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2)
267
+
268
+ lp_cleaned = _clean_inputs(lp._replace(bounds=[]))
269
+ assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2)
270
+
271
+ lp_cleaned = _clean_inputs(lp._replace(bounds=[[]]))
272
+ assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2)
273
+
274
+ lp_cleaned = _clean_inputs(lp._replace(bounds=(1, 2)))
275
+ assert_equal(lp_cleaned.bounds, [(1, 2)] * 2)
276
+
277
+ lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, 2)]))
278
+ assert_equal(lp_cleaned.bounds, [(1, 2)] * 2)
279
+
280
+ lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, None)]))
281
+ assert_equal(lp_cleaned.bounds, [(1, np.inf)] * 2)
282
+
283
+ lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, 1)]))
284
+ assert_equal(lp_cleaned.bounds, [(-np.inf, 1)] * 2)
285
+
286
+ lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, None), (-np.inf, None)]))
287
+ assert_equal(lp_cleaned.bounds, [(-np.inf, np.inf)] * 2)
288
+
289
+ lp = _LPProblem(c=[1, 2, 3, 4])
290
+
291
+ lp_cleaned = _clean_inputs(lp) # lp.bounds is None by default
292
+ assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 4)
293
+
294
+ lp_cleaned = _clean_inputs(lp._replace(bounds=(1, 2)))
295
+ assert_equal(lp_cleaned.bounds, [(1, 2)] * 4)
296
+
297
+ lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, 2)]))
298
+ assert_equal(lp_cleaned.bounds, [(1, 2)] * 4)
299
+
300
+ lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, None)]))
301
+ assert_equal(lp_cleaned.bounds, [(1, np.inf)] * 4)
302
+
303
+ lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, 1)]))
304
+ assert_equal(lp_cleaned.bounds, [(-np.inf, 1)] * 4)
305
+
306
+ lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, None),
307
+ (-np.inf, None),
308
+ (None, np.inf),
309
+ (-np.inf, np.inf)]))
310
+ assert_equal(lp_cleaned.bounds, [(-np.inf, np.inf)] * 4)
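A compact restatement of the broadcasting convention the tests above verify: `None` endpoints map to the appropriate infinity, and a single `(lb, ub)` pair is repeated for every variable. A sketch using the same private helpers exercised here:

```python
import numpy as np
from scipy.optimize._linprog_util import _clean_inputs, _LPProblem

lp = _LPProblem(c=[1, 2, 3])  # three variables; other fields default
cleaned = _clean_inputs(lp._replace(bounds=[(None, 4)]))
print(cleaned.bounds)  # equivalent to [(-np.inf, 4.0)] * 3
```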
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test__numdiff.py ADDED
@@ -0,0 +1,815 @@
1
+ import math
2
+ from itertools import product
3
+
4
+ import numpy as np
5
+ from numpy.testing import assert_allclose, assert_equal, assert_
6
+ from pytest import raises as assert_raises
7
+
8
+ from scipy.sparse import csr_matrix, csc_matrix, lil_matrix
9
+
10
+ from scipy.optimize._numdiff import (
11
+ _adjust_scheme_to_bounds, approx_derivative, check_derivative,
12
+ group_columns, _eps_for_method, _compute_absolute_step)
13
+
14
+
15
+ def test_group_columns():
16
+ structure = [
17
+ [1, 1, 0, 0, 0, 0],
18
+ [1, 1, 1, 0, 0, 0],
19
+ [0, 1, 1, 1, 0, 0],
20
+ [0, 0, 1, 1, 1, 0],
21
+ [0, 0, 0, 1, 1, 1],
22
+ [0, 0, 0, 0, 1, 1],
23
+ [0, 0, 0, 0, 0, 0]
24
+ ]
25
+ for transform in [np.asarray, csr_matrix, csc_matrix, lil_matrix]:
26
+ A = transform(structure)
27
+ order = np.arange(6)
28
+ groups_true = np.array([0, 1, 2, 0, 1, 2])
29
+ groups = group_columns(A, order)
30
+ assert_equal(groups, groups_true)
31
+
32
+ order = [1, 2, 4, 3, 5, 0]
33
+ groups_true = np.array([2, 0, 1, 2, 0, 1])
34
+ groups = group_columns(A, order)
35
+ assert_equal(groups, groups_true)
36
+
37
+ # Test repeatability.
38
+ groups_1 = group_columns(A)
39
+ groups_2 = group_columns(A)
40
+ assert_equal(groups_1, groups_2)
41
+
42
+
43
+ def test_correct_fp_eps():
44
+ # check that relative step size is correct for FP size
45
+ EPS = np.finfo(np.float64).eps
46
+ relative_step = {"2-point": EPS**0.5,
47
+ "3-point": EPS**(1/3),
48
+ "cs": EPS**0.5}
49
+ for method in ['2-point', '3-point', 'cs']:
50
+ assert_allclose(
51
+ _eps_for_method(np.float64, np.float64, method),
52
+ relative_step[method])
53
+ assert_allclose(
54
+ _eps_for_method(np.complex128, np.complex128, method),
55
+ relative_step[method]
56
+ )
57
+
58
+ # check another FP size
59
+ EPS = np.finfo(np.float32).eps
60
+ relative_step = {"2-point": EPS**0.5,
61
+ "3-point": EPS**(1/3),
62
+ "cs": EPS**0.5}
63
+
64
+ for method in ['2-point', '3-point', 'cs']:
65
+ assert_allclose(
66
+ _eps_for_method(np.float64, np.float32, method),
67
+ relative_step[method]
68
+ )
69
+ assert_allclose(
70
+ _eps_for_method(np.float32, np.float64, method),
71
+ relative_step[method]
72
+ )
73
+ assert_allclose(
74
+ _eps_for_method(np.float32, np.float32, method),
75
+ relative_step[method]
76
+ )
77
+
78
+
79
+ class TestAdjustSchemeToBounds:
80
+ def test_no_bounds(self):
81
+ x0 = np.zeros(3)
82
+ h = np.full(3, 1e-2)
83
+ inf_lower = np.empty_like(x0)
84
+ inf_upper = np.empty_like(x0)
85
+ inf_lower.fill(-np.inf)
86
+ inf_upper.fill(np.inf)
87
+
88
+ h_adjusted, one_sided = _adjust_scheme_to_bounds(
89
+ x0, h, 1, '1-sided', inf_lower, inf_upper)
90
+ assert_allclose(h_adjusted, h)
91
+ assert_(np.all(one_sided))
92
+
93
+ h_adjusted, one_sided = _adjust_scheme_to_bounds(
94
+ x0, h, 2, '1-sided', inf_lower, inf_upper)
95
+ assert_allclose(h_adjusted, h)
96
+ assert_(np.all(one_sided))
97
+
98
+ h_adjusted, one_sided = _adjust_scheme_to_bounds(
99
+ x0, h, 1, '2-sided', inf_lower, inf_upper)
100
+ assert_allclose(h_adjusted, h)
101
+ assert_(np.all(~one_sided))
102
+
103
+ h_adjusted, one_sided = _adjust_scheme_to_bounds(
104
+ x0, h, 2, '2-sided', inf_lower, inf_upper)
105
+ assert_allclose(h_adjusted, h)
106
+ assert_(np.all(~one_sided))
107
+
108
+ def test_with_bound(self):
109
+ x0 = np.array([0.0, 0.85, -0.85])
110
+ lb = -np.ones(3)
111
+ ub = np.ones(3)
112
+ h = np.array([1, 1, -1]) * 1e-1
113
+
114
+ h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub)
115
+ assert_allclose(h_adjusted, h)
116
+
117
+ h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, ub)
118
+ assert_allclose(h_adjusted, np.array([1, -1, 1]) * 1e-1)
119
+
120
+ h_adjusted, one_sided = _adjust_scheme_to_bounds(
121
+ x0, h, 1, '2-sided', lb, ub)
122
+ assert_allclose(h_adjusted, np.abs(h))
123
+ assert_(np.all(~one_sided))
124
+
125
+ h_adjusted, one_sided = _adjust_scheme_to_bounds(
126
+ x0, h, 2, '2-sided', lb, ub)
127
+ assert_allclose(h_adjusted, np.array([1, -1, 1]) * 1e-1)
128
+ assert_equal(one_sided, np.array([False, True, True]))
129
+
130
+ def test_tight_bounds(self):
131
+ lb = np.array([-0.03, -0.03])
132
+ ub = np.array([0.05, 0.05])
133
+ x0 = np.array([0.0, 0.03])
134
+ h = np.array([-0.1, -0.1])
135
+
136
+ h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub)
137
+ assert_allclose(h_adjusted, np.array([0.05, -0.06]))
138
+
139
+ h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, ub)
140
+ assert_allclose(h_adjusted, np.array([0.025, -0.03]))
141
+
142
+ h_adjusted, one_sided = _adjust_scheme_to_bounds(
143
+ x0, h, 1, '2-sided', lb, ub)
144
+ assert_allclose(h_adjusted, np.array([0.03, -0.03]))
145
+ assert_equal(one_sided, np.array([False, True]))
146
+
147
+ h_adjusted, one_sided = _adjust_scheme_to_bounds(
148
+ x0, h, 2, '2-sided', lb, ub)
149
+ assert_allclose(h_adjusted, np.array([0.015, -0.015]))
150
+ assert_equal(one_sided, np.array([False, True]))
151
+
152
+
153
+ class TestApproxDerivativesDense:
154
+ def fun_scalar_scalar(self, x):
155
+ return np.sinh(x)
156
+
157
+ def jac_scalar_scalar(self, x):
158
+ return np.cosh(x)
159
+
160
+ def fun_scalar_vector(self, x):
161
+ return np.array([x[0]**2, np.tan(x[0]), np.exp(x[0])])
162
+
163
+ def jac_scalar_vector(self, x):
164
+ return np.array(
165
+ [2 * x[0], np.cos(x[0]) ** -2, np.exp(x[0])]).reshape(-1, 1)
166
+
167
+ def fun_vector_scalar(self, x):
168
+ return np.sin(x[0] * x[1]) * np.log(x[0])
169
+
170
+ def wrong_dimensions_fun(self, x):
171
+ return np.array([x**2, np.tan(x), np.exp(x)])
172
+
173
+ def jac_vector_scalar(self, x):
174
+ return np.array([
175
+ x[1] * np.cos(x[0] * x[1]) * np.log(x[0]) +
176
+ np.sin(x[0] * x[1]) / x[0],
177
+ x[0] * np.cos(x[0] * x[1]) * np.log(x[0])
178
+ ])
179
+
180
+ def fun_vector_vector(self, x):
181
+ return np.array([
182
+ x[0] * np.sin(x[1]),
183
+ x[1] * np.cos(x[0]),
184
+ x[0] ** 3 * x[1] ** -0.5
185
+ ])
186
+
187
+ def jac_vector_vector(self, x):
188
+ return np.array([
189
+ [np.sin(x[1]), x[0] * np.cos(x[1])],
190
+ [-x[1] * np.sin(x[0]), np.cos(x[0])],
191
+ [3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5]
192
+ ])
193
+
194
+ def fun_parametrized(self, x, c0, c1=1.0):
195
+ return np.array([np.exp(c0 * x[0]), np.exp(c1 * x[1])])
196
+
197
+ def jac_parametrized(self, x, c0, c1=0.1):
198
+ return np.array([
199
+ [c0 * np.exp(c0 * x[0]), 0],
200
+ [0, c1 * np.exp(c1 * x[1])]
201
+ ])
202
+
203
+ def fun_with_nan(self, x):
204
+ return x if np.abs(x) <= 1e-8 else np.nan
205
+
206
+ def jac_with_nan(self, x):
207
+ return 1.0 if np.abs(x) <= 1e-8 else np.nan
208
+
209
+ def fun_zero_jacobian(self, x):
210
+ return np.array([x[0] * x[1], np.cos(x[0] * x[1])])
211
+
212
+ def jac_zero_jacobian(self, x):
213
+ return np.array([
214
+ [x[1], x[0]],
215
+ [-x[1] * np.sin(x[0] * x[1]), -x[0] * np.sin(x[0] * x[1])]
216
+ ])
217
+
218
+ def jac_non_numpy(self, x):
219
+ # x can be a scalar or an array [val].
220
+ # Cast to true scalar before handing over to math.exp
221
+ xp = np.asarray(x).item()
222
+ return math.exp(xp)
223
+
224
+ def test_scalar_scalar(self):
225
+ x0 = 1.0
226
+ jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0,
227
+ method='2-point')
228
+ jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0)
229
+ jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0,
230
+ method='cs')
231
+ jac_true = self.jac_scalar_scalar(x0)
232
+ assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
233
+ assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
234
+ assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
235
+
236
+ def test_scalar_scalar_abs_step(self):
237
+ # can approx_derivative use abs_step?
238
+ x0 = 1.0
239
+ jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0,
240
+ method='2-point', abs_step=1.49e-8)
241
+ jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0,
242
+ abs_step=1.49e-8)
243
+ jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0,
244
+ method='cs', abs_step=1.49e-8)
245
+ jac_true = self.jac_scalar_scalar(x0)
246
+ assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
247
+ assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
248
+ assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
249
+
250
+ def test_scalar_vector(self):
251
+ x0 = 0.5
252
+ jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0,
253
+ method='2-point')
254
+ jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0)
255
+ jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0,
256
+ method='cs')
257
+ jac_true = self.jac_scalar_vector(np.atleast_1d(x0))
258
+ assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
259
+ assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
260
+ assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
261
+
262
+ def test_vector_scalar(self):
263
+ x0 = np.array([100.0, -0.5])
264
+ jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0,
265
+ method='2-point')
266
+ jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0)
267
+ jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0,
268
+ method='cs')
269
+ jac_true = self.jac_vector_scalar(x0)
270
+ assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
271
+ assert_allclose(jac_diff_3, jac_true, rtol=1e-7)
272
+ assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
273
+
274
+ def test_vector_scalar_abs_step(self):
275
+ # can approx_derivative use abs_step?
276
+ x0 = np.array([100.0, -0.5])
277
+ jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0,
278
+ method='2-point', abs_step=1.49e-8)
279
+ jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0,
280
+ abs_step=1.49e-8, rel_step=np.inf)
281
+ jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0,
282
+ method='cs', abs_step=1.49e-8)
283
+ jac_true = self.jac_vector_scalar(x0)
284
+ assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
285
+ assert_allclose(jac_diff_3, jac_true, rtol=3e-9)
286
+ assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
287
+
288
+ def test_vector_vector(self):
289
+ x0 = np.array([-100.0, 0.2])
290
+ jac_diff_2 = approx_derivative(self.fun_vector_vector, x0,
291
+ method='2-point')
292
+ jac_diff_3 = approx_derivative(self.fun_vector_vector, x0)
293
+ jac_diff_4 = approx_derivative(self.fun_vector_vector, x0,
294
+ method='cs')
295
+ jac_true = self.jac_vector_vector(x0)
296
+ assert_allclose(jac_diff_2, jac_true, rtol=1e-5)
297
+ assert_allclose(jac_diff_3, jac_true, rtol=1e-6)
298
+ assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
299
+
300
+ def test_wrong_dimensions(self):
301
+ x0 = 1.0
302
+ assert_raises(RuntimeError, approx_derivative,
303
+ self.wrong_dimensions_fun, x0)
304
+ f0 = self.wrong_dimensions_fun(np.atleast_1d(x0))
305
+ assert_raises(ValueError, approx_derivative,
306
+ self.wrong_dimensions_fun, x0, f0=f0)
307
+
308
+ def test_custom_rel_step(self):
309
+ x0 = np.array([-0.1, 0.1])
310
+ jac_diff_2 = approx_derivative(self.fun_vector_vector, x0,
311
+ method='2-point', rel_step=1e-4)
312
+ jac_diff_3 = approx_derivative(self.fun_vector_vector, x0,
313
+ rel_step=1e-4)
314
+ jac_true = self.jac_vector_vector(x0)
315
+ assert_allclose(jac_diff_2, jac_true, rtol=1e-2)
316
+ assert_allclose(jac_diff_3, jac_true, rtol=1e-4)
317
+
318
+ def test_options(self):
319
+ x0 = np.array([1.0, 1.0])
320
+ c0 = -1.0
321
+ c1 = 1.0
322
+ lb = 0.0
323
+ ub = 2.0
324
+ f0 = self.fun_parametrized(x0, c0, c1=c1)
325
+ rel_step = np.array([-1e-6, 1e-7])
326
+ jac_true = self.jac_parametrized(x0, c0, c1)
327
+ jac_diff_2 = approx_derivative(
328
+ self.fun_parametrized, x0, method='2-point', rel_step=rel_step,
329
+ f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub))
330
+ jac_diff_3 = approx_derivative(
331
+ self.fun_parametrized, x0, rel_step=rel_step,
332
+ f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub))
333
+ assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
334
+ assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
335
+
336
+ def test_with_bounds_2_point(self):
337
+ lb = -np.ones(2)
338
+ ub = np.ones(2)
339
+
340
+ x0 = np.array([-2.0, 0.2])
341
+ assert_raises(ValueError, approx_derivative,
342
+ self.fun_vector_vector, x0, bounds=(lb, ub))
343
+
344
+ x0 = np.array([-1.0, 1.0])
345
+ jac_diff = approx_derivative(self.fun_vector_vector, x0,
346
+ method='2-point', bounds=(lb, ub))
347
+ jac_true = self.jac_vector_vector(x0)
348
+ assert_allclose(jac_diff, jac_true, rtol=1e-6)
349
+
350
+ def test_with_bounds_3_point(self):
351
+ lb = np.array([1.0, 1.0])
352
+ ub = np.array([2.0, 2.0])
353
+
354
+ x0 = np.array([1.0, 2.0])
355
+ jac_true = self.jac_vector_vector(x0)
356
+
357
+ jac_diff = approx_derivative(self.fun_vector_vector, x0)
358
+ assert_allclose(jac_diff, jac_true, rtol=1e-9)
359
+
360
+ jac_diff = approx_derivative(self.fun_vector_vector, x0,
361
+ bounds=(lb, np.inf))
362
+ assert_allclose(jac_diff, jac_true, rtol=1e-9)
363
+
364
+ jac_diff = approx_derivative(self.fun_vector_vector, x0,
365
+ bounds=(-np.inf, ub))
366
+ assert_allclose(jac_diff, jac_true, rtol=1e-9)
367
+
368
+ jac_diff = approx_derivative(self.fun_vector_vector, x0,
369
+ bounds=(lb, ub))
370
+ assert_allclose(jac_diff, jac_true, rtol=1e-9)
371
+
372
+ def test_tight_bounds(self):
373
+ x0 = np.array([10.0, 10.0])
374
+ lb = x0 - 3e-9
375
+ ub = x0 + 2e-9
376
+ jac_true = self.jac_vector_vector(x0)
377
+ jac_diff = approx_derivative(
378
+ self.fun_vector_vector, x0, method='2-point', bounds=(lb, ub))
379
+ assert_allclose(jac_diff, jac_true, rtol=1e-6)
380
+ jac_diff = approx_derivative(
381
+ self.fun_vector_vector, x0, method='2-point',
382
+ rel_step=1e-6, bounds=(lb, ub))
383
+ assert_allclose(jac_diff, jac_true, rtol=1e-6)
384
+
385
+ jac_diff = approx_derivative(
386
+ self.fun_vector_vector, x0, bounds=(lb, ub))
387
+ assert_allclose(jac_diff, jac_true, rtol=1e-6)
388
+ jac_diff = approx_derivative(
389
+ self.fun_vector_vector, x0, rel_step=1e-6, bounds=(lb, ub))
390
+ assert_allclose(jac_true, jac_diff, rtol=1e-6)
391
+
392
+ def test_bound_switches(self):
393
+ lb = -1e-8
394
+ ub = 1e-8
395
+ x0 = 0.0
396
+ jac_true = self.jac_with_nan(x0)
397
+ jac_diff_2 = approx_derivative(
398
+ self.fun_with_nan, x0, method='2-point', rel_step=1e-6,
399
+ bounds=(lb, ub))
400
+ jac_diff_3 = approx_derivative(
401
+ self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub))
402
+ assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
403
+ assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
404
+
405
+ x0 = 1e-8
406
+ jac_true = self.jac_with_nan(x0)
407
+ jac_diff_2 = approx_derivative(
408
+ self.fun_with_nan, x0, method='2-point', rel_step=1e-6,
409
+ bounds=(lb, ub))
410
+ jac_diff_3 = approx_derivative(
411
+ self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub))
412
+ assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
413
+ assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
414
+
415
+ def test_non_numpy(self):
416
+ x0 = 1.0
417
+ jac_true = self.jac_non_numpy(x0)
418
+ jac_diff_2 = approx_derivative(self.jac_non_numpy, x0,
419
+ method='2-point')
420
+ jac_diff_3 = approx_derivative(self.jac_non_numpy, x0)
421
+ assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
422
+ assert_allclose(jac_diff_3, jac_true, rtol=1e-8)
423
+
424
+ # math.exp cannot handle complex arguments, hence this raises
425
+ assert_raises(TypeError, approx_derivative, self.jac_non_numpy, x0,
426
+ **dict(method='cs'))
427
+
428
+ def test_fp(self):
429
+ # checks that approx_derivative works for FP size other than 64.
430
+ # Example is derived from the minimal working example in gh12991.
431
+ np.random.seed(1)
432
+
433
+ def func(p, x):
434
+ return p[0] + p[1] * x
435
+
436
+ def err(p, x, y):
437
+ return func(p, x) - y
438
+
439
+ x = np.linspace(0, 1, 100, dtype=np.float64)
440
+ y = np.random.random(100).astype(np.float64)
441
+ p0 = np.array([-1.0, -1.0])
442
+
443
+ jac_fp64 = approx_derivative(err, p0, method='2-point', args=(x, y))
444
+
445
+ # parameter vector is float32, func output is float64
446
+ jac_fp = approx_derivative(err, p0.astype(np.float32),
447
+ method='2-point', args=(x, y))
448
+ assert err(p0, x, y).dtype == np.float64
449
+ assert_allclose(jac_fp, jac_fp64, atol=1e-3)
450
+
451
+ # parameter vector is float64, func output is float32
452
+ def err_fp32(p):
453
+ assert p.dtype == np.float32
454
+ return err(p, x, y).astype(np.float32)
455
+
456
+ jac_fp = approx_derivative(err_fp32, p0.astype(np.float32),
457
+ method='2-point')
458
+ assert_allclose(jac_fp, jac_fp64, atol=1e-3)
459
+
460
+ # check upper bound of error on the derivative for 2-point
461
+ def f(x):
462
+ return np.sin(x)
463
+ def g(x):
464
+ return np.cos(x)
465
+ def hess(x):
466
+ return -np.sin(x)
467
+
468
+ def calc_atol(h, x0, f, hess, EPS):
469
+ # truncation error
470
+ t0 = h / 2 * max(np.abs(hess(x0)), np.abs(hess(x0 + h)))
471
+ # roundoff error. There may be a divisor (>1) missing from
472
+ # the following line, so this contribution is possibly
473
+ # overestimated
474
+ t1 = EPS / h * max(np.abs(f(x0)), np.abs(f(x0 + h)))
475
+ return t0 + t1
476
+
477
+ for dtype in [np.float16, np.float32, np.float64]:
478
+ EPS = np.finfo(dtype).eps
479
+ x0 = np.array(1.0).astype(dtype)
480
+ h = _compute_absolute_step(None, x0, f(x0), '2-point')
481
+ atol = calc_atol(h, x0, f, hess, EPS)
482
+ err = approx_derivative(f, x0, method='2-point',
483
+ abs_step=h) - g(x0)
484
+ assert abs(err) < atol
485
+
486
+ def test_check_derivative(self):
487
+ x0 = np.array([-10.0, 10])
488
+ accuracy = check_derivative(self.fun_vector_vector,
489
+ self.jac_vector_vector, x0)
490
+ assert_(accuracy < 1e-9)
491
+ accuracy = check_derivative(self.fun_vector_vector,
492
+ self.jac_vector_vector, x0)
493
+ assert_(accuracy < 1e-6)
494
+
495
+ x0 = np.array([0.0, 0.0])
496
+ accuracy = check_derivative(self.fun_zero_jacobian,
497
+ self.jac_zero_jacobian, x0)
498
+ assert_(accuracy == 0)
499
+ accuracy = check_derivative(self.fun_zero_jacobian,
500
+ self.jac_zero_jacobian, x0)
501
+ assert_(accuracy == 0)
502
+
503
+
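A standalone sketch of `check_derivative`, the helper the test above exercises. It compares an analytic Jacobian against a finite-difference estimate and returns the largest relative disagreement; the import path below is the private `scipy.optimize._numdiff` module, which may change between versions.

    import numpy as np
    from scipy.optimize._numdiff import check_derivative

    def fun(x):
        return np.array([x[0] * np.sin(x[1]), x[1] * np.cos(x[0])])

    def jac(x):
        return np.array([[np.sin(x[1]), x[0] * np.cos(x[1])],
                         [-x[1] * np.sin(x[0]), np.cos(x[0])]])

    # A tiny value means the analytic Jacobian matches the numerical one.
    print(check_derivative(fun, jac, np.array([1.0, 2.0])))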
+class TestApproxDerivativeSparse:
+    # Example from Numerical Optimization 2nd edition, p. 198.
+    def setup_method(self):
+        np.random.seed(0)
+        self.n = 50
+        self.lb = -0.1 * (1 + np.arange(self.n))
+        self.ub = 0.1 * (1 + np.arange(self.n))
+        self.x0 = np.empty(self.n)
+        self.x0[::2] = (1 - 1e-7) * self.lb[::2]
+        self.x0[1::2] = (1 - 1e-7) * self.ub[1::2]
+
+        self.J_true = self.jac(self.x0)
+
+    def fun(self, x):
+        e = x[1:]**3 - x[:-1]**2
+        return np.hstack((0, 3 * e)) + np.hstack((2 * e, 0))
+
+    def jac(self, x):
+        n = x.size
+        J = np.zeros((n, n))
+        J[0, 0] = -4 * x[0]
+        J[0, 1] = 6 * x[1]**2
+        for i in range(1, n - 1):
+            J[i, i - 1] = -6 * x[i-1]
+            J[i, i] = 9 * x[i]**2 - 4 * x[i]
+            J[i, i + 1] = 6 * x[i+1]**2
+        J[-1, -1] = 9 * x[-1]**2
+        J[-1, -2] = -6 * x[-2]
+
+        return J
+
+    def structure(self, n):
+        A = np.zeros((n, n), dtype=int)
+        A[0, 0] = 1
+        A[0, 1] = 1
+        for i in range(1, n - 1):
+            A[i, i - 1: i + 2] = 1
+        A[-1, -1] = 1
+        A[-1, -2] = 1
+
+        return A
+
+    def test_all(self):
+        A = self.structure(self.n)
+        order = np.arange(self.n)
+        groups_1 = group_columns(A, order)
+        np.random.shuffle(order)
+        groups_2 = group_columns(A, order)
+
+        for method, groups, l, u in product(
+                ['2-point', '3-point', 'cs'], [groups_1, groups_2],
+                [-np.inf, self.lb], [np.inf, self.ub]):
+            J = approx_derivative(self.fun, self.x0, method=method,
+                                  bounds=(l, u), sparsity=(A, groups))
+            assert_(isinstance(J, csr_matrix))
+            assert_allclose(J.toarray(), self.J_true, rtol=1e-6)
+
+            rel_step = np.full_like(self.x0, 1e-8)
+            rel_step[::2] *= -1
+            J = approx_derivative(self.fun, self.x0, method=method,
+                                  rel_step=rel_step, sparsity=(A, groups))
+            assert_allclose(J.toarray(), self.J_true, rtol=1e-5)
+
+    def test_no_precomputed_groups(self):
+        A = self.structure(self.n)
+        J = approx_derivative(self.fun, self.x0, sparsity=A)
+        assert_allclose(J.toarray(), self.J_true, rtol=1e-6)
+
+    def test_equivalence(self):
+        structure = np.ones((self.n, self.n), dtype=int)
+        groups = np.arange(self.n)
+        for method in ['2-point', '3-point', 'cs']:
+            J_dense = approx_derivative(self.fun, self.x0, method=method)
+            J_sparse = approx_derivative(
+                self.fun, self.x0, sparsity=(structure, groups), method=method)
+            assert_allclose(J_dense, J_sparse.toarray(),
+                            rtol=5e-16, atol=7e-15)
+
+    def test_check_derivative(self):
+        def jac(x):
+            return csr_matrix(self.jac(x))
+
+        accuracy = check_derivative(self.fun, jac, self.x0,
+                                    bounds=(self.lb, self.ub))
+        assert_(accuracy < 1e-9)
+
+        accuracy = check_derivative(self.fun, jac, self.x0,
+                                    bounds=(self.lb, self.ub))
+        assert_(accuracy < 1e-9)
+
+
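The column-grouping trick used above can be illustrated in isolation. For a banded Jacobian, columns whose non-zeros never share a row can be perturbed simultaneously, so the evaluation count drops from n to the number of groups. A minimal sketch, again using the private `scipy.optimize._numdiff` module:

    import numpy as np
    from scipy.optimize._numdiff import approx_derivative, group_columns

    n = 6
    structure = np.zeros((n, n), dtype=int)
    for i in range(n):
        structure[i, max(0, i - 1):i + 2] = 1   # tridiagonal pattern
    groups = group_columns(structure)           # e.g. {0, 3}, {1, 4}, {2, 5}

    def fun(x):
        f = x**2
        f[1:] += x[:-1]   # lower-bidiagonal Jacobian, inside the pattern
        return f

    J = approx_derivative(fun, np.arange(1.0, n + 1),
                          sparsity=(structure, groups))
    print(J.toarray())    # result comes back as a sparse matrix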
+class TestApproxDerivativeLinearOperator:
+
+    def fun_scalar_scalar(self, x):
+        return np.sinh(x)
+
+    def jac_scalar_scalar(self, x):
+        return np.cosh(x)
+
+    def fun_scalar_vector(self, x):
+        return np.array([x[0]**2, np.tan(x[0]), np.exp(x[0])])
+
+    def jac_scalar_vector(self, x):
+        return np.array(
+            [2 * x[0], np.cos(x[0]) ** -2, np.exp(x[0])]).reshape(-1, 1)
+
+    def fun_vector_scalar(self, x):
+        return np.sin(x[0] * x[1]) * np.log(x[0])
+
+    def jac_vector_scalar(self, x):
+        return np.array([
+            x[1] * np.cos(x[0] * x[1]) * np.log(x[0]) +
+            np.sin(x[0] * x[1]) / x[0],
+            x[0] * np.cos(x[0] * x[1]) * np.log(x[0])
+        ])
+
+    def fun_vector_vector(self, x):
+        return np.array([
+            x[0] * np.sin(x[1]),
+            x[1] * np.cos(x[0]),
+            x[0] ** 3 * x[1] ** -0.5
+        ])
+
+    def jac_vector_vector(self, x):
+        return np.array([
+            [np.sin(x[1]), x[0] * np.cos(x[1])],
+            [-x[1] * np.sin(x[0]), np.cos(x[0])],
+            [3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5]
+        ])
+
+    def test_scalar_scalar(self):
+        x0 = 1.0
+        jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0,
+                                       method='2-point',
+                                       as_linear_operator=True)
+        jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0,
+                                       as_linear_operator=True)
+        jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0,
+                                       method='cs',
+                                       as_linear_operator=True)
+        jac_true = self.jac_scalar_scalar(x0)
+        np.random.seed(1)
+        for i in range(10):
+            p = np.random.uniform(-10, 10, size=(1,))
+            assert_allclose(jac_diff_2.dot(p), jac_true*p,
+                            rtol=1e-5)
+            assert_allclose(jac_diff_3.dot(p), jac_true*p,
+                            rtol=5e-6)
+            assert_allclose(jac_diff_4.dot(p), jac_true*p,
+                            rtol=5e-6)
+
+    def test_scalar_vector(self):
+        x0 = 0.5
+        jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0,
+                                       method='2-point',
+                                       as_linear_operator=True)
+        jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0,
+                                       as_linear_operator=True)
+        jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0,
+                                       method='cs',
+                                       as_linear_operator=True)
+        jac_true = self.jac_scalar_vector(np.atleast_1d(x0))
+        np.random.seed(1)
+        for i in range(10):
+            p = np.random.uniform(-10, 10, size=(1,))
+            assert_allclose(jac_diff_2.dot(p), jac_true.dot(p),
+                            rtol=1e-5)
+            assert_allclose(jac_diff_3.dot(p), jac_true.dot(p),
+                            rtol=5e-6)
+            assert_allclose(jac_diff_4.dot(p), jac_true.dot(p),
+                            rtol=5e-6)
+
+    def test_vector_scalar(self):
+        x0 = np.array([100.0, -0.5])
+        jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0,
+                                       method='2-point',
+                                       as_linear_operator=True)
+        jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0,
+                                       as_linear_operator=True)
+        jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0,
+                                       method='cs',
+                                       as_linear_operator=True)
+        jac_true = self.jac_vector_scalar(x0)
+        np.random.seed(1)
+        for i in range(10):
+            p = np.random.uniform(-10, 10, size=x0.shape)
+            assert_allclose(jac_diff_2.dot(p), np.atleast_1d(jac_true.dot(p)),
+                            rtol=1e-5)
+            assert_allclose(jac_diff_3.dot(p), np.atleast_1d(jac_true.dot(p)),
+                            rtol=5e-6)
+            assert_allclose(jac_diff_4.dot(p), np.atleast_1d(jac_true.dot(p)),
+                            rtol=1e-7)
+
+    def test_vector_vector(self):
+        x0 = np.array([-100.0, 0.2])
+        jac_diff_2 = approx_derivative(self.fun_vector_vector, x0,
+                                       method='2-point',
+                                       as_linear_operator=True)
+        jac_diff_3 = approx_derivative(self.fun_vector_vector, x0,
+                                       as_linear_operator=True)
+        jac_diff_4 = approx_derivative(self.fun_vector_vector, x0,
+                                       method='cs',
+                                       as_linear_operator=True)
+        jac_true = self.jac_vector_vector(x0)
+        np.random.seed(1)
+        for i in range(10):
+            p = np.random.uniform(-10, 10, size=x0.shape)
+            assert_allclose(jac_diff_2.dot(p), jac_true.dot(p), rtol=1e-5)
+            assert_allclose(jac_diff_3.dot(p), jac_true.dot(p), rtol=1e-6)
+            assert_allclose(jac_diff_4.dot(p), jac_true.dot(p), rtol=1e-7)
+
+    def test_exception(self):
+        x0 = np.array([-100.0, 0.2])
+        assert_raises(ValueError, approx_derivative,
+                      self.fun_vector_vector, x0,
+                      method='2-point', bounds=(1, np.inf))
+
+
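For large problems the dense Jacobian may not fit in memory; `as_linear_operator=True`, exercised above, returns a `scipy.sparse.linalg.LinearOperator` whose matrix-vector products are computed on demand, one extra function evaluation per product. A small sketch under the same private-import assumption:

    import numpy as np
    from scipy.optimize._numdiff import approx_derivative

    def fun(x):
        return np.array([x[0] * np.sin(x[1]), x[1] * np.cos(x[0])])

    x0 = np.array([1.0, 2.0])
    J = approx_derivative(fun, x0, method='2-point', as_linear_operator=True)

    p = np.array([0.3, -0.7])
    print(J.dot(p))   # directional derivative J @ p; no dense matrix is built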
+def test_absolute_step_sign():
+    # test for gh12487
+    # if an absolute step is specified for 2-point differences make sure that
+    # the side corresponds to the step, i.e. if the step is positive then
+    # forward differences should be used, if the step is negative then
+    # backwards differences should be used.
+
+    # function has double discontinuity at x = [-1, -1]
+    # first component is \/, second component is /\
+    def f(x):
+        return -np.abs(x[0] + 1) + np.abs(x[1] + 1)
+
+    # check that the forward difference is used
+    grad = approx_derivative(f, [-1, -1], method='2-point', abs_step=1e-8)
+    assert_allclose(grad, [-1.0, 1.0])
+
+    # check that the backwards difference is used
+    grad = approx_derivative(f, [-1, -1], method='2-point', abs_step=-1e-8)
+    assert_allclose(grad, [1.0, -1.0])
+
+    # check that the forwards difference is used with a step for both
+    # parameters
+    grad = approx_derivative(
+        f, [-1, -1], method='2-point', abs_step=[1e-8, 1e-8]
+    )
+    assert_allclose(grad, [-1.0, 1.0])
+
+    # check that we can mix forward/backwards steps.
+    grad = approx_derivative(
+        f, [-1, -1], method='2-point', abs_step=[1e-8, -1e-8]
+    )
+    assert_allclose(grad, [-1.0, -1.0])
+    grad = approx_derivative(
+        f, [-1, -1], method='2-point', abs_step=[-1e-8, 1e-8]
+    )
+    assert_allclose(grad, [1.0, 1.0])
+
+    # the forward step should reverse to a backwards step if it runs into a
+    # bound. This is kind of tested in TestAdjustSchemeToBounds, but only for
+    # a lower-level function.
+    grad = approx_derivative(
+        f, [-1, -1], method='2-point', abs_step=1e-8,
+        bounds=(-np.inf, -1)
+    )
+    assert_allclose(grad, [1.0, -1.0])
+
+    grad = approx_derivative(
+        f, [-1, -1], method='2-point', abs_step=-1e-8, bounds=(-1, np.inf)
+    )
+    assert_allclose(grad, [-1.0, 1.0])
+
+
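The sign convention is easiest to see at a kink: for f(x) = |x| at x = 0 a positive absolute step samples the right-hand slope and a negative one the left-hand slope. A quick sketch (same private module assumption as above):

    import numpy as np
    from scipy.optimize._numdiff import approx_derivative

    # forward difference: slope of +1 on the right of the kink
    print(approx_derivative(np.abs, 0.0, method='2-point', abs_step=1e-8))
    # backward difference: slope of -1 on the left of the kink
    print(approx_derivative(np.abs, 0.0, method='2-point', abs_step=-1e-8))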
+def test__compute_absolute_step():
+    # tests calculation of absolute step from rel_step
+    methods = ['2-point', '3-point', 'cs']
+
+    x0 = np.array([1e-5, 0, 1, 1e5])
+
+    EPS = np.finfo(np.float64).eps
+    relative_step = {
+        "2-point": EPS**0.5,
+        "3-point": EPS**(1/3),
+        "cs": EPS**0.5
+    }
+    f0 = np.array(1.0)
+
+    for method in methods:
+        rel_step = relative_step[method]
+        correct_step = np.array([rel_step,
+                                 rel_step * 1.,
+                                 rel_step * 1.,
+                                 rel_step * np.abs(x0[3])])
+
+        abs_step = _compute_absolute_step(None, x0, f0, method)
+        assert_allclose(abs_step, correct_step)
+
+        sign_x0 = (-x0 >= 0).astype(float) * 2 - 1
+        abs_step = _compute_absolute_step(None, -x0, f0, method)
+        assert_allclose(abs_step, sign_x0 * correct_step)
+
+    # if a relative step is provided it should be used
+    rel_step = np.array([0.1, 1, 10, 100])
+    correct_step = np.array([rel_step[0] * x0[0],
+                             relative_step['2-point'],
+                             rel_step[2] * 1.,
+                             rel_step[3] * np.abs(x0[3])])
+
+    abs_step = _compute_absolute_step(rel_step, x0, f0, '2-point')
+    assert_allclose(abs_step, correct_step)
+
+    sign_x0 = (-x0 >= 0).astype(float) * 2 - 1
+    abs_step = _compute_absolute_step(rel_step, -x0, f0, '2-point')
+    assert_allclose(abs_step, sign_x0 * correct_step)
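The defaults asserted above follow from the usual finite-difference error balance: the 2-point truncation error is O(h) while roundoff grows like EPS/h, so the total error is minimised near h ~ EPS**0.5; for 3-point the truncation term is O(h**2), giving h ~ EPS**(1/3). A quick check of the magnitudes:

    import numpy as np

    EPS = np.finfo(np.float64).eps
    print(EPS**0.5)    # ~1.49e-08: default relative step for '2-point', 'cs'
    print(EPS**(1/3))  # ~6.06e-06: default relative step for '3-point'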
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test__remove_redundancy.py ADDED
@@ -0,0 +1,228 @@
+"""
+Unit tests for the routines that remove redundant rows from the equality
+constraints of a linear program (used by the simplex-based solvers).
+"""
+
+# TODO: add tests for:
+#  https://github.com/scipy/scipy/issues/5400
+#  https://github.com/scipy/scipy/issues/6690
+
+import numpy as np
+from numpy.testing import (
+    assert_,
+    assert_allclose,
+    assert_equal)
+
+from .test_linprog import magic_square
+from scipy.optimize._remove_redundancy import _remove_redundancy_svd
+from scipy.optimize._remove_redundancy import _remove_redundancy_pivot_dense
+from scipy.optimize._remove_redundancy import _remove_redundancy_pivot_sparse
+from scipy.optimize._remove_redundancy import _remove_redundancy_id
+
+from scipy.sparse import csc_matrix
+
+
+def setup_module():
+    np.random.seed(2017)
+
+
+def redundancy_removed(A, B):
+    """Check whether A consists only of linearly independent rows of B."""
+    for rowA in A:
+        # `rowA in B` is not a reliable check
+        for rowB in B:
+            if np.all(rowA == rowB):
+                break
+        else:
+            return False
+    return A.shape[0] == np.linalg.matrix_rank(A) == np.linalg.matrix_rank(B)
+
+
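As a small illustration of the property `redundancy_removed` relies on: a row that is a linear combination of other rows does not increase the matrix rank, so the reduced matrix must have as many rows as its rank.

    import numpy as np

    A = np.array([[1., 0.],
                  [0., 1.],
                  [1., 1.]])          # third row = first + second
    print(np.linalg.matrix_rank(A))   # 2: one constraint row is redundant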
+class RRCommonTests:
+    def test_no_redundancy(self):
+        m, n = 10, 10
+        A0 = np.random.rand(m, n)
+        b0 = np.random.rand(m)
+        A1, b1, status, message = self.rr(A0, b0)
+        assert_allclose(A0, A1)
+        assert_allclose(b0, b1)
+        assert_equal(status, 0)
+
+    def test_infeasible_zero_row(self):
+        A = np.eye(3)
+        A[1, :] = 0
+        b = np.random.rand(3)
+        A1, b1, status, message = self.rr(A, b)
+        assert_equal(status, 2)
+
+    def test_remove_zero_row(self):
+        A = np.eye(3)
+        A[1, :] = 0
+        b = np.random.rand(3)
+        b[1] = 0
+        A1, b1, status, message = self.rr(A, b)
+        assert_equal(status, 0)
+        assert_allclose(A1, A[[0, 2], :])
+        assert_allclose(b1, b[[0, 2]])
+
+    def test_infeasible_m_gt_n(self):
+        m, n = 20, 10
+        A0 = np.random.rand(m, n)
+        b0 = np.random.rand(m)
+        A1, b1, status, message = self.rr(A0, b0)
+        assert_equal(status, 2)
+
+    def test_infeasible_m_eq_n(self):
+        m, n = 10, 10
+        A0 = np.random.rand(m, n)
+        b0 = np.random.rand(m)
+        A0[-1, :] = 2 * A0[-2, :]
+        A1, b1, status, message = self.rr(A0, b0)
+        assert_equal(status, 2)
+
+    def test_infeasible_m_lt_n(self):
+        m, n = 9, 10
+        A0 = np.random.rand(m, n)
+        b0 = np.random.rand(m)
+        A0[-1, :] = np.arange(m - 1).dot(A0[:-1])
+        A1, b1, status, message = self.rr(A0, b0)
+        assert_equal(status, 2)
+
+    def test_m_gt_n(self):
+        np.random.seed(2032)
+        m, n = 20, 10
+        A0 = np.random.rand(m, n)
+        b0 = np.random.rand(m)
+        x = np.linalg.solve(A0[:n, :], b0[:n])
+        b0[n:] = A0[n:, :].dot(x)
+        A1, b1, status, message = self.rr(A0, b0)
+        assert_equal(status, 0)
+        assert_equal(A1.shape[0], n)
+        assert_equal(np.linalg.matrix_rank(A1), n)
+
+    def test_m_gt_n_rank_deficient(self):
+        m, n = 20, 10
+        A0 = np.zeros((m, n))
+        A0[:, 0] = 1
+        b0 = np.ones(m)
+        A1, b1, status, message = self.rr(A0, b0)
+        assert_equal(status, 0)
+        assert_allclose(A1, A0[0:1, :])
+        assert_allclose(b1, b0[0])
+
+    def test_m_lt_n_rank_deficient(self):
+        m, n = 9, 10
+        A0 = np.random.rand(m, n)
+        b0 = np.random.rand(m)
+        A0[-1, :] = np.arange(m - 1).dot(A0[:-1])
+        b0[-1] = np.arange(m - 1).dot(b0[:-1])
+        A1, b1, status, message = self.rr(A0, b0)
+        assert_equal(status, 0)
+        assert_equal(A1.shape[0], 8)
+        assert_equal(np.linalg.matrix_rank(A1), 8)
+
+    def test_dense1(self):
+        A = np.ones((6, 6))
+        A[0, :3] = 0
+        A[1, 3:] = 0
+        A[3:, ::2] = -1
+        A[3, :2] = 0
+        A[4, 2:] = 0
+        b = np.zeros(A.shape[0])
+
+        A1, b1, status, message = self.rr(A, b)
+        assert_(redundancy_removed(A1, A))
+        assert_equal(status, 0)
+
+    def test_dense2(self):
+        A = np.eye(6)
+        A[-2, -1] = 1
+        A[-1, :] = 1
+        b = np.zeros(A.shape[0])
+        A1, b1, status, message = self.rr(A, b)
+        assert_(redundancy_removed(A1, A))
+        assert_equal(status, 0)
+
+    def test_dense3(self):
+        A = np.eye(6)
+        A[-2, -1] = 1
+        A[-1, :] = 1
+        b = np.random.rand(A.shape[0])
+        b[-1] = np.sum(b[:-1])
+        A1, b1, status, message = self.rr(A, b)
+        assert_(redundancy_removed(A1, A))
+        assert_equal(status, 0)
+
+    def test_m_gt_n_sparse(self):
+        np.random.seed(2013)
+        m, n = 20, 5
+        p = 0.1
+        A = np.random.rand(m, n)
+        A[np.random.rand(m, n) > p] = 0
+        rank = np.linalg.matrix_rank(A)
+        b = np.zeros(A.shape[0])
+        A1, b1, status, message = self.rr(A, b)
+        assert_equal(status, 0)
+        assert_equal(A1.shape[0], rank)
+        assert_equal(np.linalg.matrix_rank(A1), rank)
+
+    def test_m_lt_n_sparse(self):
+        np.random.seed(2017)
+        m, n = 20, 50
+        p = 0.05
+        A = np.random.rand(m, n)
+        A[np.random.rand(m, n) > p] = 0
+        rank = np.linalg.matrix_rank(A)
+        b = np.zeros(A.shape[0])
+        A1, b1, status, message = self.rr(A, b)
+        assert_equal(status, 0)
+        assert_equal(A1.shape[0], rank)
+        assert_equal(np.linalg.matrix_rank(A1), rank)
+
+    def test_m_eq_n_sparse(self):
+        np.random.seed(2017)
+        m, n = 100, 100
+        p = 0.01
+        A = np.random.rand(m, n)
+        A[np.random.rand(m, n) > p] = 0
+        rank = np.linalg.matrix_rank(A)
+        b = np.zeros(A.shape[0])
+        A1, b1, status, message = self.rr(A, b)
+        assert_equal(status, 0)
+        assert_equal(A1.shape[0], rank)
+        assert_equal(np.linalg.matrix_rank(A1), rank)
+
+    def test_magic_square(self):
+        A, b, c, numbers, _ = magic_square(3)
+        A1, b1, status, message = self.rr(A, b)
+        assert_equal(status, 0)
+        assert_equal(A1.shape[0], 23)
+        assert_equal(np.linalg.matrix_rank(A1), 23)
+
+    def test_magic_square2(self):
+        A, b, c, numbers, _ = magic_square(4)
+        A1, b1, status, message = self.rr(A, b)
+        assert_equal(status, 0)
+        assert_equal(A1.shape[0], 39)
+        assert_equal(np.linalg.matrix_rank(A1), 39)
+
+
+class TestRRSVD(RRCommonTests):
+    def rr(self, A, b):
+        return _remove_redundancy_svd(A, b)
+
+
+class TestRRPivotDense(RRCommonTests):
+    def rr(self, A, b):
+        return _remove_redundancy_pivot_dense(A, b)
+
+
+class TestRRID(RRCommonTests):
+    def rr(self, A, b):
+        return _remove_redundancy_id(A, b)
+
+
+class TestRRPivotSparse(RRCommonTests):
+    def rr(self, A, b):
+        rr_res = _remove_redundancy_pivot_sparse(csc_matrix(A), b)
+        A1, b1, status, message = rr_res
+        return A1.toarray(), b1, status, message
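A hedged usage sketch of one of the routines under test. The `_remove_redundancy_*` functions are private and their signatures may change; per the tests above, status 0 signals success and status 2 an inconsistent (infeasible) system, so a consistent system with one dependent row should come back with that row dropped.

    import numpy as np
    from scipy.optimize._remove_redundancy import _remove_redundancy_svd

    A = np.array([[1., 0.],
                  [0., 1.],
                  [1., 1.]])
    b = np.array([2., 3., 5.])            # consistent: b[2] = b[0] + b[1]
    A1, b1, status, message = _remove_redundancy_svd(A, b)
    print(A1.shape, status)               # expected: (2, 2) 0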
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test__root.py ADDED
@@ -0,0 +1,123 @@
+"""
+Unit tests for optimization routines from _root.py.
+"""
+from numpy.testing import assert_, assert_equal
+import pytest
+from pytest import raises as assert_raises, warns as assert_warns
+import numpy as np
+
+from scipy.optimize import root
+
+
+class TestRoot:
+    def test_tol_parameter(self):
+        # Check that the root() tol= argument does something
+        def func(z):
+            x, y = z
+            return np.array([x**3 - 1, y**3 - 1])
+
+        def dfunc(z):
+            x, y = z
+            return np.array([[3*x**2, 0], [0, 3*y**2]])
+
+        for method in ['hybr', 'lm', 'broyden1', 'broyden2', 'anderson',
+                       'diagbroyden', 'krylov']:
+            if method in ('linearmixing', 'excitingmixing'):
+                # doesn't converge
+                continue
+
+            if method in ('hybr', 'lm'):
+                jac = dfunc
+            else:
+                jac = None
+
+            sol1 = root(func, [1.1, 1.1], jac=jac, tol=1e-4, method=method)
+            sol2 = root(func, [1.1, 1.1], jac=jac, tol=0.5, method=method)
+            msg = f"{method}: {func(sol1.x)} vs. {func(sol2.x)}"
+            assert_(sol1.success, msg)
+            assert_(sol2.success, msg)
+            assert_(abs(func(sol1.x)).max() < abs(func(sol2.x)).max(),
+                    msg)
+
+    def test_tol_norm(self):
+
+        def norm(x):
+            return abs(x[0])
+
+        for method in ['excitingmixing',
+                       'diagbroyden',
+                       'linearmixing',
+                       'anderson',
+                       'broyden1',
+                       'broyden2',
+                       'krylov']:
+
+            root(np.zeros_like, np.zeros(2), method=method,
+                 options={"tol_norm": norm})
+
+    def test_minimize_scalar_coerce_args_param(self):
+        # github issue #3503
+        def func(z, f=1):
+            x, y = z
+            return np.array([x**3 - 1, y**3 - f])
+        root(func, [1.1, 1.1], args=1.5)
+
+    def test_f_size(self):
+        # gh8320
+        # check that decreasing the size of the returned array raises an error
+        # and doesn't segfault
+        class fun:
+            def __init__(self):
+                self.count = 0
+
+            def __call__(self, x):
+                self.count += 1
+
+                if not (self.count % 5):
+                    ret = x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0
+                else:
+                    ret = ([x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0,
+                            0.5 * (x[1] - x[0]) ** 3 + x[1]])
+
+                return ret
+
+        F = fun()
+        with assert_raises(ValueError):
+            root(F, [0.1, 0.0], method='lm')
+
+    def test_gh_10370(self):
+        # gh-10370 reported that passing both `args` and `jac` to `root` with
+        # `method='krylov'` caused a failure. Ensure that this is fixed whether
+        # the gradient is passed via `jac` or as a second output of `fun`.
+        def fun(x, ignored):
+            return [3*x[0] - 0.25*x[1]**2 + 10, 0.1*x[0]**2 + 5*x[1] - 2]
+
+        def grad(x, ignored):
+            return [[3, 0.5 * x[1]], [0.2 * x[0], 5]]
+
+        def fun_grad(x, ignored):
+            return fun(x, ignored), grad(x, ignored)
+
+        x0 = np.zeros(2)
+
+        ref = root(fun, x0, args=(1,), method='krylov')
+        message = 'Method krylov does not use the jacobian'
+        with assert_warns(RuntimeWarning, match=message):
+            res1 = root(fun, x0, args=(1,), method='krylov', jac=grad)
+        with assert_warns(RuntimeWarning, match=message):
+            res2 = root(fun_grad, x0, args=(1,), method='krylov', jac=True)
+
+        assert_equal(res1.x, ref.x)
+        assert_equal(res2.x, ref.x)
+        assert res1.success is res2.success is ref.success is True
+
+    @pytest.mark.parametrize("method", ["hybr", "lm", "broyden1", "broyden2",
+                                        "anderson", "linearmixing",
+                                        "diagbroyden", "excitingmixing",
+                                        "krylov", "df-sane"])
+    def test_method_in_result(self, method):
+        def func(x):
+            return x - 1
+
+        res = root(func, x0=[1], method=method)
+        assert res.method == method
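For reference, the kind of call these tests exercise: a minimal sketch of `scipy.optimize.root` on the same decoupled cubic system with its analytic Jacobian.

    import numpy as np
    from scipy.optimize import root

    def func(z):
        x, y = z
        return np.array([x**3 - 1, y**3 - 1])

    def jac(z):
        x, y = z
        return np.array([[3 * x**2, 0.], [0., 3 * y**2]])

    sol = root(func, [1.1, 1.1], jac=jac, method='hybr', tol=1e-10)
    print(sol.x, sol.success)   # -> approximately [1. 1.] True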
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test__shgo.py ADDED
@@ -0,0 +1,1159 @@
+import logging
+import sys
+
+import numpy
+import numpy as np
+import time
+from multiprocessing import Pool
+from numpy.testing import assert_allclose, IS_PYPY
+import pytest
+from pytest import raises as assert_raises, warns
+from scipy.optimize import (shgo, Bounds, minimize_scalar, minimize, rosen,
+                            rosen_der, rosen_hess, NonlinearConstraint)
+from scipy.optimize._constraints import new_constraint_to_old
+from scipy.optimize._shgo import SHGO
+
+
+class StructTestFunction:
+    def __init__(self, bounds, expected_x, expected_fun=None,
+                 expected_xl=None, expected_funl=None):
+        self.bounds = bounds
+        self.expected_x = expected_x
+        self.expected_fun = expected_fun
+        self.expected_xl = expected_xl
+        self.expected_funl = expected_funl
+
+
+def wrap_constraints(g):
+    cons = []
+    if g is not None:
+        if not isinstance(g, (tuple, list)):
+            g = (g,)
+        for g in g:
+            cons.append({'type': 'ineq',
+                         'fun': g})
+        cons = tuple(cons)
+    else:
+        cons = None
+    return cons
+
+
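`wrap_constraints` just converts plain callables into the old-style dict constraints accepted by `shgo`; for example:

    def g1(x):
        return x[0] + x[1] - 1.0

    cons = wrap_constraints(g1)
    # -> ({'type': 'ineq', 'fun': g1},)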
+class StructTest1(StructTestFunction):
+    def f(self, x):
+        return x[0] ** 2 + x[1] ** 2
+
+    def g(x):
+        return -(numpy.sum(x, axis=0) - 6.0)
+
+    cons = wrap_constraints(g)
+
+
+test1_1 = StructTest1(bounds=[(-1, 6), (-1, 6)],
+                      expected_x=[0, 0])
+test1_2 = StructTest1(bounds=[(0, 1), (0, 1)],
+                      expected_x=[0, 0])
+test1_3 = StructTest1(bounds=[(None, None), (None, None)],
+                      expected_x=[0, 0])
+
+
+class StructTest2(StructTestFunction):
+    """
+    Scalar function with several minima to test all minimiser retrievals
+    """
+
+    def f(self, x):
+        return (x - 30) * numpy.sin(x)
+
+    def g(x):
+        return 58 - numpy.sum(x, axis=0)
+
+    cons = wrap_constraints(g)
+
+
+test2_1 = StructTest2(bounds=[(0, 60)],
+                      expected_x=[1.53567906],
+                      expected_fun=-28.44677132,
+                      # Important: test that funl return is in the correct
+                      # order
+                      expected_xl=numpy.array([[1.53567906],
+                                               [55.01782167],
+                                               [7.80894889],
+                                               [48.74797493],
+                                               [14.07445705],
+                                               [42.4913859],
+                                               [20.31743841],
+                                               [36.28607535],
+                                               [26.43039605],
+                                               [30.76371366]]),
+                      expected_funl=numpy.array([-28.44677132, -24.99785984,
+                                                 -22.16855376, -18.72136195,
+                                                 -15.89423937, -12.45154942,
+                                                 -9.63133158, -6.20801301,
+                                                 -3.43727232, -0.46353338])
+                      )
+
+test2_2 = StructTest2(bounds=[(0, 4.5)],
+                      expected_x=[1.53567906],
+                      expected_fun=[-28.44677132],
+                      expected_xl=numpy.array([[1.53567906]]),
+                      expected_funl=numpy.array([-28.44677132])
+                      )
+
+
+class StructTest3(StructTestFunction):
+    """
+    Hock and Schittkowski 18 problem (HS18). Hock and Schittkowski (1981)
+    http://www.ai7.uni-bayreuth.de/test_problem_coll.pdf
+    Minimize: f = 0.01 * (x_1)**2 + (x_2)**2
+
+    Subject to: x_1 * x_2 - 25.0 >= 0,
+                (x_1)**2 + (x_2)**2 - 25.0 >= 0,
+                2 <= x_1 <= 50,
+                0 <= x_2 <= 50.
+
+    Approx. Answer:
+        f([(250)**0.5, (2.5)**0.5]) = 5.0
+    """
+
+    # amended to test vectorisation of constraints
+    def f(self, x):
+        return 0.01 * (x[0]) ** 2 + (x[1]) ** 2
+
+    def g1(x):
+        return x[0] * x[1] - 25.0
+
+    def g2(x):
+        return x[0] ** 2 + x[1] ** 2 - 25.0
+
+    # g = (g1, g2)
+    # cons = wrap_constraints(g)
+
+    def g(x):
+        return x[0] * x[1] - 25.0, x[0] ** 2 + x[1] ** 2 - 25.0
+
+    # this checks that shgo can be sent new-style constraints
+    __nlc = NonlinearConstraint(g, 0, np.inf)
+    cons = (__nlc,)
+
+
+test3_1 = StructTest3(bounds=[(2, 50), (0, 50)],
+                      expected_x=[250 ** 0.5, 2.5 ** 0.5],
+                      expected_fun=5.0
+                      )
+
+
+class StructTest4(StructTestFunction):
+    """
+    Hock and Schittkowski 11 problem (HS11). Hock and Schittkowski (1981)
+
+    NOTE: Did not find in original reference to HS collection, refer to
+          Henderson (2015) problem 7 instead. 02.03.2016
+    """
+
+    def f(self, x):
+        return ((x[0] - 10) ** 2 + 5 * (x[1] - 12) ** 2 + x[2] ** 4
+                + 3 * (x[3] - 11) ** 2 + 10 * x[4] ** 6 + 7 * x[5] ** 2
+                + x[6] ** 4 - 4 * x[5] * x[6] - 10 * x[5] - 8 * x[6])
+
+    def g1(x):
+        return -(2 * x[0] ** 2 + 3 * x[1] ** 4 + x[2] + 4 * x[3] ** 2
+                 + 5 * x[4] - 127)
+
+    def g2(x):
+        return -(7 * x[0] + 3 * x[1] + 10 * x[2] ** 2 + x[3] - x[4] - 282.0)
+
+    def g3(x):
+        return -(23 * x[0] + x[1] ** 2 + 6 * x[5] ** 2 - 8 * x[6] - 196)
+
+    def g4(x):
+        return -(4 * x[0] ** 2 + x[1] ** 2 - 3 * x[0] * x[1] + 2 * x[2] ** 2
+                 + 5 * x[5] - 11 * x[6])
+
+    g = (g1, g2, g3, g4)
+
+    cons = wrap_constraints(g)
+
+
+test4_1 = StructTest4(bounds=[(-10, 10), ] * 7,
+                      expected_x=[2.330499, 1.951372, -0.4775414,
+                                  4.365726, -0.6244870, 1.038131, 1.594227],
+                      expected_fun=680.6300573
+                      )
+
+
+class StructTest5(StructTestFunction):
+    def f(self, x):
+        return (-(x[1] + 47.0)
+                * numpy.sin(numpy.sqrt(abs(x[0] / 2.0 + (x[1] + 47.0))))
+                - x[0] * numpy.sin(numpy.sqrt(abs(x[0] - (x[1] + 47.0))))
+                )
+
+    g = None
+    cons = wrap_constraints(g)
+
+
+test5_1 = StructTest5(bounds=[(-512, 512), (-512, 512)],
+                      expected_fun=[-959.64066272085051],
+                      expected_x=[512., 404.23180542])
+
+
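The struct objects above only bundle a problem with its expected answers; the equivalent direct call for `test1_1` would look something like the following sketch:

    import numpy as np
    from scipy.optimize import shgo

    def f(x):
        return x[0] ** 2 + x[1] ** 2

    cons = ({'type': 'ineq', 'fun': lambda x: -(np.sum(x) - 6.0)},)
    res = shgo(f, bounds=[(-1, 6), (-1, 6)], constraints=cons)
    print(res.x, res.fun)   # expected near [0. 0.] and 0.0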
206
+ class StructTestLJ(StructTestFunction):
207
+ """
208
+ LennardJones objective function. Used to test symmetry constraints
209
+ settings.
210
+ """
211
+
212
+ def f(self, x, *args):
213
+ print(f'x = {x}')
214
+ self.N = args[0]
215
+ k = int(self.N / 3)
216
+ s = 0.0
217
+
218
+ for i in range(k - 1):
219
+ for j in range(i + 1, k):
220
+ a = 3 * i
221
+ b = 3 * j
222
+ xd = x[a] - x[b]
223
+ yd = x[a + 1] - x[b + 1]
224
+ zd = x[a + 2] - x[b + 2]
225
+ ed = xd * xd + yd * yd + zd * zd
226
+ ud = ed * ed * ed
227
+ if ed > 0.0:
228
+ s += (1.0 / ud - 2.0) / ud
229
+
230
+ return s
231
+
232
+ g = None
233
+ cons = wrap_constraints(g)
234
+
235
+
236
+ N = 6
237
+ boundsLJ = list(zip([-4.0] * 6, [4.0] * 6))
238
+
239
+ testLJ = StructTestLJ(bounds=boundsLJ,
240
+ expected_fun=[-1.0],
241
+ expected_x=None,
242
+ # expected_x=[-2.71247337e-08,
243
+ # -2.71247337e-08,
244
+ # -2.50000222e+00,
245
+ # -2.71247337e-08,
246
+ # -2.71247337e-08,
247
+ # -1.50000222e+00]
248
+ )
249
+
250
+
251
+ class StructTestS(StructTestFunction):
252
+ def f(self, x):
253
+ return ((x[0] - 0.5) ** 2 + (x[1] - 0.5) ** 2
254
+ + (x[2] - 0.5) ** 2 + (x[3] - 0.5) ** 2)
255
+
256
+ g = None
257
+ cons = wrap_constraints(g)
258
+
259
+
260
+ test_s = StructTestS(bounds=[(0, 2.0), ] * 4,
261
+ expected_fun=0.0,
262
+ expected_x=numpy.ones(4) - 0.5
263
+ )
264
+
265
+
266
+ class StructTestTable(StructTestFunction):
267
+ def f(self, x):
268
+ if x[0] == 3.0 and x[1] == 3.0:
269
+ return 50
270
+ else:
271
+ return 100
272
+
273
+ g = None
274
+ cons = wrap_constraints(g)
275
+
276
+
277
+ test_table = StructTestTable(bounds=[(-10, 10), (-10, 10)],
278
+ expected_fun=[50],
279
+ expected_x=[3.0, 3.0])
280
+
281
+
282
+ class StructTestInfeasible(StructTestFunction):
283
+ """
284
+ Test function with no feasible domain.
285
+ """
286
+
287
+ def f(self, x, *args):
288
+ return x[0] ** 2 + x[1] ** 2
289
+
290
+ def g1(x):
291
+ return x[0] + x[1] - 1
292
+
293
+ def g2(x):
294
+ return -(x[0] + x[1] - 1)
295
+
296
+ def g3(x):
297
+ return -x[0] + x[1] - 1
298
+
299
+ def g4(x):
300
+ return -(-x[0] + x[1] - 1)
301
+
302
+ g = (g1, g2, g3, g4)
303
+ cons = wrap_constraints(g)
304
+
305
+
306
+ test_infeasible = StructTestInfeasible(bounds=[(2, 50), (-1, 1)],
307
+ expected_fun=None,
308
+ expected_x=None
309
+ )
310
+
311
+
312
+ @pytest.mark.skip("Not a test")
313
+ def run_test(test, args=(), test_atol=1e-5, n=100, iters=None,
314
+ callback=None, minimizer_kwargs=None, options=None,
315
+ sampling_method='sobol', workers=1):
316
+ res = shgo(test.f, test.bounds, args=args, constraints=test.cons,
317
+ n=n, iters=iters, callback=callback,
318
+ minimizer_kwargs=minimizer_kwargs, options=options,
319
+ sampling_method=sampling_method, workers=workers)
320
+
321
+ print(f'res = {res}')
322
+ logging.info(f'res = {res}')
323
+ if test.expected_x is not None:
324
+ numpy.testing.assert_allclose(res.x, test.expected_x,
325
+ rtol=test_atol,
326
+ atol=test_atol)
327
+
328
+ # (Optional tests)
329
+ if test.expected_fun is not None:
330
+ numpy.testing.assert_allclose(res.fun,
331
+ test.expected_fun,
332
+ atol=test_atol)
333
+
334
+ if test.expected_xl is not None:
335
+ numpy.testing.assert_allclose(res.xl,
336
+ test.expected_xl,
337
+ atol=test_atol)
338
+
339
+ if test.expected_funl is not None:
340
+ numpy.testing.assert_allclose(res.funl,
341
+ test.expected_funl,
342
+ atol=test_atol)
343
+ return
344
+
345
+
346
+ # Base test functions:
347
+ class TestShgoSobolTestFunctions:
348
+ """
349
+ Global optimisation tests with Sobol sampling:
350
+ """
351
+
352
+ # Sobol algorithm
353
+ def test_f1_1_sobol(self):
354
+ """Multivariate test function 1:
355
+ x[0]**2 + x[1]**2 with bounds=[(-1, 6), (-1, 6)]"""
356
+ run_test(test1_1)
357
+
358
+ def test_f1_2_sobol(self):
359
+ """Multivariate test function 1:
360
+ x[0]**2 + x[1]**2 with bounds=[(0, 1), (0, 1)]"""
361
+ run_test(test1_2)
362
+
363
+ def test_f1_3_sobol(self):
364
+ """Multivariate test function 1:
365
+ x[0]**2 + x[1]**2 with bounds=[(None, None),(None, None)]"""
366
+ options = {'disp': True}
367
+ run_test(test1_3, options=options)
368
+
369
+ def test_f2_1_sobol(self):
370
+ """Univariate test function on
371
+ f(x) = (x - 30) * sin(x) with bounds=[(0, 60)]"""
372
+ run_test(test2_1)
373
+
374
+ def test_f2_2_sobol(self):
375
+ """Univariate test function on
376
+ f(x) = (x - 30) * sin(x) bounds=[(0, 4.5)]"""
377
+ run_test(test2_2)
378
+
379
+ def test_f3_sobol(self):
380
+ """NLP: Hock and Schittkowski problem 18"""
381
+ run_test(test3_1)
382
+
383
+ @pytest.mark.slow
384
+ def test_f4_sobol(self):
385
+ """NLP: (High dimensional) Hock and Schittkowski 11 problem (HS11)"""
386
+ options = {'infty_constraints': False}
387
+ # run_test(test4_1, n=990, options=options)
388
+ run_test(test4_1, n=990 * 2, options=options)
389
+
390
+ def test_f5_1_sobol(self):
391
+ """NLP: Eggholder, multimodal"""
392
+ # run_test(test5_1, n=30)
393
+ run_test(test5_1, n=60)
394
+
395
+ def test_f5_2_sobol(self):
396
+ """NLP: Eggholder, multimodal"""
397
+ # run_test(test5_1, n=60, iters=5)
398
+ run_test(test5_1, n=60, iters=5)
399
+
400
+ # def test_t911(self):
401
+ # """1D tabletop function"""
402
+ # run_test(test11_1)
403
+
404
+
405
+ class TestShgoSimplicialTestFunctions:
406
+ """
407
+ Global optimisation tests with Simplicial sampling:
408
+ """
409
+
410
+ def test_f1_1_simplicial(self):
411
+ """Multivariate test function 1:
412
+ x[0]**2 + x[1]**2 with bounds=[(-1, 6), (-1, 6)]"""
413
+ run_test(test1_1, n=1, sampling_method='simplicial')
414
+
415
+ def test_f1_2_simplicial(self):
416
+ """Multivariate test function 1:
417
+ x[0]**2 + x[1]**2 with bounds=[(0, 1), (0, 1)]"""
418
+ run_test(test1_2, n=1, sampling_method='simplicial')
419
+
420
+ def test_f1_3_simplicial(self):
421
+ """Multivariate test function 1: x[0]**2 + x[1]**2
422
+ with bounds=[(None, None),(None, None)]"""
423
+ run_test(test1_3, n=5, sampling_method='simplicial')
424
+
425
+ def test_f2_1_simplicial(self):
426
+ """Univariate test function on
427
+ f(x) = (x - 30) * sin(x) with bounds=[(0, 60)]"""
428
+ options = {'minimize_every_iter': False}
429
+ run_test(test2_1, n=200, iters=7, options=options,
430
+ sampling_method='simplicial')
431
+
432
+ def test_f2_2_simplicial(self):
433
+ """Univariate test function on
434
+ f(x) = (x - 30) * sin(x) bounds=[(0, 4.5)]"""
435
+ run_test(test2_2, n=1, sampling_method='simplicial')
436
+
437
+ def test_f3_simplicial(self):
438
+ """NLP: Hock and Schittkowski problem 18"""
439
+ run_test(test3_1, n=1, sampling_method='simplicial')
440
+
441
+ @pytest.mark.slow
442
+ def test_f4_simplicial(self):
443
+ """NLP: (High dimensional) Hock and Schittkowski 11 problem (HS11)"""
444
+ run_test(test4_1, n=1, sampling_method='simplicial')
445
+
446
+ def test_lj_symmetry_old(self):
447
+ """LJ: Symmetry-constrained test function"""
448
+ options = {'symmetry': True,
449
+ 'disp': True}
450
+ args = (6,) # Number of atoms
451
+ run_test(testLJ, args=args, n=300,
452
+ options=options, iters=1,
453
+ sampling_method='simplicial')
454
+
455
+ def test_f5_1_lj_symmetry(self):
456
+ """LJ: Symmetry constrained test function"""
457
+ options = {'symmetry': [0, ] * 6,
458
+ 'disp': True}
459
+ args = (6,) # No. of atoms
460
+
461
+ run_test(testLJ, args=args, n=300,
462
+ options=options, iters=1,
463
+ sampling_method='simplicial')
464
+
465
+ def test_f5_2_cons_symmetry(self):
466
+ """Symmetry constrained test function"""
467
+ options = {'symmetry': [0, 0],
468
+ 'disp': True}
469
+
470
+ run_test(test1_1, n=200,
471
+ options=options, iters=1,
472
+ sampling_method='simplicial')
473
+
474
+ def test_f5_3_cons_symmetry(self):
475
+ """Assymmetrically constrained test function"""
476
+ options = {'symmetry': [0, 0, 0, 3],
477
+ 'disp': True}
478
+
479
+ run_test(test_s, n=10000,
480
+ options=options,
481
+ iters=1,
482
+ sampling_method='simplicial')
483
+
484
+ @pytest.mark.skip("Not a test")
485
+ def test_f0_min_variance(self):
486
+ """Return a minimum on a perfectly symmetric problem, based on
487
+ gh10429"""
488
+ avg = 0.5 # Given average value of x
489
+ cons = {'type': 'eq', 'fun': lambda x: numpy.mean(x) - avg}
490
+
491
+ # Minimize the variance of x under the given constraint
492
+ res = shgo(numpy.var, bounds=6 * [(0, 1)], constraints=cons)
493
+ assert res.success
494
+ assert_allclose(res.fun, 0, atol=1e-15)
495
+ assert_allclose(res.x, 0.5)
496
+
497
+ @pytest.mark.skip("Not a test")
498
+ def test_f0_min_variance_1D(self):
499
+ """Return a minimum on a perfectly symmetric 1D problem, based on
500
+ gh10538"""
501
+
502
+ def fun(x):
503
+ return x * (x - 1.0) * (x - 0.5)
504
+
505
+ bounds = [(0, 1)]
506
+ res = shgo(fun, bounds=bounds)
507
+ ref = minimize_scalar(fun, bounds=bounds[0])
508
+ assert res.success
509
+ assert_allclose(res.fun, ref.fun)
510
+ assert_allclose(res.x, ref.x, rtol=1e-6)
511
+
512
+ # Argument test functions
513
+ class TestShgoArguments:
514
+ def test_1_1_simpl_iter(self):
515
+ """Iterative simplicial sampling on TestFunction 1 (multivariate)"""
516
+ run_test(test1_2, n=None, iters=2, sampling_method='simplicial')
517
+
518
+ def test_1_2_simpl_iter(self):
519
+ """Iterative simplicial on TestFunction 2 (univariate)"""
520
+ options = {'minimize_every_iter': False}
521
+ run_test(test2_1, n=None, iters=9, options=options,
522
+ sampling_method='simplicial')
523
+
524
+ def test_2_1_sobol_iter(self):
525
+ """Iterative Sobol sampling on TestFunction 1 (multivariate)"""
526
+ run_test(test1_2, n=None, iters=1, sampling_method='sobol')
527
+
528
+ def test_2_2_sobol_iter(self):
529
+ """Iterative Sobol sampling on TestFunction 2 (univariate)"""
530
+ res = shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons,
531
+ n=None, iters=1, sampling_method='sobol')
532
+
533
+ numpy.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5,
534
+ atol=1e-5)
535
+ numpy.testing.assert_allclose(res.fun, test2_1.expected_fun, atol=1e-5)
536
+
537
+ def test_3_1_disp_simplicial(self):
538
+ """Iterative sampling on TestFunction 1 and 2 (multi and univariate)
539
+ """
540
+
541
+ def callback_func(x):
542
+ print("Local minimization callback test")
543
+
544
+ for test in [test1_1, test2_1]:
545
+ shgo(test.f, test.bounds, iters=1,
546
+ sampling_method='simplicial',
547
+ callback=callback_func, options={'disp': True})
548
+ shgo(test.f, test.bounds, n=1, sampling_method='simplicial',
549
+ callback=callback_func, options={'disp': True})
550
+
551
+ def test_3_2_disp_sobol(self):
552
+ """Iterative sampling on TestFunction 1 and 2 (multi and univariate)"""
553
+
554
+ def callback_func(x):
555
+ print("Local minimization callback test")
556
+
557
+ for test in [test1_1, test2_1]:
558
+ shgo(test.f, test.bounds, iters=1, sampling_method='sobol',
559
+ callback=callback_func, options={'disp': True})
560
+
561
+ shgo(test.f, test.bounds, n=1, sampling_method='simplicial',
562
+ callback=callback_func, options={'disp': True})
563
+
564
+ def test_args_gh14589(self):
565
+ """Using `args` used to cause `shgo` to fail; see #14589, #15986,
566
+ #16506"""
567
+ res = shgo(func=lambda x, y, z: x * z + y, bounds=[(0, 3)], args=(1, 2)
568
+ )
569
+ ref = shgo(func=lambda x: 2 * x + 1, bounds=[(0, 3)])
570
+ assert_allclose(res.fun, ref.fun)
571
+ assert_allclose(res.x, ref.x)
572
+
573
+ @pytest.mark.slow
574
+ def test_4_1_known_f_min(self):
575
+ """Test known function minima stopping criteria"""
576
+ # Specify known function value
577
+ options = {'f_min': test4_1.expected_fun,
578
+ 'f_tol': 1e-6,
579
+ 'minimize_every_iter': True}
580
+ # TODO: Make default n higher for faster tests
581
+ run_test(test4_1, n=None, test_atol=1e-5, options=options,
582
+ sampling_method='simplicial')
583
+
584
+ @pytest.mark.slow
585
+ def test_4_2_known_f_min(self):
586
+ """Test Global mode limiting local evaluations"""
587
+ options = { # Specify known function value
588
+ 'f_min': test4_1.expected_fun,
589
+ 'f_tol': 1e-6,
590
+ # Specify number of local iterations to perform
591
+ 'minimize_every_iter': True,
592
+ 'local_iter': 1}
593
+
594
+ run_test(test4_1, n=None, test_atol=1e-5, options=options,
595
+ sampling_method='simplicial')
596
+
597
+ def test_4_4_known_f_min(self):
598
+ """Test Global mode limiting local evaluations for 1D funcs"""
599
+ options = { # Specify known function value
600
+ 'f_min': test2_1.expected_fun,
601
+ 'f_tol': 1e-6,
602
+ # Specify number of local iterations to perform+
603
+ 'minimize_every_iter': True,
604
+ 'local_iter': 1,
605
+ 'infty_constraints': False}
606
+
607
+ res = shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons,
608
+ n=None, iters=None, options=options,
609
+ sampling_method='sobol')
610
+ numpy.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5,
611
+ atol=1e-5)
612
+
613
+ def test_5_1_simplicial_argless(self):
614
+ """Test Default simplicial sampling settings on TestFunction 1"""
615
+ res = shgo(test1_1.f, test1_1.bounds, constraints=test1_1.cons)
616
+ numpy.testing.assert_allclose(res.x, test1_1.expected_x, rtol=1e-5,
617
+ atol=1e-5)
618
+
619
+ def test_5_2_sobol_argless(self):
620
+ """Test Default sobol sampling settings on TestFunction 1"""
621
+ res = shgo(test1_1.f, test1_1.bounds, constraints=test1_1.cons,
622
+ sampling_method='sobol')
623
+ numpy.testing.assert_allclose(res.x, test1_1.expected_x, rtol=1e-5,
624
+ atol=1e-5)
625
+
626
+ def test_6_1_simplicial_max_iter(self):
627
+ """Test that maximum iteration option works on TestFunction 3"""
628
+ options = {'max_iter': 2}
629
+ res = shgo(test3_1.f, test3_1.bounds, constraints=test3_1.cons,
630
+ options=options, sampling_method='simplicial')
631
+ numpy.testing.assert_allclose(res.x, test3_1.expected_x, rtol=1e-5,
632
+ atol=1e-5)
633
+ numpy.testing.assert_allclose(res.fun, test3_1.expected_fun, atol=1e-5)
634
+
635
+ def test_6_2_simplicial_min_iter(self):
636
+ """Test that maximum iteration option works on TestFunction 3"""
637
+ options = {'min_iter': 2}
638
+ res = shgo(test3_1.f, test3_1.bounds, constraints=test3_1.cons,
639
+ options=options, sampling_method='simplicial')
640
+ numpy.testing.assert_allclose(res.x, test3_1.expected_x, rtol=1e-5,
641
+ atol=1e-5)
642
+ numpy.testing.assert_allclose(res.fun, test3_1.expected_fun, atol=1e-5)
643
+
644
+ def test_7_1_minkwargs(self):
645
+ """Test the minimizer_kwargs arguments for solvers with constraints"""
646
+ # Test solvers
647
+ for solver in ['COBYLA', 'SLSQP']:
648
+ # Note that passing global constraints to SLSQP is tested in other
649
+ # unittests which run test4_1 normally
650
+ minimizer_kwargs = {'method': solver,
651
+ 'constraints': test3_1.cons}
652
+ run_test(test3_1, n=100, test_atol=1e-3,
653
+ minimizer_kwargs=minimizer_kwargs,
654
+ sampling_method='sobol')
655
+
656
+ def test_7_2_minkwargs(self):
657
+ """Test the minimizer_kwargs default inits"""
658
+ minimizer_kwargs = {'ftol': 1e-5}
659
+ options = {'disp': True} # For coverage purposes
660
+ SHGO(test3_1.f, test3_1.bounds, constraints=test3_1.cons[0],
661
+ minimizer_kwargs=minimizer_kwargs, options=options)
662
+
663
+ def test_7_3_minkwargs(self):
664
+ """Test minimizer_kwargs arguments for solvers without constraints"""
665
+ for solver in ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG',
666
+ 'L-BFGS-B', 'TNC', 'dogleg', 'trust-ncg', 'trust-exact',
667
+ 'trust-krylov']:
668
+ def jac(x):
669
+ return numpy.array([2 * x[0], 2 * x[1]]).T
670
+
671
+ def hess(x):
672
+ return numpy.array([[2, 0], [0, 2]])
673
+
674
+ minimizer_kwargs = {'method': solver,
675
+ 'jac': jac,
676
+ 'hess': hess}
677
+ logging.info(f"Solver = {solver}")
678
+ logging.info("=" * 100)
679
+ run_test(test1_1, n=100, test_atol=1e-3,
680
+ minimizer_kwargs=minimizer_kwargs,
681
+ sampling_method='sobol')
682
+
683
+ def test_8_homology_group_diff(self):
684
+ options = {'minhgrd': 1,
685
+ 'minimize_every_iter': True}
686
+
687
+ run_test(test1_1, n=None, iters=None, options=options,
688
+ sampling_method='simplicial')
689
+
690
+ def test_9_cons_g(self):
691
+ """Test single function constraint passing"""
692
+ SHGO(test3_1.f, test3_1.bounds, constraints=test3_1.cons[0])
693
+
694
+ @pytest.mark.xfail(IS_PYPY and sys.platform == 'win32',
695
+ reason="Failing and fix in PyPy not planned (see gh-18632)")
696
+ def test_10_finite_time(self):
697
+ """Test single function constraint passing"""
698
+ options = {'maxtime': 1e-15}
699
+
700
+ def f(x):
701
+ time.sleep(1e-14)
702
+ return 0.0
703
+
704
+ res = shgo(f, test1_1.bounds, iters=5, options=options)
705
+ # Assert that only 1 rather than 5 requested iterations ran:
706
+ assert res.nit == 1
707
+
708
+ def test_11_f_min_0(self):
709
+ """Test to cover the case where f_lowest == 0"""
710
+ options = {'f_min': 0.0,
711
+ 'disp': True}
712
+ res = shgo(test1_2.f, test1_2.bounds, n=10, iters=None,
713
+ options=options, sampling_method='sobol')
714
+ numpy.testing.assert_equal(0, res.x[0])
715
+ numpy.testing.assert_equal(0, res.x[1])
716
+
717
+ # @nottest
718
+ @pytest.mark.skip(reason="no way of currently testing this")
719
+ def test_12_sobol_inf_cons(self):
720
+ """Test to cover the case where f_lowest == 0"""
721
+ # TODO: This test doesn't cover anything new; it is unknown what the
722
+ # original test was intended for, as it was never completed. Delete or
723
+ # replace in the future.
724
+ options = {'maxtime': 1e-15,
725
+ 'f_min': 0.0}
726
+ res = shgo(test1_2.f, test1_2.bounds, n=1, iters=None,
727
+ options=options, sampling_method='sobol')
728
+ numpy.testing.assert_equal(0.0, res.fun)
729
+
730
+ def test_13_high_sobol(self):
731
+ """Test init of high-dimensional sobol sequences"""
732
+
733
+ def f(x):
734
+ return 0
735
+
736
+ bounds = [(None, None), ] * 41
737
+ SHGOc = SHGO(f, bounds, sampling_method='sobol')
738
+ # SHGOc.sobol_points(2, 50)
739
+ SHGOc.sampling_function(2, 50)
740
+
741
+ def test_14_local_iter(self):
742
+ """Test limited local iterations for a pseudo-global mode"""
743
+ options = {'local_iter': 4}
744
+ run_test(test5_1, n=60, options=options)
745
+
746
+ def test_15_min_every_iter(self):
747
+ """Test minimize every iter options and cover function cache"""
748
+ options = {'minimize_every_iter': True}
749
+ run_test(test1_1, n=1, iters=7, options=options,
750
+ sampling_method='sobol')
751
+
752
+ def test_16_disp_bounds_minimizer(self, capsys):
753
+ """Test disp=True with minimizers that do not support bounds """
754
+ options = {'disp': True}
755
+ minimizer_kwargs = {'method': 'nelder-mead'}
756
+ run_test(test1_2, sampling_method='simplicial',
757
+ options=options, minimizer_kwargs=minimizer_kwargs)
758
+
759
+ def test_17_custom_sampling(self):
760
+ """Test the functionality to add custom sampling methods to shgo"""
761
+
762
+ def sample(n, d):
763
+ return numpy.random.uniform(size=(n, d))
764
+
765
+ run_test(test1_1, n=30, sampling_method=sample)
766
+
767
+ def test_18_bounds_class(self):
768
+ # test that new and old bounds yield same result
769
+ def f(x):
770
+ return numpy.square(x).sum()
771
+
772
+ lb = [-6., 1., -5.]
773
+ ub = [-1., 3., 5.]
774
+ bounds_old = list(zip(lb, ub))
775
+ bounds_new = Bounds(lb, ub)
776
+
777
+ res_old_bounds = shgo(f, bounds_old)
778
+ res_new_bounds = shgo(f, bounds_new)
779
+
780
+ assert res_new_bounds.nfev == res_old_bounds.nfev
781
+ assert res_new_bounds.message == res_old_bounds.message
782
+ assert res_new_bounds.success == res_old_bounds.success
783
+ x_opt = numpy.array([-1., 1., 0.])
784
+ numpy.testing.assert_allclose(res_new_bounds.x, x_opt)
785
+ numpy.testing.assert_allclose(res_new_bounds.x,
786
+ res_old_bounds.x)
787
+
788
+ def test_19_parallelization(self):
789
+ """Test the functionality to add custom sampling methods to shgo"""
790
+
791
+ with Pool(2) as p:
792
+ run_test(test1_1, n=30, workers=p.map) # Constrained
793
+ run_test(test1_1, n=30, workers=map) # Constrained
794
+ with Pool(2) as p:
795
+ run_test(test_s, n=30, workers=p.map) # Unconstrained
796
+ run_test(test_s, n=30, workers=map) # Unconstrained
797
+
798
+ def test_20_constrained_args(self):
799
+ """Test that constraints can be passed to arguments"""
800
+
801
+ def eggholder(x):
802
+ return (-(x[1] + 47.0)
803
+ * numpy.sin(numpy.sqrt(abs(x[0] / 2.0 + (x[1] + 47.0))))
804
+ - x[0] * numpy.sin(numpy.sqrt(abs(x[0] - (x[1] + 47.0))))
805
+ )
806
+
807
+ def f(x): # (cattle-feed)
808
+ return 24.55 * x[0] + 26.75 * x[1] + 39 * x[2] + 40.50 * x[3]
809
+
810
+ bounds = [(0, 1.0), ] * 4
811
+
812
+ def g1_modified(x, i):
813
+ return (i * 2.3 * x[0] + i * 5.6 * x[1] + 11.1 * x[2]
814
+ + 1.3 * x[3] - 5)  # >=0
815
+
816
+ def g2(x):
817
+ return (12 * x[0] + 11.9 * x[1] + 41.8 * x[2] + 52.1 * x[3] - 21
818
+ - 1.645 * numpy.sqrt(0.28 * x[0] ** 2 + 0.19 * x[1] ** 2
819
+ + 20.5 * x[2] ** 2 + 0.62 * x[3] ** 2)
820
+ ) # >=0
821
+
822
+ def h1(x):
823
+ return x[0] + x[1] + x[2] + x[3] - 1 # == 0
824
+
825
+ cons = ({'type': 'ineq', 'fun': g1_modified, "args": (0,)},
826
+ {'type': 'ineq', 'fun': g2},
827
+ {'type': 'eq', 'fun': h1})
828
+
829
+ shgo(f, bounds, n=300, iters=1, constraints=cons)
830
+ # using constrain with arguments AND sampling method sobol
831
+ shgo(f, bounds, n=300, iters=1, constraints=cons,
832
+ sampling_method='sobol')
833
+
834
+ def test_21_1_jac_true(self):
835
+ """Test that shgo can handle objective functions that return the
836
+ gradient alongside the objective value. Fixes gh-13547"""
837
+ # previous
838
+ def func(x):
839
+ return numpy.sum(numpy.power(x, 2)), 2 * x
840
+
841
+ shgo(
842
+ func,
843
+ bounds=[[-1, 1], [1, 2]],
844
+ n=100, iters=5,
845
+ sampling_method="sobol",
846
+ minimizer_kwargs={'method': 'SLSQP', 'jac': True}
847
+ )
848
+
849
+ # new
850
+ def func(x):
851
+ return numpy.sum(x ** 2), 2 * x
852
+
853
+ bounds = [[-1, 1], [1, 2], [-1, 1], [1, 2], [0, 3]]
854
+
855
+ res = shgo(func, bounds=bounds, sampling_method="sobol",
856
+ minimizer_kwargs={'method': 'SLSQP', 'jac': True})
857
+ ref = minimize(func, x0=[1, 1, 1, 1, 1], bounds=bounds,
858
+ jac=True)
859
+ assert res.success
860
+ assert_allclose(res.fun, ref.fun)
861
+ assert_allclose(res.x, ref.x, atol=1e-15)
862
+
863
+ @pytest.mark.parametrize('derivative', ['jac', 'hess', 'hessp'])
864
+ def test_21_2_derivative_options(self, derivative):
865
+ """shgo used to raise an error when passing `options` with 'jac'
866
+ see gh-12963; check that this is resolved.
867
+ """
868
+
869
+ def objective(x):
870
+ return 3 * x[0] * x[0] + 2 * x[0] + 5
871
+
872
+ def gradient(x):
873
+ return 6 * x[0] + 2
874
+
875
+ def hess(x):
876
+ return 6
877
+
878
+ def hessp(x, p):
879
+ return 6 * p
880
+
881
+ derivative_funcs = {'jac': gradient, 'hess': hess, 'hessp': hessp}
882
+ options = {derivative: derivative_funcs[derivative]}
883
+ minimizer_kwargs = {'method': 'trust-constr'}
884
+
885
+ bounds = [(-100, 100)]
886
+ res = shgo(objective, bounds, minimizer_kwargs=minimizer_kwargs,
887
+ options=options)
888
+ ref = minimize(objective, x0=[0], bounds=bounds, **minimizer_kwargs,
889
+ **options)
890
+
891
+ assert res.success
892
+ numpy.testing.assert_allclose(res.fun, ref.fun)
893
+ numpy.testing.assert_allclose(res.x, ref.x)
894
+
895
+ def test_21_3_hess_options_rosen(self):
896
+ """Ensure the Hessian gets passed correctly to the local minimizer
897
+ routine. Previously reported in gh-14533.
898
+ """
899
+ bounds = [(0, 1.6), (0, 1.6), (0, 1.4), (0, 1.4), (0, 1.4)]
900
+ options = {'jac': rosen_der, 'hess': rosen_hess}
901
+ minimizer_kwargs = {'method': 'Newton-CG'}
902
+ res = shgo(rosen, bounds, minimizer_kwargs=minimizer_kwargs,
903
+ options=options)
904
+ ref = minimize(rosen, numpy.zeros(5), method='Newton-CG',
905
+ **options)
906
+ assert res.success
907
+ assert_allclose(res.fun, ref.fun)
908
+ assert_allclose(res.x, ref.x, atol=1e-15)
909
+
910
+ def test_21_arg_tuple_sobol(self):
911
+ """shgo used to raise an error when passing `args` with Sobol sampling
912
+ see gh-12114; check that this is resolved"""
913
+
914
+ def fun(x, k):
915
+ return x[0] ** k
916
+
917
+ constraints = ({'type': 'ineq', 'fun': lambda x: x[0] - 1})
918
+
919
+ bounds = [(0, 10)]
920
+ res = shgo(fun, bounds, args=(1,), constraints=constraints,
921
+ sampling_method='sobol')
922
+ ref = minimize(fun, numpy.zeros(1), bounds=bounds, args=(1,),
923
+ constraints=constraints)
924
+ assert res.success
925
+ assert_allclose(res.fun, ref.fun)
926
+ assert_allclose(res.x, ref.x)
927
+
928
+
929
+ # Failure test functions
930
+ class TestShgoFailures:
931
+ def test_1_maxiter(self):
932
+ """Test failure on insufficient iterations"""
933
+ options = {'maxiter': 2}
934
+ res = shgo(test4_1.f, test4_1.bounds, n=2, iters=None,
935
+ options=options, sampling_method='sobol')
936
+
937
+ numpy.testing.assert_equal(False, res.success)
938
+ # numpy.testing.assert_equal(4, res.nfev)
939
+ numpy.testing.assert_equal(4, res.tnev)
940
+
941
+ def test_2_sampling(self):
942
+ """Rejection of unknown sampling method"""
943
+ assert_raises(ValueError, shgo, test1_1.f, test1_1.bounds,
944
+ sampling_method='not_Sobol')
945
+
946
+ def test_3_1_no_min_pool_sobol(self):
947
+ """Check that the routine stops when no minimiser is found
948
+ after maximum specified function evaluations"""
949
+ options = {'maxfev': 10,
950
+ # 'maxev': 10,
951
+ 'disp': True}
952
+ res = shgo(test_table.f, test_table.bounds, n=3, options=options,
953
+ sampling_method='sobol')
954
+ numpy.testing.assert_equal(False, res.success)
955
+ # numpy.testing.assert_equal(9, res.nfev)
956
+ numpy.testing.assert_equal(12, res.nfev)
957
+
958
+ def test_3_2_no_min_pool_simplicial(self):
959
+ """Check that the routine stops when no minimiser is found
960
+ after maximum specified sampling evaluations"""
961
+ options = {'maxev': 10,
962
+ 'disp': True}
963
+ res = shgo(test_table.f, test_table.bounds, n=3, options=options,
964
+ sampling_method='simplicial')
965
+ numpy.testing.assert_equal(False, res.success)
966
+
967
+ def test_4_1_bound_err(self):
968
+ """Specified bounds ub > lb"""
969
+ bounds = [(6, 3), (3, 5)]
970
+ assert_raises(ValueError, shgo, test1_1.f, bounds)
971
+
972
+ def test_4_2_bound_err(self):
973
+ """Specified bounds are of the form (lb, ub)"""
974
+ bounds = [(3, 5, 5), (3, 5)]
975
+ assert_raises(ValueError, shgo, test1_1.f, bounds)
976
+
977
+ def test_5_1_1_infeasible_sobol(self):
978
+ """Ensures the algorithm terminates on infeasible problems
979
+ after maxev is exceeded. Use infty constraints option"""
980
+ options = {'maxev': 100,
981
+ 'disp': True}
982
+
983
+ res = shgo(test_infeasible.f, test_infeasible.bounds,
984
+ constraints=test_infeasible.cons, n=100, options=options,
985
+ sampling_method='sobol')
986
+
987
+ numpy.testing.assert_equal(False, res.success)
988
+
989
+ def test_5_1_2_infeasible_sobol(self):
990
+ """Ensures the algorithm terminates on infeasible problems
991
+ after maxev is exceeded. Do not use infty constraints option"""
992
+ options = {'maxev': 100,
993
+ 'disp': True,
994
+ 'infty_constraints': False}
995
+
996
+ res = shgo(test_infeasible.f, test_infeasible.bounds,
997
+ constraints=test_infeasible.cons, n=100, options=options,
998
+ sampling_method='sobol')
999
+
1000
+ numpy.testing.assert_equal(False, res.success)
1001
+
1002
+ def test_5_2_infeasible_simplicial(self):
1003
+ """Ensures the algorithm terminates on infeasible problems
1004
+ after maxev is exceeded."""
1005
+ options = {'maxev': 1000,
1006
+ 'disp': False}
1007
+
1008
+ res = shgo(test_infeasible.f, test_infeasible.bounds,
1009
+ constraints=test_infeasible.cons, n=100, options=options,
1010
+ sampling_method='simplicial')
1011
+
1012
+ numpy.testing.assert_equal(False, res.success)
1013
+
1014
+ def test_6_1_lower_known_f_min(self):
1015
+ """Test Global mode limiting local evaluations with f* too high"""
1016
+ options = { # Specify known function value
1017
+ 'f_min': test2_1.expected_fun + 2.0,
1018
+ 'f_tol': 1e-6,
1019
+ # Specify number of local iterations to perform
1020
+ 'minimize_every_iter': True,
1021
+ 'local_iter': 1,
1022
+ 'infty_constraints': False}
1023
+ args = (test2_1.f, test2_1.bounds)
1024
+ kwargs = {'constraints': test2_1.cons,
1025
+ 'n': None,
1026
+ 'iters': None,
1027
+ 'options': options,
1028
+ 'sampling_method': 'sobol'
1029
+ }
1030
+ warns(UserWarning, shgo, *args, **kwargs)
1031
+
1032
+ def test(self):
1033
+ from scipy.optimize import rosen, shgo
1034
+ bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)]
1035
+
1036
+ def fun(x):
1037
+ fun.nfev += 1
1038
+ return rosen(x)
1039
+
1040
+ fun.nfev = 0
1041
+
1042
+ result = shgo(fun, bounds)
1043
+ assert result.nfev == fun.nfev
1044
+
1045
+
1046
+ # Returns
1047
+ class TestShgoReturns:
1048
+ def test_1_nfev_simplicial(self):
1049
+ bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)]
1050
+
1051
+ def fun(x):
1052
+ fun.nfev += 1
1053
+ return rosen(x)
1054
+
1055
+ fun.nfev = 0
1056
+
1057
+ result = shgo(fun, bounds)
1058
+ numpy.testing.assert_equal(fun.nfev, result.nfev)
1059
+
1060
+ def test_1_nfev_sobol(self):
1061
+ bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)]
1062
+
1063
+ def fun(x):
1064
+ fun.nfev += 1
1065
+ return rosen(x)
1066
+
1067
+ fun.nfev = 0
1068
+
1069
+ result = shgo(fun, bounds, sampling_method='sobol')
1070
+ numpy.testing.assert_equal(fun.nfev, result.nfev)
1071
+
1072
+
1073
+ def test_vector_constraint():
1074
+ # gh15514
1075
+ def quad(x):
1076
+ x = np.asarray(x)
1077
+ return [np.sum(x ** 2)]
1078
+
1079
+ nlc = NonlinearConstraint(quad, [2.2], [3])
1080
+ oldc = new_constraint_to_old(nlc, np.array([1.0, 1.0]))
1081
+
1082
+ res = shgo(rosen, [(0, 10), (0, 10)], constraints=oldc, sampling_method='sobol')
1083
+ assert np.all(np.sum((res.x)**2) >= 2.2)
1084
+ assert np.all(np.sum((res.x) ** 2) <= 3.0)
1085
+ assert res.success
1086
+
1087
+
1088
+ @pytest.mark.filterwarnings("ignore:delta_grad")
1089
+ def test_trust_constr():
1090
+ def quad(x):
1091
+ x = np.asarray(x)
1092
+ return [np.sum(x ** 2)]
1093
+
1094
+ nlc = NonlinearConstraint(quad, [2.6], [3])
1095
+ minimizer_kwargs = {'method': 'trust-constr'}
1096
+ # note that we don't supply the constraints in minimizer_kwargs,
1097
+ # so if the final result obeys the constraints we know that shgo
1098
+ # passed them on to 'trust-constr'
1099
+ res = shgo(
1100
+ rosen,
1101
+ [(0, 10), (0, 10)],
1102
+ constraints=nlc,
1103
+ sampling_method='sobol',
1104
+ minimizer_kwargs=minimizer_kwargs
1105
+ )
1106
+ assert np.all(np.sum((res.x)**2) >= 2.6)
1107
+ assert np.all(np.sum((res.x) ** 2) <= 3.0)
1108
+ assert res.success
1109
+
1110
+
1111
+ def test_equality_constraints():
1112
+ # gh16260
1113
+ bounds = [(0.9, 4.0)] * 2
1114
+
1115
+ def faulty(x):
1116
+ return x[0] + x[1]
1117
+
1118
+ nlc = NonlinearConstraint(faulty, 3.9, 3.9)
1119
+ res = shgo(rosen, bounds=bounds, constraints=nlc)
1120
+ assert_allclose(np.sum(res.x), 3.9)
1121
+
1122
+ def faulty(x):
1123
+ return x[0] + x[1] - 3.9
1124
+
1125
+ constraints = {'type': 'eq', 'fun': faulty}
1126
+ res = shgo(rosen, bounds=bounds, constraints=constraints)
1127
+ assert_allclose(np.sum(res.x), 3.9)
1128
+
1129
+ bounds = [(0, 1.0)] * 4
1130
+ # The sum of the variables should equal 1.
1131
+ def faulty(x):
1132
+ return x[0] + x[1] + x[2] + x[3] - 1
1133
+
1134
+ # options = {'minimize_every_iter': True, 'local_iter':10}
1135
+ constraints = {'type': 'eq', 'fun': faulty}
1136
+ res = shgo(
1137
+ lambda x: - np.prod(x),
1138
+ bounds=bounds,
1139
+ constraints=constraints,
1140
+ sampling_method='sobol'
1141
+ )
1142
+ assert_allclose(np.sum(res.x), 1.0)
1143
+
1144
+ def test_gh16971():
1145
+ def cons(x):
1146
+ return np.sum(x**2) - 0
1147
+
1148
+ c = {'fun': cons, 'type': 'ineq'}
1149
+ minimizer_kwargs = {
1150
+ 'method': 'COBYLA',
1151
+ 'options': {'rhobeg': 5, 'tol': 5e-1, 'catol': 0.05}
1152
+ }
1153
+
1154
+ s = SHGO(
1155
+ rosen, [(0, 10)]*2, constraints=c, minimizer_kwargs=minimizer_kwargs
1156
+ )
1157
+
1158
+ assert s.minimizer_kwargs['method'].lower() == 'cobyla'
1159
+ assert s.minimizer_kwargs['options']['catol'] == 0.05
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test__spectral.py ADDED
@@ -0,0 +1,226 @@
1
+ import itertools
2
+
3
+ import numpy as np
4
+ from numpy import exp
5
+ from numpy.testing import assert_, assert_equal
6
+
7
+ from scipy.optimize import root
8
+
9
+
10
+ def test_performance():
11
+ # Compare performance results to those listed in
12
+ # [Cheng & Li, IMA J. Num. An. 29, 814 (2008)]
13
+ # and
14
+ # [W. La Cruz, J.M. Martinez, M. Raydan, Math. Comp. 75, 1429 (2006)].
15
+ # and those produced by dfsane.f from M. Raydan's website.
16
+ #
17
+ # Where the results disagree, the largest limits are taken.
18
+
19
+ e_a = 1e-5
20
+ e_r = 1e-4
21
+
22
+ table_1 = [
23
+ dict(F=F_1, x0=x0_1, n=1000, nit=5, nfev=5),
24
+ dict(F=F_1, x0=x0_1, n=10000, nit=2, nfev=2),
25
+ dict(F=F_2, x0=x0_2, n=500, nit=11, nfev=11),
26
+ dict(F=F_2, x0=x0_2, n=2000, nit=11, nfev=11),
27
+ # dict(F=F_4, x0=x0_4, n=999, nit=243, nfev=1188) removed:
28
+ # too sensitive to rounding errors
29
+ # Results from dfsane.f; papers list nit=3, nfev=3
30
+ dict(F=F_6, x0=x0_6, n=100, nit=6, nfev=6),
31
+ # Must have n%3==0, typo in papers?
32
+ dict(F=F_7, x0=x0_7, n=99, nit=23, nfev=29),
33
+ # Must have n%3==0, typo in papers?
34
+ dict(F=F_7, x0=x0_7, n=999, nit=23, nfev=29),
35
+ # Results from dfsane.f; papers list nit=nfev=6?
36
+ dict(F=F_9, x0=x0_9, n=100, nit=12, nfev=18),
37
+ dict(F=F_9, x0=x0_9, n=1000, nit=12, nfev=18),
38
+ # Results from dfsane.f; papers list nit=2, nfev=12
39
+ dict(F=F_10, x0=x0_10, n=1000, nit=5, nfev=5),
40
+ ]
41
+
42
+ # Check also scaling invariance
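+ # Sketch of the invariance being exercised: if F(x*) = 0, then the
+ # scaled function func(x) = yscale * F(x / xscale) has its root at
+ # xscale * x*, and its residual norms carry a factor of yscale; hence
+ # x0 is scaled by xscale and fatol by yscale below.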
43
+ for xscale, yscale, line_search in itertools.product(
44
+ [1.0, 1e-10, 1e10], [1.0, 1e-10, 1e10], ['cruz', 'cheng']
45
+ ):
46
+ for problem in table_1:
47
+ n = problem['n']
48
+ def func(x, n):
49
+ return yscale * problem['F'](x / xscale, n)
50
+ args = (n,)
51
+ x0 = problem['x0'](n) * xscale
52
+
53
+ fatol = np.sqrt(n) * e_a * yscale + e_r * np.linalg.norm(func(x0, n))
54
+
55
+ sigma_eps = 1e-10 * min(yscale/xscale, xscale/yscale)
56
+ sigma_0 = xscale/yscale
57
+
58
+ with np.errstate(over='ignore'):
59
+ sol = root(func, x0, args=args,
60
+ options=dict(ftol=0, fatol=fatol, maxfev=problem['nfev'] + 1,
61
+ sigma_0=sigma_0, sigma_eps=sigma_eps,
62
+ line_search=line_search),
63
+ method='DF-SANE')
64
+
65
+ err_msg = repr(
66
+ [xscale, yscale, line_search, problem, np.linalg.norm(func(sol.x, n)),
67
+ fatol, sol.success, sol.nit, sol.nfev]
68
+ )
69
+ assert sol.success, err_msg
70
+ # nfev+1: dfsane.f doesn't count first eval
71
+ assert sol.nfev <= problem['nfev'] + 1, err_msg
72
+ assert sol.nit <= problem['nit'], err_msg
73
+ assert np.linalg.norm(func(sol.x, n)) <= fatol, err_msg
74
+
75
+
76
+ def test_complex():
77
+ def func(z):
78
+ return z**2 - 1 + 2j
79
+ x0 = 2.0j
80
+
81
+ ftol = 1e-4
82
+ sol = root(func, x0, tol=ftol, method='DF-SANE')
83
+
84
+ assert_(sol.success)
85
+
86
+ f0 = np.linalg.norm(func(x0))
87
+ fx = np.linalg.norm(func(sol.x))
88
+ assert_(fx <= ftol*f0)
89
+
90
+
91
+ def test_linear_definite():
92
+ # The DF-SANE paper proves convergence for "strongly isolated"
93
+ # solutions.
94
+ #
95
+ # For linear systems F(x) = A x - b = 0, with A positive or
96
+ # negative definite, the solution is strongly isolated.
97
+
98
+ def check_solvability(A, b, line_search='cruz'):
99
+ def func(x):
100
+ return A.dot(x) - b
101
+ xp = np.linalg.solve(A, b)
102
+ eps = np.linalg.norm(func(xp)) * 1e3
103
+ sol = root(
104
+ func, b,
105
+ options=dict(fatol=eps, ftol=0, maxfev=17523, line_search=line_search),
106
+ method='DF-SANE',
107
+ )
108
+ assert_(sol.success)
109
+ assert_(np.linalg.norm(func(sol.x)) <= eps)
110
+
111
+ n = 90
112
+
113
+ # Test linear pos.def. system
114
+ np.random.seed(1234)
115
+ A = np.arange(n*n).reshape(n, n)
116
+ A = A + n*n * np.diag(1 + np.arange(n))
117
+ assert_(np.linalg.eigvals(A).min() > 0)
118
+ b = np.arange(n) * 1.0
119
+ check_solvability(A, b, 'cruz')
120
+ check_solvability(A, b, 'cheng')
121
+
122
+ # Test linear neg.def. system
123
+ check_solvability(-A, b, 'cruz')
124
+ check_solvability(-A, b, 'cheng')
125
+
126
+
127
+ def test_shape():
128
+ def f(x, arg):
129
+ return x - arg
130
+
131
+ for dt in [float, complex]:
132
+ x = np.zeros([2,2])
133
+ arg = np.ones([2,2], dtype=dt)
134
+
135
+ sol = root(f, x, args=(arg,), method='DF-SANE')
136
+ assert_(sol.success)
137
+ assert_equal(sol.x.shape, x.shape)
138
+
139
+
140
+ # Some of the test functions and initial guesses listed in
141
+ # [W. La Cruz, M. Raydan. Optimization Methods and Software, 18, 583 (2003)]
142
+
143
+ def F_1(x, n):
144
+ g = np.zeros([n])
145
+ i = np.arange(2, n+1)
146
+ g[0] = exp(x[0] - 1) - 1
147
+ g[1:] = i*(exp(x[1:] - 1) - x[1:])
148
+ return g
149
+
150
+ def x0_1(n):
151
+ x0 = np.empty([n])
152
+ x0.fill(n/(n-1))
153
+ return x0
154
+
155
+ def F_2(x, n):
156
+ g = np.zeros([n])
157
+ i = np.arange(2, n+1)
158
+ g[0] = exp(x[0]) - 1
159
+ g[1:] = 0.1*i*(exp(x[1:]) + x[:-1] - 1)
160
+ return g
161
+
162
+ def x0_2(n):
163
+ x0 = np.empty([n])
164
+ x0.fill(1/n**2)
165
+ return x0
166
+
167
+
168
+ def F_4(x, n): # skip name check
169
+ assert_equal(n % 3, 0)
170
+ g = np.zeros([n])
171
+ # Note: the first line is typoed in some of the references;
172
+ # correct in original [Gasparo, Optimization Meth. 13, 79 (2000)]
173
+ g[::3] = 0.6 * x[::3] + 1.6 * x[1::3]**3 - 7.2 * x[1::3]**2 + 9.6 * x[1::3] - 4.8
174
+ g[1::3] = (0.48 * x[::3] - 0.72 * x[1::3]**3 + 3.24 * x[1::3]**2 - 4.32 * x[1::3]
175
+ - x[2::3] + 0.2 * x[2::3]**3 + 2.16)
176
+ g[2::3] = 1.25 * x[2::3] - 0.25*x[2::3]**3
177
+ return g
178
+
179
+
180
+ def x0_4(n): # skip name check
181
+ assert_equal(n % 3, 0)
182
+ x0 = np.array([-1, 1/2, -1] * (n//3))
183
+ return x0
184
+
185
+ def F_6(x, n):
186
+ c = 0.9
187
+ mu = (np.arange(1, n+1) - 0.5)/n
188
+ return x - 1/(1 - c/(2*n) * (mu[:,None]*x / (mu[:,None] + mu)).sum(axis=1))
189
+
190
+ def x0_6(n):
191
+ return np.ones([n])
192
+
193
+ def F_7(x, n):
194
+ assert_equal(n % 3, 0)
195
+
196
+ def phi(t):
197
+ v = 0.5*t - 2
198
+ v[t > -1] = ((-592*t**3 + 888*t**2 + 4551*t - 1924)/1998)[t > -1]
199
+ v[t >= 2] = (0.5*t + 2)[t >= 2]
200
+ return v
201
+ g = np.zeros([n])
202
+ g[::3] = 1e4 * x[1::3]**2 - 1
203
+ g[1::3] = exp(-x[::3]) + exp(-x[1::3]) - 1.0001
204
+ g[2::3] = phi(x[2::3])
205
+ return g
206
+
207
+ def x0_7(n):
208
+ assert_equal(n % 3, 0)
209
+ return np.array([1e-3, 18, 1] * (n//3))
210
+
211
+ def F_9(x, n):
212
+ g = np.zeros([n])
213
+ i = np.arange(2, n)
214
+ g[0] = x[0]**3/3 + x[1]**2/2
215
+ g[1:-1] = -x[1:-1]**2/2 + i*x[1:-1]**3/3 + x[2:]**2/2
216
+ g[-1] = -x[-1]**2/2 + n*x[-1]**3/3
217
+ return g
218
+
219
+ def x0_9(n):
220
+ return np.ones([n])
221
+
222
+ def F_10(x, n):
223
+ return np.log(1 + x) - x/n
224
+
225
+ def x0_10(n):
226
+ return np.ones([n])
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test_bracket.py ADDED
@@ -0,0 +1,780 @@
1
+ import pytest
2
+
3
+ import numpy as np
4
+ from numpy.testing import assert_array_less, assert_allclose, assert_equal
5
+
6
+ from scipy.optimize._bracket import _bracket_root, _bracket_minimum, _ELIMITS
7
+ import scipy._lib._elementwise_iterative_method as eim
8
+ from scipy import stats
9
+
10
+ class TestBracketRoot:
11
+ @pytest.mark.parametrize("seed", (615655101, 3141866013, 238075752))
12
+ @pytest.mark.parametrize("use_xmin", (False, True))
13
+ @pytest.mark.parametrize("other_side", (False, True))
14
+ @pytest.mark.parametrize("fix_one_side", (False, True))
15
+ def test_nfev_expected(self, seed, use_xmin, other_side, fix_one_side):
16
+ # Property-based test to confirm that _bracket_root is behaving as
17
+ # expected. The basic case is when root < xl0 < xr0.
18
+ # The number of times bracket expands (per side) can be found by
19
+ # setting the expression for the left endpoint of the bracket to the
20
+ # root of f (x=0), solving for i, and rounding up. The corresponding
21
+ # lower and upper ends of the bracket are found by plugging this back
22
+ # into the expression for the ends of the bracket.
23
+ # `other_side=True` is the case that xl0 < xr0 < root
24
+ # Special cases like xl0 < root < xr0 are tested separately
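+ # Illustration for the case without xmin: after i expansions the left
+ # endpoint sits at xr0 - d*factor**i, which first passes the root at
+ # x = 0 once factor**i >= xr0/d, i.e. at i = ceil(log(xr0/d) /
+ # log(factor)); this is the n computed below.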
25
+
26
+ rng = np.random.default_rng(seed)
27
+ xl0, d, factor = rng.random(size=3) * [1e5, 10, 5]
28
+ factor = 1 + factor # factor must be greater than 1
29
+ xr0 = xl0 + d # xr0 must be greater than xl0 in the basic case
30
+
31
+ def f(x):
32
+ f.count += 1
33
+ return x # root is 0
34
+
35
+ if use_xmin:
36
+ xmin = -rng.random()
37
+ n = np.ceil(np.log(-(xl0 - xmin) / xmin) / np.log(factor))
38
+ l, u = xmin + (xl0 - xmin)*factor**-n, xmin + (xl0 - xmin)*factor**-(n - 1)
39
+ kwargs = dict(xl0=xl0, xr0=xr0, factor=factor, xmin=xmin)
40
+ else:
41
+ n = np.ceil(np.log(xr0/d) / np.log(factor))
42
+ l, u = xr0 - d*factor**n, xr0 - d*factor**(n-1)
43
+ kwargs = dict(xl0=xl0, xr0=xr0, factor=factor)
44
+
45
+ if other_side:
46
+ kwargs['xl0'], kwargs['xr0'] = -kwargs['xr0'], -kwargs['xl0']
47
+ l, u = -u, -l
48
+ if 'xmin' in kwargs:
49
+ kwargs['xmax'] = -kwargs.pop('xmin')
50
+
51
+ if fix_one_side:
52
+ if other_side:
53
+ kwargs['xmin'] = -xr0
54
+ else:
55
+ kwargs['xmax'] = xr0
56
+
57
+ f.count = 0
58
+ res = _bracket_root(f, **kwargs)
59
+
60
+ # Compare reported number of function evaluations `nfev` against
61
+ # reported `nit`, actual function call count `f.count`, and theoretical
62
+ # number of expansions `n`.
63
+ # When both sides are free, these get multiplied by 2 because function
64
+ # is evaluated on the left and the right each iteration.
65
+ # When one side is fixed, however, we add one: the function is
66
+ # evaluated only once at the fixed endpoint.
67
+ # Add 1 to `n` and `res.nit` because function evaluations occur at
68
+ # iterations *0*, 1, ..., `n`. Subtract 1 from `f.count` because
69
+ # function is called separately for left and right in iteration 0.
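+ # For example, n = 3 with both sides free gives nfev = 2*(3+1) = 8
+ # evaluated points from f.count = n + 2 = 5 calls: two separate calls
+ # in iteration 0, then one call per expansion covering both sides.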
70
+ if not fix_one_side:
71
+ assert res.nfev == 2*(res.nit+1) == 2*(f.count-1) == 2*(n + 1)
72
+ else:
73
+ assert res.nfev == (res.nit+1)+1 == (f.count-1)+1 == (n+1)+1
74
+
75
+ # Compare reported bracket to theoretical bracket and reported function
76
+ # values to function evaluated at bracket.
77
+ bracket = np.asarray([res.xl, res.xr])
78
+ assert_allclose(bracket, (l, u))
79
+ f_bracket = np.asarray([res.fl, res.fr])
80
+ assert_allclose(f_bracket, f(bracket))
81
+
82
+ # Check that bracket is valid and that status and success are correct
83
+ assert res.xr > res.xl
84
+ signs = np.sign(f_bracket)
85
+ assert signs[0] == -signs[1]
86
+ assert res.status == 0
87
+ assert res.success
88
+
89
+ def f(self, q, p):
90
+ return stats.norm.cdf(q) - p
91
+
92
+ @pytest.mark.parametrize('p', [0.6, np.linspace(0.05, 0.95, 10)])
93
+ @pytest.mark.parametrize('xmin', [-5, None])
94
+ @pytest.mark.parametrize('xmax', [5, None])
95
+ @pytest.mark.parametrize('factor', [1.2, 2])
96
+ def test_basic(self, p, xmin, xmax, factor):
97
+ # Test basic functionality to bracket root (distribution PPF)
98
+ res = _bracket_root(self.f, -0.01, 0.01, xmin=xmin, xmax=xmax,
99
+ factor=factor, args=(p,))
100
+ assert_equal(-np.sign(res.fl), np.sign(res.fr))
101
+
102
+ @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)])
103
+ def test_vectorization(self, shape):
104
+ # Test for correct functionality, output shapes, and dtypes for various
105
+ # input shapes.
106
+ p = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6
107
+ args = (p,)
108
+ maxiter = 10
109
+
110
+ @np.vectorize
111
+ def bracket_root_single(xl0, xr0, xmin, xmax, factor, p):
112
+ return _bracket_root(self.f, xl0, xr0, xmin=xmin, xmax=xmax,
113
+ factor=factor, args=(p,),
114
+ maxiter=maxiter)
115
+
116
+ def f(*args, **kwargs):
117
+ f.f_evals += 1
118
+ return self.f(*args, **kwargs)
119
+ f.f_evals = 0
120
+
121
+ rng = np.random.default_rng(2348234)
122
+ xl0 = -rng.random(size=shape)
123
+ xr0 = rng.random(size=shape)
124
+ xmin, xmax = 1e3*xl0, 1e3*xr0
125
+ if shape: # make some elements unbounded
126
+ i = rng.random(size=shape) > 0.5
127
+ xmin[i], xmax[i] = -np.inf, np.inf
128
+ factor = rng.random(size=shape) + 1.5
129
+ res = _bracket_root(f, xl0, xr0, xmin=xmin, xmax=xmax, factor=factor,
130
+ args=args, maxiter=maxiter)
131
+ refs = bracket_root_single(xl0, xr0, xmin, xmax, factor, p).ravel()
132
+
133
+ attrs = ['xl', 'xr', 'fl', 'fr', 'success', 'nfev', 'nit']
134
+ for attr in attrs:
135
+ ref_attr = [getattr(ref, attr) for ref in refs]
136
+ res_attr = getattr(res, attr)
137
+ assert_allclose(res_attr.ravel(), ref_attr)
138
+ assert_equal(res_attr.shape, shape)
139
+
140
+ assert np.issubdtype(res.success.dtype, np.bool_)
141
+ if shape:
142
+ assert np.all(res.success[1:-1])
143
+ assert np.issubdtype(res.status.dtype, np.integer)
144
+ assert np.issubdtype(res.nfev.dtype, np.integer)
145
+ assert np.issubdtype(res.nit.dtype, np.integer)
146
+ assert_equal(np.max(res.nit), f.f_evals - 2)
147
+ assert_array_less(res.xl, res.xr)
148
+ assert_allclose(res.fl, self.f(res.xl, *args))
149
+ assert_allclose(res.fr, self.f(res.xr, *args))
150
+
151
+ def test_flags(self):
152
+ # Test cases that should produce different status flags; show that all
153
+ # can be produced simultaneously.
154
+ def f(xs, js):
155
+ funcs = [lambda x: x - 1.5,
156
+ lambda x: x - 1000,
157
+ lambda x: x - 1000,
158
+ lambda x: np.nan]
159
+
160
+ return [funcs[j](x) for x, j in zip(xs, js)]
161
+
162
+ args = (np.arange(4, dtype=np.int64),)
163
+ res = _bracket_root(f, xl0=[-1, -1, -1, -1], xr0=[1, 1, 1, 1],
164
+ xmin=[-np.inf, -1, -np.inf, -np.inf],
165
+ xmax=[np.inf, 1, np.inf, np.inf],
166
+ args=args, maxiter=3)
167
+
168
+ ref_flags = np.array([eim._ECONVERGED,
169
+ _ELIMITS,
170
+ eim._ECONVERR,
171
+ eim._EVALUEERR])
172
+ assert_equal(res.status, ref_flags)
173
+
174
+ @pytest.mark.parametrize("root", (0.622, [0.622, 0.623]))
175
+ @pytest.mark.parametrize('xmin', [-5, None])
176
+ @pytest.mark.parametrize('xmax', [5, None])
177
+ @pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64))
178
+ def test_dtype(self, root, xmin, xmax, dtype):
179
+ # Test that dtypes are preserved
180
+
181
+ xmin = xmin if xmin is None else dtype(xmin)
182
+ xmax = xmax if xmax is None else dtype(xmax)
183
+ root = dtype(root)
184
+ def f(x, root):
185
+ return ((x - root) ** 3).astype(dtype)
186
+
187
+ bracket = np.asarray([-0.01, 0.01], dtype=dtype)
188
+ res = _bracket_root(f, *bracket, xmin=xmin, xmax=xmax, args=(root,))
189
+ assert np.all(res.success)
190
+ assert res.xl.dtype == res.xr.dtype == dtype
191
+ assert res.fl.dtype == res.fr.dtype == dtype
192
+
193
+ def test_input_validation(self):
194
+ # Test input validation for appropriate error messages
195
+
196
+ message = '`func` must be callable.'
197
+ with pytest.raises(ValueError, match=message):
198
+ _bracket_root(None, -4, 4)
199
+
200
+ message = '...must be numeric and real.'
201
+ with pytest.raises(ValueError, match=message):
202
+ _bracket_root(lambda x: x, -4+1j, 4)
203
+ with pytest.raises(ValueError, match=message):
204
+ _bracket_root(lambda x: x, -4, 'hello')
205
+ with pytest.raises(ValueError, match=message):
206
+ _bracket_root(lambda x: x, -4, 4, xmin=np)
207
+ with pytest.raises(ValueError, match=message):
208
+ _bracket_root(lambda x: x, -4, 4, xmax=object())
209
+ with pytest.raises(ValueError, match=message):
210
+ _bracket_root(lambda x: x, -4, 4, factor=sum)
211
+
212
+ message = "All elements of `factor` must be greater than 1."
213
+ with pytest.raises(ValueError, match=message):
214
+ _bracket_root(lambda x: x, -4, 4, factor=0.5)
215
+
216
+ message = '`xmin <= xl0 < xr0 <= xmax` must be True'
217
+ with pytest.raises(ValueError, match=message):
218
+ _bracket_root(lambda x: x, 4, -4)
219
+ with pytest.raises(ValueError, match=message):
220
+ _bracket_root(lambda x: x, -4, 4, xmax=np.nan)
221
+ with pytest.raises(ValueError, match=message):
222
+ _bracket_root(lambda x: x, -4, 4, xmin=10)
223
+
224
+ message = "shape mismatch: objects cannot be broadcast"
225
+ # raised by `np.broadcast`, but the traceback is readable IMO
226
+ with pytest.raises(ValueError, match=message):
227
+ _bracket_root(lambda x: x, [-2, -3], [3, 4, 5])
228
+ # Consider making this give a more readable error message
229
+ # with pytest.raises(ValueError, match=message):
230
+ # _bracket_root(lambda x: [x[0], x[1], x[1]], [-3, -3], [5, 5])
231
+
232
+ message = '`maxiter` must be a non-negative integer.'
233
+ with pytest.raises(ValueError, match=message):
234
+ _bracket_root(lambda x: x, -4, 4, maxiter=1.5)
235
+ with pytest.raises(ValueError, match=message):
236
+ _bracket_root(lambda x: x, -4, 4, maxiter=-1)
237
+
238
+ def test_special_cases(self):
239
+ # Test edge cases and other special cases
240
+
241
+ # Test that integers are not passed to `f`
242
+ # (otherwise this would overflow)
243
+ def f(x):
244
+ assert np.issubdtype(x.dtype, np.floating)
245
+ return x ** 99 - 1
246
+
247
+ res = _bracket_root(f, -7, 5)
248
+ assert res.success
249
+
250
+ # Test maxiter = 0. Should do nothing to bracket.
251
+ def f(x):
252
+ return x - 10
253
+
254
+ bracket = (-3, 5)
255
+ res = _bracket_root(f, *bracket, maxiter=0)
256
+ assert (res.xl, res.xr) == bracket
257
+ assert res.nit == 0
258
+ assert res.nfev == 2
259
+ assert res.status == -2
260
+
261
+ # Test scalar `args` (not in tuple)
262
+ def f(x, c):
263
+ return c*x - 1
264
+
265
+ res = _bracket_root(f, -1, 1, args=3)
266
+ assert res.success
267
+ assert_allclose(res.fl, f(res.xl, 3))
268
+
269
+ # Test other edge cases
270
+
271
+ def f(x):
272
+ f.count += 1
273
+ return x
274
+
275
+ # 1. root lies within guess of bracket
276
+ f.count = 0
277
+ _bracket_root(f, -10, 20)
278
+ assert_equal(f.count, 2)
279
+
280
+ # 2. bracket endpoint hits root exactly
281
+ f.count = 0
282
+ res = _bracket_root(f, 5, 10, factor=2)
283
+ bracket = (res.xl, res.xr)
284
+ assert_equal(res.nfev, 4)
285
+ assert_allclose(bracket, (0, 5), atol=1e-15)
286
+
287
+ # 3. bracket limit hits root exactly
288
+ with np.errstate(over='ignore'):
289
+ res = _bracket_root(f, 5, 10, xmin=0)
290
+ bracket = (res.xl, res.xr)
291
+ assert_allclose(bracket[0], 0, atol=1e-15)
292
+ with np.errstate(over='ignore'):
293
+ res = _bracket_root(f, -10, -5, xmax=0)
294
+ bracket = (res.xl, res.xr)
295
+ assert_allclose(bracket[1], 0, atol=1e-15)
296
+
297
+ # 4. bracket not within min, max
298
+ with np.errstate(over='ignore'):
299
+ res = _bracket_root(f, 5, 10, xmin=1)
300
+ assert not res.success
301
+
302
+
303
+ class TestBracketMinimum:
304
+ def init_f(self):
305
+ def f(x, a, b):
306
+ f.count += 1
307
+ return (x - a)**2 + b
308
+ f.count = 0
309
+ return f
310
+
311
+ def assert_valid_bracket(self, result):
312
+ assert np.all(
313
+ (result.xl < result.xm) & (result.xm < result.xr)
314
+ )
315
+ assert np.all(
316
+ (result.fl >= result.fm) & (result.fr > result.fm)
317
+ | (result.fl > result.fm) & (result.fr > result.fm)
318
+ )
319
+
320
+ def get_kwargs(
321
+ self, *, xl0=None, xr0=None, factor=None, xmin=None, xmax=None, args=()
322
+ ):
323
+ names = ("xl0", "xr0", "xmin", "xmax", "factor", "args")
324
+ return {
325
+ name: val for name, val in zip(names, (xl0, xr0, xmin, xmax, factor, args))
326
+ if isinstance(val, np.ndarray) or np.isscalar(val)
327
+ or val not in [None, ()]
328
+ }
329
+
330
+ @pytest.mark.parametrize(
331
+ "seed",
332
+ (
333
+ 307448016549685229886351382450158984917,
334
+ 11650702770735516532954347931959000479,
335
+ 113767103358505514764278732330028568336,
336
+ )
337
+ )
338
+ @pytest.mark.parametrize("use_xmin", (False, True))
339
+ @pytest.mark.parametrize("other_side", (False, True))
340
+ def test_nfev_expected(self, seed, use_xmin, other_side):
341
+ rng = np.random.default_rng(seed)
342
+ args = (0, 0) # f(x) = x^2 with minimum at 0
343
+ # xl0, xm0, xr0 are chosen such that the initial bracket is to
344
+ # the right of the minimum, and the bracket will expand
345
+ # downhill towards zero.
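+ # Sketch of the expected count: without xmin the left endpoint moves
+ # to xl0 - d1*factor**i after i expansions; with xmin the gap to xmin
+ # shrinks as (xl0 - xmin)*factor**-i. n below is the smallest i that
+ # carries the endpoint past the minimum at zero.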
346
+ xl0, d1, d2, factor = rng.random(size=4) * [1e5, 10, 10, 5]
347
+ xm0 = xl0 + d1
348
+ xr0 = xm0 + d2
349
+ # Factor should be greater than one.
350
+ factor += 1
351
+
352
+ if use_xmin:
353
+ xmin = -rng.random() * 5
354
+ n = int(np.ceil(np.log(-(xl0 - xmin) / xmin) / np.log(factor)))
355
+ lower = xmin + (xl0 - xmin)*factor**-n
356
+ middle = xmin + (xl0 - xmin)*factor**-(n-1)
357
+ upper = xmin + (xl0 - xmin)*factor**-(n-2) if n > 1 else xm0
358
+ # It may be the case that lower is below the minimum, but we still
359
+ # don't have a valid bracket.
360
+ if middle**2 > lower**2:
361
+ n += 1
362
+ lower, middle, upper = (
363
+ xmin + (xl0 - xmin)*factor**-n, lower, middle
364
+ )
365
+ else:
366
+ xmin = None
367
+ n = int(np.ceil(np.log(xl0 / d1) / np.log(factor)))
368
+ lower = xl0 - d1*factor**n
369
+ middle = xl0 - d1*factor**(n-1) if n > 1 else xl0
370
+ upper = xl0 - d1*factor**(n-2) if n > 1 else xm0
371
+ # It may be the case that lower is below the minimum, but we still
372
+ # don't have a valid bracket.
373
+ if middle**2 > lower**2:
374
+ n += 1
375
+ lower, middle, upper = (
376
+ xl0 - d1*factor**n, lower, middle
377
+ )
378
+ f = self.init_f()
379
+
380
+ xmax = None
381
+ if other_side:
382
+ xl0, xm0, xr0 = -xr0, -xm0, -xl0
383
+ xmin, xmax = None, -xmin if xmin is not None else None
384
+ lower, middle, upper = -upper, -middle, -lower
385
+
386
+ kwargs = self.get_kwargs(
387
+ xl0=xl0, xr0=xr0, xmin=xmin, xmax=xmax, factor=factor, args=args
388
+ )
389
+ result = _bracket_minimum(f, xm0, **kwargs)
390
+
391
+ # Check that `nfev` and `nit` have the correct relationship
392
+ assert result.nfev == result.nit + 3
393
+ # Check that `nfev` reports the correct number of function evaluations.
394
+ assert result.nfev == f.count
395
+ # Check that the number of iterations matches the theoretical value.
396
+ assert result.nit == n
397
+
398
+ # Compare reported bracket to theoretical bracket and reported function
399
+ # values to function evaluated at bracket.
400
+ bracket = np.asarray([result.xl, result.xm, result.xr])
401
+ assert_allclose(bracket, (lower, middle, upper))
402
+ f_bracket = np.asarray([result.fl, result.fm, result.fr])
403
+ assert_allclose(f_bracket, f(bracket, *args))
404
+
405
+ self.assert_valid_bracket(result)
406
+ assert result.status == 0
407
+ assert result.success
408
+
409
+ def test_flags(self):
410
+ # Test cases that should produce different status flags; show that all
411
+ # can be produced simultaneously
412
+ def f(xs, js):
413
+ funcs = [lambda x: (x - 1.5)**2,
414
+ lambda x: x,
415
+ lambda x: x,
416
+ lambda x: np.nan]
417
+ return [funcs[j](x) for x, j in zip(xs, js)]
418
+
419
+ args = (np.arange(4, dtype=np.int64),)
420
+ xl0, xm0, xr0 = np.full(4, -1.0), np.full(4, 0.0), np.full(4, 1.0)
421
+ result = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0,
422
+ xmin=[-np.inf, -1.0, -np.inf, -np.inf],
423
+ args=args, maxiter=3)
424
+
425
+ reference_flags = np.array([eim._ECONVERGED, _ELIMITS,
426
+ eim._ECONVERR, eim._EVALUEERR])
427
+ assert_equal(result.status, reference_flags)
428
+
429
+ @pytest.mark.parametrize("minimum", (0.622, [0.622, 0.623]))
430
+ @pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64))
431
+ @pytest.mark.parametrize("xmin", [-5, None])
432
+ @pytest.mark.parametrize("xmax", [5, None])
433
+ def test_dtypes(self, minimum, xmin, xmax, dtype):
434
+ xmin = xmin if xmin is None else dtype(xmin)
435
+ xmax = xmax if xmax is None else dtype(xmax)
436
+ minimum = dtype(minimum)
437
+
438
+ def f(x, minimum):
439
+ return ((x - minimum)**2).astype(dtype)
440
+
441
+ xl0, xm0, xr0 = np.array([-0.01, 0.0, 0.01], dtype=dtype)
442
+ result = _bracket_minimum(
443
+ f, xm0, xl0=xl0, xr0=xr0, xmin=xmin, xmax=xmax, args=(minimum, )
444
+ )
445
+ assert np.all(result.success)
446
+ assert result.xl.dtype == result.xm.dtype == result.xr.dtype == dtype
447
+ assert result.fl.dtype == result.fm.dtype == result.fr.dtype == dtype
448
+
449
+ def test_input_validation(self):
450
+ # Test input validation for appropriate error messages
451
+
452
+ message = '`func` must be callable.'
453
+ with pytest.raises(ValueError, match=message):
454
+ _bracket_minimum(None, -4, xl0=4)
455
+
456
+ message = '...must be numeric and real.'
457
+ with pytest.raises(ValueError, match=message):
458
+ _bracket_minimum(lambda x: x**2, 4+1j)
459
+ with pytest.raises(ValueError, match=message):
460
+ _bracket_minimum(lambda x: x**2, -4, xl0='hello')
461
+ with pytest.raises(ValueError, match=message):
462
+ _bracket_minimum(lambda x: x**2, -4, xmin=np)
463
+ with pytest.raises(ValueError, match=message):
464
+ _bracket_minimum(lambda x: x**2, -4, xmax=object())
465
+ with pytest.raises(ValueError, match=message):
466
+ _bracket_minimum(lambda x: x**2, -4, factor=sum)
467
+
468
+ message = "All elements of `factor` must be greater than 1."
469
+ with pytest.raises(ValueError, match=message):
470
+ _bracket_minimum(lambda x: x, -4, factor=0.5)
471
+
472
+ message = '`xmin <= xl0 < xm0 < xr0 <= xmax` must be True'
473
+ with pytest.raises(ValueError, match=message):
474
+ _bracket_minimum(lambda x: x**2, 4, xl0=6)
475
+ with pytest.raises(ValueError, match=message):
476
+ _bracket_minimum(lambda x: x**2, -4, xr0=-6)
477
+ with pytest.raises(ValueError, match=message):
478
+ _bracket_minimum(lambda x: x**2, -4, xl0=-3, xr0=-2)
479
+ with pytest.raises(ValueError, match=message):
480
+ _bracket_minimum(lambda x: x**2, -4, xl0=-6, xr0=-5)
481
+ with pytest.raises(ValueError, match=message):
482
+ _bracket_minimum(lambda x: x**2, -4, xl0=-np.nan)
483
+ with pytest.raises(ValueError, match=message):
484
+ _bracket_minimum(lambda x: x**2, -4, xr0=np.nan)
485
+
486
+ message = "shape mismatch: objects cannot be broadcast"
487
+ # raised by `np.broadcast`, but the traceback is readable IMO
488
+ with pytest.raises(ValueError, match=message):
489
+ _bracket_minimum(lambda x: x**2, [-2, -3], xl0=[-3, -4, -5])
490
+
491
+ message = '`maxiter` must be a non-negative integer.'
492
+ with pytest.raises(ValueError, match=message):
493
+ _bracket_minimum(lambda x: x**2, -4, xr0=4, maxiter=1.5)
494
+ with pytest.raises(ValueError, match=message):
495
+ _bracket_minimum(lambda x: x**2, -4, xr0=4, maxiter=-1)
496
+
497
+ @pytest.mark.parametrize("xl0", [0.0, None])
498
+ @pytest.mark.parametrize("xm0", (0.05, 0.1, 0.15))
499
+ @pytest.mark.parametrize("xr0", (0.2, 0.4, 0.6, None))
500
+ # Minimum is ``a`` for each tuple ``(a, b)`` below. Tests cases where minimum
501
+ # is within, or at varying distances to the left or right of the initial
502
+ # bracket.
503
+ @pytest.mark.parametrize(
504
+ "args",
505
+ (
506
+ (1.2, 0), (-0.5, 0), (0.1, 0), (0.2, 0), (3.6, 0), (21.4, 0),
507
+ (121.6, 0), (5764.1, 0), (-6.4, 0), (-12.9, 0), (-146.2, 0)
508
+ )
509
+ )
510
+ def test_scalar_no_limits(self, xl0, xm0, xr0, args):
511
+ f = self.init_f()
512
+ kwargs = self.get_kwargs(xl0=xl0, xr0=xr0, args=args)
513
+ result = _bracket_minimum(f, xm0, **kwargs)
514
+ self.assert_valid_bracket(result)
515
+ assert result.status == 0
516
+ assert result.success
517
+ assert result.nfev == f.count
518
+
519
+ @pytest.mark.parametrize(
520
+ # xmin is set at 0.0 in all cases.
521
+ "xl0,xm0,xr0,xmin",
522
+ (
523
+ # Initial bracket at varying distances from the xmin.
524
+ (0.5, 0.75, 1.0, 0.0),
525
+ (1.0, 2.5, 4.0, 0.0),
526
+ (2.0, 4.0, 6.0, 0.0),
527
+ (12.0, 16.0, 20.0, 0.0),
528
+ # Test default initial left endpoint selection. It should not
529
+ # be below xmin.
530
+ (None, 0.75, 1.0, 0.0),
531
+ (None, 2.5, 4.0, 0.0),
532
+ (None, 4.0, 6.0, 0.0),
533
+ (None, 16.0, 20.0, 0.0),
534
+ )
535
+ )
536
+ @pytest.mark.parametrize(
537
+ "args", (
538
+ (0.0, 0.0), # Minimum is directly at xmin.
539
+ (1e-300, 0.0), # Minimum is extremely close to xmin.
540
+ (1e-20, 0.0), # Minimum is very close to xmin.
541
+ # Minimum at varying distances from xmin.
542
+ (0.1, 0.0),
543
+ (0.2, 0.0),
544
+ (0.4, 0.0)
545
+ )
546
+ )
547
+ def test_scalar_with_limit_left(self, xl0, xm0, xr0, xmin, args):
548
+ f = self.init_f()
549
+ kwargs = self.get_kwargs(xl0=xl0, xr0=xr0, xmin=xmin, args=args)
550
+ result = _bracket_minimum(f, xm0, **kwargs)
551
+ self.assert_valid_bracket(result)
552
+ assert result.status == 0
553
+ assert result.success
554
+ assert result.nfev == f.count
555
+
556
+ @pytest.mark.parametrize(
557
+ # xmax is set to 1.0 in all cases.
558
+ "xl0,xm0,xr0,xmax",
559
+ (
560
+ # Bracket at varying distances from xmax.
561
+ (0.2, 0.3, 0.4, 1.0),
562
+ (0.05, 0.075, 0.1, 1.0),
563
+ (-0.2, -0.1, 0.0, 1.0),
564
+ (-21.2, -17.7, -14.2, 1.0),
565
+ # Test default right endpoint selection. It should not exceed xmax.
566
+ (0.2, 0.3, None, 1.0),
567
+ (0.05, 0.075, None, 1.0),
568
+ (-0.2, -0.1, None, 1.0),
569
+ (-21.2, -17.7, None, 1.0),
570
+ )
571
+ )
572
+ @pytest.mark.parametrize(
573
+ "args", (
574
+ (0.9999999999999999, 0.0), # Minimum very close to xmax.
575
+ # Minimum at varying distances from xmax.
576
+ (0.9, 0.0),
577
+ (0.7, 0.0),
578
+ (0.5, 0.0)
579
+ )
580
+ )
581
+ def test_scalar_with_limit_right(self, xl0, xm0, xr0, xmax, args):
582
+ f = self.init_f()
583
+ kwargs = self.get_kwargs(xl0=xl0, xr0=xr0, xmax=xmax, args=args)
584
+ result = _bracket_minimum(f, xm0, **kwargs)
585
+ self.assert_valid_bracket(result)
586
+ assert result.status == 0
587
+ assert result.success
588
+ assert result.nfev == f.count
589
+
590
+ @pytest.mark.parametrize(
591
+ "xl0,xm0,xr0,xmin,xmax,args",
592
+ (
593
+ ( # Case 1:
594
+ # Initial bracket.
595
+ 0.2,
596
+ 0.3,
597
+ 0.4,
598
+ # Function slopes down to the right from the bracket to a minimum
599
+ # at 1.0. xmax is also at 1.0
600
+ None,
601
+ 1.0,
602
+ (1.0, 0.0)
603
+ ),
604
+ ( # Case 2:
605
+ # Initial bracket.
606
+ 1.4,
607
+ 1.95,
608
+ 2.5,
609
+ # Function slopes down to the left from the bracket to a minimum at
610
+ # 0.3 with xmin set to 0.3.
611
+ 0.3,
612
+ None,
613
+ (0.3, 0.0)
614
+ ),
615
+ (
616
+ # Case 3:
617
+ # Initial bracket.
618
+ 2.6,
619
+ 3.25,
620
+ 3.9,
621
+ # Function slopes down and to the right to a minimum at 99.4 with xmax
622
+ # at 99.4. Tests the case where the minimum is at xmax, relatively far from
623
+ # the bracket.
624
+ None,
625
+ 99.4,
626
+ (99.4, 0)
627
+ ),
628
+ (
629
+ # Case 4:
630
+ # Initial bracket.
631
+ 4,
632
+ 4.5,
633
+ 5,
634
+ # Function slopes down and to the left away from the bracket with a
635
+ # minimum at -26.3 with xmin set to -26.3. Tests the case where the
636
+ # minimum is at xmin, relatively far from the bracket.
637
+ -26.3,
638
+ None,
639
+ (-26.3, 0)
640
+ ),
641
+ (
642
+ # Case 5:
643
+ # Similar to Case 1 above, but tests default values of xl0 and xr0.
644
+ None,
645
+ 0.3,
646
+ None,
647
+ None,
648
+ 1.0,
649
+ (1.0, 0.0)
650
+ ),
651
+ ( # Case 6:
652
+ # Similar to Case 2 above, but tests default values of xl0 and xr0.
653
+ None,
654
+ 1.95,
655
+ None,
656
+ 0.3,
657
+ None,
658
+ (0.3, 0.0)
659
+ ),
660
+ (
661
+ # Case 7:
662
+ # Similar to Case 3 above, but tests default values of xl0 and xr0.
663
+ None,
664
+ 3.25,
665
+ None,
666
+ None,
667
+ 99.4,
668
+ (99.4, 0)
669
+ ),
670
+ (
671
+ # Case 8:
672
+ # Similar to Case 4 above, but tests default values of xl0 and xr0.
673
+ None,
674
+ 4.5,
675
+ None,
676
+ -26.3,
677
+ None,
678
+ (-26.3, 0)
679
+ ),
680
+ )
681
+ )
682
+ def test_minimum_at_boundary_point(self, xl0, xm0, xr0, xmin, xmax, args):
683
+ f = self.init_f()
684
+ kwargs = self.get_kwargs(xr0=xr0, xmin=xmin, xmax=xmax, args=args)
685
+ result = _bracket_minimum(f, xm0, **kwargs)
686
+ assert result.status == -1
687
+ assert args[0] in (result.xl, result.xr)
688
+ assert result.nfev == f.count
689
+
690
+ @pytest.mark.parametrize('shape', [tuple(), (12, ), (3, 4), (3, 2, 2)])
691
+ def test_vectorization(self, shape):
692
+ # Test for correct functionality, output shapes, and dtypes for
693
+ # various input shapes.
694
+ a = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6
695
+ args = (a, 0.0)
696
+ maxiter = 10
697
+
698
+ @np.vectorize
699
+ def bracket_minimum_single(xm0, xl0, xr0, xmin, xmax, factor, a):
700
+ return _bracket_minimum(self.init_f(), xm0, xl0=xl0, xr0=xr0, xmin=xmin,
701
+ xmax=xmax, factor=factor, maxiter=maxiter,
702
+ args=(a, 0.0))
703
+
704
+ f = self.init_f()
705
+
706
+ rng = np.random.default_rng(2348234)
707
+ xl0 = -rng.random(size=shape)
708
+ xr0 = rng.random(size=shape)
709
+ xm0 = xl0 + rng.random(size=shape) * (xr0 - xl0)
710
+ xmin, xmax = 1e3*xl0, 1e3*xr0
711
+ if shape: # make some elements unbounded
712
+ i = rng.random(size=shape) > 0.5
713
+ xmin[i], xmax[i] = -np.inf, np.inf
714
+ factor = rng.random(size=shape) + 1.5
715
+ res = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, xmin=xmin, xmax=xmax,
716
+ factor=factor, args=args, maxiter=maxiter)
717
+ refs = bracket_minimum_single(xm0, xl0, xr0, xmin, xmax, factor, a).ravel()
718
+
719
+ attrs = ['xl', 'xm', 'xr', 'fl', 'fm', 'fr', 'success', 'nfev', 'nit']
720
+ for attr in attrs:
721
+ ref_attr = [getattr(ref, attr) for ref in refs]
722
+ res_attr = getattr(res, attr)
723
+ assert_allclose(res_attr.ravel(), ref_attr)
724
+ assert_equal(res_attr.shape, shape)
725
+
726
+ assert np.issubdtype(res.success.dtype, np.bool_)
727
+ if shape:
728
+ assert np.all(res.success[1:-1])
729
+ assert np.issubdtype(res.status.dtype, np.integer)
730
+ assert np.issubdtype(res.nfev.dtype, np.integer)
731
+ assert np.issubdtype(res.nit.dtype, np.integer)
732
+ assert_equal(np.max(res.nit), f.count - 3)
733
+ self.assert_valid_bracket(res)
734
+ assert_allclose(res.fl, f(res.xl, *args))
735
+ assert_allclose(res.fm, f(res.xm, *args))
736
+ assert_allclose(res.fr, f(res.xr, *args))
737
+
738
+ def test_special_cases(self):
739
+ # Test edge cases and other special cases.
740
+
741
+ # Test that integers are not passed to `f`
742
+ # (otherwise this would overflow)
743
+ def f(x):
744
+ assert np.issubdtype(x.dtype, np.floating)
745
+ return x ** 98 - 1
746
+
747
+ result = _bracket_minimum(f, -7, xr0=5)
748
+ assert result.success
749
+
750
+ # Test maxiter = 0. Should do nothing to bracket.
751
+ def f(x):
752
+ return x**2 - 10
753
+
754
+ xl0, xm0, xr0 = -3, -1, 2
755
+ result = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, maxiter=0)
756
+ assert_equal([result.xl, result.xm, result.xr], [xl0, xm0, xr0])
757
+
758
+ # Test scalar `args` (not in tuple)
759
+ def f(x, c):
760
+ return c*x**2 - 1
761
+
762
+ result = _bracket_minimum(f, -1, args=3)
763
+ assert result.success
764
+ assert_allclose(result.fl, f(result.xl, 3))
765
+
766
+ # Initial bracket is valid.
767
+ f = self.init_f()
768
+ xl0, xm0, xr0 = [-1.0, -0.2, 1.0]
769
+ args = (0, 0)
770
+ result = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, args=args)
771
+ assert f.count == 3
772
+
773
+ assert_equal(
774
+ [result.xl, result.xm, result.xr],
775
+ [xl0, xm0, xr0],
776
+ )
777
+ assert_equal(
778
+ [result.fl, result.fm, result.fr],
779
+ [f(xl0, *args), f(xm0, *args), f(xr0, *args)],
780
+ )
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test_chandrupatla.py ADDED
@@ -0,0 +1,827 @@
1
+ import pytest
2
+ import numpy as np
3
+ from numpy.testing import assert_allclose, assert_equal, assert_array_less
4
+
5
+ from scipy import stats
6
+ import scipy._lib._elementwise_iterative_method as eim
7
+
8
+ from scipy.optimize._chandrupatla import (_chandrupatla_minimize,
9
+ _chandrupatla as _chandrupatla_root)
10
+ from scipy.optimize._tstutils import _CHANDRUPATLA_TESTS
11
+
12
+ from itertools import permutations
13
+ from .test_zeros import TestScalarRootFinders
14
+
15
+ def f1(x):
16
+ return 100*(1 - x**3.)**2 + (1-x**2.) + 2*(1-x)**2.
17
+
18
+
19
+ def f2(x):
20
+ return 5 + (x - 2.)**6
21
+
22
+
23
+ def f3(x):
24
+ return np.exp(x) - 5*x
25
+
26
+
27
+ def f4(x):
28
+ return x**5. - 5*x**3. - 20.*x + 5.
29
+
30
+
31
+ def f5(x):
32
+ return 8*x**3 - 2*x**2 - 7*x + 3
33
+
34
+
35
+ def _bracket_minimum(func, x1, x2):
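+ # Local helper (distinct from scipy.optimize._bracket): orient (x1, x2)
+ # so the second point is downhill, then expand the step by the golden
+ # ratio until the function value rises, giving a three-point bracket
+ # around a minimum.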
36
+ phi = 1.61803398875
37
+ maxiter = 100
38
+ f1 = func(x1)
39
+ f2 = func(x2)
40
+ step = x2 - x1
41
+ x1, x2, f1, f2, step = ((x2, x1, f2, f1, -step) if f2 > f1
42
+ else (x1, x2, f1, f2, step))
43
+
44
+ for i in range(maxiter):
45
+ step *= phi
46
+ x3 = x2 + step
47
+ f3 = func(x3)
48
+ if f3 < f2:
49
+ x1, x2, f1, f2 = x2, x3, f2, f3
50
+ else:
51
+ break
52
+ return x1, x2, x3, f1, f2, f3
53
+
54
+
55
+ cases = [
56
+ (f1, -1, 11),
57
+ (f1, -2, 13),
58
+ (f1, -4, 13),
59
+ (f1, -8, 15),
60
+ (f1, -16, 16),
61
+ (f1, -32, 19),
62
+ (f1, -64, 20),
63
+ (f1, -128, 21),
64
+ (f1, -256, 21),
65
+ (f1, -512, 19),
66
+ (f1, -1024, 24),
67
+ (f2, -1, 8),
68
+ (f2, -2, 6),
69
+ (f2, -4, 6),
70
+ (f2, -8, 7),
71
+ (f2, -16, 8),
72
+ (f2, -32, 8),
73
+ (f2, -64, 9),
74
+ (f2, -128, 11),
75
+ (f2, -256, 13),
76
+ (f2, -512, 12),
77
+ (f2, -1024, 13),
78
+ (f3, -1, 11),
79
+ (f3, -2, 11),
80
+ (f3, -4, 11),
81
+ (f3, -8, 10),
82
+ (f3, -16, 14),
83
+ (f3, -32, 12),
84
+ (f3, -64, 15),
85
+ (f3, -128, 18),
86
+ (f3, -256, 18),
87
+ (f3, -512, 19),
88
+ (f3, -1024, 19),
89
+ (f4, -0.05, 9),
90
+ (f4, -0.10, 11),
91
+ (f4, -0.15, 11),
92
+ (f4, -0.20, 11),
93
+ (f4, -0.25, 11),
94
+ (f4, -0.30, 9),
95
+ (f4, -0.35, 9),
96
+ (f4, -0.40, 9),
97
+ (f4, -0.45, 10),
98
+ (f4, -0.50, 10),
99
+ (f4, -0.55, 10),
100
+ (f5, -0.05, 6),
101
+ (f5, -0.10, 7),
102
+ (f5, -0.15, 8),
103
+ (f5, -0.20, 10),
104
+ (f5, -0.25, 9),
105
+ (f5, -0.30, 8),
106
+ (f5, -0.35, 7),
107
+ (f5, -0.40, 7),
108
+ (f5, -0.45, 9),
109
+ (f5, -0.50, 9),
110
+ (f5, -0.55, 8)
111
+ ]
112
+
113
+
114
+ class TestChandrupatlaMinimize:
115
+
116
+ def f(self, x, loc):
117
+ dist = stats.norm()
118
+ return -dist.pdf(x - loc)
119
+
120
+ @pytest.mark.parametrize('loc', [0.6, np.linspace(-1.05, 1.05, 10)])
121
+ def test_basic(self, loc):
122
+ # Find mode of normal distribution. Compare mode against location
123
+ # parameter and value of pdf at mode against expected pdf.
124
+ res = _chandrupatla_minimize(self.f, -5, 0, 5, args=(loc,))
125
+ ref = loc
126
+ np.testing.assert_allclose(res.x, ref, rtol=1e-6)
127
+ np.testing.assert_allclose(res.fun, -stats.norm.pdf(0), atol=0, rtol=0)
128
+ assert res.x.shape == np.shape(ref)
129
+
130
+ @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)])
131
+ def test_vectorization(self, shape):
132
+ # Test for correct functionality, output shapes, and dtypes for various
133
+ # input shapes.
134
+ loc = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6
135
+ args = (loc,)
136
+
137
+ @np.vectorize
138
+ def chandrupatla_single(loc_single):
139
+ return _chandrupatla_minimize(self.f, -5, 0, 5, args=(loc_single,))
140
+
141
+ def f(*args, **kwargs):
142
+ f.f_evals += 1
143
+ return self.f(*args, **kwargs)
144
+ f.f_evals = 0
145
+
146
+ res = _chandrupatla_minimize(f, -5, 0, 5, args=args)
147
+ refs = chandrupatla_single(loc).ravel()
148
+
149
+ ref_x = [ref.x for ref in refs]
150
+ assert_allclose(res.x.ravel(), ref_x)
151
+ assert_equal(res.x.shape, shape)
152
+
153
+ ref_fun = [ref.fun for ref in refs]
154
+ assert_allclose(res.fun.ravel(), ref_fun)
155
+ assert_equal(res.fun.shape, shape)
156
+ assert_equal(res.fun, self.f(res.x, *args))
157
+
158
+ ref_success = [ref.success for ref in refs]
159
+ assert_equal(res.success.ravel(), ref_success)
160
+ assert_equal(res.success.shape, shape)
161
+ assert np.issubdtype(res.success.dtype, np.bool_)
162
+
163
+ ref_flag = [ref.status for ref in refs]
164
+ assert_equal(res.status.ravel(), ref_flag)
165
+ assert_equal(res.status.shape, shape)
166
+ assert np.issubdtype(res.status.dtype, np.integer)
167
+
168
+ ref_nfev = [ref.nfev for ref in refs]
169
+ assert_equal(res.nfev.ravel(), ref_nfev)
170
+ assert_equal(np.max(res.nfev), f.f_evals)
171
+ assert_equal(res.nfev.shape, res.fun.shape)
172
+ assert np.issubdtype(res.nfev.dtype, np.integer)
173
+
174
+ ref_nit = [ref.nit for ref in refs]
175
+ assert_equal(res.nit.ravel(), ref_nit)
176
+ assert_equal(np.max(res.nit), f.f_evals-3)
177
+ assert_equal(res.nit.shape, res.fun.shape)
178
+ assert np.issubdtype(res.nit.dtype, np.integer)
179
+
180
+ ref_xl = [ref.xl for ref in refs]
181
+ assert_allclose(res.xl.ravel(), ref_xl)
182
+ assert_equal(res.xl.shape, shape)
183
+
184
+ ref_xm = [ref.xm for ref in refs]
185
+ assert_allclose(res.xm.ravel(), ref_xm)
186
+ assert_equal(res.xm.shape, shape)
187
+
188
+ ref_xr = [ref.xr for ref in refs]
189
+ assert_allclose(res.xr.ravel(), ref_xr)
190
+ assert_equal(res.xr.shape, shape)
191
+
192
+ ref_fl = [ref.fl for ref in refs]
193
+ assert_allclose(res.fl.ravel(), ref_fl)
194
+ assert_equal(res.fl.shape, shape)
195
+ assert_allclose(res.fl, self.f(res.xl, *args))
196
+
197
+ ref_fm = [ref.fm for ref in refs]
198
+ assert_allclose(res.fm.ravel(), ref_fm)
199
+ assert_equal(res.fm.shape, shape)
200
+ assert_allclose(res.fm, self.f(res.xm, *args))
201
+
202
+ ref_fr = [ref.fr for ref in refs]
203
+ assert_allclose(res.fr.ravel(), ref_fr)
204
+ assert_equal(res.fr.shape, shape)
205
+ assert_allclose(res.fr, self.f(res.xr, *args))
206
+
207
+ def test_flags(self):
208
+ # Test cases that should produce different status flags; show that all
209
+ # can be produced simultaneously.
210
+ def f(xs, js):
211
+ funcs = [lambda x: (x - 2.5) ** 2,
212
+ lambda x: x - 10,
213
+ lambda x: (x - 2.5) ** 4,
214
+ lambda x: np.nan]
215
+
216
+ return [funcs[j](x) for x, j in zip(xs, js)]
217
+
218
+ args = (np.arange(4, dtype=np.int64),)
219
+
220
+ res = _chandrupatla_minimize(f, [0]*4, [2]*4, [np.pi]*4, args=args,
221
+ maxiter=10)
222
+
223
+ ref_flags = np.array([eim._ECONVERGED,
224
+ eim._ESIGNERR,
225
+ eim._ECONVERR,
226
+ eim._EVALUEERR])
227
+ assert_equal(res.status, ref_flags)
228
+
229
+ def test_convergence(self):
230
+ # Test that the convergence tolerances behave as expected
231
+ rng = np.random.default_rng(2585255913088665241)
232
+ p = rng.random(size=3)
233
+ bracket = (-5, 0, 5)
234
+ args = (p,)
235
+ kwargs0 = dict(args=args, xatol=0, xrtol=0, fatol=0, frtol=0)
236
+
237
+ kwargs = kwargs0.copy()
238
+ kwargs['xatol'] = 1e-3
239
+ res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
240
+ j1 = abs(res1.xr - res1.xl)
241
+ assert_array_less(j1, 4*kwargs['xatol'])
242
+ kwargs['xatol'] = 1e-6
243
+ res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
244
+ j2 = abs(res2.xr - res2.xl)
245
+ assert_array_less(j2, 4*kwargs['xatol'])
246
+ assert_array_less(j2, j1)
247
+
248
+ kwargs = kwargs0.copy()
249
+ kwargs['xrtol'] = 1e-3
250
+ res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
251
+ j1 = abs(res1.xr - res1.xl)
252
+ assert_array_less(j1, 4*kwargs['xrtol']*abs(res1.x))
253
+ kwargs['xrtol'] = 1e-6
254
+ res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
255
+ j2 = abs(res2.xr - res2.xl)
256
+ assert_array_less(j2, 4*kwargs['xrtol']*abs(res2.x))
257
+ assert_array_less(j2, j1)
258
+
259
+ kwargs = kwargs0.copy()
260
+ kwargs['fatol'] = 1e-3
261
+ res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
262
+ h1 = abs(res1.fl - 2 * res1.fm + res1.fr)
263
+ assert_array_less(h1, 2*kwargs['fatol'])
264
+ kwargs['fatol'] = 1e-6
265
+ res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
266
+ h2 = abs(res2.fl - 2 * res2.fm + res2.fr)
267
+ assert_array_less(h2, 2*kwargs['fatol'])
268
+ assert_array_less(h2, h1)
269
+
270
+ kwargs = kwargs0.copy()
271
+ kwargs['frtol'] = 1e-3
272
+ res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
273
+ h1 = abs(res1.fl - 2 * res1.fm + res1.fr)
274
+ assert_array_less(h1, 2*kwargs['frtol']*abs(res1.fun))
275
+ kwargs['frtol'] = 1e-6
276
+ res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
277
+ h2 = abs(res2.fl - 2 * res2.fm + res2.fr)
278
+ assert_array_less(h2, 2*kwargs['frtol']*abs(res2.fun))
279
+ assert_array_less(h2, h1)
280
+
281
+ def test_maxiter_callback(self):
282
+ # Test behavior of `maxiter` parameter and `callback` interface
283
+ loc = 0.612814
284
+ bracket = (-5, 0, 5)
285
+ maxiter = 5
286
+
287
+ res = _chandrupatla_minimize(self.f, *bracket, args=(loc,),
288
+ maxiter=maxiter)
289
+ assert not np.any(res.success)
290
+ assert np.all(res.nfev == maxiter+3)
291
+ assert np.all(res.nit == maxiter)
292
+
293
+ def callback(res):
294
+ callback.iter += 1
295
+ callback.res = res
296
+ assert hasattr(res, 'x')
297
+ if callback.iter == 0:
298
+ # callback is called once with initial bracket
299
+ assert (res.xl, res.xm, res.xr) == bracket
300
+ else:
301
+ changed_xr = (res.xl == callback.xl) & (res.xr != callback.xr)
302
+ changed_xl = (res.xl != callback.xl) & (res.xr == callback.xr)
303
+ assert np.all(changed_xr | changed_xl)
304
+
305
+ callback.xl = res.xl
306
+ callback.xr = res.xr
307
+ assert res.status == eim._EINPROGRESS
308
+ assert_equal(self.f(res.xl, loc), res.fl)
309
+ assert_equal(self.f(res.xm, loc), res.fm)
310
+ assert_equal(self.f(res.xr, loc), res.fr)
311
+ assert_equal(self.f(res.x, loc), res.fun)
312
+ if callback.iter == maxiter:
313
+ raise StopIteration
314
+
315
+ callback.xl = np.nan
316
+ callback.xr = np.nan
317
+ callback.iter = -1 # callback called once before first iteration
318
+ callback.res = None
319
+
320
+ res2 = _chandrupatla_minimize(self.f, *bracket, args=(loc,),
321
+ callback=callback)
322
+
323
+ # terminating with callback is identical to terminating due to maxiter
324
+ # (except for `status`)
325
+ for key in res.keys():
326
+ if key == 'status':
327
+ assert res[key] == eim._ECONVERR
328
+ assert callback.res[key] == eim._EINPROGRESS
329
+ assert res2[key] == eim._ECALLBACK
330
+ else:
331
+ assert res2[key] == callback.res[key] == res[key]
332
+
333
+ @pytest.mark.parametrize('case', cases)
334
+ def test_nit_expected(self, case):
335
+ # Test that `_chandrupatla` implements Chandrupatla's algorithm:
336
+ # in all 55 test cases, the number of iterations performed
337
+ # matches the number reported in the original paper.
338
+ func, x1, nit = case
339
+
340
+ # Find bracket using the algorithm in the paper
341
+ step = 0.2
342
+ x2 = x1 + step
343
+ x1, x2, x3, f1, f2, f3 = _bracket_minimum(func, x1, x2)
344
+
345
+ # Use tolerances from original paper
346
+ xatol = 0.0001
347
+ fatol = 0.000001
348
+ xrtol = 1e-16
349
+ frtol = 1e-16
350
+
351
+ res = _chandrupatla_minimize(func, x1, x2, x3, xatol=xatol,
352
+ fatol=fatol, xrtol=xrtol, frtol=frtol)
353
+ assert_equal(res.nit, nit)
354
+
355
+ @pytest.mark.parametrize("loc", (0.65, [0.65, 0.7]))
356
+ @pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64))
357
+ def test_dtype(self, loc, dtype):
358
+ # Test that dtypes are preserved
359
+
360
+ loc = dtype(loc)
361
+
362
+ def f(x, loc):
363
+ assert x.dtype == dtype
364
+ return ((x - loc) ** 2).astype(dtype)
365
+
366
+ res = _chandrupatla_minimize(f, dtype(-3), dtype(1), dtype(5),
367
+ args=(loc,))
368
+ assert res.x.dtype == dtype
369
+ assert_allclose(res.x, loc, rtol=np.sqrt(np.finfo(dtype).eps))
370
+
371
+ def test_input_validation(self):
372
+ # Test input validation for appropriate error messages
373
+
374
+ message = '`func` must be callable.'
375
+ with pytest.raises(ValueError, match=message):
376
+ _chandrupatla_minimize(None, -4, 0, 4)
377
+
378
+ message = 'Abscissae and function output must be real numbers.'
379
+ with pytest.raises(ValueError, match=message):
380
+ _chandrupatla_minimize(lambda x: x, -4+1j, 0, 4)
381
+
382
+ message = "shape mismatch: objects cannot be broadcast"
383
+ # raised by `np.broadcast, but the traceback is readable IMO
384
+ with pytest.raises(ValueError, match=message):
385
+ _chandrupatla_minimize(lambda x: x, [-2, -3], [0, 0], [3, 4, 5])
386
+
387
+ message = "The shape of the array returned by `func` must be the same"
388
+ with pytest.raises(ValueError, match=message):
389
+ _chandrupatla_minimize(lambda x: [x[0], x[1], x[1]], [-3, -3],
390
+ [0, 0], [5, 5])
391
+
392
+ message = 'Tolerances must be non-negative scalars.'
393
+ with pytest.raises(ValueError, match=message):
394
+ _chandrupatla_minimize(lambda x: x, -4, 0, 4, xatol=-1)
395
+ with pytest.raises(ValueError, match=message):
396
+ _chandrupatla_minimize(lambda x: x, -4, 0, 4, xrtol=np.nan)
397
+ with pytest.raises(ValueError, match=message):
398
+ _chandrupatla_minimize(lambda x: x, -4, 0, 4, fatol='ekki')
399
+ with pytest.raises(ValueError, match=message):
400
+ _chandrupatla_minimize(lambda x: x, -4, 0, 4, frtol=np.nan)
401
+
402
+ message = '`maxiter` must be a non-negative integer.'
403
+ with pytest.raises(ValueError, match=message):
404
+ _chandrupatla_minimize(lambda x: x, -4, 0, 4, maxiter=1.5)
405
+ with pytest.raises(ValueError, match=message):
406
+ _chandrupatla_minimize(lambda x: x, -4, 0, 4, maxiter=-1)
407
+
408
+ message = '`callback` must be callable.'
409
+ with pytest.raises(ValueError, match=message):
410
+ _chandrupatla_minimize(lambda x: x, -4, 0, 4, callback='shrubbery')
411
+
412
+ def test_bracket_order(self):
413
+ # Confirm that order of points in bracket doesn't matter
414
+ loc = np.linspace(-1, 1, 6)[:, np.newaxis]
415
+ brackets = np.array(list(permutations([-5, 0, 5]))).T
416
+ res = _chandrupatla_minimize(self.f, *brackets, args=(loc,))
417
+ assert np.all(np.isclose(res.x, loc) | (res.fun == self.f(loc, loc)))
418
+ ref = res.x[:, 0] # all columns should be the same
419
+ assert_allclose(*np.broadcast_arrays(res.x.T, ref), rtol=1e-15)
420
+
421
+ def test_special_cases(self):
422
+ # Test edge cases and other special cases
423
+
424
+ # Test that integers are not passed to `f`
425
+ # (otherwise this would overflow)
426
+ def f(x):
427
+ assert np.issubdtype(x.dtype, np.floating)
428
+ return (x-1) ** 100
429
+
430
+ with np.errstate(invalid='ignore'):
431
+ res = _chandrupatla_minimize(f, -7, 0, 8, fatol=0, frtol=0)
432
+ assert res.success
433
+ assert_allclose(res.x, 1, rtol=1e-3)
434
+ assert_equal(res.fun, 0)
435
+
436
+ # Test that if all elements of bracket equal minimizer, algorithm
437
+ # reports convergence
438
+ def f(x):
439
+ return (x-1)**2
440
+
441
+ res = _chandrupatla_minimize(f, 1, 1, 1)
442
+ assert res.success
443
+ assert_equal(res.x, 1)
444
+
445
+ # Test maxiter = 0. Should do nothing to bracket.
446
+ def f(x):
447
+ return (x-1)**2
448
+
449
+ bracket = (-3, 1.1, 5)
450
+ res = _chandrupatla_minimize(f, *bracket, maxiter=0)
451
+ assert res.xl, res.xr == bracket
452
+ assert res.nit == 0
453
+ assert res.nfev == 3
454
+ assert res.status == -2
455
+ assert res.x == 1.1 # best so far
456
+
457
+ # Test scalar `args` (not in tuple)
458
+ def f(x, c):
459
+ return (x-c)**2 - 1
460
+
461
+ res = _chandrupatla_minimize(f, -1, 0, 1, args=1/3)
462
+ assert_allclose(res.x, 1/3)
463
+
464
+ # Test zero tolerances
465
+ # TODO: fatol/frtol = 0?
466
+ def f(x):
467
+ return -np.sin(x)
468
+
469
+ res = _chandrupatla_minimize(f, 0, 1, np.pi, xatol=0, xrtol=0,
470
+ fatol=0, frtol=0)
471
+ assert res.success
472
+ # found a minimum exactly (according to floating point arithmetic)
473
+ assert res.xl < res.xm < res.xr
474
+ assert f(res.xl) == f(res.xm) == f(res.xr)
475
+
476
+
477
+ class TestChandrupatla(TestScalarRootFinders):
478
+
479
+ def f(self, q, p):
480
+ return stats.norm.cdf(q) - p
481
+
482
+ @pytest.mark.parametrize('p', [0.6, np.linspace(-0.05, 1.05, 10)])
483
+ def test_basic(self, p):
484
+ # Invert distribution CDF and compare against distrtibution `ppf`
485
+ res = _chandrupatla_root(self.f, -5, 5, args=(p,))
486
+ ref = stats.norm().ppf(p)
487
+ np.testing.assert_allclose(res.x, ref)
488
+ assert res.x.shape == ref.shape
489
+
490
+ @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)])
491
+ def test_vectorization(self, shape):
492
+ # Test for correct functionality, output shapes, and dtypes for various
493
+ # input shapes.
494
+ p = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6
495
+ args = (p,)
496
+
497
+ @np.vectorize
498
+ def chandrupatla_single(p):
499
+ return _chandrupatla_root(self.f, -5, 5, args=(p,))
500
+
501
+ def f(*args, **kwargs):
502
+ f.f_evals += 1
503
+ return self.f(*args, **kwargs)
504
+ f.f_evals = 0
505
+
506
+ res = _chandrupatla_root(f, -5, 5, args=args)
507
+ refs = chandrupatla_single(p).ravel()
508
+
509
+ ref_x = [ref.x for ref in refs]
510
+ assert_allclose(res.x.ravel(), ref_x)
511
+ assert_equal(res.x.shape, shape)
512
+
513
+ ref_fun = [ref.fun for ref in refs]
514
+ assert_allclose(res.fun.ravel(), ref_fun)
515
+ assert_equal(res.fun.shape, shape)
516
+ assert_equal(res.fun, self.f(res.x, *args))
517
+
518
+ ref_success = [ref.success for ref in refs]
519
+ assert_equal(res.success.ravel(), ref_success)
520
+ assert_equal(res.success.shape, shape)
521
+ assert np.issubdtype(res.success.dtype, np.bool_)
522
+
523
+ ref_flag = [ref.status for ref in refs]
524
+ assert_equal(res.status.ravel(), ref_flag)
525
+ assert_equal(res.status.shape, shape)
526
+ assert np.issubdtype(res.status.dtype, np.integer)
527
+
528
+ ref_nfev = [ref.nfev for ref in refs]
529
+ assert_equal(res.nfev.ravel(), ref_nfev)
530
+ assert_equal(np.max(res.nfev), f.f_evals)
531
+ assert_equal(res.nfev.shape, res.fun.shape)
532
+ assert np.issubdtype(res.nfev.dtype, np.integer)
533
+
534
+ ref_nit = [ref.nit for ref in refs]
535
+ assert_equal(res.nit.ravel(), ref_nit)
536
+ assert_equal(np.max(res.nit), f.f_evals-2)
537
+ assert_equal(res.nit.shape, res.fun.shape)
538
+ assert np.issubdtype(res.nit.dtype, np.integer)
539
+
540
+ ref_xl = [ref.xl for ref in refs]
541
+ assert_allclose(res.xl.ravel(), ref_xl)
542
+ assert_equal(res.xl.shape, shape)
543
+
544
+ ref_xr = [ref.xr for ref in refs]
545
+ assert_allclose(res.xr.ravel(), ref_xr)
546
+ assert_equal(res.xr.shape, shape)
547
+
548
+ assert_array_less(res.xl, res.xr)
549
+ finite = np.isfinite(res.x)
550
+ assert np.all((res.x[finite] == res.xl[finite])
551
+ | (res.x[finite] == res.xr[finite]))
552
+
553
+ ref_fl = [ref.fl for ref in refs]
554
+ assert_allclose(res.fl.ravel(), ref_fl)
555
+ assert_equal(res.fl.shape, shape)
556
+ assert_allclose(res.fl, self.f(res.xl, *args))
557
+
558
+ ref_fr = [ref.fr for ref in refs]
559
+ assert_allclose(res.fr.ravel(), ref_fr)
560
+ assert_equal(res.fr.shape, shape)
561
+ assert_allclose(res.fr, self.f(res.xr, *args))
562
+
563
+ assert np.all(np.abs(res.fun[finite]) ==
564
+ np.minimum(np.abs(res.fl[finite]),
565
+ np.abs(res.fr[finite])))
566
+
567
+ def test_flags(self):
568
+ # Test cases that should produce different status flags; show that all
569
+ # can be produced simultaneously.
570
+ def f(xs, js):
571
+ funcs = [lambda x: x - 2.5,
572
+ lambda x: x - 10,
573
+ lambda x: (x - 0.1)**3,
574
+ lambda x: np.nan]
575
+ return [funcs[j](x) for x, j in zip(xs, js)]
576
+
577
+ args = (np.arange(4, dtype=np.int64),)
578
+ res = _chandrupatla_root(f, [0]*4, [np.pi]*4, args=args, maxiter=2)
579
+
580
+ ref_flags = np.array([eim._ECONVERGED,
581
+ eim._ESIGNERR,
582
+ eim._ECONVERR,
583
+ eim._EVALUEERR])
584
+ assert_equal(res.status, ref_flags)
585
+
586
+ def test_convergence(self):
587
+ # Test that the convergence tolerances behave as expected
588
+ rng = np.random.default_rng(2585255913088665241)
589
+ p = rng.random(size=3)
590
+ bracket = (-5, 5)
591
+ args = (p,)
592
+ kwargs0 = dict(args=args, xatol=0, xrtol=0, fatol=0, frtol=0)
593
+
594
+ kwargs = kwargs0.copy()
595
+ kwargs['xatol'] = 1e-3
596
+ res1 = _chandrupatla_root(self.f, *bracket, **kwargs)
597
+ assert_array_less(res1.xr - res1.xl, 1e-3)
598
+ kwargs['xatol'] = 1e-6
599
+ res2 = _chandrupatla_root(self.f, *bracket, **kwargs)
600
+ assert_array_less(res2.xr - res2.xl, 1e-6)
601
+ assert_array_less(res2.xr - res2.xl, res1.xr - res1.xl)
602
+
603
+ kwargs = kwargs0.copy()
604
+ kwargs['xrtol'] = 1e-3
605
+ res1 = _chandrupatla_root(self.f, *bracket, **kwargs)
606
+ assert_array_less(res1.xr - res1.xl, 1e-3 * np.abs(res1.x))
607
+ kwargs['xrtol'] = 1e-6
608
+ res2 = _chandrupatla_root(self.f, *bracket, **kwargs)
609
+ assert_array_less(res2.xr - res2.xl, 1e-6 * np.abs(res2.x))
610
+ assert_array_less(res2.xr - res2.xl, res1.xr - res1.xl)
611
+
612
+ kwargs = kwargs0.copy()
613
+ kwargs['fatol'] = 1e-3
614
+ res1 = _chandrupatla_root(self.f, *bracket, **kwargs)
615
+ assert_array_less(np.abs(res1.fun), 1e-3)
616
+ kwargs['fatol'] = 1e-6
617
+ res2 = _chandrupatla_root(self.f, *bracket, **kwargs)
618
+ assert_array_less(np.abs(res2.fun), 1e-6)
619
+ assert_array_less(np.abs(res2.fun), np.abs(res1.fun))
620
+
621
+ kwargs = kwargs0.copy()
622
+ kwargs['frtol'] = 1e-3
623
+ x1, x2 = bracket
624
+ f0 = np.minimum(abs(self.f(x1, *args)), abs(self.f(x2, *args)))
625
+ res1 = _chandrupatla_root(self.f, *bracket, **kwargs)
626
+ assert_array_less(np.abs(res1.fun), 1e-3*f0)
627
+ kwargs['frtol'] = 1e-6
628
+ res2 = _chandrupatla_root(self.f, *bracket, **kwargs)
629
+ assert_array_less(np.abs(res2.fun), 1e-6*f0)
630
+ assert_array_less(np.abs(res2.fun), np.abs(res1.fun))
631
+
632
+ def test_maxiter_callback(self):
633
+ # Test behavior of `maxiter` parameter and `callback` interface
634
+ p = 0.612814
635
+ bracket = (-5, 5)
636
+ maxiter = 5
637
+
638
+ def f(q, p):
639
+ res = stats.norm().cdf(q) - p
640
+ f.x = q
641
+ f.fun = res
642
+ return res
643
+ f.x = None
644
+ f.fun = None
645
+
646
+ res = _chandrupatla_root(f, *bracket, args=(p,),
647
+ maxiter=maxiter)
648
+ assert not np.any(res.success)
649
+ assert np.all(res.nfev == maxiter+2)
650
+ assert np.all(res.nit == maxiter)
651
+
652
+ def callback(res):
653
+ callback.iter += 1
654
+ callback.res = res
655
+ assert hasattr(res, 'x')
656
+ if callback.iter == 0:
657
+ # callback is called once with initial bracket
658
+ assert (res.xl, res.xr) == bracket
659
+ else:
660
+ changed = (((res.xl == callback.xl) & (res.xr != callback.xr))
661
+ | ((res.xl != callback.xl) & (res.xr == callback.xr)))
662
+ assert np.all(changed)
663
+
664
+ callback.xl = res.xl
665
+ callback.xr = res.xr
666
+ assert res.status == eim._EINPROGRESS
667
+ assert_equal(self.f(res.xl, p), res.fl)
668
+ assert_equal(self.f(res.xr, p), res.fr)
669
+ assert_equal(self.f(res.x, p), res.fun)
670
+ if callback.iter == maxiter:
671
+ raise StopIteration
672
+ callback.iter = -1 # callback called once before first iteration
673
+ callback.res = None
674
+ callback.xl = None
675
+ callback.xr = None
676
+
677
+ res2 = _chandrupatla_root(f, *bracket, args=(p,),
678
+ callback=callback)
679
+
680
+ # terminating with callback is identical to terminating due to maxiter
681
+ # (except for `status`)
682
+ for key in res.keys():
683
+ if key == 'status':
684
+ assert res[key] == eim._ECONVERR
685
+ assert callback.res[key] == eim._EINPROGRESS
686
+ assert res2[key] == eim._ECALLBACK
687
+ else:
688
+ assert res2[key] == callback.res[key] == res[key]
689
+
690
+ @pytest.mark.parametrize('case', _CHANDRUPATLA_TESTS)
691
+ def test_nit_expected(self, case):
692
+ # Test that `_chandrupatla` implements Chandrupatla's algorithm:
693
+ # in all 40 test cases, the number of iterations performed
694
+ # matches the number reported in the original paper.
695
+ f, bracket, root, nfeval, id = case
696
+ # Chandrupatla's criterion is equivalent to
697
+ # abs(x2-x1) < 4*abs(xmin)*xrtol + xatol, but we use the more standard
698
+ # abs(x2-x1) < abs(xmin)*xrtol + xatol. Therefore, set xrtol to 4x
699
+ # that used by Chandrupatla in tests.
700
+ res = _chandrupatla_root(f, *bracket, xrtol=4e-10, xatol=1e-5)
701
+ assert_allclose(res.fun, f(root), rtol=1e-8, atol=2e-3)
702
+ assert_equal(res.nfev, nfeval)
703
+
704
+ @pytest.mark.parametrize("root", (0.622, [0.622, 0.623]))
705
+ @pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64))
706
+ def test_dtype(self, root, dtype):
707
+ # Test that dtypes are preserved
708
+
709
+ root = dtype(root)
710
+ def f(x, root):
711
+ return ((x - root) ** 3).astype(dtype)
712
+
713
+ res = _chandrupatla_root(f, dtype(-3), dtype(5),
714
+ args=(root,), xatol=1e-3)
715
+ assert res.x.dtype == dtype
716
+ assert np.allclose(res.x, root, atol=1e-3) or np.all(res.fun == 0)
717
+
718
+ def test_input_validation(self):
719
+ # Test input validation for appropriate error messages
720
+
721
+ message = '`func` must be callable.'
722
+ with pytest.raises(ValueError, match=message):
723
+ _chandrupatla_root(None, -4, 4)
724
+
725
+ message = 'Abscissae and function output must be real numbers.'
726
+ with pytest.raises(ValueError, match=message):
727
+ _chandrupatla_root(lambda x: x, -4+1j, 4)
728
+
729
+ message = "shape mismatch: objects cannot be broadcast"
730
+ # raised by `np.broadcast, but the traceback is readable IMO
731
+ with pytest.raises(ValueError, match=message):
732
+ _chandrupatla_root(lambda x: x, [-2, -3], [3, 4, 5])
733
+
734
+ message = "The shape of the array returned by `func`..."
735
+ with pytest.raises(ValueError, match=message):
736
+ _chandrupatla_root(lambda x: [x[0], x[1], x[1]], [-3, -3], [5, 5])
737
+
738
+ message = 'Tolerances must be non-negative scalars.'
739
+ with pytest.raises(ValueError, match=message):
740
+ _chandrupatla_root(lambda x: x, -4, 4, xatol=-1)
741
+ with pytest.raises(ValueError, match=message):
742
+ _chandrupatla_root(lambda x: x, -4, 4, xrtol=np.nan)
743
+ with pytest.raises(ValueError, match=message):
744
+ _chandrupatla_root(lambda x: x, -4, 4, fatol='ekki')
745
+ with pytest.raises(ValueError, match=message):
746
+ _chandrupatla_root(lambda x: x, -4, 4, frtol=np.nan)
747
+
748
+ message = '`maxiter` must be a non-negative integer.'
749
+ with pytest.raises(ValueError, match=message):
750
+ _chandrupatla_root(lambda x: x, -4, 4, maxiter=1.5)
751
+ with pytest.raises(ValueError, match=message):
752
+ _chandrupatla_root(lambda x: x, -4, 4, maxiter=-1)
753
+
754
+ message = '`callback` must be callable.'
755
+ with pytest.raises(ValueError, match=message):
756
+ _chandrupatla_root(lambda x: x, -4, 4, callback='shrubbery')
757
+
758
+ def test_special_cases(self):
759
+ # Test edge cases and other special cases
760
+
761
+ # Test that integers are not passed to `f`
762
+ # (otherwise this would overflow)
763
+ def f(x):
764
+ assert np.issubdtype(x.dtype, np.floating)
765
+ return x ** 99 - 1
766
+
767
+ res = _chandrupatla_root(f, -7, 5)
768
+ assert res.success
769
+ assert_allclose(res.x, 1)
770
+
771
+ # Test that if both ends of bracket equal root, algorithm reports
772
+ # convergence
773
+ def f(x):
774
+ return x**2 - 1
775
+
776
+ res = _chandrupatla_root(f, 1, 1)
777
+ assert res.success
778
+ assert_equal(res.x, 1)
779
+
780
+ def f(x):
781
+ return 1/x
782
+
783
+ with np.errstate(invalid='ignore'):
784
+ res = _chandrupatla_root(f, np.inf, np.inf)
785
+ assert res.success
786
+ assert_equal(res.x, np.inf)
787
+
788
+ # Test maxiter = 0. Should do nothing to bracket.
789
+ def f(x):
790
+ return x**3 - 1
791
+
792
+ bracket = (-3, 5)
793
+ res = _chandrupatla_root(f, *bracket, maxiter=0)
794
+ assert res.xl, res.xr == bracket
795
+ assert res.nit == 0
796
+ assert res.nfev == 2
797
+ assert res.status == -2
798
+ assert res.x == -3 # best so far
799
+
800
+ # Test maxiter = 1
801
+ res = _chandrupatla_root(f, *bracket, maxiter=1)
802
+ assert res.success
803
+ assert res.status == 0
804
+ assert res.nit == 1
805
+ assert res.nfev == 3
806
+ assert_allclose(res.x, 1)
807
+
808
+ # Test scalar `args` (not in tuple)
809
+ def f(x, c):
810
+ return c*x - 1
811
+
812
+ res = _chandrupatla_root(f, -1, 1, args=3)
813
+ assert_allclose(res.x, 1/3)
814
+
815
+ # # TODO: Test zero tolerance
816
+ # # ~~What's going on here - why are iterations repeated?~~
817
+ # # tl goes to zero when xatol=xrtol=0. When function is nearly linear,
818
+ # # this causes convergence issues.
819
+ # def f(x):
820
+ # return np.cos(x)
821
+ #
822
+ # res = _chandrupatla_root(f, 0, np.pi, xatol=0, xrtol=0)
823
+ # assert res.nit < 100
824
+ # xp = np.nextafter(res.x, np.inf)
825
+ # xm = np.nextafter(res.x, -np.inf)
826
+ # assert np.abs(res.fun) < np.abs(f(xp))
827
+ # assert np.abs(res.fun) < np.abs(f(xm))
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test_cobyla.py ADDED
@@ -0,0 +1,166 @@
+ import math
+
+ import numpy as np
+ from numpy.testing import assert_allclose, assert_, assert_array_equal
+ import pytest
+
+ from scipy.optimize import fmin_cobyla, minimize, Bounds
+
+
+ class TestCobyla:
+     def setup_method(self):
+         self.x0 = [4.95, 0.66]
+         self.solution = [math.sqrt(25 - (2.0/3)**2), 2.0/3]
+         self.opts = {'disp': False, 'rhobeg': 1, 'tol': 1e-5,
+                      'maxiter': 100}
+
+     def fun(self, x):
+         return x[0]**2 + abs(x[1])**3
+
+     def con1(self, x):
+         return x[0]**2 + x[1]**2 - 25
+
+     def con2(self, x):
+         return -self.con1(x)
+
+     @pytest.mark.xslow(True, reason='not slow, but noisy so only run rarely')
+     def test_simple(self, capfd):
+         # use disp=True as smoke test for gh-8118
+         x = fmin_cobyla(self.fun, self.x0, [self.con1, self.con2], rhobeg=1,
+                         rhoend=1e-5, maxfun=100, disp=True)
+         assert_allclose(x, self.solution, atol=1e-4)
+
+     def test_minimize_simple(self):
+         class Callback:
+             def __init__(self):
+                 self.n_calls = 0
+                 self.last_x = None
+
+             def __call__(self, x):
+                 self.n_calls += 1
+                 self.last_x = x
+
+         callback = Callback()
+
+         # Minimize with method='COBYLA'
+         cons = ({'type': 'ineq', 'fun': self.con1},
+                 {'type': 'ineq', 'fun': self.con2})
+         sol = minimize(self.fun, self.x0, method='cobyla', constraints=cons,
+                        callback=callback, options=self.opts)
+         assert_allclose(sol.x, self.solution, atol=1e-4)
+         assert_(sol.success, sol.message)
+         assert_(sol.maxcv < 1e-5, sol)
+         assert_(sol.nfev < 70, sol)
+         assert_(sol.fun < self.fun(self.solution) + 1e-3, sol)
+         assert_(sol.nfev == callback.n_calls,
+                 "Callback is not called exactly once for every function eval.")
+         assert_array_equal(
+             sol.x,
+             callback.last_x,
+             "Last design vector sent to the callback is not equal to returned value.",
+         )
+
+     def test_minimize_constraint_violation(self):
+         np.random.seed(1234)
+         pb = np.random.rand(10, 10)
+         spread = np.random.rand(10)
+
+         def p(w):
+             return pb.dot(w)
+
+         def f(w):
+             return -(w * spread).sum()
+
+         def c1(w):
+             return 500 - abs(p(w)).sum()
+
+         def c2(w):
+             return 5 - abs(p(w).sum())
+
+         def c3(w):
+             return 5 - abs(p(w)).max()
+
+         cons = ({'type': 'ineq', 'fun': c1},
+                 {'type': 'ineq', 'fun': c2},
+                 {'type': 'ineq', 'fun': c3})
+         w0 = np.zeros((10,))
+         sol = minimize(f, w0, method='cobyla', constraints=cons,
+                        options={'catol': 1e-6})
+         assert_(sol.maxcv > 1e-6)
+         assert_(not sol.success)
+
+
+ def test_vector_constraints():
+     # test that fmin_cobyla and minimize can take a combination
+     # of constraints, some returning a number and others an array
+     def fun(x):
+         return (x[0] - 1)**2 + (x[1] - 2.5)**2
+
+     def fmin(x):
+         return fun(x) - 1
+
+     def cons1(x):
+         a = np.array([[1, -2, 2], [-1, -2, 6], [-1, 2, 2]])
+         return np.array([a[i, 0] * x[0] + a[i, 1] * x[1] +
+                          a[i, 2] for i in range(len(a))])
+
+     def cons2(x):
+         return x  # identity, acts as bounds x > 0
+
+     x0 = np.array([2, 0])
+     cons_list = [fun, cons1, cons2]
+
+     xsol = [1.4, 1.7]
+     fsol = 0.8
+
+     # testing fmin_cobyla
+     sol = fmin_cobyla(fun, x0, cons_list, rhoend=1e-5)
+     assert_allclose(sol, xsol, atol=1e-4)
+
+     sol = fmin_cobyla(fun, x0, fmin, rhoend=1e-5)
+     assert_allclose(fun(sol), 1, atol=1e-4)
+
+     # testing minimize
+     constraints = [{'type': 'ineq', 'fun': cons} for cons in cons_list]
+     sol = minimize(fun, x0, constraints=constraints, tol=1e-5)
+     assert_allclose(sol.x, xsol, atol=1e-4)
+     assert_(sol.success, sol.message)
+     assert_allclose(sol.fun, fsol, atol=1e-4)
+
+     constraints = {'type': 'ineq', 'fun': fmin}
+     sol = minimize(fun, x0, constraints=constraints, tol=1e-5)
+     assert_allclose(sol.fun, 1, atol=1e-4)
+
+
+ class TestBounds:
+     # Test cobyla support for bounds (only when used via `minimize`)
+     # Invalid bounds are tested in
+     # test_optimize.TestOptimizeSimple.test_minimize_invalid_bounds
+
+     def test_basic(self):
+         def f(x):
+             return np.sum(x**2)
+
+         lb = [-1, None, 1, None, -0.5]
+         ub = [-0.5, -0.5, None, None, -0.5]
+         bounds = [(a, b) for a, b in zip(lb, ub)]
+         # these are converted to Bounds internally
+
+         res = minimize(f, x0=[1, 2, 3, 4, 5], method='cobyla', bounds=bounds)
+         ref = [-0.5, -0.5, 1, 0, -0.5]
+         assert res.success
+         assert_allclose(res.x, ref, atol=1e-3)
+
+     def test_unbounded(self):
+         def f(x):
+             return np.sum(x**2)
+
+         bounds = Bounds([-np.inf, -np.inf], [np.inf, np.inf])
+         res = minimize(f, x0=[1, 2], method='cobyla', bounds=bounds)
+         assert res.success
+         assert_allclose(res.x, 0, atol=1e-3)
+
+         bounds = Bounds([1, -np.inf], [np.inf, np.inf])
+         res = minimize(f, x0=[1, 2], method='cobyla', bounds=bounds)
+         assert res.success
+         assert_allclose(res.x, [1, 0], atol=1e-3)
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test_constraint_conversion.py ADDED
@@ -0,0 +1,274 @@
+ """
+ Unit test for constraint conversion
+ """
+
+ import numpy as np
+ from numpy.testing import (assert_array_almost_equal,
+                            assert_allclose, assert_warns, suppress_warnings)
+ import pytest
+ from scipy.optimize import (NonlinearConstraint, LinearConstraint,
+                             OptimizeWarning, minimize, BFGS)
+ from .test_minimize_constrained import (Maratos, HyperbolicIneq, Rosenbrock,
+                                         IneqRosenbrock, EqIneqRosenbrock,
+                                         BoundedRosenbrock, Elec)
+
+
+ class TestOldToNew:
+     x0 = (2, 0)
+     bnds = ((0, None), (0, None))
+     method = "trust-constr"
+
+     def test_constraint_dictionary_1(self):
+         def fun(x):
+             return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2
+         cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
+                 {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
+                 {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2})
+
+         with suppress_warnings() as sup:
+             sup.filter(UserWarning, "delta_grad == 0.0")
+             res = minimize(fun, self.x0, method=self.method,
+                            bounds=self.bnds, constraints=cons)
+         assert_allclose(res.x, [1.4, 1.7], rtol=1e-4)
+         assert_allclose(res.fun, 0.8, rtol=1e-4)
+
+     def test_constraint_dictionary_2(self):
+         def fun(x):
+             return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2
+         cons = {'type': 'eq',
+                 'fun': lambda x, p1, p2: p1*x[0] - p2*x[1],
+                 'args': (1, 1.1),
+                 'jac': lambda x, p1, p2: np.array([[p1, -p2]])}
+         with suppress_warnings() as sup:
+             sup.filter(UserWarning, "delta_grad == 0.0")
+             res = minimize(fun, self.x0, method=self.method,
+                            bounds=self.bnds, constraints=cons)
+         assert_allclose(res.x, [1.7918552, 1.62895927])
+         assert_allclose(res.fun, 1.3857466063348418)
+
+     def test_constraint_dictionary_3(self):
+         def fun(x):
+             return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2
+         cons = [{'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
+                 NonlinearConstraint(lambda x: x[0] - x[1], 0, 0)]
+
+         with suppress_warnings() as sup:
+             sup.filter(UserWarning, "delta_grad == 0.0")
+             res = minimize(fun, self.x0, method=self.method,
+                            bounds=self.bnds, constraints=cons)
+         assert_allclose(res.x, [1.75, 1.75], rtol=1e-4)
+         assert_allclose(res.fun, 1.125, rtol=1e-4)
+
+
+ class TestNewToOld:
+
+     def test_multiple_constraint_objects(self):
+         def fun(x):
+             return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2
+         x0 = [2, 0, 1]
+         coni = []  # only inequality constraints (can use cobyla)
+         methods = ["slsqp", "cobyla", "trust-constr"]
+
+         # mixed old and new
+         coni.append([{'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
+                      NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)])
+
+         coni.append([LinearConstraint([1, -2, 0], -2, np.inf),
+                      NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)])
+
+         coni.append([NonlinearConstraint(lambda x: x[0] - 2 * x[1] + 2, 0, np.inf),
+                      NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)])
+
+         for con in coni:
+             funs = {}
+             for method in methods:
+                 with suppress_warnings() as sup:
+                     sup.filter(UserWarning)
+                     result = minimize(fun, x0, method=method, constraints=con)
+                     funs[method] = result.fun
+             assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-4)
+             assert_allclose(funs['cobyla'], funs['trust-constr'], rtol=1e-4)
+
+     def test_individual_constraint_objects(self):
+         def fun(x):
+             return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2
+         x0 = [2, 0, 1]
+
+         cone = []  # with equality constraints (can't use cobyla)
+         coni = []  # only inequality constraints (can use cobyla)
+         methods = ["slsqp", "cobyla", "trust-constr"]
+
+         # nonstandard data types for constraint equality bounds
+         cone.append(NonlinearConstraint(lambda x: x[0] - x[1], 1, 1))
+         cone.append(NonlinearConstraint(lambda x: x[0] - x[1], [1.21], [1.21]))
+         cone.append(NonlinearConstraint(lambda x: x[0] - x[1],
+                                         1.21, np.array([1.21])))
+
+         # multiple equalities
+         cone.append(NonlinearConstraint(
+             lambda x: [x[0] - x[1], x[1] - x[2]],
+             1.21, 1.21))  # two same equalities
+         cone.append(NonlinearConstraint(
+             lambda x: [x[0] - x[1], x[1] - x[2]],
+             [1.21, 1.4], [1.21, 1.4]))  # two different equalities
+         cone.append(NonlinearConstraint(
+             lambda x: [x[0] - x[1], x[1] - x[2]],
+             [1.21, 1.21], 1.21))  # equality specified two ways
+         cone.append(NonlinearConstraint(
+             lambda x: [x[0] - x[1], x[1] - x[2]],
+             [1.21, -np.inf], [1.21, np.inf]))  # equality + unbounded
+
+         # nonstandard data types for constraint inequality bounds
+         coni.append(NonlinearConstraint(lambda x: x[0] - x[1], 1.21, np.inf))
+         coni.append(NonlinearConstraint(lambda x: x[0] - x[1], [1.21], np.inf))
+         coni.append(NonlinearConstraint(lambda x: x[0] - x[1],
+                                         1.21, np.array([np.inf])))
+         coni.append(NonlinearConstraint(lambda x: x[0] - x[1], -np.inf, -3))
+         coni.append(NonlinearConstraint(lambda x: x[0] - x[1],
+                                         np.array(-np.inf), -3))
+
+         # multiple inequalities/equalities
+         coni.append(NonlinearConstraint(
+             lambda x: [x[0] - x[1], x[1] - x[2]],
+             1.21, np.inf))  # two same inequalities
+         cone.append(NonlinearConstraint(
+             lambda x: [x[0] - x[1], x[1] - x[2]],
+             [1.21, -np.inf], [1.21, 1.4]))  # mixed equality/inequality
+         coni.append(NonlinearConstraint(
+             lambda x: [x[0] - x[1], x[1] - x[2]],
+             [1.1, .8], [1.2, 1.4]))  # bounded above and below
+         coni.append(NonlinearConstraint(
+             lambda x: [x[0] - x[1], x[1] - x[2]],
+             [-1.2, -1.4], [-1.1, -.8]))  # - bounded above and below
+
+         # quick check of LinearConstraint class (very little new code to test)
+         cone.append(LinearConstraint([1, -1, 0], 1.21, 1.21))
+         cone.append(LinearConstraint([[1, -1, 0], [0, 1, -1]], 1.21, 1.21))
+         cone.append(LinearConstraint([[1, -1, 0], [0, 1, -1]],
+                                      [1.21, -np.inf], [1.21, 1.4]))
+
+         for con in coni:
+             funs = {}
+             for method in methods:
+                 with suppress_warnings() as sup:
+                     sup.filter(UserWarning)
+                     result = minimize(fun, x0, method=method, constraints=con)
+                     funs[method] = result.fun
+             assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-3)
+             assert_allclose(funs['cobyla'], funs['trust-constr'], rtol=1e-3)
+
+         for con in cone:
+             funs = {}
+             for method in methods[::2]:  # skip cobyla
+                 with suppress_warnings() as sup:
+                     sup.filter(UserWarning)
+                     result = minimize(fun, x0, method=method, constraints=con)
+                     funs[method] = result.fun
+             assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-3)
+
+
+ class TestNewToOldSLSQP:
+     method = 'slsqp'
+     elec = Elec(n_electrons=2)
+     elec.x_opt = np.array([-0.58438468, 0.58438466, 0.73597047,
+                            -0.73597044, 0.34180668, -0.34180667])
+     brock = BoundedRosenbrock()
+     brock.x_opt = [0, 0]
+     list_of_problems = [Maratos(),
+                         HyperbolicIneq(),
+                         Rosenbrock(),
+                         IneqRosenbrock(),
+                         EqIneqRosenbrock(),
+                         elec,
+                         brock
+                         ]
+
+     def test_list_of_problems(self):
+
+         for prob in self.list_of_problems:
+
+             with suppress_warnings() as sup:
+                 sup.filter(UserWarning)
+                 result = minimize(prob.fun, prob.x0,
+                                   method=self.method,
+                                   bounds=prob.bounds,
+                                   constraints=prob.constr)
+
+             assert_array_almost_equal(result.x, prob.x_opt, decimal=3)
+
+     def test_warn_mixed_constraints(self):
+         # warns about inefficiency of mixed equality/inequality constraints
+         def fun(x):
+             return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2
+         cons = NonlinearConstraint(lambda x: [x[0]**2 - x[1], x[1] - x[2]],
+                                    [1.1, .8], [1.1, 1.4])
+         bnds = ((0, None), (0, None), (0, None))
+         with suppress_warnings() as sup:
+             sup.filter(UserWarning, "delta_grad == 0.0")
+             assert_warns(OptimizeWarning, minimize, fun, (2, 0, 1),
+                          method=self.method, bounds=bnds, constraints=cons)
+
+     def test_warn_ignored_options(self):
+         # warns about constraint options being ignored
+         def fun(x):
+             return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2
+         x0 = (2, 0, 1)
+
+         if self.method == "slsqp":
+             bnds = ((0, None), (0, None), (0, None))
+         else:
+             bnds = None
+
+         cons = NonlinearConstraint(lambda x: x[0], 2, np.inf)
+         res = minimize(fun, x0, method=self.method,
+                        bounds=bnds, constraints=cons)
+         # no warnings without constraint options
+         assert_allclose(res.fun, 1)
+
+         cons = LinearConstraint([1, 0, 0], 2, np.inf)
+         res = minimize(fun, x0, method=self.method,
+                        bounds=bnds, constraints=cons)
+         # no warnings without constraint options
+         assert_allclose(res.fun, 1)
+
+         cons = []
+         cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf,
+                                         keep_feasible=True))
+         cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf,
+                                         hess=BFGS()))
+         cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf,
+                                         finite_diff_jac_sparsity=42))
+         cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf,
+                                         finite_diff_rel_step=42))
+         cons.append(LinearConstraint([1, 0, 0], 2, np.inf,
+                                      keep_feasible=True))
+         for con in cons:
+             assert_warns(OptimizeWarning, minimize, fun, x0,
+                          method=self.method, bounds=bnds, constraints=con)
+
+
+ class TestNewToOldCobyla:
+     method = 'cobyla'
+
+     list_of_problems = [
+         Elec(n_electrons=2),
+         Elec(n_electrons=4),
+     ]
+
+     @pytest.mark.slow
+     def test_list_of_problems(self):
+
+         for prob in self.list_of_problems:
+
+             with suppress_warnings() as sup:
+                 sup.filter(UserWarning)
+                 truth = minimize(prob.fun, prob.x0,
+                                  method='trust-constr',
+                                  bounds=prob.bounds,
+                                  constraints=prob.constr)
+                 result = minimize(prob.fun, prob.x0,
+                                   method=self.method,
+                                   bounds=prob.bounds,
+                                   constraints=prob.constr)
+
+             assert_allclose(result.fun, truth.fun, rtol=1e-3)
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test_constraints.py ADDED
@@ -0,0 +1,255 @@
+ import pytest
+ import numpy as np
+ from numpy.testing import TestCase, assert_array_equal
+ import scipy.sparse as sps
+ from scipy.optimize._constraints import (
+     Bounds, LinearConstraint, NonlinearConstraint, PreparedConstraint,
+     new_bounds_to_old, old_bound_to_new, strict_bounds)
+
+
+ class TestStrictBounds(TestCase):
+     def test_scalarvalue_unique_enforce_feasibility(self):
+         m = 3
+         lb = 2
+         ub = 4
+         enforce_feasibility = False
+         strict_lb, strict_ub = strict_bounds(lb, ub,
+                                              enforce_feasibility,
+                                              m)
+         assert_array_equal(strict_lb, [-np.inf, -np.inf, -np.inf])
+         assert_array_equal(strict_ub, [np.inf, np.inf, np.inf])
+
+         enforce_feasibility = True
+         strict_lb, strict_ub = strict_bounds(lb, ub,
+                                              enforce_feasibility,
+                                              m)
+         assert_array_equal(strict_lb, [2, 2, 2])
+         assert_array_equal(strict_ub, [4, 4, 4])
+
+     def test_vectorvalue_unique_enforce_feasibility(self):
+         m = 3
+         lb = [1, 2, 3]
+         ub = [4, 5, 6]
+         enforce_feasibility = False
+         strict_lb, strict_ub = strict_bounds(lb, ub,
+                                              enforce_feasibility,
+                                              m)
+         assert_array_equal(strict_lb, [-np.inf, -np.inf, -np.inf])
+         assert_array_equal(strict_ub, [np.inf, np.inf, np.inf])
+
+         enforce_feasibility = True
+         strict_lb, strict_ub = strict_bounds(lb, ub,
+                                              enforce_feasibility,
+                                              m)
+         assert_array_equal(strict_lb, [1, 2, 3])
+         assert_array_equal(strict_ub, [4, 5, 6])
+
+     def test_scalarvalue_vector_enforce_feasibility(self):
+         m = 3
+         lb = 2
+         ub = 4
+         enforce_feasibility = [False, True, False]
+         strict_lb, strict_ub = strict_bounds(lb, ub,
+                                              enforce_feasibility,
+                                              m)
+         assert_array_equal(strict_lb, [-np.inf, 2, -np.inf])
+         assert_array_equal(strict_ub, [np.inf, 4, np.inf])
+
+     def test_vectorvalue_vector_enforce_feasibility(self):
+         m = 3
+         lb = [1, 2, 3]
+         ub = [4, 6, np.inf]
+         enforce_feasibility = [True, False, True]
+         strict_lb, strict_ub = strict_bounds(lb, ub,
+                                              enforce_feasibility,
+                                              m)
+         assert_array_equal(strict_lb, [1, -np.inf, 3])
+         assert_array_equal(strict_ub, [4, np.inf, np.inf])
+
+
+ def test_prepare_constraint_infeasible_x0():
+     lb = np.array([0, 20, 30])
+     ub = np.array([0.5, np.inf, 70])
+     x0 = np.array([1, 2, 3])
+     enforce_feasibility = np.array([False, True, True], dtype=bool)
+     bounds = Bounds(lb, ub, enforce_feasibility)
+     pytest.raises(ValueError, PreparedConstraint, bounds, x0)
+
+     pc = PreparedConstraint(Bounds(lb, ub), [1, 2, 3])
+     assert (pc.violation([1, 2, 3]) > 0).any()
+     assert (pc.violation([0.25, 21, 31]) == 0).all()
+
+     x0 = np.array([1, 2, 3, 4])
+     A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]])
+     enforce_feasibility = np.array([True, True, True], dtype=bool)
+     linear = LinearConstraint(A, -np.inf, 0, enforce_feasibility)
+     pytest.raises(ValueError, PreparedConstraint, linear, x0)
+
+     pc = PreparedConstraint(LinearConstraint(A, -np.inf, 0),
+                             [1, 2, 3, 4])
+     assert (pc.violation([1, 2, 3, 4]) > 0).any()
+     assert (pc.violation([-10, 2, -10, 4]) == 0).all()
+
+     def fun(x):
+         return A.dot(x)
+
+     def jac(x):
+         return A
+
+     def hess(x, v):
+         return sps.csr_matrix((4, 4))
+
+     nonlinear = NonlinearConstraint(fun, -np.inf, 0, jac, hess,
+                                     enforce_feasibility)
+     pytest.raises(ValueError, PreparedConstraint, nonlinear, x0)
+
+     pc = PreparedConstraint(nonlinear, [-10, 2, -10, 4])
+     assert (pc.violation([1, 2, 3, 4]) > 0).any()
+     assert (pc.violation([-10, 2, -10, 4]) == 0).all()
+
+
+ def test_violation():
+     def cons_f(x):
+         return np.array([x[0] ** 2 + x[1], x[0] ** 2 - x[1]])
+
+     nlc = NonlinearConstraint(cons_f, [-1, -0.8500], [2, 2])
+     pc = PreparedConstraint(nlc, [0.5, 1])
+
+     assert_array_equal(pc.violation([0.5, 1]), [0., 0.])
+
+     np.testing.assert_almost_equal(pc.violation([0.5, 1.2]), [0., 0.1])
+
+     np.testing.assert_almost_equal(pc.violation([1.2, 1.2]), [0.64, 0])
+
+     np.testing.assert_almost_equal(pc.violation([0.1, -1.2]), [0.19, 0])
+
+     np.testing.assert_almost_equal(pc.violation([0.1, 2]), [0.01, 1.14])
+
+
+ def test_new_bounds_to_old():
+     lb = np.array([-np.inf, 2, 3])
+     ub = np.array([3, np.inf, 10])
+
+     bounds = [(None, 3), (2, None), (3, 10)]
+     assert_array_equal(new_bounds_to_old(lb, ub, 3), bounds)
+
+     bounds_single_lb = [(-1, 3), (-1, None), (-1, 10)]
+     assert_array_equal(new_bounds_to_old(-1, ub, 3), bounds_single_lb)
+
+     bounds_no_lb = [(None, 3), (None, None), (None, 10)]
+     assert_array_equal(new_bounds_to_old(-np.inf, ub, 3), bounds_no_lb)
+
+     bounds_single_ub = [(None, 20), (2, 20), (3, 20)]
+     assert_array_equal(new_bounds_to_old(lb, 20, 3), bounds_single_ub)
+
+     bounds_no_ub = [(None, None), (2, None), (3, None)]
+     assert_array_equal(new_bounds_to_old(lb, np.inf, 3), bounds_no_ub)
+
+     bounds_single_both = [(1, 2), (1, 2), (1, 2)]
+     assert_array_equal(new_bounds_to_old(1, 2, 3), bounds_single_both)
+
+     bounds_no_both = [(None, None), (None, None), (None, None)]
+     assert_array_equal(new_bounds_to_old(-np.inf, np.inf, 3), bounds_no_both)
+
+
+ def test_old_bounds_to_new():
+     bounds = ([1, 2], (None, 3), (-1, None))
+     lb_true = np.array([1, -np.inf, -1])
+     ub_true = np.array([2, 3, np.inf])
+
+     lb, ub = old_bound_to_new(bounds)
+     assert_array_equal(lb, lb_true)
+     assert_array_equal(ub, ub_true)
+
+     bounds = [(-np.inf, np.inf), (np.array([1]), np.array([1]))]
+     lb, ub = old_bound_to_new(bounds)
+
+     assert_array_equal(lb, [-np.inf, 1])
+     assert_array_equal(ub, [np.inf, 1])
+
+
+ class TestBounds:
+     def test_repr(self):
+         # so that eval works
+         from numpy import array, inf  # noqa: F401
+         for args in (
+             (-1.0, 5.0),
+             (-1.0, np.inf, True),
+             (np.array([1.0, -np.inf]), np.array([2.0, np.inf])),
+             (np.array([1.0, -np.inf]), np.array([2.0, np.inf]),
+              np.array([True, False])),
+         ):
+             bounds = Bounds(*args)
+             bounds2 = eval(repr(Bounds(*args)))
+             assert_array_equal(bounds.lb, bounds2.lb)
+             assert_array_equal(bounds.ub, bounds2.ub)
+             assert_array_equal(bounds.keep_feasible, bounds2.keep_feasible)
+
+     def test_array(self):
+         # gh13501
+         b = Bounds(lb=[0.0, 0.0], ub=[1.0, 1.0])
+         assert isinstance(b.lb, np.ndarray)
+         assert isinstance(b.ub, np.ndarray)
+
+     def test_defaults(self):
+         b1 = Bounds()
+         b2 = Bounds(np.asarray(-np.inf), np.asarray(np.inf))
+         assert b1.lb == b2.lb
+         assert b1.ub == b2.ub
+
+     def test_input_validation(self):
+         message = "Lower and upper bounds must be dense arrays."
+         with pytest.raises(ValueError, match=message):
+             Bounds(sps.coo_array([1, 2]), [1, 2])
+         with pytest.raises(ValueError, match=message):
+             Bounds([1, 2], sps.coo_array([1, 2]))
+
+         message = "`keep_feasible` must be a dense array."
+         with pytest.raises(ValueError, match=message):
+             Bounds([1, 2], [1, 2], keep_feasible=sps.coo_array([True, True]))
+
+         message = "`lb`, `ub`, and `keep_feasible` must be broadcastable."
+         with pytest.raises(ValueError, match=message):
+             Bounds([1, 2], [1, 2, 3])
+
+     def test_residual(self):
+         bounds = Bounds(-2, 4)
+         x0 = [-1, 2]
+         np.testing.assert_allclose(bounds.residual(x0), ([1, 4], [5, 2]))
+
+
+ class TestLinearConstraint:
+     def test_defaults(self):
+         A = np.eye(4)
+         lc = LinearConstraint(A)
+         lc2 = LinearConstraint(A, -np.inf, np.inf)
+         assert_array_equal(lc.lb, lc2.lb)
+         assert_array_equal(lc.ub, lc2.ub)
+
+     def test_input_validation(self):
+         A = np.eye(4)
+         message = "`lb`, `ub`, and `keep_feasible` must be broadcastable"
+         with pytest.raises(ValueError, match=message):
+             LinearConstraint(A, [1, 2], [1, 2, 3])
+
+         message = "Constraint limits must be dense arrays"
+         with pytest.raises(ValueError, match=message):
+             LinearConstraint(A, sps.coo_array([1, 2]), [2, 3])
+         with pytest.raises(ValueError, match=message):
+             LinearConstraint(A, [1, 2], sps.coo_array([2, 3]))
+
+         message = "`keep_feasible` must be a dense array"
+         with pytest.raises(ValueError, match=message):
+             keep_feasible = sps.coo_array([True, True])
+             LinearConstraint(A, [1, 2], [2, 3], keep_feasible=keep_feasible)
+
+         A = np.empty((4, 3, 5))
+         message = "`A` must have exactly two dimensions."
+         with pytest.raises(ValueError, match=message):
+             LinearConstraint(A)
+
+     def test_residual(self):
+         A = np.eye(2)
+         lc = LinearConstraint(A, -2, 4)
+         x0 = [-1, 2]
+         np.testing.assert_allclose(lc.residual(x0), ([1, 4], [5, 2]))
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/test_cython_optimize.py ADDED
@@ -0,0 +1,92 @@
+ """
+ Test Cython optimize zeros API functions: ``bisect``, ``ridder``, ``brenth``,
+ and ``brentq`` in `scipy.optimize.cython_optimize`, by finding the roots of a
+ 3rd order polynomial given a sequence of constant terms, ``a0``, and fixed 1st,
+ 2nd, and 3rd order terms in ``args``.
+
+ .. math::
+
+     f(x, a0, args) = ((args[2]*x + args[1])*x + args[0])*x + a0
+
+ The 3rd order polynomial function is written in Cython and called in a Python
+ wrapper named after the zero function. See the private ``_zeros`` Cython module
+ in `scipy.optimize.cython_optimize` for more information.
+ """
+
+ import numpy.testing as npt
+ from scipy.optimize.cython_optimize import _zeros
+
+ # CONSTANTS
+ # Solve x**3 - A0 = 0 for A0 = [2.0, 2.1, ..., 2.9].
+ # The ARGS have 3 elements just to show how this could be done for any cubic
+ # polynomial.
+ A0 = tuple(-2.0 - x/10.0 for x in range(10))  # constant term
+ ARGS = (0.0, 0.0, 1.0)  # 1st, 2nd, and 3rd order terms
+ XLO, XHI = 0.0, 2.0  # first and second bounds of zeros functions
+ # absolute and relative tolerances and max iterations for zeros functions
+ XTOL, RTOL, MITR = 0.001, 0.001, 10
+ EXPECTED = [(-a0) ** (1.0/3.0) for a0 in A0]
+ # = [1.2599210498948732,
+ #    1.2805791649874942,
+ #    1.300591446851387,
+ #    1.3200061217959123,
+ #    1.338865900164339,
+ #    1.3572088082974532,
+ #    1.375068867074141,
+ #    1.3924766500838337,
+ #    1.4094597464129783,
+ #    1.4260431471424087]
+
+
+ # test bisect
+ def test_bisect():
+     npt.assert_allclose(
+         EXPECTED,
+         list(
+             _zeros.loop_example('bisect', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR)
+         ),
+         rtol=RTOL, atol=XTOL
+     )
+
+
+ # test ridder
+ def test_ridder():
+     npt.assert_allclose(
+         EXPECTED,
+         list(
+             _zeros.loop_example('ridder', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR)
+         ),
+         rtol=RTOL, atol=XTOL
+     )
+
+
+ # test brenth
+ def test_brenth():
+     npt.assert_allclose(
+         EXPECTED,
+         list(
+             _zeros.loop_example('brenth', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR)
+         ),
+         rtol=RTOL, atol=XTOL
+     )
+
+
+ # test brentq
+ def test_brentq():
+     npt.assert_allclose(
+         EXPECTED,
+         list(
+             _zeros.loop_example('brentq', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR)
+         ),
+         rtol=RTOL, atol=XTOL
+     )
+
+
+ # test brentq with full output
+ def test_brentq_full_output():
+     output = _zeros.full_output_example(
+         (A0[0],) + ARGS, XLO, XHI, XTOL, RTOL, MITR)
+     npt.assert_allclose(EXPECTED[0], output['root'], rtol=RTOL, atol=XTOL)
+     npt.assert_equal(6, output['iterations'])
+     npt.assert_equal(7, output['funcalls'])
+     npt.assert_equal(0, output['error_num'])