applied-ai-018 committed
Commit b4c75b4 · verified · 1 parent: d9d012b

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step40/zero/13.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step40/zero/13.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step40/zero/5.attention.query_key_value.weight/exp_avg.pt +3 -0
  4. venv/lib/python3.10/site-packages/scipy/optimize/README +76 -0
  5. venv/lib/python3.10/site-packages/scipy/optimize/__init__.py +451 -0
  6. venv/lib/python3.10/site-packages/scipy/optimize/_bglu_dense.cpython-310-x86_64-linux-gnu.so +0 -0
  7. venv/lib/python3.10/site-packages/scipy/optimize/_bracket.py +663 -0
  8. venv/lib/python3.10/site-packages/scipy/optimize/_chandrupatla.py +524 -0
  9. venv/lib/python3.10/site-packages/scipy/optimize/_cobyla.cpython-310-x86_64-linux-gnu.so +0 -0
  10. venv/lib/python3.10/site-packages/scipy/optimize/_cobyla_py.py +316 -0
  11. venv/lib/python3.10/site-packages/scipy/optimize/_constraints.py +590 -0
  12. venv/lib/python3.10/site-packages/scipy/optimize/_dcsrch.py +728 -0
  13. venv/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py +646 -0
  14. venv/lib/python3.10/site-packages/scipy/optimize/_differentialevolution.py +1897 -0
  15. venv/lib/python3.10/site-packages/scipy/optimize/_differentiate.py +669 -0
  16. venv/lib/python3.10/site-packages/scipy/optimize/_direct.cpython-310-x86_64-linux-gnu.so +0 -0
  17. venv/lib/python3.10/site-packages/scipy/optimize/_dual_annealing.py +715 -0
  18. venv/lib/python3.10/site-packages/scipy/optimize/_group_columns.cpython-310-x86_64-linux-gnu.so +0 -0
  19. venv/lib/python3.10/site-packages/scipy/optimize/_hessian_update_strategy.py +430 -0
  20. venv/lib/python3.10/site-packages/scipy/optimize/_highs/__init__.py +0 -0
  21. venv/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_constants.cpython-310-x86_64-linux-gnu.so +0 -0
  22. venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsInfo.pxd +22 -0
  23. venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsOptions.pxd +110 -0
  24. venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsRuntimeOptions.pxd +9 -0
  25. venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsStatus.pxd +12 -0
  26. venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/SimplexConst.pxd +95 -0
  27. venv/lib/python3.10/site-packages/scipy/optimize/_isotonic.py +158 -0
  28. venv/lib/python3.10/site-packages/scipy/optimize/_lbfgsb.cpython-310-x86_64-linux-gnu.so +0 -0
  29. venv/lib/python3.10/site-packages/scipy/optimize/_lbfgsb_py.py +543 -0
  30. venv/lib/python3.10/site-packages/scipy/optimize/_linesearch.py +897 -0
  31. venv/lib/python3.10/site-packages/scipy/optimize/_linprog.py +714 -0
  32. venv/lib/python3.10/site-packages/scipy/optimize/_linprog_highs.py +440 -0
  33. venv/lib/python3.10/site-packages/scipy/optimize/_linprog_rs.py +572 -0
  34. venv/lib/python3.10/site-packages/scipy/optimize/_linprog_util.py +1522 -0
  35. venv/lib/python3.10/site-packages/scipy/optimize/_lsap.cpython-310-x86_64-linux-gnu.so +0 -0
  36. venv/lib/python3.10/site-packages/scipy/optimize/_milp.py +392 -0
  37. venv/lib/python3.10/site-packages/scipy/optimize/_minimize.py +1094 -0
  38. venv/lib/python3.10/site-packages/scipy/optimize/_minpack.cpython-310-x86_64-linux-gnu.so +0 -0
  39. venv/lib/python3.10/site-packages/scipy/optimize/_minpack2.cpython-310-x86_64-linux-gnu.so +0 -0
  40. venv/lib/python3.10/site-packages/scipy/optimize/_minpack_py.py +1157 -0
  41. venv/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so +0 -0
  42. venv/lib/python3.10/site-packages/scipy/optimize/_nnls.py +164 -0
  43. venv/lib/python3.10/site-packages/scipy/optimize/_nonlin.py +1584 -0
  44. venv/lib/python3.10/site-packages/scipy/optimize/_numdiff.py +775 -0
  45. venv/lib/python3.10/site-packages/scipy/optimize/_optimize.py +0 -0
  46. venv/lib/python3.10/site-packages/scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so +0 -0
  47. venv/lib/python3.10/site-packages/scipy/optimize/_qap.py +731 -0
  48. venv/lib/python3.10/site-packages/scipy/optimize/_remove_redundancy.py +522 -0
  49. venv/lib/python3.10/site-packages/scipy/optimize/_root.py +711 -0
  50. venv/lib/python3.10/site-packages/scipy/optimize/_root_scalar.py +525 -0
ckpts/universal/global_step40/zero/13.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:85ce52a40f210a9d1be7feca261731331d2ac454dae28e82ab371c9aa3f7f2a5
+ size 33555627
ckpts/universal/global_step40/zero/13.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18b5ad9ed2b97e354ae7411416261ffc0b4e4701b81b866acdc86969a949c5f8
+ size 33555533
ckpts/universal/global_step40/zero/5.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5bad19900d45260a95a9e03b6b500270b3a3cf37dd12fa74340e96bc76b76f51
+ size 50332828
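
The three checkpoint entries above are Git LFS pointer files: the repository stores only the spec version line, the SHA-256 object id, and the byte size, while the tensor payload lives in LFS storage. A minimal sketch of reading one shard after the real file has been fetched (e.g. via `git lfs pull`); that the `.pt` file holds a single optimizer-state tensor is a guess based on the file name, not something the diff confirms:

    # Hypothetical loading sketch; the path comes from the diff above.
    import torch

    path = ("ckpts/universal/global_step40/zero/"
            "13.mlp.dense_4h_to_h.weight/exp_avg_sq.pt")
    state = torch.load(path, map_location="cpu")  # fails if only the LFS pointer is present
    print(type(state), getattr(state, "shape", None))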
venv/lib/python3.10/site-packages/scipy/optimize/README ADDED
@@ -0,0 +1,76 @@
+ From the website for the L-BFGS-B code (at
+ http://www.ece.northwestern.edu/~nocedal/lbfgsb.html):
+
+ """
+ L-BFGS-B is a limited-memory quasi-Newton code for bound-constrained
+ optimization, i.e. for problems where the only constraints are of the
+ form l <= x <= u.
+ """
+
+ This is a Python wrapper (using F2PY) written by David M. Cooke
+ <[email protected]> and released as version 0.9 on April 9, 2004.
+ The wrapper was slightly modified by Joonas Paalasmaa for the 3.0 version
+ in March 2012.
+
+ License of L-BFGS-B (Fortran code)
+ ==================================
+
+ The version included here (in lbfgsb.f) is 3.0 (released April 25, 2011). It was
+ written by Ciyou Zhu, Richard Byrd, and Jorge Nocedal <[email protected]>. It
+ carries the following condition for use:
+
+ """
+ This software is freely available, but we expect that all publications
+ describing work using this software, or all commercial products using it,
+ quote at least one of the references given below. This software is released
+ under the BSD License.
+
+ References
+ * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound
+   Constrained Optimization, (1995), SIAM Journal on Scientific and
+   Statistical Computing, 16, 5, pp. 1190-1208.
+ * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
+   FORTRAN routines for large scale bound constrained optimization (1997),
+   ACM Transactions on Mathematical Software, 23, 4, pp. 550-560.
+ * J. L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B,
+   FORTRAN routines for large scale bound constrained optimization (2011),
+   ACM Transactions on Mathematical Software, 38, 1.
+ """
+
+ The Python wrapper
+ ==================
+
+ This code uses F2PY (http://cens.ioc.ee/projects/f2py2e/) to generate
+ the wrapper around the Fortran code.
+
+ The Python code and wrapper are copyrighted 2004 by David M. Cooke
+ <[email protected]>.
+
+ Example usage
+ =============
+
+ An example of the usage is given at the bottom of the lbfgsb.py file.
+ Run it with 'python lbfgsb.py'.
+
+ License for the Python wrapper
+ ==============================
+
+ Copyright (c) 2004 David M. Cooke <[email protected]>
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
+ this software and associated documentation files (the "Software"), to deal in
+ the Software without restriction, including without limitation the rights to
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is furnished to do
+ so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
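
The README defers to an example at the bottom of lbfgsb.py; for orientation, here is a minimal sketch of the same bound-constrained setup driven through the modern `scipy.optimize.minimize` interface (the quadratic objective and bounds are illustrative, not part of this commit):

    # Bound-constrained minimization with L-BFGS-B; objective/bounds are illustrative.
    import numpy as np
    from scipy.optimize import minimize

    def f(x):
        return (x[0] - 1.0)**2 + (x[1] - 2.5)**2  # unconstrained minimum at (1, 2.5)

    res = minimize(f, x0=np.array([2.0, 0.0]), method="L-BFGS-B",
                   bounds=[(0.0, None), (0.0, 2.0)])  # constraints of the form l <= x <= u
    print(res.x)  # the second coordinate is clipped at its upper bound 2.0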
venv/lib/python3.10/site-packages/scipy/optimize/__init__.py ADDED
@@ -0,0 +1,451 @@
+ """
+ =====================================================
+ Optimization and root finding (:mod:`scipy.optimize`)
+ =====================================================
+
+ .. currentmodule:: scipy.optimize
+
+ .. toctree::
+    :hidden:
+
+    optimize.cython_optimize
+
+ SciPy ``optimize`` provides functions for minimizing (or maximizing)
+ objective functions, possibly subject to constraints. It includes
+ solvers for nonlinear problems (with support for both local and global
+ optimization algorithms), linear programming, constrained
+ and nonlinear least-squares, root finding, and curve fitting.
+
+ Common functions and objects, shared across different solvers, are:
+
+ .. autosummary::
+    :toctree: generated/
+
+    show_options - Show specific options for optimization solvers.
+    OptimizeResult - The optimization result returned by some optimizers.
+    OptimizeWarning - The optimization encountered problems.
+
+
+ Optimization
+ ============
+
+ Scalar functions optimization
+ -----------------------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    minimize_scalar - Interface for minimizers of univariate functions.
+
+ The `minimize_scalar` function supports the following methods:
+
+ .. toctree::
+
+    optimize.minimize_scalar-brent
+    optimize.minimize_scalar-bounded
+    optimize.minimize_scalar-golden
+
+ Local (multivariate) optimization
+ ---------------------------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    minimize - Interface for minimizers of multivariate functions.
+
+ The `minimize` function supports the following methods:
+
+ .. toctree::
+
+    optimize.minimize-neldermead
+    optimize.minimize-powell
+    optimize.minimize-cg
+    optimize.minimize-bfgs
+    optimize.minimize-newtoncg
+    optimize.minimize-lbfgsb
+    optimize.minimize-tnc
+    optimize.minimize-cobyla
+    optimize.minimize-slsqp
+    optimize.minimize-trustconstr
+    optimize.minimize-dogleg
+    optimize.minimize-trustncg
+    optimize.minimize-trustkrylov
+    optimize.minimize-trustexact
+
+ Constraints are passed to the `minimize` function as a single object or
+ as a list of objects from the following classes:
+
+ .. autosummary::
+    :toctree: generated/
+
+    NonlinearConstraint - Class defining general nonlinear constraints.
+    LinearConstraint - Class defining general linear constraints.
+
+ Simple bound constraints are handled separately and there is a special class
+ for them:
+
+ .. autosummary::
+    :toctree: generated/
+
+    Bounds - Bound constraints.
+
+ Quasi-Newton strategies implementing the `HessianUpdateStrategy`
+ interface can be used to approximate the Hessian in the `minimize`
+ function (available only for the 'trust-constr' method). Available
+ quasi-Newton methods implementing this interface are:
+
+ .. autosummary::
+    :toctree: generated/
+
+    BFGS - Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy.
+    SR1 - Symmetric-rank-1 Hessian update strategy.
+
+ .. _global_optimization:
+
+ Global optimization
+ -------------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    basinhopping - Basinhopping stochastic optimizer.
+    brute - Brute force searching optimizer.
+    differential_evolution - Stochastic optimizer using differential evolution.
+    shgo - Simplicial homology global optimizer.
+    dual_annealing - Dual annealing stochastic optimizer.
+    direct - DIRECT (Dividing Rectangles) optimizer.
+
+ Least-squares and curve fitting
+ ===============================
+
+ Nonlinear least-squares
+ -----------------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    least_squares - Solve a nonlinear least-squares problem with bounds on the variables.
+
+ Linear least-squares
+ --------------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    nnls - Linear least-squares problem with non-negativity constraint.
+    lsq_linear - Linear least-squares problem with bound constraints.
+    isotonic_regression - Least squares problem of isotonic regression via PAVA.
+
+ Curve fitting
+ -------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    curve_fit -- Fit a curve to a set of points.
+
+ Root finding
+ ============
+
+ Scalar functions
+ ----------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    root_scalar - Unified interface for nonlinear solvers of scalar functions.
+    brentq - Quadratic interpolation Brent method.
+    brenth - Brent method, modified by Harris with hyperbolic extrapolation.
+    ridder - Ridder's method.
+    bisect - Bisection method.
+    newton - Newton's method (also Secant and Halley's methods).
+    toms748 - Alefeld, Potra & Shi Algorithm 748.
+    RootResults - The root finding result returned by some root finders.
+
+ The `root_scalar` function supports the following methods:
+
+ .. toctree::
+
+    optimize.root_scalar-brentq
+    optimize.root_scalar-brenth
+    optimize.root_scalar-bisect
+    optimize.root_scalar-ridder
+    optimize.root_scalar-newton
+    optimize.root_scalar-toms748
+    optimize.root_scalar-secant
+    optimize.root_scalar-halley
+
+ The table below lists situations and appropriate methods, along with
+ *asymptotic* convergence rates per iteration (and per function evaluation)
+ for successful convergence to a simple root(*).
+ Bisection is the slowest of them all, adding one bit of accuracy for each
+ function evaluation, but is guaranteed to converge.
+ The other bracketing methods all (eventually) increase the number of accurate
+ bits by about 50% for every function evaluation.
+ The derivative-based methods, all built on `newton`, can converge quite quickly
+ if the initial value is close to the root. They can also be applied to
+ functions defined on (a subset of) the complex plane.
+
+ +-------------+----------+----------+-----------+-------------+-------------+----------------+
+ | Domain of f | Bracket? |     Derivatives?     |   Solvers   |         Convergence          |
+ +             +          +----------+-----------+             +-------------+----------------+
+ |             |          | `fprime` | `fprime2` |             | Guaranteed? |   Rate(s)(*)   |
+ +=============+==========+==========+===========+=============+=============+================+
+ | `R`         | Yes      | N/A      | N/A       | - bisection | - Yes       | - 1 "Linear"   |
+ |             |          |          |           | - brentq    | - Yes       | - >=1, <= 1.62 |
+ |             |          |          |           | - brenth    | - Yes       | - >=1, <= 1.62 |
+ |             |          |          |           | - ridder    | - Yes       | - 2.0 (1.41)   |
+ |             |          |          |           | - toms748   | - Yes       | - 2.7 (1.65)   |
+ +-------------+----------+----------+-----------+-------------+-------------+----------------+
+ | `R` or `C`  | No       | No       | No        | secant      | No          | 1.62 (1.62)    |
+ +-------------+----------+----------+-----------+-------------+-------------+----------------+
+ | `R` or `C`  | No       | Yes      | No        | newton      | No          | 2.00 (1.41)    |
+ +-------------+----------+----------+-----------+-------------+-------------+----------------+
+ | `R` or `C`  | No       | Yes      | Yes       | halley      | No          | 3.00 (1.44)    |
+ +-------------+----------+----------+-----------+-------------+-------------+----------------+
+
+ .. seealso::
+
+    `scipy.optimize.cython_optimize` -- Typed Cython versions of root finding functions
+
+ Fixed point finding:
+
+ .. autosummary::
+    :toctree: generated/
+
+    fixed_point - Single-variable fixed-point solver.
+
+ Multidimensional
+ ----------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    root - Unified interface for nonlinear solvers of multivariate functions.
+
+ The `root` function supports the following methods:
+
+ .. toctree::
+
+    optimize.root-hybr
+    optimize.root-lm
+    optimize.root-broyden1
+    optimize.root-broyden2
+    optimize.root-anderson
+    optimize.root-linearmixing
+    optimize.root-diagbroyden
+    optimize.root-excitingmixing
+    optimize.root-krylov
+    optimize.root-dfsane
+
+ Linear programming / MILP
+ =========================
+
+ .. autosummary::
+    :toctree: generated/
+
+    milp -- Mixed integer linear programming.
+    linprog -- Unified interface for minimizers of linear programming problems.
+
+ The `linprog` function supports the following methods:
+
+ .. toctree::
+
+    optimize.linprog-simplex
+    optimize.linprog-interior-point
+    optimize.linprog-revised_simplex
+    optimize.linprog-highs-ipm
+    optimize.linprog-highs-ds
+    optimize.linprog-highs
+
+ The simplex, interior-point, and revised simplex methods support callback
+ functions, such as:
+
+ .. autosummary::
+    :toctree: generated/
+
+    linprog_verbose_callback -- Sample callback function for linprog (simplex).
+
+ Assignment problems
+ ===================
+
+ .. autosummary::
+    :toctree: generated/
+
+    linear_sum_assignment -- Solves the linear-sum assignment problem.
+    quadratic_assignment -- Solves the quadratic assignment problem.
+
+ The `quadratic_assignment` function supports the following methods:
+
+ .. toctree::
+
+    optimize.qap-faq
+    optimize.qap-2opt
+
+ Utilities
+ =========
+
+ Finite-difference approximation
+ -------------------------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    approx_fprime - Approximate the gradient of a scalar function.
+    check_grad - Check the supplied derivative using finite differences.
+
+ Line search
+ -----------
+
+ .. autosummary::
+    :toctree: generated/
+
+    bracket - Bracket a minimum, given two starting points.
+    line_search - Return a step that satisfies the strong Wolfe conditions.
+
+ Hessian approximation
+ ---------------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    LbfgsInvHessProduct - Linear operator for L-BFGS approximate inverse Hessian.
+    HessianUpdateStrategy - Interface for implementing Hessian update strategies.
+
+ Benchmark problems
+ ------------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    rosen - The Rosenbrock function.
+    rosen_der - The derivative of the Rosenbrock function.
+    rosen_hess - The Hessian matrix of the Rosenbrock function.
+    rosen_hess_prod - Product of the Rosenbrock Hessian with a vector.
+
+ Legacy functions
+ ================
+
+ The functions below are not recommended for use in new scripts;
+ all of these methods are accessible via the newer, more consistent
+ interfaces provided above.
+
+ Optimization
+ ------------
+
+ General-purpose multivariate methods:
+
+ .. autosummary::
+    :toctree: generated/
+
+    fmin - Nelder-Mead Simplex algorithm.
+    fmin_powell - Powell's (modified) conjugate direction method.
+    fmin_cg - Non-linear (Polak-Ribiere) conjugate gradient algorithm.
+    fmin_bfgs - Quasi-Newton method (Broyden-Fletcher-Goldfarb-Shanno).
+    fmin_ncg - Line-search Newton Conjugate Gradient.
+
+ Constrained multivariate methods:
+
+ .. autosummary::
+    :toctree: generated/
+
+    fmin_l_bfgs_b - Zhu, Byrd, and Nocedal's constrained optimizer.
+    fmin_tnc - Truncated Newton code.
+    fmin_cobyla - Constrained optimization by linear approximation.
+    fmin_slsqp - Minimization using sequential least-squares programming.
+
+ Univariate (scalar) minimization methods:
+
+ .. autosummary::
+    :toctree: generated/
+
+    fminbound - Bounded minimization of a scalar function.
+    brent - 1-D function minimization using Brent method.
+    golden - 1-D function minimization using Golden Section method.
+
+ Least-squares
+ -------------
+
+ .. autosummary::
+    :toctree: generated/
+
+    leastsq - Minimize the sum of squares of M equations in N unknowns.
+
+ Root finding
+ ------------
+
+ General nonlinear solvers:
+
+ .. autosummary::
+    :toctree: generated/
+
+    fsolve - Non-linear multivariable equation solver.
+    broyden1 - Broyden's first method.
+    broyden2 - Broyden's second method.
+    NoConvergence - Exception raised when a nonlinear solver does not converge.
+
+ Large-scale nonlinear solvers:
+
+ .. autosummary::
+    :toctree: generated/
+
+    newton_krylov
+    anderson
+
+    BroydenFirst
+    InverseJacobian
+    KrylovJacobian
+
+ Simple iteration solvers:
+
+ .. autosummary::
+    :toctree: generated/
+
+    excitingmixing
+    linearmixing
+    diagbroyden
+
+ """  # noqa: E501
+
+ from ._optimize import *
+ from ._minimize import *
+ from ._root import *
+ from ._root_scalar import *
+ from ._minpack_py import *
+ from ._zeros_py import *
+ from ._lbfgsb_py import fmin_l_bfgs_b, LbfgsInvHessProduct
+ from ._tnc import fmin_tnc
+ from ._cobyla_py import fmin_cobyla
+ from ._nonlin import *
+ from ._slsqp_py import fmin_slsqp
+ from ._nnls import nnls
+ from ._basinhopping import basinhopping
+ from ._linprog import linprog, linprog_verbose_callback
+ from ._lsap import linear_sum_assignment
+ from ._differentialevolution import differential_evolution
+ from ._lsq import least_squares, lsq_linear
+ from ._isotonic import isotonic_regression
+ from ._constraints import (NonlinearConstraint,
+                            LinearConstraint,
+                            Bounds)
+ from ._hessian_update_strategy import HessianUpdateStrategy, BFGS, SR1
+ from ._shgo import shgo
+ from ._dual_annealing import dual_annealing
+ from ._qap import quadratic_assignment
+ from ._direct_py import direct
+ from ._milp import milp
+
+ # Deprecated namespaces, to be removed in v2.0.0
+ from . import (
+     cobyla, lbfgsb, linesearch, minpack, minpack2, moduleTNC, nonlin, optimize,
+     slsqp, tnc, zeros
+ )
+
+ __all__ = [s for s in dir() if not s.startswith('_')]
+
+ from scipy._lib._testutils import PytestTester
+ test = PytestTester(__name__)
+ del PytestTester
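
Since the docstring above is organized around unified entry points, here is a short sketch of the two most common ones, `minimize` and `root_scalar` (the objective, bracket, and method choices are illustrative):

    # Illustrative use of the unified interfaces documented above.
    import numpy as np
    from scipy.optimize import minimize, root_scalar, rosen

    # Local multivariate minimization through the single `minimize` entry point.
    res_min = minimize(rosen, x0=np.array([-1.2, 1.0]), method="BFGS")

    # Scalar root finding; brentq requires a bracket with a sign change.
    res_root = root_scalar(lambda x: x**3 - 2*x - 5, bracket=[2, 3], method="brentq")

    print(res_min.x, res_root.root)  # ~[1, 1] and ~2.0946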
venv/lib/python3.10/site-packages/scipy/optimize/_bglu_dense.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (364 kB).
venv/lib/python3.10/site-packages/scipy/optimize/_bracket.py ADDED
@@ -0,0 +1,663 @@
+ import numpy as np
+ import scipy._lib._elementwise_iterative_method as eim
+ from scipy._lib._util import _RichResult
+
+ _ELIMITS = -1  # used in _bracket_root
+ _ESTOPONESIDE = 2  # used in _bracket_root
+
+ def _bracket_root_iv(func, xl0, xr0, xmin, xmax, factor, args, maxiter):
+
+     if not callable(func):
+         raise ValueError('`func` must be callable.')
+
+     if not np.iterable(args):
+         args = (args,)
+
+     xl0 = np.asarray(xl0)[()]
+     if not np.issubdtype(xl0.dtype, np.number) or np.iscomplex(xl0).any():
+         raise ValueError('`xl0` must be numeric and real.')
+
+     xr0 = xl0 + 1 if xr0 is None else xr0
+     xmin = -np.inf if xmin is None else xmin
+     xmax = np.inf if xmax is None else xmax
+     factor = 2. if factor is None else factor
+     xl0, xr0, xmin, xmax, factor = np.broadcast_arrays(xl0, xr0, xmin, xmax, factor)
+
+     if not np.issubdtype(xr0.dtype, np.number) or np.iscomplex(xr0).any():
+         raise ValueError('`xr0` must be numeric and real.')
+
+     if not np.issubdtype(xmin.dtype, np.number) or np.iscomplex(xmin).any():
+         raise ValueError('`xmin` must be numeric and real.')
+
+     if not np.issubdtype(xmax.dtype, np.number) or np.iscomplex(xmax).any():
+         raise ValueError('`xmax` must be numeric and real.')
+
+     if not np.issubdtype(factor.dtype, np.number) or np.iscomplex(factor).any():
+         raise ValueError('`factor` must be numeric and real.')
+     if not np.all(factor > 1):
+         raise ValueError('All elements of `factor` must be greater than 1.')
+
+     maxiter = np.asarray(maxiter)
+     message = '`maxiter` must be a non-negative integer.'
+     if (not np.issubdtype(maxiter.dtype, np.number) or maxiter.shape != tuple()
+             or np.iscomplex(maxiter)):
+         raise ValueError(message)
+     maxiter_int = int(maxiter[()])
+     if not maxiter == maxiter_int or maxiter < 0:
+         raise ValueError(message)
+
+     if not np.all((xmin <= xl0) & (xl0 < xr0) & (xr0 <= xmax)):
+         raise ValueError('`xmin <= xl0 < xr0 <= xmax` must be True (elementwise).')
+
+     return func, xl0, xr0, xmin, xmax, factor, args, maxiter
+
+
+ def _bracket_root(func, xl0, xr0=None, *, xmin=None, xmax=None, factor=None,
+                   args=(), maxiter=1000):
+     """Bracket the root of a monotonic scalar function of one variable
+
+     This function works elementwise when `xl0`, `xr0`, `xmin`, `xmax`, `factor`, and
+     the elements of `args` are broadcastable arrays.
+
+     Parameters
+     ----------
+     func : callable
+         The function for which the root is to be bracketed.
+         The signature must be::
+
+             func(x: ndarray, *args) -> ndarray
+
+         where each element of ``x`` is a finite real and ``args`` is a tuple,
+         which may contain an arbitrary number of arrays that are broadcastable
+         with `x`. ``func`` must be an elementwise function: each element
+         ``func(x)[i]`` must equal ``func(x[i])`` for all indices ``i``.
+     xl0, xr0: float array_like
+         Starting guess of bracket, which need not contain a root. If `xr0` is
+         not provided, ``xr0 = xl0 + 1``. Must be broadcastable with one another.
+     xmin, xmax : float array_like, optional
+         Minimum and maximum allowable endpoints of the bracket, inclusive. Must
+         be broadcastable with `xl0` and `xr0`.
+     factor : float array_like, default: 2
+         The factor used to grow the bracket. See notes for details.
+     args : tuple, optional
+         Additional positional arguments to be passed to `func`. Must be arrays
+         broadcastable with `xl0`, `xr0`, `xmin`, and `xmax`. If the callable to be
+         bracketed requires arguments that are not broadcastable with these
+         arrays, wrap that callable with `func` such that `func` accepts
+         only `x` and broadcastable arrays.
+     maxiter : int, optional
+         The maximum number of iterations of the algorithm to perform.
+
+     Returns
+     -------
+     res : _RichResult
+         An instance of `scipy._lib._util._RichResult` with the following
+         attributes. The descriptions are written as though the values will be
+         scalars; however, if `func` returns an array, the outputs will be
+         arrays of the same shape.
+
+         xl, xr : float
+             The lower and upper ends of the bracket, if the algorithm
+             terminated successfully.
+         fl, fr : float
+             The function value at the lower and upper ends of the bracket.
+         nfev : int
+             The number of function evaluations required to find the bracket.
+             This is distinct from the number of times `func` is *called*
+             because the function may be evaluated at multiple points in a
+             single call.
+         nit : int
+             The number of iterations of the algorithm that were performed.
+         status : int
+             An integer representing the exit status of the algorithm.
+
+             - ``0`` : The algorithm produced a valid bracket.
+             - ``-1`` : The bracket expanded to the allowable limits without finding a bracket.
+             - ``-2`` : The maximum number of iterations was reached.
+             - ``-3`` : A non-finite value was encountered.
+             - ``-4`` : Iteration was terminated by `callback`.
+             - ``1`` : The algorithm is proceeding normally (in `callback` only).
+             - ``2`` : A bracket was found in the opposite search direction (in `callback` only).
+
+         success : bool
+             ``True`` when the algorithm terminated successfully (status ``0``).
+
+     Notes
+     -----
+     This function generalizes an algorithm found in pieces throughout
+     `scipy.stats`. The strategy is to iteratively grow the bracket `(l, r)`
+     until ``func(l) < 0 < func(r)``. The bracket grows to the left as follows.
+
+     - If `xmin` is not provided, the distance between `xl0` and `l` is iteratively
+       increased by `factor`.
+     - If `xmin` is provided, the distance between `xmin` and `l` is iteratively
+       decreased by `factor`. Note that this also *increases* the bracket size.
+
+     Growth of the bracket to the right is analogous.
+
+     Growth of the bracket in one direction stops when the endpoint is no longer
+     finite, the function value at the endpoint is no longer finite, or the
+     endpoint reaches its limiting value (`xmin` or `xmax`). Iteration terminates
+     when the bracket stops growing in both directions, the bracket surrounds
+     the root, or a root is found (accidentally).
+
+     If two brackets are found - that is, a bracket is found on both sides in
+     the same iteration - the smaller of the two is returned.
+     If roots of the function are found, both `l` and `r` are set to the
+     leftmost root.
+
+     """  # noqa: E501
+     # Todo:
+     # - find bracket with sign change in specified direction
+     # - Add tolerance
+     # - allow factor < 1?
+
+     callback = None  # works; I just don't want to test it
+     temp = _bracket_root_iv(func, xl0, xr0, xmin, xmax, factor, args, maxiter)
+     func, xl0, xr0, xmin, xmax, factor, args, maxiter = temp
+
+     xs = (xl0, xr0)
+     temp = eim._initialize(func, xs, args)
+     func, xs, fs, args, shape, dtype = temp  # line split for PEP8
+
+     # The approach is to treat the left and right searches as though they were
+     # (almost) totally independent one-sided bracket searches. (The interaction
+     # is considered when checking for termination and preparing the result
+     # object.)
+     # `x` is the "moving" end of the bracket
+     x = np.concatenate(xs)
+     f = np.concatenate(fs)
+     n = len(x) // 2
+
+     # `x_last` is the previous location of the moving end of the bracket. If
+     # the signs of `f` and `f_last` are different, `x` and `x_last` form a
+     # bracket.
+     x_last = np.concatenate((x[n:], x[:n]))
+     f_last = np.concatenate((f[n:], f[:n]))
+     # `x0` is the "fixed" end of the bracket.
+     x0 = x_last
+     # We don't need to retain the corresponding function value, since the
+     # fixed end of the bracket is only needed to compute the new value of the
+     # moving end; it is never returned.
+
+     xmin = np.broadcast_to(xmin, shape).astype(dtype, copy=False).ravel()
+     xmax = np.broadcast_to(xmax, shape).astype(dtype, copy=False).ravel()
+     limit = np.concatenate((xmin, xmax))
+
+     factor = np.broadcast_to(factor, shape).astype(dtype, copy=False).ravel()
+     factor = np.concatenate((factor, factor))
+
+     active = np.arange(2*n)
+     args = [np.concatenate((arg, arg)) for arg in args]
+
+     # This is needed due to inner workings of `eim._loop`.
+     # We're abusing it a tiny bit.
+     shape = shape + (2,)
+
+     # `d` is for "distance".
+     # For searches without a limit, the distance between the fixed end of the
+     # bracket `x0` and the moving end `x` will grow by `factor` each iteration.
+     # For searches with a limit, the distance between the `limit` and moving
+     # end of the bracket `x` will shrink by `factor` each iteration.
+     i = np.isinf(limit)
+     ni = ~i
+     d = np.zeros_like(x)
+     d[i] = x[i] - x0[i]
+     d[ni] = limit[ni] - x[ni]
+
+     status = np.full_like(x, eim._EINPROGRESS, dtype=int)  # in progress
+     nit, nfev = 0, 1  # one function evaluation per side performed above
+
+     work = _RichResult(x=x, x0=x0, f=f, limit=limit, factor=factor,
+                        active=active, d=d, x_last=x_last, f_last=f_last,
+                        nit=nit, nfev=nfev, status=status, args=args,
+                        xl=None, xr=None, fl=None, fr=None, n=n)
+     res_work_pairs = [('status', 'status'), ('xl', 'xl'), ('xr', 'xr'),
+                       ('nit', 'nit'), ('nfev', 'nfev'), ('fl', 'fl'),
+                       ('fr', 'fr'), ('x', 'x'), ('f', 'f'),
+                       ('x_last', 'x_last'), ('f_last', 'f_last')]
+
+     def pre_func_eval(work):
+         # Initialize moving end of bracket
+         x = np.zeros_like(work.x)
+
+         # Unlimited brackets grow by `factor` by increasing distance from fixed
+         # end to moving end.
+         i = np.isinf(work.limit)  # indices of unlimited brackets
+         work.d[i] *= work.factor[i]
+         x[i] = work.x0[i] + work.d[i]
+
+         # Limited brackets grow by decreasing the distance from the limit to
+         # the moving end.
+         ni = ~i  # indices of limited brackets
+         work.d[ni] /= work.factor[ni]
+         x[ni] = work.limit[ni] - work.d[ni]
+
+         return x
+
+     def post_func_eval(x, f, work):
+         # Keep track of the previous location of the moving end so that we can
+         # return a narrower bracket. (The alternative is to remember the
+         # original fixed end, but then the bracket would be wider than needed.)
+         work.x_last = work.x
+         work.f_last = work.f
+         work.x = x
+         work.f = f
+
+     def check_termination(work):
+         stop = np.zeros_like(work.x, dtype=bool)
+
+         # Condition 1: a valid bracket (or the root itself) has been found
+         sf = np.sign(work.f)
+         sf_last = np.sign(work.f_last)
+         i = (sf_last == -sf) | (sf_last == 0) | (sf == 0)
+         work.status[i] = eim._ECONVERGED
+         stop[i] = True
+
+         # Condition 2: the other side's search found a valid bracket.
+         # (If we just found a bracket with the rightward search, we can stop
+         # the leftward search, and vice-versa.)
+         # To do this, we need to set the status of the other side's search;
+         # this is tricky because `work.status` contains only the *active*
+         # elements, so we don't immediately know the index of the element we
+         # need to set - or even if it's still there. (That search may have
+         # terminated already, e.g. by reaching its `limit`.)
+         # To facilitate this, `work.active` contains a unit integer index of
+         # each search. Index `k` (`k < n`) and `k + n` correspond with a
+         # leftward and rightward search, respectively. Elements are removed
+         # from `work.active` just as they are removed from `work.status`, so
+         # we use `work.active` to help find the right location in
+         # `work.status`.
+         # Get the integer indices of the elements that can also stop
+         also_stop = (work.active[i] + work.n) % (2*work.n)
+         # Check whether they are still active.
+         # To start, we need to find out where in `work.active` they would
+         # appear if they are indeed there.
+         j = np.searchsorted(work.active, also_stop)
+         # If the location exceeds the length of the `work.active`, they are
+         # not there.
+         j = j[j < len(work.active)]
+         # Check whether they are still there.
+         j = j[also_stop == work.active[j]]
+         # Now convert these to boolean indices to use with `work.status`.
+         i = np.zeros_like(stop)
+         i[j] = True  # boolean indices of elements that can also stop
+         i = i & ~stop
+         work.status[i] = _ESTOPONESIDE
+         stop[i] = True
+
+         # Condition 3: moving end of bracket reaches limit
+         i = (work.x == work.limit) & ~stop
+         work.status[i] = _ELIMITS
+         stop[i] = True
+
+         # Condition 4: non-finite value encountered
+         i = ~(np.isfinite(work.x) & np.isfinite(work.f)) & ~stop
+         work.status[i] = eim._EVALUEERR
+         stop[i] = True
+
+         return stop
+
+     def post_termination_check(work):
+         pass
+
+     def customize_result(res, shape):
+         n = len(res['x']) // 2
+
+         # To avoid ambiguity, below we refer to `xl0`, the initial left endpoint
+         # as `a` and `xr0`, the initial right endpoint, as `b`.
+         # Because we treat the two one-sided searches as though they were
+         # independent, what we keep track of in `work` and what we want to
+         # return in `res` look quite different. Combine the results from the
+         # two one-sided searches before reporting the results to the user.
+         # - "a" refers to the leftward search (the moving end started at `a`)
+         # - "b" refers to the rightward search (the moving end started at `b`)
+         # - "l" refers to the left end of the bracket (closer to -oo)
+         # - "r" refers to the right end of the bracket (closer to +oo)
+         xal = res['x'][:n]
+         xar = res['x_last'][:n]
+         xbl = res['x_last'][n:]
+         xbr = res['x'][n:]
+
+         fal = res['f'][:n]
+         far = res['f_last'][:n]
+         fbl = res['f_last'][n:]
+         fbr = res['f'][n:]
+
+         # Initialize the brackets and corresponding function values to return
+         # to the user. Brackets may not be valid (e.g. there is no root,
+         # there weren't enough iterations, NaN encountered), but we still need
+         # to return something. One option would be all NaNs, but what I've
+         # chosen here is the left- and right-most points at which the function
+         # has been evaluated. This gives the user some information about what
+         # interval of the real line has been searched and shows that there is
+         # no sign change between the two ends.
+         xl = xal.copy()
+         fl = fal.copy()
+         xr = xbr.copy()
+         fr = fbr.copy()
+
+         # `status` indicates whether the bracket is valid or not. If so,
+         # we want to adjust the bracket we return to be the narrowest possible
+         # given the points at which we evaluated the function.
+         # For example if bracket "a" is valid and smaller than bracket "b" OR
+         # if bracket "a" is valid and bracket "b" is not valid, we want to
+         # return bracket "a" (and vice versa).
+         sa = res['status'][:n]
+         sb = res['status'][n:]
+
+         da = xar - xal
+         db = xbr - xbl
+
+         i1 = ((da <= db) & (sa == 0)) | ((sa == 0) & (sb != 0))
+         i2 = ((db <= da) & (sb == 0)) | ((sb == 0) & (sa != 0))
+
+         xr[i1] = xar[i1]
+         fr[i1] = far[i1]
+         xl[i2] = xbl[i2]
+         fl[i2] = fbl[i2]
+
+         # Finish assembling the result object
+         res['xl'] = xl
+         res['xr'] = xr
+         res['fl'] = fl
+         res['fr'] = fr
+
+         res['nit'] = np.maximum(res['nit'][:n], res['nit'][n:])
+         res['nfev'] = res['nfev'][:n] + res['nfev'][n:]
+         # If the status on one side is zero, the status is zero. In any case,
+         # report the status from one side only.
+         res['status'] = np.choose(sa == 0, (sb, sa))
+         res['success'] = (res['status'] == 0)
+
+         del res['x']
+         del res['f']
+         del res['x_last']
+         del res['f_last']
+
+         return shape[:-1]
+
+     return eim._loop(work, callback, shape, maxiter, func, args, dtype,
+                      pre_func_eval, post_func_eval, check_termination,
+                      post_termination_check, customize_result, res_work_pairs)
+
+
+ def _bracket_minimum_iv(func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter):
+
+     if not callable(func):
+         raise ValueError('`func` must be callable.')
+
+     if not np.iterable(args):
+         args = (args,)
+
+     xm0 = np.asarray(xm0)[()]
+     if not np.issubdtype(xm0.dtype, np.number) or np.iscomplex(xm0).any():
+         raise ValueError('`xm0` must be numeric and real.')
+
+     xmin = -np.inf if xmin is None else xmin
+     xmax = np.inf if xmax is None else xmax
+
+     xl0_not_supplied = False
+     if xl0 is None:
+         xl0 = xm0 - 0.5
+         xl0_not_supplied = True
+
+     xr0_not_supplied = False
+     if xr0 is None:
+         xr0 = xm0 + 0.5
+         xr0_not_supplied = True
+
+     factor = 2.0 if factor is None else factor
+     xl0, xm0, xr0, xmin, xmax, factor = np.broadcast_arrays(
+         xl0, xm0, xr0, xmin, xmax, factor
+     )
+
+     if not np.issubdtype(xl0.dtype, np.number) or np.iscomplex(xl0).any():
+         raise ValueError('`xl0` must be numeric and real.')
+
+     if not np.issubdtype(xr0.dtype, np.number) or np.iscomplex(xr0).any():
+         raise ValueError('`xr0` must be numeric and real.')
+
+     if not np.issubdtype(xmin.dtype, np.number) or np.iscomplex(xmin).any():
+         raise ValueError('`xmin` must be numeric and real.')
+
+     if not np.issubdtype(xmax.dtype, np.number) or np.iscomplex(xmax).any():
+         raise ValueError('`xmax` must be numeric and real.')
+
+     if not np.issubdtype(factor.dtype, np.number) or np.iscomplex(factor).any():
+         raise ValueError('`factor` must be numeric and real.')
+     if not np.all(factor > 1):
+         raise ValueError('All elements of `factor` must be greater than 1.')
+
+     # Default choices for xl or xr might have exceeded xmin or xmax. Adjust
+     # to make sure this doesn't happen. We replace with copies because xl and xr
+     # are read-only views produced by broadcast_arrays.
+     if xl0_not_supplied:
+         xl0 = xl0.copy()
+         cond = ~np.isinf(xmin) & (xl0 < xmin)
+         xl0[cond] = (
+             xm0[cond] - xmin[cond]
+         ) / np.array(16, dtype=xl0.dtype)
+     if xr0_not_supplied:
+         xr0 = xr0.copy()
+         cond = ~np.isinf(xmax) & (xmax < xr0)
+         xr0[cond] = (
+             xmax[cond] - xm0[cond]
+         ) / np.array(16, dtype=xr0.dtype)
+
+     maxiter = np.asarray(maxiter)
+     message = '`maxiter` must be a non-negative integer.'
+     if (not np.issubdtype(maxiter.dtype, np.number) or maxiter.shape != tuple()
+             or np.iscomplex(maxiter)):
+         raise ValueError(message)
+     maxiter_int = int(maxiter[()])
+     if not maxiter == maxiter_int or maxiter < 0:
+         raise ValueError(message)
+
+     if not np.all((xmin <= xl0) & (xl0 < xm0) & (xm0 < xr0) & (xr0 <= xmax)):
+         raise ValueError(
+             '`xmin <= xl0 < xm0 < xr0 <= xmax` must be True (elementwise).'
+         )
+
+     return func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter
+
+
+ def _bracket_minimum(func, xm0, *, xl0=None, xr0=None, xmin=None, xmax=None,
+                      factor=None, args=(), maxiter=1000):
+     """Bracket the minimum of a unimodal scalar function of one variable
+
+     This function works elementwise when `xm0`, `xl0`, `xr0`, `xmin`, `xmax`,
+     and the elements of `args` are broadcastable arrays.
+
+     Parameters
+     ----------
+     func : callable
+         The function for which the minimum is to be bracketed.
+         The signature must be::
+
+             func(x: ndarray, *args) -> ndarray
+
+         where each element of ``x`` is a finite real and ``args`` is a tuple,
+         which may contain an arbitrary number of arrays that are broadcastable
+         with ``x``. `func` must be an elementwise function: each element
+         ``func(x)[i]`` must equal ``func(x[i])`` for all indices `i`.
+     xm0: float array_like
+         Starting guess for middle point of bracket.
+     xl0, xr0: float array_like, optional
+         Starting guesses for left and right endpoints of the bracket. Must be
+         broadcastable with one another and with `xm0`.
+     xmin, xmax : float array_like, optional
+         Minimum and maximum allowable endpoints of the bracket, inclusive. Must
+         be broadcastable with `xl0`, `xm0`, and `xr0`.
+     factor : float array_like, optional
+         Controls expansion of bracket endpoint in downhill direction. Works
+         differently in the cases where a limit is set in the downhill direction
+         with `xmax` or `xmin`. See Notes.
+     args : tuple, optional
+         Additional positional arguments to be passed to `func`. Must be arrays
+         broadcastable with `xl0`, `xm0`, `xr0`, `xmin`, and `xmax`. If the
+         callable to be bracketed requires arguments that are not broadcastable
+         with these arrays, wrap that callable with `func` such that `func`
+         accepts only ``x`` and broadcastable arrays.
+     maxiter : int, optional
+         The maximum number of iterations of the algorithm to perform. The number
+         of function evaluations is three greater than the number of iterations.
+
+     Returns
+     -------
+     res : _RichResult
+         An instance of `scipy._lib._util._RichResult` with the following
+         attributes. The descriptions are written as though the values will be
+         scalars; however, if `func` returns an array, the outputs will be
+         arrays of the same shape.
+
+         xl, xm, xr : float
+             The left, middle, and right points of the bracket, if the algorithm
+             terminated successfully.
+         fl, fm, fr : float
+             The function value at the left, middle, and right points of the bracket.
+         nfev : int
+             The number of function evaluations required to find the bracket.
+         nit : int
+             The number of iterations of the algorithm that were performed.
+         status : int
+             An integer representing the exit status of the algorithm.
+
+             - ``0`` : The algorithm produced a valid bracket.
+             - ``-1`` : The bracket expanded to the allowable limits. Assuming
+               unimodality, this implies the endpoint at the limit is a
+               minimizer.
+             - ``-2`` : The maximum number of iterations was reached.
+             - ``-3`` : A non-finite value was encountered.
+
+         success : bool
+             ``True`` when the algorithm terminated successfully (status ``0``).
+
+     Notes
+     -----
+     Similar to `scipy.optimize.bracket`, this function seeks to find real
+     points ``xl < xm < xr`` such that ``f(xl) >= f(xm)`` and ``f(xr) >= f(xm)``,
+     where at least one of the inequalities is strict. Unlike `scipy.optimize.bracket`,
+     this function can operate in a vectorized manner on array input, so long as
+     the input arrays are broadcastable with each other. Also unlike
+     `scipy.optimize.bracket`, users may specify minimum and maximum endpoints
+     for the desired bracket.
+
+     Given an initial trio of points ``xl = xl0``, ``xm = xm0``, ``xr = xr0``,
+     the algorithm checks if these points already give a valid bracket. If not,
+     a new endpoint ``w`` is chosen in the "downhill" direction, ``xm`` becomes the new
+     opposite endpoint, and either `xl` or `xr` becomes the new middle point,
+     depending on which direction is downhill. The algorithm repeats from here.
+
+     The new endpoint `w` is chosen differently depending on whether or not a
+     boundary `xmin` or `xmax` has been set in the downhill direction. Without
+     loss of generality, suppose the downhill direction is to the right, so that
+     ``f(xl) > f(xm) > f(xr)``. If there is no boundary to the right, then `w`
+     is chosen to be ``xr + factor * (xr - xm)`` where `factor` is controlled by
+     the user (defaults to 2.0) so that step sizes increase in geometric proportion.
+     If there is a boundary, `xmax` in this case, then `w` is chosen to be
+     ``xmax - (xmax - xr)/factor``, with steps slowing to a stop at
+     `xmax`. This cautious approach ensures that a minimum near but distinct from
+     the boundary isn't missed while also detecting whether or not `xmax` is
+     a minimizer when `xmax` is reached after a finite number of steps.
+     """  # noqa: E501
+     callback = None  # works; I just don't want to test it
+
+     temp = _bracket_minimum_iv(func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter)
+     func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter = temp
+
+     xs = (xl0, xm0, xr0)
+     func, xs, fs, args, shape, dtype = eim._initialize(func, xs, args)
+
+     xl0, xm0, xr0 = xs
+     fl0, fm0, fr0 = fs
+     xmin = np.broadcast_to(xmin, shape).astype(dtype, copy=False).ravel()
+     xmax = np.broadcast_to(xmax, shape).astype(dtype, copy=False).ravel()
+     # We will modify factor later on so make a copy. np.broadcast_to returns
+     # a read-only view.
+     factor = np.broadcast_to(factor, shape).astype(dtype, copy=True).ravel()
+
+     # To simplify the logic, swap xl and xr if f(xl) < f(xr). We should always be
+     # marching downhill in the direction from xl to xr.
+     comp = fl0 < fr0
+     xl0[comp], xr0[comp] = xr0[comp], xl0[comp]
+     fl0[comp], fr0[comp] = fr0[comp], fl0[comp]
+     # We only need the boundary in the direction we're traveling.
+     limit = np.where(comp, xmin, xmax)
+
+     unlimited = np.isinf(limit)
+     limited = ~unlimited
+     step = np.empty_like(xl0)
+
+     step[unlimited] = (xr0[unlimited] - xm0[unlimited])
+     step[limited] = (limit[limited] - xr0[limited])
+
+     # Step size is divided by factor for case where there is a limit.
+     factor[limited] = 1 / factor[limited]
+
+     status = np.full_like(xl0, eim._EINPROGRESS, dtype=int)
+     nit, nfev = 0, 3
+
+     work = _RichResult(xl=xl0, xm=xm0, xr=xr0, xr0=xr0, fl=fl0, fm=fm0, fr=fr0,
+                        step=step, limit=limit, limited=limited, factor=factor, nit=nit,
+                        nfev=nfev, status=status, args=args)
+
+     res_work_pairs = [('status', 'status'), ('xl', 'xl'), ('xm', 'xm'), ('xr', 'xr'),
+                       ('nit', 'nit'), ('nfev', 'nfev'), ('fl', 'fl'), ('fm', 'fm'),
+                       ('fr', 'fr')]
+
+     def pre_func_eval(work):
+         work.step *= work.factor
+         x = np.empty_like(work.xr)
+         x[~work.limited] = work.xr0[~work.limited] + work.step[~work.limited]
+         x[work.limited] = work.limit[work.limited] - work.step[work.limited]
+         # Since the new bracket endpoint is calculated from an offset with the
+         # limit, it may be the case that the new endpoint equals the old endpoint,
+         # when the old endpoint is sufficiently close to the limit. We use the
+         # limit itself as the new endpoint in these cases.
+         x[work.limited] = np.where(
+             x[work.limited] == work.xr[work.limited],
+             work.limit[work.limited],
+             x[work.limited],
+         )
+         return x
+
+     def post_func_eval(x, f, work):
+         work.xl, work.xm, work.xr = work.xm, work.xr, x
+         work.fl, work.fm, work.fr = work.fm, work.fr, f
+
+     def check_termination(work):
+         # Condition 1: A valid bracket has been found.
+         stop = (
+             (work.fl >= work.fm) & (work.fr > work.fm)
+             | (work.fl > work.fm) & (work.fr >= work.fm)
+         )
+         work.status[stop] = eim._ECONVERGED
+
+         # Condition 2: Moving end of bracket reaches limit.
+         i = (work.xr == work.limit) & ~stop
+         work.status[i] = _ELIMITS
+         stop[i] = True
+
+         # Condition 3: non-finite value encountered
+         i = ~(np.isfinite(work.xr) & np.isfinite(work.fr)) & ~stop
+         work.status[i] = eim._EVALUEERR
+         stop[i] = True
+
+         return stop
+
+     def post_termination_check(work):
+         pass
+
+     def customize_result(res, shape):
+         # Reorder entries of xl and xr if they were swapped due to f(xl0) < f(xr0).
+         comp = res['xl'] > res['xr']
+         res['xl'][comp], res['xr'][comp] = res['xr'][comp], res['xl'][comp]
+         res['fl'][comp], res['fr'][comp] = res['fr'][comp], res['fl'][comp]
+         return shape
+
+     return eim._loop(work, callback, shape,
+                      maxiter, func, args, dtype,
+                      pre_func_eval, post_func_eval,
+                      check_termination, post_termination_check,
+                      customize_result, res_work_pairs)
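
A brief sketch of calling these helpers elementwise, following the docstrings above. The function and starting guesses are illustrative, and since these are private, underscore-prefixed APIs, the import path and behavior may change between SciPy versions:

    # Elementwise bracketing with the private helpers; values are illustrative.
    import numpy as np
    from scipy.optimize._bracket import _bracket_root, _bracket_minimum

    def f(x, c):
        return x**2 - c  # roots at +/- sqrt(c); minimum at x = 0

    c = np.array([1.0, 4.0, 9.0])
    res_root = _bracket_root(f, xl0=0.1, args=(c,))  # one bracket per element of c
    res_min = _bracket_minimum(f, xm0=np.array([0.5, -0.5, 2.0]), args=(c,))
    print(res_root.xl, res_root.xr)  # elementwise brackets around sqrt(c)
    print(res_min.xl, res_min.xm, res_min.xr)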
venv/lib/python3.10/site-packages/scipy/optimize/_chandrupatla.py ADDED
@@ -0,0 +1,524 @@
+ import numpy as np
+ from ._zeros_py import _xtol, _rtol, _iter
+ import scipy._lib._elementwise_iterative_method as eim
+ from scipy._lib._util import _RichResult
+
+ def _chandrupatla(func, a, b, *, args=(), xatol=_xtol, xrtol=_rtol,
+                   fatol=None, frtol=0, maxiter=_iter, callback=None):
+     """Find the root of an elementwise function using Chandrupatla's algorithm.
+
+     For each element of the output of `func`, `_chandrupatla` seeks the scalar
+     root that makes the element 0. This function allows for `a`, `b`, and the
+     output of `func` to be of any broadcastable shapes.
+
+     Parameters
+     ----------
+     func : callable
+         The function whose root is desired. The signature must be::
+
+             func(x: ndarray, *args) -> ndarray
+
+         where each element of ``x`` is a finite real and ``args`` is a tuple,
+         which may contain an arbitrary number of components of any type(s).
+         ``func`` must be an elementwise function: each element ``func(x)[i]``
+         must equal ``func(x[i])`` for all indices ``i``. `_chandrupatla`
+         seeks an array ``x`` such that ``func(x)`` is an array of zeros.
+     a, b : array_like
+         The lower and upper bounds of the root of the function. Must be
+         broadcastable with one another.
+     args : tuple, optional
+         Additional positional arguments to be passed to `func`.
+     xatol, xrtol, fatol, frtol : float, optional
+         Absolute and relative tolerances on the root and function value.
+         See Notes for details.
+     maxiter : int, optional
+         The maximum number of iterations of the algorithm to perform.
+     callback : callable, optional
+         An optional user-supplied function to be called before the first
+         iteration and after each iteration.
+         Called as ``callback(res)``, where ``res`` is a ``_RichResult``
+         similar to that returned by `_chandrupatla` (but containing the current
+         iterate's values of all variables). If `callback` raises a
+         ``StopIteration``, the algorithm will terminate immediately and
+         `_chandrupatla` will return a result.
+
+     Returns
+     -------
+     res : _RichResult
+         An instance of `scipy._lib._util._RichResult` with the following
+         attributes. The descriptions are written as though the values will be
+         scalars; however, if `func` returns an array, the outputs will be
+         arrays of the same shape.
+
+         x : float
+             The root of the function, if the algorithm terminated successfully.
+         nfev : int
+             The number of times the function was called to find the root.
+         nit : int
+             The number of iterations of Chandrupatla's algorithm performed.
+         status : int
+             An integer representing the exit status of the algorithm.
+
+             ``0`` : The algorithm converged to the specified tolerances.
+             ``-1`` : The algorithm encountered an invalid bracket.
+             ``-2`` : The maximum number of iterations was reached.
+             ``-3`` : A non-finite value was encountered.
+             ``-4`` : Iteration was terminated by `callback`.
+             ``1`` : The algorithm is proceeding normally (in `callback` only).
+         success : bool
+             ``True`` when the algorithm terminated successfully (status ``0``).
+         fun : float
+             The value of `func` evaluated at `x`.
+         xl, xr : float
+             The lower and upper ends of the bracket.
+         fl, fr : float
+             The function value at the lower and upper ends of the bracket.
+
+     Notes
+     -----
+     Implemented based on Chandrupatla's original paper [1]_.
+
+     If ``xl`` and ``xr`` are the left and right ends of the bracket,
+     ``xmin = xl if abs(func(xl)) <= abs(func(xr)) else xr``,
+     and ``fmin0 = min(func(a), func(b))``, then the algorithm is considered to
+     have converged when ``abs(xr - xl) < xatol + abs(xmin) * xrtol`` or
+     ``fun(xmin) <= fatol + abs(fmin0) * frtol``. This is equivalent to the
+     termination condition described in [1]_ with ``xrtol = 4e-10``,
+     ``xatol = 1e-5``, and ``fatol = frtol = 0``. The default values are
+     ``xatol = 2e-12``, ``xrtol = 4 * np.finfo(float).eps``, ``frtol = 0``,
+     and ``fatol`` is the smallest normal number of the ``dtype`` returned
+     by ``func``.
+
+     References
+     ----------
+
+     .. [1] Chandrupatla, Tirupathi R.
+         "A new hybrid quadratic/bisection algorithm for finding the zero of a
+         nonlinear function without using derivatives".
+         Advances in Engineering Software, 28(3), 145-149.
+         https://doi.org/10.1016/s0965-9978(96)00051-8
+
+     See Also
+     --------
+     brentq, brenth, ridder, bisect, newton
+
+     Examples
+     --------
+     >>> from scipy import optimize
+     >>> def f(x, c):
+     ...     return x**3 - 2*x - c
+     >>> c = 5
+     >>> res = optimize._chandrupatla._chandrupatla(f, 0, 3, args=(c,))
111
+ >>> res.x
112
+ 2.0945514818937463
113
+
114
+ >>> c = [3, 4, 5]
115
+ >>> res = optimize._chandrupatla._chandrupatla(f, 0, 3, args=(c,))
116
+ >>> res.x
117
+ array([1.8932892 , 2. , 2.09455148])
118
+
119
+ """
120
+ res = _chandrupatla_iv(func, args, xatol, xrtol,
121
+ fatol, frtol, maxiter, callback)
122
+ func, args, xatol, xrtol, fatol, frtol, maxiter, callback = res
123
+
124
+ # Initialization
125
+ temp = eim._initialize(func, (a, b), args)
126
+ func, xs, fs, args, shape, dtype = temp
127
+ x1, x2 = xs
128
+ f1, f2 = fs
129
+ status = np.full_like(x1, eim._EINPROGRESS, dtype=int) # in progress
130
+ nit, nfev = 0, 2 # two function evaluations performed above
131
+ xatol = _xtol if xatol is None else xatol
132
+ xrtol = _rtol if xrtol is None else xrtol
133
+ fatol = np.finfo(dtype).tiny if fatol is None else fatol
134
+ frtol = frtol * np.minimum(np.abs(f1), np.abs(f2))
135
+ work = _RichResult(x1=x1, f1=f1, x2=x2, f2=f2, x3=None, f3=None, t=0.5,
136
+ xatol=xatol, xrtol=xrtol, fatol=fatol, frtol=frtol,
137
+ nit=nit, nfev=nfev, status=status)
138
+ res_work_pairs = [('status', 'status'), ('x', 'xmin'), ('fun', 'fmin'),
139
+ ('nit', 'nit'), ('nfev', 'nfev'), ('xl', 'x1'),
140
+ ('fl', 'f1'), ('xr', 'x2'), ('fr', 'f2')]
141
+
142
+ def pre_func_eval(work):
143
+ # [1] Figure 1 (first box)
144
+ x = work.x1 + work.t * (work.x2 - work.x1)
145
+ return x
146
+
147
+ def post_func_eval(x, f, work):
148
+ # [1] Figure 1 (first diamond and boxes)
149
+ # Note: y/n are reversed in figure; compare to BASIC in appendix
150
+ work.x3, work.f3 = work.x2.copy(), work.f2.copy()
151
+ j = np.sign(f) == np.sign(work.f1)
152
+ nj = ~j
153
+ work.x3[j], work.f3[j] = work.x1[j], work.f1[j]
154
+ work.x2[nj], work.f2[nj] = work.x1[nj], work.f1[nj]
155
+ work.x1, work.f1 = x, f
156
+
157
+ def check_termination(work):
158
+ # [1] Figure 1 (second diamond)
159
+ # Check for all terminal conditions and record statuses.
160
+
161
+ # See [1] Section 4 (first two sentences)
162
+ i = np.abs(work.f1) < np.abs(work.f2)
163
+ work.xmin = np.choose(i, (work.x2, work.x1))
164
+ work.fmin = np.choose(i, (work.f2, work.f1))
165
+ stop = np.zeros_like(work.x1, dtype=bool) # termination condition met
166
+
167
+ # This is the convergence criterion used in bisect. Chandrupatla's
168
+ # criterion is equivalent to this except with a factor of 4 on `xrtol`.
169
+ work.dx = abs(work.x2 - work.x1)
170
+ work.tol = abs(work.xmin) * work.xrtol + work.xatol
171
+ i = work.dx < work.tol
172
+ # Modify in place to incorporate tolerance on function value. Note that
173
+ # `frtol` has been redefined as `frtol = frtol * np.minimum(f1, f2)`,
174
+ # where `f1` and `f2` are the function evaluated at the original ends of
175
+ # the bracket.
176
+ i |= np.abs(work.fmin) <= work.fatol + work.frtol
177
+ work.status[i] = eim._ECONVERGED
178
+ stop[i] = True
179
+
180
+ i = (np.sign(work.f1) == np.sign(work.f2)) & ~stop
181
+ work.xmin[i], work.fmin[i], work.status[i] = np.nan, np.nan, eim._ESIGNERR
182
+ stop[i] = True
183
+
184
+ i = ~((np.isfinite(work.x1) & np.isfinite(work.x2)
185
+ & np.isfinite(work.f1) & np.isfinite(work.f2)) | stop)
186
+ work.xmin[i], work.fmin[i], work.status[i] = np.nan, np.nan, eim._EVALUEERR
187
+ stop[i] = True
188
+
189
+ return stop
190
+
191
+ def post_termination_check(work):
192
+ # [1] Figure 1 (third diamond and boxes / Equation 1)
193
+ xi1 = (work.x1 - work.x2) / (work.x3 - work.x2)
194
+ phi1 = (work.f1 - work.f2) / (work.f3 - work.f2)
195
+ alpha = (work.x3 - work.x1) / (work.x2 - work.x1)
196
+ j = ((1 - np.sqrt(1 - xi1)) < phi1) & (phi1 < np.sqrt(xi1))
197
+
198
+ f1j, f2j, f3j, alphaj = work.f1[j], work.f2[j], work.f3[j], alpha[j]
199
+ t = np.full_like(alpha, 0.5)
200
+ t[j] = (f1j / (f1j - f2j) * f3j / (f3j - f2j)
201
+ - alphaj * f1j / (f3j - f1j) * f2j / (f2j - f3j))
202
+
203
+ # [1] Figure 1 (last box; see also BASIC in appendix with comment
204
+ # "Adjust T Away from the Interval Boundary")
205
+ tl = 0.5 * work.tol / work.dx
206
+ work.t = np.clip(t, tl, 1 - tl)
207
+
208
+ def customize_result(res, shape):
209
+ xl, xr, fl, fr = res['xl'], res['xr'], res['fl'], res['fr']
210
+ i = res['xl'] < res['xr']
211
+ res['xl'] = np.choose(i, (xr, xl))
212
+ res['xr'] = np.choose(i, (xl, xr))
213
+ res['fl'] = np.choose(i, (fr, fl))
214
+ res['fr'] = np.choose(i, (fl, fr))
215
+ return shape
216
+
217
+ return eim._loop(work, callback, shape, maxiter, func, args, dtype,
218
+ pre_func_eval, post_func_eval, check_termination,
219
+ post_termination_check, customize_result, res_work_pairs)
220
+
221
+
222
+ def _chandrupatla_iv(func, args, xatol, xrtol,
223
+ fatol, frtol, maxiter, callback):
224
+ # Input validation for `_chandrupatla`
225
+
226
+ if not callable(func):
227
+ raise ValueError('`func` must be callable.')
228
+
229
+ if not np.iterable(args):
230
+ args = (args,)
231
+
232
+ tols = np.asarray([xatol if xatol is not None else 1,
233
+ xrtol if xrtol is not None else 1,
234
+ fatol if fatol is not None else 1,
235
+ frtol if frtol is not None else 1])
236
+ if (not np.issubdtype(tols.dtype, np.number) or np.any(tols < 0)
237
+ or np.any(np.isnan(tols)) or tols.shape != (4,)):
238
+ raise ValueError('Tolerances must be non-negative scalars.')
239
+
240
+ maxiter_int = int(maxiter)
241
+ if maxiter != maxiter_int or maxiter < 0:
242
+ raise ValueError('`maxiter` must be a non-negative integer.')
243
+
244
+ if callback is not None and not callable(callback):
245
+ raise ValueError('`callback` must be callable.')
246
+
247
+ return func, args, xatol, xrtol, fatol, frtol, maxiter, callback
248
+
249
+
250
+ def _chandrupatla_minimize(func, x1, x2, x3, *, args=(), xatol=None,
251
+ xrtol=None, fatol=None, frtol=None, maxiter=100,
252
+ callback=None):
253
+ """Find the minimizer of an elementwise function.
254
+
255
+ For each element of the output of `func`, `_chandrupatla_minimize` seeks
256
+ the scalar minimizer that minimizes the element. This function allows for
257
+ `x1`, `x2`, `x3`, and the elements of `args` to be arrays of any
258
+ broadcastable shapes.
259
+
260
+ Parameters
261
+ ----------
262
+ func : callable
263
+ The function whose minimizer is desired. The signature must be::
264
+
265
+ func(x: ndarray, *args) -> ndarray
266
+
267
+ where each element of ``x`` is a finite real and ``args`` is a tuple,
268
+ which may contain an arbitrary number of arrays that are broadcastable
269
+ with `x`. ``func`` must be an elementwise function: each element
270
+ ``func(x)[i]`` must equal ``func(x[i])`` for all indices ``i``.
271
+ `_chandrupatla_minimize` seeks an array ``x`` such that ``func(x)`` is an
272
+ array of local minima.
273
+ x1, x2, x3 : array_like
274
+ The abscissae of a standard scalar minimization bracket. A bracket is
275
+ valid if ``x1 < x2 < x3`` and ``func(x1) > func(x2) <= func(x3)``.
276
+ Must be broadcastable with one another and `args`.
277
+ args : tuple, optional
278
+ Additional positional arguments to be passed to `func`. Must be arrays
279
+ broadcastable with `x1`, `x2`, and `x3`. If the callable to be
280
+ minimized requires arguments that are not broadcastable with `x`,
281
+ wrap that callable with `func` such that `func` accepts only `x` and
282
+ broadcastable arrays.
283
+ xatol, xrtol, fatol, frtol : float, optional
284
+ Absolute and relative tolerances on the minimizer and function value.
285
+ See Notes for details.
286
+ maxiter : int, optional
287
+ The maximum number of iterations of the algorithm to perform.
288
+ callback : callable, optional
289
+ An optional user-supplied function to be called before the first
290
+ iteration and after each iteration.
291
+ Called as ``callback(res)``, where ``res`` is a ``_RichResult``
292
+ similar to that returned by `_chandrupatla_minimize` (but containing
293
+ the current iterate's values of all variables). If `callback` raises a
294
+ ``StopIteration``, the algorithm will terminate immediately and
295
+ `_chandrupatla_minimize` will return a result.
296
+
297
+ Returns
298
+ -------
299
+ res : _RichResult
300
+ An instance of `scipy._lib._util._RichResult` with the following
301
+ attributes. (The descriptions are written as though the values will be
302
+ scalars; however, if `func` returns an array, the outputs will be
303
+ arrays of the same shape.)
304
+
305
+ success : bool
306
+ ``True`` when the algorithm terminated successfully (status ``0``).
307
+ status : int
308
+ An integer representing the exit status of the algorithm.
309
+ ``0`` : The algorithm converged to the specified tolerances.
310
+ ``-1`` : The algorithm encountered an invalid bracket.
311
+ ``-2`` : The maximum number of iterations was reached.
312
+ ``-3`` : A non-finite value was encountered.
313
+ ``-4`` : Iteration was terminated by `callback`.
314
+ ``1`` : The algorithm is proceeding normally (in `callback` only).
315
+ x : float
316
+ The minimizer of the function, if the algorithm terminated
317
+ successfully.
318
+ fun : float
319
+ The value of `func` evaluated at `x`.
320
+ nfev : int
321
+ The number of points at which `func` was evaluated.
322
+ nit : int
323
+ The number of iterations of the algorithm that were performed.
324
+ xl, xm, xr : float
325
+ The final three-point bracket.
326
+ fl, fm, fr : float
327
+ The function value at the bracket points.
328
+
329
+ Notes
330
+ -----
331
+ Implemented based on Chandrupatla's original paper [1]_.
332
+
333
+ If ``x1 < x2 < x3`` are the points of the bracket and ``f1 > f2 <= f3``
334
+ are the values of ``func`` at those points, then the algorithm is
335
+ considered to have converged when ``x3 - x1 <= abs(x2)*xrtol + xatol``
336
+ or ``(f1 - 2*f2 + f3)/2 <= abs(f2)*frtol + fatol``. Note that first of
337
+ these differs from the termination conditions described in [1]_. The
338
+ default values of `xrtol` is the square root of the precision of the
339
+ appropriate dtype, and ``xatol=fatol = frtol`` is the smallest normal
340
+ number of the appropriate dtype.
341
+
342
+ References
343
+ ----------
344
+ .. [1] Chandrupatla, Tirupathi R. (1998).
345
+ "An efficient quadratic fit-sectioning algorithm for minimization
346
+ without derivatives".
347
+ Computer Methods in Applied Mechanics and Engineering, 152 (1-2),
348
+ 211-217. https://doi.org/10.1016/S0045-7825(97)00190-4
349
+
350
+ See Also
351
+ --------
352
+ golden, brent, bounded
353
+
354
+ Examples
355
+ --------
356
+ >>> from scipy.optimize._chandrupatla import _chandrupatla_minimize
357
+ >>> def f(x, args=1):
358
+ ... return (x - args)**2
359
+ >>> res = _chandrupatla_minimize(f, -5, 0, 5)
360
+ >>> res.x
361
+ 1.0
362
+ >>> c = [1, 1.5, 2]
363
+ >>> res = _chandrupatla_minimize(f, -5, 0, 5, args=(c,))
364
+ >>> res.x
365
+ array([1. , 1.5, 2. ])
366
+ """
367
+ res = _chandrupatla_iv(func, args, xatol, xrtol,
368
+ fatol, frtol, maxiter, callback)
369
+ func, args, xatol, xrtol, fatol, frtol, maxiter, callback = res
370
+
371
+ # Initialization
372
+ xs = (x1, x2, x3)
373
+ temp = eim._initialize(func, xs, args)
374
+ func, xs, fs, args, shape, dtype = temp # line split for PEP8
375
+ x1, x2, x3 = xs
376
+ f1, f2, f3 = fs
377
+ phi = dtype.type(0.5 + 0.5*5**0.5) # golden ratio
378
+ status = np.full_like(x1, eim._EINPROGRESS, dtype=int) # in progress
379
+ nit, nfev = 0, 3 # three function evaluations performed above
380
+ fatol = np.finfo(dtype).tiny if fatol is None else fatol
381
+ frtol = np.finfo(dtype).tiny if frtol is None else frtol
382
+ xatol = np.finfo(dtype).tiny if xatol is None else xatol
383
+ xrtol = np.sqrt(np.finfo(dtype).eps) if xrtol is None else xrtol
384
+
385
+ # Ensure that x1 < x2 < x3 initially.
386
+ xs, fs = np.vstack((x1, x2, x3)), np.vstack((f1, f2, f3))
387
+ i = np.argsort(xs, axis=0)
388
+ x1, x2, x3 = np.take_along_axis(xs, i, axis=0)
389
+ f1, f2, f3 = np.take_along_axis(fs, i, axis=0)
390
+ q0 = x3.copy() # "At the start, q0 is set at x3..." ([1] after (7))
391
+
392
+ work = _RichResult(x1=x1, f1=f1, x2=x2, f2=f2, x3=x3, f3=f3, phi=phi,
393
+ xatol=xatol, xrtol=xrtol, fatol=fatol, frtol=frtol,
394
+ nit=nit, nfev=nfev, status=status, q0=q0, args=args)
395
+ res_work_pairs = [('status', 'status'),
396
+ ('x', 'x2'), ('fun', 'f2'),
397
+ ('nit', 'nit'), ('nfev', 'nfev'),
398
+ ('xl', 'x1'), ('xm', 'x2'), ('xr', 'x3'),
399
+ ('fl', 'f1'), ('fm', 'f2'), ('fr', 'f3')]
400
+
401
+ def pre_func_eval(work):
402
+ # `_check_termination` is called first -> `x3 - x2 > x2 - x1`
403
+ # But let's calculate a few terms that we'll reuse
404
+ x21 = work.x2 - work.x1
405
+ x32 = work.x3 - work.x2
406
+
407
+ # [1] Section 3. "The quadratic minimum point Q1 is calculated using
408
+ # the relations developed in the previous section." [1] Section 2 (5/6)
409
+ A = x21 * (work.f3 - work.f2)
410
+ B = x32 * (work.f1 - work.f2)
411
+ C = A / (A + B)
412
+ # q1 = C * (work.x1 + work.x2) / 2 + (1 - C) * (work.x2 + work.x3) / 2
413
+ q1 = 0.5 * (C*(work.x1 - work.x3) + work.x2 + work.x3) # much faster
414
+ # this is an array, so multiplying by 0.5 does not change dtype
415
+
416
+ # "If Q1 and Q0 are sufficiently close... Q1 is accepted if it is
417
+ # sufficiently away from the inside point x2"
418
+ i = abs(q1 - work.q0) < 0.5 * abs(x21) # [1] (7)
419
+ xi = q1[i]
420
+ # Later, after (9), "If the point Q1 is in a +/- xtol neighborhood of
421
+ # x2, the new point is chosen in the larger interval at a distance
422
+ # tol away from x2."
423
+ # See also QBASIC code after "Accept Ql adjust if close to X2".
424
+ j = abs(q1[i] - work.x2[i]) <= work.xtol[i]
425
+ xi[j] = work.x2[i][j] + np.sign(x32[i][j]) * work.xtol[i][j]
426
+
427
+ # "If condition (7) is not satisfied, golden sectioning of the larger
428
+ # interval is carried out to introduce the new point."
429
+ # (For simplicity, we go ahead and calculate it for all points, but we
430
+ # change the elements for which the condition was satisfied.)
431
+ x = work.x2 + (2 - work.phi) * x32
432
+ x[i] = xi
433
+
434
+ # "We define Q0 as the value of Q1 at the previous iteration."
435
+ work.q0 = q1
436
+ return x
437
+
438
+ def post_func_eval(x, f, work):
439
+ # Standard logic for updating a three-point bracket based on a new
440
+ # point. In QBASIC code, see "IF SGN(X-X2) = SGN(X3-X2) THEN...".
441
+ # There is an awful lot of data copying going on here; this would
442
+ # probably benefit from code optimization or implementation in Pythran.
443
+ i = np.sign(x - work.x2) == np.sign(work.x3 - work.x2)
444
+ xi, x1i, x2i, x3i = x[i], work.x1[i], work.x2[i], work.x3[i],
445
+ fi, f1i, f2i, f3i = f[i], work.f1[i], work.f2[i], work.f3[i]
446
+ j = fi > f2i
447
+ x3i[j], f3i[j] = xi[j], fi[j]
448
+ j = ~j
449
+ x1i[j], f1i[j], x2i[j], f2i[j] = x2i[j], f2i[j], xi[j], fi[j]
450
+
451
+ ni = ~i
452
+ xni, x1ni, x2ni, x3ni = x[ni], work.x1[ni], work.x2[ni], work.x3[ni],
453
+ fni, f1ni, f2ni, f3ni = f[ni], work.f1[ni], work.f2[ni], work.f3[ni]
454
+ j = fni > f2ni
455
+ x1ni[j], f1ni[j] = xni[j], fni[j]
456
+ j = ~j
457
+ x3ni[j], f3ni[j], x2ni[j], f2ni[j] = x2ni[j], f2ni[j], xni[j], fni[j]
458
+
459
+ work.x1[i], work.x2[i], work.x3[i] = x1i, x2i, x3i
460
+ work.f1[i], work.f2[i], work.f3[i] = f1i, f2i, f3i
461
+ work.x1[ni], work.x2[ni], work.x3[ni] = x1ni, x2ni, x3ni,
462
+ work.f1[ni], work.f2[ni], work.f3[ni] = f1ni, f2ni, f3ni
463
+
464
+ def check_termination(work):
465
+ # Check for all terminal conditions and record statuses.
466
+ stop = np.zeros_like(work.x1, dtype=bool) # termination condition met
467
+
468
+ # Bracket is invalid; stop and don't return minimizer/minimum
469
+ i = ((work.f2 > work.f1) | (work.f2 > work.f3))
470
+ work.x2[i], work.f2[i] = np.nan, np.nan
471
+ stop[i], work.status[i] = True, eim._ESIGNERR
472
+
473
+ # Non-finite values; stop and don't return minimizer/minimum
474
+ finite = np.isfinite(work.x1+work.x2+work.x3+work.f1+work.f2+work.f3)
475
+ i = ~(finite | stop)
476
+ work.x2[i], work.f2[i] = np.nan, np.nan
477
+ stop[i], work.status[i] = True, eim._EVALUEERR
478
+
479
+ # [1] Section 3 "Points 1 and 3 are interchanged if necessary to make
480
+ # the (x2, x3) the larger interval."
481
+ # Note: I had used np.choose; this is much faster. This would be a good
482
+ # place to save e.g. `work.x3 - work.x2` for reuse, but I tried and
483
+ # didn't notice a speed boost, so let's keep it simple.
484
+ i = abs(work.x3 - work.x2) < abs(work.x2 - work.x1)
485
+ temp = work.x1[i]
486
+ work.x1[i] = work.x3[i]
487
+ work.x3[i] = temp
488
+ temp = work.f1[i]
489
+ work.f1[i] = work.f3[i]
490
+ work.f3[i] = temp
491
+
492
+ # [1] Section 3 (bottom of page 212)
493
+ # "We set a tolerance value xtol..."
494
+ work.xtol = abs(work.x2) * work.xrtol + work.xatol # [1] (8)
495
+ # "The convergence based on interval is achieved when..."
496
+ # Note: Equality allowed in case of `xtol=0`
497
+ i = abs(work.x3 - work.x2) <= 2 * work.xtol # [1] (9)
498
+
499
+ # "We define ftol using..."
500
+ ftol = abs(work.f2) * work.frtol + work.fatol # [1] (10)
501
+ # "The convergence based on function values is achieved when..."
502
+ # Note 1: modify in place to incorporate tolerance on function value.
503
+ # Note 2: factor of 2 is not in the text; see QBASIC start of DO loop
504
+ i |= (work.f1 - 2 * work.f2 + work.f3) <= 2*ftol # [1] (11)
505
+ i &= ~stop
506
+ stop[i], work.status[i] = True, eim._ECONVERGED
507
+
508
+ return stop
509
+
510
+ def post_termination_check(work):
511
+ pass
512
+
513
+ def customize_result(res, shape):
514
+ xl, xr, fl, fr = res['xl'], res['xr'], res['fl'], res['fr']
515
+ i = res['xl'] < res['xr']
516
+ res['xl'] = np.choose(i, (xr, xl))
517
+ res['xr'] = np.choose(i, (xl, xr))
518
+ res['fl'] = np.choose(i, (fr, fl))
519
+ res['fr'] = np.choose(i, (fl, fr))
520
+ return shape
521
+
522
+ return eim._loop(work, callback, shape, maxiter, func, args, dtype,
523
+ pre_func_eval, post_func_eval, check_termination,
524
+ post_termination_check, customize_result, res_work_pairs)
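A minimal usage sketch tying the routines above together; note that `_chandrupatla` is private API, so the import path and signature may change between releases (the problem values below are illustrative):

import numpy as np
from scipy.optimize._chandrupatla import _chandrupatla

# Elementwise root finding: one vectorized call solves several
# independent problems. f(x) = x**3 - c has root c**(1/3).
def f(x, c):
    return x**3 - c

c = np.array([1.0, 8.0, 27.0])
res = _chandrupatla(f, 0, 4, args=(c,))
print(res.x)        # approximately [1. 2. 3.]
print(res.success)  # True where status == 0
# On success, the final bracket satisfies the documented criterion
# abs(xr - xl) < xatol + abs(x) * xrtol, or abs(fun) is within the
# tolerance on the function value.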
venv/lib/python3.10/site-packages/scipy/optimize/_cobyla.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (101 kB). View file
 
venv/lib/python3.10/site-packages/scipy/optimize/_cobyla_py.py ADDED
@@ -0,0 +1,316 @@
1
+ """
2
+ Interface to Constrained Optimization By Linear Approximation
3
+
4
+ Functions
5
+ ---------
6
+ .. autosummary::
7
+ :toctree: generated/
8
+
9
+ fmin_cobyla
10
+
11
+ """
12
+
13
+ import functools
14
+ from threading import RLock
15
+
16
+ import numpy as np
17
+ from scipy.optimize import _cobyla as cobyla
18
+ from ._optimize import (OptimizeResult, _check_unknown_options,
19
+ _prepare_scalar_function)
20
+ try:
21
+ from itertools import izip
22
+ except ImportError:
23
+ izip = zip
24
+
25
+ __all__ = ['fmin_cobyla']
26
+
27
+ # Workaround as _cobyla.minimize is not threadsafe
28
+ # due to an unknown f2py bug and can segfault,
29
+ # see gh-9658.
30
+ _module_lock = RLock()
31
+ def synchronized(func):
32
+ @functools.wraps(func)
33
+ def wrapper(*args, **kwargs):
34
+ with _module_lock:
35
+ return func(*args, **kwargs)
36
+ return wrapper
37
+
38
+ @synchronized
39
+ def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0,
40
+ rhoend=1e-4, maxfun=1000, disp=None, catol=2e-4,
41
+ *, callback=None):
42
+ """
43
+ Minimize a function using the Constrained Optimization By Linear
44
+ Approximation (COBYLA) method. This method wraps a FORTRAN
45
+ implementation of the algorithm.
46
+
47
+ Parameters
48
+ ----------
49
+ func : callable
50
+ Function to minimize. In the form func(x, \\*args).
51
+ x0 : ndarray
52
+ Initial guess.
53
+ cons : sequence
54
+ Constraint functions; must all be ``>=0`` (a single function
55
+ if only 1 constraint). Each function takes the parameters `x`
56
+ as its first argument, and it can return either a single number or
57
+ an array or list of numbers.
58
+ args : tuple, optional
59
+ Extra arguments to pass to function.
60
+ consargs : tuple, optional
61
+ Extra arguments to pass to constraint functions (default of None means
62
+ use same extra arguments as those passed to func).
63
+ Use ``()`` for no extra arguments.
64
+ rhobeg : float, optional
65
+ Reasonable initial changes to the variables.
66
+ rhoend : float, optional
67
+ Final accuracy in the optimization (not precisely guaranteed). This
68
+ is a lower bound on the size of the trust region.
69
+ disp : {0, 1, 2, 3}, optional
70
+ Controls the frequency of output; 0 implies no output.
71
+ maxfun : int, optional
72
+ Maximum number of function evaluations.
73
+ catol : float, optional
74
+ Absolute tolerance for constraint violations.
75
+ callback : callable, optional
76
+ Called after each iteration, as ``callback(x)``, where ``x`` is the
77
+ current parameter vector.
78
+
79
+ Returns
80
+ -------
81
+ x : ndarray
82
+ The argument that minimises `f`.
83
+
84
+ See also
85
+ --------
86
+ minimize: Interface to minimization algorithms for multivariate
87
+ functions. See the 'COBYLA' `method` in particular.
88
+
89
+ Notes
90
+ -----
91
+ This algorithm is based on linear approximations to the objective
92
+ function and each constraint. We briefly describe the algorithm.
93
+
94
+ Suppose the function is being minimized over k variables. At the
95
+ jth iteration the algorithm has k+1 points v_1, ..., v_(k+1),
96
+ an approximate solution x_j, and a radius RHO_j. It also maintains affine
97
+ (i.e., linear plus a constant) approximations to the objective
98
+ function and constraint functions, constructed so that the approximations
99
+ agree with the true function values at the k+1 points v_1,.., v_(k+1).
100
+ This gives a linear program to solve (where the linear approximations
101
+ of the constraint functions are constrained to be non-negative).
102
+
103
+ However, the linear approximations are likely only good
104
+ approximations near the current simplex, so the linear program is
105
+ given the further requirement that the solution, which
106
+ will become x_(j+1), must be within RHO_j from x_j. RHO_j only
107
+ decreases, never increases. The initial RHO_j is rhobeg and the
108
+ final RHO_j is rhoend. In this way COBYLA's iterations behave
109
+ like a trust region algorithm.
110
+
111
+ Additionally, the linear program may be inconsistent, or the
112
+ approximation may give poor improvement. For details about
113
+ how these issues are resolved, as well as how the points v_i are
114
+ updated, refer to the source code or the references below.
115
+
116
+
117
+ References
118
+ ----------
119
+ Powell M.J.D. (1994), "A direct search optimization method that models
120
+ the objective and constraint functions by linear interpolation.", in
121
+ Advances in Optimization and Numerical Analysis, eds. S. Gomez and
122
+ J-P Hennart, Kluwer Academic (Dordrecht), pp. 51-67
123
+
124
+ Powell M.J.D. (1998), "Direct search algorithms for optimization
125
+ calculations", Acta Numerica 7, 287-336
126
+
127
+ Powell M.J.D. (2007), "A view of algorithms for optimization without
128
+ derivatives", Cambridge University Technical Report DAMTP 2007/NA03
129
+
130
+
131
+ Examples
132
+ --------
133
+ Minimize the objective function f(x,y) = x*y subject
134
+ to the constraints x**2 + y**2 < 1 and y > 0::
135
+
136
+ >>> def objective(x):
137
+ ... return x[0]*x[1]
138
+ ...
139
+ >>> def constr1(x):
140
+ ... return 1 - (x[0]**2 + x[1]**2)
141
+ ...
142
+ >>> def constr2(x):
143
+ ... return x[1]
144
+ ...
145
+ >>> from scipy.optimize import fmin_cobyla
146
+ >>> fmin_cobyla(objective, [0.0, 0.1], [constr1, constr2], rhoend=1e-7)
147
+ array([-0.70710685, 0.70710671])
148
+
149
+ The exact solution is (-sqrt(2)/2, sqrt(2)/2).
150
+
151
+
152
+
153
+ """
154
+ err = "cons must be a sequence of callable functions or a single"\
155
+ " callable function."
156
+ try:
157
+ len(cons)
158
+ except TypeError as e:
159
+ if callable(cons):
160
+ cons = [cons]
161
+ else:
162
+ raise TypeError(err) from e
163
+ else:
164
+ for thisfunc in cons:
165
+ if not callable(thisfunc):
166
+ raise TypeError(err)
167
+
168
+ if consargs is None:
169
+ consargs = args
170
+
171
+ # build constraints
172
+ con = tuple({'type': 'ineq', 'fun': c, 'args': consargs} for c in cons)
173
+
174
+ # options
175
+ opts = {'rhobeg': rhobeg,
176
+ 'tol': rhoend,
177
+ 'disp': disp,
178
+ 'maxiter': maxfun,
179
+ 'catol': catol,
180
+ 'callback': callback}
181
+
182
+ sol = _minimize_cobyla(func, x0, args, constraints=con,
183
+ **opts)
184
+ if disp and not sol['success']:
185
+ print(f"COBYLA failed to find a solution: {sol.message}")
186
+ return sol['x']
187
+
188
+
189
+ @synchronized
190
+ def _minimize_cobyla(fun, x0, args=(), constraints=(),
191
+ rhobeg=1.0, tol=1e-4, maxiter=1000,
192
+ disp=False, catol=2e-4, callback=None, bounds=None,
193
+ **unknown_options):
194
+ """
195
+ Minimize a scalar function of one or more variables using the
196
+ Constrained Optimization BY Linear Approximation (COBYLA) algorithm.
197
+
198
+ Options
199
+ -------
200
+ rhobeg : float
201
+ Reasonable initial changes to the variables.
202
+ tol : float
203
+ Final accuracy in the optimization (not precisely guaranteed).
204
+ This is a lower bound on the size of the trust region.
205
+ disp : bool
206
+ Set to True to print convergence messages. If False,
207
+ `verbosity` is ignored and set to 0.
208
+ maxiter : int
209
+ Maximum number of function evaluations.
210
+ catol : float
211
+ Tolerance (absolute) for constraint violations
212
+
213
+ """
214
+ _check_unknown_options(unknown_options)
215
+ maxfun = maxiter
216
+ rhoend = tol
217
+ iprint = int(bool(disp))
218
+
219
+ # check constraints
220
+ if isinstance(constraints, dict):
221
+ constraints = (constraints, )
222
+ constraints = list(constraints)  # list, so bound constraints can be appended
223
+ if bounds:
224
+ i_lb = np.isfinite(bounds.lb)
225
+ if np.any(i_lb):
226
+ def lb_constraint(x, *args, **kwargs):
227
+ return x[i_lb] - bounds.lb[i_lb]
228
+
229
+ constraints.append({'type': 'ineq', 'fun': lb_constraint})
230
+
231
+ i_ub = np.isfinite(bounds.ub)
232
+ if np.any(i_ub):
233
+ def ub_constraint(x):
234
+ return bounds.ub[i_ub] - x[i_ub]
235
+
236
+ constraints.append({'type': 'ineq', 'fun': ub_constraint})
237
+
238
+ for ic, con in enumerate(constraints):
239
+ # check type
240
+ try:
241
+ ctype = con['type'].lower()
242
+ except KeyError as e:
243
+ raise KeyError('Constraint %d has no type defined.' % ic) from e
244
+ except TypeError as e:
245
+ raise TypeError('Constraints must be defined using a '
246
+ 'dictionary.') from e
247
+ except AttributeError as e:
248
+ raise TypeError("Constraint's type must be a string.") from e
249
+ else:
250
+ if ctype != 'ineq':
251
+ raise ValueError("Constraints of type '%s' not handled by "
252
+ "COBYLA." % con['type'])
253
+
254
+ # check function
255
+ if 'fun' not in con:
256
+ raise KeyError('Constraint %d has no function defined.' % ic)
257
+
258
+ # check extra arguments
259
+ if 'args' not in con:
260
+ con['args'] = ()
261
+
262
+ # m is the total number of constraint values
263
+ # it takes into account that some constraints may be vector-valued
264
+ cons_lengths = []
265
+ for c in constraints:
266
+ f = c['fun'](x0, *c['args'])
267
+ try:
268
+ cons_length = len(f)
269
+ except TypeError:
270
+ cons_length = 1
271
+ cons_lengths.append(cons_length)
272
+ m = sum(cons_lengths)
273
+
274
+ # create the ScalarFunction, cobyla doesn't require derivative function
275
+ def _jac(x, *args):
276
+ return None
277
+
278
+ sf = _prepare_scalar_function(fun, x0, args=args, jac=_jac)
279
+
280
+ def calcfc(x, con):
281
+ f = sf.fun(x)
282
+ i = 0
283
+ for size, c in izip(cons_lengths, constraints):
284
+ con[i: i + size] = c['fun'](x, *c['args'])
285
+ i += size
286
+ return f
287
+
288
+ def wrapped_callback(x):
289
+ if callback is not None:
290
+ callback(np.copy(x))
291
+
292
+ info = np.zeros(4, np.float64)
293
+ xopt, info = cobyla.minimize(calcfc, m=m, x=np.copy(x0), rhobeg=rhobeg,
294
+ rhoend=rhoend, iprint=iprint, maxfun=maxfun,
295
+ dinfo=info, callback=wrapped_callback)
296
+
297
+ if info[3] > catol:
298
+ # Check constraint violation
299
+ info[0] = 4
300
+
301
+ return OptimizeResult(x=xopt,
302
+ status=int(info[0]),
303
+ success=info[0] == 1,
304
+ message={1: 'Optimization terminated successfully.',
305
+ 2: 'Maximum number of function evaluations '
306
+ 'has been exceeded.',
307
+ 3: 'Rounding errors are becoming damaging '
308
+ 'in COBYLA subroutine.',
309
+ 4: 'Did not converge to a solution '
310
+ 'satisfying the constraints. See '
311
+ '`maxcv` for magnitude of violation.',
312
+ 5: 'NaN result encountered.'
313
+ }.get(info[0], 'Unknown exit status.'),
314
+ nfev=int(info[1]),
315
+ fun=info[2],
316
+ maxcv=info[3])
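For orientation, the wrapper above is equivalent to calling the public `minimize` interface with dictionary constraints; a small sketch (the objective and constraints repeat the docstring example):

from scipy.optimize import minimize

def objective(x):
    return x[0] * x[1]

cons = [{'type': 'ineq', 'fun': lambda x: 1 - (x[0]**2 + x[1]**2)},
        {'type': 'ineq', 'fun': lambda x: x[1]}]

# Roughly fmin_cobyla(objective, [0.0, 0.1], [constr1, constr2],
# rhoend=1e-7): `rhoend` maps to `tol` and `maxfun` to `maxiter`.
res = minimize(objective, [0.0, 0.1], method='COBYLA',
               constraints=cons, tol=1e-7)
print(res.x)  # close to (-sqrt(2)/2, sqrt(2)/2)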
venv/lib/python3.10/site-packages/scipy/optimize/_constraints.py ADDED
@@ -0,0 +1,590 @@
1
+ """Constraints definition for minimize."""
2
+ import numpy as np
3
+ from ._hessian_update_strategy import BFGS
4
+ from ._differentiable_functions import (
5
+ VectorFunction, LinearVectorFunction, IdentityVectorFunction)
6
+ from ._optimize import OptimizeWarning
7
+ from warnings import warn, catch_warnings, simplefilter, filterwarnings
8
+ from scipy.sparse import issparse
9
+
10
+
11
+ def _arr_to_scalar(x):
12
+ # If x is a numpy array, return x.item(). This will
13
+ # fail if the array has more than one element.
14
+ return x.item() if isinstance(x, np.ndarray) else x
15
+
16
+
17
+ class NonlinearConstraint:
18
+ """Nonlinear constraint on the variables.
19
+
20
+ The constraint has the general inequality form::
21
+
22
+ lb <= fun(x) <= ub
23
+
24
+ Here the vector of independent variables x is passed as ndarray of shape
25
+ (n,) and ``fun`` returns a vector with m components.
26
+
27
+ It is possible to use equal bounds to represent an equality constraint or
28
+ infinite bounds to represent a one-sided constraint.
29
+
30
+ Parameters
31
+ ----------
32
+ fun : callable
33
+ The function defining the constraint.
34
+ The signature is ``fun(x) -> array_like, shape (m,)``.
35
+ lb, ub : array_like
36
+ Lower and upper bounds on the constraint. Each array must have the
37
+ shape (m,) or be a scalar, in the latter case a bound will be the same
38
+ for all components of the constraint. Use ``np.inf`` with an
39
+ appropriate sign to specify a one-sided constraint.
40
+ Set components of `lb` and `ub` equal to represent an equality
41
+ constraint. Note that you can mix constraints of different types:
42
+ interval, one-sided or equality, by setting different components of
43
+ `lb` and `ub` as necessary.
44
+ jac : {callable, '2-point', '3-point', 'cs'}, optional
45
+ Method of computing the Jacobian matrix (an m-by-n matrix,
46
+ where element (i, j) is the partial derivative of f[i] with
47
+ respect to x[j]). The keywords {'2-point', '3-point',
48
+ 'cs'} select a finite difference scheme for the numerical estimation.
49
+ A callable must have the following signature:
50
+ ``jac(x) -> {ndarray, sparse matrix}, shape (m, n)``.
51
+ Default is '2-point'.
52
+ hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy, None}, optional
53
+ Method for computing the Hessian matrix. The keywords
54
+ {'2-point', '3-point', 'cs'} select a finite difference scheme for
55
+ numerical estimation. Alternatively, objects implementing
56
+ `HessianUpdateStrategy` interface can be used to approximate the
57
+ Hessian. Currently available implementations are:
58
+
59
+ - `BFGS` (default option)
60
+ - `SR1`
61
+
62
+ A callable must return the Hessian matrix of ``dot(fun, v)`` and
63
+ must have the following signature:
64
+ ``hess(x, v) -> {LinearOperator, sparse matrix, array_like}, shape (n, n)``.
65
+ Here ``v`` is ndarray with shape (m,) containing Lagrange multipliers.
66
+ keep_feasible : array_like of bool, optional
67
+ Whether to keep the constraint components feasible throughout
68
+ iterations. A single value set this property for all components.
69
+ Default is False. Has no effect for equality constraints.
70
+ finite_diff_rel_step: None or array_like, optional
71
+ Relative step size for the finite difference approximation. Default is
72
+ None, which will select a reasonable value automatically depending
73
+ on a finite difference scheme.
74
+ finite_diff_jac_sparsity: {None, array_like, sparse matrix}, optional
75
+ Defines the sparsity structure of the Jacobian matrix for finite
76
+ difference estimation, its shape must be (m, n). If the Jacobian has
77
+ only few non-zero elements in *each* row, providing the sparsity
78
+ structure will greatly speed up the computations. A zero entry means
79
+ that a corresponding element in the Jacobian is identically zero.
80
+ If provided, forces the use of 'lsmr' trust-region solver.
81
+ If None (default) then dense differencing will be used.
82
+
83
+ Notes
84
+ -----
85
+ Finite difference schemes {'2-point', '3-point', 'cs'} may be used for
86
+ approximating either the Jacobian or the Hessian. We, however, do not allow
87
+ its use for approximating both simultaneously. Hence whenever the Jacobian
88
+ is estimated via finite-differences, we require the Hessian to be estimated
89
+ using one of the quasi-Newton strategies.
90
+
91
+ The scheme 'cs' is potentially the most accurate, but requires the function
92
+ to correctly handles complex inputs and be analytically continuable to the
93
+ complex plane. The scheme '3-point' is more accurate than '2-point' but
94
+ requires twice as many operations.
95
+
96
+ Examples
97
+ --------
98
+ Constrain ``x[0] < sin(x[1]) + 1.9``
99
+
100
+ >>> from scipy.optimize import NonlinearConstraint
101
+ >>> import numpy as np
102
+ >>> con = lambda x: x[0] - np.sin(x[1])
103
+ >>> nlc = NonlinearConstraint(con, -np.inf, 1.9)
104
+
105
+ """
106
+ def __init__(self, fun, lb, ub, jac='2-point', hess=BFGS(),
107
+ keep_feasible=False, finite_diff_rel_step=None,
108
+ finite_diff_jac_sparsity=None):
109
+ self.fun = fun
110
+ self.lb = lb
111
+ self.ub = ub
112
+ self.finite_diff_rel_step = finite_diff_rel_step
113
+ self.finite_diff_jac_sparsity = finite_diff_jac_sparsity
114
+ self.jac = jac
115
+ self.hess = hess
116
+ self.keep_feasible = keep_feasible
117
+
118
+
119
+ class LinearConstraint:
120
+ """Linear constraint on the variables.
121
+
122
+ The constraint has the general inequality form::
123
+
124
+ lb <= A.dot(x) <= ub
125
+
126
+ Here the vector of independent variables x is passed as ndarray of shape
127
+ (n,) and the matrix A has shape (m, n).
128
+
129
+ It is possible to use equal bounds to represent an equality constraint or
130
+ infinite bounds to represent a one-sided constraint.
131
+
132
+ Parameters
133
+ ----------
134
+ A : {array_like, sparse matrix}, shape (m, n)
135
+ Matrix defining the constraint.
136
+ lb, ub : dense array_like, optional
137
+ Lower and upper limits on the constraint. Each array must have the
138
+ shape (m,) or be a scalar, in the latter case a bound will be the same
139
+ for all components of the constraint. Use ``np.inf`` with an
140
+ appropriate sign to specify a one-sided constraint.
141
+ Set components of `lb` and `ub` equal to represent an equality
142
+ constraint. Note that you can mix constraints of different types:
143
+ interval, one-sided or equality, by setting different components of
144
+ `lb` and `ub` as necessary. Defaults to ``lb = -np.inf``
145
+ and ``ub = np.inf`` (no limits).
146
+ keep_feasible : dense array_like of bool, optional
147
+ Whether to keep the constraint components feasible throughout
148
+ iterations. A single value set this property for all components.
149
+ Default is False. Has no effect for equality constraints.
150
+ """
151
+ def _input_validation(self):
152
+ if self.A.ndim != 2:
153
+ message = "`A` must have exactly two dimensions."
154
+ raise ValueError(message)
155
+
156
+ try:
157
+ shape = self.A.shape[0:1]
158
+ self.lb = np.broadcast_to(self.lb, shape)
159
+ self.ub = np.broadcast_to(self.ub, shape)
160
+ self.keep_feasible = np.broadcast_to(self.keep_feasible, shape)
161
+ except ValueError:
162
+ message = ("`lb`, `ub`, and `keep_feasible` must be broadcastable "
163
+ "to shape `A.shape[0:1]`")
164
+ raise ValueError(message)
165
+
166
+ def __init__(self, A, lb=-np.inf, ub=np.inf, keep_feasible=False):
167
+ if not issparse(A):
168
+ # In some cases, if the constraint is not valid, this emits a
169
+ # VisibleDeprecationWarning about ragged nested sequences
170
+ # before eventually causing an error. `scipy.optimize.milp` would
171
+ # prefer that this just error out immediately so it can handle it
172
+ # rather than concerning the user.
173
+ with catch_warnings():
174
+ simplefilter("error")
175
+ self.A = np.atleast_2d(A).astype(np.float64)
176
+ else:
177
+ self.A = A
178
+ if issparse(lb) or issparse(ub):
179
+ raise ValueError("Constraint limits must be dense arrays.")
180
+ self.lb = np.atleast_1d(lb).astype(np.float64)
181
+ self.ub = np.atleast_1d(ub).astype(np.float64)
182
+
183
+ if issparse(keep_feasible):
184
+ raise ValueError("`keep_feasible` must be a dense array.")
185
+ self.keep_feasible = np.atleast_1d(keep_feasible).astype(bool)
186
+ self._input_validation()
187
+
188
+ def residual(self, x):
189
+ """
190
+ Calculate the residual between the constraint function and the limits
191
+
192
+ For a linear constraint of the form::
193
+
194
+ lb <= A@x <= ub
195
+
196
+ the lower and upper residuals between ``A@x`` and the limits are values
197
+ ``sl`` and ``sb`` such that::
198
+
199
+ lb + sl == A@x == ub - sb
200
+
201
+ When all elements of ``sl`` and ``sb`` are positive, all elements of
202
+ the constraint are satisfied; a negative element in ``sl`` or ``sb``
203
+ indicates that the corresponding element of the constraint is not
204
+ satisfied.
205
+
206
+ Parameters
207
+ ----------
208
+ x: array_like
209
+ Vector of independent variables
210
+
211
+ Returns
212
+ -------
213
+ sl, sb : array-like
214
+ The lower and upper residuals
215
+ """
216
+ return self.A@x - self.lb, self.ub - self.A@x
217
+
218
+
219
+ class Bounds:
220
+ """Bounds constraint on the variables.
221
+
222
+ The constraint has the general inequality form::
223
+
224
+ lb <= x <= ub
225
+
226
+ It is possible to use equal bounds to represent an equality constraint or
227
+ infinite bounds to represent a one-sided constraint.
228
+
229
+ Parameters
230
+ ----------
231
+ lb, ub : dense array_like, optional
232
+ Lower and upper bounds on independent variables. `lb`, `ub`, and
233
+ `keep_feasible` must be the same shape or broadcastable.
234
+ Set components of `lb` and `ub` equal
235
+ to fix a variable. Use ``np.inf`` with an appropriate sign to disable
236
+ bounds on all or some variables. Note that you can mix constraints of
237
+ different types: interval, one-sided or equality, by setting different
238
+ components of `lb` and `ub` as necessary. Defaults to ``lb = -np.inf``
239
+ and ``ub = np.inf`` (no bounds).
240
+ keep_feasible : dense array_like of bool, optional
241
+ Whether to keep the constraint components feasible throughout
242
+ iterations. Must be broadcastable with `lb` and `ub`.
243
+ Default is False. Has no effect for equality constraints.
244
+ """
245
+ def _input_validation(self):
246
+ try:
247
+ res = np.broadcast_arrays(self.lb, self.ub, self.keep_feasible)
248
+ self.lb, self.ub, self.keep_feasible = res
249
+ except ValueError:
250
+ message = "`lb`, `ub`, and `keep_feasible` must be broadcastable."
251
+ raise ValueError(message)
252
+
253
+ def __init__(self, lb=-np.inf, ub=np.inf, keep_feasible=False):
254
+ if issparse(lb) or issparse(ub):
255
+ raise ValueError("Lower and upper bounds must be dense arrays.")
256
+ self.lb = np.atleast_1d(lb)
257
+ self.ub = np.atleast_1d(ub)
258
+
259
+ if issparse(keep_feasible):
260
+ raise ValueError("`keep_feasible` must be a dense array.")
261
+ self.keep_feasible = np.atleast_1d(keep_feasible).astype(bool)
262
+ self._input_validation()
263
+
264
+ def __repr__(self):
265
+ start = f"{type(self).__name__}({self.lb!r}, {self.ub!r}"
266
+ if np.any(self.keep_feasible):
267
+ end = f", keep_feasible={self.keep_feasible!r})"
268
+ else:
269
+ end = ")"
270
+ return start + end
271
+
272
+ def residual(self, x):
273
+ """Calculate the residual (slack) between the input and the bounds
274
+
275
+ For a bound constraint of the form::
276
+
277
+ lb <= x <= ub
278
+
279
+ the lower and upper residuals between `x` and the bounds are values
280
+ ``sl`` and ``sb`` such that::
281
+
282
+ lb + sl == x == ub - sb
283
+
284
+ When all elements of ``sl`` and ``sb`` are positive, all elements of
285
+ ``x`` lie within the bounds; a negative element in ``sl`` or ``sb``
286
+ indicates that the corresponding element of ``x`` is out of bounds.
287
+
288
+ Parameters
289
+ ----------
290
+ x: array_like
291
+ Vector of independent variables
292
+
293
+ Returns
294
+ -------
295
+ sl, sb : array-like
296
+ The lower and upper residuals
297
+ """
298
+ return x - self.lb, self.ub - x
299
+
300
+
301
+ class PreparedConstraint:
302
+ """Constraint prepared from a user defined constraint.
303
+
304
+ On creation it will check whether a constraint definition is valid and
305
+ the initial point is feasible. If created successfully, it will contain
306
+ the attributes listed below.
307
+
308
+ Parameters
309
+ ----------
310
+ constraint : {NonlinearConstraint, LinearConstraint`, Bounds}
311
+ Constraint to check and prepare.
312
+ x0 : array_like
313
+ Initial vector of independent variables.
314
+ sparse_jacobian : bool or None, optional
315
+ If bool, then the Jacobian of the constraint will be converted
316
+ to the corresponded format if necessary. If None (default), such
317
+ conversion is not made.
318
+ finite_diff_bounds : 2-tuple, optional
319
+ Lower and upper bounds on the independent variables for the finite
320
+ difference approximation, if applicable. Defaults to no bounds.
321
+
322
+ Attributes
323
+ ----------
324
+ fun : {VectorFunction, LinearVectorFunction, IdentityVectorFunction}
325
+ Function defining the constraint wrapped by one of the convenience
326
+ classes.
327
+ bounds : 2-tuple
328
+ Contains lower and upper bounds for the constraints --- lb and ub.
329
+ These are converted to ndarray and have a size equal to the number of
330
+ the constraints.
331
+ keep_feasible : ndarray
332
+ Array indicating which components must be kept feasible with a size
333
+ equal to the number of the constraints.
334
+ """
335
+ def __init__(self, constraint, x0, sparse_jacobian=None,
336
+ finite_diff_bounds=(-np.inf, np.inf)):
337
+ if isinstance(constraint, NonlinearConstraint):
338
+ fun = VectorFunction(constraint.fun, x0,
339
+ constraint.jac, constraint.hess,
340
+ constraint.finite_diff_rel_step,
341
+ constraint.finite_diff_jac_sparsity,
342
+ finite_diff_bounds, sparse_jacobian)
343
+ elif isinstance(constraint, LinearConstraint):
344
+ fun = LinearVectorFunction(constraint.A, x0, sparse_jacobian)
345
+ elif isinstance(constraint, Bounds):
346
+ fun = IdentityVectorFunction(x0, sparse_jacobian)
347
+ else:
348
+ raise ValueError("`constraint` of an unknown type is passed.")
349
+
350
+ m = fun.m
351
+
352
+ lb = np.asarray(constraint.lb, dtype=float)
353
+ ub = np.asarray(constraint.ub, dtype=float)
354
+ keep_feasible = np.asarray(constraint.keep_feasible, dtype=bool)
355
+
356
+ lb = np.broadcast_to(lb, m)
357
+ ub = np.broadcast_to(ub, m)
358
+ keep_feasible = np.broadcast_to(keep_feasible, m)
359
+
360
+ if keep_feasible.shape != (m,):
361
+ raise ValueError("`keep_feasible` has a wrong shape.")
362
+
363
+ mask = keep_feasible & (lb != ub)
364
+ f0 = fun.f
365
+ if np.any(f0[mask] < lb[mask]) or np.any(f0[mask] > ub[mask]):
366
+ raise ValueError("`x0` is infeasible with respect to some "
367
+ "inequality constraint with `keep_feasible` "
368
+ "set to True.")
369
+
370
+ self.fun = fun
371
+ self.bounds = (lb, ub)
372
+ self.keep_feasible = keep_feasible
373
+
374
+ def violation(self, x):
375
+ """How much the constraint is exceeded by.
376
+
377
+ Parameters
378
+ ----------
379
+ x : array-like
380
+ Vector of independent variables
381
+
382
+ Returns
383
+ -------
384
+ excess : array-like
385
+ How much the constraint is exceeded by, for each of the
386
+ constraints specified by `PreparedConstraint.fun`.
387
+ """
388
+ with catch_warnings():
389
+ # Ignore the following warning, it's not important when
390
+ # figuring out total violation
391
+ # UserWarning: delta_grad == 0.0. Check if the approximated
392
+ # function is linear
393
+ filterwarnings("ignore", "delta_grad", UserWarning)
394
+ ev = self.fun.fun(np.asarray(x))
395
+
396
+ excess_lb = np.maximum(self.bounds[0] - ev, 0)
397
+ excess_ub = np.maximum(ev - self.bounds[1], 0)
398
+
399
+ return excess_lb + excess_ub
400
+
401
+
402
+ def new_bounds_to_old(lb, ub, n):
403
+ """Convert the new bounds representation to the old one.
404
+
405
+ The new representation is a tuple (lb, ub) and the old one is a list
406
+ containing n tuples, ith containing lower and upper bound on a ith
407
+ variable.
408
+ If any of the entries in lb/ub are -np.inf/np.inf they are replaced by
409
+ None.
410
+ """
411
+ lb = np.broadcast_to(lb, n)
412
+ ub = np.broadcast_to(ub, n)
413
+
414
+ lb = [float(x) if x > -np.inf else None for x in lb]
415
+ ub = [float(x) if x < np.inf else None for x in ub]
416
+
417
+ return list(zip(lb, ub))
418
+
419
+
420
+ def old_bound_to_new(bounds):
421
+ """Convert the old bounds representation to the new one.
422
+
423
+ The new representation is a tuple (lb, ub) and the old one is a list
424
+ containing n tuples, ith containing lower and upper bound on a ith
425
+ variable.
426
+ If any of the entries in lb/ub are None they are replaced by
427
+ -np.inf/np.inf.
428
+ """
429
+ lb, ub = zip(*bounds)
430
+
431
+ # Convert occurrences of None to -inf or inf, and replace occurrences of
432
+ # any numpy array x with x.item(). Then wrap the results in numpy arrays.
433
+ lb = np.array([float(_arr_to_scalar(x)) if x is not None else -np.inf
434
+ for x in lb])
435
+ ub = np.array([float(_arr_to_scalar(x)) if x is not None else np.inf
436
+ for x in ub])
437
+
438
+ return lb, ub
439
+
440
+
441
+ def strict_bounds(lb, ub, keep_feasible, n_vars):
442
+ """Remove bounds which are not asked to be kept feasible."""
443
+ strict_lb = np.resize(lb, n_vars).astype(float)
444
+ strict_ub = np.resize(ub, n_vars).astype(float)
445
+ keep_feasible = np.resize(keep_feasible, n_vars)
446
+ strict_lb[~keep_feasible] = -np.inf
447
+ strict_ub[~keep_feasible] = np.inf
448
+ return strict_lb, strict_ub
449
+
450
+
451
+ def new_constraint_to_old(con, x0):
452
+ """
453
+ Converts new-style constraint objects to old-style constraint dictionaries.
454
+ """
455
+ if isinstance(con, NonlinearConstraint):
456
+ if (con.finite_diff_jac_sparsity is not None or
457
+ con.finite_diff_rel_step is not None or
458
+ not isinstance(con.hess, BFGS) or # misses user specified BFGS
459
+ con.keep_feasible):
460
+ warn("Constraint options `finite_diff_jac_sparsity`, "
461
+ "`finite_diff_rel_step`, `keep_feasible`, and `hess`"
462
+ "are ignored by this method.",
463
+ OptimizeWarning, stacklevel=3)
464
+
465
+ fun = con.fun
466
+ if callable(con.jac):
467
+ jac = con.jac
468
+ else:
469
+ jac = None
470
+
471
+ else: # LinearConstraint
472
+ if np.any(con.keep_feasible):
473
+ warn("Constraint option `keep_feasible` is ignored by this method.",
474
+ OptimizeWarning, stacklevel=3)
475
+
476
+ A = con.A
477
+ if issparse(A):
478
+ A = A.toarray()
479
+ def fun(x):
480
+ return np.dot(A, x)
481
+ def jac(x):
482
+ return A
483
+
484
+ # FIXME: when bugs in VectorFunction/LinearVectorFunction are worked out,
485
+ # use pcon.fun.fun and pcon.fun.jac. Until then, get fun/jac above.
486
+ pcon = PreparedConstraint(con, x0)
487
+ lb, ub = pcon.bounds
488
+
489
+ i_eq = lb == ub
490
+ i_bound_below = np.logical_xor(lb != -np.inf, i_eq)
491
+ i_bound_above = np.logical_xor(ub != np.inf, i_eq)
492
+ i_unbounded = np.logical_and(lb == -np.inf, ub == np.inf)
493
+
494
+ if np.any(i_unbounded):
495
+ warn("At least one constraint is unbounded above and below. Such "
496
+ "constraints are ignored.",
497
+ OptimizeWarning, stacklevel=3)
498
+
499
+ ceq = []
500
+ if np.any(i_eq):
501
+ def f_eq(x):
502
+ y = np.array(fun(x)).flatten()
503
+ return y[i_eq] - lb[i_eq]
504
+ ceq = [{"type": "eq", "fun": f_eq}]
505
+
506
+ if jac is not None:
507
+ def j_eq(x):
508
+ dy = jac(x)
509
+ if issparse(dy):
510
+ dy = dy.toarray()
511
+ dy = np.atleast_2d(dy)
512
+ return dy[i_eq, :]
513
+ ceq[0]["jac"] = j_eq
514
+
515
+ cineq = []
516
+ n_bound_below = np.sum(i_bound_below)
517
+ n_bound_above = np.sum(i_bound_above)
518
+ if n_bound_below + n_bound_above:
519
+ def f_ineq(x):
520
+ y = np.zeros(n_bound_below + n_bound_above)
521
+ y_all = np.array(fun(x)).flatten()
522
+ y[:n_bound_below] = y_all[i_bound_below] - lb[i_bound_below]
523
+ y[n_bound_below:] = -(y_all[i_bound_above] - ub[i_bound_above])
524
+ return y
525
+ cineq = [{"type": "ineq", "fun": f_ineq}]
526
+
527
+ if jac is not None:
528
+ def j_ineq(x):
529
+ dy = np.zeros((n_bound_below + n_bound_above, len(x0)))
530
+ dy_all = jac(x)
531
+ if issparse(dy_all):
532
+ dy_all = dy_all.toarray()
533
+ dy_all = np.atleast_2d(dy_all)
534
+ dy[:n_bound_below, :] = dy_all[i_bound_below]
535
+ dy[n_bound_below:, :] = -dy_all[i_bound_above]
536
+ return dy
537
+ cineq[0]["jac"] = j_ineq
538
+
539
+ old_constraints = ceq + cineq
540
+
541
+ if len(old_constraints) > 1:
542
+ warn("Equality and inequality constraints are specified in the same "
543
+ "element of the constraint list. For efficient use with this "
544
+ "method, equality and inequality constraints should be specified "
545
+ "in separate elements of the constraint list. ",
546
+ OptimizeWarning, stacklevel=3)
547
+ return old_constraints
548
+
549
+
550
+ def old_constraint_to_new(ic, con):
551
+ """
552
+ Converts old-style constraint dictionaries to new-style constraint objects.
553
+ """
554
+ # check type
555
+ try:
556
+ ctype = con['type'].lower()
557
+ except KeyError as e:
558
+ raise KeyError('Constraint %d has no type defined.' % ic) from e
559
+ except TypeError as e:
560
+ raise TypeError(
561
+ 'Constraints must be a sequence of dictionaries.'
562
+ ) from e
563
+ except AttributeError as e:
564
+ raise TypeError("Constraint's type must be a string.") from e
565
+ else:
566
+ if ctype not in ['eq', 'ineq']:
567
+ raise ValueError("Unknown constraint type '%s'." % con['type'])
568
+ if 'fun' not in con:
569
+ raise ValueError('Constraint %d has no function defined.' % ic)
570
+
571
+ lb = 0
572
+ if ctype == 'eq':
573
+ ub = 0
574
+ else:
575
+ ub = np.inf
576
+
577
+ jac = '2-point'
578
+ if 'args' in con:
579
+ args = con['args']
580
+ def fun(x):
581
+ return con["fun"](x, *args)
582
+ if 'jac' in con:
583
+ def jac(x):
584
+ return con["jac"](x, *args)
585
+ else:
586
+ fun = con['fun']
587
+ if 'jac' in con:
588
+ jac = con['jac']
589
+
590
+ return NonlinearConstraint(fun, lb, ub, jac)
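A short sketch of the `residual` semantics defined above for the public `Bounds` and `LinearConstraint` classes; negative entries flag violated components (the input values are illustrative):

import numpy as np
from scipy.optimize import Bounds, LinearConstraint

b = Bounds(lb=[0, 0], ub=[1, 1])
sl, sb = b.residual([0.5, 1.5])
# lb + sl == x == ub - sb; the negative entry in sb marks x[1] > ub[1]
print(sl, sb)  # [0.5 1.5] [ 0.5 -0.5]

lc = LinearConstraint(A=[[1, 1]], lb=0, ub=1)
sl, sb = lc.residual([0.7, 0.6])
print(sl, sb)  # [1.3] [-0.3]  -> A@x = 1.3 exceeds ub by 0.3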
venv/lib/python3.10/site-packages/scipy/optimize/_dcsrch.py ADDED
@@ -0,0 +1,728 @@
1
+ import numpy as np
2
+
3
+ """
4
+ # 2023 - ported from minpack2.dcsrch, dcstep (Fortran) to Python
5
+ c MINPACK-1 Project. June 1983.
6
+ c Argonne National Laboratory.
7
+ c Jorge J. More' and David J. Thuente.
8
+ c
9
+ c MINPACK-2 Project. November 1993.
10
+ c Argonne National Laboratory and University of Minnesota.
11
+ c Brett M. Averick, Richard G. Carter, and Jorge J. More'.
12
+ """
13
+
14
+ # NOTE this file was linted by black on first commit, and can be kept that way.
15
+
16
+
17
+ class DCSRCH:
18
+ """
19
+ Parameters
20
+ ----------
21
+ phi : callable phi(alpha)
22
+ Function at point `alpha`
23
+ derphi : callable phi'(alpha)
24
+ Objective function derivative. Returns a scalar.
25
+ ftol : float
26
+ A nonnegative tolerance for the sufficient decrease condition.
27
+ gtol : float
28
+ A nonnegative tolerance for the curvature condition.
29
+ xtol : float
30
+ A nonnegative relative tolerance for an acceptable step. The
31
+ subroutine exits with a warning if the relative difference between
32
+ sty and stx is less than xtol.
33
+ stpmin : float
34
+ A nonnegative lower bound for the step.
35
+ stpmax : float
36
+ A nonnegative upper bound for the step.
37
+
38
+ Notes
39
+ -----
40
+
41
+ This subroutine finds a step that satisfies a sufficient
42
+ decrease condition and a curvature condition.
43
+
44
+ Each call of the subroutine updates an interval with
45
+ endpoints stx and sty. The interval is initially chosen
46
+ so that it contains a minimizer of the modified function
47
+
48
+ psi(stp) = f(stp) - f(0) - ftol*stp*f'(0).
49
+
50
+ If psi(stp) <= 0 and f'(stp) >= 0 for some step, then the
51
+ interval is chosen so that it contains a minimizer of f.
52
+
53
+ The algorithm is designed to find a step that satisfies
54
+ the sufficient decrease condition
55
+
56
+ f(stp) <= f(0) + ftol*stp*f'(0),
57
+
58
+ and the curvature condition
59
+
60
+ abs(f'(stp)) <= gtol*abs(f'(0)).
61
+
62
+ If ftol is less than gtol and if, for example, the function
63
+ is bounded below, then there is always a step which satisfies
64
+ both conditions.
65
+
66
+ If no step can be found that satisfies both conditions, then
67
+ the algorithm stops with a warning. In this case stp only
68
+ satisfies the sufficient decrease condition.
69
+
70
+ A typical invocation of dcsrch has the following outline:
71
+
72
+ Evaluate the function at stp = 0.0d0; store in f.
73
+ Evaluate the gradient at stp = 0.0d0; store in g.
74
+ Choose a starting step stp.
75
+
76
+ task = 'START'
77
+ 10 continue
78
+ call dcsrch(stp,f,g,ftol,gtol,xtol,task,stpmin,stpmax,
79
+ isave,dsave)
80
+ if (task .eq. 'FG') then
81
+ Evaluate the function and the gradient at stp
82
+ go to 10
83
+ end if
84
+
85
+ NOTE: The user must not alter work arrays between calls.
86
+
87
+ The subroutine statement is
88
+
89
+ subroutine dcsrch(f,g,stp,ftol,gtol,xtol,stpmin,stpmax,
90
+ task,isave,dsave)
91
+ where
92
+
93
+ stp is a double precision variable.
94
+ On entry stp is the current estimate of a satisfactory
95
+ step. On initial entry, a positive initial estimate
96
+ must be provided.
97
+ On exit stp is the current estimate of a satisfactory step
98
+ if task = 'FG'. If task = 'CONV' then stp satisfies
99
+ the sufficient decrease and curvature condition.
100
+
101
+ f is a double precision variable.
102
+ On initial entry f is the value of the function at 0.
103
+ On subsequent entries f is the value of the
104
+ function at stp.
105
+ On exit f is the value of the function at stp.
106
+
107
+ g is a double precision variable.
108
+ On initial entry g is the derivative of the function at 0.
109
+ On subsequent entries g is the derivative of the
110
+ function at stp.
111
+ On exit g is the derivative of the function at stp.
112
+
113
+ ftol is a double precision variable.
114
+ On entry ftol specifies a nonnegative tolerance for the
115
+ sufficient decrease condition.
116
+ On exit ftol is unchanged.
117
+
118
+ gtol is a double precision variable.
119
+ On entry gtol specifies a nonnegative tolerance for the
120
+ curvature condition.
121
+ On exit gtol is unchanged.
122
+
123
+ xtol is a double precision variable.
124
+ On entry xtol specifies a nonnegative relative tolerance
125
+ for an acceptable step. The subroutine exits with a
126
+ warning if the relative difference between sty and stx
127
+ is less than xtol.
128
+
129
+ On exit xtol is unchanged.
130
+
131
+ task is a character variable of length at least 60.
132
+ On initial entry task must be set to 'START'.
133
+ On exit task indicates the required action:
134
+
135
+ If task(1:2) = 'FG' then evaluate the function and
136
+ derivative at stp and call dcsrch again.
137
+
138
+ If task(1:4) = 'CONV' then the search is successful.
139
+
140
+ If task(1:4) = 'WARN' then the subroutine is not able
141
+ to satisfy the convergence conditions. The exit value of
142
+ stp contains the best point found during the search.
143
+
144
+ If task(1:5) = 'ERROR' then there is an error in the
145
+ input arguments.
146
+
147
+ On exit with convergence, a warning or an error, the
148
+ variable task contains additional information.
149
+
150
+ stpmin is a double precision variable.
151
+ On entry stpmin is a nonnegative lower bound for the step.
152
+ On exit stpmin is unchanged.
153
+
154
+ stpmax is a double precision variable.
155
+ On entry stpmax is a nonnegative upper bound for the step.
156
+ On exit stpmax is unchanged.
157
+
158
+ isave is an integer work array of dimension 2.
159
+
160
+ dsave is a double precision work array of dimension 13.
161
+
162
+ Subprograms called
163
+
164
+ MINPACK-2 ... dcstep
165
+ MINPACK-1 Project. June 1983.
166
+ Argonne National Laboratory.
167
+ Jorge J. More' and David J. Thuente.
168
+
169
+ MINPACK-2 Project. November 1993.
170
+ Argonne National Laboratory and University of Minnesota.
171
+ Brett M. Averick, Richard G. Carter, and Jorge J. More'.
172
+ """
173
+
174
+ def __init__(self, phi, derphi, ftol, gtol, xtol, stpmin, stpmax):
175
+ self.stage = None
176
+ self.ginit = None
177
+ self.gtest = None
178
+ self.gx = None
179
+ self.gy = None
180
+ self.finit = None
181
+ self.fx = None
182
+ self.fy = None
183
+ self.stx = None
184
+ self.sty = None
185
+ self.stmin = None
186
+ self.stmax = None
187
+ self.width = None
188
+ self.width1 = None
189
+
190
+ # leave all assessment of tolerances/limits to the first call of
191
+ # this object
192
+ self.ftol = ftol
193
+ self.gtol = gtol
194
+ self.xtol = xtol
195
+ self.stpmin = stpmin
196
+ self.stpmax = stpmax
197
+
198
+ self.phi = phi
199
+ self.derphi = derphi
200
+
201
+ def __call__(self, alpha1, phi0=None, derphi0=None, maxiter=100):
202
+ """
203
+ Parameters
204
+ ----------
205
+ alpha1 : float
206
+ alpha1 is the current estimate of a satisfactory
207
+ step. A positive initial estimate must be provided.
208
+ phi0 : float
209
+ the value of `phi` at 0 (if known).
210
+ derphi0 : float
211
+ the derivative of `phi` at 0 (if known).
212
+ maxiter : int
+ Maximum number of iterations of the line search.
213
+
214
+ Returns
215
+ -------
216
+ alpha : float
217
+ Step size, or None if no suitable step was found.
218
+ phi : float
219
+ Value of `phi` at the new point `alpha`.
220
+ phi0 : float
221
+ Value of `phi` at `alpha=0`.
222
+ task : bytes
223
+ On exit task indicates status information.
224
+
225
+ If task[:4] == b'CONV' then the search is successful.
226
+
227
+ If task[:4] == b'WARN' then the subroutine is not able
228
+ to satisfy the convergence conditions. The exit value of
229
+ stp contains the best point found during the search.
230
+
231
+ If task[:5] == b'ERROR' then there is an error in the
232
+ input arguments.
233
+ """
234
+ if phi0 is None:
235
+ phi0 = self.phi(0.0)
236
+ if derphi0 is None:
237
+ derphi0 = self.derphi(0.0)
238
+
239
+ phi1 = phi0
240
+ derphi1 = derphi0
241
+
242
+ task = b"START"
243
+ for i in range(maxiter):
244
+ stp, phi1, derphi1, task = self._iterate(
245
+ alpha1, phi1, derphi1, task
246
+ )
247
+
248
+ if not np.isfinite(stp):
249
+ task = b"WARN"
250
+ stp = None
251
+ break
252
+
253
+ if task[:2] == b"FG":
254
+ alpha1 = stp
255
+ phi1 = self.phi(stp)
256
+ derphi1 = self.derphi(stp)
257
+ else:
258
+ break
259
+ else:
260
+ # maxiter reached, the line search did not converge
261
+ stp = None
262
+ task = b"WARNING: dcsrch did not converge within max iterations"
263
+
264
+ if task[:5] == b"ERROR" or task[:4] == b"WARN":
265
+ stp = None # failed
266
+
267
+ return stp, phi1, phi0, task
268
+
269
+ def _iterate(self, stp, f, g, task):
270
+ """
271
+ Parameters
272
+ ----------
273
+ stp : float
274
+ The current estimate of a satisfactory step. On initial entry, a
275
+ positive initial estimate must be provided.
276
+ f : float
277
+ On first call f is the value of the function at 0. On subsequent
278
+ entries f should be the value of the function at stp.
279
+ g : float
280
+ On initial entry g is the derivative of the function at 0. On
281
+ subsequent entries g is the derivative of the function at stp.
282
+ task : bytes
283
+ On initial entry task must be set to 'START'.
284
+
285
+ On exit with convergence, a warning or an error, the
286
+ variable task contains additional information.
287
+
288
+
289
+ Returns
290
+ -------
291
+ stp, f, g, task: tuple
292
+
293
+ stp : float
294
+ the current estimate of a satisfactory step if task = 'FG'. If
295
+ task = 'CONV' then stp satisfies the sufficient decrease and
296
+ curvature condition.
297
+ f : float
298
+ the value of the function at stp.
299
+ g : float
300
+ the derivative of the function at stp.
301
+ task : bytes
302
+ On exit task indicates the required action:
303
+
304
+ If task(1:2) == b'FG' then evaluate the function and
305
+ derivative at stp and call dcsrch again.
306
+
307
+ If task(1:4) == b'CONV' then the search is successful.
308
+
309
+ If task(1:4) == b'WARN' then the subroutine is not able
310
+ to satisfy the convergence conditions. The exit value of
311
+ stp contains the best point found during the search.
312
+
313
+ If task(1:5) == b'ERROR' then there is an error in the
314
+ input arguments.
315
+ """
316
+ p5 = 0.5
317
+ p66 = 0.66
318
+ xtrapl = 1.1
319
+ xtrapu = 4.0
320
+
321
+ if task[:5] == b"START":
322
+ if stp < self.stpmin:
323
+ task = b"ERROR: STP .LT. STPMIN"
324
+ if stp > self.stpmax:
325
+ task = b"ERROR: STP .GT. STPMAX"
326
+ if g >= 0:
327
+ task = b"ERROR: INITIAL G .GE. ZERO"
328
+ if self.ftol < 0:
329
+ task = b"ERROR: FTOL .LT. ZERO"
330
+ if self.gtol < 0:
331
+ task = b"ERROR: GTOL .LT. ZERO"
332
+ if self.xtol < 0:
333
+ task = b"ERROR: XTOL .LT. ZERO"
334
+ if self.stpmin < 0:
335
+ task = b"ERROR: STPMIN .LT. ZERO"
336
+ if self.stpmax < self.stpmin:
337
+ task = b"ERROR: STPMAX .LT. STPMIN"
338
+
339
+ if task[:5] == b"ERROR":
340
+ return stp, f, g, task
341
+
342
+ # Initialize local variables.
343
+
344
+ self.brackt = False
345
+ self.stage = 1
346
+ self.finit = f
347
+ self.ginit = g
348
+ self.gtest = self.ftol * self.ginit
349
+ self.width = self.stpmax - self.stpmin
350
+ self.width1 = self.width / p5
351
+
352
+ # The variables stx, fx, gx contain the values of the step,
353
+ # function, and derivative at the best step.
354
+ # The variables sty, fy, gy contain the value of the step,
355
+ # function, and derivative at sty.
356
+ # The variables stp, f, g contain the values of the step,
357
+ # function, and derivative at stp.
358
+
359
+ self.stx = 0.0
360
+ self.fx = self.finit
361
+ self.gx = self.ginit
362
+ self.sty = 0.0
363
+ self.fy = self.finit
364
+ self.gy = self.ginit
365
+ self.stmin = 0
366
+ self.stmax = stp + xtrapu * stp
367
+ task = b"FG"
368
+ return stp, f, g, task
369
+
370
+ # in the original Fortran this was a location to restore variables
371
+ # we don't need to do that because they're attributes.
372
+
373
+ # If psi(stp) <= 0 and f'(stp) >= 0 for some step, then the
374
+ # algorithm enters the second stage.
375
+ ftest = self.finit + stp * self.gtest
376
+
377
+ if self.stage == 1 and f <= ftest and g >= 0:
378
+ self.stage = 2
379
+
380
+ # test for warnings
381
+ if self.brackt and (stp <= self.stmin or stp >= self.stmax):
382
+ task = b"WARNING: ROUNDING ERRORS PREVENT PROGRESS"
383
+ if self.brackt and self.stmax - self.stmin <= self.xtol * self.stmax:
384
+ task = b"WARNING: XTOL TEST SATISFIED"
385
+ if stp == self.stpmax and f <= ftest and g <= self.gtest:
386
+ task = b"WARNING: STP = STPMAX"
387
+ if stp == self.stpmin and (f > ftest or g >= self.gtest):
388
+ task = b"WARNING: STP = STPMIN"
389
+
390
+ # test for convergence
391
+ if f <= ftest and abs(g) <= self.gtol * -self.ginit:
392
+ task = b"CONVERGENCE"
393
+
394
+ # test for termination
395
+ if task[:4] == b"WARN" or task[:4] == b"CONV":
396
+ return stp, f, g, task
397
+
398
+ # A modified function is used to predict the step during the
399
+ # first stage if a lower function value has been obtained but
400
+ # the decrease is not sufficient.
401
+ if self.stage == 1 and f <= self.fx and f > ftest:
402
+ # Define the modified function and derivative values.
403
+ fm = f - stp * self.gtest
404
+ fxm = self.fx - self.stx * self.gtest
405
+ fym = self.fy - self.sty * self.gtest
406
+ gm = g - self.gtest
407
+ gxm = self.gx - self.gtest
408
+ gym = self.gy - self.gtest
409
+
410
+ # Call dcstep to update stx, sty, and to compute the new step.
411
+ # dcstep can have several operations which can produce NaN
412
+ # e.g. inf/inf. Filter these out.
413
+ with np.errstate(invalid="ignore", over="ignore"):
414
+ tup = dcstep(
415
+ self.stx,
416
+ fxm,
417
+ gxm,
418
+ self.sty,
419
+ fym,
420
+ gym,
421
+ stp,
422
+ fm,
423
+ gm,
424
+ self.brackt,
425
+ self.stmin,
426
+ self.stmax,
427
+ )
428
+ self.stx, fxm, gxm, self.sty, fym, gym, stp, self.brackt = tup
429
+
430
+ # Reset the function and derivative values for f
431
+ self.fx = fxm + self.stx * self.gtest
432
+ self.fy = fym + self.sty * self.gtest
433
+ self.gx = gxm + self.gtest
434
+ self.gy = gym + self.gtest
435
+
436
+ else:
437
+ # Call dcstep to update stx, sty, and to compute the new step.
438
+ # dcstep can have several operations which can produce NaN
439
+ # e.g. inf/inf. Filter these out.
440
+
441
+ with np.errstate(invalid="ignore", over="ignore"):
442
+ tup = dcstep(
443
+ self.stx,
444
+ self.fx,
445
+ self.gx,
446
+ self.sty,
447
+ self.fy,
448
+ self.gy,
449
+ stp,
450
+ f,
451
+ g,
452
+ self.brackt,
453
+ self.stmin,
454
+ self.stmax,
455
+ )
456
+ (
457
+ self.stx,
458
+ self.fx,
459
+ self.gx,
460
+ self.sty,
461
+ self.fy,
462
+ self.gy,
463
+ stp,
464
+ self.brackt,
465
+ ) = tup
466
+
467
+ # Decide if a bisection step is needed
468
+ if self.brackt:
469
+ if abs(self.sty - self.stx) >= p66 * self.width1:
470
+ stp = self.stx + p5 * (self.sty - self.stx)
471
+ self.width1 = self.width
472
+ self.width = abs(self.sty - self.stx)
473
+
474
+ # Set the minimum and maximum steps allowed for stp.
475
+ if self.brackt:
476
+ self.stmin = min(self.stx, self.sty)
477
+ self.stmax = max(self.stx, self.sty)
478
+ else:
479
+ self.stmin = stp + xtrapl * (stp - self.stx)
480
+ self.stmax = stp + xtrapu * (stp - self.stx)
481
+
482
+ # Force the step to be within the bounds stpmax and stpmin.
483
+ stp = np.clip(stp, self.stpmin, self.stpmax)
484
+
485
+ # If further progress is not possible, let stp be the best
486
+ # point obtained during the search.
487
+ if (
488
+ self.brackt
489
+ and (stp <= self.stmin or stp >= self.stmax)
490
+ or (
491
+ self.brackt
492
+ and self.stmax - self.stmin <= self.xtol * self.stmax
493
+ )
494
+ ):
495
+ stp = self.stx
496
+
497
+ # Obtain another function and derivative
498
+ task = b"FG"
499
+ return stp, f, g, task
500
+
501
+
502
+ def dcstep(stx, fx, dx, sty, fy, dy, stp, fp, dp, brackt, stpmin, stpmax):
503
+ """
504
+ Subroutine dcstep
505
+
506
+ This subroutine computes a safeguarded step for a search
507
+ procedure and updates an interval that contains a step that
508
+ satisfies a sufficient decrease and a curvature condition.
509
+
510
+ The parameter stx contains the step with the least function
511
+ value. If brackt is set to .true. then a minimizer has
512
+ been bracketed in an interval with endpoints stx and sty.
513
+ The parameter stp contains the current step.
514
+ The subroutine assumes that if brackt is set to .true. then
515
+
516
+ min(stx,sty) < stp < max(stx,sty),
517
+
518
+ and that the derivative at stx is negative in the direction
519
+ of the step.
520
+
521
+ The subroutine statement is
522
+
523
+ subroutine dcstep(stx,fx,dx,sty,fy,dy,stp,fp,dp,brackt,
524
+ stpmin,stpmax)
525
+
526
+ where
527
+
528
+ stx is a double precision variable.
529
+ On entry stx is the best step obtained so far and is an
530
+ endpoint of the interval that contains the minimizer.
531
+ On exit stx is the updated best step.
532
+
533
+ fx is a double precision variable.
534
+ On entry fx is the function at stx.
535
+ On exit fx is the function at stx.
536
+
537
+ dx is a double precision variable.
538
+ On entry dx is the derivative of the function at
539
+ stx. The derivative must be negative in the direction of
540
+ the step, that is, dx and stp - stx must have opposite
541
+ signs.
542
+ On exit dx is the derivative of the function at stx.
543
+
544
+ sty is a double precision variable.
545
+ On entry sty is the second endpoint of the interval that
546
+ contains the minimizer.
547
+ On exit sty is the updated endpoint of the interval that
548
+ contains the minimizer.
549
+
550
+ fy is a double precision variable.
551
+ On entry fy is the function at sty.
552
+ On exit fy is the function at sty.
553
+
554
+ dy is a double precision variable.
555
+ On entry dy is the derivative of the function at sty.
556
+ On exit dy is the derivative of the function at the exit sty.
557
+
558
+ stp is a double precision variable.
559
+ On entry stp is the current step. If brackt is set to .true.
560
+ then on input stp must be between stx and sty.
561
+ On exit stp is a new trial step.
562
+
563
+ fp is a double precision variable.
564
+ On entry fp is the function at stp
565
+ On exit fp is unchanged.
566
+
567
+ dp is a double precision variable.
568
+ On entry dp is the derivative of the function at stp.
569
+ On exit dp is unchanged.
570
+
571
+ brackt is a logical variable.
572
+ On entry brackt specifies if a minimizer has been bracketed.
573
+ Initially brackt must be set to .false.
574
+ On exit brackt specifies if a minimizer has been bracketed.
575
+ When a minimizer is bracketed brackt is set to .true.
576
+
577
+ stpmin is a double precision variable.
578
+ On entry stpmin is a lower bound for the step.
579
+ On exit stpmin is unchanged.
580
+
581
+ stpmax is a double precision variable.
582
+ On entry stpmax is an upper bound for the step.
583
+ On exit stpmax is unchanged.
584
+
585
+ MINPACK-1 Project. June 1983.
586
+ Argonne National Laboratory.
587
+ Jorge J. More' and David J. Thuente.
588
+
589
+ MINPACK-2 Project. November 1993.
590
+ Argonne National Laboratory and University of Minnesota.
591
+ Brett M. Averick and Jorge J. More'.
592
+
593
+ """
594
+ sgn_dp = np.sign(dp)
595
+ sgn_dx = np.sign(dx)
596
+
597
+ # sgnd = dp * (dx / abs(dx))
598
+ sgnd = sgn_dp * sgn_dx
599
+
600
+ # First case: A higher function value. The minimum is bracketed.
601
+ # If the cubic step is closer to stx than the quadratic step, the
602
+ # cubic step is taken, otherwise the average of the cubic and
603
+ # quadratic steps is taken.
604
+ if fp > fx:
605
+ theta = 3.0 * (fx - fp) / (stp - stx) + dx + dp
606
+ s = max(abs(theta), abs(dx), abs(dp))
607
+ gamma = s * np.sqrt((theta / s) ** 2 - (dx / s) * (dp / s))
608
+ if stp < stx:
609
+ gamma *= -1
610
+ p = (gamma - dx) + theta
611
+ q = ((gamma - dx) + gamma) + dp
612
+ r = p / q
613
+ stpc = stx + r * (stp - stx)
614
+ stpq = stx + ((dx / ((fx - fp) / (stp - stx) + dx)) / 2.0) * (stp - stx)
615
+ if abs(stpc - stx) <= abs(stpq - stx):
616
+ stpf = stpc
617
+ else:
618
+ stpf = stpc + (stpq - stpc) / 2.0
619
+ brackt = True
620
+ elif sgnd < 0.0:
621
+ # Second case: A lower function value and derivatives of opposite
622
+ # sign. The minimum is bracketed. If the cubic step is farther from
623
+ # stp than the secant step, the cubic step is taken, otherwise the
624
+ # secant step is taken.
625
+ theta = 3 * (fx - fp) / (stp - stx) + dx + dp
626
+ s = max(abs(theta), abs(dx), abs(dp))
627
+ gamma = s * np.sqrt((theta / s) ** 2 - (dx / s) * (dp / s))
628
+ if stp > stx:
629
+ gamma *= -1
630
+ p = (gamma - dp) + theta
631
+ q = ((gamma - dp) + gamma) + dx
632
+ r = p / q
633
+ stpc = stp + r * (stx - stp)
634
+ stpq = stp + (dp / (dp - dx)) * (stx - stp)
635
+ if abs(stpc - stp) > abs(stpq - stp):
636
+ stpf = stpc
637
+ else:
638
+ stpf = stpq
639
+ brackt = True
640
+ elif abs(dp) < abs(dx):
641
+ # Third case: A lower function value, derivatives of the same sign,
642
+ # and the magnitude of the derivative decreases.
643
+
644
+ # The cubic step is computed only if the cubic tends to infinity
645
+ # in the direction of the step or if the minimum of the cubic
646
+ # is beyond stp. Otherwise the cubic step is defined to be the
647
+ # secant step.
648
+ theta = 3 * (fx - fp) / (stp - stx) + dx + dp
649
+ s = max(abs(theta), abs(dx), abs(dp))
650
+
651
+ # The case gamma = 0 only arises if the cubic does not tend
652
+ # to infinity in the direction of the step.
653
+ gamma = s * np.sqrt(max(0, (theta / s) ** 2 - (dx / s) * (dp / s)))
654
+ if stp > stx:
655
+ gamma = -gamma
656
+ p = (gamma - dp) + theta
657
+ q = (gamma + (dx - dp)) + gamma
658
+ r = p / q
659
+ if r < 0 and gamma != 0:
660
+ stpc = stp + r * (stx - stp)
661
+ elif stp > stx:
662
+ stpc = stpmax
663
+ else:
664
+ stpc = stpmin
665
+ stpq = stp + (dp / (dp - dx)) * (stx - stp)
666
+
667
+ if brackt:
668
+ # A minimizer has been bracketed. If the cubic step is
669
+ # closer to stp than the secant step, the cubic step is
670
+ # taken, otherwise the secant step is taken.
671
+ if abs(stpc - stp) < abs(stpq - stp):
672
+ stpf = stpc
673
+ else:
674
+ stpf = stpq
675
+
676
+ if stp > stx:
677
+ stpf = min(stp + 0.66 * (sty - stp), stpf)
678
+ else:
679
+ stpf = max(stp + 0.66 * (sty - stp), stpf)
680
+ else:
681
+ # A minimizer has not been bracketed. If the cubic step is
682
+ # farther from stp than the secant step, the cubic step is
683
+ # taken, otherwise the secant step is taken.
684
+ if abs(stpc - stp) > abs(stpq - stp):
685
+ stpf = stpc
686
+ else:
687
+ stpf = stpq
688
+ stpf = np.clip(stpf, stpmin, stpmax)
689
+
690
+ else:
691
+ # Fourth case: A lower function value, derivatives of the same sign,
692
+ # and the magnitude of the derivative does not decrease. If the
693
+ # minimum is not bracketed, the step is either stpmin or stpmax,
694
+ # otherwise the cubic step is taken.
695
+ if brackt:
696
+ theta = 3.0 * (fp - fy) / (sty - stp) + dy + dp
697
+ s = max(abs(theta), abs(dy), abs(dp))
698
+ gamma = s * np.sqrt((theta / s) ** 2 - (dy / s) * (dp / s))
699
+ if stp > sty:
700
+ gamma = -gamma
701
+ p = (gamma - dp) + theta
702
+ q = ((gamma - dp) + gamma) + dy
703
+ r = p / q
704
+ stpc = stp + r * (sty - stp)
705
+ stpf = stpc
706
+ elif stp > stx:
707
+ stpf = stpmax
708
+ else:
709
+ stpf = stpmin
710
+
711
+ # Update the interval which contains a minimizer.
712
+ if fp > fx:
713
+ sty = stp
714
+ fy = fp
715
+ dy = dp
716
+ else:
717
+ if sgnd < 0:
718
+ sty = stx
719
+ fy = fx
720
+ dy = dx
721
+ stx = stp
722
+ fx = fp
723
+ dx = dp
724
+
725
+ # Compute the new step.
726
+ stp = stpf
727
+
728
+ return stx, fx, dx, sty, fy, dy, stp, brackt
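A minimal usage sketch of the ``DCSRCH`` class above (the tolerance values are illustrative, not prescribed by this file): a line search along the quadratic ``phi(a) = (a - 2)**2``, whose minimizer is ``a = 2``:

```python
# The search direction must be a descent direction: phi'(0) = -4 < 0.
phi = lambda a: (a - 2.0) ** 2
derphi = lambda a: 2.0 * (a - 2.0)

search = DCSRCH(phi, derphi, ftol=1e-4, gtol=0.9, xtol=1e-14,
                stpmin=0.0, stpmax=50.0)
stp, phi1, phi0, task = search(alpha1=1.0)
# On success, task starts with b"CONV" and stp satisfies both the
# sufficient decrease (Armijo) and curvature (strong Wolfe) conditions.
```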
venv/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py ADDED
@@ -0,0 +1,646 @@
1
+ import numpy as np
2
+ import scipy.sparse as sps
3
+ from ._numdiff import approx_derivative, group_columns
4
+ from ._hessian_update_strategy import HessianUpdateStrategy
5
+ from scipy.sparse.linalg import LinearOperator
6
+ from scipy._lib._array_api import atleast_nd, array_namespace
7
+
8
+
9
+ FD_METHODS = ('2-point', '3-point', 'cs')
10
+
11
+
12
+ class ScalarFunction:
13
+ """Scalar function and its derivatives.
14
+
15
+ This class defines a scalar function F: R^n->R and methods for
16
+ computing or approximating its first and second derivatives.
17
+
18
+ Parameters
19
+ ----------
20
+ fun : callable
21
+ evaluates the scalar function. Must be of the form ``fun(x, *args)``,
22
+ where ``x`` is the argument in the form of a 1-D array and ``args`` is
23
+ a tuple of any additional fixed parameters needed to completely specify
24
+ the function. Should return a scalar.
25
+ x0 : array-like
26
+ Provides an initial set of variables for evaluating fun. Array of real
27
+ elements of size (n,), where 'n' is the number of independent
28
+ variables.
29
+ args : tuple, optional
30
+ Any additional fixed parameters needed to completely specify the scalar
31
+ function.
32
+ grad : {callable, '2-point', '3-point', 'cs'}
33
+ Method for computing the gradient vector.
34
+ If it is a callable, it should be a function that returns the gradient
35
+ vector:
36
+
37
+ ``grad(x, *args) -> array_like, shape (n,)``
38
+
39
+ where ``x`` is an array with shape (n,) and ``args`` is a tuple with
40
+ the fixed parameters.
41
+ Alternatively, the keywords {'2-point', '3-point', 'cs'} can be used
42
+ to select a finite difference scheme for numerical estimation of the
43
+ gradient with a relative step size. These finite difference schemes
44
+ obey any specified `bounds`.
45
+ hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy}
46
+ Method for computing the Hessian matrix. If it is callable, it should
47
+ return the Hessian matrix:
48
+
49
+ ``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)``
50
+
51
+ where x is a (n,) ndarray and `args` is a tuple with the fixed
52
+ parameters. Alternatively, the keywords {'2-point', '3-point', 'cs'}
53
+ select a finite difference scheme for numerical estimation. Or, objects
54
+ implementing `HessianUpdateStrategy` interface can be used to
55
+ approximate the Hessian.
56
+ Whenever the gradient is estimated via finite-differences, the Hessian
57
+ cannot be estimated with options {'2-point', '3-point', 'cs'} and needs
58
+ to be estimated using one of the quasi-Newton strategies.
59
+ finite_diff_rel_step : None or array_like
60
+ Relative step size to use. The absolute step size is computed as
61
+ ``h = finite_diff_rel_step * sign(x0) * max(1, abs(x0))``, possibly
62
+ adjusted to fit into the bounds. For ``method='3-point'`` the sign
63
+ of `h` is ignored. If None then finite_diff_rel_step is selected
64
+ automatically.
65
+ finite_diff_bounds : tuple of array_like
66
+ Lower and upper bounds on independent variables. Defaults to no bounds,
67
+ (-np.inf, np.inf). Each bound must match the size of `x0` or be a
68
+ scalar, in the latter case the bound will be the same for all
69
+ variables. Use it to limit the range of function evaluation.
70
+ epsilon : None or array_like, optional
71
+ Absolute step size to use, possibly adjusted to fit into the bounds.
72
+ For ``method='3-point'`` the sign of `epsilon` is ignored. By default
73
+ relative steps are used; only if ``epsilon is not None`` are absolute
74
+ steps used.
75
+
76
+ Notes
77
+ -----
78
+ This class implements memoization logic. There are methods `fun`,
79
+ `grad`, `hess` and corresponding attributes `f`, `g` and `H`. The following
80
+ things should be considered:
81
+
82
+ 1. Use only public methods `fun`, `grad` and `hess`.
83
+ 2. After one of the methods is called, the corresponding attribute
84
+ will be set. However, a subsequent call with a different argument
85
+ of *any* of the methods may overwrite the attribute.
86
+ """
87
+ def __init__(self, fun, x0, args, grad, hess, finite_diff_rel_step,
88
+ finite_diff_bounds, epsilon=None):
89
+ if not callable(grad) and grad not in FD_METHODS:
90
+ raise ValueError(
91
+ f"`grad` must be either callable or one of {FD_METHODS}."
92
+ )
93
+
94
+ if not (callable(hess) or hess in FD_METHODS
95
+ or isinstance(hess, HessianUpdateStrategy)):
96
+ raise ValueError(
97
+ f"`hess` must be either callable, HessianUpdateStrategy"
98
+ f" or one of {FD_METHODS}."
99
+ )
100
+
101
+ if grad in FD_METHODS and hess in FD_METHODS:
102
+ raise ValueError("Whenever the gradient is estimated via "
103
+ "finite-differences, we require the Hessian "
104
+ "to be estimated using one of the "
105
+ "quasi-Newton strategies.")
106
+
107
+ self.xp = xp = array_namespace(x0)
108
+ _x = atleast_nd(x0, ndim=1, xp=xp)
109
+ _dtype = xp.float64
110
+ if xp.isdtype(_x.dtype, "real floating"):
111
+ _dtype = _x.dtype
112
+
113
+ # promotes to floating
114
+ self.x = xp.astype(_x, _dtype)
115
+ self.x_dtype = _dtype
116
+ self.n = self.x.size
117
+ self.nfev = 0
118
+ self.ngev = 0
119
+ self.nhev = 0
120
+ self.f_updated = False
121
+ self.g_updated = False
122
+ self.H_updated = False
123
+
124
+ self._lowest_x = None
125
+ self._lowest_f = np.inf
126
+
127
+ finite_diff_options = {}
128
+ if grad in FD_METHODS:
129
+ finite_diff_options["method"] = grad
130
+ finite_diff_options["rel_step"] = finite_diff_rel_step
131
+ finite_diff_options["abs_step"] = epsilon
132
+ finite_diff_options["bounds"] = finite_diff_bounds
133
+ if hess in FD_METHODS:
134
+ finite_diff_options["method"] = hess
135
+ finite_diff_options["rel_step"] = finite_diff_rel_step
136
+ finite_diff_options["abs_step"] = epsilon
137
+ finite_diff_options["as_linear_operator"] = True
138
+
139
+ # Function evaluation
140
+ def fun_wrapped(x):
141
+ self.nfev += 1
142
+ # Send a copy because the user may overwrite it.
143
+ # Overwriting results in undefined behaviour because
144
+ # fun(self.x) will change self.x, with the two no longer linked.
145
+ fx = fun(np.copy(x), *args)
146
+ # Make sure the function returns a true scalar
147
+ if not np.isscalar(fx):
148
+ try:
149
+ fx = np.asarray(fx).item()
150
+ except (TypeError, ValueError) as e:
151
+ raise ValueError(
152
+ "The user-provided objective function "
153
+ "must return a scalar value."
154
+ ) from e
155
+
156
+ if fx < self._lowest_f:
157
+ self._lowest_x = x
158
+ self._lowest_f = fx
159
+
160
+ return fx
161
+
162
+ def update_fun():
163
+ self.f = fun_wrapped(self.x)
164
+
165
+ self._update_fun_impl = update_fun
166
+ self._update_fun()
167
+
168
+ # Gradient evaluation
169
+ if callable(grad):
170
+ def grad_wrapped(x):
171
+ self.ngev += 1
172
+ return np.atleast_1d(grad(np.copy(x), *args))
173
+
174
+ def update_grad():
175
+ self.g = grad_wrapped(self.x)
176
+
177
+ elif grad in FD_METHODS:
178
+ def update_grad():
179
+ self._update_fun()
180
+ self.ngev += 1
181
+ self.g = approx_derivative(fun_wrapped, self.x, f0=self.f,
182
+ **finite_diff_options)
183
+
184
+ self._update_grad_impl = update_grad
185
+ self._update_grad()
186
+
187
+ # Hessian Evaluation
188
+ if callable(hess):
189
+ self.H = hess(np.copy(x0), *args)
190
+ self.H_updated = True
191
+ self.nhev += 1
192
+
193
+ if sps.issparse(self.H):
194
+ def hess_wrapped(x):
195
+ self.nhev += 1
196
+ return sps.csr_matrix(hess(np.copy(x), *args))
197
+ self.H = sps.csr_matrix(self.H)
198
+
199
+ elif isinstance(self.H, LinearOperator):
200
+ def hess_wrapped(x):
201
+ self.nhev += 1
202
+ return hess(np.copy(x), *args)
203
+
204
+ else:
205
+ def hess_wrapped(x):
206
+ self.nhev += 1
207
+ return np.atleast_2d(np.asarray(hess(np.copy(x), *args)))
208
+ self.H = np.atleast_2d(np.asarray(self.H))
209
+
210
+ def update_hess():
211
+ self.H = hess_wrapped(self.x)
212
+
213
+ elif hess in FD_METHODS:
214
+ def update_hess():
215
+ self._update_grad()
216
+ self.H = approx_derivative(grad_wrapped, self.x, f0=self.g,
217
+ **finite_diff_options)
218
+ return self.H
219
+
220
+ update_hess()
221
+ self.H_updated = True
222
+ elif isinstance(hess, HessianUpdateStrategy):
223
+ self.H = hess
224
+ self.H.initialize(self.n, 'hess')
225
+ self.H_updated = True
226
+ self.x_prev = None
227
+ self.g_prev = None
228
+
229
+ def update_hess():
230
+ self._update_grad()
231
+ self.H.update(self.x - self.x_prev, self.g - self.g_prev)
232
+
233
+ self._update_hess_impl = update_hess
234
+
235
+ if isinstance(hess, HessianUpdateStrategy):
236
+ def update_x(x):
237
+ self._update_grad()
238
+ self.x_prev = self.x
239
+ self.g_prev = self.g
240
+ # ensure that self.x is a copy of x. Don't store a reference
241
+ # otherwise the memoization doesn't work properly.
242
+
243
+ _x = atleast_nd(x, ndim=1, xp=self.xp)
244
+ self.x = self.xp.astype(_x, self.x_dtype)
245
+ self.f_updated = False
246
+ self.g_updated = False
247
+ self.H_updated = False
248
+ self._update_hess()
249
+ else:
250
+ def update_x(x):
251
+ # ensure that self.x is a copy of x. Don't store a reference
252
+ # otherwise the memoization doesn't work properly.
253
+ _x = atleast_nd(x, ndim=1, xp=self.xp)
254
+ self.x = self.xp.astype(_x, self.x_dtype)
255
+ self.f_updated = False
256
+ self.g_updated = False
257
+ self.H_updated = False
258
+ self._update_x_impl = update_x
259
+
260
+ def _update_fun(self):
261
+ if not self.f_updated:
262
+ self._update_fun_impl()
263
+ self.f_updated = True
264
+
265
+ def _update_grad(self):
266
+ if not self.g_updated:
267
+ self._update_grad_impl()
268
+ self.g_updated = True
269
+
270
+ def _update_hess(self):
271
+ if not self.H_updated:
272
+ self._update_hess_impl()
273
+ self.H_updated = True
274
+
275
+ def fun(self, x):
276
+ if not np.array_equal(x, self.x):
277
+ self._update_x_impl(x)
278
+ self._update_fun()
279
+ return self.f
280
+
281
+ def grad(self, x):
282
+ if not np.array_equal(x, self.x):
283
+ self._update_x_impl(x)
284
+ self._update_grad()
285
+ return self.g
286
+
287
+ def hess(self, x):
288
+ if not np.array_equal(x, self.x):
289
+ self._update_x_impl(x)
290
+ self._update_hess()
291
+ return self.H
292
+
293
+ def fun_and_grad(self, x):
294
+ if not np.array_equal(x, self.x):
295
+ self._update_x_impl(x)
296
+ self._update_fun()
297
+ self._update_grad()
298
+ return self.f, self.g
299
+
300
+
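A minimal usage sketch of ``ScalarFunction`` (the objective and argument values are illustrative): a finite-difference gradient paired with a ``BFGS`` quasi-Newton Hessian, the combination the constructor requires when the gradient is estimated numerically:

```python
import numpy as np
from scipy.optimize import BFGS

def rosen(x):
    return 100.0 * (x[1] - x[0] ** 2) ** 2 + (1 - x[0]) ** 2

sf = ScalarFunction(rosen, x0=[1.3, 0.7], args=(),
                    grad='2-point', hess=BFGS(),
                    finite_diff_rel_step=None,
                    finite_diff_bounds=(-np.inf, np.inf))

x = np.array([1.3, 0.7])
f = sf.fun(x)    # evaluates (and memoizes) the objective at x
g = sf.grad(x)   # finite-difference gradient, reusing the memoized f
f2 = sf.fun(x)   # same x: returned from cache, no extra evaluation
```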
301
+ class VectorFunction:
302
+ """Vector function and its derivatives.
303
+
304
+ This class defines a vector function F: R^n->R^m and methods for
305
+ computing or approximating its first and second derivatives.
306
+
307
+ Notes
308
+ -----
309
+ This class implements memoization logic. There are methods `fun`,
310
+ `jac`, `hess` and corresponding attributes `f`, `J` and `H`. The following
311
+ things should be considered:
312
+
313
+ 1. Use only public methods `fun`, `jac` and `hess`.
314
+ 2. After one of the methods is called, the corresponding attribute
315
+ will be set. However, a subsequent call with a different argument
316
+ of *any* of the methods may overwrite the attribute.
317
+ """
318
+ def __init__(self, fun, x0, jac, hess,
319
+ finite_diff_rel_step, finite_diff_jac_sparsity,
320
+ finite_diff_bounds, sparse_jacobian):
321
+ if not callable(jac) and jac not in FD_METHODS:
322
+ raise ValueError(f"`jac` must be either callable or one of {FD_METHODS}.")
323
+
324
+ if not (callable(hess) or hess in FD_METHODS
325
+ or isinstance(hess, HessianUpdateStrategy)):
326
+ raise ValueError("`hess` must be either callable,"
327
+ f"HessianUpdateStrategy or one of {FD_METHODS}.")
328
+
329
+ if jac in FD_METHODS and hess in FD_METHODS:
330
+ raise ValueError("Whenever the Jacobian is estimated via "
331
+ "finite-differences, we require the Hessian to "
332
+ "be estimated using one of the quasi-Newton "
333
+ "strategies.")
334
+
335
+ self.xp = xp = array_namespace(x0)
336
+ _x = atleast_nd(x0, ndim=1, xp=xp)
337
+ _dtype = xp.float64
338
+ if xp.isdtype(_x.dtype, "real floating"):
339
+ _dtype = _x.dtype
340
+
341
+ # promotes to floating
342
+ self.x = xp.astype(_x, _dtype)
343
+ self.x_dtype = _dtype
344
+
345
+ self.n = self.x.size
346
+ self.nfev = 0
347
+ self.njev = 0
348
+ self.nhev = 0
349
+ self.f_updated = False
350
+ self.J_updated = False
351
+ self.H_updated = False
352
+
353
+ finite_diff_options = {}
354
+ if jac in FD_METHODS:
355
+ finite_diff_options["method"] = jac
356
+ finite_diff_options["rel_step"] = finite_diff_rel_step
357
+ if finite_diff_jac_sparsity is not None:
358
+ sparsity_groups = group_columns(finite_diff_jac_sparsity)
359
+ finite_diff_options["sparsity"] = (finite_diff_jac_sparsity,
360
+ sparsity_groups)
361
+ finite_diff_options["bounds"] = finite_diff_bounds
362
+ self.x_diff = np.copy(self.x)
363
+ if hess in FD_METHODS:
364
+ finite_diff_options["method"] = hess
365
+ finite_diff_options["rel_step"] = finite_diff_rel_step
366
+ finite_diff_options["as_linear_operator"] = True
367
+ self.x_diff = np.copy(self.x)
368
+ if jac in FD_METHODS and hess in FD_METHODS:
369
+ raise ValueError("Whenever the Jacobian is estimated via "
370
+ "finite-differences, we require the Hessian to "
371
+ "be estimated using one of the quasi-Newton "
372
+ "strategies.")
373
+
374
+ # Function evaluation
375
+ def fun_wrapped(x):
376
+ self.nfev += 1
377
+ return np.atleast_1d(fun(x))
378
+
379
+ def update_fun():
380
+ self.f = fun_wrapped(self.x)
381
+
382
+ self._update_fun_impl = update_fun
383
+ update_fun()
384
+
385
+ self.v = np.zeros_like(self.f)
386
+ self.m = self.v.size
387
+
388
+ # Jacobian Evaluation
389
+ if callable(jac):
390
+ self.J = jac(self.x)
391
+ self.J_updated = True
392
+ self.njev += 1
393
+
394
+ if (sparse_jacobian or
395
+ sparse_jacobian is None and sps.issparse(self.J)):
396
+ def jac_wrapped(x):
397
+ self.njev += 1
398
+ return sps.csr_matrix(jac(x))
399
+ self.J = sps.csr_matrix(self.J)
400
+ self.sparse_jacobian = True
401
+
402
+ elif sps.issparse(self.J):
403
+ def jac_wrapped(x):
404
+ self.njev += 1
405
+ return jac(x).toarray()
406
+ self.J = self.J.toarray()
407
+ self.sparse_jacobian = False
408
+
409
+ else:
410
+ def jac_wrapped(x):
411
+ self.njev += 1
412
+ return np.atleast_2d(jac(x))
413
+ self.J = np.atleast_2d(self.J)
414
+ self.sparse_jacobian = False
415
+
416
+ def update_jac():
417
+ self.J = jac_wrapped(self.x)
418
+
419
+ elif jac in FD_METHODS:
420
+ self.J = approx_derivative(fun_wrapped, self.x, f0=self.f,
421
+ **finite_diff_options)
422
+ self.J_updated = True
423
+
424
+ if (sparse_jacobian or
425
+ sparse_jacobian is None and sps.issparse(self.J)):
426
+ def update_jac():
427
+ self._update_fun()
428
+ self.J = sps.csr_matrix(
429
+ approx_derivative(fun_wrapped, self.x, f0=self.f,
430
+ **finite_diff_options))
431
+ self.J = sps.csr_matrix(self.J)
432
+ self.sparse_jacobian = True
433
+
434
+ elif sps.issparse(self.J):
435
+ def update_jac():
436
+ self._update_fun()
437
+ self.J = approx_derivative(fun_wrapped, self.x, f0=self.f,
438
+ **finite_diff_options).toarray()
439
+ self.J = self.J.toarray()
440
+ self.sparse_jacobian = False
441
+
442
+ else:
443
+ def update_jac():
444
+ self._update_fun()
445
+ self.J = np.atleast_2d(
446
+ approx_derivative(fun_wrapped, self.x, f0=self.f,
447
+ **finite_diff_options))
448
+ self.J = np.atleast_2d(self.J)
449
+ self.sparse_jacobian = False
450
+
451
+ self._update_jac_impl = update_jac
452
+
453
+ # Define Hessian
454
+ if callable(hess):
455
+ self.H = hess(self.x, self.v)
456
+ self.H_updated = True
457
+ self.nhev += 1
458
+
459
+ if sps.issparse(self.H):
460
+ def hess_wrapped(x, v):
461
+ self.nhev += 1
462
+ return sps.csr_matrix(hess(x, v))
463
+ self.H = sps.csr_matrix(self.H)
464
+
465
+ elif isinstance(self.H, LinearOperator):
466
+ def hess_wrapped(x, v):
467
+ self.nhev += 1
468
+ return hess(x, v)
469
+
470
+ else:
471
+ def hess_wrapped(x, v):
472
+ self.nhev += 1
473
+ return np.atleast_2d(np.asarray(hess(x, v)))
474
+ self.H = np.atleast_2d(np.asarray(self.H))
475
+
476
+ def update_hess():
477
+ self.H = hess_wrapped(self.x, self.v)
478
+ elif hess in FD_METHODS:
479
+ def jac_dot_v(x, v):
480
+ return jac_wrapped(x).T.dot(v)
481
+
482
+ def update_hess():
483
+ self._update_jac()
484
+ self.H = approx_derivative(jac_dot_v, self.x,
485
+ f0=self.J.T.dot(self.v),
486
+ args=(self.v,),
487
+ **finite_diff_options)
488
+ update_hess()
489
+ self.H_updated = True
490
+ elif isinstance(hess, HessianUpdateStrategy):
491
+ self.H = hess
492
+ self.H.initialize(self.n, 'hess')
493
+ self.H_updated = True
494
+ self.x_prev = None
495
+ self.J_prev = None
496
+
497
+ def update_hess():
498
+ self._update_jac()
499
+ # If v is updated before x, then x_prev and
500
+ # J_prev are None and we need this check.
501
+ if self.x_prev is not None and self.J_prev is not None:
502
+ delta_x = self.x - self.x_prev
503
+ delta_g = self.J.T.dot(self.v) - self.J_prev.T.dot(self.v)
504
+ self.H.update(delta_x, delta_g)
505
+
506
+ self._update_hess_impl = update_hess
507
+
508
+ if isinstance(hess, HessianUpdateStrategy):
509
+ def update_x(x):
510
+ self._update_jac()
511
+ self.x_prev = self.x
512
+ self.J_prev = self.J
513
+ _x = atleast_nd(x, ndim=1, xp=self.xp)
514
+ self.x = self.xp.astype(_x, self.x_dtype)
515
+ self.f_updated = False
516
+ self.J_updated = False
517
+ self.H_updated = False
518
+ self._update_hess()
519
+ else:
520
+ def update_x(x):
521
+ _x = atleast_nd(x, ndim=1, xp=self.xp)
522
+ self.x = self.xp.astype(_x, self.x_dtype)
523
+ self.f_updated = False
524
+ self.J_updated = False
525
+ self.H_updated = False
526
+
527
+ self._update_x_impl = update_x
528
+
529
+ def _update_v(self, v):
530
+ if not np.array_equal(v, self.v):
531
+ self.v = v
532
+ self.H_updated = False
533
+
534
+ def _update_x(self, x):
535
+ if not np.array_equal(x, self.x):
536
+ self._update_x_impl(x)
537
+
538
+ def _update_fun(self):
539
+ if not self.f_updated:
540
+ self._update_fun_impl()
541
+ self.f_updated = True
542
+
543
+ def _update_jac(self):
544
+ if not self.J_updated:
545
+ self._update_jac_impl()
546
+ self.J_updated = True
547
+
548
+ def _update_hess(self):
549
+ if not self.H_updated:
550
+ self._update_hess_impl()
551
+ self.H_updated = True
552
+
553
+ def fun(self, x):
554
+ self._update_x(x)
555
+ self._update_fun()
556
+ return self.f
557
+
558
+ def jac(self, x):
559
+ self._update_x(x)
560
+ self._update_jac()
561
+ return self.J
562
+
563
+ def hess(self, x, v):
564
+ # v should be updated before x.
565
+ self._update_v(v)
566
+ self._update_x(x)
567
+ self._update_hess()
568
+ return self.H
569
+
570
+
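A usage sketch of ``VectorFunction`` (the residual function and values are illustrative). Note the ``hess(x, v)`` calling convention: ``v`` weights the residuals, so ``H`` approximates the Hessian of ``v @ F``, and ``v`` is applied before ``x`` internally:

```python
import numpy as np
from scipy.optimize import BFGS

def resid(x):
    return np.array([x[0] ** 2 + x[1], x[0] * x[1]])

vf = VectorFunction(resid, x0=[1.0, 2.0], jac='2-point', hess=BFGS(),
                    finite_diff_rel_step=None,
                    finite_diff_jac_sparsity=None,
                    finite_diff_bounds=(-np.inf, np.inf),
                    sparse_jacobian=False)

x = np.array([1.0, 2.0])
J = vf.jac(x)                         # (2, 2) finite-difference Jacobian
H = vf.hess(x, np.array([1.0, 0.0]))  # quasi-Newton approximation (BFGS object)
```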
571
+ class LinearVectorFunction:
572
+ """Linear vector function and its derivatives.
573
+
574
+ Defines a linear function F = A x, where x is an N-D vector and
575
+ A is an m-by-n matrix. The Jacobian is constant and equals A. The Hessian
576
+ is identically zero and it is returned as a csr matrix.
577
+ """
578
+ def __init__(self, A, x0, sparse_jacobian):
579
+ if sparse_jacobian or sparse_jacobian is None and sps.issparse(A):
580
+ self.J = sps.csr_matrix(A)
581
+ self.sparse_jacobian = True
582
+ elif sps.issparse(A):
583
+ self.J = A.toarray()
584
+ self.sparse_jacobian = False
585
+ else:
586
+ # np.asarray makes sure A is ndarray and not matrix
587
+ self.J = np.atleast_2d(np.asarray(A))
588
+ self.sparse_jacobian = False
589
+
590
+ self.m, self.n = self.J.shape
591
+
592
+ self.xp = xp = array_namespace(x0)
593
+ _x = atleast_nd(x0, ndim=1, xp=xp)
594
+ _dtype = xp.float64
595
+ if xp.isdtype(_x.dtype, "real floating"):
596
+ _dtype = _x.dtype
597
+
598
+ # promotes to floating
599
+ self.x = xp.astype(_x, _dtype)
600
+ self.x_dtype = _dtype
601
+
602
+ self.f = self.J.dot(self.x)
603
+ self.f_updated = True
604
+
605
+ self.v = np.zeros(self.m, dtype=float)
606
+ self.H = sps.csr_matrix((self.n, self.n))
607
+
608
+ def _update_x(self, x):
609
+ if not np.array_equal(x, self.x):
610
+ _x = atleast_nd(x, ndim=1, xp=self.xp)
611
+ self.x = self.xp.astype(_x, self.x_dtype)
612
+ self.f_updated = False
613
+
614
+ def fun(self, x):
615
+ self._update_x(x)
616
+ if not self.f_updated:
617
+ self.f = self.J.dot(x)
618
+ self.f_updated = True
619
+ return self.f
620
+
621
+ def jac(self, x):
622
+ self._update_x(x)
623
+ return self.J
624
+
625
+ def hess(self, x, v):
626
+ self._update_x(x)
627
+ self.v = v
628
+ return self.H
629
+
630
+
631
+ class IdentityVectorFunction(LinearVectorFunction):
632
+ """Identity vector function and its derivatives.
633
+
634
+ The Jacobian is the identity matrix, returned as a dense array when
635
+ `sparse_jacobian=False` and as a csr matrix otherwise. The Hessian is
636
+ identically zero and it is returned as a csr matrix.
637
+ """
638
+ def __init__(self, x0, sparse_jacobian):
639
+ n = len(x0)
640
+ if sparse_jacobian or sparse_jacobian is None:
641
+ A = sps.eye(n, format='csr')
642
+ sparse_jacobian = True
643
+ else:
644
+ A = np.eye(n)
645
+ sparse_jacobian = False
646
+ super().__init__(A, x0, sparse_jacobian)
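Finally, an illustrative sketch (hypothetical inputs) of ``LinearVectorFunction``: it wraps ``F(x) = A @ x``, so the Jacobian is the constant ``A`` and the Hessian of ``v @ F`` is identically zero:

```python
import numpy as np

A = np.array([[1.0, 2.0],
              [3.0, 4.0]])
lvf = LinearVectorFunction(A, x0=[0.0, 0.0], sparse_jacobian=None)

x = np.array([1.0, 1.0])
print(lvf.fun(x))                          # [3. 7.]
print(lvf.jac(x))                          # A itself (dense, since A is dense)
print(lvf.hess(x, np.zeros(2)).toarray())  # 2x2 zero matrix
```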
venv/lib/python3.10/site-packages/scipy/optimize/_differentialevolution.py ADDED
@@ -0,0 +1,1897 @@
1
+ """
2
+ differential_evolution: The differential evolution global optimization algorithm
3
+ Added by Andrew Nelson 2014
4
+ """
5
+ import warnings
6
+
7
+ import numpy as np
8
+ from scipy.optimize import OptimizeResult, minimize
9
+ from scipy.optimize._optimize import _status_message, _wrap_callback
10
+ from scipy._lib._util import check_random_state, MapWrapper, _FunctionWrapper
11
+
12
+ from scipy.optimize._constraints import (Bounds, new_bounds_to_old,
13
+ NonlinearConstraint, LinearConstraint)
14
+ from scipy.sparse import issparse
15
+
16
+ __all__ = ['differential_evolution']
17
+
18
+
19
+ _MACHEPS = np.finfo(np.float64).eps
20
+
21
+
22
+ def differential_evolution(func, bounds, args=(), strategy='best1bin',
23
+ maxiter=1000, popsize=15, tol=0.01,
24
+ mutation=(0.5, 1), recombination=0.7, seed=None,
25
+ callback=None, disp=False, polish=True,
26
+ init='latinhypercube', atol=0, updating='immediate',
27
+ workers=1, constraints=(), x0=None, *,
28
+ integrality=None, vectorized=False):
29
+ """Finds the global minimum of a multivariate function.
30
+
31
+ The differential evolution method [1]_ is stochastic in nature. It does
32
+ not use gradient methods to find the minimum, and can search large areas
33
+ of candidate space, but often requires larger numbers of function
34
+ evaluations than conventional gradient-based techniques.
35
+
36
+ The algorithm is due to Storn and Price [2]_.
37
+
38
+ Parameters
39
+ ----------
40
+ func : callable
41
+ The objective function to be minimized. Must be in the form
42
+ ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
43
+ and ``args`` is a tuple of any additional fixed parameters needed to
44
+ completely specify the function. The number of parameters, N, is equal
45
+ to ``len(x)``.
46
+ bounds : sequence or `Bounds`
47
+ Bounds for variables. There are two ways to specify the bounds:
48
+
49
+ 1. Instance of `Bounds` class.
50
+ 2. ``(min, max)`` pairs for each element in ``x``, defining the
51
+ finite lower and upper bounds for the optimizing argument of
52
+ `func`.
53
+
54
+ The total number of bounds is used to determine the number of
55
+ parameters, N. If there are parameters whose bounds are equal the total
56
+ number of free parameters is ``N - N_equal``.
57
+
58
+ args : tuple, optional
59
+ Any additional fixed parameters needed to
60
+ completely specify the objective function.
61
+ strategy : {str, callable}, optional
62
+ The differential evolution strategy to use. Should be one of:
63
+
64
+ - 'best1bin'
65
+ - 'best1exp'
66
+ - 'rand1bin'
67
+ - 'rand1exp'
68
+ - 'rand2bin'
69
+ - 'rand2exp'
70
+ - 'randtobest1bin'
71
+ - 'randtobest1exp'
72
+ - 'currenttobest1bin'
73
+ - 'currenttobest1exp'
74
+ - 'best2exp'
75
+ - 'best2bin'
76
+
77
+ The default is 'best1bin'. Strategies that may be implemented are
78
+ outlined in 'Notes'.
79
+ Alternatively the differential evolution strategy can be customized by
80
+ providing a callable that constructs a trial vector. The callable must
81
+ have the form ``strategy(candidate: int, population: np.ndarray, rng=None)``,
82
+ where ``candidate`` is an integer specifying which entry of the
83
+ population is being evolved, ``population`` is an array of shape
84
+ ``(S, N)`` containing all the population members (where S is the
85
+ total population size), and ``rng`` is the random number generator
86
+ being used within the solver.
87
+ ``candidate`` will be in the range ``[0, S)``.
88
+ ``strategy`` must return a trial vector with shape `(N,)`. The
89
+ fitness of this trial vector is compared against the fitness of
90
+ ``population[candidate]``.
91
+
92
+ .. versionchanged:: 1.12.0
93
+ Customization of evolution strategy via a callable.
94
+
95
+ maxiter : int, optional
96
+ The maximum number of generations over which the entire population is
97
+ evolved. The maximum number of function evaluations (with no polishing)
98
+ is: ``(maxiter + 1) * popsize * (N - N_equal)``
99
+ popsize : int, optional
100
+ A multiplier for setting the total population size. The population has
101
+ ``popsize * (N - N_equal)`` individuals. This keyword is overridden if
102
+ an initial population is supplied via the `init` keyword. When using
103
+ ``init='sobol'`` the population size is calculated as the next power
104
+ of 2 after ``popsize * (N - N_equal)``.
105
+ tol : float, optional
106
+ Relative tolerance for convergence; the solving stops when
107
+ ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
108
+ where `atol` and `tol` are the absolute and relative tolerances,
109
+ respectively.
110
+ mutation : float or tuple(float, float), optional
111
+ The mutation constant. In the literature this is also known as
112
+ differential weight, being denoted by F.
113
+ If specified as a float it should be in the range [0, 2].
114
+ If specified as a tuple ``(min, max)`` dithering is employed. Dithering
115
+ randomly changes the mutation constant on a generation by generation
116
+ basis. The mutation constant for that generation is taken from
117
+ ``U[min, max)``. Dithering can help speed convergence significantly.
118
+ Increasing the mutation constant increases the search radius, but will
119
+ slow down convergence.
120
+ recombination : float, optional
121
+ The recombination constant, should be in the range [0, 1]. In the
122
+ literature this is also known as the crossover probability, being
123
+ denoted by CR. Increasing this value allows a larger number of mutants
124
+ to progress into the next generation, but at the risk of population
125
+ stability.
126
+ seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
127
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
128
+ singleton is used.
129
+ If `seed` is an int, a new ``RandomState`` instance is used,
130
+ seeded with `seed`.
131
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
132
+ that instance is used.
133
+ Specify `seed` for repeatable minimizations.
134
+ disp : bool, optional
135
+ Prints the evaluated `func` at every iteration.
136
+ callback : callable, optional
137
+ A callable called after each iteration. Has the signature:
138
+
139
+ ``callback(intermediate_result: OptimizeResult)``
140
+
141
+ where ``intermediate_result`` is a keyword parameter containing an
142
+ `OptimizeResult` with attributes ``x`` and ``fun``, the best solution
143
+ found so far and its objective function value. Note that the name
144
+ of the parameter must be ``intermediate_result`` for the callback
145
+ to be passed an `OptimizeResult`.
146
+
147
+ The callback also supports a signature like:
148
+
149
+ ``callback(x, convergence: float=val)``
150
+
151
+ ``val`` represents the fractional value of the population convergence.
152
+ When ``val`` is greater than ``1.0``, the function halts.
153
+
154
+ Introspection is used to determine which of the signatures is invoked.
155
+
156
+ Global minimization will halt if the callback raises ``StopIteration``
157
+ or returns ``True``; any polishing is still carried out.
158
+
159
+ .. versionchanged:: 1.12.0
160
+ callback accepts the ``intermediate_result`` keyword.
161
+
162
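+ For example, a minimal callback using the newer signature might
+ record the best objective value seen at each generation (a sketch;
+ ``history`` is an illustrative name, not part of the API)::
+
+ history = []
+ def track_progress(intermediate_result):
+ history.append(intermediate_result.fun)
+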
+ polish : bool, optional
163
+ If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`
164
+ method is used to polish the best population member at the end, which
165
+ can improve the minimization slightly. If a constrained problem is
166
+ being studied then the `trust-constr` method is used instead. For large
167
+ problems with many constraints, polishing can take a long time due to
168
+ the Jacobian computations.
169
+ init : str or array-like, optional
170
+ Specify which type of population initialization is performed. Should be
171
+ one of:
172
+
173
+ - 'latinhypercube'
174
+ - 'sobol'
175
+ - 'halton'
176
+ - 'random'
177
+ - array specifying the initial population. The array should have
178
+ shape ``(S, N)``, where S is the total population size and N is
179
+ the number of parameters.
180
+ `init` is clipped to `bounds` before use.
181
+
182
+ The default is 'latinhypercube'. Latin Hypercube sampling tries to
183
+ maximize coverage of the available parameter space.
184
+
185
+ 'sobol' and 'halton' are superior alternatives that maximize coverage of
186
+ the parameter space even more. 'sobol' will enforce an initial population
187
+ size which is calculated as the next power of 2 after
188
+ ``popsize * (N - N_equal)``. 'halton' has no requirements but is a bit
189
+ less efficient. See `scipy.stats.qmc` for more details.
190
+
191
+ 'random' initializes the population randomly - this has the drawback
192
+ that clustering can occur, preventing the whole of parameter space
193
+ being covered. An array can be used to specify a population,
194
+ for example, to create a tight bunch of initial guesses in a location
195
+ where the solution is known to exist, thereby reducing time for
196
+ convergence.
197
+ atol : float, optional
198
+ Absolute tolerance for convergence, the solving stops when
199
+ ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
200
+ where `atol` and `tol` are the absolute and relative tolerance,
201
+ respectively.
202
+ updating : {'immediate', 'deferred'}, optional
203
+ If ``'immediate'``, the best solution vector is continuously updated
204
+ within a single generation [4]_. This can lead to faster convergence as
205
+ trial vectors can take advantage of continuous improvements in the best
206
+ solution.
207
+ With ``'deferred'``, the best solution vector is updated once per
208
+ generation. Only ``'deferred'`` is compatible with parallelization or
209
+ vectorization, and the `workers` and `vectorized` keywords can
210
+ override this option.
211
+
212
+ .. versionadded:: 1.2.0
213
+
214
+ workers : int or map-like callable, optional
215
+ If `workers` is an int the population is subdivided into `workers`
216
+ sections and evaluated in parallel
217
+ (uses `multiprocessing.Pool <multiprocessing>`).
218
+ Supply -1 to use all available CPU cores.
219
+ Alternatively supply a map-like callable, such as
220
+ `multiprocessing.Pool.map` for evaluating the population in parallel.
221
+ This evaluation is carried out as ``workers(func, iterable)``.
222
+ This option will override the `updating` keyword to
223
+ ``updating='deferred'`` if ``workers != 1``.
224
+ This option overrides the `vectorized` keyword if ``workers != 1``.
225
+ Requires that `func` be pickleable.
226
+
227
+ .. versionadded:: 1.2.0
228
+
229
+ constraints : {NonlinearConstraint, LinearConstraint, Bounds}
230
+ Constraints on the solver, over and above those applied by the `bounds`
231
+ kwd. Uses the approach by Lampinen [5]_.
232
+
233
+ .. versionadded:: 1.4.0
234
+
235
+ x0 : None or array-like, optional
236
+ Provides an initial guess to the minimization. Once the population has
237
+ been initialized this vector replaces the first (best) member. This
238
+ replacement is done even if `init` is given an initial population.
239
+ ``x0.shape == (N,)``.
240
+
241
+ .. versionadded:: 1.7.0
242
+
243
+ integrality : 1-D array, optional
244
+ For each decision variable, a boolean value indicating whether the
245
+ decision variable is constrained to integer values. The array is
246
+ broadcast to ``(N,)``.
247
+ If any decision variables are constrained to be integral, they will not
248
+ be changed during polishing.
249
+ Only integer values lying between the lower and upper bounds are used.
250
+ If there are no integer values lying between the bounds then a
251
+ `ValueError` is raised.
252
+
253
+ .. versionadded:: 1.9.0
254
+
255
+ vectorized : bool, optional
256
+ If ``vectorized is True``, `func` is sent an `x` array with
257
+ ``x.shape == (N, S)``, and is expected to return an array of shape
258
+ ``(S,)``, where `S` is the number of solution vectors to be calculated.
259
+ If constraints are applied, each of the functions used to construct
260
+ a `Constraint` object should accept an `x` array with
261
+ ``x.shape == (N, S)``, and return an array of shape ``(M, S)``, where
262
+ `M` is the number of constraint components.
263
+ This option is an alternative to the parallelization offered by
264
+ `workers`, and may help in optimization speed by reducing interpreter
265
+ overhead from multiple function calls. This keyword is ignored if
266
+ ``workers != 1``.
267
+ This option will override the `updating` keyword to
268
+ ``updating='deferred'``.
269
+ See the notes section for further discussion on when to use
270
+ ``'vectorized'``, and when to use ``'workers'``.
271
+
272
+ .. versionadded:: 1.9.0
273
+
274
+ Returns
275
+ -------
276
+ res : OptimizeResult
277
+ The optimization result represented as an `OptimizeResult` object.
278
+ Important attributes are: ``x`` the solution array, ``success`` a
279
+ Boolean flag indicating if the optimizer exited successfully,
280
+ ``message`` which describes the cause of the termination,
281
+ ``population`` the solution vectors present in the population, and
282
+ ``population_energies`` the value of the objective function for each
283
+ entry in ``population``.
284
+ See `OptimizeResult` for a description of other attributes. If `polish`
285
+ was employed, and a lower minimum was obtained by the polishing, then
286
+ OptimizeResult also contains the ``jac`` attribute.
287
+ If the eventual solution does not satisfy the applied constraints
288
+ ``success`` will be `False`.
289
+
290
+ Notes
291
+ -----
292
+ Differential evolution is a stochastic population based method that is
293
+ useful for global optimization problems. At each pass through the
294
+ population the algorithm mutates each candidate solution by mixing with
295
+ other candidate solutions to create a trial candidate. There are several
296
+ strategies [3]_ for creating trial candidates, which suit some problems
297
+ more than others. The 'best1bin' strategy is a good starting point for
298
+ many systems. In this strategy two members of the population are randomly
299
+ chosen. Their difference is used to mutate the best member (the 'best' in
300
+ 'best1bin'), :math:`x_0`, so far:
301
+
302
+ .. math::
303
+
304
+ b' = x_0 + mutation * (x_{r_0} - x_{r_1})
305
+
306
+ A trial vector is then constructed. Starting with a randomly chosen ith
307
+ parameter the trial is sequentially filled (in modulo) with parameters
308
+ from ``b'`` or the original candidate. The choice of whether to use ``b'``
309
+ or the original candidate is made with a binomial distribution (the 'bin'
310
+ in 'best1bin') - a random number in [0, 1) is generated. If this number is
311
+ less than the `recombination` constant then the parameter is loaded from
312
+ ``b'``, otherwise it is loaded from the original candidate. The final
313
+ parameter is always loaded from ``b'``. Once the trial candidate is built
314
+ its fitness is assessed. If the trial is better than the original candidate
315
+ then it takes its place. If it is also better than the best overall
316
+ candidate it also replaces that.
317
+
318
+ The other strategies available are outlined in Qiang and
319
+ Mitchell (2014) [3]_.
320
+
321
+ .. math::
322
+ rand1* : b' = x_{r_0} + mutation*(x_{r_1} - x_{r_2})
323
+
324
+ rand2* : b' = x_{r_0} + mutation*(x_{r_1} + x_{r_2}
325
+ - x_{r_3} - x_{r_4})
326
+
327
+ best1* : b' = x_0 + mutation*(x_{r_0} - x_{r_1})
328
+
329
+ best2* : b' = x_0 + mutation*(x_{r_0} + x_{r_1}
330
+ - x_{r_2} - x_{r_3})
331
+
332
+ currenttobest1* : b' = x_i + mutation*(x_0 - x_i
333
+ + x_{r_0} - x_{r_1})
334
+
335
+ randtobest1* : b' = x_{r_0} + mutation*(x_0 - x_{r_0}
336
+ + x_{r_1} - x_{r_2})
337
+
338
+ where the integers :math:`r_0, r_1, r_2, r_3, r_4` are chosen randomly
339
+ from the interval [0, NP) with `NP` being the total population size and
340
+ the original candidate having index `i`. The user can fully customize the
341
+ generation of the trial candidates by supplying a callable to ``strategy``.
342
+
343
+ To improve your chances of finding a global minimum use higher `popsize`
344
+ values, with higher `mutation` (and dithering), but lower `recombination`
345
+ values. This has the effect of widening the search radius, but slowing
346
+ convergence.
347
+
348
+ By default the best solution vector is updated continuously within a single
349
+ iteration (``updating='immediate'``). This is a modification [4]_ of the
350
+ original differential evolution algorithm which can lead to faster
351
+ convergence as trial vectors can immediately benefit from improved
352
+ solutions. To use the original Storn and Price behaviour, updating the best
353
+ solution once per iteration, set ``updating='deferred'``.
354
+ The ``'deferred'`` approach is compatible with both parallelization and
355
+ vectorization (``'workers'`` and ``'vectorized'`` keywords). These may
356
+ improve minimization speed by using computer resources more efficiently.
357
+ The ``'workers'`` keyword distributes calculations over multiple processors. By
358
+ default the Python `multiprocessing` module is used, but other approaches
359
+ are also possible, such as the Message Passing Interface (MPI) used on
360
+ clusters [6]_ [7]_. The overhead from these approaches (creating new
361
+ Processes, etc) may be significant, meaning that computational speed
362
+ doesn't necessarily scale with the number of processors used.
363
+ Parallelization is best suited to computationally expensive objective
364
+ functions. If the objective function is less expensive, then
365
+ ``'vectorized'`` may aid by only calling the objective function once per
366
+ iteration, rather than multiple times for all the population members; the
367
+ interpreter overhead is reduced.
368
+
369
+ .. versionadded:: 0.15.0
370
+
371
+ References
372
+ ----------
373
+ .. [1] Differential evolution, Wikipedia,
374
+ http://en.wikipedia.org/wiki/Differential_evolution
375
+ .. [2] Storn, R and Price, K, Differential Evolution - a Simple and
376
+ Efficient Heuristic for Global Optimization over Continuous Spaces,
377
+ Journal of Global Optimization, 1997, 11, 341 - 359.
378
+ .. [3] Qiang, J., Mitchell, C., A Unified Differential Evolution Algorithm
379
+ for Global Optimization, 2014, https://www.osti.gov/servlets/purl/1163659
380
+ .. [4] Wormington, M., Panaccione, C., Matney, K. M., Bowen, D. K.,
381
+ Characterization of structures from X-ray scattering data using
382
+ genetic algorithms, Phil. Trans. R. Soc. Lond. A, 1999, 357,
383
+ 2827-2848
384
+ .. [5] Lampinen, J., A constraint handling approach for the differential
385
+ evolution algorithm. Proceedings of the 2002 Congress on
386
+ Evolutionary Computation. CEC'02 (Cat. No. 02TH8600). Vol. 2. IEEE,
387
+ 2002.
388
+ .. [6] https://mpi4py.readthedocs.io/en/stable/
389
+ .. [7] https://schwimmbad.readthedocs.io/en/latest/
390
+
391
+
392
+ Examples
393
+ --------
394
+ Let us consider the problem of minimizing the Rosenbrock function. This
395
+ function is implemented in `rosen` in `scipy.optimize`.
396
+
397
+ >>> import numpy as np
398
+ >>> from scipy.optimize import rosen, differential_evolution
399
+ >>> bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)]
400
+ >>> result = differential_evolution(rosen, bounds)
401
+ >>> result.x, result.fun
402
+ (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)
403
+
404
+ Now repeat, but with parallelization.
405
+
406
+ >>> result = differential_evolution(rosen, bounds, updating='deferred',
407
+ ... workers=2)
408
+ >>> result.x, result.fun
409
+ (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)
410
+
411
+ Let's do a constrained minimization.
412
+
413
+ >>> from scipy.optimize import LinearConstraint, Bounds
414
+
415
+ We add the constraint that the sum of ``x[0]`` and ``x[1]`` must be less
416
+ than or equal to 1.9. This is a linear constraint, which may be written
417
+ ``A @ x <= 1.9``, where ``A = array([[1, 1]])``. This can be encoded as
418
+ a `LinearConstraint` instance:
419
+
420
+ >>> lc = LinearConstraint([[1, 1]], -np.inf, 1.9)
421
+
422
+ Specify limits using a `Bounds` object.
423
+
424
+ >>> bounds = Bounds([0., 0.], [2., 2.])
425
+ >>> result = differential_evolution(rosen, bounds, constraints=lc,
426
+ ... seed=1)
427
+ >>> result.x, result.fun
428
+ (array([0.96632622, 0.93367155]), 0.0011352416852625719)
429
+
430
+ Next find the minimum of the Ackley function
431
+ (https://en.wikipedia.org/wiki/Test_functions_for_optimization).
432
+
433
+ >>> def ackley(x):
434
+ ... arg1 = -0.2 * np.sqrt(0.5 * (x[0] ** 2 + x[1] ** 2))
435
+ ... arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi * x[1]))
436
+ ... return -20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e
437
+ >>> bounds = [(-5, 5), (-5, 5)]
438
+ >>> result = differential_evolution(ackley, bounds, seed=1)
439
+ >>> result.x, result.fun
440
+ (array([0., 0.]), 4.440892098500626e-16)
441
+
442
+ The Ackley function is written in a vectorized manner, so the
443
+ ``'vectorized'`` keyword can be employed. Note the reduced number of
444
+ function evaluations.
445
+
446
+ >>> result = differential_evolution(
447
+ ... ackley, bounds, vectorized=True, updating='deferred', seed=1
448
+ ... )
449
+ >>> result.x, result.fun
450
+ (array([0., 0.]), 4.440892098500626e-16)
451
+
452
+ The following custom strategy function mimics 'best1bin':
453
+
454
+ >>> def custom_strategy_fn(candidate, population, rng=None):
455
+ ... parameter_count = population.shape[-1]
456
+ ... mutation, recombination = 0.7, 0.9
457
+ ... trial = np.copy(population[candidate])
458
+ ... fill_point = rng.choice(parameter_count)
459
+ ...
460
+ ... pool = np.arange(len(population))
461
+ ... rng.shuffle(pool)
462
+ ...
463
+ ... # choose two distinct random indices, neither of which
464
+ ... # is equal to candidate.
465
+ ... idxs = []
466
+ ... while len(idxs) < 2 and len(pool) > 0:
467
+ ... idx = pool[0]
468
+ ... pool = pool[1:]
469
+ ... if idx != candidate:
470
+ ... idxs.append(idx)
471
+ ...
472
+ ... r0, r1 = idxs[:2]
473
+ ...
474
+ ... bprime = (population[0] + mutation *
475
+ ... (population[r0] - population[r1]))
476
+ ...
477
+ ... crossovers = rng.uniform(size=parameter_count)
478
+ ... crossovers = crossovers < recombination
479
+ ... crossovers[fill_point] = True
480
+ ... trial = np.where(crossovers, bprime, trial)
481
+ ... return trial
482
+
483
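+ The custom strategy can then be supplied via the `strategy` keyword
+ (a sketch reusing the ``ackley`` problem from above):
+
+ >>> result = differential_evolution(
+ ... ackley, bounds, strategy=custom_strategy_fn, seed=1
+ ... )
+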
+ """
484
+
485
+ # using a context manager means that any created Pool objects are
486
+ # cleared up.
487
+ with DifferentialEvolutionSolver(func, bounds, args=args,
488
+ strategy=strategy,
489
+ maxiter=maxiter,
490
+ popsize=popsize, tol=tol,
491
+ mutation=mutation,
492
+ recombination=recombination,
493
+ seed=seed, polish=polish,
494
+ callback=callback,
495
+ disp=disp, init=init, atol=atol,
496
+ updating=updating,
497
+ workers=workers,
498
+ constraints=constraints,
499
+ x0=x0,
500
+ integrality=integrality,
501
+ vectorized=vectorized) as solver:
502
+ ret = solver.solve()
503
+
504
+ return ret
505
+
506
+
507
+ class DifferentialEvolutionSolver:
508
+
509
+ """This class implements the differential evolution solver
510
+
511
+ Parameters
512
+ ----------
513
+ func : callable
514
+ The objective function to be minimized. Must be in the form
515
+ ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
516
+ and ``args`` is a tuple of any additional fixed parameters needed to
517
+ completely specify the function. The number of parameters, N, is equal
518
+ to ``len(x)``.
519
+ bounds : sequence or `Bounds`
520
+ Bounds for variables. There are two ways to specify the bounds:
521
+
522
+ 1. Instance of `Bounds` class.
523
+ 2. ``(min, max)`` pairs for each element in ``x``, defining the
524
+ finite lower and upper bounds for the optimizing argument of
525
+ `func`.
526
+
527
+ The total number of bounds is used to determine the number of
528
+ parameters, N. If there are parameters whose bounds are equal the total
529
+ number of free parameters is ``N - N_equal``.
530
+ args : tuple, optional
531
+ Any additional fixed parameters needed to
532
+ completely specify the objective function.
533
+ strategy : {str, callable}, optional
534
+ The differential evolution strategy to use. Should be one of:
535
+
536
+ - 'best1bin'
537
+ - 'best1exp'
538
+ - 'rand1bin'
539
+ - 'rand1exp'
540
+ - 'rand2bin'
541
+ - 'rand2exp'
542
+ - 'randtobest1bin'
543
+ - 'randtobest1exp'
544
+ - 'currenttobest1bin'
545
+ - 'currenttobest1exp'
546
+ - 'best2exp'
547
+ - 'best2bin'
548
+
549
+ The default is 'best1bin'. Strategies that may be
550
+ implemented are outlined in 'Notes'.
551
+
552
+ Alternatively the differential evolution strategy can be customized
553
+ by providing a callable that constructs a trial vector. The callable
554
+ must have the form
555
+ ``strategy(candidate: int, population: np.ndarray, rng=None)``,
556
+ where ``candidate`` is an integer specifying which entry of the
557
+ population is being evolved, ``population`` is an array of shape
558
+ ``(S, N)`` containing all the population members (where S is the
559
+ total population size), and ``rng`` is the random number generator
560
+ being used within the solver.
561
+ ``candidate`` will be in the range ``[0, S)``.
562
+ ``strategy`` must return a trial vector with shape `(N,)`. The
563
+ fitness of this trial vector is compared against the fitness of
564
+ ``population[candidate]``.
565
+ maxiter : int, optional
566
+ The maximum number of generations over which the entire population is
567
+ evolved. The maximum number of function evaluations (with no polishing)
568
+ is: ``(maxiter + 1) * popsize * (N - N_equal)``
569
+ popsize : int, optional
570
+ A multiplier for setting the total population size. The population has
571
+ ``popsize * (N - N_equal)`` individuals. This keyword is overridden if
572
+ an initial population is supplied via the `init` keyword. When using
573
+ ``init='sobol'`` the population size is calculated as the next power
574
+ of 2 after ``popsize * (N - N_equal)``.
575
+ tol : float, optional
576
+ Relative tolerance for convergence, the solving stops when
577
+ ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
578
+ where `atol` and `tol` are the absolute and relative tolerance,
579
+ respectively.
580
+ mutation : float or tuple(float, float), optional
581
+ The mutation constant. In the literature this is also known as
582
+ differential weight, being denoted by F.
583
+ If specified as a float it should be in the range [0, 2].
584
+ If specified as a tuple ``(min, max)`` dithering is employed. Dithering
585
+ randomly changes the mutation constant on a generation by generation
586
+ basis. The mutation constant for that generation is taken from
587
+ ``U[min, max)``. Dithering can help speed convergence significantly.
588
+ Increasing the mutation constant increases the search radius, but will
589
+ slow down convergence.
590
+ recombination : float, optional
591
+ The recombination constant, should be in the range [0, 1]. In the
592
+ literature this is also known as the crossover probability, being
593
+ denoted by CR. Increasing this value allows a larger number of mutants
594
+ to progress into the next generation, but at the risk of population
595
+ stability.
596
+ seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
597
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
598
+ singleton is used.
599
+ If `seed` is an int, a new ``RandomState`` instance is used,
600
+ seeded with `seed`.
601
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
602
+ that instance is used.
603
+ Specify `seed` for repeatable minimizations.
604
+ disp : bool, optional
605
+ Prints the evaluated `func` at every iteration.
606
+ callback : callable, optional
607
+ A callable called after each iteration. Has the signature:
608
+
609
+ ``callback(intermediate_result: OptimizeResult)``
610
+
611
+ where ``intermediate_result`` is a keyword parameter containing an
612
+ `OptimizeResult` with attributes ``x`` and ``fun``, the best solution
613
+ found so far and its objective function value. Note that the name
614
+ of the parameter must be ``intermediate_result`` for the callback
615
+ to be passed an `OptimizeResult`.
616
+
617
+ The callback also supports a signature like:
618
+
619
+ ``callback(x, convergence: float=val)``
620
+
621
+ ``val`` represents the fractional value of the population convergence.
622
+ When ``val`` is greater than ``1.0``, the function halts.
623
+
624
+ Introspection is used to determine which of the signatures is invoked.
625
+
626
+ Global minimization will halt if the callback raises ``StopIteration``
627
+ or returns ``True``; any polishing is still carried out.
628
+
629
+ .. versionchanged:: 1.12.0
630
+ callback accepts the ``intermediate_result`` keyword.
631
+
632
+ polish : bool, optional
633
+ If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`
634
+ method is used to polish the best population member at the end, which
635
+ can improve the minimization slightly. If a constrained problem is
636
+ being studied then the `trust-constr` method is used instead. For large
637
+ problems with many constraints, polishing can take a long time due to
638
+ the Jacobian computations.
639
+ maxfun : int, optional
640
+ Set the maximum number of function evaluations. However, it probably
641
+ makes more sense to set `maxiter` instead.
642
+ init : str or array-like, optional
643
+ Specify which type of population initialization is performed. Should be
644
+ one of:
645
+
646
+ - 'latinhypercube'
647
+ - 'sobol'
648
+ - 'halton'
649
+ - 'random'
650
+ - array specifying the initial population. The array should have
651
+ shape ``(S, N)``, where S is the total population size and
652
+ N is the number of parameters.
653
+ `init` is clipped to `bounds` before use.
654
+
655
+ The default is 'latinhypercube'. Latin Hypercube sampling tries to
656
+ maximize coverage of the available parameter space.
657
+
658
+ 'sobol' and 'halton' are superior alternatives that maximize coverage of
659
+ the parameter space even more. 'sobol' will enforce an initial population
660
+ size which is calculated as the next power of 2 after
661
+ ``popsize * (N - N_equal)``. 'halton' has no requirements but is a bit
662
+ less efficient. See `scipy.stats.qmc` for more details.
663
+
664
+ 'random' initializes the population randomly - this has the drawback
665
+ that clustering can occur, preventing the whole of parameter space
666
+ being covered. An array can be used to specify a population,
667
+ for example, to create a tight bunch of initial guesses in a location
668
+ where the solution is known to exist, thereby reducing time for
669
+ convergence.
670
+ atol : float, optional
671
+ Absolute tolerance for convergence, the solving stops when
672
+ ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
673
+ where `atol` and `tol` are the absolute and relative tolerance,
674
+ respectively.
675
+ updating : {'immediate', 'deferred'}, optional
676
+ If ``'immediate'``, the best solution vector is continuously updated
677
+ within a single generation [4]_. This can lead to faster convergence as
678
+ trial vectors can take advantage of continuous improvements in the best
679
+ solution.
680
+ With ``'deferred'``, the best solution vector is updated once per
681
+ generation. Only ``'deferred'`` is compatible with parallelization or
682
+ vectorization, and the `workers` and `vectorized` keywords can
683
+ override this option.
684
+ workers : int or map-like callable, optional
685
+ If `workers` is an int the population is subdivided into `workers`
686
+ sections and evaluated in parallel
687
+ (uses `multiprocessing.Pool <multiprocessing>`).
688
+ Supply `-1` to use all cores available to the Process.
689
+ Alternatively supply a map-like callable, such as
690
+ `multiprocessing.Pool.map` for evaluating the population in parallel.
691
+ This evaluation is carried out as ``workers(func, iterable)``.
692
+ This option will override the `updating` keyword to
693
+ ``updating='deferred'`` if ``workers != 1``.
694
+ Requires that `func` be pickleable.
695
+ constraints : {NonlinearConstraint, LinearConstraint, Bounds}
696
+ Constraints on the solver, over and above those applied by the `bounds`
697
+ kwd. Uses the approach by Lampinen.
698
+ x0 : None or array-like, optional
699
+ Provides an initial guess to the minimization. Once the population has
700
+ been initialized this vector replaces the first (best) member. This
701
+ replacement is done even if `init` is given an initial population.
702
+ ``x0.shape == (N,)``.
703
+ integrality : 1-D array, optional
704
+ For each decision variable, a boolean value indicating whether the
705
+ decision variable is constrained to integer values. The array is
706
+ broadcast to ``(N,)``.
707
+ If any decision variables are constrained to be integral, they will not
708
+ be changed during polishing.
709
+ Only integer values lying between the lower and upper bounds are used.
710
+ If there are no integer values lying between the bounds then a
711
+ `ValueError` is raised.
712
+ vectorized : bool, optional
713
+ If ``vectorized is True``, `func` is sent an `x` array with
714
+ ``x.shape == (N, S)``, and is expected to return an array of shape
715
+ ``(S,)``, where `S` is the number of solution vectors to be calculated.
716
+ If constraints are applied, each of the functions used to construct
717
+ a `Constraint` object should accept an `x` array with
718
+ ``x.shape == (N, S)``, and return an array of shape ``(M, S)``, where
719
+ `M` is the number of constraint components.
720
+ This option is an alternative to the parallelization offered by
721
+ `workers`, and may help in optimization speed. This keyword is
722
+ ignored if ``workers != 1``.
723
+ This option will override the `updating` keyword to
724
+ ``updating='deferred'``.
725
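+
+ A minimal usage sketch (the `differential_evolution` function is the
+ usual entry point and also cleans up any worker pools; driving the
+ solver directly is equivalent for the default ``workers=1``):
+
+ >>> from scipy.optimize import rosen
+ >>> solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)])
+ >>> result = solver.solve()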
+ """
726
+
727
+ # Dispatch of mutation strategy method (binomial or exponential).
728
+ _binomial = {'best1bin': '_best1',
729
+ 'randtobest1bin': '_randtobest1',
730
+ 'currenttobest1bin': '_currenttobest1',
731
+ 'best2bin': '_best2',
732
+ 'rand2bin': '_rand2',
733
+ 'rand1bin': '_rand1'}
734
+ _exponential = {'best1exp': '_best1',
735
+ 'rand1exp': '_rand1',
736
+ 'randtobest1exp': '_randtobest1',
737
+ 'currenttobest1exp': '_currenttobest1',
738
+ 'best2exp': '_best2',
739
+ 'rand2exp': '_rand2'}
740
+
741
+ __init_error_msg = ("The population initialization method must be one of "
742
+ "'latinhypercube' or 'random', or an array of shape "
743
+ "(S, N) where N is the number of parameters and S>5")
744
+
745
+ def __init__(self, func, bounds, args=(),
746
+ strategy='best1bin', maxiter=1000, popsize=15,
747
+ tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None,
748
+ maxfun=np.inf, callback=None, disp=False, polish=True,
749
+ init='latinhypercube', atol=0, updating='immediate',
750
+ workers=1, constraints=(), x0=None, *, integrality=None,
751
+ vectorized=False):
752
+
753
+ if callable(strategy):
754
+ # a callable strategy is going to be stored in self.strategy anyway
755
+ pass
756
+ elif strategy in self._binomial:
757
+ self.mutation_func = getattr(self, self._binomial[strategy])
758
+ elif strategy in self._exponential:
759
+ self.mutation_func = getattr(self, self._exponential[strategy])
760
+ else:
761
+ raise ValueError("Please select a valid mutation strategy")
762
+ self.strategy = strategy
763
+
764
+ self.callback = _wrap_callback(callback, "differential_evolution")
765
+ self.polish = polish
766
+
767
+ # set the updating / parallelisation options
768
+ if updating in ['immediate', 'deferred']:
769
+ self._updating = updating
770
+
771
+ self.vectorized = vectorized
772
+
773
+ # want to use parallelisation, but updating is immediate
774
+ if workers != 1 and updating == 'immediate':
775
+ warnings.warn("differential_evolution: the 'workers' keyword has"
776
+ " overridden updating='immediate' to"
777
+ " updating='deferred'", UserWarning, stacklevel=2)
778
+ self._updating = 'deferred'
779
+
780
+ if vectorized and workers != 1:
781
+ warnings.warn("differential_evolution: the 'workers' keyword"
782
+ " overrides the 'vectorized' keyword", stacklevel=2)
783
+ self.vectorized = vectorized = False
784
+
785
+ if vectorized and updating == 'immediate':
786
+ warnings.warn("differential_evolution: the 'vectorized' keyword"
787
+ " has overridden updating='immediate' to updating"
788
+ "='deferred'", UserWarning, stacklevel=2)
789
+ self._updating = 'deferred'
790
+
791
+ # an object with a map method.
792
+ if vectorized:
793
+ def maplike_for_vectorized_func(func, x):
794
+ # send an array (N, S) to the user func,
795
+ # expect to receive (S,). Transposition is required because
796
+ # internally the population is held as (S, N)
797
+ return np.atleast_1d(func(x.T))
798
+ workers = maplike_for_vectorized_func
799
+
800
+ self._mapwrapper = MapWrapper(workers)
801
+
802
+ # relative and absolute tolerances for convergence
803
+ self.tol, self.atol = tol, atol
804
+
805
+ # Mutation constant should be in [0, 2). If specified as a sequence
806
+ # then dithering is performed.
807
+ self.scale = mutation
808
+ if (not np.all(np.isfinite(mutation)) or
809
+ np.any(np.array(mutation) >= 2) or
810
+ np.any(np.array(mutation) < 0)):
811
+ raise ValueError('The mutation constant must be a float in '
812
+ 'the range [0, 2), or specified as a tuple(min, max)'
813
+ ' where min < max and min, max are in [0, 2).')
814
+
815
+ self.dither = None
816
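+ # a sequence for `mutation` requests dithering; store it sorted as
+ # [min, max]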
+ if hasattr(mutation, '__iter__') and len(mutation) > 1:
817
+ self.dither = [mutation[0], mutation[1]]
818
+ self.dither.sort()
819
+
820
+ self.cross_over_probability = recombination
821
+
822
+ # we create a wrapped function to allow the use of map (and Pool.map
823
+ # in the future)
824
+ self.func = _FunctionWrapper(func, args)
825
+ self.args = args
826
+
827
+ # convert tuple of lower and upper bounds to limits
828
+ # [(low_0, high_0), ..., (low_n, high_n)]
829
+ # -> [[low_0, ..., low_n], [high_0, ..., high_n]]
830
+ if isinstance(bounds, Bounds):
831
+ self.limits = np.array(new_bounds_to_old(bounds.lb,
832
+ bounds.ub,
833
+ len(bounds.lb)),
834
+ dtype=float).T
835
+ else:
836
+ self.limits = np.array(bounds, dtype='float').T
837
+
838
+ if (np.size(self.limits, 0) != 2 or not
839
+ np.all(np.isfinite(self.limits))):
840
+ raise ValueError('bounds should be a sequence containing finite '
841
+ 'real valued (min, max) pairs for each value'
842
+ ' in x')
843
+
844
+ if maxiter is None: # the default used to be None
845
+ maxiter = 1000
846
+ self.maxiter = maxiter
847
+ if maxfun is None: # the default used to be None
848
+ maxfun = np.inf
849
+ self.maxfun = maxfun
850
+
851
+ # population is scaled to between [0, 1].
852
+ # We have to scale between parameter <-> population
853
+ # save these arguments for _scale_parameter and
854
+ # _unscale_parameter. This is an optimization
855
+ self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1])
856
+ self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1])
857
+ with np.errstate(divide='ignore'):
858
+ # if lb == ub then the following line will be 1/0, which is why
859
+ # we ignore the divide by zero warning. The result from 1/0 is
860
+ # inf, so replace those values by 0.
861
+ self.__recip_scale_arg2 = 1 / self.__scale_arg2
862
+ self.__recip_scale_arg2[~np.isfinite(self.__recip_scale_arg2)] = 0
863
+
864
+ self.parameter_count = np.size(self.limits, 1)
865
+
866
+ self.random_number_generator = check_random_state(seed)
867
+
868
+ # Which parameters are going to be integers?
869
+ if np.any(integrality):
870
+ # user has provided a truth value for integer constraints
871
+ integrality = np.broadcast_to(
872
+ integrality,
873
+ self.parameter_count
874
+ )
875
+ integrality = np.asarray(integrality, bool)
876
+ # For integrality parameters change the limits to only allow
877
+ # integer values lying between the limits.
878
+ lb, ub = np.copy(self.limits)
879
+
880
+ lb = np.ceil(lb)
881
+ ub = np.floor(ub)
882
+ if not (lb[integrality] <= ub[integrality]).all():
883
+ # there's a parameter that doesn't have an integer value
884
+ # lying between the limits
885
+ raise ValueError("One of the integrality constraints does not"
886
+ " have any possible integer values between"
887
+ " the lower/upper bounds.")
888
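+ # the limits are widened by 0.5 on each side, nudged inwards with
+ # nextafter, so that rounding a continuous sample can reach every
+ # integer in [lb, ub] but never lb - 1 or ub + 1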
+ nlb = np.nextafter(lb[integrality] - 0.5, np.inf)
889
+ nub = np.nextafter(ub[integrality] + 0.5, -np.inf)
890
+
891
+ self.integrality = integrality
892
+ self.limits[0, self.integrality] = nlb
893
+ self.limits[1, self.integrality] = nub
894
+ else:
895
+ self.integrality = False
896
+
897
+ # check for equal bounds
898
+ eb = self.limits[0] == self.limits[1]
899
+ eb_count = np.count_nonzero(eb)
900
+
901
+ # default population initialization is a latin hypercube design, but
902
+ # there are other population initializations possible.
903
+ # the minimum is 5 because 'best2bin' requires a population that's at
904
+ # least 5 long
905
+ # 202301 - reduced population size to account for parameters with
906
+ # equal bounds. If there are no varying parameters set N to at least 1
907
+ self.num_population_members = max(
908
+ 5,
909
+ popsize * max(1, self.parameter_count - eb_count)
910
+ )
911
+ self.population_shape = (self.num_population_members,
912
+ self.parameter_count)
913
+
914
+ self._nfev = 0
915
+ # check str first, otherwise comparing str with an array will fail
916
+ if isinstance(init, str):
917
+ if init == 'latinhypercube':
918
+ self.init_population_lhs()
919
+ elif init == 'sobol':
920
+ # must be Ns = 2**m for Sobol'
921
+ n_s = int(2 ** np.ceil(np.log2(self.num_population_members)))
922
+ self.num_population_members = n_s
923
+ self.population_shape = (self.num_population_members,
924
+ self.parameter_count)
925
+ self.init_population_qmc(qmc_engine='sobol')
926
+ elif init == 'halton':
927
+ self.init_population_qmc(qmc_engine='halton')
928
+ elif init == 'random':
929
+ self.init_population_random()
930
+ else:
931
+ raise ValueError(self.__init_error_msg)
932
+ else:
933
+ self.init_population_array(init)
934
+
935
+ if x0 is not None:
936
+ # scale to within unit interval and
937
+ # ensure parameters are within bounds.
938
+ x0_scaled = self._unscale_parameters(np.asarray(x0))
939
+ if ((x0_scaled > 1.0) | (x0_scaled < 0.0)).any():
940
+ raise ValueError(
941
+ "Some entries in x0 lay outside the specified bounds"
942
+ )
943
+ self.population[0] = x0_scaled
944
+
945
+ # infrastructure for constraints
946
+ self.constraints = constraints
947
+ self._wrapped_constraints = []
948
+
949
+ if hasattr(constraints, '__len__'):
950
+ # sequence of constraints, this will also deal with default
951
+ # keyword parameter
952
+ for c in constraints:
953
+ self._wrapped_constraints.append(
954
+ _ConstraintWrapper(c, self.x)
955
+ )
956
+ else:
957
+ self._wrapped_constraints = [
958
+ _ConstraintWrapper(constraints, self.x)
959
+ ]
960
+ self.total_constraints = np.sum(
961
+ [c.num_constr for c in self._wrapped_constraints]
962
+ )
963
+ self.constraint_violation = np.zeros((self.num_population_members, 1))
964
+ self.feasible = np.ones(self.num_population_members, bool)
965
+
966
+ self.disp = disp
967
+
968
+ def init_population_lhs(self):
969
+ """
970
+ Initializes the population with Latin Hypercube Sampling.
971
+ Latin Hypercube Sampling ensures that each parameter is uniformly
972
+ sampled over its range.
973
+ """
974
+ rng = self.random_number_generator
975
+
976
+ # Each parameter range needs to be sampled uniformly. The scaled
977
+ # parameter range ([0, 1)) needs to be split into
978
+ # `self.num_population_members` segments, each of which has the following
979
+ # size:
980
+ segsize = 1.0 / self.num_population_members
981
+
982
+ # Within each segment we sample from a uniform random distribution.
983
+ # We need to do this sampling for each parameter.
984
+ samples = (segsize * rng.uniform(size=self.population_shape)
985
+
986
+ # Offset each segment to cover the entire parameter range [0, 1)
987
+ + np.linspace(0., 1., self.num_population_members,
988
+ endpoint=False)[:, np.newaxis])
989
+
990
+ # Create an array for population of candidate solutions.
991
+ self.population = np.zeros_like(samples)
992
+
993
+ # Initialize population of candidate solutions by permutation of the
994
+ # random samples.
995
+ for j in range(self.parameter_count):
996
+ order = rng.permutation(range(self.num_population_members))
997
+ self.population[:, j] = samples[order, j]
998
+
999
+ # reset population energies
1000
+ self.population_energies = np.full(self.num_population_members,
1001
+ np.inf)
1002
+
1003
+ # reset number of function evaluations counter
1004
+ self._nfev = 0
1005
+
1006
+ def init_population_qmc(self, qmc_engine):
1007
+ """Initializes the population with a QMC method.
1008
+
1009
+ QMC methods ensure that each parameter is uniformly
1010
+ sampled over its range.
1011
+
1012
+ Parameters
1013
+ ----------
1014
+ qmc_engine : str
1015
+ The QMC method to use for initialization. Can be one of
1016
+ ``latinhypercube``, ``sobol`` or ``halton``.
1017
+
1018
+ """
1019
+ from scipy.stats import qmc
1020
+
1021
+ rng = self.random_number_generator
1022
+
1023
+ # Create an array for population of candidate solutions.
1024
+ if qmc_engine == 'latinhypercube':
1025
+ sampler = qmc.LatinHypercube(d=self.parameter_count, seed=rng)
1026
+ elif qmc_engine == 'sobol':
1027
+ sampler = qmc.Sobol(d=self.parameter_count, seed=rng)
1028
+ elif qmc_engine == 'halton':
1029
+ sampler = qmc.Halton(d=self.parameter_count, seed=rng)
1030
+ else:
1031
+ raise ValueError(self.__init_error_msg)
1032
+
1033
+ self.population = sampler.random(n=self.num_population_members)
1034
+
1035
+ # reset population energies
1036
+ self.population_energies = np.full(self.num_population_members,
1037
+ np.inf)
1038
+
1039
+ # reset number of function evaluations counter
1040
+ self._nfev = 0
1041
+
1042
+ def init_population_random(self):
1043
+ """
1044
+ Initializes the population at random. This type of initialization
1045
+ can possess clustering; Latin Hypercube sampling is generally better.
1046
+ """
1047
+ rng = self.random_number_generator
1048
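+ # every parameter of every member is an independent draw from
+ # U[0, 1); unlike LHS/QMC there is no stratification, so samples
+ # may cluster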
+ self.population = rng.uniform(size=self.population_shape)
1049
+
1050
+ # reset population energies
1051
+ self.population_energies = np.full(self.num_population_members,
1052
+ np.inf)
1053
+
1054
+ # reset number of function evaluations counter
1055
+ self._nfev = 0
1056
+
1057
+ def init_population_array(self, init):
1058
+ """
1059
+ Initializes the population with a user specified population.
1060
+
1061
+ Parameters
1062
+ ----------
1063
+ init : np.ndarray
1064
+ Array specifying the initial population. The array should
1065
+ have shape (S, N), where N is the number of parameters.
1066
+ The population is clipped to the lower and upper bounds.
1067
+ """
1068
+ # make sure you're using a float array
1069
+ popn = np.asarray(init, dtype=np.float64)
1070
+
1071
+ if (np.size(popn, 0) < 5 or
1072
+ popn.shape[1] != self.parameter_count or
1073
+ len(popn.shape) != 2):
1074
+ raise ValueError("The population supplied needs to have shape"
1075
+ " (S, len(x)), where S > 4.")
1076
+
1077
+ # unscale values to [0, 1] and clip to bounds, assigning to population
1078
+ self.population = np.clip(self._unscale_parameters(popn), 0, 1)
1079
+
1080
+ self.num_population_members = np.size(self.population, 0)
1081
+
1082
+ self.population_shape = (self.num_population_members,
1083
+ self.parameter_count)
1084
+
1085
+ # reset population energies
1086
+ self.population_energies = np.full(self.num_population_members,
1087
+ np.inf)
1088
+
1089
+ # reset number of function evaluations counter
1090
+ self._nfev = 0
1091
+
1092
+ @property
1093
+ def x(self):
1094
+ """
1095
+ The best solution from the solver
1096
+ """
1097
+ return self._scale_parameters(self.population[0])
1098
+
1099
+ @property
1100
+ def convergence(self):
1101
+ """
1102
+ The standard deviation of the population energies divided by their
1103
+ mean.
1104
+ """
1105
+ if np.any(np.isinf(self.population_energies)):
1106
+ return np.inf
1107
+ return (np.std(self.population_energies) /
1108
+ (np.abs(np.mean(self.population_energies)) + _MACHEPS))
1109
+
1110
+ def converged(self):
1111
+ """
1112
+ Return True if the solver has converged.
1113
+ """
1114
+ if np.any(np.isinf(self.population_energies)):
1115
+ return False
1116
+
1117
+ return (np.std(self.population_energies) <=
1118
+ self.atol +
1119
+ self.tol * np.abs(np.mean(self.population_energies)))
1120
+
1121
+ def solve(self):
1122
+ """
1123
+ Runs the DifferentialEvolutionSolver.
1124
+
1125
+ Returns
1126
+ -------
1127
+ res : OptimizeResult
1128
+ The optimization result represented as an `OptimizeResult` object.
1129
+ Important attributes are: ``x`` the solution array, ``success`` a
1130
+ Boolean flag indicating if the optimizer exited successfully,
1131
+ ``message`` which describes the cause of the termination,
1132
+ ``population`` the solution vectors present in the population, and
1133
+ ``population_energies`` the value of the objective function for
1134
+ each entry in ``population``.
1135
+ See `OptimizeResult` for a description of other attributes. If
1136
+ `polish` was employed, and a lower minimum was obtained by the
1137
+ polishing, then OptimizeResult also contains the ``jac`` attribute.
1138
+ If the eventual solution does not satisfy the applied constraints
1139
+ ``success`` will be `False`.
1140
+ """
1141
+ nit, warning_flag = 0, False
1142
+ status_message = _status_message['success']
1143
+
1144
+ # The population may have just been initialized (all entries are
1145
+ # np.inf). If it has, the initial energies have to be calculated.
1146
+ # Although this is also done in the evolve generator it's possible
1147
+ # that someone can set maxiter=0, at which point we still want the
1148
+ # initial energies to be calculated (the following loop isn't run).
1149
+ if np.all(np.isinf(self.population_energies)):
1150
+ self.feasible, self.constraint_violation = (
1151
+ self._calculate_population_feasibilities(self.population))
1152
+
1153
+ # only work out population energies for feasible solutions
1154
+ self.population_energies[self.feasible] = (
1155
+ self._calculate_population_energies(
1156
+ self.population[self.feasible]))
1157
+
1158
+ self._promote_lowest_energy()
1159
+
1160
+ # do the optimization.
1161
+ for nit in range(1, self.maxiter + 1):
1162
+ # evolve the population by a generation
1163
+ try:
1164
+ next(self)
1165
+ except StopIteration:
1166
+ warning_flag = True
1167
+ if self._nfev > self.maxfun:
1168
+ status_message = _status_message['maxfev']
1169
+ elif self._nfev == self.maxfun:
1170
+ status_message = ('Maximum number of function evaluations'
1171
+ ' has been reached.')
1172
+ break
1173
+
1174
+ if self.disp:
1175
+ print(f"differential_evolution step {nit}: f(x)="
1176
+ f" {self.population_energies[0]}"
1177
+ )
1178
+
1179
+ if self.callback:
1180
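+ # fractional convergence measure passed to legacy-style
+ # callbacks; c >= 1 indicates the tolerance criterion is met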
+ c = self.tol / (self.convergence + _MACHEPS)
1181
+ res = self._result(nit=nit, message="in progress")
1182
+ res.convergence = c
1183
+ try:
1184
+ warning_flag = bool(self.callback(res))
1185
+ except StopIteration:
1186
+ warning_flag = True
1187
+
1188
+ if warning_flag:
1189
+ status_message = 'callback function requested stop early'
1190
+
1191
+ # should the solver terminate?
1192
+ if warning_flag or self.converged():
1193
+ break
1194
+
1195
+ else:
1196
+ status_message = _status_message['maxiter']
1197
+ warning_flag = True
1198
+
1199
+ DE_result = self._result(
1200
+ nit=nit, message=status_message, warning_flag=warning_flag
1201
+ )
1202
+
1203
+ if self.polish and not np.all(self.integrality):
1204
+ # can't polish if all the parameters are integers
1205
+ if np.any(self.integrality):
1206
+ # set the lower/upper bounds equal so that any integrality
1207
+ # constraints work.
1208
+ limits, integrality = self.limits, self.integrality
1209
+ limits[0, integrality] = DE_result.x[integrality]
1210
+ limits[1, integrality] = DE_result.x[integrality]
1211
+
1212
+ polish_method = 'L-BFGS-B'
1213
+
1214
+ if self._wrapped_constraints:
1215
+ polish_method = 'trust-constr'
1216
+
1217
+ constr_violation = self._constraint_violation_fn(DE_result.x)
1218
+ if np.any(constr_violation > 0.):
1219
+ warnings.warn("differential evolution didn't find a "
1220
+ "solution satisfying the constraints, "
1221
+ "attempting to polish from the least "
1222
+ "infeasible solution",
1223
+ UserWarning, stacklevel=2)
1224
+ if self.disp:
1225
+ print(f"Polishing solution with '{polish_method}'")
1226
+ result = minimize(self.func,
1227
+ np.copy(DE_result.x),
1228
+ method=polish_method,
1229
+ bounds=self.limits.T,
1230
+ constraints=self.constraints)
1231
+
1232
+ self._nfev += result.nfev
1233
+ DE_result.nfev = self._nfev
1234
+
1235
+ # Polishing solution is only accepted if there is an improvement in
1236
+ # cost function, the polishing was successful and the solution lies
1237
+ # within the bounds.
1238
+ if (result.fun < DE_result.fun and
1239
+ result.success and
1240
+ np.all(result.x <= self.limits[1]) and
1241
+ np.all(self.limits[0] <= result.x)):
1242
+ DE_result.fun = result.fun
1243
+ DE_result.x = result.x
1244
+ DE_result.jac = result.jac
1245
+ # to keep internal state consistent
1246
+ self.population_energies[0] = result.fun
1247
+ self.population[0] = self._unscale_parameters(result.x)
1248
+
1249
+ if self._wrapped_constraints:
1250
+ DE_result.constr = [c.violation(DE_result.x) for
1251
+ c in self._wrapped_constraints]
1252
+ DE_result.constr_violation = np.max(
1253
+ np.concatenate(DE_result.constr))
1254
+ DE_result.maxcv = DE_result.constr_violation
1255
+ if DE_result.maxcv > 0:
1256
+ # if the result is infeasible then success must be False
1257
+ DE_result.success = False
1258
+ DE_result.message = ("The solution does not satisfy the "
1259
+ f"constraints, MAXCV = {DE_result.maxcv}")
1260
+
1261
+ return DE_result
1262
+
1263
+ def _result(self, **kwds):
1264
+ # form an intermediate OptimizeResult
1265
+ nit = kwds.get('nit', None)
1266
+ message = kwds.get('message', None)
1267
+ warning_flag = kwds.get('warning_flag', False)
1268
+ result = OptimizeResult(
1269
+ x=self.x,
1270
+ fun=self.population_energies[0],
1271
+ nfev=self._nfev,
1272
+ nit=nit,
1273
+ message=message,
1274
+ success=(warning_flag is not True),
1275
+ population=self._scale_parameters(self.population),
1276
+ population_energies=self.population_energies
1277
+ )
1278
+ if self._wrapped_constraints:
1279
+ result.constr = [c.violation(result.x)
1280
+ for c in self._wrapped_constraints]
1281
+ result.constr_violation = np.max(np.concatenate(result.constr))
1282
+ result.maxcv = result.constr_violation
1283
+ if result.maxcv > 0:
1284
+ result.success = False
1285
+
1286
+ return result
1287
+
1288
+ def _calculate_population_energies(self, population):
1289
+ """
1290
+ Calculate the energies of a population.
1291
+
1292
+ Parameters
1293
+ ----------
1294
+ population : ndarray
1295
+ An array of parameter vectors normalised to [0, 1] using lower
1296
+ and upper limits. Has shape ``(np.size(population, 0), N)``.
1297
+
1298
+ Returns
1299
+ -------
1300
+ energies : ndarray
1301
+ An array of energies corresponding to each population member. If
1302
+ `maxfun` would be exceeded during this call, then the number of
1303
+ function evaluations will be reduced and energies will be
1304
+ right-padded with np.inf. Has shape ``(np.size(population, 0),)``
1305
+ """
1306
+ num_members = np.size(population, 0)
1307
+ # S is the number of function evals left to stay under the
1308
+ # maxfun budget
1309
+ S = min(num_members, self.maxfun - self._nfev)
1310
+
1311
+ energies = np.full(num_members, np.inf)
1312
+
1313
+ parameters_pop = self._scale_parameters(population)
1314
+ try:
1315
+ calc_energies = list(
1316
+ self._mapwrapper(self.func, parameters_pop[0:S])
1317
+ )
1318
+ calc_energies = np.squeeze(calc_energies)
1319
+ except (TypeError, ValueError) as e:
1320
+ # wrong number of arguments for _mapwrapper
1321
+ # or wrong length returned from the mapper
1322
+ raise RuntimeError(
1323
+ "The map-like callable must be of the form f(func, iterable), "
1324
+ "returning a sequence of numbers the same length as 'iterable'"
1325
+ ) from e
1326
+
1327
+ if calc_energies.size != S:
1328
+ if self.vectorized:
1329
+ raise RuntimeError("The vectorized function must return an"
1330
+ " array of shape (S,) when given an array"
1331
+ " of shape (len(x), S)")
1332
+ raise RuntimeError("func(x, *args) must return a scalar value")
1333
+
1334
+ energies[0:S] = calc_energies
1335
+
1336
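+ # a vectorized objective evaluates all S members in a single
+ # call, so it consumes only one unit of the maxfun budget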
+ if self.vectorized:
1337
+ self._nfev += 1
1338
+ else:
1339
+ self._nfev += S
1340
+
1341
+ return energies
1342
+
1343
+ def _promote_lowest_energy(self):
1344
+ # swaps 'best solution' into first population entry
1345
+
1346
+ idx = np.arange(self.num_population_members)
1347
+ feasible_solutions = idx[self.feasible]
1348
+ if feasible_solutions.size:
1349
+ # find the best feasible solution
1350
+ idx_t = np.argmin(self.population_energies[feasible_solutions])
1351
+ l = feasible_solutions[idx_t]
1352
+ else:
1353
+ # no solution was feasible, use 'best' infeasible solution, which
1354
+ # will violate constraints the least
1355
+ l = np.argmin(np.sum(self.constraint_violation, axis=1))
1356
+
1357
+ self.population_energies[[0, l]] = self.population_energies[[l, 0]]
1358
+ self.population[[0, l], :] = self.population[[l, 0], :]
1359
+ self.feasible[[0, l]] = self.feasible[[l, 0]]
1360
+ self.constraint_violation[[0, l], :] = (
1361
+ self.constraint_violation[[l, 0], :])
1362
+
1363
+ def _constraint_violation_fn(self, x):
1364
+ """
1365
+ Calculates total constraint violation for all the constraints, for a
1366
+ set of solutions.
1367
+
1368
+ Parameters
1369
+ ----------
1370
+ x : ndarray
1371
+ Solution vector(s). Has shape (S, N), or (N,), where S is the
1372
+ number of solutions to investigate and N is the number of
1373
+ parameters.
1374
+
1375
+ Returns
1376
+ -------
1377
+ cv : ndarray
1378
+ Total violation of constraints. Has shape ``(S, M)``, where M is
1379
+ the total number of constraint components (which is not necessarily
1380
+ equal to len(self._wrapped_constraints)).
1381
+ """
1382
+ # how many solution vectors you're calculating constraint violations
1383
+ # for
1384
+ S = np.size(x) // self.parameter_count
1385
+ _out = np.zeros((S, self.total_constraints))
1386
+ offset = 0
1387
+ for con in self._wrapped_constraints:
1388
+ # the input/output of the (vectorized) constraint function is
1389
+ # {(N, S), (N,)} --> (M, S)
1390
+ # The input to _constraint_violation_fn is (S, N) or (N,), so
1391
+ # transpose to pass it to the constraint. The output is transposed
1392
+ # from (M, S) to (S, M) for further use.
1393
+ c = con.violation(x.T).T
1394
+
1395
+ # The shape of c should be (M,), (1, M), or (S, M). Check for
1396
+ # those shapes, as an incorrect shape indicates that the
1397
+ # user constraint function didn't return the right thing, and
1398
+ # the reshape operation will fail. Intercept the wrong shape
1399
+ # to give a reasonable error message. I'm not sure what failure
1400
+ # modes an inventive user will come up with.
1401
+ if c.shape[-1] != con.num_constr or (S > 1 and c.shape[0] != S):
1402
+ raise RuntimeError("An array returned from a Constraint has"
1403
+ " the wrong shape. If `vectorized is False`"
1404
+ " the Constraint should return an array of"
1405
+ " shape (M,). If `vectorized is True` then"
1406
+ " the Constraint must return an array of"
1407
+ " shape (M, S), where S is the number of"
1408
+ " solution vectors and M is the number of"
1409
+ " constraint components in a given"
1410
+ " Constraint object.")
1411
+
1412
+ # the violation function may return a 1D array, but is it a
1413
+ # sequence of constraints for one solution (S=1, M>=1), or the
1414
+ # value of a single constraint for a sequence of solutions
1415
+ # (S>=1, M=1)
1416
+ c = np.reshape(c, (S, con.num_constr))
1417
+ _out[:, offset:offset + con.num_constr] = c
1418
+ offset += con.num_constr
1419
+
1420
+ return _out
1421
+
1422
+ def _calculate_population_feasibilities(self, population):
1423
+ """
1424
+ Calculate the feasibilities of a population.
1425
+
1426
+ Parameters
1427
+ ----------
1428
+ population : ndarray
1429
+ An array of parameter vectors normalised to [0, 1] using lower
1430
+ and upper limits. Has shape ``(np.size(population, 0), N)``.
1431
+
1432
+ Returns
1433
+ -------
1434
+ feasible, constraint_violation : ndarray, ndarray
1435
+ Boolean array of feasibility for each population member, and an
1436
+ array of the constraint violation for each population member.
1437
+ constraint_violation has shape ``(np.size(population, 0), M)``,
1438
+ where M is the number of constraints.
1439
+ """
1440
+ num_members = np.size(population, 0)
1441
+ if not self._wrapped_constraints:
1442
+ # shortcut for no constraints
1443
+ return np.ones(num_members, bool), np.zeros((num_members, 1))
1444
+
1445
+ # (S, N)
1446
+ parameters_pop = self._scale_parameters(population)
1447
+
1448
+ if self.vectorized:
1449
+ # (S, M)
1450
+ constraint_violation = np.array(
1451
+ self._constraint_violation_fn(parameters_pop)
1452
+ )
1453
+ else:
1454
+ # (S, 1, M)
1455
+ constraint_violation = np.array([self._constraint_violation_fn(x)
1456
+ for x in parameters_pop])
1457
+ # if you use the list comprehension in the line above it will
1458
+ # create an array of shape (S, 1, M), because each iteration
1459
+ # generates an array of (1, M). In comparison the vectorized
1460
+ # version returns (S, M). It's therefore necessary to remove axis 1
1461
+ constraint_violation = constraint_violation[:, 0]
1462
+
1463
+ feasible = ~(np.sum(constraint_violation, axis=1) > 0)
1464
+
1465
+ return feasible, constraint_violation
+
+ def __iter__(self):
+ return self
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ return self._mapwrapper.__exit__(*args)
+
+ def _accept_trial(self, energy_trial, feasible_trial, cv_trial,
+ energy_orig, feasible_orig, cv_orig):
+ """
+ Trial is accepted if:
+ * it satisfies all constraints and provides a lower or equal objective
+ function value, while both the compared solutions are feasible
+ - or -
+ * it is feasible while the original solution is infeasible,
+ - or -
+ * it is infeasible, but provides a lower or equal constraint violation
+ for all constraint functions.
+
+ This test corresponds to section III of Lampinen [1]_.
+
+ Parameters
+ ----------
+ energy_trial : float
+ Energy of the trial solution
+ feasible_trial : bool
+ Feasibility of trial solution
+ cv_trial : array-like
+ Excess constraint violation for the trial solution
+ energy_orig : float
+ Energy of the original solution
+ feasible_orig : bool
+ Feasibility of original solution
+ cv_orig : array-like
+ Excess constraint violation for the original solution
+
+ Returns
+ -------
+ accepted : bool
+
+ """
+ if feasible_orig and feasible_trial:
+ return energy_trial <= energy_orig
+ elif feasible_trial and not feasible_orig:
+ return True
+ elif not feasible_trial and (cv_trial <= cv_orig).all():
+ # cv_trial < cv_orig would imply that both trial and orig are not
+ # feasible
+ return True
+
+ return False
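+
+ # A minimal sketch of the three acceptance rules above, with
+ # illustrative values (`self` omitted; the method uses no state):
+ #
+ #     import numpy as np
+ #     z = np.zeros(1)
+ #     # both feasible, trial energy lower -> accepted
+ #     _accept_trial(0.5, True, z, 1.0, True, z)                  # True
+ #     # trial feasible, original infeasible -> accepted
+ #     _accept_trial(9.0, True, z, 1.0, False, z + 2)             # True
+ #     # both infeasible, trial violation not worse -> accepted
+ #     _accept_trial(np.inf, False, z + 1, np.inf, False, z + 2)  # True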
+
+ def __next__(self):
+ """
+ Evolve the population by a single generation
+
+ Returns
+ -------
+ x : ndarray
+ The best solution from the solver.
+ fun : float
+ Value of objective function obtained from the best solution.
+ """
+ # the population may have just been initialized (all entries are
+ # np.inf). If it has, the initial energies have to be calculated.
+ if np.all(np.isinf(self.population_energies)):
+ self.feasible, self.constraint_violation = (
+ self._calculate_population_feasibilities(self.population))
+
+ # only need to work out population energies for those that are
+ # feasible
+ self.population_energies[self.feasible] = (
+ self._calculate_population_energies(
+ self.population[self.feasible]))
+
+ self._promote_lowest_energy()
+
+ if self.dither is not None:
+ self.scale = self.random_number_generator.uniform(self.dither[0],
+ self.dither[1])
+
+ if self._updating == 'immediate':
+ # update best solution immediately
+ for candidate in range(self.num_population_members):
+ if self._nfev > self.maxfun:
+ raise StopIteration
+
+ # create a trial solution
+ trial = self._mutate(candidate)
+
+ # ensuring that it's in the range [0, 1)
+ self._ensure_constraint(trial)
+
+ # scale from [0, 1) to the actual parameter value
+ parameters = self._scale_parameters(trial)
+
+ # determine the energy of the objective function
+ if self._wrapped_constraints:
+ cv = self._constraint_violation_fn(parameters)
+ feasible = False
+ energy = np.inf
+ if not np.sum(cv) > 0:
+ # solution is feasible
+ feasible = True
+ energy = self.func(parameters)
+ self._nfev += 1
+ else:
+ feasible = True
+ cv = np.atleast_2d([0.])
+ energy = self.func(parameters)
+ self._nfev += 1
+
+ # compare trial and population member
+ if self._accept_trial(energy, feasible, cv,
+ self.population_energies[candidate],
+ self.feasible[candidate],
+ self.constraint_violation[candidate]):
+ self.population[candidate] = trial
+ self.population_energies[candidate] = np.squeeze(energy)
+ self.feasible[candidate] = feasible
+ self.constraint_violation[candidate] = cv
+
+ # if the trial candidate is also better than the best
+ # solution then promote it.
+ if self._accept_trial(energy, feasible, cv,
+ self.population_energies[0],
+ self.feasible[0],
+ self.constraint_violation[0]):
+ self._promote_lowest_energy()
+
+ elif self._updating == 'deferred':
+ # update best solution once per generation
+ if self._nfev >= self.maxfun:
+ raise StopIteration
+
+ # 'deferred' approach, vectorised form.
+ # create trial solutions
+ trial_pop = np.array(
+ [self._mutate(i) for i in range(self.num_population_members)])
+
+ # enforce bounds
+ self._ensure_constraint(trial_pop)
+
+ # determine the energies of the objective function, but only for
+ # feasible trials
+ feasible, cv = self._calculate_population_feasibilities(trial_pop)
+ trial_energies = np.full(self.num_population_members, np.inf)
+
+ # only calculate for feasible entries
+ trial_energies[feasible] = self._calculate_population_energies(
+ trial_pop[feasible])
+
+ # which solutions are 'improved'?
+ loc = [self._accept_trial(*val) for val in
+ zip(trial_energies, feasible, cv, self.population_energies,
+ self.feasible, self.constraint_violation)]
+ loc = np.array(loc)
+ self.population = np.where(loc[:, np.newaxis],
+ trial_pop,
+ self.population)
+ self.population_energies = np.where(loc,
+ trial_energies,
+ self.population_energies)
+ self.feasible = np.where(loc,
+ feasible,
+ self.feasible)
+ self.constraint_violation = np.where(loc[:, np.newaxis],
+ cv,
+ self.constraint_violation)
+
+ # make sure the best solution is updated if updating='deferred'.
+ # put the lowest energy into the best solution position.
+ self._promote_lowest_energy()
+
+ return self.x, self.population_energies[0]
+
+ def _scale_parameters(self, trial):
+ """Scale from a number between 0 and 1 to parameters."""
+ # trial either has shape (N, ) or (L, N), where L is the number of
+ # solutions being scaled
+ scaled = self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2
+ if np.any(self.integrality):
+ i = np.broadcast_to(self.integrality, scaled.shape)
+ scaled[i] = np.round(scaled[i])
+ return scaled
+
+ def _unscale_parameters(self, parameters):
+ """Scale from parameters to a number between 0 and 1."""
+ return (parameters - self.__scale_arg1) * self.__recip_scale_arg2 + 0.5
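+
+ # A minimal sketch of the affine scaling above, assuming bounds
+ # lb=0, ub=10 (so __scale_arg1 = 5, __scale_arg2 = 10,
+ # __recip_scale_arg2 = 0.1) and no integrality:
+ #
+ #     trial = 0.75                      # normalised coordinate
+ #     scaled = 5 + (trial - 0.5) * 10   # -> 7.5, the parameter value
+ #     (scaled - 5) * 0.1 + 0.5          # -> 0.75, round trip recovered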
+
+ def _ensure_constraint(self, trial):
+ """Make sure the parameters lie between the limits."""
+ mask = np.where((trial > 1) | (trial < 0))
+ trial[mask] = self.random_number_generator.uniform(size=mask[0].shape)
+
+ def _mutate(self, candidate):
+ """Create a trial vector based on a mutation strategy."""
+ rng = self.random_number_generator
+
+ if callable(self.strategy):
+ _population = self._scale_parameters(self.population)
+ trial = np.array(
+ self.strategy(candidate, _population, rng=rng), dtype=float
+ )
+ if trial.shape != (self.parameter_count,):
+ raise RuntimeError(
+ "strategy must have signature"
+ " f(candidate: int, population: np.ndarray, rng=None)"
+ " returning an array of shape (N,)"
+ )
+ return self._unscale_parameters(trial)
+
+ trial = np.copy(self.population[candidate])
+ fill_point = rng.choice(self.parameter_count)
+
+ if self.strategy in ['currenttobest1exp', 'currenttobest1bin']:
+ bprime = self.mutation_func(candidate,
+ self._select_samples(candidate, 5))
+ else:
+ bprime = self.mutation_func(self._select_samples(candidate, 5))
+
+ if self.strategy in self._binomial:
+ crossovers = rng.uniform(size=self.parameter_count)
+ crossovers = crossovers < self.cross_over_probability
+ # the last one is always from the bprime vector for binomial
+ # If you fill in modulo with a loop you have to set the last one to
+ # true. If you don't use a loop then you can have any random entry
+ # be True.
+ crossovers[fill_point] = True
+ trial = np.where(crossovers, bprime, trial)
+ return trial
+
+ elif self.strategy in self._exponential:
+ i = 0
+ crossovers = rng.uniform(size=self.parameter_count)
+ crossovers = crossovers < self.cross_over_probability
+ crossovers[0] = True
+ while (i < self.parameter_count and crossovers[i]):
+ trial[fill_point] = bprime[fill_point]
+ fill_point = (fill_point + 1) % self.parameter_count
+ i += 1
+
+ return trial
+
+ def _best1(self, samples):
+ """best1bin, best1exp"""
+ r0, r1 = samples[:2]
+ return (self.population[0] + self.scale *
+ (self.population[r0] - self.population[r1]))
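+
+ # A minimal sketch of the 'best1' rule above with illustrative
+ # normalised population rows and mutation constant F = 0.8:
+ #
+ #     import numpy as np
+ #     population = np.array([[0.5, 0.5],   # index 0 holds the best member
+ #                            [0.2, 0.9],
+ #                            [0.6, 0.1]])
+ #     F = 0.8
+ #     bprime = population[0] + F * (population[1] - population[2])
+ #     # bprime -> array([0.18, 1.14]); values outside [0, 1) are
+ #     # later resampled by _ensure_constraint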
+
+ def _rand1(self, samples):
+ """rand1bin, rand1exp"""
+ r0, r1, r2 = samples[:3]
+ return (self.population[r0] + self.scale *
+ (self.population[r1] - self.population[r2]))
+
+ def _randtobest1(self, samples):
+ """randtobest1bin, randtobest1exp"""
+ r0, r1, r2 = samples[:3]
+ bprime = np.copy(self.population[r0])
+ bprime += self.scale * (self.population[0] - bprime)
+ bprime += self.scale * (self.population[r1] -
+ self.population[r2])
+ return bprime
+
+ def _currenttobest1(self, candidate, samples):
+ """currenttobest1bin, currenttobest1exp"""
+ r0, r1 = samples[:2]
+ bprime = (self.population[candidate] + self.scale *
+ (self.population[0] - self.population[candidate] +
+ self.population[r0] - self.population[r1]))
+ return bprime
+
+ def _best2(self, samples):
+ """best2bin, best2exp"""
+ r0, r1, r2, r3 = samples[:4]
+ bprime = (self.population[0] + self.scale *
+ (self.population[r0] + self.population[r1] -
+ self.population[r2] - self.population[r3]))
+
+ return bprime
+
+ def _rand2(self, samples):
+ """rand2bin, rand2exp"""
+ r0, r1, r2, r3, r4 = samples
+ bprime = (self.population[r0] + self.scale *
+ (self.population[r1] + self.population[r2] -
+ self.population[r3] - self.population[r4]))
+
+ return bprime
+
+ def _select_samples(self, candidate, number_samples):
+ """
+ obtain random integers from range(self.num_population_members),
+ without replacement. You can't have the original candidate either.
+ """
+ pool = np.arange(self.num_population_members)
+ self.random_number_generator.shuffle(pool)
+
+ idxs = []
+ while len(idxs) < number_samples and len(pool) > 0:
+ idx = pool[0]
+ pool = pool[1:]
+ if idx != candidate:
+ idxs.append(idx)
+
+ return idxs
+
+
+ class _ConstraintWrapper:
+ """Object to wrap/evaluate user defined constraints.
+
+ Very similar in practice to `PreparedConstraint`, except that no evaluation
+ of jac/hess is performed (explicit or implicit).
+
+ If created successfully, it will contain the attributes listed below.
+
+ Parameters
+ ----------
+ constraint : {`NonlinearConstraint`, `LinearConstraint`, `Bounds`}
+ Constraint to check and prepare.
+ x0 : array_like
+ Initial vector of independent variables, shape (N,)
+
+ Attributes
+ ----------
+ fun : callable
+ Function defining the constraint wrapped by one of the convenience
+ classes.
+ bounds : 2-tuple
+ Contains lower and upper bounds for the constraints --- lb and ub.
+ These are converted to ndarray and have a size equal to the number of
+ the constraints.
+
+ Notes
+ -----
+ _ConstraintWrapper.fun and _ConstraintWrapper.violation can get sent
+ arrays of shape (N, S) or (N,), where S is the number of vectors of shape
+ (N,) to consider constraints for.
+ """
+ def __init__(self, constraint, x0):
+ self.constraint = constraint
+
+ if isinstance(constraint, NonlinearConstraint):
+ def fun(x):
+ x = np.asarray(x)
+ return np.atleast_1d(constraint.fun(x))
+ elif isinstance(constraint, LinearConstraint):
+ def fun(x):
+ if issparse(constraint.A):
+ A = constraint.A
+ else:
+ A = np.atleast_2d(constraint.A)
+
+ res = A.dot(x)
+ # x either has shape (N, S) or (N)
+ # (M, N) x (N, S) --> (M, S)
+ # (M, N) x (N,) --> (M,)
+ # However, if (M, N) is a matrix then:
+ # (M, N) * (N,) --> (M, 1), we need this to be (M,)
+ if x.ndim == 1 and res.ndim == 2:
+ # deal with case that constraint.A is an np.matrix
+ # see gh20041
+ res = np.asarray(res)[:, 0]
+
+ return res
+ elif isinstance(constraint, Bounds):
+ def fun(x):
+ return np.asarray(x)
+ else:
+ raise ValueError("`constraint` of an unknown type is passed.")
+
+ self.fun = fun
+
+ lb = np.asarray(constraint.lb, dtype=float)
+ ub = np.asarray(constraint.ub, dtype=float)
+
+ x0 = np.asarray(x0)
+
+ # find out the number of constraints
+ f0 = fun(x0)
+ self.num_constr = m = f0.size
+ self.parameter_count = x0.size
+
+ if lb.ndim == 0:
+ lb = np.resize(lb, m)
+ if ub.ndim == 0:
+ ub = np.resize(ub, m)
+
+ self.bounds = (lb, ub)
+
+ def __call__(self, x):
+ return np.atleast_1d(self.fun(x))
+
+ def violation(self, x):
+ """How much the constraint is exceeded by.
+
+ Parameters
+ ----------
+ x : array-like
+ Vector of independent variables, (N, S), where N is number of
+ parameters and S is the number of solutions to be investigated.
+
+ Returns
+ -------
+ excess : array-like
+ How much the constraint is exceeded by, for each of the
+ constraints specified by `_ConstraintWrapper.fun`.
+ Has shape (M, S) where M is the number of constraint components.
+ """
+ # expect ev to have shape (num_constr, S) or (num_constr,)
+ ev = self.fun(np.asarray(x))
+
+ try:
+ excess_lb = np.maximum(self.bounds[0] - ev.T, 0)
+ excess_ub = np.maximum(ev.T - self.bounds[1], 0)
+ except ValueError as e:
+ raise RuntimeError("An array returned from a Constraint has"
+ " the wrong shape. If `vectorized is False`"
+ " the Constraint should return an array of"
+ " shape (M,). If `vectorized is True` then"
+ " the Constraint must return an array of"
+ " shape (M, S), where S is the number of"
+ " solution vectors and M is the number of"
+ " constraint components in a given"
+ " Constraint object.") from e
+
+ v = (excess_lb + excess_ub).T
+ return v
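+
+ # A minimal sketch of `violation` above, assuming a single linear
+ # constraint 2 <= x0 + x1 <= 3 and that `LinearConstraint` is imported
+ # as in this module:
+ #
+ #     import numpy as np
+ #     con = _ConstraintWrapper(LinearConstraint([[1, 1]], 2, 3),
+ #                              np.array([0., 0.]))
+ #     con.violation(np.array([0.5, 0.5]))   # -> array([1.]), below lb by 1
+ #     con.violation(np.array([2.5, 2.5]))   # -> array([2.]), above ub by 2
+ #     con.violation(np.array([1.0, 1.5]))   # -> array([0.]), feasible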
venv/lib/python3.10/site-packages/scipy/optimize/_differentiate.py ADDED
@@ -0,0 +1,669 @@
+ # mypy: disable-error-code="attr-defined"
+ import numpy as np
+ import scipy._lib._elementwise_iterative_method as eim
+ from scipy._lib._util import _RichResult
+
+ _EERRORINCREASE = -1 # used in _differentiate
+
+ def _differentiate_iv(func, x, args, atol, rtol, maxiter, order, initial_step,
+ step_factor, step_direction, preserve_shape, callback):
+ # Input validation for `_differentiate`
+
+ if not callable(func):
+ raise ValueError('`func` must be callable.')
+
+ # x has more complex IV that is taken care of during initialization
+ x = np.asarray(x)
+ dtype = x.dtype if np.issubdtype(x.dtype, np.inexact) else np.float64
+
+ if not np.iterable(args):
+ args = (args,)
+
+ if atol is None:
+ atol = np.finfo(dtype).tiny
+
+ if rtol is None:
+ rtol = np.sqrt(np.finfo(dtype).eps)
+
+ message = 'Tolerances and step parameters must be non-negative scalars.'
+ tols = np.asarray([atol, rtol, initial_step, step_factor])
+ if (not np.issubdtype(tols.dtype, np.number)
+ or np.any(tols < 0)
+ or tols.shape != (4,)):
+ raise ValueError(message)
+ initial_step, step_factor = tols[2:].astype(dtype)
+
+ maxiter_int = int(maxiter)
+ if maxiter != maxiter_int or maxiter <= 0:
+ raise ValueError('`maxiter` must be a positive integer.')
+
+ order_int = int(order)
+ if order_int != order or order <= 0:
+ raise ValueError('`order` must be a positive integer.')
+
+ step_direction = np.sign(step_direction).astype(dtype)
+ x, step_direction = np.broadcast_arrays(x, step_direction)
+ x, step_direction = x[()], step_direction[()]
+
+ message = '`preserve_shape` must be True or False.'
+ if preserve_shape not in {True, False}:
+ raise ValueError(message)
+
+ if callback is not None and not callable(callback):
+ raise ValueError('`callback` must be callable.')
+
+ return (func, x, args, atol, rtol, maxiter_int, order_int, initial_step,
+ step_factor, step_direction, preserve_shape, callback)
+
+
+ def _differentiate(func, x, *, args=(), atol=None, rtol=None, maxiter=10,
+ order=8, initial_step=0.5, step_factor=2.0,
+ step_direction=0, preserve_shape=False, callback=None):
+ """Evaluate the derivative of an elementwise scalar function numerically.
+
+ Parameters
+ ----------
+ func : callable
+ The function whose derivative is desired. The signature must be::
+
+ func(x: ndarray, *fargs) -> ndarray
+
+ where each element of ``x`` is a finite real and ``fargs`` is a tuple,
+ which may contain an arbitrary number of arrays that are broadcastable
+ with `x`. ``func`` must be an elementwise function: each element
+ ``func(x)[i]`` must equal ``func(x[i])`` for all indices ``i``.
+ x : array_like
+ Abscissae at which to evaluate the derivative.
+ args : tuple, optional
+ Additional positional arguments to be passed to `func`. Must be arrays
+ broadcastable with `x`. If the callable to be differentiated requires
+ arguments that are not broadcastable with `x`, wrap that callable with
+ `func`. See Examples.
+ atol, rtol : float, optional
+ Absolute and relative tolerances for the stopping condition: iteration
+ will stop when ``res.error < atol + rtol * abs(res.df)``. The default
+ `atol` is the smallest normal number of the appropriate dtype, and
+ the default `rtol` is the square root of the precision of the
+ appropriate dtype.
+ order : int, default: 8
+ The (positive integer) order of the finite difference formula to be
+ used. Odd integers will be rounded up to the next even integer.
+ initial_step : float, default: 0.5
+ The (absolute) initial step size for the finite difference derivative
+ approximation.
+ step_factor : float, default: 2.0
+ The factor by which the step size is *reduced* in each iteration; i.e.
+ the step size in iteration 1 is ``initial_step/step_factor``. If
+ ``step_factor < 1``, subsequent steps will be greater than the initial
+ step; this may be useful if steps smaller than some threshold are
+ undesirable (e.g. due to subtractive cancellation error).
+ maxiter : int, default: 10
+ The maximum number of iterations of the algorithm to perform. See
+ notes.
+ step_direction : array_like
+ An array representing the direction of the finite difference steps (for
+ use when `x` lies near to the boundary of the domain of the function.)
+ Must be broadcastable with `x` and all `args`.
+ Where 0 (default), central differences are used; where negative (e.g.
+ -1), steps are non-positive; and where positive (e.g. 1), all steps are
+ non-negative.
+ preserve_shape : bool, default: False
+ In the following, "arguments of `func`" refers to the array ``x`` and
+ any arrays within ``fargs``. Let ``shape`` be the broadcasted shape
+ of `x` and all elements of `args` (which is conceptually
+ distinct from ``fargs`` passed into `f`).
+
+ - When ``preserve_shape=False`` (default), `f` must accept arguments
+ of *any* broadcastable shapes.
+
+ - When ``preserve_shape=True``, `f` must accept arguments of shape
+ ``shape`` *or* ``shape + (n,)``, where ``n`` is the number of
+ abscissae at which the function is being evaluated.
+
+ In either case, for each scalar element ``xi`` within `x`, the array
+ returned by `f` must include the scalar ``f(xi)`` at the same index.
+ Consequently, the shape of the output is always the shape of the input
+ ``x``.
+
+ See Examples.
+ callback : callable, optional
+ An optional user-supplied function to be called before the first
+ iteration and after each iteration.
+ Called as ``callback(res)``, where ``res`` is a ``_RichResult``
+ similar to that returned by `_differentiate` (but containing the
+ current iterate's values of all variables). If `callback` raises a
+ ``StopIteration``, the algorithm will terminate immediately and
+ `_differentiate` will return a result.
+
+ Returns
+ -------
+ res : _RichResult
+ An instance of `scipy._lib._util._RichResult` with the following
+ attributes. (The descriptions are written as though the values will be
+ scalars; however, if `func` returns an array, the outputs will be
+ arrays of the same shape.)
+
+ success : bool
+ ``True`` when the algorithm terminated successfully (status ``0``).
+ status : int
+ An integer representing the exit status of the algorithm.
+ ``0`` : The algorithm converged to the specified tolerances.
+ ``-1`` : The error estimate increased, so iteration was terminated.
+ ``-2`` : The maximum number of iterations was reached.
+ ``-3`` : A non-finite value was encountered.
+ ``-4`` : Iteration was terminated by `callback`.
+ ``1`` : The algorithm is proceeding normally (in `callback` only).
+ df : float
+ The derivative of `func` at `x`, if the algorithm terminated
+ successfully.
+ error : float
+ An estimate of the error: the magnitude of the difference between
+ the current estimate of the derivative and the estimate in the
+ previous iteration.
+ nit : int
+ The number of iterations performed.
+ nfev : int
+ The number of points at which `func` was evaluated.
+ x : float
+ The value at which the derivative of `func` was evaluated
+ (after broadcasting with `args` and `step_direction`).
+
+ Notes
+ -----
+ The implementation was inspired by jacobi [1]_, numdifftools [2]_, and
+ DERIVEST [3]_, but the implementation follows the theory of Taylor series
+ more straightforwardly (and arguably naively so).
+ In the first iteration, the derivative is estimated using a finite
+ difference formula of order `order` with maximum step size `initial_step`.
+ Each subsequent iteration, the maximum step size is reduced by
+ `step_factor`, and the derivative is estimated again until a termination
+ condition is reached. The error estimate is the magnitude of the difference
+ between the current derivative approximation and that of the previous
+ iteration.
+
+ The stencils of the finite difference formulae are designed such that
+ abscissae are "nested": after `func` is evaluated at ``order + 1``
+ points in the first iteration, `func` is evaluated at only two new points
+ in each subsequent iteration; ``order - 1`` previously evaluated function
+ values required by the finite difference formula are reused, and two
+ function values (evaluations at the points furthest from `x`) are unused.
+
+ Step sizes are absolute. When the step size is small relative to the
+ magnitude of `x`, precision is lost; for example, if `x` is ``1e20``, the
+ default initial step size of ``0.5`` cannot be resolved. Accordingly,
+ consider using larger initial step sizes for large magnitudes of `x`.
+
+ The default tolerances are challenging to satisfy at points where the
+ true derivative is exactly zero. If the derivative may be exactly zero,
+ consider specifying an absolute tolerance (e.g. ``atol=1e-16``) to
+ improve convergence.
+
+ References
+ ----------
+ [1]_ Hans Dembinski (@HDembinski). jacobi.
+ https://github.com/HDembinski/jacobi
+ [2]_ Per A. Brodtkorb and John D'Errico. numdifftools.
+ https://numdifftools.readthedocs.io/en/latest/
+ [3]_ John D'Errico. DERIVEST: Adaptive Robust Numerical Differentiation.
+ https://www.mathworks.com/matlabcentral/fileexchange/13490-adaptive-robust-numerical-differentiation
+ [4]_ Numerical Differentiation. Wikipedia.
+ https://en.wikipedia.org/wiki/Numerical_differentiation
+
+ Examples
+ --------
+ Evaluate the derivative of ``np.exp`` at several points ``x``.
+
+ >>> import numpy as np
+ >>> from scipy.optimize._differentiate import _differentiate
+ >>> f = np.exp
+ >>> df = np.exp # true derivative
+ >>> x = np.linspace(1, 2, 5)
+ >>> res = _differentiate(f, x)
+ >>> res.df # approximation of the derivative
+ array([2.71828183, 3.49034296, 4.48168907, 5.75460268, 7.3890561 ])
+ >>> res.error # estimate of the error
+ array(
+ [7.12940817e-12, 9.16688947e-12, 1.17594823e-11, 1.50972568e-11, 1.93942640e-11]
+ )
+ >>> abs(res.df - df(x)) # true error
+ array(
+ [3.06421555e-14, 3.01980663e-14, 5.06261699e-14, 6.30606678e-14, 8.34887715e-14]
+ )
+
+ Show the convergence of the approximation as the step size is reduced.
+ Each iteration, the step size is reduced by `step_factor`, so for
+ sufficiently small initial step, each iteration reduces the error by a
+ factor of ``1/step_factor**order`` until finite precision arithmetic
+ inhibits further improvement.
+
+ >>> import matplotlib.pyplot as plt
+ >>> iter = list(range(1, 12)) # maximum iterations
+ >>> hfac = 2 # step size reduction per iteration
+ >>> hdir = [-1, 0, 1] # compare left-, central-, and right- steps
+ >>> order = 4 # order of differentiation formula
+ >>> x = 1
+ >>> ref = df(x)
+ >>> errors = [] # true error
+ >>> for i in iter:
+ ... res = _differentiate(f, x, maxiter=i, step_factor=hfac,
+ ... step_direction=hdir, order=order,
+ ... atol=0, rtol=0) # prevent early termination
+ ... errors.append(abs(res.df - ref))
+ >>> errors = np.array(errors)
+ >>> plt.semilogy(iter, errors[:, 0], label='left differences')
+ >>> plt.semilogy(iter, errors[:, 1], label='central differences')
+ >>> plt.semilogy(iter, errors[:, 2], label='right differences')
+ >>> plt.xlabel('iteration')
+ >>> plt.ylabel('error')
+ >>> plt.legend()
+ >>> plt.show()
+ >>> (errors[1, 1] / errors[0, 1], 1 / hfac**order)
+ (0.06215223140159822, 0.0625)
+
+ The implementation is vectorized over `x`, `step_direction`, and `args`.
+ The function is evaluated once before the first iteration to perform input
+ validation and standardization, and once per iteration thereafter.
+
+ >>> def f(x, p):
+ ... f.nit += 1
+ ... return x**p
+ >>> f.nit = 0
+ >>> def df(x, p):
+ ... return p*x**(p-1)
+ >>> x = np.arange(1, 5)
+ >>> p = np.arange(1, 6).reshape((-1, 1))
+ >>> hdir = np.arange(-1, 2).reshape((-1, 1, 1))
+ >>> res = _differentiate(f, x, args=(p,), step_direction=hdir, maxiter=1)
+ >>> np.allclose(res.df, df(x, p))
+ True
+ >>> res.df.shape
+ (3, 5, 4)
+ >>> f.nit
+ 2
+
+ By default, `preserve_shape` is False, and therefore the callable
+ `f` may be called with arrays of any broadcastable shapes.
+ For example:
+
+ >>> shapes = []
+ >>> def f(x, c):
+ ... shape = np.broadcast_shapes(x.shape, c.shape)
+ ... shapes.append(shape)
+ ... return np.sin(c*x)
+ >>>
+ >>> c = [1, 5, 10, 20]
+ >>> res = _differentiate(f, 0, args=(c,))
+ >>> shapes
+ [(4,), (4, 8), (4, 2), (3, 2), (2, 2), (1, 2)]
+
+ To understand where these shapes are coming from - and to better
+ understand how `_differentiate` computes accurate results - note that
+ higher values of ``c`` correspond with higher frequency sinusoids.
+ The higher frequency sinusoids make the function's derivative change
+ faster, so more function evaluations are required to achieve the target
+ accuracy:
+
+ >>> res.nfev
+ array([11, 13, 15, 17])
+
+ The initial ``shape``, ``(4,)``, corresponds with evaluating the
+ function at a single abscissa and all four frequencies; this is used
+ for input validation and to determine the size and dtype of the arrays
+ that store results. The next shape corresponds with evaluating the
+ function at an initial grid of abscissae and all four frequencies.
+ Successive calls to the function evaluate the function at two more
+ abscissae, increasing the effective order of the approximation by two.
+ However, in later function evaluations, the function is evaluated at
+ fewer frequencies because the corresponding derivative has already
+ converged to the required tolerance. This saves function evaluations to
+ improve performance, but it requires the function to accept arguments of
+ any shape.
+
+ "Vector-valued" functions are unlikely to satisfy this requirement.
+ For example, consider
+
+ >>> def f(x):
+ ... return [x, np.sin(3*x), x+np.sin(10*x), np.sin(20*x)*(x-1)**2]
+
+ This function is not compatible with `_differentiate` as written; for
+ instance, the shape of the output will not be the same as the shape of
+ ``x``. Such a function *could* be converted to a compatible form with
+ the introduction of additional parameters, but this would be
+ inconvenient. In such cases, a simpler solution would be to use
+ `preserve_shape`.
+
+ >>> shapes = []
+ >>> def f(x):
+ ... shapes.append(x.shape)
+ ... x0, x1, x2, x3 = x
+ ... return [x0, np.sin(3*x1), x2+np.sin(10*x2), np.sin(20*x3)*(x3-1)**2]
+ >>>
+ >>> x = np.zeros(4)
+ >>> res = _differentiate(f, x, preserve_shape=True)
+ >>> shapes
+ [(4,), (4, 8), (4, 2), (4, 2), (4, 2), (4, 2)]
+
+ Here, the shape of ``x`` is ``(4,)``. With ``preserve_shape=True``, the
+ function may be called with argument ``x`` of shape ``(4,)`` or ``(4, n)``,
+ and this is what we observe.
+
+ """
+ # TODO (followup):
+ # - investigate behavior at saddle points
+ # - array initial_step / step_factor?
+ # - multivariate functions?
+
+ res = _differentiate_iv(func, x, args, atol, rtol, maxiter, order, initial_step,
+ step_factor, step_direction, preserve_shape, callback)
+ (func, x, args, atol, rtol, maxiter, order,
+ h0, fac, hdir, preserve_shape, callback) = res
+
+ # Initialization
+ # Since f(x) (no step) is not needed for central differences, it may be
+ # possible to eliminate this function evaluation. However, it's useful for
+ # input validation and standardization, and everything else is designed to
+ # reduce function calls, so let's keep it simple.
+ temp = eim._initialize(func, (x,), args, preserve_shape=preserve_shape)
+ func, xs, fs, args, shape, dtype = temp
+ x, f = xs[0], fs[0]
+ df = np.full_like(f, np.nan)
+ # Ideally we'd broadcast the shape of `hdir` in `_elementwise_algo_init`, but
+ # it's simpler to do it here than to generalize `_elementwise_algo_init` further.
+ # `hdir` and `x` are already broadcasted in `_differentiate_iv`, so we know
+ # that `hdir` can be broadcasted to the final shape.
+ hdir = np.broadcast_to(hdir, shape).flatten()
+
+ status = np.full_like(x, eim._EINPROGRESS, dtype=int) # in progress
+ nit, nfev = 0, 1 # one function evaluation performed above
+ # Boolean indices of left, central, right, and (all) one-sided steps
+ il = hdir < 0
+ ic = hdir == 0
+ ir = hdir > 0
+ io = il | ir
+
+ # Most of these attributes are reasonably obvious, but:
+ # - `fs` holds all the function values of all active `x`. The zeroth
+ # axis corresponds with active points `x`, the first axis corresponds
+ # with the different steps (in the order described in
+ # `_differentiate_weights`).
+ # - `terms` (which could probably use a better name) is half the `order`,
+ # which is always even.
+ work = _RichResult(x=x, df=df, fs=f[:, np.newaxis], error=np.nan, h=h0,
+ df_last=np.nan, error_last=np.nan, h0=h0, fac=fac,
+ atol=atol, rtol=rtol, nit=nit, nfev=nfev,
+ status=status, dtype=dtype, terms=(order+1)//2,
+ hdir=hdir, il=il, ic=ic, ir=ir, io=io)
+ # This is the correspondence between terms in the `work` object and the
+ # final result. In this case, the mapping is trivial. Note that `success`
+ # is prepended automatically.
+ res_work_pairs = [('status', 'status'), ('df', 'df'), ('error', 'error'),
+ ('nit', 'nit'), ('nfev', 'nfev'), ('x', 'x')]
+
+ def pre_func_eval(work):
+ """Determine the abscissae at which the function needs to be evaluated.
+
+ See `_differentiate_weights` for a description of the stencil (pattern
+ of the abscissae).
+
+ In the first iteration, there is only one stored function value in
+ `work.fs`, `f(x)`, so we need to evaluate at `order` new points. In
+ subsequent iterations, we evaluate at two new points. Note that
+ `work.x` is always flattened into a 1D array after broadcasting with
+ all `args`, so we add a new axis at the end and evaluate all points
+ in one call to the function.
+
+ For improvement:
+ - Consider measuring the step size actually taken, since `(x + h) - x`
+ is not identically equal to `h` with floating point arithmetic.
+ - Adjust the step size automatically if `x` is too big to resolve the
+ step.
+ - We could probably save some work if there are no central difference
+ steps or no one-sided steps.
+ """
+ n = work.terms # half the order
+ h = work.h # step size
+ c = work.fac # step reduction factor
+ d = c**0.5 # square root of step reduction factor (one-sided stencil)
+ # Note - no need to be careful about dtypes until we allocate `x_eval`
+
+ if work.nit == 0:
+ hc = h / c**np.arange(n)
+ hc = np.concatenate((-hc[::-1], hc))
+ else:
+ hc = np.asarray([-h, h]) / c**(n-1)
+
+ if work.nit == 0:
+ hr = h / d**np.arange(2*n)
+ else:
+ hr = np.asarray([h, h/d]) / c**(n-1)
+
+ n_new = 2*n if work.nit == 0 else 2 # number of new abscissae
+ x_eval = np.zeros((len(work.hdir), n_new), dtype=work.dtype)
+ il, ic, ir = work.il, work.ic, work.ir
+ x_eval[ir] = work.x[ir, np.newaxis] + hr
+ x_eval[ic] = work.x[ic, np.newaxis] + hc
+ x_eval[il] = work.x[il, np.newaxis] - hr
+ return x_eval
+
+ def post_func_eval(x, f, work):
+ """ Estimate the derivative and error from the function evaluations
+
+ As in `pre_func_eval`: in the first iteration, there is only one stored
+ function value in `work.fs`, `f(x)`, so we need to add the `order` new
+ points. In subsequent iterations, we add two new points. The tricky
+ part is getting the order to match that of the weights, which is
+ described in `_differentiate_weights`.
+
+ For improvement:
+ - Change the order of the weights (and steps in `pre_func_eval`) to
+ simplify `work_fc` concatenation and eliminate `fc` concatenation.
+ - It would be simple to do one-step Richardson extrapolation with `df`
+ and `df_last` to increase the order of the estimate and/or improve
+ the error estimate.
+ - Process the function evaluations in a more numerically favorable
+ way. For instance, combining the pairs of central difference evals
+ into a second-order approximation and using Richardson extrapolation
+ to produce a higher order approximation seemed to retain accuracy up
+ to very high order.
+ - Alternatively, we could use `polyfit` like Jacobi. An advantage of
+ fitting polynomial to more points than necessary is improved noise
+ tolerance.
+ """
+ n = work.terms
+ n_new = n if work.nit == 0 else 1
+ il, ic, io = work.il, work.ic, work.io
+
+ # Central difference
+ # `work_fc` is *all* the points at which the function has been evaluated
+ # `fc` is the points we're using *this iteration* to produce the estimate
+ work_fc = (f[ic, :n_new], work.fs[ic, :], f[ic, -n_new:])
+ work_fc = np.concatenate(work_fc, axis=-1)
+ if work.nit == 0:
+ fc = work_fc
+ else:
+ fc = (work_fc[:, :n], work_fc[:, n:n+1], work_fc[:, -n:])
+ fc = np.concatenate(fc, axis=-1)
+
+ # One-sided difference
+ work_fo = np.concatenate((work.fs[io, :], f[io, :]), axis=-1)
+ if work.nit == 0:
+ fo = work_fo
+ else:
+ fo = np.concatenate((work_fo[:, 0:1], work_fo[:, -2*n:]), axis=-1)
+
+ work.fs = np.zeros((len(ic), work.fs.shape[-1] + 2*n_new))
+ work.fs[ic] = work_fc
+ work.fs[io] = work_fo
+
+ wc, wo = _differentiate_weights(work, n)
+ work.df_last = work.df.copy()
+ work.df[ic] = fc @ wc / work.h
+ work.df[io] = fo @ wo / work.h
+ work.df[il] *= -1
+
+ work.h /= work.fac
+ work.error_last = work.error
+ # Simple error estimate - the difference in derivative estimates between
+ # this iteration and the last. This is typically conservative because if
+ # convergence has begun, the true error is much closer to the difference
+ # between the current estimate and the *next* estimate. However,
+ # we could use Richardson extrapolation to produce an error estimate that
+ # is one order higher, and take the difference between that and
+ # `work.df` (which would just be a constant factor that depends on `fac`.)
+ work.error = abs(work.df - work.df_last)
+
+ def check_termination(work):
+ """Terminate due to convergence, non-finite values, or error increase"""
+ stop = np.zeros_like(work.df).astype(bool)
+
+ i = work.error < work.atol + work.rtol*abs(work.df)
+ work.status[i] = eim._ECONVERGED
+ stop[i] = True
+
+ if work.nit > 0:
+ i = ~((np.isfinite(work.x) & np.isfinite(work.df)) | stop)
+ work.df[i], work.status[i] = np.nan, eim._EVALUEERR
+ stop[i] = True
+
+ # With infinite precision, there is a step size below which
+ # all smaller step sizes will reduce the error. But in floating point
+ # arithmetic, catastrophic cancellation will begin to cause the error
+ # to increase again. This heuristic tries to avoid step sizes that are
+ # too small. There may be more theoretically sound approaches for
+ # detecting a step size that minimizes the total error, but this
+ # heuristic seems simple and effective.
+ i = (work.error > work.error_last*10) & ~stop
+ work.status[i] = _EERRORINCREASE
+ stop[i] = True
+
+ return stop
+
+ def post_termination_check(work):
+ return
+
+ def customize_result(res, shape):
+ return shape
+
+ return eim._loop(work, callback, shape, maxiter, func, args, dtype,
+ pre_func_eval, post_func_eval, check_termination,
+ post_termination_check, customize_result, res_work_pairs,
+ preserve_shape)
+
+
+ def _differentiate_weights(work, n):
+ # This produces the weights of the finite difference formula for a given
+ # stencil. In experiments, use of a second-order central difference formula
+ # with Richardson extrapolation was more accurate numerically, but it was
+ # more complicated, and it would have become even more complicated when
+ # adding support for one-sided differences. However, now that all the
+ # function evaluation values are stored, they can be processed in whatever
+ # way is desired to produce the derivative estimate. We leave alternative
+ # approaches to future work. To be more self-contained, here is the theory
+ # for deriving the weights below.
+ #
+ # Recall that the Taylor expansion of a univariate, scalar-valued function
+ # about a point `x` may be expressed as:
+ # f(x + h) = f(x) + f'(x)*h + f''(x)/2!*h**2 + O(h**3)
+ # Suppose we evaluate f(x), f(x+h), and f(x-h). We have:
+ # f(x) = f(x)
+ # f(x + h) = f(x) + f'(x)*h + f''(x)/2!*h**2 + O(h**3)
+ # f(x - h) = f(x) - f'(x)*h + f''(x)/2!*h**2 + O(h**3)
+ # We can solve for weights `wi` such that:
+ # w1*f(x) = w1*(f(x))
+ # + w2*f(x + h) = w2*(f(x) + f'(x)*h + f''(x)/2!*h**2) + O(h**3)
+ # + w3*f(x - h) = w3*(f(x) - f'(x)*h + f''(x)/2!*h**2) + O(h**3)
+ # = 0 + f'(x)*h + 0 + O(h**3)
+ # Then
+ # f'(x) ~ (w1*f(x) + w2*f(x+h) + w3*f(x-h))/h
+ # is a finite difference derivative approximation with error O(h**2),
+ # and so it is said to be a "second-order" approximation. Under certain
+ # conditions (e.g. well-behaved function, `h` sufficiently small), the
+ # error in the approximation will decrease with h**2; that is, if `h` is
+ # reduced by a factor of 2, the error is reduced by a factor of 4.
+ #
+ # By default, we use eighth-order formulae. Our central-difference formula
+ # uses abscissae:
+ # x-h/c**3, x-h/c**2, x-h/c, x-h, x, x+h, x+h/c, x+h/c**2, x+h/c**3
+ # where `c` is the step factor. (Typically, the step factor is greater than
+ # one, so the outermost points - as written above - are actually closest to
+ # `x`.) This "stencil" is chosen so that each iteration, the step can be
+ # reduced by the factor `c`, and most of the function evaluations can be
+ # reused with the new step size. For example, in the next iteration, we
+ # will have:
+ # x-h/c**4, x-h/c**3, x-h/c**2, x-h/c, x, x+h/c, x+h/c**2, x+h/c**3, x+h/c**4
+ # We do not reuse `x-h` and `x+h` for the new derivative estimate.
+ # While this would increase the order of the formula and thus the
+ # theoretical convergence rate, it is also less stable numerically.
+ # (As noted above, there are other ways of processing the values that are
+ # more stable. Thus, even now we store `f(x-h)` and `f(x+h)` in `work.fs`
+ # to simplify future development of this sort of improvement.)
+ #
+ # The (right) one-sided formula is produced similarly using abscissae
+ # x, x+h, x+h/d, x+h/d**2, ..., x+h/d**6, x+h/d**7
+ # where `d` is the square root of `c`. (The left one-sided formula simply
+ # uses -h.) When the step size is reduced by factor `c = d**2`, we have
+ # abscissae:
+ # x, x+h/d**2, x+h/d**3, ..., x+h/d**8, x+h/d**9
+ # `d` is chosen as the square root of `c` so that the rate of the step-size
+ # reduction is the same per iteration as in the central difference case.
+ # Note that because the central difference formulas are inherently of even
+ # order, for simplicity, we use only even-order formulas for one-sided
+ # differences, too.
+
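+ # A minimal sketch of the weight solve below for the simplest case,
+ # n=1 (second order): the Vandermonde system recovers the familiar
+ # central-difference weights [-1/2, 0, 1/2] for abscissae [x-h, x, x+h]:
+ #
+ #     import numpy as np
+ #     i = np.arange(-1, 2)                      # [-1, 0, 1]
+ #     h = np.sign(i) / 2.0**(np.abs(i) - 1.)    # fac=2 -> [-1., 0., 1.]
+ #     A = np.vander(h, increasing=True).T
+ #     b = np.zeros(3)
+ #     b[1] = 1
+ #     np.linalg.solve(A, b)                     # -> array([-0.5, 0., 0.5])
+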
+ # It's possible for the user to specify `fac` in, say, double precision but
+ # `x` and `args` in single precision. `fac` gets converted to single
+ # precision, but we should always use double precision for the intermediate
+ # calculations here to avoid additional error in the weights.
+ fac = work.fac.astype(np.float64)
+
+ # Note that if the user switches back to floating point precision with
+ # `x` and `args`, then `fac` will not necessarily equal the (lower
+ # precision) cached `_differentiate_weights.fac`, and the weights will
+ # need to be recalculated. This could be fixed, but it's late, and of
+ # low consequence.
+ if fac != _differentiate_weights.fac:
+ _differentiate_weights.central = []
+ _differentiate_weights.right = []
+ _differentiate_weights.fac = fac
+
+ if len(_differentiate_weights.central) != 2*n + 1:
+ # Central difference weights. Consider refactoring this; it could
+ # probably be more compact.
+ i = np.arange(-n, n + 1)
+ p = np.abs(i) - 1. # center point has power `p` -1, but sign `s` is 0
+ s = np.sign(i)
+
+ h = s / fac ** p
+ A = np.vander(h, increasing=True).T
+ b = np.zeros(2*n + 1)
+ b[1] = 1
+ weights = np.linalg.solve(A, b)
+
+ # Enforce identities to improve accuracy
+ weights[n] = 0
+ for i in range(n):
+ weights[-i-1] = -weights[i]
+
+ # Cache the weights. We only need to calculate them once unless
+ # the step factor changes.
+ _differentiate_weights.central = weights
+
+ # One-sided difference weights. The left one-sided weights (with
+ # negative steps) are simply the negative of the right one-sided
+ # weights, so no need to compute them separately.
+ i = np.arange(2*n + 1)
+ p = i - 1.
+ s = np.sign(i)
+
+ h = s / np.sqrt(fac) ** p
+ A = np.vander(h, increasing=True).T
+ b = np.zeros(2 * n + 1)
+ b[1] = 1
+ weights = np.linalg.solve(A, b)
+
+ _differentiate_weights.right = weights
+
+ return (_differentiate_weights.central.astype(work.dtype, copy=False),
+ _differentiate_weights.right.astype(work.dtype, copy=False))
+ _differentiate_weights.central = []
+ _differentiate_weights.right = []
+ _differentiate_weights.fac = None
venv/lib/python3.10/site-packages/scipy/optimize/_direct.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (43.5 kB).
 
venv/lib/python3.10/site-packages/scipy/optimize/_dual_annealing.py ADDED
@@ -0,0 +1,715 @@
+ # Dual Annealing implementation.
+ # Copyright (c) 2018 Sylvain Gubian <[email protected]>,
+ # Yang Xiang <[email protected]>
+ # Author: Sylvain Gubian, Yang Xiang, PMP S.A.
+
+ """
+ A Dual Annealing global optimization algorithm
+ """
+
+ import numpy as np
+ from scipy.optimize import OptimizeResult
+ from scipy.optimize import minimize, Bounds
+ from scipy.special import gammaln
+ from scipy._lib._util import check_random_state
+ from scipy.optimize._constraints import new_bounds_to_old
+
+ __all__ = ['dual_annealing']
+
+
+ class VisitingDistribution:
+ """
+ Class used to generate new coordinates based on the distorted
+ Cauchy-Lorentz distribution. Depending on the steps within the strategy
+ chain, the class implements the strategy for generating new location
+ changes.
+
+ Parameters
+ ----------
+ lb : array_like
+ A 1-D NumPy ndarray containing lower bounds of the generated
+ components. Neither NaN nor inf is allowed.
+ ub : array_like
+ A 1-D NumPy ndarray containing upper bounds for the generated
+ components. Neither NaN nor inf is allowed.
+ visiting_param : float
+ Parameter for visiting distribution. Default value is 2.62.
+ Higher values give the visiting distribution a heavier tail; this
+ makes the algorithm jump to more distant regions.
+ The value range is (1, 3]. Its value is fixed for the life of the
+ object.
+ rand_gen : {`~numpy.random.RandomState`, `~numpy.random.Generator`}
+ A `~numpy.random.RandomState`, `~numpy.random.Generator` object
+ for using the current state of the created random generator container.
+
+ """
+ TAIL_LIMIT = 1.e8
+ MIN_VISIT_BOUND = 1.e-10
+
+ def __init__(self, lb, ub, visiting_param, rand_gen):
+ # if you wish to make _visiting_param adjustable during the life of
+ # the object then _factor2, _factor3, _factor5, _d1, _factor6 will
+ # have to be dynamically calculated in `visit_fn`. They're factored
+ # out here so they don't need to be recalculated all the time.
+ self._visiting_param = visiting_param
+ self.rand_gen = rand_gen
+ self.lower = lb
+ self.upper = ub
+ self.bound_range = ub - lb
+
+ # these are invariant numbers unless visiting_param changes
+ self._factor2 = np.exp((4.0 - self._visiting_param) * np.log(
+ self._visiting_param - 1.0))
+ self._factor3 = np.exp((2.0 - self._visiting_param) * np.log(2.0)
+ / (self._visiting_param - 1.0))
+ self._factor4_p = np.sqrt(np.pi) * self._factor2 / (self._factor3 * (
+ 3.0 - self._visiting_param))
+
+ self._factor5 = 1.0 / (self._visiting_param - 1.0) - 0.5
+ self._d1 = 2.0 - self._factor5
+ self._factor6 = np.pi * (1.0 - self._factor5) / np.sin(
+ np.pi * (1.0 - self._factor5)) / np.exp(gammaln(self._d1))
+
+ def visiting(self, x, step, temperature):
+ """ Based on the step in the strategy chain, new coordinates are
+ generated by changing all components at the same time or only
+ one of them; the new values are computed with the visit_fn method
+ """
+ dim = x.size
+ if step < dim:
+ # Changing all coordinates with a new visiting value
+ visits = self.visit_fn(temperature, dim)
+ upper_sample, lower_sample = self.rand_gen.uniform(size=2)
+ visits[visits > self.TAIL_LIMIT] = self.TAIL_LIMIT * upper_sample
+ visits[visits < -self.TAIL_LIMIT] = -self.TAIL_LIMIT * lower_sample
+ x_visit = visits + x
+ a = x_visit - self.lower
+ b = np.fmod(a, self.bound_range) + self.bound_range
+ x_visit = np.fmod(b, self.bound_range) + self.lower
+ x_visit[np.fabs(
+ x_visit - self.lower) < self.MIN_VISIT_BOUND] += 1.e-10
+ else:
+ # Changing only one coordinate at a time based on strategy
+ # chain step
+ x_visit = np.copy(x)
+ visit = self.visit_fn(temperature, 1)[0]
+ if visit > self.TAIL_LIMIT:
+ visit = self.TAIL_LIMIT * self.rand_gen.uniform()
+ elif visit < -self.TAIL_LIMIT:
+ visit = -self.TAIL_LIMIT * self.rand_gen.uniform()
+ index = step - dim
+ x_visit[index] = visit + x[index]
+ a = x_visit[index] - self.lower[index]
+ b = np.fmod(a, self.bound_range[index]) + self.bound_range[index]
+ x_visit[index] = np.fmod(b, self.bound_range[
+ index]) + self.lower[index]
+ if np.fabs(x_visit[index] - self.lower[
+ index]) < self.MIN_VISIT_BOUND:
+ x_visit[index] += self.MIN_VISIT_BOUND
+ return x_visit
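+
+ # A minimal sketch of the double-fmod wrapping above, assuming
+ # lower = 0 and upper = 10 (so bound_range = 10): out-of-bounds
+ # visits are folded back into the box periodically.
+ #
+ #     import numpy as np
+ #     lower, bound_range = 0.0, 10.0
+ #     for x_visit in (12.0, -3.0):
+ #         a = x_visit - lower
+ #         b = np.fmod(a, bound_range) + bound_range
+ #         print(np.fmod(b, bound_range) + lower)   # 2.0, then 7.0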
+
+ def visit_fn(self, temperature, dim):
+ """ Formula Visita from p. 405 of reference [2] """
+ x, y = self.rand_gen.normal(size=(dim, 2)).T
+
+ factor1 = np.exp(np.log(temperature) / (self._visiting_param - 1.0))
+ factor4 = self._factor4_p * factor1
+
+ # sigmax
+ x *= np.exp(-(self._visiting_param - 1.0) * np.log(
+ self._factor6 / factor4) / (3.0 - self._visiting_param))
+
+ den = np.exp((self._visiting_param - 1.0) * np.log(np.fabs(y)) /
+ (3.0 - self._visiting_param))
+
+ return x / den
+
+
+ class EnergyState:
+ """
+ Class used to record the energy state. At any time, it knows the
+ currently used coordinates and the most recent best location.
+
+ Parameters
+ ----------
+ lower : array_like
+ A 1-D NumPy ndarray containing lower bounds for generating initial
+ random components in the `reset` method.
+ upper : array_like
+ A 1-D NumPy ndarray containing upper bounds for generating initial
+ random components in the `reset` method. Neither NaN nor inf is
+ allowed.
+ callback : callable, ``callback(x, f, context)``, optional
+ A callback function which will be called for all minima found.
+ ``x`` and ``f`` are the coordinates and function value of the
+ latest minimum found, and `context` has value in [0, 1, 2]
+ """
+ # Maximum number of trials for generating a valid starting point
+ MAX_REINIT_COUNT = 1000
+
+ def __init__(self, lower, upper, callback=None):
+ self.ebest = None
+ self.current_energy = None
+ self.current_location = None
+ self.xbest = None
+ self.lower = lower
+ self.upper = upper
+ self.callback = callback
+
+ def reset(self, func_wrapper, rand_gen, x0=None):
+ """
+ Initialize current location in the search domain. If `x0` is not
+ provided, a random location within the bounds is generated.
+ """
+ if x0 is None:
+ self.current_location = rand_gen.uniform(self.lower, self.upper,
+ size=len(self.lower))
+ else:
+ self.current_location = np.copy(x0)
+ init_error = True
+ reinit_counter = 0
+ while init_error:
+ self.current_energy = func_wrapper.fun(self.current_location)
+ if self.current_energy is None:
+ raise ValueError('Objective function is returning None')
+ if (not np.isfinite(self.current_energy) or np.isnan(
+ self.current_energy)):
+ if reinit_counter >= EnergyState.MAX_REINIT_COUNT:
+ init_error = False
+ message = (
+ 'Stopping algorithm because the function '
+ 'creates NaN or (+/-) infinity values even when '
+ 'trying new random parameters'
+ )
+ raise ValueError(message)
+ self.current_location = rand_gen.uniform(self.lower,
+ self.upper,
+ size=self.lower.size)
+ reinit_counter += 1
+ else:
+ init_error = False
+ # If first time reset, initialize ebest and xbest
+ if self.ebest is None and self.xbest is None:
+ self.ebest = self.current_energy
+ self.xbest = np.copy(self.current_location)
+ # Otherwise, we keep them in case of reannealing reset
+
+ def update_best(self, e, x, context):
+ self.ebest = e
+ self.xbest = np.copy(x)
+ if self.callback is not None:
+ val = self.callback(x, e, context)
+ if val is not None:
+ if val:
+ return ('Callback function requested to stop early by '
+ 'returning True')
+
+ def update_current(self, e, x):
+ self.current_energy = e
+ self.current_location = np.copy(x)
+
+
+ class StrategyChain:
+ """
+ Class that implements, within a Markov chain, the strategy for location
+ acceptance and local search decision making.
+
+ Parameters
+ ----------
+ acceptance_param : float
+ Parameter for acceptance distribution. It is used to control the
+ probability of acceptance. The lower the acceptance parameter, the
+ smaller the probability of acceptance. Default value is -5.0 with
+ a range (-1e4, -5].
+ visit_dist : VisitingDistribution
+ Instance of `VisitingDistribution` class.
+ func_wrapper : ObjectiveFunWrapper
+ Instance of `ObjectiveFunWrapper` class.
+ minimizer_wrapper : LocalSearchWrapper
+ Instance of `LocalSearchWrapper` class.
+ rand_gen : {None, int, `numpy.random.Generator`,
+ `numpy.random.RandomState`}, optional
+
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+ singleton is used.
+ If `seed` is an int, a new ``RandomState`` instance is used,
+ seeded with `seed`.
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
+ that instance is used.
+ energy_state : EnergyState
+ Instance of `EnergyState` class.
+
+ """
+
+ def __init__(self, acceptance_param, visit_dist, func_wrapper,
+ minimizer_wrapper, rand_gen, energy_state):
+ # Local strategy chain minimum energy and location
+ self.emin = energy_state.current_energy
+ self.xmin = np.array(energy_state.current_location)
+ # Global optimizer state
+ self.energy_state = energy_state
+ # Acceptance parameter
+ self.acceptance_param = acceptance_param
+ # Visiting distribution instance
+ self.visit_dist = visit_dist
+ # Wrapper to objective function
+ self.func_wrapper = func_wrapper
+ # Wrapper to the local minimizer
+ self.minimizer_wrapper = minimizer_wrapper
+ self.not_improved_idx = 0
+ self.not_improved_max_idx = 1000
+ self._rand_gen = rand_gen
+ self.temperature_step = 0
+ self.K = 100 * len(energy_state.current_location)
+
+ def accept_reject(self, j, e, x_visit):
+ r = self._rand_gen.uniform()
+ pqv_temp = 1.0 - ((1.0 - self.acceptance_param) *
+ (e - self.energy_state.current_energy) / self.temperature_step)
+ if pqv_temp <= 0.:
+ pqv = 0.
+ else:
+ pqv = np.exp(np.log(pqv_temp) / (
+ 1. - self.acceptance_param))
+
+ if r <= pqv:
+ # We accept the new location and update state
+ self.energy_state.update_current(e, x_visit)
+ self.xmin = np.copy(self.energy_state.current_location)
+
+ # No improvement for a long time
+ if self.not_improved_idx >= self.not_improved_max_idx:
+ if j == 0 or self.energy_state.current_energy < self.emin:
+ self.emin = self.energy_state.current_energy
+ self.xmin = np.copy(self.energy_state.current_location)
285
+
286
+ def run(self, step, temperature):
287
+ self.temperature_step = temperature / float(step + 1)
288
+ self.not_improved_idx += 1
289
+ for j in range(self.energy_state.current_location.size * 2):
290
+ if j == 0:
291
+ if step == 0:
292
+ self.energy_state_improved = True
293
+ else:
294
+ self.energy_state_improved = False
295
+ x_visit = self.visit_dist.visiting(
296
+ self.energy_state.current_location, j, temperature)
297
+ # Calling the objective function
298
+ e = self.func_wrapper.fun(x_visit)
299
+ if e < self.energy_state.current_energy:
300
+ # We have got a better energy value
301
+ self.energy_state.update_current(e, x_visit)
302
+ if e < self.energy_state.ebest:
303
+ val = self.energy_state.update_best(e, x_visit, 0)
304
+ if val is not None:
305
+ if val:
306
+ return val
307
+ self.energy_state_improved = True
308
+ self.not_improved_idx = 0
309
+ else:
310
+ # We have not improved but do we accept the new location?
311
+ self.accept_reject(j, e, x_visit)
312
+ if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
313
+ return ('Maximum number of function calls reached '
314
+ 'during annealing')
315
+ # End of StrategyChain loop
316
+
317
+ def local_search(self):
318
+ # Decision making for performing a local search
319
+ # based on strategy chain results
320
+ # If the energy has improved, or there has been no improvement for
321
+ # too long, perform a local search from the best strategy chain location
322
+ if self.energy_state_improved:
323
+ # Global energy has improved, let's see if LS improves further
324
+ e, x = self.minimizer_wrapper.local_search(self.energy_state.xbest,
325
+ self.energy_state.ebest)
326
+ if e < self.energy_state.ebest:
327
+ self.not_improved_idx = 0
328
+ val = self.energy_state.update_best(e, x, 1)
329
+ if val is not None:
330
+ if val:
331
+ return val
332
+ self.energy_state.update_current(e, x)
333
+ if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
334
+ return ('Maximum number of function calls reached '
335
+ 'during local search')
336
+ # Check probability of a need to perform a LS even if no improvement
337
+ do_ls = False
338
+ if self.K < 90 * len(self.energy_state.current_location):
339
+ pls = np.exp(self.K * (
340
+ self.energy_state.ebest - self.energy_state.current_energy) /
341
+ self.temperature_step)
342
+ if pls >= self._rand_gen.uniform():
343
+ do_ls = True
344
+ # Global energy not improved, let's see what LS gives
345
+ # on the best strategy chain location
346
+ if self.not_improved_idx >= self.not_improved_max_idx:
347
+ do_ls = True
348
+ if do_ls:
349
+ e, x = self.minimizer_wrapper.local_search(self.xmin, self.emin)
350
+ self.xmin = np.copy(x)
351
+ self.emin = e
352
+ self.not_improved_idx = 0
353
+ self.not_improved_max_idx = self.energy_state.current_location.size
354
+ if e < self.energy_state.ebest:
355
+ val = self.energy_state.update_best(
356
+ self.emin, self.xmin, 2)
357
+ if val is not None:
358
+ if val:
359
+ return val
360
+ self.energy_state.update_current(e, x)
361
+ if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
362
+ return ('Maximum number of function calls reached '
363
+ 'during dual annealing')
364
+
365
+
366
+ class ObjectiveFunWrapper:
367
+
368
+ def __init__(self, func, maxfun=1e7, *args):
369
+ self.func = func
370
+ self.args = args
371
+ # Number of objective function evaluations
372
+ self.nfev = 0
373
+ # Number of gradient function evaluations, if used
374
+ self.ngev = 0
375
+ # Number of Hessian evaluations of the objective function, if used
376
+ self.nhev = 0
377
+ self.maxfun = maxfun
378
+
379
+ def fun(self, x):
380
+ self.nfev += 1
381
+ return self.func(x, *self.args)
382
+
383
+
384
+ class LocalSearchWrapper:
385
+ """
386
+ Class used to wrap around the minimizer used for local search.
387
+ The default local minimizer is the SciPy 'L-BFGS-B' method.
388
+ """
389
+
390
+ LS_MAXITER_RATIO = 6
391
+ LS_MAXITER_MIN = 100
392
+ LS_MAXITER_MAX = 1000
393
+
394
+ def __init__(self, search_bounds, func_wrapper, *args, **kwargs):
395
+ self.func_wrapper = func_wrapper
396
+ self.kwargs = kwargs
397
+ self.jac = self.kwargs.get('jac', None)
398
+ self.minimizer = minimize
399
+ bounds_list = list(zip(*search_bounds))
400
+ self.lower = np.array(bounds_list[0])
401
+ self.upper = np.array(bounds_list[1])
402
+
403
+ # If no minimizer specified, use SciPy minimize with 'L-BFGS-B' method
404
+ if not self.kwargs:
405
+ n = len(self.lower)
406
+ ls_max_iter = min(max(n * self.LS_MAXITER_RATIO,
407
+ self.LS_MAXITER_MIN),
408
+ self.LS_MAXITER_MAX)
409
+ self.kwargs['method'] = 'L-BFGS-B'
410
+ self.kwargs['options'] = {
411
+ 'maxiter': ls_max_iter,
412
+ }
413
+ self.kwargs['bounds'] = list(zip(self.lower, self.upper))
414
+ elif callable(self.jac):
415
+ def wrapped_jac(x):
416
+ return self.jac(x, *args)
417
+ self.kwargs['jac'] = wrapped_jac
418
+
419
+ def local_search(self, x, e):
420
+ # Run local search from the given x location where energy value is e
421
+ x_tmp = np.copy(x)
422
+ mres = self.minimizer(self.func_wrapper.fun, x, **self.kwargs)
423
+ if 'njev' in mres:
424
+ self.func_wrapper.ngev += mres.njev
425
+ if 'nhev' in mres:
426
+ self.func_wrapper.nhev += mres.nhev
427
+ # Check if the result is a valid value
428
+ is_finite = np.all(np.isfinite(mres.x)) and np.isfinite(mres.fun)
429
+ in_bounds = np.all(mres.x >= self.lower) and np.all(
430
+ mres.x <= self.upper)
431
+ is_valid = is_finite and in_bounds
432
+
433
+ # Use the new point only if it is valid and returns a better result
434
+ if is_valid and mres.fun < e:
435
+ return mres.fun, mres.x
436
+ else:
437
+ return e, x_tmp
438
+
439
+
440
+ def dual_annealing(func, bounds, args=(), maxiter=1000,
441
+ minimizer_kwargs=None, initial_temp=5230.,
442
+ restart_temp_ratio=2.e-5, visit=2.62, accept=-5.0,
443
+ maxfun=1e7, seed=None, no_local_search=False,
444
+ callback=None, x0=None):
445
+ """
446
+ Find the global minimum of a function using Dual Annealing.
447
+
448
+ Parameters
449
+ ----------
450
+ func : callable
451
+ The objective function to be minimized. Must be in the form
452
+ ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
453
+ and ``args`` is a tuple of any additional fixed parameters needed to
454
+ completely specify the function.
455
+ bounds : sequence or `Bounds`
456
+ Bounds for variables. There are two ways to specify the bounds:
457
+
458
+ 1. Instance of `Bounds` class.
459
+ 2. Sequence of ``(min, max)`` pairs for each element in `x`.
460
+
461
+ args : tuple, optional
462
+ Any additional fixed parameters needed to completely specify the
463
+ objective function.
464
+ maxiter : int, optional
465
+ The maximum number of global search iterations. Default value is 1000.
466
+ minimizer_kwargs : dict, optional
467
+ Extra keyword arguments to be passed to the local minimizer
468
+ (`minimize`). Some important options could be:
469
+ ``method`` for the minimizer method to use and ``args`` for
470
+ objective function additional arguments.
471
+ initial_temp : float, optional
472
+ The initial temperature; use higher values to facilitate a wider
473
+ search of the energy landscape, allowing dual_annealing to escape
474
+ local minima in which it is trapped. Default value is 5230. Range is
475
+ (0.01, 5.e4].
476
+ restart_temp_ratio : float, optional
477
+ During the annealing process, the temperature decreases; when it
478
+ reaches ``initial_temp * restart_temp_ratio``, the reannealing process
479
+ is triggered. Default value of the ratio is 2e-5. Range is (0, 1).
480
+ visit : float, optional
481
+ Parameter for visiting distribution. Default value is 2.62. Higher
482
+ values give the visiting distribution a heavier tail, which makes
483
+ the algorithm jump to more distant regions. The value range is (1, 3].
484
+ accept : float, optional
485
+ Parameter for acceptance distribution. It is used to control the
486
+ probability of acceptance. The lower the acceptance parameter, the
487
+ smaller the probability of acceptance. Default value is -5.0 with
488
+ a range (-1e4, -5].
489
+ maxfun : int, optional
490
+ Soft limit for the number of objective function calls. If the
491
+ algorithm is in the middle of a local search, this number may be
492
+ exceeded; the algorithm will stop just after the local search is
493
+ done. Default value is 1e7.
494
+ seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
495
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
496
+ singleton is used.
497
+ If `seed` is an int, a new ``RandomState`` instance is used,
498
+ seeded with `seed`.
499
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
500
+ that instance is used.
501
+ Specify `seed` for repeatable minimizations. The random numbers
502
+ generated with this seed only affect the visiting distribution function
503
+ and the generation of new coordinates.
504
+ no_local_search : bool, optional
505
+ If `no_local_search` is set to True, a traditional Generalized
506
+ Simulated Annealing will be performed with no local search
507
+ strategy applied.
508
+ callback : callable, optional
509
+ A callback function with signature ``callback(x, f, context)``,
510
+ which will be called for all minima found.
511
+ ``x`` and ``f`` are the coordinates and function value of the
512
+ latest minimum found, and ``context`` has value in [0, 1, 2], with the
513
+ following meaning:
514
+
515
+ - 0: minimum detected in the annealing process.
516
+ - 1: detection occurred in the local search process.
517
+ - 2: detection done in the dual annealing process.
518
+
519
+ If the callback implementation returns True, the algorithm will stop.
520
+ x0 : ndarray, shape (n,), optional
521
+ Coordinates of a single N-D starting point.
522
+
523
+ Returns
524
+ -------
525
+ res : OptimizeResult
526
+ The optimization result represented as an `OptimizeResult` object.
527
+ Important attributes are: ``x`` the solution array, ``fun`` the value
528
+ of the function at the solution, and ``message`` which describes the
529
+ cause of the termination.
530
+ See `OptimizeResult` for a description of other attributes.
531
+
532
+ Notes
533
+ -----
534
+ This function implements the Dual Annealing optimization. This stochastic
535
+ approach, derived from [3]_, combines the generalization of CSA (Classical
536
+ Simulated Annealing) and FSA (Fast Simulated Annealing) [1]_ [2]_, coupled
537
+ with a strategy for applying a local search on accepted locations [4]_.
538
+ An alternative implementation of this same algorithm is described in [5]_
539
+ and benchmarks are presented in [6]_. This approach introduces an advanced
540
+ method to refine the solution found by the generalized annealing
541
+ process. This algorithm uses a distorted Cauchy-Lorentz visiting
542
+ distribution, with its shape controlled by the parameter :math:`q_{v}`
543
+
544
+ .. math::
545
+
546
+ g_{q_{v}}(\\Delta x(t)) \\propto \\frac{ \\
547
+ \\left[T_{q_{v}}(t) \\right]^{-\\frac{D}{3-q_{v}}}}{ \\
548
+ \\left[{1+(q_{v}-1)\\frac{(\\Delta x(t))^{2}} { \\
549
+ \\left[T_{q_{v}}(t)\\right]^{\\frac{2}{3-q_{v}}}}}\\right]^{ \\
550
+ \\frac{1}{q_{v}-1}+\\frac{D-1}{2}}}
551
+
552
+ Where :math:`t` is the artificial time. This visiting distribution is used
553
+ to generate a trial jump distance :math:`\\Delta x(t)` of variable
554
+ :math:`x(t)` under artificial temperature :math:`T_{q_{v}}(t)`.
555
+
556
+ From the starting point, after calling the visiting distribution
557
+ function, the acceptance probability is computed as follows:
558
+
559
+ .. math::
560
+
561
+ p_{q_{a}} = \\min{\\{1,\\left[1-(1-q_{a}) \\beta \\Delta E \\right]^{ \\
562
+ \\frac{1}{1-q_{a}}}\\}}
563
+
564
+ Where :math:`q_{a}` is an acceptance parameter. For :math:`q_{a}<1`, zero
565
+ acceptance probability is assigned to the cases where
566
+
567
+ .. math::
568
+
569
+ [1-(1-q_{a}) \\beta \\Delta E] < 0
570
+
571
+ The artificial temperature :math:`T_{q_{v}}(t)` is decreased according to
572
+
573
+ .. math::
574
+
575
+ T_{q_{v}}(t) = T_{q_{v}}(1) \\frac{2^{q_{v}-1}-1}{\\left( \\
576
+ 1 + t\\right)^{q_{v}-1}-1}
577
+
578
+ Where :math:`q_{v}` is the visiting parameter.
579
+
580
+ .. versionadded:: 1.2.0
581
+
582
+ References
583
+ ----------
584
+ .. [1] Tsallis C. Possible generalization of Boltzmann-Gibbs
585
+ statistics. Journal of Statistical Physics, 52, 479-487 (1998).
586
+ .. [2] Tsallis C, Stariolo DA. Generalized Simulated Annealing.
587
+ Physica A, 233, 395-406 (1996).
588
+ .. [3] Xiang Y, Sun DY, Fan W, Gong XG. Generalized Simulated
589
+ Annealing Algorithm and Its Application to the Thomson Model.
590
+ Physics Letters A, 233, 216-220 (1997).
591
+ .. [4] Xiang Y, Gong XG. Efficiency of Generalized Simulated
592
+ Annealing. Physical Review E, 62, 4473 (2000).
593
+ .. [5] Xiang Y, Gubian S, Suomela B, Hoeng J. Generalized
594
+ Simulated Annealing for Efficient Global Optimization: the GenSA
595
+ Package for R. The R Journal, Volume 5/1 (2013).
596
+ .. [6] Mullen, K. Continuous Global Optimization in R. Journal of
597
+ Statistical Software, 60(6), 1 - 45, (2014).
598
+ :doi:`10.18637/jss.v060.i06`
599
+
600
+ Examples
601
+ --------
602
+ The following example is a 10-D problem, with many local minima.
603
+ The function involved is called Rastrigin
604
+ (https://en.wikipedia.org/wiki/Rastrigin_function)
605
+
606
+ >>> import numpy as np
607
+ >>> from scipy.optimize import dual_annealing
608
+ >>> func = lambda x: np.sum(x*x - 10*np.cos(2*np.pi*x)) + 10*np.size(x)
609
+ >>> lw = [-5.12] * 10
610
+ >>> up = [5.12] * 10
611
+ >>> ret = dual_annealing(func, bounds=list(zip(lw, up)))
612
+ >>> ret.x
613
+ array([-4.26437714e-09, -3.91699361e-09, -1.86149218e-09, -3.97165720e-09,
614
+ -6.29151648e-09, -6.53145322e-09, -3.93616815e-09, -6.55623025e-09,
615
+ -6.05775280e-09, -5.00668935e-09]) # random
616
+ >>> ret.fun
617
+ 0.000000
618
+
619
+ """
620
+
621
+ if isinstance(bounds, Bounds):
622
+ bounds = new_bounds_to_old(bounds.lb, bounds.ub, len(bounds.lb))
623
+
624
+ if x0 is not None and len(x0) != len(bounds):
625
+ raise ValueError('Bounds size does not match x0')
626
+
627
+ lu = list(zip(*bounds))
628
+ lower = np.array(lu[0])
629
+ upper = np.array(lu[1])
630
+ # Check that restart temperature ratio is correct
631
+ if restart_temp_ratio <= 0. or restart_temp_ratio >= 1.:
632
+ raise ValueError('Restart temperature ratio has to be in range (0, 1)')
633
+ # Checking bounds are valid
634
+ if (np.any(np.isinf(lower)) or np.any(np.isinf(upper)) or np.any(
635
+ np.isnan(lower)) or np.any(np.isnan(upper))):
636
+ raise ValueError('Some bounds values are inf or nan')
637
+ # Checking that bounds are consistent
638
+ if not np.all(lower < upper):
639
+ raise ValueError('Bounds are not consistent: min must be < max')
640
+ # Checking that bounds are the same length
641
+ if len(lower) != len(upper):
642
+ raise ValueError('Bounds do not have the same dimensions')
643
+
644
+ # Wrapper for the objective function
645
+ func_wrapper = ObjectiveFunWrapper(func, maxfun, *args)
646
+
647
+ # minimizer_kwargs has to be a dict, not None
648
+ minimizer_kwargs = minimizer_kwargs or {}
649
+
650
+ minimizer_wrapper = LocalSearchWrapper(
651
+ bounds, func_wrapper, *args, **minimizer_kwargs)
652
+
653
+ # Initialization of random Generator for reproducible runs if seed provided
654
+ rand_state = check_random_state(seed)
655
+ # Initialization of the energy state
656
+ energy_state = EnergyState(lower, upper, callback)
657
+ energy_state.reset(func_wrapper, rand_state, x0)
658
+ # Minimum value of annealing temperature reached to perform
659
+ # re-annealing
660
+ temperature_restart = initial_temp * restart_temp_ratio
661
+ # VisitingDistribution instance
662
+ visit_dist = VisitingDistribution(lower, upper, visit, rand_state)
663
+ # Strategy chain instance
664
+ strategy_chain = StrategyChain(accept, visit_dist, func_wrapper,
665
+ minimizer_wrapper, rand_state, energy_state)
666
+ need_to_stop = False
667
+ iteration = 0
668
+ message = []
669
+ # OptimizeResult object to be returned
670
+ optimize_res = OptimizeResult()
671
+ optimize_res.success = True
672
+ optimize_res.status = 0
673
+
674
+ t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0
675
+ # Run the search loop
676
+ while not need_to_stop:
677
+ for i in range(maxiter):
678
+ # Compute temperature for this step
679
+ s = float(i) + 2.0
680
+ t2 = np.exp((visit - 1) * np.log(s)) - 1.0
681
+ temperature = initial_temp * t1 / t2
682
+ if iteration >= maxiter:
683
+ message.append("Maximum number of iteration reached")
684
+ need_to_stop = True
685
+ break
686
+ # Need a re-annealing process?
687
+ if temperature < temperature_restart:
688
+ energy_state.reset(func_wrapper, rand_state)
689
+ break
690
+ # starting strategy chain
691
+ val = strategy_chain.run(i, temperature)
692
+ if val is not None:
693
+ message.append(val)
694
+ need_to_stop = True
695
+ optimize_res.success = False
696
+ break
697
+ # Possible local search at the end of the strategy chain
698
+ if not no_local_search:
699
+ val = strategy_chain.local_search()
700
+ if val is not None:
701
+ message.append(val)
702
+ need_to_stop = True
703
+ optimize_res.success = False
704
+ break
705
+ iteration += 1
706
+
707
+ # Setting the OptimizeResult values
708
+ optimize_res.x = energy_state.xbest
709
+ optimize_res.fun = energy_state.ebest
710
+ optimize_res.nit = iteration
711
+ optimize_res.nfev = func_wrapper.nfev
712
+ optimize_res.njev = func_wrapper.ngev
713
+ optimize_res.nhev = func_wrapper.nhev
714
+ optimize_res.message = message
715
+ return optimize_res
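
A note on the schedule driven by the ``t1``/``t2`` variables in the search
loop above: the visiting temperature follows
T(t) = T(1) * (2**(qv-1) - 1) / ((1 + t)**(qv-1) - 1) with t = i + 1.
The snippet below is a minimal standalone sketch of that schedule; the
helper name ``visiting_temperature`` is ours for illustration and is not
part of SciPy (the defaults match ``dual_annealing``'s ``initial_temp``
and ``visit``).

import numpy as np

def visiting_temperature(i, initial_temp=5230.0, visit=2.62):
    # Mirrors the t1/t2 computation in the search loop above.
    t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0
    t2 = np.exp((visit - 1) * np.log(float(i) + 2.0)) - 1.0
    return initial_temp * t1 / t2

# Starts at initial_temp for i = 0 and decays quickly:
print([round(visiting_temperature(i), 1) for i in (0, 1, 10, 100)])
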
venv/lib/python3.10/site-packages/scipy/optimize/_group_columns.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (96 kB).
 
venv/lib/python3.10/site-packages/scipy/optimize/_hessian_update_strategy.py ADDED
@@ -0,0 +1,430 @@
1
+ """Hessian update strategies for quasi-Newton optimization methods."""
2
+ import numpy as np
3
+ from numpy.linalg import norm
4
+ from scipy.linalg import get_blas_funcs
5
+ from warnings import warn
6
+
7
+
8
+ __all__ = ['HessianUpdateStrategy', 'BFGS', 'SR1']
9
+
10
+
11
+ class HessianUpdateStrategy:
12
+ """Interface for implementing Hessian update strategies.
13
+
14
+ Many optimization methods make use of Hessian (or inverse Hessian)
15
+ approximations, such as the quasi-Newton methods BFGS, SR1, L-BFGS.
16
+ Some of these approximations, however, do not actually need to store
17
+ the entire matrix or can compute the internal matrix product with a
18
+ given vector in a very efficient manner. This class serves as an
19
+ abstract interface between the optimization algorithm and the
20
+ quasi-Newton update strategies, giving freedom of implementation
21
+ to store and update the internal matrix as efficiently as possible.
22
+ Different choices of initialization and update procedure will result
23
+ in different quasi-Newton strategies.
24
+
25
+ Four methods should be implemented in derived classes: ``initialize``,
26
+ ``update``, ``dot`` and ``get_matrix``.
27
+
28
+ Notes
29
+ -----
30
+ Any instance of a class that implements this interface
31
+ can be accepted by the method ``minimize`` and used by
32
+ the compatible solvers to approximate the Hessian (or
33
+ inverse Hessian) used by the optimization algorithms.
34
+ """
35
+
36
+ def initialize(self, n, approx_type):
37
+ """Initialize internal matrix.
38
+
39
+ Allocate internal memory for storing and updating
40
+ the Hessian or its inverse.
41
+
42
+ Parameters
43
+ ----------
44
+ n : int
45
+ Problem dimension.
46
+ approx_type : {'hess', 'inv_hess'}
47
+ Selects either the Hessian or the inverse Hessian.
48
+ When set to 'hess' the Hessian will be stored and updated.
49
+ When set to 'inv_hess' its inverse will be used instead.
50
+ """
51
+ raise NotImplementedError("The method ``initialize(n, approx_type)``"
52
+ " is not implemented.")
53
+
54
+ def update(self, delta_x, delta_grad):
55
+ """Update internal matrix.
56
+
57
+ Update Hessian matrix or its inverse (depending on how 'approx_type'
58
+ is defined) using information about the last evaluated points.
59
+
60
+ Parameters
61
+ ----------
62
+ delta_x : ndarray
63
+ The difference between two points the gradient
64
+ function has been evaluated at: ``delta_x = x2 - x1``.
65
+ delta_grad : ndarray
66
+ The difference between the gradients:
67
+ ``delta_grad = grad(x2) - grad(x1)``.
68
+ """
69
+ raise NotImplementedError("The method ``update(delta_x, delta_grad)``"
70
+ " is not implemented.")
71
+
72
+ def dot(self, p):
73
+ """Compute the product of the internal matrix with the given vector.
74
+
75
+ Parameters
76
+ ----------
77
+ p : array_like
78
+ 1-D array representing a vector.
79
+
80
+ Returns
81
+ -------
82
+ Hp : array
83
+ 1-D array representing the result of multiplying the approximation
84
+ matrix by the vector p.
85
+ """
86
+ raise NotImplementedError("The method ``dot(p)``"
87
+ " is not implemented.")
88
+
89
+ def get_matrix(self):
90
+ """Return current internal matrix.
91
+
92
+ Returns
93
+ -------
94
+ H : ndarray, shape (n, n)
95
+ Dense matrix containing either the Hessian
96
+ or its inverse (depending on how 'approx_type'
97
+ is defined).
98
+ """
99
+ raise NotImplementedError("The method ``get_matrix(p)``"
100
+ " is not implemented.")
101
+
102
+
103
+ class FullHessianUpdateStrategy(HessianUpdateStrategy):
104
+ """Hessian update strategy with full dimensional internal representation.
105
+ """
106
+ _syr = get_blas_funcs('syr', dtype='d') # Symmetric rank 1 update
107
+ _syr2 = get_blas_funcs('syr2', dtype='d') # Symmetric rank 2 update
108
+ # Symmetric matrix-vector product
109
+ _symv = get_blas_funcs('symv', dtype='d')
110
+
111
+ def __init__(self, init_scale='auto'):
112
+ self.init_scale = init_scale
113
+ # Until initialize is called we can't really use the class,
114
+ # so it makes sense to set everything to None.
115
+ self.first_iteration = None
116
+ self.approx_type = None
117
+ self.B = None
118
+ self.H = None
119
+
120
+ def initialize(self, n, approx_type):
121
+ """Initialize internal matrix.
122
+
123
+ Allocate internal memory for storing and updating
124
+ the Hessian or its inverse.
125
+
126
+ Parameters
127
+ ----------
128
+ n : int
129
+ Problem dimension.
130
+ approx_type : {'hess', 'inv_hess'}
131
+ Selects either the Hessian or the inverse Hessian.
132
+ When set to 'hess' the Hessian will be stored and updated.
133
+ When set to 'inv_hess' its inverse will be used instead.
134
+ """
135
+ self.first_iteration = True
136
+ self.n = n
137
+ self.approx_type = approx_type
138
+ if approx_type not in ('hess', 'inv_hess'):
139
+ raise ValueError("`approx_type` must be 'hess' or 'inv_hess'.")
140
+ # Create matrix
141
+ if self.approx_type == 'hess':
142
+ self.B = np.eye(n, dtype=float)
143
+ else:
144
+ self.H = np.eye(n, dtype=float)
145
+
146
+ def _auto_scale(self, delta_x, delta_grad):
147
+ # Heuristic to scale matrix at first iteration.
148
+ # Described in Nocedal and Wright "Numerical Optimization"
149
+ # p.143 formula (6.20).
150
+ s_norm2 = np.dot(delta_x, delta_x)
151
+ y_norm2 = np.dot(delta_grad, delta_grad)
152
+ ys = np.abs(np.dot(delta_grad, delta_x))
153
+ if ys == 0.0 or y_norm2 == 0 or s_norm2 == 0:
154
+ return 1
155
+ if self.approx_type == 'hess':
156
+ return y_norm2 / ys
157
+ else:
158
+ return ys / y_norm2
159
+
160
+ def _update_implementation(self, delta_x, delta_grad):
161
+ raise NotImplementedError("The method ``_update_implementation``"
162
+ " is not implemented.")
163
+
164
+ def update(self, delta_x, delta_grad):
165
+ """Update internal matrix.
166
+
167
+ Update Hessian matrix or its inverse (depending on how 'approx_type'
168
+ is defined) using information about the last evaluated points.
169
+
170
+ Parameters
171
+ ----------
172
+ delta_x : ndarray
173
+ The difference between two points the gradient
174
+ function has been evaluated at: ``delta_x = x2 - x1``.
175
+ delta_grad : ndarray
176
+ The difference between the gradients:
177
+ ``delta_grad = grad(x2) - grad(x1)``.
178
+ """
179
+ if np.all(delta_x == 0.0):
180
+ return
181
+ if np.all(delta_grad == 0.0):
182
+ warn('delta_grad == 0.0. Check if the approximated '
183
+ 'function is linear. If the function is linear '
184
+ 'better results can be obtained by defining the '
185
+ 'Hessian as zero instead of using quasi-Newton '
186
+ 'approximations.',
187
+ UserWarning, stacklevel=2)
188
+ return
189
+ if self.first_iteration:
190
+ # Get user specific scale
191
+ if self.init_scale == "auto":
192
+ scale = self._auto_scale(delta_x, delta_grad)
193
+ else:
194
+ scale = float(self.init_scale)
195
+ # Scale initial matrix with ``scale * np.eye(n)``
196
+ if self.approx_type == 'hess':
197
+ self.B *= scale
198
+ else:
199
+ self.H *= scale
200
+ self.first_iteration = False
201
+ self._update_implementation(delta_x, delta_grad)
202
+
203
+ def dot(self, p):
204
+ """Compute the product of the internal matrix with the given vector.
205
+
206
+ Parameters
207
+ ----------
208
+ p : array_like
209
+ 1-D array representing a vector.
210
+
211
+ Returns
212
+ -------
213
+ Hp : array
214
+ 1-D array representing the result of multiplying the approximation
215
+ matrix by the vector p.
216
+ """
217
+ if self.approx_type == 'hess':
218
+ return self._symv(1, self.B, p)
219
+ else:
220
+ return self._symv(1, self.H, p)
221
+
222
+ def get_matrix(self):
223
+ """Return the current internal matrix.
224
+
225
+ Returns
226
+ -------
227
+ M : ndarray, shape (n, n)
228
+ Dense matrix containing either the Hessian or its inverse
229
+ (depending on how `approx_type` was defined).
230
+ """
231
+ if self.approx_type == 'hess':
232
+ M = np.copy(self.B)
233
+ else:
234
+ M = np.copy(self.H)
235
+ li = np.tril_indices_from(M, k=-1)
236
+ M[li] = M.T[li]
237
+ return M
238
+
239
+
240
+ class BFGS(FullHessianUpdateStrategy):
241
+ """Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy.
242
+
243
+ Parameters
244
+ ----------
245
+ exception_strategy : {'skip_update', 'damp_update'}, optional
246
+ Define how to proceed when the curvature condition is violated.
247
+ Set it to 'skip_update' to just skip the update. Or, alternatively,
248
+ set it to 'damp_update' to interpolate between the actual BFGS
249
+ result and the unmodified matrix. Both exception strategies
250
+ are explained in [1]_, p.536-537.
251
+ min_curvature : float
252
+ This number, scaled by a normalization factor, defines the
253
+ minimum curvature ``dot(delta_grad, delta_x)`` allowed to go
254
+ unaffected by the exception strategy. By default it is equal to
255
+ 1e-8 when ``exception_strategy = 'skip_update'`` and equal
256
+ to 0.2 when ``exception_strategy = 'damp_update'``.
257
+ init_scale : {float, 'auto'}
258
+ Matrix scale at first iteration. At the first
259
+ iteration the Hessian matrix or its inverse will be initialized
260
+ with ``init_scale*np.eye(n)``, where ``n`` is the problem dimension.
261
+ Set it to 'auto' in order to use an automatic heuristic for choosing
262
+ the initial scale. The heuristic is described in [1]_, p.143.
263
+ By default uses 'auto'.
264
+
265
+ Notes
266
+ -----
267
+ The update is based on the description in [1]_, p.140.
268
+
269
+ References
270
+ ----------
271
+ .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
272
+ Second Edition (2006).
273
+ """
274
+
275
+ def __init__(self, exception_strategy='skip_update', min_curvature=None,
276
+ init_scale='auto'):
277
+ if exception_strategy == 'skip_update':
278
+ if min_curvature is not None:
279
+ self.min_curvature = min_curvature
280
+ else:
281
+ self.min_curvature = 1e-8
282
+ elif exception_strategy == 'damp_update':
283
+ if min_curvature is not None:
284
+ self.min_curvature = min_curvature
285
+ else:
286
+ self.min_curvature = 0.2
287
+ else:
288
+ raise ValueError("`exception_strategy` must be 'skip_update' "
289
+ "or 'damp_update'.")
290
+
291
+ super().__init__(init_scale)
292
+ self.exception_strategy = exception_strategy
293
+
294
+ def _update_inverse_hessian(self, ys, Hy, yHy, s):
295
+ """Update the inverse Hessian matrix.
296
+
297
+ BFGS update using the formula:
298
+
299
+ ``H <- H + ((H*y).T*y + s.T*y)/(s.T*y)^2 * (s*s.T)
300
+ - 1/(s.T*y) * ((H*y)*s.T + s*(H*y).T)``
301
+
302
+ where ``s = delta_x`` and ``y = delta_grad``. This formula is
303
+ equivalent to (6.17) in [1]_ written in a more efficient way
304
+ for implementation.
305
+
306
+ References
307
+ ----------
308
+ .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
309
+ Second Edition (2006).
310
+ """
311
+ self.H = self._syr2(-1.0 / ys, s, Hy, a=self.H)
312
+ self.H = self._syr((ys+yHy)/ys**2, s, a=self.H)
313
+
314
+ def _update_hessian(self, ys, Bs, sBs, y):
315
+ """Update the Hessian matrix.
316
+
317
+ BFGS update using the formula:
318
+
319
+ ``B <- B - (B*s)*(B*s).T/s.T*(B*s) + y*y^T/s.T*y``
320
+
321
+ where ``s`` is short for ``delta_x`` and ``y`` is short
322
+ for ``delta_grad``. Formula (6.19) in [1]_.
323
+
324
+ References
325
+ ----------
326
+ .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
327
+ Second Edition (2006).
328
+ """
329
+ self.B = self._syr(1.0 / ys, y, a=self.B)
330
+ self.B = self._syr(-1.0 / sBs, Bs, a=self.B)
331
+
332
+ def _update_implementation(self, delta_x, delta_grad):
333
+ # Auxiliary variables w and z
334
+ if self.approx_type == 'hess':
335
+ w = delta_x
336
+ z = delta_grad
337
+ else:
338
+ w = delta_grad
339
+ z = delta_x
340
+ # Do some common operations
341
+ wz = np.dot(w, z)
342
+ Mw = self.dot(w)
343
+ wMw = Mw.dot(w)
344
+ # Guarantee that wMw > 0 by reinitializing matrix.
345
+ # While this is always true in exact arithmetic,
346
+ # indefinite matrix may appear due to roundoff errors.
347
+ if wMw <= 0.0:
348
+ scale = self._auto_scale(delta_x, delta_grad)
349
+ # Reinitialize matrix
350
+ if self.approx_type == 'hess':
351
+ self.B = scale * np.eye(self.n, dtype=float)
352
+ else:
353
+ self.H = scale * np.eye(self.n, dtype=float)
354
+ # Do common operations for new matrix
355
+ Mw = self.dot(w)
356
+ wMw = Mw.dot(w)
357
+ # Check if curvature condition is violated
358
+ if wz <= self.min_curvature * wMw:
359
+ # If the option 'skip_update' is set
360
+ # we just skip the update when the condition
361
+ # is violated.
362
+ if self.exception_strategy == 'skip_update':
363
+ return
364
+ # If the option 'damp_update' is set we
365
+ # interpolate between the actual BFGS
366
+ # result and the unmodified matrix.
367
+ elif self.exception_strategy == 'damp_update':
368
+ update_factor = (1-self.min_curvature) / (1 - wz/wMw)
369
+ z = update_factor*z + (1-update_factor)*Mw
370
+ wz = np.dot(w, z)
371
+ # Update matrix
372
+ if self.approx_type == 'hess':
373
+ self._update_hessian(wz, Mw, wMw, z)
374
+ else:
375
+ self._update_inverse_hessian(wz, Mw, wMw, z)
376
+
377
+
378
+ class SR1(FullHessianUpdateStrategy):
379
+ """Symmetric-rank-1 Hessian update strategy.
380
+
381
+ Parameters
382
+ ----------
383
+ min_denominator : float
384
+ This number, scaled by a normalization factor,
385
+ defines the minimum denominator magnitude allowed
386
+ in the update. When the condition is violated we skip
387
+ the update. By default uses ``1e-8``.
388
+ init_scale : {float, 'auto'}, optional
389
+ Matrix scale at first iteration. At the first
390
+ iteration the Hessian matrix or its inverse will be initialized
391
+ with ``init_scale*np.eye(n)``, where ``n`` is the problem dimension.
392
+ Set it to 'auto' in order to use an automatic heuristic for choosing
393
+ the initial scale. The heuristic is described in [1]_, p.143.
394
+ By default uses 'auto'.
395
+
396
+ Notes
397
+ -----
398
+ The update is based on the description in [1]_, p.144-146.
399
+
400
+ References
401
+ ----------
402
+ .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
403
+ Second Edition (2006).
404
+ """
405
+
406
+ def __init__(self, min_denominator=1e-8, init_scale='auto'):
407
+ self.min_denominator = min_denominator
408
+ super().__init__(init_scale)
409
+
410
+ def _update_implementation(self, delta_x, delta_grad):
411
+ # Auxiliary variables w and z
412
+ if self.approx_type == 'hess':
413
+ w = delta_x
414
+ z = delta_grad
415
+ else:
416
+ w = delta_grad
417
+ z = delta_x
418
+ # Do some common operations
419
+ Mw = self.dot(w)
420
+ z_minus_Mw = z - Mw
421
+ denominator = np.dot(w, z_minus_Mw)
422
+ # If the denominator is too small
423
+ # we just skip the update.
424
+ if np.abs(denominator) <= self.min_denominator*norm(w)*norm(z_minus_Mw):
425
+ return
426
+ # Update matrix
427
+ if self.approx_type == 'hess':
428
+ self.B = self._syr(1/denominator, z_minus_Mw, a=self.B)
429
+ else:
430
+ self.H = self._syr(1/denominator, z_minus_Mw, a=self.H)
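
As a quick, hedged illustration of the interface defined in this file,
``BFGS`` (exported from ``scipy.optimize``) can be driven by hand through
``initialize``/``update``/``dot``/``get_matrix``. The quadratic below is
an example of ours, not from the file; with gradient ``2*x`` the
approximation recovers the true Hessian ``2*I``:

import numpy as np
from scipy.optimize import BFGS

grad = lambda x: 2.0 * x  # f(x) = x @ x, so the true Hessian is 2*I

hess = BFGS()
hess.initialize(n=2, approx_type='hess')

x_old = np.array([1.0, -1.0])
for x_new in (np.array([0.5, 0.2]), np.array([-0.3, 0.8])):
    # Feed (delta_x, delta_grad) pairs, as a quasi-Newton solver would.
    hess.update(x_new - x_old, grad(x_new) - grad(x_old))
    x_old = x_new

print(hess.get_matrix())     # approximately 2 * np.eye(2)
print(hess.dot(np.ones(2)))  # matrix-vector product via the BLAS symv call
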
venv/lib/python3.10/site-packages/scipy/optimize/_highs/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_constants.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (36.1 kB).
 
venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsInfo.pxd ADDED
@@ -0,0 +1,22 @@
1
+ # cython: language_level=3
2
+
3
+ cdef extern from "HighsInfo.h" nogil:
4
+ # From HiGHS/src/lp_data/HighsInfo.h
5
+ cdef cppclass HighsInfo:
6
+ # Inherited from HighsInfoStruct:
7
+ int mip_node_count
8
+ int simplex_iteration_count
9
+ int ipm_iteration_count
10
+ int crossover_iteration_count
11
+ int primal_solution_status
12
+ int dual_solution_status
13
+ int basis_validity
14
+ double objective_function_value
15
+ double mip_dual_bound
16
+ double mip_gap
17
+ int num_primal_infeasibilities
18
+ double max_primal_infeasibility
19
+ double sum_primal_infeasibilities
20
+ int num_dual_infeasibilities
21
+ double max_dual_infeasibility
22
+ double sum_dual_infeasibilities
venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsOptions.pxd ADDED
@@ -0,0 +1,110 @@
1
+ # cython: language_level=3
2
+
3
+ from libc.stdio cimport FILE
4
+
5
+ from libcpp cimport bool
6
+ from libcpp.string cimport string
7
+ from libcpp.vector cimport vector
8
+
9
+ from .HConst cimport HighsOptionType
10
+
11
+ cdef extern from "HighsOptions.h" nogil:
12
+
13
+ cdef cppclass OptionRecord:
14
+ HighsOptionType type
15
+ string name
16
+ string description
17
+ bool advanced
18
+
19
+ cdef cppclass OptionRecordBool(OptionRecord):
20
+ bool* value
21
+ bool default_value
22
+
23
+ cdef cppclass OptionRecordInt(OptionRecord):
24
+ int* value
25
+ int lower_bound
26
+ int default_value
27
+ int upper_bound
28
+
29
+ cdef cppclass OptionRecordDouble(OptionRecord):
30
+ double* value
31
+ double lower_bound
32
+ double default_value
33
+ double upper_bound
34
+
35
+ cdef cppclass OptionRecordString(OptionRecord):
36
+ string* value
37
+ string default_value
38
+
39
+ cdef cppclass HighsOptions:
40
+ # From HighsOptionsStruct:
41
+
42
+ # Options read from the command line
43
+ string model_file
44
+ string presolve
45
+ string solver
46
+ string parallel
47
+ double time_limit
48
+ string options_file
49
+
50
+ # Options read from the file
51
+ double infinite_cost
52
+ double infinite_bound
53
+ double small_matrix_value
54
+ double large_matrix_value
55
+ double primal_feasibility_tolerance
56
+ double dual_feasibility_tolerance
57
+ double ipm_optimality_tolerance
58
+ double dual_objective_value_upper_bound
59
+ int highs_debug_level
60
+ int simplex_strategy
61
+ int simplex_scale_strategy
62
+ int simplex_crash_strategy
63
+ int simplex_dual_edge_weight_strategy
64
+ int simplex_primal_edge_weight_strategy
65
+ int simplex_iteration_limit
66
+ int simplex_update_limit
67
+ int ipm_iteration_limit
68
+ int highs_min_threads
69
+ int highs_max_threads
70
+ int message_level
71
+ string solution_file
72
+ bool write_solution_to_file
73
+ bool write_solution_pretty
74
+
75
+ # Advanced options
76
+ bool run_crossover
77
+ bool mps_parser_type_free
78
+ int keep_n_rows
79
+ int allowed_simplex_matrix_scale_factor
80
+ int allowed_simplex_cost_scale_factor
81
+ int simplex_dualise_strategy
82
+ int simplex_permute_strategy
83
+ int dual_simplex_cleanup_strategy
84
+ int simplex_price_strategy
85
+ int dual_chuzc_sort_strategy
86
+ bool simplex_initial_condition_check
87
+ double simplex_initial_condition_tolerance
88
+ double dual_steepest_edge_weight_log_error_threshhold
89
+ double dual_simplex_cost_perturbation_multiplier
90
+ double start_crossover_tolerance
91
+ bool less_infeasible_DSE_check
92
+ bool less_infeasible_DSE_choose_row
93
+ bool use_original_HFactor_logic
94
+
95
+ # Options for MIP solver
96
+ int mip_max_nodes
97
+ int mip_report_level
98
+
99
+ # Switch for MIP solver
100
+ bool mip
101
+
102
+ # Options for HighsPrintMessage and HighsLogMessage
103
+ FILE* logfile
104
+ FILE* output
105
+ int message_level
106
+ string solution_file
107
+ bool write_solution_to_file
108
+ bool write_solution_pretty
109
+
110
+ vector[OptionRecord*] records
venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsRuntimeOptions.pxd ADDED
@@ -0,0 +1,9 @@
1
+ # cython: language_level=3
2
+
3
+ from libcpp cimport bool
4
+
5
+ from .HighsOptions cimport HighsOptions
6
+
7
+ cdef extern from "HighsRuntimeOptions.h" nogil:
8
+ # From HiGHS/src/lp_data/HighsRuntimeOptions.h
9
+ bool loadOptions(int argc, char** argv, HighsOptions& options)
venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsStatus.pxd ADDED
@@ -0,0 +1,12 @@
1
+ # cython: language_level=3
2
+
3
+ from libcpp.string cimport string
4
+
5
+ cdef extern from "HighsStatus.h" nogil:
6
+ ctypedef enum HighsStatus:
7
+ HighsStatusError "HighsStatus::kError" = -1
8
+ HighsStatusOK "HighsStatus::kOk" = 0
9
+ HighsStatusWarning "HighsStatus::kWarning" = 1
10
+
11
+
12
+ string highsStatusToString(HighsStatus status)
venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/SimplexConst.pxd ADDED
@@ -0,0 +1,95 @@
1
+ # cython: language_level=3
2
+
3
+ from libcpp cimport bool
4
+
5
+ cdef extern from "SimplexConst.h" nogil:
6
+
7
+ cdef enum SimplexAlgorithm:
8
+ PRIMAL "SimplexAlgorithm::kPrimal" = 0
9
+ DUAL "SimplexAlgorithm::kDual"
10
+
11
+ cdef enum SimplexStrategy:
12
+ SIMPLEX_STRATEGY_MIN "SimplexStrategy::kSimplexStrategyMin" = 0
13
+ SIMPLEX_STRATEGY_CHOOSE "SimplexStrategy::kSimplexStrategyChoose" = SIMPLEX_STRATEGY_MIN
14
+ SIMPLEX_STRATEGY_DUAL "SimplexStrategy::kSimplexStrategyDual"
15
+ SIMPLEX_STRATEGY_DUAL_PLAIN "SimplexStrategy::kSimplexStrategyDualPlain" = SIMPLEX_STRATEGY_DUAL
16
+ SIMPLEX_STRATEGY_DUAL_TASKS "SimplexStrategy::kSimplexStrategyDualTasks"
17
+ SIMPLEX_STRATEGY_DUAL_MULTI "SimplexStrategy::kSimplexStrategyDualMulti"
18
+ SIMPLEX_STRATEGY_PRIMAL "SimplexStrategy::kSimplexStrategyPrimal"
19
+ SIMPLEX_STRATEGY_MAX "SimplexStrategy::kSimplexStrategyMax" = SIMPLEX_STRATEGY_PRIMAL
20
+ SIMPLEX_STRATEGY_NUM "SimplexStrategy::kSimplexStrategyNum"
21
+
22
+ cdef enum SimplexCrashStrategy:
23
+ SIMPLEX_CRASH_STRATEGY_MIN "SimplexCrashStrategy::kSimplexCrashStrategyMin" = 0
24
+ SIMPLEX_CRASH_STRATEGY_OFF "SimplexCrashStrategy::kSimplexCrashStrategyOff" = SIMPLEX_CRASH_STRATEGY_MIN
25
+ SIMPLEX_CRASH_STRATEGY_LTSSF_K "SimplexCrashStrategy::kSimplexCrashStrategyLtssfK"
26
+ SIMPLEX_CRASH_STRATEGY_LTSSF "SimplexCrashStrategy::kSimplexCrashStrategyLtssf" = SIMPLEX_CRASH_STRATEGY_LTSSF_K
27
+ SIMPLEX_CRASH_STRATEGY_BIXBY "SimplexCrashStrategy::kSimplexCrashStrategyBixby"
28
+ SIMPLEX_CRASH_STRATEGY_LTSSF_PRI "SimplexCrashStrategy::kSimplexCrashStrategyLtssfPri"
29
+ SIMPLEX_CRASH_STRATEGY_LTSF_K "SimplexCrashStrategy::kSimplexCrashStrategyLtsfK"
30
+ SIMPLEX_CRASH_STRATEGY_LTSF_PRI "SimplexCrashStrategy::kSimplexCrashStrategyLtsfPri"
31
+ SIMPLEX_CRASH_STRATEGY_LTSF "SimplexCrashStrategy::kSimplexCrashStrategyLtsf"
32
+ SIMPLEX_CRASH_STRATEGY_BIXBY_NO_NONZERO_COL_COSTS "SimplexCrashStrategy::kSimplexCrashStrategyBixbyNoNonzeroColCosts"
33
+ SIMPLEX_CRASH_STRATEGY_BASIC "SimplexCrashStrategy::kSimplexCrashStrategyBasic"
34
+ SIMPLEX_CRASH_STRATEGY_TEST_SING "SimplexCrashStrategy::kSimplexCrashStrategyTestSing"
35
+ SIMPLEX_CRASH_STRATEGY_MAX "SimplexCrashStrategy::kSimplexCrashStrategyMax" = SIMPLEX_CRASH_STRATEGY_TEST_SING
36
+
37
+ cdef enum SimplexEdgeWeightStrategy:
38
+ SIMPLEX_EDGE_WEIGHT_STRATEGY_MIN "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyMin" = -1
39
+ SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyChoose" = SIMPLEX_EDGE_WEIGHT_STRATEGY_MIN
40
+ SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyDantzig"
41
+ SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyDevex"
42
+ SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategySteepestEdge"
43
+ SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE_UNIT_INITIAL "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategySteepestEdgeUnitInitial"
44
+ SIMPLEX_EDGE_WEIGHT_STRATEGY_MAX "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyMax" = SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE_UNIT_INITIAL
45
+
46
+ cdef enum SimplexPriceStrategy:
47
+ SIMPLEX_PRICE_STRATEGY_MIN = 0
48
+ SIMPLEX_PRICE_STRATEGY_COL = SIMPLEX_PRICE_STRATEGY_MIN
49
+ SIMPLEX_PRICE_STRATEGY_ROW
50
+ SIMPLEX_PRICE_STRATEGY_ROW_SWITCH
51
+ SIMPLEX_PRICE_STRATEGY_ROW_SWITCH_COL_SWITCH
52
+ SIMPLEX_PRICE_STRATEGY_MAX = SIMPLEX_PRICE_STRATEGY_ROW_SWITCH_COL_SWITCH
53
+
54
+ cdef enum SimplexDualChuzcStrategy:
55
+ SIMPLEX_DUAL_CHUZC_STRATEGY_MIN = 0
56
+ SIMPLEX_DUAL_CHUZC_STRATEGY_CHOOSE = SIMPLEX_DUAL_CHUZC_STRATEGY_MIN
57
+ SIMPLEX_DUAL_CHUZC_STRATEGY_QUAD
58
+ SIMPLEX_DUAL_CHUZC_STRATEGY_HEAP
59
+ SIMPLEX_DUAL_CHUZC_STRATEGY_BOTH
60
+ SIMPLEX_DUAL_CHUZC_STRATEGY_MAX = SIMPLEX_DUAL_CHUZC_STRATEGY_BOTH
61
+
62
+ cdef enum InvertHint:
63
+ INVERT_HINT_NO = 0
64
+ INVERT_HINT_UPDATE_LIMIT_REACHED
65
+ INVERT_HINT_SYNTHETIC_CLOCK_SAYS_INVERT
66
+ INVERT_HINT_POSSIBLY_OPTIMAL
67
+ INVERT_HINT_POSSIBLY_PRIMAL_UNBOUNDED
68
+ INVERT_HINT_POSSIBLY_DUAL_UNBOUNDED
69
+ INVERT_HINT_POSSIBLY_SINGULAR_BASIS
70
+ INVERT_HINT_PRIMAL_INFEASIBLE_IN_PRIMAL_SIMPLEX
71
+ INVERT_HINT_CHOOSE_COLUMN_FAIL
72
+ INVERT_HINT_Count
73
+
74
+ cdef enum DualEdgeWeightMode:
75
+ DANTZIG "DualEdgeWeightMode::DANTZIG" = 0
76
+ DEVEX "DualEdgeWeightMode::DEVEX"
77
+ STEEPEST_EDGE "DualEdgeWeightMode::STEEPEST_EDGE"
78
+ Count "DualEdgeWeightMode::Count"
79
+
80
+ cdef enum PriceMode:
81
+ ROW "PriceMode::ROW" = 0
82
+ COL "PriceMode::COL"
83
+
84
+ const int PARALLEL_THREADS_DEFAULT
85
+ const int DUAL_TASKS_MIN_THREADS
86
+ const int DUAL_MULTI_MIN_THREADS
87
+
88
+ const bool invert_if_row_out_negative
89
+
90
+ const int NONBASIC_FLAG_TRUE
91
+ const int NONBASIC_FLAG_FALSE
92
+
93
+ const int NONBASIC_MOVE_UP
94
+ const int NONBASIC_MOVE_DN
95
+ const int NONBASIC_MOVE_ZE
venv/lib/python3.10/site-packages/scipy/optimize/_isotonic.py ADDED
@@ -0,0 +1,158 @@
1
+ from __future__ import annotations
2
+ from typing import TYPE_CHECKING
3
+
4
+ import numpy as np
5
+
6
+ from ._optimize import OptimizeResult
7
+ from ._pava_pybind import pava
8
+
9
+ if TYPE_CHECKING:
10
+ import numpy.typing as npt
11
+
12
+
13
+ __all__ = ["isotonic_regression"]
14
+
15
+
16
+ def isotonic_regression(
17
+ y: npt.ArrayLike,
18
+ *,
19
+ weights: npt.ArrayLike | None = None,
20
+ increasing: bool = True,
21
+ ) -> OptimizeResult:
22
+ r"""Nonparametric isotonic regression.
23
+
24
+ A (not strictly) monotonically increasing array `x` with the same length
25
+ as `y` is calculated by the pool adjacent violators algorithm (PAVA), see
26
+ [1]_. See the Notes section for more details.
27
+
28
+ Parameters
29
+ ----------
30
+ y : (N,) array_like
31
+ Response variable.
32
+ weights : (N,) array_like or None
33
+ Case weights.
34
+ increasing : bool
35
+ If True, fit monotonic increasing, i.e. isotonic, regression.
36
+ If False, fit a monotonic decreasing, i.e. antitonic, regression.
37
+ Default is True.
38
+
39
+ Returns
40
+ -------
41
+ res : OptimizeResult
42
+ The optimization result represented as a ``OptimizeResult`` object.
43
+ Important attributes are:
44
+
45
+ - ``x``: The isotonic regression solution, i.e. an increasing (or
46
+ decreasing) array of the same length than y, with elements in the
47
+ range from min(y) to max(y).
48
+ - ``weights`` : Array with the sum of case weights for each block
49
+ (or pool) B.
50
+ - ``blocks``: Array of length B+1 with the indices of the start
51
+ positions of each block (or pool) B. The j-th block is given by
52
+ ``x[blocks[j]:blocks[j+1]]`` for which all values are the same.
53
+
54
+ Notes
55
+ -----
56
+ Given data :math:`y` and case weights :math:`w`, the isotonic regression
57
+ solves the following optimization problem:
58
+
59
+ .. math::
60
+
61
+ \operatorname{argmin}_{x_i} \sum_i w_i (y_i - x_i)^2 \quad
62
+ \text{subject to } x_i \leq x_j \text{ whenever } i \leq j \,.
63
+
64
+ For every input value :math:`y_i`, it generates a value :math:`x_i` such
65
+ that :math:`x` is increasing (but not strictly), i.e.
66
+ :math:`x_i \leq x_{i+1}`. This is accomplished by the PAVA.
67
+ The solution consists of pools or blocks, i.e. neighboring elements of
68
+ :math:`x`, e.g. :math:`x_i` and :math:`x_{i+1}`, that all have the same
69
+ value.
70
+
71
+ Most interestingly, the solution stays the same if the squared loss is
72
+ replaced by the wide class of Bregman functions which are the unique
73
+ class of strictly consistent scoring functions for the mean, see [2]_
74
+ and references therein.
75
+
76
+ The implemented version of PAVA according to [1]_ has a computational
77
+ complexity of O(N) with input size N.
78
+
79
+ References
80
+ ----------
81
+ .. [1] Busing, F. M. T. A. (2022).
82
+ Monotone Regression: A Simple and Fast O(n) PAVA Implementation.
83
+ Journal of Statistical Software, Code Snippets, 102(1), 1-25.
84
+ :doi:`10.18637/jss.v102.c01`
85
+ .. [2] Jordan, A.I., Mühlemann, A. & Ziegel, J.F.
86
+ Characterizing the optimal solutions to the isotonic regression
87
+ problem for identifiable functionals.
88
+ Ann Inst Stat Math 74, 489-514 (2022).
89
+ :doi:`10.1007/s10463-021-00808-0`
90
+
91
+ Examples
92
+ --------
93
+ This example demonstrates that ``isotonic_regression`` really solves a
94
+ constrained optimization problem.
95
+
96
+ >>> import numpy as np
97
+ >>> from scipy.optimize import isotonic_regression, minimize
98
+ >>> y = [1.5, 1.0, 4.0, 6.0, 5.7, 5.0, 7.8, 9.0, 7.5, 9.5, 9.0]
99
+ >>> def objective(yhat, y):
100
+ ... return np.sum((yhat - y)**2)
101
+ >>> def constraint(yhat, y):
102
+ ... # This is for a monotonically increasing regression.
103
+ ... return np.diff(yhat)
104
+ >>> result = minimize(objective, x0=y, args=(y,),
105
+ ... constraints=[{'type': 'ineq',
106
+ ... 'fun': lambda x: constraint(x, y)}])
107
+ >>> result.x
108
+ array([1.25 , 1.25 , 4. , 5.56666667, 5.56666667,
109
+ 5.56666667, 7.8 , 8.25 , 8.25 , 9.25 ,
110
+ 9.25 ])
111
+ >>> result = isotonic_regression(y)
112
+ >>> result.x
113
+ array([1.25 , 1.25 , 4. , 5.56666667, 5.56666667,
114
+ 5.56666667, 7.8 , 8.25 , 8.25 , 9.25 ,
115
+ 9.25 ])
116
+
117
+ The big advantage of ``isotonic_regression`` compared to calling
118
+ ``minimize`` is that it is more user friendly, i.e. one does not need to
119
+ define objective and constraint functions, and that it is orders of
120
+ magnitudes faster. On commodity hardware (in 2023), for normal distributed
121
+ input y of length 1000, the minimizer takes about 4 seconds, while
122
+ ``isotonic_regression`` takes about 200 microseconds.
123
+ """
124
+ yarr = np.asarray(y) # Check yarr.ndim == 1 is implicit (pybind11) in pava.
125
+ if weights is None:
126
+ warr = np.ones_like(yarr)
127
+ else:
128
+ warr = np.asarray(weights)
129
+
130
+ if not (yarr.ndim == warr.ndim == 1 and yarr.shape[0] == warr.shape[0]):
131
+ raise ValueError(
132
+ "Input arrays y and w must have one dimension of equal length."
133
+ )
134
+ if np.any(warr <= 0):
135
+ raise ValueError("Weights w must be strictly positive.")
136
+
137
+ order = slice(None) if increasing else slice(None, None, -1)
138
+ x = np.array(yarr[order], order="C", dtype=np.float64, copy=True)
139
+ wx = np.array(warr[order], order="C", dtype=np.float64, copy=True)
140
+ n = x.shape[0]
141
+ r = np.full(shape=n + 1, fill_value=-1, dtype=np.intp)
142
+ x, wx, r, b = pava(x, wx, r)
143
+ # Now that we know the number of blocks b, we only keep the relevant part
144
+ # of r and wx.
145
+ # As information: Due to the pava implementation, after the last block
146
+ # index, there might be smaller numbers appended to r, e.g.
147
+ # r = [0, 10, 8, 7] which in the end should be r = [0, 10].
148
+ r = r[:b + 1]
149
+ wx = wx[:b]
150
+ if not increasing:
151
+ x = x[::-1]
152
+ wx = wx[::-1]
153
+ r = r[-1] - r[::-1]
154
+ return OptimizeResult(
155
+ x=x,
156
+ weights=wx,
157
+ blocks=r,
158
+ )
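
To complement the docstring example, here is a small sketch of the
antitonic direction and of how the returned ``blocks`` indices partition
the solution; the input values are ours, chosen only for illustration.

import numpy as np
from scipy.optimize import isotonic_regression

y = [3.0, 1.0, 2.0, 0.5]
res = isotonic_regression(y, increasing=False)  # antitonic (decreasing) fit
print(res.x)  # a non-increasing array of pooled means

# Block j spans res.x[res.blocks[j]:res.blocks[j+1]] and is constant there:
for j in range(len(res.blocks) - 1):
    print(j, res.x[res.blocks[j]:res.blocks[j+1]])
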
venv/lib/python3.10/site-packages/scipy/optimize/_lbfgsb.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (125 kB).
 
venv/lib/python3.10/site-packages/scipy/optimize/_lbfgsb_py.py ADDED
@@ -0,0 +1,543 @@
1
+ """
2
+ Functions
3
+ ---------
4
+ .. autosummary::
5
+ :toctree: generated/
6
+
7
+ fmin_l_bfgs_b
8
+
9
+ """
10
+
11
+ ## License for the Python wrapper
12
+ ## ==============================
13
+
14
+ ## Copyright (c) 2004 David M. Cooke <[email protected]>
15
+
16
+ ## Permission is hereby granted, free of charge, to any person obtaining a
17
+ ## copy of this software and associated documentation files (the "Software"),
18
+ ## to deal in the Software without restriction, including without limitation
19
+ ## the rights to use, copy, modify, merge, publish, distribute, sublicense,
20
+ ## and/or sell copies of the Software, and to permit persons to whom the
21
+ ## Software is furnished to do so, subject to the following conditions:
22
+
23
+ ## The above copyright notice and this permission notice shall be included in
24
+ ## all copies or substantial portions of the Software.
25
+
26
+ ## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27
+ ## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28
+ ## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29
+ ## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30
+ ## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31
+ ## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
32
+ ## DEALINGS IN THE SOFTWARE.
33
+
34
+ ## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy
35
+
36
+ import numpy as np
37
+ from numpy import array, asarray, float64, zeros
38
+ from . import _lbfgsb
39
+ from ._optimize import (MemoizeJac, OptimizeResult, _call_callback_maybe_halt,
40
+ _wrap_callback, _check_unknown_options,
41
+ _prepare_scalar_function)
42
+ from ._constraints import old_bound_to_new
43
+
44
+ from scipy.sparse.linalg import LinearOperator
45
+
46
+ __all__ = ['fmin_l_bfgs_b', 'LbfgsInvHessProduct']
47
+
48
+
49
+ def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
50
+ approx_grad=0,
51
+ bounds=None, m=10, factr=1e7, pgtol=1e-5,
52
+ epsilon=1e-8,
53
+ iprint=-1, maxfun=15000, maxiter=15000, disp=None,
54
+ callback=None, maxls=20):
55
+ """
56
+ Minimize a function func using the L-BFGS-B algorithm.
57
+
58
+ Parameters
59
+ ----------
60
+ func : callable f(x,*args)
61
+ Function to minimize.
62
+ x0 : ndarray
63
+ Initial guess.
64
+ fprime : callable fprime(x,*args), optional
65
+ The gradient of `func`. If None, then `func` returns the function
66
+ value and the gradient (``f, g = func(x, *args)``), unless
67
+ `approx_grad` is True in which case `func` returns only ``f``.
68
+ args : sequence, optional
69
+ Arguments to pass to `func` and `fprime`.
70
+ approx_grad : bool, optional
71
+ Whether to approximate the gradient numerically (in which case
72
+ `func` returns only the function value).
73
+ bounds : list, optional
74
+ ``(min, max)`` pairs for each element in ``x``, defining
75
+ the bounds on that parameter. Use None or +-inf for one of ``min`` or
76
+ ``max`` when there is no bound in that direction.
77
+ m : int, optional
78
+ The maximum number of variable metric corrections
79
+ used to define the limited memory matrix. (The limited memory BFGS
80
+ method does not store the full Hessian but uses this many terms in an
81
+ approximation to it.)
82
+ factr : float, optional
83
+ The iteration stops when
84
+ ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``,
85
+ where ``eps`` is the machine precision, which is automatically
86
+ generated by the code. Typical values for `factr` are: 1e12 for
87
+ low accuracy; 1e7 for moderate accuracy; 10.0 for extremely
88
+ high accuracy. See Notes for relationship to `ftol`, which is exposed
89
+ (instead of `factr`) by the `scipy.optimize.minimize` interface to
90
+ L-BFGS-B.
91
+ pgtol : float, optional
92
+ The iteration will stop when
93
+ ``max{|proj g_i | i = 1, ..., n} <= pgtol``
94
+ where ``proj g_i`` is the i-th component of the projected gradient.
95
+ epsilon : float, optional
96
+ Step size used when `approx_grad` is True, for numerically
97
+ calculating the gradient
98
+ iprint : int, optional
99
+ Controls the frequency of output. ``iprint < 0`` means no output;
100
+ ``iprint = 0`` print only one line at the last iteration;
101
+ ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations;
102
+ ``iprint = 99`` print details of every iteration except n-vectors;
103
+ ``iprint = 100`` print also the changes of active set and final x;
104
+ ``iprint > 100`` print details of every iteration including x and g.
105
+ disp : int, optional
106
+ If zero, then no output. If a positive number, then this overrides
107
+ `iprint` (i.e., `iprint` gets the value of `disp`).
108
+ maxfun : int, optional
109
+ Maximum number of function evaluations. Note that this function
110
+ may violate the limit because of evaluating gradients by numerical
111
+ differentiation.
112
+ maxiter : int, optional
113
+ Maximum number of iterations.
114
+ callback : callable, optional
115
+ Called after each iteration, as ``callback(xk)``, where ``xk`` is the
116
+ current parameter vector.
117
+ maxls : int, optional
118
+ Maximum number of line search steps (per iteration). Default is 20.
119
+
120
+ Returns
121
+ -------
122
+ x : array_like
123
+ Estimated position of the minimum.
124
+ f : float
125
+ Value of `func` at the minimum.
126
+ d : dict
127
+ Information dictionary.
128
+
129
+ * d['warnflag'] is
130
+
131
+ - 0 if converged,
132
+ - 1 if too many function evaluations or too many iterations,
133
+ - 2 if stopped for another reason, given in d['task']
134
+
135
+ * d['grad'] is the gradient at the minimum (which should be near 0)
136
+ * d['funcalls'] is the number of function calls made.
137
+ * d['nit'] is the number of iterations.
138
+
139
+ See also
140
+ --------
141
+ minimize: Interface to minimization algorithms for multivariate
142
+ functions. See the 'L-BFGS-B' `method` in particular. Note that the
143
+ `ftol` option is made available via that interface, while `factr` is
144
+ provided via this interface, where `factr` is the factor multiplying
145
+ the default machine floating-point precision to arrive at `ftol`:
146
+ ``ftol = factr * numpy.finfo(float).eps``.
147
+
148
+ Notes
149
+ -----
150
+ License of L-BFGS-B (FORTRAN code):
151
+
152
+ The version included here (in fortran code) is 3.0
153
+ (released April 25, 2011). It was written by Ciyou Zhu, Richard Byrd,
154
+ and Jorge Nocedal <[email protected]>. It carries the following
155
+ condition for use:
156
+
157
+ This software is freely available, but we expect that all publications
158
+ describing work using this software, or all commercial products using it,
159
+ quote at least one of the references given below. This software is released
160
+ under the BSD License.
161
+
162
+ References
163
+ ----------
164
+ * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound
165
+ Constrained Optimization, (1995), SIAM Journal on Scientific and
166
+ Statistical Computing, 16, 5, pp. 1190-1208.
167
+ * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
168
+ FORTRAN routines for large scale bound constrained optimization (1997),
169
+ ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560.
170
+ * J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B,
171
+ FORTRAN routines for large scale bound constrained optimization (2011),
172
+ ACM Transactions on Mathematical Software, 38, 1.
173
+
174
+ Examples
175
+ --------
176
+ Solve a linear regression problem via `fmin_l_bfgs_b`. To do this, first we define
177
+ an objective function ``f(m, b) = sum((y - y_model)**2)``, where `y` describes the
178
+ observations and `y_model` the prediction of the linear model as
179
+ ``y_model = m*x + b``. The bounds for the parameters, ``m`` and ``b``, are arbitrarily
180
+ chosen as ``(0,5)`` and ``(5,10)`` for this example.
181
+
182
+ >>> import numpy as np
183
+ >>> from scipy.optimize import fmin_l_bfgs_b
184
+ >>> X = np.arange(0, 10, 1)
185
+ >>> M = 2
186
+ >>> B = 3
187
+ >>> Y = M * X + B
188
+ >>> def func(parameters, *args):
189
+ ... x = args[0]
190
+ ... y = args[1]
191
+ ... m, b = parameters
192
+ ... y_model = m*x + b
193
+ ... error = sum(np.power((y - y_model), 2))
194
+ ... return error
195
+
196
+ >>> initial_values = np.array([0.0, 1.0])
197
+
198
+ >>> x_opt, f_opt, info = fmin_l_bfgs_b(func, x0=initial_values, args=(X, Y),
199
+ ... approx_grad=True)
200
+ >>> x_opt, f_opt
201
+ array([1.99999999, 3.00000006]), 1.7746231151323805e-14 # may vary
202
+
203
+ The optimized parameters in ``x_opt`` agree with the ground truth parameters
204
+ ``m`` and ``b``. Next, let us perform a bound constrained optimization using the `bounds`
205
+ parameter.
206
+
207
+ >>> bounds = [(0, 5), (5, 10)]
208
+ >>> x_opt, f_opt, info = fmin_l_bfgs_b(func, x0=initial_values, args=(X, Y),
209
+ ... approx_grad=True, bounds=bounds)
210
+ >>> x_opt, f_opt
211
+ array([1.65990508, 5.31649385]), 15.721334516453945 # may vary
212
+ """
213
+ # handle fprime/approx_grad
214
+ if approx_grad:
215
+ fun = func
216
+ jac = None
217
+ elif fprime is None:
218
+ fun = MemoizeJac(func)
219
+ jac = fun.derivative
220
+ else:
221
+ fun = func
222
+ jac = fprime
223
+
224
+ # build options
225
+ callback = _wrap_callback(callback)
226
+ opts = {'disp': disp,
227
+ 'iprint': iprint,
228
+ 'maxcor': m,
229
+ 'ftol': factr * np.finfo(float).eps,
230
+ 'gtol': pgtol,
231
+ 'eps': epsilon,
232
+ 'maxfun': maxfun,
233
+ 'maxiter': maxiter,
234
+ 'callback': callback,
235
+ 'maxls': maxls}
236
+
237
+ res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
238
+ **opts)
239
+ d = {'grad': res['jac'],
240
+ 'task': res['message'],
241
+ 'funcalls': res['nfev'],
242
+ 'nit': res['nit'],
243
+ 'warnflag': res['status']}
244
+ f = res['fun']
245
+ x = res['x']
246
+
247
+ return x, f, d
248
+
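As a quick illustration of the option mapping performed above, the legacy interface and `minimize` should agree once `factr`/`pgtol` are translated into `ftol`/`gtol`; a minimal sketch, using the `rosen` test function shipped with `scipy.optimize`:

import numpy as np
from scipy.optimize import fmin_l_bfgs_b, minimize, rosen, rosen_der

x0 = np.array([-1.2, 1.0])
# Legacy interface: factr and pgtol.
x, f, d = fmin_l_bfgs_b(rosen, x0, fprime=rosen_der, factr=1e7, pgtol=1e-5)
# minimize interface: the same tolerances expressed as ftol and gtol.
res = minimize(rosen, x0, jac=rosen_der, method='L-BFGS-B',
               options={'ftol': 1e7 * np.finfo(float).eps, 'gtol': 1e-5})
assert np.allclose(x, res.x)  # both paths drive the same Fortran routine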
249
+
250
+ def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None,
251
+ disp=None, maxcor=10, ftol=2.2204460492503131e-09,
252
+ gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000,
253
+ iprint=-1, callback=None, maxls=20,
254
+ finite_diff_rel_step=None, **unknown_options):
255
+ """
256
+ Minimize a scalar function of one or more variables using the L-BFGS-B
257
+ algorithm.
258
+
259
+ Options
260
+ -------
261
+ disp : None or int
262
+ If `disp is None` (the default), then the supplied version of `iprint`
263
+ is used. If `disp is not None`, then it overrides the supplied version
264
+ of `iprint` (i.e., `iprint` takes the value of `disp`).
265
+ maxcor : int
266
+ The maximum number of variable metric corrections used to
267
+ define the limited memory matrix. (The limited memory BFGS
268
+ method does not store the full Hessian but uses this many terms
269
+ in an approximation to it.)
270
+ ftol : float
271
+ The iteration stops when ``(f^k -
272
+ f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``.
273
+ gtol : float
274
+ The iteration will stop when ``max{|proj g_i | i = 1, ..., n}
275
+ <= gtol`` where ``proj g_i`` is the i-th component of the
276
+ projected gradient.
277
+ eps : float or ndarray
278
+ If `jac is None` the absolute step size used for numerical
279
+ approximation of the Jacobian via forward differences.
280
+ maxfun : int
281
+ Maximum number of function evaluations. Note that this function
282
+ may violate the limit because of evaluating gradients by numerical
283
+ differentiation.
284
+ maxiter : int
285
+ Maximum number of iterations.
286
+ iprint : int, optional
287
+ Controls the frequency of output. ``iprint < 0`` means no output;
288
+ ``iprint = 0`` print only one line at the last iteration;
289
+ ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations;
290
+ ``iprint = 99`` print details of every iteration except n-vectors;
291
+ ``iprint = 100`` print also the changes of active set and final x;
292
+ ``iprint > 100`` print details of every iteration including x and g.
293
+ maxls : int, optional
294
+ Maximum number of line search steps (per iteration). Default is 20.
295
+ finite_diff_rel_step : None or array_like, optional
296
+ If `jac in ['2-point', '3-point', 'cs']` the relative step size to
297
+ use for numerical approximation of the Jacobian. The absolute step
298
+ size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``,
299
+ possibly adjusted to fit into the bounds. For ``method='3-point'``
300
+ the sign of `h` is ignored. If None (default) then step is selected
301
+ automatically.
302
+
303
+ Notes
304
+ -----
305
+ The option `ftol` is exposed via the `scipy.optimize.minimize` interface,
306
+ but calling `scipy.optimize.fmin_l_bfgs_b` directly exposes `factr`. The
307
+ relationship between the two is ``ftol = factr * numpy.finfo(float).eps``.
308
+ I.e., `factr` multiplies the default machine floating-point precision to
309
+ arrive at `ftol`.
310
+
311
+ """
312
+ _check_unknown_options(unknown_options)
313
+ m = maxcor
314
+ pgtol = gtol
315
+ factr = ftol / np.finfo(float).eps
316
+
317
+ x0 = asarray(x0).ravel()
318
+ n, = x0.shape
319
+
320
+ # historically old-style bounds were/are expected by lbfgsb.
321
+ # That's still the case but we'll deal with new-style from here on,
322
+ # it's easier
323
+ if bounds is None:
324
+ pass
325
+ elif len(bounds) != n:
326
+ raise ValueError('length of x0 != length of bounds')
327
+ else:
328
+ bounds = np.array(old_bound_to_new(bounds))
329
+
330
+ # check bounds
331
+ if (bounds[0] > bounds[1]).any():
332
+ raise ValueError(
333
+ "LBFGSB - one of the lower bounds is greater than an upper bound."
334
+ )
335
+
336
+ # initial vector must lie within the bounds. Otherwise ScalarFunction and
337
+ # approx_derivative will cause problems
338
+ x0 = np.clip(x0, bounds[0], bounds[1])
339
+
340
+ if disp is not None:
341
+ if disp == 0:
342
+ iprint = -1
343
+ else:
344
+ iprint = disp
345
+
346
+ # _prepare_scalar_function can use bounds=None to represent no bounds
347
+ sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps,
348
+ bounds=bounds,
349
+ finite_diff_rel_step=finite_diff_rel_step)
350
+
351
+ func_and_grad = sf.fun_and_grad
352
+
353
+ fortran_int = _lbfgsb.types.intvar.dtype
354
+
355
+ nbd = zeros(n, fortran_int)
356
+ low_bnd = zeros(n, float64)
357
+ upper_bnd = zeros(n, float64)
358
+ bounds_map = {(-np.inf, np.inf): 0,
359
+ (1, np.inf): 1,
360
+ (1, 1): 2,
361
+ (-np.inf, 1): 3}
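The codes in this mapping follow the Fortran L-BFGS-B ``nbd`` convention: 0 for an unbounded variable, 1 for a lower bound only, 2 for both bounds, and 3 for an upper bound only; the loop below rewrites each ``(l, u)`` pair into that encoding.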
362
+
363
+ if bounds is not None:
364
+ for i in range(0, n):
365
+ l, u = bounds[0, i], bounds[1, i]
366
+ if not np.isinf(l):
367
+ low_bnd[i] = l
368
+ l = 1
369
+ if not np.isinf(u):
370
+ upper_bnd[i] = u
371
+ u = 1
372
+ nbd[i] = bounds_map[l, u]
373
+
374
+ if not maxls > 0:
375
+ raise ValueError('maxls must be positive.')
376
+
377
+ x = array(x0, float64)
378
+ f = array(0.0, float64)
379
+ g = zeros((n,), float64)
380
+ wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64)
381
+ iwa = zeros(3*n, fortran_int)
382
+ task = zeros(1, 'S60')
383
+ csave = zeros(1, 'S60')
384
+ lsave = zeros(4, fortran_int)
385
+ isave = zeros(44, fortran_int)
386
+ dsave = zeros(29, float64)
387
+
388
+ task[:] = 'START'
389
+
390
+ n_iterations = 0
391
+
392
+ while True:
393
+ # g may become float32 if a user provides a function that calculates
394
+ # the Jacobian in float32 (see gh-18730). The underlying Fortran code
395
+ # expects float64, so upcast it
396
+ g = g.astype(np.float64)
397
+ # x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \
398
+ _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
399
+ pgtol, wa, iwa, task, iprint, csave, lsave,
400
+ isave, dsave, maxls)
401
+ task_str = task.tobytes()
402
+ if task_str.startswith(b'FG'):
403
+ # The minimization routine wants f and g at the current x.
404
+ # Note that interruptions due to maxfun are postponed
405
+ # until the completion of the current minimization iteration.
406
+ # Overwrite f and g:
407
+ f, g = func_and_grad(x)
408
+ elif task_str.startswith(b'NEW_X'):
409
+ # new iteration
410
+ n_iterations += 1
411
+
412
+ intermediate_result = OptimizeResult(x=x, fun=f)
413
+ if _call_callback_maybe_halt(callback, intermediate_result):
414
+ task[:] = 'STOP: CALLBACK REQUESTED HALT'
415
+ if n_iterations >= maxiter:
416
+ task[:] = 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT'
417
+ elif sf.nfev > maxfun:
418
+ task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS '
419
+ 'EXCEEDS LIMIT')
420
+ else:
421
+ break
422
+
423
+ task_str = task.tobytes().strip(b'\x00').strip()
424
+ if task_str.startswith(b'CONV'):
425
+ warnflag = 0
426
+ elif sf.nfev > maxfun or n_iterations >= maxiter:
427
+ warnflag = 1
428
+ else:
429
+ warnflag = 2
430
+
431
+ # These two portions of the workspace are described in the mainlb
432
+ # subroutine in lbfgsb.f. See line 363.
433
+ s = wa[0: m*n].reshape(m, n)
434
+ y = wa[m*n: 2*m*n].reshape(m, n)
435
+
436
+ # See lbfgsb.f line 160 for this portion of the workspace.
437
+ # isave(31) = the total number of BFGS updates prior the current iteration;
438
+ n_bfgs_updates = isave[30]
439
+
440
+ n_corrs = min(n_bfgs_updates, maxcor)
441
+ hess_inv = LbfgsInvHessProduct(s[:n_corrs], y[:n_corrs])
442
+
443
+ task_str = task_str.decode()
444
+ return OptimizeResult(fun=f, jac=g, nfev=sf.nfev,
445
+ njev=sf.ngev,
446
+ nit=n_iterations, status=warnflag, message=task_str,
447
+ x=x, success=(warnflag == 0), hess_inv=hess_inv)
448
+
449
+
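The `hess_inv` operator assembled here is also reachable through the public interface; a small sketch (again using the `rosen` test function) of applying it to a vector after a solve:

import numpy as np
from scipy.optimize import minimize, rosen, rosen_der

res = minimize(rosen, np.array([-1.2, 1.0]), jac=rosen_der, method='L-BFGS-B')
# res.hess_inv is the LbfgsInvHessProduct built from the s/y history above.
v = np.array([1.0, 0.0])
print(res.hess_inv.matvec(v))  # approximate inverse Hessian applied to v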
450
+ class LbfgsInvHessProduct(LinearOperator):
451
+ """Linear operator for the L-BFGS approximate inverse Hessian.
452
+
453
+ This operator computes the product of a vector with the approximate inverse
454
+ of the Hessian of the objective function, using the L-BFGS limited
455
+ memory approximation to the inverse Hessian, accumulated during the
456
+ optimization.
457
+
458
+ Objects of this class implement the ``scipy.sparse.linalg.LinearOperator``
459
+ interface.
460
+
461
+ Parameters
462
+ ----------
463
+ sk : array_like, shape=(n_corr, n)
464
+ Array of `n_corr` most recent updates to the solution vector.
465
+ (See [1]).
466
+ yk : array_like, shape=(n_corr, n)
467
+ Array of `n_corr` most recent updates to the gradient. (See [1]).
468
+
469
+ References
470
+ ----------
471
+ .. [1] Nocedal, Jorge. "Updating quasi-Newton matrices with limited
472
+ storage." Mathematics of computation 35.151 (1980): 773-782.
473
+
474
+ """
475
+
476
+ def __init__(self, sk, yk):
477
+ """Construct the operator."""
478
+ if sk.shape != yk.shape or sk.ndim != 2:
479
+ raise ValueError('sk and yk must have matching shape, (n_corrs, n)')
480
+ n_corrs, n = sk.shape
481
+
482
+ super().__init__(dtype=np.float64, shape=(n, n))
483
+
484
+ self.sk = sk
485
+ self.yk = yk
486
+ self.n_corrs = n_corrs
487
+ self.rho = 1 / np.einsum('ij,ij->i', sk, yk)
488
+
489
+ def _matvec(self, x):
490
+ """Efficient matrix-vector multiply with the BFGS matrices.
491
+
492
+ This calculation is described in Section (4) of [1].
493
+
494
+ Parameters
495
+ ----------
496
+ x : ndarray
497
+ An array with shape (n,) or (n,1).
498
+
499
+ Returns
500
+ -------
501
+ y : ndarray
502
+ The matrix-vector product
503
+
504
+ """
505
+ s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho
506
+ q = np.array(x, dtype=self.dtype, copy=True)
507
+ if q.ndim == 2 and q.shape[1] == 1:
508
+ q = q.reshape(-1)
509
+
510
+ alpha = np.empty(n_corrs)
511
+
512
+ for i in range(n_corrs-1, -1, -1):
513
+ alpha[i] = rho[i] * np.dot(s[i], q)
514
+ q = q - alpha[i]*y[i]
515
+
516
+ r = q
517
+ for i in range(n_corrs):
518
+ beta = rho[i] * np.dot(y[i], r)
519
+ r = r + s[i] * (alpha[i] - beta)
520
+
521
+ return r
522
+
523
+ def todense(self):
524
+ """Return a dense array representation of this operator.
525
+
526
+ Returns
527
+ -------
528
+ arr : ndarray, shape=(n, n)
529
+ An array with the same shape and containing
530
+ the same data represented by this `LinearOperator`.
531
+
532
+ """
533
+ s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho
534
+ I = np.eye(*self.shape, dtype=self.dtype)
535
+ Hk = I
536
+
537
+ for i in range(n_corrs):
538
+ A1 = I - s[i][:, np.newaxis] * y[i][np.newaxis, :] * rho[i]
539
+ A2 = I - y[i][:, np.newaxis] * s[i][np.newaxis, :] * rho[i]
540
+
541
+ Hk = np.dot(A1, np.dot(Hk, A2)) + (rho[i] * s[i][:, np.newaxis] *
542
+ s[i][np.newaxis, :])
543
+ return Hk
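A small consistency check tying the two implementations above together: the two-loop recursion in `_matvec` and the dense product from `todense` should agree. The curvature pairs below are made up for the sketch; `yk` is chosen close to `sk` so that each ``s_i . y_i`` stays positive.

import numpy as np
from scipy.optimize import LbfgsInvHessProduct

rng = np.random.default_rng(0)
sk = rng.standard_normal((3, 5))
yk = sk + 0.1 * rng.standard_normal((3, 5))  # keep s_i . y_i positive
op = LbfgsInvHessProduct(sk, yk)
v = rng.standard_normal(5)
assert np.allclose(op.matvec(v), op.todense() @ v)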
venv/lib/python3.10/site-packages/scipy/optimize/_linesearch.py ADDED
@@ -0,0 +1,897 @@
1
+ """
2
+ Functions
3
+ ---------
4
+ .. autosummary::
5
+ :toctree: generated/
6
+
7
+ line_search_armijo
8
+ line_search_wolfe1
9
+ line_search_wolfe2
10
+ scalar_search_wolfe1
11
+ scalar_search_wolfe2
12
+
13
+ """
14
+ from warnings import warn
15
+
16
+ from scipy.optimize import _minpack2 as minpack2 # noqa: F401
17
+ from ._dcsrch import DCSRCH
18
+ import numpy as np
19
+
20
+ __all__ = ['LineSearchWarning', 'line_search_wolfe1', 'line_search_wolfe2',
21
+ 'scalar_search_wolfe1', 'scalar_search_wolfe2',
22
+ 'line_search_armijo']
23
+
24
+ class LineSearchWarning(RuntimeWarning):
25
+ pass
26
+
27
+
28
+ def _check_c1_c2(c1, c2):
29
+ if not (0 < c1 < c2 < 1):
30
+ raise ValueError("'c1' and 'c2' do not satisfy"
31
+ "'0 < c1 < c2 < 1'.")
32
+
33
+
34
+ #------------------------------------------------------------------------------
35
+ # Minpack's Wolfe line and scalar searches
36
+ #------------------------------------------------------------------------------
37
+
38
+ def line_search_wolfe1(f, fprime, xk, pk, gfk=None,
39
+ old_fval=None, old_old_fval=None,
40
+ args=(), c1=1e-4, c2=0.9, amax=50, amin=1e-8,
41
+ xtol=1e-14):
42
+ """
43
+ As `scalar_search_wolfe1`, but performs the search along direction `pk`
44
+
45
+ Parameters
46
+ ----------
47
+ f : callable
48
+ Function `f(x)`
49
+ fprime : callable
50
+ Gradient of `f`
51
+ xk : array_like
52
+ Current point
53
+ pk : array_like
54
+ Search direction
55
+ gfk : array_like, optional
56
+ Gradient of `f` at point `xk`
57
+ old_fval : float, optional
58
+ Value of `f` at point `xk`
59
+ old_old_fval : float, optional
60
+ Value of `f` at point preceding `xk`
61
+
62
+ The rest of the parameters are the same as for `scalar_search_wolfe1`.
63
+
64
+ Returns
65
+ -------
66
+ stp, f_count, g_count, fval, old_fval
67
+ As in `line_search_wolfe1`
68
+ gval : array
69
+ Gradient of `f` at the final point
70
+
71
+ Notes
72
+ -----
73
+ Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``.
74
+
75
+ """
76
+ if gfk is None:
77
+ gfk = fprime(xk, *args)
78
+
79
+ gval = [gfk]
80
+ gc = [0]
81
+ fc = [0]
82
+
83
+ def phi(s):
84
+ fc[0] += 1
85
+ return f(xk + s*pk, *args)
86
+
87
+ def derphi(s):
88
+ gval[0] = fprime(xk + s*pk, *args)
89
+ gc[0] += 1
90
+ return np.dot(gval[0], pk)
91
+
92
+ derphi0 = np.dot(gfk, pk)
93
+
94
+ stp, fval, old_fval = scalar_search_wolfe1(
95
+ phi, derphi, old_fval, old_old_fval, derphi0,
96
+ c1=c1, c2=c2, amax=amax, amin=amin, xtol=xtol)
97
+
98
+ return stp, fc[0], gc[0], fval, old_fval, gval[0]
99
+
100
+
101
+ def scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None,
102
+ c1=1e-4, c2=0.9,
103
+ amax=50, amin=1e-8, xtol=1e-14):
104
+ """
105
+ Scalar function search for alpha that satisfies strong Wolfe conditions
106
+
107
+ The direction of increasing ``alpha`` is assumed to be a descent direction, i.e. ``derphi(0) < 0``.
108
+
109
+ Parameters
110
+ ----------
111
+ phi : callable phi(alpha)
112
+ Function at point `alpha`
113
+ derphi : callable phi'(alpha)
114
+ Objective function derivative. Returns a scalar.
115
+ phi0 : float, optional
116
+ Value of phi at 0
117
+ old_phi0 : float, optional
118
+ Value of phi at previous point
119
+ derphi0 : float, optional
120
+ Value derphi at 0
121
+ c1 : float, optional
122
+ Parameter for Armijo condition rule.
123
+ c2 : float, optional
124
+ Parameter for curvature condition rule.
125
+ amax, amin : float, optional
126
+ Maximum and minimum step size
127
+ xtol : float, optional
128
+ Relative tolerance for an acceptable step.
129
+
130
+ Returns
131
+ -------
132
+ alpha : float
133
+ Step size, or None if no suitable step was found
134
+ phi : float
135
+ Value of `phi` at the new point `alpha`
136
+ phi0 : float
137
+ Value of `phi` at `alpha=0`
138
+
139
+ Notes
140
+ -----
141
+ Uses routine DCSRCH from MINPACK.
142
+
143
+ Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1`` as described in [1]_.
144
+
145
+ References
146
+ ----------
147
+
148
+ .. [1] Nocedal, J., & Wright, S. J. (2006). Numerical optimization.
149
+ Springer Series in Operations Research and Financial Engineering.
150
+ Springer Nature.
152
+
153
+ """
154
+ _check_c1_c2(c1, c2)
155
+
156
+ if phi0 is None:
157
+ phi0 = phi(0.)
158
+ if derphi0 is None:
159
+ derphi0 = derphi(0.)
160
+
161
+ if old_phi0 is not None and derphi0 != 0:
162
+ alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)
163
+ if alpha1 < 0:
164
+ alpha1 = 1.0
165
+ else:
166
+ alpha1 = 1.0
167
+
168
+ maxiter = 100
169
+
170
+ dcsrch = DCSRCH(phi, derphi, c1, c2, xtol, amin, amax)
171
+ stp, phi1, phi0, task = dcsrch(
172
+ alpha1, phi0=phi0, derphi0=derphi0, maxiter=maxiter
173
+ )
174
+
175
+ return stp, phi1, phi0
176
+
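`scalar_search_wolfe1` is a private helper, but it can be imported from this module for illustration; a minimal sketch on a 1-D quadratic (the quadratic is made up for the example):

from scipy.optimize._linesearch import scalar_search_wolfe1

phi = lambda a: (a - 2.0)**2        # minimized at a = 2
derphi = lambda a: 2.0 * (a - 2.0)  # derphi(0) = -4 < 0, so a descent direction
stp, phi1, phi0 = scalar_search_wolfe1(phi, derphi)
print(stp, phi1)  # a step satisfying the strong Wolfe conditions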
177
+
178
+ line_search = line_search_wolfe1
179
+
180
+
181
+ #------------------------------------------------------------------------------
182
+ # Pure-Python Wolfe line and scalar searches
183
+ #------------------------------------------------------------------------------
184
+
185
+ # Note: `line_search_wolfe2` is the public `scipy.optimize.line_search`
186
+
187
+ def line_search_wolfe2(f, myfprime, xk, pk, gfk=None, old_fval=None,
188
+ old_old_fval=None, args=(), c1=1e-4, c2=0.9, amax=None,
189
+ extra_condition=None, maxiter=10):
190
+ """Find alpha that satisfies strong Wolfe conditions.
191
+
192
+ Parameters
193
+ ----------
194
+ f : callable f(x,*args)
195
+ Objective function.
196
+ myfprime : callable f'(x,*args)
197
+ Objective function gradient.
198
+ xk : ndarray
199
+ Starting point.
200
+ pk : ndarray
201
+ Search direction. The search direction must be a descent direction
202
+ for the algorithm to converge.
203
+ gfk : ndarray, optional
204
+ Gradient value for x=xk (xk being the current parameter
205
+ estimate). Will be recomputed if omitted.
206
+ old_fval : float, optional
207
+ Function value for x=xk. Will be recomputed if omitted.
208
+ old_old_fval : float, optional
209
+ Function value for the point preceding x=xk.
210
+ args : tuple, optional
211
+ Additional arguments passed to objective function.
212
+ c1 : float, optional
213
+ Parameter for Armijo condition rule.
214
+ c2 : float, optional
215
+ Parameter for curvature condition rule.
216
+ amax : float, optional
217
+ Maximum step size
218
+ extra_condition : callable, optional
219
+ A callable of the form ``extra_condition(alpha, x, f, g)``
220
+ returning a boolean. Arguments are the proposed step ``alpha``
221
+ and the corresponding ``x``, ``f`` and ``g`` values. The line search
222
+ accepts the value of ``alpha`` only if this
223
+ callable returns ``True``. If the callable returns ``False``
224
+ for the step length, the algorithm will continue with
225
+ new iterates. The callable is only called for iterates
226
+ satisfying the strong Wolfe conditions.
227
+ maxiter : int, optional
228
+ Maximum number of iterations to perform.
229
+
230
+ Returns
231
+ -------
232
+ alpha : float or None
233
+ Alpha for which ``x_new = x0 + alpha * pk``,
234
+ or None if the line search algorithm did not converge.
235
+ fc : int
236
+ Number of function evaluations made.
237
+ gc : int
238
+ Number of gradient evaluations made.
239
+ new_fval : float or None
240
+ New function value ``f(x_new)=f(x0+alpha*pk)``,
241
+ or None if the line search algorithm did not converge.
242
+ old_fval : float
243
+ Old function value ``f(x0)``.
244
+ new_slope : float or None
245
+ The local slope along the search direction at the
246
+ new value ``<myfprime(x_new), pk>``,
247
+ or None if the line search algorithm did not converge.
248
+
249
+
250
+ Notes
251
+ -----
252
+ Uses the line search algorithm to enforce strong Wolfe
253
+ conditions. See Wright and Nocedal, 'Numerical Optimization',
254
+ 1999, pp. 59-61.
255
+
256
+ The search direction `pk` must be a descent direction (e.g.
257
+ ``-myfprime(xk)``) to find a step length that satisfies the strong Wolfe
258
+ conditions. If the search direction is not a descent direction (e.g.
259
+ ``myfprime(xk)``), then `alpha`, `new_fval`, and `new_slope` will be None.
260
+
261
+ Examples
262
+ --------
263
+ >>> import numpy as np
264
+ >>> from scipy.optimize import line_search
265
+
266
+ A objective function and its gradient are defined.
267
+
268
+ >>> def obj_func(x):
269
+ ... return (x[0])**2+(x[1])**2
270
+ >>> def obj_grad(x):
271
+ ... return [2*x[0], 2*x[1]]
272
+
273
+ We can find alpha that satisfies strong Wolfe conditions.
274
+
275
+ >>> start_point = np.array([1.8, 1.7])
276
+ >>> search_gradient = np.array([-1.0, -1.0])
277
+ >>> line_search(obj_func, obj_grad, start_point, search_gradient)
278
+ (1.0, 2, 1, 1.1300000000000001, 6.13, [1.6, 1.4])
279
+
280
+ """
281
+ fc = [0]
282
+ gc = [0]
283
+ gval = [None]
284
+ gval_alpha = [None]
285
+
286
+ def phi(alpha):
287
+ fc[0] += 1
288
+ return f(xk + alpha * pk, *args)
289
+
290
+ fprime = myfprime
291
+
292
+ def derphi(alpha):
293
+ gc[0] += 1
294
+ gval[0] = fprime(xk + alpha * pk, *args) # store for later use
295
+ gval_alpha[0] = alpha
296
+ return np.dot(gval[0], pk)
297
+
298
+ if gfk is None:
299
+ gfk = fprime(xk, *args)
300
+ derphi0 = np.dot(gfk, pk)
301
+
302
+ if extra_condition is not None:
303
+ # Add the current gradient as argument, to avoid needless
304
+ # re-evaluation
305
+ def extra_condition2(alpha, phi):
306
+ if gval_alpha[0] != alpha:
307
+ derphi(alpha)
308
+ x = xk + alpha * pk
309
+ return extra_condition(alpha, x, phi, gval[0])
310
+ else:
311
+ extra_condition2 = None
312
+
313
+ alpha_star, phi_star, old_fval, derphi_star = scalar_search_wolfe2(
314
+ phi, derphi, old_fval, old_old_fval, derphi0, c1, c2, amax,
315
+ extra_condition2, maxiter=maxiter)
316
+
317
+ if derphi_star is None:
318
+ warn('The line search algorithm did not converge',
319
+ LineSearchWarning, stacklevel=2)
320
+ else:
321
+ # derphi_star is a number (derphi) -- so use the most recently
322
+ # calculated gradient used in computing it derphi = gfk*pk
323
+ # this is the gradient at the next step no need to compute it
324
+ # again in the outer loop.
325
+ derphi_star = gval[0]
326
+
327
+ return alpha_star, fc[0], gc[0], phi_star, old_fval, derphi_star
328
+
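To illustrate `extra_condition`, here is a sketch that additionally caps the accepted step at 0.75 on a simple quadratic; the function, gradient, and cap below are made up for the example:

import numpy as np
from scipy.optimize._linesearch import line_search_wolfe2

def f(x):
    return float(np.dot(x, x))

def grad(x):
    return 2.0 * x

xk = np.array([1.8, 1.7])
pk = -grad(xk)  # steepest-descent direction
# Accept alpha only if, besides the strong Wolfe conditions, alpha <= 0.75.
cap = lambda alpha, x, fval, g: alpha <= 0.75
alpha, fc, gc, fnew, fold, gnew = line_search_wolfe2(f, grad, xk, pk,
                                                     extra_condition=cap)
print(alpha)  # 0.5 here: the exact minimizer along the ray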
329
+
330
+ def scalar_search_wolfe2(phi, derphi, phi0=None,
331
+ old_phi0=None, derphi0=None,
332
+ c1=1e-4, c2=0.9, amax=None,
333
+ extra_condition=None, maxiter=10):
334
+ """Find alpha that satisfies strong Wolfe conditions.
335
+
336
+ The direction of increasing ``alpha`` is assumed to be a descent direction, i.e. ``derphi(0) < 0``.
337
+
338
+ Parameters
339
+ ----------
340
+ phi : callable phi(alpha)
341
+ Objective scalar function.
342
+ derphi : callable phi'(alpha)
343
+ Objective function derivative. Returns a scalar.
344
+ phi0 : float, optional
345
+ Value of phi at 0.
346
+ old_phi0 : float, optional
347
+ Value of phi at previous point.
348
+ derphi0 : float, optional
349
+ Value of derphi at 0
350
+ c1 : float, optional
351
+ Parameter for Armijo condition rule.
352
+ c2 : float, optional
353
+ Parameter for curvature condition rule.
354
+ amax : float, optional
355
+ Maximum step size.
356
+ extra_condition : callable, optional
357
+ A callable of the form ``extra_condition(alpha, phi_value)``
358
+ returning a boolean. The line search accepts the value
359
+ of ``alpha`` only if this callable returns ``True``.
360
+ If the callable returns ``False`` for the step length,
361
+ the algorithm will continue with new iterates.
362
+ The callable is only called for iterates satisfying
363
+ the strong Wolfe conditions.
364
+ maxiter : int, optional
365
+ Maximum number of iterations to perform.
366
+
367
+ Returns
368
+ -------
369
+ alpha_star : float or None
370
+ Best alpha, or None if the line search algorithm did not converge.
371
+ phi_star : float
372
+ phi at alpha_star.
373
+ phi0 : float
374
+ phi at 0.
375
+ derphi_star : float or None
376
+ derphi at alpha_star, or None if the line search algorithm
377
+ did not converge.
378
+
379
+ Notes
380
+ -----
381
+ Uses the line search algorithm to enforce strong Wolfe
382
+ conditions. See Wright and Nocedal, 'Numerical Optimization',
383
+ 1999, pp. 59-61.
384
+
385
+ """
386
+ _check_c1_c2(c1, c2)
387
+
388
+ if phi0 is None:
389
+ phi0 = phi(0.)
390
+
391
+ if derphi0 is None:
392
+ derphi0 = derphi(0.)
393
+
394
+ alpha0 = 0
395
+ if old_phi0 is not None and derphi0 != 0:
396
+ alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)
397
+ else:
398
+ alpha1 = 1.0
399
+
400
+ if alpha1 < 0:
401
+ alpha1 = 1.0
402
+
403
+ if amax is not None:
404
+ alpha1 = min(alpha1, amax)
405
+
406
+ phi_a1 = phi(alpha1)
407
+ #derphi_a1 = derphi(alpha1) evaluated below
408
+
409
+ phi_a0 = phi0
410
+ derphi_a0 = derphi0
411
+
412
+ if extra_condition is None:
413
+ def extra_condition(alpha, phi):
414
+ return True
415
+
416
+ for i in range(maxiter):
417
+ if alpha1 == 0 or (amax is not None and alpha0 > amax):
418
+ # alpha1 == 0: This shouldn't happen. Perhaps the increment has
419
+ # slipped below machine precision?
420
+ alpha_star = None
421
+ phi_star = phi0
422
+ phi0 = old_phi0
423
+ derphi_star = None
424
+
425
+ if alpha1 == 0:
426
+ msg = 'Rounding errors prevent the line search from converging'
427
+ else:
428
+ msg = "The line search algorithm could not find a solution " + \
429
+ "less than or equal to amax: %s" % amax
430
+
431
+ warn(msg, LineSearchWarning, stacklevel=2)
432
+ break
433
+
434
+ not_first_iteration = i > 0
435
+ if (phi_a1 > phi0 + c1 * alpha1 * derphi0) or \
436
+ ((phi_a1 >= phi_a0) and not_first_iteration):
437
+ alpha_star, phi_star, derphi_star = \
438
+ _zoom(alpha0, alpha1, phi_a0,
439
+ phi_a1, derphi_a0, phi, derphi,
440
+ phi0, derphi0, c1, c2, extra_condition)
441
+ break
442
+
443
+ derphi_a1 = derphi(alpha1)
444
+ if (abs(derphi_a1) <= -c2*derphi0):
445
+ if extra_condition(alpha1, phi_a1):
446
+ alpha_star = alpha1
447
+ phi_star = phi_a1
448
+ derphi_star = derphi_a1
449
+ break
450
+
451
+ if (derphi_a1 >= 0):
452
+ alpha_star, phi_star, derphi_star = \
453
+ _zoom(alpha1, alpha0, phi_a1,
454
+ phi_a0, derphi_a1, phi, derphi,
455
+ phi0, derphi0, c1, c2, extra_condition)
456
+ break
457
+
458
+ alpha2 = 2 * alpha1 # increase by factor of two on each iteration
459
+ if amax is not None:
460
+ alpha2 = min(alpha2, amax)
461
+ alpha0 = alpha1
462
+ alpha1 = alpha2
463
+ phi_a0 = phi_a1
464
+ phi_a1 = phi(alpha1)
465
+ derphi_a0 = derphi_a1
466
+
467
+ else:
468
+ # stopping test maxiter reached
469
+ alpha_star = alpha1
470
+ phi_star = phi_a1
471
+ derphi_star = None
472
+ warn('The line search algorithm did not converge',
473
+ LineSearchWarning, stacklevel=2)
474
+
475
+ return alpha_star, phi_star, phi0, derphi_star
476
+
477
+
478
+ def _cubicmin(a, fa, fpa, b, fb, c, fc):
479
+ """
480
+ Finds the minimizer for a cubic polynomial that goes through the
481
+ points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa.
482
+
483
+ If no minimizer can be found, return None.
484
+
485
+ """
486
+ # f(x) = A *(x-a)^3 + B*(x-a)^2 + C*(x-a) + D
487
+
488
+ with np.errstate(divide='raise', over='raise', invalid='raise'):
489
+ try:
490
+ C = fpa
491
+ db = b - a
492
+ dc = c - a
493
+ denom = (db * dc) ** 2 * (db - dc)
494
+ d1 = np.empty((2, 2))
495
+ d1[0, 0] = dc ** 2
496
+ d1[0, 1] = -db ** 2
497
+ d1[1, 0] = -dc ** 3
498
+ d1[1, 1] = db ** 3
499
+ [A, B] = np.dot(d1, np.asarray([fb - fa - C * db,
500
+ fc - fa - C * dc]).flatten())
501
+ A /= denom
502
+ B /= denom
503
+ radical = B * B - 3 * A * C
504
+ xmin = a + (-B + np.sqrt(radical)) / (3 * A)
505
+ except ArithmeticError:
506
+ return None
507
+ if not np.isfinite(xmin):
508
+ return None
509
+ return xmin
510
+
511
+
512
+ def _quadmin(a, fa, fpa, b, fb):
513
+ """
514
+ Finds the minimizer for a quadratic polynomial that goes through
515
+ the points (a,fa), (b,fb) with derivative at a of fpa.
516
+
517
+ """
518
+ # f(x) = B*(x-a)^2 + C*(x-a) + D
519
+ with np.errstate(divide='raise', over='raise', invalid='raise'):
520
+ try:
521
+ D = fa
522
+ C = fpa
523
+ db = b - a * 1.0
524
+ B = (fb - D - C * db) / (db * db)
525
+ xmin = a - C / (2.0 * B)
526
+ except ArithmeticError:
527
+ return None
528
+ if not np.isfinite(xmin):
529
+ return None
530
+ return xmin
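A quick sanity check of the quadratic interpolation above: for ``q(x) = (x - 3)**2`` sampled at ``a = 0`` (value 9, slope -6) and ``b = 1`` (value 4), the true minimizer should be recovered exactly:

from scipy.optimize._linesearch import _quadmin

print(_quadmin(0.0, 9.0, -6.0, 1.0, 4.0))  # 3.0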
531
+
532
+
533
+ def _zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo,
534
+ phi, derphi, phi0, derphi0, c1, c2, extra_condition):
535
+ """Zoom stage of approximate linesearch satisfying strong Wolfe conditions.
536
+
537
+ Part of the optimization algorithm in `scalar_search_wolfe2`.
538
+
539
+ Notes
540
+ -----
541
+ Implements Algorithm 3.6 (zoom) in Wright and Nocedal,
542
+ 'Numerical Optimization', 1999, pp. 61.
543
+
544
+ """
545
+
546
+ maxiter = 10
547
+ i = 0
548
+ delta1 = 0.2 # cubic interpolant check
549
+ delta2 = 0.1 # quadratic interpolant check
550
+ phi_rec = phi0
551
+ a_rec = 0
552
+ while True:
553
+ # interpolate to find a trial step length between a_lo and
554
+ # a_hi. We need to choose an interpolation here. Use cubic
555
+ # interpolation and then if the result is within delta *
556
+ # dalpha or outside of the interval bounded by a_lo or a_hi
557
+ # then use quadratic interpolation; if the result is still too
558
+ # close, then use bisection
559
+
560
+ dalpha = a_hi - a_lo
561
+ if dalpha < 0:
562
+ a, b = a_hi, a_lo
563
+ else:
564
+ a, b = a_lo, a_hi
565
+
566
+ # minimizer of cubic interpolant
567
+ # (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi)
568
+ #
569
+ # if the result is too close to the end points (or out of the
570
+ # interval), then use quadratic interpolation with phi_lo,
571
+ # derphi_lo and phi_hi if the result is still too close to the
572
+ # end points (or out of the interval) then use bisection
573
+
574
+ if (i > 0):
575
+ cchk = delta1 * dalpha
576
+ a_j = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi,
577
+ a_rec, phi_rec)
578
+ if (i == 0) or (a_j is None) or (a_j > b - cchk) or (a_j < a + cchk):
579
+ qchk = delta2 * dalpha
580
+ a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi)
581
+ if (a_j is None) or (a_j > b-qchk) or (a_j < a+qchk):
582
+ a_j = a_lo + 0.5*dalpha
583
+
584
+ # Check new value of a_j
585
+
586
+ phi_aj = phi(a_j)
587
+ if (phi_aj > phi0 + c1*a_j*derphi0) or (phi_aj >= phi_lo):
588
+ phi_rec = phi_hi
589
+ a_rec = a_hi
590
+ a_hi = a_j
591
+ phi_hi = phi_aj
592
+ else:
593
+ derphi_aj = derphi(a_j)
594
+ if abs(derphi_aj) <= -c2*derphi0 and extra_condition(a_j, phi_aj):
595
+ a_star = a_j
596
+ val_star = phi_aj
597
+ valprime_star = derphi_aj
598
+ break
599
+ if derphi_aj*(a_hi - a_lo) >= 0:
600
+ phi_rec = phi_hi
601
+ a_rec = a_hi
602
+ a_hi = a_lo
603
+ phi_hi = phi_lo
604
+ else:
605
+ phi_rec = phi_lo
606
+ a_rec = a_lo
607
+ a_lo = a_j
608
+ phi_lo = phi_aj
609
+ derphi_lo = derphi_aj
610
+ i += 1
611
+ if (i > maxiter):
612
+ # Failed to find a conforming step size
613
+ a_star = None
614
+ val_star = None
615
+ valprime_star = None
616
+ break
617
+ return a_star, val_star, valprime_star
618
+
619
+
620
+ #------------------------------------------------------------------------------
621
+ # Armijo line and scalar searches
622
+ #------------------------------------------------------------------------------
623
+
624
+ def line_search_armijo(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1):
625
+ """Minimize over alpha, the function ``f(xk+alpha pk)``.
626
+
627
+ Parameters
628
+ ----------
629
+ f : callable
630
+ Function to be minimized.
631
+ xk : array_like
632
+ Current point.
633
+ pk : array_like
634
+ Search direction.
635
+ gfk : array_like
636
+ Gradient of `f` at point `xk`.
637
+ old_fval : float
638
+ Value of `f` at point `xk`.
639
+ args : tuple, optional
640
+ Optional arguments.
641
+ c1 : float, optional
642
+ Value to control stopping criterion.
643
+ alpha0 : scalar, optional
644
+ Value of `alpha` at start of the optimization.
645
+
646
+ Returns
647
+ -------
648
+ alpha
649
+ f_count
650
+ f_val_at_alpha
651
+
652
+ Notes
653
+ -----
654
+ Uses the interpolation algorithm (Armijo backtracking) as suggested by
655
+ Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57
656
+
657
+ """
658
+ xk = np.atleast_1d(xk)
659
+ fc = [0]
660
+
661
+ def phi(alpha1):
662
+ fc[0] += 1
663
+ return f(xk + alpha1*pk, *args)
664
+
665
+ if old_fval is None:
666
+ phi0 = phi(0.)
667
+ else:
668
+ phi0 = old_fval # compute f(xk) -- done in past loop
669
+
670
+ derphi0 = np.dot(gfk, pk)
671
+ alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0, c1=c1,
672
+ alpha0=alpha0)
673
+ return alpha, fc[0], phi1
674
+
675
+
676
+ def line_search_BFGS(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1):
677
+ """
678
+ Compatibility wrapper for `line_search_armijo`
679
+ """
680
+ r = line_search_armijo(f, xk, pk, gfk, old_fval, args=args, c1=c1,
681
+ alpha0=alpha0)
682
+ return r[0], r[1], 0, r[2]
683
+
684
+
685
+ def scalar_search_armijo(phi, phi0, derphi0, c1=1e-4, alpha0=1, amin=0):
686
+ """Minimize over alpha, the function ``phi(alpha)``.
687
+
688
+ Uses the interpolation algorithm (Armijo backtracking) as suggested by
689
+ Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57
690
+
691
+ The direction of increasing ``alpha`` is assumed to be a descent direction, i.e. ``derphi0 < 0``.
692
+
693
+ Returns
694
+ -------
695
+ alpha
696
+ phi1
697
+
698
+ """
699
+ phi_a0 = phi(alpha0)
700
+ if phi_a0 <= phi0 + c1*alpha0*derphi0:
701
+ return alpha0, phi_a0
702
+
703
+ # Otherwise, compute the minimizer of a quadratic interpolant:
704
+
705
+ alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0)
706
+ phi_a1 = phi(alpha1)
707
+
708
+ if (phi_a1 <= phi0 + c1*alpha1*derphi0):
709
+ return alpha1, phi_a1
710
+
711
+ # Otherwise, loop with cubic interpolation until we find an alpha which
712
+ # satisfies the first Wolfe condition (since we are backtracking, we will
713
+ # assume that the value of alpha is not too small and satisfies the second
714
+ # condition.
715
+
716
+ while alpha1 > amin: # we are assuming alpha>0 is a descent direction
717
+ factor = alpha0**2 * alpha1**2 * (alpha1-alpha0)
718
+ a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \
719
+ alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0)
720
+ a = a / factor
721
+ b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \
722
+ alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0)
723
+ b = b / factor
724
+
725
+ alpha2 = (-b + np.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0*a)
726
+ phi_a2 = phi(alpha2)
727
+
728
+ if (phi_a2 <= phi0 + c1*alpha2*derphi0):
729
+ return alpha2, phi_a2
730
+
731
+ if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96:
732
+ alpha2 = alpha1 / 2.0
733
+
734
+ alpha0 = alpha1
735
+ alpha1 = alpha2
736
+ phi_a0 = phi_a1
737
+ phi_a1 = phi_a2
738
+
739
+ # Failed to find a suitable step length
740
+ return None, phi_a1
741
+
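A minimal sketch of the backtracking search on ``phi(a) = (a - 1)**2``, a made-up scalar function for which the initial unit step already satisfies the sufficient-decrease test:

from scipy.optimize._linesearch import scalar_search_armijo

phi = lambda a: (a - 1.0)**2
alpha, phi1 = scalar_search_armijo(phi, phi0=phi(0.0), derphi0=-2.0)
print(alpha, phi1)  # (1, 0.0): alpha0=1 passes the Armijo test immediately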
742
+
743
+ #------------------------------------------------------------------------------
744
+ # Non-monotone line search for DF-SANE
745
+ #------------------------------------------------------------------------------
746
+
747
+ def _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta,
748
+ gamma=1e-4, tau_min=0.1, tau_max=0.5):
749
+ """
750
+ Nonmonotone backtracking line search as described in [1]_
751
+
752
+ Parameters
753
+ ----------
754
+ f : callable
755
+ Function returning a tuple ``(f, F)`` where ``f`` is the value
756
+ of a merit function and ``F`` the residual.
757
+ x_k : ndarray
758
+ Initial position.
759
+ d : ndarray
760
+ Search direction.
761
+ prev_fs : sequence of float
762
+ List of previous merit function values. Should have ``len(prev_fs) <= M``
763
+ where ``M`` is the nonmonotonicity window parameter.
764
+ eta : float
765
+ Allowed merit function increase, see [1]_
766
+ gamma, tau_min, tau_max : float, optional
767
+ Search parameters, see [1]_
768
+
769
+ Returns
770
+ -------
771
+ alpha : float
772
+ Step length
773
+ xp : ndarray
774
+ Next position
775
+ fp : float
776
+ Merit function value at next position
777
+ Fp : ndarray
778
+ Residual at next position
779
+
780
+ References
781
+ ----------
782
+ [1] "Spectral residual method without gradient information for solving
783
+ large-scale nonlinear systems of equations." W. La Cruz,
784
+ J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006).
785
+
786
+ """
787
+ f_k = prev_fs[-1]
788
+ f_bar = max(prev_fs)
789
+
790
+ alpha_p = 1
791
+ alpha_m = 1
792
+ alpha = 1
793
+
794
+ while True:
795
+ xp = x_k + alpha_p * d
796
+ fp, Fp = f(xp)
797
+
798
+ if fp <= f_bar + eta - gamma * alpha_p**2 * f_k:
799
+ alpha = alpha_p
800
+ break
801
+
802
+ alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k)
803
+
804
+ xp = x_k - alpha_m * d
805
+ fp, Fp = f(xp)
806
+
807
+ if fp <= f_bar + eta - gamma * alpha_m**2 * f_k:
808
+ alpha = -alpha_m
809
+ break
810
+
811
+ alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k)
812
+
813
+ alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p)
814
+ alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m)
815
+
816
+ return alpha, xp, fp, Fp
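A sketch of how a DF-SANE-style driver might call this helper. The merit function returns ``(||F||**2, F)`` as described above; the residual and the search direction below are toy stand-ins invented for the example:

import numpy as np
from scipy.optimize._linesearch import _nonmonotone_line_search_cruz

def merit(x):
    F = np.array([x[0]**2 - 1.0, x[1] - 2.0])  # toy residual
    return float(np.dot(F, F)), F

x_k = np.array([2.0, 0.0])
f_k, F_k = merit(x_k)
d = -F_k  # stand-in for the spectral step direction
alpha, xp, fp, Fp = _nonmonotone_line_search_cruz(merit, x_k, d,
                                                  prev_fs=[f_k], eta=1e-3)
print(alpha, fp)  # the full step is accepted for this toy problem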
817
+
818
+
819
+ def _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta,
820
+ gamma=1e-4, tau_min=0.1, tau_max=0.5,
821
+ nu=0.85):
822
+ """
823
+ Nonmonotone line search from [1]_
824
+
825
+ Parameters
826
+ ----------
827
+ f : callable
828
+ Function returning a tuple ``(f, F)`` where ``f`` is the value
829
+ of a merit function and ``F`` the residual.
830
+ x_k : ndarray
831
+ Initial position.
832
+ d : ndarray
833
+ Search direction.
834
+ f_k : float
835
+ Initial merit function value.
836
+ C, Q : float
837
+ Control parameters. On the first iteration, give values
838
+ Q=1.0, C=f_k
839
+ eta : float
840
+ Allowed merit function increase, see [1]_
841
+ nu, gamma, tau_min, tau_max : float, optional
842
+ Search parameters, see [1]_
843
+
844
+ Returns
845
+ -------
846
+ alpha : float
847
+ Step length
848
+ xp : ndarray
849
+ Next position
850
+ fp : float
851
+ Merit function value at next position
852
+ Fp : ndarray
853
+ Residual at next position
854
+ C : float
855
+ New value for the control parameter C
856
+ Q : float
857
+ New value for the control parameter Q
858
+
859
+ References
860
+ ----------
861
+ .. [1] W. Cheng & D.-H. Li, ''A derivative-free nonmonotone line
862
+ search and its application to the spectral residual
863
+ method'', IMA J. Numer. Anal. 29, 814 (2009).
864
+
865
+ """
866
+ alpha_p = 1
867
+ alpha_m = 1
868
+ alpha = 1
869
+
870
+ while True:
871
+ xp = x_k + alpha_p * d
872
+ fp, Fp = f(xp)
873
+
874
+ if fp <= C + eta - gamma * alpha_p**2 * f_k:
875
+ alpha = alpha_p
876
+ break
877
+
878
+ alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k)
879
+
880
+ xp = x_k - alpha_m * d
881
+ fp, Fp = f(xp)
882
+
883
+ if fp <= C + eta - gamma * alpha_m**2 * f_k:
884
+ alpha = -alpha_m
885
+ break
886
+
887
+ alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k)
888
+
889
+ alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p)
890
+ alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m)
891
+
892
+ # Update C and Q
893
+ Q_next = nu * Q + 1
894
+ C = (nu * Q * (C + eta) + fp) / Q_next
895
+ Q = Q_next
896
+
897
+ return alpha, xp, fp, Fp, C, Q
venv/lib/python3.10/site-packages/scipy/optimize/_linprog.py ADDED
@@ -0,0 +1,714 @@
1
+ """
2
+ A top-level linear programming interface.
3
+
4
+ .. versionadded:: 0.15.0
5
+
6
+ Functions
7
+ ---------
8
+ .. autosummary::
9
+ :toctree: generated/
10
+
11
+ linprog
12
+ linprog_verbose_callback
13
+ linprog_terse_callback
14
+
15
+ """
16
+
17
+ import numpy as np
18
+
19
+ from ._optimize import OptimizeResult, OptimizeWarning
20
+ from warnings import warn
21
+ from ._linprog_highs import _linprog_highs
22
+ from ._linprog_ip import _linprog_ip
23
+ from ._linprog_simplex import _linprog_simplex
24
+ from ._linprog_rs import _linprog_rs
25
+ from ._linprog_doc import (_linprog_highs_doc, _linprog_ip_doc, # noqa: F401
26
+ _linprog_rs_doc, _linprog_simplex_doc,
27
+ _linprog_highs_ipm_doc, _linprog_highs_ds_doc)
28
+ from ._linprog_util import (
29
+ _parse_linprog, _presolve, _get_Abc, _LPProblem, _autoscale,
30
+ _postsolve, _check_result, _display_summary)
31
+ from copy import deepcopy
32
+
33
+ __all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback']
34
+
35
+ __docformat__ = "restructuredtext en"
36
+
37
+ LINPROG_METHODS = [
38
+ 'simplex', 'revised simplex', 'interior-point', 'highs', 'highs-ds', 'highs-ipm'
39
+ ]
40
+
41
+
42
+ def linprog_verbose_callback(res):
43
+ """
44
+ A sample callback function demonstrating the linprog callback interface.
45
+ This callback produces detailed output to sys.stdout before each iteration
46
+ and after the final iteration of the simplex algorithm.
47
+
48
+ Parameters
49
+ ----------
50
+ res : A `scipy.optimize.OptimizeResult` consisting of the following fields:
51
+
52
+ x : 1-D array
53
+ The independent variable vector which optimizes the linear
54
+ programming problem.
55
+ fun : float
56
+ Value of the objective function.
57
+ success : bool
58
+ True if the algorithm succeeded in finding an optimal solution.
59
+ slack : 1-D array
60
+ The values of the slack variables. Each slack variable corresponds
61
+ to an inequality constraint. If the slack is zero, then the
62
+ corresponding constraint is active.
63
+ con : 1-D array
64
+ The (nominally zero) residuals of the equality constraints, that is,
65
+ ``b - A_eq @ x``
66
+ phase : int
67
+ The phase of the optimization being executed. In phase 1 a basic
68
+ feasible solution is sought and the tableau ``T`` has an additional row
69
+ representing an alternate objective function.
70
+ status : int
71
+ An integer representing the exit status of the optimization::
72
+
73
+ 0 : Optimization terminated successfully
74
+ 1 : Iteration limit reached
75
+ 2 : Problem appears to be infeasible
76
+ 3 : Problem appears to be unbounded
77
+ 4 : Serious numerical difficulties encountered
78
+
79
+ nit : int
80
+ The number of iterations performed.
81
+ message : str
82
+ A string descriptor of the exit status of the optimization.
83
+ """
84
+ x = res['x']
85
+ fun = res['fun']
86
+ phase = res['phase']
87
+ status = res['status']
88
+ nit = res['nit']
89
+ message = res['message']
90
+ complete = res['complete']
91
+
92
+ saved_printoptions = np.get_printoptions()
93
+ np.set_printoptions(linewidth=500,
94
+ formatter={'float': lambda x: f"{x: 12.4f}"})
95
+ if status:
96
+ print('--------- Simplex Early Exit -------\n')
97
+ print(f'The simplex method exited early with status {status:d}')
98
+ print(message)
99
+ elif complete:
100
+ print('--------- Simplex Complete --------\n')
101
+ print(f'Iterations required: {nit}')
102
+ else:
103
+ print(f'--------- Iteration {nit:d} ---------\n')
104
+
105
+ if nit > 0:
106
+ if phase == 1:
107
+ print('Current Pseudo-Objective Value:')
108
+ else:
109
+ print('Current Objective Value:')
110
+ print('f = ', fun)
111
+ print()
112
+ print('Current Solution Vector:')
113
+ print('x = ', x)
114
+ print()
115
+
116
+ np.set_printoptions(**saved_printoptions)
117
+
118
+
119
+ def linprog_terse_callback(res):
120
+ """
121
+ A sample callback function demonstrating the linprog callback interface.
122
+ This callback produces brief output to sys.stdout before each iteration
123
+ and after the final iteration of the simplex algorithm.
124
+
125
+ Parameters
126
+ ----------
127
+ res : A `scipy.optimize.OptimizeResult` consisting of the following fields:
128
+
129
+ x : 1-D array
130
+ The independent variable vector which optimizes the linear
131
+ programming problem.
132
+ fun : float
133
+ Value of the objective function.
134
+ success : bool
135
+ True if the algorithm succeeded in finding an optimal solution.
136
+ slack : 1-D array
137
+ The values of the slack variables. Each slack variable corresponds
138
+ to an inequality constraint. If the slack is zero, then the
139
+ corresponding constraint is active.
140
+ con : 1-D array
141
+ The (nominally zero) residuals of the equality constraints, that is,
142
+ ``b - A_eq @ x``.
143
+ phase : int
144
+ The phase of the optimization being executed. In phase 1 a basic
145
+ feasible solution is sought and the T has an additional row
146
+ representing an alternate objective function.
147
+ status : int
148
+ An integer representing the exit status of the optimization::
149
+
150
+ 0 : Optimization terminated successfully
151
+ 1 : Iteration limit reached
152
+ 2 : Problem appears to be infeasible
153
+ 3 : Problem appears to be unbounded
154
+ 4 : Serious numerical difficulties encountered
155
+
156
+ nit : int
157
+ The number of iterations performed.
158
+ message : str
159
+ A string descriptor of the exit status of the optimization.
160
+ """
161
+ nit = res['nit']
162
+ x = res['x']
163
+
164
+ if nit == 0:
165
+ print("Iter: X:")
166
+ print(f"{nit: <5d} ", end="")
167
+ print(x)
168
+
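A small worked example tying these callbacks to `linprog`; the problem data are made up, and since (as documented below) the HiGHS methods do not invoke callbacks, the callback call uses a legacy method, which emits a deprecation warning in this version:

import numpy as np
from scipy.optimize import linprog, linprog_terse_callback

# maximize x0 + 2*x1  subject to  x0 + x1 <= 4, -x0 + x1 <= 2, x >= 0
c = [-1, -2]
A_ub = [[1, 1], [-1, 1]]
b_ub = [4, 2]
res = linprog(c, A_ub=A_ub, b_ub=b_ub)  # default method='highs'
print(res.x, res.fun)                   # [1. 3.] -7.0
# To watch iterations, pass a callback together with a legacy method:
res = linprog(c, A_ub=A_ub, b_ub=b_ub, method='simplex',
              callback=linprog_terse_callback)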
169
+
170
+ def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
171
+ bounds=(0, None), method='highs', callback=None,
172
+ options=None, x0=None, integrality=None):
173
+ r"""
174
+ Linear programming: minimize a linear objective function subject to linear
175
+ equality and inequality constraints.
176
+
177
+ Linear programming solves problems of the following form:
178
+
179
+ .. math::
180
+
181
+ \min_x \ & c^T x \\
182
+ \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
183
+ & A_{eq} x = b_{eq},\\
184
+ & l \leq x \leq u ,
185
+
186
+ where :math:`x` is a vector of decision variables; :math:`c`,
187
+ :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
188
+ :math:`A_{ub}` and :math:`A_{eq}` are matrices.
189
+
190
+ Alternatively, that's:
191
+
192
+ - minimize ::
193
+
194
+ c @ x
195
+
196
+ - such that ::
197
+
198
+ A_ub @ x <= b_ub
199
+ A_eq @ x == b_eq
200
+ lb <= x <= ub
201
+
202
+ Note that by default ``lb = 0`` and ``ub = None``. Other bounds can be
203
+ specified with ``bounds``.
204
+
205
+ Parameters
206
+ ----------
207
+ c : 1-D array
208
+ The coefficients of the linear objective function to be minimized.
209
+ A_ub : 2-D array, optional
210
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
211
+ coefficients of a linear inequality constraint on ``x``.
212
+ b_ub : 1-D array, optional
213
+ The inequality constraint vector. Each element represents an
214
+ upper bound on the corresponding value of ``A_ub @ x``.
215
+ A_eq : 2-D array, optional
216
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
217
+ coefficients of a linear equality constraint on ``x``.
218
+ b_eq : 1-D array, optional
219
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
220
+ the corresponding element of ``b_eq``.
221
+ bounds : sequence, optional
222
+ A sequence of ``(min, max)`` pairs for each element in ``x``, defining
223
+ the minimum and maximum values of that decision variable.
224
+ If a single tuple ``(min, max)`` is provided, then ``min`` and ``max``
225
+ will serve as bounds for all decision variables.
226
+ Use ``None`` to indicate that there is no bound. For instance, the
227
+ default bound ``(0, None)`` means that all decision variables are
228
+ non-negative, and the pair ``(None, None)`` means no bounds at all,
229
+ i.e. all variables are allowed to be any real.
230
+ method : str, optional
231
+ The algorithm used to solve the standard form problem.
232
+ :ref:`'highs' <optimize.linprog-highs>` (default),
233
+ :ref:`'highs-ds' <optimize.linprog-highs-ds>`,
234
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
235
+ :ref:`'interior-point' <optimize.linprog-interior-point>` (legacy),
236
+ :ref:`'revised simplex' <optimize.linprog-revised_simplex>` (legacy),
237
+ and
238
+ :ref:`'simplex' <optimize.linprog-simplex>` (legacy) are supported.
239
+ The legacy methods are deprecated and will be removed in SciPy 1.11.0.
240
+ callback : callable, optional
241
+ If a callback function is provided, it will be called at least once per
242
+ iteration of the algorithm. The callback function must accept a single
243
+ `scipy.optimize.OptimizeResult` consisting of the following fields:
244
+
245
+ x : 1-D array
246
+ The current solution vector.
247
+ fun : float
248
+ The current value of the objective function ``c @ x``.
249
+ success : bool
250
+ ``True`` when the algorithm has completed successfully.
251
+ slack : 1-D array
252
+ The (nominally positive) values of the slack,
253
+ ``b_ub - A_ub @ x``.
254
+ con : 1-D array
255
+ The (nominally zero) residuals of the equality constraints,
256
+ ``b_eq - A_eq @ x``.
257
+ phase : int
258
+ The phase of the algorithm being executed.
259
+ status : int
260
+ An integer representing the status of the algorithm.
261
+
262
+ ``0`` : Optimization proceeding nominally.
263
+
264
+ ``1`` : Iteration limit reached.
265
+
266
+ ``2`` : Problem appears to be infeasible.
267
+
268
+ ``3`` : Problem appears to be unbounded.
269
+
270
+ ``4`` : Numerical difficulties encountered.
271
+
272
+ nit : int
273
+ The current iteration number.
274
+ message : str
275
+ A string descriptor of the algorithm status.
276
+
277
+ Callback functions are not currently supported by the HiGHS methods.
278
+
279
+ options : dict, optional
280
+ A dictionary of solver options. All methods accept the following
281
+ options:
282
+
283
+ maxiter : int
284
+ Maximum number of iterations to perform.
285
+ Default: see method-specific documentation.
286
+ disp : bool
287
+ Set to ``True`` to print convergence messages.
288
+ Default: ``False``.
289
+ presolve : bool
290
+ Set to ``False`` to disable automatic presolve.
291
+ Default: ``True``.
292
+
293
+ All methods except the HiGHS solvers also accept:
294
+
295
+ tol : float
296
+ A tolerance which determines when a residual is "close enough" to
297
+ zero to be considered exactly zero.
298
+ autoscale : bool
299
+ Set to ``True`` to automatically perform equilibration.
300
+ Consider using this option if the numerical values in the
301
+ constraints are separated by several orders of magnitude.
302
+ Default: ``False``.
303
+ rr : bool
304
+ Set to ``False`` to disable automatic redundancy removal.
305
+ Default: ``True``.
306
+ rr_method : string
307
+ Method used to identify and remove redundant rows from the
308
+ equality constraint matrix after presolve. For problems with
309
+ dense input, the available methods for redundancy removal are:
310
+
311
+ "SVD":
312
+ Repeatedly performs singular value decomposition on
313
+ the matrix, detecting redundant rows based on nonzeros
314
+ in the left singular vectors that correspond with
315
+ zero singular values. May be fast when the matrix is
316
+ nearly full rank.
317
+ "pivot":
318
+ Uses the algorithm presented in [5]_ to identify
319
+ redundant rows.
320
+ "ID":
321
+ Uses a randomized interpolative decomposition.
322
+ Identifies columns of the matrix transpose not used in
323
+ a full-rank interpolative decomposition of the matrix.
324
+ None:
325
+ Uses "svd" if the matrix is nearly full rank, that is,
326
+ the difference between the matrix rank and the number
327
+ of rows is less than five. If not, uses "pivot". The
328
+ behavior of this default is subject to change without
329
+ prior notice.
330
+
331
+ Default: None.
332
+ For problems with sparse input, this option is ignored, and the
333
+ pivot-based algorithm presented in [5]_ is used.
334
+
335
+ For method-specific options, see
336
+ :func:`show_options('linprog') <show_options>`.
337
+
338
+ x0 : 1-D array, optional
339
+ Guess values of the decision variables, which will be refined by
340
+ the optimization algorithm. This argument is currently used only by the
341
+ 'revised simplex' method, and can only be used if `x0` represents a
342
+ basic feasible solution.
343
+
344
+ integrality : 1-D array or int, optional
345
+ Indicates the type of integrality constraint on each decision variable.
346
+
347
+ ``0`` : Continuous variable; no integrality constraint.
348
+
349
+ ``1`` : Integer variable; decision variable must be an integer
350
+ within `bounds`.
351
+
352
+ ``2`` : Semi-continuous variable; decision variable must be within
353
+ `bounds` or take value ``0``.
354
+
355
+ ``3`` : Semi-integer variable; decision variable must be an integer
356
+ within `bounds` or take value ``0``.
357
+
358
+ By default, all variables are continuous.
359
+
360
+ For mixed integrality constraints, supply an array of shape `c.shape`.
361
+ To infer a constraint on each decision variable from shorter inputs,
362
+ the argument will be broadcasted to `c.shape` using `np.broadcast_to`.
363
+
364
+ This argument is currently used only by the ``'highs'`` method and
365
+ ignored otherwise.
366
+
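+ For example, with ``c = [1, 2, 3]``, passing ``integrality=1`` is
+ equivalent to ``integrality=[1, 1, 1]`` (every variable integer).
+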
367
+ Returns
368
+ -------
369
+ res : OptimizeResult
370
+ A :class:`scipy.optimize.OptimizeResult` consisting of the fields
371
+ below. Note that the return types of the fields may depend on whether
372
+ the optimization was successful; therefore, it is recommended to check
373
+ `OptimizeResult.status` before relying on the other fields:
374
+
375
+ x : 1-D array
376
+ The values of the decision variables that minimize the
377
+ objective function while satisfying the constraints.
378
+ fun : float
379
+ The optimal value of the objective function ``c @ x``.
380
+ slack : 1-D array
381
+ The (nominally positive) values of the slack variables,
382
+ ``b_ub - A_ub @ x``.
383
+ con : 1-D array
384
+ The (nominally zero) residuals of the equality constraints,
385
+ ``b_eq - A_eq @ x``.
386
+ success : bool
387
+ ``True`` when the algorithm succeeds in finding an optimal
388
+ solution.
389
+ status : int
390
+ An integer representing the exit status of the algorithm.
391
+
392
+ ``0`` : Optimization terminated successfully.
393
+
394
+ ``1`` : Iteration limit reached.
395
+
396
+ ``2`` : Problem appears to be infeasible.
397
+
398
+ ``3`` : Problem appears to be unbounded.
399
+
400
+ ``4`` : Numerical difficulties encountered.
401
+
402
+ nit : int
403
+ The total number of iterations performed in all phases.
404
+ message : str
405
+ A string descriptor of the exit status of the algorithm.
406
+
407
+ See Also
408
+ --------
409
+ show_options : Additional options accepted by the solvers.
410
+
411
+ Notes
412
+ -----
413
+ This section describes the available solvers that can be selected by the
414
+ 'method' parameter.
415
+
416
+ `'highs-ds'` and
417
+ `'highs-ipm'` are interfaces to the
418
+ HiGHS simplex and interior-point method solvers [13]_, respectively.
419
+ `'highs'` (default) chooses between
420
+ the two automatically. These are the fastest linear
421
+ programming solvers in SciPy, especially for large, sparse problems;
422
+ which of these two is faster is problem-dependent.
423
+ The other solvers (`'interior-point'`, `'revised simplex'`, and
424
+ `'simplex'`) are legacy methods and will be removed in SciPy 1.11.0.
425
+
426
+ Method *highs-ds* is a wrapper of the C++ high performance dual
427
+ revised simplex implementation (HSOL) [13]_, [14]_. Method *highs-ipm*
428
+ is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
429
+ **m**\ ethod [13]_; it features a crossover routine, so it is as accurate
430
+ as a simplex solver. Method *highs* chooses between the two automatically.
431
+ For new code involving `linprog`, we recommend explicitly choosing one of
432
+ these three method values.
433
+
434
+ .. versionadded:: 1.6.0
435
+
436
+ Method *interior-point* uses the primal-dual path following algorithm
437
+ as outlined in [4]_. This algorithm supports sparse constraint matrices and
438
+ is typically faster than the simplex methods, especially for large, sparse
439
+ problems. Note, however, that the solution returned may be slightly less
440
+ accurate than those of the simplex methods and will not, in general,
441
+ correspond with a vertex of the polytope defined by the constraints.
442
+
443
+ .. versionadded:: 1.0.0
444
+
445
+ Method *revised simplex* uses the revised simplex method as described in
446
+ [9]_, except that a factorization [11]_ of the basis matrix, rather than
447
+ its inverse, is efficiently maintained and used to solve the linear systems
448
+ at each iteration of the algorithm.
449
+
450
+ .. versionadded:: 1.3.0
451
+
452
+ Method *simplex* uses a traditional, full-tableau implementation of
453
+ Dantzig's simplex algorithm [1]_, [2]_ (*not* the
454
+ Nelder-Mead simplex). This algorithm is included for backwards
455
+ compatibility and educational purposes.
456
+
457
+ .. versionadded:: 0.15.0
458
+
459
+ Before applying *interior-point*, *revised simplex*, or *simplex*,
460
+ a presolve procedure based on [8]_ attempts
461
+ to identify trivial infeasibilities, trivial unboundedness, and potential
462
+ problem simplifications. Specifically, it checks for:
463
+
464
+ - rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints;
465
+ - columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained
466
+ variables;
467
+ - column singletons in ``A_eq``, representing fixed variables; and
468
+ - column singletons in ``A_ub``, representing simple bounds.
469
+
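+ For example, a row of zeros in ``A_ub`` paired with a non-negative
+ entry of ``b_ub`` is trivially satisfied and is dropped, while a
+ negative entry would prove infeasibility.
+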
470
+ If presolve reveals that the problem is unbounded (e.g. an unconstrained
471
+ and unbounded variable has negative cost) or infeasible (e.g., a row of
472
+ zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver
473
+ terminates with the appropriate status code. Note that presolve terminates
474
+ as soon as any sign of unboundedness is detected; consequently, a problem
475
+ may be reported as unbounded when in reality the problem is infeasible
476
+ (but infeasibility has not been detected yet). Therefore, if it is
477
+ important to know whether the problem is actually infeasible, solve the
478
+ problem again with option ``presolve=False``.
479
+
480
+ If neither infeasibility nor unboundedness are detected in a single pass
481
+ of the presolve, bounds are tightened where possible and fixed
482
+ variables are removed from the problem. Then, linearly dependent rows
483
+ of the ``A_eq`` matrix are removed (unless they represent an
484
+ infeasibility) to avoid numerical difficulties in the primary solve
485
+ routine. Note that rows that are nearly linearly dependent (within a
486
+ prescribed tolerance) may also be removed, which can change the optimal
487
+ solution in rare cases. If this is a concern, eliminate redundancy from
488
+ your problem formulation and run with option ``rr=False`` or
489
+ ``presolve=False``.
490
+
491
+ Several potential improvements can be made here: additional presolve
492
+ checks outlined in [8]_ should be implemented, the presolve routine should
493
+ be run multiple times (until no further simplifications can be made), and
494
+ more of the efficiency improvements from [5]_ should be implemented in the
495
+ redundancy removal routines.
496
+
497
+ After presolve, the problem is transformed to standard form by converting
498
+ the (tightened) simple bounds to upper bound constraints, introducing
499
+ non-negative slack variables for inequality constraints, and expressing
500
+ unbounded variables as the difference between two non-negative variables.
501
+ Optionally, the problem is automatically scaled via equilibration [12]_.
502
+ The selected algorithm solves the standard form problem, and a
503
+ postprocessing routine converts the result to a solution to the original
504
+ problem.
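+
+ For example, an inequality constraint ``x0 + x1 <= 4`` becomes the
+ equality ``x0 + x1 + s == 4`` with a new slack variable ``s >= 0``,
+ and a free variable is expressed as the difference of two
+ non-negative variables.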
505
+
506
+ References
507
+ ----------
508
+ .. [1] Dantzig, George B., Linear programming and extensions. Rand
509
+ Corporation Research Study Princeton Univ. Press, Princeton, NJ,
510
+ 1963
511
+ .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
512
+ Mathematical Programming", McGraw-Hill, Chapter 4.
513
+ .. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
514
+ Mathematics of Operations Research (2), 1977: pp. 103-107.
515
+ .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
516
+ optimizer for linear programming: an implementation of the
517
+ homogeneous algorithm." High performance optimization. Springer US,
518
+ 2000. 197-232.
519
+ .. [5] Andersen, Erling D. "Finding all linearly dependent rows in
520
+ large-scale linear programming." Optimization Methods and Software
521
+ 6.3 (1995): 219-227.
522
+ .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
523
+ Programming based on Newton's Method." Unpublished Course Notes,
524
+ March 2004. Available 2/25/2017 at
525
+ https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
526
+ .. [7] Fourer, Robert. "Solving Linear Programs by Interior-Point Methods."
527
+ Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at
528
+ http://www.4er.org/CourseNotes/Book%20B/B-III.pdf
529
+ .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
530
+ programming." Mathematical Programming 71.2 (1995): 221-245.
531
+ .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
532
+ programming." Athena Scientific 1 (1997): 997.
533
+ .. [10] Andersen, Erling D., et al. Implementation of interior point
534
+ methods for large scale linear programming. HEC/Universite de
535
+ Geneve, 1996.
536
+ .. [11] Bartels, Richard H. "A stabilization of the simplex method."
537
+ Journal in Numerische Mathematik 16.5 (1971): 414-434.
538
+ .. [12] Tomlin, J. A. "On scaling linear programming problems."
539
+ Mathematical Programming Study 4 (1975): 146-166.
540
+ .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
541
+ "HiGHS - high performance software for linear optimization."
542
+ https://highs.dev/
543
+ .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
544
+ simplex method." Mathematical Programming Computation, 10 (1),
545
+ 119-142, 2018. DOI: 10.1007/s12532-017-0130-5
546
+
547
+ Examples
548
+ --------
549
+ Consider the following problem:
550
+
551
+ .. math::
552
+
553
+ \min_{x_0, x_1} \ -x_0 + 4x_1 & \\
554
+ \mbox{such that} \ -3x_0 + x_1 & \leq 6,\\
555
+ -x_0 - 2x_1 & \geq -4,\\
556
+ x_1 & \geq -3.
557
+
558
+ The problem is not presented in the form accepted by `linprog`. This is
559
+ easily remedied by converting the "greater than" inequality
560
+ constraint to a "less than" inequality constraint by
561
+ multiplying both sides by a factor of :math:`-1`. Note also that the last
562
+ constraint is really the simple bound :math:`-3 \leq x_1 \leq \infty`.
563
+ Finally, since there are no bounds on :math:`x_0`, we must explicitly
564
+ specify the bounds :math:`-\infty \leq x_0 \leq \infty`, as the
565
+ default is for variables to be non-negative. After collecting coefficients
566
+ into arrays and tuples, the input for this problem is:
567
+
568
+ >>> from scipy.optimize import linprog
569
+ >>> c = [-1, 4]
570
+ >>> A = [[-3, 1], [1, 2]]
571
+ >>> b = [6, 4]
572
+ >>> x0_bounds = (None, None)
573
+ >>> x1_bounds = (-3, None)
574
+ >>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds])
575
+ >>> res.fun
576
+ -22.0
577
+ >>> res.x
578
+ array([10., -3.])
579
+ >>> res.message
580
+ 'Optimization terminated successfully. (HiGHS Status 7: Optimal)'
581
+
582
+ The marginals (AKA dual values / shadow prices / Lagrange multipliers)
583
+ and residuals (slacks) are also available.
584
+
585
+ >>> res.ineqlin
586
+ residual: [ 3.900e+01 0.000e+00]
587
+ marginals: [-0.000e+00 -1.000e+00]
588
+
589
+ For example, because the marginal associated with the second inequality
590
+ constraint is -1, we expect the optimal value of the objective function
591
+ to decrease by ``eps`` if we add a small amount ``eps`` to the right hand
592
+ side of the second inequality constraint:
593
+
594
+ >>> eps = 0.05
595
+ >>> b[1] += eps
596
+ >>> linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]).fun
597
+ -22.05
598
+
599
+ Also, because the residual on the first inequality constraint is 39, we
600
+ can decrease the right hand side of the first constraint by 39 without
601
+ affecting the optimal solution.
602
+
603
+ >>> b = [6, 4] # reset to original values
604
+ >>> b[0] -= 39
605
+ >>> linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]).fun
606
+ -22.0
607
+
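+ As recommended in the Returns section, confirm the exit status before
+ relying on the solution:
+
+ >>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds])
+ >>> res.status
+ 0
+ >>> res.success
+ True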
608
+ """
609
+
610
+ meth = method.lower()
611
+ methods = {"highs", "highs-ds", "highs-ipm",
612
+ "simplex", "revised simplex", "interior-point"}
613
+
614
+ if meth not in methods:
615
+ raise ValueError(f"Unknown solver '{method}'")
616
+
617
+ if x0 is not None and meth != "revised simplex":
618
+ warning_message = "x0 is used only when method is 'revised simplex'. "
619
+ warn(warning_message, OptimizeWarning, stacklevel=2)
620
+
621
+ if np.any(integrality) and meth != "highs":
622
+ integrality = None
623
+ warning_message = ("Only `method='highs'` supports integer "
624
+ "constraints. Ignoring `integrality`.")
625
+ warn(warning_message, OptimizeWarning, stacklevel=2)
626
+ elif np.any(integrality):
627
+ integrality = np.broadcast_to(integrality, np.shape(c))
628
+
629
+ lp = _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality)
630
+ lp, solver_options = _parse_linprog(lp, options, meth)
631
+ tol = solver_options.get('tol', 1e-9)
632
+
633
+ # Give unmodified problem to HiGHS
634
+ if meth.startswith('highs'):
635
+ if callback is not None:
636
+ raise NotImplementedError("HiGHS solvers do not support the "
637
+ "callback interface.")
638
+ highs_solvers = {'highs-ipm': 'ipm', 'highs-ds': 'simplex',
639
+ 'highs': None}
640
+
641
+ sol = _linprog_highs(lp, solver=highs_solvers[meth],
642
+ **solver_options)
643
+ sol['status'], sol['message'] = (
644
+ _check_result(sol['x'], sol['fun'], sol['status'], sol['slack'],
645
+ sol['con'], lp.bounds, tol, sol['message'],
646
+ integrality))
647
+ sol['success'] = sol['status'] == 0
648
+ return OptimizeResult(sol)
649
+
650
+ warn(f"`method='{meth}'` is deprecated and will be removed in SciPy "
651
+ "1.11.0. Please use one of the HiGHS solvers (e.g. "
652
+ "`method='highs'`) in new code.", DeprecationWarning, stacklevel=2)
653
+
654
+ iteration = 0
655
+ complete = False # will become True if solved in presolve
656
+ undo = []
657
+
658
+ # Keep the original arrays to calculate slack/residuals for original
659
+ # problem.
660
+ lp_o = deepcopy(lp)
661
+
662
+ # Solve trivial problem, eliminate variables, tighten bounds, etc.
663
+ rr_method = solver_options.pop('rr_method', None) # need to pop these;
664
+ rr = solver_options.pop('rr', True) # they're not passed to methods
665
+ c0 = 0 # we might get a constant term in the objective
666
+ if solver_options.pop('presolve', True):
667
+ (lp, c0, x, undo, complete, status, message) = _presolve(lp, rr,
668
+ rr_method,
669
+ tol)
670
+
671
+ C, b_scale = 1, 1 # for trivial unscaling if autoscale is not used
672
+ postsolve_args = (lp_o._replace(bounds=lp.bounds), undo, C, b_scale)
673
+
674
+ if not complete:
675
+ A, b, c, c0, x0 = _get_Abc(lp, c0)
676
+ if solver_options.pop('autoscale', False):
677
+ A, b, c, x0, C, b_scale = _autoscale(A, b, c, x0)
678
+ postsolve_args = postsolve_args[:-2] + (C, b_scale)
679
+
680
+ if meth == 'simplex':
681
+ x, status, message, iteration = _linprog_simplex(
682
+ c, c0=c0, A=A, b=b, callback=callback,
683
+ postsolve_args=postsolve_args, **solver_options)
684
+ elif meth == 'interior-point':
685
+ x, status, message, iteration = _linprog_ip(
686
+ c, c0=c0, A=A, b=b, callback=callback,
687
+ postsolve_args=postsolve_args, **solver_options)
688
+ elif meth == 'revised simplex':
689
+ x, status, message, iteration = _linprog_rs(
690
+ c, c0=c0, A=A, b=b, x0=x0, callback=callback,
691
+ postsolve_args=postsolve_args, **solver_options)
692
+
693
+ # Eliminate artificial variables, re-introduce presolved variables, etc.
694
+ disp = solver_options.get('disp', False)
695
+
696
+ x, fun, slack, con = _postsolve(x, postsolve_args, complete)
697
+
698
+ status, message = _check_result(x, fun, status, slack, con, lp_o.bounds,
699
+ tol, message, integrality)
700
+
701
+ if disp:
702
+ _display_summary(message, status, fun, iteration)
703
+
704
+ sol = {
705
+ 'x': x,
706
+ 'fun': fun,
707
+ 'slack': slack,
708
+ 'con': con,
709
+ 'status': status,
710
+ 'message': message,
711
+ 'nit': iteration,
712
+ 'success': status == 0}
713
+
714
+ return OptimizeResult(sol)
venv/lib/python3.10/site-packages/scipy/optimize/_linprog_highs.py ADDED
@@ -0,0 +1,440 @@
1
+ """HiGHS Linear Optimization Methods
2
+
3
+ Interface to HiGHS linear optimization software.
4
+ https://highs.dev/
5
+
6
+ .. versionadded:: 1.5.0
7
+
8
+ References
9
+ ----------
10
+ .. [1] Q. Huangfu and J.A.J. Hall. "Parallelizing the dual revised simplex
11
+ method." Mathematical Programming Computation, 10 (1), 119-142,
12
+ 2018. DOI: 10.1007/s12532-017-0130-5
13
+
14
+ """
15
+
16
+ import inspect
17
+ import numpy as np
18
+ from ._optimize import OptimizeWarning, OptimizeResult
19
+ from warnings import warn
20
+ from ._highs._highs_wrapper import _highs_wrapper
21
+ from ._highs._highs_constants import (
22
+ CONST_INF,
23
+ MESSAGE_LEVEL_NONE,
24
+ HIGHS_OBJECTIVE_SENSE_MINIMIZE,
25
+
26
+ MODEL_STATUS_NOTSET,
27
+ MODEL_STATUS_LOAD_ERROR,
28
+ MODEL_STATUS_MODEL_ERROR,
29
+ MODEL_STATUS_PRESOLVE_ERROR,
30
+ MODEL_STATUS_SOLVE_ERROR,
31
+ MODEL_STATUS_POSTSOLVE_ERROR,
32
+ MODEL_STATUS_MODEL_EMPTY,
33
+ MODEL_STATUS_OPTIMAL,
34
+ MODEL_STATUS_INFEASIBLE,
35
+ MODEL_STATUS_UNBOUNDED_OR_INFEASIBLE,
36
+ MODEL_STATUS_UNBOUNDED,
37
+ MODEL_STATUS_REACHED_DUAL_OBJECTIVE_VALUE_UPPER_BOUND
38
+ as MODEL_STATUS_RDOVUB,
39
+ MODEL_STATUS_REACHED_OBJECTIVE_TARGET,
40
+ MODEL_STATUS_REACHED_TIME_LIMIT,
41
+ MODEL_STATUS_REACHED_ITERATION_LIMIT,
42
+
43
+ HIGHS_SIMPLEX_STRATEGY_DUAL,
44
+
45
+ HIGHS_SIMPLEX_CRASH_STRATEGY_OFF,
46
+
47
+ HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE,
48
+ HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG,
49
+ HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX,
50
+ HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE,
51
+ )
52
+ from scipy.sparse import csc_matrix, vstack, issparse
53
+
54
+
55
+ def _highs_to_scipy_status_message(highs_status, highs_message):
56
+ """Converts HiGHS status number/message to SciPy status number/message"""
57
+
58
+ scipy_statuses_messages = {
59
+ None: (4, "HiGHS did not provide a status code. "),
60
+ MODEL_STATUS_NOTSET: (4, ""),
61
+ MODEL_STATUS_LOAD_ERROR: (4, ""),
62
+ MODEL_STATUS_MODEL_ERROR: (2, ""),
63
+ MODEL_STATUS_PRESOLVE_ERROR: (4, ""),
64
+ MODEL_STATUS_SOLVE_ERROR: (4, ""),
65
+ MODEL_STATUS_POSTSOLVE_ERROR: (4, ""),
66
+ MODEL_STATUS_MODEL_EMPTY: (4, ""),
67
+ MODEL_STATUS_RDOVUB: (4, ""),
68
+ MODEL_STATUS_REACHED_OBJECTIVE_TARGET: (4, ""),
69
+ MODEL_STATUS_OPTIMAL: (0, "Optimization terminated successfully. "),
70
+ MODEL_STATUS_REACHED_TIME_LIMIT: (1, "Time limit reached. "),
71
+ MODEL_STATUS_REACHED_ITERATION_LIMIT: (1, "Iteration limit reached. "),
72
+ MODEL_STATUS_INFEASIBLE: (2, "The problem is infeasible. "),
73
+ MODEL_STATUS_UNBOUNDED: (3, "The problem is unbounded. "),
74
+ MODEL_STATUS_UNBOUNDED_OR_INFEASIBLE: (4, "The problem is unbounded "
75
+ "or infeasible. ")}
76
+ unrecognized = (4, "The HiGHS status code was not recognized. ")
77
+ scipy_status, scipy_message = (
78
+ scipy_statuses_messages.get(highs_status, unrecognized))
79
+ scipy_message = (f"{scipy_message}"
80
+ f"(HiGHS Status {highs_status}: {highs_message})")
81
+ return scipy_status, scipy_message
82
+
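+ # For instance, an optimal solve maps to SciPy status 0 (assuming the
+ # HiGHS enum value MODEL_STATUS_OPTIMAL == 7, cf. the example message
+ # in `linprog`'s docstring):
+ #     _highs_to_scipy_status_message(MODEL_STATUS_OPTIMAL, "Optimal")
+ #     -> (0, "Optimization terminated successfully. (HiGHS Status 7: Optimal)")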
83
+
84
+ def _replace_inf(x):
85
+ # Replace `np.inf` with CONST_INF
86
+ infs = np.isinf(x)
87
+ with np.errstate(invalid="ignore"):
88
+ x[infs] = np.sign(x[infs])*CONST_INF
89
+ return x
90
+
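+ # e.g. [1.0, np.inf, -np.inf] -> [1.0, CONST_INF, -CONST_INF]; HiGHS
+ # expects a large finite constant in place of np.inf.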
91
+
92
+ def _convert_to_highs_enum(option, option_str, choices):
93
+ # If option is in the choices we can look it up, if not use
94
+ # the default value taken from function signature and warn:
95
+ try:
96
+ return choices[option.lower()]
97
+ except AttributeError:
98
+ return choices[option]
99
+ except KeyError:
100
+ sig = inspect.signature(_linprog_highs)
101
+ default_str = sig.parameters[option_str].default
102
+ warn(f"Option {option_str} is {option}, but only values in "
103
+ f"{set(choices.keys())} are allowed. Using default: "
104
+ f"{default_str}.",
105
+ OptimizeWarning, stacklevel=3)
106
+ return choices[default_str]
107
+
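+ # Sketch: looking up 'devex' returns the DEVEX enum; ``None`` is passed
+ # through via the AttributeError branch; an unrecognized string warns
+ # and falls back to the default from _linprog_highs's signature.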
108
+
109
+ def _linprog_highs(lp, solver, time_limit=None, presolve=True,
110
+ disp=False, maxiter=None,
111
+ dual_feasibility_tolerance=None,
112
+ primal_feasibility_tolerance=None,
113
+ ipm_optimality_tolerance=None,
114
+ simplex_dual_edge_weight_strategy=None,
115
+ mip_rel_gap=None,
116
+ mip_max_nodes=None,
117
+ **unknown_options):
118
+ r"""
119
+ Solve a linear programming problem using one of the HiGHS
120
+ solvers.
121
+
122
+ User-facing documentation is in _linprog_doc.py.
123
+
124
+ Parameters
125
+ ----------
126
+ lp : _LPProblem
127
+ A ``scipy.optimize._linprog_util._LPProblem`` ``namedtuple``.
128
+ solver : "ipm" or "simplex" or None
129
+ Which HiGHS solver to use. If ``None``, HiGHS chooses automatically.
130
+
131
+ Options
132
+ -------
133
+ maxiter : int
134
+ The maximum number of iterations to perform in either phase. For
135
+ ``solver='ipm'``, this does not include the number of crossover
136
+ iterations. Default is the largest possible value for an ``int``
137
+ on the platform.
138
+ disp : bool
139
+ Set to ``True`` if indicators of optimization status are to be printed
140
+ to the console each iteration; default ``False``.
141
+ time_limit : float
142
+ The maximum time in seconds allotted to solve the problem; default is
143
+ the largest possible value for a ``double`` on the platform.
144
+ presolve : bool
145
+ Presolve attempts to identify trivial infeasibilities,
146
+ identify trivial unboundedness, and simplify the problem before
147
+ sending it to the main solver. It is generally recommended
148
+ to keep the default setting ``True``; set to ``False`` if presolve is
149
+ to be disabled.
150
+ dual_feasibility_tolerance : double
151
+ Dual feasibility tolerance. Default is 1e-07.
152
+ The minimum of this and ``primal_feasibility_tolerance``
153
+ is used for the feasibility tolerance when ``solver='ipm'``.
154
+ primal_feasibility_tolerance : double
155
+ Primal feasibility tolerance. Default is 1e-07.
156
+ The minimum of this and ``dual_feasibility_tolerance``
157
+ is used for the feasibility tolerance when ``solver='ipm'``.
158
+ ipm_optimality_tolerance : double
159
+ Optimality tolerance for ``solver='ipm'``. Default is 1e-08.
160
+ Minimum possible value is 1e-12 and must be smaller than the largest
161
+ possible value for a ``double`` on the platform.
162
+ simplex_dual_edge_weight_strategy : str (default: None)
163
+ Strategy for simplex dual edge weights. The default, ``None``,
164
+ automatically selects one of the following.
165
+
166
+ ``'dantzig'`` uses Dantzig's original strategy of choosing the most
167
+ negative reduced cost.
168
+
169
+ ``'devex'`` uses the strategy described in [15]_.
170
+
171
+ ``'steepest'`` uses the exact steepest edge strategy as described in
172
+ [16]_.
173
+
174
+ ``'steepest-devex'`` begins with the exact steepest edge strategy
175
+ until the computation is too costly or inexact and then switches to
176
+ the devex method.
177
+
178
+ Currently, using ``None`` always selects ``'steepest-devex'``, but this
179
+ may change as new options become available.
180
+
181
+ mip_max_nodes : int
182
+ The maximum number of nodes allotted to solve the problem; default is
183
+ the largest possible value for a ``HighsInt`` on the platform.
184
+ Ignored if not using the MIP solver.
185
+ unknown_options : dict
186
+ Optional arguments not used by this particular solver. If
187
+ ``unknown_options`` is non-empty, a warning is issued listing all
188
+ unused options.
189
+
190
+ Returns
191
+ -------
192
+ sol : dict
193
+ A dictionary consisting of the fields:
194
+
195
+ x : 1D array
196
+ The values of the decision variables that minimize the
197
+ objective function while satisfying the constraints.
198
+ fun : float
199
+ The optimal value of the objective function ``c @ x``.
200
+ slack : 1D array
201
+ The (nominally positive) values of the slack,
202
+ ``b_ub - A_ub @ x``.
203
+ con : 1D array
204
+ The (nominally zero) residuals of the equality constraints,
205
+ ``b_eq - A_eq @ x``.
206
+ success : bool
207
+ ``True`` when the algorithm succeeds in finding an optimal
208
+ solution.
209
+ status : int
210
+ An integer representing the exit status of the algorithm.
211
+
212
+ ``0`` : Optimization terminated successfully.
213
+
214
+ ``1`` : Iteration or time limit reached.
215
+
216
+ ``2`` : Problem appears to be infeasible.
217
+
218
+ ``3`` : Problem appears to be unbounded.
219
+
220
+ ``4`` : The HiGHS solver ran into a problem.
221
+
222
+ message : str
223
+ A string descriptor of the exit status of the algorithm.
224
+ nit : int
225
+ The total number of iterations performed.
226
+ For ``solver='simplex'``, this includes iterations in all
227
+ phases. For ``solver='ipm'``, this does not include
228
+ crossover iterations.
229
+ crossover_nit : int
230
+ The number of primal/dual pushes performed during the
231
+ crossover routine for ``solver='ipm'``. This is ``0``
232
+ for ``solver='simplex'``.
233
+ ineqlin : OptimizeResult
234
+ Solution and sensitivity information corresponding to the
235
+ inequality constraints, `b_ub`. A dictionary consisting of the
236
+ fields:
237
+
238
+ residual : np.ndarray
239
+ The (nominally positive) values of the slack variables,
240
+ ``b_ub - A_ub @ x``. This quantity is also commonly
241
+ referred to as "slack".
242
+
243
+ marginals : np.ndarray
244
+ The sensitivity (partial derivative) of the objective
245
+ function with respect to the right-hand side of the
246
+ inequality constraints, `b_ub`.
247
+
248
+ eqlin : OptimizeResult
249
+ Solution and sensitivity information corresponding to the
250
+ equality constraints, `b_eq`. A dictionary consisting of the
251
+ fields:
252
+
253
+ residual : np.ndarray
254
+ The (nominally zero) residuals of the equality constraints,
255
+ ``b_eq - A_eq @ x``.
256
+
257
+ marginals : np.ndarray
258
+ The sensitivity (partial derivative) of the objective
259
+ function with respect to the right-hand side of the
260
+ equality constraints, `b_eq`.
261
+
262
+ lower, upper : OptimizeResult
263
+ Solution and sensitivity information corresponding to the
264
+ lower and upper bounds on decision variables, `bounds`.
265
+
266
+ residual : np.ndarray
267
+ The (nominally positive) values of the quantity
268
+ ``x - lb`` (lower) or ``ub - x`` (upper).
269
+
270
+ marginals : np.ndarray
271
+ The sensitivity (partial derivative) of the objective
272
+ function with respect to the lower and upper
273
+ `bounds`.
274
+
275
+ mip_node_count : int
276
+ The number of subproblems or "nodes" solved by the MILP
277
+ solver. Only present when `integrality` is not `None`.
278
+
279
+ mip_dual_bound : float
280
+ The MILP solver's final estimate of the lower bound on the
281
+ optimal solution. Only present when `integrality` is not
282
+ `None`.
283
+
284
+ mip_gap : float
285
+ The difference between the final objective function value
286
+ and the final dual bound, scaled by the final objective
287
+ function value. Only present when `integrality` is not
288
+ `None`.
289
+
290
+ Notes
291
+ -----
292
+ The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain
293
+ `marginals`, or partial derivatives of the objective function with respect
294
+ to the right-hand side of each constraint. These partial derivatives are
295
+ also referred to as "Lagrange multipliers", "dual values", and
296
+ "shadow prices". The sign convention of `marginals` is opposite that
297
+ of Lagrange multipliers produced by many nonlinear solvers.
298
+
299
+ References
300
+ ----------
301
+ .. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code."
302
+ Mathematical programming 5.1 (1973): 1-28.
303
+ .. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge
304
+ simplex algorithm." Mathematical Programming 12.1 (1977): 361-371.
305
+ """
306
+ if unknown_options:
307
+ message = (f"Unrecognized options detected: {unknown_options}. "
308
+ "These will be passed to HiGHS verbatim.")
309
+ warn(message, OptimizeWarning, stacklevel=3)
310
+
311
+ # Map options to HiGHS enum values
312
+ simplex_dual_edge_weight_strategy_enum = _convert_to_highs_enum(
313
+ simplex_dual_edge_weight_strategy,
314
+ 'simplex_dual_edge_weight_strategy',
315
+ choices={'dantzig': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG,
316
+ 'devex': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX,
317
+ 'steepest-devex': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE,
318
+ 'steepest':
319
+ HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE,
320
+ None: None})
321
+
322
+ c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp
323
+
324
+ lb, ub = bounds.T.copy() # separate bounds; copy makes them C-contiguous
325
+ # highs_wrapper solves LHS <= A*x <= RHS, not equality constraints
326
+ with np.errstate(invalid="ignore"):
327
+ lhs_ub = -np.ones_like(b_ub)*np.inf # LHS of UB constraints is -inf
328
+ rhs_ub = b_ub # RHS of UB constraints is b_ub
329
+ lhs_eq = b_eq # Equality constraint is inequality
330
+ rhs_eq = b_eq # constraint with LHS=RHS
331
+ lhs = np.concatenate((lhs_ub, lhs_eq))
332
+ rhs = np.concatenate((rhs_ub, rhs_eq))
333
+
334
+ if issparse(A_ub) or issparse(A_eq):
335
+ A = vstack((A_ub, A_eq))
336
+ else:
337
+ A = np.vstack((A_ub, A_eq))
338
+ A = csc_matrix(A)
339
+
340
+ options = {
341
+ 'presolve': presolve,
342
+ 'sense': HIGHS_OBJECTIVE_SENSE_MINIMIZE,
343
+ 'solver': solver,
344
+ 'time_limit': time_limit,
345
+ 'highs_debug_level': MESSAGE_LEVEL_NONE,
346
+ 'dual_feasibility_tolerance': dual_feasibility_tolerance,
347
+ 'ipm_optimality_tolerance': ipm_optimality_tolerance,
348
+ 'log_to_console': disp,
349
+ 'mip_max_nodes': mip_max_nodes,
350
+ 'output_flag': disp,
351
+ 'primal_feasibility_tolerance': primal_feasibility_tolerance,
352
+ 'simplex_dual_edge_weight_strategy':
353
+ simplex_dual_edge_weight_strategy_enum,
354
+ 'simplex_strategy': HIGHS_SIMPLEX_STRATEGY_DUAL,
355
+ 'simplex_crash_strategy': HIGHS_SIMPLEX_CRASH_STRATEGY_OFF,
356
+ 'ipm_iteration_limit': maxiter,
357
+ 'simplex_iteration_limit': maxiter,
358
+ 'mip_rel_gap': mip_rel_gap,
359
+ }
360
+ options.update(unknown_options)
361
+
362
+ # np.inf doesn't work; use very large constant
363
+ rhs = _replace_inf(rhs)
364
+ lhs = _replace_inf(lhs)
365
+ lb = _replace_inf(lb)
366
+ ub = _replace_inf(ub)
367
+
368
+ if integrality is None or np.sum(integrality) == 0:
369
+ integrality = np.empty(0)
370
+ else:
371
+ integrality = np.array(integrality)
372
+
373
+ res = _highs_wrapper(c, A.indptr, A.indices, A.data, lhs, rhs,
374
+ lb, ub, integrality.astype(np.uint8), options)
375
+
376
+ # HiGHS represents constraints as lhs/rhs, so
377
+ # Ax + s = b => Ax = b - s
378
+ # and we need to split up s by A_ub and A_eq
379
+ if 'slack' in res:
380
+ slack = res['slack']
381
+ con = np.array(slack[len(b_ub):])
382
+ slack = np.array(slack[:len(b_ub)])
383
+ else:
384
+ slack, con = None, None
385
+
386
+ # lagrange multipliers for equalities/inequalities and upper/lower bounds
387
+ if 'lambda' in res:
388
+ lamda = res['lambda']
389
+ marg_ineqlin = np.array(lamda[:len(b_ub)])
390
+ marg_eqlin = np.array(lamda[len(b_ub):])
391
+ marg_upper = np.array(res['marg_bnds'][1, :])
392
+ marg_lower = np.array(res['marg_bnds'][0, :])
393
+ else:
394
+ marg_ineqlin, marg_eqlin = None, None
395
+ marg_upper, marg_lower = None, None
396
+
397
+ # this needs to be updated if we start choosing the solver intelligently
398
+
399
+ # Convert to scipy-style status and message
400
+ highs_status = res.get('status', None)
401
+ highs_message = res.get('message', None)
402
+ status, message = _highs_to_scipy_status_message(highs_status,
403
+ highs_message)
404
+
405
+ x = np.array(res['x']) if 'x' in res else None
406
+ sol = {'x': x,
407
+ 'slack': slack,
408
+ 'con': con,
409
+ 'ineqlin': OptimizeResult({
410
+ 'residual': slack,
411
+ 'marginals': marg_ineqlin,
412
+ }),
413
+ 'eqlin': OptimizeResult({
414
+ 'residual': con,
415
+ 'marginals': marg_eqlin,
416
+ }),
417
+ 'lower': OptimizeResult({
418
+ 'residual': None if x is None else x - lb,
419
+ 'marginals': marg_lower,
420
+ }),
421
+ 'upper': OptimizeResult({
422
+ 'residual': None if x is None else ub - x,
423
+ 'marginals': marg_upper
424
+ }),
425
+ 'fun': res.get('fun'),
426
+ 'status': status,
427
+ 'success': res['status'] == MODEL_STATUS_OPTIMAL,
428
+ 'message': message,
429
+ 'nit': res.get('simplex_nit', 0) or res.get('ipm_nit', 0),
430
+ 'crossover_nit': res.get('crossover_nit'),
431
+ }
432
+
433
+ if np.any(x) and integrality is not None:
434
+ sol.update({
435
+ 'mip_node_count': res.get('mip_node_count', 0),
436
+ 'mip_dual_bound': res.get('mip_dual_bound', 0.0),
437
+ 'mip_gap': res.get('mip_gap', 0.0),
438
+ })
439
+
440
+ return sol
venv/lib/python3.10/site-packages/scipy/optimize/_linprog_rs.py ADDED
@@ -0,0 +1,572 @@
1
+ """Revised simplex method for linear programming
2
+
3
+ The *revised simplex* method uses the method described in [1]_, except
4
+ that a factorization [2]_ of the basis matrix, rather than its inverse,
5
+ is efficiently maintained and used to solve the linear systems at each
6
+ iteration of the algorithm.
7
+
8
+ .. versionadded:: 1.3.0
9
+
10
+ References
11
+ ----------
12
+ .. [1] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
13
+ programming." Athena Scientific 1 (1997): 997.
14
+ .. [2] Bartels, Richard H. "A stabilization of the simplex method."
15
+ Journal in Numerische Mathematik 16.5 (1971): 414-434.
16
+
17
+ """
18
+ # Author: Matt Haberland
19
+
20
+ import numpy as np
21
+ from numpy.linalg import LinAlgError
22
+
23
+ from scipy.linalg import solve
24
+ from ._optimize import _check_unknown_options
25
+ from ._bglu_dense import LU
26
+ from ._bglu_dense import BGLU as BGLU
27
+ from ._linprog_util import _postsolve
28
+ from ._optimize import OptimizeResult
29
+
30
+
31
+ def _phase_one(A, b, x0, callback, postsolve_args, maxiter, tol, disp,
32
+ maxupdate, mast, pivot):
33
+ """
34
+ The purpose of phase one is to find an initial basic feasible solution
35
+ (BFS) to the original problem.
36
+
37
+ Generates an auxiliary problem with a trivial BFS and an objective that
38
+ minimizes infeasibility of the original problem. Solves the auxiliary
39
+ problem using the main simplex routine (phase two). This either yields
40
+ a BFS to the original problem or determines that the original problem is
41
+ infeasible. If feasible, phase one detects redundant rows in the original
42
+ constraint matrix and removes them, then chooses additional indices as
43
+ necessary to complete a basis/BFS for the original problem.
44
+ """
45
+
46
+ m, n = A.shape
47
+ status = 0
48
+
49
+ # generate auxiliary problem to get initial BFS
50
+ A, b, c, basis, x, status = _generate_auxiliary_problem(A, b, x0, tol)
51
+
52
+ if status == 6:
53
+ residual = c.dot(x)
54
+ iter_k = 0
55
+ return x, basis, A, b, residual, status, iter_k
56
+
57
+ # solve auxiliary problem
58
+ phase_one_n = n
59
+ iter_k = 0
60
+ x, basis, status, iter_k = _phase_two(c, A, x, basis, callback,
61
+ postsolve_args,
62
+ maxiter, tol, disp,
63
+ maxupdate, mast, pivot,
64
+ iter_k, phase_one_n)
65
+
66
+ # check for infeasibility
67
+ residual = c.dot(x)
68
+ if status == 0 and residual > tol:
69
+ status = 2
70
+
71
+ # drive artificial variables out of basis
72
+ # TODO: test redundant row removal better
73
+ # TODO: make solve more efficient with BGLU? This could take a while.
74
+ keep_rows = np.ones(m, dtype=bool)
75
+ for basis_column in basis[basis >= n]:
76
+ B = A[:, basis]
77
+ try:
78
+ basis_finder = np.abs(solve(B, A)) # inefficient
79
+ pertinent_row = np.argmax(basis_finder[:, basis_column])
80
+ eligible_columns = np.ones(n, dtype=bool)
81
+ eligible_columns[basis[basis < n]] = 0
82
+ eligible_column_indices = np.where(eligible_columns)[0]
83
+ index = np.argmax(basis_finder[:, :n]
84
+ [pertinent_row, eligible_columns])
85
+ new_basis_column = eligible_column_indices[index]
86
+ if basis_finder[pertinent_row, new_basis_column] < tol:
87
+ keep_rows[pertinent_row] = False
88
+ else:
89
+ basis[basis == basis_column] = new_basis_column
90
+ except LinAlgError:
91
+ status = 4
92
+
93
+ # form solution to original problem
94
+ A = A[keep_rows, :n]
95
+ basis = basis[keep_rows]
96
+ x = x[:n]
97
+ m = A.shape[0]
98
+ return x, basis, A, b, residual, status, iter_k
99
+
100
+
101
+ def _get_more_basis_columns(A, basis):
102
+ """
103
+ Called when the auxiliary problem terminates with artificial columns in
104
+ the basis, which must be removed and replaced with non-artificial
105
+ columns. Finds additional columns that do not make the matrix singular.
106
+ """
107
+ m, n = A.shape
108
+
109
+ # options for inclusion are those that aren't already in the basis
110
+ a = np.arange(m+n)
111
+ bl = np.zeros(len(a), dtype=bool)
112
+ bl[basis] = 1
113
+ options = a[~bl]
114
+ options = options[options < n] # and they have to be non-artificial
115
+
116
+ # form basis matrix
117
+ B = np.zeros((m, m))
118
+ B[:, 0:len(basis)] = A[:, basis]
119
+
120
+ if (basis.size > 0 and
121
+ np.linalg.matrix_rank(B[:, :len(basis)]) < len(basis)):
122
+ raise Exception("Basis has dependent columns")
123
+
124
+ rank = 0 # just enter the loop
125
+ for i in range(n): # somewhat arbitrary, but we need another way out
126
+ # permute the options, and take as many as needed
127
+ new_basis = np.random.permutation(options)[:m-len(basis)]
128
+ B[:, len(basis):] = A[:, new_basis] # update the basis matrix
129
+ rank = np.linalg.matrix_rank(B) # check the rank
130
+ if rank == m:
131
+ break
132
+
133
+ return np.concatenate((basis, new_basis))
134
+
135
+
136
+ def _generate_auxiliary_problem(A, b, x0, tol):
137
+ """
138
+ Modifies original problem to create an auxiliary problem with a trivial
139
+ initial basic feasible solution and an objective that minimizes
140
+ infeasibility in the original problem.
141
+
142
+ Conceptually, this is done by stacking an identity matrix on the right of
143
+ the original constraint matrix, adding artificial variables to correspond
144
+ with each of these new columns, and generating a cost vector that is all
145
+ zeros except for ones corresponding with each of the new variables.
146
+
147
+ An initial basic feasible solution is trivial: all variables are zero
148
+ except for the artificial variables, which are set equal to the
149
+ corresponding element of the right hand side `b`.
150
+
151
+ Running the simplex method on this auxiliary problem drives all of the
152
+ artificial variables - and thus the cost - to zero if the original problem
153
+ is feasible. The original problem is declared infeasible otherwise.
154
+
155
+ Much of the complexity below is to improve efficiency by using singleton
156
+ columns in the original problem where possible, thus generating artificial
157
+ variables only as necessary, and using an initial 'guess' basic feasible
158
+ solution.
159
+ """
160
+ status = 0
161
+ m, n = A.shape
162
+
163
+ if x0 is not None:
164
+ x = x0
165
+ else:
166
+ x = np.zeros(n)
167
+
168
+ r = b - A@x # residual; this must be all zeros for feasibility
169
+
170
+ A[r < 0] = -A[r < 0] # express problem with RHS positive for trivial BFS
171
+ b[r < 0] = -b[r < 0] # to the auxiliary problem
172
+ r[r < 0] *= -1
173
+
174
+ # Rows which we will need to find a trivial way to zero.
175
+ # This should just be the rows where there is a nonzero residual.
176
+ # But then we would not necessarily have a column singleton in every row.
177
+ # This makes it difficult to find an initial basis.
178
+ if x0 is None:
179
+ nonzero_constraints = np.arange(m)
180
+ else:
181
+ nonzero_constraints = np.where(r > tol)[0]
182
+
183
+ # these are (at least some of) the initial basis columns
184
+ basis = np.where(np.abs(x) > tol)[0]
185
+
186
+ if len(nonzero_constraints) == 0 and len(basis) <= m: # already a BFS
187
+ c = np.zeros(n)
188
+ basis = _get_more_basis_columns(A, basis)
189
+ return A, b, c, basis, x, status
190
+ elif (len(nonzero_constraints) > m - len(basis) or
191
+ np.any(x < 0)): # can't get trivial BFS
192
+ c = np.zeros(n)
193
+ status = 6
194
+ return A, b, c, basis, x, status
195
+
196
+ # chooses existing columns appropriate for inclusion in initial basis
197
+ cols, rows = _select_singleton_columns(A, r)
198
+
199
+ # find the rows we need to zero that we _can_ zero with column singletons
200
+ i_tofix = np.isin(rows, nonzero_constraints)
201
+ # these columns can't already be in the basis, though
202
+ # we are going to add them to the basis and change the corresponding x val
203
+ i_notinbasis = np.logical_not(np.isin(cols, basis))
204
+ i_fix_without_aux = np.logical_and(i_tofix, i_notinbasis)
205
+ rows = rows[i_fix_without_aux]
206
+ cols = cols[i_fix_without_aux]
207
+
208
+ # indices of the rows we can only zero with auxiliary variable
209
+ # these rows will get a one in each auxiliary column
210
+ arows = nonzero_constraints[np.logical_not(
211
+ np.isin(nonzero_constraints, rows))]
212
+ n_aux = len(arows)
213
+ acols = n + np.arange(n_aux) # indices of auxiliary columns
214
+
215
+ basis_ng = np.concatenate((cols, acols)) # basis columns not from guess
216
+ basis_ng_rows = np.concatenate((rows, arows)) # rows we need to zero
217
+
218
+ # add auxiliary singleton columns
219
+ A = np.hstack((A, np.zeros((m, n_aux))))
220
+ A[arows, acols] = 1
221
+
222
+ # generate initial BFS
223
+ x = np.concatenate((x, np.zeros(n_aux)))
224
+ x[basis_ng] = r[basis_ng_rows]/A[basis_ng_rows, basis_ng]
225
+
226
+ # generate costs to minimize infeasibility
227
+ c = np.zeros(n_aux + n)
228
+ c[acols] = 1
229
+
230
+ # basis columns correspond with nonzeros in guess, those with column
231
+ # singletons we used to zero remaining constraints, and any additional
232
+ # columns to get a full set (m columns)
233
+ basis = np.concatenate((basis, basis_ng))
234
+ basis = _get_more_basis_columns(A, basis) # add columns as needed
235
+
236
+ return A, b, c, basis, x, status
237
+
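+ # Sketch: for A = [[1, 1]], b = [2] and no guess, column 1 is an
+ # acceptable singleton, so the basis is seeded with x = [0, 2] and no
+ # artificial column (or nonzero cost) needs to be generated.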
238
+
239
+ def _select_singleton_columns(A, b):
240
+ """
241
+ Finds singleton columns for which the singleton entry is of the same sign
242
+ as the right-hand side; these columns are eligible for inclusion in an
243
+ initial basis. Determines the rows in which the singleton entries are
244
+ located. For each of these rows, returns the indices of the one singleton
245
+ column and its corresponding row.
246
+ """
247
+ # find indices of all singleton columns and corresponding row indices
248
+ column_indices = np.nonzero(np.sum(np.abs(A) != 0, axis=0) == 1)[0]
249
+ columns = A[:, column_indices] # array of singleton columns
250
+ row_indices = np.zeros(len(column_indices), dtype=int)
251
+ nonzero_rows, nonzero_columns = np.nonzero(columns)
252
+ row_indices[nonzero_columns] = nonzero_rows # corresponding row indices
253
+
254
+ # keep only singletons with entries that have same sign as RHS
255
+ # this is necessary because all elements of BFS must be non-negative
256
+ same_sign = A[row_indices, column_indices]*b[row_indices] >= 0
257
+ column_indices = column_indices[same_sign][::-1]
258
+ row_indices = row_indices[same_sign][::-1]
259
+ # Reversing the order so that steps below select rightmost columns
260
+ # for initial basis, which will tend to be slack variables. (If the
261
+ # guess corresponds with a basic feasible solution but a constraint
262
+ # is not satisfied with the corresponding slack variable zero, the slack
263
+ # variable must be basic.)
264
+
265
+ # for each row, keep rightmost singleton column with an entry in that row
266
+ unique_row_indices, first_columns = np.unique(row_indices,
267
+ return_index=True)
268
+ return column_indices[first_columns], unique_row_indices
269
+
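+ # e.g. for A = [[1, 0], [0, 2]] and b = [1, -1], column 0 qualifies
+ # (the signs of 1 and b[0] agree) but column 1 does not (2 vs. -1),
+ # so this returns (array([0]), array([0])).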
270
+
271
+ def _find_nonzero_rows(A, tol):
272
+ """
273
+ Returns logical array indicating the locations of rows with at least
274
+ one nonzero element.
275
+ """
276
+ return np.any(np.abs(A) > tol, axis=1)
277
+
278
+
279
+ def _select_enter_pivot(c_hat, bl, a, rule="bland", tol=1e-12):
280
+ """
281
+ Selects a pivot to enter the basis. Currently Bland's rule - the smallest
282
+ index that has a negative reduced cost - is the default.
283
+ """
284
+ if rule.lower() == "mrc": # index with minimum reduced cost
285
+ return a[~bl][np.argmin(c_hat)]
286
+ else: # smallest index w/ negative reduced cost
287
+ return a[~bl][c_hat < -tol][0]
288
+
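+ # e.g. with nonbasic columns a[~bl] = [1, 3] and reduced costs
+ # c_hat = [-2, -5], "mrc" enters column 3 (most negative reduced cost)
+ # while "bland" enters column 1 (smallest eligible index).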
289
+
290
+ def _display_iter(phase, iteration, slack, con, fun):
291
+ """
292
+ Print indicators of optimization status to the console.
293
+ """
294
+ header = iteration % 20 == 0
295
+
296
+ if header:
297
+ print("Phase",
298
+ "Iteration",
299
+ "Minimum Slack ",
300
+ "Constraint Residual",
301
+ "Objective ")
302
+
303
+ # :<X.Y left aligns Y digits in X digit spaces
304
+ fmt = '{0:<6}{1:<10}{2:<20.13}{3:<20.13}{4:<20.13}'
305
+ try:
306
+ slack = np.min(slack)
307
+ except ValueError:
308
+ slack = "NA"
309
+ print(fmt.format(phase, iteration, slack, np.linalg.norm(con), fun))
310
+
311
+
312
+ def _display_and_callback(phase_one_n, x, postsolve_args, status,
313
+ iteration, disp, callback):
314
+ if phase_one_n is not None:
315
+ phase = 1
316
+ x_postsolve = x[:phase_one_n]
317
+ else:
318
+ phase = 2
319
+ x_postsolve = x
320
+ x_o, fun, slack, con = _postsolve(x_postsolve,
321
+ postsolve_args)
322
+
323
+ if callback is not None:
324
+ res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack,
325
+ 'con': con, 'nit': iteration,
326
+ 'phase': phase, 'complete': False,
327
+ 'status': status, 'message': "",
328
+ 'success': False})
329
+ callback(res)
330
+ if disp:
331
+ _display_iter(phase, iteration, slack, con, fun)
332
+
333
+
334
+ def _phase_two(c, A, x, b, callback, postsolve_args, maxiter, tol, disp,
335
+ maxupdate, mast, pivot, iteration=0, phase_one_n=None):
336
+ """
337
+ The heart of the simplex method. Beginning with a basic feasible solution,
338
+ moves to adjacent basic feasible solutions with successively lower reduced cost.
339
+ Terminates when there are no basic feasible solutions with lower reduced
340
+ cost or if the problem is determined to be unbounded.
341
+
342
+ This implementation follows the revised simplex method based on LU
343
+ decomposition. Rather than maintaining a tableau or an inverse of the
344
+ basis matrix, we keep a factorization of the basis matrix that allows
345
+ efficient solution of linear systems while avoiding stability issues
346
+ associated with inverted matrices.
347
+ """
348
+ m, n = A.shape
349
+ status = 0
350
+ a = np.arange(n) # indices of columns of A
351
+ ab = np.arange(m) # indices of columns of B
352
+ if maxupdate:
353
+ # basis matrix factorization object; similar to B = A[:, b]
354
+ B = BGLU(A, b, maxupdate, mast)
355
+ else:
356
+ B = LU(A, b)
357
+
358
+ for iteration in range(iteration, maxiter):
359
+
360
+ if disp or callback is not None:
361
+ _display_and_callback(phase_one_n, x, postsolve_args, status,
362
+ iteration, disp, callback)
363
+
364
+ bl = np.zeros(len(a), dtype=bool)
365
+ bl[b] = 1
366
+
367
+ xb = x[b] # basic variables
368
+ cb = c[b] # basic costs
369
+
370
+ try:
371
+ v = B.solve(cb, transposed=True) # similar to v = solve(B.T, cb)
372
+ except LinAlgError:
373
+ status = 4
374
+ break
375
+
376
+ # TODO: cythonize?
377
+ c_hat = c - v.dot(A) # reduced cost
378
+ c_hat = c_hat[~bl]
379
+ # Above is much faster than:
380
+ # N = A[:, ~bl] # slow!
381
+ # c_hat = c[~bl] - v.T.dot(N)
382
+ # Can we perform the multiplication only on the nonbasic columns?
383
+
384
+ if np.all(c_hat >= -tol): # all reduced costs positive -> terminate
385
+ break
386
+
387
+ j = _select_enter_pivot(c_hat, bl, a, rule=pivot, tol=tol)
388
+ u = B.solve(A[:, j]) # similar to u = solve(B, A[:, j])
389
+
390
+ i = u > tol # if none of the u are positive, unbounded
391
+ if not np.any(i):
392
+ status = 3
393
+ break
394
+
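+ # min-ratio test: the entering variable increases until the first
+ # basic variable is driven to zero; that variable leaves the basis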
395
+ th = xb[i]/u[i]
396
+ l = np.argmin(th) # implicitly selects smallest subscript
397
+ th_star = th[l] # step size
398
+
399
+ x[b] = x[b] - th_star*u # take step
400
+ x[j] = th_star
401
+ B.update(ab[i][l], j) # modify basis
402
+ b = B.b # similar to b[ab[i][l]] =
403
+
404
+ else:
405
+ # If the end of the for loop is reached (without a break statement),
406
+ # then another step has been taken, so the iteration counter should
407
+ # increment, info should be displayed, and callback should be called.
408
+ iteration += 1
409
+ status = 1
410
+ if disp or callback is not None:
411
+ _display_and_callback(phase_one_n, x, postsolve_args, status,
412
+ iteration, disp, callback)
413
+
414
+ return x, b, status, iteration
415
+
416
+
417
+ def _linprog_rs(c, c0, A, b, x0, callback, postsolve_args,
418
+ maxiter=5000, tol=1e-12, disp=False,
419
+ maxupdate=10, mast=False, pivot="mrc",
420
+ **unknown_options):
421
+ """
422
+ Solve the following linear programming problem via a two-phase
423
+ revised simplex algorithm::
424
+
425
+ minimize: c @ x
426
+
427
+ subject to: A @ x == b
428
+ 0 <= x < oo
429
+
430
+ User-facing documentation is in _linprog_doc.py.
431
+
432
+ Parameters
433
+ ----------
434
+ c : 1-D array
435
+ Coefficients of the linear objective function to be minimized.
436
+ c0 : float
437
+ Constant term in objective function due to fixed (and eliminated)
438
+ variables. (Currently unused.)
439
+ A : 2-D array
440
+ 2-D array which, when matrix-multiplied by ``x``, gives the values of
441
+ the equality constraints at ``x``.
442
+ b : 1-D array
443
+ 1-D array of values representing the RHS of each equality constraint
444
+ (row) in ``A``.
445
+ x0 : 1-D array, optional
446
+ Starting values of the independent variables, which will be refined by
447
+ the optimization algorithm. For the revised simplex method, these must
448
+ correspond with a basic feasible solution.
449
+ callback : callable, optional
450
+ If a callback function is provided, it will be called within each
451
+ iteration of the algorithm. The callback function must accept a single
452
+ `scipy.optimize.OptimizeResult` consisting of the following fields:
453
+
454
+ x : 1-D array
455
+ Current solution vector.
456
+ fun : float
457
+ Current value of the objective function ``c @ x``.
458
+ success : bool
459
+ True only when an algorithm has completed successfully,
460
+ so this is always False as the callback function is called
461
+ only while the algorithm is still iterating.
462
+ slack : 1-D array
463
+ The values of the slack variables. Each slack variable
464
+ corresponds to an inequality constraint. If the slack is zero,
465
+ the corresponding constraint is active.
466
+ con : 1-D array
467
+ The (nominally zero) residuals of the equality constraints,
468
+ that is, ``b - A_eq @ x``.
469
+ phase : int
470
+ The phase of the algorithm being executed.
471
+ status : int
472
+ For revised simplex, this is always 0 because if a different
473
+ status is detected, the algorithm terminates.
474
+ nit : int
475
+ The number of iterations performed.
476
+ message : str
477
+ A string descriptor of the exit status of the optimization.
478
+ postsolve_args : tuple
479
+ Data needed by _postsolve to convert the solution to the standard-form
480
+ problem into the solution to the original problem.
481
+
482
+ Options
483
+ -------
484
+ maxiter : int
485
+ The maximum number of iterations to perform in either phase.
486
+ tol : float
487
+ The tolerance which determines when a solution is "close enough" to
488
+ zero in Phase 1 to be considered a basic feasible solution or close
489
+ enough to positive to serve as an optimal solution.
490
+ disp : bool
491
+ Set to ``True`` if indicators of optimization status are to be printed
492
+ to the console each iteration.
493
+ maxupdate : int
494
+ The maximum number of updates performed on the LU factorization.
495
+ After this many updates, the basis matrix is factorized
496
+ from scratch.
497
+ mast : bool
498
+ Minimize Amortized Solve Time. If enabled, the average time to solve
499
+ a linear system using the basis factorization is measured. Typically,
500
+ the average solve time will decrease with each successive solve after
501
+ initial factorization, as factorization takes much more time than the
502
+ solve operation (and updates). Eventually, however, the updated
503
+ factorization becomes sufficiently complex that the average solve time
504
+ begins to increase. When this is detected, the basis is refactorized
505
+ from scratch. Enable this option to maximize speed at the risk of
506
+ nondeterministic behavior. Ignored if ``maxupdate`` is 0.
507
+ pivot : "mrc" or "bland"
508
+ Pivot rule: Minimum Reduced Cost (default) or Bland's rule. Choose
509
+ Bland's rule if the iteration limit is reached and cycling is suspected.
510
+ unknown_options : dict
511
+ Optional arguments not used by this particular solver. If
512
+ `unknown_options` is non-empty a warning is issued listing all
513
+ unused options.
514
+
515
+ Returns
516
+ -------
517
+ x : 1-D array
518
+ Solution vector.
519
+ status : int
520
+ An integer representing the exit status of the optimization::
521
+
522
+ 0 : Optimization terminated successfully
523
+ 1 : Iteration limit reached
524
+ 2 : Problem appears to be infeasible
525
+ 3 : Problem appears to be unbounded
526
+ 4 : Numerical difficulties encountered
527
+ 5 : No constraints; turn presolve on
528
+ 6 : Guess x0 cannot be converted to a basic feasible solution
529
+
530
+ message : str
531
+ A string descriptor of the exit status of the optimization.
532
+ iteration : int
533
+ The number of iterations taken to solve the problem.
534
+ """
535
+
536
+ _check_unknown_options(unknown_options)
537
+
538
+ messages = ["Optimization terminated successfully.",
539
+ "Iteration limit reached.",
540
+ "The problem appears infeasible, as the phase one auxiliary "
541
+ "problem terminated successfully with a residual of {0:.1e}, "
542
+ "greater than the tolerance {1} required for the solution to "
543
+ "be considered feasible. Consider increasing the tolerance to "
544
+ "be greater than {0:.1e}. If this tolerance is unnaceptably "
545
+ "large, the problem is likely infeasible.",
546
+ "The problem is unbounded, as the simplex algorithm found "
547
+ "a basic feasible solution from which there is a direction "
548
+ "with negative reduced cost in which all decision variables "
549
+ "increase.",
550
+ "Numerical difficulties encountered; consider trying "
551
+ "method='interior-point'.",
552
+ "Problems with no constraints are trivially solved; please "
553
+ "turn presolve on.",
554
+ "The guess x0 cannot be converted to a basic feasible "
555
+ "solution. "
556
+ ]
557
+
558
+ if A.size == 0: # address test_unbounded_below_no_presolve_corrected
559
+ return np.zeros(c.shape), 5, messages[5], 0
560
+
561
+ x, basis, A, b, residual, status, iteration = (
562
+ _phase_one(A, b, x0, callback, postsolve_args,
563
+ maxiter, tol, disp, maxupdate, mast, pivot))
564
+
565
+ if status == 0:
566
+ x, basis, status, iteration = _phase_two(c, A, x, basis, callback,
567
+ postsolve_args,
568
+ maxiter, tol, disp,
569
+ maxupdate, mast, pivot,
570
+ iteration)
571
+
572
+ return x, status, messages[status].format(residual, tol), iteration
venv/lib/python3.10/site-packages/scipy/optimize/_linprog_util.py ADDED
@@ -0,0 +1,1522 @@
1
+ """
2
+ Method-agnostic utility functions for linear programming
3
+ """
4
+
5
+ import numpy as np
6
+ import scipy.sparse as sps
7
+ from warnings import warn
8
+ from ._optimize import OptimizeWarning
9
+ from scipy.optimize._remove_redundancy import (
10
+ _remove_redundancy_svd, _remove_redundancy_pivot_sparse,
11
+ _remove_redundancy_pivot_dense, _remove_redundancy_id
12
+ )
13
+ from collections import namedtuple
14
+
15
+ _LPProblem = namedtuple('_LPProblem',
16
+ 'c A_ub b_ub A_eq b_eq bounds x0 integrality')
17
+ _LPProblem.__new__.__defaults__ = (None,) * 7 # make c the only required arg
18
+ _LPProblem.__doc__ = \
19
+ """ Represents a linear-programming problem.
20
+
21
+ Attributes
22
+ ----------
23
+ c : 1D array
24
+ The coefficients of the linear objective function to be minimized.
25
+ A_ub : 2D array, optional
26
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
27
+ coefficients of a linear inequality constraint on ``x``.
28
+ b_ub : 1D array, optional
29
+ The inequality constraint vector. Each element represents an
30
+ upper bound on the corresponding value of ``A_ub @ x``.
31
+ A_eq : 2D array, optional
32
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
33
+ coefficients of a linear equality constraint on ``x``.
34
+ b_eq : 1D array, optional
35
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
36
+ the corresponding element of ``b_eq``.
37
+ bounds : various valid formats, optional
38
+ The bounds of ``x``, as ``min`` and ``max`` pairs.
39
+ If bounds are specified for all N variables separately, valid formats
40
+ are:
41
+ * a 2D array (N x 2);
42
+ * a sequence of N sequences, each with 2 values.
43
+ If all variables have the same bounds, the bounds can be specified as
44
+ a 1-D or 2-D array or sequence with 2 scalar values.
45
+ If all variables have a lower bound of 0 and no upper bound, the bounds
46
+ parameter can be omitted (or given as None).
47
+ Absent lower and/or upper bounds can be specified as -numpy.inf (no
48
+ lower bound), numpy.inf (no upper bound) or None (both).
49
+ x0 : 1D array, optional
50
+ Guess values of the decision variables, which will be refined by
51
+ the optimization algorithm. This argument is currently used only by the
52
+ 'revised simplex' method, and can only be used if `x0` represents a
53
+ basic feasible solution.
54
+ integrality : 1-D array or int, optional
55
+ Indicates the type of integrality constraint on each decision variable.
56
+
57
+ ``0`` : Continuous variable; no integrality constraint.
58
+
59
+ ``1`` : Integer variable; decision variable must be an integer
60
+ within `bounds`.
61
+
62
+ ``2`` : Semi-continuous variable; decision variable must be within
63
+ `bounds` or take value ``0``.
64
+
65
+ ``3`` : Semi-integer variable; decision variable must be an integer
66
+ within `bounds` or take value ``0``.
67
+
68
+ By default, all variables are continuous.
69
+
70
+ For mixed integrality constraints, supply an array of shape `c.shape`.
71
+ To infer a constraint on each decision variable from shorter inputs,
72
+ the argument will be broadcast to `c.shape` using `np.broadcast_to`.
73
+
74
+ This argument is currently used only by the ``'highs'`` method and
75
+ ignored otherwise.
76
+
77
+ Notes
78
+ -----
79
+ This namedtuple supports two ways of initialization:
80
+ >>> lp1 = _LPProblem(c=[-1, 4], A_ub=[[-3, 1], [1, 2]], b_ub=[6, 4])
81
+ >>> lp2 = _LPProblem([-1, 4], [[-3, 1], [1, 2]], [6, 4])
82
+
83
+ Note that only ``c`` is a required argument here, whereas all other arguments
84
+ ``A_ub``, ``b_ub``, ``A_eq``, ``b_eq``, ``bounds``, ``x0``, ``integrality`` are
85
+ optional with default values of None.
86
+ For example, ``A_eq`` and ``b_eq`` can be set without ``A_ub`` or ``b_ub``:
87
+ >>> lp3 = _LPProblem(c=[-1, 4], A_eq=[[2, 1]], b_eq=[10])
88
+ """
89
+
90
+
91
+ def _check_sparse_inputs(options, meth, A_ub, A_eq):
92
+ """
93
+ Check the provided ``A_ub`` and ``A_eq`` matrices conform to the specified
94
+ optional sparsity variables.
95
+
96
+ Parameters
97
+ ----------
98
+ A_ub : 2-D array, optional
99
+ 2-D array such that ``A_ub @ x`` gives the values of the upper-bound
100
+ inequality constraints at ``x``.
101
+ A_eq : 2-D array, optional
102
+ 2-D array such that ``A_eq @ x`` gives the values of the equality
103
+ constraints at ``x``.
104
+ options : dict
105
+ A dictionary of solver options. All methods accept the following
106
+ generic options:
107
+
108
+ maxiter : int
109
+ Maximum number of iterations to perform.
110
+ disp : bool
111
+ Set to True to print convergence messages.
112
+
113
+ For method-specific options, see :func:`show_options('linprog')`.
114
+ meth : str
115
+ The algorithm used to solve the standard form problem.
116
+
117
+ Returns
118
+ -------
119
+ A_ub : 2-D array, optional
120
+ 2-D array such that ``A_ub @ x`` gives the values of the upper-bound
121
+ inequality constraints at ``x``.
122
+ A_eq : 2-D array, optional
123
+ 2-D array such that ``A_eq @ x`` gives the values of the equality
124
+ constraints at ``x``.
125
+ options : dict
126
+ A dictionary of solver options. All methods accept the following
127
+ generic options:
128
+
129
+ maxiter : int
130
+ Maximum number of iterations to perform.
131
+ disp : bool
132
+ Set to True to print convergence messages.
133
+
134
+ For method-specific options, see :func:`show_options('linprog')`.
135
+ """
136
+ # This is an undocumented option for unit testing sparse presolve
137
+ _sparse_presolve = options.pop('_sparse_presolve', False)
138
+ if _sparse_presolve and A_eq is not None:
139
+ A_eq = sps.coo_matrix(A_eq)
140
+ if _sparse_presolve and A_ub is not None:
141
+ A_ub = sps.coo_matrix(A_ub)
142
+
143
+ sparse_constraint = sps.issparse(A_eq) or sps.issparse(A_ub)
144
+
145
+ preferred_methods = {"highs", "highs-ds", "highs-ipm"}
146
+ dense_methods = {"simplex", "revised simplex"}
147
+ if meth in dense_methods and sparse_constraint:
148
+ raise ValueError(f"Method '{meth}' does not support sparse "
149
+ "constraint matrices. Please consider using one of "
150
+ f"{preferred_methods}.")
151
+
152
+ sparse = options.get('sparse', False)
153
+ if not sparse and sparse_constraint and meth == 'interior-point':
154
+ options['sparse'] = True
155
+ warn("Sparse constraint matrix detected; setting 'sparse':True.",
156
+ OptimizeWarning, stacklevel=4)
157
+ return options, A_ub, A_eq
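+ # Illustrative behavior (hypothetical values): passing a sparse constraint
+ # matrix with meth='interior-point' and no 'sparse' option set emits an
+ # OptimizeWarning and enables the option, e.g.:
+ # opts, A_ub, A_eq = _check_sparse_inputs({}, 'interior-point',
+ # A_ub, sps.csr_matrix(A_eq))
+ # assert opts['sparse'] is True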
158
+
159
+
160
+ def _format_A_constraints(A, n_x, sparse_lhs=False):
161
+ """Format the left hand side of the constraints to a 2-D array
162
+
163
+ Parameters
164
+ ----------
165
+ A : 2-D array
166
+ 2-D array such that ``A @ x`` gives the values of the upper-bound
167
+ (in)equality constraints at ``x``.
168
+ n_x : int
169
+ The number of variables in the linear programming problem.
170
+ sparse_lhs : bool
171
+ Whether either of `A_ub` or `A_eq` is sparse. If True, return a
172
+ coo_matrix instead of a numpy array.
173
+
174
+ Returns
175
+ -------
176
+ np.ndarray or sparse.coo_matrix
177
+ 2-D array such that ``A @ x`` gives the values of the upper-bound
178
+ (in)equality constraints at ``x``.
179
+
180
+ """
181
+ if sparse_lhs:
182
+ return sps.coo_matrix(
183
+ (0, n_x) if A is None else A, dtype=float, copy=True
184
+ )
185
+ elif A is None:
186
+ return np.zeros((0, n_x), dtype=float)
187
+ else:
188
+ return np.array(A, dtype=float, copy=True)
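+ # (e.g. _format_A_constraints(None, 3) returns np.zeros((0, 3)); keeping a
+ # (0, n_x)-shaped placeholder lets downstream vstack/matmul code treat
+ # "no constraints of this type" uniformly.)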
189
+
190
+
191
+ def _format_b_constraints(b):
192
+ """Format the upper bounds of the constraints to a 1-D array
193
+
194
+ Parameters
195
+ ----------
196
+ b : 1-D array
197
+ 1-D array of values representing the upper-bound of each (in)equality
198
+ constraint (row) in ``A``.
199
+
200
+ Returns
201
+ -------
202
+ 1-D np.array
203
+ 1-D array of values representing the upper-bound of each (in)equality
204
+ constraint (row) in ``A``.
205
+
206
+ """
207
+ if b is None:
208
+ return np.array([], dtype=float)
209
+ b = np.array(b, dtype=float, copy=True).squeeze()
210
+ return b if b.size != 1 else b.reshape(-1)
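+ # (e.g. a scalar b=5 squeezes to a 0-d array and is reshaped back to
+ # array([5.]), while a column like [[1], [2]] squeezes to array([1., 2.]).)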
211
+
212
+
213
+ def _clean_inputs(lp):
214
+ """
215
+ Given user inputs for a linear programming problem, return the
216
+ objective vector, upper bound constraints, equality constraints,
217
+ and simple bounds in a preferred format.
218
+
219
+ Parameters
220
+ ----------
221
+ lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
222
+
223
+ c : 1D array
224
+ The coefficients of the linear objective function to be minimized.
225
+ A_ub : 2D array, optional
226
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
227
+ coefficients of a linear inequality constraint on ``x``.
228
+ b_ub : 1D array, optional
229
+ The inequality constraint vector. Each element represents an
230
+ upper bound on the corresponding value of ``A_ub @ x``.
231
+ A_eq : 2D array, optional
232
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
233
+ coefficients of a linear equality constraint on ``x``.
234
+ b_eq : 1D array, optional
235
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
236
+ the corresponding element of ``b_eq``.
237
+ bounds : various valid formats, optional
238
+ The bounds of ``x``, as ``min`` and ``max`` pairs.
239
+ If bounds are specified for all N variables separately, valid formats are:
240
+ * a 2D array (2 x N or N x 2);
241
+ * a sequence of N sequences, each with 2 values.
242
+ If all variables have the same bounds, a single pair of values can
243
+ be specified. Valid formats are:
244
+ * a sequence with 2 scalar values;
245
+ * a sequence with a single element containing 2 scalar values.
246
+ If all variables have a lower bound of 0 and no upper bound, the bounds
247
+ parameter can be omitted (or given as None).
248
+ x0 : 1D array, optional
249
+ Guess values of the decision variables, which will be refined by
250
+ the optimization algorithm. This argument is currently used only by the
251
+ 'revised simplex' method, and can only be used if `x0` represents a
252
+ basic feasible solution.
253
+
254
+ Returns
255
+ -------
256
+ lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
257
+
258
+ c : 1D array
259
+ The coefficients of the linear objective function to be minimized.
260
+ A_ub : 2D array, optional
261
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
262
+ coefficients of a linear inequality constraint on ``x``.
263
+ b_ub : 1D array, optional
264
+ The inequality constraint vector. Each element represents an
265
+ upper bound on the corresponding value of ``A_ub @ x``.
266
+ A_eq : 2D array, optional
267
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
268
+ coefficients of a linear equality constraint on ``x``.
269
+ b_eq : 1D array, optional
270
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
271
+ the corresponding element of ``b_eq``.
272
+ bounds : 2D array
273
+ The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N
274
+ elements of ``x``. The N x 2 array contains lower bounds in the first
275
+ column and upper bounds in the 2nd. Unbounded variables have lower
276
+ bound -np.inf and/or upper bound np.inf.
277
+ x0 : 1D array, optional
278
+ Guess values of the decision variables, which will be refined by
279
+ the optimization algorithm. This argument is currently used only by the
280
+ 'revised simplex' method, and can only be used if `x0` represents a
281
+ basic feasible solution.
282
+
283
+ """
284
+ c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp
285
+
286
+ if c is None:
287
+ raise TypeError
288
+
289
+ try:
290
+ c = np.array(c, dtype=np.float64, copy=True).squeeze()
291
+ except ValueError as e:
292
+ raise TypeError(
293
+ "Invalid input for linprog: c must be a 1-D array of numerical "
294
+ "coefficients") from e
295
+ else:
296
+ # If c is a single value, convert it to a 1-D array.
297
+ if c.size == 1:
298
+ c = c.reshape(-1)
299
+
300
+ n_x = len(c)
301
+ if n_x == 0 or len(c.shape) != 1:
302
+ raise ValueError(
303
+ "Invalid input for linprog: c must be a 1-D array and must "
304
+ "not have more than one non-singleton dimension")
305
+ if not np.isfinite(c).all():
306
+ raise ValueError(
307
+ "Invalid input for linprog: c must not contain values "
308
+ "inf, nan, or None")
309
+
310
+ sparse_lhs = sps.issparse(A_eq) or sps.issparse(A_ub)
311
+ try:
312
+ A_ub = _format_A_constraints(A_ub, n_x, sparse_lhs=sparse_lhs)
313
+ except ValueError as e:
314
+ raise TypeError(
315
+ "Invalid input for linprog: A_ub must be a 2-D array "
316
+ "of numerical values") from e
317
+ else:
318
+ n_ub = A_ub.shape[0]
319
+ if len(A_ub.shape) != 2 or A_ub.shape[1] != n_x:
320
+ raise ValueError(
321
+ "Invalid input for linprog: A_ub must have exactly two "
322
+ "dimensions, and the number of columns in A_ub must be "
323
+ "equal to the size of c")
324
+ if (sps.issparse(A_ub) and not np.isfinite(A_ub.data).all()
325
+ or not sps.issparse(A_ub) and not np.isfinite(A_ub).all()):
326
+ raise ValueError(
327
+ "Invalid input for linprog: A_ub must not contain values "
328
+ "inf, nan, or None")
329
+
330
+ try:
331
+ b_ub = _format_b_constraints(b_ub)
332
+ except ValueError as e:
333
+ raise TypeError(
334
+ "Invalid input for linprog: b_ub must be a 1-D array of "
335
+ "numerical values, each representing the upper bound of an "
336
+ "inequality constraint (row) in A_ub") from e
337
+ else:
338
+ if b_ub.shape != (n_ub,):
339
+ raise ValueError(
340
+ "Invalid input for linprog: b_ub must be a 1-D array; b_ub "
341
+ "must not have more than one non-singleton dimension and "
342
+ "the number of rows in A_ub must equal the number of values "
343
+ "in b_ub")
344
+ if not np.isfinite(b_ub).all():
345
+ raise ValueError(
346
+ "Invalid input for linprog: b_ub must not contain values "
347
+ "inf, nan, or None")
348
+
349
+ try:
350
+ A_eq = _format_A_constraints(A_eq, n_x, sparse_lhs=sparse_lhs)
351
+ except ValueError as e:
352
+ raise TypeError(
353
+ "Invalid input for linprog: A_eq must be a 2-D array "
354
+ "of numerical values") from e
355
+ else:
356
+ n_eq = A_eq.shape[0]
357
+ if len(A_eq.shape) != 2 or A_eq.shape[1] != n_x:
358
+ raise ValueError(
359
+ "Invalid input for linprog: A_eq must have exactly two "
360
+ "dimensions, and the number of columns in A_eq must be "
361
+ "equal to the size of c")
362
+
363
+ if (sps.issparse(A_eq) and not np.isfinite(A_eq.data).all()
364
+ or not sps.issparse(A_eq) and not np.isfinite(A_eq).all()):
365
+ raise ValueError(
366
+ "Invalid input for linprog: A_eq must not contain values "
367
+ "inf, nan, or None")
368
+
369
+ try:
370
+ b_eq = _format_b_constraints(b_eq)
371
+ except ValueError as e:
372
+ raise TypeError(
373
+ "Invalid input for linprog: b_eq must be a dense, 1-D array of "
374
+ "numerical values, each representing the right hand side of an "
375
+ "equality constraint (row) in A_eq") from e
376
+ else:
377
+ if b_eq.shape != (n_eq,):
378
+ raise ValueError(
379
+ "Invalid input for linprog: b_eq must be a 1-D array; b_eq "
380
+ "must not have more than one non-singleton dimension and "
381
+ "the number of rows in A_eq must equal the number of values "
382
+ "in b_eq")
383
+ if not np.isfinite(b_eq).all():
384
+ raise ValueError(
385
+ "Invalid input for linprog: b_eq must not contain values "
386
+ "inf, nan, or None")
387
+
388
+ # x0 gives an (optional) starting solution to the solver. If x0 is None,
389
+ # skip the checks. Initial solution will be generated automatically.
390
+ if x0 is not None:
391
+ try:
392
+ x0 = np.array(x0, dtype=float, copy=True).squeeze()
393
+ except ValueError as e:
394
+ raise TypeError(
395
+ "Invalid input for linprog: x0 must be a 1-D array of "
396
+ "numerical coefficients") from e
397
+ if x0.ndim == 0:
398
+ x0 = x0.reshape(-1)
399
+ if len(x0) == 0 or x0.ndim != 1:
400
+ raise ValueError(
401
+ "Invalid input for linprog: x0 should be a 1-D array; it "
402
+ "must not have more than one non-singleton dimension")
403
+ if not x0.size == c.size:
404
+ raise ValueError(
405
+ "Invalid input for linprog: x0 and c should contain the "
406
+ "same number of elements")
407
+ if not np.isfinite(x0).all():
408
+ raise ValueError(
409
+ "Invalid input for linprog: x0 must not contain values "
410
+ "inf, nan, or None")
411
+
412
+ # Bounds can be one of these formats:
413
+ # (1) a 2-D array or sequence, with shape N x 2
414
+ # (2) a 1-D or 2-D sequence or array with 2 scalars
415
+ # (3) None (or an empty sequence or array)
416
+ # Unspecified bounds can be represented by None or (-)np.inf.
417
+ # All formats are converted into an N x 2 np.array with (-)np.inf where
418
+ # bounds are unspecified.
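+ # For example, with n_x = 3:
+ # bounds=None -> [[0, inf], [0, inf], [0, inf]]
+ # bounds=(1, None) -> [[1, inf], [1, inf], [1, inf]]
+ # bounds=[(0, 1), (None, 2), (3, None)] -> [[0, 1], [-inf, 2], [3, inf]]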
419
+
420
+ # Prepare clean bounds array
421
+ bounds_clean = np.zeros((n_x, 2), dtype=float)
422
+
423
+ # Convert to a numpy array.
424
+ # np.array(..,dtype=float) raises an error if dimensions are inconsistent
425
+ # or if there are invalid data types in bounds. Just add a linprog prefix
426
+ # to the error and re-raise.
427
+ # Creating at least a 2-D array simplifies the cases to distinguish below.
428
+ if bounds is None or np.array_equal(bounds, []) or np.array_equal(bounds, [[]]):
429
+ bounds = (0, np.inf)
430
+ try:
431
+ bounds_conv = np.atleast_2d(np.array(bounds, dtype=float))
432
+ except ValueError as e:
433
+ raise ValueError(
434
+ "Invalid input for linprog: unable to interpret bounds, "
435
+ "check values and dimensions: " + e.args[0]) from e
436
+ except TypeError as e:
437
+ raise TypeError(
438
+ "Invalid input for linprog: unable to interpret bounds, "
439
+ "check values and dimensions: " + e.args[0]) from e
440
+
441
+ # Check bounds options
442
+ bsh = bounds_conv.shape
443
+ if len(bsh) > 2:
444
+ # Do not try to handle multidimensional bounds input
445
+ raise ValueError(
446
+ "Invalid input for linprog: provide a 2-D array for bounds, "
447
+ f"not a {len(bsh):d}-D array.")
448
+ elif np.all(bsh == (n_x, 2)):
449
+ # Regular N x 2 array
450
+ bounds_clean = bounds_conv
451
+ elif (np.all(bsh == (2, 1)) or np.all(bsh == (1, 2))):
452
+ # 2 values: interpret as overall lower and upper bound
453
+ bounds_flat = bounds_conv.flatten()
454
+ bounds_clean[:, 0] = bounds_flat[0]
455
+ bounds_clean[:, 1] = bounds_flat[1]
456
+ elif np.all(bsh == (2, n_x)):
457
+ # Reject a 2 x N array
458
+ raise ValueError(
459
+ f"Invalid input for linprog: provide a {n_x:d} x 2 array for bounds, "
460
+ f"not a 2 x {n_x:d} array.")
461
+ else:
462
+ raise ValueError(
463
+ "Invalid input for linprog: unable to interpret bounds with this "
464
+ f"dimension tuple: {bsh}.")
465
+
466
+ # The process above creates NaNs where the input specified None.
467
+ # Convert the NaNs in the 1st column to -np.inf and in the 2nd column
468
+ # to np.inf
469
+ i_none = np.isnan(bounds_clean[:, 0])
470
+ bounds_clean[i_none, 0] = -np.inf
471
+ i_none = np.isnan(bounds_clean[:, 1])
472
+ bounds_clean[i_none, 1] = np.inf
473
+
474
+ return _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds_clean, x0, integrality)
475
+
476
+
477
+ def _presolve(lp, rr, rr_method, tol=1e-9):
478
+ """
479
+ Given inputs for a linear programming problem in preferred format,
480
+ presolve the problem: identify trivial infeasibilities, redundancies,
481
+ and unboundedness, tighten bounds where possible, and eliminate fixed
482
+ variables.
483
+
484
+ Parameters
485
+ ----------
486
+ lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
487
+
488
+ c : 1D array
489
+ The coefficients of the linear objective function to be minimized.
490
+ A_ub : 2D array, optional
491
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
492
+ coefficients of a linear inequality constraint on ``x``.
493
+ b_ub : 1D array, optional
494
+ The inequality constraint vector. Each element represents an
495
+ upper bound on the corresponding value of ``A_ub @ x``.
496
+ A_eq : 2D array, optional
497
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
498
+ coefficients of a linear equality constraint on ``x``.
499
+ b_eq : 1D array, optional
500
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
501
+ the corresponding element of ``b_eq``.
502
+ bounds : 2D array
503
+ The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N
504
+ elements of ``x``. The N x 2 array contains lower bounds in the first
505
+ column and upper bounds in the 2nd. Unbounded variables have lower
506
+ bound -np.inf and/or upper bound np.inf.
507
+ x0 : 1D array, optional
508
+ Guess values of the decision variables, which will be refined by
509
+ the optimization algorithm. This argument is currently used only by the
510
+ 'revised simplex' method, and can only be used if `x0` represents a
511
+ basic feasible solution.
512
+
513
+ rr : bool
514
+ If ``True`` attempts to eliminate any redundant rows in ``A_eq``.
515
+ Set False if ``A_eq`` is known to be of full row rank, or if you are
516
+ looking for a potential speedup (at the expense of reliability).
517
+ rr_method : string
518
+ Method used to identify and remove redundant rows from the
519
+ equality constraint matrix after presolve.
520
+ tol : float
521
+ The tolerance which determines when a solution is "close enough" to
522
+ zero in Phase 1 to be considered a basic feasible solution or close
523
+ enough to positive to serve as an optimal solution.
524
+
525
+ Returns
526
+ -------
527
+ lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
528
+
529
+ c : 1D array
530
+ The coefficients of the linear objective function to be minimized.
531
+ A_ub : 2D array, optional
532
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
533
+ coefficients of a linear inequality constraint on ``x``.
534
+ b_ub : 1D array, optional
535
+ The inequality constraint vector. Each element represents an
536
+ upper bound on the corresponding value of ``A_ub @ x``.
537
+ A_eq : 2D array, optional
538
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
539
+ coefficients of a linear equality constraint on ``x``.
540
+ b_eq : 1D array, optional
541
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
542
+ the corresponding element of ``b_eq``.
543
+ bounds : 2D array
544
+ The bounds of ``x``, as ``min`` and ``max`` pairs, possibly tightened.
545
+ x0 : 1D array, optional
546
+ Guess values of the decision variables, which will be refined by
547
+ the optimization algorithm. This argument is currently used only by the
548
+ 'revised simplex' method, and can only be used if `x0` represents a
549
+ basic feasible solution.
550
+
551
+ c0 : 1D array
552
+ Constant term in objective function due to fixed (and eliminated)
553
+ variables.
554
+ x : 1D array
555
+ Solution vector (when the solution is trivial and can be determined
556
+ in presolve)
557
+ revstack: list of functions
558
+ the functions in the list reverse the operations of _presolve()
559
+ the function signature is x_org = f(x_mod), where x_mod is the result
560
+ of a presolve step and x_org the value at the start of the step
561
+ (currently, the revstack contains only one function)
562
+ complete: bool
563
+ Whether the solution is complete (solved or determined to be infeasible
564
+ or unbounded in presolve)
565
+ status : int
566
+ An integer representing the exit status of the optimization::
567
+
568
+ 0 : Optimization terminated successfully
569
+ 1 : Iteration limit reached
570
+ 2 : Problem appears to be infeasible
571
+ 3 : Problem appears to be unbounded
572
+ 4 : Serious numerical difficulties encountered
573
+
574
+ message : str
575
+ A string descriptor of the exit status of the optimization.
576
+
577
+ References
578
+ ----------
579
+ .. [5] Andersen, Erling D. "Finding all linearly dependent rows in
580
+ large-scale linear programming." Optimization Methods and Software
581
+ 6.3 (1995): 219-227.
582
+ .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
583
+ programming." Mathematical Programming 71.2 (1995): 221-245.
584
+
585
+ """
586
+ # ideas from Reference [8] by Andersen and Andersen
587
+ # however, unlike the reference, this is performed before converting
588
+ # problem to standard form
589
+ # There are a few advantages:
590
+ # * artificial variables have not been added, so matrices are smaller
591
+ # * bounds have not been converted to constraints yet. (It is better to
592
+ # do that after presolve because presolve may adjust the simple bounds.)
593
+ # There are many improvements that can be made, namely:
594
+ # * implement remaining checks from [5]
595
+ # * loop presolve until no additional changes are made
596
+ # * implement additional efficiency improvements in redundancy removal [2]
597
+
598
+ c, A_ub, b_ub, A_eq, b_eq, bounds, x0, _ = lp
599
+
600
+ revstack = [] # record of variables eliminated from problem
601
+ # constant term in cost function may be added if variables are eliminated
602
+ c0 = 0
603
+ complete = False # complete is True if detected infeasible/unbounded
604
+ x = np.zeros(c.shape) # this is solution vector if completed in presolve
605
+
606
+ status = 0 # all OK unless determined otherwise
607
+ message = ""
608
+
609
+ # Lower and upper bounds. Copy to prevent feedback.
610
+ lb = bounds[:, 0].copy()
611
+ ub = bounds[:, 1].copy()
612
+
613
+ m_eq, n = A_eq.shape
614
+ m_ub, n = A_ub.shape
615
+
616
+ if (rr_method is not None
617
+ and rr_method.lower() not in {"svd", "pivot", "id"}):
618
+ message = ("'" + str(rr_method) + "' is not a valid option "
619
+ "for redundancy removal. Valid options are 'SVD', "
620
+ "'pivot', and 'ID'.")
621
+ raise ValueError(message)
622
+
623
+ if sps.issparse(A_eq):
624
+ A_eq = A_eq.tocsr()
625
+ A_ub = A_ub.tocsr()
626
+
627
+ def where(A):
628
+ return A.nonzero()
629
+
630
+ vstack = sps.vstack
631
+ else:
632
+ where = np.where
633
+ vstack = np.vstack
634
+
635
+ # upper bounds > lower bounds
636
+ if np.any(ub < lb) or np.any(lb == np.inf) or np.any(ub == -np.inf):
637
+ status = 2
638
+ message = ("The problem is (trivially) infeasible since one "
639
+ "or more upper bounds are smaller than the corresponding "
640
+ "lower bounds, a lower bound is np.inf or an upper bound "
641
+ "is -np.inf.")
642
+ complete = True
643
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
644
+ c0, x, revstack, complete, status, message)
645
+
646
+ # zero row in equality constraints
647
+ zero_row = np.array(np.sum(A_eq != 0, axis=1) == 0).flatten()
648
+ if np.any(zero_row):
649
+ if np.any(
650
+ np.logical_and(
651
+ zero_row,
652
+ np.abs(b_eq) > tol)): # test_zero_row_1
653
+ # infeasible if RHS is not zero
654
+ status = 2
655
+ message = ("The problem is (trivially) infeasible due to a row "
656
+ "of zeros in the equality constraint matrix with a "
657
+ "nonzero corresponding constraint value.")
658
+ complete = True
659
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
660
+ c0, x, revstack, complete, status, message)
661
+ else: # test_zero_row_2
662
+ # if RHS is zero, we can eliminate this equation entirely
663
+ A_eq = A_eq[np.logical_not(zero_row), :]
664
+ b_eq = b_eq[np.logical_not(zero_row)]
665
+
666
+ # zero row in inequality constraints
667
+ zero_row = np.array(np.sum(A_ub != 0, axis=1) == 0).flatten()
668
+ if np.any(zero_row):
669
+ if np.any(np.logical_and(zero_row, b_ub < -tol)): # test_zero_row_1
670
+ # infeasible if RHS is less than zero (because LHS is zero)
671
+ status = 2
672
+ message = ("The problem is (trivially) infeasible due to a row "
673
+ "of zeros in the equality constraint matrix with a "
674
+ "nonzero corresponding constraint value.")
675
+ complete = True
676
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
677
+ c0, x, revstack, complete, status, message)
678
+ else: # test_zero_row_2
679
+ # if LHS is >= 0, we can eliminate this constraint entirely
680
+ A_ub = A_ub[np.logical_not(zero_row), :]
681
+ b_ub = b_ub[np.logical_not(zero_row)]
682
+
683
+ # zero column in (both) constraints
684
+ # this indicates that a variable isn't constrained and can be removed
685
+ A = vstack((A_eq, A_ub))
686
+ if A.shape[0] > 0:
687
+ zero_col = np.array(np.sum(A != 0, axis=0) == 0).flatten()
688
+ # variable will be at upper or lower bound, depending on objective
689
+ x[np.logical_and(zero_col, c < 0)] = ub[
690
+ np.logical_and(zero_col, c < 0)]
691
+ x[np.logical_and(zero_col, c > 0)] = lb[
692
+ np.logical_and(zero_col, c > 0)]
693
+ if np.any(np.isinf(x)): # if an unconstrained variable has no bound
694
+ status = 3
695
+ message = ("If feasible, the problem is (trivially) unbounded "
696
+ "due to a zero column in the constraint matrices. If "
697
+ "you wish to check whether the problem is infeasible, "
698
+ "turn presolve off.")
699
+ complete = True
700
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
701
+ c0, x, revstack, complete, status, message)
702
+ # variables that will equal their upper/lower bounds will be removed later
703
+ lb[np.logical_and(zero_col, c < 0)] = ub[
704
+ np.logical_and(zero_col, c < 0)]
705
+ ub[np.logical_and(zero_col, c > 0)] = lb[
706
+ np.logical_and(zero_col, c > 0)]
707
+
708
+ # row singleton in equality constraints
709
+ # this fixes a variable and removes the constraint
710
+ singleton_row = np.array(np.sum(A_eq != 0, axis=1) == 1).flatten()
711
+ rows = where(singleton_row)[0]
712
+ cols = where(A_eq[rows, :])[1]
713
+ if len(rows) > 0:
714
+ for row, col in zip(rows, cols):
715
+ val = b_eq[row] / A_eq[row, col]
716
+ if not lb[col] - tol <= val <= ub[col] + tol:
717
+ # infeasible if fixed value is not within bounds
718
+ status = 2
719
+ message = ("The problem is (trivially) infeasible because a "
720
+ "singleton row in the equality constraints is "
721
+ "inconsistent with the bounds.")
722
+ complete = True
723
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
724
+ c0, x, revstack, complete, status, message)
725
+ else:
726
+ # sets upper and lower bounds at that fixed value - variable
727
+ # will be removed later
728
+ lb[col] = val
729
+ ub[col] = val
730
+ A_eq = A_eq[np.logical_not(singleton_row), :]
731
+ b_eq = b_eq[np.logical_not(singleton_row)]
732
+
733
+ # row singleton in inequality constraints
734
+ # this indicates a simple bound and the constraint can be removed
735
+ # simple bounds may be adjusted here
736
+ # After all of the simple bound information is combined here, get_Abc will
737
+ # turn the simple bounds into constraints
738
+ singleton_row = np.array(np.sum(A_ub != 0, axis=1) == 1).flatten()
739
+ cols = where(A_ub[singleton_row, :])[1]
740
+ rows = where(singleton_row)[0]
741
+ if len(rows) > 0:
742
+ for row, col in zip(rows, cols):
743
+ val = b_ub[row] / A_ub[row, col]
744
+ if A_ub[row, col] > 0: # upper bound
745
+ if val < lb[col] - tol: # infeasible
746
+ complete = True
747
+ elif val < ub[col]: # new upper bound
748
+ ub[col] = val
749
+ else: # lower bound
750
+ if val > ub[col] + tol: # infeasible
751
+ complete = True
752
+ elif val > lb[col]: # new lower bound
753
+ lb[col] = val
754
+ if complete:
755
+ status = 2
756
+ message = ("The problem is (trivially) infeasible because a "
757
+ "singleton row in the upper bound constraints is "
758
+ "inconsistent with the bounds.")
759
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
760
+ c0, x, revstack, complete, status, message)
761
+ A_ub = A_ub[np.logical_not(singleton_row), :]
762
+ b_ub = b_ub[np.logical_not(singleton_row)]
763
+
764
+ # identical bounds indicate that a variable can be removed
765
+ i_f = np.abs(lb - ub) < tol # indices of "fixed" variables
766
+ i_nf = np.logical_not(i_f) # indices of "not fixed" variables
767
+
768
+ # test_bounds_equal_but_infeasible
769
+ if np.all(i_f): # if bounds define solution, check for consistency
770
+ residual = b_eq - A_eq.dot(lb)
771
+ slack = b_ub - A_ub.dot(lb)
772
+ if ((A_ub.size > 0 and np.any(slack < 0)) or
773
+ (A_eq.size > 0 and not np.allclose(residual, 0))):
774
+ status = 2
775
+ message = ("The problem is (trivially) infeasible because the "
776
+ "bounds fix all variables to values inconsistent with "
777
+ "the constraints")
778
+ complete = True
779
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
780
+ c0, x, revstack, complete, status, message)
781
+
782
+ ub_mod = ub
783
+ lb_mod = lb
784
+ if np.any(i_f):
785
+ c0 += c[i_f].dot(lb[i_f])
786
+ b_eq = b_eq - A_eq[:, i_f].dot(lb[i_f])
787
+ b_ub = b_ub - A_ub[:, i_f].dot(lb[i_f])
788
+ c = c[i_nf]
789
+ x_undo = lb[i_f] # not x[i_f], x is just zeroes
790
+ x = x[i_nf]
791
+ # user guess x0 stays separate from presolve solution x
792
+ if x0 is not None:
793
+ x0 = x0[i_nf]
794
+ A_eq = A_eq[:, i_nf]
795
+ A_ub = A_ub[:, i_nf]
796
+ # modify bounds
797
+ lb_mod = lb[i_nf]
798
+ ub_mod = ub[i_nf]
799
+
800
+ def rev(x_mod):
801
+ # Function to restore x: insert x_undo into x_mod.
802
+ # When elements have been removed at positions k1, k2, k3, ...
803
+ # then these must be replaced at (after) positions k1-1, k2-2,
804
+ # k3-3, ... in the modified array to recreate the original
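+ # Worked example (illustrative): with i_f = [F, T, F, T, F] and
+ # x_undo = [a, b], we get i = [1, 3] and insert_indices = [1, 2],
+ # so np.insert([p, q, r], [1, 2], [a, b]) rebuilds [p, a, q, b, r].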
805
+ i = np.flatnonzero(i_f)
806
+ # Number of variables to restore
807
+ N = len(i)
808
+ index_offset = np.arange(N)
809
+ # Create insert indices
810
+ insert_indices = i - index_offset
811
+ x_rev = np.insert(x_mod.astype(float), insert_indices, x_undo)
812
+ return x_rev
813
+
814
+ # Use revstack as a list of functions, currently just this one.
815
+ revstack.append(rev)
816
+
817
+ # no constraints indicates that the problem is trivial
818
+ if A_eq.size == 0 and A_ub.size == 0:
819
+ b_eq = np.array([])
820
+ b_ub = np.array([])
821
+ # test_empty_constraint_1
822
+ if c.size == 0:
823
+ status = 0
824
+ message = ("The solution was determined in presolve as there are "
825
+ "no non-trivial constraints.")
826
+ elif (np.any(np.logical_and(c < 0, ub_mod == np.inf)) or
827
+ np.any(np.logical_and(c > 0, lb_mod == -np.inf))):
828
+ # test_no_constraints()
829
+ # test_unbounded_no_nontrivial_constraints_1
830
+ # test_unbounded_no_nontrivial_constraints_2
831
+ status = 3
832
+ message = ("The problem is (trivially) unbounded "
833
+ "because there are no non-trivial constraints and "
834
+ "a) at least one decision variable is unbounded "
835
+ "above and its corresponding cost is negative, or "
836
+ "b) at least one decision variable is unbounded below "
837
+ "and its corresponding cost is positive. ")
838
+ else: # test_empty_constraint_2
839
+ status = 0
840
+ message = ("The solution was determined in presolve as there are "
841
+ "no non-trivial constraints.")
842
+ complete = True
843
+ x[c < 0] = ub_mod[c < 0]
844
+ x[c > 0] = lb_mod[c > 0]
845
+ # where c is zero, set x to a finite bound or zero
846
+ x_zero_c = ub_mod[c == 0]
847
+ x_zero_c[np.isinf(x_zero_c)] = lb_mod[c == 0][np.isinf(x_zero_c)] # fall back to lb where ub is inf
848
+ x_zero_c[np.isinf(x_zero_c)] = 0
849
+ x[c == 0] = x_zero_c
850
+ # if this is not the last step of presolve, should convert bounds back
851
+ # to array and return here
852
+
853
+ # Convert modified lb and ub back into N x 2 bounds
854
+ bounds = np.hstack((lb_mod[:, np.newaxis], ub_mod[:, np.newaxis]))
855
+
856
+ # remove redundant (linearly dependent) rows from equality constraints
857
+ n_rows_A = A_eq.shape[0]
858
+ redundancy_warning = ("A_eq does not appear to be of full row rank. To "
859
+ "improve performance, check the problem formulation "
860
+ "for redundant equality constraints.")
861
+ if (sps.issparse(A_eq)):
862
+ if rr and A_eq.size > 0: # TODO: Fast sparse rank check?
863
+ rr_res = _remove_redundancy_pivot_sparse(A_eq, b_eq)
864
+ A_eq, b_eq, status, message = rr_res
865
+ if A_eq.shape[0] < n_rows_A:
866
+ warn(redundancy_warning, OptimizeWarning, stacklevel=1)
867
+ if status != 0:
868
+ complete = True
869
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
870
+ c0, x, revstack, complete, status, message)
871
+
872
+ # This is a wild guess for which redundancy removal algorithm will be
873
+ # faster. More testing would be good.
874
+ small_nullspace = 5
875
+ if rr and A_eq.size > 0:
876
+ try: # TODO: use results of first SVD in _remove_redundancy_svd
877
+ rank = np.linalg.matrix_rank(A_eq)
878
+ # oh well, we'll have to go with _remove_redundancy_pivot_dense
879
+ except Exception:
880
+ rank = 0
881
+ if rr and A_eq.size > 0 and rank < A_eq.shape[0]:
882
+ warn(redundancy_warning, OptimizeWarning, stacklevel=3)
883
+ dim_row_nullspace = A_eq.shape[0]-rank
884
+ if rr_method is None:
885
+ if dim_row_nullspace <= small_nullspace:
886
+ rr_res = _remove_redundancy_svd(A_eq, b_eq)
887
+ A_eq, b_eq, status, message = rr_res
888
+ if dim_row_nullspace > small_nullspace or status == 4:
889
+ rr_res = _remove_redundancy_pivot_dense(A_eq, b_eq)
890
+ A_eq, b_eq, status, message = rr_res
891
+
892
+ else:
893
+ rr_method = rr_method.lower()
894
+ if rr_method == "svd":
895
+ rr_res = _remove_redundancy_svd(A_eq, b_eq)
896
+ A_eq, b_eq, status, message = rr_res
897
+ elif rr_method == "pivot":
898
+ rr_res = _remove_redundancy_pivot_dense(A_eq, b_eq)
899
+ A_eq, b_eq, status, message = rr_res
900
+ elif rr_method == "id":
901
+ rr_res = _remove_redundancy_id(A_eq, b_eq, rank)
902
+ A_eq, b_eq, status, message = rr_res
903
+ else: # shouldn't get here; option validity checked above
904
+ pass
905
+ if A_eq.shape[0] < rank:
906
+ message = ("Due to numerical issues, redundant equality "
907
+ "constraints could not be removed automatically. "
908
+ "Try providing your constraint matrices as sparse "
909
+ "matrices to activate sparse presolve, try turning "
910
+ "off redundancy removal, or try turning off presolve "
911
+ "altogether.")
912
+ status = 4
913
+ if status != 0:
914
+ complete = True
915
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
916
+ c0, x, revstack, complete, status, message)
917
+
918
+
919
+ def _parse_linprog(lp, options, meth):
920
+ """
921
+ Parse the provided linear programming problem
922
+
923
+ ``_parse_linprog`` employs two main steps ``_check_sparse_inputs`` and
924
+ ``_clean_inputs``. ``_check_sparse_inputs`` checks for sparsity in the
925
+ provided constraints (``A_ub`` and ``A_eq``) and whether these match the
926
+ provided sparsity options.
927
+
928
+ ``_clean_inputs`` validates the provided inputs. If no violations are
929
+ identified, the objective vector, upper bound constraints, equality
930
+ constraints, and simple bounds are returned in the expected format.
931
+
932
+ Parameters
933
+ ----------
934
+ lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
935
+
936
+ c : 1D array
937
+ The coefficients of the linear objective function to be minimized.
938
+ A_ub : 2D array, optional
939
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
940
+ coefficients of a linear inequality constraint on ``x``.
941
+ b_ub : 1D array, optional
942
+ The inequality constraint vector. Each element represents an
943
+ upper bound on the corresponding value of ``A_ub @ x``.
944
+ A_eq : 2D array, optional
945
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
946
+ coefficients of a linear equality constraint on ``x``.
947
+ b_eq : 1D array, optional
948
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
949
+ the corresponding element of ``b_eq``.
950
+ bounds : various valid formats, optional
951
+ The bounds of ``x``, as ``min`` and ``max`` pairs.
952
+ If bounds are specified for all N variables separately, valid formats are:
953
+ * a 2D array (2 x N or N x 2);
954
+ * a sequence of N sequences, each with 2 values.
955
+ If all variables have the same bounds, a single pair of values can
956
+ be specified. Valid formats are:
957
+ * a sequence with 2 scalar values;
958
+ * a sequence with a single element containing 2 scalar values.
959
+ If all variables have a lower bound of 0 and no upper bound, the bounds
960
+ parameter can be omitted (or given as None).
961
+ x0 : 1D array, optional
962
+ Guess values of the decision variables, which will be refined by
963
+ the optimization algorithm. This argument is currently used only by the
964
+ 'revised simplex' method, and can only be used if `x0` represents a
965
+ basic feasible solution.
966
+
967
+ options : dict
968
+ A dictionary of solver options. All methods accept the following
969
+ generic options:
970
+
971
+ maxiter : int
972
+ Maximum number of iterations to perform.
973
+ disp : bool
974
+ Set to True to print convergence messages.
975
+
976
+ For method-specific options, see :func:`show_options('linprog')`.
977
+
978
+ Returns
979
+ -------
980
+ lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
981
+
982
+ c : 1D array
983
+ The coefficients of the linear objective function to be minimized.
984
+ A_ub : 2D array, optional
985
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
986
+ coefficients of a linear inequality constraint on ``x``.
987
+ b_ub : 1D array, optional
988
+ The inequality constraint vector. Each element represents an
989
+ upper bound on the corresponding value of ``A_ub @ x``.
990
+ A_eq : 2D array, optional
991
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
992
+ coefficients of a linear equality constraint on ``x``.
993
+ b_eq : 1D array, optional
994
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
995
+ the corresponding element of ``b_eq``.
996
+ bounds : 2D array
997
+ The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N
998
+ elements of ``x``. The N x 2 array contains lower bounds in the first
999
+ column and upper bounds in the 2nd. Unbounded variables have lower
1000
+ bound -np.inf and/or upper bound np.inf.
1001
+ x0 : 1D array, optional
1002
+ Guess values of the decision variables, which will be refined by
1003
+ the optimization algorithm. This argument is currently used only by the
1004
+ 'revised simplex' method, and can only be used if `x0` represents a
1005
+ basic feasible solution.
1006
+
1007
+ options : dict, optional
1008
+ A dictionary of solver options. All methods accept the following
1009
+ generic options:
1010
+
1011
+ maxiter : int
1012
+ Maximum number of iterations to perform.
1013
+ disp : bool
1014
+ Set to True to print convergence messages.
1015
+
1016
+ For method-specific options, see :func:`show_options('linprog')`.
1017
+
1018
+ """
1019
+ if options is None:
1020
+ options = {}
1021
+
1022
+ solver_options = {k: v for k, v in options.items()}
1023
+ solver_options, A_ub, A_eq = _check_sparse_inputs(solver_options, meth,
1024
+ lp.A_ub, lp.A_eq)
1025
+ # Convert lists to numpy arrays, etc...
1026
+ lp = _clean_inputs(lp._replace(A_ub=A_ub, A_eq=A_eq))
1027
+ return lp, solver_options
1028
+
1029
+
1030
+ def _get_Abc(lp, c0):
1031
+ """
1032
+ Given a linear programming problem of the form:
1033
+
1034
+ Minimize::
1035
+
1036
+ c @ x
1037
+
1038
+ Subject to::
1039
+
1040
+ A_ub @ x <= b_ub
1041
+ A_eq @ x == b_eq
1042
+ lb <= x <= ub
1043
+
1044
+ where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.
1045
+
1046
+ Return the problem in standard form:
1047
+
1048
+ Minimize::
1049
+
1050
+ c @ x
1051
+
1052
+ Subject to::
1053
+
1054
+ A @ x == b
1055
+ x >= 0
1056
+
1057
+ by adding slack variables and making variable substitutions as necessary.
1058
+
1059
+ Parameters
1060
+ ----------
1061
+ lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
1062
+
1063
+ c : 1D array
1064
+ The coefficients of the linear objective function to be minimized.
1065
+ A_ub : 2D array, optional
1066
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
1067
+ coefficients of a linear inequality constraint on ``x``.
1068
+ b_ub : 1D array, optional
1069
+ The inequality constraint vector. Each element represents an
1070
+ upper bound on the corresponding value of ``A_ub @ x``.
1071
+ A_eq : 2D array, optional
1072
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
1073
+ coefficients of a linear equality constraint on ``x``.
1074
+ b_eq : 1D array, optional
1075
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
1076
+ the corresponding element of ``b_eq``.
1077
+ bounds : 2D array
1078
+ The bounds of ``x``, lower bounds in the 1st column, upper
1079
+ bounds in the 2nd column. The bounds are possibly tightened
1080
+ by the presolve procedure.
1081
+ x0 : 1D array, optional
1082
+ Guess values of the decision variables, which will be refined by
1083
+ the optimization algorithm. This argument is currently used only by the
1084
+ 'revised simplex' method, and can only be used if `x0` represents a
1085
+ basic feasible solution.
1086
+
1087
+ c0 : float
1088
+ Constant term in objective function due to fixed (and eliminated)
1089
+ variables.
1090
+
1091
+ Returns
1092
+ -------
1093
+ A : 2-D array
1094
+ 2-D array such that ``A @ x`` gives the values of the equality
1095
+ constraints at ``x``.
1096
+ b : 1-D array
1097
+ 1-D array of values representing the RHS of each equality constraint
1098
+ (row) in A (for standard form problem).
1099
+ c : 1-D array
1100
+ Coefficients of the linear objective function to be minimized (for
1101
+ standard form problem).
1102
+ c0 : float
1103
+ Constant term in objective function due to fixed (and eliminated)
1104
+ variables.
1105
+ x0 : 1-D array
1106
+ Starting values of the independent variables, which will be refined by
1107
+ the optimization algorithm
1108
+
1109
+ References
1110
+ ----------
1111
+ .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
1112
+ programming." Athena Scientific 1 (1997): 997.
1113
+
1114
+ """
1115
+ c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp
1116
+
1117
+ if sps.issparse(A_eq):
1118
+ sparse = True
1119
+ A_eq = sps.csr_matrix(A_eq)
1120
+ A_ub = sps.csr_matrix(A_ub)
1121
+
1122
+ def hstack(blocks):
1123
+ return sps.hstack(blocks, format="csr")
1124
+
1125
+ def vstack(blocks):
1126
+ return sps.vstack(blocks, format="csr")
1127
+
1128
+ zeros = sps.csr_matrix
1129
+ eye = sps.eye
1130
+ else:
1131
+ sparse = False
1132
+ hstack = np.hstack
1133
+ vstack = np.vstack
1134
+ zeros = np.zeros
1135
+ eye = np.eye
1136
+
1137
+ # Variables lbs and ubs (see below) may be changed, which feeds back into
1138
+ # bounds, so copy.
1139
+ bounds = np.array(bounds, copy=True)
1140
+
1141
+ # modify problem such that all variables have only non-negativity bounds
1142
+ lbs = bounds[:, 0]
1143
+ ubs = bounds[:, 1]
1144
+ m_ub, n_ub = A_ub.shape
1145
+
1146
+ lb_none = np.equal(lbs, -np.inf)
1147
+ ub_none = np.equal(ubs, np.inf)
1148
+ lb_some = np.logical_not(lb_none)
1149
+ ub_some = np.logical_not(ub_none)
1150
+
1151
+ # unbounded below: substitute xi = -xi' (unbounded above)
1152
+ # if -inf <= xi <= ub, then -ub <= -xi <= inf, so swap and invert bounds
1153
+ l_nolb_someub = np.logical_and(lb_none, ub_some)
1154
+ i_nolb = np.nonzero(l_nolb_someub)[0]
1155
+ lbs[l_nolb_someub], ubs[l_nolb_someub] = (
1156
+ -ubs[l_nolb_someub], -lbs[l_nolb_someub])
1157
+ lb_none = np.equal(lbs, -np.inf)
1158
+ ub_none = np.equal(ubs, np.inf)
1159
+ lb_some = np.logical_not(lb_none)
1160
+ ub_some = np.logical_not(ub_none)
1161
+ c[i_nolb] *= -1
1162
+ if x0 is not None:
1163
+ x0[i_nolb] *= -1
1164
+ if len(i_nolb) > 0:
1165
+ if A_ub.shape[0] > 0: # sometimes needed for sparse arrays... weird
1166
+ A_ub[:, i_nolb] *= -1
1167
+ if A_eq.shape[0] > 0:
1168
+ A_eq[:, i_nolb] *= -1
1169
+
1170
+ # upper bound: add inequality constraint
1171
+ i_newub, = ub_some.nonzero()
1172
+ ub_newub = ubs[ub_some]
1173
+ n_bounds = len(i_newub)
1174
+ if n_bounds > 0:
1175
+ shape = (n_bounds, A_ub.shape[1])
1176
+ if sparse:
1177
+ idxs = (np.arange(n_bounds), i_newub)
1178
+ A_ub = vstack((A_ub, sps.csr_matrix((np.ones(n_bounds), idxs),
1179
+ shape=shape)))
1180
+ else:
1181
+ A_ub = vstack((A_ub, np.zeros(shape)))
1182
+ A_ub[np.arange(m_ub, A_ub.shape[0]), i_newub] = 1
1183
+ b_ub = np.concatenate((b_ub, np.zeros(n_bounds)))
1184
+ b_ub[m_ub:] = ub_newub
1185
+
1186
+ A1 = vstack((A_ub, A_eq))
1187
+ b = np.concatenate((b_ub, b_eq))
1188
+ c = np.concatenate((c, np.zeros((A_ub.shape[0],))))
1189
+ if x0 is not None:
1190
+ x0 = np.concatenate((x0, np.zeros((A_ub.shape[0],))))
1191
+ # unbounded: substitute xi = xi+ - xi-
1192
+ l_free = np.logical_and(lb_none, ub_none)
1193
+ i_free = np.nonzero(l_free)[0]
1194
+ n_free = len(i_free)
1195
+ c = np.concatenate((c, np.zeros(n_free)))
1196
+ if x0 is not None:
1197
+ x0 = np.concatenate((x0, np.zeros(n_free)))
1198
+ A1 = hstack((A1[:, :n_ub], -A1[:, i_free]))
1199
+ c[n_ub:n_ub+n_free] = -c[i_free]
1200
+ if x0 is not None:
1201
+ i_free_neg = x0[i_free] < 0
1202
+ x0[np.arange(n_ub, A1.shape[1])[i_free_neg]] = -x0[i_free[i_free_neg]]
1203
+ x0[i_free[i_free_neg]] = 0
1204
+
1205
+ # add slack variables
1206
+ A2 = vstack([eye(A_ub.shape[0]), zeros((A_eq.shape[0], A_ub.shape[0]))])
1207
+
1208
+ A = hstack([A1, A2])
1209
+
1210
+ # lower bound: substitute xi = xi' + lb
1211
+ # now there is a constant term in objective
1212
+ i_shift = np.nonzero(lb_some)[0]
1213
+ lb_shift = lbs[lb_some].astype(float)
1214
+ c0 += np.sum(lb_shift * c[i_shift])
1215
+ if sparse:
1216
+ b = b.reshape(-1, 1)
1217
+ A = A.tocsc()
1218
+ b -= (A[:, i_shift] * sps.diags(lb_shift)).sum(axis=1)
1219
+ b = b.ravel()
1220
+ else:
1221
+ b -= (A[:, i_shift] * lb_shift).sum(axis=1)
1222
+ if x0 is not None:
1223
+ x0[i_shift] -= lb_shift
1224
+
1225
+ return A, b, c, c0, x0
1226
+
1227
+
1228
+ def _round_to_power_of_two(x):
+     """
+     Round elements of the array to the nearest power of two.
+     """
+     return 2**np.around(np.log2(x))
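+     # e.g. _round_to_power_of_two(np.array([3., 5., 0.1])) gives
+     # array([4., 4., 0.125]): log2 gives 1.58, 2.32, -3.32, which round
+     # to 2, 2, -3. Powers of two are used because multiplying a float by
+     # an exact power of two changes only its exponent, so the scaling in
+     # _autoscale below introduces no rounding error in the matrix entries.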
+
+
+ def _autoscale(A, b, c, x0):
+     """
+     Scales the problem according to equilibration from [12].
+     Also normalizes the right-hand side vector by its maximum element.
+     """
+     m, n = A.shape
+
+     C = 1
+     R = 1
+
+     if A.size > 0:
+
+         R = np.max(np.abs(A), axis=1)
+         if sps.issparse(A):
+             R = R.toarray().flatten()
+         R[R == 0] = 1
+         R = 1/_round_to_power_of_two(R)
+         A = sps.diags(R)*A if sps.issparse(A) else A*R.reshape(m, 1)
+         b = b*R
+
+         C = np.max(np.abs(A), axis=0)
+         if sps.issparse(A):
+             C = C.toarray().flatten()
+         C[C == 0] = 1
+         C = 1/_round_to_power_of_two(C)
+         A = A*sps.diags(C) if sps.issparse(A) else A*C
+         c = c*C
+
+     b_scale = np.max(np.abs(b)) if b.size > 0 else 1
+     if b_scale == 0:
+         b_scale = 1.
+     b = b/b_scale
+
+     if x0 is not None:
+         x0 = x0/b_scale*(1/C)
+     return A, b, c, x0, C, b_scale
+
+
+ def _unscale(x, C, b_scale):
+     """
+     Converts solution to _autoscale problem -> solution to original problem.
+     """
+
+     try:
+         n = len(C)
+         # fails if sparse or scalar; that's OK.
+         # this is only needed for original simplex (never sparse)
+     except TypeError:
+         n = len(x)
+
+     return x[:n]*b_scale*C
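+     # Usage sketch (`solve` stands in for any solver of the scaled problem):
+     #
+     #     A, b, c, x0, C, b_scale = _autoscale(A, b, c, x0)
+     #     x_scaled = solve(A, b, c, x0)
+     #     x = _unscale(x_scaled, C, b_scale)
+     #
+     # x[:n] keeps only the first len(C) entries, discarding any auxiliary
+     # variables appended to the scaled problem.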
+
+
+ def _display_summary(message, status, fun, iteration):
+     """
+     Print the termination summary of the linear program.
+
+     Parameters
+     ----------
+     message : str
+         A string descriptor of the exit status of the optimization.
+     status : int
+         An integer representing the exit status of the optimization::
+
+             0 : Optimization terminated successfully
+             1 : Iteration limit reached
+             2 : Problem appears to be infeasible
+             3 : Problem appears to be unbounded
+             4 : Serious numerical difficulties encountered
+
+     fun : float
+         Value of the objective function.
+     iteration : int
+         The number of iterations performed.
+     """
+     print(message)
+     if status in (0, 1):
+         print(f"         Current function value: {fun: <12.6f}")
+         print(f"         Iterations: {iteration:d}")
+
+
+ def _postsolve(x, postsolve_args, complete=False):
+     """
+     Given solution x to the presolved, standard-form linear program, add
+     fixed variables back into the problem and undo the variable substitutions
+     to get the solution to the original linear program. Also, calculate the
+     objective function value, slack in the original upper bound constraints,
+     and residuals in the original equality constraints.
+
+     Parameters
+     ----------
+     x : 1-D array
+         Solution vector to the standard-form problem.
+     postsolve_args : tuple
+         Data needed by _postsolve to convert the solution to the standard-form
+         problem into the solution to the original problem, including:
+
+     lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
+
+         c : 1D array
+             The coefficients of the linear objective function to be minimized.
+         A_ub : 2D array, optional
+             The inequality constraint matrix. Each row of ``A_ub`` specifies
+             the coefficients of a linear inequality constraint on ``x``.
+         b_ub : 1D array, optional
+             The inequality constraint vector. Each element represents an
+             upper bound on the corresponding value of ``A_ub @ x``.
+         A_eq : 2D array, optional
+             The equality constraint matrix. Each row of ``A_eq`` specifies
+             the coefficients of a linear equality constraint on ``x``.
+         b_eq : 1D array, optional
+             The equality constraint vector. Each element of ``A_eq @ x`` must
+             equal the corresponding element of ``b_eq``.
+         bounds : 2D array
+             The bounds of ``x``, lower bounds in the 1st column, upper
+             bounds in the 2nd column. The bounds are possibly tightened
+             by the presolve procedure.
+         x0 : 1D array, optional
+             Guess values of the decision variables, which will be refined by
+             the optimization algorithm. This argument is currently used only
+             by the 'revised simplex' method, and can only be used if `x0`
+             represents a basic feasible solution.
+
+     revstack : list of functions
+         The functions in the list reverse the operations of _presolve().
+         The function signature is x_org = f(x_mod), where x_mod is the result
+         of a presolve step and x_org is the value at the start of the step.
+     complete : bool
+         Whether the solution was determined in presolve (``True`` if so).
+
+     Returns
+     -------
+     x : 1-D array
+         Solution vector to original linear programming problem
+     fun : float
+         optimal objective value for original problem
+     slack : 1-D array
+         The (non-negative) slack in the upper bound constraints, that is,
+         ``b_ub - A_ub @ x``
+     con : 1-D array
+         The (nominally zero) residuals of the equality constraints, that is,
+         ``b_eq - A_eq @ x``
+     """
+     # note that all the inputs are the ORIGINAL, unmodified versions
+     # no rows, columns have been removed
+
+     c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = postsolve_args[0]
+     revstack, C, b_scale = postsolve_args[1:]
+
+     x = _unscale(x, C, b_scale)
+
+     # Undo variable substitutions of _get_Abc()
+     # if "complete", problem was solved in presolve; don't do anything here
+     n_x = bounds.shape[0]
+     if not complete and bounds is not None:  # bounds are never none, probably
+         n_unbounded = 0
+         for i, bi in enumerate(bounds):
+             lbi = bi[0]
+             ubi = bi[1]
+             if lbi == -np.inf and ubi == np.inf:
+                 n_unbounded += 1
+                 x[i] = x[i] - x[n_x + n_unbounded - 1]
+             else:
+                 if lbi == -np.inf:
+                     x[i] = ubi - x[i]
+                 else:
+                     x[i] += lbi
+         # all the rest of the variables were artificial
+         x = x[:n_x]
+
+     # If there were variables removed from the problem, add them back into the
+     # solution vector
+     # Apply the functions in revstack (reverse direction)
+     for rev in reversed(revstack):
+         x = rev(x)
+
+     fun = x.dot(c)
+     slack = b_ub - A_ub.dot(x)  # report slack for ORIGINAL UB constraints
+     # report residuals of ORIGINAL EQ constraints
+     con = b_eq - A_eq.dot(x)
+
+     return x, fun, slack, con
+
+
+ def _check_result(x, fun, status, slack, con, bounds, tol, message,
+                   integrality):
+     """
+     Check the validity of the provided solution.
+
+     A valid (optimal) solution satisfies all bounds, all slack variables are
+     non-negative, and all equality constraint residuals are (nominally) zero.
+     Further, the lower bounds, upper bounds, slack, and residuals contain
+     no nan values.
+
+     Parameters
+     ----------
+     x : 1-D array
+         Solution vector to original linear programming problem
+     fun : float
+         optimal objective value for original problem
+     status : int
+         An integer representing the exit status of the optimization::
+
+             0 : Optimization terminated successfully
+             1 : Iteration limit reached
+             2 : Problem appears to be infeasible
+             3 : Problem appears to be unbounded
+             4 : Serious numerical difficulties encountered
+
+     slack : 1-D array
+         The (non-negative) slack in the upper bound constraints, that is,
+         ``b_ub - A_ub @ x``
+     con : 1-D array
+         The (nominally zero) residuals of the equality constraints, that is,
+         ``b_eq - A_eq @ x``
+     bounds : 2D array
+         The bounds on the original variables ``x``
+     message : str
+         A string descriptor of the exit status of the optimization.
+     tol : float
+         Termination tolerance; see [1]_ Section 4.5.
+
+     Returns
+     -------
+     status : int
+         An integer representing the exit status of the optimization::
+
+             0 : Optimization terminated successfully
+             1 : Iteration limit reached
+             2 : Problem appears to be infeasible
+             3 : Problem appears to be unbounded
+             4 : Serious numerical difficulties encountered
+
+     message : str
+         A string descriptor of the exit status of the optimization.
+     """
+     # Somewhat arbitrary
+     tol = np.sqrt(tol) * 10
+
+     if x is None:
+         # HiGHS does not provide x if infeasible/unbounded
+         if status == 0:  # Observed with HiGHS Simplex Primal
+             status = 4
+             message = ("The solver did not provide a solution nor did it "
+                        "report a failure. Please submit a bug report.")
+         return status, message
+
+     contains_nans = (
+         np.isnan(x).any()
+         or np.isnan(fun)
+         or np.isnan(slack).any()
+         or np.isnan(con).any()
+     )
+
+     if contains_nans:
+         is_feasible = False
+     else:
+         if integrality is None:
+             integrality = 0
+         valid_bounds = (x >= bounds[:, 0] - tol) & (x <= bounds[:, 1] + tol)
+         # When integrality is 2 or 3, x must be within bounds OR take value 0
+         valid_bounds |= (integrality > 1) & np.isclose(x, 0, atol=tol)
+         invalid_bounds = not np.all(valid_bounds)
+
+         invalid_slack = status != 3 and (slack < -tol).any()
+         invalid_con = status != 3 and (np.abs(con) > tol).any()
+         is_feasible = not (invalid_bounds or invalid_slack or invalid_con)
+
+     if status == 0 and not is_feasible:
+         status = 4
+         message = ("The solution does not satisfy the constraints within the "
+                    "required tolerance of " + f"{tol:.2E}" + ", yet "
+                    "no errors were raised and there is no certificate of "
+                    "infeasibility or unboundedness. Check whether "
+                    "the slack and constraint residuals are acceptable; "
+                    "if not, consider enabling presolve, adjusting the "
+                    "tolerance option(s), and/or using a different method. "
+                    "Please consider submitting a bug report.")
+     elif status == 2 and is_feasible:
+         # Occurs if the simplex method exits after phase one with a very
+         # nearly basic feasible solution. Postsolving can make the solution
+         # basic, however, this solution is NOT optimal
+         status = 4
+         message = ("The solution is feasible, but the solver did not report "
+                    "that the solution was optimal. Please try a different "
+                    "method.")
+
+     return status, message
venv/lib/python3.10/site-packages/scipy/optimize/_lsap.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (27.1 kB).

venv/lib/python3.10/site-packages/scipy/optimize/_milp.py ADDED
@@ -0,0 +1,392 @@
+ import warnings
+ import numpy as np
+ from scipy.sparse import csc_array, vstack, issparse
+ from scipy._lib._util import VisibleDeprecationWarning
+ from ._highs._highs_wrapper import _highs_wrapper  # type: ignore[import]
+ from ._constraints import LinearConstraint, Bounds
+ from ._optimize import OptimizeResult
+ from ._linprog_highs import _highs_to_scipy_status_message
+
+
+ def _constraints_to_components(constraints):
+     """
+     Convert sequence of constraints to a single set of components A, b_l, b_u.
+
+     `constraints` could be
+
+     1. A LinearConstraint
+     2. A tuple representing a LinearConstraint
+     3. An invalid object
+     4. A sequence composed entirely of objects of type 1/2
+     5. A sequence containing at least one object of type 3
+
+     We want to accept 1, 2, and 4 and reject 3 and 5.
+     """
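+     # For example, each of the following is accepted and treated as a single
+     # set of linear constraints (A, lb, ub being array_likes of compatible
+     # shapes):
+     #     _constraints_to_components(LinearConstraint(A, lb, ub))
+     #     _constraints_to_components((A, lb, ub))
+     #     _constraints_to_components([LinearConstraint(A, lb, ub), (A, lb, ub)])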
+     message = ("`constraints` (or each element within `constraints`) must be "
+                "convertible into an instance of "
+                "`scipy.optimize.LinearConstraint`.")
+     As = []
+     b_ls = []
+     b_us = []
+
+     # Accept case 1 by standardizing as case 4
+     if isinstance(constraints, LinearConstraint):
+         constraints = [constraints]
+     else:
+         # Reject case 3
+         try:
+             iter(constraints)
+         except TypeError as exc:
+             raise ValueError(message) from exc
+
+         # Accept case 2 by standardizing as case 4
+         if len(constraints) == 3:
+             # argument could be a single tuple representing a LinearConstraint
+             try:
+                 constraints = [LinearConstraint(*constraints)]
+             except (TypeError, ValueError, VisibleDeprecationWarning):
+                 # argument was not a tuple representing a LinearConstraint
+                 pass
+
+     # Address cases 4/5
+     for constraint in constraints:
+         # if it's not a LinearConstraint or something that represents a
+         # LinearConstraint at this point, it's invalid
+         if not isinstance(constraint, LinearConstraint):
+             try:
+                 constraint = LinearConstraint(*constraint)
+             except TypeError as exc:
+                 raise ValueError(message) from exc
+         As.append(csc_array(constraint.A))
+         b_ls.append(np.atleast_1d(constraint.lb).astype(np.float64))
+         b_us.append(np.atleast_1d(constraint.ub).astype(np.float64))
+
+     if len(As) > 1:
+         A = vstack(As, format="csc")
+         b_l = np.concatenate(b_ls)
+         b_u = np.concatenate(b_us)
+     else:  # avoid unnecessary copying
+         A = As[0]
+         b_l = b_ls[0]
+         b_u = b_us[0]
+
+     return A, b_l, b_u
+
+
+ def _milp_iv(c, integrality, bounds, constraints, options):
+     # objective IV
+     if issparse(c):
+         raise ValueError("`c` must be a dense array.")
+     c = np.atleast_1d(c).astype(np.float64)
+     if c.ndim != 1 or c.size == 0 or not np.all(np.isfinite(c)):
+         message = ("`c` must be a one-dimensional array of finite numbers "
+                    "with at least one element.")
+         raise ValueError(message)
+
+     # integrality IV
+     if issparse(integrality):
+         raise ValueError("`integrality` must be a dense array.")
+     message = ("`integrality` must contain integers 0-3 and be broadcastable "
+                "to `c.shape`.")
+     if integrality is None:
+         integrality = 0
+     try:
+         integrality = np.broadcast_to(integrality, c.shape).astype(np.uint8)
+     except ValueError:
+         raise ValueError(message)
+     if integrality.min() < 0 or integrality.max() > 3:
+         raise ValueError(message)
+
+     # bounds IV
+     if bounds is None:
+         bounds = Bounds(0, np.inf)
+     elif not isinstance(bounds, Bounds):
+         message = ("`bounds` must be convertible into an instance of "
+                    "`scipy.optimize.Bounds`.")
+         try:
+             bounds = Bounds(*bounds)
+         except TypeError as exc:
+             raise ValueError(message) from exc
+
+     try:
+         lb = np.broadcast_to(bounds.lb, c.shape).astype(np.float64)
+         ub = np.broadcast_to(bounds.ub, c.shape).astype(np.float64)
+     except (ValueError, TypeError) as exc:
+         message = ("`bounds.lb` and `bounds.ub` must contain reals and "
+                    "be broadcastable to `c.shape`.")
+         raise ValueError(message) from exc
+
+     # constraints IV
+     if not constraints:
+         constraints = [LinearConstraint(np.empty((0, c.size)),
+                                         np.empty((0,)), np.empty((0,)))]
+     try:
+         A, b_l, b_u = _constraints_to_components(constraints)
+     except ValueError as exc:
+         message = ("`constraints` (or each element within `constraints`) must "
+                    "be convertible into an instance of "
+                    "`scipy.optimize.LinearConstraint`.")
+         raise ValueError(message) from exc
+
+     if A.shape != (b_l.size, c.size):
+         message = "The shape of `A` must be (len(b_l), len(c))."
+         raise ValueError(message)
+     indptr, indices, data = A.indptr, A.indices, A.data.astype(np.float64)
+
+     # options IV
+     options = options or {}
+     supported_options = {'disp', 'presolve', 'time_limit', 'node_limit',
+                          'mip_rel_gap'}
+     unsupported_options = set(options).difference(supported_options)
+     if unsupported_options:
+         message = (f"Unrecognized options detected: {unsupported_options}. "
+                    "These will be passed to HiGHS verbatim.")
+         warnings.warn(message, RuntimeWarning, stacklevel=3)
+     options_iv = {'log_to_console': options.pop("disp", False),
+                   'mip_max_nodes': options.pop("node_limit", None)}
+     options_iv.update(options)
+
+     return c, integrality, lb, ub, indptr, indices, data, b_l, b_u, options_iv
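+     # e.g. options={'disp': True, 'node_limit': 100} becomes the HiGHS
+     # option names {'log_to_console': True, 'mip_max_nodes': 100}; the
+     # remaining recognized keys ('presolve', 'time_limit', 'mip_rel_gap')
+     # pass through to HiGHS unchanged.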
+
+
+ def milp(c, *, integrality=None, bounds=None, constraints=None, options=None):
+     r"""
+     Mixed-integer linear programming
+
+     Solves problems of the following form:
+
+     .. math::
+
+         \min_x \ & c^T x \\
+         \mbox{such that} \ & b_l \leq A x \leq b_u,\\
+         & l \leq x \leq u, \\
+         & x_i \in \mathbb{Z}, i \in X_i
+
+     where :math:`x` is a vector of decision variables;
+     :math:`c`, :math:`b_l`, :math:`b_u`, :math:`l`, and :math:`u` are vectors;
+     :math:`A` is a matrix, and :math:`X_i` is the set of indices of
+     decision variables that must be integral. (In this context, a
+     variable that can assume only integer values is said to be "integral";
+     it has an "integrality" constraint.)
+
+     Alternatively, that's:
+
+     minimize::
+
+         c @ x
+
+     such that::
+
+         b_l <= A @ x <= b_u
+         l <= x <= u
+         Specified elements of x must be integers
+
+     By default, ``l = 0`` and ``u = np.inf`` unless specified with
+     ``bounds``.
+
+     Parameters
+     ----------
+     c : 1D dense array_like
+         The coefficients of the linear objective function to be minimized.
+         `c` is converted to a double precision array before the problem is
+         solved.
+     integrality : 1D dense array_like, optional
+         Indicates the type of integrality constraint on each decision variable.
+
+         ``0`` : Continuous variable; no integrality constraint.
+
+         ``1`` : Integer variable; decision variable must be an integer
+         within `bounds`.
+
+         ``2`` : Semi-continuous variable; decision variable must be within
+         `bounds` or take value ``0``.
+
+         ``3`` : Semi-integer variable; decision variable must be an integer
+         within `bounds` or take value ``0``.
+
+         By default, all variables are continuous. `integrality` is converted
+         to an array of integers before the problem is solved.
+
+     bounds : scipy.optimize.Bounds, optional
+         Bounds on the decision variables. Lower and upper bounds are converted
+         to double precision arrays before the problem is solved. The
+         ``keep_feasible`` parameter of the `Bounds` object is ignored. If
+         not specified, all decision variables are constrained to be
+         non-negative.
+     constraints : sequence of scipy.optimize.LinearConstraint, optional
+         Linear constraints of the optimization problem. Arguments may be
+         one of the following:
+
+         1. A single `LinearConstraint` object
+         2. A single tuple that can be converted to a `LinearConstraint` object
+            as ``LinearConstraint(*constraints)``
+         3. A sequence composed entirely of objects of type 1. and 2.
+
+         Before the problem is solved, all values are converted to double
+         precision, and the matrices of constraint coefficients are converted
+         to instances of `scipy.sparse.csc_array`. The ``keep_feasible``
+         parameter of `LinearConstraint` objects is ignored.
+     options : dict, optional
+         A dictionary of solver options. The following keys are recognized.
+
+         disp : bool (default: ``False``)
+             Set to ``True`` if indicators of optimization status are to be
+             printed to the console during optimization.
+         node_limit : int, optional
+             The maximum number of nodes (linear program relaxations) to solve
+             before stopping. Default is no maximum number of nodes.
+         presolve : bool (default: ``True``)
+             Presolve attempts to identify trivial infeasibilities,
+             identify trivial unboundedness, and simplify the problem before
+             sending it to the main solver.
+         time_limit : float, optional
+             The maximum number of seconds allotted to solve the problem.
+             Default is no time limit.
+         mip_rel_gap : float, optional
+             Termination criterion for MIP solver: solver will terminate when
+             the gap between the primal objective value and the dual objective
+             bound, scaled by the primal objective value, is <= mip_rel_gap.
+
+     Returns
+     -------
+     res : OptimizeResult
+         An instance of :class:`scipy.optimize.OptimizeResult`. The object
+         is guaranteed to have the following attributes.
+
+         status : int
+             An integer representing the exit status of the algorithm.
+
+             ``0`` : Optimal solution found.
+
+             ``1`` : Iteration or time limit reached.
+
+             ``2`` : Problem is infeasible.
+
+             ``3`` : Problem is unbounded.
+
+             ``4`` : Other; see message for details.
+
+         success : bool
+             ``True`` when an optimal solution is found and ``False`` otherwise.
+
+         message : str
+             A string descriptor of the exit status of the algorithm.
+
+         The following attributes will also be present, but the values may be
+         ``None``, depending on the solution status.
+
+         x : ndarray
+             The values of the decision variables that minimize the
+             objective function while satisfying the constraints.
+         fun : float
+             The optimal value of the objective function ``c @ x``.
+         mip_node_count : int
+             The number of subproblems or "nodes" solved by the MILP solver.
+         mip_dual_bound : float
+             The MILP solver's final estimate of the lower bound on the optimal
+             solution.
+         mip_gap : float
+             The difference between the primal objective value and the dual
+             objective bound, scaled by the primal objective value.
+
+     Notes
+     -----
+     `milp` is a wrapper of the HiGHS linear optimization software [1]_. The
+     algorithm is deterministic, and it typically finds the global optimum of
+     moderately challenging mixed-integer linear programs (when it exists).
+
+     References
+     ----------
+     .. [1] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
+            "HiGHS - high performance software for linear optimization."
+            https://highs.dev/
+     .. [2] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
+            simplex method." Mathematical Programming Computation, 10 (1),
+            119-142, 2018. DOI: 10.1007/s12532-017-0130-5
+
+     Examples
+     --------
+     Consider the problem at
+     https://en.wikipedia.org/wiki/Integer_programming#Example, which is
+     expressed as a maximization problem of two variables. Since `milp`
+     requires that the problem be expressed as a minimization problem, the
+     objective function coefficients on the decision variables are:
+
+     >>> import numpy as np
+     >>> c = -np.array([0, 1])
+
+     Note the negative sign: we maximize the original objective function
+     by minimizing the negative of the objective function.
+
+     We collect the coefficients of the constraints into arrays like:
+
+     >>> A = np.array([[-1, 1], [3, 2], [2, 3]])
+     >>> b_u = np.array([1, 12, 12])
+     >>> b_l = np.full_like(b_u, -np.inf)
+
+     Because there is no lower limit on these constraints, we have defined a
+     variable ``b_l`` full of values representing negative infinity. This may
+     be unfamiliar to users of `scipy.optimize.linprog`, which only accepts
+     "less than" (or "upper bound") inequality constraints of the form
+     ``A_ub @ x <= b_u``. By accepting both ``b_l`` and ``b_u`` of constraints
+     ``b_l <= A_ub @ x <= b_u``, `milp` makes it easy to specify "greater than"
+     inequality constraints, "less than" inequality constraints, and equality
+     constraints concisely.
+
+     These arrays are collected into a single `LinearConstraint` object like:
+
+     >>> from scipy.optimize import LinearConstraint
+     >>> constraints = LinearConstraint(A, b_l, b_u)
+
+     The non-negativity bounds on the decision variables are enforced by
+     default, so we do not need to provide an argument for `bounds`.
+
+     Finally, the problem states that both decision variables must be integers:
+
+     >>> integrality = np.ones_like(c)
+
+     We solve the problem like:
+
+     >>> from scipy.optimize import milp
+     >>> res = milp(c=c, constraints=constraints, integrality=integrality)
+     >>> res.x
+     [1.0, 2.0]
+
+     Note that had we solved the relaxed problem (without integrality
+     constraints):
+
+     >>> res = milp(c=c, constraints=constraints)  # OR:
+     >>> # from scipy.optimize import linprog; res = linprog(c, A, b_u)
+     >>> res.x
+     [1.8, 2.8]
+
+     we would not have obtained the correct solution by rounding to the nearest
+     integers.
+
+     Other examples are given :ref:`in the tutorial <tutorial-optimize_milp>`.
+
+     """
+     args_iv = _milp_iv(c, integrality, bounds, constraints, options)
+     c, integrality, lb, ub, indptr, indices, data, b_l, b_u, options = args_iv
+
+     highs_res = _highs_wrapper(c, indptr, indices, data, b_l, b_u,
+                                lb, ub, integrality, options)
+
+     res = {}
+
+     # Convert to scipy-style status and message
+     highs_status = highs_res.get('status', None)
+     highs_message = highs_res.get('message', None)
+     status, message = _highs_to_scipy_status_message(highs_status,
+                                                      highs_message)
+     res['status'] = status
+     res['message'] = message
+     res['success'] = (status == 0)
+     x = highs_res.get('x', None)
+     res['x'] = np.array(x) if x is not None else None
+     res['fun'] = highs_res.get('fun', None)
+     res['mip_node_count'] = highs_res.get('mip_node_count', None)
+     res['mip_dual_bound'] = highs_res.get('mip_dual_bound', None)
+     res['mip_gap'] = highs_res.get('mip_gap', None)
+
+     return OptimizeResult(res)
venv/lib/python3.10/site-packages/scipy/optimize/_minimize.py ADDED
@@ -0,0 +1,1094 @@
+ """
+ Unified interfaces to minimization algorithms.
+
+ Functions
+ ---------
+ - minimize : minimization of a function of several variables.
+ - minimize_scalar : minimization of a function of one variable.
+ """
+
+ __all__ = ['minimize', 'minimize_scalar']
+
+
+ from warnings import warn
+
+ import numpy as np
+
+ # unconstrained minimization
+ from ._optimize import (_minimize_neldermead, _minimize_powell, _minimize_cg,
+                         _minimize_bfgs, _minimize_newtoncg,
+                         _minimize_scalar_brent, _minimize_scalar_bounded,
+                         _minimize_scalar_golden, MemoizeJac, OptimizeResult,
+                         _wrap_callback, _recover_from_bracket_error)
+ from ._trustregion_dogleg import _minimize_dogleg
+ from ._trustregion_ncg import _minimize_trust_ncg
+ from ._trustregion_krylov import _minimize_trust_krylov
+ from ._trustregion_exact import _minimize_trustregion_exact
+ from ._trustregion_constr import _minimize_trustregion_constr
+
+ # constrained minimization
+ from ._lbfgsb_py import _minimize_lbfgsb
+ from ._tnc import _minimize_tnc
+ from ._cobyla_py import _minimize_cobyla
+ from ._slsqp_py import _minimize_slsqp
+ from ._constraints import (old_bound_to_new, new_bounds_to_old,
+                            old_constraint_to_new, new_constraint_to_old,
+                            NonlinearConstraint, LinearConstraint, Bounds,
+                            PreparedConstraint)
+ from ._differentiable_functions import FD_METHODS
+
+ MINIMIZE_METHODS = ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg',
+                     'l-bfgs-b', 'tnc', 'cobyla', 'slsqp', 'trust-constr',
+                     'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov']
+
+ # These methods support the new callback interface (passed an OptimizeResult)
+ MINIMIZE_METHODS_NEW_CB = ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg',
+                            'l-bfgs-b', 'trust-constr', 'dogleg', 'trust-ncg',
+                            'trust-exact', 'trust-krylov']
+
+ MINIMIZE_SCALAR_METHODS = ['brent', 'bounded', 'golden']
+
+
+ def minimize(fun, x0, args=(), method=None, jac=None, hess=None,
+              hessp=None, bounds=None, constraints=(), tol=None,
+              callback=None, options=None):
+     """Minimization of scalar function of one or more variables.
+
+     Parameters
+     ----------
+     fun : callable
+         The objective function to be minimized.
+
+             ``fun(x, *args) -> float``
+
+         where ``x`` is a 1-D array with shape (n,) and ``args``
+         is a tuple of the fixed parameters needed to completely
+         specify the function.
+     x0 : ndarray, shape (n,)
+         Initial guess. Array of real elements of size (n,),
+         where ``n`` is the number of independent variables.
+     args : tuple, optional
+         Extra arguments passed to the objective function and its
+         derivatives (`fun`, `jac` and `hess` functions).
+     method : str or callable, optional
+         Type of solver. Should be one of
+
+         - 'Nelder-Mead' :ref:`(see here) <optimize.minimize-neldermead>`
+         - 'Powell' :ref:`(see here) <optimize.minimize-powell>`
+         - 'CG' :ref:`(see here) <optimize.minimize-cg>`
+         - 'BFGS' :ref:`(see here) <optimize.minimize-bfgs>`
+         - 'Newton-CG' :ref:`(see here) <optimize.minimize-newtoncg>`
+         - 'L-BFGS-B' :ref:`(see here) <optimize.minimize-lbfgsb>`
+         - 'TNC' :ref:`(see here) <optimize.minimize-tnc>`
+         - 'COBYLA' :ref:`(see here) <optimize.minimize-cobyla>`
+         - 'SLSQP' :ref:`(see here) <optimize.minimize-slsqp>`
+         - 'trust-constr' :ref:`(see here) <optimize.minimize-trustconstr>`
+         - 'dogleg' :ref:`(see here) <optimize.minimize-dogleg>`
+         - 'trust-ncg' :ref:`(see here) <optimize.minimize-trustncg>`
+         - 'trust-exact' :ref:`(see here) <optimize.minimize-trustexact>`
+         - 'trust-krylov' :ref:`(see here) <optimize.minimize-trustkrylov>`
+         - custom - a callable object, see below for description.
+
+         If not given, chosen to be one of ``BFGS``, ``L-BFGS-B``, ``SLSQP``,
+         depending on whether or not the problem has constraints or bounds.
+     jac : {callable, '2-point', '3-point', 'cs', bool}, optional
+         Method for computing the gradient vector. Only for CG, BFGS,
+         Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg, trust-krylov,
+         trust-exact and trust-constr.
+         If it is a callable, it should be a function that returns the gradient
+         vector:
+
+             ``jac(x, *args) -> array_like, shape (n,)``
+
+         where ``x`` is an array with shape (n,) and ``args`` is a tuple with
+         the fixed parameters. If `jac` is a Boolean and is True, `fun` is
+         assumed to return a tuple ``(f, g)`` containing the objective
+         function and the gradient.
+         Methods 'Newton-CG', 'trust-ncg', 'dogleg', 'trust-exact', and
+         'trust-krylov' require that either a callable be supplied, or that
+         `fun` return the objective and gradient.
+         If None or False, the gradient will be estimated using 2-point finite
+         difference estimation with an absolute step size.
+         Alternatively, the keywords {'2-point', '3-point', 'cs'} can be used
+         to select a finite difference scheme for numerical estimation of the
+         gradient with a relative step size. These finite difference schemes
+         obey any specified `bounds`.
+     hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy}, optional
+         Method for computing the Hessian matrix. Only for Newton-CG, dogleg,
+         trust-ncg, trust-krylov, trust-exact and trust-constr.
+         If it is callable, it should return the Hessian matrix:
+
+             ``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)``
+
+         where ``x`` is a (n,) ndarray and ``args`` is a tuple with the fixed
+         parameters.
+         The keywords {'2-point', '3-point', 'cs'} can also be used to select
+         a finite difference scheme for numerical estimation of the Hessian.
+         Alternatively, objects implementing the `HessianUpdateStrategy`
+         interface can be used to approximate the Hessian. Available
+         quasi-Newton methods implementing this interface are:
+
+         - `BFGS`;
+         - `SR1`.
+
+         Not all of the options are available for each of the methods; for
+         availability refer to the notes.
+     hessp : callable, optional
+         Hessian of objective function times an arbitrary vector p. Only for
+         Newton-CG, trust-ncg, trust-krylov, trust-constr.
+         Only one of `hessp` or `hess` needs to be given. If `hess` is
+         provided, then `hessp` will be ignored. `hessp` must compute the
+         Hessian times an arbitrary vector:
+
+             ``hessp(x, p, *args) -> ndarray shape (n,)``
+
+         where ``x`` is a (n,) ndarray, ``p`` is an arbitrary vector with
+         dimension (n,) and ``args`` is a tuple with the fixed
+         parameters.
+     bounds : sequence or `Bounds`, optional
+         Bounds on variables for Nelder-Mead, L-BFGS-B, TNC, SLSQP, Powell,
+         trust-constr, and COBYLA methods. There are two ways to specify the
+         bounds:
+
+         1. Instance of `Bounds` class.
+         2. Sequence of ``(min, max)`` pairs for each element in `x`. None
+            is used to specify no bound.
+
+     constraints : {Constraint, dict} or List of {Constraint, dict}, optional
+         Constraints definition. Only for COBYLA, SLSQP and trust-constr.
+
+         Constraints for 'trust-constr' are defined as a single object or a
+         list of objects specifying constraints to the optimization problem.
+         Available constraints are:
+
+         - `LinearConstraint`
+         - `NonlinearConstraint`
+
+         Constraints for COBYLA, SLSQP are defined as a list of dictionaries.
+         Each dictionary has fields:
+
+             type : str
+                 Constraint type: 'eq' for equality, 'ineq' for inequality.
+             fun : callable
+                 The function defining the constraint.
+             jac : callable, optional
+                 The Jacobian of `fun` (only for SLSQP).
+             args : sequence, optional
+                 Extra arguments to be passed to the function and Jacobian.
+
+         Equality constraint means that the constraint function result is to
+         be zero whereas inequality means that it is to be non-negative.
+         Note that COBYLA only supports inequality constraints.
+     tol : float, optional
+         Tolerance for termination. When `tol` is specified, the selected
+         minimization algorithm sets some relevant solver-specific tolerance(s)
+         equal to `tol`. For detailed control, use solver-specific
+         options.
+     options : dict, optional
+         A dictionary of solver options. All methods except `TNC` accept the
+         following generic options:
+
+             maxiter : int
+                 Maximum number of iterations to perform. Depending on the
+                 method each iteration may use several function evaluations.
+
+                 For `TNC` use `maxfun` instead of `maxiter`.
+             disp : bool
+                 Set to True to print convergence messages.
+
+         For method-specific options, see :func:`show_options()`.
+     callback : callable, optional
+         A callable called after each iteration.
+
+         All methods except TNC, SLSQP, and COBYLA support a callable with
+         the signature:
+
+             ``callback(intermediate_result: OptimizeResult)``
+
+         where ``intermediate_result`` is a keyword parameter containing an
+         `OptimizeResult` with attributes ``x`` and ``fun``, the present values
+         of the parameter vector and objective function. Note that the name
+         of the parameter must be ``intermediate_result`` for the callback
+         to be passed an `OptimizeResult`. These methods will also terminate if
+         the callback raises ``StopIteration``.
+
+         All methods except trust-constr (also) support a signature like:
+
+             ``callback(xk)``
+
+         where ``xk`` is the current parameter vector.
+
+         Introspection is used to determine which of the signatures above to
+         invoke.
+
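+         For example, a minimal logging callback using the first signature
+         (the parameter name ``intermediate_result`` is required for the
+         `OptimizeResult` to be passed)::
+
+             def callback(intermediate_result):
+                 print(intermediate_result.fun)
+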
+     Returns
+     -------
+     res : OptimizeResult
+         The optimization result represented as an ``OptimizeResult`` object.
+         Important attributes are: ``x`` the solution array, ``success`` a
+         Boolean flag indicating if the optimizer exited successfully and
+         ``message`` which describes the cause of the termination. See
+         `OptimizeResult` for a description of other attributes.
+
+     See also
+     --------
+     minimize_scalar : Interface to minimization algorithms for scalar
+         univariate functions
+     show_options : Additional options accepted by the solvers
+
+     Notes
+     -----
+     This section describes the available solvers that can be selected by the
+     'method' parameter. The default method is *BFGS*.
+
+     **Unconstrained minimization**
+
+     Method :ref:`CG <optimize.minimize-cg>` uses a nonlinear conjugate
+     gradient algorithm by Polak and Ribiere, a variant of the
+     Fletcher-Reeves method described in [5]_ pp.120-122. Only the
+     first derivatives are used.
+
+     Method :ref:`BFGS <optimize.minimize-bfgs>` uses the quasi-Newton
+     method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) [5]_
+     pp. 136. It uses the first derivatives only. BFGS has proven good
+     performance even for non-smooth optimizations. This method also
+     returns an approximation of the Hessian inverse, stored as
+     `hess_inv` in the OptimizeResult object.
+
+     Method :ref:`Newton-CG <optimize.minimize-newtoncg>` uses a
+     Newton-CG algorithm [5]_ pp. 168 (also known as the truncated
+     Newton method). It uses a CG method to compute the search
+     direction. See also *TNC* method for a box-constrained
+     minimization with a similar algorithm. Suitable for large-scale
+     problems.
+
+     Method :ref:`dogleg <optimize.minimize-dogleg>` uses the dog-leg
+     trust-region algorithm [5]_ for unconstrained minimization. This
+     algorithm requires the gradient and Hessian; furthermore the
+     Hessian is required to be positive definite.
+
+     Method :ref:`trust-ncg <optimize.minimize-trustncg>` uses the
+     Newton conjugate gradient trust-region algorithm [5]_ for
+     unconstrained minimization. This algorithm requires the gradient
+     and either the Hessian or a function that computes the product of
+     the Hessian with a given vector. Suitable for large-scale problems.
+
+     Method :ref:`trust-krylov <optimize.minimize-trustkrylov>` uses
+     the Newton GLTR trust-region algorithm [14]_, [15]_ for unconstrained
+     minimization. This algorithm requires the gradient
+     and either the Hessian or a function that computes the product of
+     the Hessian with a given vector. Suitable for large-scale problems.
+     On indefinite problems it usually requires fewer iterations than the
+     `trust-ncg` method and is recommended for medium and large-scale problems.
+
+     Method :ref:`trust-exact <optimize.minimize-trustexact>`
+     is a trust-region method for unconstrained minimization in which
+     quadratic subproblems are solved almost exactly [13]_. This
+     algorithm requires the gradient and the Hessian (which is
+     *not* required to be positive definite). In many situations it is
+     the Newton-type method that converges in the fewest iterations, and
+     it is the most recommended for small and medium-size problems.
+
+     **Bound-Constrained minimization**
+
+     Method :ref:`Nelder-Mead <optimize.minimize-neldermead>` uses the
+     Simplex algorithm [1]_, [2]_. This algorithm is robust in many
+     applications. However, if numerical computation of derivatives can be
+     trusted, other algorithms using first and/or second derivative
+     information might be preferred for their better performance in
+     general.
+
+     Method :ref:`L-BFGS-B <optimize.minimize-lbfgsb>` uses the L-BFGS-B
+     algorithm [6]_, [7]_ for bound constrained minimization.
+
+     Method :ref:`Powell <optimize.minimize-powell>` is a modification
+     of Powell's method [3]_, [4]_ which is a conjugate direction
+     method. It performs sequential one-dimensional minimizations along
+     each vector of the directions set (`direc` field in `options` and
+     `info`), which is updated at each iteration of the main
+     minimization loop. The function need not be differentiable, and no
+     derivatives are taken. If bounds are not provided, then an
+     unbounded line search will be used. If bounds are provided and
+     the initial guess is within the bounds, then every function
+     evaluation throughout the minimization procedure will be within
+     the bounds. If bounds are provided, the initial guess is outside
+     the bounds, and `direc` is full rank (default has full rank), then
+     some function evaluations during the first iteration may be
+     outside the bounds, but every function evaluation after the first
+     iteration will be within the bounds. If `direc` is not full rank,
+     then some parameters may not be optimized and the solution is not
+     guaranteed to be within the bounds.
+
+     Method :ref:`TNC <optimize.minimize-tnc>` uses a truncated Newton
+     algorithm [5]_, [8]_ to minimize a function with variables subject
+     to bounds. This algorithm uses gradient information; it is also
+     called Newton Conjugate-Gradient. It differs from the *Newton-CG*
+     method described above as it wraps a C implementation and allows
+     each variable to be given upper and lower bounds.
+
+     **Constrained Minimization**
+
+     Method :ref:`COBYLA <optimize.minimize-cobyla>` uses the
+     Constrained Optimization BY Linear Approximation (COBYLA) method
+     [9]_, [10]_, [11]_. The algorithm is based on linear
+     approximations to the objective function and each constraint. The
+     method wraps a FORTRAN implementation of the algorithm. The
+     constraint functions 'fun' may return either a single number
+     or an array or list of numbers.
+
+     Method :ref:`SLSQP <optimize.minimize-slsqp>` uses Sequential
+     Least SQuares Programming to minimize a function of several
+     variables with any combination of bounds, equality and inequality
+     constraints. The method wraps the SLSQP Optimization subroutine
+     originally implemented by Dieter Kraft [12]_. Note that the
+     wrapper handles infinite values in bounds by converting them into
+     large floating values.
+
+     Method :ref:`trust-constr <optimize.minimize-trustconstr>` is a
+     trust-region algorithm for constrained optimization. It switches
+     between two implementations depending on the problem definition.
+     It is the most versatile constrained minimization algorithm
+     implemented in SciPy and the most appropriate for large-scale problems.
+     For equality constrained problems it is an implementation of Byrd-Omojokun
+     Trust-Region SQP method described in [17]_ and in [5]_, p. 549. When
+     inequality constraints are imposed as well, it switches to the
+     trust-region interior point method described in [16]_. This interior
+     point algorithm, in turn, solves inequality constraints by introducing
+     slack variables and solving a sequence of equality-constrained barrier
+     problems for progressively smaller values of the barrier parameter.
+     The previously described equality constrained SQP method is
+     used to solve the subproblems with increasing levels of accuracy
+     as the iterate gets closer to a solution.
+
+     **Finite-Difference Options**
+
+     For Method :ref:`trust-constr <optimize.minimize-trustconstr>`
+     the gradient and the Hessian may be approximated using
+     three finite-difference schemes: {'2-point', '3-point', 'cs'}.
+     The scheme 'cs' is, potentially, the most accurate but it
+     requires the function to correctly handle complex inputs and to
+     be differentiable in the complex plane. The scheme '3-point' is more
+     accurate than '2-point' but requires twice as many operations. If the
+     gradient is estimated via finite-differences the Hessian must be
+     estimated using one of the quasi-Newton strategies.
+
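+     For example, a minimal sketch of this combination (finite-difference
+     gradient plus a quasi-Newton Hessian approximation)::
+
+         from scipy.optimize import minimize, rosen, BFGS
+         res = minimize(rosen, [1.3, 0.7], method='trust-constr',
+                        jac='2-point', hess=BFGS())
+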
+     **Method specific options for the** `hess` **keyword**
+
+     +--------------+------+----------+-------------------------+-----+
+     | method/Hess  | None | callable | '2-point'/'3-point'/'cs'| HUS |
+     +==============+======+==========+=========================+=====+
+     | Newton-CG    | x    | (n, n)   | x                       | x   |
+     |              |      | LO       |                         |     |
+     +--------------+------+----------+-------------------------+-----+
+     | dogleg       |      | (n, n)   |                         |     |
+     +--------------+------+----------+-------------------------+-----+
+     | trust-ncg    |      | (n, n)   | x                       | x   |
+     +--------------+------+----------+-------------------------+-----+
+     | trust-krylov |      | (n, n)   | x                       | x   |
+     +--------------+------+----------+-------------------------+-----+
+     | trust-exact  |      | (n, n)   |                         |     |
+     +--------------+------+----------+-------------------------+-----+
+     | trust-constr | x    | (n, n)   | x                       | x   |
+     |              |      | LO       |                         |     |
+     |              |      | sp       |                         |     |
+     +--------------+------+----------+-------------------------+-----+
+
+     where LO=LinearOperator, sp=Sparse matrix, HUS=HessianUpdateStrategy
+
+     **Custom minimizers**
+
+     It may be useful to pass a custom minimization method, for example
+     when using a frontend to this method such as `scipy.optimize.basinhopping`
+     or a different library. You can simply pass a callable as the ``method``
+     parameter.
+
+     The callable is called as ``method(fun, x0, args, **kwargs, **options)``
+     where ``kwargs`` corresponds to any other parameters passed to `minimize`
+     (such as `callback`, `hess`, etc.), except the `options` dict, which has
+     its contents also passed as `method` parameters pair by pair. Also, if
+     `jac` has been passed as a bool type, `jac` and `fun` are mangled so that
+     `fun` returns just the function values and `jac` is converted to a function
+     returning the Jacobian. The method shall return an `OptimizeResult`
+     object.
+
+     The provided `method` callable must be able to accept (and possibly
+     ignore) arbitrary parameters; the set of parameters accepted by `minimize`
+     may expand in future versions and then these parameters will be passed to
+     the method. You can find an example in the scipy.optimize tutorial.
+
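+     A minimal sketch of such a callable (for illustration only; ``stepsize``
+     and the fixed iteration count are hypothetical choices, not part of any
+     SciPy API), implementing fixed-step gradient descent and reusing
+     ``rosen``, ``rosen_der``, and ``x0`` from the Examples section below::
+
+         from scipy.optimize import OptimizeResult
+
+         def gradient_descent(fun, x0, args=(), jac=None, stepsize=1e-3,
+                              maxiter=1000, callback=None, **unknown_options):
+             x = x0
+             for _ in range(maxiter):
+                 x = x - stepsize * jac(x, *args)  # requires a callable jac
+                 if callback is not None:
+                     callback(x)
+             return OptimizeResult(x=x, fun=fun(x, *args), nit=maxiter,
+                                   success=True)
+
+         res = minimize(rosen, x0, method=gradient_descent, jac=rosen_der)
+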
+     References
+     ----------
+     .. [1] Nelder, J A, and R Mead. 1965. A Simplex Method for Function
+            Minimization. The Computer Journal 7: 308-13.
+     .. [2] Wright M H. 1996. Direct search methods: Once scorned, now
+            respectable, in Numerical Analysis 1995: Proceedings of the 1995
+            Dundee Biennial Conference in Numerical Analysis (Eds. D F
+            Griffiths and G A Watson). Addison Wesley Longman, Harlow, UK.
+            191-208.
+     .. [3] Powell, M J D. 1964. An efficient method for finding the minimum of
+            a function of several variables without calculating derivatives. The
+            Computer Journal 7: 155-162.
+     .. [4] Press W, S A Teukolsky, W T Vetterling and B P Flannery.
+            Numerical Recipes (any edition), Cambridge University Press.
+     .. [5] Nocedal, J, and S J Wright. 2006. Numerical Optimization.
+            Springer New York.
+     .. [6] Byrd, R H and P Lu and J. Nocedal. 1995. A Limited Memory
+            Algorithm for Bound Constrained Optimization. SIAM Journal on
+            Scientific and Statistical Computing 16 (5): 1190-1208.
+     .. [7] Zhu, C and R H Byrd and J Nocedal. 1997. L-BFGS-B: Algorithm
+            778: L-BFGS-B, FORTRAN routines for large scale bound constrained
+            optimization. ACM Transactions on Mathematical Software 23 (4):
+            550-560.
+     .. [8] Nash, S G. Newton-Type Minimization Via the Lanczos Method.
+            1984. SIAM Journal of Numerical Analysis 21: 770-778.
+     .. [9] Powell, M J D. A direct search optimization method that models
+            the objective and constraint functions by linear interpolation.
+            1994. Advances in Optimization and Numerical Analysis, eds. S. Gomez
+            and J-P Hennart, Kluwer Academic (Dordrecht), 51-67.
+     .. [10] Powell M J D. Direct search algorithms for optimization
+             calculations. 1998. Acta Numerica 7: 287-336.
+     .. [11] Powell M J D. A view of algorithms for optimization without
+             derivatives. 2007. Cambridge University Technical Report DAMTP
+             2007/NA03
+     .. [12] Kraft, D. A software package for sequential quadratic
+             programming. 1988. Tech. Rep. DFVLR-FB 88-28, DLR German Aerospace
+             Center -- Institute for Flight Mechanics, Koln, Germany.
+     .. [13] Conn, A. R., Gould, N. I., and Toint, P. L.
+             Trust region methods. 2000. Siam. pp. 169-200.
+     .. [14] F. Lenders, C. Kirches, A. Potschka: "trlib: A vector-free
+             implementation of the GLTR method for iterative solution of
+             the trust region problem", :arxiv:`1611.04718`
+     .. [15] N. Gould, S. Lucidi, M. Roma, P. Toint: "Solving the
+             Trust-Region Subproblem using the Lanczos Method",
+             SIAM J. Optim., 9(2), 504--525, (1999).
+     .. [16] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. 1999.
+             An interior point algorithm for large-scale nonlinear programming.
+             SIAM Journal on Optimization 9.4: 877-900.
+     .. [17] Lalee, Marucha, Jorge Nocedal, and Todd Plantega. 1998. On the
+             implementation of an algorithm for large-scale equality constrained
+             optimization. SIAM Journal on Optimization 8.3: 682-706.
+
+     Examples
+     --------
+     Let us consider the problem of minimizing the Rosenbrock function. This
+     function (and its respective derivatives) is implemented in `rosen`
+     (resp. `rosen_der`, `rosen_hess`) in `scipy.optimize`.
+
+     >>> from scipy.optimize import minimize, rosen, rosen_der
+
+     A simple application of the *Nelder-Mead* method is:
+
+     >>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
+     >>> res = minimize(rosen, x0, method='Nelder-Mead', tol=1e-6)
+     >>> res.x
+     array([ 1.,  1.,  1.,  1.,  1.])
+
+     Now using the *BFGS* algorithm, using the first derivative and a few
+     options:
+
+     >>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der,
+     ...                options={'gtol': 1e-6, 'disp': True})
+     Optimization terminated successfully.
+              Current function value: 0.000000
+              Iterations: 26
+              Function evaluations: 31
+              Gradient evaluations: 31
+     >>> res.x
+     array([ 1.,  1.,  1.,  1.,  1.])
+     >>> print(res.message)
+     Optimization terminated successfully.
+     >>> res.hess_inv
+     array([[ 0.00749589,  0.01255155,  0.02396251,  0.04750988,  0.09495377],  # may vary
+            [ 0.01255155,  0.02510441,  0.04794055,  0.09502834,  0.18996269],
+            [ 0.02396251,  0.04794055,  0.09631614,  0.19092151,  0.38165151],
+            [ 0.04750988,  0.09502834,  0.19092151,  0.38341252,  0.7664427 ],
+            [ 0.09495377,  0.18996269,  0.38165151,  0.7664427 ,  1.53713523]])
+
+     Next, consider a minimization problem with several constraints (namely
+     Example 16.4 from [5]_). The objective function is:
+
+     >>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
+
+     There are three constraints defined as:
+
+     >>> cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
+     ...         {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
+     ...         {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2})
+
+     And variables must be positive, hence the following bounds:
+
+     >>> bnds = ((0, None), (0, None))
+
+     The optimization problem is solved using the SLSQP method as:
+
+     >>> res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds,
+     ...                constraints=cons)
+
+     It should converge to the theoretical solution (1.4, 1.7).
+
+     """
532
+ x0 = np.atleast_1d(np.asarray(x0))
533
+
534
+ if x0.ndim != 1:
535
+ raise ValueError("'x0' must only have one dimension.")
536
+
537
+ if x0.dtype.kind in np.typecodes["AllInteger"]:
538
+ x0 = np.asarray(x0, dtype=float)
539
+
540
+ if not isinstance(args, tuple):
541
+ args = (args,)
542
+
543
+ if method is None:
544
+ # Select automatically
545
+ if constraints:
546
+ method = 'SLSQP'
547
+ elif bounds is not None:
548
+ method = 'L-BFGS-B'
549
+ else:
550
+ method = 'BFGS'
551
+
552
+ if callable(method):
553
+ meth = "_custom"
554
+ else:
555
+ meth = method.lower()
556
+
557
+ if options is None:
558
+ options = {}
559
+ # check if optional parameters are supported by the selected method
560
+ # - jac
561
+ if meth in ('nelder-mead', 'powell', 'cobyla') and bool(jac):
562
+ warn('Method %s does not use gradient information (jac).' % method,
563
+ RuntimeWarning, stacklevel=2)
564
+ # - hess
565
+ if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr',
566
+ 'trust-krylov', 'trust-exact', '_custom') and hess is not None:
567
+ warn('Method %s does not use Hessian information (hess).' % method,
568
+ RuntimeWarning, stacklevel=2)
569
+ # - hessp
570
+ if meth not in ('newton-cg', 'trust-ncg', 'trust-constr',
571
+ 'trust-krylov', '_custom') \
572
+ and hessp is not None:
573
+ warn('Method %s does not use Hessian-vector product '
574
+ 'information (hessp).' % method,
575
+ RuntimeWarning, stacklevel=2)
576
+ # - constraints or bounds
577
+ if (meth not in ('cobyla', 'slsqp', 'trust-constr', '_custom') and
578
+ np.any(constraints)):
579
+ warn('Method %s cannot handle constraints.' % method,
580
+ RuntimeWarning, stacklevel=2)
581
+ if meth not in ('nelder-mead', 'powell', 'l-bfgs-b', 'cobyla', 'slsqp',
582
+ 'tnc', 'trust-constr', '_custom') and bounds is not None:
583
+ warn('Method %s cannot handle bounds.' % method,
584
+ RuntimeWarning, stacklevel=2)
585
+ # - return_all
586
+ if (meth in ('l-bfgs-b', 'tnc', 'cobyla', 'slsqp') and
587
+ options.get('return_all', False)):
588
+ warn('Method %s does not support the return_all option.' % method,
589
+ RuntimeWarning, stacklevel=2)
590
+
591
+ # check gradient vector
592
+ if callable(jac):
593
+ pass
594
+ elif jac is True:
595
+ # fun returns func and grad
596
+ fun = MemoizeJac(fun)
597
+ jac = fun.derivative
598
+ elif (jac in FD_METHODS and
599
+ meth in ['trust-constr', 'bfgs', 'cg', 'l-bfgs-b', 'tnc', 'slsqp']):
600
+ # finite differences with relative step
601
+ pass
602
+ elif meth in ['trust-constr']:
603
+ # default jac calculation for this method
604
+ jac = '2-point'
605
+ elif jac is None or bool(jac) is False:
606
+ # this will cause e.g. LBFGS to use forward difference, absolute step
607
+ jac = None
608
+ else:
609
+ # default if jac option is not understood
610
+ jac = None
611
+
612
+ # set default tolerances
613
+ if tol is not None:
614
+ options = dict(options)
615
+ if meth == 'nelder-mead':
616
+ options.setdefault('xatol', tol)
617
+ options.setdefault('fatol', tol)
618
+ if meth in ('newton-cg', 'powell', 'tnc'):
619
+ options.setdefault('xtol', tol)
620
+ if meth in ('powell', 'l-bfgs-b', 'tnc', 'slsqp'):
621
+ options.setdefault('ftol', tol)
622
+ if meth in ('bfgs', 'cg', 'l-bfgs-b', 'tnc', 'dogleg',
623
+ 'trust-ncg', 'trust-exact', 'trust-krylov'):
624
+ options.setdefault('gtol', tol)
625
+ if meth in ('cobyla', '_custom'):
626
+ options.setdefault('tol', tol)
627
+ if meth == 'trust-constr':
628
+ options.setdefault('xtol', tol)
629
+ options.setdefault('gtol', tol)
630
+ options.setdefault('barrier_tol', tol)
631
+
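+ # e.g. (illustrative) minimize(f, x0, method='BFGS', tol=1e-8) behaves
+ # like minimize(f, x0, method='BFGS', options={'gtol': 1e-8}).
+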
632
+ if meth == '_custom':
633
+ # a custom method is called before bounds and constraints are
634
+ # 'standardised'; it should be able to accept whatever bounds/constraints
635
+ # are provided to it.
636
+ return method(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp,
637
+ bounds=bounds, constraints=constraints,
638
+ callback=callback, **options)
639
+
640
+ constraints = standardize_constraints(constraints, x0, meth)
641
+
642
+ remove_vars = False
643
+ if bounds is not None:
644
+ # convert to new-style bounds so we only have to consider one case
645
+ bounds = standardize_bounds(bounds, x0, 'new')
646
+ bounds = _validate_bounds(bounds, x0, meth)
647
+
648
+ if meth in {"tnc", "slsqp", "l-bfgs-b"}:
649
+ # These methods can't take the finite-difference derivatives they
650
+ # need when a variable is fixed by the bounds. To avoid this issue,
651
+ # remove fixed variables from the problem.
652
+ # NOTE: if this list is expanded, then be sure to update the
653
+ # accompanying tests and test_optimize.eb_data. Consider also if
654
+ # default OptimizeResult will need updating.
655
+
656
+ # determine whether any variables are fixed
657
+ i_fixed = (bounds.lb == bounds.ub)
658
+
659
+ if np.all(i_fixed):
660
+ # all the parameters are fixed, a minimizer is not able to do
661
+ # anything
662
+ return _optimize_result_for_equal_bounds(
663
+ fun, bounds, meth, args=args, constraints=constraints
664
+ )
665
+
666
+ # determine whether finite differences are needed for any grad/jac
667
+ fd_needed = (not callable(jac))
668
+ for con in constraints:
669
+ if not callable(con.get('jac', None)):
670
+ fd_needed = True
671
+
672
+ # If finite differences are ever used, remove all fixed variables
673
+ # Always remove fixed variables for TNC; see gh-14565
674
+ remove_vars = i_fixed.any() and (fd_needed or meth == "tnc")
675
+ if remove_vars:
676
+ x_fixed = (bounds.lb)[i_fixed]
677
+ x0 = x0[~i_fixed]
678
+ bounds = _remove_from_bounds(bounds, i_fixed)
679
+ fun = _remove_from_func(fun, i_fixed, x_fixed)
680
+ if callable(callback):
681
+ callback = _remove_from_func(callback, i_fixed, x_fixed)
682
+ if callable(jac):
683
+ jac = _remove_from_func(jac, i_fixed, x_fixed, remove=1)
684
+
685
+ # make a copy of the constraints so the user's version doesn't
686
+ # get changed. (Shallow copy is ok)
687
+ constraints = [con.copy() for con in constraints]
688
+ for con in constraints: # yes, guaranteed to be a list
689
+ con['fun'] = _remove_from_func(con['fun'], i_fixed,
690
+ x_fixed, min_dim=1,
691
+ remove=0)
692
+ if callable(con.get('jac', None)):
693
+ con['jac'] = _remove_from_func(con['jac'], i_fixed,
694
+ x_fixed, min_dim=2,
695
+ remove=1)
696
+ bounds = standardize_bounds(bounds, x0, meth)
697
+
698
+ callback = _wrap_callback(callback, meth)
699
+
700
+ if meth == 'nelder-mead':
701
+ res = _minimize_neldermead(fun, x0, args, callback, bounds=bounds,
702
+ **options)
703
+ elif meth == 'powell':
704
+ res = _minimize_powell(fun, x0, args, callback, bounds, **options)
705
+ elif meth == 'cg':
706
+ res = _minimize_cg(fun, x0, args, jac, callback, **options)
707
+ elif meth == 'bfgs':
708
+ res = _minimize_bfgs(fun, x0, args, jac, callback, **options)
709
+ elif meth == 'newton-cg':
710
+ res = _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback,
711
+ **options)
712
+ elif meth == 'l-bfgs-b':
713
+ res = _minimize_lbfgsb(fun, x0, args, jac, bounds,
714
+ callback=callback, **options)
715
+ elif meth == 'tnc':
716
+ res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback,
717
+ **options)
718
+ elif meth == 'cobyla':
719
+ res = _minimize_cobyla(fun, x0, args, constraints, callback=callback,
720
+ bounds=bounds, **options)
721
+ elif meth == 'slsqp':
722
+ res = _minimize_slsqp(fun, x0, args, jac, bounds,
723
+ constraints, callback=callback, **options)
724
+ elif meth == 'trust-constr':
725
+ res = _minimize_trustregion_constr(fun, x0, args, jac, hess, hessp,
726
+ bounds, constraints,
727
+ callback=callback, **options)
728
+ elif meth == 'dogleg':
729
+ res = _minimize_dogleg(fun, x0, args, jac, hess,
730
+ callback=callback, **options)
731
+ elif meth == 'trust-ncg':
732
+ res = _minimize_trust_ncg(fun, x0, args, jac, hess, hessp,
733
+ callback=callback, **options)
734
+ elif meth == 'trust-krylov':
735
+ res = _minimize_trust_krylov(fun, x0, args, jac, hess, hessp,
736
+ callback=callback, **options)
737
+ elif meth == 'trust-exact':
738
+ res = _minimize_trustregion_exact(fun, x0, args, jac, hess,
739
+ callback=callback, **options)
740
+ else:
741
+ raise ValueError('Unknown solver %s' % method)
742
+
743
+ if remove_vars:
744
+ res.x = _add_to_array(res.x, i_fixed, x_fixed)
745
+ res.jac = _add_to_array(res.jac, i_fixed, np.nan)
746
+ if "hess_inv" in res:
747
+ res.hess_inv = None # unknown
748
+
749
+ if getattr(callback, 'stop_iteration', False):
750
+ res.success = False
751
+ res.status = 99
752
+ res.message = "`callback` raised `StopIteration`."
753
+
754
+ return res
755
+
756
+
757
+ def minimize_scalar(fun, bracket=None, bounds=None, args=(),
758
+ method=None, tol=None, options=None):
759
+ """Local minimization of scalar function of one variable.
760
+
761
+ Parameters
762
+ ----------
763
+ fun : callable
764
+ Objective function.
765
+ Scalar function, must return a scalar.
766
+ bracket : sequence, optional
767
+ For methods 'brent' and 'golden', `bracket` defines the bracketing
768
+ interval and is required.
769
+ Either a triple ``(xa, xb, xc)`` satisfying ``xa < xb < xc`` and
770
+ ``func(xb) < func(xa) and func(xb) < func(xc)``, or a pair
771
+ ``(xa, xb)`` to be used as initial points for a downhill bracket search
772
+ (see `scipy.optimize.bracket`).
773
+ The minimizer ``res.x`` will not necessarily satisfy
774
+ ``xa <= res.x <= xb``.
775
+ bounds : sequence, optional
776
+ For method 'bounded', `bounds` is mandatory and must have two finite
777
+ items corresponding to the optimization bounds.
778
+ args : tuple, optional
779
+ Extra arguments passed to the objective function.
780
+ method : str or callable, optional
781
+ Type of solver. Should be one of:
782
+
783
+ - :ref:`Brent <optimize.minimize_scalar-brent>`
784
+ - :ref:`Bounded <optimize.minimize_scalar-bounded>`
785
+ - :ref:`Golden <optimize.minimize_scalar-golden>`
786
+ - custom - a callable object (added in version 0.14.0), see below
787
+
788
+ Default is "Bounded" if bounds are provided and "Brent" otherwise.
789
+ See the 'Notes' section for details of each solver.
790
+
791
+ tol : float, optional
792
+ Tolerance for termination. For detailed control, use solver-specific
793
+ options.
794
+ options : dict, optional
795
+ A dictionary of solver options.
796
+
797
+ maxiter : int
798
+ Maximum number of iterations to perform.
799
+ disp : bool
800
+ Set to True to print convergence messages.
801
+
802
+ See :func:`show_options()` for solver-specific options.
803
+
804
+ Returns
805
+ -------
806
+ res : OptimizeResult
807
+ The optimization result represented as an ``OptimizeResult`` object.
808
+ Important attributes are: ``x`` the solution array, ``success`` a
809
+ Boolean flag indicating if the optimizer exited successfully and
810
+ ``message`` which describes the cause of the termination. See
811
+ `OptimizeResult` for a description of other attributes.
812
+
813
+ See also
814
+ --------
815
+ minimize : Interface to minimization algorithms for scalar multivariate
816
+ functions
817
+ show_options : Additional options accepted by the solvers
818
+
819
+ Notes
820
+ -----
821
+ This section describes the available solvers that can be selected by the
822
+ 'method' parameter. The default method is the ``"Bounded"`` Brent method if
823
+ `bounds` are passed and unbounded ``"Brent"`` otherwise.
824
+
825
+ Method :ref:`Brent <optimize.minimize_scalar-brent>` uses Brent's
826
+ algorithm [1]_ to find a local minimum. The algorithm uses inverse
827
+ parabolic interpolation when possible to speed up convergence of
828
+ the golden section method.
829
+
830
+ Method :ref:`Golden <optimize.minimize_scalar-golden>` uses the
831
+ golden section search technique [1]_. It uses an analog of the bisection
832
+ method to decrease the bracketed interval. It is usually
833
+ preferable to use the *Brent* method.
834
+
835
+ Method :ref:`Bounded <optimize.minimize_scalar-bounded>` can
836
+ perform bounded minimization [2]_ [3]_. It uses the Brent method to find a
837
+ local minimum in the interval x1 < xopt < x2.
838
+
839
+ Note that the Brent and Golden methods do not guarantee success unless a
840
+ valid ``bracket`` triple is provided. If a three-point bracket cannot be
841
+ found, consider `scipy.optimize.minimize`. Also, all methods are intended
842
+ only for local minimization. When the function of interest has more than
843
+ one local minimum, consider :ref:`global_optimization`.
844
+
845
+ **Custom minimizers**
846
+
847
+ It may be useful to pass a custom minimization method, for example
848
+ when using some library frontend to minimize_scalar. You can simply
849
+ pass a callable as the ``method`` parameter.
850
+
851
+ The callable is called as ``method(fun, args, **kwargs, **options)``
852
+ where ``kwargs`` corresponds to any other parameters passed to
853
+ `minimize_scalar` (such as `bracket`, `tol`, etc.), except the `options`
854
+ dict, whose contents are also passed to `method` as keyword arguments. The method
855
+ shall return an `OptimizeResult` object.
856
+
857
+ The provided `method` callable must be able to accept (and possibly ignore)
858
+ arbitrary parameters; the set of parameters accepted by `minimize` may
859
+ expand in future versions and then these parameters will be passed to
860
+ the method. You can find an example in the scipy.optimize tutorial.
861
+
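+ A minimal sketch of such a callable (the fixed-step halving search here
+ is purely illustrative, not a recommended algorithm):
+
+ >>> from scipy.optimize import OptimizeResult, minimize_scalar
+ >>> def custom_method(fun, args=(), bracket=None, bounds=None, tol=1e-6,
+ ...                   maxiter=100, **options):
+ ...     x, step = 0.0, 1.0
+ ...     for _ in range(maxiter):
+ ...         if fun(x + step, *args) < fun(x, *args):
+ ...             x += step
+ ...         elif fun(x - step, *args) < fun(x, *args):
+ ...             x -= step
+ ...         else:
+ ...             step /= 2
+ ...         if step < tol:
+ ...             break
+ ...     return OptimizeResult(x=x, fun=fun(x, *args), success=True)
+ >>> res = minimize_scalar(lambda x: (x - 1.5)**2, method=custom_method)
+ >>> res.x
+ 1.5
+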
862
+ .. versionadded:: 0.11.0
863
+
864
+ References
865
+ ----------
866
+ .. [1] Press, W., S.A. Teukolsky, W.T. Vetterling, and B.P. Flannery.
867
+ Numerical Recipes in C. Cambridge University Press.
868
+ .. [2] Forsythe, G.E., M. A. Malcolm, and C. B. Moler. "Computer Methods
869
+ for Mathematical Computations." Prentice-Hall Series in Automatic
870
+ Computation 259 (1977).
871
+ .. [3] Brent, Richard P. Algorithms for Minimization Without Derivatives.
872
+ Courier Corporation, 2013.
873
+
874
+ Examples
875
+ --------
876
+ Consider the problem of minimizing the following function.
877
+
878
+ >>> def f(x):
879
+ ... return (x - 2) * x * (x + 2)**2
880
+
881
+ Using the *Brent* method, we find the local minimum as:
882
+
883
+ >>> from scipy.optimize import minimize_scalar
884
+ >>> res = minimize_scalar(f)
885
+ >>> res.fun
886
+ -9.9149495908
887
+
888
+ The minimizer is:
889
+
890
+ >>> res.x
891
+ 1.28077640403
892
+
893
+ Using the *Bounded* method, we find a local minimum with specified
894
+ bounds as:
895
+
896
+ >>> res = minimize_scalar(f, bounds=(-3, -1), method='bounded')
897
+ >>> res.fun # minimum
898
+ 3.28365179850e-13
899
+ >>> res.x # minimizer
900
+ -2.0000002026
901
+
902
+ """
903
+ if not isinstance(args, tuple):
904
+ args = (args,)
905
+
906
+ if callable(method):
907
+ meth = "_custom"
908
+ elif method is None:
909
+ meth = 'brent' if bounds is None else 'bounded'
910
+ else:
911
+ meth = method.lower()
912
+ if options is None:
913
+ options = {}
914
+
915
+ if bounds is not None and meth in {'brent', 'golden'}:
916
+ message = f"Use of `bounds` is incompatible with 'method={method}'."
917
+ raise ValueError(message)
918
+
919
+ if tol is not None:
920
+ options = dict(options)
921
+ if meth == 'bounded' and 'xatol' not in options:
922
+ warn("Method 'bounded' does not support relative tolerance in x; "
923
+ "defaulting to absolute tolerance.",
924
+ RuntimeWarning, stacklevel=2)
925
+ options['xatol'] = tol
926
+ elif meth == '_custom':
927
+ options.setdefault('tol', tol)
928
+ else:
929
+ options.setdefault('xtol', tol)
930
+
931
+ # replace boolean "disp" option, if specified, by an integer value.
932
+ disp = options.get('disp')
933
+ if isinstance(disp, bool):
934
+ options['disp'] = 2 * int(disp)
935
+
936
+ if meth == '_custom':
937
+ res = method(fun, args=args, bracket=bracket, bounds=bounds, **options)
938
+ elif meth == 'brent':
939
+ res = _recover_from_bracket_error(_minimize_scalar_brent,
940
+ fun, bracket, args, **options)
941
+ elif meth == 'bounded':
942
+ if bounds is None:
943
+ raise ValueError('The `bounds` parameter is mandatory for '
944
+ 'method `bounded`.')
945
+ res = _minimize_scalar_bounded(fun, bounds, args, **options)
946
+ elif meth == 'golden':
947
+ res = _recover_from_bracket_error(_minimize_scalar_golden,
948
+ fun, bracket, args, **options)
949
+ else:
950
+ raise ValueError('Unknown solver %s' % method)
951
+
952
+ # gh-16196 reported inconsistencies in the output shape of `res.x`. While
953
+ # fixing this, future-proof it for when the function is vectorized:
954
+ # the shape of `res.x` should match that of `res.fun`.
955
+ res.fun = np.asarray(res.fun)[()]
956
+ res.x = np.reshape(res.x, res.fun.shape)[()]
957
+ return res
958
+
959
+
960
+ def _remove_from_bounds(bounds, i_fixed):
961
+ """Removes fixed variables from a `Bounds` instance"""
962
+ lb = bounds.lb[~i_fixed]
963
+ ub = bounds.ub[~i_fixed]
964
+ return Bounds(lb, ub) # don't mutate original Bounds object
965
+
966
+
967
+ def _remove_from_func(fun_in, i_fixed, x_fixed, min_dim=None, remove=0):
968
+ """Wraps a function such that fixed variables need not be passed in"""
969
+ def fun_out(x_in, *args, **kwargs):
970
+ x_out = np.zeros_like(i_fixed, dtype=x_in.dtype)
971
+ x_out[i_fixed] = x_fixed
972
+ x_out[~i_fixed] = x_in
973
+ y_out = fun_in(x_out, *args, **kwargs)
974
+ y_out = np.array(y_out)
975
+
976
+ if min_dim == 1:
977
+ y_out = np.atleast_1d(y_out)
978
+ elif min_dim == 2:
979
+ y_out = np.atleast_2d(y_out)
980
+
981
+ if remove == 1:
982
+ y_out = y_out[..., ~i_fixed]
983
+ elif remove == 2:
984
+ y_out = y_out[~i_fixed, ~i_fixed]
985
+
986
+ return y_out
987
+ return fun_out
988
+
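+ # Illustrative use of _remove_from_func (a sketch): with
+ # i_fixed = [False, True, False] and x_fixed = [2.0], wrapping
+ # f = lambda x: np.sum(x**2) yields a function of the two free variables
+ # only, so f_free(np.array([1.0, 3.0])) evaluates f([1., 2., 3.]) -> 14.0.
+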
989
+
990
+ def _add_to_array(x_in, i_fixed, x_fixed):
991
+ """Adds fixed variables back to an array"""
992
+ i_free = ~i_fixed
993
+ if x_in.ndim == 2:
994
+ i_free = i_free[:, None] @ i_free[None, :]
995
+ x_out = np.zeros_like(i_free, dtype=x_in.dtype)
996
+ x_out[~i_free] = x_fixed
997
+ x_out[i_free] = x_in.ravel()
998
+ return x_out
999
+
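+ # Illustrative round trip (a sketch): with boolean i_fixed = [False, True]
+ # and x_fixed = [5.0], _add_to_array(np.array([7.0]), i_fixed, x_fixed)
+ # reinserts the fixed value and returns array([7., 5.]).
+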
1000
+
1001
+ def _validate_bounds(bounds, x0, meth):
1002
+ """Check that bounds are valid."""
1003
+
1004
+ msg = "An upper bound is less than the corresponding lower bound."
1005
+ if np.any(bounds.ub < bounds.lb):
1006
+ raise ValueError(msg)
1007
+
1008
+ msg = "The number of bounds is not compatible with the length of `x0`."
1009
+ try:
1010
+ bounds.lb = np.broadcast_to(bounds.lb, x0.shape)
1011
+ bounds.ub = np.broadcast_to(bounds.ub, x0.shape)
1012
+ except Exception as e:
1013
+ raise ValueError(msg) from e
1014
+
1015
+ return bounds
1016
+
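+ # e.g. (illustrative) scalar-valued Bounds such as Bounds(0, 1) are
+ # broadcast here to x0.shape, giving every variable the same interval.
+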
1017
+ def standardize_bounds(bounds, x0, meth):
1018
+ """Converts bounds to the form required by the solver."""
1019
+ if meth in {'trust-constr', 'powell', 'nelder-mead', 'cobyla', 'new'}:
1020
+ if not isinstance(bounds, Bounds):
1021
+ lb, ub = old_bound_to_new(bounds)
1022
+ bounds = Bounds(lb, ub)
1023
+ elif meth in ('l-bfgs-b', 'tnc', 'slsqp', 'old'):
1024
+ if isinstance(bounds, Bounds):
1025
+ bounds = new_bounds_to_old(bounds.lb, bounds.ub, x0.shape[0])
1026
+ return bounds
1027
+
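+ # Illustrative conversion (a sketch): for an x0 of length 2,
+ # standardize_bounds(Bounds([0, 0], [1, 1]), x0, 'slsqp') returns the
+ # old-style sequence [(0, 1), (0, 1)], while 'trust-constr' (and 'new')
+ # keep a Bounds instance.
+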
1028
+
1029
+ def standardize_constraints(constraints, x0, meth):
1030
+ """Converts constraints to the form required by the solver."""
1031
+ all_constraint_types = (NonlinearConstraint, LinearConstraint, dict)
1032
+ new_constraint_types = all_constraint_types[:-1]
1033
+ if constraints is None:
1034
+ constraints = []
1035
+ elif isinstance(constraints, all_constraint_types):
1036
+ constraints = [constraints]
1037
+ else:
1038
+ constraints = list(constraints) # ensure it's a mutable sequence
1039
+
1040
+ if meth in ['trust-constr', 'new']:
1041
+ for i, con in enumerate(constraints):
1042
+ if not isinstance(con, new_constraint_types):
1043
+ constraints[i] = old_constraint_to_new(i, con)
1044
+ else:
1045
+ # iterate over copy, changing original
1046
+ for i, con in enumerate(list(constraints)):
1047
+ if isinstance(con, new_constraint_types):
1048
+ old_constraints = new_constraint_to_old(con, x0)
1049
+ constraints[i] = old_constraints[0]
1050
+ constraints.extend(old_constraints[1:]) # appends 1 if present
1051
+
1052
+ return constraints
1053
+
1054
+
1055
+ def _optimize_result_for_equal_bounds(
1056
+ fun, bounds, method, args=(), constraints=()
1057
+ ):
1058
+ """
1059
+ Provides a default OptimizeResult for when a bounded minimization method
1060
+ has (lb == ub).all().
1061
+
1062
+ Parameters
1063
+ ----------
1064
+ fun: callable
1065
+ bounds: Bounds
1066
+ method: str
1067
+ constraints: Constraint
1068
+ """
1069
+ success = True
1070
+ message = 'All independent variables were fixed by bounds.'
1071
+
1072
+ # bounds is new-style
1073
+ x0 = bounds.lb
1074
+
1075
+ if constraints:
1076
+ message = ("All independent variables were fixed by bounds at values"
1077
+ " that satisfy the constraints.")
1078
+ constraints = standardize_constraints(constraints, x0, 'new')
1079
+
1080
+ maxcv = 0
1081
+ for c in constraints:
1082
+ pc = PreparedConstraint(c, x0)
1083
+ violation = pc.violation(x0)
1084
+ if np.sum(violation):
1085
+ maxcv = max(maxcv, np.max(violation))
1086
+ success = False
1087
+ message = (f"All independent variables were fixed by bounds, but "
1088
+ f"the independent variables do not satisfy the "
1089
+ f"constraints exactly. (Maximum violation: {maxcv}).")
1090
+
1091
+ return OptimizeResult(
1092
+ x=x0, fun=fun(x0, *args), success=success, message=message, nfev=1,
1093
+ njev=0, nhev=0,
1094
+ )
venv/lib/python3.10/site-packages/scipy/optimize/_minpack.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (78.3 kB).
 
venv/lib/python3.10/site-packages/scipy/optimize/_minpack2.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (61 kB).
 
venv/lib/python3.10/site-packages/scipy/optimize/_minpack_py.py ADDED
@@ -0,0 +1,1157 @@
1
+ import warnings
2
+ from . import _minpack
3
+
4
+ import numpy as np
5
+ from numpy import (atleast_1d, triu, shape, transpose, zeros, prod, greater,
6
+ asarray, inf,
7
+ finfo, inexact, issubdtype, dtype)
8
+ from scipy import linalg
9
+ from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError
10
+ from scipy._lib._util import _asarray_validated, _lazywhere, _contains_nan
11
+ from scipy._lib._util import getfullargspec_no_self as _getfullargspec
12
+ from ._optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
13
+ from ._lsq import least_squares
14
+ # from ._lsq.common import make_strictly_feasible
15
+ from ._lsq.least_squares import prepare_bounds
16
+ from scipy.optimize._minimize import Bounds
17
+
18
+ # deprecated imports to be removed in SciPy 1.13.0
19
+ from numpy import dot, eye, take # noqa: F401
20
+ from numpy.linalg import inv # noqa: F401
21
+
22
+ error = _minpack.error
23
+
24
+ __all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']
25
+
26
+
27
+ def _check_func(checker, argname, thefunc, x0, args, numinputs,
28
+ output_shape=None):
29
+ res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
30
+ if (output_shape is not None) and (shape(res) != output_shape):
31
+ if (output_shape[0] != 1):
32
+ if len(output_shape) > 1:
33
+ if output_shape[1] == 1:
34
+ return shape(res)
35
+ msg = f"{checker}: there is a mismatch between the input and output " \
36
+ f"shape of the '{argname}' argument"
37
+ func_name = getattr(thefunc, '__name__', None)
38
+ if func_name:
39
+ msg += " '%s'." % func_name
40
+ else:
41
+ msg += "."
42
+ msg += f' Shape should be {output_shape} but it is {shape(res)}.'
43
+ raise TypeError(msg)
44
+ if issubdtype(res.dtype, inexact):
45
+ dt = res.dtype
46
+ else:
47
+ dt = dtype(float)
48
+ return shape(res), dt
49
+
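+ # Illustrative use (a sketch): for fsolve with n unknowns,
+ #     _check_func('fsolve', 'func', func, x0, args, n, (n,))
+ # evaluates func once at x0 and raises a TypeError if the output shape
+ # does not match (n,).
+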
50
+
51
+ def fsolve(func, x0, args=(), fprime=None, full_output=0,
52
+ col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
53
+ epsfcn=None, factor=100, diag=None):
54
+ """
55
+ Find the roots of a function.
56
+
57
+ Return the roots of the (non-linear) equations defined by
58
+ ``func(x) = 0`` given a starting estimate.
59
+
60
+ Parameters
61
+ ----------
62
+ func : callable ``f(x, *args)``
63
+ A function that takes at least one (possibly vector) argument,
64
+ and returns a value of the same length.
65
+ x0 : ndarray
66
+ The starting estimate for the roots of ``func(x) = 0``.
67
+ args : tuple, optional
68
+ Any extra arguments to `func`.
69
+ fprime : callable ``f(x, *args)``, optional
70
+ A function to compute the Jacobian of `func` with derivatives
71
+ across the rows. By default, the Jacobian will be estimated.
72
+ full_output : bool, optional
73
+ If True, return optional outputs.
74
+ col_deriv : bool, optional
75
+ Specify whether the Jacobian function computes derivatives down
76
+ the columns (faster, because there is no transpose operation).
77
+ xtol : float, optional
78
+ The calculation will terminate if the relative error between two
79
+ consecutive iterates is at most `xtol`.
80
+ maxfev : int, optional
81
+ The maximum number of calls to the function. If zero, then
82
+ ``100*(N+1)`` is the maximum where N is the number of elements
83
+ in `x0`.
84
+ band : tuple, optional
85
+ If set to a two-sequence containing the number of sub- and
86
+ super-diagonals within the band of the Jacobi matrix, the
87
+ Jacobi matrix is considered banded (only for ``fprime=None``).
88
+ epsfcn : float, optional
89
+ A suitable step length for the forward-difference
90
+ approximation of the Jacobian (for ``fprime=None``). If
91
+ `epsfcn` is less than the machine precision, it is assumed
92
+ that the relative errors in the functions are of the order of
93
+ the machine precision.
94
+ factor : float, optional
95
+ A parameter determining the initial step bound
96
+ (``factor * || diag * x||``). Should be in the interval
97
+ ``(0.1, 100)``.
98
+ diag : sequence, optional
99
+ N positive entries that serve as scale factors for the
100
+ variables.
101
+
102
+ Returns
103
+ -------
104
+ x : ndarray
105
+ The solution (or the result of the last iteration for
106
+ an unsuccessful call).
107
+ infodict : dict
108
+ A dictionary of optional outputs with the keys:
109
+
110
+ ``nfev``
111
+ number of function calls
112
+ ``njev``
113
+ number of Jacobian calls
114
+ ``fvec``
115
+ function evaluated at the output
116
+ ``fjac``
117
+ the orthogonal matrix, q, produced by the QR
118
+ factorization of the final approximate Jacobian
119
+ matrix, stored column wise
120
+ ``r``
121
+ upper triangular matrix produced by QR factorization
122
+ of the same matrix
123
+ ``qtf``
124
+ the vector ``(transpose(q) * fvec)``
125
+
126
+ ier : int
127
+ An integer flag. Set to 1 if a solution was found, otherwise refer
128
+ to `mesg` for more information.
129
+ mesg : str
130
+ If no solution is found, `mesg` details the cause of failure.
131
+
132
+ See Also
133
+ --------
134
+ root : Interface to root finding algorithms for multivariate
135
+ functions. See the ``method='hybr'`` in particular.
136
+
137
+ Notes
138
+ -----
139
+ ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.
140
+
141
+ Examples
142
+ --------
143
+ Find a solution to the system of equations:
144
+ ``x0*cos(x1) = 4, x1*x0 - x1 = 5``.
145
+
146
+ >>> import numpy as np
147
+ >>> from scipy.optimize import fsolve
148
+ >>> def func(x):
149
+ ... return [x[0] * np.cos(x[1]) - 4,
150
+ ... x[1] * x[0] - x[1] - 5]
151
+ >>> root = fsolve(func, [1, 1])
152
+ >>> root
153
+ array([6.50409711, 0.90841421])
154
+ >>> np.isclose(func(root), [0.0, 0.0]) # func(root) should be almost 0.0.
155
+ array([ True, True])
156
+
157
+ """
158
+ options = {'col_deriv': col_deriv,
159
+ 'xtol': xtol,
160
+ 'maxfev': maxfev,
161
+ 'band': band,
162
+ 'eps': epsfcn,
163
+ 'factor': factor,
164
+ 'diag': diag}
165
+
166
+ res = _root_hybr(func, x0, args, jac=fprime, **options)
167
+ if full_output:
168
+ x = res['x']
169
+ info = {k: res.get(k)
170
+ for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res}
171
+ info['fvec'] = res['fun']
172
+ return x, info, res['status'], res['message']
173
+ else:
174
+ status = res['status']
175
+ msg = res['message']
176
+ if status == 0:
177
+ raise TypeError(msg)
178
+ elif status == 1:
179
+ pass
180
+ elif status in [2, 3, 4, 5]:
181
+ warnings.warn(msg, RuntimeWarning, stacklevel=2)
182
+ else:
183
+ raise TypeError(msg)
184
+ return res['x']
185
+
186
+
187
+ def _root_hybr(func, x0, args=(), jac=None,
188
+ col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
189
+ factor=100, diag=None, **unknown_options):
190
+ """
191
+ Find the roots of a multivariate function using MINPACK's hybrd and
192
+ hybrj routines (modified Powell method).
193
+
194
+ Options
195
+ -------
196
+ col_deriv : bool
197
+ Specify whether the Jacobian function computes derivatives down
198
+ the columns (faster, because there is no transpose operation).
199
+ xtol : float
200
+ The calculation will terminate if the relative error between two
201
+ consecutive iterates is at most `xtol`.
202
+ maxfev : int
203
+ The maximum number of calls to the function. If zero, then
204
+ ``100*(N+1)`` is the maximum where N is the number of elements
205
+ in `x0`.
206
+ band : tuple
207
+ If set to a two-sequence containing the number of sub- and
208
+ super-diagonals within the band of the Jacobi matrix, the
209
+ Jacobi matrix is considered banded (only for ``fprime=None``).
210
+ eps : float
211
+ A suitable step length for the forward-difference
212
+ approximation of the Jacobian (for ``fprime=None``). If
213
+ `eps` is less than the machine precision, it is assumed
214
+ that the relative errors in the functions are of the order of
215
+ the machine precision.
216
+ factor : float
217
+ A parameter determining the initial step bound
218
+ (``factor * || diag * x||``). Should be in the interval
219
+ ``(0.1, 100)``.
220
+ diag : sequence
221
+ N positive entries that serve as scale factors for the
222
+ variables.
223
+
224
+ """
225
+ _check_unknown_options(unknown_options)
226
+ epsfcn = eps
227
+
228
+ x0 = asarray(x0).flatten()
229
+ n = len(x0)
230
+ if not isinstance(args, tuple):
231
+ args = (args,)
232
+ shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
233
+ if epsfcn is None:
234
+ epsfcn = finfo(dtype).eps
235
+ Dfun = jac
236
+ if Dfun is None:
237
+ if band is None:
238
+ ml, mu = -10, -10
239
+ else:
240
+ ml, mu = band[:2]
241
+ if maxfev == 0:
242
+ maxfev = 200 * (n + 1)
243
+ retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
244
+ ml, mu, epsfcn, factor, diag)
245
+ else:
246
+ _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
247
+ if (maxfev == 0):
248
+ maxfev = 100 * (n + 1)
249
+ retval = _minpack._hybrj(func, Dfun, x0, args, 1,
250
+ col_deriv, xtol, maxfev, factor, diag)
251
+
252
+ x, status = retval[0], retval[-1]
253
+
254
+ errors = {0: "Improper input parameters were entered.",
255
+ 1: "The solution converged.",
256
+ 2: "The number of calls to function has "
257
+ "reached maxfev = %d." % maxfev,
258
+ 3: "xtol=%f is too small, no further improvement "
259
+ "in the approximate\n solution "
260
+ "is possible." % xtol,
261
+ 4: "The iteration is not making good progress, as measured "
262
+ "by the \n improvement from the last five "
263
+ "Jacobian evaluations.",
264
+ 5: "The iteration is not making good progress, "
265
+ "as measured by the \n improvement from the last "
266
+ "ten iterations.",
267
+ 'unknown': "An error occurred."}
268
+
269
+ info = retval[1]
270
+ info['fun'] = info.pop('fvec')
271
+ sol = OptimizeResult(x=x, success=(status == 1), status=status,
272
+ method="hybr")
273
+ sol.update(info)
274
+ try:
275
+ sol['message'] = errors[status]
276
+ except KeyError:
277
+ sol['message'] = errors['unknown']
278
+
279
+ return sol
280
+
281
+
282
+ LEASTSQ_SUCCESS = [1, 2, 3, 4]
283
+ LEASTSQ_FAILURE = [5, 6, 7, 8]
284
+
285
+
286
+ def leastsq(func, x0, args=(), Dfun=None, full_output=False,
287
+ col_deriv=False, ftol=1.49012e-8, xtol=1.49012e-8,
288
+ gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
289
+ """
290
+ Minimize the sum of squares of a set of equations.
291
+
292
+ ::
293
+
294
+ x = arg min(sum(func(y)**2,axis=0))
295
+ y
296
+
297
+ Parameters
298
+ ----------
299
+ func : callable
300
+ Should take at least one (possibly length ``N`` vector) argument and
301
+ returns ``M`` floating point numbers. It must not return NaNs or
302
+ fitting might fail. ``M`` must be greater than or equal to ``N``.
303
+ x0 : ndarray
304
+ The starting estimate for the minimization.
305
+ args : tuple, optional
306
+ Any extra arguments to func are placed in this tuple.
307
+ Dfun : callable, optional
308
+ A function or method to compute the Jacobian of func with derivatives
309
+ across the rows. If this is None, the Jacobian will be estimated.
310
+ full_output : bool, optional
311
+ If ``True``, return all optional outputs (not just `x` and `ier`).
312
+ col_deriv : bool, optional
313
+ If ``True``, specify that the Jacobian function computes derivatives
314
+ down the columns (faster, because there is no transpose operation).
315
+ ftol : float, optional
316
+ Relative error desired in the sum of squares.
317
+ xtol : float, optional
318
+ Relative error desired in the approximate solution.
319
+ gtol : float, optional
320
+ Orthogonality desired between the function vector and the columns of
321
+ the Jacobian.
322
+ maxfev : int, optional
323
+ The maximum number of calls to the function. If `Dfun` is provided,
324
+ then the default `maxfev` is 100*(N+1) where N is the number of elements
325
+ in x0, otherwise the default `maxfev` is 200*(N+1).
326
+ epsfcn : float, optional
327
+ A variable used in determining a suitable step length for the forward-
328
+ difference approximation of the Jacobian (for Dfun=None).
329
+ Normally the actual step length will be sqrt(epsfcn)*x
330
+ If epsfcn is less than the machine precision, it is assumed that the
331
+ relative errors are of the order of the machine precision.
332
+ factor : float, optional
333
+ A parameter determining the initial step bound
334
+ (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
335
+ diag : sequence, optional
336
+ N positive entries that serve as a scale factors for the variables.
337
+
338
+ Returns
339
+ -------
340
+ x : ndarray
341
+ The solution (or the result of the last iteration for an unsuccessful
342
+ call).
343
+ cov_x : ndarray
344
+ The inverse of the Hessian. `fjac` and `ipvt` are used to construct an
345
+ estimate of the Hessian. A value of None indicates a singular matrix,
346
+ which means the curvature in parameters `x` is numerically flat. To
347
+ obtain the covariance matrix of the parameters `x`, `cov_x` must be
348
+ multiplied by the variance of the residuals -- see curve_fit. Only
349
+ returned if `full_output` is ``True``.
350
+ infodict : dict
351
+ a dictionary of optional outputs with the keys:
352
+
353
+ ``nfev``
354
+ The number of function calls
355
+ ``fvec``
356
+ The function evaluated at the output
357
+ ``fjac``
358
+ A permutation of the R matrix of a QR
359
+ factorization of the final approximate
360
+ Jacobian matrix, stored column wise.
361
+ Together with ipvt, the covariance of the
362
+ estimate can be approximated.
363
+ ``ipvt``
364
+ An integer array of length N which defines
365
+ a permutation matrix, p, such that
366
+ fjac*p = q*r, where r is upper triangular
367
+ with diagonal elements of nonincreasing
368
+ magnitude. Column j of p is column ipvt(j)
369
+ of the identity matrix.
370
+ ``qtf``
371
+ The vector (transpose(q) * fvec).
372
+
373
+ Only returned if `full_output` is ``True``.
374
+ mesg : str
375
+ A string message giving information about the cause of failure.
376
+ Only returned if `full_output` is ``True``.
377
+ ier : int
378
+ An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
379
+ found. Otherwise, the solution was not found. In either case, the
380
+ optional output variable 'mesg' gives more information.
381
+
382
+ See Also
383
+ --------
384
+ least_squares : Newer interface to solve nonlinear least-squares problems
385
+ with bounds on the variables. See ``method='lm'`` in particular.
386
+
387
+ Notes
388
+ -----
389
+ "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
390
+
391
+ cov_x is a Jacobian approximation to the Hessian of the least squares
392
+ objective function.
393
+ This approximation assumes that the objective function is based on the
394
+ difference between some observed target data (ydata) and a (non-linear)
395
+ function of the parameters `f(xdata, params)` ::
396
+
397
+ func(params) = ydata - f(xdata, params)
398
+
399
+ so that the objective function is ::
400
+
401
+ min sum((ydata - f(xdata, params))**2, axis=0)
402
+ params
403
+
404
+ The solution, `x`, is always a 1-D array, regardless of the shape of `x0`,
405
+ or whether `x0` is a scalar.
406
+
407
+ Examples
408
+ --------
409
+ >>> from scipy.optimize import leastsq
410
+ >>> def func(x):
411
+ ... return 2*(x-3)**2+1
412
+ >>> leastsq(func, 0)
413
+ (array([2.99999999]), 1)
414
+
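+ A data-fitting use in the residual form described in the Notes (a
+ sketch; the data are synthetic):
+
+ >>> import numpy as np
+ >>> xdata = np.linspace(0, 1, 20)
+ >>> ydata = 3.0 * xdata + 1.0
+ >>> def residuals(params):
+ ...     return ydata - (params[0] * xdata + params[1])
+ >>> x, ier = leastsq(residuals, [1.0, 0.0])
+ >>> np.round(x, 6)
+ array([3., 1.])
+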
415
+ """
416
+ x0 = asarray(x0).flatten()
417
+ n = len(x0)
418
+ if not isinstance(args, tuple):
419
+ args = (args,)
420
+ shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
421
+ m = shape[0]
422
+
423
+ if n > m:
424
+ raise TypeError(f"Improper input: func input vector length N={n} must"
425
+ f" not exceed func output vector length M={m}")
426
+
427
+ if epsfcn is None:
428
+ epsfcn = finfo(dtype).eps
429
+
430
+ if Dfun is None:
431
+ if maxfev == 0:
432
+ maxfev = 200*(n + 1)
433
+ retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
434
+ gtol, maxfev, epsfcn, factor, diag)
435
+ else:
436
+ if col_deriv:
437
+ _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
438
+ else:
439
+ _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
440
+ if maxfev == 0:
441
+ maxfev = 100 * (n + 1)
442
+ retval = _minpack._lmder(func, Dfun, x0, args, full_output,
443
+ col_deriv, ftol, xtol, gtol, maxfev,
444
+ factor, diag)
445
+
446
+ errors = {0: ["Improper input parameters.", TypeError],
447
+ 1: ["Both actual and predicted relative reductions "
448
+ "in the sum of squares\n are at most %f" % ftol, None],
449
+ 2: ["The relative error between two consecutive "
450
+ "iterates is at most %f" % xtol, None],
451
+ 3: ["Both actual and predicted relative reductions in "
452
+ f"the sum of squares\n are at most {ftol:f} and the "
453
+ "relative error between two consecutive "
454
+ f"iterates is at \n most {xtol:f}", None],
455
+ 4: ["The cosine of the angle between func(x) and any "
456
+ "column of the\n Jacobian is at most %f in "
457
+ "absolute value" % gtol, None],
458
+ 5: ["Number of calls to function has reached "
459
+ "maxfev = %d." % maxfev, ValueError],
460
+ 6: ["ftol=%f is too small, no further reduction "
461
+ "in the sum of squares\n is possible." % ftol,
462
+ ValueError],
463
+ 7: ["xtol=%f is too small, no further improvement in "
464
+ "the approximate\n solution is possible." % xtol,
465
+ ValueError],
466
+ 8: ["gtol=%f is too small, func(x) is orthogonal to the "
467
+ "columns of\n the Jacobian to machine "
468
+ "precision." % gtol, ValueError]}
469
+
470
+ # The FORTRAN return value (possible return values are >= 0 and <= 8)
471
+ info = retval[-1]
472
+
473
+ if full_output:
474
+ cov_x = None
475
+ if info in LEASTSQ_SUCCESS:
476
+ # This was
477
+ # perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
478
+ # r = triu(transpose(retval[1]['fjac'])[:n, :])
479
+ # R = dot(r, perm)
480
+ # cov_x = inv(dot(transpose(R), R))
481
+ # but the explicit dot product was not necessary and sometimes
482
+ # the result was not symmetric positive definite. See gh-4555.
483
+ perm = retval[1]['ipvt'] - 1
484
+ n = len(perm)
485
+ r = triu(transpose(retval[1]['fjac'])[:n, :])
486
+ inv_triu = linalg.get_lapack_funcs('trtri', (r,))
487
+ try:
488
+ # inverse of permuted matrix is a permutation of matrix inverse
489
+ invR, trtri_info = inv_triu(r) # default: upper, non-unit diag
490
+ if trtri_info != 0: # explicit comparison for readability
491
+ raise LinAlgError(f'trtri returned info {trtri_info}')
492
+ invR[perm] = invR.copy()
493
+ cov_x = invR @ invR.T
494
+ except (LinAlgError, ValueError):
495
+ pass
496
+ return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info)
497
+ else:
498
+ if info in LEASTSQ_FAILURE:
499
+ warnings.warn(errors[info][0], RuntimeWarning, stacklevel=2)
500
+ elif info == 0:
501
+ raise errors[info][1](errors[info][0])
502
+ return retval[0], info
503
+
504
+
505
+ def _lightweight_memoizer(f):
506
+ # very shallow memoization to address gh-13670: only remember the first set
507
+ # of parameters and corresponding function value, and only attempt to use
508
+ # them twice (the number of times the function is evaluated at x0).
509
+ def _memoized_func(params):
510
+ if _memoized_func.skip_lookup:
511
+ return f(params)
512
+
513
+ if np.all(_memoized_func.last_params == params):
514
+ return _memoized_func.last_val
515
+ elif _memoized_func.last_params is not None:
516
+ _memoized_func.skip_lookup = True
517
+
518
+ val = f(params)
519
+
520
+ if _memoized_func.last_params is None:
521
+ _memoized_func.last_params = np.copy(params)
522
+ _memoized_func.last_val = val
523
+
524
+ return val
525
+
526
+ _memoized_func.last_params = None
527
+ _memoized_func.last_val = None
528
+ _memoized_func.skip_lookup = False
529
+ return _memoized_func
530
+
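+ # Illustrative effect (a sketch): func_memo = _lightweight_memoizer(f)
+ # makes the two evaluations of the initial point x0 -- one for the
+ # residual and one when seeding the numerical Jacobian -- cost a single
+ # call to f.
+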
531
+
532
+ def _wrap_func(func, xdata, ydata, transform):
533
+ if transform is None:
534
+ def func_wrapped(params):
535
+ return func(xdata, *params) - ydata
536
+ elif transform.size == 1 or transform.ndim == 1:
537
+ def func_wrapped(params):
538
+ return transform * (func(xdata, *params) - ydata)
539
+ else:
540
+ # Chisq = (y - yd)^T C^{-1} (y-yd)
541
+ # transform = L such that C = L L^T
542
+ # C^{-1} = L^{-T} L^{-1}
543
+ # Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd)
544
+ # Define (y-yd)' = L^{-1} (y-yd)
545
+ # by solving
546
+ # L (y-yd)' = (y-yd)
547
+ # and minimize (y-yd)'^T (y-yd)'
548
+ def func_wrapped(params):
549
+ return solve_triangular(transform, func(xdata, *params) - ydata, lower=True)
550
+ return func_wrapped
551
+
552
+
553
+ def _wrap_jac(jac, xdata, transform):
554
+ if transform is None:
555
+ def jac_wrapped(params):
556
+ return jac(xdata, *params)
557
+ elif transform.ndim == 1:
558
+ def jac_wrapped(params):
559
+ return transform[:, np.newaxis] * np.asarray(jac(xdata, *params))
560
+ else:
561
+ def jac_wrapped(params):
562
+ return solve_triangular(transform,
563
+ np.asarray(jac(xdata, *params)),
564
+ lower=True)
565
+ return jac_wrapped
566
+
567
+
568
+ def _initialize_feasible(lb, ub):
569
+ p0 = np.ones_like(lb)
570
+ lb_finite = np.isfinite(lb)
571
+ ub_finite = np.isfinite(ub)
572
+
573
+ mask = lb_finite & ub_finite
574
+ p0[mask] = 0.5 * (lb[mask] + ub[mask])
575
+
576
+ mask = lb_finite & ~ub_finite
577
+ p0[mask] = lb[mask] + 1
578
+
579
+ mask = ~lb_finite & ub_finite
580
+ p0[mask] = ub[mask] - 1
581
+
582
+ return p0
583
+
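+ # Example (illustrative): lb = [0, -inf, -inf] and ub = [10, 5, inf]
+ # give p0 = [5, 4, 1]: the interval midpoint, ub - 1, and the default 1.
+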
584
+
585
+ def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
586
+ check_finite=None, bounds=(-np.inf, np.inf), method=None,
587
+ jac=None, *, full_output=False, nan_policy=None,
588
+ **kwargs):
589
+ """
590
+ Use non-linear least squares to fit a function, f, to data.
591
+
592
+ Assumes ``ydata = f(xdata, *params) + eps``.
593
+
594
+ Parameters
595
+ ----------
596
+ f : callable
597
+ The model function, f(x, ...). It must take the independent
598
+ variable as the first argument and the parameters to fit as
599
+ separate remaining arguments.
600
+ xdata : array_like
601
+ The independent variable where the data is measured.
602
+ Should usually be an M-length sequence or a (k,M)-shaped array for
603
+ functions with k predictors, and each element should be float
604
+ convertible if it is an array-like object.
605
+ ydata : array_like
606
+ The dependent data, a length M array - nominally ``f(xdata, ...)``.
607
+ p0 : array_like, optional
608
+ Initial guess for the parameters (length N). If None, then the
609
+ initial values will all be 1 (if the number of parameters for the
610
+ function can be determined using introspection, otherwise a
611
+ ValueError is raised).
612
+ sigma : None or scalar or M-length sequence or MxM array, optional
613
+ Determines the uncertainty in `ydata`. If we define residuals as
614
+ ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma`
615
+ depends on its number of dimensions:
616
+
617
+ - A scalar or 1-D `sigma` should contain values of standard deviations of
618
+ errors in `ydata`. In this case, the optimized function is
619
+ ``chisq = sum((r / sigma) ** 2)``.
620
+
621
+ - A 2-D `sigma` should contain the covariance matrix of
622
+ errors in `ydata`. In this case, the optimized function is
623
+ ``chisq = r.T @ inv(sigma) @ r``.
624
+
625
+ .. versionadded:: 0.19
626
+
627
+ None (default) is equivalent to a 1-D `sigma` filled with ones.
628
+ absolute_sigma : bool, optional
629
+ If True, `sigma` is used in an absolute sense and the estimated parameter
630
+ covariance `pcov` reflects these absolute values.
631
+
632
+ If False (default), only the relative magnitudes of the `sigma` values matter.
633
+ The returned parameter covariance matrix `pcov` is based on scaling
634
+ `sigma` by a constant factor. This constant is set by demanding that the
635
+ reduced `chisq` for the optimal parameters `popt` when using the
636
+ *scaled* `sigma` equals unity. In other words, `sigma` is scaled to
637
+ match the sample variance of the residuals after the fit. Default is False.
638
+ Mathematically,
639
+ ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)``
640
+ check_finite : bool, optional
641
+ If True, check that the input arrays do not contain nans or infs,
642
+ and raise a ValueError if they do. Setting this parameter to
643
+ False may silently produce nonsensical results if the input arrays
644
+ do contain nans. Default is True if `nan_policy` is not specified
645
+ explicitly and False otherwise.
646
+ bounds : 2-tuple of array_like or `Bounds`, optional
647
+ Lower and upper bounds on parameters. Defaults to no bounds.
648
+ There are two ways to specify the bounds:
649
+
650
+ - Instance of `Bounds` class.
651
+
652
+ - 2-tuple of array_like: Each element of the tuple must be either
653
+ an array with the length equal to the number of parameters, or a
654
+ scalar (in which case the bound is taken to be the same for all
655
+ parameters). Use ``np.inf`` with an appropriate sign to disable
656
+ bounds on all or some parameters.
657
+
658
+ method : {'lm', 'trf', 'dogbox'}, optional
659
+ Method to use for optimization. See `least_squares` for more details.
660
+ Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
661
+ provided. The method 'lm' won't work when the number of observations
662
+ is less than the number of variables, use 'trf' or 'dogbox' in this
663
+ case.
664
+
665
+ .. versionadded:: 0.17
666
+ jac : callable, string or None, optional
667
+ Function with signature ``jac(x, ...)`` which computes the Jacobian
668
+ matrix of the model function with respect to parameters as a dense
669
+ array_like structure. It will be scaled according to provided `sigma`.
670
+ If None (default), the Jacobian will be estimated numerically.
671
+ String keywords for 'trf' and 'dogbox' methods can be used to select
672
+ a finite difference scheme, see `least_squares`.
673
+
674
+ .. versionadded:: 0.18
675
+ full_output : boolean, optional
676
+ If True, this function returns additional information: `infodict`,
677
+ `mesg`, and `ier`.
678
+
679
+ .. versionadded:: 1.9
680
+ nan_policy : {'raise', 'omit', None}, optional
681
+ Defines how to handle input containing NaN.
682
+ The following options are available (default is None):
683
+
684
+ * 'raise': throws an error
685
+ * 'omit': performs the calculations ignoring nan values
686
+ * None: no special handling of NaNs is performed
687
+ (except what is done by check_finite); the behavior when NaNs
688
+ are present is implementation-dependent and may change.
689
+
690
+ Note that if this value is specified explicitly (not None),
691
+ `check_finite` will be set as False.
692
+
693
+ .. versionadded:: 1.11
694
+ **kwargs
695
+ Keyword arguments passed to `leastsq` for ``method='lm'`` or
696
+ `least_squares` otherwise.
697
+
698
+ Returns
699
+ -------
700
+ popt : array
701
+ Optimal values for the parameters so that the sum of the squared
702
+ residuals of ``f(xdata, *popt) - ydata`` is minimized.
703
+ pcov : 2-D array
704
+ The estimated approximate covariance of popt. The diagonals provide
705
+ the variance of the parameter estimate. To compute one standard
706
+ deviation errors on the parameters, use
707
+ ``perr = np.sqrt(np.diag(pcov))``. Note that the relationship between
708
+ `pcov` and parameter error estimates is derived based on a linear
709
+ approximation to the model function around the optimum [1].
710
+ When this approximation becomes inaccurate, `pcov` may not provide an
711
+ accurate measure of uncertainty.
712
+
713
+ How the `sigma` parameter affects the estimated covariance
714
+ depends on `absolute_sigma` argument, as described above.
715
+
716
+ If the Jacobian matrix at the solution doesn't have a full rank, then
717
+ 'lm' method returns a matrix filled with ``np.inf``, on the other hand
718
+ 'trf' and 'dogbox' methods use Moore-Penrose pseudoinverse to compute
719
+ the covariance matrix. Covariance matrices with large condition numbers
720
+ (e.g. computed with `numpy.linalg.cond`) may indicate that results are
721
+ unreliable.
722
+ infodict : dict (returned only if `full_output` is True)
723
+ a dictionary of optional outputs with the keys:
724
+
725
+ ``nfev``
726
+ The number of function calls. Methods 'trf' and 'dogbox' do not
727
+ count function calls for numerical Jacobian approximation,
728
+ as opposed to 'lm' method.
729
+ ``fvec``
730
+ The residual values evaluated at the solution, for a 1-D `sigma`
731
+ this is ``(f(x, *popt) - ydata)/sigma``.
732
+ ``fjac``
733
+ A permutation of the R matrix of a QR
734
+ factorization of the final approximate
735
+ Jacobian matrix, stored column wise.
736
+ Together with ipvt, the covariance of the
737
+ estimate can be approximated.
738
+ Method 'lm' only provides this information.
739
+ ``ipvt``
740
+ An integer array of length N which defines
741
+ a permutation matrix, p, such that
742
+ fjac*p = q*r, where r is upper triangular
743
+ with diagonal elements of nonincreasing
744
+ magnitude. Column j of p is column ipvt(j)
745
+ of the identity matrix.
746
+ Method 'lm' only provides this information.
747
+ ``qtf``
748
+ The vector (transpose(q) * fvec).
749
+ Method 'lm' only provides this information.
750
+
751
+ .. versionadded:: 1.9
752
+ mesg : str (returned only if `full_output` is True)
753
+ A string message giving information about the solution.
754
+
755
+ .. versionadded:: 1.9
756
+ ier : int (returned only if `full_output` is True)
757
+ An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
758
+ found. Otherwise, the solution was not found. In either case, the
759
+ optional output variable `mesg` gives more information.
760
+
761
+ .. versionadded:: 1.9
762
+
763
+ Raises
764
+ ------
765
+ ValueError
766
+ if either `ydata` or `xdata` contain NaNs, or if incompatible options
767
+ are used.
768
+
769
+ RuntimeError
770
+ if the least-squares minimization fails.
771
+
772
+ OptimizeWarning
773
+ if covariance of the parameters can not be estimated.
774
+
775
+ See Also
776
+ --------
777
+ least_squares : Minimize the sum of squares of nonlinear functions.
778
+ scipy.stats.linregress : Calculate a linear least squares regression for
779
+ two sets of measurements.
780
+
781
+ Notes
782
+ -----
783
+ Users should ensure that inputs `xdata`, `ydata`, and the output of `f`
784
+ are ``float64``, or else the optimization may return incorrect results.
785
+
786
+ With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm
787
+ through `leastsq`. Note that this algorithm can only deal with
788
+ unconstrained problems.
789
+
790
+ Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
791
+ the docstring of `least_squares` for more information.
792
+
793
+ Parameters to be fitted must have similar scale. Differences of multiple
794
+ orders of magnitude can lead to incorrect results. For the 'trf' and
795
+ 'dogbox' methods, the `x_scale` keyword argument can be used to scale
796
+ the parameters.
797
+
798
+ References
799
+ ----------
800
+ [1] K. Vugrin et al. Confidence region estimation techniques for nonlinear
801
+ regression in groundwater flow: Three case studies. Water Resources
802
+ Research, Vol. 43, W03423, :doi:`10.1029/2005WR004804`
803
+
804
+ Examples
805
+ --------
806
+ >>> import numpy as np
807
+ >>> import matplotlib.pyplot as plt
808
+ >>> from scipy.optimize import curve_fit
809
+
810
+ >>> def func(x, a, b, c):
811
+ ... return a * np.exp(-b * x) + c
812
+
813
+ Define the data to be fit with some noise:
814
+
815
+ >>> xdata = np.linspace(0, 4, 50)
816
+ >>> y = func(xdata, 2.5, 1.3, 0.5)
817
+ >>> rng = np.random.default_rng()
818
+ >>> y_noise = 0.2 * rng.normal(size=xdata.size)
819
+ >>> ydata = y + y_noise
820
+ >>> plt.plot(xdata, ydata, 'b-', label='data')
821
+
822
+ Fit for the parameters a, b, c of the function `func`:
823
+
824
+ >>> popt, pcov = curve_fit(func, xdata, ydata)
825
+ >>> popt
826
+ array([2.56274217, 1.37268521, 0.47427475])
827
+ >>> plt.plot(xdata, func(xdata, *popt), 'r-',
828
+ ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
829
+
830
+ Constrain the optimization to the region of ``0 <= a <= 3``,
831
+ ``0 <= b <= 1`` and ``0 <= c <= 0.5``:
832
+
833
+ >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5]))
834
+ >>> popt
835
+ array([2.43736712, 1. , 0.34463856])
836
+ >>> plt.plot(xdata, func(xdata, *popt), 'g--',
837
+ ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
838
+
839
+ >>> plt.xlabel('x')
840
+ >>> plt.ylabel('y')
841
+ >>> plt.legend()
842
+ >>> plt.show()
843
+
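+ The `sigma` parameter weights the residuals as described above. A short
+ sketch with (assumed) known standard deviations for the synthetic data:
+
+ >>> sigma = np.full_like(ydata, 0.2)
+ >>> popt, pcov = curve_fit(func, xdata, ydata, sigma=sigma,
+ ...                        absolute_sigma=True)
+ >>> perr = np.sqrt(np.diag(pcov))  # one-sigma parameter uncertainties
+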
844
+ For reliable results, the model `func` should not be overparameterized;
845
+ redundant parameters can cause unreliable covariance matrices and, in some
846
+ cases, poorer quality fits. As a quick check of whether the model may be
847
+ overparameterized, calculate the condition number of the covariance matrix:
848
+
849
+ >>> np.linalg.cond(pcov)
850
+ 34.571092161547405 # may vary
851
+
852
+ The value is small, so it does not raise much concern. If, however, we were
853
+ to add a fourth parameter ``d`` to `func` with the same effect as ``a``:
854
+
855
+ >>> def func2(x, a, b, c, d):
856
+ ... return a * d * np.exp(-b * x) + c # a and d are redundant
857
+ >>> popt, pcov = curve_fit(func2, xdata, ydata)
858
+ >>> np.linalg.cond(pcov)
859
+ 1.13250718925596e+32 # may vary
860
+
861
+ Such a large value is cause for concern. The diagonal elements of the
862
+ covariance matrix, which are related to the uncertainty of the fit, give more
863
+ information:
864
+
865
+ >>> np.diag(pcov)
866
+ array([1.48814742e+29, 3.78596560e-02, 5.39253738e-03, 2.76417220e+28]) # may vary
867
+
868
+ Note that the first and last terms are much larger than the other elements,
869
+ suggesting that the optimal values of these parameters are ambiguous and
870
+ that only one of these parameters is needed in the model.
871
+
872
+ If the optimal parameters of `f` differ by multiple orders of magnitude, the
873
+ resulting fit can be inaccurate. Sometimes, `curve_fit` can fail to find any
874
+ results:
875
+
876
+ >>> ydata = func(xdata, 500000, 0.01, 15)
877
+ >>> try:
878
+ ... popt, pcov = curve_fit(func, xdata, ydata, method = 'trf')
879
+ ... except RuntimeError as e:
880
+ ... print(e)
881
+ Optimal parameters not found: The maximum number of function evaluations is exceeded.
882
+
883
+ If the parameter scale is roughly known beforehand, it can be specified
884
+ via the `x_scale` argument:
885
+
886
+ >>> popt, pcov = curve_fit(func, xdata, ydata, method='trf',
887
+ ... x_scale=[1000, 1, 1])
888
+ >>> popt
889
+ array([5.00000000e+05, 1.00000000e-02, 1.49999999e+01])
890
+ """
891
+ if p0 is None:
892
+ # determine number of parameters by inspecting the function
893
+ sig = _getfullargspec(f)
894
+ args = sig.args
895
+ if len(args) < 2:
896
+ raise ValueError("Unable to determine number of fit parameters.")
897
+ n = len(args) - 1
898
+ else:
899
+ p0 = np.atleast_1d(p0)
900
+ n = p0.size
901
+
902
+ if isinstance(bounds, Bounds):
903
+ lb, ub = bounds.lb, bounds.ub
904
+ else:
905
+ lb, ub = prepare_bounds(bounds, n)
906
+ if p0 is None:
907
+ p0 = _initialize_feasible(lb, ub)
908
+
909
+ bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
910
+ if method is None:
911
+ if bounded_problem:
912
+ method = 'trf'
913
+ else:
914
+ method = 'lm'
915
+
916
+ if method == 'lm' and bounded_problem:
917
+ raise ValueError("Method 'lm' only works for unconstrained problems. "
918
+ "Use 'trf' or 'dogbox' instead.")
919
+
920
+ if check_finite is None:
921
+ check_finite = nan_policy is None
922
+
923
+ # optimization may produce garbage for float32 inputs, cast them to float64
924
+ if check_finite:
925
+ ydata = np.asarray_chkfinite(ydata, float)
926
+ else:
927
+ ydata = np.asarray(ydata, float)
928
+
929
+ if isinstance(xdata, (list, tuple, np.ndarray)):
930
+ # `xdata` is passed straight to the user-defined `f`, so allow
931
+ # non-array_like `xdata`.
932
+ if check_finite:
933
+ xdata = np.asarray_chkfinite(xdata, float)
934
+ else:
935
+ xdata = np.asarray(xdata, float)
936
+
937
+ if ydata.size == 0:
938
+ raise ValueError("`ydata` must not be empty!")
939
+
940
+ # nan handling is needed only if check_finite is False because if True,
941
+ # the x-y data are already checked, and they don't contain nans.
942
+ if not check_finite and nan_policy is not None:
943
+ if nan_policy == "propagate":
944
+ raise ValueError("`nan_policy='propagate'` is not supported "
945
+ "by this function.")
946
+
947
+ policies = [None, 'raise', 'omit']
948
+ x_contains_nan, nan_policy = _contains_nan(xdata, nan_policy,
949
+ policies=policies)
950
+ y_contains_nan, nan_policy = _contains_nan(ydata, nan_policy,
951
+ policies=policies)
952
+
953
+ if (x_contains_nan or y_contains_nan) and nan_policy == 'omit':
954
+ # ignore NaNs for N dimensional arrays
955
+ has_nan = np.isnan(xdata)
956
+ has_nan = has_nan.any(axis=tuple(range(has_nan.ndim-1)))
957
+ has_nan |= np.isnan(ydata)
958
+
959
+ xdata = xdata[..., ~has_nan]
960
+ ydata = ydata[~has_nan]
961
+
962
+ # Determine type of sigma
963
+ if sigma is not None:
964
+ sigma = np.asarray(sigma)
965
+
966
+ # if 1-D or a scalar, sigma are errors, define transform = 1/sigma
967
+ if sigma.size == 1 or sigma.shape == (ydata.size, ):
968
+ transform = 1.0 / sigma
969
+ # if 2-D, sigma is the covariance matrix,
970
+ # define transform = L such that L L^T = C
971
+ elif sigma.shape == (ydata.size, ydata.size):
972
+ try:
973
+ # scipy.linalg.cholesky requires lower=True to return L L^T = A
974
+ transform = cholesky(sigma, lower=True)
975
+ except LinAlgError as e:
976
+ raise ValueError("`sigma` must be positive definite.") from e
977
+ else:
978
+ raise ValueError("`sigma` has incorrect shape.")
979
+ else:
980
+ transform = None
981
+
982
+ func = _lightweight_memoizer(_wrap_func(f, xdata, ydata, transform))
983
+
984
+ if callable(jac):
985
+ jac = _lightweight_memoizer(_wrap_jac(jac, xdata, transform))
986
+ elif jac is None and method != 'lm':
987
+ jac = '2-point'
988
+
989
+ if 'args' in kwargs:
990
+ # The specification for the model function `f` does not support
991
+ # additional arguments. Refer to the `curve_fit` docstring for
992
+ # acceptable call signatures of `f`.
993
+ raise ValueError("'args' is not a supported keyword argument.")
994
+
995
+ if method == 'lm':
996
+ # if ydata.size == 1, this might be used for broadcast.
997
+ if ydata.size != 1 and n > ydata.size:
998
+ raise TypeError(f"The number of func parameters={n} must not"
999
+ f" exceed the number of data points={ydata.size}")
1000
+ res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
1001
+ popt, pcov, infodict, errmsg, ier = res
1002
+ ysize = len(infodict['fvec'])
1003
+ cost = np.sum(infodict['fvec'] ** 2)
1004
+ if ier not in [1, 2, 3, 4]:
1005
+ raise RuntimeError("Optimal parameters not found: " + errmsg)
1006
+ else:
1007
+ # Rename maxfev (leastsq) to max_nfev (least_squares), if specified.
1008
+ if 'max_nfev' not in kwargs:
1009
+ kwargs['max_nfev'] = kwargs.pop('maxfev', None)
1010
+
1011
+ res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
1012
+ **kwargs)
1013
+
1014
+ if not res.success:
1015
+ raise RuntimeError("Optimal parameters not found: " + res.message)
1016
+
1017
+ infodict = dict(nfev=res.nfev, fvec=res.fun)
1018
+ ier = res.status
1019
+ errmsg = res.message
1020
+
1021
+ ysize = len(res.fun)
1022
+ cost = 2 * res.cost # res.cost is half sum of squares!
1023
+ popt = res.x
1024
+
1025
+ # Do Moore-Penrose inverse discarding zero singular values.
1026
+ _, s, VT = svd(res.jac, full_matrices=False)
1027
+ threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
1028
+ s = s[s > threshold]
1029
+ VT = VT[:s.size]
1030
+ pcov = np.dot(VT.T / s**2, VT)
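+ # i.e. pcov = (J^T J)^{-1} = V S^{-2} V^T from the SVD J = U S V^T,
+ # with singular values at or below `threshold` treated as zero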
1031
+
1032
+ warn_cov = False
1033
+ if pcov is None or np.isnan(pcov).any():
1034
+ # indeterminate covariance
1035
+ pcov = zeros((len(popt), len(popt)), dtype=float)
1036
+ pcov.fill(inf)
1037
+ warn_cov = True
1038
+ elif not absolute_sigma:
1039
+ if ysize > p0.size:
1040
+ s_sq = cost / (ysize - p0.size)
1041
+ pcov = pcov * s_sq
1042
+ else:
1043
+ pcov.fill(inf)
1044
+ warn_cov = True
1045
+
1046
+ if warn_cov:
1047
+ warnings.warn('Covariance of the parameters could not be estimated',
1048
+ category=OptimizeWarning, stacklevel=2)
1049
+
1050
+ if full_output:
1051
+ return popt, pcov, infodict, errmsg, ier
1052
+ else:
1053
+ return popt, pcov
1054
+
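+ def _curve_fit_sigma_demo():
+     # Illustrative sketch, not part of SciPy's API: when `sigma` is a 2-D
+     # covariance matrix, `curve_fit` whitens the residuals with its Cholesky
+     # factor, as implemented above. All names and values here are made up.
+     def line(x, a, b):
+         return a * x + b
+     x = np.linspace(0, 1, 5)
+     y = line(x, 2.0, -1.0)
+     cov = 0.01 * np.eye(5)  # independent errors with standard deviation 0.1
+     popt, pcov = curve_fit(line, x, y, sigma=cov, absolute_sigma=True)
+     return popt, pcov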
1055
+
1056
+ def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
1057
+ """Perform a simple check on the gradient for correctness.
1058
+
1059
+ """
1060
+
1061
+ x = atleast_1d(x0)
1062
+ n = len(x)
1063
+ x = x.reshape((n,))
1064
+ fvec = atleast_1d(fcn(x, *args))
1065
+ m = len(fvec)
1066
+ fvec = fvec.reshape((m,))
1067
+ ldfjac = m
1068
+ fjac = atleast_1d(Dfcn(x, *args))
1069
+ fjac = fjac.reshape((m, n))
1070
+ if col_deriv == 0:
1071
+ fjac = transpose(fjac)
1072
+
1073
+ xp = zeros((n,), float)
1074
+ err = zeros((m,), float)
1075
+ fvecp = None
1076
+ _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)
1077
+
1078
+ fvecp = atleast_1d(fcn(xp, *args))
1079
+ fvecp = fvecp.reshape((m,))
1080
+ _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)
1081
+
1082
+ good = (prod(greater(err, 0.5), axis=0))
1083
+
1084
+ return (good, err)
1085
+
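+ def _check_gradient_demo():
+     # Minimal usage sketch for the check above; the test function and its
+     # analytic Jacobian are illustrative. `err` entries near 1 indicate
+     # agreement between `Dfcn` and the function values at a nearby point.
+     def fcn(x):
+         return np.array([x[0]**2 + x[1], np.sin(x[0])])
+     def Dfcn(x):
+         return np.array([[2.0 * x[0], 1.0], [np.cos(x[0]), 0.0]])
+     return check_gradient(fcn, Dfcn, np.array([1.0, 2.0]))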
1086
+
1087
+ def _del2(p0, p1, d):
1088
+ return p0 - np.square(p1 - p0) / d
1089
+
1090
+
1091
+ def _relerr(actual, desired):
1092
+ return (actual - desired) / desired
1093
+
1094
+
1095
+ def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel):
1096
+ p0 = x0
1097
+ for i in range(maxiter):
1098
+ p1 = func(p0, *args)
1099
+ if use_accel:
1100
+ p2 = func(p1, *args)
1101
+ d = p2 - 2.0 * p1 + p0
1102
+ p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2)
1103
+ else:
1104
+ p = p1
1105
+ relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p)
1106
+ if np.all(np.abs(relerr) < xtol):
1107
+ return p
1108
+ p0 = p
1109
+ msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
1110
+ raise RuntimeError(msg)
1111
+
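+ # Note on the accelerated branch above: given iterates p0, p1 = func(p0)
+ # and p2 = func(p1), `_del2` computes the Aitken Del^2 (Steffensen)
+ # estimate p = p0 - (p1 - p0)**2 / (p2 - 2*p1 + p0), which typically
+ # converges faster than plain fixed-point iteration.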
1112
+
1113
+ def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'):
1114
+ """
1115
+ Find a fixed point of the function.
1116
+
1117
+ Given a function of one or more variables and a starting point, find a
1118
+ fixed point of the function: i.e., where ``func(x0) == x0``.
1119
+
1120
+ Parameters
1121
+ ----------
1122
+ func : function
1123
+ Function to evaluate.
1124
+ x0 : array_like
1125
+ Fixed point of function.
1126
+ args : tuple, optional
1127
+ Extra arguments to `func`.
1128
+ xtol : float, optional
1129
+ Convergence tolerance, defaults to 1e-08.
1130
+ maxiter : int, optional
1131
+ Maximum number of iterations, defaults to 500.
1132
+ method : {"del2", "iteration"}, optional
1133
+ Method of finding the fixed-point, defaults to "del2",
1134
+ which uses Steffensen's Method with Aitken's ``Del^2``
1135
+ convergence acceleration [1]_. The "iteration" method simply iterates
1136
+ the function until convergence is detected, without attempting to
1137
+ accelerate the convergence.
1138
+
1139
+ References
1140
+ ----------
1141
+ .. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80
1142
+
1143
+ Examples
1144
+ --------
1145
+ >>> import numpy as np
1146
+ >>> from scipy import optimize
1147
+ >>> def func(x, c1, c2):
1148
+ ... return np.sqrt(c1/(x+c2))
1149
+ >>> c1 = np.array([10,12.])
1150
+ >>> c2 = np.array([3, 5.])
1151
+ >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2))
1152
+ array([ 1.4920333 , 1.37228132])
1153
+
1154
+ """
1155
+ use_accel = {'del2': True, 'iteration': False}[method]
1156
+ x0 = _asarray_validated(x0, as_inexact=True)
1157
+ return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)
venv/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (152 kB). View file
 
venv/lib/python3.10/site-packages/scipy/optimize/_nnls.py ADDED
@@ -0,0 +1,164 @@
1
+ import numpy as np
2
+ from scipy.linalg import solve, LinAlgWarning
3
+ import warnings
4
+
5
+ __all__ = ['nnls']
6
+
7
+
8
+ def nnls(A, b, maxiter=None, *, atol=None):
9
+ """
10
+ Solve ``argmin_x || Ax - b ||_2`` for ``x>=0``.
11
+
12
+ This problem, often called nonnegative least squares (NNLS), is a convex
13
+ optimization problem with convex constraints. It typically arises when
14
+ ``x`` models quantities for which only nonnegative values are
15
+ attainable: weights of ingredients, component costs, and so on.
16
+
17
+ Parameters
18
+ ----------
19
+ A : (m, n) ndarray
20
+ Coefficient array
21
+ b : (m,) ndarray, float
22
+ Right-hand side vector.
23
+ maxiter: int, optional
24
+ Maximum number of iterations, optional. Default value is ``3 * n``.
25
+ atol: float
26
+ Tolerance value used in the algorithm to assess closeness to zero in
27
+ the projected residual ``A.T @ (A @ x - b)`` entries. Increasing this
28
+ value relaxes the solution constraints. A typical relaxation value can
29
+ be selected as ``max(m, n) * np.linalg.norm(A, 1) * np.spacing(1.)``.
30
+ This value is not set by default since the norm operation becomes
31
+ expensive for large problems, hence it should only be used when necessary.
32
+
33
+ Returns
34
+ -------
35
+ x : ndarray
36
+ Solution vector.
37
+ rnorm : float
38
+ The 2-norm of the residual, ``|| Ax-b ||_2``.
39
+
40
+ See Also
41
+ --------
42
+ lsq_linear : Linear least squares with bounds on the variables
43
+
44
+ Notes
45
+ -----
46
+ The code is based on [2]_ which is an improved version of the classical
47
+ algorithm of [1]_. It utilizes an active set method and solves the KKT
48
+ (Karush-Kuhn-Tucker) conditions for the non-negative least squares problem.
49
+
50
+ References
51
+ ----------
52
+ .. [1] : Lawson C., Hanson R.J., "Solving Least Squares Problems", SIAM,
53
+ 1995, :doi:`10.1137/1.9781611971217`
54
+ .. [2] : Bro, Rasmus and de Jong, Sijmen, "A Fast Non-Negativity-
55
+ Constrained Least Squares Algorithm", Journal Of Chemometrics, 1997,
56
+ :doi:`10.1002/(SICI)1099-128X(199709/10)11:5<393::AID-CEM483>3.0.CO;2-L`
57
+
58
+ Examples
59
+ --------
60
+ >>> import numpy as np
61
+ >>> from scipy.optimize import nnls
62
+ ...
63
+ >>> A = np.array([[1, 0], [1, 0], [0, 1]])
64
+ >>> b = np.array([2, 1, 1])
65
+ >>> nnls(A, b)
66
+ (array([1.5, 1. ]), 0.7071067811865475)
67
+
68
+ >>> b = np.array([-1, -1, -1])
69
+ >>> nnls(A, b)
70
+ (array([0., 0.]), 1.7320508075688772)
71
+
72
+ """
73
+
74
+ A = np.asarray_chkfinite(A)
75
+ b = np.asarray_chkfinite(b)
76
+
77
+ if len(A.shape) != 2:
78
+ raise ValueError("Expected a two-dimensional array (matrix)" +
79
+ f", but the shape of A is {A.shape}")
80
+ if len(b.shape) != 1:
81
+ raise ValueError("Expected a one-dimensional array (vector)" +
82
+ f", but the shape of b is {b.shape}")
83
+
84
+ m, n = A.shape
85
+
86
+ if m != b.shape[0]:
87
+ raise ValueError(
88
+ "Incompatible dimensions. The first dimension of " +
89
+ f"A is {m}, while the shape of b is {(b.shape[0], )}")
90
+
91
+ x, rnorm, mode = _nnls(A, b, maxiter, tol=atol)
92
+ if mode != 1:
93
+ raise RuntimeError("Maximum number of iterations reached.")
94
+
95
+ return x, rnorm
96
+
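+ def _nnls_atol_demo():
+     # Hedged usage sketch: relaxing `atol` along the lines suggested in
+     # the docstring above. The data here are illustrative.
+     rng = np.random.default_rng(0)
+     A = rng.standard_normal((50, 10))
+     b = A @ np.abs(rng.standard_normal(10))  # consistent nonnegative solution
+     atol = max(A.shape) * np.linalg.norm(A, 1) * np.spacing(1.)
+     return nnls(A, b, atol=atol)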
97
+
98
+ def _nnls(A, b, maxiter=None, tol=None):
99
+ """
100
+ This is a single RHS algorithm from ref [2] above. For multiple RHS
101
+ support, the algorithm is given in :doi:`10.1002/cem.889`
102
+ """
103
+ m, n = A.shape
104
+
105
+ AtA = A.T @ A
106
+ Atb = b @ A # Result is 1D - let NumPy figure it out
107
+
108
+ if not maxiter:
109
+ maxiter = 3*n
110
+ if tol is None:
111
+ tol = 10 * max(m, n) * np.spacing(1.)
112
+
113
+ # Initialize vars
114
+ x = np.zeros(n, dtype=np.float64)
115
+ s = np.zeros(n, dtype=np.float64)
116
+ # Inactive constraint switches
117
+ P = np.zeros(n, dtype=bool)
118
+
119
+ # Projected residual
120
+ w = Atb.copy().astype(np.float64) # x=0. Skip (-AtA @ x) term
121
+
122
+ # Overall iteration counter
123
+ # Outer loop is not counted, inner iter is counted across outer spins
124
+ iter = 0
125
+
126
+ while (not P.all()) and (w[~P] > tol).any(): # B
127
+ # Get the "most" active coeff index and move to inactive set
128
+ k = np.argmax(w * (~P)) # B.2
129
+ P[k] = True # B.3
130
+
131
+ # Iteration solution
132
+ s[:] = 0.
133
+ # B.4
134
+ with warnings.catch_warnings():
135
+ warnings.filterwarnings('ignore', message='Ill-conditioned matrix',
136
+ category=LinAlgWarning)
137
+ s[P] = solve(AtA[np.ix_(P, P)], Atb[P], assume_a='sym', check_finite=False)
138
+
139
+ # Inner loop
140
+ while (iter < maxiter) and (s[P].min() < 0): # C.1
141
+ iter += 1
142
+ inds = P * (s < 0)
143
+ alpha = (x[inds] / (x[inds] - s[inds])).min() # C.2
144
+ x *= (1 - alpha)
145
+ x += alpha*s
146
+ P[x <= tol] = False
147
+ with warnings.catch_warnings():
148
+ warnings.filterwarnings('ignore', message='Ill-conditioned matrix',
149
+ category=LinAlgWarning)
150
+ s[P] = solve(AtA[np.ix_(P, P)], Atb[P], assume_a='sym',
151
+ check_finite=False)
152
+ s[~P] = 0 # C.6
153
+
154
+ x[:] = s[:]
155
+ w[:] = Atb - AtA @ x
156
+
157
+ if iter == maxiter:
158
+ # Typically the following line should return
159
+ # return x, np.linalg.norm(A@x - b), -1
160
+ # however, at the top level -1 raises an exception, so computing the
161
+ # norm would be wasted effort. Instead, return the dummy value 0.
162
+ return x, 0., -1
163
+
164
+ return x, np.linalg.norm(A@x - b), 1
venv/lib/python3.10/site-packages/scipy/optimize/_nonlin.py ADDED
@@ -0,0 +1,1584 @@
1
+ # Copyright (C) 2009, Pauli Virtanen <[email protected]>
2
+ # Distributed under the same license as SciPy.
3
+
4
+ import inspect
5
+ import sys
6
+ import warnings
7
+
8
+ import numpy as np
9
+ from numpy import asarray, dot, vdot
10
+
11
+ from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError
12
+ import scipy.sparse.linalg
13
+ import scipy.sparse
14
+ from scipy.linalg import get_blas_funcs
15
+ from scipy._lib._util import getfullargspec_no_self as _getfullargspec
16
+ from ._linesearch import scalar_search_wolfe1, scalar_search_armijo
17
+
18
+
19
+ __all__ = [
20
+ 'broyden1', 'broyden2', 'anderson', 'linearmixing',
21
+ 'diagbroyden', 'excitingmixing', 'newton_krylov',
22
+ 'BroydenFirst', 'KrylovJacobian', 'InverseJacobian', 'NoConvergence']
23
+
24
+ #------------------------------------------------------------------------------
25
+ # Utility functions
26
+ #------------------------------------------------------------------------------
27
+
28
+
29
+ class NoConvergence(Exception):
30
+ """Exception raised when nonlinear solver fails to converge within the specified
31
+ `maxiter`."""
32
+ pass
33
+
34
+
35
+ def maxnorm(x):
36
+ return np.absolute(x).max()
37
+
38
+
39
+ def _as_inexact(x):
40
+ """Return `x` as an array, of either floats or complex floats"""
41
+ x = asarray(x)
42
+ if not np.issubdtype(x.dtype, np.inexact):
43
+ return asarray(x, dtype=np.float64)
44
+ return x
45
+
46
+
47
+ def _array_like(x, x0):
48
+ """Return ndarray `x` as same array subclass and shape as `x0`"""
49
+ x = np.reshape(x, np.shape(x0))
50
+ wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)
51
+ return wrap(x)
52
+
53
+
54
+ def _safe_norm(v):
55
+ if not np.isfinite(v).all():
56
+ return np.array(np.inf)
57
+ return norm(v)
58
+
59
+ #------------------------------------------------------------------------------
60
+ # Generic nonlinear solver machinery
61
+ #------------------------------------------------------------------------------
62
+
63
+
64
+ _doc_parts = dict(
65
+ params_basic="""
66
+ F : function(x) -> f
67
+ Function whose root to find; should take and return an array-like
68
+ object.
69
+ xin : array_like
70
+ Initial guess for the solution
71
+ """.strip(),
72
+ params_extra="""
73
+ iter : int, optional
74
+ Number of iterations to make. If omitted (default), make as many
75
+ as required to meet tolerances.
76
+ verbose : bool, optional
77
+ Print status to stdout on every iteration.
78
+ maxiter : int, optional
79
+ Maximum number of iterations to make. If more are needed to
80
+ meet convergence, `NoConvergence` is raised.
81
+ f_tol : float, optional
82
+ Absolute tolerance (in max-norm) for the residual.
83
+ If omitted, default is 6e-6.
84
+ f_rtol : float, optional
85
+ Relative tolerance for the residual. If omitted, not used.
86
+ x_tol : float, optional
87
+ Absolute minimum step size, as determined from the Jacobian
88
+ approximation. If the step size is smaller than this, optimization
89
+ is terminated as successful. If omitted, not used.
90
+ x_rtol : float, optional
91
+ Relative minimum step size. If omitted, not used.
92
+ tol_norm : function(vector) -> scalar, optional
93
+ Norm to use in convergence check. Default is the maximum norm.
94
+ line_search : {None, 'armijo' (default), 'wolfe'}, optional
95
+ Which type of a line search to use to determine the step size in the
96
+ direction given by the Jacobian approximation. Defaults to 'armijo'.
97
+ callback : function, optional
98
+ Optional callback function. It is called on every iteration as
99
+ ``callback(x, f)`` where `x` is the current solution and `f`
100
+ the corresponding residual.
101
+
102
+ Returns
103
+ -------
104
+ sol : ndarray
105
+ An array (of similar array type as `x0`) containing the final solution.
106
+
107
+ Raises
108
+ ------
109
+ NoConvergence
110
+ When a solution was not found.
111
+
112
+ """.strip()
113
+ )
114
+
115
+
116
+ def _set_doc(obj):
117
+ if obj.__doc__:
118
+ obj.__doc__ = obj.__doc__ % _doc_parts
119
+
120
+
121
+ def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False,
122
+ maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
123
+ tol_norm=None, line_search='armijo', callback=None,
124
+ full_output=False, raise_exception=True):
125
+ """
126
+ Find a root of a function, in a way suitable for large-scale problems.
127
+
128
+ Parameters
129
+ ----------
130
+ %(params_basic)s
131
+ jacobian : Jacobian
132
+ A Jacobian approximation: `Jacobian` object or something that
133
+ `asjacobian` can transform to one. Alternatively, a string specifying
134
+ which of the builtin Jacobian approximations to use:
135
+
136
+ krylov, broyden1, broyden2, anderson
137
+ diagbroyden, linearmixing, excitingmixing
138
+
139
+ %(params_extra)s
140
+ full_output : bool
141
+ If true, returns a dictionary `info` containing convergence
142
+ information.
143
+ raise_exception : bool
144
+ If True, a `NoConvergence` exception is raise if no solution is found.
145
+
146
+ See Also
147
+ --------
148
+ asjacobian, Jacobian
149
+
150
+ Notes
151
+ -----
152
+ This algorithm implements the inexact Newton method, with
153
+ backtracking or full line searches. Several Jacobian
154
+ approximations are available, including Krylov and Quasi-Newton
155
+ methods.
156
+
157
+ References
158
+ ----------
159
+ .. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear
160
+ Equations\". Society for Industrial and Applied Mathematics. (1995)
161
+ https://archive.siam.org/books/kelley/fr16/
162
+
163
+ """
164
+ # Can't use default parameters because it's being explicitly passed as None
165
+ # from the calling function, so we need to set it here.
166
+ tol_norm = maxnorm if tol_norm is None else tol_norm
167
+ condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol,
168
+ x_tol=x_tol, x_rtol=x_rtol,
169
+ iter=iter, norm=tol_norm)
170
+
171
+ x0 = _as_inexact(x0)
172
+ def func(z):
173
+ return _as_inexact(F(_array_like(z, x0))).flatten()
174
+ x = x0.flatten()
175
+
176
+ dx = np.full_like(x, np.inf)
177
+ Fx = func(x)
178
+ Fx_norm = norm(Fx)
179
+
180
+ jacobian = asjacobian(jacobian)
181
+ jacobian.setup(x.copy(), Fx, func)
182
+
183
+ if maxiter is None:
184
+ if iter is not None:
185
+ maxiter = iter + 1
186
+ else:
187
+ maxiter = 100*(x.size+1)
188
+
189
+ if line_search is True:
190
+ line_search = 'armijo'
191
+ elif line_search is False:
192
+ line_search = None
193
+
194
+ if line_search not in (None, 'armijo', 'wolfe'):
195
+ raise ValueError("Invalid line search")
196
+
197
+ # Solver tolerance selection
198
+ gamma = 0.9
199
+ eta_max = 0.9999
200
+ eta_treshold = 0.1
201
+ eta = 1e-3
202
+
203
+ for n in range(maxiter):
204
+ status = condition.check(Fx, x, dx)
205
+ if status:
206
+ break
207
+
208
+ # The tolerance, as computed for scipy.sparse.linalg.* routines
209
+ tol = min(eta, eta*Fx_norm)
210
+ dx = -jacobian.solve(Fx, tol=tol)
211
+
212
+ if norm(dx) == 0:
213
+ raise ValueError("Jacobian inversion yielded zero vector. "
214
+ "This indicates a bug in the Jacobian "
215
+ "approximation.")
216
+
217
+ # Line search, or Newton step
218
+ if line_search:
219
+ s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx,
220
+ line_search)
221
+ else:
222
+ s = 1.0
223
+ x = x + dx
224
+ Fx = func(x)
225
+ Fx_norm_new = norm(Fx)
226
+
227
+ jacobian.update(x.copy(), Fx)
228
+
229
+ if callback:
230
+ callback(x, Fx)
231
+
232
+ # Adjust forcing parameters for inexact methods
233
+ eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
234
+ if gamma * eta**2 < eta_treshold:
235
+ eta = min(eta_max, eta_A)
236
+ else:
237
+ eta = min(eta_max, max(eta_A, gamma*eta**2))
238
+
239
+ Fx_norm = Fx_norm_new
240
+
241
+ # Print status
242
+ if verbose:
243
+ sys.stdout.write("%d: |F(x)| = %g; step %g\n" % (
244
+ n, tol_norm(Fx), s))
245
+ sys.stdout.flush()
246
+ else:
247
+ if raise_exception:
248
+ raise NoConvergence(_array_like(x, x0))
249
+ else:
250
+ status = 2
251
+
252
+ if full_output:
253
+ info = {'nit': condition.iteration,
254
+ 'fun': Fx,
255
+ 'status': status,
256
+ 'success': status == 1,
257
+ 'message': {1: 'A solution was found at the specified '
258
+ 'tolerance.',
259
+ 2: 'The maximum number of iterations allowed '
260
+ 'has been reached.'
261
+ }[status]
262
+ }
263
+ return _array_like(x, x0), info
264
+ else:
265
+ return _array_like(x, x0)
266
+
267
+
268
+ _set_doc(nonlin_solve)
269
+
270
+
271
+ def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8,
272
+ smin=1e-2):
273
+ tmp_s = [0]
274
+ tmp_Fx = [Fx]
275
+ tmp_phi = [norm(Fx)**2]
276
+ s_norm = norm(x) / norm(dx)
277
+
278
+ def phi(s, store=True):
279
+ if s == tmp_s[0]:
280
+ return tmp_phi[0]
281
+ xt = x + s*dx
282
+ v = func(xt)
283
+ p = _safe_norm(v)**2
284
+ if store:
285
+ tmp_s[0] = s
286
+ tmp_phi[0] = p
287
+ tmp_Fx[0] = v
288
+ return p
289
+
290
+ def derphi(s):
291
+ ds = (abs(s) + s_norm + 1) * rdiff
292
+ return (phi(s+ds, store=False) - phi(s)) / ds
293
+
294
+ if search_type == 'wolfe':
295
+ s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0],
296
+ xtol=1e-2, amin=smin)
297
+ elif search_type == 'armijo':
298
+ s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0],
299
+ amin=smin)
300
+
301
+ if s is None:
302
+ # XXX: No suitable step length found. Take the full Newton step,
303
+ # and hope for the best.
304
+ s = 1.0
305
+
306
+ x = x + s*dx
307
+ if s == tmp_s[0]:
308
+ Fx = tmp_Fx[0]
309
+ else:
310
+ Fx = func(x)
311
+ Fx_norm = norm(Fx)
312
+
313
+ return s, x, Fx, Fx_norm
314
+
315
+
316
+ class TerminationCondition:
317
+ """
318
+ Termination condition for an iteration. It is terminated if
319
+
320
+ - |F| < f_rtol*|F_0|, AND
321
+ - |F| < f_tol
322
+
323
+ AND
324
+
325
+ - |dx| < x_rtol*|x|, AND
326
+ - |dx| < x_tol
327
+
328
+ """
329
+ def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
330
+ iter=None, norm=maxnorm):
331
+
332
+ if f_tol is None:
333
+ f_tol = np.finfo(np.float64).eps ** (1./3)
334
+ if f_rtol is None:
335
+ f_rtol = np.inf
336
+ if x_tol is None:
337
+ x_tol = np.inf
338
+ if x_rtol is None:
339
+ x_rtol = np.inf
340
+
341
+ self.x_tol = x_tol
342
+ self.x_rtol = x_rtol
343
+ self.f_tol = f_tol
344
+ self.f_rtol = f_rtol
345
+
346
+ self.norm = norm
347
+
348
+ self.iter = iter
349
+
350
+ self.f0_norm = None
351
+ self.iteration = 0
352
+
353
+ def check(self, f, x, dx):
354
+ self.iteration += 1
355
+ f_norm = self.norm(f)
356
+ x_norm = self.norm(x)
357
+ dx_norm = self.norm(dx)
358
+
359
+ if self.f0_norm is None:
360
+ self.f0_norm = f_norm
361
+
362
+ if f_norm == 0:
363
+ return 1
364
+
365
+ if self.iter is not None:
366
+ # backwards compatibility with SciPy 0.6.0
367
+ return 2 * (self.iteration > self.iter)
368
+
369
+ # NB: condition must succeed for rtol=inf even if norm == 0
370
+ return int((f_norm <= self.f_tol
371
+ and f_norm/self.f_rtol <= self.f0_norm)
372
+ and (dx_norm <= self.x_tol
373
+ and dx_norm/self.x_rtol <= x_norm))
374
+
375
+
376
+ #------------------------------------------------------------------------------
377
+ # Generic Jacobian approximation
378
+ #------------------------------------------------------------------------------
379
+
380
+ class Jacobian:
381
+ """
382
+ Common interface for Jacobians or Jacobian approximations.
383
+
384
+ The optional methods come useful when implementing trust region
385
+ etc., algorithms that often require evaluating transposes of the
386
+ Jacobian.
387
+
388
+ Methods
389
+ -------
390
+ solve
391
+ Returns J^-1 * v
392
+ update
393
+ Updates Jacobian to point `x` (where the function has residual `Fx`)
394
+
395
+ matvec : optional
396
+ Returns J * v
397
+ rmatvec : optional
398
+ Returns A^H * v
399
+ rsolve : optional
400
+ Returns A^-H * v
401
+ matmat : optional
402
+ Returns A * V, where V is a dense matrix with dimensions (N,K).
403
+ todense : optional
404
+ Form the dense Jacobian matrix. Necessary for dense trust region
405
+ algorithms, and useful for testing.
406
+
407
+ Attributes
408
+ ----------
409
+ shape
410
+ Matrix dimensions (M, N)
411
+ dtype
412
+ Data type of the matrix.
413
+ func : callable, optional
414
+ Function the Jacobian corresponds to
415
+
416
+ """
417
+
418
+ def __init__(self, **kw):
419
+ names = ["solve", "update", "matvec", "rmatvec", "rsolve",
420
+ "matmat", "todense", "shape", "dtype"]
421
+ for name, value in kw.items():
422
+ if name not in names:
423
+ raise ValueError("Unknown keyword argument %s" % name)
424
+ if value is not None:
425
+ setattr(self, name, kw[name])
426
+
427
+
428
+ if hasattr(self, "todense"):
429
+ def __array__(self, dtype=None, copy=None):
430
+ if dtype is not None:
431
+ raise ValueError(f"`dtype` must be None, was {dtype}")
432
+ return self.todense()
433
+
434
+ def aspreconditioner(self):
435
+ return InverseJacobian(self)
436
+
437
+ def solve(self, v, tol=0):
438
+ raise NotImplementedError
439
+
440
+ def update(self, x, F):
441
+ pass
442
+
443
+ def setup(self, x, F, func):
444
+ self.func = func
445
+ self.shape = (F.size, x.size)
446
+ self.dtype = F.dtype
447
+ if self.__class__.setup is Jacobian.setup:
448
+ # Call on the first point unless overridden
449
+ self.update(x, F)
450
+
451
+
452
+ class InverseJacobian:
453
+ def __init__(self, jacobian):
454
+ self.jacobian = jacobian
455
+ self.matvec = jacobian.solve
456
+ self.update = jacobian.update
457
+ if hasattr(jacobian, 'setup'):
458
+ self.setup = jacobian.setup
459
+ if hasattr(jacobian, 'rsolve'):
460
+ self.rmatvec = jacobian.rsolve
461
+
462
+ @property
463
+ def shape(self):
464
+ return self.jacobian.shape
465
+
466
+ @property
467
+ def dtype(self):
468
+ return self.jacobian.dtype
469
+
470
+
471
+ def asjacobian(J):
472
+ """
473
+ Convert given object to one suitable for use as a Jacobian.
474
+ """
475
+ spsolve = scipy.sparse.linalg.spsolve
476
+ if isinstance(J, Jacobian):
477
+ return J
478
+ elif inspect.isclass(J) and issubclass(J, Jacobian):
479
+ return J()
480
+ elif isinstance(J, np.ndarray):
481
+ if J.ndim > 2:
482
+ raise ValueError('array must have rank <= 2')
483
+ J = np.atleast_2d(np.asarray(J))
484
+ if J.shape[0] != J.shape[1]:
485
+ raise ValueError('array must be square')
486
+
487
+ return Jacobian(matvec=lambda v: dot(J, v),
488
+ rmatvec=lambda v: dot(J.conj().T, v),
489
+ solve=lambda v, tol=0: solve(J, v),
490
+ rsolve=lambda v, tol=0: solve(J.conj().T, v),
491
+ dtype=J.dtype, shape=J.shape)
492
+ elif scipy.sparse.issparse(J):
493
+ if J.shape[0] != J.shape[1]:
494
+ raise ValueError('matrix must be square')
495
+ return Jacobian(matvec=lambda v: J @ v,
496
+ rmatvec=lambda v: J.conj().T @ v,
497
+ solve=lambda v, tol=0: spsolve(J, v),
498
+ rsolve=lambda v, tol=0: spsolve(J.conj().T, v),
499
+ dtype=J.dtype, shape=J.shape)
500
+ elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
501
+ return Jacobian(matvec=getattr(J, 'matvec'),
502
+ rmatvec=getattr(J, 'rmatvec'),
503
+ solve=J.solve,
504
+ rsolve=getattr(J, 'rsolve'),
505
+ update=getattr(J, 'update'),
506
+ setup=getattr(J, 'setup'),
507
+ dtype=J.dtype,
508
+ shape=J.shape)
509
+ elif callable(J):
510
+ # Assume it's a function J(x) that returns the Jacobian
511
+ class Jac(Jacobian):
512
+ def update(self, x, F):
513
+ self.x = x
514
+
515
+ def solve(self, v, tol=0):
516
+ m = J(self.x)
517
+ if isinstance(m, np.ndarray):
518
+ return solve(m, v)
519
+ elif scipy.sparse.issparse(m):
520
+ return spsolve(m, v)
521
+ else:
522
+ raise ValueError("Unknown matrix type")
523
+
524
+ def matvec(self, v):
525
+ m = J(self.x)
526
+ if isinstance(m, np.ndarray):
527
+ return dot(m, v)
528
+ elif scipy.sparse.issparse(m):
529
+ return m @ v
530
+ else:
531
+ raise ValueError("Unknown matrix type")
532
+
533
+ def rsolve(self, v, tol=0):
534
+ m = J(self.x)
535
+ if isinstance(m, np.ndarray):
536
+ return solve(m.conj().T, v)
537
+ elif scipy.sparse.issparse(m):
538
+ return spsolve(m.conj().T, v)
539
+ else:
540
+ raise ValueError("Unknown matrix type")
541
+
542
+ def rmatvec(self, v):
543
+ m = J(self.x)
544
+ if isinstance(m, np.ndarray):
545
+ return dot(m.conj().T, v)
546
+ elif scipy.sparse.issparse(m):
547
+ return m.conj().T @ v
548
+ else:
549
+ raise ValueError("Unknown matrix type")
550
+ return Jac()
551
+ elif isinstance(J, str):
552
+ return dict(broyden1=BroydenFirst,
553
+ broyden2=BroydenSecond,
554
+ anderson=Anderson,
555
+ diagbroyden=DiagBroyden,
556
+ linearmixing=LinearMixing,
557
+ excitingmixing=ExcitingMixing,
558
+ krylov=KrylovJacobian)[J]()
559
+ else:
560
+ raise TypeError('Cannot convert object to a Jacobian')
561
+
562
+
563
+ #------------------------------------------------------------------------------
564
+ # Broyden
565
+ #------------------------------------------------------------------------------
566
+
567
+ class GenericBroyden(Jacobian):
568
+ def setup(self, x0, f0, func):
569
+ Jacobian.setup(self, x0, f0, func)
570
+ self.last_f = f0
571
+ self.last_x = x0
572
+
573
+ if hasattr(self, 'alpha') and self.alpha is None:
574
+ # Autoscale the initial Jacobian parameter
575
+ # unless we have already guessed the solution.
576
+ normf0 = norm(f0)
577
+ if normf0:
578
+ self.alpha = 0.5*max(norm(x0), 1) / normf0
579
+ else:
580
+ self.alpha = 1.0
581
+
582
+ def _update(self, x, f, dx, df, dx_norm, df_norm):
583
+ raise NotImplementedError
584
+
585
+ def update(self, x, f):
586
+ df = f - self.last_f
587
+ dx = x - self.last_x
588
+ self._update(x, f, dx, df, norm(dx), norm(df))
589
+ self.last_f = f
590
+ self.last_x = x
591
+
592
+
593
+ class LowRankMatrix:
594
+ r"""
595
+ A matrix represented as
596
+
597
+ .. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger
598
+
599
+ However, if the rank of the matrix reaches the dimension of the vectors,
600
+ full matrix representation will be used thereon.
601
+
602
+ """
603
+
604
+ def __init__(self, alpha, n, dtype):
605
+ self.alpha = alpha
606
+ self.cs = []
607
+ self.ds = []
608
+ self.n = n
609
+ self.dtype = dtype
610
+ self.collapsed = None
611
+
612
+ @staticmethod
613
+ def _matvec(v, alpha, cs, ds):
614
+ axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'],
615
+ cs[:1] + [v])
616
+ w = alpha * v
617
+ for c, d in zip(cs, ds):
618
+ a = dotc(d, v)
619
+ w = axpy(c, w, w.size, a)
620
+ return w
621
+
622
+ @staticmethod
623
+ def _solve(v, alpha, cs, ds):
624
+ """Evaluate w = M^-1 v"""
625
+ if len(cs) == 0:
626
+ return v/alpha
627
+
628
+ # (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1
629
+
630
+ axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])
631
+
632
+ c0 = cs[0]
633
+ A = alpha * np.identity(len(cs), dtype=c0.dtype)
634
+ for i, d in enumerate(ds):
635
+ for j, c in enumerate(cs):
636
+ A[i,j] += dotc(d, c)
637
+
638
+ q = np.zeros(len(cs), dtype=c0.dtype)
639
+ for j, d in enumerate(ds):
640
+ q[j] = dotc(d, v)
641
+ q /= alpha
642
+ q = solve(A, q)
643
+
644
+ w = v/alpha
645
+ for c, qc in zip(cs, q):
646
+ w = axpy(c, w, w.size, -qc)
647
+
648
+ return w
649
+
650
+ def matvec(self, v):
651
+ """Evaluate w = M v"""
652
+ if self.collapsed is not None:
653
+ return np.dot(self.collapsed, v)
654
+ return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)
655
+
656
+ def rmatvec(self, v):
657
+ """Evaluate w = M^H v"""
658
+ if self.collapsed is not None:
659
+ return np.dot(self.collapsed.T.conj(), v)
660
+ return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)
661
+
662
+ def solve(self, v, tol=0):
663
+ """Evaluate w = M^-1 v"""
664
+ if self.collapsed is not None:
665
+ return solve(self.collapsed, v)
666
+ return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)
667
+
668
+ def rsolve(self, v, tol=0):
669
+ """Evaluate w = M^-H v"""
670
+ if self.collapsed is not None:
671
+ return solve(self.collapsed.T.conj(), v)
672
+ return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)
673
+
674
+ def append(self, c, d):
675
+ if self.collapsed is not None:
676
+ self.collapsed += c[:,None] * d[None,:].conj()
677
+ return
678
+
679
+ self.cs.append(c)
680
+ self.ds.append(d)
681
+
682
+ if len(self.cs) > c.size:
683
+ self.collapse()
684
+
685
+ def __array__(self, dtype=None, copy=None):
686
+ if dtype is not None:
687
+ warnings.warn("LowRankMatrix is scipy-internal code, `dtype` "
688
+ f"should only be None but was {dtype} (not handled)",
689
+ stacklevel=3)
690
+ if copy is not None:
691
+ warnings.warn("LowRankMatrix is scipy-internal code, `copy` "
692
+ f"should only be None but was {copy} (not handled)",
693
+ stacklevel=3)
694
+ if self.collapsed is not None:
695
+ return self.collapsed
696
+
697
+ Gm = self.alpha*np.identity(self.n, dtype=self.dtype)
698
+ for c, d in zip(self.cs, self.ds):
699
+ Gm += c[:,None]*d[None,:].conj()
700
+ return Gm
701
+
702
+ def collapse(self):
703
+ """Collapse the low-rank matrix to a full-rank one."""
704
+ self.collapsed = np.array(self)
705
+ self.cs = None
706
+ self.ds = None
707
+ self.alpha = None
708
+
709
+ def restart_reduce(self, rank):
710
+ """
711
+ Reduce the rank of the matrix by dropping all vectors.
712
+ """
713
+ if self.collapsed is not None:
714
+ return
715
+ assert rank > 0
716
+ if len(self.cs) > rank:
717
+ del self.cs[:]
718
+ del self.ds[:]
719
+
720
+ def simple_reduce(self, rank):
721
+ """
722
+ Reduce the rank of the matrix by dropping oldest vectors.
723
+ """
724
+ if self.collapsed is not None:
725
+ return
726
+ assert rank > 0
727
+ while len(self.cs) > rank:
728
+ del self.cs[0]
729
+ del self.ds[0]
730
+
731
+ def svd_reduce(self, max_rank, to_retain=None):
732
+ """
733
+ Reduce the rank of the matrix by retaining some SVD components.
734
+
735
+ This corresponds to the \"Broyden Rank Reduction Inverse\"
736
+ algorithm described in [1]_.
737
+
738
+ Note that the SVD decomposition can be done by solving only a
739
+ problem whose size is the effective rank of this matrix, which
740
+ is viable even for large problems.
741
+
742
+ Parameters
743
+ ----------
744
+ max_rank : int
745
+ Maximum rank of this matrix after reduction.
746
+ to_retain : int, optional
747
+ Number of SVD components to retain when reduction is done
748
+ (ie. rank > max_rank). Default is ``max_rank - 2``.
749
+
750
+ References
751
+ ----------
752
+ .. [1] B.A. van der Rotten, PhD thesis,
753
+ \"A limited memory Broyden method to solve high-dimensional
754
+ systems of nonlinear equations\". Mathematisch Instituut,
755
+ Universiteit Leiden, The Netherlands (2003).
756
+
757
+ https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf
758
+
759
+ """
760
+ if self.collapsed is not None:
761
+ return
762
+
763
+ p = max_rank
764
+ if to_retain is not None:
765
+ q = to_retain
766
+ else:
767
+ q = p - 2
768
+
769
+ if self.cs:
770
+ p = min(p, len(self.cs[0]))
771
+ q = max(0, min(q, p-1))
772
+
773
+ m = len(self.cs)
774
+ if m < p:
775
+ # nothing to do
776
+ return
777
+
778
+ C = np.array(self.cs).T
779
+ D = np.array(self.ds).T
780
+
781
+ D, R = qr(D, mode='economic')
782
+ C = dot(C, R.T.conj())
783
+
784
+ U, S, WH = svd(C, full_matrices=False)
785
+
786
+ C = dot(C, inv(WH))
787
+ D = dot(D, WH.T.conj())
788
+
789
+ for k in range(q):
790
+ self.cs[k] = C[:,k].copy()
791
+ self.ds[k] = D[:,k].copy()
792
+
793
+ del self.cs[q:]
794
+ del self.ds[q:]
795
+
796
+
797
+ _doc_parts['broyden_params'] = """
798
+ alpha : float, optional
799
+ Initial guess for the Jacobian is ``(-1/alpha)``.
800
+ reduction_method : str or tuple, optional
801
+ Method used in ensuring that the rank of the Broyden matrix
802
+ stays low. Can either be a string giving the name of the method,
803
+ or a tuple of the form ``(method, param1, param2, ...)``
804
+ that gives the name of the method and values for additional parameters.
805
+
806
+ Methods available:
807
+
808
+ - ``restart``: drop all matrix columns. Has no extra parameters.
809
+ - ``simple``: drop oldest matrix column. Has no extra parameters.
810
+ - ``svd``: keep only the most significant SVD components.
811
+ Takes an extra parameter, ``to_retain``, which determines the
812
+ number of SVD components to retain when rank reduction is done.
813
+ Default is ``max_rank - 2``.
814
+
815
+ max_rank : int, optional
816
+ Maximum rank for the Broyden matrix.
817
+ Default is infinity (i.e., no rank reduction).
818
+ """.strip()
819
+
820
+
821
+ class BroydenFirst(GenericBroyden):
822
+ r"""
823
+ Find a root of a function, using Broyden's first Jacobian approximation.
824
+
825
+ This method is also known as \"Broyden's good method\".
826
+
827
+ Parameters
828
+ ----------
829
+ %(params_basic)s
830
+ %(broyden_params)s
831
+ %(params_extra)s
832
+
833
+ See Also
834
+ --------
835
+ root : Interface to root finding algorithms for multivariate
836
+ functions. See ``method='broyden1'`` in particular.
837
+
838
+ Notes
839
+ -----
840
+ This algorithm implements the inverse Jacobian Quasi-Newton update
841
+
842
+ .. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df)
843
+
844
+ which corresponds to Broyden's first Jacobian update
845
+
846
+ .. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx
847
+
848
+
849
+ References
850
+ ----------
851
+ .. [1] B.A. van der Rotten, PhD thesis,
852
+ \"A limited memory Broyden method to solve high-dimensional
853
+ systems of nonlinear equations\". Mathematisch Instituut,
854
+ Universiteit Leiden, The Netherlands (2003).
855
+
856
+ https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf
857
+
858
+ Examples
859
+ --------
860
+ The following functions define a system of nonlinear equations
861
+
862
+ >>> def fun(x):
863
+ ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
864
+ ... 0.5 * (x[1] - x[0])**3 + x[1]]
865
+
866
+ A solution can be obtained as follows.
867
+
868
+ >>> from scipy import optimize
869
+ >>> sol = optimize.broyden1(fun, [0, 0])
870
+ >>> sol
871
+ array([0.84116396, 0.15883641])
872
+
873
+ """
874
+
875
+ def __init__(self, alpha=None, reduction_method='restart', max_rank=None):
876
+ GenericBroyden.__init__(self)
877
+ self.alpha = alpha
878
+ self.Gm = None
879
+
880
+ if max_rank is None:
881
+ max_rank = np.inf
882
+ self.max_rank = max_rank
883
+
884
+ if isinstance(reduction_method, str):
885
+ reduce_params = ()
886
+ else:
887
+ reduce_params = reduction_method[1:]
888
+ reduction_method = reduction_method[0]
889
+ reduce_params = (max_rank - 1,) + reduce_params
890
+
891
+ if reduction_method == 'svd':
892
+ self._reduce = lambda: self.Gm.svd_reduce(*reduce_params)
893
+ elif reduction_method == 'simple':
894
+ self._reduce = lambda: self.Gm.simple_reduce(*reduce_params)
895
+ elif reduction_method == 'restart':
896
+ self._reduce = lambda: self.Gm.restart_reduce(*reduce_params)
897
+ else:
898
+ raise ValueError("Unknown rank reduction method '%s'" %
899
+ reduction_method)
900
+
901
+ def setup(self, x, F, func):
902
+ GenericBroyden.setup(self, x, F, func)
903
+ self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype)
904
+
905
+ def todense(self):
906
+ return inv(self.Gm)
907
+
908
+ def solve(self, f, tol=0):
909
+ r = self.Gm.matvec(f)
910
+ if not np.isfinite(r).all():
911
+ # singular; reset the Jacobian approximation
912
+ self.setup(self.last_x, self.last_f, self.func)
913
+ return self.Gm.matvec(f)
914
+ return r
915
+
916
+ def matvec(self, f):
917
+ return self.Gm.solve(f)
918
+
919
+ def rsolve(self, f, tol=0):
920
+ return self.Gm.rmatvec(f)
921
+
922
+ def rmatvec(self, f):
923
+ return self.Gm.rsolve(f)
924
+
925
+ def _update(self, x, f, dx, df, dx_norm, df_norm):
926
+ self._reduce() # reduce first to preserve secant condition
927
+
928
+ v = self.Gm.rmatvec(dx)
929
+ c = dx - self.Gm.matvec(df)
930
+ d = v / vdot(df, v)
931
+
932
+ self.Gm.append(c, d)
933
+
934
+
935
+ class BroydenSecond(BroydenFirst):
936
+ """
937
+ Find a root of a function, using Broyden\'s second Jacobian approximation.
938
+
939
+ This method is also known as \"Broyden's bad method\".
940
+
941
+ Parameters
942
+ ----------
943
+ %(params_basic)s
944
+ %(broyden_params)s
945
+ %(params_extra)s
946
+
947
+ See Also
948
+ --------
949
+ root : Interface to root finding algorithms for multivariate
950
+ functions. See ``method='broyden2'`` in particular.
951
+
952
+ Notes
953
+ -----
954
+ This algorithm implements the inverse Jacobian Quasi-Newton update
955
+
956
+ .. math:: H_+ = H + (dx - H df) df^\\dagger / ( df^\\dagger df)
957
+
958
+ corresponding to Broyden's second method.
959
+
960
+ References
961
+ ----------
962
+ .. [1] B.A. van der Rotten, PhD thesis,
963
+ \"A limited memory Broyden method to solve high-dimensional
964
+ systems of nonlinear equations\". Mathematisch Instituut,
965
+ Universiteit Leiden, The Netherlands (2003).
966
+
967
+ https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf
968
+
969
+ Examples
970
+ --------
971
+ The following functions define a system of nonlinear equations
972
+
973
+ >>> def fun(x):
974
+ ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
975
+ ... 0.5 * (x[1] - x[0])**3 + x[1]]
976
+
977
+ A solution can be obtained as follows.
978
+
979
+ >>> from scipy import optimize
980
+ >>> sol = optimize.broyden2(fun, [0, 0])
981
+ >>> sol
982
+ array([0.84116365, 0.15883529])
983
+
984
+ """
985
+
986
+ def _update(self, x, f, dx, df, dx_norm, df_norm):
987
+ self._reduce() # reduce first to preserve secant condition
988
+
989
+ v = df
990
+ c = dx - self.Gm.matvec(df)
991
+ d = v / df_norm**2
992
+ self.Gm.append(c, d)
993
+
994
+
995
+ #------------------------------------------------------------------------------
996
+ # Broyden-like (restricted memory)
997
+ #------------------------------------------------------------------------------
998
+
999
+ class Anderson(GenericBroyden):
1000
+ """
1001
+ Find a root of a function, using (extended) Anderson mixing.
1002
+
1003
+ The Jacobian is formed by for a 'best' solution in the space
1004
+ spanned by last `M` vectors. As a result, only a MxM matrix
1005
+ inversions and MxN multiplications are required. [Ey]_
1006
+
1007
+ Parameters
1008
+ ----------
1009
+ %(params_basic)s
1010
+ alpha : float, optional
1011
+ Initial guess for the Jacobian is (-1/alpha).
1012
+ M : float, optional
1013
+ Number of previous vectors to retain. Defaults to 5.
1014
+ w0 : float, optional
1015
+ Regularization parameter for numerical stability.
1016
+ Compared to unity, good values of the order of 0.01.
1017
+ %(params_extra)s
1018
+
1019
+ See Also
1020
+ --------
1021
+ root : Interface to root finding algorithms for multivariate
1022
+ functions. See ``method='anderson'`` in particular.
1023
+
1024
+ References
1025
+ ----------
1026
+ .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
1027
+
1028
+ Examples
1029
+ --------
1030
+ The following functions define a system of nonlinear equations
1031
+
1032
+ >>> def fun(x):
1033
+ ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
1034
+ ... 0.5 * (x[1] - x[0])**3 + x[1]]
1035
+
1036
+ A solution can be obtained as follows.
1037
+
1038
+ >>> from scipy import optimize
1039
+ >>> sol = optimize.anderson(fun, [0, 0])
1040
+ >>> sol
1041
+ array([0.84116588, 0.15883789])
1042
+
1043
+ """
1044
+
1045
+ # Note:
1046
+ #
1047
+ # Anderson method maintains a rank M approximation of the inverse Jacobian,
1048
+ #
1049
+ # J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
1050
+ # A = W + dF^H dF
1051
+ # W = w0^2 diag(dF^H dF)
1052
+ #
1053
+ # so that for w0 = 0 the secant condition applies for last M iterates, i.e.,
1054
+ #
1055
+ # J^-1 df_j = dx_j
1056
+ #
1057
+ # for all j = 0 ... M-1.
1058
+ #
1059
+ # Moreover, (from Sherman-Morrison-Woodbury formula)
1060
+ #
1061
+ # J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
1062
+ # C = (dX + alpha dF) A^-1
1063
+ # b = -1/alpha
1064
+ #
1065
+ # and after simplification
1066
+ #
1067
+ # J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
1068
+ #
1069
+
1070
+ def __init__(self, alpha=None, w0=0.01, M=5):
1071
+ GenericBroyden.__init__(self)
1072
+ self.alpha = alpha
1073
+ self.M = M
1074
+ self.dx = []
1075
+ self.df = []
1076
+ self.gamma = None
1077
+ self.w0 = w0
1078
+
1079
+ def solve(self, f, tol=0):
1080
+ dx = -self.alpha*f
1081
+
1082
+ n = len(self.dx)
1083
+ if n == 0:
1084
+ return dx
1085
+
1086
+ df_f = np.empty(n, dtype=f.dtype)
1087
+ for k in range(n):
1088
+ df_f[k] = vdot(self.df[k], f)
1089
+
1090
+ try:
1091
+ gamma = solve(self.a, df_f)
1092
+ except LinAlgError:
1093
+ # singular; reset the Jacobian approximation
1094
+ del self.dx[:]
1095
+ del self.df[:]
1096
+ return dx
1097
+
1098
+ for m in range(n):
1099
+ dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
1100
+ return dx
1101
+
1102
+ def matvec(self, f):
1103
+ dx = -f/self.alpha
1104
+
1105
+ n = len(self.dx)
1106
+ if n == 0:
1107
+ return dx
1108
+
1109
+ df_f = np.empty(n, dtype=f.dtype)
1110
+ for k in range(n):
1111
+ df_f[k] = vdot(self.df[k], f)
1112
+
1113
+ b = np.empty((n, n), dtype=f.dtype)
1114
+ for i in range(n):
1115
+ for j in range(n):
1116
+ b[i,j] = vdot(self.df[i], self.dx[j])
1117
+ if i == j and self.w0 != 0:
1118
+ b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
1119
+ gamma = solve(b, df_f)
1120
+
1121
+ for m in range(n):
1122
+ dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
1123
+ return dx
1124
+
1125
+ def _update(self, x, f, dx, df, dx_norm, df_norm):
1126
+ if self.M == 0:
1127
+ return
1128
+
1129
+ self.dx.append(dx)
1130
+ self.df.append(df)
1131
+
1132
+ while len(self.dx) > self.M:
1133
+ self.dx.pop(0)
1134
+ self.df.pop(0)
1135
+
1136
+ n = len(self.dx)
1137
+ a = np.zeros((n, n), dtype=f.dtype)
1138
+
1139
+ for i in range(n):
1140
+ for j in range(i, n):
1141
+ if i == j:
1142
+ wd = self.w0**2
1143
+ else:
1144
+ wd = 0
1145
+ a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])
1146
+
1147
+ a += np.triu(a, 1).T.conj()
1148
+ self.a = a
1149
+
1150
+ #------------------------------------------------------------------------------
1151
+ # Simple iterations
1152
+ #------------------------------------------------------------------------------
1153
+
1154
+
1155
+ class DiagBroyden(GenericBroyden):
1156
+ """
1157
+ Find a root of a function, using diagonal Broyden Jacobian approximation.
1158
+
1159
+ The Jacobian approximation is derived from previous iterations, by
1160
+ retaining only the diagonal of Broyden matrices.
1161
+
1162
+ .. warning::
1163
+
1164
+ This algorithm may be useful for specific problems, but whether
1165
+ it will work may depend strongly on the problem.
1166
+
1167
+ Parameters
1168
+ ----------
1169
+ %(params_basic)s
1170
+ alpha : float, optional
1171
+ Initial guess for the Jacobian is (-1/alpha).
1172
+ %(params_extra)s
1173
+
1174
+ See Also
1175
+ --------
1176
+ root : Interface to root finding algorithms for multivariate
1177
+ functions. See ``method='diagbroyden'`` in particular.
1178
+
1179
+ Examples
1180
+ --------
1181
+ The following functions define a system of nonlinear equations
1182
+
1183
+ >>> def fun(x):
1184
+ ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
1185
+ ... 0.5 * (x[1] - x[0])**3 + x[1]]
1186
+
1187
+ A solution can be obtained as follows.
1188
+
1189
+ >>> from scipy import optimize
1190
+ >>> sol = optimize.diagbroyden(fun, [0, 0])
1191
+ >>> sol
1192
+ array([0.84116403, 0.15883384])
1193
+
1194
+ """
1195
+
1196
+ def __init__(self, alpha=None):
1197
+ GenericBroyden.__init__(self)
1198
+ self.alpha = alpha
1199
+
1200
+ def setup(self, x, F, func):
1201
+ GenericBroyden.setup(self, x, F, func)
1202
+ self.d = np.full((self.shape[0],), 1 / self.alpha, dtype=self.dtype)
1203
+
1204
+ def solve(self, f, tol=0):
1205
+ return -f / self.d
1206
+
1207
+ def matvec(self, f):
1208
+ return -f * self.d
1209
+
1210
+ def rsolve(self, f, tol=0):
1211
+ return -f / self.d.conj()
1212
+
1213
+ def rmatvec(self, f):
1214
+ return -f * self.d.conj()
1215
+
1216
+ def todense(self):
1217
+ return np.diag(-self.d)
1218
+
1219
+ def _update(self, x, f, dx, df, dx_norm, df_norm):
1220
+ self.d -= (df + self.d*dx)*dx/dx_norm**2
1221
+
1222
+
1223
+ class LinearMixing(GenericBroyden):
1224
+ """
1225
+ Find a root of a function, using a scalar Jacobian approximation.
1226
+
1227
+ .. warning::
1228
+
1229
+ This algorithm may be useful for specific problems, but whether
1230
+ it will work may depend strongly on the problem.
1231
+
1232
+ Parameters
1233
+ ----------
1234
+ %(params_basic)s
1235
+ alpha : float, optional
1236
+ The Jacobian approximation is (-1/alpha).
1237
+ %(params_extra)s
1238
+
1239
+ See Also
1240
+ --------
1241
+ root : Interface to root finding algorithms for multivariate
1242
+ functions. See ``method='linearmixing'`` in particular.
1243
+
1244
+ """
1245
+
1246
+ def __init__(self, alpha=None):
1247
+ GenericBroyden.__init__(self)
1248
+ self.alpha = alpha
1249
+
1250
+ def solve(self, f, tol=0):
1251
+ return -f*self.alpha
1252
+
1253
+ def matvec(self, f):
1254
+ return -f/self.alpha
1255
+
1256
+ def rsolve(self, f, tol=0):
1257
+ return -f*np.conj(self.alpha)
1258
+
1259
+ def rmatvec(self, f):
1260
+ return -f/np.conj(self.alpha)
1261
+
1262
+ def todense(self):
1263
+ return np.diag(np.full(self.shape[0], -1/self.alpha))
1264
+
1265
+ def _update(self, x, f, dx, df, dx_norm, df_norm):
1266
+ pass
1267
+
1268
+
1269
+ class ExcitingMixing(GenericBroyden):
1270
+ """
1271
+ Find a root of a function, using a tuned diagonal Jacobian approximation.
1272
+
1273
+ The Jacobian matrix is diagonal and is tuned on each iteration.
1274
+
1275
+ .. warning::
1276
+
1277
+ This algorithm may be useful for specific problems, but whether
1278
+ it will work may depend strongly on the problem.
1279
+
1280
+ See Also
1281
+ --------
1282
+ root : Interface to root finding algorithms for multivariate
1283
+ functions. See ``method='excitingmixing'`` in particular.
1284
+
1285
+ Parameters
1286
+ ----------
1287
+ %(params_basic)s
1288
+ alpha : float, optional
1289
+ Initial Jacobian approximation is (-1/alpha).
1290
+ alphamax : float, optional
1291
+ The entries of the diagonal Jacobian are kept in the range
1292
+ ``[alpha, alphamax]``.
1293
+ %(params_extra)s
1294
+ """
1295
+
1296
+ def __init__(self, alpha=None, alphamax=1.0):
1297
+ GenericBroyden.__init__(self)
1298
+ self.alpha = alpha
1299
+ self.alphamax = alphamax
1300
+ self.beta = None
1301
+
1302
+ def setup(self, x, F, func):
1303
+ GenericBroyden.setup(self, x, F, func)
1304
+ self.beta = np.full((self.shape[0],), self.alpha, dtype=self.dtype)
1305
+
1306
+ def solve(self, f, tol=0):
1307
+ return -f*self.beta
1308
+
1309
+ def matvec(self, f):
1310
+ return -f/self.beta
1311
+
1312
+ def rsolve(self, f, tol=0):
1313
+ return -f*self.beta.conj()
1314
+
1315
+ def rmatvec(self, f):
1316
+ return -f/self.beta.conj()
1317
+
1318
+ def todense(self):
1319
+ return np.diag(-1/self.beta)
1320
+
1321
+ def _update(self, x, f, dx, df, dx_norm, df_norm):
1322
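+ # Grow beta where the residual kept its sign (progress is slow); reset it
+ # where the sign flipped (the step overshot), then clip to [0, alphamax]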
+ incr = f*self.last_f > 0
1323
+ self.beta[incr] += self.alpha
1324
+ self.beta[~incr] = self.alpha
1325
+ np.clip(self.beta, 0, self.alphamax, out=self.beta)
1326
+
1327
+
1328
+ #------------------------------------------------------------------------------
1329
+ # Iterative/Krylov approximated Jacobians
1330
+ #------------------------------------------------------------------------------
1331
+
1332
+ class KrylovJacobian(Jacobian):
1333
+ r"""
1334
+ Find a root of a function, using Krylov approximation for inverse Jacobian.
1335
+
1336
+ This method is suitable for solving large-scale problems.
1337
+
1338
+ Parameters
1339
+ ----------
1340
+ %(params_basic)s
1341
+ rdiff : float, optional
1342
+ Relative step size to use in numerical differentiation.
1343
+ method : str or callable, optional
1344
+ Krylov method to use to approximate the Jacobian. Can be a string,
1345
+ or a function implementing the same interface as the iterative
1346
+ solvers in `scipy.sparse.linalg`. If a string, needs to be one of:
1347
+ ``'lgmres'``, ``'gmres'``, ``'bicgstab'``, ``'cgs'``, ``'minres'``,
1348
+ ``'tfqmr'``.
1349
+
1350
+ The default is `scipy.sparse.linalg.lgmres`.
1351
+ inner_maxiter : int, optional
1352
+ Parameter to pass to the "inner" Krylov solver: maximum number of
1353
+ iterations. Iteration will stop after maxiter steps even if the
1354
+ specified tolerance has not been achieved.
1355
+ inner_M : LinearOperator or InverseJacobian
1356
+ Preconditioner for the inner Krylov iteration.
1357
+ Note that you can use also inverse Jacobians as (adaptive)
1358
+ preconditioners. For example,
1359
+
1360
+ >>> from scipy.optimize import BroydenFirst, KrylovJacobian
1361
+ >>> from scipy.optimize import InverseJacobian
1362
+ >>> jac = BroydenFirst()
1363
+ >>> kjac = KrylovJacobian(inner_M=InverseJacobian(jac))
1364
+
1365
+ If the preconditioner has a method named 'update', it will be called
1366
+ as ``update(x, f)`` after each nonlinear step, with ``x`` giving
1367
+ the current point, and ``f`` the current function value.
1368
+ outer_k : int, optional
1369
+ Size of the subspace kept across LGMRES nonlinear iterations.
1370
+ See `scipy.sparse.linalg.lgmres` for details.
1371
+ inner_kwargs : kwargs
1372
+ Keyword parameters for the "inner" Krylov solver
1373
+ (defined with `method`). Parameter names must start with
1374
+ the `inner_` prefix, which will be stripped before being passed to
1375
+ the inner method. See, e.g., `scipy.sparse.linalg.gmres` for details.
1376
+ %(params_extra)s
1377
+
1378
+ See Also
1379
+ --------
1380
+ root : Interface to root finding algorithms for multivariate
1381
+ functions. See ``method='krylov'`` in particular.
1382
+ scipy.sparse.linalg.gmres
1383
+ scipy.sparse.linalg.lgmres
1384
+
1385
+ Notes
1386
+ -----
1387
+ This function implements a Newton-Krylov solver. The basic idea is
1388
+ to compute the inverse of the Jacobian with an iterative Krylov
1389
+ method. These methods require only evaluating the Jacobian-vector
1390
+ products, which are conveniently approximated by a finite difference:
1391
+
1392
+ .. math:: J v \approx (f(x + \omega v/|v|) - f(x)) / \omega
1393
+
1394
+ Due to the use of iterative matrix inverses, these methods can
1395
+ deal with large nonlinear problems.
1396
+
1397
+ SciPy's `scipy.sparse.linalg` module offers a selection of Krylov
1398
+ solvers to choose from. The default here is `lgmres`, which is a
1399
+ variant of restarted GMRES iteration that reuses some of the
1400
+ information obtained in the previous Newton steps to invert
1401
+ Jacobians in subsequent steps.
1402
+
1403
+ For a review on Newton-Krylov methods, see for example [1]_,
1404
+ and for the LGMRES sparse inverse method, see [2]_.
1405
+
1406
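+ As a minimal, self-contained sketch of this Jacobian-vector product
+ (the function, point, and step below are made up for illustration):
+
+ >>> import numpy as np
+ >>> def F(x):
+ ...     return np.array([x[0]**2 - 1, x[0]*x[1]])
+ >>> x0 = np.array([2.0, 3.0]); v = np.array([1.0, 0.0]); omega = 1e-8
+ >>> # exact J @ v at x0 is [4, 3]; the forward difference recovers it
+ >>> bool(np.allclose((F(x0 + omega*v) - F(x0)) / omega, [4, 3]))
+ True
+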
+ References
1407
+ ----------
1408
+ .. [1] C. T. Kelley, Solving Nonlinear Equations with Newton's Method,
1409
+ SIAM, pp.57-83, 2003.
1410
+ :doi:`10.1137/1.9780898718898.ch3`
1411
+ .. [2] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2004).
1412
+ :doi:`10.1016/j.jcp.2003.08.010`
1413
+ .. [3] A.H. Baker and E.R. Jessup and T. Manteuffel,
1414
+ SIAM J. Matrix Anal. Appl. 26, 962 (2005).
1415
+ :doi:`10.1137/S0895479803422014`
1416
+
1417
+ Examples
1418
+ --------
1419
+ The following functions define a system of nonlinear equations
1420
+
1421
+ >>> def fun(x):
1422
+ ... return [x[0] + 0.5 * x[1] - 1.0,
1423
+ ... 0.5 * (x[1] - x[0]) ** 2]
1424
+
1425
+ A solution can be obtained as follows.
1426
+
1427
+ >>> from scipy import optimize
1428
+ >>> sol = optimize.newton_krylov(fun, [0, 0])
1429
+ >>> sol
1430
+ array([0.66731771, 0.66536458])
1431
+
1432
+ """
1433
+
1434
+ def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20,
1435
+ inner_M=None, outer_k=10, **kw):
1436
+ self.preconditioner = inner_M
1437
+ self.rdiff = rdiff
1438
+ # Note that this retrieves one of the named functions, or otherwise
1439
+ # uses `method` as is (i.e., for a user-provided callable).
1440
+ self.method = dict(
1441
+ bicgstab=scipy.sparse.linalg.bicgstab,
1442
+ gmres=scipy.sparse.linalg.gmres,
1443
+ lgmres=scipy.sparse.linalg.lgmres,
1444
+ cgs=scipy.sparse.linalg.cgs,
1445
+ minres=scipy.sparse.linalg.minres,
1446
+ tfqmr=scipy.sparse.linalg.tfqmr,
1447
+ ).get(method, method)
1448
+
1449
+ self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner)
1450
+
1451
+ if self.method is scipy.sparse.linalg.gmres:
1452
+ # Replace GMRES's outer iteration with Newton steps
1453
+ self.method_kw['restart'] = inner_maxiter
1454
+ self.method_kw['maxiter'] = 1
1455
+ self.method_kw.setdefault('atol', 0)
1456
+ elif self.method in (scipy.sparse.linalg.gcrotmk,
1457
+ scipy.sparse.linalg.bicgstab,
1458
+ scipy.sparse.linalg.cgs):
1459
+ self.method_kw.setdefault('atol', 0)
1460
+ elif self.method is scipy.sparse.linalg.lgmres:
1461
+ self.method_kw['outer_k'] = outer_k
1462
+ # Replace LGMRES's outer iteration with Newton steps
1463
+ self.method_kw['maxiter'] = 1
1464
+ # Carry LGMRES's `outer_v` vectors across nonlinear iterations
1465
+ self.method_kw.setdefault('outer_v', [])
1466
+ self.method_kw.setdefault('prepend_outer_v', True)
1467
+ # But don't carry the corresponding Jacobian*v products, in case
1468
+ # the Jacobian changes a lot in the nonlinear step
1469
+ #
1470
+ # XXX: some trust-region inspired ideas might be more efficient...
1471
+ # See e.g., Brown & Saad. But needs to be implemented separately
1472
+ # since it's not an inexact Newton method.
1473
+ self.method_kw.setdefault('store_outer_Av', False)
1474
+ self.method_kw.setdefault('atol', 0)
1475
+
1476
+ for key, value in kw.items():
1477
+ if not key.startswith('inner_'):
1478
+ raise ValueError("Unknown parameter %s" % key)
1479
+ self.method_kw[key[6:]] = value
1480
+
1481
+ def _update_diff_step(self):
1482
+ mx = abs(self.x0).max()
1483
+ mf = abs(self.f0).max()
1484
+ self.omega = self.rdiff * max(1, mx) / max(1, mf)
1485
+
1486
+ def matvec(self, v):
1487
+ nv = norm(v)
1488
+ if nv == 0:
1489
+ return 0*v
1490
+ sc = self.omega / nv
1491
+ r = (self.func(self.x0 + sc*v) - self.f0) / sc
1492
+ if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)):
1493
+ raise ValueError('Function returned non-finite results')
1494
+ return r
1495
+
1496
+ def solve(self, rhs, tol=0):
1497
+ if 'rtol' in self.method_kw:
1498
+ sol, info = self.method(self.op, rhs, **self.method_kw)
1499
+ else:
1500
+ sol, info = self.method(self.op, rhs, rtol=tol, **self.method_kw)
1501
+ return sol
1502
+
1503
+ def update(self, x, f):
1504
+ self.x0 = x
1505
+ self.f0 = f
1506
+ self._update_diff_step()
1507
+
1508
+ # Update also the preconditioner, if possible
1509
+ if self.preconditioner is not None:
1510
+ if hasattr(self.preconditioner, 'update'):
1511
+ self.preconditioner.update(x, f)
1512
+
1513
+ def setup(self, x, f, func):
1514
+ Jacobian.setup(self, x, f, func)
1515
+ self.x0 = x
1516
+ self.f0 = f
1517
+ self.op = scipy.sparse.linalg.aslinearoperator(self)
1518
+
1519
+ if self.rdiff is None:
1520
+ self.rdiff = np.finfo(x.dtype).eps ** (1./2)
1521
+
1522
+ self._update_diff_step()
1523
+
1524
+ # Setup also the preconditioner, if possible
1525
+ if self.preconditioner is not None:
1526
+ if hasattr(self.preconditioner, 'setup'):
1527
+ self.preconditioner.setup(x, f, func)
1528
+
1529
+
1530
+ #------------------------------------------------------------------------------
1531
+ # Wrapper functions
1532
+ #------------------------------------------------------------------------------
1533
+
1534
+ def _nonlin_wrapper(name, jac):
1535
+ """
1536
+ Construct a solver wrapper with given name and Jacobian approx.
1537
+
1538
+ It inspects the keyword arguments of ``jac.__init__``, and makes the
1539
+ same arguments available in the wrapper function, in addition to the
1540
+ keyword arguments of `nonlin_solve`.
1541
+
1542
+ """
1543
+ signature = _getfullargspec(jac.__init__)
1544
+ args, varargs, varkw, defaults, kwonlyargs, kwdefaults, _ = signature
1545
+ kwargs = list(zip(args[-len(defaults):], defaults))
1546
+ kw_str = ", ".join([f"{k}={v!r}" for k, v in kwargs])
1547
+ if kw_str:
1548
+ kw_str = ", " + kw_str
1549
+ kwkw_str = ", ".join([f"{k}={k}" for k, v in kwargs])
1550
+ if kwkw_str:
1551
+ kwkw_str = kwkw_str + ", "
1552
+ if kwonlyargs:
1553
+ raise ValueError('Unexpected signature %s' % signature)
1554
+
1555
+ # Construct the wrapper function so that its keyword arguments
1556
+ # are visible in pydoc.help etc.
1557
+ wrapper = """
1558
+ def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,
1559
+ f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
1560
+ tol_norm=None, line_search='armijo', callback=None, **kw):
1561
+ jac = %(jac)s(%(kwkw)s **kw)
1562
+ return nonlin_solve(F, xin, jac, iter, verbose, maxiter,
1563
+ f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,
1564
+ callback)
1565
+ """
1566
+
1567
+ wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__,
1568
+ kwkw=kwkw_str)
1569
+ ns = {}
1570
+ ns.update(globals())
1571
+ exec(wrapper, ns)
1572
+ func = ns[name]
1573
+ func.__doc__ = jac.__doc__
1574
+ _set_doc(func)
1575
+ return func
1576
+
1577
+
1578
+ broyden1 = _nonlin_wrapper('broyden1', BroydenFirst)
1579
+ broyden2 = _nonlin_wrapper('broyden2', BroydenSecond)
1580
+ anderson = _nonlin_wrapper('anderson', Anderson)
1581
+ linearmixing = _nonlin_wrapper('linearmixing', LinearMixing)
1582
+ diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden)
1583
+ excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing)
1584
+ newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
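+
+ # For instance, the generated `broyden1` wrapper accepts BroydenFirst's own
+ # keyword arguments alongside those of `nonlin_solve`; a call might look
+ # like (sketch only, not executed here):
+ #
+ #     sol = broyden1(F, x0, alpha=0.1, f_tol=1e-10, maxiter=100)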
venv/lib/python3.10/site-packages/scipy/optimize/_numdiff.py ADDED
@@ -0,0 +1,775 @@
1
+ """Routines for numerical differentiation."""
2
+ import functools
3
+ import numpy as np
4
+ from numpy.linalg import norm
5
+
6
+ from scipy.sparse.linalg import LinearOperator
7
+ from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find
8
+ from ._group_columns import group_dense, group_sparse
9
+ from scipy._lib._array_api import atleast_nd, array_namespace
10
+
11
+
12
+ def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub):
13
+ """Adjust final difference scheme to the presence of bounds.
14
+
15
+ Parameters
16
+ ----------
17
+ x0 : ndarray, shape (n,)
18
+ Point at which we wish to estimate derivative.
19
+ h : ndarray, shape (n,)
20
+ Desired absolute finite difference steps.
21
+ num_steps : int
22
+ Number of `h` steps in one direction required to implement finite
23
+ difference scheme. For example, 2 means that we need to evaluate
24
+ f(x0 + 2 * h) or f(x0 - 2 * h)
25
+ scheme : {'1-sided', '2-sided'}
26
+ Whether steps in one or both directions are required. In other
27
+ words '1-sided' applies to forward and backward schemes, '2-sided'
28
+ applies to center schemes.
29
+ lb : ndarray, shape (n,)
30
+ Lower bounds on independent variables.
31
+ ub : ndarray, shape (n,)
32
+ Upper bounds on independent variables.
33
+
34
+ Returns
35
+ -------
36
+ h_adjusted : ndarray, shape (n,)
37
+ Adjusted absolute step sizes. A step size decreases only if a sign flip
38
+ or switching to a one-sided scheme doesn't allow taking a full step.
39
+ use_one_sided : ndarray of bool, shape (n,)
40
+ Whether to switch to one-sided scheme. Informative only for
41
+ ``scheme='2-sided'``.
42
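+
+ Examples
+ --------
+ Near an upper bound a 2-sided scheme flips to a one-sided backward step
+ (toy numbers chosen for this sketch; private import path assumed):
+
+ >>> import numpy as np
+ >>> from scipy.optimize._numdiff import _adjust_scheme_to_bounds
+ >>> h_adj, one_sided = _adjust_scheme_to_bounds(
+ ...     np.array([0.9]), np.array([0.2]), 1, '2-sided',
+ ...     np.array([0.0]), np.array([1.0]))
+ >>> h_adj
+ array([-0.2])
+ >>> bool(one_sided[0])
+ True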
+ """
43
+ if scheme == '1-sided':
44
+ use_one_sided = np.ones_like(h, dtype=bool)
45
+ elif scheme == '2-sided':
46
+ h = np.abs(h)
47
+ use_one_sided = np.zeros_like(h, dtype=bool)
48
+ else:
49
+ raise ValueError("`scheme` must be '1-sided' or '2-sided'.")
50
+
51
+ if np.all((lb == -np.inf) & (ub == np.inf)):
52
+ return h, use_one_sided
53
+
54
+ h_total = h * num_steps
55
+ h_adjusted = h.copy()
56
+
57
+ lower_dist = x0 - lb
58
+ upper_dist = ub - x0
59
+
60
+ if scheme == '1-sided':
61
+ x = x0 + h_total
62
+ violated = (x < lb) | (x > ub)
63
+ fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist)
64
+ h_adjusted[violated & fitting] *= -1
65
+
66
+ forward = (upper_dist >= lower_dist) & ~fitting
67
+ h_adjusted[forward] = upper_dist[forward] / num_steps
68
+ backward = (upper_dist < lower_dist) & ~fitting
69
+ h_adjusted[backward] = -lower_dist[backward] / num_steps
70
+ elif scheme == '2-sided':
71
+ central = (lower_dist >= h_total) & (upper_dist >= h_total)
72
+
73
+ forward = (upper_dist >= lower_dist) & ~central
74
+ h_adjusted[forward] = np.minimum(
75
+ h[forward], 0.5 * upper_dist[forward] / num_steps)
76
+ use_one_sided[forward] = True
77
+
78
+ backward = (upper_dist < lower_dist) & ~central
79
+ h_adjusted[backward] = -np.minimum(
80
+ h[backward], 0.5 * lower_dist[backward] / num_steps)
81
+ use_one_sided[backward] = True
82
+
83
+ min_dist = np.minimum(upper_dist, lower_dist) / num_steps
84
+ adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist))
85
+ h_adjusted[adjusted_central] = min_dist[adjusted_central]
86
+ use_one_sided[adjusted_central] = False
87
+
88
+ return h_adjusted, use_one_sided
89
+
90
+
91
+ @functools.lru_cache
92
+ def _eps_for_method(x0_dtype, f0_dtype, method):
93
+ """
94
+ Calculates relative EPS step to use for a given data type
95
+ and numdiff step method.
96
+
97
+ Progressively smaller steps are used for larger floating point types.
98
+
99
+ Parameters
100
+ ----------
101
+ f0_dtype: np.dtype
102
+ dtype of function evaluation
103
+
104
+ x0_dtype: np.dtype
105
+ dtype of parameter vector
106
+
107
+ method: {'2-point', '3-point', 'cs'}
108
+
109
+ Returns
110
+ -------
111
+ EPS: float
112
+ relative step size. May be np.float16, np.float32, np.float64
113
+
114
+ Notes
115
+ -----
116
+ The default relative step will be np.float64. However, if x0 or f0 are
117
+ smaller floating point types (np.float16, np.float32), then the smallest
118
+ floating point type is chosen.
119
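+
+ Examples
+ --------
+ A minimal check of the '2-point' rule (assumes IEEE-754 doubles and the
+ private import path):
+
+ >>> import numpy as np
+ >>> from scipy.optimize._numdiff import _eps_for_method
+ >>> bool(np.isclose(_eps_for_method(np.float64, np.float64, '2-point'),
+ ...                 np.finfo(np.float64).eps ** 0.5))
+ True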
+ """
120
+ # the default EPS value
121
+ EPS = np.finfo(np.float64).eps
122
+
123
+ x0_is_fp = False
124
+ if np.issubdtype(x0_dtype, np.inexact):
125
+ # if you're a floating point type then over-ride the default EPS
126
+ EPS = np.finfo(x0_dtype).eps
127
+ x0_itemsize = np.dtype(x0_dtype).itemsize
128
+ x0_is_fp = True
129
+
130
+ if np.issubdtype(f0_dtype, np.inexact):
131
+ f0_itemsize = np.dtype(f0_dtype).itemsize
132
+ # choose the smallest itemsize between x0 and f0
133
+ if x0_is_fp and f0_itemsize < x0_itemsize:
134
+ EPS = np.finfo(f0_dtype).eps
135
+
136
+ if method in ["2-point", "cs"]:
137
+ return EPS**0.5
138
+ elif method in ["3-point"]:
139
+ return EPS**(1/3)
140
+ else:
141
+ raise RuntimeError("Unknown step method, should be one of "
142
+ "{'2-point', '3-point', 'cs'}")
143
+
144
+
145
+ def _compute_absolute_step(rel_step, x0, f0, method):
146
+ """
147
+ Computes an absolute step from a relative step for finite difference
148
+ calculation.
149
+
150
+ Parameters
151
+ ----------
152
+ rel_step: None or array-like
153
+ Relative step for the finite difference calculation
154
+ x0 : np.ndarray
155
+ Parameter vector
156
+ f0 : np.ndarray or scalar
157
+ method : {'2-point', '3-point', 'cs'}
158
+
159
+ Returns
160
+ -------
161
+ h : float
162
+ The absolute step size
163
+
164
+ Notes
165
+ -----
166
+ `h` will always be np.float64. However, if `x0` or `f0` are
167
+ smaller floating point dtypes (e.g. np.float32), then the absolute
168
+ step size will be calculated from the smallest floating point size.
169
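+
+ Examples
+ --------
+ A sketch of the default ('2-point') step with made-up inputs (private
+ import path assumed):
+
+ >>> import numpy as np
+ >>> from scipy.optimize._numdiff import _compute_absolute_step
+ >>> h = _compute_absolute_step(None, np.array([1.0, -2.0]),
+ ...                            np.array(0.0), '2-point')
+ >>> bool(np.allclose(np.abs(h),
+ ...                  np.finfo(np.float64).eps**0.5 * np.array([1.0, 2.0])))
+ True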
+ """
170
+ # this is used instead of np.sign(x0) because we need
171
+ # sign_x0 to be 1 when x0 == 0.
172
+ sign_x0 = (x0 >= 0).astype(float) * 2 - 1
173
+
174
+ rstep = _eps_for_method(x0.dtype, f0.dtype, method)
175
+
176
+ if rel_step is None:
177
+ abs_step = rstep * sign_x0 * np.maximum(1.0, np.abs(x0))
178
+ else:
179
+ # User has requested specific relative steps.
180
+ # Don't multiply by max(1, abs(x0)) because if x0 < 1 then their
181
+ # requested step is not used.
182
+ abs_step = rel_step * sign_x0 * np.abs(x0)
183
+
184
+ # however we don't want an abs_step of 0, which can happen if
185
+ # rel_step is 0, or x0 is 0. Instead, substitute a realistic step
186
+ dx = ((x0 + abs_step) - x0)
187
+ abs_step = np.where(dx == 0,
188
+ rstep * sign_x0 * np.maximum(1.0, np.abs(x0)),
189
+ abs_step)
190
+
191
+ return abs_step
192
+
193
+
194
+ def _prepare_bounds(bounds, x0):
195
+ """
196
+ Prepares new-style bounds from a two-tuple specifying the lower and upper
197
+ limits for values in x0. If a value is not bound then the lower/upper bound
198
+ will be expected to be -np.inf/np.inf.
199
+
200
+ Examples
201
+ --------
202
+ >>> _prepare_bounds([(0, 1, 2), (1, 2, np.inf)], [0.5, 1.5, 2.5])
203
+ (array([0., 1., 2.]), array([ 1., 2., inf]))
204
+ """
205
+ lb, ub = (np.asarray(b, dtype=float) for b in bounds)
206
+ if lb.ndim == 0:
207
+ lb = np.resize(lb, x0.shape)
208
+
209
+ if ub.ndim == 0:
210
+ ub = np.resize(ub, x0.shape)
211
+
212
+ return lb, ub
213
+
214
+
215
+ def group_columns(A, order=0):
216
+ """Group columns of a 2-D matrix for sparse finite differencing [1]_.
217
+
218
+ Two columns are in the same group if in each row at least one of them
219
+ has zero. A greedy sequential algorithm is used to construct groups.
220
+
221
+ Parameters
222
+ ----------
223
+ A : array_like or sparse matrix, shape (m, n)
224
+ Matrix of which to group columns.
225
+ order : int, iterable of int with shape (n,) or None
226
+ Permutation array which defines the order of columns enumeration.
227
+ If int or None, a random permutation is used with `order` used as
228
+ a random seed. Default is 0, that is use a random permutation but
229
+ guarantee repeatability.
230
+
231
+ Returns
232
+ -------
233
+ groups : ndarray of int, shape (n,)
234
+ Contains values from 0 to n_groups-1, where n_groups is the number
235
+ of found groups. Each value ``groups[i]`` is an index of a group to
236
+ which the ith column is assigned. The procedure is helpful only if
237
+ n_groups is significantly less than n.
238
+
239
+ References
240
+ ----------
241
+ .. [1] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
242
+ sparse Jacobian matrices", Journal of the Institute of Mathematics
243
+ and its Applications, 13 (1974), pp. 117-120.
244
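+
+ Examples
+ --------
+ A small sketch with a hand-made sparsity pattern; `order` is fixed so
+ the greedy result is deterministic (private import path assumed):
+
+ >>> import numpy as np
+ >>> from scipy.optimize._numdiff import group_columns
+ >>> A = np.array([[1, 0, 0],
+ ...               [0, 1, 1],
+ ...               [1, 0, 1]])
+ >>> group_columns(A, order=np.arange(3)).tolist()
+ [0, 0, 1]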
+ """
245
+ if issparse(A):
246
+ A = csc_matrix(A)
247
+ else:
248
+ A = np.atleast_2d(A)
249
+ A = (A != 0).astype(np.int32)
250
+
251
+ if A.ndim != 2:
252
+ raise ValueError("`A` must be 2-dimensional.")
253
+
254
+ m, n = A.shape
255
+
256
+ if order is None or np.isscalar(order):
257
+ rng = np.random.RandomState(order)
258
+ order = rng.permutation(n)
259
+ else:
260
+ order = np.asarray(order)
261
+ if order.shape != (n,):
262
+ raise ValueError("`order` has incorrect shape.")
263
+
264
+ A = A[:, order]
265
+
266
+ if issparse(A):
267
+ groups = group_sparse(m, n, A.indices, A.indptr)
268
+ else:
269
+ groups = group_dense(m, n, A)
270
+
271
+ groups[order] = groups.copy()
272
+
273
+ return groups
274
+
275
+
276
+ def approx_derivative(fun, x0, method='3-point', rel_step=None, abs_step=None,
277
+ f0=None, bounds=(-np.inf, np.inf), sparsity=None,
278
+ as_linear_operator=False, args=(), kwargs={}):
279
+ """Compute finite difference approximation of the derivatives of a
280
+ vector-valued function.
281
+
282
+ If a function maps from R^n to R^m, its derivatives form m-by-n matrix
283
+ called the Jacobian, where an element (i, j) is a partial derivative of
284
+ f[i] with respect to x[j].
285
+
286
+ Parameters
287
+ ----------
288
+ fun : callable
289
+ Function of which to estimate the derivatives. The argument x
290
+ passed to this function is ndarray of shape (n,) (never a scalar
291
+ even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
292
+ x0 : array_like of shape (n,) or float
293
+ Point at which to estimate the derivatives. Float will be converted
294
+ to a 1-D array.
295
+ method : {'3-point', '2-point', 'cs'}, optional
296
+ Finite difference method to use:
297
+ - '2-point' - use the first order accuracy forward or backward
298
+ difference.
299
+ - '3-point' - use central difference in interior points and the
300
+ second order accuracy forward or backward difference
301
+ near the boundary.
302
+ - 'cs' - use a complex-step finite difference scheme. This assumes
303
+ that the user function is real-valued and can be
304
+ analytically continued to the complex plane. Otherwise,
305
+ it produces bogus results.
306
+ rel_step : None or array_like, optional
307
+ Relative step size to use. If None (default) the absolute step size is
308
+ computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, with
309
+ `rel_step` being selected automatically, see Notes. Otherwise
310
+ ``h = rel_step * sign(x0) * abs(x0)``. For ``method='3-point'`` the
311
+ sign of `h` is ignored. The calculated step size is possibly adjusted
312
+ to fit into the bounds.
313
+ abs_step : array_like, optional
314
+ Absolute step size to use, possibly adjusted to fit into the bounds.
315
+ For ``method='3-point'`` the sign of `abs_step` is ignored. By default
316
+ relative steps are used, only if ``abs_step is not None`` are absolute
317
+ steps used.
318
+ f0 : None or array_like, optional
319
+ If not None it is assumed to be equal to ``fun(x0)``, in this case
320
+ the ``fun(x0)`` is not called. Default is None.
321
+ bounds : tuple of array_like, optional
322
+ Lower and upper bounds on independent variables. Defaults to no bounds.
323
+ Each bound must match the size of `x0` or be a scalar, in the latter
324
+ case the bound will be the same for all variables. Use it to limit the
325
+ range of function evaluation. Bounds checking is not implemented
326
+ when `as_linear_operator` is True.
327
+ sparsity : {None, array_like, sparse matrix, 2-tuple}, optional
328
+ Defines a sparsity structure of the Jacobian matrix. If the Jacobian
329
+ matrix is known to have only few non-zero elements in each row, then
330
+ it's possible to estimate its several columns by a single function
331
+ evaluation [3]_. To perform such economic computations two ingredients
332
+ are required:
333
+
334
+ * structure : array_like or sparse matrix of shape (m, n). A zero
335
+ element means that a corresponding element of the Jacobian
336
+ identically equals to zero.
337
+ * groups : array_like of shape (n,). A column grouping for a given
338
+ sparsity structure, use `group_columns` to obtain it.
339
+
340
+ A single array or a sparse matrix is interpreted as a sparsity
341
+ structure, and groups are computed inside the function. A tuple is
342
+ interpreted as (structure, groups). If None (default), a standard
343
+ dense differencing will be used.
344
+
345
+ Note that sparse differencing makes sense only for large Jacobian
346
+ matrices where each row contains few non-zero elements.
347
+ as_linear_operator : bool, optional
348
+ When True the function returns an `scipy.sparse.linalg.LinearOperator`.
349
+ Otherwise it returns a dense array or a sparse matrix depending on
350
+ `sparsity`. The linear operator provides an efficient way of computing
351
+ ``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow
352
+ direct access to individual elements of the matrix. By default
353
+ `as_linear_operator` is False.
354
+ args, kwargs : tuple and dict, optional
355
+ Additional arguments passed to `fun`. Both empty by default.
356
+ The calling signature is ``fun(x, *args, **kwargs)``.
357
+
358
+ Returns
359
+ -------
360
+ J : {ndarray, sparse matrix, LinearOperator}
361
+ Finite difference approximation of the Jacobian matrix.
362
+ If `as_linear_operator` is True returns a LinearOperator
363
+ with shape (m, n). Otherwise it returns a dense array or sparse
364
+ matrix depending on how `sparsity` is defined. If `sparsity`
365
+ is None then a ndarray with shape (m, n) is returned. If
366
+ `sparsity` is not None returns a csr_matrix with shape (m, n).
367
+ For sparse matrices and linear operators it is always returned as
368
+ a 2-D structure, for ndarrays, if m=1 it is returned
369
+ as a 1-D gradient array with shape (n,).
370
+
371
+ See Also
372
+ --------
373
+ check_derivative : Check correctness of a function computing derivatives.
374
+
375
+ Notes
376
+ -----
377
+ If `rel_step` is not provided, it is assigned as ``EPS**(1/s)``, where EPS is
378
+ determined from the smallest floating point dtype of `x0` or `fun(x0)`,
379
+ ``np.finfo(x0.dtype).eps``, s=2 for '2-point' method and
380
+ s=3 for '3-point' method. Such relative step approximately minimizes a sum
381
+ of truncation and round-off errors, see [1]_. Relative steps are used by
382
+ default. However, absolute steps are used when ``abs_step is not None``.
383
+ If any of the absolute or relative steps produces an indistinguishable
384
+ difference from the original `x0`, ``(x0 + dx) - x0 == 0``, then an
385
+ automatic step size is substituted for that particular entry.
386
+
387
+ A finite difference scheme for '3-point' method is selected automatically.
388
+ The well-known central difference scheme is used for points sufficiently
389
+ far from the boundary, and 3-point forward or backward scheme is used for
390
+ points near the boundary. Both schemes have the second-order accuracy in
391
+ terms of Taylor expansion. Refer to [2]_ for the formulas of 3-point
392
+ forward and backward difference schemes.
393
+
394
+ For dense differencing, when m=1 the Jacobian is returned with shape (n,);
395
+ on the other hand, when n=1 the Jacobian is returned with shape (m, 1).
396
+ Our motivation is the following: a) it handles the case of gradient
397
+ computation (m=1) in a conventional way; b) it clearly separates these two
398
+ different cases; c) in all cases np.atleast_2d can be called to get a 2-D
399
+ Jacobian with correct dimensions.
400
+
401
+ References
402
+ ----------
403
+ .. [1] W. H. Press et. al. "Numerical Recipes. The Art of Scientific
404
+ Computing. 3rd edition", sec. 5.7.
405
+
406
+ .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
407
+ sparse Jacobian matrices", Journal of the Institute of Mathematics
408
+ and its Applications, 13 (1974), pp. 117-120.
409
+
410
+ .. [3] B. Fornberg, "Generation of Finite Difference Formulas on
411
+ Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988.
412
+
413
+ Examples
414
+ --------
415
+ >>> import numpy as np
416
+ >>> from scipy.optimize._numdiff import approx_derivative
417
+ >>>
418
+ >>> def f(x, c1, c2):
419
+ ... return np.array([x[0] * np.sin(c1 * x[1]),
420
+ ... x[0] * np.cos(c2 * x[1])])
421
+ ...
422
+ >>> x0 = np.array([1.0, 0.5 * np.pi])
423
+ >>> approx_derivative(f, x0, args=(1, 2))
424
+ array([[ 1., 0.],
425
+ [-1., 0.]])
426
+
427
+ Bounds can be used to limit the region of function evaluation.
428
+ In the example below we compute left and right derivative at point 1.0.
429
+
430
+ >>> def g(x):
431
+ ... return x**2 if x >= 1 else x
432
+ ...
433
+ >>> x0 = 1.0
434
+ >>> approx_derivative(g, x0, bounds=(-np.inf, 1.0))
435
+ array([ 1.])
436
+ >>> approx_derivative(g, x0, bounds=(1.0, np.inf))
437
+ array([ 2.])
438
+ """
439
+ if method not in ['2-point', '3-point', 'cs']:
440
+ raise ValueError("Unknown method '%s'. " % method)
441
+
442
+ xp = array_namespace(x0)
443
+ _x = atleast_nd(x0, ndim=1, xp=xp)
444
+ _dtype = xp.float64
445
+ if xp.isdtype(_x.dtype, "real floating"):
446
+ _dtype = _x.dtype
447
+
448
+ # promotes to floating
449
+ x0 = xp.astype(_x, _dtype)
450
+
451
+ if x0.ndim > 1:
452
+ raise ValueError("`x0` must have at most 1 dimension.")
453
+
454
+ lb, ub = _prepare_bounds(bounds, x0)
455
+
456
+ if lb.shape != x0.shape or ub.shape != x0.shape:
457
+ raise ValueError("Inconsistent shapes between bounds and `x0`.")
458
+
459
+ if as_linear_operator and not (np.all(np.isinf(lb))
460
+ and np.all(np.isinf(ub))):
461
+ raise ValueError("Bounds not supported when "
462
+ "`as_linear_operator` is True.")
463
+
464
+ def fun_wrapped(x):
465
+ # send the user function the same fp type as x0 (but only if cs is not
466
+ # being used)
467
+ if xp.isdtype(x.dtype, "real floating"):
468
+ x = xp.astype(x, x0.dtype)
469
+
470
+ f = np.atleast_1d(fun(x, *args, **kwargs))
471
+ if f.ndim > 1:
472
+ raise RuntimeError("`fun` return value has "
473
+ "more than 1 dimension.")
474
+ return f
475
+
476
+ if f0 is None:
477
+ f0 = fun_wrapped(x0)
478
+ else:
479
+ f0 = np.atleast_1d(f0)
480
+ if f0.ndim > 1:
481
+ raise ValueError("`f0` passed has more than 1 dimension.")
482
+
483
+ if np.any((x0 < lb) | (x0 > ub)):
484
+ raise ValueError("`x0` violates bound constraints.")
485
+
486
+ if as_linear_operator:
487
+ if rel_step is None:
488
+ rel_step = _eps_for_method(x0.dtype, f0.dtype, method)
489
+
490
+ return _linear_operator_difference(fun_wrapped, x0,
491
+ f0, rel_step, method)
492
+ else:
493
+ # by default we use rel_step
494
+ if abs_step is None:
495
+ h = _compute_absolute_step(rel_step, x0, f0, method)
496
+ else:
497
+ # user specifies an absolute step
498
+ sign_x0 = (x0 >= 0).astype(float) * 2 - 1
499
+ h = abs_step
500
+
501
+ # cannot have a zero step. This might happen if x0 is very large
502
+ # or small. In which case fall back to relative step.
503
+ dx = ((x0 + h) - x0)
504
+ h = np.where(dx == 0,
505
+ _eps_for_method(x0.dtype, f0.dtype, method) *
506
+ sign_x0 * np.maximum(1.0, np.abs(x0)),
507
+ h)
508
+
509
+ if method == '2-point':
510
+ h, use_one_sided = _adjust_scheme_to_bounds(
511
+ x0, h, 1, '1-sided', lb, ub)
512
+ elif method == '3-point':
513
+ h, use_one_sided = _adjust_scheme_to_bounds(
514
+ x0, h, 1, '2-sided', lb, ub)
515
+ elif method == 'cs':
516
+ use_one_sided = False
517
+
518
+ if sparsity is None:
519
+ return _dense_difference(fun_wrapped, x0, f0, h,
520
+ use_one_sided, method)
521
+ else:
522
+ if not issparse(sparsity) and len(sparsity) == 2:
523
+ structure, groups = sparsity
524
+ else:
525
+ structure = sparsity
526
+ groups = group_columns(sparsity)
527
+
528
+ if issparse(structure):
529
+ structure = csc_matrix(structure)
530
+ else:
531
+ structure = np.atleast_2d(structure)
532
+
533
+ groups = np.atleast_1d(groups)
534
+ return _sparse_difference(fun_wrapped, x0, f0, h,
535
+ use_one_sided, structure,
536
+ groups, method)
537
+
538
+
539
+ def _linear_operator_difference(fun, x0, f0, h, method):
540
+ m = f0.size
541
+ n = x0.size
542
+
543
+ if method == '2-point':
544
+ def matvec(p):
545
+ if np.array_equal(p, np.zeros_like(p)):
546
+ return np.zeros(m)
547
+ dx = h / norm(p)
548
+ x = x0 + dx*p
549
+ df = fun(x) - f0
550
+ return df / dx
551
+
552
+ elif method == '3-point':
553
+ def matvec(p):
554
+ if np.array_equal(p, np.zeros_like(p)):
555
+ return np.zeros(m)
556
+ dx = 2*h / norm(p)
557
+ x1 = x0 - (dx/2)*p
558
+ x2 = x0 + (dx/2)*p
559
+ f1 = fun(x1)
560
+ f2 = fun(x2)
561
+ df = f2 - f1
562
+ return df / dx
563
+
564
+ elif method == 'cs':
565
+ def matvec(p):
566
+ if np.array_equal(p, np.zeros_like(p)):
567
+ return np.zeros(m)
568
+ dx = h / norm(p)
569
+ x = x0 + dx*p*1.j
570
+ f1 = fun(x)
571
+ df = f1.imag
572
+ return df / dx
573
+
574
+ else:
575
+ raise RuntimeError("Should never get here.")
576
+
577
+ return LinearOperator((m, n), matvec)
578
+
579
+
580
+ def _dense_difference(fun, x0, f0, h, use_one_sided, method):
581
+ m = f0.size
582
+ n = x0.size
583
+ J_transposed = np.empty((n, m))
584
+ h_vecs = np.diag(h)
585
+
586
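+ # Each pass of the loop below perturbs a single coordinate and fills one
+ # row of J_transposed, i.e. one column of the Jacobian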
+ for i in range(h.size):
587
+ if method == '2-point':
588
+ x = x0 + h_vecs[i]
589
+ dx = x[i] - x0[i] # Recompute dx as exactly representable number.
590
+ df = fun(x) - f0
591
+ elif method == '3-point' and use_one_sided[i]:
592
+ x1 = x0 + h_vecs[i]
593
+ x2 = x0 + 2 * h_vecs[i]
594
+ dx = x2[i] - x0[i]
595
+ f1 = fun(x1)
596
+ f2 = fun(x2)
597
+ df = -3.0 * f0 + 4 * f1 - f2
598
+ elif method == '3-point' and not use_one_sided[i]:
599
+ x1 = x0 - h_vecs[i]
600
+ x2 = x0 + h_vecs[i]
601
+ dx = x2[i] - x1[i]
602
+ f1 = fun(x1)
603
+ f2 = fun(x2)
604
+ df = f2 - f1
605
+ elif method == 'cs':
606
+ f1 = fun(x0 + h_vecs[i]*1.j)
607
+ df = f1.imag
608
+ dx = h_vecs[i, i]
609
+ else:
610
+ raise RuntimeError("Should never get here.")
611
+
612
+ J_transposed[i] = df / dx
613
+
614
+ if m == 1:
615
+ J_transposed = np.ravel(J_transposed)
616
+
617
+ return J_transposed.T
618
+
619
+
620
+ def _sparse_difference(fun, x0, f0, h, use_one_sided,
621
+ structure, groups, method):
622
+ m = f0.size
623
+ n = x0.size
624
+ row_indices = []
625
+ col_indices = []
626
+ fractions = []
627
+
628
+ n_groups = np.max(groups) + 1
629
+ for group in range(n_groups):
630
+ # Perturb variables which are in the same group simultaneously.
631
+ e = np.equal(group, groups)
632
+ h_vec = h * e
633
+ if method == '2-point':
634
+ x = x0 + h_vec
635
+ dx = x - x0
636
+ df = fun(x) - f0
637
+ # The result is written to columns which correspond to perturbed
638
+ # variables.
639
+ cols, = np.nonzero(e)
640
+ # Find all non-zero elements in selected columns of Jacobian.
641
+ i, j, _ = find(structure[:, cols])
642
+ # Restore column indices in the full array.
643
+ j = cols[j]
644
+ elif method == '3-point':
645
+ # Here we do conceptually the same but separate one-sided
646
+ # and two-sided schemes.
647
+ x1 = x0.copy()
648
+ x2 = x0.copy()
649
+
650
+ mask_1 = use_one_sided & e
651
+ x1[mask_1] += h_vec[mask_1]
652
+ x2[mask_1] += 2 * h_vec[mask_1]
653
+
654
+ mask_2 = ~use_one_sided & e
655
+ x1[mask_2] -= h_vec[mask_2]
656
+ x2[mask_2] += h_vec[mask_2]
657
+
658
+ dx = np.zeros(n)
659
+ dx[mask_1] = x2[mask_1] - x0[mask_1]
660
+ dx[mask_2] = x2[mask_2] - x1[mask_2]
661
+
662
+ f1 = fun(x1)
663
+ f2 = fun(x2)
664
+
665
+ cols, = np.nonzero(e)
666
+ i, j, _ = find(structure[:, cols])
667
+ j = cols[j]
668
+
669
+ mask = use_one_sided[j]
670
+ df = np.empty(m)
671
+
672
+ rows = i[mask]
673
+ df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows]
674
+
675
+ rows = i[~mask]
676
+ df[rows] = f2[rows] - f1[rows]
677
+ elif method == 'cs':
678
+ f1 = fun(x0 + h_vec*1.j)
679
+ df = f1.imag
680
+ dx = h_vec
681
+ cols, = np.nonzero(e)
682
+ i, j, _ = find(structure[:, cols])
683
+ j = cols[j]
684
+ else:
685
+ raise ValueError("Never be here.")
686
+
687
+ # All that's left is to compute the fraction. We store i, j and
688
+ # fractions as separate arrays and later construct coo_matrix.
689
+ row_indices.append(i)
690
+ col_indices.append(j)
691
+ fractions.append(df[i] / dx[j])
692
+
693
+ row_indices = np.hstack(row_indices)
694
+ col_indices = np.hstack(col_indices)
695
+ fractions = np.hstack(fractions)
696
+ J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n))
697
+ return csr_matrix(J)
698
+
699
+
700
+ def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(),
701
+ kwargs={}):
702
+ """Check correctness of a function computing derivatives (Jacobian or
703
+ gradient) by comparison with a finite difference approximation.
704
+
705
+ Parameters
706
+ ----------
707
+ fun : callable
708
+ Function of which to estimate the derivatives. The argument x
709
+ passed to this function is ndarray of shape (n,) (never a scalar
710
+ even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
711
+ jac : callable
712
+ Function which computes Jacobian matrix of `fun`. It must work with
713
+ argument x the same way as `fun`. The return value must be array_like
714
+ or sparse matrix with an appropriate shape.
715
+ x0 : array_like of shape (n,) or float
716
+ Point at which to estimate the derivatives. Float will be converted
717
+ to 1-D array.
718
+ bounds : 2-tuple of array_like, optional
719
+ Lower and upper bounds on independent variables. Defaults to no bounds.
720
+ Each bound must match the size of `x0` or be a scalar, in the latter
721
+ case the bound will be the same for all variables. Use it to limit the
722
+ range of function evaluation.
723
+ args, kwargs : tuple and dict, optional
724
+ Additional arguments passed to `fun` and `jac`. Both empty by default.
725
+ The calling signature is ``fun(x, *args, **kwargs)`` and the same
726
+ for `jac`.
727
+
728
+ Returns
729
+ -------
730
+ accuracy : float
731
+ The maximum among all relative errors for elements with absolute values
732
+ higher than 1 and absolute errors for elements with absolute values
733
+ less or equal than 1. If `accuracy` is on the order of 1e-6 or lower,
734
+ then it is likely that your `jac` implementation is correct.
735
+
736
+ See Also
737
+ --------
738
+ approx_derivative : Compute finite difference approximation of derivative.
739
+
740
+ Examples
741
+ --------
742
+ >>> import numpy as np
743
+ >>> from scipy.optimize._numdiff import check_derivative
744
+ >>>
745
+ >>>
746
+ >>> def f(x, c1, c2):
747
+ ... return np.array([x[0] * np.sin(c1 * x[1]),
748
+ ... x[0] * np.cos(c2 * x[1])])
749
+ ...
750
+ >>> def jac(x, c1, c2):
751
+ ... return np.array([
752
+ ... [np.sin(c1 * x[1]), c1 * x[0] * np.cos(c1 * x[1])],
753
+ ... [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])]
754
+ ... ])
755
+ ...
756
+ >>>
757
+ >>> x0 = np.array([1.0, 0.5 * np.pi])
758
+ >>> check_derivative(f, jac, x0, args=(1, 2))
759
+ 2.4492935982947064e-16
760
+ """
761
+ J_to_test = jac(x0, *args, **kwargs)
762
+ if issparse(J_to_test):
763
+ J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test,
764
+ args=args, kwargs=kwargs)
765
+ J_to_test = csr_matrix(J_to_test)
766
+ abs_err = J_to_test - J_diff
767
+ i, j, abs_err_data = find(abs_err)
768
+ J_diff_data = np.asarray(J_diff[i, j]).ravel()
769
+ return np.max(np.abs(abs_err_data) /
770
+ np.maximum(1, np.abs(J_diff_data)))
771
+ else:
772
+ J_diff = approx_derivative(fun, x0, bounds=bounds,
773
+ args=args, kwargs=kwargs)
774
+ abs_err = np.abs(J_to_test - J_diff)
775
+ return np.max(abs_err / np.maximum(1, np.abs(J_diff)))
venv/lib/python3.10/site-packages/scipy/optimize/_optimize.py ADDED
The diff for this file is too large to render.
 
venv/lib/python3.10/site-packages/scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (224 kB).
 
venv/lib/python3.10/site-packages/scipy/optimize/_qap.py ADDED
@@ -0,0 +1,731 @@
1
+ import numpy as np
2
+ import operator
3
+ from . import (linear_sum_assignment, OptimizeResult)
4
+ from ._optimize import _check_unknown_options
5
+
6
+ from scipy._lib._util import check_random_state
7
+ import itertools
8
+
9
+ QUADRATIC_ASSIGNMENT_METHODS = ['faq', '2opt']
10
+
11
+ def quadratic_assignment(A, B, method="faq", options=None):
12
+ r"""
13
+ Approximates the solution to the quadratic assignment problem and
14
+ the graph matching problem.
15
+
16
+ Quadratic assignment solves problems of the following form:
17
+
18
+ .. math::
19
+
20
+ \min_P & \ \text{trace}(A^T P B P^T)\\
21
+ \mbox{s.t. } & \ P \in \mathcal{P}\\
22
+
23
+ where :math:`\mathcal{P}` is the set of all permutation matrices,
24
+ and :math:`A` and :math:`B` are square matrices.
25
+
26
+ Graph matching tries to *maximize* the same objective function.
27
+ This algorithm can be thought of as finding the alignment of the
28
+ nodes of two graphs that minimizes the number of induced edge
29
+ disagreements, or, in the case of weighted graphs, the sum of squared
30
+ edge weight differences.
31
+
32
+ Note that the quadratic assignment problem is NP-hard. The results given
33
+ here are approximations and are not guaranteed to be optimal.
34
+
35
+
36
+ Parameters
37
+ ----------
38
+ A : 2-D array, square
39
+ The square matrix :math:`A` in the objective function above.
40
+
41
+ B : 2-D array, square
42
+ The square matrix :math:`B` in the objective function above.
43
+
44
+ method : str in {'faq', '2opt'} (default: 'faq')
45
+ The algorithm used to solve the problem.
46
+ :ref:`'faq' <optimize.qap-faq>` (default) and
47
+ :ref:`'2opt' <optimize.qap-2opt>` are available.
48
+
49
+ options : dict, optional
50
+ A dictionary of solver options. All solvers support the following:
51
+
52
+ maximize : bool (default: False)
53
+ Maximizes the objective function if ``True``.
54
+
55
+ partial_match : 2-D array of integers, optional (default: None)
56
+ Fixes part of the matching. Also known as a "seed" [2]_.
57
+
58
+ Each row of `partial_match` specifies a pair of matched nodes:
59
+ node ``partial_match[i, 0]`` of `A` is matched to node
60
+ ``partial_match[i, 1]`` of `B`. The array has shape ``(m, 2)``,
61
+ where ``m`` is not greater than the number of nodes, :math:`n`.
62
+
63
+ rng : {None, int, `numpy.random.Generator`,
64
+ `numpy.random.RandomState`}, optional
65
+
66
+ If `rng` is None (or `np.random`), the `numpy.random.RandomState`
67
+ singleton is used.
68
+ If `rng` is an int, a new ``RandomState`` instance is used,
69
+ seeded with `rng`.
70
+ If `rng` is already a ``Generator`` or ``RandomState`` instance then
71
+ that instance is used.
72
+
73
+ For method-specific options, see
74
+ :func:`show_options('quadratic_assignment') <show_options>`.
75
+
76
+ Returns
77
+ -------
78
+ res : OptimizeResult
79
+ `OptimizeResult` containing the following fields.
80
+
81
+ col_ind : 1-D array
82
+ Column indices corresponding to the best permutation found of the
83
+ nodes of `B`.
84
+ fun : float
85
+ The objective value of the solution.
86
+ nit : int
87
+ The number of iterations performed during optimization.
88
+
89
+ Notes
90
+ -----
91
+ The default method :ref:`'faq' <optimize.qap-faq>` uses the Fast
92
+ Approximate QAP algorithm [1]_; it typically offers the best combination of
93
+ speed and accuracy.
94
+ Method :ref:`'2opt' <optimize.qap-2opt>` can be computationally expensive,
95
+ but may be a useful alternative, or it can be used to refine the solution
96
+ returned by another method.
97
+
98
+ References
99
+ ----------
100
+ .. [1] J.T. Vogelstein, J.M. Conroy, V. Lyzinski, L.J. Podrazik,
101
+ S.G. Kratzer, E.T. Harley, D.E. Fishkind, R.J. Vogelstein, and
102
+ C.E. Priebe, "Fast approximate quadratic programming for graph
103
+ matching," PLOS one, vol. 10, no. 4, p. e0121002, 2015,
104
+ :doi:`10.1371/journal.pone.0121002`
105
+
106
+ .. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski,
107
+ C. Priebe, "Seeded graph matching", Pattern Recognit. 87 (2019):
108
+ 203-215, :doi:`10.1016/j.patcog.2018.09.014`
109
+
110
+ .. [3] "2-opt," Wikipedia.
111
+ https://en.wikipedia.org/wiki/2-opt
112
+
113
+ Examples
114
+ --------
115
+ >>> import numpy as np
116
+ >>> from scipy.optimize import quadratic_assignment
117
+ >>> A = np.array([[0, 80, 150, 170], [80, 0, 130, 100],
118
+ ... [150, 130, 0, 120], [170, 100, 120, 0]])
119
+ >>> B = np.array([[0, 5, 2, 7], [0, 0, 3, 8],
120
+ ... [0, 0, 0, 3], [0, 0, 0, 0]])
121
+ >>> res = quadratic_assignment(A, B)
122
+ >>> print(res)
123
+ fun: 3260
124
+ col_ind: [0 3 2 1]
125
+ nit: 9
126
+
127
+ The see the relationship between the returned ``col_ind`` and ``fun``,
128
+ use ``col_ind`` to form the best permutation matrix found, then evaluate
129
+ the objective function :math:`f(P) = trace(A^T P B P^T )`.
130
+
131
+ >>> perm = res['col_ind']
132
+ >>> P = np.eye(len(A), dtype=int)[perm]
133
+ >>> fun = np.trace(A.T @ P @ B @ P.T)
134
+ >>> print(fun)
135
+ 3260
136
+
137
+ Alternatively, to avoid constructing the permutation matrix explicitly,
138
+ directly permute the rows and columns of the distance matrix.
139
+
140
+ >>> fun = np.trace(A.T @ B[perm][:, perm])
141
+ >>> print(fun)
142
+ 3260
143
+
144
+ Although not guaranteed in general, ``quadratic_assignment`` happens to
145
+ have found the globally optimal solution.
146
+
147
+ >>> from itertools import permutations
148
+ >>> perm_opt, fun_opt = None, np.inf
149
+ >>> for perm in permutations([0, 1, 2, 3]):
150
+ ... perm = np.array(perm)
151
+ ... fun = np.trace(A.T @ B[perm][:, perm])
152
+ ... if fun < fun_opt:
153
+ ... fun_opt, perm_opt = fun, perm
154
+ >>> print(np.array_equal(perm_opt, res['col_ind']))
155
+ True
156
+
157
+ Here is an example for which the default method,
158
+ :ref:`'faq' <optimize.qap-faq>`, does not find the global optimum.
159
+
160
+ >>> A = np.array([[0, 5, 8, 6], [5, 0, 5, 1],
161
+ ... [8, 5, 0, 2], [6, 1, 2, 0]])
162
+ >>> B = np.array([[0, 1, 8, 4], [1, 0, 5, 2],
163
+ ... [8, 5, 0, 5], [4, 2, 5, 0]])
164
+ >>> res = quadratic_assignment(A, B)
165
+ >>> print(res)
166
+ fun: 178
167
+ col_ind: [1 0 3 2]
168
+ nit: 13
169
+
170
+ If accuracy is important, consider using :ref:`'2opt' <optimize.qap-2opt>`
171
+ to refine the solution.
172
+
173
+ >>> guess = np.array([np.arange(len(A)), res.col_ind]).T
174
+ >>> res = quadratic_assignment(A, B, method="2opt",
175
+ ... options = {'partial_guess': guess})
176
+ >>> print(res)
177
+ fun: 176
178
+ col_ind: [1 2 3 0]
179
+ nit: 17
180
+
181
+ """
182
+
183
+ if options is None:
184
+ options = {}
185
+
186
+ method = method.lower()
187
+ methods = {"faq": _quadratic_assignment_faq,
188
+ "2opt": _quadratic_assignment_2opt}
189
+ if method not in methods:
190
+ raise ValueError(f"method {method} must be in {methods}.")
191
+ res = methods[method](A, B, **options)
192
+ return res
193
+
194
+
195
+ def _calc_score(A, B, perm):
196
+ # equivalent to objective function but avoids matmul
197
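+ # i.e. trace(A.T @ P @ B @ P.T), with P the permutation matrix built
+ # from `perm`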
+ return np.sum(A * B[perm][:, perm])
198
+
199
+
200
+ def _common_input_validation(A, B, partial_match):
201
+ A = np.atleast_2d(A)
202
+ B = np.atleast_2d(B)
203
+
204
+ if partial_match is None:
205
+ partial_match = np.array([[], []]).T
206
+ partial_match = np.atleast_2d(partial_match).astype(int)
207
+
208
+ msg = None
209
+ if A.shape[0] != A.shape[1]:
210
+ msg = "`A` must be square"
211
+ elif B.shape[0] != B.shape[1]:
212
+ msg = "`B` must be square"
213
+ elif A.ndim != 2 or B.ndim != 2:
214
+ msg = "`A` and `B` must have exactly two dimensions"
215
+ elif A.shape != B.shape:
216
+ msg = "`A` and `B` matrices must be of equal size"
217
+ elif partial_match.shape[0] > A.shape[0]:
218
+ msg = "`partial_match` can have only as many seeds as there are nodes"
219
+ elif partial_match.shape[1] != 2:
220
+ msg = "`partial_match` must have two columns"
221
+ elif partial_match.ndim != 2:
222
+ msg = "`partial_match` must have exactly two dimensions"
223
+ elif (partial_match < 0).any():
224
+ msg = "`partial_match` must contain only positive indices"
225
+ elif (partial_match >= len(A)).any():
226
+ msg = "`partial_match` entries must be less than number of nodes"
227
+ elif (not len(set(partial_match[:, 0])) == len(partial_match[:, 0]) or
228
+ not len(set(partial_match[:, 1])) == len(partial_match[:, 1])):
229
+ msg = "`partial_match` column entries must be unique"
230
+
231
+ if msg is not None:
232
+ raise ValueError(msg)
233
+
234
+ return A, B, partial_match
235
+
236
+
237
+ def _quadratic_assignment_faq(A, B,
238
+ maximize=False, partial_match=None, rng=None,
239
+ P0="barycenter", shuffle_input=False, maxiter=30,
240
+ tol=0.03, **unknown_options):
241
+ r"""Solve the quadratic assignment problem (approximately).
242
+
243
+ This function solves the Quadratic Assignment Problem (QAP) and the
244
+ Graph Matching Problem (GMP) using the Fast Approximate QAP Algorithm
245
+ (FAQ) [1]_.
246
+
247
+ Quadratic assignment solves problems of the following form:
248
+
249
+ .. math::
250
+
251
+ \min_P & \ \text{trace}(A^T P B P^T)\\
252
+ \mbox{s.t. } & \ P \in \mathcal{P}\\
253
+
254
+ where :math:`\mathcal{P}` is the set of all permutation matrices,
255
+ and :math:`A` and :math:`B` are square matrices.
256
+
257
+ Graph matching tries to *maximize* the same objective function.
258
+ This algorithm can be thought of as finding the alignment of the
259
+ nodes of two graphs that minimizes the number of induced edge
260
+ disagreements, or, in the case of weighted graphs, the sum of squared
261
+ edge weight differences.
262
+
263
+ Note that the quadratic assignment problem is NP-hard. The results given
264
+ here are approximations and are not guaranteed to be optimal.
265
+
266
+ Parameters
267
+ ----------
268
+ A : 2-D array, square
269
+ The square matrix :math:`A` in the objective function above.
270
+ B : 2-D array, square
271
+ The square matrix :math:`B` in the objective function above.
272
+ method : str in {'faq', '2opt'} (default: 'faq')
273
+ The algorithm used to solve the problem. This is the method-specific
274
+ documentation for 'faq'.
275
+ :ref:`'2opt' <optimize.qap-2opt>` is also available.
276
+
277
+ Options
278
+ -------
279
+ maximize : bool (default: False)
280
+ Maximizes the objective function if ``True``.
281
+ partial_match : 2-D array of integers, optional (default: None)
282
+ Fixes part of the matching. Also known as a "seed" [2]_.
283
+
284
+ Each row of `partial_match` specifies a pair of matched nodes:
285
+ node ``partial_match[i, 0]`` of `A` is matched to node
286
+ ``partial_match[i, 1]`` of `B`. The array has shape ``(m, 2)``, where
287
+ ``m`` is not greater than the number of nodes, :math:`n`.
288
+
289
+ rng : {None, int, `numpy.random.Generator`,
290
+ `numpy.random.RandomState`}, optional
291
+
292
+ If `rng` is None (or `np.random`), the `numpy.random.RandomState`
293
+ singleton is used.
294
+ If `rng` is an int, a new ``RandomState`` instance is used,
295
+ seeded with `rng`.
296
+ If `rng` is already a ``Generator`` or ``RandomState`` instance then
297
+ that instance is used.
298
+ P0 : 2-D array, "barycenter", or "randomized" (default: "barycenter")
299
+ Initial position. Must be a doubly-stochastic matrix [3]_.
300
+
301
+ If the initial position is an array, it must be a doubly stochastic
302
+ matrix of size :math:`m' \times m'` where :math:`m' = n - m`.
303
+
304
+ If ``"barycenter"`` (default), the initial position is the barycenter
305
+ of the Birkhoff polytope (the space of doubly stochastic matrices).
306
+ This is a :math:`m' \times m'` matrix with all entries equal to
307
+ :math:`1 / m'`.
308
+
309
+ If ``"randomized"`` the initial search position is
310
+ :math:`P_0 = (J + K) / 2`, where :math:`J` is the barycenter and
311
+ :math:`K` is a random doubly stochastic matrix.
312
+ shuffle_input : bool (default: False)
313
+ Set to `True` to resolve degenerate gradients randomly. For
314
+ non-degenerate gradients this option has no effect.
315
+ maxiter : int, positive (default: 30)
316
+ Integer specifying the max number of Frank-Wolfe iterations performed.
317
+ tol : float (default: 0.03)
318
+ Tolerance for termination. Frank-Wolfe iteration terminates when
319
+ :math:`\frac{||P_{i}-P_{i+1}||_F}{\sqrt{m')}} \leq tol`,
320
+ where :math:`i` is the iteration number.
321
+
322
+ Returns
323
+ -------
324
+ res : OptimizeResult
325
+ `OptimizeResult` containing the following fields.
326
+
327
+ col_ind : 1-D array
328
+ Column indices corresponding to the best permutation found of the
329
+ nodes of `B`.
330
+ fun : float
331
+ The objective value of the solution.
332
+ nit : int
333
+ The number of Frank-Wolfe iterations performed.
334
+
335
+ Notes
336
+ -----
337
+ The algorithm may be sensitive to the initial permutation matrix (or
338
+ search "position") due to the possibility of several local minima
339
+ within the feasible region. A barycenter initialization is more likely to
340
+ result in a better solution than a single random initialization. However,
341
+ calling ``quadratic_assignment`` several times with different random
342
+ initializations may result in a better optimum at the cost of longer
343
+ total execution time.
344
+
345
+ Examples
346
+ --------
347
+ As mentioned above, a barycenter initialization often results in a better
348
+ solution than a single random initialization.
349
+
350
+ >>> from numpy.random import default_rng
351
+ >>> rng = default_rng()
352
+ >>> n = 15
353
+ >>> A = rng.random((n, n))
354
+ >>> B = rng.random((n, n))
355
+ >>> res = quadratic_assignment(A, B) # FAQ is default method
356
+ >>> print(res.fun)
357
+ 46.871483385480545 # may vary
358
+
359
+ >>> options = {"P0": "randomized"} # use randomized initialization
360
+ >>> res = quadratic_assignment(A, B, options=options)
361
+ >>> print(res.fun)
362
+ 47.224831071310625 # may vary
363
+
364
+ However, consider running from several randomized initializations and
365
+ keeping the best result.
366
+
367
+ >>> res = min([quadratic_assignment(A, B, options=options)
368
+ ... for i in range(30)], key=lambda x: x.fun)
369
+ >>> print(res.fun)
370
+ 46.671852533681516 # may vary
371
+
372
+ The '2-opt' method can be used to further refine the results.
373
+
374
+ >>> options = {"partial_guess": np.array([np.arange(n), res.col_ind]).T}
375
+ >>> res = quadratic_assignment(A, B, method="2opt", options=options)
376
+ >>> print(res.fun)
377
+ 46.47160735721583 # may vary
378
+
379
+ References
380
+ ----------
381
+ .. [1] J.T. Vogelstein, J.M. Conroy, V. Lyzinski, L.J. Podrazik,
382
+ S.G. Kratzer, E.T. Harley, D.E. Fishkind, R.J. Vogelstein, and
383
+ C.E. Priebe, "Fast approximate quadratic programming for graph
384
+ matching," PLOS one, vol. 10, no. 4, p. e0121002, 2015,
385
+ :doi:`10.1371/journal.pone.0121002`
386
+
387
+ .. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski,
388
+ C. Priebe, "Seeded graph matching", Pattern Recognit. 87 (2019):
389
+ 203-215, :doi:`10.1016/j.patcog.2018.09.014`
390
+
391
+ .. [3] "Doubly stochastic Matrix," Wikipedia.
392
+ https://en.wikipedia.org/wiki/Doubly_stochastic_matrix
393
+
394
+ """
395
+
396
+ _check_unknown_options(unknown_options)
397
+
398
+ maxiter = operator.index(maxiter)
399
+
400
+ # ValueError check
401
+ A, B, partial_match = _common_input_validation(A, B, partial_match)
402
+
403
+ msg = None
404
+ if isinstance(P0, str) and P0 not in {'barycenter', 'randomized'}:
405
+ msg = "Invalid 'P0' parameter string"
406
+ elif maxiter <= 0:
407
+ msg = "'maxiter' must be a positive integer"
408
+ elif tol <= 0:
409
+ msg = "'tol' must be a positive float"
410
+ if msg is not None:
411
+ raise ValueError(msg)
412
+
413
+ rng = check_random_state(rng)
414
+ n = len(A) # number of vertices in graphs
415
+ n_seeds = len(partial_match) # number of seeds
416
+ n_unseed = n - n_seeds
417
+
418
+ # [1] Algorithm 1 Line 1 - choose initialization
419
+ if not isinstance(P0, str):
420
+ P0 = np.atleast_2d(P0)
421
+ if P0.shape != (n_unseed, n_unseed):
422
+ msg = "`P0` matrix must have shape m' x m', where m'=n-m"
423
+ elif ((P0 < 0).any() or not np.allclose(np.sum(P0, axis=0), 1)
424
+ or not np.allclose(np.sum(P0, axis=1), 1)):
425
+ msg = "`P0` matrix must be doubly stochastic"
426
+ if msg is not None:
427
+ raise ValueError(msg)
428
+ elif P0 == 'barycenter':
429
+ P0 = np.ones((n_unseed, n_unseed)) / n_unseed
430
+ elif P0 == 'randomized':
431
+ J = np.ones((n_unseed, n_unseed)) / n_unseed
432
+ # generate an m' x m' matrix where each entry is a random number in [0, 1]
433
+ # would use rand, but Generators don't have it
434
+ # would use random, but old mtrand.RandomStates don't have it
435
+ K = _doubly_stochastic(rng.uniform(size=(n_unseed, n_unseed)))
436
+ P0 = (J + K) / 2
437
+
438
+ # check trivial cases
439
+ if n == 0 or n_seeds == n:
440
+ score = _calc_score(A, B, partial_match[:, 1])
441
+ res = {"col_ind": partial_match[:, 1], "fun": score, "nit": 0}
442
+ return OptimizeResult(res)
443
+
444
+ obj_func_scalar = 1
445
+ if maximize:
446
+ obj_func_scalar = -1
447
+
448
+ nonseed_B = np.setdiff1d(range(n), partial_match[:, 1])
449
+ if shuffle_input:
450
+ nonseed_B = rng.permutation(nonseed_B)
451
+
452
+ nonseed_A = np.setdiff1d(range(n), partial_match[:, 0])
453
+ perm_A = np.concatenate([partial_match[:, 0], nonseed_A])
454
+ perm_B = np.concatenate([partial_match[:, 1], nonseed_B])
455
+
456
+ # definitions according to Seeded Graph Matching [2].
457
+ A11, A12, A21, A22 = _split_matrix(A[perm_A][:, perm_A], n_seeds)
458
+ B11, B12, B21, B22 = _split_matrix(B[perm_B][:, perm_B], n_seeds)
459
+ const_sum = A21 @ B21.T + A12.T @ B12
460
+
461
+ P = P0
462
+ # [1] Algorithm 1 Line 2 - loop while stopping criteria not met
463
+ for n_iter in range(1, maxiter+1):
464
+ # [1] Algorithm 1 Line 3 - compute the gradient of f(P) = -tr(APB^tP^t)
465
+ grad_fp = (const_sum + A22 @ P @ B22.T + A22.T @ P @ B22)
466
+ # [1] Algorithm 1 Line 4 - get direction Q by solving Eq. 8
467
+ _, cols = linear_sum_assignment(grad_fp, maximize=maximize)
468
+ Q = np.eye(n_unseed)[cols]
469
+
470
+ # [1] Algorithm 1 Line 5 - compute the step size
471
+ # Noting that e.g. trace(Ax) = trace(A)*x, expand and re-collect
472
+ # terms as ax**2 + bx + c. c does not affect location of minimum
473
+ # and can be ignored. Also, note that trace(A@B) = (A.T*B).sum();
474
+ # apply where possible for efficiency.
475
+ R = P - Q
476
+ b21 = ((R.T @ A21) * B21).sum()
477
+ b12 = ((R.T @ A12.T) * B12.T).sum()
478
+ AR22 = A22.T @ R
479
+ BR22 = B22 @ R.T
480
+ b22a = (AR22 * B22.T[cols]).sum()
481
+ b22b = (A22 * BR22[cols]).sum()
482
+ a = (AR22.T * BR22).sum()
483
+ b = b21 + b12 + b22a + b22b
484
+ # critical point of ax^2 + bx + c is at x = -b/(2*a)
485
+ # if a * obj_func_scalar > 0, it is a minimum
486
+ # if minimum is not in [0, 1], only endpoints need to be considered
487
+ if a*obj_func_scalar > 0 and 0 <= -b/(2*a) <= 1:
488
+ alpha = -b/(2*a)
489
+ else:
490
+ alpha = np.argmin([0, (b + a)*obj_func_scalar])
491
+
492
+ # [1] Algorithm 1 Line 6 - Update P
493
+ P_i1 = alpha * P + (1 - alpha) * Q
494
+ if np.linalg.norm(P - P_i1) / np.sqrt(n_unseed) < tol:
495
+ P = P_i1
496
+ break
497
+ P = P_i1
498
+ # [1] Algorithm 1 Line 7 - end main loop
499
+
500
+ # [1] Algorithm 1 Line 8 - project onto the set of permutation matrices
501
+ _, col = linear_sum_assignment(P, maximize=True)
502
+ perm = np.concatenate((np.arange(n_seeds), col + n_seeds))
503
+
504
+ unshuffled_perm = np.zeros(n, dtype=int)
505
+ unshuffled_perm[perm_A] = perm_B[perm]
506
+
507
+ score = _calc_score(A, B, unshuffled_perm)
508
+ res = {"col_ind": unshuffled_perm, "fun": score, "nit": n_iter}
509
+ return OptimizeResult(res)
510
+
511
+
512
+ def _split_matrix(X, n):
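+ """Split X into the four blocks around its leading n x n submatrix.
+
+ A minimal sketch of the block split:
+
+ >>> X = np.arange(16).reshape(4, 4)
+ >>> A11, A12, A21, A22 = _split_matrix(X, 2)
+ >>> A11.shape == A12.shape == A21.shape == A22.shape == (2, 2)
+ True
+ """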
513
+ # definitions according to Seeded Graph Matching [2].
514
+ upper, lower = X[:n], X[n:]
515
+ return upper[:, :n], upper[:, n:], lower[:, :n], lower[:, n:]
516
+
517
+
518
+ def _doubly_stochastic(P, tol=1e-3):
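+ """Rescale a positive matrix to an approximately doubly stochastic one.
+
+ Uses Sinkhorn-Knopp iteration: rows and columns are rescaled
+ alternately until every row and column sum is within ``tol`` of 1.
+
+ A minimal sketch of the expected behavior:
+
+ >>> P = _doubly_stochastic(np.full((3, 3), 2.0))
+ >>> bool(np.allclose(P.sum(axis=0), 1) and np.allclose(P.sum(axis=1), 1))
+ True
+ """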
519
+ # Adapted from @btaba implementation
520
+ # https://github.com/btaba/sinkhorn_knopp
521
+ # of Sinkhorn-Knopp algorithm
522
+ # https://projecteuclid.org/euclid.pjm/1102992505
523
+
524
+ max_iter = 1000
525
+ c = 1 / P.sum(axis=0)
526
+ r = 1 / (P @ c)
527
+ P_eps = P
528
+
529
+ for it in range(max_iter):
530
+ if ((np.abs(P_eps.sum(axis=1) - 1) < tol).all() and
531
+ (np.abs(P_eps.sum(axis=0) - 1) < tol).all()):
532
+ # All column/row sums ~= 1 within threshold
533
+ break
534
+
535
+ c = 1 / (r @ P)
536
+ r = 1 / (P @ c)
537
+ P_eps = r[:, None] * P * c
538
+
539
+ return P_eps
540
+
541
+
542
+ def _quadratic_assignment_2opt(A, B, maximize=False, rng=None,
543
+ partial_match=None,
544
+ partial_guess=None,
545
+ **unknown_options):
546
+ r"""Solve the quadratic assignment problem (approximately).
547
+
548
+ This function solves the Quadratic Assignment Problem (QAP) and the
549
+ Graph Matching Problem (GMP) using the 2-opt algorithm [1]_.
550
+
551
+ Quadratic assignment solves problems of the following form:
552
+
553
+ .. math::
554
+
555
+ \min_P & \ {\ \text{trace}(A^T P B P^T)}\\
556
+ \mbox{s.t. } & {P \in \mathcal{P}}\\
557
+
558
+ where :math:`\mathcal{P}` is the set of all permutation matrices,
559
+ and :math:`A` and :math:`B` are square matrices.
560
+
561
+ Graph matching tries to *maximize* the same objective function.
562
+ This algorithm can be thought of as finding the alignment of the
563
+ nodes of two graphs that minimizes the number of induced edge
564
+ disagreements, or, in the case of weighted graphs, the sum of squared
565
+ edge weight differences.
566
+
567
+ Note that the quadratic assignment problem is NP-hard. The results given
568
+ here are approximations and are not guaranteed to be optimal.
569
+
570
+ Parameters
571
+ ----------
572
+ A : 2-D array, square
573
+ The square matrix :math:`A` in the objective function above.
574
+ B : 2-D array, square
575
+ The square matrix :math:`B` in the objective function above.
576
+ method : str in {'faq', '2opt'} (default: 'faq')
577
+ The algorithm used to solve the problem. This is the method-specific
578
+ documentation for '2opt'.
579
+ :ref:`'faq' <optimize.qap-faq>` is also available.
580
+
581
+ Options
582
+ -------
583
+ maximize : bool (default: False)
584
+ Maximizes the objective function if ``True``.
585
+ rng : {None, int, `numpy.random.Generator`,
586
+ `numpy.random.RandomState`}, optional
587
+
588
+ If `rng` is None (or `np.random`), the `numpy.random.RandomState`
589
+ singleton is used.
590
+ If `rng` is an int, a new ``RandomState`` instance is used,
591
+ seeded with `rng`.
592
+ If `rng` is already a ``Generator`` or ``RandomState`` instance then
593
+ that instance is used.
594
+ partial_match : 2-D array of integers, optional (default: None)
595
+ Fixes part of the matching. Also known as a "seed" [2]_.
596
+
597
+ Each row of `partial_match` specifies a pair of matched nodes: node
598
+ ``partial_match[i, 0]`` of `A` is matched to node
599
+ ``partial_match[i, 1]`` of `B`. The array has shape ``(m, 2)``,
600
+ where ``m`` is not greater than the number of nodes, :math:`n`.
601
+
602
+ .. note::
603
+ `partial_match` must be sorted by the first column.
604
+
605
+ partial_guess : 2-D array of integers, optional (default: None)
606
+ A guess for the matching between the two matrices. Unlike
607
+ `partial_match`, `partial_guess` does not fix the indices; they are
608
+ still free to be optimized.
609
+
610
+ Each row of `partial_guess` specifies a pair of matched nodes: node
611
+ ``partial_guess[i, 0]`` of `A` is matched to node
612
+ ``partial_guess[i, 1]`` of `B`. The array has shape ``(m, 2)``,
613
+ where ``m`` is not greater than the number of nodes, :math:`n`.
614
+
615
+ .. note::
616
+ `partial_guess` must be sorted by the first column.
617
+
618
+ Returns
619
+ -------
620
+ res : OptimizeResult
621
+ `OptimizeResult` containing the following fields.
622
+
623
+ col_ind : 1-D array
624
+ Column indices corresponding to the best permutation found of the
625
+ nodes of `B`.
626
+ fun : float
627
+ The objective value of the solution.
628
+ nit : int
629
+ The number of iterations performed during optimization.
630
+
631
+ Notes
632
+ -----
633
+ This is a greedy algorithm that works similarly to bubble sort: beginning
634
+ with an initial permutation, it iteratively swaps pairs of indices to
635
+ improve the objective function until no such improvements are possible.
636
+
637
+ References
638
+ ----------
639
+ .. [1] "2-opt," Wikipedia.
640
+ https://en.wikipedia.org/wiki/2-opt
641
+
642
+ .. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski,
643
+ C. Priebe, "Seeded graph matching", Pattern Recognit. 87 (2019):
644
+ 203-215, https://doi.org/10.1016/j.patcog.2018.09.014
645
+
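+ Examples
+ --------
+ A minimal usage sketch; the objective value varies with the random
+ initial permutation, so only the output shape is checked here.
+
+ >>> import numpy as np
+ >>> from scipy.optimize import quadratic_assignment
+ >>> rng = np.random.default_rng()
+ >>> n = 10
+ >>> A = rng.random((n, n))
+ >>> B = rng.random((n, n))
+ >>> res = quadratic_assignment(A, B, method="2opt")
+ >>> res.col_ind.shape
+ (10,)
+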
646
+ """
647
+ _check_unknown_options(unknown_options)
648
+ rng = check_random_state(rng)
649
+ A, B, partial_match = _common_input_validation(A, B, partial_match)
650
+
651
+ N = len(A)
652
+ # check trivial cases
653
+ if N == 0 or partial_match.shape[0] == N:
654
+ score = _calc_score(A, B, partial_match[:, 1])
655
+ res = {"col_ind": partial_match[:, 1], "fun": score, "nit": 0}
656
+ return OptimizeResult(res)
657
+
658
+ if partial_guess is None:
659
+ partial_guess = np.array([[], []]).T
660
+ partial_guess = np.atleast_2d(partial_guess).astype(int)
661
+
662
+ msg = None
663
+ if partial_guess.shape[0] > A.shape[0]:
664
+ msg = ("`partial_guess` can have only as "
665
+ "many entries as there are nodes")
666
+ elif partial_guess.shape[1] != 2:
667
+ msg = "`partial_guess` must have two columns"
668
+ elif partial_guess.ndim != 2:
669
+ msg = "`partial_guess` must have exactly two dimensions"
670
+ elif (partial_guess < 0).any():
671
+ msg = "`partial_guess` must contain only non-negative indices"
672
+ elif (partial_guess >= len(A)).any():
673
+ msg = "`partial_guess` entries must be less than number of nodes"
674
+ elif (not len(set(partial_guess[:, 0])) == len(partial_guess[:, 0]) or
675
+ not len(set(partial_guess[:, 1])) == len(partial_guess[:, 1])):
676
+ msg = "`partial_guess` column entries must be unique"
677
+ if msg is not None:
678
+ raise ValueError(msg)
679
+
680
+ fixed_rows = None
681
+ if partial_match.size or partial_guess.size:
682
+ # use partial_match and partial_guess for initial permutation,
683
+ # but randomly permute the rest.
684
+ guess_rows = np.zeros(N, dtype=bool)
685
+ guess_cols = np.zeros(N, dtype=bool)
686
+ fixed_rows = np.zeros(N, dtype=bool)
687
+ fixed_cols = np.zeros(N, dtype=bool)
688
+ perm = np.zeros(N, dtype=int)
689
+
690
+ rg, cg = partial_guess.T
691
+ guess_rows[rg] = True
692
+ guess_cols[cg] = True
693
+ perm[guess_rows] = cg
694
+
695
+ # match overrides guess
696
+ rf, cf = partial_match.T
697
+ fixed_rows[rf] = True
698
+ fixed_cols[cf] = True
699
+ perm[fixed_rows] = cf
700
+
701
+ random_rows = ~fixed_rows & ~guess_rows
702
+ random_cols = ~fixed_cols & ~guess_cols
703
+ perm[random_rows] = rng.permutation(np.arange(N)[random_cols])
704
+ else:
705
+ perm = rng.permutation(np.arange(N))
706
+
707
+ best_score = _calc_score(A, B, perm)
708
+
709
+ i_free = np.arange(N)
710
+ if fixed_rows is not None:
711
+ i_free = i_free[~fixed_rows]
712
+
713
+ better = operator.gt if maximize else operator.lt
714
+ n_iter = 0
715
+ done = False
716
+ while not done:
717
+ # equivalent to nested for loops i in range(N), j in range(i, N)
718
+ for i, j in itertools.combinations_with_replacement(i_free, 2):
719
+ n_iter += 1
720
+ perm[i], perm[j] = perm[j], perm[i]
721
+ score = _calc_score(A, B, perm)
722
+ if better(score, best_score):
723
+ best_score = score
724
+ break
725
+ # faster to swap back than to create a new list every time
726
+ perm[i], perm[j] = perm[j], perm[i]
727
+ else: # no swaps made
728
+ done = True
729
+
730
+ res = {"col_ind": perm, "fun": best_score, "nit": n_iter}
731
+ return OptimizeResult(res)
venv/lib/python3.10/site-packages/scipy/optimize/_remove_redundancy.py ADDED
@@ -0,0 +1,522 @@
1
+ """
2
+ Routines for removing redundant (linearly dependent) equations from linear
3
+ programming equality constraints.
4
+ """
5
+ # Author: Matt Haberland
6
+
7
+ import numpy as np
8
+ from scipy.linalg import svd
9
+ from scipy.linalg.interpolative import interp_decomp
10
+ import scipy
11
+ from scipy.linalg.blas import dtrsm
12
+
13
+
14
+ def _row_count(A):
15
+ """
16
+ Counts the number of nonzeros in each row of input array A.
17
+ Nonzeros are defined as any element with absolute value greater than
18
+ tol = 1e-13. This value should probably be an input to the function.
19
+
20
+ Parameters
21
+ ----------
22
+ A : 2-D array
23
+ An array representing a matrix
24
+
25
+ Returns
26
+ -------
27
+ rowcount : 1-D array
28
+ Number of nonzeros in each row of A
29
+
30
+ """
31
+ tol = 1e-13
32
+ return np.array((abs(A) > tol).sum(axis=1)).flatten()
33
+
34
+
35
+ def _get_densest(A, eligibleRows):
36
+ """
37
+ Returns the index of the densest row of A. Ignores rows that are not
38
+ eligible for consideration.
39
+
40
+ Parameters
41
+ ----------
42
+ A : 2-D array
43
+ An array representing a matrix
44
+ eligibleRows : 1-D logical array
45
+ Values indicate whether the corresponding row of A is eligible
46
+ to be considered
47
+
48
+ Returns
49
+ -------
50
+ i_densest : int
51
+ Index of the densest row in A eligible for consideration
52
+
53
+ """
54
+ rowCounts = _row_count(A)
55
+ return np.argmax(rowCounts * eligibleRows)
56
+
57
+
58
+ def _remove_zero_rows(A, b):
59
+ """
60
+ Eliminates trivial equations from system of equations defined by Ax = b
61
+ and identifies trivial infeasibilities
62
+
63
+ Parameters
64
+ ----------
65
+ A : 2-D array
66
+ An array representing the left-hand side of a system of equations
67
+ b : 1-D array
68
+ An array representing the right-hand side of a system of equations
69
+
70
+ Returns
71
+ -------
72
+ A : 2-D array
73
+ An array representing the left-hand side of a system of equations
74
+ b : 1-D array
75
+ An array representing the right-hand side of a system of equations
76
+ status: int
77
+ An integer indicating the status of the removal operation
78
+ 0: No infeasibility identified
79
+ 2: Trivially infeasible
80
+ message : str
81
+ A string descriptor of the exit status of the optimization.
82
+
83
+ """
84
+ status = 0
85
+ message = ""
86
+ i_zero = _row_count(A) == 0
87
+ A = A[np.logical_not(i_zero), :]
88
+ if not np.allclose(b[i_zero], 0):
89
+ status = 2
90
+ message = "There is a zero row in A_eq with a nonzero corresponding " \
91
+ "entry in b_eq. The problem is infeasible."
92
+ b = b[np.logical_not(i_zero)]
93
+ return A, b, status, message
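+
+
+ # Illustrative sketch: a zero row with zero rhs is dropped silently, while
+ # a zero row with nonzero rhs is flagged as trivially infeasible (status 2).
+ #     >>> A = np.array([[1., 2.], [0., 0.]])
+ #     >>> A1, b1, s, _ = _remove_zero_rows(A, np.array([3., 0.]))
+ #     >>> A1.shape, s
+ #     ((1, 2), 0)
+ #     >>> _, _, s, _ = _remove_zero_rows(A, np.array([3., 1.]))
+ #     >>> s
+ #     2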
94
+
95
+
96
+ def bg_update_dense(plu, perm_r, v, j):
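+ # Bartels-Golub-style rank-one update of the dense LU factors after
+ # basis column ``j`` is replaced by vector ``v``: solve L u = v (with the
+ # row permutation applied) by forward substitution, write the top of
+ # ``u`` into column ``j`` of U, and fold the remainder into L via the
+ # pivot. This is a sketch of the update suggested in [2]; the caller
+ # falls back to a full refactorization when it becomes ill-conditioned.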
97
+ LU, p = plu
98
+
99
+ vperm = v[perm_r]
100
+ u = dtrsm(1, LU, vperm, lower=1, diag=1)
101
+ LU[:j+1, j] = u[:j+1]
102
+ l = u[j+1:]
103
+ piv = LU[j, j]
104
+ LU[j+1:, j] += (l/piv)
105
+ return LU, p
106
+
107
+
108
+ def _remove_redundancy_pivot_dense(A, rhs, true_rank=None):
109
+ """
110
+ Eliminates redundant equations from system of equations defined by Ax = b
111
+ and identifies infeasibilities.
112
+
113
+ Parameters
114
+ ----------
115
+ A : 2-D sparse matrix
116
+ An matrix representing the left-hand side of a system of equations
117
+ rhs : 1-D array
118
+ An array representing the right-hand side of a system of equations
119
+
120
+ Returns
121
+ -------
122
+ A : 2-D sparse matrix
123
+ A matrix representing the left-hand side of a system of equations
124
+ rhs : 1-D array
125
+ An array representing the right-hand side of a system of equations
126
+ status: int
127
+ An integer indicating the status of the system
128
+ 0: No infeasibility identified
129
+ 2: Trivially infeasible
130
+ message : str
131
+ A string descriptor of the exit status of the optimization.
132
+
133
+ References
134
+ ----------
135
+ .. [2] Andersen, Erling D. "Finding all linearly dependent rows in
136
+ large-scale linear programming." Optimization Methods and Software
137
+ 6.3 (1995): 219-227.
138
+
139
+ """
140
+ tolapiv = 1e-8
141
+ tolprimal = 1e-8
142
+ status = 0
143
+ message = ""
144
+ inconsistent = ("There is a linear combination of rows of A_eq that "
145
+ "results in zero, suggesting a redundant constraint. "
146
+ "However the same linear combination of b_eq is "
147
+ "nonzero, suggesting that the constraints conflict "
148
+ "and the problem is infeasible.")
149
+ A, rhs, status, message = _remove_zero_rows(A, rhs)
150
+
151
+ if status != 0:
152
+ return A, rhs, status, message
153
+
154
+ m, n = A.shape
155
+
156
+ v = list(range(m)) # Artificial column indices.
157
+ b = list(v) # Basis column indices.
158
+ # This is better as a list than a set because column order of basis matrix
159
+ # needs to be consistent.
160
+ d = [] # Indices of dependent rows
161
+ perm_r = None
162
+
163
+ A_orig = A
164
+ A = np.zeros((m, m + n), order='F')
165
+ np.fill_diagonal(A, 1)
166
+ A[:, m:] = A_orig
167
+ e = np.zeros(m)
168
+
169
+ js_candidates = np.arange(m, m+n, dtype=int) # candidate columns for basis
170
+ # manual masking was faster than masked array
171
+ js_mask = np.ones(js_candidates.shape, dtype=bool)
172
+
173
+ # Implements basic algorithm from [2]
174
+ # Uses some of the suggested improvements (removing zero rows and
175
+ # Bartels-Golub update idea).
176
+ # Removing column singletons would be easy, but it is not as important
177
+ # because the procedure is performed only on the equality constraint
178
+ # matrix from the original problem - not on the canonical form matrix,
179
+ # which would have many more column singletons due to slack variables
180
+ # from the inequality constraints.
181
+ # The thoughts on "crashing" the initial basis are only really useful if
182
+ # the matrix is sparse.
183
+
184
+ lu = np.eye(m, order='F'), np.arange(m) # initial LU is trivial
185
+ perm_r = lu[1]
186
+ for i in v:
187
+
188
+ e[i] = 1
189
+ if i > 0:
190
+ e[i-1] = 0
191
+
192
+ try: # fails for i==0 and any time it gets ill-conditioned
193
+ j = b[i-1]
194
+ lu = bg_update_dense(lu, perm_r, A[:, j], i-1)
195
+ except Exception:
196
+ lu = scipy.linalg.lu_factor(A[:, b])
197
+ LU, p = lu
198
+ perm_r = list(range(m))
199
+ for i1, i2 in enumerate(p):
200
+ perm_r[i1], perm_r[i2] = perm_r[i2], perm_r[i1]
201
+
202
+ pi = scipy.linalg.lu_solve(lu, e, trans=1)
203
+
204
+ js = js_candidates[js_mask]
205
+ batch = 50
206
+
207
+ # This is a tiny bit faster than looping over columns individually,
208
+ # like for j in js: if abs(A[:,j].transpose().dot(pi)) > tolapiv:
209
+ for j_index in range(0, len(js), batch):
210
+ j_indices = js[j_index: min(j_index+batch, len(js))]
211
+
212
+ c = abs(A[:, j_indices].transpose().dot(pi))
213
+ if (c > tolapiv).any():
214
+ j = js[j_index + np.argmax(c)] # very independent column
215
+ b[i] = j
216
+ js_mask[j-m] = False
217
+ break
218
+ else:
219
+ bibar = pi.T.dot(rhs.reshape(-1, 1))
220
+ bnorm = np.linalg.norm(rhs)
221
+ if abs(bibar)/(1+bnorm) > tolprimal: # inconsistent
222
+ status = 2
223
+ message = inconsistent
224
+ return A_orig, rhs, status, message
225
+ else: # dependent
226
+ d.append(i)
227
+ if true_rank is not None and len(d) == m - true_rank:
228
+ break # found all redundancies
229
+
230
+ keep = set(range(m))
231
+ keep = list(keep - set(d))
232
+ return A_orig[keep, :], rhs[keep], status, message
233
+
234
+
235
+ def _remove_redundancy_pivot_sparse(A, rhs):
236
+ """
237
+ Eliminates redundant equations from system of equations defined by Ax = b
238
+ and identifies infeasibilities.
239
+
240
+ Parameters
241
+ ----------
242
+ A : 2-D sparse matrix
243
+ An matrix representing the left-hand side of a system of equations
244
+ rhs : 1-D array
245
+ An array representing the right-hand side of a system of equations
246
+
247
+ Returns
248
+ -------
249
+ A : 2-D sparse matrix
250
+ A matrix representing the left-hand side of a system of equations
251
+ rhs : 1-D array
252
+ An array representing the right-hand side of a system of equations
253
+ status: int
254
+ An integer indicating the status of the system
255
+ 0: No infeasibility identified
256
+ 2: Trivially infeasible
257
+ message : str
258
+ A string descriptor of the exit status of the optimization.
259
+
260
+ References
261
+ ----------
262
+ .. [2] Andersen, Erling D. "Finding all linearly dependent rows in
263
+ large-scale linear programming." Optimization Methods and Software
264
+ 6.3 (1995): 219-227.
265
+
266
+ """
267
+
268
+ tolapiv = 1e-8
269
+ tolprimal = 1e-8
270
+ status = 0
271
+ message = ""
272
+ inconsistent = ("There is a linear combination of rows of A_eq that "
273
+ "results in zero, suggesting a redundant constraint. "
274
+ "However the same linear combination of b_eq is "
275
+ "nonzero, suggesting that the constraints conflict "
276
+ "and the problem is infeasible.")
277
+ A, rhs, status, message = _remove_zero_rows(A, rhs)
278
+
279
+ if status != 0:
280
+ return A, rhs, status, message
281
+
282
+ m, n = A.shape
283
+
284
+ v = list(range(m)) # Artificial column indices.
285
+ b = list(v) # Basis column indices.
286
+ # This is better as a list than a set because column order of basis matrix
287
+ # needs to be consistent.
288
+ k = set(range(m, m+n)) # Structural column indices.
289
+ d = [] # Indices of dependent rows
290
+
291
+ A_orig = A
292
+ A = scipy.sparse.hstack((scipy.sparse.eye(m), A)).tocsc()
293
+ e = np.zeros(m)
294
+
295
+ # Implements basic algorithm from [2]
296
+ # Uses only one of the suggested improvements (removing zero rows).
297
+ # Removing column singletons would be easy, but it is not as important
298
+ # because the procedure is performed only on the equality constraint
299
+ # matrix from the original problem - not on the canonical form matrix,
300
+ # which would have many more column singletons due to slack variables
301
+ # from the inequality constraints.
302
+ # The thoughts on "crashing" the initial basis sound useful, but the
303
+ # description of the procedure seems to assume a lot of familiarity with
304
+ # the subject; it is not very explicit. I already went through enough
305
+ # trouble getting the basic algorithm working, so I was not interested in
306
+ # trying to decipher this, too. (Overall, the paper is fraught with
307
+ # mistakes and ambiguities - which is strange, because the rest of
308
+ # Andersen's papers are quite good.)
309
+ # I tried and tried and tried to improve performance using the
310
+ # Bartels-Golub update. It works, but it's only practical if the LU
311
+ # factorization can be specialized as described, and that is not possible
312
+ # until the SciPy SuperLU interface permits control over column
313
+ # permutation - see issue #7700.
314
+
315
+ for i in v:
316
+ B = A[:, b]
317
+
318
+ e[i] = 1
319
+ if i > 0:
320
+ e[i-1] = 0
321
+
322
+ pi = scipy.sparse.linalg.spsolve(B.transpose(), e).reshape(-1, 1)
323
+
324
+ js = list(k-set(b)) # not efficient, but this is not the time sink...
325
+
326
+ # Due to overhead, it tends to be faster (for problems tested) to
327
+ # compute the full matrix-vector product rather than individual
328
+ # vector-vector products (with the chance of terminating as soon
329
+ # as any are nonzero). For very large matrices, it might be worth
330
+ # it to compute, say, 100 or 1000 at a time and stop when a nonzero
331
+ # is found.
332
+
333
+ c = (np.abs(A[:, js].transpose().dot(pi)) > tolapiv).nonzero()[0]
334
+ if len(c) > 0: # independent
335
+ j = js[c[0]]
336
+ # in a previous commit, the previous line was changed to choose
337
+ # index j corresponding with the maximum dot product.
338
+ # While this avoided issues with almost
339
+ # singular matrices, it slowed the routine in most NETLIB tests.
340
+ # I think this is because these columns were denser than the
341
+ # first column with nonzero dot product (c[0]).
342
+ # It would be nice to have a heuristic that balances sparsity with
343
+ # high dot product, but I don't think it's worth the time to
344
+ # develop one right now. Bartels-Golub update is a much higher
345
+ # priority.
346
+ b[i] = j # replace artificial column
347
+ else:
348
+ bibar = pi.T.dot(rhs.reshape(-1, 1))
349
+ bnorm = np.linalg.norm(rhs)
350
+ if abs(bibar)/(1 + bnorm) > tolprimal:
351
+ status = 2
352
+ message = inconsistent
353
+ return A_orig, rhs, status, message
354
+ else: # dependent
355
+ d.append(i)
356
+
357
+ keep = set(range(m))
358
+ keep = list(keep - set(d))
359
+ return A_orig[keep, :], rhs[keep], status, message
360
+
361
+
362
+ def _remove_redundancy_svd(A, b):
363
+ """
364
+ Eliminates redundant equations from system of equations defined by Ax = b
365
+ and identifies infeasibilities.
366
+
367
+ Parameters
368
+ ----------
369
+ A : 2-D array
370
+ An array representing the left-hand side of a system of equations
371
+ b : 1-D array
372
+ An array representing the right-hand side of a system of equations
373
+
374
+ Returns
375
+ -------
376
+ A : 2-D array
377
+ An array representing the left-hand side of a system of equations
378
+ b : 1-D array
379
+ An array representing the right-hand side of a system of equations
380
+ status: int
381
+ An integer indicating the status of the system
382
+ 0: No infeasibility identified
383
+ 2: Trivially infeasible
384
+ message : str
385
+ A string descriptor of the exit status of the optimization.
386
+
387
+ References
388
+ ----------
389
+ .. [2] Andersen, Erling D. "Finding all linearly dependent rows in
390
+ large-scale linear programming." Optimization Methods and Software
391
+ 6.3 (1995): 219-227.
392
+
393
+ """
394
+
395
+ A, b, status, message = _remove_zero_rows(A, b)
396
+
397
+ if status != 0:
398
+ return A, b, status, message
399
+
400
+ U, s, Vh = svd(A)
401
+ eps = np.finfo(float).eps
402
+ tol = s.max() * max(A.shape) * eps
403
+
404
+ m, n = A.shape
405
+ s_min = s[-1] if m <= n else 0
406
+
407
+ # this algorithm is faster than that of [2] when the nullspace is small
408
+ # but it could probably be improved by randomized algorithms and with
409
+ # a sparse implementation.
410
+ # it relies on repeated singular value decomposition to find linearly
411
+ # dependent rows (as identified by columns of U that correspond with zero
412
+ # singular values). Unfortunately, only one row can be removed per
413
+ # decomposition (I tried otherwise; doing so can cause problems.)
414
+ # It would be nice if we could do truncated SVD like sp.sparse.linalg.svds
415
+ # but that function is unreliable at finding singular values near zero.
416
+ # Finding max eigenvalue L of A A^T, then largest eigenvalue (and
417
+ # associated eigenvector) of -A A^T + L I (I is identity) via power
418
+ # iteration would also work in theory, but is only efficient if the
419
+ # smallest nonzero eigenvalue of A A^T is close to the largest nonzero
420
+ # eigenvalue.
421
+
422
+ while abs(s_min) < tol:
423
+ v = U[:, -1] # TODO: return these so user can eliminate from problem?
424
+ # rows need to be represented in significant amount
425
+ eligibleRows = np.abs(v) > tol * 10e6
426
+ if not np.any(eligibleRows) or np.any(np.abs(v.dot(A)) > tol):
427
+ status = 4
428
+ message = ("Due to numerical issues, redundant equality "
429
+ "constraints could not be removed automatically. "
430
+ "Try providing your constraint matrices as sparse "
431
+ "matrices to activate sparse presolve, try turning "
432
+ "off redundancy removal, or try turning off presolve "
433
+ "altogether.")
434
+ break
435
+ if np.any(np.abs(v.dot(b)) > tol * 100): # factor of 100 to fix 10038 and 10349
436
+ status = 2
437
+ message = ("There is a linear combination of rows of A_eq that "
438
+ "results in zero, suggesting a redundant constraint. "
439
+ "However the same linear combination of b_eq is "
440
+ "nonzero, suggesting that the constraints conflict "
441
+ "and the problem is infeasible.")
442
+ break
443
+
444
+ i_remove = _get_densest(A, eligibleRows)
445
+ A = np.delete(A, i_remove, axis=0)
446
+ b = np.delete(b, i_remove)
447
+ U, s, Vh = svd(A)
448
+ m, n = A.shape
449
+ s_min = s[-1] if m <= n else 0
450
+
451
+ return A, b, status, message
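+
+
+ # Illustrative sketch of the SVD route (module-private, names as above):
+ # a duplicated row is detected via a zero singular value and removed.
+ #     >>> A = np.array([[1., 0.], [0., 1.], [1., 0.]])
+ #     >>> b = np.array([2., 3., 2.])
+ #     >>> A2, b2, status, msg = _remove_redundancy_svd(A, b)
+ #     >>> A2.shape, status
+ #     ((2, 2), 0)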
452
+
453
+
454
+ def _remove_redundancy_id(A, rhs, rank=None, randomized=True):
455
+ """Eliminates redundant equations from a system of equations.
456
+
457
+ Eliminates redundant equations from system of equations defined by Ax = b
458
+ and identifies infeasibilities.
459
+
460
+ Parameters
461
+ ----------
462
+ A : 2-D array
463
+ An array representing the left-hand side of a system of equations
464
+ rhs : 1-D array
465
+ An array representing the right-hand side of a system of equations
466
+ rank : int, optional
467
+ The rank of A
468
+ randomized: bool, optional
469
+ True for randomized interpolative decomposition
470
+
471
+ Returns
472
+ -------
473
+ A : 2-D array
474
+ An array representing the left-hand side of a system of equations
475
+ rhs : 1-D array
476
+ An array representing the right-hand side of a system of equations
477
+ status: int
478
+ An integer indicating the status of the system
479
+ 0: No infeasibility identified
480
+ 2: Trivially infeasible
481
+ message : str
482
+ A string descriptor of the exit status of the optimization.
483
+
484
+ """
485
+
486
+ status = 0
487
+ message = ""
488
+ inconsistent = ("There is a linear combination of rows of A_eq that "
489
+ "results in zero, suggesting a redundant constraint. "
490
+ "However the same linear combination of b_eq is "
491
+ "nonzero, suggesting that the constraints conflict "
492
+ "and the problem is infeasible.")
493
+
494
+ A, rhs, status, message = _remove_zero_rows(A, rhs)
495
+
496
+ if status != 0:
497
+ return A, rhs, status, message
498
+
499
+ m, n = A.shape
500
+
501
+ k = rank
502
+ if rank is None:
503
+ k = np.linalg.matrix_rank(A)
504
+
505
+ idx, proj = interp_decomp(A.T, k, rand=randomized)
506
+
507
+ # first k entries in idx are indices of the independent rows
508
+ # remaining entries are the indices of the m-k dependent rows
509
+ # proj provides the linear combinations of rows of A2 that form the
510
+ # remaining m-k (dependent) rows. The same linear combination of entries
511
+ # in rhs2 must give the remaining m-k entries. If not, the system is
512
+ # inconsistent, and the problem is infeasible.
513
+ if not np.allclose(rhs[idx[:k]] @ proj, rhs[idx[k:]]):
514
+ status = 2
515
+ message = inconsistent
516
+
517
+ # sort indices because the other redundancy removal routines leave rows
518
+ # in original order and tests were written with that in mind
519
+ idx = sorted(idx[:k])
520
+ A2 = A[idx, :]
521
+ rhs2 = rhs[idx]
522
+ return A2, rhs2, status, message
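+
+
+ # Illustrative sketch of the interpolative-decomposition route: the same
+ # duplicate row is removed in a single decomposition once the rank is known.
+ #     >>> A = np.array([[1., 0.], [0., 1.], [1., 0.]])
+ #     >>> b = np.array([2., 3., 2.])
+ #     >>> A2, b2, status, msg = _remove_redundancy_id(A, b, rank=2)
+ #     >>> len(b2), status
+ #     (2, 0)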
venv/lib/python3.10/site-packages/scipy/optimize/_root.py ADDED
@@ -0,0 +1,711 @@
1
+ """
2
+ Unified interfaces to root finding algorithms.
3
+
4
+ Functions
5
+ ---------
6
+ - root : find a root of a vector function.
7
+ """
8
+ __all__ = ['root']
9
+
10
+ import numpy as np
11
+
12
+ from warnings import warn
13
+
14
+ from ._optimize import MemoizeJac, OptimizeResult, _check_unknown_options
15
+ from ._minpack_py import _root_hybr, leastsq
16
+ from ._spectral import _root_df_sane
17
+ from . import _nonlin as nonlin
18
+
19
+
20
+ ROOT_METHODS = ['hybr', 'lm', 'broyden1', 'broyden2', 'anderson',
21
+ 'linearmixing', 'diagbroyden', 'excitingmixing', 'krylov',
22
+ 'df-sane']
23
+
24
+
25
+ def root(fun, x0, args=(), method='hybr', jac=None, tol=None, callback=None,
26
+ options=None):
27
+ r"""
28
+ Find a root of a vector function.
29
+
30
+ Parameters
31
+ ----------
32
+ fun : callable
33
+ A vector function to find a root of.
34
+ x0 : ndarray
35
+ Initial guess.
36
+ args : tuple, optional
37
+ Extra arguments passed to the objective function and its Jacobian.
38
+ method : str, optional
39
+ Type of solver. Should be one of
40
+
41
+ - 'hybr' :ref:`(see here) <optimize.root-hybr>`
42
+ - 'lm' :ref:`(see here) <optimize.root-lm>`
43
+ - 'broyden1' :ref:`(see here) <optimize.root-broyden1>`
44
+ - 'broyden2' :ref:`(see here) <optimize.root-broyden2>`
45
+ - 'anderson' :ref:`(see here) <optimize.root-anderson>`
46
+ - 'linearmixing' :ref:`(see here) <optimize.root-linearmixing>`
47
+ - 'diagbroyden' :ref:`(see here) <optimize.root-diagbroyden>`
48
+ - 'excitingmixing' :ref:`(see here) <optimize.root-excitingmixing>`
49
+ - 'krylov' :ref:`(see here) <optimize.root-krylov>`
50
+ - 'df-sane' :ref:`(see here) <optimize.root-dfsane>`
51
+
52
+ jac : bool or callable, optional
53
+ If `jac` is a Boolean and is True, `fun` is assumed to return the
54
+ value of Jacobian along with the objective function. If False, the
55
+ Jacobian will be estimated numerically.
56
+ `jac` can also be a callable returning the Jacobian of `fun`. In
57
+ this case, it must accept the same arguments as `fun`.
58
+ tol : float, optional
59
+ Tolerance for termination. For detailed control, use solver-specific
60
+ options.
61
+ callback : function, optional
62
+ Optional callback function. It is called on every iteration as
63
+ ``callback(x, f)`` where `x` is the current solution and `f`
64
+ the corresponding residual. For all methods but 'hybr' and 'lm'.
65
+ options : dict, optional
66
+ A dictionary of solver options. E.g., `xtol` or `maxiter`, see
67
+ :obj:`show_options()` for details.
68
+
69
+ Returns
70
+ -------
71
+ sol : OptimizeResult
72
+ The solution represented as a ``OptimizeResult`` object.
73
+ Important attributes are: ``x`` the solution array, ``success`` a
74
+ Boolean flag indicating if the algorithm exited successfully and
75
+ ``message`` which describes the cause of the termination. See
76
+ `OptimizeResult` for a description of other attributes.
77
+
78
+ See also
79
+ --------
80
+ show_options : Additional options accepted by the solvers
81
+
82
+ Notes
83
+ -----
84
+ This section describes the available solvers that can be selected by the
85
+ 'method' parameter. The default method is *hybr*.
86
+
87
+ Method *hybr* uses a modification of the Powell hybrid method as
88
+ implemented in MINPACK [1]_.
89
+
90
+ Method *lm* solves the system of nonlinear equations in a least squares
91
+ sense using a modification of the Levenberg-Marquardt algorithm as
92
+ implemented in MINPACK [1]_.
93
+
94
+ Method *df-sane* is a derivative-free spectral method [3]_.
95
+
96
+ Methods *broyden1*, *broyden2*, *anderson*, *linearmixing*,
97
+ *diagbroyden*, *excitingmixing*, *krylov* are inexact Newton methods,
98
+ with backtracking or full line searches [2]_. Each method corresponds
99
+ to a particular Jacobian approximations.
100
+
101
+ - Method *broyden1* uses Broyden's first Jacobian approximation, it is
102
+ known as Broyden's good method.
103
+ - Method *broyden2* uses Broyden's second Jacobian approximation, it
104
+ is known as Broyden's bad method.
105
+ - Method *anderson* uses (extended) Anderson mixing.
106
+ - Method *Krylov* uses Krylov approximation for inverse Jacobian. It
107
+ is suitable for large-scale problem.
108
+ - Method *diagbroyden* uses diagonal Broyden Jacobian approximation.
109
+ - Method *linearmixing* uses a scalar Jacobian approximation.
110
+ - Method *excitingmixing* uses a tuned diagonal Jacobian
111
+ approximation.
112
+
113
+ .. warning::
114
+
115
+ The algorithms implemented for methods *diagbroyden*,
116
+ *linearmixing* and *excitingmixing* may be useful for specific
117
+ problems, but whether they will work may depend strongly on the
118
+ problem.
119
+
120
+ .. versionadded:: 0.11.0
121
+
122
+ References
123
+ ----------
124
+ .. [1] More, Jorge J., Burton S. Garbow, and Kenneth E. Hillstrom.
125
+ 1980. User Guide for MINPACK-1.
126
+ .. [2] C. T. Kelley. 1995. Iterative Methods for Linear and Nonlinear
127
+ Equations. Society for Industrial and Applied Mathematics.
128
+ <https://archive.siam.org/books/kelley/fr16/>
129
+ .. [3] W. La Cruz, J.M. Martinez, M. Raydan. Math. Comp. 75, 1429 (2006).
130
+
131
+ Examples
132
+ --------
133
+ The following functions define a system of nonlinear equations and its
134
+ jacobian.
135
+
136
+ >>> import numpy as np
137
+ >>> def fun(x):
138
+ ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
139
+ ... 0.5 * (x[1] - x[0])**3 + x[1]]
140
+
141
+ >>> def jac(x):
142
+ ... return np.array([[1 + 1.5 * (x[0] - x[1])**2,
143
+ ... -1.5 * (x[0] - x[1])**2],
144
+ ... [-1.5 * (x[1] - x[0])**2,
145
+ ... 1 + 1.5 * (x[1] - x[0])**2]])
146
+
147
+ A solution can be obtained as follows.
148
+
149
+ >>> from scipy import optimize
150
+ >>> sol = optimize.root(fun, [0, 0], jac=jac, method='hybr')
151
+ >>> sol.x
152
+ array([ 0.8411639, 0.1588361])
153
+
154
+ **Large problem**
155
+
156
+ Suppose that we needed to solve the following integrodifferential
157
+ equation on the square :math:`[0,1]\times[0,1]`:
158
+
159
+ .. math::
160
+
161
+ \nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2
162
+
163
+ with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of
164
+ the square.
165
+
166
+ The solution can be found using the ``method='krylov'`` solver:
167
+
168
+ >>> from scipy import optimize
169
+ >>> # parameters
170
+ >>> nx, ny = 75, 75
171
+ >>> hx, hy = 1./(nx-1), 1./(ny-1)
172
+
173
+ >>> P_left, P_right = 0, 0
174
+ >>> P_top, P_bottom = 1, 0
175
+
176
+ >>> def residual(P):
177
+ ... d2x = np.zeros_like(P)
178
+ ... d2y = np.zeros_like(P)
179
+ ...
180
+ ... d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx
181
+ ... d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
182
+ ... d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
183
+ ...
184
+ ... d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
185
+ ... d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
186
+ ... d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
187
+ ...
188
+ ... return d2x + d2y - 10*np.cosh(P).mean()**2
189
+
190
+ >>> guess = np.zeros((nx, ny), float)
191
+ >>> sol = optimize.root(residual, guess, method='krylov')
192
+ >>> print('Residual: %g' % abs(residual(sol.x)).max())
193
+ Residual: 5.7972e-06 # may vary
194
+
195
+ >>> import matplotlib.pyplot as plt
196
+ >>> x, y = np.mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
197
+ >>> plt.pcolormesh(x, y, sol.x, shading='gouraud')
198
+ >>> plt.colorbar()
199
+ >>> plt.show()
200
+
201
+ """
202
+ if not isinstance(args, tuple):
203
+ args = (args,)
204
+
205
+ meth = method.lower()
206
+ if options is None:
207
+ options = {}
208
+
209
+ if callback is not None and meth in ('hybr', 'lm'):
210
+ warn('Method %s does not accept callback.' % method,
211
+ RuntimeWarning, stacklevel=2)
212
+
213
+ # fun also returns the Jacobian
214
+ if not callable(jac) and meth in ('hybr', 'lm'):
215
+ if bool(jac):
216
+ fun = MemoizeJac(fun)
217
+ jac = fun.derivative
218
+ else:
219
+ jac = None
220
+
221
+ # set default tolerances
222
+ if tol is not None:
223
+ options = dict(options)
224
+ if meth in ('hybr', 'lm'):
225
+ options.setdefault('xtol', tol)
226
+ elif meth in ('df-sane',):
227
+ options.setdefault('ftol', tol)
228
+ elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing',
229
+ 'diagbroyden', 'excitingmixing', 'krylov'):
230
+ options.setdefault('xtol', tol)
231
+ options.setdefault('xatol', np.inf)
232
+ options.setdefault('ftol', np.inf)
233
+ options.setdefault('fatol', np.inf)
234
+
235
+ if meth == 'hybr':
236
+ sol = _root_hybr(fun, x0, args=args, jac=jac, **options)
237
+ elif meth == 'lm':
238
+ sol = _root_leastsq(fun, x0, args=args, jac=jac, **options)
239
+ elif meth == 'df-sane':
240
+ _warn_jac_unused(jac, method)
241
+ sol = _root_df_sane(fun, x0, args=args, callback=callback,
242
+ **options)
243
+ elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing',
244
+ 'diagbroyden', 'excitingmixing', 'krylov'):
245
+ _warn_jac_unused(jac, method)
246
+ sol = _root_nonlin_solve(fun, x0, args=args, jac=jac,
247
+ _method=meth, _callback=callback,
248
+ **options)
249
+ else:
250
+ raise ValueError('Unknown solver %s' % method)
251
+
252
+ return sol
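+
+
+ # Illustrative sketch (hypothetical usage): with ``jac=True`` the
+ # MINPACK-based methods expect ``fun`` to return ``(residuals, jacobian)``.
+ #     >>> def fun_and_jac(x):
+ #     ...     return [x[0]**2 - 1], [[2 * x[0]]]
+ #     >>> sol = root(fun_and_jac, [2.0], jac=True, method='lm')
+ #     >>> round(float(sol.x[0]), 6)
+ #     1.0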
253
+
254
+
255
+ def _warn_jac_unused(jac, method):
256
+ if jac is not None:
257
+ warn(f'Method {method} does not use the jacobian (jac).',
258
+ RuntimeWarning, stacklevel=2)
259
+
260
+
261
+ def _root_leastsq(fun, x0, args=(), jac=None,
262
+ col_deriv=0, xtol=1.49012e-08, ftol=1.49012e-08,
263
+ gtol=0.0, maxiter=0, eps=0.0, factor=100, diag=None,
264
+ **unknown_options):
265
+ """
266
+ Solve for least squares with Levenberg-Marquardt
267
+
268
+ Options
269
+ -------
270
+ col_deriv : bool
271
+ non-zero to specify that the Jacobian function computes derivatives
272
+ down the columns (faster, because there is no transpose operation).
273
+ ftol : float
274
+ Relative error desired in the sum of squares.
275
+ xtol : float
276
+ Relative error desired in the approximate solution.
277
+ gtol : float
278
+ Orthogonality desired between the function vector and the columns
279
+ of the Jacobian.
280
+ maxiter : int
281
+ The maximum number of calls to the function. If zero, then
282
+ 100*(N+1) is the maximum where N is the number of elements in x0.
283
+ eps : float
284
+ A suitable step length for the forward-difference approximation of
285
+ the Jacobian (for Dfun=None). If epsfcn is less than the machine
286
+ precision, it is assumed that the relative errors in the functions
287
+ are of the order of the machine precision.
288
+ factor : float
289
+ A parameter determining the initial step bound
290
+ (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
291
+ diag : sequence
292
+ N positive entries that serve as scale factors for the variables.
293
+ """
294
+
295
+ _check_unknown_options(unknown_options)
296
+ x, cov_x, info, msg, ier = leastsq(fun, x0, args=args, Dfun=jac,
297
+ full_output=True,
298
+ col_deriv=col_deriv, xtol=xtol,
299
+ ftol=ftol, gtol=gtol,
300
+ maxfev=maxiter, epsfcn=eps,
301
+ factor=factor, diag=diag)
302
+ sol = OptimizeResult(x=x, message=msg, status=ier,
303
+ success=ier in (1, 2, 3, 4), cov_x=cov_x,
304
+ fun=info.pop('fvec'), method="lm")
305
+ sol.update(info)
306
+ return sol
307
+
308
+
309
+ def _root_nonlin_solve(fun, x0, args=(), jac=None,
310
+ _callback=None, _method=None,
311
+ nit=None, disp=False, maxiter=None,
312
+ ftol=None, fatol=None, xtol=None, xatol=None,
313
+ tol_norm=None, line_search='armijo', jac_options=None,
314
+ **unknown_options):
315
+ _check_unknown_options(unknown_options)
316
+
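+ # Map the option names used by ``root`` (ftol/fatol/xtol/xatol/disp)
+ # onto the names expected by ``nonlin.nonlin_solve``
+ # (f_rtol/f_tol/x_rtol/x_tol/verbose).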
317
+ f_tol = fatol
318
+ f_rtol = ftol
319
+ x_tol = xatol
320
+ x_rtol = xtol
321
+ verbose = disp
322
+ if jac_options is None:
323
+ jac_options = dict()
324
+
325
+ jacobian = {'broyden1': nonlin.BroydenFirst,
326
+ 'broyden2': nonlin.BroydenSecond,
327
+ 'anderson': nonlin.Anderson,
328
+ 'linearmixing': nonlin.LinearMixing,
329
+ 'diagbroyden': nonlin.DiagBroyden,
330
+ 'excitingmixing': nonlin.ExcitingMixing,
331
+ 'krylov': nonlin.KrylovJacobian
332
+ }[_method]
333
+
334
+ if args:
335
+ if jac is True:
336
+ def f(x):
337
+ return fun(x, *args)[0]
338
+ else:
339
+ def f(x):
340
+ return fun(x, *args)
341
+ else:
342
+ f = fun
343
+
344
+ x, info = nonlin.nonlin_solve(f, x0, jacobian=jacobian(**jac_options),
345
+ iter=nit, verbose=verbose,
346
+ maxiter=maxiter, f_tol=f_tol,
347
+ f_rtol=f_rtol, x_tol=x_tol,
348
+ x_rtol=x_rtol, tol_norm=tol_norm,
349
+ line_search=line_search,
350
+ callback=_callback, full_output=True,
351
+ raise_exception=False)
352
+ sol = OptimizeResult(x=x, method=_method)
353
+ sol.update(info)
354
+ return sol
355
+
356
+ def _root_broyden1_doc():
357
+ """
358
+ Options
359
+ -------
360
+ nit : int, optional
361
+ Number of iterations to make. If omitted (default), make as many
362
+ as required to meet tolerances.
363
+ disp : bool, optional
364
+ Print status to stdout on every iteration.
365
+ maxiter : int, optional
366
+ Maximum number of iterations to make.
367
+ ftol : float, optional
368
+ Relative tolerance for the residual. If omitted, not used.
369
+ fatol : float, optional
370
+ Absolute tolerance (in max-norm) for the residual.
371
+ If omitted, default is 6e-6.
372
+ xtol : float, optional
373
+ Relative minimum step size. If omitted, not used.
374
+ xatol : float, optional
375
+ Absolute minimum step size, as determined from the Jacobian
376
+ approximation. If the step size is smaller than this, optimization
377
+ is terminated as successful. If omitted, not used.
378
+ tol_norm : function(vector) -> scalar, optional
379
+ Norm to use in convergence check. Default is the maximum norm.
380
+ line_search : {None, 'armijo' (default), 'wolfe'}, optional
381
+ Which type of a line search to use to determine the step size in
382
+ the direction given by the Jacobian approximation. Defaults to
383
+ 'armijo'.
384
+ jac_options : dict, optional
385
+ Options for the respective Jacobian approximation.
386
+ alpha : float, optional
387
+ Initial guess for the Jacobian is (-1/alpha).
388
+ reduction_method : str or tuple, optional
389
+ Method used in ensuring that the rank of the Broyden
390
+ matrix stays low. Can either be a string giving the
391
+ name of the method, or a tuple of the form ``(method,
392
+ param1, param2, ...)`` that gives the name of the
393
+ method and values for additional parameters.
394
+
395
+ Methods available:
396
+
397
+ - ``restart``
398
+ Drop all matrix columns. Has no
399
+ extra parameters.
400
+ - ``simple``
401
+ Drop oldest matrix column. Has no
402
+ extra parameters.
403
+ - ``svd``
404
+ Keep only the most significant SVD
405
+ components.
406
+
407
+ Extra parameters:
408
+
409
+ - ``to_retain``
410
+ Number of SVD components to
411
+ retain when rank reduction is done.
412
+ Default is ``max_rank - 2``.
413
+ max_rank : int, optional
414
+ Maximum rank for the Broyden matrix.
415
+ Default is infinity (i.e., no rank reduction).
416
+
417
+ Examples
418
+ --------
419
+ >>> import numpy as np
+ >>> def func(x):
420
+ ... return np.cos(x) + x[::-1] - [1, 2, 3, 4]
421
+ ...
422
+ >>> from scipy import optimize
423
+ >>> res = optimize.root(func, [1, 1, 1, 1], method='broyden1', tol=1e-14)
424
+ >>> x = res.x
425
+ >>> x
426
+ array([4.04674914, 3.91158389, 2.71791677, 1.61756251])
427
+ >>> np.cos(x) + x[::-1]
428
+ array([1., 2., 3., 4.])
429
+
430
+ """
431
+ pass
432
+
433
+ def _root_broyden2_doc():
434
+ """
435
+ Options
436
+ -------
437
+ nit : int, optional
438
+ Number of iterations to make. If omitted (default), make as many
439
+ as required to meet tolerances.
440
+ disp : bool, optional
441
+ Print status to stdout on every iteration.
442
+ maxiter : int, optional
443
+ Maximum number of iterations to make.
444
+ ftol : float, optional
445
+ Relative tolerance for the residual. If omitted, not used.
446
+ fatol : float, optional
447
+ Absolute tolerance (in max-norm) for the residual.
448
+ If omitted, default is 6e-6.
449
+ xtol : float, optional
450
+ Relative minimum step size. If omitted, not used.
451
+ xatol : float, optional
452
+ Absolute minimum step size, as determined from the Jacobian
453
+ approximation. If the step size is smaller than this, optimization
454
+ is terminated as successful. If omitted, not used.
455
+ tol_norm : function(vector) -> scalar, optional
456
+ Norm to use in convergence check. Default is the maximum norm.
457
+ line_search : {None, 'armijo' (default), 'wolfe'}, optional
458
+ Which type of a line search to use to determine the step size in
459
+ the direction given by the Jacobian approximation. Defaults to
460
+ 'armijo'.
461
+ jac_options : dict, optional
462
+ Options for the respective Jacobian approximation.
463
+
464
+ alpha : float, optional
465
+ Initial guess for the Jacobian is (-1/alpha).
466
+ reduction_method : str or tuple, optional
467
+ Method used in ensuring that the rank of the Broyden
468
+ matrix stays low. Can either be a string giving the
469
+ name of the method, or a tuple of the form ``(method,
470
+ param1, param2, ...)`` that gives the name of the
471
+ method and values for additional parameters.
472
+
473
+ Methods available:
474
+
475
+ - ``restart``
476
+ Drop all matrix columns. Has no
477
+ extra parameters.
478
+ - ``simple``
479
+ Drop oldest matrix column. Has no
480
+ extra parameters.
481
+ - ``svd``
482
+ Keep only the most significant SVD
483
+ components.
484
+
485
+ Extra parameters:
486
+
487
+ - ``to_retain``
488
+ Number of SVD components to
489
+ retain when rank reduction is done.
490
+ Default is ``max_rank - 2``.
491
+ max_rank : int, optional
492
+ Maximum rank for the Broyden matrix.
493
+ Default is infinity (i.e., no rank reduction).
494
+ """
495
+ pass
496
+
497
+ def _root_anderson_doc():
498
+ """
499
+ Options
500
+ -------
501
+ nit : int, optional
502
+ Number of iterations to make. If omitted (default), make as many
503
+ as required to meet tolerances.
504
+ disp : bool, optional
505
+ Print status to stdout on every iteration.
506
+ maxiter : int, optional
507
+ Maximum number of iterations to make.
508
+ ftol : float, optional
509
+ Relative tolerance for the residual. If omitted, not used.
510
+ fatol : float, optional
511
+ Absolute tolerance (in max-norm) for the residual.
512
+ If omitted, default is 6e-6.
513
+ xtol : float, optional
514
+ Relative minimum step size. If omitted, not used.
515
+ xatol : float, optional
516
+ Absolute minimum step size, as determined from the Jacobian
517
+ approximation. If the step size is smaller than this, optimization
518
+ is terminated as successful. If omitted, not used.
519
+ tol_norm : function(vector) -> scalar, optional
520
+ Norm to use in convergence check. Default is the maximum norm.
521
+ line_search : {None, 'armijo' (default), 'wolfe'}, optional
522
+ Which type of a line search to use to determine the step size in
523
+ the direction given by the Jacobian approximation. Defaults to
524
+ 'armijo'.
525
+ jac_options : dict, optional
526
+ Options for the respective Jacobian approximation.
527
+
528
+ alpha : float, optional
529
+ Initial guess for the Jacobian is (-1/alpha).
530
+ M : int, optional
531
+ Number of previous vectors to retain. Defaults to 5.
532
+ w0 : float, optional
533
+ Regularization parameter for numerical stability.
534
+ Good values are of the order of 0.01, compared to unity.
535
+ """
536
+ pass
537
+
538
+ def _root_linearmixing_doc():
539
+ """
540
+ Options
541
+ -------
542
+ nit : int, optional
543
+ Number of iterations to make. If omitted (default), make as many
544
+ as required to meet tolerances.
545
+ disp : bool, optional
546
+ Print status to stdout on every iteration.
547
+ maxiter : int, optional
548
+ Maximum number of iterations to make.
549
+ ftol : float, optional
550
+ Relative tolerance for the residual. If omitted, not used.
551
+ fatol : float, optional
552
+ Absolute tolerance (in max-norm) for the residual.
553
+ If omitted, default is 6e-6.
554
+ xtol : float, optional
555
+ Relative minimum step size. If omitted, not used.
556
+ xatol : float, optional
557
+ Absolute minimum step size, as determined from the Jacobian
558
+ approximation. If the step size is smaller than this, optimization
559
+ is terminated as successful. If omitted, not used.
560
+ tol_norm : function(vector) -> scalar, optional
561
+ Norm to use in convergence check. Default is the maximum norm.
562
+ line_search : {None, 'armijo' (default), 'wolfe'}, optional
563
+ Which type of a line search to use to determine the step size in
564
+ the direction given by the Jacobian approximation. Defaults to
565
+ 'armijo'.
566
+ jac_options : dict, optional
567
+ Options for the respective Jacobian approximation.
568
+
569
+ alpha : float, optional
570
+ Initial guess for the Jacobian is (-1/alpha).
571
+ """
572
+ pass
573
+
+ def _root_diagbroyden_doc():
+     """
+     Options
+     -------
+     nit : int, optional
+         Number of iterations to make. If omitted (default), make as many
+         as required to meet tolerances.
+     disp : bool, optional
+         Print status to stdout on every iteration.
+     maxiter : int, optional
+         Maximum number of iterations to make.
+     ftol : float, optional
+         Relative tolerance for the residual. If omitted, not used.
+     fatol : float, optional
+         Absolute tolerance (in max-norm) for the residual.
+         If omitted, default is 6e-6.
+     xtol : float, optional
+         Relative minimum step size. If omitted, not used.
+     xatol : float, optional
+         Absolute minimum step size, as determined from the Jacobian
+         approximation. If the step size is smaller than this, optimization
+         is terminated as successful. If omitted, not used.
+     tol_norm : function(vector) -> scalar, optional
+         Norm to use in convergence check. Default is the maximum norm.
+     line_search : {None, 'armijo' (default), 'wolfe'}, optional
+         Which type of a line search to use to determine the step size in
+         the direction given by the Jacobian approximation. Defaults to
+         'armijo'.
+     jac_options : dict, optional
+         Options for the respective Jacobian approximation.
+
+         alpha : float, optional
+             Initial guess for the Jacobian is (-1/alpha).
+     """
+     pass
+
+ def _root_excitingmixing_doc():
+     """
+     Options
+     -------
+     nit : int, optional
+         Number of iterations to make. If omitted (default), make as many
+         as required to meet tolerances.
+     disp : bool, optional
+         Print status to stdout on every iteration.
+     maxiter : int, optional
+         Maximum number of iterations to make.
+     ftol : float, optional
+         Relative tolerance for the residual. If omitted, not used.
+     fatol : float, optional
+         Absolute tolerance (in max-norm) for the residual.
+         If omitted, default is 6e-6.
+     xtol : float, optional
+         Relative minimum step size. If omitted, not used.
+     xatol : float, optional
+         Absolute minimum step size, as determined from the Jacobian
+         approximation. If the step size is smaller than this, optimization
+         is terminated as successful. If omitted, not used.
+     tol_norm : function(vector) -> scalar, optional
+         Norm to use in convergence check. Default is the maximum norm.
+     line_search : {None, 'armijo' (default), 'wolfe'}, optional
+         Which type of a line search to use to determine the step size in
+         the direction given by the Jacobian approximation. Defaults to
+         'armijo'.
+     jac_options : dict, optional
+         Options for the respective Jacobian approximation.
+
+         alpha : float, optional
+             Initial Jacobian approximation is (-1/alpha).
+         alphamax : float, optional
+             The entries of the diagonal Jacobian are kept in the range
+             ``[alpha, alphamax]``.
+     """
+     pass
+
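A hedged sketch of the two tuning knobs documented above (the residual is a placeholder); the adapted diagonal Jacobian entries stay clipped to ``[alpha, alphamax]``:

    import numpy as np
    from scipy.optimize import root

    def F(x):
        # placeholder residual with a single root near x = 0.739
        return np.cos(x) - x

    # start from -1/alpha on the diagonal; adaptation is clipped to [alpha, alphamax]
    sol = root(F, x0=[0.5], method='excitingmixing',
               options={'jac_options': {'alpha': 0.5, 'alphamax': 1.0}})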
+ def _root_krylov_doc():
+     """
+     Options
+     -------
+     nit : int, optional
+         Number of iterations to make. If omitted (default), make as many
+         as required to meet tolerances.
+     disp : bool, optional
+         Print status to stdout on every iteration.
+     maxiter : int, optional
+         Maximum number of iterations to make.
+     ftol : float, optional
+         Relative tolerance for the residual. If omitted, not used.
+     fatol : float, optional
+         Absolute tolerance (in max-norm) for the residual.
+         If omitted, default is 6e-6.
+     xtol : float, optional
+         Relative minimum step size. If omitted, not used.
+     xatol : float, optional
+         Absolute minimum step size, as determined from the Jacobian
+         approximation. If the step size is smaller than this, optimization
+         is terminated as successful. If omitted, not used.
+     tol_norm : function(vector) -> scalar, optional
+         Norm to use in convergence check. Default is the maximum norm.
+     line_search : {None, 'armijo' (default), 'wolfe'}, optional
+         Which type of a line search to use to determine the step size in
+         the direction given by the Jacobian approximation. Defaults to
+         'armijo'.
+     jac_options : dict, optional
+         Options for the respective Jacobian approximation.
+
+         rdiff : float, optional
+             Relative step size to use in numerical differentiation.
+         method : str or callable, optional
+             Krylov method to use to approximate the Jacobian. Can be a string,
+             or a function implementing the same interface as the iterative
+             solvers in `scipy.sparse.linalg`. If a string, needs to be one of:
+             ``'lgmres'``, ``'gmres'``, ``'bicgstab'``, ``'cgs'``, ``'minres'``,
+             ``'tfqmr'``.
+
+             The default is `scipy.sparse.linalg.lgmres`.
+         inner_M : LinearOperator or InverseJacobian
+             Preconditioner for the inner Krylov iteration.
+             Note that you can also use inverse Jacobians as (adaptive)
+             preconditioners. For example,
+
+             >>> jac = BroydenFirst()
+             >>> kjac = KrylovJacobian(inner_M=jac.inverse)
+
+             If the preconditioner has a method named 'update', it will
+             be called as ``update(x, f)`` after each nonlinear step,
+             with ``x`` giving the current point, and ``f`` the current
+             function value.
+         inner_tol, inner_maxiter, ...
+             Parameters to pass on to the "inner" Krylov solver.
+             See `scipy.sparse.linalg.gmres` for details.
+         outer_k : int, optional
+             Size of the subspace kept across LGMRES nonlinear
+             iterations.
+
+             See `scipy.sparse.linalg.lgmres` for details.
+     """
+     pass
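A hedged sketch of selecting the inner Krylov solver and differentiation step through ``jac_options`` (the residual ``F`` is a placeholder; ``'lgmres'`` is the documented default):

    from scipy.optimize import root

    def F(x):
        # placeholder two-dimensional residual
        return [x[0] + 0.5 * x[1] - 1.0,
                0.5 * (x[1] - x[0])**2 + x[1]]

    # rdiff sets the finite-difference step for Jacobian-vector products;
    # method picks the iterative linear solver used inside each Newton step
    sol = root(F, x0=[0.0, 0.0], method='krylov',
               options={'jac_options': {'method': 'lgmres', 'rdiff': 1e-8}})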
venv/lib/python3.10/site-packages/scipy/optimize/_root_scalar.py ADDED
@@ -0,0 +1,525 @@
+ """
2
+ Unified interfaces to root finding algorithms for real or complex
3
+ scalar functions.
4
+
5
+ Functions
6
+ ---------
7
+ - root : find a root of a scalar function.
8
+ """
9
+ import numpy as np
10
+
11
+ from . import _zeros_py as optzeros
12
+ from ._numdiff import approx_derivative
13
+
14
+ __all__ = ['root_scalar']
15
+
16
+ ROOT_SCALAR_METHODS = ['bisect', 'brentq', 'brenth', 'ridder', 'toms748',
17
+ 'newton', 'secant', 'halley']
18
+
19
+
+ class MemoizeDer:
+     """Decorator that caches the value and derivative(s) of function each
+     time it is called.
+
+     This is a simplistic memoizer that calls and caches a single value
+     of `f(x, *args)`.
+     It assumes that `args` does not change between invocations.
+     It supports the use case of a root-finder where `args` is fixed,
+     `x` changes, and only rarely, if at all, does x assume the same value
+     more than once."""
+     def __init__(self, fun):
+         self.fun = fun
+         self.vals = None
+         self.x = None
+         self.n_calls = 0
+
+     def __call__(self, x, *args):
+         r"""Calculate f or use cached value if available"""
+         # Derivative may be requested before the function itself, always check
+         if self.vals is None or x != self.x:
+             fg = self.fun(x, *args)
+             self.x = x
+             self.n_calls += 1
+             self.vals = fg[:]
+         return self.vals[0]
+
+     def fprime(self, x, *args):
+         r"""Calculate f' or use a cached value if available"""
+         if self.vals is None or x != self.x:
+             self(x, *args)
+         return self.vals[1]
+
+     def fprime2(self, x, *args):
+         r"""Calculate f'' or use a cached value if available"""
+         if self.vals is None or x != self.x:
+             self(x, *args)
+         return self.vals[2]
+
+     def ncalls(self):
+         return self.n_calls
+
+
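To make the caching contract concrete, a minimal sketch (the combined-value function is a placeholder): one call to the memoized object evaluates ``fun`` once, and subsequent derivative lookups at the same ``x`` are served from the cache:

    def f_and_derivs(x):
        # placeholder returning (f, f', f'') in a single call
        return x**3 - 1, 3 * x**2, 6 * x

    g = MemoizeDer(f_and_derivs)
    fval = g(2.0)          # evaluates f_and_derivs once, caches all three values
    fp = g.fprime(2.0)     # same x: read from the cache, no new evaluation
    fpp = g.fprime2(2.0)   # same x: read from the cache, no new evaluation
    assert g.n_calls == 1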
+ def root_scalar(f, args=(), method=None, bracket=None,
+                 fprime=None, fprime2=None,
+                 x0=None, x1=None,
+                 xtol=None, rtol=None, maxiter=None,
+                 options=None):
+     """
+     Find a root of a scalar function.
+
+     Parameters
+     ----------
+     f : callable
+         A function to find a root of.
+     args : tuple, optional
+         Extra arguments passed to the objective function and its derivative(s).
+     method : str, optional
+         Type of solver. Should be one of
+
+         - 'bisect' :ref:`(see here) <optimize.root_scalar-bisect>`
+         - 'brentq' :ref:`(see here) <optimize.root_scalar-brentq>`
+         - 'brenth' :ref:`(see here) <optimize.root_scalar-brenth>`
+         - 'ridder' :ref:`(see here) <optimize.root_scalar-ridder>`
+         - 'toms748' :ref:`(see here) <optimize.root_scalar-toms748>`
+         - 'newton' :ref:`(see here) <optimize.root_scalar-newton>`
+         - 'secant' :ref:`(see here) <optimize.root_scalar-secant>`
+         - 'halley' :ref:`(see here) <optimize.root_scalar-halley>`
+
+     bracket: A sequence of 2 floats, optional
+         An interval bracketing a root. `f(x, *args)` must have different
+         signs at the two endpoints.
+     x0 : float, optional
+         Initial guess.
+     x1 : float, optional
+         A second guess.
+     fprime : bool or callable, optional
+         If `fprime` is a boolean and is True, `f` is assumed to return the
+         value of the objective function and of the derivative.
+         `fprime` can also be a callable returning the derivative of `f`. In
+         this case, it must accept the same arguments as `f`.
+     fprime2 : bool or callable, optional
+         If `fprime2` is a boolean and is True, `f` is assumed to return the
+         value of the objective function and of the
+         first and second derivatives.
+         `fprime2` can also be a callable returning the second derivative of `f`.
+         In this case, it must accept the same arguments as `f`.
+     xtol : float, optional
+         Tolerance (absolute) for termination.
+     rtol : float, optional
+         Tolerance (relative) for termination.
+     maxiter : int, optional
+         Maximum number of iterations.
+     options : dict, optional
+         A dictionary of solver options. E.g., ``k``, see
+         :obj:`show_options()` for details.
+
+     Returns
+     -------
+     sol : RootResults
+         The solution represented as a ``RootResults`` object.
+         Important attributes are: ``root`` the solution, ``converged`` a
+         boolean flag indicating if the algorithm exited successfully and
+         ``flag`` which describes the cause of the termination. See
+         `RootResults` for a description of other attributes.
+
+     See also
+     --------
+     show_options : Additional options accepted by the solvers
+     root : Find a root of a vector function.
+
+     Notes
+     -----
+     This section describes the available solvers that can be selected by the
+     'method' parameter.
+
+     The default is to use the best method available for the situation
+     presented.
+     If a bracket is provided, it may use one of the bracketing methods.
+     If a derivative and an initial value are specified, it may
+     select one of the derivative-based methods.
+     If no method is judged applicable, it will raise an Exception.
+
+     Arguments for each method are as follows (x=required, o=optional).
+
+     +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
+     | method                                        | f | args | bracket | x0 | x1 | fprime | fprime2 | xtol | rtol | maxiter | options |
+     +===============================================+===+======+=========+====+====+========+=========+======+======+=========+=========+
+     | :ref:`bisect <optimize.root_scalar-bisect>`   | x | o    | x       |    |    |        |         | o    | o    | o       | o       |
+     +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
+     | :ref:`brentq <optimize.root_scalar-brentq>`   | x | o    | x       |    |    |        |         | o    | o    | o       | o       |
+     +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
+     | :ref:`brenth <optimize.root_scalar-brenth>`   | x | o    | x       |    |    |        |         | o    | o    | o       | o       |
+     +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
+     | :ref:`ridder <optimize.root_scalar-ridder>`   | x | o    | x       |    |    |        |         | o    | o    | o       | o       |
+     +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
+     | :ref:`toms748 <optimize.root_scalar-toms748>` | x | o    | x       |    |    |        |         | o    | o    | o       | o       |
+     +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
+     | :ref:`secant <optimize.root_scalar-secant>`   | x | o    |         | x  | o  |        |         | o    | o    | o       | o       |
+     +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
+     | :ref:`newton <optimize.root_scalar-newton>`   | x | o    |         | x  |    | o      |         | o    | o    | o       | o       |
+     +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
+     | :ref:`halley <optimize.root_scalar-halley>`   | x | o    |         | x  |    | x      | x       | o    | o    | o       | o       |
+     +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
+
+     Examples
+     --------
+
+     Find the root of a simple cubic
+
+     >>> from scipy import optimize
+     >>> def f(x):
+     ...     return (x**3 - 1)  # only one real root at x = 1
+
+     >>> def fprime(x):
+     ...     return 3*x**2
+
+     The `brentq` method takes as input a bracket
+
+     >>> sol = optimize.root_scalar(f, bracket=[0, 3], method='brentq')
+     >>> sol.root, sol.iterations, sol.function_calls
+     (1.0, 10, 11)
+
+     The `newton` method takes as input a single point and uses the
+     derivative(s).
+
+     >>> sol = optimize.root_scalar(f, x0=0.2, fprime=fprime, method='newton')
+     >>> sol.root, sol.iterations, sol.function_calls
+     (1.0, 11, 22)
+
+     The function can provide the value and derivative(s) in a single call.
+
+     >>> def f_p_pp(x):
+     ...     return (x**3 - 1), 3*x**2, 6*x
+
+     >>> sol = optimize.root_scalar(
+     ...     f_p_pp, x0=0.2, fprime=True, method='newton'
+     ... )
+     >>> sol.root, sol.iterations, sol.function_calls
+     (1.0, 11, 11)
+
+     >>> sol = optimize.root_scalar(
+     ...     f_p_pp, x0=0.2, fprime=True, fprime2=True, method='halley'
+     ... )
+     >>> sol.root, sol.iterations, sol.function_calls
+     (1.0, 7, 8)
+
+
+     """  # noqa: E501
+     if not isinstance(args, tuple):
+         args = (args,)
+
+     if options is None:
+         options = {}
+
+     # fun also returns the derivative(s)
+     is_memoized = False
+     if fprime2 is not None and not callable(fprime2):
+         if bool(fprime2):
+             f = MemoizeDer(f)
+             is_memoized = True
+             fprime2 = f.fprime2
+             fprime = f.fprime
+         else:
+             fprime2 = None
+     if fprime is not None and not callable(fprime):
+         if bool(fprime):
+             f = MemoizeDer(f)
+             is_memoized = True
+             fprime = f.fprime
+         else:
+             fprime = None
+
+     # respect solver-specific default tolerances - only pass in if actually set
+     kwargs = {}
+     for k in ['xtol', 'rtol', 'maxiter']:
+         v = locals().get(k)
+         if v is not None:
+             kwargs[k] = v
+
+     # Set any solver-specific options
+     if options:
+         kwargs.update(options)
+     # Always request full_output from the underlying method as _root_scalar
+     # always returns a RootResults object
+     kwargs.update(full_output=True, disp=False)
+
+     # Pick a method if not specified.
+     # Use the "best" method available for the situation.
+     if not method:
+         if bracket:
+             method = 'brentq'
+         elif x0 is not None:
+             if fprime:
+                 if fprime2:
+                     method = 'halley'
+                 else:
+                     method = 'newton'
+             elif x1 is not None:
+                 method = 'secant'
+             else:
+                 method = 'newton'
+     if not method:
+         raise ValueError('Unable to select a solver as neither bracket '
+                          'nor starting point provided.')
+
+     meth = method.lower()
+     map2underlying = {'halley': 'newton', 'secant': 'newton'}
+
+     try:
+         methodc = getattr(optzeros, map2underlying.get(meth, meth))
+     except AttributeError as e:
+         raise ValueError('Unknown solver %s' % meth) from e
+
+     if meth in ['bisect', 'ridder', 'brentq', 'brenth', 'toms748']:
+         if not isinstance(bracket, (list, tuple, np.ndarray)):
+             raise ValueError('Bracket needed for %s' % method)
+
+         a, b = bracket[:2]
+         try:
+             r, sol = methodc(f, a, b, args=args, **kwargs)
+         except ValueError as e:
+             # gh-17622 fixed some bugs in low-level solvers by raising an error
+             # (rather than returning incorrect results) when the callable
+             # returns a NaN. It did so by wrapping the callable rather than
+             # modifying compiled code, so the iteration count is not available.
+             if hasattr(e, "_x"):
+                 sol = optzeros.RootResults(root=e._x,
+                                            iterations=np.nan,
+                                            function_calls=e._function_calls,
+                                            flag=str(e), method=method)
+             else:
+                 raise
+
+     elif meth in ['secant']:
+         if x0 is None:
+             raise ValueError('x0 must not be None for %s' % method)
+         if 'xtol' in kwargs:
+             kwargs['tol'] = kwargs.pop('xtol')
+         r, sol = methodc(f, x0, args=args, fprime=None, fprime2=None,
+                          x1=x1, **kwargs)
+     elif meth in ['newton']:
+         if x0 is None:
+             raise ValueError('x0 must not be None for %s' % method)
+         if not fprime:
+             # approximate fprime with finite differences
+
+             def fprime(x, *args):
+                 # `root_scalar` doesn't actually seem to support vectorized
+                 # use of `newton`. In that case, `approx_derivative` will
+                 # always get scalar input. Nonetheless, it always returns an
+                 # array, so we extract the element to produce scalar output.
+                 return approx_derivative(f, x, method='2-point', args=args)[0]
+
+         if 'xtol' in kwargs:
+             kwargs['tol'] = kwargs.pop('xtol')
+         r, sol = methodc(f, x0, args=args, fprime=fprime, fprime2=None,
+                          **kwargs)
+     elif meth in ['halley']:
+         if x0 is None:
+             raise ValueError('x0 must not be None for %s' % method)
+         if not fprime:
+             raise ValueError('fprime must be specified for %s' % method)
+         if not fprime2:
+             raise ValueError('fprime2 must be specified for %s' % method)
+         if 'xtol' in kwargs:
+             kwargs['tol'] = kwargs.pop('xtol')
+         r, sol = methodc(f, x0, args=args, fprime=fprime, fprime2=fprime2, **kwargs)
+     else:
+         raise ValueError('Unknown solver %s' % method)
+
+     if is_memoized:
+         # Replace the function_calls count with the memoized count.
+         # Avoids double and triple-counting.
+         n_calls = f.n_calls
+         sol.function_calls = n_calls
+
+     return sol
+
+
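The selection chain above can be exercised directly; a hedged sketch with a placeholder function:

    from scipy.optimize import root_scalar

    def f(x):
        return x**3 - 1  # placeholder, single real root at x = 1

    # no method given: the bracket triggers the 'brentq' branch above
    sol = root_scalar(f, bracket=[0, 3])

    # an initial point plus a derivative triggers the 'newton' branch
    sol2 = root_scalar(f, x0=0.2, fprime=lambda x: 3 * x**2)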
+ def _root_scalar_brentq_doc():
+     r"""
+     Options
+     -------
+     args : tuple, optional
+         Extra arguments passed to the objective function.
+     bracket: A sequence of 2 floats, optional
+         An interval bracketing a root. `f(x, *args)` must have different
+         signs at the two endpoints.
+     xtol : float, optional
+         Tolerance (absolute) for termination.
+     rtol : float, optional
+         Tolerance (relative) for termination.
+     maxiter : int, optional
+         Maximum number of iterations.
+     options: dict, optional
+         Specifies any method-specific options not covered above.
+
+     """
+     pass
+
+
+ def _root_scalar_brenth_doc():
+     r"""
+     Options
+     -------
+     args : tuple, optional
+         Extra arguments passed to the objective function.
+     bracket: A sequence of 2 floats, optional
+         An interval bracketing a root. `f(x, *args)` must have different
+         signs at the two endpoints.
+     xtol : float, optional
+         Tolerance (absolute) for termination.
+     rtol : float, optional
+         Tolerance (relative) for termination.
+     maxiter : int, optional
+         Maximum number of iterations.
+     options: dict, optional
+         Specifies any method-specific options not covered above.
+
+     """
+     pass
+
+ def _root_scalar_toms748_doc():
+     r"""
+     Options
+     -------
+     args : tuple, optional
+         Extra arguments passed to the objective function.
+     bracket: A sequence of 2 floats, optional
+         An interval bracketing a root. `f(x, *args)` must have different
+         signs at the two endpoints.
+     xtol : float, optional
+         Tolerance (absolute) for termination.
+     rtol : float, optional
+         Tolerance (relative) for termination.
+     maxiter : int, optional
+         Maximum number of iterations.
+     options: dict, optional
+         Specifies any method-specific options not covered above.
+
+     """
+     pass
+
+
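A hedged example of a method-specific option traveling through ``options`` (``f`` is a placeholder; ``k`` is the toms748 option mentioned in the `root_scalar` docstring above):

    from scipy.optimize import root_scalar

    def f(x):
        return x**3 - 1  # placeholder, single real root at x = 1

    # 'k' is merged into the kwargs passed to the underlying toms748 routine
    sol = root_scalar(f, bracket=[0, 3], method='toms748', options={'k': 2})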
+ def _root_scalar_secant_doc():
+     r"""
+     Options
+     -------
+     args : tuple, optional
+         Extra arguments passed to the objective function.
+     xtol : float, optional
+         Tolerance (absolute) for termination.
+     rtol : float, optional
+         Tolerance (relative) for termination.
+     maxiter : int, optional
+         Maximum number of iterations.
+     x0 : float, required
+         Initial guess.
+     x1 : float, required
+         A second guess.
+     options: dict, optional
+         Specifies any method-specific options not covered above.
+
+     """
+     pass
+
+
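For the secant method, both starting points are passed as keywords; a hedged sketch with a placeholder function:

    from scipy.optimize import root_scalar

    def f(x):
        return x**3 - 1  # placeholder, single real root at x = 1

    # no derivative needed; x0 and x1 seed the secant iteration
    sol = root_scalar(f, x0=0.5, x1=0.6, method='secant')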
+ def _root_scalar_newton_doc():
+     r"""
+     Options
+     -------
+     args : tuple, optional
+         Extra arguments passed to the objective function and its derivative.
+     xtol : float, optional
+         Tolerance (absolute) for termination.
+     rtol : float, optional
+         Tolerance (relative) for termination.
+     maxiter : int, optional
+         Maximum number of iterations.
+     x0 : float, required
+         Initial guess.
+     fprime : bool or callable, optional
+         If `fprime` is a boolean and is True, `f` is assumed to return the
+         value of derivative along with the objective function.
+         `fprime` can also be a callable returning the derivative of `f`. In
+         this case, it must accept the same arguments as `f`.
+     options: dict, optional
+         Specifies any method-specific options not covered above.
+
+     """
+     pass
+
+
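Tying this back to the dispatch code above: when ``fprime`` is omitted for ``'newton'``, `root_scalar` substitutes a 2-point finite-difference derivative built with `approx_derivative`. A hedged sketch with a placeholder function:

    from scipy.optimize import root_scalar

    def f(x):
        return x**3 - 1  # placeholder, single real root at x = 1

    # no fprime given: the wrapper constructs one by finite differences
    sol = root_scalar(f, x0=0.2, method='newton')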
+ def _root_scalar_halley_doc():
+     r"""
+     Options
+     -------
+     args : tuple, optional
+         Extra arguments passed to the objective function and its derivatives.
+     xtol : float, optional
+         Tolerance (absolute) for termination.
+     rtol : float, optional
+         Tolerance (relative) for termination.
+     maxiter : int, optional
+         Maximum number of iterations.
+     x0 : float, required
+         Initial guess.
+     fprime : bool or callable, required
+         If `fprime` is a boolean and is True, `f` is assumed to return the
+         value of derivative along with the objective function.
+         `fprime` can also be a callable returning the derivative of `f`. In
+         this case, it must accept the same arguments as `f`.
+     fprime2 : bool or callable, required
+         If `fprime2` is a boolean and is True, `f` is assumed to return the
+         value of 1st and 2nd derivatives along with the objective function.
+         `fprime2` can also be a callable returning the 2nd derivative of `f`.
+         In this case, it must accept the same arguments as `f`.
+     options: dict, optional
+         Specifies any method-specific options not covered above.
+
+     """
+     pass
+
+
+ def _root_scalar_ridder_doc():
+     r"""
+     Options
+     -------
+     args : tuple, optional
+         Extra arguments passed to the objective function.
+     bracket: A sequence of 2 floats, optional
+         An interval bracketing a root. `f(x, *args)` must have different
+         signs at the two endpoints.
+     xtol : float, optional
+         Tolerance (absolute) for termination.
+     rtol : float, optional
+         Tolerance (relative) for termination.
+     maxiter : int, optional
+         Maximum number of iterations.
+     options: dict, optional
+         Specifies any method-specific options not covered above.
+
+     """
+     pass
+
+
+ def _root_scalar_bisect_doc():
+     r"""
+     Options
+     -------
+     args : tuple, optional
+         Extra arguments passed to the objective function.
+     bracket: A sequence of 2 floats, optional
+         An interval bracketing a root. `f(x, *args)` must have different
+         signs at the two endpoints.
+     xtol : float, optional
+         Tolerance (absolute) for termination.
+     rtol : float, optional
+         Tolerance (relative) for termination.
+     maxiter : int, optional
+         Maximum number of iterations.
+     options: dict, optional
+         Specifies any method-specific options not covered above.
+
+     """
+     pass
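Finally, a hedged bisection sketch with a placeholder function: only the sign change over the bracket matters, and ``xtol`` bounds the final interval width:

    from scipy.optimize import root_scalar

    def f(x):
        return x**3 - 1  # placeholder; f changes sign on [0, 3]

    sol = root_scalar(f, bracket=[0, 3], method='bisect', xtol=1e-8)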