applied-ai-018 committed
Commit e266372 · verified · 1 parent: 6fdfda2

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.

Files changed (50):
  1. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_bglu_dense.cpython-310-x86_64-linux-gnu.so +0 -0
  2. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_bracket.py +663 -0
  3. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_chandrupatla.py +524 -0
  4. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_cobyla.cpython-310-x86_64-linux-gnu.so +0 -0
  5. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_cobyla_py.py +316 -0
  6. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_constraints.py +590 -0
  7. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_differentiate.py +669 -0
  8. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/__init__.py +0 -0
  9. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/__pycache__/__init__.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_constants.cpython-310-x86_64-linux-gnu.so +0 -0
  11. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HConst.pxd +106 -0
  12. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/Highs.pxd +56 -0
  13. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsIO.pxd +20 -0
  14. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsInfo.pxd +22 -0
  15. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLp.pxd +46 -0
  16. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLpUtils.pxd +9 -0
  17. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsModelUtils.pxd +10 -0
  18. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsOptions.pxd +110 -0
  19. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsRuntimeOptions.pxd +9 -0
  20. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsStatus.pxd +12 -0
  21. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/SimplexConst.pxd +95 -0
  22. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/highs_c_api.pxd +7 -0
  23. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lbfgsb.cpython-310-x86_64-linux-gnu.so +0 -0
  24. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lbfgsb_py.py +543 -0
  25. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linesearch.py +897 -0
  26. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog.py +714 -0
  27. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_doc.py +1434 -0
  28. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_highs.py +440 -0
  29. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_simplex.py +661 -0
  30. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_util.py +1522 -0
  31. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_minimize.py +1094 -0
  32. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_minpack.cpython-310-x86_64-linux-gnu.so +0 -0
  33. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_minpack_py.py +1157 -0
  34. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_numdiff.py +775 -0
  35. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_optimize.py +0 -0
  36. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so +0 -0
  37. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_root.py +711 -0
  38. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_root_scalar.py +525 -0
  39. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo.py +1595 -0
  40. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__init__.py +0 -0
  41. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/__init__.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_complex.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_vertex.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_complex.py +1225 -0
  45. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_vertex.py +460 -0
  46. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_slsqp.cpython-310-x86_64-linux-gnu.so +0 -0
  47. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_slsqp_py.py +513 -0
  48. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_spectral.py +260 -0
  49. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_tnc.py +430 -0
  50. llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trlib/__init__.py +12 -0
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_bglu_dense.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (364 kB).
 
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_bracket.py ADDED
@@ -0,0 +1,663 @@
import numpy as np
import scipy._lib._elementwise_iterative_method as eim
from scipy._lib._util import _RichResult

_ELIMITS = -1  # used in _bracket_root
_ESTOPONESIDE = 2  # used in _bracket_root

def _bracket_root_iv(func, xl0, xr0, xmin, xmax, factor, args, maxiter):

    if not callable(func):
        raise ValueError('`func` must be callable.')

    if not np.iterable(args):
        args = (args,)

    xl0 = np.asarray(xl0)[()]
    if not np.issubdtype(xl0.dtype, np.number) or np.iscomplex(xl0).any():
        raise ValueError('`xl0` must be numeric and real.')

    xr0 = xl0 + 1 if xr0 is None else xr0
    xmin = -np.inf if xmin is None else xmin
    xmax = np.inf if xmax is None else xmax
    factor = 2. if factor is None else factor
    xl0, xr0, xmin, xmax, factor = np.broadcast_arrays(xl0, xr0, xmin, xmax, factor)

    if not np.issubdtype(xr0.dtype, np.number) or np.iscomplex(xr0).any():
        raise ValueError('`xr0` must be numeric and real.')

    if not np.issubdtype(xmin.dtype, np.number) or np.iscomplex(xmin).any():
        raise ValueError('`xmin` must be numeric and real.')

    if not np.issubdtype(xmax.dtype, np.number) or np.iscomplex(xmax).any():
        raise ValueError('`xmax` must be numeric and real.')

    if not np.issubdtype(factor.dtype, np.number) or np.iscomplex(factor).any():
        raise ValueError('`factor` must be numeric and real.')
    if not np.all(factor > 1):
        raise ValueError('All elements of `factor` must be greater than 1.')

    maxiter = np.asarray(maxiter)
    message = '`maxiter` must be a non-negative integer.'
    if (not np.issubdtype(maxiter.dtype, np.number) or maxiter.shape != tuple()
            or np.iscomplex(maxiter)):
        raise ValueError(message)
    maxiter_int = int(maxiter[()])
    if not maxiter == maxiter_int or maxiter < 0:
        raise ValueError(message)

    if not np.all((xmin <= xl0) & (xl0 < xr0) & (xr0 <= xmax)):
        raise ValueError('`xmin <= xl0 < xr0 <= xmax` must be True (elementwise).')

    return func, xl0, xr0, xmin, xmax, factor, args, maxiter


def _bracket_root(func, xl0, xr0=None, *, xmin=None, xmax=None, factor=None,
                  args=(), maxiter=1000):
    """Bracket the root of a monotonic scalar function of one variable

    This function works elementwise when `xl0`, `xr0`, `xmin`, `xmax`, `factor`, and
    the elements of `args` are broadcastable arrays.

    Parameters
    ----------
    func : callable
        The function for which the root is to be bracketed.
        The signature must be::

            func(x: ndarray, *args) -> ndarray

        where each element of ``x`` is a finite real and ``args`` is a tuple,
        which may contain an arbitrary number of arrays that are broadcastable
        with `x`. ``func`` must be an elementwise function: each element
        ``func(x)[i]`` must equal ``func(x[i])`` for all indices ``i``.
    xl0, xr0 : float array_like
        Starting guess of bracket, which need not contain a root. If `xr0` is
        not provided, ``xr0 = xl0 + 1``. Must be broadcastable with one another.
    xmin, xmax : float array_like, optional
        Minimum and maximum allowable endpoints of the bracket, inclusive. Must
        be broadcastable with `xl0` and `xr0`.
    factor : float array_like, default: 2
        The factor used to grow the bracket. See Notes for details.
    args : tuple, optional
        Additional positional arguments to be passed to `func`. Must be arrays
        broadcastable with `xl0`, `xr0`, `xmin`, and `xmax`. If the callable to be
        bracketed requires arguments that are not broadcastable with these
        arrays, wrap that callable with `func` such that `func` accepts
        only `x` and broadcastable arrays.
    maxiter : int, optional
        The maximum number of iterations of the algorithm to perform.

    Returns
    -------
    res : _RichResult
        An instance of `scipy._lib._util._RichResult` with the following
        attributes. The descriptions are written as though the values will be
        scalars; however, if `func` returns an array, the outputs will be
        arrays of the same shape.

        xl, xr : float
            The lower and upper ends of the bracket, if the algorithm
            terminated successfully.
        fl, fr : float
            The function value at the lower and upper ends of the bracket.
        nfev : int
            The number of function evaluations required to find the bracket.
            This is distinct from the number of times `func` is *called*
            because the function may be evaluated at multiple points in a
            single call.
        nit : int
            The number of iterations of the algorithm that were performed.
        status : int
            An integer representing the exit status of the algorithm.

            - ``0`` : The algorithm produced a valid bracket.
            - ``-1`` : The bracket expanded to the allowable limits without finding a bracket.
            - ``-2`` : The maximum number of iterations was reached.
            - ``-3`` : A non-finite value was encountered.
            - ``-4`` : Iteration was terminated by `callback`.
            - ``1`` : The algorithm is proceeding normally (in `callback` only).
            - ``2`` : A bracket was found in the opposite search direction (in `callback` only).

        success : bool
            ``True`` when the algorithm terminated successfully (status ``0``).

    Notes
    -----
    This function generalizes an algorithm found in pieces throughout
    `scipy.stats`. The strategy is to iteratively grow the bracket `(l, r)`
    until ``func(l) < 0 < func(r)``. The bracket grows to the left as follows.

    - If `xmin` is not provided, the distance between `xl0` and `l` is iteratively
      increased by `factor`.
    - If `xmin` is provided, the distance between `xmin` and `l` is iteratively
      decreased by `factor`. Note that this also *increases* the bracket size.

    Growth of the bracket to the right is analogous.

    Growth of the bracket in one direction stops when the endpoint is no longer
    finite, the function value at the endpoint is no longer finite, or the
    endpoint reaches its limiting value (`xmin` or `xmax`). Iteration terminates
    when the bracket stops growing in both directions, the bracket surrounds
    the root, or a root is found (accidentally).

    If two brackets are found - that is, a bracket is found on both sides in
    the same iteration - the smaller of the two is returned.
    If roots of the function are found, both `l` and `r` are set to the
    leftmost root.

    """  # noqa: E501
    # Todo:
    # - find bracket with sign change in specified direction
    # - Add tolerance
    # - allow factor < 1?

    callback = None  # works; I just don't want to test it
    temp = _bracket_root_iv(func, xl0, xr0, xmin, xmax, factor, args, maxiter)
    func, xl0, xr0, xmin, xmax, factor, args, maxiter = temp

    xs = (xl0, xr0)
    temp = eim._initialize(func, xs, args)
    func, xs, fs, args, shape, dtype = temp  # line split for PEP8

    # The approach is to treat the left and right searches as though they were
    # (almost) totally independent one-sided bracket searches. (The interaction
    # is considered when checking for termination and preparing the result
    # object.)
    # `x` is the "moving" end of the bracket
    x = np.concatenate(xs)
    f = np.concatenate(fs)
    n = len(x) // 2

    # `x_last` is the previous location of the moving end of the bracket. If
    # the signs of `f` and `f_last` are different, `x` and `x_last` form a
    # bracket.
    x_last = np.concatenate((x[n:], x[:n]))
    f_last = np.concatenate((f[n:], f[:n]))
    # `x0` is the "fixed" end of the bracket.
    x0 = x_last
    # We don't need to retain the corresponding function value, since the
    # fixed end of the bracket is only needed to compute the new value of the
    # moving end; it is never returned.

    xmin = np.broadcast_to(xmin, shape).astype(dtype, copy=False).ravel()
    xmax = np.broadcast_to(xmax, shape).astype(dtype, copy=False).ravel()
    limit = np.concatenate((xmin, xmax))

    factor = np.broadcast_to(factor, shape).astype(dtype, copy=False).ravel()
    factor = np.concatenate((factor, factor))

    active = np.arange(2*n)
    args = [np.concatenate((arg, arg)) for arg in args]

    # This is needed due to inner workings of `eim._loop`.
    # We're abusing it a tiny bit.
    shape = shape + (2,)

    # `d` is for "distance".
    # For searches without a limit, the distance between the fixed end of the
    # bracket `x0` and the moving end `x` will grow by `factor` each iteration.
    # For searches with a limit, the distance between the `limit` and moving
    # end of the bracket `x` will shrink by `factor` each iteration.
    i = np.isinf(limit)
    ni = ~i
    d = np.zeros_like(x)
    d[i] = x[i] - x0[i]
    d[ni] = limit[ni] - x[ni]

    status = np.full_like(x, eim._EINPROGRESS, dtype=int)  # in progress
    nit, nfev = 0, 1  # one function evaluation per side performed above

    work = _RichResult(x=x, x0=x0, f=f, limit=limit, factor=factor,
                       active=active, d=d, x_last=x_last, f_last=f_last,
                       nit=nit, nfev=nfev, status=status, args=args,
                       xl=None, xr=None, fl=None, fr=None, n=n)
    res_work_pairs = [('status', 'status'), ('xl', 'xl'), ('xr', 'xr'),
                      ('nit', 'nit'), ('nfev', 'nfev'), ('fl', 'fl'),
                      ('fr', 'fr'), ('x', 'x'), ('f', 'f'),
                      ('x_last', 'x_last'), ('f_last', 'f_last')]

    def pre_func_eval(work):
        # Initialize moving end of bracket
        x = np.zeros_like(work.x)

        # Unlimited brackets grow by `factor` by increasing distance from fixed
        # end to moving end.
        i = np.isinf(work.limit)  # indices of unlimited brackets
        work.d[i] *= work.factor[i]
        x[i] = work.x0[i] + work.d[i]

        # Limited brackets grow by decreasing the distance from the limit to
        # the moving end.
        ni = ~i  # indices of limited brackets
        work.d[ni] /= work.factor[ni]
        x[ni] = work.limit[ni] - work.d[ni]

        return x

    def post_func_eval(x, f, work):
        # Keep track of the previous location of the moving end so that we can
        # return a narrower bracket. (The alternative is to remember the
        # original fixed end, but then the bracket would be wider than needed.)
        work.x_last = work.x
        work.f_last = work.f
        work.x = x
        work.f = f

    def check_termination(work):
        stop = np.zeros_like(work.x, dtype=bool)

        # Condition 1: a valid bracket (or the root itself) has been found
        sf = np.sign(work.f)
        sf_last = np.sign(work.f_last)
        i = (sf_last == -sf) | (sf_last == 0) | (sf == 0)
        work.status[i] = eim._ECONVERGED
        stop[i] = True

        # Condition 2: the other side's search found a valid bracket.
        # (If we just found a bracket with the rightward search, we can stop
        # the leftward search, and vice-versa.)
        # To do this, we need to set the status of the other side's search;
        # this is tricky because `work.status` contains only the *active*
        # elements, so we don't immediately know the index of the element we
        # need to set - or even if it's still there. (That search may have
        # terminated already, e.g. by reaching its `limit`.)
        # To facilitate this, `work.active` contains a unique integer index of
        # each search. Index `k` (`k < n`) and `k + n` correspond with a
        # leftward and rightward search, respectively. Elements are removed
        # from `work.active` just as they are removed from `work.status`, so
        # we use `work.active` to help find the right location in
        # `work.status`.
        # Get the integer indices of the elements that can also stop
        also_stop = (work.active[i] + work.n) % (2*work.n)
        # Check whether they are still active.
        # To start, we need to find out where in `work.active` they would
        # appear if they are indeed there.
        j = np.searchsorted(work.active, also_stop)
        # If the location exceeds the length of `work.active`, they are
        # not there.
        j = j[j < len(work.active)]
        # Check whether they are still there.
        j = j[also_stop == work.active[j]]
        # Now convert these to boolean indices to use with `work.status`.
        i = np.zeros_like(stop)
        i[j] = True  # boolean indices of elements that can also stop
        i = i & ~stop
        work.status[i] = _ESTOPONESIDE
        stop[i] = True

        # Condition 3: moving end of bracket reaches limit
        i = (work.x == work.limit) & ~stop
        work.status[i] = _ELIMITS
        stop[i] = True

        # Condition 4: non-finite value encountered
        i = ~(np.isfinite(work.x) & np.isfinite(work.f)) & ~stop
        work.status[i] = eim._EVALUEERR
        stop[i] = True

        return stop

    def post_termination_check(work):
        pass

    def customize_result(res, shape):
        n = len(res['x']) // 2

        # To avoid ambiguity, below we refer to `xl0`, the initial left endpoint
        # as `a` and `xr0`, the initial right endpoint, as `b`.
        # Because we treat the two one-sided searches as though they were
        # independent, what we keep track of in `work` and what we want to
        # return in `res` look quite different. Combine the results from the
        # two one-sided searches before reporting the results to the user.
        # - "a" refers to the leftward search (the moving end started at `a`)
        # - "b" refers to the rightward search (the moving end started at `b`)
        # - "l" refers to the left end of the bracket (closer to -oo)
        # - "r" refers to the right end of the bracket (closer to +oo)
        xal = res['x'][:n]
        xar = res['x_last'][:n]
        xbl = res['x_last'][n:]
        xbr = res['x'][n:]

        fal = res['f'][:n]
        far = res['f_last'][:n]
        fbl = res['f_last'][n:]
        fbr = res['f'][n:]

        # Initialize the brackets and corresponding function values to return
        # to the user. Brackets may not be valid (e.g. there is no root,
        # there weren't enough iterations, NaN encountered), but we still need
        # to return something. One option would be all NaNs, but what I've
        # chosen here is the left- and right-most points at which the function
        # has been evaluated. This gives the user some information about what
        # interval of the real line has been searched and shows that there is
        # no sign change between the two ends.
        xl = xal.copy()
        fl = fal.copy()
        xr = xbr.copy()
        fr = fbr.copy()

        # `status` indicates whether the bracket is valid or not. If so,
        # we want to adjust the bracket we return to be the narrowest possible
        # given the points at which we evaluated the function.
        # For example if bracket "a" is valid and smaller than bracket "b" OR
        # if bracket "a" is valid and bracket "b" is not valid, we want to
        # return bracket "a" (and vice versa).
        sa = res['status'][:n]
        sb = res['status'][n:]

        da = xar - xal
        db = xbr - xbl

        i1 = ((da <= db) & (sa == 0)) | ((sa == 0) & (sb != 0))
        i2 = ((db <= da) & (sb == 0)) | ((sb == 0) & (sa != 0))

        xr[i1] = xar[i1]
        fr[i1] = far[i1]
        xl[i2] = xbl[i2]
        fl[i2] = fbl[i2]

        # Finish assembling the result object
        res['xl'] = xl
        res['xr'] = xr
        res['fl'] = fl
        res['fr'] = fr

        res['nit'] = np.maximum(res['nit'][:n], res['nit'][n:])
        res['nfev'] = res['nfev'][:n] + res['nfev'][n:]
        # If the status on one side is zero, the status is zero. In any case,
        # report the status from one side only.
        res['status'] = np.choose(sa == 0, (sb, sa))
        res['success'] = (res['status'] == 0)

        del res['x']
        del res['f']
        del res['x_last']
        del res['f_last']

        return shape[:-1]

    return eim._loop(work, callback, shape, maxiter, func, args, dtype,
                     pre_func_eval, post_func_eval, check_termination,
                     post_termination_check, customize_result, res_work_pairs)


def _bracket_minimum_iv(func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter):

    if not callable(func):
        raise ValueError('`func` must be callable.')

    if not np.iterable(args):
        args = (args,)

    xm0 = np.asarray(xm0)[()]
    if not np.issubdtype(xm0.dtype, np.number) or np.iscomplex(xm0).any():
        raise ValueError('`xm0` must be numeric and real.')

    xmin = -np.inf if xmin is None else xmin
    xmax = np.inf if xmax is None else xmax

    xl0_not_supplied = False
    if xl0 is None:
        xl0 = xm0 - 0.5
        xl0_not_supplied = True

    xr0_not_supplied = False
    if xr0 is None:
        xr0 = xm0 + 0.5
        xr0_not_supplied = True

    factor = 2.0 if factor is None else factor
    xl0, xm0, xr0, xmin, xmax, factor = np.broadcast_arrays(
        xl0, xm0, xr0, xmin, xmax, factor
    )

    if not np.issubdtype(xl0.dtype, np.number) or np.iscomplex(xl0).any():
        raise ValueError('`xl0` must be numeric and real.')

    if not np.issubdtype(xr0.dtype, np.number) or np.iscomplex(xr0).any():
        raise ValueError('`xr0` must be numeric and real.')

    if not np.issubdtype(xmin.dtype, np.number) or np.iscomplex(xmin).any():
        raise ValueError('`xmin` must be numeric and real.')

    if not np.issubdtype(xmax.dtype, np.number) or np.iscomplex(xmax).any():
        raise ValueError('`xmax` must be numeric and real.')

    if not np.issubdtype(factor.dtype, np.number) or np.iscomplex(factor).any():
        raise ValueError('`factor` must be numeric and real.')
    if not np.all(factor > 1):
        raise ValueError('All elements of `factor` must be greater than 1.')

    # Default choices for xl or xr might have exceeded xmin or xmax. Adjust
    # to make sure this doesn't happen. We replace with copies because xl and xr
    # are read-only views produced by broadcast_arrays.
    if xl0_not_supplied:
        xl0 = xl0.copy()
        cond = ~np.isinf(xmin) & (xl0 < xmin)
        xl0[cond] = (
            xm0[cond] - xmin[cond]
        ) / np.array(16, dtype=xl0.dtype)
    if xr0_not_supplied:
        xr0 = xr0.copy()
        cond = ~np.isinf(xmax) & (xmax < xr0)
        xr0[cond] = (
            xmax[cond] - xm0[cond]
        ) / np.array(16, dtype=xr0.dtype)

    maxiter = np.asarray(maxiter)
    message = '`maxiter` must be a non-negative integer.'
    if (not np.issubdtype(maxiter.dtype, np.number) or maxiter.shape != tuple()
            or np.iscomplex(maxiter)):
        raise ValueError(message)
    maxiter_int = int(maxiter[()])
    if not maxiter == maxiter_int or maxiter < 0:
        raise ValueError(message)

    if not np.all((xmin <= xl0) & (xl0 < xm0) & (xm0 < xr0) & (xr0 <= xmax)):
        raise ValueError(
            '`xmin <= xl0 < xm0 < xr0 <= xmax` must be True (elementwise).'
        )

    return func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter


def _bracket_minimum(func, xm0, *, xl0=None, xr0=None, xmin=None, xmax=None,
                     factor=None, args=(), maxiter=1000):
    """Bracket the minimum of a unimodal scalar function of one variable

    This function works elementwise when `xm0`, `xl0`, `xr0`, `xmin`, `xmax`,
    and the elements of `args` are broadcastable arrays.

    Parameters
    ----------
    func : callable
        The function for which the minimum is to be bracketed.
        The signature must be::

            func(x: ndarray, *args) -> ndarray

        where each element of ``x`` is a finite real and ``args`` is a tuple,
        which may contain an arbitrary number of arrays that are broadcastable
        with ``x``. `func` must be an elementwise function: each element
        ``func(x)[i]`` must equal ``func(x[i])`` for all indices `i`.
    xm0 : float array_like
        Starting guess for middle point of bracket.
    xl0, xr0 : float array_like, optional
        Starting guesses for left and right endpoints of the bracket. Must be
        broadcastable with one another and with `xm0`.
    xmin, xmax : float array_like, optional
        Minimum and maximum allowable endpoints of the bracket, inclusive. Must
        be broadcastable with `xl0`, `xm0`, and `xr0`.
    factor : float array_like, optional
        Controls expansion of bracket endpoint in downhill direction. Works
        differently in the cases where a limit is set in the downhill direction
        with `xmax` or `xmin`. See Notes.
    args : tuple, optional
        Additional positional arguments to be passed to `func`. Must be arrays
        broadcastable with `xl0`, `xm0`, `xr0`, `xmin`, and `xmax`. If the
        callable to be bracketed requires arguments that are not broadcastable
        with these arrays, wrap that callable with `func` such that `func`
        accepts only ``x`` and broadcastable arrays.
    maxiter : int, optional
        The maximum number of iterations of the algorithm to perform. The number
        of function evaluations is three greater than the number of iterations.

    Returns
    -------
    res : _RichResult
        An instance of `scipy._lib._util._RichResult` with the following
        attributes. The descriptions are written as though the values will be
        scalars; however, if `func` returns an array, the outputs will be
        arrays of the same shape.

        xl, xm, xr : float
            The left, middle, and right points of the bracket, if the algorithm
            terminated successfully.
        fl, fm, fr : float
            The function value at the left, middle, and right points of the bracket.
        nfev : int
            The number of function evaluations required to find the bracket.
        nit : int
            The number of iterations of the algorithm that were performed.
        status : int
            An integer representing the exit status of the algorithm.

            - ``0`` : The algorithm produced a valid bracket.
            - ``-1`` : The bracket expanded to the allowable limits. Assuming
              unimodality, this implies the endpoint at the limit is a
              minimizer.
            - ``-2`` : The maximum number of iterations was reached.
            - ``-3`` : A non-finite value was encountered.

        success : bool
            ``True`` when the algorithm terminated successfully (status ``0``).

    Notes
    -----
    Similar to `scipy.optimize.bracket`, this function seeks to find real
    points ``xl < xm < xr`` such that ``f(xl) >= f(xm)`` and ``f(xr) >= f(xm)``,
    where at least one of the inequalities is strict. Unlike `scipy.optimize.bracket`,
    this function can operate in a vectorized manner on array input, so long as
    the input arrays are broadcastable with each other. Also unlike
    `scipy.optimize.bracket`, users may specify minimum and maximum endpoints
    for the desired bracket.

    Given an initial trio of points ``xl = xl0``, ``xm = xm0``, ``xr = xr0``,
    the algorithm checks if these points already give a valid bracket. If not,
    a new endpoint, ``w``, is chosen in the "downhill" direction, ``xm`` becomes the new
    opposite endpoint, and either `xl` or `xr` becomes the new middle point,
    depending on which direction is downhill. The algorithm repeats from here.

    The new endpoint `w` is chosen differently depending on whether or not a
    boundary `xmin` or `xmax` has been set in the downhill direction. Without
    loss of generality, suppose the downhill direction is to the right, so that
    ``f(xl) > f(xm) > f(xr)``. If there is no boundary to the right, then `w`
    is chosen to be ``xr + factor * (xr - xm)`` where `factor` is controlled by
    the user (defaults to 2.0) so that step sizes increase in geometric proportion.
    If there is a boundary, `xmax` in this case, then `w` is chosen to be
    ``xmax - (xmax - xr)/factor``, with steps slowing to a stop at
    `xmax`. This cautious approach ensures that a minimum near but distinct from
    the boundary isn't missed while also detecting whether or not `xmax` is
    a minimizer when `xmax` is reached after a finite number of steps.
    """  # noqa: E501
    callback = None  # works; I just don't want to test it

    temp = _bracket_minimum_iv(func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter)
    func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter = temp

    xs = (xl0, xm0, xr0)
    func, xs, fs, args, shape, dtype = eim._initialize(func, xs, args)

    xl0, xm0, xr0 = xs
    fl0, fm0, fr0 = fs
    xmin = np.broadcast_to(xmin, shape).astype(dtype, copy=False).ravel()
    xmax = np.broadcast_to(xmax, shape).astype(dtype, copy=False).ravel()
    # We will modify factor later on so make a copy. np.broadcast_to returns
    # a read-only view.
    factor = np.broadcast_to(factor, shape).astype(dtype, copy=True).ravel()

    # To simplify the logic, swap xl and xr if f(xl) < f(xr). We should always be
    # marching downhill in the direction from xl to xr.
    comp = fl0 < fr0
    xl0[comp], xr0[comp] = xr0[comp], xl0[comp]
    fl0[comp], fr0[comp] = fr0[comp], fl0[comp]
    # We only need the boundary in the direction we're traveling.
    limit = np.where(comp, xmin, xmax)

    unlimited = np.isinf(limit)
    limited = ~unlimited
    step = np.empty_like(xl0)

    step[unlimited] = (xr0[unlimited] - xm0[unlimited])
    step[limited] = (limit[limited] - xr0[limited])

    # Step size is divided by factor for case where there is a limit.
    factor[limited] = 1 / factor[limited]

    status = np.full_like(xl0, eim._EINPROGRESS, dtype=int)
    nit, nfev = 0, 3

    work = _RichResult(xl=xl0, xm=xm0, xr=xr0, xr0=xr0, fl=fl0, fm=fm0, fr=fr0,
                       step=step, limit=limit, limited=limited, factor=factor, nit=nit,
                       nfev=nfev, status=status, args=args)

    res_work_pairs = [('status', 'status'), ('xl', 'xl'), ('xm', 'xm'), ('xr', 'xr'),
                      ('nit', 'nit'), ('nfev', 'nfev'), ('fl', 'fl'), ('fm', 'fm'),
                      ('fr', 'fr')]

    def pre_func_eval(work):
        work.step *= work.factor
        x = np.empty_like(work.xr)
        x[~work.limited] = work.xr0[~work.limited] + work.step[~work.limited]
        x[work.limited] = work.limit[work.limited] - work.step[work.limited]
        # Since the new bracket endpoint is calculated from an offset with the
        # limit, it may be the case that the new endpoint equals the old endpoint,
        # when the old endpoint is sufficiently close to the limit. We use the
        # limit itself as the new endpoint in these cases.
        x[work.limited] = np.where(
            x[work.limited] == work.xr[work.limited],
            work.limit[work.limited],
            x[work.limited],
        )
        return x

    def post_func_eval(x, f, work):
        work.xl, work.xm, work.xr = work.xm, work.xr, x
        work.fl, work.fm, work.fr = work.fm, work.fr, f

    def check_termination(work):
        # Condition 1: A valid bracket has been found.
        stop = (
            (work.fl >= work.fm) & (work.fr > work.fm)
            | (work.fl > work.fm) & (work.fr >= work.fm)
        )
        work.status[stop] = eim._ECONVERGED

        # Condition 2: Moving end of bracket reaches limit.
        i = (work.xr == work.limit) & ~stop
        work.status[i] = _ELIMITS
        stop[i] = True

        # Condition 3: non-finite value encountered
        i = ~(np.isfinite(work.xr) & np.isfinite(work.fr)) & ~stop
        work.status[i] = eim._EVALUEERR
        stop[i] = True

        return stop

    def post_termination_check(work):
        pass

    def customize_result(res, shape):
        # Reorder entries of xl and xr if they were swapped due to f(xl0) < f(xr0).
        comp = res['xl'] > res['xr']
        res['xl'][comp], res['xr'][comp] = res['xr'][comp], res['xl'][comp]
        res['fl'][comp], res['fr'][comp] = res['fr'][comp], res['fl'][comp]
        return shape

    return eim._loop(work, callback, shape,
                     maxiter, func, args, dtype,
                     pre_func_eval, post_func_eval,
                     check_termination, post_termination_check,
                     customize_result, res_work_pairs)
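For orientation, here is a minimal usage sketch of the two functions added above. It is not part of the uploaded file; it assumes the private module is importable as `scipy.optimize._bracket` (the path shown in this diff) and that `func` and `args` follow the broadcasting rules in the docstrings.

    import numpy as np
    from scipy.optimize._bracket import _bracket_root, _bracket_minimum

    def f(x, c):
        return x**3 - c      # a root at x = c**(1/3) for each element of c

    def g(x, c):
        return (x - c)**2    # a minimum at x = c for each element of c

    c = np.asarray([1.0, 8.0, 27.0])

    # Grow a root bracket elementwise, starting from the default interval
    # [xl0, xl0 + 1] = [0, 1].
    res = _bracket_root(f, 0.0, args=(c,))
    # On success, each (res.xl, res.xr) pair surrounds a sign change of f
    # (or lands exactly on a root).

    # Grow a three-point minimization bracket elementwise, centered at xm0 = 1.
    res = _bracket_minimum(g, 1.0, args=(c,))
    # On success, res.xl < res.xm < res.xr with res.fm <= res.fl and res.fr.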
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_chandrupatla.py ADDED
@@ -0,0 +1,524 @@
1
+ import numpy as np
2
+ from ._zeros_py import _xtol, _rtol, _iter
3
+ import scipy._lib._elementwise_iterative_method as eim
4
+ from scipy._lib._util import _RichResult
5
+
6
+ def _chandrupatla(func, a, b, *, args=(), xatol=_xtol, xrtol=_rtol,
7
+ fatol=None, frtol=0, maxiter=_iter, callback=None):
8
+ """Find the root of an elementwise function using Chandrupatla's algorithm.
9
+
10
+ For each element of the output of `func`, `chandrupatla` seeks the scalar
11
+ root that makes the element 0. This function allows for `a`, `b`, and the
12
+ output of `func` to be of any broadcastable shapes.
13
+
14
+ Parameters
15
+ ----------
16
+ func : callable
17
+ The function whose root is desired. The signature must be::
18
+
19
+ func(x: ndarray, *args) -> ndarray
20
+
21
+ where each element of ``x`` is a finite real and ``args`` is a tuple,
22
+ which may contain an arbitrary number of components of any type(s).
23
+ ``func`` must be an elementwise function: each element ``func(x)[i]``
24
+ must equal ``func(x[i])`` for all indices ``i``. `_chandrupatla`
25
+ seeks an array ``x`` such that ``func(x)`` is an array of zeros.
26
+ a, b : array_like
27
+ The lower and upper bounds of the root of the function. Must be
28
+ broadcastable with one another.
29
+ args : tuple, optional
30
+ Additional positional arguments to be passed to `func`.
31
+ xatol, xrtol, fatol, frtol : float, optional
32
+ Absolute and relative tolerances on the root and function value.
33
+ See Notes for details.
34
+ maxiter : int, optional
35
+ The maximum number of iterations of the algorithm to perform.
36
+ callback : callable, optional
37
+ An optional user-supplied function to be called before the first
38
+ iteration and after each iteration.
39
+ Called as ``callback(res)``, where ``res`` is a ``_RichResult``
40
+ similar to that returned by `_chandrupatla` (but containing the current
41
+ iterate's values of all variables). If `callback` raises a
42
+ ``StopIteration``, the algorithm will terminate immediately and
43
+ `_chandrupatla` will return a result.
44
+
45
+ Returns
46
+ -------
47
+ res : _RichResult
48
+ An instance of `scipy._lib._util._RichResult` with the following
49
+ attributes. The descriptions are written as though the values will be
50
+ scalars; however, if `func` returns an array, the outputs will be
51
+ arrays of the same shape.
52
+
53
+ x : float
54
+ The root of the function, if the algorithm terminated successfully.
55
+ nfev : int
56
+ The number of times the function was called to find the root.
57
+ nit : int
58
+ The number of iterations of Chandrupatla's algorithm performed.
59
+ status : int
60
+ An integer representing the exit status of the algorithm.
61
+ ``0`` : The algorithm converged to the specified tolerances.
62
+ ``-1`` : The algorithm encountered an invalid bracket.
63
+ ``-2`` : The maximum number of iterations was reached.
64
+ ``-3`` : A non-finite value was encountered.
65
+ ``-4`` : Iteration was terminated by `callback`.
66
+ ``1`` : The algorithm is proceeding normally (in `callback` only).
67
+ success : bool
68
+ ``True`` when the algorithm terminated successfully (status ``0``).
69
+ fun : float
70
+ The value of `func` evaluated at `x`.
71
+ xl, xr : float
72
+ The lower and upper ends of the bracket.
73
+ fl, fr : float
74
+ The function value at the lower and upper ends of the bracket.
75
+
76
+ Notes
77
+ -----
78
+ Implemented based on Chandrupatla's original paper [1]_.
79
+
80
+ If ``xl`` and ``xr`` are the left and right ends of the bracket,
81
+ ``xmin = xl if abs(func(xl)) <= abs(func(xr)) else xr``,
82
+ and ``fmin0 = min(func(a), func(b))``, then the algorithm is considered to
83
+ have converged when ``abs(xr - xl) < xatol + abs(xmin) * xrtol`` or
84
+ ``fun(xmin) <= fatol + abs(fmin0) * frtol``. This is equivalent to the
85
+ termination condition described in [1]_ with ``xrtol = 4e-10``,
86
+ ``xatol = 1e-5``, and ``fatol = frtol = 0``. The default values are
87
+ ``xatol = 2e-12``, ``xrtol = 4 * np.finfo(float).eps``, ``frtol = 0``,
88
+ and ``fatol`` is the smallest normal number of the ``dtype`` returned
89
+ by ``func``.
90
+
91
+ References
92
+ ----------
93
+
94
+ .. [1] Chandrupatla, Tirupathi R.
95
+ "A new hybrid quadratic/bisection algorithm for finding the zero of a
96
+ nonlinear function without using derivatives".
97
+ Advances in Engineering Software, 28(3), 145-149.
98
+ https://doi.org/10.1016/s0965-9978(96)00051-8
99
+
100
+ See Also
101
+ --------
102
+ brentq, brenth, ridder, bisect, newton
103
+
104
+ Examples
105
+ --------
106
+ >>> from scipy import optimize
107
+ >>> def f(x, c):
108
+ ... return x**3 - 2*x - c
109
+ >>> c = 5
110
+ >>> res = optimize._chandrupatla._chandrupatla(f, 0, 3, args=(c,))
111
+ >>> res.x
112
+ 2.0945514818937463
113
+
114
+ >>> c = [3, 4, 5]
115
+ >>> res = optimize._chandrupatla._chandrupatla(f, 0, 3, args=(c,))
116
+ >>> res.x
117
+ array([1.8932892 , 2. , 2.09455148])
118
+
119
+ """
120
+ res = _chandrupatla_iv(func, args, xatol, xrtol,
121
+ fatol, frtol, maxiter, callback)
122
+ func, args, xatol, xrtol, fatol, frtol, maxiter, callback = res
123
+
124
+ # Initialization
125
+ temp = eim._initialize(func, (a, b), args)
126
+ func, xs, fs, args, shape, dtype = temp
127
+ x1, x2 = xs
128
+ f1, f2 = fs
129
+ status = np.full_like(x1, eim._EINPROGRESS, dtype=int) # in progress
130
+ nit, nfev = 0, 2 # two function evaluations performed above
131
+ xatol = _xtol if xatol is None else xatol
132
+ xrtol = _rtol if xrtol is None else xrtol
133
+ fatol = np.finfo(dtype).tiny if fatol is None else fatol
134
+ frtol = frtol * np.minimum(np.abs(f1), np.abs(f2))
135
+ work = _RichResult(x1=x1, f1=f1, x2=x2, f2=f2, x3=None, f3=None, t=0.5,
136
+ xatol=xatol, xrtol=xrtol, fatol=fatol, frtol=frtol,
137
+ nit=nit, nfev=nfev, status=status)
138
+ res_work_pairs = [('status', 'status'), ('x', 'xmin'), ('fun', 'fmin'),
139
+ ('nit', 'nit'), ('nfev', 'nfev'), ('xl', 'x1'),
140
+ ('fl', 'f1'), ('xr', 'x2'), ('fr', 'f2')]
141
+
142
+ def pre_func_eval(work):
143
+ # [1] Figure 1 (first box)
144
+ x = work.x1 + work.t * (work.x2 - work.x1)
145
+ return x
146
+
147
+ def post_func_eval(x, f, work):
148
+ # [1] Figure 1 (first diamond and boxes)
149
+ # Note: y/n are reversed in figure; compare to BASIC in appendix
150
+ work.x3, work.f3 = work.x2.copy(), work.f2.copy()
151
+ j = np.sign(f) == np.sign(work.f1)
152
+ nj = ~j
153
+ work.x3[j], work.f3[j] = work.x1[j], work.f1[j]
154
+ work.x2[nj], work.f2[nj] = work.x1[nj], work.f1[nj]
155
+ work.x1, work.f1 = x, f
156
+
157
+ def check_termination(work):
158
+ # [1] Figure 1 (second diamond)
159
+ # Check for all terminal conditions and record statuses.
160
+
161
+ # See [1] Section 4 (first two sentences)
162
+ i = np.abs(work.f1) < np.abs(work.f2)
163
+ work.xmin = np.choose(i, (work.x2, work.x1))
164
+ work.fmin = np.choose(i, (work.f2, work.f1))
165
+ stop = np.zeros_like(work.x1, dtype=bool) # termination condition met
166
+
167
+ # This is the convergence criterion used in bisect. Chandrupatla's
168
+ # criterion is equivalent to this except with a factor of 4 on `xrtol`.
169
+ work.dx = abs(work.x2 - work.x1)
170
+ work.tol = abs(work.xmin) * work.xrtol + work.xatol
171
+ i = work.dx < work.tol
172
+ # Modify in place to incorporate tolerance on function value. Note that
173
+ # `frtol` has been redefined as `frtol = frtol * np.minimum(f1, f2)`,
174
+ # where `f1` and `f2` are the function evaluated at the original ends of
175
+ # the bracket.
176
+ i |= np.abs(work.fmin) <= work.fatol + work.frtol
177
+ work.status[i] = eim._ECONVERGED
178
+ stop[i] = True
179
+
180
+ i = (np.sign(work.f1) == np.sign(work.f2)) & ~stop
181
+ work.xmin[i], work.fmin[i], work.status[i] = np.nan, np.nan, eim._ESIGNERR
182
+ stop[i] = True
183
+
184
+ i = ~((np.isfinite(work.x1) & np.isfinite(work.x2)
185
+ & np.isfinite(work.f1) & np.isfinite(work.f2)) | stop)
186
+ work.xmin[i], work.fmin[i], work.status[i] = np.nan, np.nan, eim._EVALUEERR
187
+ stop[i] = True
188
+
189
+ return stop
190
+
191
+ def post_termination_check(work):
192
+ # [1] Figure 1 (third diamond and boxes / Equation 1)
193
+ xi1 = (work.x1 - work.x2) / (work.x3 - work.x2)
194
+ phi1 = (work.f1 - work.f2) / (work.f3 - work.f2)
195
+ alpha = (work.x3 - work.x1) / (work.x2 - work.x1)
196
+ j = ((1 - np.sqrt(1 - xi1)) < phi1) & (phi1 < np.sqrt(xi1))
197
+
198
+ f1j, f2j, f3j, alphaj = work.f1[j], work.f2[j], work.f3[j], alpha[j]
199
+ t = np.full_like(alpha, 0.5)
200
+ t[j] = (f1j / (f1j - f2j) * f3j / (f3j - f2j)
201
+ - alphaj * f1j / (f3j - f1j) * f2j / (f2j - f3j))
202
+
203
+ # [1] Figure 1 (last box; see also BASIC in appendix with comment
204
+ # "Adjust T Away from the Interval Boundary")
205
+ tl = 0.5 * work.tol / work.dx
206
+ work.t = np.clip(t, tl, 1 - tl)
207
+
208
+ def customize_result(res, shape):
209
+ xl, xr, fl, fr = res['xl'], res['xr'], res['fl'], res['fr']
210
+ i = res['xl'] < res['xr']
211
+ res['xl'] = np.choose(i, (xr, xl))
212
+ res['xr'] = np.choose(i, (xl, xr))
213
+ res['fl'] = np.choose(i, (fr, fl))
214
+ res['fr'] = np.choose(i, (fl, fr))
215
+ return shape
216
+
217
+ return eim._loop(work, callback, shape, maxiter, func, args, dtype,
218
+ pre_func_eval, post_func_eval, check_termination,
219
+ post_termination_check, customize_result, res_work_pairs)
220
+
221
+
222
+ def _chandrupatla_iv(func, args, xatol, xrtol,
223
+ fatol, frtol, maxiter, callback):
224
+ # Input validation for `_chandrupatla`
225
+
226
+ if not callable(func):
227
+ raise ValueError('`func` must be callable.')
228
+
229
+ if not np.iterable(args):
230
+ args = (args,)
231
+
232
+ tols = np.asarray([xatol if xatol is not None else 1,
233
+ xrtol if xrtol is not None else 1,
234
+ fatol if fatol is not None else 1,
235
+ frtol if frtol is not None else 1])
236
+ if (not np.issubdtype(tols.dtype, np.number) or np.any(tols < 0)
237
+ or np.any(np.isnan(tols)) or tols.shape != (4,)):
238
+ raise ValueError('Tolerances must be non-negative scalars.')
239
+
240
+ maxiter_int = int(maxiter)
241
+ if maxiter != maxiter_int or maxiter < 0:
242
+ raise ValueError('`maxiter` must be a non-negative integer.')
243
+
244
+ if callback is not None and not callable(callback):
245
+ raise ValueError('`callback` must be callable.')
246
+
247
+ return func, args, xatol, xrtol, fatol, frtol, maxiter, callback
248
+
249
+
250
+ def _chandrupatla_minimize(func, x1, x2, x3, *, args=(), xatol=None,
251
+ xrtol=None, fatol=None, frtol=None, maxiter=100,
252
+ callback=None):
253
+ """Find the minimizer of an elementwise function.
254
+
255
+ For each element of the output of `func`, `_chandrupatla_minimize` seeks
256
+ the scalar minimizer that minimizes the element. This function allows for
257
+ `x1`, `x2`, `x3`, and the elements of `args` to be arrays of any
258
+ broadcastable shapes.
259
+
260
+ Parameters
261
+ ----------
262
+ func : callable
263
+ The function whose minimizer is desired. The signature must be::
264
+
265
+ func(x: ndarray, *args) -> ndarray
266
+
267
+ where each element of ``x`` is a finite real and ``args`` is a tuple,
268
+ which may contain an arbitrary number of arrays that are broadcastable
269
+ with `x`. ``func`` must be an elementwise function: each element
270
+ ``func(x)[i]`` must equal ``func(x[i])`` for all indices ``i``.
271
+ `_chandrupatla` seeks an array ``x`` such that ``func(x)`` is an array
272
+ of minima.
273
+ x1, x2, x3 : array_like
274
+ The abscissae of a standard scalar minimization bracket. A bracket is
275
+ valid if ``x1 < x2 < x3`` and ``func(x1) > func(x2) <= func(x3)``.
276
+ Must be broadcastable with one another and `args`.
277
+ args : tuple, optional
278
+ Additional positional arguments to be passed to `func`. Must be arrays
279
+ broadcastable with `x1`, `x2`, and `x3`. If the callable to be
280
+ differentiated requires arguments that are not broadcastable with `x`,
281
+ wrap that callable with `func` such that `func` accepts only `x` and
282
+ broadcastable arrays.
283
+ xatol, xrtol, fatol, frtol : float, optional
284
+ Absolute and relative tolerances on the minimizer and function value.
285
+ See Notes for details.
286
+ maxiter : int, optional
287
+ The maximum number of iterations of the algorithm to perform.
288
+ callback : callable, optional
289
+ An optional user-supplied function to be called before the first
290
+ iteration and after each iteration.
291
+ Called as ``callback(res)``, where ``res`` is a ``_RichResult``
292
+ similar to that returned by `_chandrupatla_minimize` (but containing
293
+ the current iterate's values of all variables). If `callback` raises a
294
+ ``StopIteration``, the algorithm will terminate immediately and
295
+ `_chandrupatla_minimize` will return a result.
296
+
297
+ Returns
298
+ -------
299
+ res : _RichResult
300
+ An instance of `scipy._lib._util._RichResult` with the following
301
+ attributes. (The descriptions are written as though the values will be
302
+ scalars; however, if `func` returns an array, the outputs will be
303
+ arrays of the same shape.)
304
+
305
+ success : bool
306
+ ``True`` when the algorithm terminated successfully (status ``0``).
307
+ status : int
308
+ An integer representing the exit status of the algorithm.
309
+ ``0`` : The algorithm converged to the specified tolerances.
310
+ ``-1`` : The algorithm encountered an invalid bracket.
311
+ ``-2`` : The maximum number of iterations was reached.
312
+ ``-3`` : A non-finite value was encountered.
313
+ ``-4`` : Iteration was terminated by `callback`.
314
+ ``1`` : The algorithm is proceeding normally (in `callback` only).
315
+ x : float
316
+ The minimizer of the function, if the algorithm terminated
317
+ successfully.
318
+ fun : float
319
+ The value of `func` evaluated at `x`.
320
+ nfev : int
321
+ The number of points at which `func` was evaluated.
322
+ nit : int
323
+ The number of iterations of the algorithm that were performed.
324
+ xl, xm, xr : float
325
+ The final three-point bracket.
326
+ fl, fm, fr : float
327
+ The function value at the bracket points.
328
+
329
+ Notes
330
+ -----
331
+ Implemented based on Chandrupatla's original paper [1]_.
332
+
333
+ If ``x1 < x2 < x3`` are the points of the bracket and ``f1 > f2 <= f3``
334
+ are the values of ``func`` at those points, then the algorithm is
335
+ considered to have converged when ``x3 - x1 <= abs(x2)*xrtol + xatol``
336
+ or ``(f1 - 2*f2 + f3)/2 <= abs(f2)*frtol + fatol``. Note that first of
337
+ these differs from the termination conditions described in [1]_. The
338
+ default values of `xrtol` is the square root of the precision of the
339
+ appropriate dtype, and ``xatol=fatol = frtol`` is the smallest normal
340
+ number of the appropriate dtype.
341
+
342
+ References
343
+ ----------
344
+ .. [1] Chandrupatla, Tirupathi R. (1998).
345
+ "An efficient quadratic fit-sectioning algorithm for minimization
346
+ without derivatives".
347
+ Computer Methods in Applied Mechanics and Engineering, 152 (1-2),
348
+ 211-217. https://doi.org/10.1016/S0045-7825(97)00190-4
349
+
350
+ See Also
351
+ --------
352
+ golden, brent, bounded
353
+
354
+ Examples
355
+ --------
356
+ >>> from scipy.optimize._chandrupatla import _chandrupatla_minimize
357
+ >>> def f(x, args=1):
358
+ ... return (x - args)**2
359
+ >>> res = _chandrupatla_minimize(f, -5, 0, 5)
360
+ >>> res.x
361
+ 1.0
362
+ >>> c = [1, 1.5, 2]
363
+ >>> res = _chandrupatla_minimize(f, -5, 0, 5, args=(c,))
364
+ >>> res.x
365
+ array([1. , 1.5, 2. ])
366
+ """
367
+ res = _chandrupatla_iv(func, args, xatol, xrtol,
368
+ fatol, frtol, maxiter, callback)
369
+ func, args, xatol, xrtol, fatol, frtol, maxiter, callback = res
370
+
371
+ # Initialization
372
+ xs = (x1, x2, x3)
373
+ temp = eim._initialize(func, xs, args)
374
+ func, xs, fs, args, shape, dtype = temp # line split for PEP8
375
+ x1, x2, x3 = xs
376
+ f1, f2, f3 = fs
377
+ phi = dtype.type(0.5 + 0.5*5**0.5) # golden ratio
378
+ status = np.full_like(x1, eim._EINPROGRESS, dtype=int) # in progress
379
+ nit, nfev = 0, 3 # three function evaluations performed above
380
+ fatol = np.finfo(dtype).tiny if fatol is None else fatol
381
+ frtol = np.finfo(dtype).tiny if frtol is None else frtol
382
+ xatol = np.finfo(dtype).tiny if xatol is None else xatol
383
+ xrtol = np.sqrt(np.finfo(dtype).eps) if xrtol is None else xrtol
384
+
385
+ # Ensure that x1 < x2 < x3 initially.
386
+ xs, fs = np.vstack((x1, x2, x3)), np.vstack((f1, f2, f3))
387
+ i = np.argsort(xs, axis=0)
388
+ x1, x2, x3 = np.take_along_axis(xs, i, axis=0)
389
+ f1, f2, f3 = np.take_along_axis(fs, i, axis=0)
390
+ q0 = x3.copy() # "At the start, q0 is set at x3..." ([1] after (7))
391
+
392
+ work = _RichResult(x1=x1, f1=f1, x2=x2, f2=f2, x3=x3, f3=f3, phi=phi,
393
+ xatol=xatol, xrtol=xrtol, fatol=fatol, frtol=frtol,
394
+ nit=nit, nfev=nfev, status=status, q0=q0, args=args)
395
+ res_work_pairs = [('status', 'status'),
396
+ ('x', 'x2'), ('fun', 'f2'),
397
+ ('nit', 'nit'), ('nfev', 'nfev'),
398
+ ('xl', 'x1'), ('xm', 'x2'), ('xr', 'x3'),
399
+ ('fl', 'f1'), ('fm', 'f2'), ('fr', 'f3')]
400
+
401
+ def pre_func_eval(work):
402
+ # `_check_termination` is called first -> `x3 - x2 > x2 - x1`
403
+ # But let's calculate a few terms that we'll reuse
404
+ x21 = work.x2 - work.x1
405
+ x32 = work.x3 - work.x2
406
+
407
+ # [1] Section 3. "The quadratic minimum point Q1 is calculated using
408
+ # the relations developed in the previous section." [1] Section 2 (5/6)
409
+ A = x21 * (work.f3 - work.f2)
410
+ B = x32 * (work.f1 - work.f2)
411
+ C = A / (A + B)
412
+ # q1 = C * (work.x1 + work.x2) / 2 + (1 - C) * (work.x2 + work.x3) / 2
413
+ q1 = 0.5 * (C*(work.x1 - work.x3) + work.x2 + work.x3) # much faster
414
+ # this is an array, so multiplying by 0.5 does not change dtype
415
+
416
+ # "If Q1 and Q0 are sufficiently close... Q1 is accepted if it is
417
+ # sufficiently away from the inside point x2"
418
+ i = abs(q1 - work.q0) < 0.5 * abs(x21) # [1] (7)
419
+ xi = q1[i]
420
+ # Later, after (9), "If the point Q1 is in a +/- xtol neighborhood of
421
+ # x2, the new point is chosen in the larger interval at a distance
422
+ # tol away from x2."
423
+ # See also QBASIC code after "Accept Ql adjust if close to X2".
424
+ j = abs(q1[i] - work.x2[i]) <= work.xtol[i]
425
+ xi[j] = work.x2[i][j] + np.sign(x32[i][j]) * work.xtol[i][j]
426
+
427
+ # "If condition (7) is not satisfied, golden sectioning of the larger
428
+ # interval is carried out to introduce the new point."
429
+ # (For simplicity, we go ahead and calculate it for all points, but we
430
+ # change the elements for which the condition was satisfied.)
431
+ x = work.x2 + (2 - work.phi) * x32
432
+ x[i] = xi
433
+
434
+ # "We define Q0 as the value of Q1 at the previous iteration."
435
+ work.q0 = q1
436
+ return x
437
+
438
+ def post_func_eval(x, f, work):
439
+ # Standard logic for updating a three-point bracket based on a new
440
+ # point. In QBASIC code, see "IF SGN(X-X2) = SGN(X3-X2) THEN...".
441
+ # There is an awful lot of data copying going on here; this would
442
+ # probably benefit from code optimization or implementation in Pythran.
443
+ i = np.sign(x - work.x2) == np.sign(work.x3 - work.x2)
444
+ xi, x1i, x2i, x3i = x[i], work.x1[i], work.x2[i], work.x3[i],
445
+ fi, f1i, f2i, f3i = f[i], work.f1[i], work.f2[i], work.f3[i]
446
+ j = fi > f2i
447
+ x3i[j], f3i[j] = xi[j], fi[j]
448
+ j = ~j
449
+ x1i[j], f1i[j], x2i[j], f2i[j] = x2i[j], f2i[j], xi[j], fi[j]
450
+
451
+ ni = ~i
452
+ xni, x1ni, x2ni, x3ni = x[ni], work.x1[ni], work.x2[ni], work.x3[ni],
453
+ fni, f1ni, f2ni, f3ni = f[ni], work.f1[ni], work.f2[ni], work.f3[ni]
454
+ j = fni > f2ni
455
+ x1ni[j], f1ni[j] = xni[j], fni[j]
456
+ j = ~j
457
+ x3ni[j], f3ni[j], x2ni[j], f2ni[j] = x2ni[j], f2ni[j], xni[j], fni[j]
458
+
459
+ work.x1[i], work.x2[i], work.x3[i] = x1i, x2i, x3i
460
+ work.f1[i], work.f2[i], work.f3[i] = f1i, f2i, f3i
461
+ work.x1[ni], work.x2[ni], work.x3[ni] = x1ni, x2ni, x3ni,
462
+ work.f1[ni], work.f2[ni], work.f3[ni] = f1ni, f2ni, f3ni
463
+
464
+ def check_termination(work):
465
+ # Check for all terminal conditions and record statuses.
466
+ stop = np.zeros_like(work.x1, dtype=bool) # termination condition met
467
+
468
+ # Bracket is invalid; stop and don't return minimizer/minimum
469
+ i = ((work.f2 > work.f1) | (work.f2 > work.f3))
470
+ work.x2[i], work.f2[i] = np.nan, np.nan
471
+ stop[i], work.status[i] = True, eim._ESIGNERR
472
+
473
+ # Non-finite values; stop and don't return minimizer/minimum
474
+ finite = np.isfinite(work.x1+work.x2+work.x3+work.f1+work.f2+work.f3)
475
+ i = ~(finite | stop)
476
+ work.x2[i], work.f2[i] = np.nan, np.nan
477
+ stop[i], work.status[i] = True, eim._EVALUEERR
478
+
479
+ # [1] Section 3 "Points 1 and 3 are interchanged if necessary to make
480
+ # the (x2, x3) the larger interval."
481
+ # Note: I had used np.choose; this is much faster. This would be a good
482
+ # place to save e.g. `work.x3 - work.x2` for reuse, but I tried and
483
+ # didn't notice a speed boost, so let's keep it simple.
484
+ i = abs(work.x3 - work.x2) < abs(work.x2 - work.x1)
485
+ temp = work.x1[i]
486
+ work.x1[i] = work.x3[i]
487
+ work.x3[i] = temp
488
+ temp = work.f1[i]
489
+ work.f1[i] = work.f3[i]
490
+ work.f3[i] = temp
491
+
492
+ # [1] Section 3 (bottom of page 212)
493
+ # "We set a tolerance value xtol..."
494
+ work.xtol = abs(work.x2) * work.xrtol + work.xatol # [1] (8)
495
+ # "The convergence based on interval is achieved when..."
496
+ # Note: Equality allowed in case of `xtol=0`
497
+ i = abs(work.x3 - work.x2) <= 2 * work.xtol # [1] (9)
498
+
499
+ # "We define ftol using..."
500
+ ftol = abs(work.f2) * work.frtol + work.fatol # [1] (10)
501
+ # "The convergence based on function values is achieved when..."
502
+ # Note 1: modify in place to incorporate tolerance on function value.
503
+ # Note 2: factor of 2 is not in the text; see QBASIC start of DO loop
504
+ i |= (work.f1 - 2 * work.f2 + work.f3) <= 2*ftol # [1] (11)
505
+ i &= ~stop
506
+ stop[i], work.status[i] = True, eim._ECONVERGED
507
+
508
+ return stop
509
+
510
+ def post_termination_check(work):
511
+ pass
512
+
513
+ def customize_result(res, shape):
514
+ xl, xr, fl, fr = res['xl'], res['xr'], res['fl'], res['fr']
515
+ i = res['xl'] < res['xr']
516
+ res['xl'] = np.choose(i, (xr, xl))
517
+ res['xr'] = np.choose(i, (xl, xr))
518
+ res['fl'] = np.choose(i, (fr, fl))
519
+ res['fr'] = np.choose(i, (fl, fr))
520
+ return shape
521
+
522
+ return eim._loop(work, callback, shape, maxiter, func, args, dtype,
523
+ pre_func_eval, post_func_eval, check_termination,
524
+ post_termination_check, customize_result, res_work_pairs)
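For intuition: when condition (7) fails, `pre_func_eval` falls back to golden sectioning, placing the trial point a fraction ``2 - phi`` (about 0.382) of the way into the larger interval ``(x2, x3)``. A minimal scalar sketch (the values below are illustrative, not from the solver):

>>> import numpy as np
>>> phi = (1 + np.sqrt(5)) / 2  # golden ratio, as stored in work.phi
>>> x2, x3 = 0.0, 1.0  # inside point and far end of the larger interval
>>> round(x2 + (2 - phi) * (x3 - x2), 6)  # golden-section trial point
0.381966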
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_cobyla.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (101 kB).
 
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_cobyla_py.py ADDED
@@ -0,0 +1,316 @@
1
+ """
2
+ Interface to Constrained Optimization By Linear Approximation
3
+
4
+ Functions
5
+ ---------
6
+ .. autosummary::
7
+ :toctree: generated/
8
+
9
+ fmin_cobyla
10
+
11
+ """
12
+
13
+ import functools
14
+ from threading import RLock
15
+
16
+ import numpy as np
17
+ from scipy.optimize import _cobyla as cobyla
18
+ from ._optimize import (OptimizeResult, _check_unknown_options,
19
+ _prepare_scalar_function)
20
+ try:
21
+ from itertools import izip  # Python 2 shim; on Python 3 this falls through to zip below
22
+ except ImportError:
23
+ izip = zip
24
+
25
+ __all__ = ['fmin_cobyla']
26
+
27
+ # Workaround as _cobyla.minimize is not threadsafe
28
+ # due to an unknown f2py bug and can segfault,
29
+ # see gh-9658.
30
+ _module_lock = RLock()
31
+ def synchronized(func):
32
+ @functools.wraps(func)
33
+ def wrapper(*args, **kwargs):
34
+ with _module_lock:
35
+ return func(*args, **kwargs)
36
+ return wrapper
37
+
38
+ @synchronized
39
+ def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0,
40
+ rhoend=1e-4, maxfun=1000, disp=None, catol=2e-4,
41
+ *, callback=None):
42
+ """
43
+ Minimize a function using the Constrained Optimization By Linear
44
+ Approximation (COBYLA) method. This method wraps a FORTRAN
45
+ implementation of the algorithm.
46
+
47
+ Parameters
48
+ ----------
49
+ func : callable
50
+ Function to minimize. In the form func(x, \\*args).
51
+ x0 : ndarray
52
+ Initial guess.
53
+ cons : sequence
54
+ Constraint functions; must all be ``>=0`` (a single function
55
+ if only 1 constraint). Each function takes the parameters `x`
56
+ as its first argument, and it can return either a single number or
57
+ an array or list of numbers.
58
+ args : tuple, optional
59
+ Extra arguments to pass to function.
60
+ consargs : tuple, optional
61
+ Extra arguments to pass to constraint functions (default of None means
62
+ use same extra arguments as those passed to func).
63
+ Use ``()`` for no extra arguments.
64
+ rhobeg : float, optional
65
+ Reasonable initial changes to the variables.
66
+ rhoend : float, optional
67
+ Final accuracy in the optimization (not precisely guaranteed). This
68
+ is a lower bound on the size of the trust region.
69
+ disp : {0, 1, 2, 3}, optional
70
+ Controls the frequency of output; 0 implies no output.
71
+ maxfun : int, optional
72
+ Maximum number of function evaluations.
73
+ catol : float, optional
74
+ Absolute tolerance for constraint violations.
75
+ callback : callable, optional
76
+ Called after each iteration, as ``callback(x)``, where ``x`` is the
77
+ current parameter vector.
78
+
79
+ Returns
80
+ -------
81
+ x : ndarray
82
+ The argument that minimises `f`.
83
+
84
+ See also
85
+ --------
86
+ minimize: Interface to minimization algorithms for multivariate
87
+ functions. See the 'COBYLA' `method` in particular.
88
+
89
+ Notes
90
+ -----
91
+ This algorithm is based on linear approximations to the objective
92
+ function and each constraint. We briefly describe the algorithm.
93
+
94
+ Suppose the function is being minimized over k variables. At the
95
+ jth iteration the algorithm has k+1 points v_1, ..., v_(k+1),
96
+ an approximate solution x_j, and a radius RHO_j.
97
+ (i.e., linear plus a constant) approximations to the objective
98
+ function and constraint functions such that their function values
99
+ agree with the linear approximation on the k+1 points v_1,.., v_(k+1).
100
+ This gives a linear program to solve (where the linear approximations
101
+ of the constraint functions are constrained to be non-negative).
102
+
103
+ However, the linear approximations are likely only good
104
+ approximations near the current simplex, so the linear program is
105
+ given the further requirement that the solution, which
106
+ will become x_(j+1), must be within RHO_j from x_j. RHO_j only
107
+ decreases, never increases. The initial RHO_j is rhobeg and the
108
+ final RHO_j is rhoend. In this way COBYLA's iterations behave
109
+ like a trust region algorithm.
110
+
111
+ Additionally, the linear program may be inconsistent, or the
112
+ approximation may give poor improvement. For details about
113
+ how these issues are resolved, as well as how the points v_i are
114
+ updated, refer to the source code or the references below.
115
+
116
+
117
+ References
118
+ ----------
119
+ Powell M.J.D. (1994), "A direct search optimization method that models
120
+ the objective and constraint functions by linear interpolation.", in
121
+ Advances in Optimization and Numerical Analysis, eds. S. Gomez and
122
+ J-P Hennart, Kluwer Academic (Dordrecht), pp. 51-67
123
+
124
+ Powell M.J.D. (1998), "Direct search algorithms for optimization
125
+ calculations", Acta Numerica 7, 287-336
126
+
127
+ Powell M.J.D. (2007), "A view of algorithms for optimization without
128
+ derivatives", Cambridge University Technical Report DAMTP 2007/NA03
129
+
130
+
131
+ Examples
132
+ --------
133
+ Minimize the objective function f(x,y) = x*y subject
134
+ to the constraints x**2 + y**2 < 1 and y > 0::
135
+
136
+ >>> def objective(x):
137
+ ... return x[0]*x[1]
138
+ ...
139
+ >>> def constr1(x):
140
+ ... return 1 - (x[0]**2 + x[1]**2)
141
+ ...
142
+ >>> def constr2(x):
143
+ ... return x[1]
144
+ ...
145
+ >>> from scipy.optimize import fmin_cobyla
146
+ >>> fmin_cobyla(objective, [0.0, 0.1], [constr1, constr2], rhoend=1e-7)
147
+ array([-0.70710685, 0.70710671])
148
+
149
+ The exact solution is (-sqrt(2)/2, sqrt(2)/2).
150
+
151
+
152
+
153
+ """
154
+ err = "cons must be a sequence of callable functions or a single"\
155
+ " callable function."
156
+ try:
157
+ len(cons)
158
+ except TypeError as e:
159
+ if callable(cons):
160
+ cons = [cons]
161
+ else:
162
+ raise TypeError(err) from e
163
+ else:
164
+ for thisfunc in cons:
165
+ if not callable(thisfunc):
166
+ raise TypeError(err)
167
+
168
+ if consargs is None:
169
+ consargs = args
170
+
171
+ # build constraints
172
+ con = tuple({'type': 'ineq', 'fun': c, 'args': consargs} for c in cons)
173
+
174
+ # options
175
+ opts = {'rhobeg': rhobeg,
176
+ 'tol': rhoend,
177
+ 'disp': disp,
178
+ 'maxiter': maxfun,
179
+ 'catol': catol,
180
+ 'callback': callback}
181
+
182
+ sol = _minimize_cobyla(func, x0, args, constraints=con,
183
+ **opts)
184
+ if disp and not sol['success']:
185
+ print(f"COBYLA failed to find a solution: {sol.message}")
186
+ return sol['x']
187
+
188
+
189
+ @synchronized
190
+ def _minimize_cobyla(fun, x0, args=(), constraints=(),
191
+ rhobeg=1.0, tol=1e-4, maxiter=1000,
192
+ disp=False, catol=2e-4, callback=None, bounds=None,
193
+ **unknown_options):
194
+ """
195
+ Minimize a scalar function of one or more variables using the
196
+ Constrained Optimization BY Linear Approximation (COBYLA) algorithm.
197
+
198
+ Options
199
+ -------
200
+ rhobeg : float
201
+ Reasonable initial changes to the variables.
202
+ tol : float
203
+ Final accuracy in the optimization (not precisely guaranteed).
204
+ This is a lower bound on the size of the trust region.
205
+ disp : bool
206
+ Set to True to print convergence messages. If False,
207
+ `verbosity` is ignored and set to 0.
208
+ maxiter : int
209
+ Maximum number of function evaluations.
210
+ catol : float
211
+ Tolerance (absolute) for constraint violations
212
+
213
+ """
214
+ _check_unknown_options(unknown_options)
215
+ maxfun = maxiter
216
+ rhoend = tol
217
+ iprint = int(bool(disp))
218
+
219
+ # check constraints
220
+ if isinstance(constraints, dict):
221
+ constraints = (constraints, )
222
+
223
+ if bounds:
224
+ # Coerce to a list so the bound constraints below can be appended
+ # even when `constraints` was passed as a tuple.
+ constraints = list(constraints)
+ i_lb = np.isfinite(bounds.lb)
225
+ if np.any(i_lb):
226
+ def lb_constraint(x, *args, **kwargs):
227
+ return x[i_lb] - bounds.lb[i_lb]
228
+
229
+ constraints.append({'type': 'ineq', 'fun': lb_constraint})
230
+
231
+ i_ub = np.isfinite(bounds.ub)
232
+ if np.any(i_ub):
233
+ def ub_constraint(x):
234
+ return bounds.ub[i_ub] - x[i_ub]
235
+
236
+ constraints.append({'type': 'ineq', 'fun': ub_constraint})
237
+
238
+ for ic, con in enumerate(constraints):
239
+ # check type
240
+ try:
241
+ ctype = con['type'].lower()
242
+ except KeyError as e:
243
+ raise KeyError('Constraint %d has no type defined.' % ic) from e
244
+ except TypeError as e:
245
+ raise TypeError('Constraints must be defined using a '
246
+ 'dictionary.') from e
247
+ except AttributeError as e:
248
+ raise TypeError("Constraint's type must be a string.") from e
249
+ else:
250
+ if ctype != 'ineq':
251
+ raise ValueError("Constraints of type '%s' not handled by "
252
+ "COBYLA." % con['type'])
253
+
254
+ # check function
255
+ if 'fun' not in con:
256
+ raise KeyError('Constraint %d has no function defined.' % ic)
257
+
258
+ # check extra arguments
259
+ if 'args' not in con:
260
+ con['args'] = ()
261
+
262
+ # m is the total number of constraint values
263
+ # it takes into account that some constraints may be vector-valued
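+ # (For example, one constraint returning shape (2,) and another returning
+ # a scalar give cons_lengths = [2, 1] and m = 3.)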
264
+ cons_lengths = []
265
+ for c in constraints:
266
+ f = c['fun'](x0, *c['args'])
267
+ try:
268
+ cons_length = len(f)
269
+ except TypeError:
270
+ cons_length = 1
271
+ cons_lengths.append(cons_length)
272
+ m = sum(cons_lengths)
273
+
274
+ # create the ScalarFunction, cobyla doesn't require derivative function
275
+ def _jac(x, *args):
276
+ return None
277
+
278
+ sf = _prepare_scalar_function(fun, x0, args=args, jac=_jac)
279
+
280
+ def calcfc(x, con):
281
+ f = sf.fun(x)
282
+ i = 0
283
+ for size, c in izip(cons_lengths, constraints):
284
+ con[i: i + size] = c['fun'](x, *c['args'])
285
+ i += size
286
+ return f
287
+
288
+ def wrapped_callback(x):
289
+ if callback is not None:
290
+ callback(np.copy(x))
291
+
292
+ info = np.zeros(4, np.float64)
293
+ xopt, info = cobyla.minimize(calcfc, m=m, x=np.copy(x0), rhobeg=rhobeg,
294
+ rhoend=rhoend, iprint=iprint, maxfun=maxfun,
295
+ dinfo=info, callback=wrapped_callback)
296
+
297
+ if info[3] > catol:
298
+ # Check constraint violation
299
+ info[0] = 4
300
+
301
+ return OptimizeResult(x=xopt,
302
+ status=int(info[0]),
303
+ success=info[0] == 1,
304
+ message={1: 'Optimization terminated successfully.',
305
+ 2: 'Maximum number of function evaluations '
306
+ 'has been exceeded.',
307
+ 3: 'Rounding errors are becoming damaging '
308
+ 'in COBYLA subroutine.',
309
+ 4: 'Did not converge to a solution '
310
+ 'satisfying the constraints. See '
311
+ '`maxcv` for magnitude of violation.',
312
+ 5: 'NaN result encountered.'
313
+ }.get(info[0], 'Unknown exit status.'),
314
+ nfev=int(info[1]),
315
+ fun=info[2],
316
+ maxcv=info[3])
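For reference, the same solver is reachable through `scipy.optimize.minimize`; a minimal sketch (the objective and constraint below are illustrative; ``maxcv`` is the constraint-violation field set above):

>>> from scipy.optimize import minimize
>>> res = minimize(lambda x: x[0]**2 + x[1]**2, [1.0, 1.0],
...                method='COBYLA',
...                constraints=[{'type': 'ineq', 'fun': lambda x: x[0] - 0.5}])
>>> bool(res.success), res.maxcv < 2e-4
(True, True)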
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_constraints.py ADDED
@@ -0,0 +1,590 @@
1
+ """Constraints definition for minimize."""
2
+ import numpy as np
3
+ from ._hessian_update_strategy import BFGS
4
+ from ._differentiable_functions import (
5
+ VectorFunction, LinearVectorFunction, IdentityVectorFunction)
6
+ from ._optimize import OptimizeWarning
7
+ from warnings import warn, catch_warnings, simplefilter, filterwarnings
8
+ from scipy.sparse import issparse
9
+
10
+
11
+ def _arr_to_scalar(x):
12
+ # If x is a numpy array, return x.item(). This will
13
+ # fail if the array has more than one element.
14
+ return x.item() if isinstance(x, np.ndarray) else x
15
+
16
+
17
+ class NonlinearConstraint:
18
+ """Nonlinear constraint on the variables.
19
+
20
+ The constraint has the general inequality form::
21
+
22
+ lb <= fun(x) <= ub
23
+
24
+ Here the vector of independent variables x is passed as ndarray of shape
25
+ (n,) and ``fun`` returns a vector with m components.
26
+
27
+ It is possible to use equal bounds to represent an equality constraint or
28
+ infinite bounds to represent a one-sided constraint.
29
+
30
+ Parameters
31
+ ----------
32
+ fun : callable
33
+ The function defining the constraint.
34
+ The signature is ``fun(x) -> array_like, shape (m,)``.
35
+ lb, ub : array_like
36
+ Lower and upper bounds on the constraint. Each array must have the
37
+ shape (m,) or be a scalar, in the latter case a bound will be the same
38
+ for all components of the constraint. Use ``np.inf`` with an
39
+ appropriate sign to specify a one-sided constraint.
40
+ Set components of `lb` and `ub` equal to represent an equality
41
+ constraint. Note that you can mix constraints of different types:
42
+ interval, one-sided or equality, by setting different components of
43
+ `lb` and `ub` as necessary.
44
+ jac : {callable, '2-point', '3-point', 'cs'}, optional
45
+ Method of computing the Jacobian matrix (an m-by-n matrix,
46
+ where element (i, j) is the partial derivative of f[i] with
47
+ respect to x[j]). The keywords {'2-point', '3-point',
48
+ 'cs'} select a finite difference scheme for the numerical estimation.
49
+ A callable must have the following signature:
50
+ ``jac(x) -> {ndarray, sparse matrix}, shape (m, n)``.
51
+ Default is '2-point'.
52
+ hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy, None}, optional
53
+ Method for computing the Hessian matrix. The keywords
54
+ {'2-point', '3-point', 'cs'} select a finite difference scheme for
55
+ numerical estimation. Alternatively, objects implementing
56
+ `HessianUpdateStrategy` interface can be used to approximate the
57
+ Hessian. Currently available implementations are:
58
+
59
+ - `BFGS` (default option)
60
+ - `SR1`
61
+
62
+ A callable must return the Hessian matrix of ``dot(fun, v)`` and
63
+ must have the following signature:
64
+ ``hess(x, v) -> {LinearOperator, sparse matrix, array_like}, shape (n, n)``.
65
+ Here ``v`` is ndarray with shape (m,) containing Lagrange multipliers.
66
+ keep_feasible : array_like of bool, optional
67
+ Whether to keep the constraint components feasible throughout
68
+ iterations. A single value sets this property for all components.
69
+ Default is False. Has no effect for equality constraints.
70
+ finite_diff_rel_step: None or array_like, optional
71
+ Relative step size for the finite difference approximation. Default is
72
+ None, which will select a reasonable value automatically depending
73
+ on a finite difference scheme.
74
+ finite_diff_jac_sparsity: {None, array_like, sparse matrix}, optional
75
+ Defines the sparsity structure of the Jacobian matrix for finite
76
+ difference estimation, its shape must be (m, n). If the Jacobian has
77
+ only few non-zero elements in *each* row, providing the sparsity
78
+ structure will greatly speed up the computations. A zero entry means
79
+ that a corresponding element in the Jacobian is identically zero.
80
+ If provided, forces the use of 'lsmr' trust-region solver.
81
+ If None (default) then dense differencing will be used.
82
+
83
+ Notes
84
+ -----
85
+ Finite difference schemes {'2-point', '3-point', 'cs'} may be used for
86
+ approximating either the Jacobian or the Hessian. We, however, do not allow
87
+ its use for approximating both simultaneously. Hence whenever the Jacobian
88
+ is estimated via finite-differences, we require the Hessian to be estimated
89
+ using one of the quasi-Newton strategies.
90
+
91
+ The scheme 'cs' is potentially the most accurate, but requires the function
92
+ to correctly handles complex inputs and be analytically continuable to the
93
+ complex plane. The scheme '3-point' is more accurate than '2-point' but
94
+ requires twice as many operations.
95
+
96
+ Examples
97
+ --------
98
+ Constrain ``x[0] < sin(x[1]) + 1.9``
99
+
100
+ >>> from scipy.optimize import NonlinearConstraint
101
+ >>> import numpy as np
102
+ >>> con = lambda x: x[0] - np.sin(x[1])
103
+ >>> nlc = NonlinearConstraint(con, -np.inf, 1.9)
104
+
105
+ """
106
+ def __init__(self, fun, lb, ub, jac='2-point', hess=BFGS(),
107
+ keep_feasible=False, finite_diff_rel_step=None,
108
+ finite_diff_jac_sparsity=None):
109
+ self.fun = fun
110
+ self.lb = lb
111
+ self.ub = ub
112
+ self.finite_diff_rel_step = finite_diff_rel_step
113
+ self.finite_diff_jac_sparsity = finite_diff_jac_sparsity
114
+ self.jac = jac
115
+ self.hess = hess
116
+ self.keep_feasible = keep_feasible
117
+
118
+
119
+ class LinearConstraint:
120
+ """Linear constraint on the variables.
121
+
122
+ The constraint has the general inequality form::
123
+
124
+ lb <= A.dot(x) <= ub
125
+
126
+ Here the vector of independent variables x is passed as ndarray of shape
127
+ (n,) and the matrix A has shape (m, n).
128
+
129
+ It is possible to use equal bounds to represent an equality constraint or
130
+ infinite bounds to represent a one-sided constraint.
131
+
132
+ Parameters
133
+ ----------
134
+ A : {array_like, sparse matrix}, shape (m, n)
135
+ Matrix defining the constraint.
136
+ lb, ub : dense array_like, optional
137
+ Lower and upper limits on the constraint. Each array must have the
138
+ shape (m,) or be a scalar, in the latter case a bound will be the same
139
+ for all components of the constraint. Use ``np.inf`` with an
140
+ appropriate sign to specify a one-sided constraint.
141
+ Set components of `lb` and `ub` equal to represent an equality
142
+ constraint. Note that you can mix constraints of different types:
143
+ interval, one-sided or equality, by setting different components of
144
+ `lb` and `ub` as necessary. Defaults to ``lb = -np.inf``
145
+ and ``ub = np.inf`` (no limits).
146
+ keep_feasible : dense array_like of bool, optional
147
+ Whether to keep the constraint components feasible throughout
148
+ iterations. A single value sets this property for all components.
149
+ Default is False. Has no effect for equality constraints.
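+
+ Examples
+ --------
+ A minimal sketch (the coefficients are illustrative) constraining
+ ``x[0] + 2*x[1] <= 4``:
+
+ >>> import numpy as np
+ >>> from scipy.optimize import LinearConstraint
+ >>> lc = LinearConstraint([[1, 2]], -np.inf, 4)
+ >>> lc.residual([1.0, 1.0])  # (A@x - lb, ub - A@x)
+ (array([inf]), array([1.]))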
150
+ """
151
+ def _input_validation(self):
152
+ if self.A.ndim != 2:
153
+ message = "`A` must have exactly two dimensions."
154
+ raise ValueError(message)
155
+
156
+ try:
157
+ shape = self.A.shape[0:1]
158
+ self.lb = np.broadcast_to(self.lb, shape)
159
+ self.ub = np.broadcast_to(self.ub, shape)
160
+ self.keep_feasible = np.broadcast_to(self.keep_feasible, shape)
161
+ except ValueError:
162
+ message = ("`lb`, `ub`, and `keep_feasible` must be broadcastable "
163
+ "to shape `A.shape[0:1]`")
164
+ raise ValueError(message)
165
+
166
+ def __init__(self, A, lb=-np.inf, ub=np.inf, keep_feasible=False):
167
+ if not issparse(A):
168
+ # In some cases, if the constraint is not valid, this emits a
169
+ # VisibleDeprecationWarning about ragged nested sequences
170
+ # before eventually causing an error. `scipy.optimize.milp` would
171
+ # prefer that this just error out immediately so it can handle it
172
+ # rather than concerning the user.
173
+ with catch_warnings():
174
+ simplefilter("error")
175
+ self.A = np.atleast_2d(A).astype(np.float64)
176
+ else:
177
+ self.A = A
178
+ if issparse(lb) or issparse(ub):
179
+ raise ValueError("Constraint limits must be dense arrays.")
180
+ self.lb = np.atleast_1d(lb).astype(np.float64)
181
+ self.ub = np.atleast_1d(ub).astype(np.float64)
182
+
183
+ if issparse(keep_feasible):
184
+ raise ValueError("`keep_feasible` must be a dense array.")
185
+ self.keep_feasible = np.atleast_1d(keep_feasible).astype(bool)
186
+ self._input_validation()
187
+
188
+ def residual(self, x):
189
+ """
190
+ Calculate the residual between the constraint function and the limits
191
+
192
+ For a linear constraint of the form::
193
+
194
+ lb <= A@x <= ub
195
+
196
+ the lower and upper residuals between ``A@x`` and the limits are values
197
+ ``sl`` and ``sb`` such that::
198
+
199
+ lb + sl == A@x == ub - sb
200
+
201
+ When all elements of ``sl`` and ``sb`` are positive, all elements of
202
+ the constraint are satisfied; a negative element in ``sl`` or ``sb``
203
+ indicates that the corresponding element of the constraint is not
204
+ satisfied.
205
+
206
+ Parameters
207
+ ----------
208
+ x: array_like
209
+ Vector of independent variables
210
+
211
+ Returns
212
+ -------
213
+ sl, sb : array-like
214
+ The lower and upper residuals
215
+ """
216
+ return self.A@x - self.lb, self.ub - self.A@x
217
+
218
+
219
+ class Bounds:
220
+ """Bounds constraint on the variables.
221
+
222
+ The constraint has the general inequality form::
223
+
224
+ lb <= x <= ub
225
+
226
+ It is possible to use equal bounds to represent an equality constraint or
227
+ infinite bounds to represent a one-sided constraint.
228
+
229
+ Parameters
230
+ ----------
231
+ lb, ub : dense array_like, optional
232
+ Lower and upper bounds on independent variables. `lb`, `ub`, and
233
+ `keep_feasible` must be the same shape or broadcastable.
234
+ Set components of `lb` and `ub` equal
235
+ to fix a variable. Use ``np.inf`` with an appropriate sign to disable
236
+ bounds on all or some variables. Note that you can mix constraints of
237
+ different types: interval, one-sided or equality, by setting different
238
+ components of `lb` and `ub` as necessary. Defaults to ``lb = -np.inf``
239
+ and ``ub = np.inf`` (no bounds).
240
+ keep_feasible : dense array_like of bool, optional
241
+ Whether to keep the constraint components feasible throughout
242
+ iterations. Must be broadcastable with `lb` and `ub`.
243
+ Default is False. Has no effect for equality constraints.
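+
+ Examples
+ --------
+ A minimal sketch (the bound values are illustrative):
+
+ >>> from scipy.optimize import Bounds
+ >>> b = Bounds(0, [1.0, 2.0])  # 0 <= x[0] <= 1, 0 <= x[1] <= 2
+ >>> b.residual([0.5, 0.5])  # (x - lb, ub - x)
+ (array([0.5, 0.5]), array([0.5, 1.5]))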
244
+ """
245
+ def _input_validation(self):
246
+ try:
247
+ res = np.broadcast_arrays(self.lb, self.ub, self.keep_feasible)
248
+ self.lb, self.ub, self.keep_feasible = res
249
+ except ValueError:
250
+ message = "`lb`, `ub`, and `keep_feasible` must be broadcastable."
251
+ raise ValueError(message)
252
+
253
+ def __init__(self, lb=-np.inf, ub=np.inf, keep_feasible=False):
254
+ if issparse(lb) or issparse(ub):
255
+ raise ValueError("Lower and upper bounds must be dense arrays.")
256
+ self.lb = np.atleast_1d(lb)
257
+ self.ub = np.atleast_1d(ub)
258
+
259
+ if issparse(keep_feasible):
260
+ raise ValueError("`keep_feasible` must be a dense array.")
261
+ self.keep_feasible = np.atleast_1d(keep_feasible).astype(bool)
262
+ self._input_validation()
263
+
264
+ def __repr__(self):
265
+ start = f"{type(self).__name__}({self.lb!r}, {self.ub!r}"
266
+ if np.any(self.keep_feasible):
267
+ end = f", keep_feasible={self.keep_feasible!r})"
268
+ else:
269
+ end = ")"
270
+ return start + end
271
+
272
+ def residual(self, x):
273
+ """Calculate the residual (slack) between the input and the bounds
274
+
275
+ For a bound constraint of the form::
276
+
277
+ lb <= x <= ub
278
+
279
+ the lower and upper residuals between `x` and the bounds are values
280
+ ``sl`` and ``sb`` such that::
281
+
282
+ lb + sl == x == ub - sb
283
+
284
+ When all elements of ``sl`` and ``sb`` are positive, all elements of
285
+ ``x`` lie within the bounds; a negative element in ``sl`` or ``sb``
286
+ indicates that the corresponding element of ``x`` is out of bounds.
287
+
288
+ Parameters
289
+ ----------
290
+ x: array_like
291
+ Vector of independent variables
292
+
293
+ Returns
294
+ -------
295
+ sl, sb : array-like
296
+ The lower and upper residuals
297
+ """
298
+ return x - self.lb, self.ub - x
299
+
300
+
301
+ class PreparedConstraint:
302
+ """Constraint prepared from a user defined constraint.
303
+
304
+ On creation it will check whether a constraint definition is valid and
305
+ the initial point is feasible. If created successfully, it will contain
306
+ the attributes listed below.
307
+
308
+ Parameters
309
+ ----------
310
+ constraint : {NonlinearConstraint, LinearConstraint`, Bounds}
311
+ Constraint to check and prepare.
312
+ x0 : array_like
313
+ Initial vector of independent variables.
314
+ sparse_jacobian : bool or None, optional
315
+ If bool, then the Jacobian of the constraint will be converted
316
+ to the corresponding format if necessary. If None (default), such
317
+ conversion is not made.
318
+ finite_diff_bounds : 2-tuple, optional
319
+ Lower and upper bounds on the independent variables for the finite
320
+ difference approximation, if applicable. Defaults to no bounds.
321
+
322
+ Attributes
323
+ ----------
324
+ fun : {VectorFunction, LinearVectorFunction, IdentityVectorFunction}
325
+ Function defining the constraint wrapped by one of the convenience
326
+ classes.
327
+ bounds : 2-tuple
328
+ Contains lower and upper bounds for the constraints --- lb and ub.
329
+ These are converted to ndarray and have a size equal to the number of
330
+ the constraints.
331
+ keep_feasible : ndarray
332
+ Array indicating which components must be kept feasible with a size
333
+ equal to the number of the constraints.
334
+ """
335
+ def __init__(self, constraint, x0, sparse_jacobian=None,
336
+ finite_diff_bounds=(-np.inf, np.inf)):
337
+ if isinstance(constraint, NonlinearConstraint):
338
+ fun = VectorFunction(constraint.fun, x0,
339
+ constraint.jac, constraint.hess,
340
+ constraint.finite_diff_rel_step,
341
+ constraint.finite_diff_jac_sparsity,
342
+ finite_diff_bounds, sparse_jacobian)
343
+ elif isinstance(constraint, LinearConstraint):
344
+ fun = LinearVectorFunction(constraint.A, x0, sparse_jacobian)
345
+ elif isinstance(constraint, Bounds):
346
+ fun = IdentityVectorFunction(x0, sparse_jacobian)
347
+ else:
348
+ raise ValueError("`constraint` of an unknown type is passed.")
349
+
350
+ m = fun.m
351
+
352
+ lb = np.asarray(constraint.lb, dtype=float)
353
+ ub = np.asarray(constraint.ub, dtype=float)
354
+ keep_feasible = np.asarray(constraint.keep_feasible, dtype=bool)
355
+
356
+ lb = np.broadcast_to(lb, m)
357
+ ub = np.broadcast_to(ub, m)
358
+ keep_feasible = np.broadcast_to(keep_feasible, m)
359
+
360
+ if keep_feasible.shape != (m,):
361
+ raise ValueError("`keep_feasible` has a wrong shape.")
362
+
363
+ mask = keep_feasible & (lb != ub)
364
+ f0 = fun.f
365
+ if np.any(f0[mask] < lb[mask]) or np.any(f0[mask] > ub[mask]):
366
+ raise ValueError("`x0` is infeasible with respect to some "
367
+ "inequality constraint with `keep_feasible` "
368
+ "set to True.")
369
+
370
+ self.fun = fun
371
+ self.bounds = (lb, ub)
372
+ self.keep_feasible = keep_feasible
373
+
374
+ def violation(self, x):
375
+ """How much the constraint is exceeded by.
376
+
377
+ Parameters
378
+ ----------
379
+ x : array-like
380
+ Vector of independent variables
381
+
382
+ Returns
383
+ -------
384
+ excess : array-like
385
+ How much the constraint is exceeded by, for each of the
386
+ constraints specified by `PreparedConstraint.fun`.
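+
+ Examples
+ --------
+ A minimal sketch with a simple bound constraint (values illustrative):
+
+ >>> import numpy as np
+ >>> from scipy.optimize._constraints import Bounds, PreparedConstraint
+ >>> pc = PreparedConstraint(Bounds(-1, 1), np.array([0.0]))
+ >>> pc.violation([2.0])  # x exceeds the upper bound by 1
+ array([1.])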
387
+ """
388
+ with catch_warnings():
389
+ # Ignore the following warning, it's not important when
390
+ # figuring out total violation
391
+ # UserWarning: delta_grad == 0.0. Check if the approximated
392
+ # function is linear
393
+ filterwarnings("ignore", "delta_grad", UserWarning)
394
+ ev = self.fun.fun(np.asarray(x))
395
+
396
+ excess_lb = np.maximum(self.bounds[0] - ev, 0)
397
+ excess_ub = np.maximum(ev - self.bounds[1], 0)
398
+
399
+ return excess_lb + excess_ub
400
+
401
+
402
+ def new_bounds_to_old(lb, ub, n):
403
+ """Convert the new bounds representation to the old one.
404
+
405
+ The new representation is a tuple (lb, ub) and the old one is a list
406
+ containing n tuples, the ith of which holds the lower and upper bound
407
+ on the ith variable.
408
+ If any of the entries in lb/ub are -np.inf/np.inf they are replaced by
409
+ None.
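+
+ For example (a minimal sketch):
+
+ >>> import numpy as np
+ >>> from scipy.optimize._constraints import new_bounds_to_old
+ >>> new_bounds_to_old(-np.inf, [1.0, np.inf], 2)
+ [(None, 1.0), (None, None)]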
410
+ """
411
+ lb = np.broadcast_to(lb, n)
412
+ ub = np.broadcast_to(ub, n)
413
+
414
+ lb = [float(x) if x > -np.inf else None for x in lb]
415
+ ub = [float(x) if x < np.inf else None for x in ub]
416
+
417
+ return list(zip(lb, ub))
418
+
419
+
420
+ def old_bound_to_new(bounds):
421
+ """Convert the old bounds representation to the new one.
422
+
423
+ The new representation is a tuple (lb, ub) and the old one is a list
424
+ containing n tuples, the ith of which holds the lower and upper bound
425
+ on the ith variable.
426
+ If any of the entries in lb/ub are None they are replaced by
427
+ -np.inf/np.inf.
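+
+ For example (a minimal sketch):
+
+ >>> from scipy.optimize._constraints import old_bound_to_new
+ >>> lb, ub = old_bound_to_new([(None, 1.0), (0.0, None)])
+ >>> lb.tolist(), ub.tolist()
+ ([-inf, 0.0], [1.0, inf])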
428
+ """
429
+ lb, ub = zip(*bounds)
430
+
431
+ # Convert occurrences of None to -inf or inf, and replace occurrences of
432
+ # any numpy array x with x.item(). Then wrap the results in numpy arrays.
433
+ lb = np.array([float(_arr_to_scalar(x)) if x is not None else -np.inf
434
+ for x in lb])
435
+ ub = np.array([float(_arr_to_scalar(x)) if x is not None else np.inf
436
+ for x in ub])
437
+
438
+ return lb, ub
439
+
440
+
441
+ def strict_bounds(lb, ub, keep_feasible, n_vars):
442
+ """Remove bounds which are not asked to be kept feasible."""
443
+ strict_lb = np.resize(lb, n_vars).astype(float)
444
+ strict_ub = np.resize(ub, n_vars).astype(float)
445
+ keep_feasible = np.resize(keep_feasible, n_vars)
446
+ strict_lb[~keep_feasible] = -np.inf
447
+ strict_ub[~keep_feasible] = np.inf
448
+ return strict_lb, strict_ub
449
+
450
+
451
+ def new_constraint_to_old(con, x0):
452
+ """
453
+ Converts new-style constraint objects to old-style constraint dictionaries.
454
+ """
455
+ if isinstance(con, NonlinearConstraint):
456
+ if (con.finite_diff_jac_sparsity is not None or
457
+ con.finite_diff_rel_step is not None or
458
+ not isinstance(con.hess, BFGS) or # misses user specified BFGS
459
+ con.keep_feasible):
460
+ warn("Constraint options `finite_diff_jac_sparsity`, "
461
+ "`finite_diff_rel_step`, `keep_feasible`, and `hess`"
462
+ "are ignored by this method.",
463
+ OptimizeWarning, stacklevel=3)
464
+
465
+ fun = con.fun
466
+ if callable(con.jac):
467
+ jac = con.jac
468
+ else:
469
+ jac = None
470
+
471
+ else: # LinearConstraint
472
+ if np.any(con.keep_feasible):
473
+ warn("Constraint option `keep_feasible` is ignored by this method.",
474
+ OptimizeWarning, stacklevel=3)
475
+
476
+ A = con.A
477
+ if issparse(A):
478
+ A = A.toarray()
479
+ def fun(x):
480
+ return np.dot(A, x)
481
+ def jac(x):
482
+ return A
483
+
484
+ # FIXME: when bugs in VectorFunction/LinearVectorFunction are worked out,
485
+ # use pcon.fun.fun and pcon.fun.jac. Until then, get fun/jac above.
486
+ pcon = PreparedConstraint(con, x0)
487
+ lb, ub = pcon.bounds
488
+
489
+ i_eq = lb == ub
490
+ i_bound_below = np.logical_xor(lb != -np.inf, i_eq)
491
+ i_bound_above = np.logical_xor(ub != np.inf, i_eq)
492
+ i_unbounded = np.logical_and(lb == -np.inf, ub == np.inf)
493
+
494
+ if np.any(i_unbounded):
495
+ warn("At least one constraint is unbounded above and below. Such "
496
+ "constraints are ignored.",
497
+ OptimizeWarning, stacklevel=3)
498
+
499
+ ceq = []
500
+ if np.any(i_eq):
501
+ def f_eq(x):
502
+ y = np.array(fun(x)).flatten()
503
+ return y[i_eq] - lb[i_eq]
504
+ ceq = [{"type": "eq", "fun": f_eq}]
505
+
506
+ if jac is not None:
507
+ def j_eq(x):
508
+ dy = jac(x)
509
+ if issparse(dy):
510
+ dy = dy.toarray()
511
+ dy = np.atleast_2d(dy)
512
+ return dy[i_eq, :]
513
+ ceq[0]["jac"] = j_eq
514
+
515
+ cineq = []
516
+ n_bound_below = np.sum(i_bound_below)
517
+ n_bound_above = np.sum(i_bound_above)
518
+ if n_bound_below + n_bound_above:
519
+ def f_ineq(x):
520
+ y = np.zeros(n_bound_below + n_bound_above)
521
+ y_all = np.array(fun(x)).flatten()
522
+ y[:n_bound_below] = y_all[i_bound_below] - lb[i_bound_below]
523
+ y[n_bound_below:] = -(y_all[i_bound_above] - ub[i_bound_above])
524
+ return y
525
+ cineq = [{"type": "ineq", "fun": f_ineq}]
526
+
527
+ if jac is not None:
528
+ def j_ineq(x):
529
+ dy = np.zeros((n_bound_below + n_bound_above, len(x0)))
530
+ dy_all = jac(x)
531
+ if issparse(dy_all):
532
+ dy_all = dy_all.toarray()
533
+ dy_all = np.atleast_2d(dy_all)
534
+ dy[:n_bound_below, :] = dy_all[i_bound_below]
535
+ dy[n_bound_below:, :] = -dy_all[i_bound_above]
536
+ return dy
537
+ cineq[0]["jac"] = j_ineq
538
+
539
+ old_constraints = ceq + cineq
540
+
541
+ if len(old_constraints) > 1:
542
+ warn("Equality and inequality constraints are specified in the same "
543
+ "element of the constraint list. For efficient use with this "
544
+ "method, equality and inequality constraints should be specified "
545
+ "in separate elements of the constraint list. ",
546
+ OptimizeWarning, stacklevel=3)
547
+ return old_constraints
548
+
549
+
550
+ def old_constraint_to_new(ic, con):
551
+ """
552
+ Converts old-style constraint dictionaries to new-style constraint objects.
553
+ """
554
+ # check type
555
+ try:
556
+ ctype = con['type'].lower()
557
+ except KeyError as e:
558
+ raise KeyError('Constraint %d has no type defined.' % ic) from e
559
+ except TypeError as e:
560
+ raise TypeError(
561
+ 'Constraints must be a sequence of dictionaries.'
562
+ ) from e
563
+ except AttributeError as e:
564
+ raise TypeError("Constraint's type must be a string.") from e
565
+ else:
566
+ if ctype not in ['eq', 'ineq']:
567
+ raise ValueError("Unknown constraint type '%s'." % con['type'])
568
+ if 'fun' not in con:
569
+ raise ValueError('Constraint %d has no function defined.' % ic)
570
+
571
+ lb = 0
572
+ if ctype == 'eq':
573
+ ub = 0
574
+ else:
575
+ ub = np.inf
576
+
577
+ jac = '2-point'
578
+ if 'args' in con:
579
+ args = con['args']
580
+ def fun(x):
581
+ return con["fun"](x, *args)
582
+ if 'jac' in con:
583
+ def jac(x):
584
+ return con["jac"](x, *args)
585
+ else:
586
+ fun = con['fun']
587
+ if 'jac' in con:
588
+ jac = con['jac']
589
+
590
+ return NonlinearConstraint(fun, lb, ub, jac)
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_differentiate.py ADDED
@@ -0,0 +1,669 @@
1
+ # mypy: disable-error-code="attr-defined"
2
+ import numpy as np
3
+ import scipy._lib._elementwise_iterative_method as eim
4
+ from scipy._lib._util import _RichResult
5
+
6
+ _EERRORINCREASE = -1 # used in _differentiate
7
+
8
+ def _differentiate_iv(func, x, args, atol, rtol, maxiter, order, initial_step,
9
+ step_factor, step_direction, preserve_shape, callback):
10
+ # Input validation for `_differentiate`
11
+
12
+ if not callable(func):
13
+ raise ValueError('`func` must be callable.')
14
+
15
+ # x has more complex IV that is taken care of during initialization
16
+ x = np.asarray(x)
17
+ dtype = x.dtype if np.issubdtype(x.dtype, np.inexact) else np.float64
18
+
19
+ if not np.iterable(args):
20
+ args = (args,)
21
+
22
+ if atol is None:
23
+ atol = np.finfo(dtype).tiny
24
+
25
+ if rtol is None:
26
+ rtol = np.sqrt(np.finfo(dtype).eps)
27
+
28
+ message = 'Tolerances and step parameters must be non-negative scalars.'
29
+ tols = np.asarray([atol, rtol, initial_step, step_factor])
30
+ if (not np.issubdtype(tols.dtype, np.number)
31
+ or np.any(tols < 0)
32
+ or tols.shape != (4,)):
33
+ raise ValueError(message)
34
+ initial_step, step_factor = tols[2:].astype(dtype)
35
+
36
+ maxiter_int = int(maxiter)
37
+ if maxiter != maxiter_int or maxiter <= 0:
38
+ raise ValueError('`maxiter` must be a positive integer.')
39
+
40
+ order_int = int(order)
41
+ if order_int != order or order <= 0:
42
+ raise ValueError('`order` must be a positive integer.')
43
+
44
+ step_direction = np.sign(step_direction).astype(dtype)
45
+ x, step_direction = np.broadcast_arrays(x, step_direction)
46
+ x, step_direction = x[()], step_direction[()]
47
+
48
+ message = '`preserve_shape` must be True or False.'
49
+ if preserve_shape not in {True, False}:
50
+ raise ValueError(message)
51
+
52
+ if callback is not None and not callable(callback):
53
+ raise ValueError('`callback` must be callable.')
54
+
55
+ return (func, x, args, atol, rtol, maxiter_int, order_int, initial_step,
56
+ step_factor, step_direction, preserve_shape, callback)
57
+
58
+
59
+ def _differentiate(func, x, *, args=(), atol=None, rtol=None, maxiter=10,
60
+ order=8, initial_step=0.5, step_factor=2.0,
61
+ step_direction=0, preserve_shape=False, callback=None):
62
+ """Evaluate the derivative of an elementwise scalar function numerically.
63
+
64
+ Parameters
65
+ ----------
66
+ func : callable
67
+ The function whose derivative is desired. The signature must be::
68
+
69
+ func(x: ndarray, *fargs) -> ndarray
70
+
71
+ where each element of ``x`` is a finite real and ``fargs`` is a tuple,
72
+ which may contain an arbitrary number of arrays that are broadcastable
73
+ with `x`. ``func`` must be an elementwise function: each element
74
+ ``func(x)[i]`` must equal ``func(x[i])`` for all indices ``i``.
75
+ x : array_like
76
+ Abscissae at which to evaluate the derivative.
77
+ args : tuple, optional
78
+ Additional positional arguments to be passed to `func`. Must be arrays
79
+ broadcastable with `x`. If the callable to be differentiated requires
80
+ arguments that are not broadcastable with `x`, wrap that callable with
81
+ `func`. See Examples.
82
+ atol, rtol : float, optional
83
+ Absolute and relative tolerances for the stopping condition: iteration
84
+ will stop when ``res.error < atol + rtol * abs(res.df)``. The default
85
+ `atol` is the smallest normal number of the appropriate dtype, and
86
+ the default `rtol` is the square root of the precision of the
87
+ appropriate dtype.
88
+ order : int, default: 8
89
+ The (positive integer) order of the finite difference formula to be
90
+ used. Odd integers will be rounded up to the next even integer.
91
+ initial_step : float, default: 0.5
92
+ The (absolute) initial step size for the finite difference derivative
93
+ approximation.
94
+ step_factor : float, default: 2.0
95
+ The factor by which the step size is *reduced* in each iteration; i.e.
96
+ the step size in iteration 1 is ``initial_step/step_factor``. If
97
+ ``step_factor < 1``, subsequent steps will be greater than the initial
98
+ step; this may be useful if steps smaller than some threshold are
99
+ undesirable (e.g. due to subtractive cancellation error).
100
+ maxiter : int, default: 10
101
+ The maximum number of iterations of the algorithm to perform. See
102
+ notes.
103
+ step_direction : array_like
104
+ An array representing the direction of the finite difference steps (for
105
+ use when `x` lies near to the boundary of the domain of the function.)
106
+ Must be broadcastable with `x` and all `args`.
107
+ Where 0 (default), central differences are used; where negative (e.g.
108
+ -1), steps are non-positive; and where positive (e.g. 1), all steps are
109
+ non-negative.
110
+ preserve_shape : bool, default: False
111
+ In the following, "arguments of `func`" refers to the array ``x`` and
112
+ any arrays within ``fargs``. Let ``shape`` be the broadcasted shape
113
+ of `x` and all elements of `args` (which is conceptually
114
+ distinct from ``fargs`` passed into `f`).
115
+
116
+ - When ``preserve_shape=False`` (default), `f` must accept arguments
117
+ of *any* broadcastable shapes.
118
+
119
+ - When ``preserve_shape=True``, `f` must accept arguments of shape
120
+ ``shape`` *or* ``shape + (n,)``, where ``n`` is the number of
121
+ abscissae at which the function is being evaluated.
122
+
123
+ In either case, for each scalar element ``xi`` within `x`, the array
124
+ returned by `f` must include the scalar ``f(xi)`` at the same index.
125
+ Consequently, the shape of the output is always the shape of the input
126
+ ``x``.
127
+
128
+ See Examples.
129
+ callback : callable, optional
130
+ An optional user-supplied function to be called before the first
131
+ iteration and after each iteration.
132
+ Called as ``callback(res)``, where ``res`` is a ``_RichResult``
133
+ similar to that returned by `_differentiate` (but containing the
134
+ current iterate's values of all variables). If `callback` raises a
135
+ ``StopIteration``, the algorithm will terminate immediately and
136
+ `_differentiate` will return a result.
137
+
138
+ Returns
139
+ -------
140
+ res : _RichResult
141
+ An instance of `scipy._lib._util._RichResult` with the following
142
+ attributes. (The descriptions are written as though the values will be
143
+ scalars; however, if `func` returns an array, the outputs will be
144
+ arrays of the same shape.)
145
+
146
+ success : bool
147
+ ``True`` when the algorithm terminated successfully (status ``0``).
148
+ status : int
149
+ An integer representing the exit status of the algorithm.
150
+ ``0`` : The algorithm converged to the specified tolerances.
151
+ ``-1`` : The error estimate increased, so iteration was terminated.
152
+ ``-2`` : The maximum number of iterations was reached.
153
+ ``-3`` : A non-finite value was encountered.
154
+ ``-4`` : Iteration was terminated by `callback`.
155
+ ``1`` : The algorithm is proceeding normally (in `callback` only).
156
+ df : float
157
+ The derivative of `func` at `x`, if the algorithm terminated
158
+ successfully.
159
+ error : float
160
+ An estimate of the error: the magnitude of the difference between
161
+ the current estimate of the derivative and the estimate in the
162
+ previous iteration.
163
+ nit : int
164
+ The number of iterations performed.
165
+ nfev : int
166
+ The number of points at which `func` was evaluated.
167
+ x : float
168
+ The value at which the derivative of `func` was evaluated
169
+ (after broadcasting with `args` and `step_direction`).
170
+
171
+ Notes
172
+ -----
173
+ The implementation was inspired by jacobi [1]_, numdifftools [2]_, and
174
+ DERIVEST [3]_, but the implementation follows the theory of Taylor series
175
+ more straightforwardly (and arguably naively so).
176
+ In the first iteration, the derivative is estimated using a finite
177
+ difference formula of order `order` with maximum step size `initial_step`.
178
+ Each subsequent iteration, the maximum step size is reduced by
179
+ `step_factor`, and the derivative is estimated again until a termination
180
+ condition is reached. The error estimate is the magnitude of the difference
181
+ between the current derivative approximation and that of the previous
182
+ iteration.
183
+
184
+ The stencils of the finite difference formulae are designed such that
185
+ abscissae are "nested": after `func` is evaluated at ``order + 1``
186
+ points in the first iteration, `func` is evaluated at only two new points
187
+ in each subsequent iteration; ``order - 1`` previously evaluated function
188
+ values required by the finite difference formula are reused, and two
189
+ function values (evaluations at the points furthest from `x`) are unused.
190
+
191
+ Step sizes are absolute. When the step size is small relative to the
192
+ magnitude of `x`, precision is lost; for example, if `x` is ``1e20``, the
193
+ default initial step size of ``0.5`` cannot be resolved. Accordingly,
194
+ consider using larger initial step sizes for large magnitudes of `x`.
195
+
196
+ The default tolerances are challenging to satisfy at points where the
197
+ true derivative is exactly zero. If the derivative may be exactly zero,
198
+ consider specifying an absolute tolerance (e.g. ``atol=1e-16``) to
199
+ improve convergence.
200
+
201
+ References
202
+ ----------
203
+ [1]_ Hans Dembinski (@HDembinski). jacobi.
204
+ https://github.com/HDembinski/jacobi
205
+ [2]_ Per A. Brodtkorb and John D'Errico. numdifftools.
206
+ https://numdifftools.readthedocs.io/en/latest/
207
+ [3]_ John D'Errico. DERIVEST: Adaptive Robust Numerical Differentiation.
208
+ https://www.mathworks.com/matlabcentral/fileexchange/13490-adaptive-robust-numerical-differentiation
209
+ [4]_ Numerical Differentiation. Wikipedia.
210
+ https://en.wikipedia.org/wiki/Numerical_differentiation
211
+
212
+ Examples
213
+ --------
214
+ Evaluate the derivative of ``np.exp`` at several points ``x``.
215
+
216
+ >>> import numpy as np
217
+ >>> from scipy.optimize._differentiate import _differentiate
218
+ >>> f = np.exp
219
+ >>> df = np.exp # true derivative
220
+ >>> x = np.linspace(1, 2, 5)
221
+ >>> res = _differentiate(f, x)
222
+ >>> res.df # approximation of the derivative
223
+ array([2.71828183, 3.49034296, 4.48168907, 5.75460268, 7.3890561 ])
224
+ >>> res.error # estimate of the error
225
+ array(
226
+ [7.12940817e-12, 9.16688947e-12, 1.17594823e-11, 1.50972568e-11, 1.93942640e-11]
227
+ )
228
+ >>> abs(res.df - df(x)) # true error
229
+ array(
230
+ [3.06421555e-14, 3.01980663e-14, 5.06261699e-14, 6.30606678e-14, 8.34887715e-14]
231
+ )
232
+
233
+ Show the convergence of the approximation as the step size is reduced.
234
+ Each iteration, the step size is reduced by `step_factor`, so for
235
+ sufficiently small initial step, each iteration reduces the error by a
236
+ factor of ``1/step_factor**order`` until finite precision arithmetic
237
+ inhibits further improvement.
238
+
239
+ >>> iter = list(range(1, 12)) # maximum iterations
240
+ >>> hfac = 2 # step size reduction per iteration
241
+ >>> hdir = [-1, 0, 1] # compare left-, central-, and right- steps
242
+ >>> order = 4 # order of differentiation formula
243
+ >>> x = 1
244
+ >>> ref = df(x)
245
+ >>> errors = [] # true error
246
+ >>> for i in iter:
247
+ ... res = _differentiate(f, x, maxiter=i, step_factor=hfac,
248
+ ... step_direction=hdir, order=order,
249
+ ... atol=0, rtol=0) # prevent early termination
250
+ ... errors.append(abs(res.df - ref))
251
+ >>> errors = np.array(errors)
252
+ >>> plt.semilogy(iter, errors[:, 0], label='left differences')
253
+ >>> plt.semilogy(iter, errors[:, 1], label='central differences')
254
+ >>> plt.semilogy(iter, errors[:, 2], label='right differences')
255
+ >>> plt.xlabel('iteration')
256
+ >>> plt.ylabel('error')
257
+ >>> plt.legend()
258
+ >>> plt.show()
259
+ >>> (errors[1, 1] / errors[0, 1], 1 / hfac**order)
260
+ (0.06215223140159822, 0.0625)
261
+
262
+ The implementation is vectorized over `x`, `step_direction`, and `args`.
263
+ The function is evaluated once before the first iteration to perform input
264
+ validation and standardization, and once per iteration thereafter.
265
+
266
+ >>> def f(x, p):
267
+ ... f.nit += 1
269
+ ... return x**p
270
+ >>> f.nit = 0
271
+ >>> def df(x, p):
272
+ ... return p*x**(p-1)
273
+ >>> x = np.arange(1, 5)
274
+ >>> p = np.arange(1, 6).reshape((-1, 1))
275
+ >>> hdir = np.arange(-1, 2).reshape((-1, 1, 1))
276
+ >>> res = _differentiate(f, x, args=(p,), step_direction=hdir, maxiter=1)
277
+ >>> np.allclose(res.df, df(x, p))
278
+ True
279
+ >>> res.df.shape
280
+ (3, 5, 4)
281
+ >>> f.nit
282
+ 2
283
+
284
+ By default, `preserve_shape` is False, and therefore the callable
285
+ `f` may be called with arrays of any broadcastable shapes.
286
+ For example:
287
+
288
+ >>> shapes = []
289
+ >>> def f(x, c):
290
+ ... shape = np.broadcast_shapes(x.shape, c.shape)
291
+ ... shapes.append(shape)
292
+ ... return np.sin(c*x)
293
+ >>>
294
+ >>> c = [1, 5, 10, 20]
295
+ >>> res = _differentiate(f, 0, args=(c,))
296
+ >>> shapes
297
+ [(4,), (4, 8), (4, 2), (3, 2), (2, 2), (1, 2)]
298
+
299
+ To understand where these shapes are coming from - and to better
300
+ understand how `_differentiate` computes accurate results - note that
301
+ higher values of ``c`` correspond with higher frequency sinusoids.
302
+ The higher frequency sinusoids make the function's derivative change
303
+ faster, so more function evaluations are required to achieve the target
304
+ accuracy:
305
+
306
+ >>> res.nfev
307
+ array([11, 13, 15, 17])
308
+
309
+ The initial ``shape``, ``(4,)``, corresponds with evaluating the
310
+ function at a single abscissa and all four frequencies; this is used
311
+ for input validation and to determine the size and dtype of the arrays
312
+ that store results. The next shape corresponds with evaluating the
313
+ function at an initial grid of abscissae and all four frequencies.
314
+ Successive calls to the function evaluate the function at two more
315
+ abscissae, increasing the effective order of the approximation by two.
316
+ However, in later function evaluations, the function is evaluated at
317
+ fewer frequencies because the corresponding derivative has already
318
+ converged to the required tolerance. This saves function evaluations to
319
+ improve performance, but it requires the function to accept arguments of
320
+ any shape.
321
+
322
+ "Vector-valued" functions are unlikely to satisfy this requirement.
323
+ For example, consider
324
+
325
+ >>> def f(x):
326
+ ... return [x, np.sin(3*x), x+np.sin(10*x), np.sin(20*x)*(x-1)**2]
327
+
328
+ This function is not compatible with `_differentiate` as written; for instance,
329
+ the shape of the output will not be the same as the shape of ``x``. Such a
330
+ function *could* be converted to a compatible form with the introduction of
331
+ additional parameters, but this would be inconvenient. In such cases,
332
+ a simpler solution would be to use `preserve_shape`.
333
+
334
+ >>> shapes = []
335
+ >>> def f(x):
336
+ ... shapes.append(x.shape)
337
+ ... x0, x1, x2, x3 = x
338
+ ... return [x0, np.sin(3*x1), x2+np.sin(10*x2), np.sin(20*x3)*(x3-1)**2]
339
+ >>>
340
+ >>> x = np.zeros(4)
341
+ >>> res = _differentiate(f, x, preserve_shape=True)
342
+ >>> shapes
343
+ [(4,), (4, 8), (4, 2), (4, 2), (4, 2), (4, 2)]
344
+
345
+ Here, the shape of ``x`` is ``(4,)``. With ``preserve_shape=True``, the
346
+ function may be called with argument ``x`` of shape ``(4,)`` or ``(4, n)``,
347
+ and this is what we observe.
348
+
349
+ """
350
+ # TODO (followup):
351
+ # - investigate behavior at saddle points
352
+ # - array initial_step / step_factor?
353
+ # - multivariate functions?
354
+
355
+ res = _differentiate_iv(func, x, args, atol, rtol, maxiter, order, initial_step,
356
+ step_factor, step_direction, preserve_shape, callback)
357
+ (func, x, args, atol, rtol, maxiter, order,
358
+ h0, fac, hdir, preserve_shape, callback) = res
359
+
360
+ # Initialization
361
+ # Since f(x) (no step) is not needed for central differences, it may be
362
+ # possible to eliminate this function evaluation. However, it's useful for
363
+ # input validation and standardization, and everything else is designed to
364
+ # reduce function calls, so let's keep it simple.
365
+ temp = eim._initialize(func, (x,), args, preserve_shape=preserve_shape)
366
+ func, xs, fs, args, shape, dtype = temp
367
+ x, f = xs[0], fs[0]
368
+ df = np.full_like(f, np.nan)
369
+ # Ideally we'd broadcast the shape of `hdir` in `_elementwise_algo_init`, but
370
+ # it's simpler to do it here than to generalize `_elementwise_algo_init` further.
371
+ # `hdir` and `x` are already broadcasted in `_differentiate_iv`, so we know
372
+ # that `hdir` can be broadcasted to the final shape.
373
+ hdir = np.broadcast_to(hdir, shape).flatten()
374
+
375
+ status = np.full_like(x, eim._EINPROGRESS, dtype=int) # in progress
376
+ nit, nfev = 0, 1 # one function evaluation performed above
377
+ # Boolean indices of left, central, right, and (all) one-sided steps
378
+ il = hdir < 0
379
+ ic = hdir == 0
380
+ ir = hdir > 0
381
+ io = il | ir
382
+
383
+ # Most of these attributes are reasonably obvious, but:
384
+ # - `fs` holds all the function values of all active `x`. The zeroth
385
+ # axis corresponds with active points `x`, the first axis corresponds
386
+ # with the different steps (in the order described in
387
+ # `_differentiate_weights`).
388
+ # - `terms` (which could probably use a better name) is half the `order`,
389
+ # which is always even.
390
+ work = _RichResult(x=x, df=df, fs=f[:, np.newaxis], error=np.nan, h=h0,
391
+ df_last=np.nan, error_last=np.nan, h0=h0, fac=fac,
392
+ atol=atol, rtol=rtol, nit=nit, nfev=nfev,
393
+ status=status, dtype=dtype, terms=(order+1)//2,
394
+ hdir=hdir, il=il, ic=ic, ir=ir, io=io)
395
+ # This is the correspondence between terms in the `work` object and the
396
+ # final result. In this case, the mapping is trivial. Note that `success`
397
+ # is prepended automatically.
398
+ res_work_pairs = [('status', 'status'), ('df', 'df'), ('error', 'error'),
399
+ ('nit', 'nit'), ('nfev', 'nfev'), ('x', 'x')]
400
+
401
+ def pre_func_eval(work):
402
+ """Determine the abscissae at which the function needs to be evaluated.
403
+
404
+ See `_differentiate_weights` for a description of the stencil (pattern
405
+ of the abscissae).
406
+
407
+ In the first iteration, there is only one stored function value in
408
+ `work.fs`, `f(x)`, so we need to evaluate at `order` new points. In
409
+ subsequent iterations, we evaluate at two new points. Note that
410
+ `work.x` is always flattened into a 1D array after broadcasting with
411
+ all `args`, so we add a new axis at the end and evaluate all points
412
+ in one call to the function.
413
+
414
+ For improvement:
415
+ - Consider measuring the step size actually taken, since `(x + h) - x`
416
+ is not identically equal to `h` with floating point arithmetic.
417
+ - Adjust the step size automatically if `x` is too big to resolve the
418
+ step.
419
+ - We could probably save some work if there are no central difference
420
+ steps or no one-sided steps.
421
+ """
422
+ n = work.terms # half the order
423
+ h = work.h # step size
424
+ c = work.fac # step reduction factor
425
+ d = c**0.5 # square root of step reduction factor (one-sided stencil)
426
+ # Note - no need to be careful about dtypes until we allocate `x_eval`
427
+
428
+ if work.nit == 0:
429
+ hc = h / c**np.arange(n)
430
+ hc = np.concatenate((-hc[::-1], hc))
431
+ else:
432
+ hc = np.asarray([-h, h]) / c**(n-1)
433
+
434
+ if work.nit == 0:
435
+ hr = h / d**np.arange(2*n)
436
+ else:
437
+ hr = np.asarray([h, h/d]) / c**(n-1)
438
+
439
+ n_new = 2*n if work.nit == 0 else 2 # number of new abscissae
440
+ x_eval = np.zeros((len(work.hdir), n_new), dtype=work.dtype)
441
+ il, ic, ir = work.il, work.ic, work.ir
442
+ x_eval[ir] = work.x[ir, np.newaxis] + hr
443
+ x_eval[ic] = work.x[ic, np.newaxis] + hc
444
+ x_eval[il] = work.x[il, np.newaxis] - hr
445
+ return x_eval
446
+
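A minimal standalone sketch (not part of the source above, and assuming the defaults order=8, i.e. n=4, with step factor fac=2) of the first-iteration central-difference offsets that `pre_func_eval` builds:

    import numpy as np

    n, h, c = 4, 0.5, 2.0                 # half the order, initial step, step factor
    hc = h / c**np.arange(n)              # [h, h/c, h/c**2, h/c**3]
    hc = np.concatenate((-hc[::-1], hc))  # mirror to get the left half of the stencil
    print(hc)  # [-0.0625 -0.125 -0.25 -0.5  0.5  0.25  0.125  0.0625]
    # Eight new abscissa offsets around x; together with f(x) from the
    # initialization, these are the nine points of the eighth-order formula.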
447
+ def post_func_eval(x, f, work):
448
+ """ Estimate the derivative and error from the function evaluations
449
+
450
+ As in `pre_func_eval`: in the first iteration, there is only one stored
451
+ function value in `work.fs`, `f(x)`, so we need to add the `order` new
452
+ points. In subsequent iterations, we add two new points. The tricky
453
+ part is getting the order to match that of the weights, which is
454
+ described in `_differentiate_weights`.
455
+
456
+ For improvement:
457
+ - Change the order of the weights (and steps in `pre_func_eval`) to
458
+ simplify `work_fc` concatenation and eliminate `fc` concatenation.
459
+ - It would be simple to do one-step Richardson extrapolation with `df`
460
+ and `df_last` to increase the order of the estimate and/or improve
461
+ the error estimate.
462
+ - Process the function evaluations in a more numerically favorable
463
+ way. For instance, combining the pairs of central difference evals
464
+ into a second-order approximation and using Richardson extrapolation
465
+ to produce a higher order approximation seemed to retain accuracy up
466
+ to very high order.
467
+ - Alternatively, we could use `polyfit` like Jacobi. An advantage of
468
+ fitting a polynomial to more points than necessary is improved noise
469
+ tolerance.
470
+ """
471
+ n = work.terms
472
+ n_new = n if work.nit == 0 else 1
473
+ il, ic, io = work.il, work.ic, work.io
474
+
475
+ # Central difference
476
+ # `work_fc` is *all* the points at which the function has been evaluated
477
+ # `fc` is the points we're using *this iteration* to produce the estimate
478
+ work_fc = (f[ic, :n_new], work.fs[ic, :], f[ic, -n_new:])
479
+ work_fc = np.concatenate(work_fc, axis=-1)
480
+ if work.nit == 0:
481
+ fc = work_fc
482
+ else:
483
+ fc = (work_fc[:, :n], work_fc[:, n:n+1], work_fc[:, -n:])
484
+ fc = np.concatenate(fc, axis=-1)
485
+
486
+ # One-sided difference
487
+ work_fo = np.concatenate((work.fs[io, :], f[io, :]), axis=-1)
488
+ if work.nit == 0:
489
+ fo = work_fo
490
+ else:
491
+ fo = np.concatenate((work_fo[:, 0:1], work_fo[:, -2*n:]), axis=-1)
492
+
493
+ work.fs = np.zeros((len(ic), work.fs.shape[-1] + 2*n_new))
494
+ work.fs[ic] = work_fc
495
+ work.fs[io] = work_fo
496
+
497
+ wc, wo = _differentiate_weights(work, n)
498
+ work.df_last = work.df.copy()
499
+ work.df[ic] = fc @ wc / work.h
500
+ work.df[io] = fo @ wo / work.h
501
+ work.df[il] *= -1
502
+
503
+ work.h /= work.fac
504
+ work.error_last = work.error
505
+ # Simple error estimate - the difference in derivative estimates between
506
+ # this iteration and the last. This is typically conservative because if
507
+ # convergence has begun, the true error is much closer to the difference
508
+ # between the current estimate and the *next* error estimate. However,
509
+ # we could use Richardson extrapolation to produce an error estimate that
510
+ # is one order higher, and take the difference between that and
511
+ # `work.df` (which would just be a constant factor that depends on `fac`).
512
+ work.error = abs(work.df - work.df_last)
513
+
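The role of `work.error` can be seen in a standalone illustration (not from the source): second-order central-difference estimates of d/dx sin(x) at x = 1, where the difference between consecutive estimates is a conservative stand-in for the true error:

    import numpy as np

    x, true = 1.0, np.cos(1.0)
    last = np.nan
    for h in [0.2, 0.1, 0.05, 0.025]:
        est = (np.sin(x + h) - np.sin(x - h)) / (2 * h)
        # abs(est - last) plays the role of work.error; it is conservative
        # because it is dominated by the error of the *previous* estimate
        print(h, abs(est - true), abs(est - last))
        last = est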
514
+ def check_termination(work):
515
+ """Terminate due to convergence, non-finite values, or error increase"""
516
+ stop = np.zeros_like(work.df).astype(bool)
517
+
518
+ i = work.error < work.atol + work.rtol*abs(work.df)
519
+ work.status[i] = eim._ECONVERGED
520
+ stop[i] = True
521
+
522
+ if work.nit > 0:
523
+ i = ~((np.isfinite(work.x) & np.isfinite(work.df)) | stop)
524
+ work.df[i], work.status[i] = np.nan, eim._EVALUEERR
525
+ stop[i] = True
526
+
527
+ # With infinite precision, there is a step size below which
528
+ # all smaller step sizes will reduce the error. But in floating point
529
+ # arithmetic, catastrophic cancellation will begin to cause the error
530
+ # to increase again. This heuristic tries to avoid step sizes that are
531
+ # too small. There may be more theoretically sound approaches for
532
+ # detecting a step size that minimizes the total error, but this
533
+ # heuristic seems simple and effective.
534
+ i = (work.error > work.error_last*10) & ~stop
535
+ work.status[i] = _EERRORINCREASE
536
+ stop[i] = True
537
+
538
+ return stop
539
+
540
+ def post_termination_check(work):
541
+ return
542
+
543
+ def customize_result(res, shape):
544
+ return shape
545
+
546
+ return eim._loop(work, callback, shape, maxiter, func, args, dtype,
547
+ pre_func_eval, post_func_eval, check_termination,
548
+ post_termination_check, customize_result, res_work_pairs,
549
+ preserve_shape)
550
+
551
+
552
+ def _differentiate_weights(work, n):
553
+ # This produces the weights of the finite difference formula for a given
554
+ # stencil. In experiments, use of a second-order central difference formula
555
+ # with Richardson extrapolation was more accurate numerically, but it was
556
+ # more complicated, and it would have become even more complicated when
557
+ # adding support for one-sided differences. However, now that all the
558
+ # function evaluation values are stored, they can be processed in whatever
559
+ # way is desired to produce the derivative estimate. We leave alternative
560
+ # approaches to future work. To be more self-contained, here is the theory
561
+ # for deriving the weights below.
562
+ #
563
+ # Recall that the Taylor expansion of a univariate, scalar-valued function
564
+ # about a point `x` may be expressed as:
565
+ # f(x + h) = f(x) + f'(x)*h + f''(x)/2!*h**2 + O(h**3)
566
+ # Suppose we evaluate f(x), f(x+h), and f(x-h). We have:
567
+ # f(x) = f(x)
568
+ # f(x + h) = f(x) + f'(x)*h + f''(x)/2!*h**2 + O(h**3)
569
+ # f(x - h) = f(x) - f'(x)*h + f''(x)/2!*h**2 + O(h**3)
570
+ # We can solve for weights `wi` such that:
571
+ # w1*f(x) = w1*(f(x))
572
+ # + w2*f(x + h) = w2*(f(x) + f'(x)*h + f''(x)/2!*h**2) + O(h**3)
573
+ # + w3*f(x - h) = w3*(f(x) - f'(x)*h + f''(x)/2!*h**2) + O(h**3)
574
+ # = 0 + f'(x)*h + 0 + O(h**3)
575
+ # Then
576
+ # f'(x) ~ (w1*f(x) + w2*f(x+h) + w3*f(x-h))/h
577
+ # is a finite difference derivative approximation with error O(h**2),
578
+ # and so it is said to be a "second-order" approximation. Under certain
579
+ # conditions (e.g. well-behaved function, `h` sufficiently small), the
580
+ # error in the approximation will decrease with h**2; that is, if `h` is
581
+ # reduced by a factor of 2, the error is reduced by a factor of 4.
582
+ #
583
+ # By default, we use eighth-order formulae. Our central-difference formula
584
+ # uses abscissae:
585
+ # x-h/c**3, x-h/c**2, x-h/c, x-h, x, x+h, x+h/c, x+h/c**2, x+h/c**3
586
+ # where `c` is the step factor. (Typically, the step factor is greater than
587
+ # one, so the outermost points - as written above - are actually closest to
588
+ # `x`.) This "stencil" is chosen so that each iteration, the step can be
589
+ # reduced by the factor `c`, and most of the function evaluations can be
590
+ # reused with the new step size. For example, in the next iteration, we
591
+ # will have:
592
+ # x-h/c**4, x-h/c**3, x-h/c**2, x-h/c, x, x+h/c, x+h/c**2, x+h/c**3, x+h/c**4
593
+ # We do not reuse `x-h` and `x+h` for the new derivative estimate.
594
+ # While this would increase the order of the formula and thus the
595
+ # theoretical convergence rate, it is also less stable numerically.
596
+ # (As noted above, there are other ways of processing the values that are
597
+ # more stable. Thus, even now we store `f(x-h)` and `f(x+h)` in `work.fs`
598
+ # to simplify future development of this sort of improvement.)
599
+ #
600
+ # The (right) one-sided formula is produced similarly using abscissae
601
+ # x, x+h, x+h/d, x+h/d**2, ..., x+h/d**6, x+h/d**7
602
+ # where `d` is the square root of `c`. (The left one-sided formula simply
603
+ # uses -h.) When the step size is reduced by factor `c = d**2`, we have
604
+ # abscissae:
605
+ # x, x+h/d**2, x+h/d**3, ..., x+h/d**8, x+h/d**9
606
+ # `d` is chosen as the square root of `c` so that the rate of the step-size
607
+ # reduction is the same per iteration as in the central difference case.
608
+ # Note that because the central difference formulas are inherently of even
609
+ # order, for simplicity, we use only even-order formulas for one-sided
610
+ # differences, too.
611
+
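As a concrete instance of the derivation above (a standalone sketch, not part of the module), solving the Vandermonde system for the simple three-point stencil x, x+h, x-h recovers the classic second-order central-difference weights:

    import numpy as np

    offsets = np.array([0., 1., -1.])          # in units of h: x, x+h, x-h
    A = np.vander(offsets, increasing=True).T  # row j holds offsets**j
    b = np.zeros(3)
    b[1] = 1.                                  # match only the f'(x)*h term
    w = np.linalg.solve(A, b)
    print(w)                                   # [ 0.   0.5 -0.5]
    # i.e. f'(x) ~ (0.5*f(x+h) - 0.5*f(x-h)) / h, the second-order formula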
612
+ # It's possible for the user to specify `fac` in, say, double precision but
613
+ # `x` and `args` in single precision. `fac` gets converted to single
614
+ # precision, but we should always use double precision for the intermediate
615
+ # calculations here to avoid additional error in the weights.
616
+ fac = work.fac.astype(np.float64)
617
+
618
+ # Note that if the user switches back to floating point precision with
619
+ # `x` and `args`, then `fac` will not necessarily equal the (lower
620
+ # precision) cached `_differentiate_weights.fac`, and the weights will
621
+ # need to be recalculated. This could be fixed, but it's late, and of
622
+ # low consequence.
623
+ if fac != _differentiate_weights.fac:
624
+ _differentiate_weights.central = []
625
+ _differentiate_weights.right = []
626
+ _differentiate_weights.fac = fac
627
+
628
+ if len(_differentiate_weights.central) != 2*n + 1:
629
+ # Central difference weights. Consider refactoring this; it could
630
+ # probably be more compact.
631
+ i = np.arange(-n, n + 1)
632
+ p = np.abs(i) - 1. # center point has power `p = -1`, but its sign `s` is 0
633
+ s = np.sign(i)
634
+
635
+ h = s / fac ** p
636
+ A = np.vander(h, increasing=True).T
637
+ b = np.zeros(2*n + 1)
638
+ b[1] = 1
639
+ weights = np.linalg.solve(A, b)
640
+
641
+ # Enforce identities to improve accuracy
642
+ weights[n] = 0
643
+ for i in range(n):
644
+ weights[-i-1] = -weights[i]
645
+
646
+ # Cache the weights. We only need to calculate them once unless
647
+ # the step factor changes.
648
+ _differentiate_weights.central = weights
649
+
650
+ # One-sided difference weights. The left one-sided weights (with
651
+ # negative steps) are simply the negative of the right one-sided
652
+ # weights, so no need to compute them separately.
653
+ i = np.arange(2*n + 1)
654
+ p = i - 1.
655
+ s = np.sign(i)
656
+
657
+ h = s / np.sqrt(fac) ** p
658
+ A = np.vander(h, increasing=True).T
659
+ b = np.zeros(2 * n + 1)
660
+ b[1] = 1
661
+ weights = np.linalg.solve(A, b)
662
+
663
+ _differentiate_weights.right = weights
664
+
665
+ return (_differentiate_weights.central.astype(work.dtype, copy=False),
666
+ _differentiate_weights.right.astype(work.dtype, copy=False))
667
+ _differentiate_weights.central = []
668
+ _differentiate_weights.right = []
669
+ _differentiate_weights.fac = None
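The caching idiom used above (state stored as attributes on the function object) in isolation, with a hypothetical `cached_weights` standing in for `_differentiate_weights`:

    def cached_weights(fac):
        if fac != cached_weights.fac:     # recompute only when the step factor changes
            cached_weights.w = 1.0 / fac  # stand-in for the Vandermonde solves above
            cached_weights.fac = fac
        return cached_weights.w

    cached_weights.fac = None             # initialize the cache, as done above
    cached_weights.w = None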
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (194 Bytes).
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_constants.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (36.1 kB).
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HConst.pxd ADDED
@@ -0,0 +1,106 @@
1
+ # cython: language_level=3
2
+
3
+ from libcpp cimport bool
4
+ from libcpp.string cimport string
5
+
6
+ cdef extern from "HConst.h" nogil:
7
+
8
+ const int HIGHS_CONST_I_INF "kHighsIInf"
9
+ const double HIGHS_CONST_INF "kHighsInf"
10
+ const double kHighsTiny
11
+ const double kHighsZero
12
+ const int kHighsThreadLimit
13
+
14
+ cdef enum HighsDebugLevel:
15
+ HighsDebugLevel_kHighsDebugLevelNone "kHighsDebugLevelNone" = 0
16
+ HighsDebugLevel_kHighsDebugLevelCheap "kHighsDebugLevelCheap"
17
+ HighsDebugLevel_kHighsDebugLevelCostly "kHighsDebugLevelCostly"
18
+ HighsDebugLevel_kHighsDebugLevelExpensive "kHighsDebugLevelExpensive"
19
+ HighsDebugLevel_kHighsDebugLevelMin "kHighsDebugLevelMin" = HighsDebugLevel_kHighsDebugLevelNone
20
+ HighsDebugLevel_kHighsDebugLevelMax "kHighsDebugLevelMax" = HighsDebugLevel_kHighsDebugLevelExpensive
21
+
22
+ ctypedef enum HighsModelStatus:
23
+ HighsModelStatusNOTSET "HighsModelStatus::kNotset" = 0
24
+ HighsModelStatusLOAD_ERROR "HighsModelStatus::kLoadError"
25
+ HighsModelStatusMODEL_ERROR "HighsModelStatus::kModelError"
26
+ HighsModelStatusPRESOLVE_ERROR "HighsModelStatus::kPresolveError"
27
+ HighsModelStatusSOLVE_ERROR "HighsModelStatus::kSolveError"
28
+ HighsModelStatusPOSTSOLVE_ERROR "HighsModelStatus::kPostsolveError"
29
+ HighsModelStatusMODEL_EMPTY "HighsModelStatus::kModelEmpty"
30
+ HighsModelStatusOPTIMAL "HighsModelStatus::kOptimal"
31
+ HighsModelStatusINFEASIBLE "HighsModelStatus::kInfeasible"
32
+ HighsModelStatus_UNBOUNDED_OR_INFEASIBLE "HighsModelStatus::kUnboundedOrInfeasible"
33
+ HighsModelStatusUNBOUNDED "HighsModelStatus::kUnbounded"
34
+ HighsModelStatusREACHED_DUAL_OBJECTIVE_VALUE_UPPER_BOUND "HighsModelStatus::kObjectiveBound"
35
+ HighsModelStatusREACHED_OBJECTIVE_TARGET "HighsModelStatus::kObjectiveTarget"
36
+ HighsModelStatusREACHED_TIME_LIMIT "HighsModelStatus::kTimeLimit"
37
+ HighsModelStatusREACHED_ITERATION_LIMIT "HighsModelStatus::kIterationLimit"
38
+ HighsModelStatusUNKNOWN "HighsModelStatus::kUnknown"
39
+ HighsModelStatusHIGHS_MODEL_STATUS_MIN "HighsModelStatus::kMin" = HighsModelStatusNOTSET
40
+ HighsModelStatusHIGHS_MODEL_STATUS_MAX "HighsModelStatus::kMax" = HighsModelStatusUNKNOWN
41
+
42
+ cdef enum HighsBasisStatus:
43
+ HighsBasisStatusLOWER "HighsBasisStatus::kLower" = 0, # (slack) variable is at its lower bound [including fixed variables]
44
+ HighsBasisStatusBASIC "HighsBasisStatus::kBasic" # (slack) variable is basic
45
+ HighsBasisStatusUPPER "HighsBasisStatus::kUpper" # (slack) variable is at its upper bound
46
+ HighsBasisStatusZERO "HighsBasisStatus::kZero" # free variable is non-basic and set to zero
47
+ HighsBasisStatusNONBASIC "HighsBasisStatus::kNonbasic" # nonbasic with no specific bound information - useful for users and postsolve
48
+
49
+ cdef enum SolverOption:
50
+ SOLVER_OPTION_SIMPLEX "SolverOption::SOLVER_OPTION_SIMPLEX" = -1
51
+ SOLVER_OPTION_CHOOSE "SolverOption::SOLVER_OPTION_CHOOSE"
52
+ SOLVER_OPTION_IPM "SolverOption::SOLVER_OPTION_IPM"
53
+
54
+ cdef enum PrimalDualStatus:
55
+ PrimalDualStatusSTATUS_NOT_SET "PrimalDualStatus::STATUS_NOT_SET" = -1
56
+ PrimalDualStatusSTATUS_MIN "PrimalDualStatus::STATUS_MIN" = PrimalDualStatusSTATUS_NOT_SET
57
+ PrimalDualStatusSTATUS_NO_SOLUTION "PrimalDualStatus::STATUS_NO_SOLUTION"
58
+ PrimalDualStatusSTATUS_UNKNOWN "PrimalDualStatus::STATUS_UNKNOWN"
59
+ PrimalDualStatusSTATUS_INFEASIBLE_POINT "PrimalDualStatus::STATUS_INFEASIBLE_POINT"
60
+ PrimalDualStatusSTATUS_FEASIBLE_POINT "PrimalDualStatus::STATUS_FEASIBLE_POINT"
61
+ PrimalDualStatusSTATUS_MAX "PrimalDualStatus::STATUS_MAX" = PrimalDualStatusSTATUS_FEASIBLE_POINT
62
+
63
+ cdef enum HighsOptionType:
64
+ HighsOptionTypeBOOL "HighsOptionType::kBool" = 0
65
+ HighsOptionTypeINT "HighsOptionType::kInt"
66
+ HighsOptionTypeDOUBLE "HighsOptionType::kDouble"
67
+ HighsOptionTypeSTRING "HighsOptionType::kString"
68
+
69
+ # workaround for lack of enum class support in Cython < 3.x
70
+ # cdef enum class ObjSense(int):
71
+ # ObjSenseMINIMIZE "ObjSense::kMinimize" = 1
72
+ # ObjSenseMAXIMIZE "ObjSense::kMaximize" = -1
73
+
74
+ cdef cppclass ObjSense:
75
+ pass
76
+
77
+ cdef ObjSense ObjSenseMINIMIZE "ObjSense::kMinimize"
78
+ cdef ObjSense ObjSenseMAXIMIZE "ObjSense::kMaximize"
79
+
80
+ # cdef enum class MatrixFormat(int):
81
+ # MatrixFormatkColwise "MatrixFormat::kColwise" = 1
82
+ # MatrixFormatkRowwise "MatrixFormat::kRowwise"
83
+ # MatrixFormatkRowwisePartitioned "MatrixFormat::kRowwisePartitioned"
84
+
85
+ cdef cppclass MatrixFormat:
86
+ pass
87
+
88
+ cdef MatrixFormat MatrixFormatkColwise "MatrixFormat::kColwise"
89
+ cdef MatrixFormat MatrixFormatkRowwise "MatrixFormat::kRowwise"
90
+ cdef MatrixFormat MatrixFormatkRowwisePartitioned "MatrixFormat::kRowwisePartitioned"
91
+
92
+ # cdef enum class HighsVarType(int):
93
+ # kContinuous "HighsVarType::kContinuous"
94
+ # kInteger "HighsVarType::kInteger"
95
+ # kSemiContinuous "HighsVarType::kSemiContinuous"
96
+ # kSemiInteger "HighsVarType::kSemiInteger"
97
+ # kImplicitInteger "HighsVarType::kImplicitInteger"
98
+
99
+ cdef cppclass HighsVarType:
100
+ pass
101
+
102
+ cdef HighsVarType kContinuous "HighsVarType::kContinuous"
103
+ cdef HighsVarType kInteger "HighsVarType::kInteger"
104
+ cdef HighsVarType kSemiContinuous "HighsVarType::kSemiContinuous"
105
+ cdef HighsVarType kSemiInteger "HighsVarType::kSemiInteger"
106
+ cdef HighsVarType kImplicitInteger "HighsVarType::kImplicitInteger"
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/Highs.pxd ADDED
@@ -0,0 +1,56 @@
1
+ # cython: language_level=3
2
+
3
+ from libc.stdio cimport FILE
4
+
5
+ from libcpp cimport bool
6
+ from libcpp.string cimport string
7
+
8
+ from .HighsStatus cimport HighsStatus
9
+ from .HighsOptions cimport HighsOptions
10
+ from .HighsInfo cimport HighsInfo
11
+ from .HighsLp cimport (
12
+ HighsLp,
13
+ HighsSolution,
14
+ HighsBasis,
15
+ ObjSense,
16
+ )
17
+ from .HConst cimport HighsModelStatus
18
+
19
+ cdef extern from "Highs.h":
20
+ # From HiGHS/src/Highs.h
21
+ cdef cppclass Highs:
22
+ HighsStatus passHighsOptions(const HighsOptions& options)
23
+ HighsStatus passModel(const HighsLp& lp)
24
+ HighsStatus run()
25
+ HighsStatus setHighsLogfile(FILE* logfile)
26
+ HighsStatus setHighsOutput(FILE* output)
27
+ HighsStatus writeHighsOptions(const string filename, const bool report_only_non_default_values = true)
28
+
29
+ # split up for cython below
30
+ #const HighsModelStatus& getModelStatus(const bool scaled_model = False) const
31
+ const HighsModelStatus & getModelStatus() const
32
+
33
+ const HighsInfo& getHighsInfo "getInfo" () const
34
+ string modelStatusToString(const HighsModelStatus model_status) const
35
+ #HighsStatus getHighsInfoValue(const string& info, int& value)
36
+ HighsStatus getHighsInfoValue(const string& info, double& value) const
37
+ const HighsOptions& getHighsOptions() const
38
+
39
+ const HighsLp& getLp() const
40
+
41
+ HighsStatus writeSolution(const string filename, const bool pretty) const
42
+
43
+ HighsStatus setBasis()
44
+ const HighsSolution& getSolution() const
45
+ const HighsBasis& getBasis() const
46
+
47
+ bool changeObjectiveSense(const ObjSense sense)
48
+
49
+ HighsStatus setHighsOptionValueBool "setOptionValue" (const string & option, const bool value)
50
+ HighsStatus setHighsOptionValueInt "setOptionValue" (const string & option, const int value)
51
+ HighsStatus setHighsOptionValueStr "setOptionValue" (const string & option, const string & value)
52
+ HighsStatus setHighsOptionValueDbl "setOptionValue" (const string & option, const double value)
53
+
54
+ string primalDualStatusToString(const int primal_dual_status)
55
+
56
+ void resetGlobalScheduler(bool blocking)
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsIO.pxd ADDED
@@ -0,0 +1,20 @@
1
+ # cython: language_level=3
2
+
3
+
4
+ cdef extern from "HighsIO.h" nogil:
5
+ # workaround for lack of enum class support in Cython < 3.x
6
+ # cdef enum class HighsLogType(int):
7
+ # kInfo "HighsLogType::kInfo" = 1
8
+ # kDetailed "HighsLogType::kDetailed"
9
+ # kVerbose "HighsLogType::kVerbose"
10
+ # kWarning "HighsLogType::kWarning"
11
+ # kError "HighsLogType::kError"
12
+
13
+ cdef cppclass HighsLogType:
14
+ pass
15
+
16
+ cdef HighsLogType kInfo "HighsLogType::kInfo"
17
+ cdef HighsLogType kDetailed "HighsLogType::kDetailed"
18
+ cdef HighsLogType kVerbose "HighsLogType::kVerbose"
19
+ cdef HighsLogType kWarning "HighsLogType::kWarning"
20
+ cdef HighsLogType kError "HighsLogType::kError"
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsInfo.pxd ADDED
@@ -0,0 +1,22 @@
1
+ # cython: language_level=3
2
+
3
+ cdef extern from "HighsInfo.h" nogil:
4
+ # From HiGHS/src/lp_data/HighsInfo.h
5
+ cdef cppclass HighsInfo:
6
+ # Inherited from HighsInfoStruct:
7
+ int mip_node_count
8
+ int simplex_iteration_count
9
+ int ipm_iteration_count
10
+ int crossover_iteration_count
11
+ int primal_solution_status
12
+ int dual_solution_status
13
+ int basis_validity
14
+ double objective_function_value
15
+ double mip_dual_bound
16
+ double mip_gap
17
+ int num_primal_infeasibilities
18
+ double max_primal_infeasibility
19
+ double sum_primal_infeasibilities
20
+ int num_dual_infeasibilities
21
+ double max_dual_infeasibility
22
+ double sum_dual_infeasibilities
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLp.pxd ADDED
@@ -0,0 +1,46 @@
1
+ # cython: language_level=3
2
+
3
+ from libcpp cimport bool
4
+ from libcpp.string cimport string
5
+ from libcpp.vector cimport vector
6
+
7
+ from .HConst cimport HighsBasisStatus, ObjSense, HighsVarType
8
+ from .HighsSparseMatrix cimport HighsSparseMatrix
9
+
10
+
11
+ cdef extern from "HighsLp.h" nogil:
12
+ # From HiGHS/src/lp_data/HighsLp.h
13
+ cdef cppclass HighsLp:
14
+ int num_col_
15
+ int num_row_
16
+
17
+ vector[double] col_cost_
18
+ vector[double] col_lower_
19
+ vector[double] col_upper_
20
+ vector[double] row_lower_
21
+ vector[double] row_upper_
22
+
23
+ HighsSparseMatrix a_matrix_
24
+
25
+ ObjSense sense_
26
+ double offset_
27
+
28
+ string model_name_
29
+
30
+ vector[string] row_names_
31
+ vector[string] col_names_
32
+
33
+ vector[HighsVarType] integrality_
34
+
35
+ bool isMip() const
36
+
37
+ cdef cppclass HighsSolution:
38
+ vector[double] col_value
39
+ vector[double] col_dual
40
+ vector[double] row_value
41
+ vector[double] row_dual
42
+
43
+ cdef cppclass HighsBasis:
44
+ bool valid_
45
+ vector[HighsBasisStatus] col_status
46
+ vector[HighsBasisStatus] row_status
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLpUtils.pxd ADDED
@@ -0,0 +1,9 @@
1
+ # cython: language_level=3
2
+
3
+ from .HighsStatus cimport HighsStatus
4
+ from .HighsLp cimport HighsLp
5
+ from .HighsOptions cimport HighsOptions
6
+
7
+ cdef extern from "HighsLpUtils.h" nogil:
8
+ # From HiGHS/src/lp_data/HighsLpUtils.h
9
+ HighsStatus assessLp(HighsLp& lp, const HighsOptions& options)
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsModelUtils.pxd ADDED
@@ -0,0 +1,10 @@
1
+ # cython: language_level=3
2
+
3
+ from libcpp.string cimport string
4
+
5
+ from .HConst cimport HighsModelStatus
6
+
7
+ cdef extern from "HighsModelUtils.h" nogil:
8
+ # From HiGHS/src/lp_data/HighsModelUtils.h
9
+ string utilHighsModelStatusToString(const HighsModelStatus model_status)
10
+ string utilBasisStatusToString(const int primal_dual_status)
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsOptions.pxd ADDED
@@ -0,0 +1,110 @@
1
+ # cython: language_level=3
2
+
3
+ from libc.stdio cimport FILE
4
+
5
+ from libcpp cimport bool
6
+ from libcpp.string cimport string
7
+ from libcpp.vector cimport vector
8
+
9
+ from .HConst cimport HighsOptionType
10
+
11
+ cdef extern from "HighsOptions.h" nogil:
12
+
13
+ cdef cppclass OptionRecord:
14
+ HighsOptionType type
15
+ string name
16
+ string description
17
+ bool advanced
18
+
19
+ cdef cppclass OptionRecordBool(OptionRecord):
20
+ bool* value
21
+ bool default_value
22
+
23
+ cdef cppclass OptionRecordInt(OptionRecord):
24
+ int* value
25
+ int lower_bound
26
+ int default_value
27
+ int upper_bound
28
+
29
+ cdef cppclass OptionRecordDouble(OptionRecord):
30
+ double* value
31
+ double lower_bound
32
+ double default_value
33
+ double upper_bound
34
+
35
+ cdef cppclass OptionRecordString(OptionRecord):
36
+ string* value
37
+ string default_value
38
+
39
+ cdef cppclass HighsOptions:
40
+ # From HighsOptionsStruct:
41
+
42
+ # Options read from the command line
43
+ string model_file
44
+ string presolve
45
+ string solver
46
+ string parallel
47
+ double time_limit
48
+ string options_file
49
+
50
+ # Options read from the file
51
+ double infinite_cost
52
+ double infinite_bound
53
+ double small_matrix_value
54
+ double large_matrix_value
55
+ double primal_feasibility_tolerance
56
+ double dual_feasibility_tolerance
57
+ double ipm_optimality_tolerance
58
+ double dual_objective_value_upper_bound
59
+ int highs_debug_level
60
+ int simplex_strategy
61
+ int simplex_scale_strategy
62
+ int simplex_crash_strategy
63
+ int simplex_dual_edge_weight_strategy
64
+ int simplex_primal_edge_weight_strategy
65
+ int simplex_iteration_limit
66
+ int simplex_update_limit
67
+ int ipm_iteration_limit
68
+ int highs_min_threads
69
+ int highs_max_threads
70
+ int message_level
71
+ string solution_file
72
+ bool write_solution_to_file
73
+ bool write_solution_pretty
74
+
75
+ # Advanced options
76
+ bool run_crossover
77
+ bool mps_parser_type_free
78
+ int keep_n_rows
79
+ int allowed_simplex_matrix_scale_factor
80
+ int allowed_simplex_cost_scale_factor
81
+ int simplex_dualise_strategy
82
+ int simplex_permute_strategy
83
+ int dual_simplex_cleanup_strategy
84
+ int simplex_price_strategy
85
+ int dual_chuzc_sort_strategy
86
+ bool simplex_initial_condition_check
87
+ double simplex_initial_condition_tolerance
88
+ double dual_steepest_edge_weight_log_error_threshhold
89
+ double dual_simplex_cost_perturbation_multiplier
90
+ double start_crossover_tolerance
91
+ bool less_infeasible_DSE_check
92
+ bool less_infeasible_DSE_choose_row
93
+ bool use_original_HFactor_logic
94
+
95
+ # Options for MIP solver
96
+ int mip_max_nodes
97
+ int mip_report_level
98
+
99
+ # Switch for MIP solver
100
+ bool mip
101
+
102
+ # Options for HighsPrintMessage and HighsLogMessage
103
+ FILE* logfile
104
+ FILE* output
105
+ int message_level
106
+ string solution_file
107
+ bool write_solution_to_file
108
+ bool write_solution_pretty
109
+
110
+ vector[OptionRecord*] records
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsRuntimeOptions.pxd ADDED
@@ -0,0 +1,9 @@
1
+ # cython: language_level=3
2
+
3
+ from libcpp cimport bool
4
+
5
+ from .HighsOptions cimport HighsOptions
6
+
7
+ cdef extern from "HighsRuntimeOptions.h" nogil:
8
+ # From HiGHS/src/lp_data/HighsRuntimeOptions.h
9
+ bool loadOptions(int argc, char** argv, HighsOptions& options)
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsStatus.pxd ADDED
@@ -0,0 +1,12 @@
1
+ # cython: language_level=3
2
+
3
+ from libcpp.string cimport string
4
+
5
+ cdef extern from "HighsStatus.h" nogil:
6
+ ctypedef enum HighsStatus:
7
+ HighsStatusError "HighsStatus::kError" = -1
8
+ HighsStatusOK "HighsStatus::kOk" = 0
9
+ HighsStatusWarning "HighsStatus::kWarning" = 1
10
+
11
+
12
+ string highsStatusToString(HighsStatus status)
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/SimplexConst.pxd ADDED
@@ -0,0 +1,95 @@
1
+ # cython: language_level=3
2
+
3
+ from libcpp cimport bool
4
+
5
+ cdef extern from "SimplexConst.h" nogil:
6
+
7
+ cdef enum SimplexAlgorithm:
8
+ PRIMAL "SimplexAlgorithm::kPrimal" = 0
9
+ DUAL "SimplexAlgorithm::kDual"
10
+
11
+ cdef enum SimplexStrategy:
12
+ SIMPLEX_STRATEGY_MIN "SimplexStrategy::kSimplexStrategyMin" = 0
13
+ SIMPLEX_STRATEGY_CHOOSE "SimplexStrategy::kSimplexStrategyChoose" = SIMPLEX_STRATEGY_MIN
14
+ SIMPLEX_STRATEGY_DUAL "SimplexStrategy::kSimplexStrategyDual"
15
+ SIMPLEX_STRATEGY_DUAL_PLAIN "SimplexStrategy::kSimplexStrategyDualPlain" = SIMPLEX_STRATEGY_DUAL
16
+ SIMPLEX_STRATEGY_DUAL_TASKS "SimplexStrategy::kSimplexStrategyDualTasks"
17
+ SIMPLEX_STRATEGY_DUAL_MULTI "SimplexStrategy::kSimplexStrategyDualMulti"
18
+ SIMPLEX_STRATEGY_PRIMAL "SimplexStrategy::kSimplexStrategyPrimal"
19
+ SIMPLEX_STRATEGY_MAX "SimplexStrategy::kSimplexStrategyMax" = SIMPLEX_STRATEGY_PRIMAL
20
+ SIMPLEX_STRATEGY_NUM "SimplexStrategy::kSimplexStrategyNum"
21
+
22
+ cdef enum SimplexCrashStrategy:
23
+ SIMPLEX_CRASH_STRATEGY_MIN "SimplexCrashStrategy::kSimplexCrashStrategyMin" = 0
24
+ SIMPLEX_CRASH_STRATEGY_OFF "SimplexCrashStrategy::kSimplexCrashStrategyOff" = SIMPLEX_CRASH_STRATEGY_MIN
25
+ SIMPLEX_CRASH_STRATEGY_LTSSF_K "SimplexCrashStrategy::kSimplexCrashStrategyLtssfK"
26
+ SIMPLEX_CRASH_STRATEGY_LTSSF "SimplexCrashStrategy::kSimplexCrashStrategyLtssf" = SIMPLEX_CRASH_STRATEGY_LTSSF_K
27
+ SIMPLEX_CRASH_STRATEGY_BIXBY "SimplexCrashStrategy::kSimplexCrashStrategyBixby"
28
+ SIMPLEX_CRASH_STRATEGY_LTSSF_PRI "SimplexCrashStrategy::kSimplexCrashStrategyLtssfPri"
29
+ SIMPLEX_CRASH_STRATEGY_LTSF_K "SimplexCrashStrategy::kSimplexCrashStrategyLtsfK"
30
+ SIMPLEX_CRASH_STRATEGY_LTSF_PRI "SimplexCrashStrategy::kSimplexCrashStrategyLtsfPri"
31
+ SIMPLEX_CRASH_STRATEGY_LTSF "SimplexCrashStrategy::kSimplexCrashStrategyLtsf"
32
+ SIMPLEX_CRASH_STRATEGY_BIXBY_NO_NONZERO_COL_COSTS "SimplexCrashStrategy::kSimplexCrashStrategyBixbyNoNonzeroColCosts"
33
+ SIMPLEX_CRASH_STRATEGY_BASIC "SimplexCrashStrategy::kSimplexCrashStrategyBasic"
34
+ SIMPLEX_CRASH_STRATEGY_TEST_SING "SimplexCrashStrategy::kSimplexCrashStrategyTestSing"
35
+ SIMPLEX_CRASH_STRATEGY_MAX "SimplexCrashStrategy::kSimplexCrashStrategyMax" = SIMPLEX_CRASH_STRATEGY_TEST_SING
36
+
37
+ cdef enum SimplexEdgeWeightStrategy:
38
+ SIMPLEX_EDGE_WEIGHT_STRATEGY_MIN "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyMin" = -1
39
+ SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyChoose" = SIMPLEX_EDGE_WEIGHT_STRATEGY_MIN
40
+ SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyDantzig"
41
+ SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyDevex"
42
+ SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategySteepestEdge"
43
+ SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE_UNIT_INITIAL "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategySteepestEdgeUnitInitial"
44
+ SIMPLEX_EDGE_WEIGHT_STRATEGY_MAX "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyMax" = SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE_UNIT_INITIAL
45
+
46
+ cdef enum SimplexPriceStrategy:
47
+ SIMPLEX_PRICE_STRATEGY_MIN = 0
48
+ SIMPLEX_PRICE_STRATEGY_COL = SIMPLEX_PRICE_STRATEGY_MIN
49
+ SIMPLEX_PRICE_STRATEGY_ROW
50
+ SIMPLEX_PRICE_STRATEGY_ROW_SWITCH
51
+ SIMPLEX_PRICE_STRATEGY_ROW_SWITCH_COL_SWITCH
52
+ SIMPLEX_PRICE_STRATEGY_MAX = SIMPLEX_PRICE_STRATEGY_ROW_SWITCH_COL_SWITCH
53
+
54
+ cdef enum SimplexDualChuzcStrategy:
55
+ SIMPLEX_DUAL_CHUZC_STRATEGY_MIN = 0
56
+ SIMPLEX_DUAL_CHUZC_STRATEGY_CHOOSE = SIMPLEX_DUAL_CHUZC_STRATEGY_MIN
57
+ SIMPLEX_DUAL_CHUZC_STRATEGY_QUAD
58
+ SIMPLEX_DUAL_CHUZC_STRATEGY_HEAP
59
+ SIMPLEX_DUAL_CHUZC_STRATEGY_BOTH
60
+ SIMPLEX_DUAL_CHUZC_STRATEGY_MAX = SIMPLEX_DUAL_CHUZC_STRATEGY_BOTH
61
+
62
+ cdef enum InvertHint:
63
+ INVERT_HINT_NO = 0
64
+ INVERT_HINT_UPDATE_LIMIT_REACHED
65
+ INVERT_HINT_SYNTHETIC_CLOCK_SAYS_INVERT
66
+ INVERT_HINT_POSSIBLY_OPTIMAL
67
+ INVERT_HINT_POSSIBLY_PRIMAL_UNBOUNDED
68
+ INVERT_HINT_POSSIBLY_DUAL_UNBOUNDED
69
+ INVERT_HINT_POSSIBLY_SINGULAR_BASIS
70
+ INVERT_HINT_PRIMAL_INFEASIBLE_IN_PRIMAL_SIMPLEX
71
+ INVERT_HINT_CHOOSE_COLUMN_FAIL
72
+ INVERT_HINT_Count
73
+
74
+ cdef enum DualEdgeWeightMode:
75
+ DANTZIG "DualEdgeWeightMode::DANTZIG" = 0
76
+ DEVEX "DualEdgeWeightMode::DEVEX"
77
+ STEEPEST_EDGE "DualEdgeWeightMode::STEEPEST_EDGE"
78
+ Count "DualEdgeWeightMode::Count"
79
+
80
+ cdef enum PriceMode:
81
+ ROW "PriceMode::ROW" = 0
82
+ COL "PriceMode::COL"
83
+
84
+ const int PARALLEL_THREADS_DEFAULT
85
+ const int DUAL_TASKS_MIN_THREADS
86
+ const int DUAL_MULTI_MIN_THREADS
87
+
88
+ const bool invert_if_row_out_negative
89
+
90
+ const int NONBASIC_FLAG_TRUE
91
+ const int NONBASIC_FLAG_FALSE
92
+
93
+ const int NONBASIC_MOVE_UP
94
+ const int NONBASIC_MOVE_DN
95
+ const int NONBASIC_MOVE_ZE
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/highs_c_api.pxd ADDED
@@ -0,0 +1,7 @@
1
+ # cython: language_level=3
2
+
3
+ cdef extern from "highs_c_api.h" nogil:
4
+ int Highs_passLp(void* highs, int numcol, int numrow, int numnz,
5
+ double* colcost, double* collower, double* colupper,
6
+ double* rowlower, double* rowupper,
7
+ int* astart, int* aindex, double* avalue)
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lbfgsb.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (125 kB).
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lbfgsb_py.py ADDED
@@ -0,0 +1,543 @@
1
+ """
2
+ Functions
3
+ ---------
4
+ .. autosummary::
5
+ :toctree: generated/
6
+
7
+ fmin_l_bfgs_b
8
+
9
+ """
10
+
11
+ ## License for the Python wrapper
12
+ ## ==============================
13
+
14
+ ## Copyright (c) 2004 David M. Cooke <[email protected]>
15
+
16
+ ## Permission is hereby granted, free of charge, to any person obtaining a
17
+ ## copy of this software and associated documentation files (the "Software"),
18
+ ## to deal in the Software without restriction, including without limitation
19
+ ## the rights to use, copy, modify, merge, publish, distribute, sublicense,
20
+ ## and/or sell copies of the Software, and to permit persons to whom the
21
+ ## Software is furnished to do so, subject to the following conditions:
22
+
23
+ ## The above copyright notice and this permission notice shall be included in
24
+ ## all copies or substantial portions of the Software.
25
+
26
+ ## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27
+ ## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28
+ ## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29
+ ## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30
+ ## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31
+ ## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
32
+ ## DEALINGS IN THE SOFTWARE.
33
+
34
+ ## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy
35
+
36
+ import numpy as np
37
+ from numpy import array, asarray, float64, zeros
38
+ from . import _lbfgsb
39
+ from ._optimize import (MemoizeJac, OptimizeResult, _call_callback_maybe_halt,
40
+ _wrap_callback, _check_unknown_options,
41
+ _prepare_scalar_function)
42
+ from ._constraints import old_bound_to_new
43
+
44
+ from scipy.sparse.linalg import LinearOperator
45
+
46
+ __all__ = ['fmin_l_bfgs_b', 'LbfgsInvHessProduct']
47
+
48
+
49
+ def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
50
+ approx_grad=0,
51
+ bounds=None, m=10, factr=1e7, pgtol=1e-5,
52
+ epsilon=1e-8,
53
+ iprint=-1, maxfun=15000, maxiter=15000, disp=None,
54
+ callback=None, maxls=20):
55
+ """
56
+ Minimize a function func using the L-BFGS-B algorithm.
57
+
58
+ Parameters
59
+ ----------
60
+ func : callable f(x,*args)
61
+ Function to minimize.
62
+ x0 : ndarray
63
+ Initial guess.
64
+ fprime : callable fprime(x,*args), optional
65
+ The gradient of `func`. If None, then `func` returns the function
66
+ value and the gradient (``f, g = func(x, *args)``), unless
67
+ `approx_grad` is True in which case `func` returns only ``f``.
68
+ args : sequence, optional
69
+ Arguments to pass to `func` and `fprime`.
70
+ approx_grad : bool, optional
71
+ Whether to approximate the gradient numerically (in which case
72
+ `func` returns only the function value).
73
+ bounds : list, optional
74
+ ``(min, max)`` pairs for each element in ``x``, defining
75
+ the bounds on that parameter. Use None or +-inf for one of ``min`` or
76
+ ``max`` when there is no bound in that direction.
77
+ m : int, optional
78
+ The maximum number of variable metric corrections
79
+ used to define the limited memory matrix. (The limited memory BFGS
80
+ method does not store the full hessian but uses this many terms in an
81
+ approximation to it.)
82
+ factr : float, optional
83
+ The iteration stops when
84
+ ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``,
85
+ where ``eps`` is the machine precision, which is automatically
86
+ generated by the code. Typical values for `factr` are: 1e12 for
87
+ low accuracy; 1e7 for moderate accuracy; 10.0 for extremely
88
+ high accuracy. See Notes for relationship to `ftol`, which is exposed
89
+ (instead of `factr`) by the `scipy.optimize.minimize` interface to
90
+ L-BFGS-B.
91
+ pgtol : float, optional
92
+ The iteration will stop when
93
+ ``max{|proj g_i | i = 1, ..., n} <= pgtol``
94
+ where ``proj g_i`` is the i-th component of the projected gradient.
95
+ epsilon : float, optional
96
+ Step size used when `approx_grad` is True, for numerically
97
+ calculating the gradient
98
+ iprint : int, optional
99
+ Controls the frequency of output. ``iprint < 0`` means no output;
100
+ ``iprint = 0`` print only one line at the last iteration;
101
+ ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations;
102
+ ``iprint = 99`` print details of every iteration except n-vectors;
103
+ ``iprint = 100`` print also the changes of active set and final x;
104
+ ``iprint > 100`` print details of every iteration including x and g.
105
+ disp : int, optional
106
+ If zero, then no output. If a positive number, then this over-rides
107
+ `iprint` (i.e., `iprint` gets the value of `disp`).
108
+ maxfun : int, optional
109
+ Maximum number of function evaluations. Note that this function
110
+ may violate the limit because of evaluating gradients by numerical
111
+ differentiation.
112
+ maxiter : int, optional
113
+ Maximum number of iterations.
114
+ callback : callable, optional
115
+ Called after each iteration, as ``callback(xk)``, where ``xk`` is the
116
+ current parameter vector.
117
+ maxls : int, optional
118
+ Maximum number of line search steps (per iteration). Default is 20.
119
+
120
+ Returns
121
+ -------
122
+ x : array_like
123
+ Estimated position of the minimum.
124
+ f : float
125
+ Value of `func` at the minimum.
126
+ d : dict
127
+ Information dictionary.
128
+
129
+ * d['warnflag'] is
130
+
131
+ - 0 if converged,
132
+ - 1 if too many function evaluations or too many iterations,
133
+ - 2 if stopped for another reason, given in d['task']
134
+
135
+ * d['grad'] is the gradient at the minimum (should be 0 ish)
136
+ * d['funcalls'] is the number of function calls made.
137
+ * d['nit'] is the number of iterations.
138
+
139
+ See also
140
+ --------
141
+ minimize: Interface to minimization algorithms for multivariate
142
+ functions. See the 'L-BFGS-B' `method` in particular. Note that the
143
+ `ftol` option is made available via that interface, while `factr` is
144
+ provided via this interface, where `factr` is the factor multiplying
145
+ the default machine floating-point precision to arrive at `ftol`:
146
+ ``ftol = factr * numpy.finfo(float).eps``.
147
+
148
+ Notes
149
+ -----
150
+ License of L-BFGS-B (FORTRAN code):
151
+
152
+ The version included here (in fortran code) is 3.0
153
+ (released April 25, 2011). It was written by Ciyou Zhu, Richard Byrd,
154
+ and Jorge Nocedal <[email protected]>. It carries the following
155
+ condition for use:
156
+
157
+ This software is freely available, but we expect that all publications
158
+ describing work using this software, or all commercial products using it,
159
+ quote at least one of the references given below. This software is released
160
+ under the BSD License.
161
+
162
+ References
163
+ ----------
164
+ * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound
165
+ Constrained Optimization, (1995), SIAM Journal on Scientific and
166
+ Statistical Computing, 16, 5, pp. 1190-1208.
167
+ * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
168
+ FORTRAN routines for large scale bound constrained optimization (1997),
169
+ ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560.
170
+ * J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B,
171
+ FORTRAN routines for large scale bound constrained optimization (2011),
172
+ ACM Transactions on Mathematical Software, 38, 1.
173
+
174
+ Examples
175
+ --------
176
+ Solve a linear regression problem via `fmin_l_bfgs_b`. To do this, first we define
177
+ an objective function ``f(m, b) = sum((y - y_model)**2)``, where `y` describes the
178
+ observations and `y_model` the prediction of the linear model as
179
+ ``y_model = m*x + b``. The bounds for the parameters, ``m`` and ``b``, are arbitrarily
180
+ chosen as ``(0,5)`` and ``(5,10)`` for this example.
181
+
182
+ >>> import numpy as np
183
+ >>> from scipy.optimize import fmin_l_bfgs_b
184
+ >>> X = np.arange(0, 10, 1)
185
+ >>> M = 2
186
+ >>> B = 3
187
+ >>> Y = M * X + B
188
+ >>> def func(parameters, *args):
189
+ ... x = args[0]
190
+ ... y = args[1]
191
+ ... m, b = parameters
192
+ ... y_model = m*x + b
193
+ ... error = sum(np.power((y - y_model), 2))
194
+ ... return error
195
+
196
+ >>> initial_values = np.array([0.0, 1.0])
197
+
198
+ >>> x_opt, f_opt, info = fmin_l_bfgs_b(func, x0=initial_values, args=(X, Y),
199
+ ... approx_grad=True)
200
+ >>> x_opt, f_opt
201
+ array([1.99999999, 3.00000006]), 1.7746231151323805e-14 # may vary
202
+
203
+ The optimized parameters in ``x_opt`` agree with the ground truth parameters
204
+ ``m`` and ``b``. Next, let us perform a bound constrained optimization using the `bounds`
205
+ parameter.
206
+
207
+ >>> bounds = [(0, 5), (5, 10)]
208
+ >>> x_opt, f_opt, info = fmin_l_bfgs_b(func, x0=initial_values, args=(X, Y),
209
+ ... approx_grad=True, bounds=bounds)
210
+ >>> x_opt, f_opt
211
+ array([1.65990508, 5.31649385]), 15.721334516453945 # may vary
212
+ """
213
+ # handle fprime/approx_grad
214
+ if approx_grad:
215
+ fun = func
216
+ jac = None
217
+ elif fprime is None:
218
+ fun = MemoizeJac(func)
219
+ jac = fun.derivative
220
+ else:
221
+ fun = func
222
+ jac = fprime
223
+
224
+ # build options
225
+ callback = _wrap_callback(callback)
226
+ opts = {'disp': disp,
227
+ 'iprint': iprint,
228
+ 'maxcor': m,
229
+ 'ftol': factr * np.finfo(float).eps,
230
+ 'gtol': pgtol,
231
+ 'eps': epsilon,
232
+ 'maxfun': maxfun,
233
+ 'maxiter': maxiter,
234
+ 'callback': callback,
235
+ 'maxls': maxls}
236
+
237
+ res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
238
+ **opts)
239
+ d = {'grad': res['jac'],
240
+ 'task': res['message'],
241
+ 'funcalls': res['nfev'],
242
+ 'nit': res['nit'],
243
+ 'warnflag': res['status']}
244
+ f = res['fun']
245
+ x = res['x']
246
+
247
+ return x, f, d
248
+
249
+
250
+ def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None,
251
+ disp=None, maxcor=10, ftol=2.2204460492503131e-09,
252
+ gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000,
253
+ iprint=-1, callback=None, maxls=20,
254
+ finite_diff_rel_step=None, **unknown_options):
255
+ """
256
+ Minimize a scalar function of one or more variables using the L-BFGS-B
257
+ algorithm.
258
+
259
+ Options
260
+ -------
261
+ disp : None or int
262
+ If `disp is None` (the default), then the supplied version of `iprint`
263
+ is used. If `disp is not None`, then it overrides the supplied version
264
+ of `iprint` (i.e., `iprint` gets the value of `disp`).
265
+ maxcor : int
266
+ The maximum number of variable metric corrections used to
267
+ define the limited memory matrix. (The limited memory BFGS
268
+ method does not store the full hessian but uses this many terms
269
+ in an approximation to it.)
270
+ ftol : float
271
+ The iteration stops when ``(f^k -
272
+ f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``.
273
+ gtol : float
274
+ The iteration will stop when ``max{|proj g_i | i = 1, ..., n}
275
+ <= gtol`` where ``proj g_i`` is the i-th component of the
276
+ projected gradient.
277
+ eps : float or ndarray
278
+ If `jac is None` the absolute step size used for numerical
279
+ approximation of the jacobian via forward differences.
280
+ maxfun : int
281
+ Maximum number of function evaluations. Note that this function
282
+ may violate the limit because of evaluating gradients by numerical
283
+ differentiation.
284
+ maxiter : int
285
+ Maximum number of iterations.
286
+ iprint : int, optional
287
+ Controls the frequency of output. ``iprint < 0`` means no output;
288
+ ``iprint = 0`` print only one line at the last iteration;
289
+ ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations;
290
+ ``iprint = 99`` print details of every iteration except n-vectors;
291
+ ``iprint = 100`` print also the changes of active set and final x;
292
+ ``iprint > 100`` print details of every iteration including x and g.
293
+ maxls : int, optional
294
+ Maximum number of line search steps (per iteration). Default is 20.
295
+ finite_diff_rel_step : None or array_like, optional
296
+ If `jac in ['2-point', '3-point', 'cs']` the relative step size to
297
+ use for numerical approximation of the jacobian. The absolute step
298
+ size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``,
299
+ possibly adjusted to fit into the bounds. For ``method='3-point'``
300
+ the sign of `h` is ignored. If None (default) then step is selected
301
+ automatically.
302
+
303
+ Notes
304
+ -----
305
+ The option `ftol` is exposed via the `scipy.optimize.minimize` interface,
306
+ but calling `scipy.optimize.fmin_l_bfgs_b` directly exposes `factr`. The
307
+ relationship between the two is ``ftol = factr * numpy.finfo(float).eps``.
308
+ I.e., `factr` multiplies the default machine floating-point precision to
309
+ arrive at `ftol`.
310
+
311
+ """
312
+ _check_unknown_options(unknown_options)
313
+ m = maxcor
314
+ pgtol = gtol
315
+ factr = ftol / np.finfo(float).eps
316
+
317
+ x0 = asarray(x0).ravel()
318
+ n, = x0.shape
319
+
320
+ # historically old-style bounds were/are expected by lbfgsb.
321
+ # That's still the case but we'll deal with new-style from here on,
322
+ # it's easier
323
+ if bounds is None:
324
+ pass
325
+ elif len(bounds) != n:
326
+ raise ValueError('length of x0 != length of bounds')
327
+ else:
328
+ bounds = np.array(old_bound_to_new(bounds))
329
+
330
+ # check bounds
331
+ if (bounds[0] > bounds[1]).any():
332
+ raise ValueError(
333
+ "LBFGSB - one of the lower bounds is greater than an upper bound."
334
+ )
335
+
336
+ # initial vector must lie within the bounds. Otherwise ScalarFunction and
337
+ # approx_derivative will cause problems
338
+ x0 = np.clip(x0, bounds[0], bounds[1])
339
+
340
+ if disp is not None:
341
+ if disp == 0:
342
+ iprint = -1
343
+ else:
344
+ iprint = disp
345
+
346
+ # _prepare_scalar_function can use bounds=None to represent no bounds
347
+ sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps,
348
+ bounds=bounds,
349
+ finite_diff_rel_step=finite_diff_rel_step)
350
+
351
+ func_and_grad = sf.fun_and_grad
352
+
353
+ fortran_int = _lbfgsb.types.intvar.dtype
354
+
355
+ nbd = zeros(n, fortran_int)
356
+ low_bnd = zeros(n, float64)
357
+ upper_bnd = zeros(n, float64)
358
+ bounds_map = {(-np.inf, np.inf): 0,
359
+ (1, np.inf): 1,
360
+ (1, 1): 2,
361
+ (-np.inf, 1): 3}
362
+
363
+ if bounds is not None:
364
+ for i in range(0, n):
365
+ l, u = bounds[0, i], bounds[1, i]
366
+ if not np.isinf(l):
367
+ low_bnd[i] = l
368
+ l = 1
369
+ if not np.isinf(u):
370
+ upper_bnd[i] = u
371
+ u = 1
372
+ nbd[i] = bounds_map[l, u]
373
+
374
+ if not maxls > 0:
375
+ raise ValueError('maxls must be positive.')
376
+
377
+ x = array(x0, float64)
378
+ f = array(0.0, float64)
379
+ g = zeros((n,), float64)
380
+ wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64)
381
+ iwa = zeros(3*n, fortran_int)
382
+ task = zeros(1, 'S60')
383
+ csave = zeros(1, 'S60')
384
+ lsave = zeros(4, fortran_int)
385
+ isave = zeros(44, fortran_int)
386
+ dsave = zeros(29, float64)
387
+
388
+ task[:] = 'START'
389
+
390
+ n_iterations = 0
391
+
392
+ while 1:
393
+ # g may become float32 if a user provides a function that calculates
394
+ # the Jacobian in float32 (see gh-18730). The underlying Fortran code
395
+ # expects float64, so upcast it
396
+ g = g.astype(np.float64)
397
+ # x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \
398
+ _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
399
+ pgtol, wa, iwa, task, iprint, csave, lsave,
400
+ isave, dsave, maxls)
401
+ task_str = task.tobytes()
402
+ if task_str.startswith(b'FG'):
403
+ # The minimization routine wants f and g at the current x.
404
+ # Note that interruptions due to maxfun are postponed
405
+ # until the completion of the current minimization iteration.
406
+ # Overwrite f and g:
407
+ f, g = func_and_grad(x)
408
+ elif task_str.startswith(b'NEW_X'):
409
+ # new iteration
410
+ n_iterations += 1
411
+
412
+ intermediate_result = OptimizeResult(x=x, fun=f)
413
+ if _call_callback_maybe_halt(callback, intermediate_result):
414
+ task[:] = 'STOP: CALLBACK REQUESTED HALT'
415
+ if n_iterations >= maxiter:
416
+ task[:] = 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT'
417
+ elif sf.nfev > maxfun:
418
+ task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS '
419
+ 'EXCEEDS LIMIT')
420
+ else:
421
+ break
422
+
423
+ task_str = task.tobytes().strip(b'\x00').strip()
424
+ if task_str.startswith(b'CONV'):
425
+ warnflag = 0
426
+ elif sf.nfev > maxfun or n_iterations >= maxiter:
427
+ warnflag = 1
428
+ else:
429
+ warnflag = 2
430
+
431
+ # These two portions of the workspace are described in the mainlb
432
+ # subroutine in lbfgsb.f. See line 363.
433
+ s = wa[0: m*n].reshape(m, n)
434
+ y = wa[m*n: 2*m*n].reshape(m, n)
435
+
436
+ # See lbfgsb.f line 160 for this portion of the workspace.
437
+ # isave(31) = the total number of BFGS updates prior the current iteration;
438
+ n_bfgs_updates = isave[30]
439
+
440
+ n_corrs = min(n_bfgs_updates, maxcor)
441
+ hess_inv = LbfgsInvHessProduct(s[:n_corrs], y[:n_corrs])
442
+
443
+ task_str = task_str.decode()
444
+ return OptimizeResult(fun=f, jac=g, nfev=sf.nfev,
445
+ njev=sf.ngev,
446
+ nit=n_iterations, status=warnflag, message=task_str,
447
+ x=x, success=(warnflag == 0), hess_inv=hess_inv)
448
+
449
+
450
+ class LbfgsInvHessProduct(LinearOperator):
451
+ """Linear operator for the L-BFGS approximate inverse Hessian.
452
+
453
+ This operator computes the product of a vector with the approximate inverse
454
+ of the Hessian of the objective function, using the L-BFGS limited
455
+ memory approximation to the inverse Hessian, accumulated during the
456
+ optimization.
457
+
458
+ Objects of this class implement the ``scipy.sparse.linalg.LinearOperator``
459
+ interface.
460
+
461
+ Parameters
462
+ ----------
463
+ sk : array_like, shape=(n_corr, n)
464
+ Array of `n_corr` most recent updates to the solution vector.
465
+ (See [1]).
466
+ yk : array_like, shape=(n_corr, n)
467
+ Array of `n_corr` most recent updates to the gradient. (See [1]).
468
+
469
+ References
470
+ ----------
471
+ .. [1] Nocedal, Jorge. "Updating quasi-Newton matrices with limited
472
+ storage." Mathematics of computation 35.151 (1980): 773-782.
473
+
474
+ """
475
+
476
+ def __init__(self, sk, yk):
477
+ """Construct the operator."""
478
+ if sk.shape != yk.shape or sk.ndim != 2:
479
+ raise ValueError('sk and yk must have matching shape, (n_corrs, n)')
480
+ n_corrs, n = sk.shape
481
+
482
+ super().__init__(dtype=np.float64, shape=(n, n))
483
+
484
+ self.sk = sk
485
+ self.yk = yk
486
+ self.n_corrs = n_corrs
487
+ self.rho = 1 / np.einsum('ij,ij->i', sk, yk)
488
+
489
+ def _matvec(self, x):
490
+ """Efficient matrix-vector multiply with the BFGS matrices.
491
+
492
+ This calculation is described in Section (4) of [1].
493
+
494
+ Parameters
495
+ ----------
496
+ x : ndarray
497
+ An array with shape (n,) or (n,1).
498
+
499
+ Returns
500
+ -------
501
+ y : ndarray
502
+ The matrix-vector product
503
+
504
+ """
505
+ s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho
506
+ q = np.array(x, dtype=self.dtype, copy=True)
507
+ if q.ndim == 2 and q.shape[1] == 1:
508
+ q = q.reshape(-1)
509
+
510
+ alpha = np.empty(n_corrs)
511
+
512
+ for i in range(n_corrs-1, -1, -1):
513
+ alpha[i] = rho[i] * np.dot(s[i], q)
514
+ q = q - alpha[i]*y[i]
515
+
516
+ r = q
517
+ for i in range(n_corrs):
518
+ beta = rho[i] * np.dot(y[i], r)
519
+ r = r + s[i] * (alpha[i] - beta)
520
+
521
+ return r
522
+
523
+ def todense(self):
524
+ """Return a dense array representation of this operator.
525
+
526
+ Returns
527
+ -------
528
+ arr : ndarray, shape=(n, n)
529
+ An array with the same shape and containing
530
+ the same data represented by this `LinearOperator`.
531
+
532
+ """
533
+ s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho
534
+ I = np.eye(*self.shape, dtype=self.dtype)
535
+ Hk = I
536
+
537
+ for i in range(n_corrs):
538
+ A1 = I - s[i][:, np.newaxis] * y[i][np.newaxis, :] * rho[i]
539
+ A2 = I - y[i][:, np.newaxis] * s[i][np.newaxis, :] * rho[i]
540
+
541
+ Hk = np.dot(A1, np.dot(Hk, A2)) + (rho[i] * s[i][:, np.newaxis] *
542
+ s[i][np.newaxis, :])
543
+ return Hk
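A minimal usage sketch (`LbfgsInvHessProduct` is re-exported from `scipy.optimize`; the data here are made up): the two-loop `_matvec` product agrees with the explicitly accumulated dense matrix:

    import numpy as np
    from scipy.optimize import LbfgsInvHessProduct

    sk = np.array([[1.0, 0.0], [0.0, 1.0]])  # recent steps s_k
    yk = np.array([[2.0, 0.0], [0.0, 3.0]])  # recent gradient differences y_k
    hess_inv = LbfgsInvHessProduct(sk, yk)

    v = np.array([1.0, 1.0])
    print(hess_inv.matvec(v))                # two-loop recursion: [0.5  0.333...]
    print(hess_inv.todense() @ v)            # same product, up to rounding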
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linesearch.py ADDED
@@ -0,0 +1,897 @@
1
+ """
2
+ Functions
3
+ ---------
4
+ .. autosummary::
5
+ :toctree: generated/
6
+
7
+ line_search_armijo
8
+ line_search_wolfe1
9
+ line_search_wolfe2
10
+ scalar_search_wolfe1
11
+ scalar_search_wolfe2
12
+
13
+ """
14
+ from warnings import warn
15
+
16
+ from scipy.optimize import _minpack2 as minpack2 # noqa: F401
17
+ from ._dcsrch import DCSRCH
18
+ import numpy as np
19
+
20
+ __all__ = ['LineSearchWarning', 'line_search_wolfe1', 'line_search_wolfe2',
21
+ 'scalar_search_wolfe1', 'scalar_search_wolfe2',
22
+ 'line_search_armijo']
23
+
24
+ class LineSearchWarning(RuntimeWarning):
25
+ pass
26
+
27
+
28
+ def _check_c1_c2(c1, c2):
29
+ if not (0 < c1 < c2 < 1):
30
+ raise ValueError("'c1' and 'c2' do not satisfy "
31
+ "'0 < c1 < c2 < 1'.")
32
+
33
+
34
+ #------------------------------------------------------------------------------
35
+ # Minpack's Wolfe line and scalar searches
36
+ #------------------------------------------------------------------------------
37
+
38
+ def line_search_wolfe1(f, fprime, xk, pk, gfk=None,
39
+ old_fval=None, old_old_fval=None,
40
+ args=(), c1=1e-4, c2=0.9, amax=50, amin=1e-8,
41
+ xtol=1e-14):
42
+ """
43
+ As `scalar_search_wolfe1` but do a line search along direction `pk`
44
+
45
+ Parameters
46
+ ----------
47
+ f : callable
48
+ Function `f(x)`
49
+ fprime : callable
50
+ Gradient of `f`
51
+ xk : array_like
52
+ Current point
53
+ pk : array_like
54
+ Search direction
55
+ gfk : array_like, optional
56
+ Gradient of `f` at point `xk`
57
+ old_fval : float, optional
58
+ Value of `f` at point `xk`
59
+ old_old_fval : float, optional
60
+ Value of `f` at point preceding `xk`
61
+
62
+ The rest of the parameters are the same as for `scalar_search_wolfe1`.
63
+
64
+ Returns
65
+ -------
66
+ stp, f_count, g_count, fval, old_fval
67
+ Step size, evaluation counts for `f` and `fprime`, and the values of `f` at the new point and at `xk`
68
+ gval : array
69
+ Gradient of `f` at the final point
70
+
71
+ Notes
72
+ -----
73
+ Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``.
74
+
75
+ """
76
+ if gfk is None:
77
+ gfk = fprime(xk, *args)
78
+
79
+ gval = [gfk]
80
+ gc = [0]
81
+ fc = [0]
82
+
83
+ def phi(s):
84
+ fc[0] += 1
85
+ return f(xk + s*pk, *args)
86
+
87
+ def derphi(s):
88
+ gval[0] = fprime(xk + s*pk, *args)
89
+ gc[0] += 1
90
+ return np.dot(gval[0], pk)
91
+
92
+ derphi0 = np.dot(gfk, pk)
93
+
94
+ stp, fval, old_fval = scalar_search_wolfe1(
95
+ phi, derphi, old_fval, old_old_fval, derphi0,
96
+ c1=c1, c2=c2, amax=amax, amin=amin, xtol=xtol)
97
+
98
+ return stp, fc[0], gc[0], fval, old_fval, gval[0]
99
+
100
+
101
+ def scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None,
102
+ c1=1e-4, c2=0.9,
103
+ amax=50, amin=1e-8, xtol=1e-14):
104
+ """
105
+ Scalar function search for alpha that satisfies strong Wolfe conditions
106
+
107
+ The underlying search direction is assumed to be a descent direction, i.e. ``derphi(0) < 0``.
108
+
109
+ Parameters
110
+ ----------
111
+ phi : callable phi(alpha)
112
+ Function at point `alpha`
113
+ derphi : callable phi'(alpha)
114
+ Objective function derivative. Returns a scalar.
115
+ phi0 : float, optional
116
+ Value of phi at 0
117
+ old_phi0 : float, optional
118
+ Value of phi at previous point
119
+ derphi0 : float, optional
120
+ Value of derphi at 0
121
+ c1 : float, optional
122
+ Parameter for Armijo condition rule.
123
+ c2 : float, optional
124
+ Parameter for curvature condition rule.
125
+ amax, amin : float, optional
126
+ Maximum and minimum step size
127
+ xtol : float, optional
128
+ Relative tolerance for an acceptable step.
129
+
130
+ Returns
131
+ -------
132
+ alpha : float
133
+ Step size, or None if no suitable step was found
134
+ phi : float
135
+ Value of `phi` at the new point `alpha`
136
+ phi0 : float
137
+ Value of `phi` at `alpha=0`
138
+
139
+ Notes
140
+ -----
141
+ Uses routine DCSRCH from MINPACK.
142
+
143
+ Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1`` as described in [1]_.
144
+
145
+ References
146
+ ----------
147
+
148
+ .. [1] Nocedal, J., & Wright, S. J. (2006). Numerical optimization.
149
+ In Springer Series in Operations Research and Financial Engineering.
150
+ (Springer Series in Operations Research and Financial Engineering).
151
+ Springer Nature.
152
+
153
+ """
154
+ _check_c1_c2(c1, c2)
155
+
156
+ if phi0 is None:
157
+ phi0 = phi(0.)
158
+ if derphi0 is None:
159
+ derphi0 = derphi(0.)
160
+
161
+ if old_phi0 is not None and derphi0 != 0:
162
+ alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)
163
+ if alpha1 < 0:
164
+ alpha1 = 1.0
165
+ else:
166
+ alpha1 = 1.0
167
+
168
+ maxiter = 100
169
+
170
+ dcsrch = DCSRCH(phi, derphi, c1, c2, xtol, amin, amax)
171
+ stp, phi1, phi0, task = dcsrch(
172
+ alpha1, phi0=phi0, derphi0=derphi0, maxiter=maxiter
173
+ )
174
+
175
+ return stp, phi1, phi0
176
+
177
+
178
+ line_search = line_search_wolfe1
179
+
180
+
181
+ #------------------------------------------------------------------------------
182
+ # Pure-Python Wolfe line and scalar searches
183
+ #------------------------------------------------------------------------------
184
+
185
+ # Note: `line_search_wolfe2` is the public `scipy.optimize.line_search`
186
+
187
+ def line_search_wolfe2(f, myfprime, xk, pk, gfk=None, old_fval=None,
188
+ old_old_fval=None, args=(), c1=1e-4, c2=0.9, amax=None,
189
+ extra_condition=None, maxiter=10):
190
+ """Find alpha that satisfies strong Wolfe conditions.
191
+
192
+ Parameters
193
+ ----------
194
+ f : callable f(x,*args)
195
+ Objective function.
196
+ myfprime : callable f'(x,*args)
197
+ Objective function gradient.
198
+ xk : ndarray
199
+ Starting point.
200
+ pk : ndarray
201
+ Search direction. The search direction must be a descent direction
202
+ for the algorithm to converge.
203
+ gfk : ndarray, optional
204
+ Gradient value for x=xk (xk being the current parameter
205
+ estimate). Will be recomputed if omitted.
206
+ old_fval : float, optional
207
+ Function value for x=xk. Will be recomputed if omitted.
208
+ old_old_fval : float, optional
209
+ Function value for the point preceding x=xk.
210
+ args : tuple, optional
211
+ Additional arguments passed to objective function.
212
+ c1 : float, optional
213
+ Parameter for Armijo condition rule.
214
+ c2 : float, optional
215
+ Parameter for curvature condition rule.
216
+ amax : float, optional
217
+ Maximum step size
218
+ extra_condition : callable, optional
219
+ A callable of the form ``extra_condition(alpha, x, f, g)``
220
+ returning a boolean. Arguments are the proposed step ``alpha``
221
+ and the corresponding ``x``, ``f`` and ``g`` values. The line search
222
+ accepts the value of ``alpha`` only if this
223
+ callable returns ``True``. If the callable returns ``False``
224
+ for the step length, the algorithm will continue with
225
+ new iterates. The callable is only called for iterates
226
+ satisfying the strong Wolfe conditions.
227
+ maxiter : int, optional
228
+ Maximum number of iterations to perform.
229
+
230
+ Returns
231
+ -------
232
+ alpha : float or None
233
+ Alpha for which ``x_new = x0 + alpha * pk``,
234
+ or None if the line search algorithm did not converge.
235
+ fc : int
236
+ Number of function evaluations made.
237
+ gc : int
238
+ Number of gradient evaluations made.
239
+ new_fval : float or None
240
+ New function value ``f(x_new)=f(x0+alpha*pk)``,
241
+ or None if the line search algorithm did not converge.
242
+ old_fval : float
243
+ Old function value ``f(x0)``.
244
+ new_slope : ndarray or None
245
+ The gradient ``myfprime(x_new)`` at the accepted point, from
246
+ which the local slope ``<myfprime(x_new), pk>`` can be formed,
247
+ or None if the line search algorithm did not converge.
248
+
249
+
250
+ Notes
251
+ -----
252
+ Uses the line search algorithm to enforce strong Wolfe
253
+ conditions. See Wright and Nocedal, 'Numerical Optimization',
254
+ 1999, pp. 59-61.
255
+
256
+ The search direction `pk` must be a descent direction (e.g.
257
+ ``-myfprime(xk)``) to find a step length that satisfies the strong Wolfe
258
+ conditions. If the search direction is not a descent direction (e.g.
259
+ ``myfprime(xk)``), then `alpha`, `new_fval`, and `new_slope` will be None.
260
+
261
+ Examples
262
+ --------
263
+ >>> import numpy as np
264
+ >>> from scipy.optimize import line_search
265
+
266
+ An objective function and its gradient are defined.
267
+
268
+ >>> def obj_func(x):
269
+ ... return (x[0])**2+(x[1])**2
270
+ >>> def obj_grad(x):
271
+ ... return [2*x[0], 2*x[1]]
272
+
273
+ We can find alpha that satisfies strong Wolfe conditions.
274
+
275
+ >>> start_point = np.array([1.8, 1.7])
276
+ >>> search_gradient = np.array([-1.0, -1.0])
277
+ >>> line_search(obj_func, obj_grad, start_point, search_gradient)
278
+ (1.0, 2, 1, 1.1300000000000001, 6.13, [1.6, 1.4])
279
+
280
+ """
281
+ fc = [0]
282
+ gc = [0]
283
+ gval = [None]
284
+ gval_alpha = [None]
285
+
286
+ def phi(alpha):
287
+ fc[0] += 1
288
+ return f(xk + alpha * pk, *args)
289
+
290
+ fprime = myfprime
291
+
292
+ def derphi(alpha):
293
+ gc[0] += 1
294
+ gval[0] = fprime(xk + alpha * pk, *args) # store for later use
295
+ gval_alpha[0] = alpha
296
+ return np.dot(gval[0], pk)
297
+
298
+ if gfk is None:
299
+ gfk = fprime(xk, *args)
300
+ derphi0 = np.dot(gfk, pk)
301
+
302
+ if extra_condition is not None:
303
+ # Add the current gradient as argument, to avoid needless
304
+ # re-evaluation
305
+ def extra_condition2(alpha, phi):
306
+ if gval_alpha[0] != alpha:
307
+ derphi(alpha)
308
+ x = xk + alpha * pk
309
+ return extra_condition(alpha, x, phi, gval[0])
310
+ else:
311
+ extra_condition2 = None
312
+
313
+ alpha_star, phi_star, old_fval, derphi_star = scalar_search_wolfe2(
314
+ phi, derphi, old_fval, old_old_fval, derphi0, c1, c2, amax,
315
+ extra_condition2, maxiter=maxiter)
316
+
317
+ if derphi_star is None:
318
+ warn('The line search algorithm did not converge',
319
+ LineSearchWarning, stacklevel=2)
320
+ else:
321
+ # derphi_star is a scalar (the directional derivative); replace it
322
+ # with the most recently computed gradient, gval[0], which was used
323
+ # to compute it (derphi = gfk @ pk). This is the gradient at the new
324
+ # point, so it need not be recomputed in the outer loop.
325
+ derphi_star = gval[0]
326
+
327
+ return alpha_star, fc[0], gc[0], phi_star, old_fval, derphi_star
328
+
329
+
330
+ def scalar_search_wolfe2(phi, derphi, phi0=None,
331
+ old_phi0=None, derphi0=None,
332
+ c1=1e-4, c2=0.9, amax=None,
333
+ extra_condition=None, maxiter=10):
334
+ """Find alpha that satisfies strong Wolfe conditions.
335
+
336
+ The underlying search direction is assumed to be a descent direction, i.e. ``derphi(0) < 0``.
337
+
338
+ Parameters
339
+ ----------
340
+ phi : callable phi(alpha)
341
+ Objective scalar function.
342
+ derphi : callable phi'(alpha)
343
+ Objective function derivative. Returns a scalar.
344
+ phi0 : float, optional
345
+ Value of phi at 0.
346
+ old_phi0 : float, optional
347
+ Value of phi at previous point.
348
+ derphi0 : float, optional
349
+ Value of derphi at 0
350
+ c1 : float, optional
351
+ Parameter for Armijo condition rule.
352
+ c2 : float, optional
353
+ Parameter for curvature condition rule.
354
+ amax : float, optional
355
+ Maximum step size.
356
+ extra_condition : callable, optional
357
+ A callable of the form ``extra_condition(alpha, phi_value)``
358
+ returning a boolean. The line search accepts the value
359
+ of ``alpha`` only if this callable returns ``True``.
360
+ If the callable returns ``False`` for the step length,
361
+ the algorithm will continue with new iterates.
362
+ The callable is only called for iterates satisfying
363
+ the strong Wolfe conditions.
364
+ maxiter : int, optional
365
+ Maximum number of iterations to perform.
366
+
367
+ Returns
368
+ -------
369
+ alpha_star : float or None
370
+ Best alpha, or None if the line search algorithm did not converge.
371
+ phi_star : float
372
+ phi at alpha_star.
373
+ phi0 : float
374
+ phi at 0.
375
+ derphi_star : float or None
376
+ derphi at alpha_star, or None if the line search algorithm
377
+ did not converge.
378
+
379
+ Notes
380
+ -----
381
+ Uses the line search algorithm to enforce strong Wolfe
382
+ conditions. See Wright and Nocedal, 'Numerical Optimization',
383
+ 1999, pp. 59-61.
384
+
385
+ """
386
+ _check_c1_c2(c1, c2)
387
+
388
+ if phi0 is None:
389
+ phi0 = phi(0.)
390
+
391
+ if derphi0 is None:
392
+ derphi0 = derphi(0.)
393
+
394
+ alpha0 = 0
395
+ if old_phi0 is not None and derphi0 != 0:
396
+ alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)
397
+ else:
398
+ alpha1 = 1.0
399
+
400
+ if alpha1 < 0:
401
+ alpha1 = 1.0
402
+
403
+ if amax is not None:
404
+ alpha1 = min(alpha1, amax)
405
+
406
+ phi_a1 = phi(alpha1)
407
+ #derphi_a1 = derphi(alpha1) evaluated below
408
+
409
+ phi_a0 = phi0
410
+ derphi_a0 = derphi0
411
+
412
+ if extra_condition is None:
413
+ def extra_condition(alpha, phi):
414
+ return True
415
+
416
+ for i in range(maxiter):
417
+ if alpha1 == 0 or (amax is not None and alpha0 > amax):
418
+ # alpha1 == 0: This shouldn't happen. Perhaps the increment has
419
+ # slipped below machine precision?
420
+ alpha_star = None
421
+ phi_star = phi0
422
+ phi0 = old_phi0
423
+ derphi_star = None
424
+
425
+ if alpha1 == 0:
426
+ msg = 'Rounding errors prevent the line search from converging'
427
+ else:
428
+ msg = "The line search algorithm could not find a solution " + \
429
+ "less than or equal to amax: %s" % amax
430
+
431
+ warn(msg, LineSearchWarning, stacklevel=2)
432
+ break
433
+
434
+ not_first_iteration = i > 0
435
+ if (phi_a1 > phi0 + c1 * alpha1 * derphi0) or \
436
+ ((phi_a1 >= phi_a0) and not_first_iteration):
437
+ alpha_star, phi_star, derphi_star = \
438
+ _zoom(alpha0, alpha1, phi_a0,
439
+ phi_a1, derphi_a0, phi, derphi,
440
+ phi0, derphi0, c1, c2, extra_condition)
441
+ break
442
+
443
+ derphi_a1 = derphi(alpha1)
444
+ if (abs(derphi_a1) <= -c2*derphi0):
445
+ if extra_condition(alpha1, phi_a1):
446
+ alpha_star = alpha1
447
+ phi_star = phi_a1
448
+ derphi_star = derphi_a1
449
+ break
450
+
451
+ if (derphi_a1 >= 0):
452
+ alpha_star, phi_star, derphi_star = \
453
+ _zoom(alpha1, alpha0, phi_a1,
454
+ phi_a0, derphi_a1, phi, derphi,
455
+ phi0, derphi0, c1, c2, extra_condition)
456
+ break
457
+
458
+ alpha2 = 2 * alpha1 # increase by factor of two on each iteration
459
+ if amax is not None:
460
+ alpha2 = min(alpha2, amax)
461
+ alpha0 = alpha1
462
+ alpha1 = alpha2
463
+ phi_a0 = phi_a1
464
+ phi_a1 = phi(alpha1)
465
+ derphi_a0 = derphi_a1
466
+
467
+ else:
468
+ # stopping test maxiter reached
469
+ alpha_star = alpha1
470
+ phi_star = phi_a1
471
+ derphi_star = None
472
+ warn('The line search algorithm did not converge',
473
+ LineSearchWarning, stacklevel=2)
474
+
475
+ return alpha_star, phi_star, phi0, derphi_star
476
+
477
+
478
+ def _cubicmin(a, fa, fpa, b, fb, c, fc):
479
+ """
480
+ Finds the minimizer for a cubic polynomial that goes through the
481
+ points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa.
482
+
483
+ If no minimizer can be found, return None.
484
+
485
+ """
486
+ # f(x) = A *(x-a)^3 + B*(x-a)^2 + C*(x-a) + D
487
+
488
+ with np.errstate(divide='raise', over='raise', invalid='raise'):
489
+ try:
490
+ C = fpa
491
+ db = b - a
492
+ dc = c - a
493
+ denom = (db * dc) ** 2 * (db - dc)
494
+ d1 = np.empty((2, 2))
495
+ d1[0, 0] = dc ** 2
496
+ d1[0, 1] = -db ** 2
497
+ d1[1, 0] = -dc ** 3
498
+ d1[1, 1] = db ** 3
499
+ [A, B] = np.dot(d1, np.asarray([fb - fa - C * db,
500
+ fc - fa - C * dc]).flatten())
501
+ A /= denom
502
+ B /= denom
503
+ radical = B * B - 3 * A * C
504
+ xmin = a + (-B + np.sqrt(radical)) / (3 * A)
505
+ except ArithmeticError:
506
+ return None
507
+ if not np.isfinite(xmin):
508
+ return None
509
+ return xmin
510
+
511
+
512
+ def _quadmin(a, fa, fpa, b, fb):
513
+ """
514
+ Finds the minimizer for a quadratic polynomial that goes through
515
+ the points (a,fa), (b,fb) with derivative at a of fpa.
516
+
517
+ """
518
+ # f(x) = B*(x-a)^2 + C*(x-a) + D
519
+ with np.errstate(divide='raise', over='raise', invalid='raise'):
520
+ try:
521
+ D = fa
522
+ C = fpa
523
+ db = b - a * 1.0
524
+ B = (fb - D - C * db) / (db * db)
525
+ xmin = a - C / (2.0 * B)
526
+ except ArithmeticError:
527
+ return None
528
+ if not np.isfinite(xmin):
529
+ return None
530
+ return xmin
531
+
532
+
533
+ def _zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo,
534
+ phi, derphi, phi0, derphi0, c1, c2, extra_condition):
535
+ """Zoom stage of approximate linesearch satisfying strong Wolfe conditions.
536
+
537
+ Part of the optimization algorithm in `scalar_search_wolfe2`.
538
+
539
+ Notes
540
+ -----
541
+ Implements Algorithm 3.6 (zoom) in Wright and Nocedal,
542
+ 'Numerical Optimization', 1999, pp. 61.
543
+
544
+ """
545
+
546
+ maxiter = 10
547
+ i = 0
548
+ delta1 = 0.2 # cubic interpolant check
549
+ delta2 = 0.1 # quadratic interpolant check
550
+ phi_rec = phi0
551
+ a_rec = 0
552
+ while True:
553
+ # Interpolate to find a trial step length between a_lo and
554
+ # a_hi. Use cubic interpolation first; if the result is
555
+ # within delta1 * dalpha of an endpoint, or outside the
556
+ # interval bounded by a_lo and a_hi, fall back to quadratic
557
+ # interpolation. If the result is still too close to an
558
+ # endpoint, use bisection.
559
+
560
+ dalpha = a_hi - a_lo
561
+ if dalpha < 0:
562
+ a, b = a_hi, a_lo
563
+ else:
564
+ a, b = a_lo, a_hi
565
+
566
+ # minimizer of cubic interpolant
567
+ # (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi)
568
+ #
569
+ # if the result is too close to the end points (or out of the
570
+ # interval), then use quadratic interpolation with phi_lo,
571
+ # derphi_lo and phi_hi; if the result is still too close to the
572
+ # end points (or out of the interval), then use bisection
573
+
574
+ if (i > 0):
575
+ cchk = delta1 * dalpha
576
+ a_j = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi,
577
+ a_rec, phi_rec)
578
+ if (i == 0) or (a_j is None) or (a_j > b - cchk) or (a_j < a + cchk):
579
+ qchk = delta2 * dalpha
580
+ a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi)
581
+ if (a_j is None) or (a_j > b-qchk) or (a_j < a+qchk):
582
+ a_j = a_lo + 0.5*dalpha
583
+
584
+ # Check new value of a_j
585
+
586
+ phi_aj = phi(a_j)
587
+ if (phi_aj > phi0 + c1*a_j*derphi0) or (phi_aj >= phi_lo):
588
+ phi_rec = phi_hi
589
+ a_rec = a_hi
590
+ a_hi = a_j
591
+ phi_hi = phi_aj
592
+ else:
593
+ derphi_aj = derphi(a_j)
594
+ if abs(derphi_aj) <= -c2*derphi0 and extra_condition(a_j, phi_aj):
595
+ a_star = a_j
596
+ val_star = phi_aj
597
+ valprime_star = derphi_aj
598
+ break
599
+ if derphi_aj*(a_hi - a_lo) >= 0:
600
+ phi_rec = phi_hi
601
+ a_rec = a_hi
602
+ a_hi = a_lo
603
+ phi_hi = phi_lo
604
+ else:
605
+ phi_rec = phi_lo
606
+ a_rec = a_lo
607
+ a_lo = a_j
608
+ phi_lo = phi_aj
609
+ derphi_lo = derphi_aj
610
+ i += 1
611
+ if (i > maxiter):
612
+ # Failed to find a conforming step size
613
+ a_star = None
614
+ val_star = None
615
+ valprime_star = None
616
+ break
617
+ return a_star, val_star, valprime_star
618
+
619
+
620
+ #------------------------------------------------------------------------------
621
+ # Armijo line and scalar searches
622
+ #------------------------------------------------------------------------------
623
+
624
+ def line_search_armijo(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1):
625
+ """Minimize over alpha, the function ``f(xk+alpha pk)``.
626
+
627
+ Parameters
628
+ ----------
629
+ f : callable
630
+ Function to be minimized.
631
+ xk : array_like
632
+ Current point.
633
+ pk : array_like
634
+ Search direction.
635
+ gfk : array_like
636
+ Gradient of `f` at point `xk`.
637
+ old_fval : float
638
+ Value of `f` at point `xk`.
639
+ args : tuple, optional
640
+ Optional arguments.
641
+ c1 : float, optional
642
+ Value to control stopping criterion.
643
+ alpha0 : scalar, optional
644
+ Value of `alpha` at start of the optimization.
645
+
646
+ Returns
647
+ -------
648
+ alpha
649
+ f_count
650
+ f_val_at_alpha
651
+
652
+ Notes
653
+ -----
654
+ Uses the interpolation algorithm (Armijo backtracking) as suggested by
655
+ Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57
656
+
657
+ """
658
+ xk = np.atleast_1d(xk)
659
+ fc = [0]
660
+
661
+ def phi(alpha1):
662
+ fc[0] += 1
663
+ return f(xk + alpha1*pk, *args)
664
+
665
+ if old_fval is None:
666
+ phi0 = phi(0.)
667
+ else:
668
+ phi0 = old_fval # compute f(xk) -- done in past loop
669
+
670
+ derphi0 = np.dot(gfk, pk)
671
+ alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0, c1=c1,
672
+ alpha0=alpha0)
673
+ return alpha, fc[0], phi1
674
+
675
+
676
+ def line_search_BFGS(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1):
677
+ """
678
+ Compatibility wrapper for `line_search_armijo`
679
+ """
680
+ r = line_search_armijo(f, xk, pk, gfk, old_fval, args=args, c1=c1,
681
+ alpha0=alpha0)
682
+ return r[0], r[1], 0, r[2]
683
+
684
+
685
+ def scalar_search_armijo(phi, phi0, derphi0, c1=1e-4, alpha0=1, amin=0):
686
+ """Minimize over alpha, the function ``phi(alpha)``.
687
+
688
+ Uses the interpolation algorithm (Armijo backtracking) as suggested by
689
+ Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57
690
+
691
+ The underlying search direction is assumed to be a descent direction, i.e. ``derphi0 < 0``.
692
+
693
+ Returns
694
+ -------
695
+ alpha
696
+ phi1
697
+
698
+ """
699
+ phi_a0 = phi(alpha0)
700
+ if phi_a0 <= phi0 + c1*alpha0*derphi0:
701
+ return alpha0, phi_a0
702
+
703
+ # Otherwise, compute the minimizer of a quadratic interpolant:
704
+
705
+ alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0)
706
+ phi_a1 = phi(alpha1)
707
+
708
+ if (phi_a1 <= phi0 + c1*alpha1*derphi0):
709
+ return alpha1, phi_a1
710
+
711
+ # Otherwise, loop with cubic interpolation until we find an alpha which
712
+ # satisfies the first Wolfe condition (since we are backtracking, we will
713
+ # assume that the value of alpha is not too small and satisfies the second
714
+ # condition.
715
+
716
+ while alpha1 > amin: # we are assuming derphi0 < 0, i.e. a descent direction
717
+ factor = alpha0**2 * alpha1**2 * (alpha1-alpha0)
718
+ a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \
719
+ alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0)
720
+ a = a / factor
721
+ b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \
722
+ alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0)
723
+ b = b / factor
724
+
725
+ alpha2 = (-b + np.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0*a)
726
+ phi_a2 = phi(alpha2)
727
+
728
+ if (phi_a2 <= phi0 + c1*alpha2*derphi0):
729
+ return alpha2, phi_a2
730
+
731
+ if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96:
732
+ alpha2 = alpha1 / 2.0
733
+
734
+ alpha0 = alpha1
735
+ alpha1 = alpha2
736
+ phi_a0 = phi_a1
737
+ phi_a1 = phi_a2
738
+
739
+ # Failed to find a suitable step length
740
+ return None, phi_a1
741
+
742
+
743
+ #------------------------------------------------------------------------------
744
+ # Non-monotone line search for DF-SANE
745
+ #------------------------------------------------------------------------------
746
+
747
+ def _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta,
748
+ gamma=1e-4, tau_min=0.1, tau_max=0.5):
749
+ """
750
+ Nonmonotone backtracking line search as described in [1]_
751
+
752
+ Parameters
753
+ ----------
754
+ f : callable
755
+ Function returning a tuple ``(f, F)`` where ``f`` is the value
756
+ of a merit function and ``F`` the residual.
757
+ x_k : ndarray
758
+ Initial position.
759
+ d : ndarray
760
+ Search direction.
761
+ prev_fs : list of float
762
+ List of previous merit function values. Should have ``len(prev_fs) <= M``
763
+ where ``M`` is the nonmonotonicity window parameter.
764
+ eta : float
765
+ Allowed merit function increase, see [1]_
766
+ gamma, tau_min, tau_max : float, optional
767
+ Search parameters, see [1]_
768
+
769
+ Returns
770
+ -------
771
+ alpha : float
772
+ Step length
773
+ xp : ndarray
774
+ Next position
775
+ fp : float
776
+ Merit function value at next position
777
+ Fp : ndarray
778
+ Residual at next position
779
+
780
+ References
781
+ ----------
782
+ [1] "Spectral residual method without gradient information for solving
783
+ large-scale nonlinear systems of equations." W. La Cruz,
784
+ J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006).
785
+
786
+ """
787
+ f_k = prev_fs[-1]
788
+ f_bar = max(prev_fs)
789
+
790
+ alpha_p = 1
791
+ alpha_m = 1
792
+ alpha = 1
793
+
794
+ while True:
795
+ xp = x_k + alpha_p * d
796
+ fp, Fp = f(xp)
797
+
798
+ if fp <= f_bar + eta - gamma * alpha_p**2 * f_k:
799
+ alpha = alpha_p
800
+ break
801
+
802
+ alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k)
803
+
804
+ xp = x_k - alpha_m * d
805
+ fp, Fp = f(xp)
806
+
807
+ if fp <= f_bar + eta - gamma * alpha_m**2 * f_k:
808
+ alpha = -alpha_m
809
+ break
810
+
811
+ alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k)
812
+
813
+ alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p)
814
+ alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m)
815
+
816
+ return alpha, xp, fp, Fp
817
+
818
+
819
+ def _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta,
820
+ gamma=1e-4, tau_min=0.1, tau_max=0.5,
821
+ nu=0.85):
822
+ """
823
+ Nonmonotone line search from [1]
824
+
825
+ Parameters
826
+ ----------
827
+ f : callable
828
+ Function returning a tuple ``(f, F)`` where ``f`` is the value
829
+ of a merit function and ``F`` the residual.
830
+ x_k : ndarray
831
+ Initial position.
832
+ d : ndarray
833
+ Search direction.
834
+ f_k : float
835
+ Initial merit function value.
836
+ C, Q : float
837
+ Control parameters. On the first iteration, give values
838
+ Q=1.0, C=f_k
839
+ eta : float
840
+ Allowed merit function increase, see [1]_
841
+ nu, gamma, tau_min, tau_max : float, optional
842
+ Search parameters, see [1]_
843
+
844
+ Returns
845
+ -------
846
+ alpha : float
847
+ Step length
848
+ xp : ndarray
849
+ Next position
850
+ fp : float
851
+ Merit function value at next position
852
+ Fp : ndarray
853
+ Residual at next position
854
+ C : float
855
+ New value for the control parameter C
856
+ Q : float
857
+ New value for the control parameter Q
858
+
859
+ References
860
+ ----------
861
+ .. [1] W. Cheng & D.-H. Li, ''A derivative-free nonmonotone line
862
+ search and its application to the spectral residual
863
+ method'', IMA J. Numer. Anal. 29, 814 (2009).
864
+
865
+ """
866
+ alpha_p = 1
867
+ alpha_m = 1
868
+ alpha = 1
869
+
870
+ while True:
871
+ xp = x_k + alpha_p * d
872
+ fp, Fp = f(xp)
873
+
874
+ if fp <= C + eta - gamma * alpha_p**2 * f_k:
875
+ alpha = alpha_p
876
+ break
877
+
878
+ alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k)
879
+
880
+ xp = x_k - alpha_m * d
881
+ fp, Fp = f(xp)
882
+
883
+ if fp <= C + eta - gamma * alpha_m**2 * f_k:
884
+ alpha = -alpha_m
885
+ break
886
+
887
+ alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k)
888
+
889
+ alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p)
890
+ alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m)
891
+
892
+ # Update C and Q
893
+ Q_next = nu * Q + 1
894
+ C = (nu * Q * (C + eta) + fp) / Q_next
895
+ Q = Q_next
896
+
897
+ return alpha, xp, fp, Fp, C, Q
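A short sketch of the strong Wolfe conditions these routines enforce, checked on the step returned by the public `scipy.optimize.line_search` (which wraps `line_search_wolfe2`). The quadratic objective and start point are arbitrary illustrative choices; note that the sixth return value is the gradient at the accepted point, per the code above, so the slope is formed explicitly.

import numpy as np
from scipy.optimize import line_search

def f(x):
    return x @ x            # simple strictly convex quadratic

def grad(x):
    return 2 * x

xk = np.array([1.8, 1.7])
pk = -grad(xk)              # steepest descent: a guaranteed descent direction

alpha, fc, gc, fnew, fold, gnew = line_search(f, grad, xk, pk)

c1, c2 = 1e-4, 0.9          # module defaults for the Wolfe parameters
derphi0 = grad(xk) @ pk     # phi'(0) < 0 along a descent direction
slope_new = gnew @ pk       # local slope at the accepted point
assert fnew <= fold + c1 * alpha * derphi0   # sufficient decrease (Armijo)
assert abs(slope_new) <= -c2 * derphi0       # strong curvature condition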
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog.py ADDED
@@ -0,0 +1,714 @@
1
+ """
2
+ A top-level linear programming interface.
3
+
4
+ .. versionadded:: 0.15.0
5
+
6
+ Functions
7
+ ---------
8
+ .. autosummary::
9
+ :toctree: generated/
10
+
11
+ linprog
12
+ linprog_verbose_callback
13
+ linprog_terse_callback
14
+
15
+ """
16
+
17
+ import numpy as np
18
+
19
+ from ._optimize import OptimizeResult, OptimizeWarning
20
+ from warnings import warn
21
+ from ._linprog_highs import _linprog_highs
22
+ from ._linprog_ip import _linprog_ip
23
+ from ._linprog_simplex import _linprog_simplex
24
+ from ._linprog_rs import _linprog_rs
25
+ from ._linprog_doc import (_linprog_highs_doc, _linprog_ip_doc, # noqa: F401
26
+ _linprog_rs_doc, _linprog_simplex_doc,
27
+ _linprog_highs_ipm_doc, _linprog_highs_ds_doc)
28
+ from ._linprog_util import (
29
+ _parse_linprog, _presolve, _get_Abc, _LPProblem, _autoscale,
30
+ _postsolve, _check_result, _display_summary)
31
+ from copy import deepcopy
32
+
33
+ __all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback']
34
+
35
+ __docformat__ = "restructuredtext en"
36
+
37
+ LINPROG_METHODS = [
38
+ 'simplex', 'revised simplex', 'interior-point', 'highs', 'highs-ds', 'highs-ipm'
39
+ ]
40
+
41
+
42
+ def linprog_verbose_callback(res):
43
+ """
44
+ A sample callback function demonstrating the linprog callback interface.
45
+ This callback produces detailed output to sys.stdout before each iteration
46
+ and after the final iteration of the simplex algorithm.
47
+
48
+ Parameters
49
+ ----------
50
+ res : A `scipy.optimize.OptimizeResult` consisting of the following fields:
51
+
52
+ x : 1-D array
53
+ The independent variable vector which optimizes the linear
54
+ programming problem.
55
+ fun : float
56
+ Value of the objective function.
57
+ success : bool
58
+ True if the algorithm succeeded in finding an optimal solution.
59
+ slack : 1-D array
60
+ The values of the slack variables. Each slack variable corresponds
61
+ to an inequality constraint. If the slack is zero, then the
62
+ corresponding constraint is active.
63
+ con : 1-D array
64
+ The (nominally zero) residuals of the equality constraints, that is,
65
+ ``b - A_eq @ x``
66
+ phase : int
67
+ The phase of the optimization being executed. In phase 1 a basic
68
+ feasible solution is sought and the T has an additional row
69
+ representing an alternate objective function.
70
+ status : int
71
+ An integer representing the exit status of the optimization::
72
+
73
+ 0 : Optimization terminated successfully
74
+ 1 : Iteration limit reached
75
+ 2 : Problem appears to be infeasible
76
+ 3 : Problem appears to be unbounded
77
+ 4 : Serious numerical difficulties encountered
78
+
79
+ nit : int
80
+ The number of iterations performed.
81
+ message : str
82
+ A string descriptor of the exit status of the optimization.
83
+ """
84
+ x = res['x']
85
+ fun = res['fun']
86
+ phase = res['phase']
87
+ status = res['status']
88
+ nit = res['nit']
89
+ message = res['message']
90
+ complete = res['complete']
91
+
92
+ saved_printoptions = np.get_printoptions()
93
+ np.set_printoptions(linewidth=500,
94
+ formatter={'float': lambda x: f"{x: 12.4f}"})
95
+ if status:
96
+ print('--------- Simplex Early Exit -------\n')
97
+ print(f'The simplex method exited early with status {status:d}')
98
+ print(message)
99
+ elif complete:
100
+ print('--------- Simplex Complete --------\n')
101
+ print(f'Iterations required: {nit}')
102
+ else:
103
+ print(f'--------- Iteration {nit:d} ---------\n')
104
+
105
+ if nit > 0:
106
+ if phase == 1:
107
+ print('Current Pseudo-Objective Value:')
108
+ else:
109
+ print('Current Objective Value:')
110
+ print('f = ', fun)
111
+ print()
112
+ print('Current Solution Vector:')
113
+ print('x = ', x)
114
+ print()
115
+
116
+ np.set_printoptions(**saved_printoptions)
117
+
118
+
119
+ def linprog_terse_callback(res):
120
+ """
121
+ A sample callback function demonstrating the linprog callback interface.
122
+ This callback produces brief output to sys.stdout before each iteration
123
+ and after the final iteration of the simplex algorithm.
124
+
125
+ Parameters
126
+ ----------
127
+ res : A `scipy.optimize.OptimizeResult` consisting of the following fields:
128
+
129
+ x : 1-D array
130
+ The independent variable vector which optimizes the linear
131
+ programming problem.
132
+ fun : float
133
+ Value of the objective function.
134
+ success : bool
135
+ True if the algorithm succeeded in finding an optimal solution.
136
+ slack : 1-D array
137
+ The values of the slack variables. Each slack variable corresponds
138
+ to an inequality constraint. If the slack is zero, then the
139
+ corresponding constraint is active.
140
+ con : 1-D array
141
+ The (nominally zero) residuals of the equality constraints, that is,
142
+ ``b - A_eq @ x``.
143
+ phase : int
144
+ The phase of the optimization being executed. In phase 1 a basic
145
+ feasible solution is sought and the T has an additional row
146
+ representing an alternate objective function.
147
+ status : int
148
+ An integer representing the exit status of the optimization::
149
+
150
+ 0 : Optimization terminated successfully
151
+ 1 : Iteration limit reached
152
+ 2 : Problem appears to be infeasible
153
+ 3 : Problem appears to be unbounded
154
+ 4 : Serious numerical difficulties encountered
155
+
156
+ nit : int
157
+ The number of iterations performed.
158
+ message : str
159
+ A string descriptor of the exit status of the optimization.
160
+ """
161
+ nit = res['nit']
162
+ x = res['x']
163
+
164
+ if nit == 0:
165
+ print("Iter: X:")
166
+ print(f"{nit: <5d} ", end="")
167
+ print(x)
168
+
169
+
170
+ def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
171
+ bounds=(0, None), method='highs', callback=None,
172
+ options=None, x0=None, integrality=None):
173
+ r"""
174
+ Linear programming: minimize a linear objective function subject to linear
175
+ equality and inequality constraints.
176
+
177
+ Linear programming solves problems of the following form:
178
+
179
+ .. math::
180
+
181
+ \min_x \ & c^T x \\
182
+ \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
183
+ & A_{eq} x = b_{eq},\\
184
+ & l \leq x \leq u ,
185
+
186
+ where :math:`x` is a vector of decision variables; :math:`c`,
187
+ :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
188
+ :math:`A_{ub}` and :math:`A_{eq}` are matrices.
189
+
190
+ Alternatively, that's:
191
+
192
+ - minimize ::
193
+
194
+ c @ x
195
+
196
+ - such that ::
197
+
198
+ A_ub @ x <= b_ub
199
+ A_eq @ x == b_eq
200
+ lb <= x <= ub
201
+
202
+ Note that by default ``lb = 0`` and ``ub = None``. Other bounds can be
203
+ specified with ``bounds``.
204
+
205
+ Parameters
206
+ ----------
207
+ c : 1-D array
208
+ The coefficients of the linear objective function to be minimized.
209
+ A_ub : 2-D array, optional
210
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
211
+ coefficients of a linear inequality constraint on ``x``.
212
+ b_ub : 1-D array, optional
213
+ The inequality constraint vector. Each element represents an
214
+ upper bound on the corresponding value of ``A_ub @ x``.
215
+ A_eq : 2-D array, optional
216
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
217
+ coefficients of a linear equality constraint on ``x``.
218
+ b_eq : 1-D array, optional
219
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
220
+ the corresponding element of ``b_eq``.
221
+ bounds : sequence, optional
222
+ A sequence of ``(min, max)`` pairs for each element in ``x``, defining
223
+ the minimum and maximum values of that decision variable.
224
+ If a single tuple ``(min, max)`` is provided, then ``min`` and ``max``
225
+ will serve as bounds for all decision variables.
226
+ Use ``None`` to indicate that there is no bound. For instance, the
227
+ default bound ``(0, None)`` means that all decision variables are
228
+ non-negative, and the pair ``(None, None)`` means no bounds at all,
229
+ i.e. all variables are allowed to be any real.
230
+ method : str, optional
231
+ The algorithm used to solve the standard form problem.
232
+ :ref:`'highs' <optimize.linprog-highs>` (default),
233
+ :ref:`'highs-ds' <optimize.linprog-highs-ds>`,
234
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
235
+ :ref:`'interior-point' <optimize.linprog-interior-point>` (legacy),
236
+ :ref:`'revised simplex' <optimize.linprog-revised_simplex>` (legacy),
237
+ and
238
+ :ref:`'simplex' <optimize.linprog-simplex>` (legacy) are supported.
239
+ The legacy methods are deprecated and will be removed in SciPy 1.11.0.
240
+ callback : callable, optional
241
+ If a callback function is provided, it will be called at least once per
242
+ iteration of the algorithm. The callback function must accept a single
243
+ `scipy.optimize.OptimizeResult` consisting of the following fields:
244
+
245
+ x : 1-D array
246
+ The current solution vector.
247
+ fun : float
248
+ The current value of the objective function ``c @ x``.
249
+ success : bool
250
+ ``True`` when the algorithm has completed successfully.
251
+ slack : 1-D array
252
+ The (nominally positive) values of the slack,
253
+ ``b_ub - A_ub @ x``.
254
+ con : 1-D array
255
+ The (nominally zero) residuals of the equality constraints,
256
+ ``b_eq - A_eq @ x``.
257
+ phase : int
258
+ The phase of the algorithm being executed.
259
+ status : int
260
+ An integer representing the status of the algorithm.
261
+
262
+ ``0`` : Optimization proceeding nominally.
263
+
264
+ ``1`` : Iteration limit reached.
265
+
266
+ ``2`` : Problem appears to be infeasible.
267
+
268
+ ``3`` : Problem appears to be unbounded.
269
+
270
+ ``4`` : Numerical difficulties encountered.
271
+
272
+ nit : int
273
+ The current iteration number.
274
+ message : str
275
+ A string descriptor of the algorithm status.
276
+
277
+ Callback functions are not currently supported by the HiGHS methods.
278
+
279
+ options : dict, optional
280
+ A dictionary of solver options. All methods accept the following
281
+ options:
282
+
283
+ maxiter : int
284
+ Maximum number of iterations to perform.
285
+ Default: see method-specific documentation.
286
+ disp : bool
287
+ Set to ``True`` to print convergence messages.
288
+ Default: ``False``.
289
+ presolve : bool
290
+ Set to ``False`` to disable automatic presolve.
291
+ Default: ``True``.
292
+
293
+ All methods except the HiGHS solvers also accept:
294
+
295
+ tol : float
296
+ A tolerance which determines when a residual is "close enough" to
297
+ zero to be considered exactly zero.
298
+ autoscale : bool
299
+ Set to ``True`` to automatically perform equilibration.
300
+ Consider using this option if the numerical values in the
301
+ constraints are separated by several orders of magnitude.
302
+ Default: ``False``.
303
+ rr : bool
304
+ Set to ``False`` to disable automatic redundancy removal.
305
+ Default: ``True``.
306
+ rr_method : string
307
+ Method used to identify and remove redundant rows from the
308
+ equality constraint matrix after presolve. For problems with
309
+ dense input, the available methods for redundancy removal are:
310
+
311
+ "SVD":
312
+ Repeatedly performs singular value decomposition on
313
+ the matrix, detecting redundant rows based on nonzeros
314
+ in the left singular vectors that correspond with
315
+ zero singular values. May be fast when the matrix is
316
+ nearly full rank.
317
+ "pivot":
318
+ Uses the algorithm presented in [5]_ to identify
319
+ redundant rows.
320
+ "ID":
321
+ Uses a randomized interpolative decomposition.
322
+ Identifies columns of the matrix transpose not used in
323
+ a full-rank interpolative decomposition of the matrix.
324
+ None:
325
+ Uses "svd" if the matrix is nearly full rank, that is,
326
+ the difference between the matrix rank and the number
327
+ of rows is less than five. If not, uses "pivot". The
328
+ behavior of this default is subject to change without
329
+ prior notice.
330
+
331
+ Default: None.
332
+ For problems with sparse input, this option is ignored, and the
333
+ pivot-based algorithm presented in [5]_ is used.
334
+
335
+ For method-specific options, see
336
+ :func:`show_options('linprog') <show_options>`.
337
+
338
+ x0 : 1-D array, optional
339
+ Guess values of the decision variables, which will be refined by
340
+ the optimization algorithm. This argument is currently used only by the
341
+ 'revised simplex' method, and can only be used if `x0` represents a
342
+ basic feasible solution.
343
+
344
+ integrality : 1-D array or int, optional
345
+ Indicates the type of integrality constraint on each decision variable.
346
+
347
+ ``0`` : Continuous variable; no integrality constraint.
348
+
349
+ ``1`` : Integer variable; decision variable must be an integer
350
+ within `bounds`.
351
+
352
+ ``2`` : Semi-continuous variable; decision variable must be within
353
+ `bounds` or take value ``0``.
354
+
355
+ ``3`` : Semi-integer variable; decision variable must be an integer
356
+ within `bounds` or take value ``0``.
357
+
358
+ By default, all variables are continuous.
359
+
360
+ For mixed integrality constraints, supply an array of shape `c.shape`.
361
+ To infer a constraint on each decision variable from shorter inputs,
362
+ the argument will be broadcasted to `c.shape` using `np.broadcast_to`.
363
+
364
+ This argument is currently used only by the ``'highs'`` method and
365
+ ignored otherwise.
366
+
367
+ Returns
368
+ -------
369
+ res : OptimizeResult
370
+ A :class:`scipy.optimize.OptimizeResult` consisting of the fields
371
+ below. Note that the return types of the fields may depend on whether
372
+ the optimization was successful, therefore it is recommended to check
373
+ `OptimizeResult.status` before relying on the other fields:
374
+
375
+ x : 1-D array
376
+ The values of the decision variables that minimizes the
377
+ objective function while satisfying the constraints.
378
+ fun : float
379
+ The optimal value of the objective function ``c @ x``.
380
+ slack : 1-D array
381
+ The (nominally positive) values of the slack variables,
382
+ ``b_ub - A_ub @ x``.
383
+ con : 1-D array
384
+ The (nominally zero) residuals of the equality constraints,
385
+ ``b_eq - A_eq @ x``.
386
+ success : bool
387
+ ``True`` when the algorithm succeeds in finding an optimal
388
+ solution.
389
+ status : int
390
+ An integer representing the exit status of the algorithm.
391
+
392
+ ``0`` : Optimization terminated successfully.
393
+
394
+ ``1`` : Iteration limit reached.
395
+
396
+ ``2`` : Problem appears to be infeasible.
397
+
398
+ ``3`` : Problem appears to be unbounded.
399
+
400
+ ``4`` : Numerical difficulties encountered.
401
+
402
+ nit : int
403
+ The total number of iterations performed in all phases.
404
+ message : str
405
+ A string descriptor of the exit status of the algorithm.
406
+
407
+ See Also
408
+ --------
409
+ show_options : Additional options accepted by the solvers.
410
+
411
+ Notes
412
+ -----
413
+ This section describes the available solvers that can be selected by the
414
+ 'method' parameter.
415
+
416
+ `'highs-ds'` and
417
+ `'highs-ipm'` are interfaces to the
418
+ HiGHS simplex and interior-point method solvers [13]_, respectively.
419
+ `'highs'` (default) chooses between
420
+ the two automatically. These are the fastest linear
421
+ programming solvers in SciPy, especially for large, sparse problems;
422
+ which of these two is faster is problem-dependent.
423
+ The other solvers (`'interior-point'`, `'revised simplex'`, and
424
+ `'simplex'`) are legacy methods and will be removed in SciPy 1.11.0.
425
+
426
+ Method *highs-ds* is a wrapper of the C++ high performance dual
427
+ revised simplex implementation (HSOL) [13]_, [14]_. Method *highs-ipm*
428
+ is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
429
+ **m**\ ethod [13]_; it features a crossover routine, so it is as accurate
430
+ as a simplex solver. Method *highs* chooses between the two automatically.
431
+ For new code involving `linprog`, we recommend explicitly choosing one of
432
+ these three method values.
433
+
434
+ .. versionadded:: 1.6.0
435
+
436
+ Method *interior-point* uses the primal-dual path following algorithm
437
+ as outlined in [4]_. This algorithm supports sparse constraint matrices and
438
+ is typically faster than the simplex methods, especially for large, sparse
439
+ problems. Note, however, that the solution returned may be slightly less
440
+ accurate than those of the simplex methods and will not, in general,
441
+ correspond with a vertex of the polytope defined by the constraints.
442
+
443
+ .. versionadded:: 1.0.0
444
+
445
+ Method *revised simplex* uses the revised simplex method as described in
446
+ [9]_, except that a factorization [11]_ of the basis matrix, rather than
447
+ its inverse, is efficiently maintained and used to solve the linear systems
448
+ at each iteration of the algorithm.
449
+
450
+ .. versionadded:: 1.3.0
451
+
452
+ Method *simplex* uses a traditional, full-tableau implementation of
453
+ Dantzig's simplex algorithm [1]_, [2]_ (*not* the
454
+ Nelder-Mead simplex). This algorithm is included for backwards
455
+ compatibility and educational purposes.
456
+
457
+ .. versionadded:: 0.15.0
458
+
459
+ Before applying *interior-point*, *revised simplex*, or *simplex*,
460
+ a presolve procedure based on [8]_ attempts
461
+ to identify trivial infeasibilities, trivial unboundedness, and potential
462
+ problem simplifications. Specifically, it checks for:
463
+
464
+ - rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints;
465
+ - columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained
466
+ variables;
467
+ - column singletons in ``A_eq``, representing fixed variables; and
468
+ - column singletons in ``A_ub``, representing simple bounds.
469
+
470
+ If presolve reveals that the problem is unbounded (e.g. an unconstrained
471
+ and unbounded variable has negative cost) or infeasible (e.g., a row of
472
+ zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver
473
+ terminates with the appropriate status code. Note that presolve terminates
474
+ as soon as any sign of unboundedness is detected; consequently, a problem
475
+ may be reported as unbounded when in reality the problem is infeasible
476
+ (but infeasibility has not been detected yet). Therefore, if it is
477
+ important to know whether the problem is actually infeasible, solve the
478
+ problem again with option ``presolve=False``.
479
+
480
+ If neither infeasibility nor unboundedness are detected in a single pass
481
+ of the presolve, bounds are tightened where possible and fixed
482
+ variables are removed from the problem. Then, linearly dependent rows
483
+ of the ``A_eq`` matrix are removed, (unless they represent an
484
+ infeasibility) to avoid numerical difficulties in the primary solve
485
+ routine. Note that rows that are nearly linearly dependent (within a
486
+ prescribed tolerance) may also be removed, which can change the optimal
487
+ solution in rare cases. If this is a concern, eliminate redundancy from
488
+ your problem formulation and run with option ``rr=False`` or
489
+ ``presolve=False``.
490
+
491
+ Several potential improvements can be made here: additional presolve
492
+ checks outlined in [8]_ should be implemented, the presolve routine should
493
+ be run multiple times (until no further simplifications can be made), and
494
+ more of the efficiency improvements from [5]_ should be implemented in the
495
+ redundancy removal routines.
496
+
497
+ After presolve, the problem is transformed to standard form by converting
498
+ the (tightened) simple bounds to upper bound constraints, introducing
499
+ non-negative slack variables for inequality constraints, and expressing
500
+ unbounded variables as the difference between two non-negative variables.
501
+ Optionally, the problem is automatically scaled via equilibration [12]_.
502
+ The selected algorithm solves the standard form problem, and a
503
+ postprocessing routine converts the result to a solution to the original
504
+ problem.
505
+
506
+ References
507
+ ----------
508
+ .. [1] Dantzig, George B., Linear programming and extensions. Rand
509
+ Corporation Research Study Princeton Univ. Press, Princeton, NJ,
510
+ 1963
511
+ .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
512
+ Mathematical Programming", McGraw-Hill, Chapter 4.
513
+ .. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
514
+ Mathematics of Operations Research (2), 1977: pp. 103-107.
515
+ .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
516
+ optimizer for linear programming: an implementation of the
517
+ homogeneous algorithm." High performance optimization. Springer US,
518
+ 2000. 197-232.
519
+ .. [5] Andersen, Erling D. "Finding all linearly dependent rows in
520
+ large-scale linear programming." Optimization Methods and Software
521
+ 6.3 (1995): 219-227.
522
+ .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
523
+ Programming based on Newton's Method." Unpublished Course Notes,
524
+ March 2004. Available 2/25/2017 at
525
+ https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
526
+ .. [7] Fourer, Robert. "Solving Linear Programs by Interior-Point Methods."
527
+ Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at
528
+ http://www.4er.org/CourseNotes/Book%20B/B-III.pdf
529
+ .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
530
+ programming." Mathematical Programming 71.2 (1995): 221-245.
531
+ .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
532
+ programming." Athena Scientific 1 (1997): 997.
533
+ .. [10] Andersen, Erling D., et al. Implementation of interior point
534
+ methods for large scale linear programming. HEC/Universite de
535
+ Geneve, 1996.
536
+ .. [11] Bartels, Richard H. "A stabilization of the simplex method."
537
+ Journal in Numerische Mathematik 16.5 (1971): 414-434.
538
+ .. [12] Tomlin, J. A. "On scaling linear programming problems."
539
+ Mathematical Programming Study 4 (1975): 146-166.
540
+ .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
541
+ "HiGHS - high performance software for linear optimization."
542
+ https://highs.dev/
543
+ .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
544
+ simplex method." Mathematical Programming Computation, 10 (1),
545
+ 119-142, 2018. DOI: 10.1007/s12532-017-0130-5
546
+
547
+ Examples
548
+ --------
549
+ Consider the following problem:
550
+
551
+ .. math::
552
+
553
+ \min_{x_0, x_1} \ -x_0 + 4x_1 & \\
554
+ \mbox{such that} \ -3x_0 + x_1 & \leq 6,\\
555
+ -x_0 - 2x_1 & \geq -4,\\
556
+ x_1 & \geq -3.
557
+
558
+ The problem is not presented in the form accepted by `linprog`. This is
559
+ easily remedied by converting the "greater than" inequality
560
+ constraint to a "less than" inequality constraint by
561
+ multiplying both sides by a factor of :math:`-1`. Note also that the last
562
+ constraint is really the simple bound :math:`-3 \leq x_1 \leq \infty`.
563
+ Finally, since there are no bounds on :math:`x_0`, we must explicitly
564
+ specify the bounds :math:`-\infty \leq x_0 \leq \infty`, as the
565
+ default is for variables to be non-negative. After collecting coeffecients
566
+ into arrays and tuples, the input for this problem is:
567
+
568
+ >>> from scipy.optimize import linprog
569
+ >>> c = [-1, 4]
570
+ >>> A = [[-3, 1], [1, 2]]
571
+ >>> b = [6, 4]
572
+ >>> x0_bounds = (None, None)
573
+ >>> x1_bounds = (-3, None)
574
+ >>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds])
575
+ >>> res.fun
576
+ -22.0
577
+ >>> res.x
578
+ array([10., -3.])
579
+ >>> res.message
580
+ 'Optimization terminated successfully. (HiGHS Status 7: Optimal)'
581
+
582
+ The marginals (AKA dual values / shadow prices / Lagrange multipliers)
583
+ and residuals (slacks) are also available.
584
+
585
+ >>> res.ineqlin
586
+ residual: [ 3.900e+01 0.000e+00]
587
+ marginals: [-0.000e+00 -1.000e+00]
588
+
589
+ For example, because the marginal associated with the second inequality
590
+ constraint is -1, we expect the optimal value of the objective function
591
+ to decrease by ``eps`` if we add a small amount ``eps`` to the right hand
592
+ side of the second inequality constraint:
593
+
594
+ >>> eps = 0.05
595
+ >>> b[1] += eps
596
+ >>> linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]).fun
597
+ -22.05
598
+
599
+ Also, because the residual on the first inequality constraint is 39, we
600
+ can decrease the right hand side of the first constraint by 39 without
601
+ affecting the optimal solution.
602
+
603
+ >>> b = [6, 4] # reset to original values
604
+ >>> b[0] -= 39
605
+ >>> linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]).fun
606
+ -22.0
607
+
608
+ """
609
+
610
+ meth = method.lower()
611
+ methods = {"highs", "highs-ds", "highs-ipm",
612
+ "simplex", "revised simplex", "interior-point"}
613
+
614
+ if meth not in methods:
615
+ raise ValueError(f"Unknown solver '{method}'")
616
+
617
+ if x0 is not None and meth != "revised simplex":
618
+ warning_message = "x0 is used only when method is 'revised simplex'. "
619
+ warn(warning_message, OptimizeWarning, stacklevel=2)
620
+
621
+ if np.any(integrality) and not meth == "highs":
622
+ integrality = None
623
+ warning_message = ("Only `method='highs'` supports integer "
624
+ "constraints. Ignoring `integrality`.")
625
+ warn(warning_message, OptimizeWarning, stacklevel=2)
626
+ elif np.any(integrality):
627
+ integrality = np.broadcast_to(integrality, np.shape(c))
628
+
629
+ lp = _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality)
630
+ lp, solver_options = _parse_linprog(lp, options, meth)
631
+ tol = solver_options.get('tol', 1e-9)
632
+
633
+ # Give unmodified problem to HiGHS
634
+ if meth.startswith('highs'):
635
+ if callback is not None:
636
+ raise NotImplementedError("HiGHS solvers do not support the "
637
+ "callback interface.")
638
+ highs_solvers = {'highs-ipm': 'ipm', 'highs-ds': 'simplex',
639
+ 'highs': None}
640
+
641
+ sol = _linprog_highs(lp, solver=highs_solvers[meth],
642
+ **solver_options)
643
+ sol['status'], sol['message'] = (
644
+ _check_result(sol['x'], sol['fun'], sol['status'], sol['slack'],
645
+ sol['con'], lp.bounds, tol, sol['message'],
646
+ integrality))
647
+ sol['success'] = sol['status'] == 0
648
+ return OptimizeResult(sol)
649
+
650
+ warn(f"`method='{meth}'` is deprecated and will be removed in SciPy "
651
+ "1.11.0. Please use one of the HiGHS solvers (e.g. "
652
+ "`method='highs'`) in new code.", DeprecationWarning, stacklevel=2)
653
+
654
+ iteration = 0
655
+ complete = False # will become True if solved in presolve
656
+ undo = []
657
+
658
+ # Keep the original arrays to calculate slack/residuals for original
659
+ # problem.
660
+ lp_o = deepcopy(lp)
661
+
662
+ # Solve trivial problem, eliminate variables, tighten bounds, etc.
663
+ rr_method = solver_options.pop('rr_method', None) # need to pop these;
664
+ rr = solver_options.pop('rr', True) # they're not passed to methods
665
+ c0 = 0 # we might get a constant term in the objective
666
+ if solver_options.pop('presolve', True):
667
+ (lp, c0, x, undo, complete, status, message) = _presolve(lp, rr,
668
+ rr_method,
669
+ tol)
670
+
671
+ C, b_scale = 1, 1 # for trivial unscaling if autoscale is not used
672
+ postsolve_args = (lp_o._replace(bounds=lp.bounds), undo, C, b_scale)
673
+
674
+ if not complete:
675
+ A, b, c, c0, x0 = _get_Abc(lp, c0)
676
+ if solver_options.pop('autoscale', False):
677
+ A, b, c, x0, C, b_scale = _autoscale(A, b, c, x0)
678
+ postsolve_args = postsolve_args[:-2] + (C, b_scale)
679
+
680
+ if meth == 'simplex':
681
+ x, status, message, iteration = _linprog_simplex(
682
+ c, c0=c0, A=A, b=b, callback=callback,
683
+ postsolve_args=postsolve_args, **solver_options)
684
+ elif meth == 'interior-point':
685
+ x, status, message, iteration = _linprog_ip(
686
+ c, c0=c0, A=A, b=b, callback=callback,
687
+ postsolve_args=postsolve_args, **solver_options)
688
+ elif meth == 'revised simplex':
689
+ x, status, message, iteration = _linprog_rs(
690
+ c, c0=c0, A=A, b=b, x0=x0, callback=callback,
691
+ postsolve_args=postsolve_args, **solver_options)
692
+
693
+ # Eliminate artificial variables, re-introduce presolved variables, etc.
694
+ disp = solver_options.get('disp', False)
695
+
696
+ x, fun, slack, con = _postsolve(x, postsolve_args, complete)
697
+
698
+ status, message = _check_result(x, fun, status, slack, con, lp_o.bounds,
699
+ tol, message, integrality)
700
+
701
+ if disp:
702
+ _display_summary(message, status, fun, iteration)
703
+
704
+ sol = {
705
+ 'x': x,
706
+ 'fun': fun,
707
+ 'slack': slack,
708
+ 'con': con,
709
+ 'status': status,
710
+ 'message': message,
711
+ 'nit': iteration,
712
+ 'success': status == 0}
713
+
714
+ return OptimizeResult(sol)
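The dispatch above maps the public method strings onto HiGHS backends ('highs-ds' -> dual simplex, 'highs-ipm' -> interior point, 'highs' -> automatic choice). A minimal sketch, using only the documented public interface, showing that all three strings solve the same problem:

from scipy.optimize import linprog

c, A, b = [-1, 4], [[-3, 1], [1, 2]], [6, 4]
bounds = [(None, None), (-3, None)]
for method in ("highs", "highs-ds", "highs-ipm"):
    res = linprog(c, A_ub=A, b_ub=b, bounds=bounds, method=method)
    # Each backend reports status 0 and the optimal value -22.0.
    print(method, res.status, round(res.fun, 6))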
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_doc.py ADDED
@@ -0,0 +1,1434 @@
1
+ """
2
+ Created on Sat Aug 22 19:49:17 2020
3
+
4
+ @author: matth
5
+ """
6
+
7
+
8
+ def _linprog_highs_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
9
+ bounds=None, method='highs', callback=None,
10
+ maxiter=None, disp=False, presolve=True,
11
+ time_limit=None,
12
+ dual_feasibility_tolerance=None,
13
+ primal_feasibility_tolerance=None,
14
+ ipm_optimality_tolerance=None,
15
+ simplex_dual_edge_weight_strategy=None,
16
+ mip_rel_gap=None,
17
+ **unknown_options):
18
+ r"""
19
+ Linear programming: minimize a linear objective function subject to linear
20
+ equality and inequality constraints using one of the HiGHS solvers.
21
+
22
+ Linear programming solves problems of the following form:
23
+
24
+ .. math::
25
+
26
+ \min_x \ & c^T x \\
27
+ \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
28
+ & A_{eq} x = b_{eq},\\
29
+ & l \leq x \leq u ,
30
+
31
+ where :math:`x` is a vector of decision variables; :math:`c`,
32
+ :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
33
+ :math:`A_{ub}` and :math:`A_{eq}` are matrices.
34
+
35
+ Alternatively, that's:
36
+
37
+ minimize::
38
+
39
+ c @ x
40
+
41
+ such that::
42
+
43
+ A_ub @ x <= b_ub
44
+ A_eq @ x == b_eq
45
+ lb <= x <= ub
46
+
47
+ Note that by default ``lb = 0`` and ``ub = None`` unless specified with
48
+ ``bounds``.
49
+
50
+ Parameters
51
+ ----------
52
+ c : 1-D array
53
+ The coefficients of the linear objective function to be minimized.
54
+ A_ub : 2-D array, optional
55
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
56
+ coefficients of a linear inequality constraint on ``x``.
57
+ b_ub : 1-D array, optional
58
+ The inequality constraint vector. Each element represents an
59
+ upper bound on the corresponding value of ``A_ub @ x``.
60
+ A_eq : 2-D array, optional
61
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
62
+ coefficients of a linear equality constraint on ``x``.
63
+ b_eq : 1-D array, optional
64
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
65
+ the corresponding element of ``b_eq``.
66
+ bounds : sequence, optional
67
+ A sequence of ``(min, max)`` pairs for each element in ``x``, defining
68
+ the minimum and maximum values of that decision variable. Use ``None``
69
+ to indicate that there is no bound. By default, bounds are
70
+ ``(0, None)`` (all decision variables are non-negative).
71
+ If a single tuple ``(min, max)`` is provided, then ``min`` and
72
+ ``max`` will serve as bounds for all decision variables.
73
+ method : str
74
+
75
+ This is the method-specific documentation for 'highs', which chooses
76
+ automatically between
77
+ :ref:`'highs-ds' <optimize.linprog-highs-ds>` and
78
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
79
+ :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
80
+ :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
81
+ :ref:`'simplex' <optimize.linprog-simplex>` (legacy)
82
+ are also available.
83
+ integrality : 1-D array or int, optional
84
+ Indicates the type of integrality constraint on each decision variable.
85
+
86
+ ``0`` : Continuous variable; no integrality constraint.
87
+
88
+ ``1`` : Integer variable; decision variable must be an integer
89
+ within `bounds`.
90
+
91
+ ``2`` : Semi-continuous variable; decision variable must be within
92
+ `bounds` or take value ``0``.
93
+
94
+ ``3`` : Semi-integer variable; decision variable must be an integer
95
+ within `bounds` or take value ``0``.
96
+
97
+ By default, all variables are continuous.
98
+
99
+ For mixed integrality constraints, supply an array of shape `c.shape`.
100
+ To infer a constraint on each decision variable from shorter inputs,
101
+ the argument will be broadcast to `c.shape` using `np.broadcast_to`.
102
+
103
+ This argument is currently used only by the ``'highs'`` method and
104
+ ignored otherwise.
105
+
106
+ Options
107
+ -------
108
+ maxiter : int
109
+ The maximum number of iterations to perform in either phase.
110
+ For :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`, this does not
111
+ include the number of crossover iterations. Default is the largest
112
+ possible value for an ``int`` on the platform.
113
+ disp : bool (default: ``False``)
114
+ Set to ``True`` if indicators of optimization status are to be
115
+ printed to the console during optimization.
116
+ presolve : bool (default: ``True``)
117
+ Presolve attempts to identify trivial infeasibilities,
118
+ identify trivial unboundedness, and simplify the problem before
119
+ sending it to the main solver. It is generally recommended
120
+ to keep the default setting ``True``; set to ``False`` if
121
+ presolve is to be disabled.
122
+ time_limit : float
123
+ The maximum time in seconds allotted to solve the problem;
124
+ default is the largest possible value for a ``double`` on the
125
+ platform.
126
+ dual_feasibility_tolerance : double (default: 1e-07)
127
+ Dual feasibility tolerance for
128
+ :ref:`'highs-ds' <optimize.linprog-highs-ds>`.
129
+ The minimum of this and ``primal_feasibility_tolerance``
130
+ is used for the feasibility tolerance of
131
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
132
+ primal_feasibility_tolerance : double (default: 1e-07)
133
+ Primal feasibility tolerance for
134
+ :ref:`'highs-ds' <optimize.linprog-highs-ds>`.
135
+ The minimum of this and ``dual_feasibility_tolerance``
136
+ is used for the feasibility tolerance of
137
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
138
+ ipm_optimality_tolerance : double (default: ``1e-08``)
139
+ Optimality tolerance for
140
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
141
+ Minimum allowable value is 1e-12.
142
+ simplex_dual_edge_weight_strategy : str (default: None)
143
+ Strategy for simplex dual edge weights. The default, ``None``,
144
+ automatically selects one of the following.
145
+
146
+ ``'dantzig'`` uses Dantzig's original strategy of choosing the most
147
+ negative reduced cost.
148
+
149
+ ``'devex'`` uses the strategy described in [15]_.
150
+
151
+ ``'steepest'`` uses the exact steepest edge strategy as described in
152
+ [16]_.
153
+
154
+ ``'steepest-devex'`` begins with the exact steepest edge strategy
155
+ until the computation is too costly or inexact and then switches to
156
+ the devex method.
157
+
158
+ Currently, ``None`` always selects ``'steepest-devex'``, but this
159
+ may change as new options become available.
160
+ mip_rel_gap : double (default: None)
161
+ Termination criterion for MIP solver: solver will terminate when the
162
+ gap between the primal objective value and the dual objective bound,
163
+ scaled by the primal objective value, is <= mip_rel_gap.
164
+ unknown_options : dict
165
+ Optional arguments not used by this particular solver. If
166
+ ``unknown_options`` is non-empty, a warning is issued listing
167
+ all unused options.
168
+
169
+ Returns
170
+ -------
171
+ res : OptimizeResult
172
+ A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
173
+
174
+ x : 1D array
175
+ The values of the decision variables that minimize the
176
+ objective function while satisfying the constraints.
177
+ fun : float
178
+ The optimal value of the objective function ``c @ x``.
179
+ slack : 1D array
180
+ The (nominally positive) values of the slack,
181
+ ``b_ub - A_ub @ x``.
182
+ con : 1D array
183
+ The (nominally zero) residuals of the equality constraints,
184
+ ``b_eq - A_eq @ x``.
185
+ success : bool
186
+ ``True`` when the algorithm succeeds in finding an optimal
187
+ solution.
188
+ status : int
189
+ An integer representing the exit status of the algorithm.
190
+
191
+ ``0`` : Optimization terminated successfully.
192
+
193
+ ``1`` : Iteration or time limit reached.
194
+
195
+ ``2`` : Problem appears to be infeasible.
196
+
197
+ ``3`` : Problem appears to be unbounded.
198
+
199
+ ``4`` : The HiGHS solver ran into a problem.
200
+
201
+ message : str
202
+ A string descriptor of the exit status of the algorithm.
203
+ nit : int
204
+ The total number of iterations performed.
205
+ For the HiGHS simplex method, this includes iterations in all
206
+ phases. For the HiGHS interior-point method, this does not include
207
+ crossover iterations.
208
+ crossover_nit : int
209
+ The number of primal/dual pushes performed during the
210
+ crossover routine for the HiGHS interior-point method.
211
+ This is ``0`` for the HiGHS simplex method.
212
+ ineqlin : OptimizeResult
213
+ Solution and sensitivity information corresponding to the
214
+ inequality constraints, `b_ub`. A dictionary consisting of the
215
+ fields:
216
+
217
+ residual : np.ndarray
218
+ The (nominally positive) values of the slack variables,
219
+ ``b_ub - A_ub @ x``. This quantity is also commonly
220
+ referred to as "slack".
221
+
222
+ marginals : np.ndarray
223
+ The sensitivity (partial derivative) of the objective
224
+ function with respect to the right-hand side of the
225
+ inequality constraints, `b_ub`.
226
+
227
+ eqlin : OptimizeResult
228
+ Solution and sensitivity information corresponding to the
229
+ equality constraints, `b_eq`. A dictionary consisting of the
230
+ fields:
231
+
232
+ residual : np.ndarray
233
+ The (nominally zero) residuals of the equality constraints,
234
+ ``b_eq - A_eq @ x``.
235
+
236
+ marginals : np.ndarray
237
+ The sensitivity (partial derivative) of the objective
238
+ function with respect to the right-hand side of the
239
+ equality constraints, `b_eq`.
240
+
241
+ lower, upper : OptimizeResult
242
+ Solution and sensitivity information corresponding to the
243
+ lower and upper bounds on decision variables, `bounds`.
244
+
245
+ residual : np.ndarray
246
+ The (nominally positive) values of the quantity
247
+ ``x - lb`` (lower) or ``ub - x`` (upper).
248
+
249
+ marginals : np.ndarray
250
+ The sensitivity (partial derivative) of the objective
251
+ function with respect to the lower and upper
252
+ `bounds`.
253
+
254
+ Notes
255
+ -----
256
+
257
+ Method :ref:`'highs-ds' <optimize.linprog-highs-ds>` is a wrapper
258
+ of the C++ high performance dual revised simplex implementation (HSOL)
259
+ [13]_, [14]_. Method :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`
260
+ is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
261
+ **m**\ ethod [13]_; it features a crossover routine, so it is as accurate
262
+ as a simplex solver. Method :ref:`'highs' <optimize.linprog-highs>` chooses
263
+ between the two automatically. For new code involving `linprog`, we
264
+ recommend explicitly choosing one of these three method values instead of
265
+ :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
266
+ :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
267
+ :ref:`'simplex' <optimize.linprog-simplex>` (legacy).
268
+
269
+ The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain
270
+ `marginals`, or partial derivatives of the objective function with respect
271
+ to the right-hand side of each constraint. These partial derivatives are
272
+ also referred to as "Lagrange multipliers", "dual values", and
273
+ "shadow prices". The sign convention of `marginals` is opposite that
274
+ of Lagrange multipliers produced by many nonlinear solvers.
275
+
276
+ References
277
+ ----------
278
+ .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
279
+ "HiGHS - high performance software for linear optimization."
280
+ https://highs.dev/
281
+ .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
282
+ simplex method." Mathematical Programming Computation, 10 (1),
283
+ 119-142, 2018. DOI: 10.1007/s12532-017-0130-5
284
+ .. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code."
285
+ Mathematical programming 5.1 (1973): 1-28.
286
+ .. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge
287
+ simplex algorithm." Mathematical Programming 12.1 (1977): 361-371.
288
+ """
289
+ pass
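A minimal sketch of the `integrality` option documented above (an editorial example; the problem data are chosen only for illustration):

from scipy.optimize import linprog

# Maximize x0 + 2*x1 (i.e., minimize its negation) subject to
# x0 + x1 <= 3.5, with both variables restricted to integers.
# integrality=1 is broadcast to c.shape, so it applies to every variable.
res = linprog([-1, -2], A_ub=[[1, 1]], b_ub=[3.5],
              integrality=1, method="highs")
print(res.x, res.fun)  # optimal integer point [0. 3.] with fun == -6.0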
290
+
291
+
292
+ def _linprog_highs_ds_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
293
+ bounds=None, method='highs-ds', callback=None,
294
+ maxiter=None, disp=False, presolve=True,
295
+ time_limit=None,
296
+ dual_feasibility_tolerance=None,
297
+ primal_feasibility_tolerance=None,
298
+ simplex_dual_edge_weight_strategy=None,
299
+ **unknown_options):
300
+ r"""
301
+ Linear programming: minimize a linear objective function subject to linear
302
+ equality and inequality constraints using the HiGHS dual simplex solver.
303
+
304
+ Linear programming solves problems of the following form:
305
+
306
+ .. math::
307
+
308
+ \min_x \ & c^T x \\
309
+ \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
310
+ & A_{eq} x = b_{eq},\\
311
+ & l \leq x \leq u ,
312
+
313
+ where :math:`x` is a vector of decision variables; :math:`c`,
314
+ :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
315
+ :math:`A_{ub}` and :math:`A_{eq}` are matrices.
316
+
317
+ Alternatively, that's:
318
+
319
+ minimize::
320
+
321
+ c @ x
322
+
323
+ such that::
324
+
325
+ A_ub @ x <= b_ub
326
+ A_eq @ x == b_eq
327
+ lb <= x <= ub
328
+
329
+ Note that by default ``lb = 0`` and ``ub = None`` unless specified with
330
+ ``bounds``.
331
+
332
+ Parameters
333
+ ----------
334
+ c : 1-D array
335
+ The coefficients of the linear objective function to be minimized.
336
+ A_ub : 2-D array, optional
337
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
338
+ coefficients of a linear inequality constraint on ``x``.
339
+ b_ub : 1-D array, optional
340
+ The inequality constraint vector. Each element represents an
341
+ upper bound on the corresponding value of ``A_ub @ x``.
342
+ A_eq : 2-D array, optional
343
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
344
+ coefficients of a linear equality constraint on ``x``.
345
+ b_eq : 1-D array, optional
346
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
347
+ the corresponding element of ``b_eq``.
348
+ bounds : sequence, optional
349
+ A sequence of ``(min, max)`` pairs for each element in ``x``, defining
350
+ the minimum and maximum values of that decision variable. Use ``None``
351
+ to indicate that there is no bound. By default, bounds are
352
+ ``(0, None)`` (all decision variables are non-negative).
353
+ If a single tuple ``(min, max)`` is provided, then ``min`` and
354
+ ``max`` will serve as bounds for all decision variables.
355
+ method : str
356
+
357
+ This is the method-specific documentation for 'highs-ds'.
358
+ :ref:`'highs' <optimize.linprog-highs>`,
359
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
360
+ :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
361
+ :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
362
+ :ref:`'simplex' <optimize.linprog-simplex>` (legacy)
363
+ are also available.
364
+
365
+ Options
366
+ -------
367
+ maxiter : int
368
+ The maximum number of iterations to perform in either phase.
369
+ Default is the largest possible value for an ``int`` on the platform.
370
+ disp : bool (default: ``False``)
371
+ Set to ``True`` if indicators of optimization status are to be
372
+ printed to the console during optimization.
373
+ presolve : bool (default: ``True``)
374
+ Presolve attempts to identify trivial infeasibilities,
375
+ identify trivial unboundedness, and simplify the problem before
376
+ sending it to the main solver. It is generally recommended
377
+ to keep the default setting ``True``; set to ``False`` if
378
+ presolve is to be disabled.
379
+ time_limit : float
380
+ The maximum time in seconds allotted to solve the problem;
381
+ default is the largest possible value for a ``double`` on the
382
+ platform.
383
+ dual_feasibility_tolerance : double (default: 1e-07)
384
+ Dual feasibility tolerance for
385
+ :ref:`'highs-ds' <optimize.linprog-highs-ds>`.
386
+ primal_feasibility_tolerance : double (default: 1e-07)
387
+ Primal feasibility tolerance for
388
+ :ref:`'highs-ds' <optimize.linprog-highs-ds>`.
389
+ simplex_dual_edge_weight_strategy : str (default: None)
390
+ Strategy for simplex dual edge weights. The default, ``None``,
391
+ automatically selects one of the following.
392
+
393
+ ``'dantzig'`` uses Dantzig's original strategy of choosing the most
394
+ negative reduced cost.
395
+
396
+ ``'devex'`` uses the strategy described in [15]_.
397
+
398
+ ``'steepest'`` uses the exact steepest edge strategy as described in
399
+ [16]_.
400
+
401
+ ``'steepest-devex'`` begins with the exact steepest edge strategy
402
+ until the computation is too costly or inexact and then switches to
403
+ the devex method.
404
+
405
+ Currently, ``None`` always selects ``'steepest-devex'``, but this
406
+ may change as new options become available.
407
+ unknown_options : dict
408
+ Optional arguments not used by this particular solver. If
409
+ ``unknown_options`` is non-empty, a warning is issued listing
410
+ all unused options.
411
+
412
+ Returns
413
+ -------
414
+ res : OptimizeResult
415
+ A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
416
+
417
+ x : 1D array
418
+ The values of the decision variables that minimize the
419
+ objective function while satisfying the constraints.
420
+ fun : float
421
+ The optimal value of the objective function ``c @ x``.
422
+ slack : 1D array
423
+ The (nominally positive) values of the slack,
424
+ ``b_ub - A_ub @ x``.
425
+ con : 1D array
426
+ The (nominally zero) residuals of the equality constraints,
427
+ ``b_eq - A_eq @ x``.
428
+ success : bool
429
+ ``True`` when the algorithm succeeds in finding an optimal
430
+ solution.
431
+ status : int
432
+ An integer representing the exit status of the algorithm.
433
+
434
+ ``0`` : Optimization terminated successfully.
435
+
436
+ ``1`` : Iteration or time limit reached.
437
+
438
+ ``2`` : Problem appears to be infeasible.
439
+
440
+ ``3`` : Problem appears to be unbounded.
441
+
442
+ ``4`` : The HiGHS solver ran into a problem.
443
+
444
+ message : str
445
+ A string descriptor of the exit status of the algorithm.
446
+ nit : int
447
+ The total number of iterations performed. This includes iterations
448
+ in all phases.
449
+ crossover_nit : int
450
+ This is always ``0`` for the HiGHS simplex method.
451
+ For the HiGHS interior-point method, this is the number of
452
+ primal/dual pushes performed during the crossover routine.
453
+ ineqlin : OptimizeResult
454
+ Solution and sensitivity information corresponding to the
455
+ inequality constraints, `b_ub`. A dictionary consisting of the
456
+ fields:
457
+
458
+ residual : np.ndarray
459
+ The (nominally positive) values of the slack variables,
460
+ ``b_ub - A_ub @ x``. This quantity is also commonly
461
+ referred to as "slack".
462
+
463
+ marginals : np.ndarray
464
+ The sensitivity (partial derivative) of the objective
465
+ function with respect to the right-hand side of the
466
+ inequality constraints, `b_ub`.
467
+
468
+ eqlin : OptimizeResult
469
+ Solution and sensitivity information corresponding to the
470
+ equality constraints, `b_eq`. A dictionary consisting of the
471
+ fields:
472
+
473
+ residual : np.ndarray
474
+ The (nominally zero) residuals of the equality constraints,
475
+ ``b_eq - A_eq @ x``.
476
+
477
+ marginals : np.ndarray
478
+ The sensitivity (partial derivative) of the objective
479
+ function with respect to the right-hand side of the
480
+ equality constraints, `b_eq`.
481
+
482
+ lower, upper : OptimizeResult
483
+ Solution and sensitivity information corresponding to the
484
+ lower and upper bounds on decision variables, `bounds`.
485
+
486
+ residual : np.ndarray
487
+ The (nominally positive) values of the quantity
488
+ ``x - lb`` (lower) or ``ub - x`` (upper).
489
+
490
+ marginals : np.ndarray
491
+ The sensitivity (partial derivative) of the objective
492
+ function with respect to the lower and upper
493
+ `bounds`.
494
+
495
+ Notes
496
+ -----
497
+
498
+ Method :ref:`'highs-ds' <optimize.linprog-highs-ds>` is a wrapper
499
+ of the C++ high performance dual revised simplex implementation (HSOL)
500
+ [13]_, [14]_. Method :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`
501
+ is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
502
+ **m**\ ethod [13]_; it features a crossover routine, so it is as accurate
503
+ as a simplex solver. Method :ref:`'highs' <optimize.linprog-highs>` chooses
504
+ between the two automatically. For new code involving `linprog`, we
505
+ recommend explicitly choosing one of these three method values instead of
506
+ :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
507
+ :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
508
+ :ref:`'simplex' <optimize.linprog-simplex>` (legacy).
509
+
510
+ The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain
511
+ `marginals`, or partial derivatives of the objective function with respect
512
+ to the right-hand side of each constraint. These partial derivatives are
513
+ also referred to as "Lagrange multipliers", "dual values", and
514
+ "shadow prices". The sign convention of `marginals` is opposite that
515
+ of Lagrange multipliers produced by many nonlinear solvers.
516
+
517
+ References
518
+ ----------
519
+ .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
520
+ "HiGHS - high performance software for linear optimization."
521
+ https://highs.dev/
522
+ .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
523
+ simplex method." Mathematical Programming Computation, 10 (1),
524
+ 119-142, 2018. DOI: 10.1007/s12532-017-0130-5
525
+ .. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code."
526
+ Mathematical programming 5.1 (1973): 1-28.
527
+ .. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge
528
+ simplex algorithm." Mathematical Programming 12.1 (1977): 361-371.
529
+ """
530
+ pass
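A hedged usage sketch for the options above (an editorial example, not taken from the SciPy docs): solver-specific settings reach `linprog` through the `options` dictionary:

from scipy.optimize import linprog

res = linprog([-1, 4], A_ub=[[-3, 1], [1, 2]], b_ub=[6, 4],
              bounds=[(None, None), (-3, None)], method="highs-ds",
              options={"simplex_dual_edge_weight_strategy": "devex"})
print(res.status, res.fun)  # 0 -22.0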
531
+
532
+
533
+ def _linprog_highs_ipm_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
534
+ bounds=None, method='highs-ipm', callback=None,
535
+ maxiter=None, disp=False, presolve=True,
536
+ time_limit=None,
537
+ dual_feasibility_tolerance=None,
538
+ primal_feasibility_tolerance=None,
539
+ ipm_optimality_tolerance=None,
540
+ **unknown_options):
541
+ r"""
542
+ Linear programming: minimize a linear objective function subject to linear
543
+ equality and inequality constraints using the HiGHS interior point solver.
544
+
545
+ Linear programming solves problems of the following form:
546
+
547
+ .. math::
548
+
549
+ \min_x \ & c^T x \\
550
+ \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
551
+ & A_{eq} x = b_{eq},\\
552
+ & l \leq x \leq u ,
553
+
554
+ where :math:`x` is a vector of decision variables; :math:`c`,
555
+ :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
556
+ :math:`A_{ub}` and :math:`A_{eq}` are matrices.
557
+
558
+ Alternatively, that's:
559
+
560
+ minimize::
561
+
562
+ c @ x
563
+
564
+ such that::
565
+
566
+ A_ub @ x <= b_ub
567
+ A_eq @ x == b_eq
568
+ lb <= x <= ub
569
+
570
+ Note that by default ``lb = 0`` and ``ub = None`` unless specified with
571
+ ``bounds``.
572
+
573
+ Parameters
574
+ ----------
575
+ c : 1-D array
576
+ The coefficients of the linear objective function to be minimized.
577
+ A_ub : 2-D array, optional
578
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
579
+ coefficients of a linear inequality constraint on ``x``.
580
+ b_ub : 1-D array, optional
581
+ The inequality constraint vector. Each element represents an
582
+ upper bound on the corresponding value of ``A_ub @ x``.
583
+ A_eq : 2-D array, optional
584
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
585
+ coefficients of a linear equality constraint on ``x``.
586
+ b_eq : 1-D array, optional
587
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
588
+ the corresponding element of ``b_eq``.
589
+ bounds : sequence, optional
590
+ A sequence of ``(min, max)`` pairs for each element in ``x``, defining
591
+ the minimum and maximum values of that decision variable. Use ``None``
592
+ to indicate that there is no bound. By default, bounds are
593
+ ``(0, None)`` (all decision variables are non-negative).
594
+ If a single tuple ``(min, max)`` is provided, then ``min`` and
595
+ ``max`` will serve as bounds for all decision variables.
596
+ method : str
597
+
598
+ This is the method-specific documentation for 'highs-ipm'.
599
+ :ref:`'highs' <optimize.linprog-highs>`,
600
+ :ref:`'highs-ds' <optimize.linprog-highs-ds>`,
601
+ :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
602
+ :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
603
+ :ref:`'simplex' <optimize.linprog-simplex>` (legacy)
604
+ are also available.
605
+
606
+ Options
607
+ -------
608
+ maxiter : int
609
+ The maximum number of iterations to perform in either phase.
610
+ For :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`, this does not
611
+ include the number of crossover iterations. Default is the largest
612
+ possible value for an ``int`` on the platform.
613
+ disp : bool (default: ``False``)
614
+ Set to ``True`` if indicators of optimization status are to be
615
+ printed to the console during optimization.
616
+ presolve : bool (default: ``True``)
617
+ Presolve attempts to identify trivial infeasibilities,
618
+ identify trivial unboundedness, and simplify the problem before
619
+ sending it to the main solver. It is generally recommended
620
+ to keep the default setting ``True``; set to ``False`` if
621
+ presolve is to be disabled.
622
+ time_limit : float
623
+ The maximum time in seconds allotted to solve the problem;
624
+ default is the largest possible value for a ``double`` on the
625
+ platform.
626
+ dual_feasibility_tolerance : double (default: 1e-07)
627
+ The minimum of this and ``primal_feasibility_tolerance``
628
+ is used for the feasibility tolerance of
629
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
630
+ primal_feasibility_tolerance : double (default: 1e-07)
631
+ The minimum of this and ``dual_feasibility_tolerance``
632
+ is used for the feasibility tolerance of
633
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
634
+ ipm_optimality_tolerance : double (default: ``1e-08``)
635
+ Optimality tolerance for
636
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
637
+ Minimum allowable value is 1e-12.
638
+ unknown_options : dict
639
+ Optional arguments not used by this particular solver. If
640
+ ``unknown_options`` is non-empty, a warning is issued listing
641
+ all unused options.
642
+
643
+ Returns
644
+ -------
645
+ res : OptimizeResult
646
+ A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
647
+
648
+ x : 1D array
649
+ The values of the decision variables that minimize the
650
+ objective function while satisfying the constraints.
651
+ fun : float
652
+ The optimal value of the objective function ``c @ x``.
653
+ slack : 1D array
654
+ The (nominally positive) values of the slack,
655
+ ``b_ub - A_ub @ x``.
656
+ con : 1D array
657
+ The (nominally zero) residuals of the equality constraints,
658
+ ``b_eq - A_eq @ x``.
659
+ success : bool
660
+ ``True`` when the algorithm succeeds in finding an optimal
661
+ solution.
662
+ status : int
663
+ An integer representing the exit status of the algorithm.
664
+
665
+ ``0`` : Optimization terminated successfully.
666
+
667
+ ``1`` : Iteration or time limit reached.
668
+
669
+ ``2`` : Problem appears to be infeasible.
670
+
671
+ ``3`` : Problem appears to be unbounded.
672
+
673
+ ``4`` : The HiGHS solver ran into a problem.
674
+
675
+ message : str
676
+ A string descriptor of the exit status of the algorithm.
677
+ nit : int
678
+ The total number of iterations performed.
679
+ For the HiGHS interior-point method, this does not include
680
+ crossover iterations.
681
+ crossover_nit : int
682
+ The number of primal/dual pushes performed during the
683
+ crossover routine for the HiGHS interior-point method.
684
+ ineqlin : OptimizeResult
685
+ Solution and sensitivity information corresponding to the
686
+ inequality constraints, `b_ub`. A dictionary consisting of the
687
+ fields:
688
+
689
+ residual : np.ndarray
690
+ The (nominally positive) values of the slack variables,
691
+ ``b_ub - A_ub @ x``. This quantity is also commonly
692
+ referred to as "slack".
693
+
694
+ marginals : np.ndarray
695
+ The sensitivity (partial derivative) of the objective
696
+ function with respect to the right-hand side of the
697
+ inequality constraints, `b_ub`.
698
+
699
+ eqlin : OptimizeResult
700
+ Solution and sensitivity information corresponding to the
701
+ equality constraints, `b_eq`. A dictionary consisting of the
702
+ fields:
703
+
704
+ residual : np.ndarray
705
+ The (nominally zero) residuals of the equality constraints,
706
+ ``b_eq - A_eq @ x``.
707
+
708
+ marginals : np.ndarray
709
+ The sensitivity (partial derivative) of the objective
710
+ function with respect to the right-hand side of the
711
+ equality constraints, `b_eq`.
712
+
713
+ lower, upper : OptimizeResult
714
+ Solution and sensitivity information corresponding to the
715
+ lower and upper bounds on decision variables, `bounds`.
716
+
717
+ residual : np.ndarray
718
+ The (nominally positive) values of the quantity
719
+ ``x - lb`` (lower) or ``ub - x`` (upper).
720
+
721
+ marginals : np.ndarray
722
+ The sensitivity (partial derivative) of the objective
723
+ function with respect to the lower and upper
724
+ `bounds`.
725
+
726
+ Notes
727
+ -----
728
+
729
+ Method :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`
730
+ is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
731
+ **m**\ ethod [13]_; it features a crossover routine, so it is as accurate
732
+ as a simplex solver.
733
+ Method :ref:`'highs-ds' <optimize.linprog-highs-ds>` is a wrapper
734
+ of the C++ high performance dual revised simplex implementation (HSOL)
735
+ [13]_, [14]_. Method :ref:`'highs' <optimize.linprog-highs>` chooses
736
+ between the two automatically. For new code involving `linprog`, we
737
+ recommend explicitly choosing one of these three method values instead of
738
+ :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
739
+ :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
740
+ :ref:`'simplex' <optimize.linprog-simplex>` (legacy).
741
+
742
+ The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain
743
+ `marginals`, or partial derivatives of the objective function with respect
744
+ to the right-hand side of each constraint. These partial derivatives are
745
+ also referred to as "Lagrange multipliers", "dual values", and
746
+ "shadow prices". The sign convention of `marginals` is opposite that
747
+ of Lagrange multipliers produced by many nonlinear solvers.
748
+
749
+ References
750
+ ----------
751
+ .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
752
+ "HiGHS - high performance software for linear optimization."
753
+ https://highs.dev/
754
+ .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
755
+ simplex method." Mathematical Programming Computation, 10 (1),
756
+ 119-142, 2018. DOI: 10.1007/s12532-017-0130-5
757
+ """
758
+ pass
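Similarly for 'highs-ipm' (an editorial sketch): the feasibility tolerances go through `options`, and crossover pushes are reported separately from `nit`:

from scipy.optimize import linprog

res = linprog([-1, 4], A_ub=[[-3, 1], [1, 2]], b_ub=[6, 4],
              bounds=[(None, None), (-3, None)], method="highs-ipm",
              options={"primal_feasibility_tolerance": 1e-9,
                       "dual_feasibility_tolerance": 1e-9})
# nit excludes crossover iterations; those appear in crossover_nit.
print(res.status, res.nit, res.crossover_nit)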
759
+
760
+
761
+ def _linprog_ip_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
762
+ bounds=None, method='interior-point', callback=None,
763
+ maxiter=1000, disp=False, presolve=True,
764
+ tol=1e-8, autoscale=False, rr=True,
765
+ alpha0=.99995, beta=0.1, sparse=False,
766
+ lstsq=False, sym_pos=True, cholesky=True, pc=True,
767
+ ip=False, permc_spec='MMD_AT_PLUS_A', **unknown_options):
768
+ r"""
769
+ Linear programming: minimize a linear objective function subject to linear
770
+ equality and inequality constraints using the interior-point method of
771
+ [4]_.
772
+
773
+ .. deprecated:: 1.9.0
774
+ `method='interior-point'` will be removed in SciPy 1.11.0.
775
+ It is replaced by `method='highs'` because the latter is
776
+ faster and more robust.
777
+
778
+ Linear programming solves problems of the following form:
779
+
780
+ .. math::
781
+
782
+ \min_x \ & c^T x \\
783
+ \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
784
+ & A_{eq} x = b_{eq},\\
785
+ & l \leq x \leq u ,
786
+
787
+ where :math:`x` is a vector of decision variables; :math:`c`,
788
+ :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
789
+ :math:`A_{ub}` and :math:`A_{eq}` are matrices.
790
+
791
+ Alternatively, that's:
792
+
793
+ minimize::
794
+
795
+ c @ x
796
+
797
+ such that::
798
+
799
+ A_ub @ x <= b_ub
800
+ A_eq @ x == b_eq
801
+ lb <= x <= ub
802
+
803
+ Note that by default ``lb = 0`` and ``ub = None`` unless specified with
804
+ ``bounds``.
805
+
806
+ Parameters
807
+ ----------
808
+ c : 1-D array
809
+ The coefficients of the linear objective function to be minimized.
810
+ A_ub : 2-D array, optional
811
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
812
+ coefficients of a linear inequality constraint on ``x``.
813
+ b_ub : 1-D array, optional
814
+ The inequality constraint vector. Each element represents an
815
+ upper bound on the corresponding value of ``A_ub @ x``.
816
+ A_eq : 2-D array, optional
817
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
818
+ coefficients of a linear equality constraint on ``x``.
819
+ b_eq : 1-D array, optional
820
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
821
+ the corresponding element of ``b_eq``.
822
+ bounds : sequence, optional
823
+ A sequence of ``(min, max)`` pairs for each element in ``x``, defining
824
+ the minimum and maximum values of that decision variable. Use ``None``
825
+ to indicate that there is no bound. By default, bounds are
826
+ ``(0, None)`` (all decision variables are non-negative).
827
+ If a single tuple ``(min, max)`` is provided, then ``min`` and
828
+ ``max`` will serve as bounds for all decision variables.
829
+ method : str
830
+ This is the method-specific documentation for 'interior-point'.
831
+ :ref:`'highs' <optimize.linprog-highs>`,
832
+ :ref:`'highs-ds' <optimize.linprog-highs-ds>`,
833
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
834
+ :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
835
+ :ref:`'simplex' <optimize.linprog-simplex>` (legacy)
836
+ are also available.
837
+ callback : callable, optional
838
+ Callback function to be executed once per iteration.
839
+
840
+ Options
841
+ -------
842
+ maxiter : int (default: 1000)
843
+ The maximum number of iterations of the algorithm.
844
+ disp : bool (default: False)
845
+ Set to ``True`` if indicators of optimization status are to be printed
846
+ to the console each iteration.
847
+ presolve : bool (default: True)
848
+ Presolve attempts to identify trivial infeasibilities,
849
+ identify trivial unboundedness, and simplify the problem before
850
+ sending it to the main solver. It is generally recommended
851
+ to keep the default setting ``True``; set to ``False`` if
852
+ presolve is to be disabled.
853
+ tol : float (default: 1e-8)
854
+ Termination tolerance to be used for all termination criteria;
855
+ see [4]_ Section 4.5.
856
+ autoscale : bool (default: False)
857
+ Set to ``True`` to automatically perform equilibration.
858
+ Consider using this option if the numerical values in the
859
+ constraints are separated by several orders of magnitude.
860
+ rr : bool (default: True)
861
+ Set to ``False`` to disable automatic redundancy removal.
862
+ alpha0 : float (default: 0.99995)
863
+ The maximal step size for Mehrotra's predictor-corrector search
864
+ direction; see :math:`\beta_{3}` of [4]_ Table 8.1.
865
+ beta : float (default: 0.1)
866
+ The desired reduction of the path parameter :math:`\mu` (see [6]_)
867
+ when Mehrotra's predictor-corrector is not in use (uncommon).
868
+ sparse : bool (default: False)
869
+ Set to ``True`` if the problem is to be treated as sparse after
870
+ presolve. If either ``A_eq`` or ``A_ub`` is a sparse matrix,
871
+ this option will automatically be set ``True``, and the problem
872
+ will be treated as sparse even during presolve. If your constraint
873
+ matrices contain mostly zeros and the problem is not very small (less
874
+ than about 100 constraints or variables), consider setting ``True``
875
+ or providing ``A_eq`` and ``A_ub`` as sparse matrices.
876
+ lstsq : bool (default: ``False``)
877
+ Set to ``True`` if the problem is expected to be very poorly
878
+ conditioned. This should always be left ``False`` unless severe
879
+ numerical difficulties are encountered. Leave this at the default
880
+ unless you receive a warning message suggesting otherwise.
881
+ sym_pos : bool (default: True)
882
+ Leave ``True`` if the problem is expected to yield a well conditioned
883
+ symmetric positive definite normal equation matrix
884
+ (almost always). Leave this at the default unless you receive
885
+ a warning message suggesting otherwise.
886
+ cholesky : bool (default: True)
887
+ Set to ``True`` if the normal equations are to be solved by explicit
888
+ Cholesky decomposition followed by explicit forward/backward
889
+ substitution. This is typically faster for problems
890
+ that are numerically well-behaved.
891
+ pc : bool (default: True)
892
+ Leave ``True`` if the predictor-corrector method of Mehrotra is to be
893
+ used. This is almost always (if not always) beneficial.
894
+ ip : bool (default: False)
895
+ Set to ``True`` if the improved initial point suggestion due to [4]_
896
+ Section 4.3 is desired. Whether this is beneficial or not
897
+ depends on the problem.
898
+ permc_spec : str (default: 'MMD_AT_PLUS_A')
899
+ (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
900
+ True``, and no SuiteSparse.)
901
+ A matrix is factorized in each iteration of the algorithm.
902
+ This option specifies how to permute the columns of the matrix for
903
+ sparsity preservation. Acceptable values are:
904
+
905
+ - ``NATURAL``: natural ordering.
906
+ - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
907
+ - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
908
+ - ``COLAMD``: approximate minimum degree column ordering.
909
+
910
+ This option can impact the convergence of the
911
+ interior point algorithm; test different values to determine which
912
+ performs best for your problem. For more information, refer to
913
+ ``scipy.sparse.linalg.splu``.
914
+ unknown_options : dict
915
+ Optional arguments not used by this particular solver. If
916
+ `unknown_options` is non-empty a warning is issued listing all
917
+ unused options.
918
+
919
+ Returns
920
+ -------
921
+ res : OptimizeResult
922
+ A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
923
+
924
+ x : 1-D array
925
+ The values of the decision variables that minimize the
926
+ objective function while satisfying the constraints.
927
+ fun : float
928
+ The optimal value of the objective function ``c @ x``.
929
+ slack : 1-D array
930
+ The (nominally positive) values of the slack variables,
931
+ ``b_ub - A_ub @ x``.
932
+ con : 1-D array
933
+ The (nominally zero) residuals of the equality constraints,
934
+ ``b_eq - A_eq @ x``.
935
+ success : bool
936
+ ``True`` when the algorithm succeeds in finding an optimal
937
+ solution.
938
+ status : int
939
+ An integer representing the exit status of the algorithm.
940
+
941
+ ``0`` : Optimization terminated successfully.
942
+
943
+ ``1`` : Iteration limit reached.
944
+
945
+ ``2`` : Problem appears to be infeasible.
946
+
947
+ ``3`` : Problem appears to be unbounded.
948
+
949
+ ``4`` : Numerical difficulties encountered.
950
+
951
+ message : str
952
+ A string descriptor of the exit status of the algorithm.
953
+ nit : int
954
+ The total number of iterations performed in all phases.
955
+
956
+
957
+ Notes
958
+ -----
959
+ This method implements the algorithm outlined in [4]_ with ideas from [8]_
960
+ and a structure inspired by the simpler methods of [6]_.
961
+
962
+ The primal-dual path following method begins with initial 'guesses' of
963
+ the primal and dual variables of the standard form problem and iteratively
964
+ attempts to solve the (nonlinear) Karush-Kuhn-Tucker conditions for the
965
+ problem with a gradually reduced logarithmic barrier term added to the
966
+ objective. This particular implementation uses a homogeneous self-dual
967
+ formulation, which provides certificates of infeasibility or unboundedness
968
+ where applicable.
969
+
970
+ The default initial point for the primal and dual variables is that
971
+ defined in [4]_ Section 4.4 Equation 8.22. Optionally (by setting initial
972
+ point option ``ip=True``), an alternate (potentially improved) starting
973
+ point can be calculated according to the additional recommendations of
974
+ [4]_ Section 4.4.
975
+
976
+ A search direction is calculated using the predictor-corrector method
977
+ (single correction) proposed by Mehrotra and detailed in [4]_ Section 4.1.
978
+ (A potential improvement would be to implement the method of multiple
979
+ corrections described in [4]_ Section 4.2.) In practice, this is
980
+ accomplished by solving the normal equations, [4]_ Section 5.1 Equations
981
+ 8.31 and 8.32, derived from the Newton equations [4]_ Section 5 Equations
982
+ 8.25 (compare to [4]_ Section 4 Equations 8.6-8.8). The advantage of
983
+ solving the normal equations rather than 8.25 directly is that the
984
+ matrices involved are symmetric positive definite, so Cholesky
985
+ decomposition can be used rather than the more expensive LU factorization.
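For orientation (a standard textbook sketch, not the exact Equations 8.31-8.32 of [4]_), the normal equations take the form

.. math::

    (A D^2 A^T)\, \Delta y = r, \qquad D^2 = \mathrm{diag}(x_i / z_i),

and :math:`A D^2 A^T` is symmetric positive definite whenever :math:`x, z > 0`, which is what makes the Cholesky factorization applicable.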
986
+
987
+ With default options, the solver used to perform the factorization depends
988
+ on third-party software availability and the conditioning of the problem.
989
+
990
+ For dense problems, solvers are tried in the following order:
991
+
992
+ 1. ``scipy.linalg.cho_factor``
993
+
994
+ 2. ``scipy.linalg.solve`` with option ``sym_pos=True``
995
+
996
+ 3. ``scipy.linalg.solve`` with option ``sym_pos=False``
997
+
998
+ 4. ``scipy.linalg.lstsq``
999
+
1000
+ For sparse problems:
1001
+
1002
+ 1. ``sksparse.cholmod.cholesky`` (if scikit-sparse and SuiteSparse are
1003
+ installed)
1004
+
1005
+ 2. ``scipy.sparse.linalg.factorized`` (if scikit-umfpack and SuiteSparse
1006
+ are installed)
1007
+
1008
+ 3. ``scipy.sparse.linalg.splu`` (which uses SuperLU distributed with SciPy)
1009
+
1010
+ 4. ``scipy.sparse.linalg.lsqr``
1011
+
1012
+ If the solver fails for any reason, successively more robust (but slower)
1013
+ solvers are attempted in the order indicated. Attempting, failing, and
1014
+ re-starting factorization can be time consuming, so if the problem is
1015
+ numerically challenging, options can be set to bypass solvers that are
1016
+ failing. Setting ``cholesky=False`` skips to solver 2,
1017
+ ``sym_pos=False`` skips to solver 3, and ``lstsq=True`` skips
1018
+ to solver 4 for both sparse and dense problems.
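A hedged sketch of bypassing a failing factorization via these options (editorial example; a DeprecationWarning is expected because 'interior-point' is deprecated):

from scipy.optimize import linprog

# Skip Cholesky and the symmetric-positive-definite solve, going straight
# to the generic dense solver (solver 3 in the list above).
res = linprog([-1, 4], A_ub=[[-3, 1], [1, 2]], b_ub=[6, 4],
              bounds=[(None, None), (-3, None)], method="interior-point",
              options={"cholesky": False, "sym_pos": False})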
1019
+
1020
+ Potential improvements for combatting issues associated with dense
1021
+ columns in otherwise sparse problems are outlined in [4]_ Section 5.3 and
1022
+ [10]_ Section 4.1-4.2; the latter also discusses the alleviation of
1023
+ accuracy issues associated with the substitution approach to free
1024
+ variables.
1025
+
1026
+ After calculating the search direction, the maximum possible step size
1027
+ that does not activate the non-negativity constraints is calculated, and
1028
+ the smaller of this step size and unity is applied (as in [4]_ Section
1029
+ 4.1.) [4]_ Section 4.3 suggests improvements for choosing the step size.
1030
+
1031
+ The new point is tested according to the termination conditions of [4]_
1032
+ Section 4.5. The same tolerance, which can be set using the ``tol`` option,
1033
+ is used for all checks. (A potential improvement would be to expose
1034
+ the different tolerances to be set independently.) If optimality,
1035
+ unboundedness, or infeasibility is detected, the solve procedure
1036
+ terminates; otherwise it repeats.
1037
+
1038
+ Whereas the top level ``linprog`` module expects a problem of form:
1039
+
1040
+ Minimize::
1041
+
1042
+ c @ x
1043
+
1044
+ Subject to::
1045
+
1046
+ A_ub @ x <= b_ub
1047
+ A_eq @ x == b_eq
1048
+ lb <= x <= ub
1049
+
1050
+ where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. The problem
1051
+ is automatically converted to the form:
1052
+
1053
+ Minimize::
1054
+
1055
+ c @ x
1056
+
1057
+ Subject to::
1058
+
1059
+ A @ x == b
1060
+ x >= 0
1061
+
1062
+ for solution. That is, the original problem contains equality, upper-bound
1063
+ and variable constraints whereas the method specific solver requires
1064
+ equality constraints and variable non-negativity. ``linprog`` converts the
1065
+ original problem to standard form by converting the simple bounds to upper
1066
+ bound constraints, introducing non-negative slack variables for inequality
1067
+ constraints, and expressing unbounded variables as the difference between
1068
+ two non-negative variables. The problem is converted back to the original
1069
+ form before results are reported.
1070
+
1071
+ References
1072
+ ----------
1073
+ .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
1074
+ optimizer for linear programming: an implementation of the
1075
+ homogeneous algorithm." High performance optimization. Springer US,
1076
+ 2000. 197-232.
1077
+ .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
1078
+ Programming based on Newton's Method." Unpublished Course Notes,
1079
+ March 2004. Available 2/25/2017 at
1080
+ https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
1081
+ .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
1082
+ programming." Mathematical Programming 71.2 (1995): 221-245.
1083
+ .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
1084
+ programming." Athena Scientific 1 (1997): 997.
1085
+ .. [10] Andersen, Erling D., et al. Implementation of interior point
1086
+ methods for large scale linear programming. HEC/Universite de
1087
+ Geneve, 1996.
1088
+ """
1089
+ pass
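A short sketch of the `sparse` behavior described above (editorial example): passing a sparse constraint matrix makes the problem be treated as sparse throughout, including presolve:

from scipy.sparse import csr_matrix
from scipy.optimize import linprog

A_ub = csr_matrix([[-3.0, 1.0], [1.0, 2.0]])  # sparse input implies sparse=True
res = linprog([-1, 4], A_ub=A_ub, b_ub=[6, 4],
              bounds=[(None, None), (-3, None)], method="interior-point")
print(round(res.fun, 6))  # -22.0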
1090
+
1091
+
1092
+ def _linprog_rs_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
1093
+ bounds=None, method='revised simplex', callback=None,
1094
+ x0=None, maxiter=5000, disp=False, presolve=True,
1095
+ tol=1e-12, autoscale=False, rr=True, maxupdate=10,
1096
+ mast=False, pivot="mrc", **unknown_options):
1097
+ r"""
1098
+ Linear programming: minimize a linear objective function subject to linear
1099
+ equality and inequality constraints using the revised simplex method.
1100
+
1101
+ .. deprecated:: 1.9.0
1102
+ `method='revised simplex'` will be removed in SciPy 1.11.0.
1103
+ It is replaced by `method='highs'` because the latter is
1104
+ faster and more robust.
1105
+
1106
+ Linear programming solves problems of the following form:
1107
+
1108
+ .. math::
1109
+
1110
+ \min_x \ & c^T x \\
1111
+ \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
1112
+ & A_{eq} x = b_{eq},\\
1113
+ & l \leq x \leq u ,
1114
+
1115
+ where :math:`x` is a vector of decision variables; :math:`c`,
1116
+ :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
1117
+ :math:`A_{ub}` and :math:`A_{eq}` are matrices.
1118
+
1119
+ Alternatively, that's:
1120
+
1121
+ minimize::
1122
+
1123
+ c @ x
1124
+
1125
+ such that::
1126
+
1127
+ A_ub @ x <= b_ub
1128
+ A_eq @ x == b_eq
1129
+ lb <= x <= ub
1130
+
1131
+ Note that by default ``lb = 0`` and ``ub = None`` unless specified with
1132
+ ``bounds``.
1133
+
1134
+ Parameters
1135
+ ----------
1136
+ c : 1-D array
1137
+ The coefficients of the linear objective function to be minimized.
1138
+ A_ub : 2-D array, optional
1139
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
1140
+ coefficients of a linear inequality constraint on ``x``.
1141
+ b_ub : 1-D array, optional
1142
+ The inequality constraint vector. Each element represents an
1143
+ upper bound on the corresponding value of ``A_ub @ x``.
1144
+ A_eq : 2-D array, optional
1145
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
1146
+ coefficients of a linear equality constraint on ``x``.
1147
+ b_eq : 1-D array, optional
1148
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
1149
+ the corresponding element of ``b_eq``.
1150
+ bounds : sequence, optional
1151
+ A sequence of ``(min, max)`` pairs for each element in ``x``, defining
1152
+ the minimum and maximum values of that decision variable. Use ``None``
1153
+ to indicate that there is no bound. By default, bounds are
1154
+ ``(0, None)`` (all decision variables are non-negative).
1155
+ If a single tuple ``(min, max)`` is provided, then ``min`` and
1156
+ ``max`` will serve as bounds for all decision variables.
1157
+ method : str
1158
+ This is the method-specific documentation for 'revised simplex'.
1159
+ :ref:`'highs' <optimize.linprog-highs>`,
1160
+ :ref:`'highs-ds' <optimize.linprog-highs-ds>`,
1161
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
1162
+ :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
1163
+ and :ref:`'simplex' <optimize.linprog-simplex>` (legacy)
1164
+ are also available.
1165
+ callback : callable, optional
1166
+ Callback function to be executed once per iteration.
1167
+ x0 : 1-D array, optional
1168
+ Guess values of the decision variables, which will be refined by
1169
+ the optimization algorithm. This argument is currently used only by the
1170
+ 'revised simplex' method, and can only be used if `x0` represents a
1171
+ basic feasible solution.
1172
+
1173
+ Options
1174
+ -------
1175
+ maxiter : int (default: 5000)
1176
+ The maximum number of iterations to perform in either phase.
1177
+ disp : bool (default: False)
1178
+ Set to ``True`` if indicators of optimization status are to be printed
1179
+ to the console each iteration.
1180
+ presolve : bool (default: True)
1181
+ Presolve attempts to identify trivial infeasibilities,
1182
+ identify trivial unboundedness, and simplify the problem before
1183
+ sending it to the main solver. It is generally recommended
1184
+ to keep the default setting ``True``; set to ``False`` if
1185
+ presolve is to be disabled.
1186
+ tol : float (default: 1e-12)
1187
+ The tolerance which determines when a solution is "close enough" to
1188
+ zero in Phase 1 to be considered a basic feasible solution or close
1189
+ enough to positive to serve as an optimal solution.
1190
+ autoscale : bool (default: False)
1191
+ Set to ``True`` to automatically perform equilibration.
1192
+ Consider using this option if the numerical values in the
1193
+ constraints are separated by several orders of magnitude.
1194
+ rr : bool (default: True)
1195
+ Set to ``False`` to disable automatic redundancy removal.
1196
+ maxupdate : int (default: 10)
1197
+ The maximum number of updates performed on the LU factorization.
1198
+ After this many updates is reached, the basis matrix is factorized
1199
+ from scratch.
1200
+ mast : bool (default: False)
1201
+ Minimize Amortized Solve Time. If enabled, the average time to solve
1202
+ a linear system using the basis factorization is measured. Typically,
1203
+ the average solve time will decrease with each successive solve after
1204
+ initial factorization, as factorization takes much more time than the
1205
+ solve operation (and updates). Eventually, however, the updated
1206
+ factorization becomes sufficiently complex that the average solve time
1207
+ begins to increase. When this is detected, the basis is refactorized
1208
+ from scratch. Enable this option to maximize speed at the risk of
1209
+ nondeterministic behavior. Ignored if ``maxupdate`` is 0.
1210
+ pivot : "mrc" or "bland" (default: "mrc")
1211
+ Pivot rule: Minimum Reduced Cost ("mrc") or Bland's rule ("bland").
1212
+ Choose Bland's rule if iteration limit is reached and cycling is
1213
+ suspected.
1214
+ unknown_options : dict
1215
+ Optional arguments not used by this particular solver. If
1216
+ `unknown_options` is non-empty a warning is issued listing all
1217
+ unused options.
1218
+
1219
+ Returns
1220
+ -------
1221
+ res : OptimizeResult
1222
+ A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
1223
+
1224
+ x : 1-D array
1225
+ The values of the decision variables that minimize the
1226
+ objective function while satisfying the constraints.
1227
+ fun : float
1228
+ The optimal value of the objective function ``c @ x``.
1229
+ slack : 1-D array
1230
+ The (nominally positive) values of the slack variables,
1231
+ ``b_ub - A_ub @ x``.
1232
+ con : 1-D array
1233
+ The (nominally zero) residuals of the equality constraints,
1234
+ ``b_eq - A_eq @ x``.
1235
+ success : bool
1236
+ ``True`` when the algorithm succeeds in finding an optimal
1237
+ solution.
1238
+ status : int
1239
+ An integer representing the exit status of the algorithm.
1240
+
1241
+ ``0`` : Optimization terminated successfully.
1242
+
1243
+ ``1`` : Iteration limit reached.
1244
+
1245
+ ``2`` : Problem appears to be infeasible.
1246
+
1247
+ ``3`` : Problem appears to be unbounded.
1248
+
1249
+ ``4`` : Numerical difficulties encountered.
1250
+
1251
+ ``5`` : Problem has no constraints; turn presolve on.
1252
+
1253
+ ``6`` : Invalid guess provided.
1254
+
1255
+ message : str
1256
+ A string descriptor of the exit status of the algorithm.
1257
+ nit : int
1258
+ The total number of iterations performed in all phases.
1259
+
1260
+
1261
+ Notes
1262
+ -----
1263
+ Method *revised simplex* uses the revised simplex method as described in
1264
+ [9]_, except that a factorization [11]_ of the basis matrix, rather than
1265
+ its inverse, is efficiently maintained and used to solve the linear systems
1266
+ at each iteration of the algorithm.
1267
+
1268
+ References
1269
+ ----------
1270
+ .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
1271
+ programming." Athena Scientific 1 (1997): 997.
1272
+ .. [11] Bartels, Richard H. "A stabilization of the simplex method."
1273
+ Numerische Mathematik 16.5 (1971): 414-434.
1274
+ """
1275
+ pass
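+ 
+ A minimal usage sketch through the public `scipy.optimize.linprog`
+ interface (assuming a SciPy version in which the deprecated
+ 'revised simplex' method is still available); note that a single
+ ``(min, max)`` tuple in `bounds` is broadcast to every decision variable:
+ 
+ >>> from scipy.optimize import linprog
+ >>> res = linprog(c=[-1, -2], A_ub=[[1, 1]], b_ub=[4],
+ ...               bounds=(0, 3), method='revised simplex')
+ >>> res.x, res.fun   # optimum at the vertex x = [1., 3.] with fun = -7.0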
1276
+
1277
+
1278
+ def _linprog_simplex_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
1279
+ bounds=None, method='interior-point', callback=None,
1280
+ maxiter=5000, disp=False, presolve=True,
1281
+ tol=1e-12, autoscale=False, rr=True, bland=False,
1282
+ **unknown_options):
1283
+ r"""
1284
+ Linear programming: minimize a linear objective function subject to linear
1285
+ equality and inequality constraints using the tableau-based simplex method.
1286
+
1287
+ .. deprecated:: 1.9.0
1288
+ `method='simplex'` will be removed in SciPy 1.11.0.
1289
+ It is replaced by `method='highs'` because the latter is
1290
+ faster and more robust.
1291
+
1292
+ Linear programming solves problems of the following form:
1293
+
1294
+ .. math::
1295
+
1296
+ \min_x \ & c^T x \\
1297
+ \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
1298
+ & A_{eq} x = b_{eq},\\
1299
+ & l \leq x \leq u ,
1300
+
1301
+ where :math:`x` is a vector of decision variables; :math:`c`,
1302
+ :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
1303
+ :math:`A_{ub}` and :math:`A_{eq}` are matrices.
1304
+
1305
+ Alternatively, that's:
1306
+
1307
+ minimize::
1308
+
1309
+ c @ x
1310
+
1311
+ such that::
1312
+
1313
+ A_ub @ x <= b_ub
1314
+ A_eq @ x == b_eq
1315
+ lb <= x <= ub
1316
+
1317
+ Note that by default ``lb = 0`` and ``ub = None`` unless specified with
1318
+ ``bounds``.
1319
+
1320
+ Parameters
1321
+ ----------
1322
+ c : 1-D array
1323
+ The coefficients of the linear objective function to be minimized.
1324
+ A_ub : 2-D array, optional
1325
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
1326
+ coefficients of a linear inequality constraint on ``x``.
1327
+ b_ub : 1-D array, optional
1328
+ The inequality constraint vector. Each element represents an
1329
+ upper bound on the corresponding value of ``A_ub @ x``.
1330
+ A_eq : 2-D array, optional
1331
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
1332
+ coefficients of a linear equality constraint on ``x``.
1333
+ b_eq : 1-D array, optional
1334
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
1335
+ the corresponding element of ``b_eq``.
1336
+ bounds : sequence, optional
1337
+ A sequence of ``(min, max)`` pairs for each element in ``x``, defining
1338
+ the minimum and maximum values of that decision variable. Use ``None``
1339
+ to indicate that there is no bound. By default, bounds are
1340
+ ``(0, None)`` (all decision variables are non-negative).
1341
+ If a single tuple ``(min, max)`` is provided, then ``min`` and
1342
+ ``max`` will serve as bounds for all decision variables.
1343
+ method : str
1344
+ This is the method-specific documentation for 'simplex'.
1345
+ :ref:`'highs' <optimize.linprog-highs>`,
1346
+ :ref:`'highs-ds' <optimize.linprog-highs-ds>`,
1347
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
1348
+ :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
1349
+ and :ref:`'revised simplex' <optimize.linprog-revised_simplex>`
1350
+ are also available.
1351
+ callback : callable, optional
1352
+ Callback function to be executed once per iteration.
1353
+
1354
+ Options
1355
+ -------
1356
+ maxiter : int (default: 5000)
1357
+ The maximum number of iterations to perform in either phase.
1358
+ disp : bool (default: False)
1359
+ Set to ``True`` if indicators of optimization status are to be printed
1360
+ to the console each iteration.
1361
+ presolve : bool (default: True)
1362
+ Presolve attempts to identify trivial infeasibilities,
1363
+ identify trivial unboundedness, and simplify the problem before
1364
+ sending it to the main solver. It is generally recommended
1365
+ to keep the default setting ``True``; set to ``False`` if
1366
+ presolve is to be disabled.
1367
+ tol : float (default: 1e-12)
1368
+ The tolerance which determines when a solution is "close enough" to
1369
+ zero in Phase 1 to be considered a basic feasible solution or close
1370
+ enough to positive to serve as an optimal solution.
1371
+ autoscale : bool (default: False)
1372
+ Set to ``True`` to automatically perform equilibration.
1373
+ Consider using this option if the numerical values in the
1374
+ constraints are separated by several orders of magnitude.
1375
+ rr : bool (default: True)
1376
+ Set to ``False`` to disable automatic redundancy removal.
1377
+ bland : bool
1378
+ If True, use Bland's anti-cycling rule [3]_ to choose pivots to
1379
+ prevent cycling. If False, choose pivots which should lead to a
1380
+ converged solution more quickly. The latter method is subject to
1381
+ cycling (non-convergence) in rare instances.
1382
+ unknown_options : dict
1383
+ Optional arguments not used by this particular solver. If
1384
+ `unknown_options` is non-empty a warning is issued listing all
1385
+ unused options.
1386
+
1387
+ Returns
1388
+ -------
1389
+ res : OptimizeResult
1390
+ A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
1391
+
1392
+ x : 1-D array
1393
+ The values of the decision variables that minimizes the
1394
+ objective function while satisfying the constraints.
1395
+ fun : float
1396
+ The optimal value of the objective function ``c @ x``.
1397
+ slack : 1-D array
1398
+ The (nominally positive) values of the slack variables,
1399
+ ``b_ub - A_ub @ x``.
1400
+ con : 1-D array
1401
+ The (nominally zero) residuals of the equality constraints,
1402
+ ``b_eq - A_eq @ x``.
1403
+ success : bool
1404
+ ``True`` when the algorithm succeeds in finding an optimal
1405
+ solution.
1406
+ status : int
1407
+ An integer representing the exit status of the algorithm.
1408
+
1409
+ ``0`` : Optimization terminated successfully.
1410
+
1411
+ ``1`` : Iteration limit reached.
1412
+
1413
+ ``2`` : Problem appears to be infeasible.
1414
+
1415
+ ``3`` : Problem appears to be unbounded.
1416
+
1417
+ ``4`` : Numerical difficulties encountered.
1418
+
1419
+ message : str
1420
+ A string descriptor of the exit status of the algorithm.
1421
+ nit : int
1422
+ The total number of iterations performed in all phases.
1423
+
1424
+ References
1425
+ ----------
1426
+ .. [1] Dantzig, George B., Linear programming and extensions. Rand
1427
+ Corporation Research Study Princeton Univ. Press, Princeton, NJ,
1428
+ 1963
1429
+ .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
1430
+ Mathematical Programming", McGraw-Hill, Chapter 4.
1431
+ .. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
1432
+ Mathematics of Operations Research (2), 1977: pp. 103-107.
1433
+ """
1434
+ pass
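+ 
+ A minimal usage sketch through the public interface (assuming a SciPy
+ version in which the deprecated 'simplex' method is still available);
+ the status codes listed above appear on the returned ``OptimizeResult``:
+ 
+ >>> from scipy.optimize import linprog
+ >>> res = linprog(c=[1, 1], A_eq=[[1, -1]], b_eq=[0],
+ ...               bounds=[(0, None), (0, None)], method='simplex')
+ >>> res.status, res.message   # 0, "Optimization terminated successfully."
+ >>> res.con                   # equality residuals b_eq - A_eq @ x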
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_highs.py ADDED
@@ -0,0 +1,440 @@
1
+ """HiGHS Linear Optimization Methods
2
+
3
+ Interface to HiGHS linear optimization software.
4
+ https://highs.dev/
5
+
6
+ .. versionadded:: 1.5.0
7
+
8
+ References
9
+ ----------
10
+ .. [1] Q. Huangfu and J.A.J. Hall. "Parallelizing the dual revised simplex
11
+ method." Mathematical Programming Computation, 10 (1), 119-142,
12
+ 2018. DOI: 10.1007/s12532-017-0130-5
13
+
14
+ """
15
+
16
+ import inspect
17
+ import numpy as np
18
+ from ._optimize import OptimizeWarning, OptimizeResult
19
+ from warnings import warn
20
+ from ._highs._highs_wrapper import _highs_wrapper
21
+ from ._highs._highs_constants import (
22
+ CONST_INF,
23
+ MESSAGE_LEVEL_NONE,
24
+ HIGHS_OBJECTIVE_SENSE_MINIMIZE,
25
+
26
+ MODEL_STATUS_NOTSET,
27
+ MODEL_STATUS_LOAD_ERROR,
28
+ MODEL_STATUS_MODEL_ERROR,
29
+ MODEL_STATUS_PRESOLVE_ERROR,
30
+ MODEL_STATUS_SOLVE_ERROR,
31
+ MODEL_STATUS_POSTSOLVE_ERROR,
32
+ MODEL_STATUS_MODEL_EMPTY,
33
+ MODEL_STATUS_OPTIMAL,
34
+ MODEL_STATUS_INFEASIBLE,
35
+ MODEL_STATUS_UNBOUNDED_OR_INFEASIBLE,
36
+ MODEL_STATUS_UNBOUNDED,
37
+ MODEL_STATUS_REACHED_DUAL_OBJECTIVE_VALUE_UPPER_BOUND
38
+ as MODEL_STATUS_RDOVUB,
39
+ MODEL_STATUS_REACHED_OBJECTIVE_TARGET,
40
+ MODEL_STATUS_REACHED_TIME_LIMIT,
41
+ MODEL_STATUS_REACHED_ITERATION_LIMIT,
42
+
43
+ HIGHS_SIMPLEX_STRATEGY_DUAL,
44
+
45
+ HIGHS_SIMPLEX_CRASH_STRATEGY_OFF,
46
+
47
+ HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE,
48
+ HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG,
49
+ HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX,
50
+ HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE,
51
+ )
52
+ from scipy.sparse import csc_matrix, vstack, issparse
53
+
54
+
55
+ def _highs_to_scipy_status_message(highs_status, highs_message):
56
+ """Converts HiGHS status number/message to SciPy status number/message"""
57
+
58
+ scipy_statuses_messages = {
59
+ None: (4, "HiGHS did not provide a status code. "),
60
+ MODEL_STATUS_NOTSET: (4, ""),
61
+ MODEL_STATUS_LOAD_ERROR: (4, ""),
62
+ MODEL_STATUS_MODEL_ERROR: (2, ""),
63
+ MODEL_STATUS_PRESOLVE_ERROR: (4, ""),
64
+ MODEL_STATUS_SOLVE_ERROR: (4, ""),
65
+ MODEL_STATUS_POSTSOLVE_ERROR: (4, ""),
66
+ MODEL_STATUS_MODEL_EMPTY: (4, ""),
67
+ MODEL_STATUS_RDOVUB: (4, ""),
68
+ MODEL_STATUS_REACHED_OBJECTIVE_TARGET: (4, ""),
69
+ MODEL_STATUS_OPTIMAL: (0, "Optimization terminated successfully. "),
70
+ MODEL_STATUS_REACHED_TIME_LIMIT: (1, "Time limit reached. "),
71
+ MODEL_STATUS_REACHED_ITERATION_LIMIT: (1, "Iteration limit reached. "),
72
+ MODEL_STATUS_INFEASIBLE: (2, "The problem is infeasible. "),
73
+ MODEL_STATUS_UNBOUNDED: (3, "The problem is unbounded. "),
74
+ MODEL_STATUS_UNBOUNDED_OR_INFEASIBLE: (4, "The problem is unbounded "
75
+ "or infeasible. ")}
76
+ unrecognized = (4, "The HiGHS status code was not recognized. ")
77
+ scipy_status, scipy_message = (
78
+ scipy_statuses_messages.get(highs_status, unrecognized))
79
+ scipy_message = (f"{scipy_message}"
80
+ f"(HiGHS Status {highs_status}: {highs_message})")
81
+ return scipy_status, scipy_message
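+ 
+ A sketch of the intended mapping (return values noted in the comments):
+ 
+ >>> s, m = _highs_to_scipy_status_message(MODEL_STATUS_OPTIMAL, 'Optimal')
+ >>> s   # 0; m embeds the original HiGHS status code and message text
+ >>> s, m = _highs_to_scipy_status_message(999, 'mystery')
+ >>> s   # 4; unrecognized codes fall through to the catch-all entry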
82
+
83
+
84
+ def _replace_inf(x):
85
+ # Replace `np.inf` with CONST_INF
86
+ infs = np.isinf(x)
87
+ with np.errstate(invalid="ignore"):
88
+ x[infs] = np.sign(x[infs])*CONST_INF
89
+ return x
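+ 
+ For example:
+ 
+ >>> x = np.array([1.0, np.inf, -np.inf])
+ >>> _replace_inf(x)   # array([1., CONST_INF, -CONST_INF]); modified in place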
90
+
91
+
92
+ def _convert_to_highs_enum(option, option_str, choices):
93
+ # If option is in the choices we can look it up, if not use
94
+ # the default value taken from function signature and warn:
95
+ try:
96
+ return choices[option.lower()]
97
+ except AttributeError:
98
+ return choices[option]
99
+ except KeyError:
100
+ sig = inspect.signature(_linprog_highs)
101
+ default_str = sig.parameters[option_str].default
102
+ warn(f"Option {option_str} is {option}, but only values in "
103
+ f"{set(choices.keys())} are allowed. Using default: "
104
+ f"{default_str}.",
105
+ OptimizeWarning, stacklevel=3)
106
+ return choices[default_str]
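+ 
+ A sketch of the lookup behavior (using a pared-down ``choices`` dict):
+ 
+ >>> choices = {'devex': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX, None: None}
+ >>> _convert_to_highs_enum('Devex', 'simplex_dual_edge_weight_strategy',
+ ...                        choices)   # strings are lower-cased, then looked up
+ >>> _convert_to_highs_enum(None, 'simplex_dual_edge_weight_strategy',
+ ...                        choices)   # non-strings raise AttributeError on
+ ...                                   # .lower() and are looked up directly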
107
+
108
+
109
+ def _linprog_highs(lp, solver, time_limit=None, presolve=True,
110
+ disp=False, maxiter=None,
111
+ dual_feasibility_tolerance=None,
112
+ primal_feasibility_tolerance=None,
113
+ ipm_optimality_tolerance=None,
114
+ simplex_dual_edge_weight_strategy=None,
115
+ mip_rel_gap=None,
116
+ mip_max_nodes=None,
117
+ **unknown_options):
118
+ r"""
119
+ Solve the following linear programming problem using one of the HiGHS
120
+ solvers:
121
+
122
+ User-facing documentation is in _linprog_doc.py.
123
+
124
+ Parameters
125
+ ----------
126
+ lp : _LPProblem
127
+ A ``scipy.optimize._linprog_util._LPProblem`` ``namedtuple``.
128
+ solver : "ipm" or "simplex" or None
129
+ Which HiGHS solver to use. If ``None``, "simplex" will be used.
130
+
131
+ Options
132
+ -------
133
+ maxiter : int
134
+ The maximum number of iterations to perform in either phase. For
135
+ ``solver='ipm'``, this does not include the number of crossover
136
+ iterations. Default is the largest possible value for an ``int``
137
+ on the platform.
138
+ disp : bool
139
+ Set to ``True`` if indicators of optimization status are to be printed
140
+ to the console each iteration; default ``False``.
141
+ time_limit : float
142
+ The maximum time in seconds allotted to solve the problem; default is
143
+ the largest possible value for a ``double`` on the platform.
144
+ presolve : bool
145
+ Presolve attempts to identify trivial infeasibilities,
146
+ identify trivial unboundedness, and simplify the problem before
147
+ sending it to the main solver. It is generally recommended
148
+ to keep the default setting ``True``; set to ``False`` if presolve is
149
+ to be disabled.
150
+ dual_feasibility_tolerance : double
151
+ Dual feasibility tolerance. Default is 1e-07.
152
+ The minimum of this and ``primal_feasibility_tolerance``
153
+ is used for the feasibility tolerance when ``solver='ipm'``.
154
+ primal_feasibility_tolerance : double
155
+ Primal feasibility tolerance. Default is 1e-07.
156
+ The minimum of this and ``dual_feasibility_tolerance``
157
+ is used for the feasibility tolerance when ``solver='ipm'``.
158
+ ipm_optimality_tolerance : double
159
+ Optimality tolerance for ``solver='ipm'``. Default is 1e-08.
160
+ Minimum possible value is 1e-12 and must be smaller than the largest
161
+ possible value for a ``double`` on the platform.
162
+ simplex_dual_edge_weight_strategy : str (default: None)
163
+ Strategy for simplex dual edge weights. The default, ``None``,
164
+ automatically selects one of the following.
165
+
166
+ ``'dantzig'`` uses Dantzig's original strategy of choosing the most
167
+ negative reduced cost.
168
+
169
+ ``'devex'`` uses the strategy described in [15]_.
170
+
171
+ ``'steepest'`` uses the exact steepest edge strategy as described in
172
+ [16]_.
173
+
174
+ ``'steepest-devex'`` begins with the exact steepest edge strategy
175
+ until the computation is too costly or inexact and then switches to
176
+ the devex method.
177
+
178
+ Currently, using ``None`` always selects ``'steepest-devex'``, but this
179
+ may change as new options become available.
180
+
181
+ mip_max_nodes : int
182
+ The maximum number of nodes allotted to solve the problem; default is
183
+ the largest possible value for a ``HighsInt`` on the platform.
184
+ Ignored if not using the MIP solver.
185
+ unknown_options : dict
186
+ Optional arguments not used by this particular solver. If
187
+ ``unknown_options`` is non-empty, a warning is issued listing all
188
+ unused options.
189
+
190
+ Returns
191
+ -------
192
+ sol : dict
193
+ A dictionary consisting of the fields:
194
+
195
+ x : 1D array
196
+ The values of the decision variables that minimizes the
197
+ objective function while satisfying the constraints.
198
+ fun : float
199
+ The optimal value of the objective function ``c @ x``.
200
+ slack : 1D array
201
+ The (nominally positive) values of the slack,
202
+ ``b_ub - A_ub @ x``.
203
+ con : 1D array
204
+ The (nominally zero) residuals of the equality constraints,
205
+ ``b_eq - A_eq @ x``.
206
+ success : bool
207
+ ``True`` when the algorithm succeeds in finding an optimal
208
+ solution.
209
+ status : int
210
+ An integer representing the exit status of the algorithm.
211
+
212
+ ``0`` : Optimization terminated successfully.
213
+
214
+ ``1`` : Iteration or time limit reached.
215
+
216
+ ``2`` : Problem appears to be infeasible.
217
+
218
+ ``3`` : Problem appears to be unbounded.
219
+
220
+ ``4`` : The HiGHS solver ran into a problem.
221
+
222
+ message : str
223
+ A string descriptor of the exit status of the algorithm.
224
+ nit : int
225
+ The total number of iterations performed.
226
+ For ``solver='simplex'``, this includes iterations in all
227
+ phases. For ``solver='ipm'``, this does not include
228
+ crossover iterations.
229
+ crossover_nit : int
230
+ The number of primal/dual pushes performed during the
231
+ crossover routine for ``solver='ipm'``. This is ``0``
232
+ for ``solver='simplex'``.
233
+ ineqlin : OptimizeResult
234
+ Solution and sensitivity information corresponding to the
235
+ inequality constraints, `b_ub`. A dictionary consisting of the
236
+ fields:
237
+
238
+ residual : np.ndarray
239
+ The (nominally positive) values of the slack variables,
240
+ ``b_ub - A_ub @ x``. This quantity is also commonly
241
+ referred to as "slack".
242
+
243
+ marginals : np.ndarray
244
+ The sensitivity (partial derivative) of the objective
245
+ function with respect to the right-hand side of the
246
+ inequality constraints, `b_ub`.
247
+
248
+ eqlin : OptimizeResult
249
+ Solution and sensitivity information corresponding to the
250
+ equality constraints, `b_eq`. A dictionary consisting of the
251
+ fields:
252
+
253
+ residual : np.ndarray
254
+ The (nominally zero) residuals of the equality constraints,
255
+ ``b_eq - A_eq @ x``.
256
+
257
+ marginals : np.ndarray
258
+ The sensitivity (partial derivative) of the objective
259
+ function with respect to the right-hand side of the
260
+ equality constraints, `b_eq`.
261
+
262
+ lower, upper : OptimizeResult
263
+ Solution and sensitivity information corresponding to the
264
+ lower and upper bounds on decision variables, `bounds`.
265
+
266
+ residual : np.ndarray
267
+ The (nominally positive) values of the quantity
268
+ ``x - lb`` (lower) or ``ub - x`` (upper).
269
+
270
+ marginals : np.ndarray
271
+ The sensitivity (partial derivative) of the objective
272
+ function with respect to the lower and upper
273
+ `bounds`.
274
+
275
+ mip_node_count : int
276
+ The number of subproblems or "nodes" solved by the MILP
277
+ solver. Only present when `integrality` is not `None`.
278
+
279
+ mip_dual_bound : float
280
+ The MILP solver's final estimate of the lower bound on the
281
+ optimal solution. Only present when `integrality` is not
282
+ `None`.
283
+
284
+ mip_gap : float
285
+ The difference between the final objective function value
286
+ and the final dual bound, scaled by the final objective
287
+ function value. Only present when `integrality` is not
288
+ `None`.
289
+
290
+ Notes
291
+ -----
292
+ The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain
293
+ `marginals`, or partial derivatives of the objective function with respect
294
+ to the right-hand side of each constraint. These partial derivatives are
295
+ also referred to as "Lagrange multipliers", "dual values", and
296
+ "shadow prices". The sign convention of `marginals` is opposite that
297
+ of Lagrange multipliers produced by many nonlinear solvers.
298
+
299
+ References
300
+ ----------
301
+ .. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code."
302
+ Mathematical programming 5.1 (1973): 1-28.
303
+ .. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge
304
+ simplex algorithm." Mathematical Programming 12.1 (1977): 361-371.
305
+ """
306
+ if unknown_options:
307
+ message = (f"Unrecognized options detected: {unknown_options}. "
308
+ "These will be passed to HiGHS verbatim.")
309
+ warn(message, OptimizeWarning, stacklevel=3)
310
+
311
+ # Map options to HiGHS enum values
312
+ simplex_dual_edge_weight_strategy_enum = _convert_to_highs_enum(
313
+ simplex_dual_edge_weight_strategy,
314
+ 'simplex_dual_edge_weight_strategy',
315
+ choices={'dantzig': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG,
316
+ 'devex': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX,
317
+ 'steepest-devex': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE,
318
+ 'steepest':
319
+ HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE,
320
+ None: None})
321
+
322
+ c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp
323
+
324
+ lb, ub = bounds.T.copy() # separate bounds; copy makes them C-contiguous
325
+ # highs_wrapper solves LHS <= A*x <= RHS, not equality constraints
326
+ with np.errstate(invalid="ignore"):
327
+ lhs_ub = -np.ones_like(b_ub)*np.inf # LHS of UB constraints is -inf
328
+ rhs_ub = b_ub # RHS of UB constraints is b_ub
329
+ lhs_eq = b_eq # Equality constraint is inequality
330
+ rhs_eq = b_eq # constraint with LHS=RHS
331
+ lhs = np.concatenate((lhs_ub, lhs_eq))
332
+ rhs = np.concatenate((rhs_ub, rhs_eq))
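+ # Illustration (hypothetical values, not executed): with b_ub = [4] and
+ # b_eq = [2], the stacked two-sided bounds are lhs = [-inf, 2] and
+ # rhs = [4, 2], i.e. -inf <= A_ub @ x <= 4 and 2 <= A_eq @ x <= 2.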
333
+
334
+ if issparse(A_ub) or issparse(A_eq):
335
+ A = vstack((A_ub, A_eq))
336
+ else:
337
+ A = np.vstack((A_ub, A_eq))
338
+ A = csc_matrix(A)
339
+
340
+ options = {
341
+ 'presolve': presolve,
342
+ 'sense': HIGHS_OBJECTIVE_SENSE_MINIMIZE,
343
+ 'solver': solver,
344
+ 'time_limit': time_limit,
345
+ 'highs_debug_level': MESSAGE_LEVEL_NONE,
346
+ 'dual_feasibility_tolerance': dual_feasibility_tolerance,
347
+ 'ipm_optimality_tolerance': ipm_optimality_tolerance,
348
+ 'log_to_console': disp,
349
+ 'mip_max_nodes': mip_max_nodes,
350
+ 'output_flag': disp,
351
+ 'primal_feasibility_tolerance': primal_feasibility_tolerance,
352
+ 'simplex_dual_edge_weight_strategy':
353
+ simplex_dual_edge_weight_strategy_enum,
354
+ 'simplex_strategy': HIGHS_SIMPLEX_STRATEGY_DUAL,
355
+ 'simplex_crash_strategy': HIGHS_SIMPLEX_CRASH_STRATEGY_OFF,
356
+ 'ipm_iteration_limit': maxiter,
357
+ 'simplex_iteration_limit': maxiter,
358
+ 'mip_rel_gap': mip_rel_gap,
359
+ }
360
+ options.update(unknown_options)
361
+
362
+ # np.inf doesn't work; use very large constant
363
+ rhs = _replace_inf(rhs)
364
+ lhs = _replace_inf(lhs)
365
+ lb = _replace_inf(lb)
366
+ ub = _replace_inf(ub)
367
+
368
+ if integrality is None or np.sum(integrality) == 0:
369
+ integrality = np.empty(0)
370
+ else:
371
+ integrality = np.array(integrality)
372
+
373
+ res = _highs_wrapper(c, A.indptr, A.indices, A.data, lhs, rhs,
374
+ lb, ub, integrality.astype(np.uint8), options)
375
+
376
+ # HiGHS represents constraints as lhs/rhs, so
377
+ # Ax + s = b => Ax = b - s
378
+ # and we need to split up s by A_ub and A_eq
379
+ if 'slack' in res:
380
+ slack = res['slack']
381
+ con = np.array(slack[len(b_ub):])
382
+ slack = np.array(slack[:len(b_ub)])
383
+ else:
384
+ slack, con = None, None
385
+
386
+ # lagrange multipliers for equalities/inequalities and upper/lower bounds
387
+ if 'lambda' in res:
388
+ lamda = res['lambda']
389
+ marg_ineqlin = np.array(lamda[:len(b_ub)])
390
+ marg_eqlin = np.array(lamda[len(b_ub):])
391
+ marg_upper = np.array(res['marg_bnds'][1, :])
392
+ marg_lower = np.array(res['marg_bnds'][0, :])
393
+ else:
394
+ marg_ineqlin, marg_eqlin = None, None
395
+ marg_upper, marg_lower = None, None
396
+
397
+ # this needs to be updated if we start choosing the solver intelligently
398
+
399
+ # Convert to scipy-style status and message
400
+ highs_status = res.get('status', None)
401
+ highs_message = res.get('message', None)
402
+ status, message = _highs_to_scipy_status_message(highs_status,
403
+ highs_message)
404
+
405
+ x = np.array(res['x']) if 'x' in res else None
406
+ sol = {'x': x,
407
+ 'slack': slack,
408
+ 'con': con,
409
+ 'ineqlin': OptimizeResult({
410
+ 'residual': slack,
411
+ 'marginals': marg_ineqlin,
412
+ }),
413
+ 'eqlin': OptimizeResult({
414
+ 'residual': con,
415
+ 'marginals': marg_eqlin,
416
+ }),
417
+ 'lower': OptimizeResult({
418
+ 'residual': None if x is None else x - lb,
419
+ 'marginals': marg_lower,
420
+ }),
421
+ 'upper': OptimizeResult({
422
+ 'residual': None if x is None else ub - x,
423
+ 'marginals': marg_upper
424
+ }),
425
+ 'fun': res.get('fun'),
426
+ 'status': status,
427
+ 'success': res['status'] == MODEL_STATUS_OPTIMAL,
428
+ 'message': message,
429
+ 'nit': res.get('simplex_nit', 0) or res.get('ipm_nit', 0),
430
+ 'crossover_nit': res.get('crossover_nit'),
431
+ }
432
+
433
+ if np.any(x) and integrality is not None:
434
+ sol.update({
435
+ 'mip_node_count': res.get('mip_node_count', 0),
436
+ 'mip_dual_bound': res.get('mip_dual_bound', 0.0),
437
+ 'mip_gap': res.get('mip_gap', 0.0),
438
+ })
439
+
440
+ return sol
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_simplex.py ADDED
@@ -0,0 +1,661 @@
1
+ """Simplex method for linear programming
2
+
3
+ The *simplex* method uses a traditional, full-tableau implementation of
4
+ Dantzig's simplex algorithm [1]_, [2]_ (*not* the Nelder-Mead simplex).
5
+ This algorithm is included for backwards compatibility and educational
6
+ purposes.
7
+
8
+ .. versionadded:: 0.15.0
9
+
10
+ Warnings
11
+ --------
12
+
13
+ The simplex method may encounter numerical difficulties when pivot
14
+ values are close to the specified tolerance. If encountered, try
15
+ removing any redundant constraints, changing the pivot strategy to Bland's
16
+ rule, or increasing the tolerance value.
17
+
18
+ Alternatively, more robust methods may be used. See
19
+ :ref:`'interior-point' <optimize.linprog-interior-point>` and
20
+ :ref:`'revised simplex' <optimize.linprog-revised_simplex>`.
21
+
22
+ References
23
+ ----------
24
+ .. [1] Dantzig, George B., Linear programming and extensions. Rand
25
+ Corporation Research Study Princeton Univ. Press, Princeton, NJ,
26
+ 1963
27
+ .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
28
+ Mathematical Programming", McGraw-Hill, Chapter 4.
29
+ """
30
+
31
+ import numpy as np
32
+ from warnings import warn
33
+ from ._optimize import OptimizeResult, OptimizeWarning, _check_unknown_options
34
+ from ._linprog_util import _postsolve
35
+
36
+
37
+ def _pivot_col(T, tol=1e-9, bland=False):
38
+ """
39
+ Given a linear programming simplex tableau, determine the column
40
+ of the variable to enter the basis.
41
+
42
+ Parameters
43
+ ----------
44
+ T : 2-D array
45
+ A 2-D array representing the simplex tableau, T, corresponding to the
46
+ linear programming problem. It should have the form:
47
+
48
+ [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
49
+ [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
50
+ .
51
+ .
52
+ .
53
+ [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
54
+ [c[0], c[1], ..., c[n_total], 0]]
55
+
56
+ for a Phase 2 problem, or the form:
57
+
58
+ [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
59
+ [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
60
+ .
61
+ .
62
+ .
63
+ [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
64
+ [c[0], c[1], ..., c[n_total], 0],
65
+ [c'[0], c'[1], ..., c'[n_total], 0]]
66
+
67
+ for a Phase 1 problem (a problem in which a basic feasible solution is
68
+ sought prior to maximizing the actual objective). ``T`` is modified in
69
+ place by ``_solve_simplex``.
70
+ tol : float
71
+ Elements in the objective row larger than -tol will not be considered
72
+ for pivoting. Nominally this value is zero, but numerical issues
73
+ cause a tolerance about zero to be necessary.
74
+ bland : bool
75
+ If True, use Bland's rule for selection of the column (select the
76
+ first column with a negative coefficient in the objective row,
77
+ regardless of magnitude).
78
+
79
+ Returns
80
+ -------
81
+ status: bool
82
+ True if a suitable pivot column was found, otherwise False.
83
+ A return of False indicates that the linear programming simplex
84
+ algorithm is complete.
85
+ col: int
86
+ The index of the column of the pivot element.
87
+ If status is False, col will be returned as nan.
88
+ """
89
+ ma = np.ma.masked_where(T[-1, :-1] >= -tol, T[-1, :-1], copy=False)
90
+ if ma.count() == 0:
91
+ return False, np.nan
92
+ if bland:
93
+ # ma.mask is sometimes 0d
94
+ return True, np.nonzero(np.logical_not(np.atleast_1d(ma.mask)))[0][0]
95
+ return True, np.ma.nonzero(ma == ma.min())[0][0]
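+ 
+ A small illustration on a hypothetical tableau whose objective row is
+ ``[-2, -3, 1]``:
+ 
+ >>> T = np.array([[1., 0., 2., 4.],
+ ...               [0., 1., 1., 6.],
+ ...               [-2., -3., 1., 0.]])
+ >>> _pivot_col(T)               # (True, 1): most negative reduced cost
+ >>> _pivot_col(T, bland=True)   # (True, 0): first negative coefficient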
96
+
97
+
98
+ def _pivot_row(T, basis, pivcol, phase, tol=1e-9, bland=False):
99
+ """
100
+ Given a linear programming simplex tableau, determine the row for the
101
+ pivot operation.
102
+
103
+ Parameters
104
+ ----------
105
+ T : 2-D array
106
+ A 2-D array representing the simplex tableau, T, corresponding to the
107
+ linear programming problem. It should have the form:
108
+
109
+ [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
110
+ [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
111
+ .
112
+ .
113
+ .
114
+ [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
115
+ [c[0], c[1], ..., c[n_total], 0]]
116
+
117
+ for a Phase 2 problem, or the form:
118
+
119
+ [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
120
+ [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
121
+ .
122
+ .
123
+ .
124
+ [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
125
+ [c[0], c[1], ..., c[n_total], 0],
126
+ [c'[0], c'[1], ..., c'[n_total], 0]]
127
+
128
+ for a Phase 1 problem (a problem in which a basic feasible solution is
129
+ sought prior to maximizing the actual objective). ``T`` is modified in
130
+ place by ``_solve_simplex``.
131
+ basis : array
132
+ A list of the current basic variables.
133
+ pivcol : int
134
+ The index of the pivot column.
135
+ phase : int
136
+ The phase of the simplex algorithm (1 or 2).
137
+ tol : float
138
+ Elements in the pivot column smaller than tol will not be considered
139
+ for pivoting. Nominally this value is zero, but numerical issues
140
+ cause a tolerance about zero to be necessary.
141
+ bland : bool
142
+ If True, use Bland's rule for selection of the row (if more than one
143
+ row can be used, choose the one with the lowest variable index).
144
+
145
+ Returns
146
+ -------
147
+ status: bool
148
+ True if a suitable pivot row was found, otherwise False. A return
149
+ of False indicates that the linear programming problem is unbounded.
150
+ row: int
151
+ The index of the row of the pivot element. If status is False, row
152
+ will be returned as nan.
153
+ """
154
+ if phase == 1:
155
+ k = 2
156
+ else:
157
+ k = 1
158
+ ma = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, pivcol], copy=False)
159
+ if ma.count() == 0:
160
+ return False, np.nan
161
+ mb = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, -1], copy=False)
162
+ q = mb / ma
163
+ min_rows = np.ma.nonzero(q == q.min())[0]
164
+ if bland:
165
+ return True, min_rows[np.argmin(np.take(basis, min_rows))]
166
+ return True, min_rows[0]
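+ 
+ A small illustration of the minimum-ratio test on a hypothetical Phase 2
+ tableau (the ratios b/a in pivot column 0 are 4/2 and 9/3):
+ 
+ >>> T = np.array([[2., 1., 4.],
+ ...               [3., 0., 9.],
+ ...               [-1., -2., 0.]])
+ >>> basis = np.array([1, 2])
+ >>> _pivot_row(T, basis, pivcol=0, phase=2)   # (True, 0): 4/2 < 9/3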
167
+
168
+
169
+ def _apply_pivot(T, basis, pivrow, pivcol, tol=1e-9):
170
+ """
171
+ Pivot the simplex tableau in place on the element given by (pivrow, pivcol).
172
+ The entering variable corresponds to the column given by pivcol forcing
173
+ the variable basis[pivrow] to leave the basis.
174
+
175
+ Parameters
176
+ ----------
177
+ T : 2-D array
178
+ A 2-D array representing the simplex tableau, T, corresponding to the
179
+ linear programming problem. It should have the form:
180
+
181
+ [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
182
+ [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
183
+ .
184
+ .
185
+ .
186
+ [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
187
+ [c[0], c[1], ..., c[n_total], 0]]
188
+
189
+ for a Phase 2 problem, or the form:
190
+
191
+ [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
192
+ [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
193
+ .
194
+ .
195
+ .
196
+ [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
197
+ [c[0], c[1], ..., c[n_total], 0],
198
+ [c'[0], c'[1], ..., c'[n_total], 0]]
199
+
200
+ for a Phase 1 problem (a problem in which a basic feasible solution is
201
+ sought prior to maximizing the actual objective). ``T`` is modified in
202
+ place by ``_solve_simplex``.
203
+ basis : 1-D array
204
+ An array of the indices of the basic variables, such that basis[i]
205
+ contains the column corresponding to the basic variable for row i.
206
+ Basis is modified in place by _apply_pivot.
207
+ pivrow : int
208
+ Row index of the pivot.
209
+ pivcol : int
210
+ Column index of the pivot.
211
+ """
212
+ basis[pivrow] = pivcol
213
+ pivval = T[pivrow, pivcol]
214
+ T[pivrow] = T[pivrow] / pivval
215
+ for irow in range(T.shape[0]):
216
+ if irow != pivrow:
217
+ T[irow] = T[irow] - T[pivrow] * T[irow, pivcol]
218
+
219
+ # The selected pivot should never lead to a pivot value less than the tol.
220
+ if np.isclose(pivval, tol, atol=0, rtol=1e4):
221
+ message = (
222
+ f"The pivot operation produces a pivot value of:{pivval: .1e}, "
223
+ "which is only slightly greater than the specified "
224
+ f"tolerance{tol: .1e}. This may lead to issues regarding the "
225
+ "numerical stability of the simplex method. "
226
+ "Removing redundant constraints, changing the pivot strategy "
227
+ "via Bland's rule or increasing the tolerance may "
228
+ "help reduce the issue.")
229
+ warn(message, OptimizeWarning, stacklevel=5)
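+ 
+ A sketch of one elimination step on a hypothetical tableau (the second
+ row stands in for the objective row):
+ 
+ >>> T = np.array([[2., 1., 4.],
+ ...               [1., 3., 6.]])
+ >>> basis = np.array([0])
+ >>> _apply_pivot(T, basis, pivrow=0, pivcol=0)
+ >>> T   # row 0 scaled to [1., 0.5, 2.]; row 1 reduced to [0., 2.5, 4.]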
230
+
231
+
232
+ def _solve_simplex(T, n, basis, callback, postsolve_args,
233
+ maxiter=1000, tol=1e-9, phase=2, bland=False, nit0=0,
234
+ ):
235
+ """
236
+ Solve a linear programming problem in "standard form" using the Simplex
237
+ Method. Linear Programming is intended to solve the following problem form:
238
+
239
+ Minimize::
240
+
241
+ c @ x
242
+
243
+ Subject to::
244
+
245
+ A @ x == b
246
+ x >= 0
247
+
248
+ Parameters
249
+ ----------
250
+ T : 2-D array
251
+ A 2-D array representing the simplex tableau, T, corresponding to the
252
+ linear programming problem. It should have the form:
253
+
254
+ [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
255
+ [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
256
+ .
257
+ .
258
+ .
259
+ [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
260
+ [c[0], c[1], ..., c[n_total], 0]]
261
+
262
+ for a Phase 2 problem, or the form:
263
+
264
+ [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
265
+ [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
266
+ .
267
+ .
268
+ .
269
+ [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
270
+ [c[0], c[1], ..., c[n_total], 0],
271
+ [c'[0], c'[1], ..., c'[n_total], 0]]
272
+
273
+ for a Phase 1 problem (a problem in which a basic feasible solution is
274
+ sought prior to maximizing the actual objective). ``T`` is modified in
275
+ place by ``_solve_simplex``.
276
+ n : int
277
+ The number of true variables in the problem.
278
+ basis : 1-D array
279
+ An array of the indices of the basic variables, such that basis[i]
280
+ contains the column corresponding to the basic variable for row i.
281
+ Basis is modified in place by _solve_simplex
282
+ callback : callable, optional
283
+ If a callback function is provided, it will be called within each
284
+ iteration of the algorithm. The callback must accept a
285
+ `scipy.optimize.OptimizeResult` consisting of the following fields:
286
+
287
+ x : 1-D array
288
+ Current solution vector
289
+ fun : float
290
+ Current value of the objective function
291
+ success : bool
292
+ True only when a phase has completed successfully. This
293
+ will be False for most iterations.
294
+ slack : 1-D array
295
+ The values of the slack variables. Each slack variable
296
+ corresponds to an inequality constraint. If the slack is zero,
297
+ the corresponding constraint is active.
298
+ con : 1-D array
299
+ The (nominally zero) residuals of the equality constraints,
300
+ that is, ``b - A_eq @ x``
301
+ phase : int
302
+ The phase of the optimization being executed. In phase 1 a basic
303
+ feasible solution is sought and the T has an additional row
304
+ representing an alternate objective function.
305
+ status : int
306
+ An integer representing the exit status of the optimization::
307
+
308
+ 0 : Optimization terminated successfully
309
+ 1 : Iteration limit reached
310
+ 2 : Problem appears to be infeasible
311
+ 3 : Problem appears to be unbounded
312
+ 4 : Serious numerical difficulties encountered
313
+
314
+ nit : int
315
+ The number of iterations performed.
316
+ message : str
317
+ A string descriptor of the exit status of the optimization.
318
+ postsolve_args : tuple
319
+ Data needed by _postsolve to convert the solution to the standard-form
320
+ problem into the solution to the original problem.
321
+ maxiter : int
322
+ The maximum number of iterations to perform before aborting the
323
+ optimization.
324
+ tol : float
325
+ The tolerance which determines when a solution is "close enough" to
326
+ zero in Phase 1 to be considered a basic feasible solution or close
327
+ enough to positive to serve as an optimal solution.
328
+ phase : int
329
+ The phase of the optimization being executed. In phase 1 a basic
330
+ feasible solution is sought and the T has an additional row
331
+ representing an alternate objective function.
332
+ bland : bool
333
+ If True, choose pivots using Bland's rule [3]_. In problems which
334
+ fail to converge due to cycling, using Bland's rule can provide
335
+ convergence at the expense of a less optimal path about the simplex.
336
+ nit0 : int
337
+ The initial iteration number used to keep an accurate iteration total
338
+ in a two-phase problem.
339
+
340
+ Returns
341
+ -------
342
+ nit : int
343
+ The number of iterations. Used to keep an accurate iteration total
344
+ in the two-phase problem.
345
+ status : int
346
+ An integer representing the exit status of the optimization::
347
+
348
+ 0 : Optimization terminated successfully
349
+ 1 : Iteration limit reached
350
+ 2 : Problem appears to be infeasible
351
+ 3 : Problem appears to be unbounded
352
+ 4 : Serious numerical difficulties encountered
353
+
354
+ """
355
+ nit = nit0
356
+ status = 0
357
+ message = ''
358
+ complete = False
359
+
360
+ if phase == 1:
361
+ m = T.shape[1]-2
362
+ elif phase == 2:
363
+ m = T.shape[1]-1
364
+ else:
365
+ raise ValueError("Argument 'phase' to _solve_simplex must be 1 or 2")
366
+
367
+ if phase == 2:
368
+ # Check if any artificial variables are still in the basis.
369
+ # If yes, check if any coefficients from this row and a column
370
+ # corresponding to one of the non-artificial variable is non-zero.
371
+ # If found, pivot at this term. If not, start phase 2.
372
+ # Do this for all artificial variables in the basis.
373
+ # Ref: "An Introduction to Linear Programming and Game Theory"
374
+ # by Paul R. Thie, Gerard E. Keough, 3rd Ed,
375
+ # Chapter 3.7 Redundant Systems (pag 102)
376
+ for pivrow in [row for row in range(basis.size)
377
+ if basis[row] > T.shape[1] - 2]:
378
+ non_zero_row = [col for col in range(T.shape[1] - 1)
379
+ if abs(T[pivrow, col]) > tol]
380
+ if len(non_zero_row) > 0:
381
+ pivcol = non_zero_row[0]
382
+ _apply_pivot(T, basis, pivrow, pivcol, tol)
383
+ nit += 1
384
+
385
+ if len(basis[:m]) == 0:
386
+ solution = np.empty(T.shape[1] - 1, dtype=np.float64)
387
+ else:
388
+ solution = np.empty(max(T.shape[1] - 1, max(basis[:m]) + 1),
389
+ dtype=np.float64)
390
+
391
+ while not complete:
392
+ # Find the pivot column
393
+ pivcol_found, pivcol = _pivot_col(T, tol, bland)
394
+ if not pivcol_found:
395
+ pivcol = np.nan
396
+ pivrow = np.nan
397
+ status = 0
398
+ complete = True
399
+ else:
400
+ # Find the pivot row
401
+ pivrow_found, pivrow = _pivot_row(T, basis, pivcol, phase, tol, bland)
402
+ if not pivrow_found:
403
+ status = 3
404
+ complete = True
405
+
406
+ if callback is not None:
407
+ solution[:] = 0
408
+ solution[basis[:n]] = T[:n, -1]
409
+ x = solution[:m]
410
+ x, fun, slack, con = _postsolve(
411
+ x, postsolve_args
412
+ )
413
+ res = OptimizeResult({
414
+ 'x': x,
415
+ 'fun': fun,
416
+ 'slack': slack,
417
+ 'con': con,
418
+ 'status': status,
419
+ 'message': message,
420
+ 'nit': nit,
421
+ 'success': status == 0 and complete,
422
+ 'phase': phase,
423
+ 'complete': complete,
424
+ })
425
+ callback(res)
426
+
427
+ if not complete:
428
+ if nit >= maxiter:
429
+ # Iteration limit exceeded
430
+ status = 1
431
+ complete = True
432
+ else:
433
+ _apply_pivot(T, basis, pivrow, pivcol, tol)
434
+ nit += 1
435
+ return nit, status
436
+
437
+
438
+ def _linprog_simplex(c, c0, A, b, callback, postsolve_args,
439
+ maxiter=1000, tol=1e-9, disp=False, bland=False,
440
+ **unknown_options):
441
+ """
442
+ Minimize a linear objective function subject to linear equality and
443
+ non-negativity constraints using the two phase simplex method.
444
+ Linear programming is intended to solve problems of the following form:
445
+
446
+ Minimize::
447
+
448
+ c @ x
449
+
450
+ Subject to::
451
+
452
+ A @ x == b
453
+ x >= 0
454
+
455
+ User-facing documentation is in _linprog_doc.py.
456
+
457
+ Parameters
458
+ ----------
459
+ c : 1-D array
460
+ Coefficients of the linear objective function to be minimized.
461
+ c0 : float
462
+ Constant term in objective function due to fixed (and eliminated)
463
+ variables. (Purely for display.)
464
+ A : 2-D array
465
+ 2-D array such that ``A @ x``, gives the values of the equality
466
+ constraints at ``x``.
467
+ b : 1-D array
468
+ 1-D array of values representing the right hand side of each equality
469
+ constraint (row) in ``A``.
470
+ callback : callable, optional
471
+ If a callback function is provided, it will be called within each
472
+ iteration of the algorithm. The callback function must accept a single
473
+ `scipy.optimize.OptimizeResult` consisting of the following fields:
474
+
475
+ x : 1-D array
476
+ Current solution vector
477
+ fun : float
478
+ Current value of the objective function
479
+ success : bool
480
+ True when an algorithm has completed successfully.
481
+ slack : 1-D array
482
+ The values of the slack variables. Each slack variable
483
+ corresponds to an inequality constraint. If the slack is zero,
484
+ the corresponding constraint is active.
485
+ con : 1-D array
486
+ The (nominally zero) residuals of the equality constraints,
487
+ that is, ``b - A_eq @ x``
488
+ phase : int
489
+ The phase of the algorithm being executed.
490
+ status : int
491
+ An integer representing the status of the optimization::
492
+
493
+ 0 : Algorithm proceeding nominally
494
+ 1 : Iteration limit reached
495
+ 2 : Problem appears to be infeasible
496
+ 3 : Problem appears to be unbounded
497
+ 4 : Serious numerical difficulties encountered
498
+ nit : int
499
+ The number of iterations performed.
500
+ message : str
501
+ A string descriptor of the exit status of the optimization.
502
+ postsolve_args : tuple
503
+ Data needed by _postsolve to convert the solution to the standard-form
504
+ problem into the solution to the original problem.
505
+
506
+ Options
507
+ -------
508
+ maxiter : int
509
+ The maximum number of iterations to perform.
510
+ disp : bool
511
+ If True, print exit status message to sys.stdout
512
+ tol : float
513
+ The tolerance which determines when a solution is "close enough" to
514
+ zero in Phase 1 to be considered a basic feasible solution or close
515
+ enough to positive to serve as an optimal solution.
516
+ bland : bool
517
+ If True, use Bland's anti-cycling rule [3]_ to choose pivots to
518
+ prevent cycling. If False, choose pivots which should lead to a
519
+ converged solution more quickly. The latter method is subject to
520
+ cycling (non-convergence) in rare instances.
521
+ unknown_options : dict
522
+ Optional arguments not used by this particular solver. If
523
+ `unknown_options` is non-empty a warning is issued listing all
524
+ unused options.
525
+
526
+ Returns
527
+ -------
528
+ x : 1-D array
529
+ Solution vector.
530
+ status : int
531
+ An integer representing the exit status of the optimization::
532
+
533
+ 0 : Optimization terminated successfully
534
+ 1 : Iteration limit reached
535
+ 2 : Problem appears to be infeasible
536
+ 3 : Problem appears to be unbounded
537
+ 4 : Serious numerical difficulties encountered
538
+
539
+ message : str
540
+ A string descriptor of the exit status of the optimization.
541
+ iteration : int
542
+ The number of iterations taken to solve the problem.
543
+
544
+ References
545
+ ----------
546
+ .. [1] Dantzig, George B., Linear programming and extensions. Rand
547
+ Corporation Research Study Princeton Univ. Press, Princeton, NJ,
548
+ 1963
549
+ .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
550
+ Mathematical Programming", McGraw-Hill, Chapter 4.
551
+ .. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
552
+ Mathematics of Operations Research (2), 1977: pp. 103-107.
553
+
554
+
555
+ Notes
556
+ -----
557
+ The expected problem formulation differs between the top-level ``linprog``
558
+ module and the method-specific solvers. The method-specific solvers expect a
559
+ problem in standard form:
560
+
561
+ Minimize::
562
+
563
+ c @ x
564
+
565
+ Subject to::
566
+
567
+ A @ x == b
568
+ x >= 0
569
+
570
+ Whereas the top-level ``linprog`` module expects a problem of the form:
571
+
572
+ Minimize::
573
+
574
+ c @ x
575
+
576
+ Subject to::
577
+
578
+ A_ub @ x <= b_ub
579
+ A_eq @ x == b_eq
580
+ lb <= x <= ub
581
+
582
+ where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.
583
+
584
+ The original problem contains equality, upper-bound, and variable constraints,
585
+ whereas the method-specific solver requires equality constraints and
586
+ variable non-negativity.
587
+
588
+ The ``linprog`` module converts the original problem to standard form by
589
+ converting the simple bounds to upper bound constraints, introducing
590
+ non-negative slack variables for inequality constraints, and expressing
591
+ unbounded variables as the difference between two non-negative variables.
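+ 
+ For illustration (a sketch, not drawn from the implementation), a problem
+ with one inequality and one free variable converts as::
+ 
+ original: minimize c @ [x0, x1], x0 + x1 <= 4, x1 free
+ standard: minimize c' @ z, x0 + (x1p - x1m) + s == 4, z >= 0
+ 
+ where ``z = [x0, x1p, x1m, s]``, ``s`` is the slack variable of the
+ inequality, and ``x1 = x1p - x1m`` recovers the free variable.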
592
+ """
593
+ _check_unknown_options(unknown_options)
594
+
595
+ status = 0
596
+ messages = {0: "Optimization terminated successfully.",
597
+ 1: "Iteration limit reached.",
598
+ 2: "Optimization failed. Unable to find a feasible"
599
+ " starting point.",
600
+ 3: "Optimization failed. The problem appears to be unbounded.",
601
+ 4: "Optimization failed. Singular matrix encountered."}
602
+
603
+ n, m = A.shape
604
+
605
+ # All constraints must have b >= 0.
606
+ is_negative_constraint = np.less(b, 0)
607
+ A[is_negative_constraint] *= -1
608
+ b[is_negative_constraint] *= -1
609
+
610
+ # As all constraints are equality constraints the artificial variables
611
+ # will also be basic variables.
612
+ av = np.arange(n) + m
613
+ basis = av.copy()
614
+
615
+ # Format the phase one tableau by adding artificial variables and stacking
616
+ # the constraints, the objective row and pseudo-objective row.
617
+ row_constraints = np.hstack((A, np.eye(n), b[:, np.newaxis]))
618
+ row_objective = np.hstack((c, np.zeros(n), c0))
619
+ row_pseudo_objective = -row_constraints.sum(axis=0)
620
+ row_pseudo_objective[av] = 0
621
+ T = np.vstack((row_constraints, row_objective, row_pseudo_objective))
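+ # Illustration (hypothetical values): for c = [-1, -2], c0 = 0,
+ # A = [[1, 1]], b = [4] (n = 1 constraint, m = 2 variables) this yields
+ # row_constraints = [[ 1, 1, 1, 4]]
+ # row_objective = [[-1, -2, 0, 0]]
+ # row_pseudo_objective = [[-1, -1, 0, -4]] (artificial column zeroed)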
622
+
623
+ nit1, status = _solve_simplex(T, n, basis, callback=callback,
624
+ postsolve_args=postsolve_args,
625
+ maxiter=maxiter, tol=tol, phase=1,
626
+ bland=bland
627
+ )
628
+ # if pseudo objective is zero, remove the last row from the tableau and
629
+ # proceed to phase 2
630
+ nit2 = nit1
631
+ if abs(T[-1, -1]) < tol:
632
+ # Remove the pseudo-objective row from the tableau
633
+ T = T[:-1, :]
634
+ # Remove the artificial variable columns from the tableau
635
+ T = np.delete(T, av, 1)
636
+ else:
637
+ # Failure to find a feasible starting point
638
+ status = 2
639
+ messages[status] = (
640
+ "Phase 1 of the simplex method failed to find a feasible "
641
+ "solution. The pseudo-objective function evaluates to {0:.1e} "
642
+ "which exceeds the required tolerance of {1} for a solution to be "
643
+ "considered 'close enough' to zero to be a basic solution. "
644
+ "Consider increasing the tolerance to be greater than {0:.1e}. "
645
+ "If this tolerance is unacceptably large the problem may be "
646
+ "infeasible.".format(abs(T[-1, -1]), tol)
647
+ )
648
+
649
+ if status == 0:
650
+ # Phase 2
651
+ nit2, status = _solve_simplex(T, n, basis, callback=callback,
652
+ postsolve_args=postsolve_args,
653
+ maxiter=maxiter, tol=tol, phase=2,
654
+ bland=bland, nit0=nit1
655
+ )
656
+
657
+ solution = np.zeros(n + m)
658
+ solution[basis[:n]] = T[:n, -1]
659
+ x = solution[:m]
660
+
661
+ return x, status, messages[status], int(nit2)
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_util.py ADDED
@@ -0,0 +1,1522 @@
1
+ """
2
+ Method-agnostic utility functions for linear programming
3
+ """
4
+
5
+ import numpy as np
6
+ import scipy.sparse as sps
7
+ from warnings import warn
8
+ from ._optimize import OptimizeWarning
9
+ from scipy.optimize._remove_redundancy import (
10
+ _remove_redundancy_svd, _remove_redundancy_pivot_sparse,
11
+ _remove_redundancy_pivot_dense, _remove_redundancy_id
12
+ )
13
+ from collections import namedtuple
14
+
15
+ _LPProblem = namedtuple('_LPProblem',
16
+ 'c A_ub b_ub A_eq b_eq bounds x0 integrality')
17
+ _LPProblem.__new__.__defaults__ = (None,) * 7 # make c the only required arg
18
+ _LPProblem.__doc__ = \
19
+ """ Represents a linear-programming problem.
20
+
21
+ Attributes
22
+ ----------
23
+ c : 1D array
24
+ The coefficients of the linear objective function to be minimized.
25
+ A_ub : 2D array, optional
26
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
27
+ coefficients of a linear inequality constraint on ``x``.
28
+ b_ub : 1D array, optional
29
+ The inequality constraint vector. Each element represents an
30
+ upper bound on the corresponding value of ``A_ub @ x``.
31
+ A_eq : 2D array, optional
32
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
33
+ coefficients of a linear equality constraint on ``x``.
34
+ b_eq : 1D array, optional
35
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
36
+ the corresponding element of ``b_eq``.
37
+ bounds : various valid formats, optional
38
+ The bounds of ``x``, as ``min`` and ``max`` pairs.
39
+ If bounds are specified for all N variables separately, valid formats
40
+ are:
41
+ * a 2D array (N x 2);
42
+ * a sequence of N sequences, each with 2 values.
43
+ If all variables have the same bounds, the bounds can be specified as
44
+ a 1-D or 2-D array or sequence with 2 scalar values.
45
+ If all variables have a lower bound of 0 and no upper bound, the bounds
46
+ parameter can be omitted (or given as None).
47
+ Absent lower and/or upper bounds can be specified as -numpy.inf (no
48
+ lower bound), numpy.inf (no upper bound) or None (both).
49
+ x0 : 1D array, optional
50
+ Guess values of the decision variables, which will be refined by
51
+ the optimization algorithm. This argument is currently used only by the
52
+ 'revised simplex' method, and can only be used if `x0` represents a
53
+ basic feasible solution.
54
+ integrality : 1-D array or int, optional
55
+ Indicates the type of integrality constraint on each decision variable.
56
+
57
+ ``0`` : Continuous variable; no integrality constraint.
58
+
59
+ ``1`` : Integer variable; decision variable must be an integer
60
+ within `bounds`.
61
+
62
+ ``2`` : Semi-continuous variable; decision variable must be within
63
+ `bounds` or take value ``0``.
64
+
65
+ ``3`` : Semi-integer variable; decision variable must be an integer
66
+ within `bounds` or take value ``0``.
67
+
68
+ By default, all variables are continuous.
69
+
70
+ For mixed integrality constraints, supply an array of shape `c.shape`.
71
+ To infer a constraint on each decision variable from shorter inputs,
72
+ the argument will be broadcasted to `c.shape` using `np.broadcast_to`.
73
+
74
+ This argument is currently used only by the ``'highs'`` method and
75
+ ignored otherwise.
76
+
77
+ Notes
78
+ -----
79
+ This namedtuple supports 2 ways of initialization:
80
+ >>> lp1 = _LPProblem(c=[-1, 4], A_ub=[[-3, 1], [1, 2]], b_ub=[6, 4])
81
+ >>> lp2 = _LPProblem([-1, 4], [[-3, 1], [1, 2]], [6, 4])
82
+
83
+ Note that only ``c`` is a required argument here, whereas all other arguments
84
+ ``A_ub``, ``b_ub``, ``A_eq``, ``b_eq``, ``bounds``, ``x0`` and ``integrality``
85
+ are optional, with default values of None.
86
+ For example, ``A_eq`` and ``b_eq`` can be set without ``A_ub`` or ``b_ub``:
87
+ >>> lp3 = _LPProblem(c=[-1, 4], A_eq=[[2, 1]], b_eq=[10])
88
+ """
89
+
90
+
91
+ def _check_sparse_inputs(options, meth, A_ub, A_eq):
92
+ """
93
+ Check that the provided ``A_ub`` and ``A_eq`` matrices conform to the
94
+ specified optional sparsity settings.
95
+
96
+ Parameters
97
+ ----------
98
+ A_ub : 2-D array, optional
99
+ 2-D array such that ``A_ub @ x`` gives the values of the upper-bound
100
+ inequality constraints at ``x``.
101
+ A_eq : 2-D array, optional
102
+ 2-D array such that ``A_eq @ x`` gives the values of the equality
103
+ constraints at ``x``.
104
+ options : dict
105
+ A dictionary of solver options. All methods accept the following
106
+ generic options:
107
+
108
+ maxiter : int
109
+ Maximum number of iterations to perform.
110
+ disp : bool
111
+ Set to True to print convergence messages.
112
+
113
+ For method-specific options, see :func:`show_options('linprog')`.
114
+ meth : str
115
+ The algorithm used to solve the standard form problem.
116
+
117
+ Returns
118
+ -------
119
+ A_ub : 2-D array, optional
120
+ 2-D array such that ``A_ub @ x`` gives the values of the upper-bound
121
+ inequality constraints at ``x``.
122
+ A_eq : 2-D array, optional
123
+ 2-D array such that ``A_eq @ x`` gives the values of the equality
124
+ constraints at ``x``.
125
+ options : dict
126
+ A dictionary of solver options. All methods accept the following
127
+ generic options:
128
+
129
+ maxiter : int
130
+ Maximum number of iterations to perform.
131
+ disp : bool
132
+ Set to True to print convergence messages.
133
+
134
+ For method-specific options, see :func:`show_options('linprog')`.
135
+ """
136
+ # This is an undocumented option for unit testing sparse presolve
137
+ _sparse_presolve = options.pop('_sparse_presolve', False)
138
+ if _sparse_presolve and A_eq is not None:
139
+ A_eq = sps.coo_matrix(A_eq)
140
+ if _sparse_presolve and A_ub is not None:
141
+ A_ub = sps.coo_matrix(A_ub)
142
+
143
+ sparse_constraint = sps.issparse(A_eq) or sps.issparse(A_ub)
144
+
145
+ preferred_methods = {"highs", "highs-ds", "highs-ipm"}
146
+ dense_methods = {"simplex", "revised simplex"}
147
+ if meth in dense_methods and sparse_constraint:
148
+ raise ValueError(f"Method '{meth}' does not support sparse "
149
+ "constraint matrices. Please consider using one of "
150
+ f"{preferred_methods}.")
151
+
152
+ sparse = options.get('sparse', False)
153
+ if not sparse and sparse_constraint and meth == 'interior-point':
154
+ options['sparse'] = True
155
+ warn("Sparse constraint matrix detected; setting 'sparse':True.",
156
+ OptimizeWarning, stacklevel=4)
157
+ return options, A_ub, A_eq
158
+
159
+
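+ # A minimal illustrative sketch of the check above (module-internal API,
+ # illustrative values; relies on the module-level ``sps`` import). A sparse
+ # matrix passed with 'interior-point' flips the 'sparse' option and warns
+ # rather than failing:
+ # >>> opts, A_ub, A_eq = _check_sparse_inputs(
+ # ... {}, 'interior-point', None, sps.eye(2).tocsc())
+ # >>> opts
+ # {'sparse': True}
+ # A dense-only method such as 'simplex' would instead raise a ValueError.
+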
160
+ def _format_A_constraints(A, n_x, sparse_lhs=False):
161
+ """Format the left hand side of the constraints to a 2-D array
162
+
163
+ Parameters
164
+ ----------
165
+ A : 2-D array
166
+ 2-D array such that ``A @ x`` gives the values of the upper-bound
167
+ (in)equality constraints at ``x``.
168
+ n_x : int
169
+ The number of variables in the linear programming problem.
170
+ sparse_lhs : bool
171
+ Whether either of ``A_ub`` or ``A_eq`` is sparse. If True, return a
172
+ coo_matrix instead of a numpy array.
173
+
174
+ Returns
175
+ -------
176
+ np.ndarray or sparse.coo_matrix
177
+ 2-D array such that ``A @ x`` gives the values of the upper-bound
178
+ (in)equality constraints at ``x``.
179
+
180
+ """
181
+ if sparse_lhs:
182
+ return sps.coo_matrix(
183
+ (0, n_x) if A is None else A, dtype=float, copy=True
184
+ )
185
+ elif A is None:
186
+ return np.zeros((0, n_x), dtype=float)
187
+ else:
188
+ return np.array(A, dtype=float, copy=True)
189
+
190
+
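+ # Example of the ``A is None`` path above: the helper returns an empty
+ # matrix with the right column count, so downstream shape checks still hold:
+ # >>> _format_A_constraints(None, 3)
+ # array([], shape=(0, 3), dtype=float64)
+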
191
+ def _format_b_constraints(b):
192
+ """Format the upper bounds of the constraints to a 1-D array
193
+
194
+ Parameters
195
+ ----------
196
+ b : 1-D array
197
+ 1-D array of values representing the upper-bound of each (in)equality
198
+ constraint (row) in ``A``.
199
+
200
+ Returns
201
+ -------
202
+ 1-D np.array
203
+ 1-D array of values representing the upper-bound of each (in)equality
204
+ constraint (row) in ``A``.
205
+
206
+ """
207
+ if b is None:
208
+ return np.array([], dtype=float)
209
+ b = np.array(b, dtype=float, copy=True).squeeze()
210
+ return b if b.size != 1 else b.reshape(-1)
211
+
212
+
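+ # Examples of the normalization above: ``None`` becomes an empty array and
+ # a scalar is promoted to a length-1 1-D array:
+ # >>> _format_b_constraints(None)
+ # array([], dtype=float64)
+ # >>> _format_b_constraints(5)
+ # array([5.])
+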
213
+ def _clean_inputs(lp):
214
+ """
215
+ Given user inputs for a linear programming problem, return the
216
+ objective vector, upper bound constraints, equality constraints,
217
+ and simple bounds in a preferred format.
218
+
219
+ Parameters
220
+ ----------
221
+ lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
222
+
223
+ c : 1D array
224
+ The coefficients of the linear objective function to be minimized.
225
+ A_ub : 2D array, optional
226
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
227
+ coefficients of a linear inequality constraint on ``x``.
228
+ b_ub : 1D array, optional
229
+ The inequality constraint vector. Each element represents an
230
+ upper bound on the corresponding value of ``A_ub @ x``.
231
+ A_eq : 2D array, optional
232
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
233
+ coefficients of a linear equality constraint on ``x``.
234
+ b_eq : 1D array, optional
235
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
236
+ the corresponding element of ``b_eq``.
237
+ bounds : various valid formats, optional
238
+ The bounds of ``x``, as ``min`` and ``max`` pairs.
239
+ If bounds are specified for all N variables separately, valid formats are:
240
+ * a 2D array (N x 2);
241
+ * a sequence of N sequences, each with 2 values.
242
+ If all variables have the same bounds, a single pair of values can
243
+ be specified. Valid formats are:
244
+ * a sequence with 2 scalar values;
245
+ * a sequence with a single element containing 2 scalar values.
246
+ If all variables have a lower bound of 0 and no upper bound, the bounds
247
+ parameter can be omitted (or given as None).
248
+ x0 : 1D array, optional
249
+ Guess values of the decision variables, which will be refined by
250
+ the optimization algorithm. This argument is currently used only by the
251
+ 'revised simplex' method, and can only be used if `x0` represents a
252
+ basic feasible solution.
253
+
254
+ Returns
255
+ -------
256
+ lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
257
+
258
+ c : 1D array
259
+ The coefficients of the linear objective function to be minimized.
260
+ A_ub : 2D array, optional
261
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
262
+ coefficients of a linear inequality constraint on ``x``.
263
+ b_ub : 1D array, optional
264
+ The inequality constraint vector. Each element represents an
265
+ upper bound on the corresponding value of ``A_ub @ x``.
266
+ A_eq : 2D array, optional
267
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
268
+ coefficients of a linear equality constraint on ``x``.
269
+ b_eq : 1D array, optional
270
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
271
+ the corresponding element of ``b_eq``.
272
+ bounds : 2D array
273
+ The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N
274
+ elements of ``x``. The N x 2 array contains lower bounds in the first
275
+ column and upper bounds in the 2nd. Unbounded variables have lower
276
+ bound -np.inf and/or upper bound np.inf.
277
+ x0 : 1D array, optional
278
+ Guess values of the decision variables, which will be refined by
279
+ the optimization algorithm. This argument is currently used only by the
280
+ 'revised simplex' method, and can only be used if `x0` represents a
281
+ basic feasible solution.
282
+
283
+ """
284
+ c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp
285
+
286
+ if c is None:
287
+ raise TypeError("Invalid input for linprog: c must not be None")
288
+
289
+ try:
290
+ c = np.array(c, dtype=np.float64, copy=True).squeeze()
291
+ except ValueError as e:
292
+ raise TypeError(
293
+ "Invalid input for linprog: c must be a 1-D array of numerical "
294
+ "coefficients") from e
295
+ else:
296
+ # If c is a single value, convert it to a 1-D array.
297
+ if c.size == 1:
298
+ c = c.reshape(-1)
299
+
300
+ n_x = len(c)
301
+ if n_x == 0 or len(c.shape) != 1:
302
+ raise ValueError(
303
+ "Invalid input for linprog: c must be a 1-D array and must "
304
+ "not have more than one non-singleton dimension")
305
+ if not np.isfinite(c).all():
306
+ raise ValueError(
307
+ "Invalid input for linprog: c must not contain values "
308
+ "inf, nan, or None")
309
+
310
+ sparse_lhs = sps.issparse(A_eq) or sps.issparse(A_ub)
311
+ try:
312
+ A_ub = _format_A_constraints(A_ub, n_x, sparse_lhs=sparse_lhs)
313
+ except ValueError as e:
314
+ raise TypeError(
315
+ "Invalid input for linprog: A_ub must be a 2-D array "
316
+ "of numerical values") from e
317
+ else:
318
+ n_ub = A_ub.shape[0]
319
+ if len(A_ub.shape) != 2 or A_ub.shape[1] != n_x:
320
+ raise ValueError(
321
+ "Invalid input for linprog: A_ub must have exactly two "
322
+ "dimensions, and the number of columns in A_ub must be "
323
+ "equal to the size of c")
324
+ if (sps.issparse(A_ub) and not np.isfinite(A_ub.data).all()
325
+ or not sps.issparse(A_ub) and not np.isfinite(A_ub).all()):
326
+ raise ValueError(
327
+ "Invalid input for linprog: A_ub must not contain values "
328
+ "inf, nan, or None")
329
+
330
+ try:
331
+ b_ub = _format_b_constraints(b_ub)
332
+ except ValueError as e:
333
+ raise TypeError(
334
+ "Invalid input for linprog: b_ub must be a 1-D array of "
335
+ "numerical values, each representing the upper bound of an "
336
+ "inequality constraint (row) in A_ub") from e
337
+ else:
338
+ if b_ub.shape != (n_ub,):
339
+ raise ValueError(
340
+ "Invalid input for linprog: b_ub must be a 1-D array; b_ub "
341
+ "must not have more than one non-singleton dimension and "
342
+ "the number of rows in A_ub must equal the number of values "
343
+ "in b_ub")
344
+ if not np.isfinite(b_ub).all():
345
+ raise ValueError(
346
+ "Invalid input for linprog: b_ub must not contain values "
347
+ "inf, nan, or None")
348
+
349
+ try:
350
+ A_eq = _format_A_constraints(A_eq, n_x, sparse_lhs=sparse_lhs)
351
+ except ValueError as e:
352
+ raise TypeError(
353
+ "Invalid input for linprog: A_eq must be a 2-D array "
354
+ "of numerical values") from e
355
+ else:
356
+ n_eq = A_eq.shape[0]
357
+ if len(A_eq.shape) != 2 or A_eq.shape[1] != n_x:
358
+ raise ValueError(
359
+ "Invalid input for linprog: A_eq must have exactly two "
360
+ "dimensions, and the number of columns in A_eq must be "
361
+ "equal to the size of c")
362
+
363
+ if (sps.issparse(A_eq) and not np.isfinite(A_eq.data).all()
364
+ or not sps.issparse(A_eq) and not np.isfinite(A_eq).all()):
365
+ raise ValueError(
366
+ "Invalid input for linprog: A_eq must not contain values "
367
+ "inf, nan, or None")
368
+
369
+ try:
370
+ b_eq = _format_b_constraints(b_eq)
371
+ except ValueError as e:
372
+ raise TypeError(
373
+ "Invalid input for linprog: b_eq must be a dense, 1-D array of "
374
+ "numerical values, each representing the right hand side of an "
375
+ "equality constraint (row) in A_eq") from e
376
+ else:
377
+ if b_eq.shape != (n_eq,):
378
+ raise ValueError(
379
+ "Invalid input for linprog: b_eq must be a 1-D array; b_eq "
380
+ "must not have more than one non-singleton dimension and "
381
+ "the number of rows in A_eq must equal the number of values "
382
+ "in b_eq")
383
+ if not np.isfinite(b_eq).all():
384
+ raise ValueError(
385
+ "Invalid input for linprog: b_eq must not contain values "
386
+ "inf, nan, or None")
387
+
388
+ # x0 gives an (optional) starting solution to the solver. If x0 is None,
389
+ # skip the checks. Initial solution will be generated automatically.
390
+ if x0 is not None:
391
+ try:
392
+ x0 = np.array(x0, dtype=float, copy=True).squeeze()
393
+ except ValueError as e:
394
+ raise TypeError(
395
+ "Invalid input for linprog: x0 must be a 1-D array of "
396
+ "numerical coefficients") from e
397
+ if x0.ndim == 0:
398
+ x0 = x0.reshape(-1)
399
+ if len(x0) == 0 or x0.ndim != 1:
400
+ raise ValueError(
401
+ "Invalid input for linprog: x0 should be a 1-D array; it "
402
+ "must not have more than one non-singleton dimension")
403
+ if not x0.size == c.size:
404
+ raise ValueError(
405
+ "Invalid input for linprog: x0 and c should contain the "
406
+ "same number of elements")
407
+ if not np.isfinite(x0).all():
408
+ raise ValueError(
409
+ "Invalid input for linprog: x0 must not contain values "
410
+ "inf, nan, or None")
411
+
412
+ # Bounds can be one of these formats:
413
+ # (1) a 2-D array or sequence, with shape N x 2
414
+ # (2) a 1-D or 2-D sequence or array with 2 scalars
415
+ # (3) None (or an empty sequence or array)
416
+ # Unspecified bounds can be represented by None or (-)np.inf.
417
+ # All formats are converted into a N x 2 np.array with (-)np.inf where
418
+ # bounds are unspecified.
419
+
420
+ # Prepare clean bounds array
421
+ bounds_clean = np.zeros((n_x, 2), dtype=float)
422
+
423
+ # Convert to a numpy array.
424
+ # np.array(..,dtype=float) raises an error if dimensions are inconsistent
425
+ # or if there are invalid data types in bounds. Just add a linprog prefix
426
+ # to the error and re-raise.
427
+ # Creating at least a 2-D array simplifies the cases to distinguish below.
428
+ if bounds is None or np.array_equal(bounds, []) or np.array_equal(bounds, [[]]):
429
+ bounds = (0, np.inf)
430
+ try:
431
+ bounds_conv = np.atleast_2d(np.array(bounds, dtype=float))
432
+ except ValueError as e:
433
+ raise ValueError(
434
+ "Invalid input for linprog: unable to interpret bounds, "
435
+ "check values and dimensions: " + e.args[0]) from e
436
+ except TypeError as e:
437
+ raise TypeError(
438
+ "Invalid input for linprog: unable to interpret bounds, "
439
+ "check values and dimensions: " + e.args[0]) from e
440
+
441
+ # Check bounds options
442
+ bsh = bounds_conv.shape
443
+ if len(bsh) > 2:
444
+ # Do not try to handle multidimensional bounds input
445
+ raise ValueError(
446
+ "Invalid input for linprog: provide a 2-D array for bounds, "
447
+ f"not a {len(bsh):d}-D array.")
448
+ elif np.all(bsh == (n_x, 2)):
449
+ # Regular N x 2 array
450
+ bounds_clean = bounds_conv
451
+ elif (np.all(bsh == (2, 1)) or np.all(bsh == (1, 2))):
452
+ # 2 values: interpret as overall lower and upper bound
453
+ bounds_flat = bounds_conv.flatten()
454
+ bounds_clean[:, 0] = bounds_flat[0]
455
+ bounds_clean[:, 1] = bounds_flat[1]
456
+ elif np.all(bsh == (2, n_x)):
457
+ # Reject a 2 x N array
458
+ raise ValueError(
459
+ f"Invalid input for linprog: provide a {n_x:d} x 2 array for bounds, "
460
+ f"not a 2 x {n_x:d} array.")
461
+ else:
462
+ raise ValueError(
463
+ "Invalid input for linprog: unable to interpret bounds with this "
464
+ f"dimension tuple: {bsh}.")
465
+
466
+ # The process above creates nan-s where the input specified None
467
+ # Convert the nan-s in the 1st column to -np.inf and in the 2nd column
468
+ # to np.inf
469
+ i_none = np.isnan(bounds_clean[:, 0])
470
+ bounds_clean[i_none, 0] = -np.inf
471
+ i_none = np.isnan(bounds_clean[:, 1])
472
+ bounds_clean[i_none, 1] = np.inf
473
+
474
+ return _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds_clean, x0, integrality)
475
+
476
+
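+ # A hedged usage sketch (module-internal API, illustrative values): with
+ # only ``c`` given, the defaults are filled in and bounds become the
+ # canonical N x 2 array with the default 0 <= x < inf:
+ # >>> lp = _clean_inputs(_LPProblem(c=[1, 2]))
+ # >>> lp.bounds
+ # array([[ 0., inf],
+ # [ 0., inf]])
+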
477
+ def _presolve(lp, rr, rr_method, tol=1e-9):
478
+ """
479
+ Given inputs for a linear programming problem in preferred format,
480
+ presolve the problem: identify trivial infeasibilities, redundancies,
481
+ and unboundedness, tighten bounds where possible, and eliminate fixed
482
+ variables.
483
+
484
+ Parameters
485
+ ----------
486
+ lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
487
+
488
+ c : 1D array
489
+ The coefficients of the linear objective function to be minimized.
490
+ A_ub : 2D array, optional
491
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
492
+ coefficients of a linear inequality constraint on ``x``.
493
+ b_ub : 1D array, optional
494
+ The inequality constraint vector. Each element represents an
495
+ upper bound on the corresponding value of ``A_ub @ x``.
496
+ A_eq : 2D array, optional
497
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
498
+ coefficients of a linear equality constraint on ``x``.
499
+ b_eq : 1D array, optional
500
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
501
+ the corresponding element of ``b_eq``.
502
+ bounds : 2D array
503
+ The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N
504
+ elements of ``x``. The N x 2 array contains lower bounds in the first
505
+ column and upper bounds in the 2nd. Unbounded variables have lower
506
+ bound -np.inf and/or upper bound np.inf.
507
+ x0 : 1D array, optional
508
+ Guess values of the decision variables, which will be refined by
509
+ the optimization algorithm. This argument is currently used only by the
510
+ 'revised simplex' method, and can only be used if `x0` represents a
511
+ basic feasible solution.
512
+
513
+ rr : bool
514
+ If ``True`` attempts to eliminate any redundant rows in ``A_eq``.
515
+ Set False if ``A_eq`` is known to be of full row rank, or if you are
516
+ looking for a potential speedup (at the expense of reliability).
517
+ rr_method : string
518
+ Method used to identify and remove redundant rows from the
519
+ equality constraint matrix after presolve.
520
+ tol : float
521
+ The tolerance which determines when a solution is "close enough" to
522
+ zero in Phase 1 to be considered a basic feasible solution or close
523
+ enough to positive to serve as an optimal solution.
524
+
525
+ Returns
526
+ -------
527
+ lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
528
+
529
+ c : 1D array
530
+ The coefficients of the linear objective function to be minimized.
531
+ A_ub : 2D array, optional
532
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
533
+ coefficients of a linear inequality constraint on ``x``.
534
+ b_ub : 1D array, optional
535
+ The inequality constraint vector. Each element represents an
536
+ upper bound on the corresponding value of ``A_ub @ x``.
537
+ A_eq : 2D array, optional
538
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
539
+ coefficients of a linear equality constraint on ``x``.
540
+ b_eq : 1D array, optional
541
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
542
+ the corresponding element of ``b_eq``.
543
+ bounds : 2D array
544
+ The bounds of ``x``, as ``min`` and ``max`` pairs, possibly tightened.
545
+ x0 : 1D array, optional
546
+ Guess values of the decision variables, which will be refined by
547
+ the optimization algorithm. This argument is currently used only by the
548
+ 'revised simplex' method, and can only be used if `x0` represents a
549
+ basic feasible solution.
550
+
551
+ c0 : 1D array
552
+ Constant term in objective function due to fixed (and eliminated)
553
+ variables.
554
+ x : 1D array
555
+ Solution vector (when the solution is trivial and can be determined
556
+ in presolve)
557
+ revstack: list of functions
558
+ the functions in the list reverse the operations of _presolve()
559
+ the function signature is x_org = f(x_mod), where x_mod is the result
560
+ of a presolve step and x_org the value at the start of the step
561
+ (currently, the revstack contains only one function)
562
+ complete: bool
563
+ Whether the solution is complete (solved or determined to be infeasible
564
+ or unbounded in presolve)
565
+ status : int
566
+ An integer representing the exit status of the optimization::
567
+
568
+ 0 : Optimization terminated successfully
569
+ 1 : Iteration limit reached
570
+ 2 : Problem appears to be infeasible
571
+ 3 : Problem appears to be unbounded
572
+ 4 : Serious numerical difficulties encountered
573
+
574
+ message : str
575
+ A string descriptor of the exit status of the optimization.
576
+
577
+ References
578
+ ----------
579
+ .. [5] Andersen, Erling D. "Finding all linearly dependent rows in
580
+ large-scale linear programming." Optimization Methods and Software
581
+ 6.3 (1995): 219-227.
582
+ .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
583
+ programming." Mathematical Programming 71.2 (1995): 221-245.
584
+
585
+ """
586
+ # ideas from Reference [8] by Andersen and Andersen
587
+ # however, unlike the reference, this is performed before converting
588
+ # problem to standard form
589
+ # There are a few advantages:
590
+ # * artificial variables have not been added, so matrices are smaller
591
+ # * bounds have not been converted to constraints yet. (It is better to
592
+ # do that after presolve because presolve may adjust the simple bounds.)
593
+ # There are many improvements that can be made, namely:
594
+ # * implement remaining checks from [5]
595
+ # * loop presolve until no additional changes are made
596
+ # * implement additional efficiency improvements in redundancy removal [2]
597
+
598
+ c, A_ub, b_ub, A_eq, b_eq, bounds, x0, _ = lp
599
+
600
+ revstack = [] # record of variables eliminated from problem
601
+ # constant term in cost function may be added if variables are eliminated
602
+ c0 = 0
603
+ complete = False # complete is True if detected infeasible/unbounded
604
+ x = np.zeros(c.shape) # this is solution vector if completed in presolve
605
+
606
+ status = 0 # all OK unless determined otherwise
607
+ message = ""
608
+
609
+ # Lower and upper bounds. Copy to prevent feedback.
610
+ lb = bounds[:, 0].copy()
611
+ ub = bounds[:, 1].copy()
612
+
613
+ m_eq, n = A_eq.shape
614
+ m_ub, n = A_ub.shape
615
+
616
+ if (rr_method is not None
617
+ and rr_method.lower() not in {"svd", "pivot", "id"}):
618
+ message = ("'" + str(rr_method) + "' is not a valid option "
619
+ "for redundancy removal. Valid options are 'SVD', "
620
+ "'pivot', and 'ID'.")
621
+ raise ValueError(message)
622
+
623
+ if sps.issparse(A_eq):
624
+ A_eq = A_eq.tocsr()
625
+ A_ub = A_ub.tocsr()
626
+
627
+ def where(A):
628
+ return A.nonzero()
629
+
630
+ vstack = sps.vstack
631
+ else:
632
+ where = np.where
633
+ vstack = np.vstack
634
+
635
+ # upper bounds > lower bounds
636
+ if np.any(ub < lb) or np.any(lb == np.inf) or np.any(ub == -np.inf):
637
+ status = 2
638
+ message = ("The problem is (trivially) infeasible since one "
639
+ "or more upper bounds are smaller than the corresponding "
640
+ "lower bounds, a lower bound is np.inf or an upper bound "
641
+ "is -np.inf.")
642
+ complete = True
643
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
644
+ c0, x, revstack, complete, status, message)
645
+
646
+ # zero row in equality constraints
647
+ zero_row = np.array(np.sum(A_eq != 0, axis=1) == 0).flatten()
648
+ if np.any(zero_row):
649
+ if np.any(
650
+ np.logical_and(
651
+ zero_row,
652
+ np.abs(b_eq) > tol)): # test_zero_row_1
653
+ # infeasible if RHS is not zero
654
+ status = 2
655
+ message = ("The problem is (trivially) infeasible due to a row "
656
+ "of zeros in the equality constraint matrix with a "
657
+ "nonzero corresponding constraint value.")
658
+ complete = True
659
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
660
+ c0, x, revstack, complete, status, message)
661
+ else: # test_zero_row_2
662
+ # if RHS is zero, we can eliminate this equation entirely
663
+ A_eq = A_eq[np.logical_not(zero_row), :]
664
+ b_eq = b_eq[np.logical_not(zero_row)]
665
+
666
+ # zero row in inequality constraints
667
+ zero_row = np.array(np.sum(A_ub != 0, axis=1) == 0).flatten()
668
+ if np.any(zero_row):
669
+ if np.any(np.logical_and(zero_row, b_ub < -tol)): # test_zero_row_1
670
+ # infeasible if RHS is less than zero (because LHS is zero)
671
+ status = 2
672
+ message = ("The problem is (trivially) infeasible due to a row "
673
+ "of zeros in the equality constraint matrix with a "
674
+ "nonzero corresponding constraint value.")
675
+ complete = True
676
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
677
+ c0, x, revstack, complete, status, message)
678
+ else: # test_zero_row_2
679
+ # if LHS is >= 0, we can eliminate this constraint entirely
680
+ A_ub = A_ub[np.logical_not(zero_row), :]
681
+ b_ub = b_ub[np.logical_not(zero_row)]
682
+
683
+ # zero column in (both) constraints
684
+ # this indicates that a variable isn't constrained and can be removed
685
+ A = vstack((A_eq, A_ub))
686
+ if A.shape[0] > 0:
687
+ zero_col = np.array(np.sum(A != 0, axis=0) == 0).flatten()
688
+ # variable will be at upper or lower bound, depending on objective
689
+ x[np.logical_and(zero_col, c < 0)] = ub[
690
+ np.logical_and(zero_col, c < 0)]
691
+ x[np.logical_and(zero_col, c > 0)] = lb[
692
+ np.logical_and(zero_col, c > 0)]
693
+ if np.any(np.isinf(x)): # if an unconstrained variable has no bound
694
+ status = 3
695
+ message = ("If feasible, the problem is (trivially) unbounded "
696
+ "due to a zero column in the constraint matrices. If "
697
+ "you wish to check whether the problem is infeasible, "
698
+ "turn presolve off.")
699
+ complete = True
700
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
701
+ c0, x, revstack, complete, status, message)
702
+ # variables that will equal their upper/lower bounds will be removed later
703
+ lb[np.logical_and(zero_col, c < 0)] = ub[
704
+ np.logical_and(zero_col, c < 0)]
705
+ ub[np.logical_and(zero_col, c > 0)] = lb[
706
+ np.logical_and(zero_col, c > 0)]
707
+
708
+ # row singleton in equality constraints
709
+ # this fixes a variable and removes the constraint
710
+ singleton_row = np.array(np.sum(A_eq != 0, axis=1) == 1).flatten()
711
+ rows = where(singleton_row)[0]
712
+ cols = where(A_eq[rows, :])[1]
713
+ if len(rows) > 0:
714
+ for row, col in zip(rows, cols):
715
+ val = b_eq[row] / A_eq[row, col]
716
+ if not lb[col] - tol <= val <= ub[col] + tol:
717
+ # infeasible if fixed value is not within bounds
718
+ status = 2
719
+ message = ("The problem is (trivially) infeasible because a "
720
+ "singleton row in the equality constraints is "
721
+ "inconsistent with the bounds.")
722
+ complete = True
723
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
724
+ c0, x, revstack, complete, status, message)
725
+ else:
726
+ # sets upper and lower bounds at that fixed value - variable
727
+ # will be removed later
728
+ lb[col] = val
729
+ ub[col] = val
730
+ A_eq = A_eq[np.logical_not(singleton_row), :]
731
+ b_eq = b_eq[np.logical_not(singleton_row)]
732
+
733
+ # row singleton in inequality constraints
734
+ # this indicates a simple bound and the constraint can be removed
735
+ # simple bounds may be adjusted here
736
+ # After all of the simple bound information is combined here, get_Abc will
737
+ # turn the simple bounds into constraints
738
+ singleton_row = np.array(np.sum(A_ub != 0, axis=1) == 1).flatten()
739
+ cols = where(A_ub[singleton_row, :])[1]
740
+ rows = where(singleton_row)[0]
741
+ if len(rows) > 0:
742
+ for row, col in zip(rows, cols):
743
+ val = b_ub[row] / A_ub[row, col]
744
+ if A_ub[row, col] > 0: # upper bound
745
+ if val < lb[col] - tol: # infeasible
746
+ complete = True
747
+ elif val < ub[col]: # new upper bound
748
+ ub[col] = val
749
+ else: # lower bound
750
+ if val > ub[col] + tol: # infeasible
751
+ complete = True
752
+ elif val > lb[col]: # new lower bound
753
+ lb[col] = val
754
+ if complete:
755
+ status = 2
756
+ message = ("The problem is (trivially) infeasible because a "
757
+ "singleton row in the upper bound constraints is "
758
+ "inconsistent with the bounds.")
759
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
760
+ c0, x, revstack, complete, status, message)
761
+ A_ub = A_ub[np.logical_not(singleton_row), :]
762
+ b_ub = b_ub[np.logical_not(singleton_row)]
763
+
764
+ # identical bounds indicate that variable can be removed
765
+ i_f = np.abs(lb - ub) < tol # indices of "fixed" variables
766
+ i_nf = np.logical_not(i_f) # indices of "not fixed" variables
767
+
768
+ # test_bounds_equal_but_infeasible
769
+ if np.all(i_f): # if bounds define solution, check for consistency
770
+ residual = b_eq - A_eq.dot(lb)
771
+ slack = b_ub - A_ub.dot(lb)
772
+ if ((A_ub.size > 0 and np.any(slack < 0)) or
773
+ (A_eq.size > 0 and not np.allclose(residual, 0))):
774
+ status = 2
775
+ message = ("The problem is (trivially) infeasible because the "
776
+ "bounds fix all variables to values inconsistent with "
777
+ "the constraints")
778
+ complete = True
779
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
780
+ c0, x, revstack, complete, status, message)
781
+
782
+ ub_mod = ub
783
+ lb_mod = lb
784
+ if np.any(i_f):
785
+ c0 += c[i_f].dot(lb[i_f])
786
+ b_eq = b_eq - A_eq[:, i_f].dot(lb[i_f])
787
+ b_ub = b_ub - A_ub[:, i_f].dot(lb[i_f])
788
+ c = c[i_nf]
789
+ x_undo = lb[i_f] # not x[i_f], x is just zeroes
790
+ x = x[i_nf]
791
+ # user guess x0 stays separate from presolve solution x
792
+ if x0 is not None:
793
+ x0 = x0[i_nf]
794
+ A_eq = A_eq[:, i_nf]
795
+ A_ub = A_ub[:, i_nf]
796
+ # modify bounds
797
+ lb_mod = lb[i_nf]
798
+ ub_mod = ub[i_nf]
799
+
800
+ def rev(x_mod):
801
+ # Function to restore x: insert x_undo into x_mod.
802
+ # When elements have been removed at positions k1, k2, k3, ...
803
+ # then these must be replaced at (after) positions k1-1, k2-2,
804
+ # k3-3, ... in the modified array to recreate the original
805
+ i = np.flatnonzero(i_f)
806
+ # Number of variables to restore
807
+ N = len(i)
808
+ index_offset = np.arange(N)
809
+ # Create insert indices
810
+ insert_indices = i - index_offset
811
+ x_rev = np.insert(x_mod.astype(float), insert_indices, x_undo)
812
+ return x_rev
813
+
814
+ # Use revstack as a list of functions, currently just this one.
815
+ revstack.append(rev)
816
+
817
+ # no constraints indicates that problem is trivial
818
+ if A_eq.size == 0 and A_ub.size == 0:
819
+ b_eq = np.array([])
820
+ b_ub = np.array([])
821
+ # test_empty_constraint_1
822
+ if c.size == 0:
823
+ status = 0
824
+ message = ("The solution was determined in presolve as there are "
825
+ "no non-trivial constraints.")
826
+ elif (np.any(np.logical_and(c < 0, ub_mod == np.inf)) or
827
+ np.any(np.logical_and(c > 0, lb_mod == -np.inf))):
828
+ # test_no_constraints()
829
+ # test_unbounded_no_nontrivial_constraints_1
830
+ # test_unbounded_no_nontrivial_constraints_2
831
+ status = 3
832
+ message = ("The problem is (trivially) unbounded "
833
+ "because there are no non-trivial constraints and "
834
+ "a) at least one decision variable is unbounded "
835
+ "above and its corresponding cost is negative, or "
836
+ "b) at least one decision variable is unbounded below "
837
+ "and its corresponding cost is positive. ")
838
+ else: # test_empty_constraint_2
839
+ status = 0
840
+ message = ("The solution was determined in presolve as there are "
841
+ "no non-trivial constraints.")
842
+ complete = True
843
+ x[c < 0] = ub_mod[c < 0]
844
+ x[c > 0] = lb_mod[c > 0]
845
+ # where c is zero, set x to a finite bound or zero
846
+ x_zero_c = ub_mod[c == 0]
847
+ x_zero_c[np.isinf(x_zero_c)] = lb_mod[c == 0][np.isinf(x_zero_c)]
848
+ x_zero_c[np.isinf(x_zero_c)] = 0
849
+ x[c == 0] = x_zero_c
850
+ # if this is not the last step of presolve, should convert bounds back
851
+ # to array and return here
852
+
853
+ # Convert modified lb and ub back into N x 2 bounds
854
+ bounds = np.hstack((lb_mod[:, np.newaxis], ub_mod[:, np.newaxis]))
855
+
856
+ # remove redundant (linearly dependent) rows from equality constraints
857
+ n_rows_A = A_eq.shape[0]
858
+ redundancy_warning = ("A_eq does not appear to be of full row rank. To "
859
+ "improve performance, check the problem formulation "
860
+ "for redundant equality constraints.")
861
+ if (sps.issparse(A_eq)):
862
+ if rr and A_eq.size > 0: # TODO: Fast sparse rank check?
863
+ rr_res = _remove_redundancy_pivot_sparse(A_eq, b_eq)
864
+ A_eq, b_eq, status, message = rr_res
865
+ if A_eq.shape[0] < n_rows_A:
866
+ warn(redundancy_warning, OptimizeWarning, stacklevel=1)
867
+ if status != 0:
868
+ complete = True
869
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
870
+ c0, x, revstack, complete, status, message)
871
+
872
+ # This is a wild guess for which redundancy removal algorithm will be
873
+ # faster. More testing would be good.
874
+ small_nullspace = 5
875
+ if rr and A_eq.size > 0:
876
+ try: # TODO: use results of first SVD in _remove_redundancy_svd
877
+ rank = np.linalg.matrix_rank(A_eq)
878
+ # oh well, we'll have to go with _remove_redundancy_pivot_dense
879
+ except Exception:
880
+ rank = 0
881
+ if rr and A_eq.size > 0 and rank < A_eq.shape[0]:
882
+ warn(redundancy_warning, OptimizeWarning, stacklevel=3)
883
+ dim_row_nullspace = A_eq.shape[0]-rank
884
+ if rr_method is None:
885
+ if dim_row_nullspace <= small_nullspace:
886
+ rr_res = _remove_redundancy_svd(A_eq, b_eq)
887
+ A_eq, b_eq, status, message = rr_res
888
+ if dim_row_nullspace > small_nullspace or status == 4:
889
+ rr_res = _remove_redundancy_pivot_dense(A_eq, b_eq)
890
+ A_eq, b_eq, status, message = rr_res
891
+
892
+ else:
893
+ rr_method = rr_method.lower()
894
+ if rr_method == "svd":
895
+ rr_res = _remove_redundancy_svd(A_eq, b_eq)
896
+ A_eq, b_eq, status, message = rr_res
897
+ elif rr_method == "pivot":
898
+ rr_res = _remove_redundancy_pivot_dense(A_eq, b_eq)
899
+ A_eq, b_eq, status, message = rr_res
900
+ elif rr_method == "id":
901
+ rr_res = _remove_redundancy_id(A_eq, b_eq, rank)
902
+ A_eq, b_eq, status, message = rr_res
903
+ else: # shouldn't get here; option validity checked above
904
+ pass
905
+ if A_eq.shape[0] < rank:
906
+ message = ("Due to numerical issues, redundant equality "
907
+ "constraints could not be removed automatically. "
908
+ "Try providing your constraint matrices as sparse "
909
+ "matrices to activate sparse presolve, try turning "
910
+ "off redundancy removal, or try turning off presolve "
911
+ "altogether.")
912
+ status = 4
913
+ if status != 0:
914
+ complete = True
915
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
916
+ c0, x, revstack, complete, status, message)
917
+
918
+
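+ # A hedged sketch of a problem presolve can solve outright (illustrative
+ # values, inputs as produced by _clean_inputs): with c = [1, -1], empty
+ # constraint matrices and bounds [[0, 1], [0, 1]], the "no non-trivial
+ # constraints" branch above returns complete=True, status=0 and x = [0, 1],
+ # since each variable is pushed to the bound favored by its cost, and the
+ # main solver is skipped entirely.
+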
919
+ def _parse_linprog(lp, options, meth):
920
+ """
921
+ Parse the provided linear programming problem
922
+
923
+ ``_parse_linprog`` employs two main steps ``_check_sparse_inputs`` and
924
+ ``_clean_inputs``. ``_check_sparse_inputs`` checks for sparsity in the
925
+ provided constraints (``A_ub`` and ``A_eq``) and whether these match the
926
+ provided sparsity-related options.
927
+
928
+ ``_clean_inputs`` checks the validity of the provided inputs. If no violations are
929
+ identified the objective vector, upper bound constraints, equality
930
+ constraints, and simple bounds are returned in the expected format.
931
+
932
+ Parameters
933
+ ----------
934
+ lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
935
+
936
+ c : 1D array
937
+ The coefficients of the linear objective function to be minimized.
938
+ A_ub : 2D array, optional
939
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
940
+ coefficients of a linear inequality constraint on ``x``.
941
+ b_ub : 1D array, optional
942
+ The inequality constraint vector. Each element represents an
943
+ upper bound on the corresponding value of ``A_ub @ x``.
944
+ A_eq : 2D array, optional
945
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
946
+ coefficients of a linear equality constraint on ``x``.
947
+ b_eq : 1D array, optional
948
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
949
+ the corresponding element of ``b_eq``.
950
+ bounds : various valid formats, optional
951
+ The bounds of ``x``, as ``min`` and ``max`` pairs.
952
+ If bounds are specified for all N variables separately, valid formats are:
953
+ * a 2D array (N x 2);
954
+ * a sequence of N sequences, each with 2 values.
955
+ If all variables have the same bounds, a single pair of values can
956
+ be specified. Valid formats are:
957
+ * a sequence with 2 scalar values;
958
+ * a sequence with a single element containing 2 scalar values.
959
+ If all variables have a lower bound of 0 and no upper bound, the bounds
960
+ parameter can be omitted (or given as None).
961
+ x0 : 1D array, optional
962
+ Guess values of the decision variables, which will be refined by
963
+ the optimization algorithm. This argument is currently used only by the
964
+ 'revised simplex' method, and can only be used if `x0` represents a
965
+ basic feasible solution.
966
+
967
+ options : dict
968
+ A dictionary of solver options. All methods accept the following
969
+ generic options:
970
+
971
+ maxiter : int
972
+ Maximum number of iterations to perform.
973
+ disp : bool
974
+ Set to True to print convergence messages.
975
+
976
+ For method-specific options, see :func:`show_options('linprog')`.
977
+
978
+ Returns
979
+ -------
980
+ lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
981
+
982
+ c : 1D array
983
+ The coefficients of the linear objective function to be minimized.
984
+ A_ub : 2D array, optional
985
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
986
+ coefficients of a linear inequality constraint on ``x``.
987
+ b_ub : 1D array, optional
988
+ The inequality constraint vector. Each element represents an
989
+ upper bound on the corresponding value of ``A_ub @ x``.
990
+ A_eq : 2D array, optional
991
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
992
+ coefficients of a linear equality constraint on ``x``.
993
+ b_eq : 1D array, optional
994
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
995
+ the corresponding element of ``b_eq``.
996
+ bounds : 2D array
997
+ The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N
998
+ elements of ``x``. The N x 2 array contains lower bounds in the first
999
+ column and upper bounds in the 2nd. Unbounded variables have lower
1000
+ bound -np.inf and/or upper bound np.inf.
1001
+ x0 : 1D array, optional
1002
+ Guess values of the decision variables, which will be refined by
1003
+ the optimization algorithm. This argument is currently used only by the
1004
+ 'revised simplex' method, and can only be used if `x0` represents a
1005
+ basic feasible solution.
1006
+
1007
+ options : dict, optional
1008
+ A dictionary of solver options. All methods accept the following
1009
+ generic options:
1010
+
1011
+ maxiter : int
1012
+ Maximum number of iterations to perform.
1013
+ disp : bool
1014
+ Set to True to print convergence messages.
1015
+
1016
+ For method-specific options, see :func:`show_options('linprog')`.
1017
+
1018
+ """
1019
+ if options is None:
1020
+ options = {}
1021
+
1022
+ solver_options = {k: v for k, v in options.items()}
1023
+ solver_options, A_ub, A_eq = _check_sparse_inputs(solver_options, meth,
1024
+ lp.A_ub, lp.A_eq)
1025
+ # Convert lists to numpy arrays, etc...
1026
+ lp = _clean_inputs(lp._replace(A_ub=A_ub, A_eq=A_eq))
1027
+ return lp, solver_options
1028
+
1029
+
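+ # Hedged usage sketch (module-internal API, illustrative values): parsing
+ # with ``options=None`` yields the cleaned problem plus an empty option dict:
+ # >>> lp, opts = _parse_linprog(_LPProblem(c=[-1, 4]), None, 'highs')
+ # >>> opts
+ # {}
+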
1030
+ def _get_Abc(lp, c0):
1031
+ """
1032
+ Given a linear programming problem of the form:
1033
+
1034
+ Minimize::
1035
+
1036
+ c @ x
1037
+
1038
+ Subject to::
1039
+
1040
+ A_ub @ x <= b_ub
1041
+ A_eq @ x == b_eq
1042
+ lb <= x <= ub
1043
+
1044
+ where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.
1045
+
1046
+ Return the problem in standard form:
1047
+
1048
+ Minimize::
1049
+
1050
+ c @ x
1051
+
1052
+ Subject to::
1053
+
1054
+ A @ x == b
1055
+ x >= 0
1056
+
1057
+ by adding slack variables and making variable substitutions as necessary.
1058
+
1059
+ Parameters
1060
+ ----------
1061
+ lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
1062
+
1063
+ c : 1D array
1064
+ The coefficients of the linear objective function to be minimized.
1065
+ A_ub : 2D array, optional
1066
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
1067
+ coefficients of a linear inequality constraint on ``x``.
1068
+ b_ub : 1D array, optional
1069
+ The inequality constraint vector. Each element represents an
1070
+ upper bound on the corresponding value of ``A_ub @ x``.
1071
+ A_eq : 2D array, optional
1072
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
1073
+ coefficients of a linear equality constraint on ``x``.
1074
+ b_eq : 1D array, optional
1075
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
1076
+ the corresponding element of ``b_eq``.
1077
+ bounds : 2D array
1078
+ The bounds of ``x``, lower bounds in the 1st column, upper
1079
+ bounds in the 2nd column. The bounds are possibly tightened
1080
+ by the presolve procedure.
1081
+ x0 : 1D array, optional
1082
+ Guess values of the decision variables, which will be refined by
1083
+ the optimization algorithm. This argument is currently used only by the
1084
+ 'revised simplex' method, and can only be used if `x0` represents a
1085
+ basic feasible solution.
1086
+
1087
+ c0 : float
1088
+ Constant term in objective function due to fixed (and eliminated)
1089
+ variables.
1090
+
1091
+ Returns
1092
+ -------
1093
+ A : 2-D array
1094
+ 2-D array such that ``A`` @ ``x``, gives the values of the equality
1095
+ constraints at ``x``.
1096
+ b : 1-D array
1097
+ 1-D array of values representing the RHS of each equality constraint
1098
+ (row) in A (for standard form problem).
1099
+ c : 1-D array
1100
+ Coefficients of the linear objective function to be minimized (for
1101
+ standard form problem).
1102
+ c0 : float
1103
+ Constant term in objective function due to fixed (and eliminated)
1104
+ variables.
1105
+ x0 : 1-D array
1106
+ Starting values of the independent variables, which will be refined by
1107
+ the optimization algorithm
1108
+
1109
+ References
1110
+ ----------
1111
+ .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
1112
+ programming." Athena Scientific 1 (1997): 997.
1113
+
1114
+ """
1115
+ c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp
1116
+
1117
+ if sps.issparse(A_eq):
1118
+ sparse = True
1119
+ A_eq = sps.csr_matrix(A_eq)
1120
+ A_ub = sps.csr_matrix(A_ub)
1121
+
1122
+ def hstack(blocks):
1123
+ return sps.hstack(blocks, format="csr")
1124
+
1125
+ def vstack(blocks):
1126
+ return sps.vstack(blocks, format="csr")
1127
+
1128
+ zeros = sps.csr_matrix
1129
+ eye = sps.eye
1130
+ else:
1131
+ sparse = False
1132
+ hstack = np.hstack
1133
+ vstack = np.vstack
1134
+ zeros = np.zeros
1135
+ eye = np.eye
1136
+
1137
+ # Variables lbs and ubs (see below) may be changed, which feeds back into
1138
+ # bounds, so copy.
1139
+ bounds = np.array(bounds, copy=True)
1140
+
1141
+ # modify problem such that all variables have only non-negativity bounds
1142
+ lbs = bounds[:, 0]
1143
+ ubs = bounds[:, 1]
1144
+ m_ub, n_ub = A_ub.shape
1145
+
1146
+ lb_none = np.equal(lbs, -np.inf)
1147
+ ub_none = np.equal(ubs, np.inf)
1148
+ lb_some = np.logical_not(lb_none)
1149
+ ub_some = np.logical_not(ub_none)
1150
+
1151
+ # unbounded below: substitute xi = -xi' (unbounded above)
1152
+ # if -inf <= xi <= ub, then -ub <= -xi <= inf, so swap and invert bounds
1153
+ l_nolb_someub = np.logical_and(lb_none, ub_some)
1154
+ i_nolb = np.nonzero(l_nolb_someub)[0]
1155
+ lbs[l_nolb_someub], ubs[l_nolb_someub] = (
1156
+ -ubs[l_nolb_someub], -lbs[l_nolb_someub])
1157
+ lb_none = np.equal(lbs, -np.inf)
1158
+ ub_none = np.equal(ubs, np.inf)
1159
+ lb_some = np.logical_not(lb_none)
1160
+ ub_some = np.logical_not(ub_none)
1161
+ c[i_nolb] *= -1
1162
+ if x0 is not None:
1163
+ x0[i_nolb] *= -1
1164
+ if len(i_nolb) > 0:
1165
+ if A_ub.shape[0] > 0: # sometimes needed for sparse arrays... weird
1166
+ A_ub[:, i_nolb] *= -1
1167
+ if A_eq.shape[0] > 0:
1168
+ A_eq[:, i_nolb] *= -1
1169
+
1170
+ # upper bound: add inequality constraint
1171
+ i_newub, = ub_some.nonzero()
1172
+ ub_newub = ubs[ub_some]
1173
+ n_bounds = len(i_newub)
1174
+ if n_bounds > 0:
1175
+ shape = (n_bounds, A_ub.shape[1])
1176
+ if sparse:
1177
+ idxs = (np.arange(n_bounds), i_newub)
1178
+ A_ub = vstack((A_ub, sps.csr_matrix((np.ones(n_bounds), idxs),
1179
+ shape=shape)))
1180
+ else:
1181
+ A_ub = vstack((A_ub, np.zeros(shape)))
1182
+ A_ub[np.arange(m_ub, A_ub.shape[0]), i_newub] = 1
1183
+ b_ub = np.concatenate((b_ub, np.zeros(n_bounds)))
1184
+ b_ub[m_ub:] = ub_newub
1185
+
1186
+ A1 = vstack((A_ub, A_eq))
1187
+ b = np.concatenate((b_ub, b_eq))
1188
+ c = np.concatenate((c, np.zeros((A_ub.shape[0],))))
1189
+ if x0 is not None:
1190
+ x0 = np.concatenate((x0, np.zeros((A_ub.shape[0],))))
1191
+ # unbounded: substitute xi = xi+ + xi-
1192
+ l_free = np.logical_and(lb_none, ub_none)
1193
+ i_free = np.nonzero(l_free)[0]
1194
+ n_free = len(i_free)
1195
+ c = np.concatenate((c, np.zeros(n_free)))
1196
+ if x0 is not None:
1197
+ x0 = np.concatenate((x0, np.zeros(n_free)))
1198
+ A1 = hstack((A1[:, :n_ub], -A1[:, i_free]))
1199
+ c[n_ub:n_ub+n_free] = -c[i_free]
1200
+ if x0 is not None:
1201
+ i_free_neg = x0[i_free] < 0
1202
+ x0[np.arange(n_ub, A1.shape[1])[i_free_neg]] = -x0[i_free[i_free_neg]]
1203
+ x0[i_free[i_free_neg]] = 0
1204
+
1205
+ # add slack variables
1206
+ A2 = vstack([eye(A_ub.shape[0]), zeros((A_eq.shape[0], A_ub.shape[0]))])
1207
+
1208
+ A = hstack([A1, A2])
1209
+
1210
+ # lower bound: substitute xi = xi' + lb
1211
+ # now there is a constant term in objective
1212
+ i_shift = np.nonzero(lb_some)[0]
1213
+ lb_shift = lbs[lb_some].astype(float)
1214
+ c0 += np.sum(lb_shift * c[i_shift])
1215
+ if sparse:
1216
+ b = b.reshape(-1, 1)
1217
+ A = A.tocsc()
1218
+ b -= (A[:, i_shift] * sps.diags(lb_shift)).sum(axis=1)
1219
+ b = b.ravel()
1220
+ else:
1221
+ b -= (A[:, i_shift] * lb_shift).sum(axis=1)
1222
+ if x0 is not None:
1223
+ x0[i_shift] -= lb_shift
1224
+
1225
+ return A, b, c, c0, x0
1226
+
1227
+
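+ # A small worked sketch of the standard-form conversion (illustrative
+ # values): for min x1 subject to x1 <= 2 and the default bound x1 >= 0,
+ # one slack column is appended so that x1 + s == 2 with s >= 0:
+ # >>> lp = _clean_inputs(_LPProblem(c=[1], A_ub=[[1]], b_ub=[2]))
+ # >>> A, b, c, c0, x0 = _get_Abc(lp, 0)
+ # >>> A, b, c
+ # (array([[1., 1.]]), array([2.]), array([1., 0.]))
+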
1228
+ def _round_to_power_of_two(x):
1229
+ """
1230
+ Round elements of the array to the nearest power of two.
1231
+ """
1232
+ return 2**np.around(np.log2(x))
1233
+
1234
+
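+ # Example: rounding happens in log space, so 3 -> 4 (round(log2(3)) == 2)
+ # and 10 -> 8 (round(log2(10)) == 3):
+ # >>> _round_to_power_of_two(np.array([3., 10.]))
+ # array([4., 8.])
+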
1235
+ def _autoscale(A, b, c, x0):
1236
+ """
1237
+ Scales the problem according to equilibration from [12].
1238
+ Also normalizes the right hand side vector by its maximum element.
1239
+ """
1240
+ m, n = A.shape
1241
+
1242
+ C = 1
1243
+ R = 1
1244
+
1245
+ if A.size > 0:
1246
+
1247
+ R = np.max(np.abs(A), axis=1)
1248
+ if sps.issparse(A):
1249
+ R = R.toarray().flatten()
1250
+ R[R == 0] = 1
1251
+ R = 1/_round_to_power_of_two(R)
1252
+ A = sps.diags(R)*A if sps.issparse(A) else A*R.reshape(m, 1)
1253
+ b = b*R
1254
+
1255
+ C = np.max(np.abs(A), axis=0)
1256
+ if sps.issparse(A):
1257
+ C = C.toarray().flatten()
1258
+ C[C == 0] = 1
1259
+ C = 1/_round_to_power_of_two(C)
1260
+ A = A*sps.diags(C) if sps.issparse(A) else A*C
1261
+ c = c*C
1262
+
1263
+ b_scale = np.max(np.abs(b)) if b.size > 0 else 1
1264
+ if b_scale == 0:
1265
+ b_scale = 1.
1266
+ b = b/b_scale
1267
+
1268
+ if x0 is not None:
1269
+ x0 = x0/b_scale*(1/C)
1270
+ return A, b, c, x0, C, b_scale
1271
+
1272
+
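+ # A hedged round-trip sketch (illustrative values): rows are divided by
+ # their largest entry rounded to a power of two, b is normalized by its
+ # maximum element, and _unscale (below) recovers the solution of the
+ # original system:
+ # >>> A = np.array([[4., 0.], [0., 16.]]); b = np.array([8., 16.])
+ # >>> As, bs, cs, _, C, b_scale = _autoscale(A, b, np.ones(2), None)
+ # >>> _unscale(np.linalg.solve(As, bs), C, b_scale)
+ # array([2., 1.])
+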
1273
+ def _unscale(x, C, b_scale):
1274
+ """
1275
+ Converts solution to _autoscale problem -> solution to original problem.
1276
+ """
1277
+
1278
+ try:
1279
+ n = len(C)
1280
+ # fails if sparse or scalar; that's OK.
1281
+ # this is only needed for original simplex (never sparse)
1282
+ except TypeError:
1283
+ n = len(x)
1284
+
1285
+ return x[:n]*b_scale*C
1286
+
1287
+
1288
+ def _display_summary(message, status, fun, iteration):
1289
+ """
1290
+ Print the termination summary of the linear program
1291
+
1292
+ Parameters
1293
+ ----------
1294
+ message : str
1295
+ A string descriptor of the exit status of the optimization.
1296
+ status : int
1297
+ An integer representing the exit status of the optimization::
1298
+
1299
+ 0 : Optimization terminated successfully
1300
+ 1 : Iteration limit reached
1301
+ 2 : Problem appears to be infeasible
1302
+ 3 : Problem appears to be unbounded
1303
+ 4 : Serious numerical difficulties encountered
1304
+
1305
+ fun : float
1306
+ Value of the objective function.
1307
+ iteration : int
1308
+ The number of iterations performed.
1309
+ """
1310
+ print(message)
1311
+ if status in (0, 1):
1312
+ print(f" Current function value: {fun: <12.6f}")
1313
+ print(f" Iterations: {iteration:d}")
1314
+
1315
+
1316
+ def _postsolve(x, postsolve_args, complete=False):
1317
+ """
1318
+ Given solution x to presolved, standard form linear program x, add
1319
+ fixed variables back into the problem and undo the variable substitutions
1320
+ to get solution to original linear program. Also, calculate the objective
1321
+ function value, slack in original upper bound constraints, and residuals
1322
+ in original equality constraints.
1323
+
1324
+ Parameters
1325
+ ----------
1326
+ x : 1-D array
1327
+ Solution vector to the standard-form problem.
1328
+ postsolve_args : tuple
1329
+ Data needed by _postsolve to convert the solution to the standard-form
1330
+ problem into the solution to the original problem, including:
1331
+
1332
+ lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
1333
+
1334
+ c : 1D array
1335
+ The coefficients of the linear objective function to be minimized.
1336
+ A_ub : 2D array, optional
1337
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
1338
+ coefficients of a linear inequality constraint on ``x``.
1339
+ b_ub : 1D array, optional
1340
+ The inequality constraint vector. Each element represents an
1341
+ upper bound on the corresponding value of ``A_ub @ x``.
1342
+ A_eq : 2D array, optional
1343
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
1344
+ coefficients of a linear equality constraint on ``x``.
1345
+ b_eq : 1D array, optional
1346
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
1347
+ the corresponding element of ``b_eq``.
1348
+ bounds : 2D array
1349
+ The bounds of ``x``, lower bounds in the 1st column, upper
1350
+ bounds in the 2nd column. The bounds are possibly tightened
1351
+ by the presolve procedure.
1352
+ x0 : 1D array, optional
1353
+ Guess values of the decision variables, which will be refined by
1354
+ the optimization algorithm. This argument is currently used only by the
1355
+ 'revised simplex' method, and can only be used if `x0` represents a
1356
+ basic feasible solution.
1357
+
1358
+ revstack: list of functions
1359
+ the functions in the list reverse the operations of _presolve()
1360
+ the function signature is x_org = f(x_mod), where x_mod is the result
1361
+ of a presolve step and x_org the value at the start of the step
1362
+ complete : bool
1363
+ Whether the solution was determined in presolve (``True`` if so)
1364
+
1365
+ Returns
1366
+ -------
1367
+ x : 1-D array
1368
+ Solution vector to original linear programming problem
1369
+ fun: float
1370
+ optimal objective value for original problem
1371
+ slack : 1-D array
1372
+ The (non-negative) slack in the upper bound constraints, that is,
1373
+ ``b_ub - A_ub @ x``
1374
+ con : 1-D array
1375
+ The (nominally zero) residuals of the equality constraints, that is,
1376
+ ``b_eq - A_eq @ x``
1377
+ """
1378
+ # note that all the inputs are the ORIGINAL, unmodified versions
1379
+ # no rows, columns have been removed
1380
+
1381
+ c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = postsolve_args[0]
1382
+ revstack, C, b_scale = postsolve_args[1:]
1383
+
1384
+ x = _unscale(x, C, b_scale)
1385
+
1386
+ # Undo variable substitutions of _get_Abc()
1387
+ # if "complete", problem was solved in presolve; don't do anything here
1388
+ n_x = bounds.shape[0]
1389
+ if not complete and bounds is not None: # bounds are never None by this point
1390
+ n_unbounded = 0
1391
+ for i, bi in enumerate(bounds):
1392
+ lbi = bi[0]
1393
+ ubi = bi[1]
1394
+ if lbi == -np.inf and ubi == np.inf:
1395
+ n_unbounded += 1
1396
+ x[i] = x[i] - x[n_x + n_unbounded - 1]
1397
+ else:
1398
+ if lbi == -np.inf:
1399
+ x[i] = ubi - x[i]
1400
+ else:
1401
+ x[i] += lbi
1402
+ # all the rest of the variables were artificial
1403
+ x = x[:n_x]
1404
+
1405
+ # If there were variables removed from the problem, add them back into the
1406
+ # solution vector
1407
+ # Apply the functions in revstack (reverse direction)
1408
+ for rev in reversed(revstack):
1409
+ x = rev(x)
1410
+
1411
+ fun = x.dot(c)
1412
+ slack = b_ub - A_ub.dot(x) # report slack for ORIGINAL UB constraints
1413
+ # report residuals of ORIGINAL EQ constraints
1414
+ con = b_eq - A_eq.dot(x)
1415
+
1416
+ return x, fun, slack, con
1417
+
1418
+
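+ # A hedged sketch of the substitution undo above (illustrative values):
+ # for a variable with bounds (1, inf), _get_Abc shifted it by its lower
+ # bound, so a standard-form value of 0.5 maps back to 0.5 + 1 = 1.5; a
+ # free variable is rebuilt as the difference of its two split parts, and
+ # the trailing slack entries are simply dropped via x[:n_x].
+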
1419
+ def _check_result(x, fun, status, slack, con, bounds, tol, message,
1420
+ integrality):
1421
+ """
1422
+ Check the validity of the provided solution.
1423
+
1424
+ A valid (optimal) solution satisfies all bounds, all slack variables are
1425
+ non-negative, and all equality constraint residuals are (nominally) zero.
1426
+ Further, the lower-bounds, upper-bounds, slack and residuals contain
1427
+ no nan values.
1428
+
1429
+ Parameters
1430
+ ----------
1431
+ x : 1-D array
1432
+ Solution vector to original linear programming problem
1433
+ fun: float
1434
+ optimal objective value for original problem
1435
+ status : int
1436
+ An integer representing the exit status of the optimization::
1437
+
1438
+ 0 : Optimization terminated successfully
1439
+ 1 : Iteration limit reached
1440
+ 2 : Problem appears to be infeasible
1441
+ 3 : Problem appears to be unbounded
1442
+ 4 : Serious numerical difficulties encountered
1443
+
1444
+ slack : 1-D array
1445
+ The (non-negative) slack in the upper bound constraints, that is,
1446
+ ``b_ub - A_ub @ x``
1447
+ con : 1-D array
1448
+ The (nominally zero) residuals of the equality constraints, that is,
1449
+ ``b_eq - A_eq @ x``
1450
+ bounds : 2D array
1451
+ The bounds on the original variables ``x``
1452
+ message : str
1453
+ A string descriptor of the exit status of the optimization.
1454
+ tol : float
1455
+ Termination tolerance; see [1]_ Section 4.5.
1456
+
1457
+ Returns
1458
+ -------
1459
+ status : int
1460
+ An integer representing the exit status of the optimization::
1461
+
1462
+ 0 : Optimization terminated successfully
1463
+ 1 : Iteration limit reached
1464
+ 2 : Problem appears to be infeasible
1465
+ 3 : Problem appears to be unbounded
1466
+ 4 : Serious numerical difficulties encountered
1467
+
1468
+ message : str
1469
+ A string descriptor of the exit status of the optimization.
1470
+ """
1471
+ # Somewhat arbitrary
1472
+ tol = np.sqrt(tol) * 10
1473
+
1474
+ if x is None:
1475
+ # HiGHS does not provide x if infeasible/unbounded
1476
+ if status == 0: # Observed with HiGHS Simplex Primal
1477
+ status = 4
1478
+ message = ("The solver did not provide a solution nor did it "
1479
+ "report a failure. Please submit a bug report.")
1480
+ return status, message
1481
+
1482
+ contains_nans = (
1483
+ np.isnan(x).any()
1484
+ or np.isnan(fun)
1485
+ or np.isnan(slack).any()
1486
+ or np.isnan(con).any()
1487
+ )
1488
+
1489
+ if contains_nans:
1490
+ is_feasible = False
1491
+ else:
1492
+ if integrality is None:
1493
+ integrality = 0
1494
+ valid_bounds = (x >= bounds[:, 0] - tol) & (x <= bounds[:, 1] + tol)
1495
+ # When integrality is 2 or 3, x must be within bounds OR take value 0
1496
+ valid_bounds |= (integrality > 1) & np.isclose(x, 0, atol=tol)
1497
+ invalid_bounds = not np.all(valid_bounds)
1498
+
1499
+ invalid_slack = status != 3 and (slack < -tol).any()
1500
+ invalid_con = status != 3 and (np.abs(con) > tol).any()
1501
+ is_feasible = not (invalid_bounds or invalid_slack or invalid_con)
1502
+
1503
+ if status == 0 and not is_feasible:
1504
+ status = 4
1505
+ message = ("The solution does not satisfy the constraints within the "
1506
+ "required tolerance of " + f"{tol:.2E}" + ", yet "
1507
+ "no errors were raised and there is no certificate of "
1508
+ "infeasibility or unboundedness. Check whether "
1509
+ "the slack and constraint residuals are acceptable; "
1510
+ "if not, consider enabling presolve, adjusting the "
1511
+ "tolerance option(s), and/or using a different method. "
1512
+ "Please consider submitting a bug report.")
1513
+ elif status == 2 and is_feasible:
1514
+ # Occurs if the simplex method exits after phase one with a very
1515
+ # nearly basic feasible solution. Postsolving can make the solution
1516
+ # basic, however, this solution is NOT optimal
1517
+ status = 4
1518
+ message = ("The solution is feasible, but the solver did not report "
1519
+ "that the solution was optimal. Please try a different "
1520
+ "method.")
1521
+
1522
+ return status, message
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_minimize.py ADDED
@@ -0,0 +1,1094 @@
1
+ """
2
+ Unified interfaces to minimization algorithms.
3
+
4
+ Functions
5
+ ---------
6
+ - minimize : minimization of a function of several variables.
7
+ - minimize_scalar : minimization of a function of one variable.
8
+ """
9
+
10
+ __all__ = ['minimize', 'minimize_scalar']
11
+
12
+
13
+ from warnings import warn
14
+
15
+ import numpy as np
16
+
17
+ # unconstrained minimization
18
+ from ._optimize import (_minimize_neldermead, _minimize_powell, _minimize_cg,
19
+ _minimize_bfgs, _minimize_newtoncg,
20
+ _minimize_scalar_brent, _minimize_scalar_bounded,
21
+ _minimize_scalar_golden, MemoizeJac, OptimizeResult,
22
+ _wrap_callback, _recover_from_bracket_error)
23
+ from ._trustregion_dogleg import _minimize_dogleg
24
+ from ._trustregion_ncg import _minimize_trust_ncg
25
+ from ._trustregion_krylov import _minimize_trust_krylov
26
+ from ._trustregion_exact import _minimize_trustregion_exact
27
+ from ._trustregion_constr import _minimize_trustregion_constr
28
+
29
+ # constrained minimization
30
+ from ._lbfgsb_py import _minimize_lbfgsb
31
+ from ._tnc import _minimize_tnc
32
+ from ._cobyla_py import _minimize_cobyla
33
+ from ._slsqp_py import _minimize_slsqp
34
+ from ._constraints import (old_bound_to_new, new_bounds_to_old,
35
+ old_constraint_to_new, new_constraint_to_old,
36
+ NonlinearConstraint, LinearConstraint, Bounds,
37
+ PreparedConstraint)
38
+ from ._differentiable_functions import FD_METHODS
39
+
40
+ MINIMIZE_METHODS = ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg',
41
+ 'l-bfgs-b', 'tnc', 'cobyla', 'slsqp', 'trust-constr',
42
+ 'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov']
43
+
44
+ # These methods support the new callback interface (passed an OptimizeResult)
45
+ MINIMIZE_METHODS_NEW_CB = ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg',
46
+ 'l-bfgs-b', 'trust-constr', 'dogleg', 'trust-ncg',
47
+ 'trust-exact', 'trust-krylov']
48
+
49
+ MINIMIZE_SCALAR_METHODS = ['brent', 'bounded', 'golden']
50
+
51
+ def minimize(fun, x0, args=(), method=None, jac=None, hess=None,
52
+ hessp=None, bounds=None, constraints=(), tol=None,
53
+ callback=None, options=None):
54
+ """Minimization of scalar function of one or more variables.
55
+
56
+ Parameters
57
+ ----------
58
+ fun : callable
59
+ The objective function to be minimized.
60
+
61
+ ``fun(x, *args) -> float``
62
+
63
+ where ``x`` is a 1-D array with shape (n,) and ``args``
64
+ is a tuple of the fixed parameters needed to completely
65
+ specify the function.
66
+ x0 : ndarray, shape (n,)
67
+ Initial guess. Array of real elements of size (n,),
68
+ where ``n`` is the number of independent variables.
69
+ args : tuple, optional
70
+ Extra arguments passed to the objective function and its
71
+ derivatives (`fun`, `jac` and `hess` functions).
72
+ method : str or callable, optional
73
+ Type of solver. Should be one of
74
+
75
+ - 'Nelder-Mead' :ref:`(see here) <optimize.minimize-neldermead>`
76
+ - 'Powell' :ref:`(see here) <optimize.minimize-powell>`
77
+ - 'CG' :ref:`(see here) <optimize.minimize-cg>`
78
+ - 'BFGS' :ref:`(see here) <optimize.minimize-bfgs>`
79
+ - 'Newton-CG' :ref:`(see here) <optimize.minimize-newtoncg>`
80
+ - 'L-BFGS-B' :ref:`(see here) <optimize.minimize-lbfgsb>`
81
+ - 'TNC' :ref:`(see here) <optimize.minimize-tnc>`
82
+ - 'COBYLA' :ref:`(see here) <optimize.minimize-cobyla>`
83
+ - 'SLSQP' :ref:`(see here) <optimize.minimize-slsqp>`
84
+ - 'trust-constr' :ref:`(see here) <optimize.minimize-trustconstr>`
85
+ - 'dogleg' :ref:`(see here) <optimize.minimize-dogleg>`
86
+ - 'trust-ncg' :ref:`(see here) <optimize.minimize-trustncg>`
87
+ - 'trust-exact' :ref:`(see here) <optimize.minimize-trustexact>`
88
+ - 'trust-krylov' :ref:`(see here) <optimize.minimize-trustkrylov>`
89
+ - custom - a callable object, see below for description.
90
+
91
+ If not given, chosen to be one of ``BFGS``, ``L-BFGS-B``, ``SLSQP``,
92
+ depending on whether or not the problem has constraints or bounds.
93
+ jac : {callable, '2-point', '3-point', 'cs', bool}, optional
94
+ Method for computing the gradient vector. Only for CG, BFGS,
95
+ Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg, trust-krylov,
96
+ trust-exact and trust-constr.
97
+ If it is a callable, it should be a function that returns the gradient
98
+ vector:
99
+
100
+ ``jac(x, *args) -> array_like, shape (n,)``
101
+
102
+ where ``x`` is an array with shape (n,) and ``args`` is a tuple with
103
+ the fixed parameters. If `jac` is a Boolean and is True, `fun` is
104
+ assumed to return a tuple ``(f, g)`` containing the objective
105
+ function and the gradient.
106
+ Methods 'Newton-CG', 'trust-ncg', 'dogleg', 'trust-exact', and
107
+ 'trust-krylov' require that either a callable be supplied, or that
108
+ `fun` return the objective and gradient.
109
+ If None or False, the gradient will be estimated using 2-point finite
110
+ difference estimation with an absolute step size.
111
+ Alternatively, the keywords {'2-point', '3-point', 'cs'} can be used
112
+ to select a finite difference scheme for numerical estimation of the
113
+ gradient with a relative step size. These finite difference schemes
114
+ obey any specified `bounds`.
115
+ hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy}, optional
116
+ Method for computing the Hessian matrix. Only for Newton-CG, dogleg,
117
+ trust-ncg, trust-krylov, trust-exact and trust-constr.
118
+ If it is callable, it should return the Hessian matrix:
119
+
120
+ ``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)``
121
+
122
+ where ``x`` is a (n,) ndarray and ``args`` is a tuple with the fixed
123
+ parameters.
124
+ The keywords {'2-point', '3-point', 'cs'} can also be used to select
125
+ a finite difference scheme for numerical estimation of the hessian.
126
+ Alternatively, objects implementing the `HessianUpdateStrategy`
127
+ interface can be used to approximate the Hessian. Available
128
+ quasi-Newton methods implementing this interface are:
129
+
130
+ - `BFGS`;
131
+ - `SR1`.
132
+
133
+ Not all of the options are available for each of the methods; for
134
+ availability refer to the notes.
135
+ hessp : callable, optional
136
+ Hessian of objective function times an arbitrary vector p. Only for
137
+ Newton-CG, trust-ncg, trust-krylov, trust-constr.
138
+ Only one of `hessp` or `hess` needs to be given. If `hess` is
139
+ provided, then `hessp` will be ignored. `hessp` must compute the
140
+ Hessian times an arbitrary vector:
141
+
142
+ ``hessp(x, p, *args) -> ndarray shape (n,)``
143
+
144
+ where ``x`` is a (n,) ndarray, ``p`` is an arbitrary vector with
145
+ dimension (n,) and ``args`` is a tuple with the fixed
146
+ parameters.
147
+ bounds : sequence or `Bounds`, optional
148
+ Bounds on variables for Nelder-Mead, L-BFGS-B, TNC, SLSQP, Powell,
149
+ trust-constr, and COBYLA methods. There are two ways to specify the
150
+ bounds:
151
+
152
+ 1. Instance of `Bounds` class.
153
+ 2. Sequence of ``(min, max)`` pairs for each element in `x`. None
154
+ is used to specify no bound.
155
+
156
+ constraints : {Constraint, dict} or List of {Constraint, dict}, optional
157
+ Constraints definition. Only for COBYLA, SLSQP and trust-constr.
158
+
159
+ Constraints for 'trust-constr' are defined as a single object or a
160
+ list of objects specifying constraints to the optimization problem.
161
+ Available constraints are:
162
+
163
+ - `LinearConstraint`
164
+ - `NonlinearConstraint`
165
+
166
+ Constraints for COBYLA, SLSQP are defined as a list of dictionaries.
167
+ Each dictionary has fields:
168
+
169
+ type : str
170
+ Constraint type: 'eq' for equality, 'ineq' for inequality.
171
+ fun : callable
172
+ The function defining the constraint.
173
+ jac : callable, optional
174
+ The Jacobian of `fun` (only for SLSQP).
175
+ args : sequence, optional
176
+ Extra arguments to be passed to the function and Jacobian.
177
+
178
+ Equality constraint means that the constraint function result is to
179
+ be zero whereas inequality means that it is to be non-negative.
180
+ Note that COBYLA only supports inequality constraints.
181
+ tol : float, optional
182
+ Tolerance for termination. When `tol` is specified, the selected
183
+ minimization algorithm sets some relevant solver-specific tolerance(s)
184
+ equal to `tol`. For detailed control, use solver-specific
185
+ options.
186
+ options : dict, optional
187
+ A dictionary of solver options. All methods except `TNC` accept the
188
+ following generic options:
189
+
190
+ maxiter : int
191
+ Maximum number of iterations to perform. Depending on the
192
+ method each iteration may use several function evaluations.
193
+
194
+ For `TNC` use `maxfun` instead of `maxiter`.
195
+ disp : bool
196
+ Set to True to print convergence messages.
197
+
198
+ For method-specific options, see :func:`show_options()`.
199
+ callback : callable, optional
200
+ A callable called after each iteration.
201
+
202
+ All methods except TNC, SLSQP, and COBYLA support a callable with
203
+ the signature:
204
+
205
+ ``callback(intermediate_result: OptimizeResult)``
206
+
207
+ where ``intermediate_result`` is a keyword parameter containing an
208
+ `OptimizeResult` with attributes ``x`` and ``fun``, the present values
209
+ of the parameter vector and objective function. Note that the name
210
+ of the parameter must be ``intermediate_result`` for the callback
211
+ to be passed an `OptimizeResult`. These methods will also terminate if
212
+ the callback raises ``StopIteration``.
213
+
214
+ All methods except trust-constr also support a signature like:
215
+
216
+ ``callback(xk)``
217
+
218
+ where ``xk`` is the current parameter vector.
219
+
220
+ Introspection is used to determine which of the signatures above to
221
+ invoke.
222
+
223
+ Returns
224
+ -------
225
+ res : OptimizeResult
226
+ The optimization result represented as an ``OptimizeResult`` object.
227
+ Important attributes are: ``x`` the solution array, ``success`` a
228
+ Boolean flag indicating if the optimizer exited successfully and
229
+ ``message`` which describes the cause of the termination. See
230
+ `OptimizeResult` for a description of other attributes.
231
+
232
+ See also
233
+ --------
234
+ minimize_scalar : Interface to minimization algorithms for scalar
235
+ univariate functions
236
+ show_options : Additional options accepted by the solvers
237
+
238
+ Notes
239
+ -----
240
+ This section describes the available solvers that can be selected by the
241
+ 'method' parameter. The default method is *BFGS*; when the problem has
+ bounds or constraints, the default is *L-BFGS-B* or *SLSQP*, respectively.
242
+
243
+ **Unconstrained minimization**
244
+
245
+ Method :ref:`CG <optimize.minimize-cg>` uses a nonlinear conjugate
246
+ gradient algorithm by Polak and Ribiere, a variant of the
247
+ Fletcher-Reeves method described in [5]_ pp.120-122. Only the
248
+ first derivatives are used.
249
+
250
+ Method :ref:`BFGS <optimize.minimize-bfgs>` uses the quasi-Newton
251
+ method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) [5]_
252
+ pp. 136. It uses the first derivatives only. BFGS has proven good
253
+ performance even for non-smooth optimizations. This method also
254
+ returns an approximation of the Hessian inverse, stored as
255
+ `hess_inv` in the OptimizeResult object.
256
+
257
+ Method :ref:`Newton-CG <optimize.minimize-newtoncg>` uses a
258
+ Newton-CG algorithm [5]_ pp. 168 (also known as the truncated
259
+ Newton method). It uses a CG method to compute the search
260
+ direction. See also *TNC* method for a box-constrained
261
+ minimization with a similar algorithm. Suitable for large-scale
262
+ problems.
263
+
264
+ Method :ref:`dogleg <optimize.minimize-dogleg>` uses the dog-leg
265
+ trust-region algorithm [5]_ for unconstrained minimization. This
266
+ algorithm requires the gradient and Hessian; furthermore the
267
+ Hessian is required to be positive definite.
268
+
269
+ Method :ref:`trust-ncg <optimize.minimize-trustncg>` uses the
270
+ Newton conjugate gradient trust-region algorithm [5]_ for
271
+ unconstrained minimization. This algorithm requires the gradient
272
+ and either the Hessian or a function that computes the product of
273
+ the Hessian with a given vector. Suitable for large-scale problems.
274
+
275
+ Method :ref:`trust-krylov <optimize.minimize-trustkrylov>` uses
276
+ the Newton GLTR trust-region algorithm [14]_, [15]_ for unconstrained
277
+ minimization. This algorithm requires the gradient
278
+ and either the Hessian or a function that computes the product of
279
+ the Hessian with a given vector. Suitable for large-scale problems.
280
+ On indefinite problems it usually requires fewer iterations than the
281
+ `trust-ncg` method and is recommended for medium and large-scale problems.
282
+
283
+ Method :ref:`trust-exact <optimize.minimize-trustexact>`
284
+ is a trust-region method for unconstrained minimization in which
285
+ quadratic subproblems are solved almost exactly [13]_. This
286
+ algorithm requires the gradient and the Hessian (which is
287
+ *not* required to be positive definite). In many situations, it
288
+ converges in fewer iterations than the other trust-region methods
289
+ and is the most recommended for small and medium-size problems.
290
+
291
+ **Bound-Constrained minimization**
292
+
293
+ Method :ref:`Nelder-Mead <optimize.minimize-neldermead>` uses the
294
+ Simplex algorithm [1]_, [2]_. This algorithm is robust in many
295
+ applications. However, if numerical computation of derivatives can be
296
+ trusted, other algorithms using the first and/or second derivatives
297
+ information might be preferred for their better performance in
298
+ general.
299
+
300
+ Method :ref:`L-BFGS-B <optimize.minimize-lbfgsb>` uses the L-BFGS-B
301
+ algorithm [6]_, [7]_ for bound constrained minimization.
302
+
303
+ Method :ref:`Powell <optimize.minimize-powell>` is a modification
304
+ of Powell's method [3]_, [4]_ which is a conjugate direction
305
+ method. It performs sequential one-dimensional minimizations along
306
+ each vector of the directions set (`direc` field in `options` and
307
+ `info`), which is updated at each iteration of the main
308
+ minimization loop. The function need not be differentiable, and no
309
+ derivatives are taken. If bounds are not provided, then an
310
+ unbounded line search will be used. If bounds are provided and
311
+ the initial guess is within the bounds, then every function
312
+ evaluation throughout the minimization procedure will be within
313
+ the bounds. If bounds are provided, the initial guess is outside
314
+ the bounds, and `direc` is full rank (default has full rank), then
315
+ some function evaluations during the first iteration may be
316
+ outside the bounds, but every function evaluation after the first
317
+ iteration will be within the bounds. If `direc` is not full rank,
318
+ then some parameters may not be optimized and the solution is not
319
+ guaranteed to be within the bounds.
320
+
321
+ Method :ref:`TNC <optimize.minimize-tnc>` uses a truncated Newton
322
+ algorithm [5]_, [8]_ to minimize a function with variables subject
323
+ to bounds. This algorithm uses gradient information; it is also
324
+ called Newton Conjugate-Gradient. It differs from the *Newton-CG*
325
+ method described above as it wraps a C implementation and allows
326
+ each variable to be given upper and lower bounds.
327
+
328
+ **Constrained Minimization**
329
+
330
+ Method :ref:`COBYLA <optimize.minimize-cobyla>` uses the
331
+ Constrained Optimization BY Linear Approximation (COBYLA) method
332
+ [9]_, [10]_, [11]_. The algorithm is based on linear
333
+ approximations to the objective function and each constraint. The
334
+ method wraps a FORTRAN implementation of the algorithm. The
335
+ constraint functions 'fun' may return either a single number
336
+ or an array or list of numbers.
337
+
338
+ Method :ref:`SLSQP <optimize.minimize-slsqp>` uses Sequential
339
+ Least SQuares Programming to minimize a function of several
340
+ variables with any combination of bounds, equality and inequality
341
+ constraints. The method wraps the SLSQP Optimization subroutine
342
+ originally implemented by Dieter Kraft [12]_. Note that the
343
+ wrapper handles infinite values in bounds by converting them into
344
+ large floating values.
345
+
346
+ Method :ref:`trust-constr <optimize.minimize-trustconstr>` is a
347
+ trust-region algorithm for constrained optimization. It switches
348
+ between two implementations depending on the problem definition.
349
+ It is the most versatile constrained minimization algorithm
350
+ implemented in SciPy and the most appropriate for large-scale problems.
351
+ For equality constrained problems it is an implementation of Byrd-Omojokun
352
+ Trust-Region SQP method described in [17]_ and in [5]_, p. 549. When
353
+ inequality constraints are imposed as well, it switches to the trust-region
354
+ interior point method described in [16]_. This interior point algorithm,
355
+ in turn, solves inequality constraints by introducing slack variables
356
+ and solving a sequence of equality-constrained barrier problems
357
+ for progressively smaller values of the barrier parameter.
358
+ The previously described equality constrained SQP method is
359
+ used to solve the subproblems with increasing levels of accuracy
360
+ as the iterate gets closer to a solution.
361
+
362
+ **Finite-Difference Options**
363
+
364
+ For Method :ref:`trust-constr <optimize.minimize-trustconstr>`
365
+ the gradient and the Hessian may be approximated using
366
+ three finite-difference schemes: {'2-point', '3-point', 'cs'}.
367
+ The scheme 'cs' is, potentially, the most accurate but it
368
+ requires the function to correctly handle complex inputs and to
369
+ be differentiable in the complex plane. The scheme '3-point' is more
370
+ accurate than '2-point' but requires twice as many operations. If the
371
+ gradient is estimated via finite-differences the Hessian must be
372
+ estimated using one of the quasi-Newton strategies.
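+
+ For example, a '3-point' gradient approximation can be combined with a
+ quasi-Newton Hessian approximation (an illustrative pairing):
+
+ >>> from scipy.optimize import minimize, rosen, BFGS
+ >>> res = minimize(rosen, [1.3, 0.7], method='trust-constr',
+ ...                jac='3-point', hess=BFGS())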
373
+
374
+ **Method specific options for the** `hess` **keyword**
375
+
376
+ +--------------+------+----------+--------------------------+-----+
377
+ | method/Hess  | None | callable | '2-point'/'3-point'/'cs' | HUS |
378
+ +==============+======+==========+==========================+=====+
379
+ | Newton-CG    | x    | (n, n)   | x                        | x   |
380
+ |              |      | LO       |                          |     |
381
+ +--------------+------+----------+--------------------------+-----+
382
+ | dogleg       |      | (n, n)   |                          |     |
383
+ +--------------+------+----------+--------------------------+-----+
384
+ | trust-ncg    |      | (n, n)   | x                        | x   |
385
+ +--------------+------+----------+--------------------------+-----+
386
+ | trust-krylov |      | (n, n)   | x                        | x   |
387
+ +--------------+------+----------+--------------------------+-----+
388
+ | trust-exact  |      | (n, n)   |                          |     |
389
+ +--------------+------+----------+--------------------------+-----+
390
+ | trust-constr | x    | (n, n)   | x                        | x   |
391
+ |              |      | LO       |                          |     |
392
+ |              |      | sp       |                          |     |
393
+ +--------------+------+----------+--------------------------+-----+
394
+
395
+ where LO=LinearOperator, sp=Sparse matrix, HUS=HessianUpdateStrategy
396
+
397
+ **Custom minimizers**
398
+
399
+ It may be useful to pass a custom minimization method, for example
400
+ when using a frontend to this method such as `scipy.optimize.basinhopping`
401
+ or a different library. You can simply pass a callable as the ``method``
402
+ parameter.
403
+
404
+ The callable is called as ``method(fun, x0, args, **kwargs, **options)``
405
+ where ``kwargs`` corresponds to any other parameters passed to `minimize`
406
+ (such as `callback`, `hess`, etc.), except the `options` dict, which has
407
+ its contents also passed as `method` parameters pair by pair. Also, if
408
+ `jac` has been passed as a bool type, `jac` and `fun` are mangled so that
409
+ `fun` returns just the function values and `jac` is converted to a function
410
+ returning the Jacobian. The method shall return an `OptimizeResult`
411
+ object.
412
+
413
+ The provided `method` callable must be able to accept (and possibly ignore)
414
+ arbitrary parameters; the set of parameters accepted by `minimize` may
415
+ expand in future versions and then these parameters will be passed to
416
+ the method. You can find an example in the scipy.optimize tutorial.
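+
+ A minimal sketch of the required protocol (``custmin`` is illustrative,
+ not a real optimizer):
+
+ >>> from scipy.optimize import minimize, OptimizeResult
+ >>> def custmin(fun, x0, args=(), **options):
+ ...     # evaluate once and wrap the result in the expected object
+ ...     return OptimizeResult(x=x0, fun=fun(x0, *args),
+ ...                           success=True, nfev=1)
+ >>> res = minimize(lambda x: (x**2).sum(), [1.0, 2.0], method=custmin)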
417
+
418
+ References
419
+ ----------
420
+ .. [1] Nelder, J A, and R Mead. 1965. A Simplex Method for Function
421
+ Minimization. The Computer Journal 7: 308-13.
422
+ .. [2] Wright M H. 1996. Direct search methods: Once scorned, now
423
+ respectable, in Numerical Analysis 1995: Proceedings of the 1995
424
+ Dundee Biennial Conference in Numerical Analysis (Eds. D F
425
+ Griffiths and G A Watson). Addison Wesley Longman, Harlow, UK.
426
+ 191-208.
427
+ .. [3] Powell, M J D. 1964. An efficient method for finding the minimum of
428
+ a function of several variables without calculating derivatives. The
429
+ Computer Journal 7: 155-162.
430
+ .. [4] Press W, S A Teukolsky, W T Vetterling and B P Flannery.
431
+ Numerical Recipes (any edition), Cambridge University Press.
432
+ .. [5] Nocedal, J, and S J Wright. 2006. Numerical Optimization.
433
+ Springer New York.
434
+ .. [6] Byrd, R H and P Lu and J. Nocedal. 1995. A Limited Memory
435
+ Algorithm for Bound Constrained Optimization. SIAM Journal on
436
+ Scientific and Statistical Computing 16 (5): 1190-1208.
437
+ .. [7] Zhu, C and R H Byrd and J Nocedal. 1997. L-BFGS-B: Algorithm
438
+ 778: L-BFGS-B, FORTRAN routines for large scale bound constrained
439
+ optimization. ACM Transactions on Mathematical Software 23 (4):
440
+ 550-560.
441
+ .. [8] Nash, S G. Newton-Type Minimization Via the Lanczos Method.
442
+ 1984. SIAM Journal of Numerical Analysis 21: 770-778.
443
+ .. [9] Powell, M J D. A direct search optimization method that models
444
+ the objective and constraint functions by linear interpolation.
445
+ 1994. Advances in Optimization and Numerical Analysis, eds. S. Gomez
446
+ and J-P Hennart, Kluwer Academic (Dordrecht), 51-67.
447
+ .. [10] Powell M J D. Direct search algorithms for optimization
448
+ calculations. 1998. Acta Numerica 7: 287-336.
449
+ .. [11] Powell M J D. A view of algorithms for optimization without
450
+ derivatives. 2007.Cambridge University Technical Report DAMTP
451
+ 2007/NA03
452
+ .. [12] Kraft, D. A software package for sequential quadratic
453
+ programming. 1988. Tech. Rep. DFVLR-FB 88-28, DLR German Aerospace
454
+ Center -- Institute for Flight Mechanics, Koln, Germany.
455
+ .. [13] Conn, A. R., Gould, N. I., and Toint, P. L.
456
+ Trust region methods. 2000. Siam. pp. 169-200.
457
+ .. [14] F. Lenders, C. Kirches, A. Potschka: "trlib: A vector-free
458
+ implementation of the GLTR method for iterative solution of
459
+ the trust region problem", :arxiv:`1611.04718`
460
+ .. [15] N. Gould, S. Lucidi, M. Roma, P. Toint: "Solving the
461
+ Trust-Region Subproblem using the Lanczos Method",
462
+ SIAM J. Optim., 9(2), 504--525, (1999).
463
+ .. [16] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. 1999.
464
+ An interior point algorithm for large-scale nonlinear programming.
465
+ SIAM Journal on Optimization 9.4: 877-900.
466
+ .. [17] Lalee, Marucha, Jorge Nocedal, and Todd Plantega. 1998. On the
467
+ implementation of an algorithm for large-scale equality constrained
468
+ optimization. SIAM Journal on Optimization 8.3: 682-706.
469
+
470
+ Examples
471
+ --------
472
+ Let us consider the problem of minimizing the Rosenbrock function. This
473
+ function (and its respective derivatives) is implemented in `rosen`
474
+ (resp. `rosen_der`, `rosen_hess`) in the `scipy.optimize`.
475
+
476
+ >>> from scipy.optimize import minimize, rosen, rosen_der
477
+
478
+ A simple application of the *Nelder-Mead* method is:
479
+
480
+ >>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
481
+ >>> res = minimize(rosen, x0, method='Nelder-Mead', tol=1e-6)
482
+ >>> res.x
483
+ array([ 1., 1., 1., 1., 1.])
484
+
485
+ Now using the *BFGS* algorithm, using the first derivative and a few
486
+ options:
487
+
488
+ >>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der,
489
+ ... options={'gtol': 1e-6, 'disp': True})
490
+ Optimization terminated successfully.
491
+ Current function value: 0.000000
492
+ Iterations: 26
493
+ Function evaluations: 31
494
+ Gradient evaluations: 31
495
+ >>> res.x
496
+ array([ 1., 1., 1., 1., 1.])
497
+ >>> print(res.message)
498
+ Optimization terminated successfully.
499
+ >>> res.hess_inv
500
+ array([
501
+ [ 0.00749589, 0.01255155, 0.02396251, 0.04750988, 0.09495377], # may vary
502
+ [ 0.01255155, 0.02510441, 0.04794055, 0.09502834, 0.18996269],
503
+ [ 0.02396251, 0.04794055, 0.09631614, 0.19092151, 0.38165151],
504
+ [ 0.04750988, 0.09502834, 0.19092151, 0.38341252, 0.7664427 ],
505
+ [ 0.09495377, 0.18996269, 0.38165151, 0.7664427, 1.53713523]
506
+ ])
507
+
508
+
509
+ Next, consider a minimization problem with several constraints (namely
510
+ Example 16.4 from [5]_). The objective function is:
511
+
512
+ >>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
513
+
514
+ There are three constraints defined as:
515
+
516
+ >>> cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
517
+ ... {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
518
+ ... {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2})
519
+
520
+ And variables must be positive, hence the following bounds:
521
+
522
+ >>> bnds = ((0, None), (0, None))
523
+
524
+ The optimization problem is solved using the SLSQP method as:
525
+
526
+ >>> res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds,
527
+ ... constraints=cons)
528
+
529
+ It should converge to the theoretical solution (1.4, 1.7).
530
+
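+ The new-style callback interface described above can be used to stop a
+ solver early; a minimal sketch (the ``1e-8`` threshold is arbitrary):
+
+ >>> def cb(intermediate_result):
+ ...     if intermediate_result.fun < 1e-8:
+ ...         raise StopIteration
+ >>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der, callback=cb)
+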
531
+ """
532
+ x0 = np.atleast_1d(np.asarray(x0))
533
+
534
+ if x0.ndim != 1:
535
+ raise ValueError("'x0' must only have one dimension.")
536
+
537
+ if x0.dtype.kind in np.typecodes["AllInteger"]:
538
+ x0 = np.asarray(x0, dtype=float)
539
+
540
+ if not isinstance(args, tuple):
541
+ args = (args,)
542
+
543
+ if method is None:
544
+ # Select automatically
545
+ if constraints:
546
+ method = 'SLSQP'
547
+ elif bounds is not None:
548
+ method = 'L-BFGS-B'
549
+ else:
550
+ method = 'BFGS'
551
+
552
+ if callable(method):
553
+ meth = "_custom"
554
+ else:
555
+ meth = method.lower()
556
+
557
+ if options is None:
558
+ options = {}
559
+ # check if optional parameters are supported by the selected method
560
+ # - jac
561
+ if meth in ('nelder-mead', 'powell', 'cobyla') and bool(jac):
562
+ warn('Method %s does not use gradient information (jac).' % method,
563
+ RuntimeWarning, stacklevel=2)
564
+ # - hess
565
+ if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr',
566
+ 'trust-krylov', 'trust-exact', '_custom') and hess is not None:
567
+ warn('Method %s does not use Hessian information (hess).' % method,
568
+ RuntimeWarning, stacklevel=2)
569
+ # - hessp
570
+ if meth not in ('newton-cg', 'trust-ncg', 'trust-constr',
571
+ 'trust-krylov', '_custom') \
572
+ and hessp is not None:
573
+ warn('Method %s does not use Hessian-vector product '
574
+ 'information (hessp).' % method,
575
+ RuntimeWarning, stacklevel=2)
576
+ # - constraints or bounds
577
+ if (meth not in ('cobyla', 'slsqp', 'trust-constr', '_custom') and
578
+ np.any(constraints)):
579
+ warn('Method %s cannot handle constraints.' % method,
580
+ RuntimeWarning, stacklevel=2)
581
+ if meth not in ('nelder-mead', 'powell', 'l-bfgs-b', 'cobyla', 'slsqp',
582
+ 'tnc', 'trust-constr', '_custom') and bounds is not None:
583
+ warn('Method %s cannot handle bounds.' % method,
584
+ RuntimeWarning, stacklevel=2)
585
+ # - return_all
586
+ if (meth in ('l-bfgs-b', 'tnc', 'cobyla', 'slsqp') and
587
+ options.get('return_all', False)):
588
+ warn('Method %s does not support the return_all option.' % method,
589
+ RuntimeWarning, stacklevel=2)
590
+
591
+ # check gradient vector
592
+ if callable(jac):
593
+ pass
594
+ elif jac is True:
595
+ # fun returns func and grad
596
+ fun = MemoizeJac(fun)
597
+ jac = fun.derivative
598
+ elif (jac in FD_METHODS and
599
+ meth in ['trust-constr', 'bfgs', 'cg', 'l-bfgs-b', 'tnc', 'slsqp']):
600
+ # finite differences with relative step
601
+ pass
602
+ elif meth in ['trust-constr']:
603
+ # default jac calculation for this method
604
+ jac = '2-point'
605
+ elif jac is None or bool(jac) is False:
606
+ # this will cause e.g. LBFGS to use forward difference, absolute step
607
+ jac = None
608
+ else:
609
+ # default if jac option is not understood
610
+ jac = None
611
+
612
+ # set default tolerances
613
+ if tol is not None:
614
+ options = dict(options)
615
+ if meth == 'nelder-mead':
616
+ options.setdefault('xatol', tol)
617
+ options.setdefault('fatol', tol)
618
+ if meth in ('newton-cg', 'powell', 'tnc'):
619
+ options.setdefault('xtol', tol)
620
+ if meth in ('powell', 'l-bfgs-b', 'tnc', 'slsqp'):
621
+ options.setdefault('ftol', tol)
622
+ if meth in ('bfgs', 'cg', 'l-bfgs-b', 'tnc', 'dogleg',
623
+ 'trust-ncg', 'trust-exact', 'trust-krylov'):
624
+ options.setdefault('gtol', tol)
625
+ if meth in ('cobyla', '_custom'):
626
+ options.setdefault('tol', tol)
627
+ if meth == 'trust-constr':
628
+ options.setdefault('xtol', tol)
629
+ options.setdefault('gtol', tol)
630
+ options.setdefault('barrier_tol', tol)
631
+
632
+ if meth == '_custom':
633
+ # custom method called before bounds and constraints are 'standardised'
634
+ # custom method should be able to accept whatever bounds/constraints
635
+ # are provided to it.
636
+ return method(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp,
637
+ bounds=bounds, constraints=constraints,
638
+ callback=callback, **options)
639
+
640
+ constraints = standardize_constraints(constraints, x0, meth)
641
+
642
+ remove_vars = False
643
+ if bounds is not None:
644
+ # convert to new-style bounds so we only have to consider one case
645
+ bounds = standardize_bounds(bounds, x0, 'new')
646
+ bounds = _validate_bounds(bounds, x0, meth)
647
+
648
+ if meth in {"tnc", "slsqp", "l-bfgs-b"}:
649
+ # These methods can't take the finite-difference derivatives they
650
+ # need when a variable is fixed by the bounds. To avoid this issue,
651
+ # remove fixed variables from the problem.
652
+ # NOTE: if this list is expanded, then be sure to update the
653
+ # accompanying tests and test_optimize.eb_data. Consider also if
654
+ # default OptimizeResult will need updating.
655
+
656
+ # determine whether any variables are fixed
657
+ i_fixed = (bounds.lb == bounds.ub)
658
+
659
+ if np.all(i_fixed):
660
+ # all the parameters are fixed, a minimizer is not able to do
661
+ # anything
662
+ return _optimize_result_for_equal_bounds(
663
+ fun, bounds, meth, args=args, constraints=constraints
664
+ )
665
+
666
+ # determine whether finite differences are needed for any grad/jac
667
+ fd_needed = (not callable(jac))
668
+ for con in constraints:
669
+ if not callable(con.get('jac', None)):
670
+ fd_needed = True
671
+
672
+ # If finite differences are ever used, remove all fixed variables
673
+ # Always remove fixed variables for TNC; see gh-14565
674
+ remove_vars = i_fixed.any() and (fd_needed or meth == "tnc")
675
+ if remove_vars:
676
+ x_fixed = (bounds.lb)[i_fixed]
677
+ x0 = x0[~i_fixed]
678
+ bounds = _remove_from_bounds(bounds, i_fixed)
679
+ fun = _remove_from_func(fun, i_fixed, x_fixed)
680
+ if callable(callback):
681
+ callback = _remove_from_func(callback, i_fixed, x_fixed)
682
+ if callable(jac):
683
+ jac = _remove_from_func(jac, i_fixed, x_fixed, remove=1)
684
+
685
+ # make a copy of the constraints so the user's version doesn't
686
+ # get changed. (Shallow copy is ok)
687
+ constraints = [con.copy() for con in constraints]
688
+ for con in constraints: # yes, guaranteed to be a list
689
+ con['fun'] = _remove_from_func(con['fun'], i_fixed,
690
+ x_fixed, min_dim=1,
691
+ remove=0)
692
+ if callable(con.get('jac', None)):
693
+ con['jac'] = _remove_from_func(con['jac'], i_fixed,
694
+ x_fixed, min_dim=2,
695
+ remove=1)
696
+ bounds = standardize_bounds(bounds, x0, meth)
697
+
698
+ callback = _wrap_callback(callback, meth)
699
+
700
+ if meth == 'nelder-mead':
701
+ res = _minimize_neldermead(fun, x0, args, callback, bounds=bounds,
702
+ **options)
703
+ elif meth == 'powell':
704
+ res = _minimize_powell(fun, x0, args, callback, bounds, **options)
705
+ elif meth == 'cg':
706
+ res = _minimize_cg(fun, x0, args, jac, callback, **options)
707
+ elif meth == 'bfgs':
708
+ res = _minimize_bfgs(fun, x0, args, jac, callback, **options)
709
+ elif meth == 'newton-cg':
710
+ res = _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback,
711
+ **options)
712
+ elif meth == 'l-bfgs-b':
713
+ res = _minimize_lbfgsb(fun, x0, args, jac, bounds,
714
+ callback=callback, **options)
715
+ elif meth == 'tnc':
716
+ res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback,
717
+ **options)
718
+ elif meth == 'cobyla':
719
+ res = _minimize_cobyla(fun, x0, args, constraints, callback=callback,
720
+ bounds=bounds, **options)
721
+ elif meth == 'slsqp':
722
+ res = _minimize_slsqp(fun, x0, args, jac, bounds,
723
+ constraints, callback=callback, **options)
724
+ elif meth == 'trust-constr':
725
+ res = _minimize_trustregion_constr(fun, x0, args, jac, hess, hessp,
726
+ bounds, constraints,
727
+ callback=callback, **options)
728
+ elif meth == 'dogleg':
729
+ res = _minimize_dogleg(fun, x0, args, jac, hess,
730
+ callback=callback, **options)
731
+ elif meth == 'trust-ncg':
732
+ res = _minimize_trust_ncg(fun, x0, args, jac, hess, hessp,
733
+ callback=callback, **options)
734
+ elif meth == 'trust-krylov':
735
+ res = _minimize_trust_krylov(fun, x0, args, jac, hess, hessp,
736
+ callback=callback, **options)
737
+ elif meth == 'trust-exact':
738
+ res = _minimize_trustregion_exact(fun, x0, args, jac, hess,
739
+ callback=callback, **options)
740
+ else:
741
+ raise ValueError('Unknown solver %s' % method)
742
+
743
+ if remove_vars:
744
+ res.x = _add_to_array(res.x, i_fixed, x_fixed)
745
+ res.jac = _add_to_array(res.jac, i_fixed, np.nan)
746
+ if "hess_inv" in res:
747
+ res.hess_inv = None # unknown
748
+
749
+ if getattr(callback, 'stop_iteration', False):
750
+ res.success = False
751
+ res.status = 99
752
+ res.message = "`callback` raised `StopIteration`."
753
+
754
+ return res
755
+
756
+
757
+ def minimize_scalar(fun, bracket=None, bounds=None, args=(),
758
+ method=None, tol=None, options=None):
759
+ """Local minimization of scalar function of one variable.
760
+
761
+ Parameters
762
+ ----------
763
+ fun : callable
764
+ Objective function.
765
+ Scalar function, must return a scalar.
766
+ bracket : sequence, optional
767
+ For methods 'brent' and 'golden', `bracket` defines the bracketing
768
+ interval and is required.
769
+ Either a triple ``(xa, xb, xc)`` satisfying ``xa < xb < xc`` and
770
+ ``func(xb) < func(xa) and func(xb) < func(xc)``, or a pair
771
+ ``(xa, xb)`` to be used as initial points for a downhill bracket search
772
+ (see `scipy.optimize.bracket`).
773
+ The minimizer ``res.x`` will not necessarily satisfy
774
+ ``xa <= res.x <= xb``.
775
+ bounds : sequence, optional
776
+ For method 'bounded', `bounds` is mandatory and must have two finite
777
+ items corresponding to the optimization bounds.
778
+ args : tuple, optional
779
+ Extra arguments passed to the objective function.
780
+ method : str or callable, optional
781
+ Type of solver. Should be one of:
782
+
783
+ - :ref:`Brent <optimize.minimize_scalar-brent>`
784
+ - :ref:`Bounded <optimize.minimize_scalar-bounded>`
785
+ - :ref:`Golden <optimize.minimize_scalar-golden>`
786
+ - custom - a callable object (added in version 0.14.0), see below
787
+
788
+ Default is "Bounded" if bounds are provided and "Brent" otherwise.
789
+ See the 'Notes' section for details of each solver.
790
+
791
+ tol : float, optional
792
+ Tolerance for termination. For detailed control, use solver-specific
793
+ options.
794
+ options : dict, optional
795
+ A dictionary of solver options.
796
+
797
+ maxiter : int
798
+ Maximum number of iterations to perform.
799
+ disp : bool
800
+ Set to True to print convergence messages.
801
+
802
+ See :func:`show_options()` for solver-specific options.
803
+
804
+ Returns
805
+ -------
806
+ res : OptimizeResult
807
+ The optimization result represented as an ``OptimizeResult`` object.
808
+ Important attributes are: ``x`` the solution array, ``success`` a
809
+ Boolean flag indicating if the optimizer exited successfully and
810
+ ``message`` which describes the cause of the termination. See
811
+ `OptimizeResult` for a description of other attributes.
812
+
813
+ See also
814
+ --------
815
+ minimize : Interface to minimization algorithms for scalar multivariate
816
+ functions
817
+ show_options : Additional options accepted by the solvers
818
+
819
+ Notes
820
+ -----
821
+ This section describes the available solvers that can be selected by the
822
+ 'method' parameter. The default method is the ``"Bounded"`` Brent method if
823
+ `bounds` are passed and unbounded ``"Brent"`` otherwise.
824
+
825
+ Method :ref:`Brent <optimize.minimize_scalar-brent>` uses Brent's
826
+ algorithm [1]_ to find a local minimum. The algorithm uses inverse
827
+ parabolic interpolation when possible to speed up convergence of
828
+ the golden section method.
829
+
830
+ Method :ref:`Golden <optimize.minimize_scalar-golden>` uses the
831
+ golden section search technique [1]_. It uses analog of the bisection
832
+ method to decrease the bracketed interval. It is usually
833
+ preferable to use the *Brent* method.
834
+
835
+ Method :ref:`Bounded <optimize.minimize_scalar-bounded>` can
836
+ perform bounded minimization [2]_ [3]_. It uses the Brent method to find a
837
+ local minimum in the interval x1 < xopt < x2.
838
+
839
+ Note that the Brent and Golden methods do not guarantee success unless a
840
+ valid ``bracket`` triple is provided. If a three-point bracket cannot be
841
+ found, consider `scipy.optimize.minimize`. Also, all methods are intended
842
+ only for local minimization. When the function of interest has more than
843
+ one local minimum, consider :ref:`global_optimization`.
844
+
845
+ **Custom minimizers**
846
+
847
+ It may be useful to pass a custom minimization method, for example
848
+ when using some library frontend to minimize_scalar. You can simply
849
+ pass a callable as the ``method`` parameter.
850
+
851
+ The callable is called as ``method(fun, args, **kwargs, **options)``
852
+ where ``kwargs`` corresponds to any other parameters passed to `minimize_scalar`
853
+ (such as `bracket`, `tol`, etc.), except the `options` dict, which has
854
+ its contents also passed as `method` parameters pair by pair. The method
855
+ shall return an `OptimizeResult` object.
856
+
857
+ The provided `method` callable must be able to accept (and possibly ignore)
858
+ arbitrary parameters; the set of parameters accepted by `minimize_scalar` may
859
+ expand in future versions and then these parameters will be passed to
860
+ the method. You can find an example in the scipy.optimize tutorial.
861
+
862
+ .. versionadded:: 0.11.0
863
+
864
+ References
865
+ ----------
866
+ .. [1] Press, W., S.A. Teukolsky, W.T. Vetterling, and B.P. Flannery.
867
+ Numerical Recipes in C. Cambridge University Press.
868
+ .. [2] Forsythe, G.E., M. A. Malcolm, and C. B. Moler. "Computer Methods
869
+ for Mathematical Computations." Prentice-Hall Series in Automatic
870
+ Computation 259 (1977).
871
+ .. [3] Brent, Richard P. Algorithms for Minimization Without Derivatives.
872
+ Courier Corporation, 2013.
873
+
874
+ Examples
875
+ --------
876
+ Consider the problem of minimizing the following function.
877
+
878
+ >>> def f(x):
879
+ ... return (x - 2) * x * (x + 2)**2
880
+
881
+ Using the *Brent* method, we find the local minimum as:
882
+
883
+ >>> from scipy.optimize import minimize_scalar
884
+ >>> res = minimize_scalar(f)
885
+ >>> res.fun
886
+ -9.9149495908
887
+
888
+ The minimizer is:
889
+
890
+ >>> res.x
891
+ 1.28077640403
892
+
893
+ Using the *Bounded* method, we find a local minimum with specified
894
+ bounds as:
895
+
896
+ >>> res = minimize_scalar(f, bounds=(-3, -1), method='bounded')
897
+ >>> res.fun # minimum
898
+ 3.28365179850e-13
899
+ >>> res.x # minimizer
900
+ -2.0000002026
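+
+ A two-point `bracket` can also seed the *Brent* method's downhill
+ bracket search (a sketch; the search may evaluate points outside the
+ pair, and the minimizer found is not guaranteed to lie within it):
+
+ >>> res = minimize_scalar(f, bracket=(1, 2))
+ >>> res.x
+ 1.28077640403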
901
+
902
+ """
903
+ if not isinstance(args, tuple):
904
+ args = (args,)
905
+
906
+ if callable(method):
907
+ meth = "_custom"
908
+ elif method is None:
909
+ meth = 'brent' if bounds is None else 'bounded'
910
+ else:
911
+ meth = method.lower()
912
+ if options is None:
913
+ options = {}
914
+
915
+ if bounds is not None and meth in {'brent', 'golden'}:
916
+ message = f"Use of `bounds` is incompatible with 'method={method}'."
917
+ raise ValueError(message)
918
+
919
+ if tol is not None:
920
+ options = dict(options)
921
+ if meth == 'bounded' and 'xatol' not in options:
922
+ warn("Method 'bounded' does not support relative tolerance in x; "
923
+ "defaulting to absolute tolerance.",
924
+ RuntimeWarning, stacklevel=2)
925
+ options['xatol'] = tol
926
+ elif meth == '_custom':
927
+ options.setdefault('tol', tol)
928
+ else:
929
+ options.setdefault('xtol', tol)
930
+
931
+ # replace boolean "disp" option, if specified, by an integer value.
932
+ disp = options.get('disp')
933
+ if isinstance(disp, bool):
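+ # the scalar solvers take an integer verbosity level;
+ # map False -> 0 and True -> 2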
934
+ options['disp'] = 2 * int(disp)
935
+
936
+ if meth == '_custom':
937
+ res = method(fun, args=args, bracket=bracket, bounds=bounds, **options)
938
+ elif meth == 'brent':
939
+ res = _recover_from_bracket_error(_minimize_scalar_brent,
940
+ fun, bracket, args, **options)
941
+ elif meth == 'bounded':
942
+ if bounds is None:
943
+ raise ValueError('The `bounds` parameter is mandatory for '
944
+ 'method `bounded`.')
945
+ res = _minimize_scalar_bounded(fun, bounds, args, **options)
946
+ elif meth == 'golden':
947
+ res = _recover_from_bracket_error(_minimize_scalar_golden,
948
+ fun, bracket, args, **options)
949
+ else:
950
+ raise ValueError('Unknown solver %s' % method)
951
+
952
+ # gh-16196 reported inconsistencies in the output shape of `res.x`. While
953
+ # fixing this, future-proof it for when the function is vectorized:
954
+ # the shape of `res.x` should match that of `res.fun`.
955
+ res.fun = np.asarray(res.fun)[()]
956
+ res.x = np.reshape(res.x, res.fun.shape)[()]
957
+ return res
958
+
959
+
960
+ def _remove_from_bounds(bounds, i_fixed):
961
+ """Removes fixed variables from a `Bounds` instance"""
962
+ lb = bounds.lb[~i_fixed]
963
+ ub = bounds.ub[~i_fixed]
964
+ return Bounds(lb, ub) # don't mutate original Bounds object
965
+
966
+
967
+ def _remove_from_func(fun_in, i_fixed, x_fixed, min_dim=None, remove=0):
968
+ """Wraps a function such that fixed variables need not be passed in"""
969
+ def fun_out(x_in, *args, **kwargs):
970
+ x_out = np.zeros_like(i_fixed, dtype=x_in.dtype)
971
+ x_out[i_fixed] = x_fixed
972
+ x_out[~i_fixed] = x_in
973
+ y_out = fun_in(x_out, *args, **kwargs)
974
+ y_out = np.array(y_out)
975
+
976
+ if min_dim == 1:
977
+ y_out = np.atleast_1d(y_out)
978
+ elif min_dim == 2:
979
+ y_out = np.atleast_2d(y_out)
980
+
981
+ if remove == 1:
982
+ y_out = y_out[..., ~i_fixed]
983
+ elif remove == 2:
984
+ y_out = y_out[~i_fixed, ~i_fixed]
985
+
986
+ return y_out
987
+ return fun_out
988
+
989
+
990
+ def _add_to_array(x_in, i_fixed, x_fixed):
991
+ """Adds fixed variables back to an array"""
992
+ i_free = ~i_fixed
993
+ if x_in.ndim == 2:
994
+ i_free = i_free[:, None] @ i_free[None, :]
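+ # boolean outer product: entry (i, j) is free only if both
+ # variables i and j are free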
995
+ x_out = np.zeros_like(i_free, dtype=x_in.dtype)
996
+ x_out[~i_free] = x_fixed
997
+ x_out[i_free] = x_in.ravel()
998
+ return x_out
999
+
1000
+
1001
+ def _validate_bounds(bounds, x0, meth):
1002
+ """Check that bounds are valid."""
1003
+
1004
+ msg = "An upper bound is less than the corresponding lower bound."
1005
+ if np.any(bounds.ub < bounds.lb):
1006
+ raise ValueError(msg)
1007
+
1008
+ msg = "The number of bounds is not compatible with the length of `x0`."
1009
+ try:
1010
+ bounds.lb = np.broadcast_to(bounds.lb, x0.shape)
1011
+ bounds.ub = np.broadcast_to(bounds.ub, x0.shape)
1012
+ except Exception as e:
1013
+ raise ValueError(msg) from e
1014
+
1015
+ return bounds
1016
+
1017
+ def standardize_bounds(bounds, x0, meth):
1018
+ """Converts bounds to the form required by the solver."""
1019
+ if meth in {'trust-constr', 'powell', 'nelder-mead', 'cobyla', 'new'}:
1020
+ if not isinstance(bounds, Bounds):
1021
+ lb, ub = old_bound_to_new(bounds)
1022
+ bounds = Bounds(lb, ub)
1023
+ elif meth in ('l-bfgs-b', 'tnc', 'slsqp', 'old'):
1024
+ if isinstance(bounds, Bounds):
1025
+ bounds = new_bounds_to_old(bounds.lb, bounds.ub, x0.shape[0])
1026
+ return bounds
1027
+
1028
+
1029
+ def standardize_constraints(constraints, x0, meth):
1030
+ """Converts constraints to the form required by the solver."""
1031
+ all_constraint_types = (NonlinearConstraint, LinearConstraint, dict)
1032
+ new_constraint_types = all_constraint_types[:-1]
1033
+ if constraints is None:
1034
+ constraints = []
1035
+ elif isinstance(constraints, all_constraint_types):
1036
+ constraints = [constraints]
1037
+ else:
1038
+ constraints = list(constraints) # ensure it's a mutable sequence
1039
+
1040
+ if meth in ['trust-constr', 'new']:
1041
+ for i, con in enumerate(constraints):
1042
+ if not isinstance(con, new_constraint_types):
1043
+ constraints[i] = old_constraint_to_new(i, con)
1044
+ else:
1045
+ # iterate over copy, changing original
1046
+ for i, con in enumerate(list(constraints)):
1047
+ if isinstance(con, new_constraint_types):
1048
+ old_constraints = new_constraint_to_old(con, x0)
1049
+ constraints[i] = old_constraints[0]
1050
+ constraints.extend(old_constraints[1:]) # appends the 2nd one, if present
1051
+
1052
+ return constraints
1053
+
1054
+
1055
+ def _optimize_result_for_equal_bounds(
1056
+ fun, bounds, method, args=(), constraints=()
1057
+ ):
1058
+ """
1059
+ Provides a default OptimizeResult for when a bounded minimization method
1060
+ has (lb == ub).all().
1061
+
1062
+ Parameters
1063
+ ----------
1064
+ fun: callable
1065
+ bounds: Bounds
1066
+ method: str
1067
+ constraints: Constraint
1068
+ """
1069
+ success = True
1070
+ message = 'All independent variables were fixed by bounds.'
1071
+
1072
+ # bounds is new-style
1073
+ x0 = bounds.lb
1074
+
1075
+ if constraints:
1076
+ message = ("All independent variables were fixed by bounds at values"
1077
+ " that satisfy the constraints.")
1078
+ constraints = standardize_constraints(constraints, x0, 'new')
1079
+
1080
+ maxcv = 0
1081
+ for c in constraints:
1082
+ pc = PreparedConstraint(c, x0)
1083
+ violation = pc.violation(x0)
1084
+ if np.sum(violation):
1085
+ maxcv = max(maxcv, np.max(violation))
1086
+ success = False
1087
+ message = (f"All independent variables were fixed by bounds, but "
1088
+ f"the independent variables do not satisfy the "
1089
+ f"constraints exactly. (Maximum violation: {maxcv}).")
1090
+
1091
+ return OptimizeResult(
1092
+ x=x0, fun=fun(x0, *args), success=success, message=message, nfev=1,
1093
+ njev=0, nhev=0,
1094
+ )
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_minpack.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (78.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_minpack_py.py ADDED
@@ -0,0 +1,1157 @@
1
+ import warnings
2
+ from . import _minpack
3
+
4
+ import numpy as np
5
+ from numpy import (atleast_1d, triu, shape, transpose, zeros, prod, greater,
6
+ asarray, inf,
7
+ finfo, inexact, issubdtype, dtype)
8
+ from scipy import linalg
9
+ from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError
10
+ from scipy._lib._util import _asarray_validated, _lazywhere, _contains_nan
11
+ from scipy._lib._util import getfullargspec_no_self as _getfullargspec
12
+ from ._optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
13
+ from ._lsq import least_squares
14
+ # from ._lsq.common import make_strictly_feasible
15
+ from ._lsq.least_squares import prepare_bounds
16
+ from scipy.optimize._minimize import Bounds
17
+
18
+ # deprecated imports to be removed in SciPy 1.13.0
19
+ from numpy import dot, eye, take # noqa: F401
20
+ from numpy.linalg import inv # noqa: F401
21
+
22
+ error = _minpack.error
23
+
24
+ __all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']
25
+
26
+
27
+ def _check_func(checker, argname, thefunc, x0, args, numinputs,
28
+ output_shape=None):
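+ # evaluate `thefunc` once at `x0` to learn the output shape and dtype;
+ # a mismatch with `output_shape` raises a descriptive TypeError below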
29
+ res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
30
+ if (output_shape is not None) and (shape(res) != output_shape):
31
+ if (output_shape[0] != 1):
32
+ if len(output_shape) > 1:
33
+ if output_shape[1] == 1:
34
+ return shape(res)
35
+ msg = f"{checker}: there is a mismatch between the input and output " \
36
+ f"shape of the '{argname}' argument"
37
+ func_name = getattr(thefunc, '__name__', None)
38
+ if func_name:
39
+ msg += " '%s'." % func_name
40
+ else:
41
+ msg += "."
42
+ msg += f' Shape should be {output_shape} but it is {shape(res)}.'
43
+ raise TypeError(msg)
44
+ if issubdtype(res.dtype, inexact):
45
+ dt = res.dtype
46
+ else:
47
+ dt = dtype(float)
48
+ return shape(res), dt
49
+
50
+
51
+ def fsolve(func, x0, args=(), fprime=None, full_output=0,
52
+ col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
53
+ epsfcn=None, factor=100, diag=None):
54
+ """
55
+ Find the roots of a function.
56
+
57
+ Return the roots of the (non-linear) equations defined by
58
+ ``func(x) = 0`` given a starting estimate.
59
+
60
+ Parameters
61
+ ----------
62
+ func : callable ``f(x, *args)``
63
+ A function that takes at least one (possibly vector) argument,
64
+ and returns a value of the same length.
65
+ x0 : ndarray
66
+ The starting estimate for the roots of ``func(x) = 0``.
67
+ args : tuple, optional
68
+ Any extra arguments to `func`.
69
+ fprime : callable ``f(x, *args)``, optional
70
+ A function to compute the Jacobian of `func` with derivatives
71
+ across the rows. By default, the Jacobian will be estimated.
72
+ full_output : bool, optional
73
+ If True, return optional outputs.
74
+ col_deriv : bool, optional
75
+ Specify whether the Jacobian function computes derivatives down
76
+ the columns (faster, because there is no transpose operation).
77
+ xtol : float, optional
78
+ The calculation will terminate if the relative error between two
79
+ consecutive iterates is at most `xtol`.
80
+ maxfev : int, optional
81
+ The maximum number of calls to the function. If zero, then
82
+ ``100*(N+1)`` is the maximum where N is the number of elements
83
+ in `x0`.
84
+ band : tuple, optional
85
+ If set to a two-sequence containing the number of sub- and
86
+ super-diagonals within the band of the Jacobi matrix, the
87
+ Jacobi matrix is considered banded (only for ``fprime=None``).
88
+ epsfcn : float, optional
89
+ A suitable step length for the forward-difference
90
+ approximation of the Jacobian (for ``fprime=None``). If
91
+ `epsfcn` is less than the machine precision, it is assumed
92
+ that the relative errors in the functions are of the order of
93
+ the machine precision.
94
+ factor : float, optional
95
+ A parameter determining the initial step bound
96
+ (``factor * || diag * x||``). Should be in the interval
97
+ ``(0.1, 100)``.
98
+ diag : sequence, optional
99
+ N positive entries that serve as scale factors for the
100
+ variables.
101
+
102
+ Returns
103
+ -------
104
+ x : ndarray
105
+ The solution (or the result of the last iteration for
106
+ an unsuccessful call).
107
+ infodict : dict
108
+ A dictionary of optional outputs with the keys:
109
+
110
+ ``nfev``
111
+ number of function calls
112
+ ``njev``
113
+ number of Jacobian calls
114
+ ``fvec``
115
+ function evaluated at the output
116
+ ``fjac``
117
+ the orthogonal matrix, q, produced by the QR
118
+ factorization of the final approximate Jacobian
119
+ matrix, stored column wise
120
+ ``r``
121
+ upper triangular matrix produced by QR factorization
122
+ of the same matrix
123
+ ``qtf``
124
+ the vector ``(transpose(q) * fvec)``
125
+
126
+ ier : int
127
+ An integer flag. Set to 1 if a solution was found, otherwise refer
128
+ to `mesg` for more information.
129
+ mesg : str
130
+ If no solution is found, `mesg` details the cause of failure.
131
+
132
+ See Also
133
+ --------
134
+ root : Interface to root finding algorithms for multivariate
135
+ functions. See the ``method='hybr'`` in particular.
136
+
137
+ Notes
138
+ -----
139
+ ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.
140
+
141
+ Examples
142
+ --------
143
+ Find a solution to the system of equations:
144
+ ``x0*cos(x1) = 4, x1*x0 - x1 = 5``.
145
+
146
+ >>> import numpy as np
147
+ >>> from scipy.optimize import fsolve
148
+ >>> def func(x):
149
+ ... return [x[0] * np.cos(x[1]) - 4,
150
+ ... x[1] * x[0] - x[1] - 5]
151
+ >>> root = fsolve(func, [1, 1])
152
+ >>> root
153
+ array([6.50409711, 0.90841421])
154
+ >>> np.isclose(func(root), [0.0, 0.0]) # func(root) should be almost 0.0.
155
+ array([ True, True])
156
+
157
+ """
158
+ options = {'col_deriv': col_deriv,
159
+ 'xtol': xtol,
160
+ 'maxfev': maxfev,
161
+ 'band': band,
162
+ 'eps': epsfcn,
163
+ 'factor': factor,
164
+ 'diag': diag}
165
+
166
+ res = _root_hybr(func, x0, args, jac=fprime, **options)
167
+ if full_output:
168
+ x = res['x']
169
+ info = {k: res.get(k)
170
+ for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res}
171
+ info['fvec'] = res['fun']
172
+ return x, info, res['status'], res['message']
173
+ else:
174
+ status = res['status']
175
+ msg = res['message']
176
+ if status == 0:
177
+ raise TypeError(msg)
178
+ elif status == 1:
179
+ pass
180
+ elif status in [2, 3, 4, 5]:
181
+ warnings.warn(msg, RuntimeWarning, stacklevel=2)
182
+ else:
183
+ raise TypeError(msg)
184
+ return res['x']
185
+
186
+
187
+ def _root_hybr(func, x0, args=(), jac=None,
188
+ col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
189
+ factor=100, diag=None, **unknown_options):
190
+ """
191
+ Find the roots of a multivariate function using MINPACK's hybrd and
192
+ hybrj routines (modified Powell method).
193
+
194
+ Options
195
+ -------
196
+ col_deriv : bool
197
+ Specify whether the Jacobian function computes derivatives down
198
+ the columns (faster, because there is no transpose operation).
199
+ xtol : float
200
+ The calculation will terminate if the relative error between two
201
+ consecutive iterates is at most `xtol`.
202
+ maxfev : int
203
+ The maximum number of calls to the function. If zero, then
204
+ ``100*(N+1)`` is the maximum where N is the number of elements
205
+ in `x0`.
206
+ band : tuple
207
+ If set to a two-sequence containing the number of sub- and
208
+ super-diagonals within the band of the Jacobi matrix, the
209
+ Jacobi matrix is considered banded (only for ``fprime=None``).
210
+ eps : float
211
+ A suitable step length for the forward-difference
212
+ approximation of the Jacobian (for ``fprime=None``). If
213
+ `eps` is less than the machine precision, it is assumed
214
+ that the relative errors in the functions are of the order of
215
+ the machine precision.
216
+ factor : float
217
+ A parameter determining the initial step bound
218
+ (``factor * || diag * x||``). Should be in the interval
219
+ ``(0.1, 100)``.
220
+ diag : sequence
221
+ N positive entries that serve as scale factors for the
222
+ variables.
223
+
224
+ """
225
+ _check_unknown_options(unknown_options)
226
+ epsfcn = eps
227
+
228
+ x0 = asarray(x0).flatten()
229
+ n = len(x0)
230
+ if not isinstance(args, tuple):
231
+ args = (args,)
232
+ shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
233
+ if epsfcn is None:
234
+ epsfcn = finfo(dtype).eps
235
+ Dfun = jac
236
+ if Dfun is None:
237
+ if band is None:
238
+ ml, mu = -10, -10
239
+ else:
240
+ ml, mu = band[:2]
241
+ if maxfev == 0:
242
+ maxfev = 200 * (n + 1)
243
+ retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
244
+ ml, mu, epsfcn, factor, diag)
245
+ else:
246
+ _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
247
+ if (maxfev == 0):
248
+ maxfev = 100 * (n + 1)
249
+ retval = _minpack._hybrj(func, Dfun, x0, args, 1,
250
+ col_deriv, xtol, maxfev, factor, diag)
251
+
252
+ x, status = retval[0], retval[-1]
253
+
254
+ errors = {0: "Improper input parameters were entered.",
255
+ 1: "The solution converged.",
256
+ 2: "The number of calls to function has "
257
+ "reached maxfev = %d." % maxfev,
258
+ 3: "xtol=%f is too small, no further improvement "
259
+ "in the approximate\n solution "
260
+ "is possible." % xtol,
261
+ 4: "The iteration is not making good progress, as measured "
262
+ "by the \n improvement from the last five "
263
+ "Jacobian evaluations.",
264
+ 5: "The iteration is not making good progress, "
265
+ "as measured by the \n improvement from the last "
266
+ "ten iterations.",
267
+ 'unknown': "An error occurred."}
268
+
269
+ info = retval[1]
270
+ info['fun'] = info.pop('fvec')
271
+ sol = OptimizeResult(x=x, success=(status == 1), status=status,
272
+ method="hybr")
273
+ sol.update(info)
274
+ try:
275
+ sol['message'] = errors[status]
276
+ except KeyError:
277
+ sol['message'] = errors['unknown']
278
+
279
+ return sol
280
+
281
+
282
+ LEASTSQ_SUCCESS = [1, 2, 3, 4]
283
+ LEASTSQ_FAILURE = [5, 6, 7, 8]
284
+
285
+
286
+ def leastsq(func, x0, args=(), Dfun=None, full_output=False,
287
+ col_deriv=False, ftol=1.49012e-8, xtol=1.49012e-8,
288
+ gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
289
+ """
290
+ Minimize the sum of squares of a set of equations.
291
+
292
+ ::
293
+
294
+ x = arg min(sum(func(y)**2,axis=0))
295
+ y
296
+
297
+ Parameters
298
+ ----------
299
+ func : callable
300
+ Should take at least one (possibly length ``N`` vector) argument and
301
+ returns ``M`` floating point numbers. It must not return NaNs or
302
+ fitting might fail. ``M`` must be greater than or equal to ``N``.
303
+ x0 : ndarray
304
+ The starting estimate for the minimization.
305
+ args : tuple, optional
306
+ Any extra arguments to func are placed in this tuple.
307
+ Dfun : callable, optional
308
+ A function or method to compute the Jacobian of func with derivatives
309
+ across the rows. If this is None, the Jacobian will be estimated.
310
+ full_output : bool, optional
311
+ If ``True``, return all optional outputs (not just `x` and `ier`).
312
+ col_deriv : bool, optional
313
+ If ``True``, specify that the Jacobian function computes derivatives
314
+ down the columns (faster, because there is no transpose operation).
315
+ ftol : float, optional
316
+ Relative error desired in the sum of squares.
317
+ xtol : float, optional
318
+ Relative error desired in the approximate solution.
319
+ gtol : float, optional
320
+ Orthogonality desired between the function vector and the columns of
321
+ the Jacobian.
322
+ maxfev : int, optional
323
+ The maximum number of calls to the function. If `Dfun` is provided,
324
+ then the default `maxfev` is 100*(N+1) where N is the number of elements
325
+ in x0, otherwise the default `maxfev` is 200*(N+1).
326
+ epsfcn : float, optional
327
+ A variable used in determining a suitable step length for the forward-
328
+ difference approximation of the Jacobian (for Dfun=None).
329
+ Normally the actual step length will be sqrt(epsfcn)*x.
330
+ If epsfcn is less than the machine precision, it is assumed that the
331
+ relative errors are of the order of the machine precision.
332
+ factor : float, optional
333
+ A parameter determining the initial step bound
334
+ (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
335
+ diag : sequence, optional
336
+ N positive entries that serve as scale factors for the variables.
337
+
338
+ Returns
339
+ -------
340
+ x : ndarray
341
+ The solution (or the result of the last iteration for an unsuccessful
342
+ call).
343
+ cov_x : ndarray
344
+ The inverse of the Hessian. `fjac` and `ipvt` are used to construct an
345
+ estimate of the Hessian. A value of None indicates a singular matrix,
346
+ which means the curvature in parameters `x` is numerically flat. To
347
+ obtain the covariance matrix of the parameters `x`, `cov_x` must be
348
+ multiplied by the variance of the residuals -- see curve_fit. Only
349
+ returned if `full_output` is ``True``.
350
+ infodict : dict
351
+ a dictionary of optional outputs with the keys:
352
+
353
+ ``nfev``
354
+ The number of function calls
355
+ ``fvec``
356
+ The function evaluated at the output
357
+ ``fjac``
358
+ A permutation of the R matrix of a QR
359
+ factorization of the final approximate
360
+ Jacobian matrix, stored column wise.
361
+ Together with ipvt, the covariance of the
362
+ estimate can be approximated.
363
+ ``ipvt``
364
+ An integer array of length N which defines
365
+ a permutation matrix, p, such that
366
+ fjac*p = q*r, where r is upper triangular
367
+ with diagonal elements of nonincreasing
368
+ magnitude. Column j of p is column ipvt(j)
369
+ of the identity matrix.
370
+ ``qtf``
371
+ The vector (transpose(q) * fvec).
372
+
373
+ Only returned if `full_output` is ``True``.
374
+ mesg : str
375
+ A string message giving information about the cause of failure.
376
+ Only returned if `full_output` is ``True``.
377
+ ier : int
378
+ An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
379
+ found. Otherwise, the solution was not found. In either case, the
380
+ optional output variable 'mesg' gives more information.
381
+
382
+ See Also
383
+ --------
384
+ least_squares : Newer interface to solve nonlinear least-squares problems
385
+ with bounds on the variables. See ``method='lm'`` in particular.
386
+
387
+ Notes
388
+ -----
389
+ "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
390
+
391
+ cov_x is a Jacobian approximation to the Hessian of the least squares
392
+ objective function.
393
+ This approximation assumes that the objective function is based on the
394
+ difference between some observed target data (ydata) and a (non-linear)
395
+ function of the parameters `f(xdata, params)` ::
396
+
397
+ func(params) = ydata - f(xdata, params)
398
+
399
+ so that the objective function is ::
400
+
401
+ min sum((ydata - f(xdata, params))**2, axis=0)
402
+ params
403
+
404
+ The solution, `x`, is always a 1-D array, regardless of the shape of `x0`,
405
+ or whether `x0` is a scalar.
406
+
407
+ Examples
408
+ --------
409
+ >>> from scipy.optimize import leastsq
410
+ >>> def func(x):
411
+ ... return 2*(x-3)**2+1
412
+ >>> leastsq(func, 0)
413
+ (array([2.99999999]), 1)
414
+
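+ A hedged sketch of turning `cov_x` into a parameter covariance (the
+ straight-line data below are assumed purely for illustration; the
+ rescaling by the residual variance mirrors what `curve_fit` performs):
+
+ >>> import numpy as np
+ >>> xdata = np.linspace(0, 1, 20)
+ >>> ydata = 2.0 * xdata + 0.5
+ >>> def residuals(p):
+ ... return ydata - (p[0] * xdata + p[1])
+ >>> p, cov_x, infodict, mesg, ier = leastsq(residuals, [1.0, 0.0],
+ ... full_output=True)
+ >>> s_sq = (infodict['fvec']**2).sum() / (len(ydata) - len(p))
+ >>> pcov = cov_x * s_sq # variance-scaled covariance, as in curve_fit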
415
+ """
416
+ x0 = asarray(x0).flatten()
417
+ n = len(x0)
418
+ if not isinstance(args, tuple):
419
+ args = (args,)
420
+ shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
421
+ m = shape[0]
422
+
423
+ if n > m:
424
+ raise TypeError(f"Improper input: func input vector length N={n} must"
425
+ f" not exceed func output vector length M={m}")
426
+
427
+ if epsfcn is None:
428
+ epsfcn = finfo(dtype).eps
429
+
430
+ if Dfun is None:
431
+ if maxfev == 0:
432
+ maxfev = 200*(n + 1)
433
+ retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
434
+ gtol, maxfev, epsfcn, factor, diag)
435
+ else:
436
+ if col_deriv:
437
+ _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
438
+ else:
439
+ _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
440
+ if maxfev == 0:
441
+ maxfev = 100 * (n + 1)
442
+ retval = _minpack._lmder(func, Dfun, x0, args, full_output,
443
+ col_deriv, ftol, xtol, gtol, maxfev,
444
+ factor, diag)
445
+
446
+ errors = {0: ["Improper input parameters.", TypeError],
447
+ 1: ["Both actual and predicted relative reductions "
448
+ "in the sum of squares\n are at most %f" % ftol, None],
449
+ 2: ["The relative error between two consecutive "
450
+ "iterates is at most %f" % xtol, None],
451
+ 3: ["Both actual and predicted relative reductions in "
452
+ f"the sum of squares\n are at most {ftol:f} and the "
453
+ "relative error between two consecutive "
454
+ f"iterates is at \n most {xtol:f}", None],
455
+ 4: ["The cosine of the angle between func(x) and any "
456
+ "column of the\n Jacobian is at most %f in "
457
+ "absolute value" % gtol, None],
458
+ 5: ["Number of calls to function has reached "
459
+ "maxfev = %d." % maxfev, ValueError],
460
+ 6: ["ftol=%f is too small, no further reduction "
461
+ "in the sum of squares\n is possible." % ftol,
462
+ ValueError],
463
+ 7: ["xtol=%f is too small, no further improvement in "
464
+ "the approximate\n solution is possible." % xtol,
465
+ ValueError],
466
+ 8: ["gtol=%f is too small, func(x) is orthogonal to the "
467
+ "columns of\n the Jacobian to machine "
468
+ "precision." % gtol, ValueError]}
469
+
470
+ # The FORTRAN return value (possible return values are >= 0 and <= 8)
471
+ info = retval[-1]
472
+
473
+ if full_output:
474
+ cov_x = None
475
+ if info in LEASTSQ_SUCCESS:
476
+ # This was
477
+ # perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
478
+ # r = triu(transpose(retval[1]['fjac'])[:n, :])
479
+ # R = dot(r, perm)
480
+ # cov_x = inv(dot(transpose(R), R))
481
+ # but the explicit dot product was not necessary and sometimes
482
+ # the result was not symmetric positive definite. See gh-4555.
483
+ perm = retval[1]['ipvt'] - 1
484
+ n = len(perm)
485
+ r = triu(transpose(retval[1]['fjac'])[:n, :])
486
+ inv_triu = linalg.get_lapack_funcs('trtri', (r,))
487
+ try:
488
+ # inverse of permuted matrix is a permutation of matrix inverse
489
+ invR, trtri_info = inv_triu(r) # default: upper, non-unit diag
490
+ if trtri_info != 0: # explicit comparison for readability
491
+ raise LinAlgError(f'trtri returned info {trtri_info}')
492
+ invR[perm] = invR.copy()
493
+ cov_x = invR @ invR.T
494
+ except (LinAlgError, ValueError):
495
+ pass
496
+ return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info)
497
+ else:
498
+ if info in LEASTSQ_FAILURE:
499
+ warnings.warn(errors[info][0], RuntimeWarning, stacklevel=2)
500
+ elif info == 0:
501
+ raise errors[info][1](errors[info][0])
502
+ return retval[0], info
503
+
504
+
505
+ def _lightweight_memoizer(f):
506
+ # very shallow memoization to address gh-13670: only remember the first set
507
+ # of parameters and corresponding function value, and only attempt to use
508
+ # them twice (the number of times the function is evaluated at x0).
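+ # For example, wrapped = _lightweight_memoizer(f): the first call
+ # wrapped(x0) evaluates f, an immediate second call with the same x0
+ # reuses the stored value, and any call with different parameters
+ # permanently disables the lookup.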
509
+ def _memoized_func(params):
510
+ if _memoized_func.skip_lookup:
511
+ return f(params)
512
+
513
+ if np.all(_memoized_func.last_params == params):
514
+ return _memoized_func.last_val
515
+ elif _memoized_func.last_params is not None:
516
+ _memoized_func.skip_lookup = True
517
+
518
+ val = f(params)
519
+
520
+ if _memoized_func.last_params is None:
521
+ _memoized_func.last_params = np.copy(params)
522
+ _memoized_func.last_val = val
523
+
524
+ return val
525
+
526
+ _memoized_func.last_params = None
527
+ _memoized_func.last_val = None
528
+ _memoized_func.skip_lookup = False
529
+ return _memoized_func
530
+
531
+
532
+ def _wrap_func(func, xdata, ydata, transform):
533
+ if transform is None:
534
+ def func_wrapped(params):
535
+ return func(xdata, *params) - ydata
536
+ elif transform.size == 1 or transform.ndim == 1:
537
+ def func_wrapped(params):
538
+ return transform * (func(xdata, *params) - ydata)
539
+ else:
540
+ # Chisq = (y - yd)^T C^{-1} (y-yd)
541
+ # transform = L such that C = L L^T
542
+ # C^{-1} = L^{-T} L^{-1}
543
+ # Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd)
544
+ # Define (y-yd)' = L^{-1} (y-yd)
545
+ # by solving
546
+ # L (y-yd)' = (y-yd)
547
+ # and minimize (y-yd)'^T (y-yd)'
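+ # (As an illustrative special case: if C were diagonal with entries
+ # sigma_i**2, then L = diag(sigma_i) and the triangular solve reduces
+ # to elementwise division by sigma_i, matching the 1-D branch above.)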
548
+ def func_wrapped(params):
549
+ return solve_triangular(transform, func(xdata, *params) - ydata, lower=True)
550
+ return func_wrapped
551
+
552
+
553
+ def _wrap_jac(jac, xdata, transform):
554
+ if transform is None:
555
+ def jac_wrapped(params):
556
+ return jac(xdata, *params)
557
+ elif transform.ndim == 1:
558
+ def jac_wrapped(params):
559
+ return transform[:, np.newaxis] * np.asarray(jac(xdata, *params))
560
+ else:
561
+ def jac_wrapped(params):
562
+ return solve_triangular(transform,
563
+ np.asarray(jac(xdata, *params)),
564
+ lower=True)
565
+ return jac_wrapped
566
+
567
+
568
+ def _initialize_feasible(lb, ub):
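+ # e.g., lb = [0, 0, -inf], ub = [1, inf, 0] gives p0 = [0.5, 1.0, -1.0]:
+ # the midpoint for two-sided bounds, lb + 1 or ub - 1 for one-sided
+ # bounds, and 1 where both bounds are infinite.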
569
+ p0 = np.ones_like(lb)
570
+ lb_finite = np.isfinite(lb)
571
+ ub_finite = np.isfinite(ub)
572
+
573
+ mask = lb_finite & ub_finite
574
+ p0[mask] = 0.5 * (lb[mask] + ub[mask])
575
+
576
+ mask = lb_finite & ~ub_finite
577
+ p0[mask] = lb[mask] + 1
578
+
579
+ mask = ~lb_finite & ub_finite
580
+ p0[mask] = ub[mask] - 1
581
+
582
+ return p0
583
+
584
+
585
+ def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
586
+ check_finite=None, bounds=(-np.inf, np.inf), method=None,
587
+ jac=None, *, full_output=False, nan_policy=None,
588
+ **kwargs):
589
+ """
590
+ Use non-linear least squares to fit a function, f, to data.
591
+
592
+ Assumes ``ydata = f(xdata, *params) + eps``.
593
+
594
+ Parameters
595
+ ----------
596
+ f : callable
597
+ The model function, f(x, ...). It must take the independent
598
+ variable as the first argument and the parameters to fit as
599
+ separate remaining arguments.
600
+ xdata : array_like
601
+ The independent variable where the data is measured.
602
+ Should usually be an M-length sequence or a (k,M)-shaped array for
603
+ functions with k predictors, and each element should be float
604
+ convertible if it is an array-like object.
605
+ ydata : array_like
606
+ The dependent data, a length M array - nominally ``f(xdata, ...)``.
607
+ p0 : array_like, optional
608
+ Initial guess for the parameters (length N). If None, then the
609
+ initial values will all be 1 (if the number of parameters for the
610
+ function can be determined using introspection, otherwise a
611
+ ValueError is raised).
612
+ sigma : None or scalar or M-length sequence or MxM array, optional
613
+ Determines the uncertainty in `ydata`. If we define residuals as
614
+ ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma`
615
+ depends on its number of dimensions:
616
+
617
+ - A scalar or 1-D `sigma` should contain values of standard deviations of
618
+ errors in `ydata`. In this case, the optimized function is
619
+ ``chisq = sum((r / sigma) ** 2)``.
620
+
621
+ - A 2-D `sigma` should contain the covariance matrix of
622
+ errors in `ydata`. In this case, the optimized function is
623
+ ``chisq = r.T @ inv(sigma) @ r``.
624
+
625
+ .. versionadded:: 0.19
626
+
627
+ None (default) is equivalent of 1-D `sigma` filled with ones.
628
+ absolute_sigma : bool, optional
629
+ If True, `sigma` is used in an absolute sense and the estimated parameter
630
+ covariance `pcov` reflects these absolute values.
631
+
632
+ If False (default), only the relative magnitudes of the `sigma` values matter.
633
+ The returned parameter covariance matrix `pcov` is based on scaling
634
+ `sigma` by a constant factor. This constant is set by demanding that the
635
+ reduced `chisq` for the optimal parameters `popt` when using the
636
+ *scaled* `sigma` equals unity. In other words, `sigma` is scaled to
637
+ match the sample variance of the residuals after the fit. Default is False.
638
+ Mathematically,
639
+ ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)``
640
+ check_finite : bool, optional
641
+ If True, check that the input arrays do not contain nans or infs,
642
+ and raise a ValueError if they do. Setting this parameter to
643
+ False may silently produce nonsensical results if the input arrays
644
+ do contain nans. Default is True if `nan_policy` is not specified
645
+ explicitly and False otherwise.
646
+ bounds : 2-tuple of array_like or `Bounds`, optional
647
+ Lower and upper bounds on parameters. Defaults to no bounds.
648
+ There are two ways to specify the bounds:
649
+
650
+ - Instance of `Bounds` class.
651
+
652
+ - 2-tuple of array_like: Each element of the tuple must be either
653
+ an array with the length equal to the number of parameters, or a
654
+ scalar (in which case the bound is taken to be the same for all
655
+ parameters). Use ``np.inf`` with an appropriate sign to disable
656
+ bounds on all or some parameters.
657
+
658
+ method : {'lm', 'trf', 'dogbox'}, optional
659
+ Method to use for optimization. See `least_squares` for more details.
660
+ Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
661
+ provided. The method 'lm' won't work when the number of observations
662
+ is less than the number of variables, use 'trf' or 'dogbox' in this
663
+ case.
664
+
665
+ .. versionadded:: 0.17
666
+ jac : callable, string or None, optional
667
+ Function with signature ``jac(x, ...)`` which computes the Jacobian
668
+ matrix of the model function with respect to parameters as a dense
669
+ array_like structure. It will be scaled according to provided `sigma`.
670
+ If None (default), the Jacobian will be estimated numerically.
671
+ String keywords for 'trf' and 'dogbox' methods can be used to select
672
+ a finite difference scheme, see `least_squares`.
673
+
674
+ .. versionadded:: 0.18
675
+ full_output : boolean, optional
676
+ If True, this function returns additional information: `infodict`,
677
+ `mesg`, and `ier`.
678
+
679
+ .. versionadded:: 1.9
680
+ nan_policy : {'raise', 'omit', None}, optional
681
+ Defines how to handle when input contains nan.
682
+ The following options are available (default is None):
683
+
684
+ * 'raise': throws an error
685
+ * 'omit': performs the calculations ignoring nan values
686
+ * None: no special handling of NaNs is performed
687
+ (except what is done by check_finite); the behavior when NaNs
688
+ are present is implementation-dependent and may change.
689
+
690
+ Note that if this value is specified explicitly (not None),
691
+ `check_finite` will be set to False.
692
+
693
+ .. versionadded:: 1.11
694
+ **kwargs
695
+ Keyword arguments passed to `leastsq` for ``method='lm'`` or
696
+ `least_squares` otherwise.
697
+
698
+ Returns
699
+ -------
700
+ popt : array
701
+ Optimal values for the parameters so that the sum of the squared
702
+ residuals of ``f(xdata, *popt) - ydata`` is minimized.
703
+ pcov : 2-D array
704
+ The estimated approximate covariance of popt. The diagonals provide
705
+ the variance of the parameter estimate. To compute one standard
706
+ deviation errors on the parameters, use
707
+ ``perr = np.sqrt(np.diag(pcov))``. Note that the relationship between
708
+ `cov` and parameter error estimates is derived based on a linear
709
+ approximation to the model function around the optimum [1].
710
+ When this approximation becomes inaccurate, `cov` may not provide an
711
+ accurate measure of uncertainty.
712
+
713
+ How the `sigma` parameter affects the estimated covariance
714
+ depends on `absolute_sigma` argument, as described above.
715
+
716
+ If the Jacobian matrix at the solution doesn't have a full rank, then
717
+ 'lm' method returns a matrix filled with ``np.inf``, on the other hand
718
+ 'trf' and 'dogbox' methods use Moore-Penrose pseudoinverse to compute
719
+ the covariance matrix. Covariance matrices with large condition numbers
720
+ (e.g. computed with `numpy.linalg.cond`) may indicate that results are
721
+ unreliable.
722
+ infodict : dict (returned only if `full_output` is True)
723
+ a dictionary of optional outputs with the keys:
724
+
725
+ ``nfev``
726
+ The number of function calls. Methods 'trf' and 'dogbox' do not
727
+ count function calls for numerical Jacobian approximation,
728
+ as opposed to 'lm' method.
729
+ ``fvec``
730
+ The residual values evaluated at the solution, for a 1-D `sigma`
731
+ this is ``(f(x, *popt) - ydata)/sigma``.
732
+ ``fjac``
733
+ A permutation of the R matrix of a QR
734
+ factorization of the final approximate
735
+ Jacobian matrix, stored column wise.
736
+ Together with ipvt, the covariance of the
737
+ estimate can be approximated.
738
+ Method 'lm' only provides this information.
739
+ ``ipvt``
740
+ An integer array of length N which defines
741
+ a permutation matrix, p, such that
742
+ fjac*p = q*r, where r is upper triangular
743
+ with diagonal elements of nonincreasing
744
+ magnitude. Column j of p is column ipvt(j)
745
+ of the identity matrix.
746
+ Method 'lm' only provides this information.
747
+ ``qtf``
748
+ The vector (transpose(q) * fvec).
749
+ Method 'lm' only provides this information.
750
+
751
+ .. versionadded:: 1.9
752
+ mesg : str (returned only if `full_output` is True)
753
+ A string message giving information about the solution.
754
+
755
+ .. versionadded:: 1.9
756
+ ier : int (returned only if `full_output` is True)
757
+ An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
758
+ found. Otherwise, the solution was not found. In either case, the
759
+ optional output variable `mesg` gives more information.
760
+
761
+ .. versionadded:: 1.9
762
+
763
+ Raises
764
+ ------
765
+ ValueError
766
+ if either `ydata` or `xdata` contain NaNs, or if incompatible options
767
+ are used.
768
+
769
+ RuntimeError
770
+ if the least-squares minimization fails.
771
+
772
+ OptimizeWarning
773
+ if covariance of the parameters can not be estimated.
774
+
775
+ See Also
776
+ --------
777
+ least_squares : Minimize the sum of squares of nonlinear functions.
778
+ scipy.stats.linregress : Calculate a linear least squares regression for
779
+ two sets of measurements.
780
+
781
+ Notes
782
+ -----
783
+ Users should ensure that inputs `xdata`, `ydata`, and the output of `f`
784
+ are ``float64``, or else the optimization may return incorrect results.
785
+
786
+ With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm
787
+ through `leastsq`. Note that this algorithm can only deal with
788
+ unconstrained problems.
789
+
790
+ Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
791
+ the docstring of `least_squares` for more information.
792
+
793
+ Parameters to be fitted must have similar scale. Differences of multiple
794
+ orders of magnitude can lead to incorrect results. For the 'trf' and
795
+ 'dogbox' methods, the `x_scale` keyword argument can be used to scale
796
+ the parameters.
797
+
798
+ References
799
+ ----------
800
+ [1] K. Vugrin et al. Confidence region estimation techniques for nonlinear
801
+ regression in groundwater flow: Three case studies. Water Resources
802
+ Research, Vol. 43, W03423, :doi:`10.1029/2005WR004804`
803
+
804
+ Examples
805
+ --------
806
+ >>> import numpy as np
807
+ >>> import matplotlib.pyplot as plt
808
+ >>> from scipy.optimize import curve_fit
809
+
810
+ >>> def func(x, a, b, c):
811
+ ... return a * np.exp(-b * x) + c
812
+
813
+ Define the data to be fit with some noise:
814
+
815
+ >>> xdata = np.linspace(0, 4, 50)
816
+ >>> y = func(xdata, 2.5, 1.3, 0.5)
817
+ >>> rng = np.random.default_rng()
818
+ >>> y_noise = 0.2 * rng.normal(size=xdata.size)
819
+ >>> ydata = y + y_noise
820
+ >>> plt.plot(xdata, ydata, 'b-', label='data')
821
+
822
+ Fit for the parameters a, b, c of the function `func`:
823
+
824
+ >>> popt, pcov = curve_fit(func, xdata, ydata)
825
+ >>> popt
826
+ array([2.56274217, 1.37268521, 0.47427475])
827
+ >>> plt.plot(xdata, func(xdata, *popt), 'r-',
828
+ ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
829
+
830
+ Constrain the optimization to the region of ``0 <= a <= 3``,
831
+ ``0 <= b <= 1`` and ``0 <= c <= 0.5``:
832
+
833
+ >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5]))
834
+ >>> popt
835
+ array([2.43736712, 1. , 0.34463856])
836
+ >>> plt.plot(xdata, func(xdata, *popt), 'g--',
837
+ ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
838
+
839
+ >>> plt.xlabel('x')
840
+ >>> plt.ylabel('y')
841
+ >>> plt.legend()
842
+ >>> plt.show()
843
+
844
+ For reliable results, the model `func` should not be overparameterized;
845
+ redundant parameters can cause unreliable covariance matrices and, in some
846
+ cases, poorer quality fits. As a quick check of whether the model may be
847
+ overparameterized, calculate the condition number of the covariance matrix:
848
+
849
+ >>> np.linalg.cond(pcov)
850
+ 34.571092161547405 # may vary
851
+
852
+ The value is small, so it does not raise much concern. If, however, we were
853
+ to add a fourth parameter ``d`` to `func` with the same effect as ``a``:
854
+
855
+ >>> def func2(x, a, b, c, d):
856
+ ... return a * d * np.exp(-b * x) + c # a and d are redundant
857
+ >>> popt, pcov = curve_fit(func2, xdata, ydata)
858
+ >>> np.linalg.cond(pcov)
859
+ 1.13250718925596e+32 # may vary
860
+
861
+ Such a large value is cause for concern. The diagonal elements of the
862
+ covariance matrix, which are related to the uncertainty of the fit, give more
863
+ information:
864
+
865
+ >>> np.diag(pcov)
866
+ array([1.48814742e+29, 3.78596560e-02, 5.39253738e-03, 2.76417220e+28]) # may vary
867
+
868
+ Note that the first and last terms are much larger than the other elements,
869
+ suggesting that the optimal values of these parameters are ambiguous and
870
+ that only one of these parameters is needed in the model.
871
+
872
+ If the optimal parameters of `f` differ by multiple orders of magnitude, the
873
+ resulting fit can be inaccurate. Sometimes, `curve_fit` can fail to find any
874
+ results:
875
+
876
+ >>> ydata = func(xdata, 500000, 0.01, 15)
877
+ >>> try:
878
+ ... popt, pcov = curve_fit(func, xdata, ydata, method = 'trf')
879
+ ... except RuntimeError as e:
880
+ ... print(e)
881
+ Optimal parameters not found: The maximum number of function evaluations is exceeded.
882
+
883
+ If parameter scale is roughly known beforehand, it can be defined in
884
+ `x_scale` argument:
885
+
886
+ >>> popt, pcov = curve_fit(func, xdata, ydata, method = 'trf',
887
+ ... x_scale = [1000, 1, 1])
888
+ >>> popt
889
+ array([5.00000000e+05, 1.00000000e-02, 1.49999999e+01])
890
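+
+ As a final hedged sketch (the constant 0.2 uncertainty below is an
+ assumption for illustration), a 1-D `sigma` rescales each residual and
+ ``absolute_sigma=True`` keeps `pcov` in those absolute units:
+
+ >>> xdata = np.linspace(0, 4, 50)
+ >>> ydata = func(xdata, 2.5, 1.3, 0.5) + 0.2 * rng.normal(size=xdata.size)
+ >>> sigma_y = np.full_like(xdata, 0.2) # assumed per-point uncertainty
+ >>> popt, pcov = curve_fit(func, xdata, ydata, sigma=sigma_y,
+ ... absolute_sigma=True)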
+ """
891
+ if p0 is None:
892
+ # determine number of parameters by inspecting the function
893
+ sig = _getfullargspec(f)
894
+ args = sig.args
895
+ if len(args) < 2:
896
+ raise ValueError("Unable to determine number of fit parameters.")
897
+ n = len(args) - 1
898
+ else:
899
+ p0 = np.atleast_1d(p0)
900
+ n = p0.size
901
+
902
+ if isinstance(bounds, Bounds):
903
+ lb, ub = bounds.lb, bounds.ub
904
+ else:
905
+ lb, ub = prepare_bounds(bounds, n)
906
+ if p0 is None:
907
+ p0 = _initialize_feasible(lb, ub)
908
+
909
+ bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
910
+ if method is None:
911
+ if bounded_problem:
912
+ method = 'trf'
913
+ else:
914
+ method = 'lm'
915
+
916
+ if method == 'lm' and bounded_problem:
917
+ raise ValueError("Method 'lm' only works for unconstrained problems. "
918
+ "Use 'trf' or 'dogbox' instead.")
919
+
920
+ if check_finite is None:
921
+ check_finite = True if nan_policy is None else False
922
+
923
+ # optimization may produce garbage for float32 inputs, cast them to float64
924
+ if check_finite:
925
+ ydata = np.asarray_chkfinite(ydata, float)
926
+ else:
927
+ ydata = np.asarray(ydata, float)
928
+
929
+ if isinstance(xdata, (list, tuple, np.ndarray)):
930
+ # `xdata` is passed straight to the user-defined `f`, so allow
931
+ # non-array_like `xdata`.
932
+ if check_finite:
933
+ xdata = np.asarray_chkfinite(xdata, float)
934
+ else:
935
+ xdata = np.asarray(xdata, float)
936
+
937
+ if ydata.size == 0:
938
+ raise ValueError("`ydata` must not be empty!")
939
+
940
+ # nan handling is needed only if check_finite is False because if True,
941
+ # the x-y data are already checked, and they don't contain nans.
942
+ if not check_finite and nan_policy is not None:
943
+ if nan_policy == "propagate":
944
+ raise ValueError("`nan_policy='propagate'` is not supported "
945
+ "by this function.")
946
+
947
+ policies = [None, 'raise', 'omit']
948
+ x_contains_nan, nan_policy = _contains_nan(xdata, nan_policy,
949
+ policies=policies)
950
+ y_contains_nan, nan_policy = _contains_nan(ydata, nan_policy,
951
+ policies=policies)
952
+
953
+ if (x_contains_nan or y_contains_nan) and nan_policy == 'omit':
954
+ # ignore NaNs for N dimensional arrays
955
+ has_nan = np.isnan(xdata)
956
+ has_nan = has_nan.any(axis=tuple(range(has_nan.ndim-1)))
957
+ has_nan |= np.isnan(ydata)
958
+
959
+ xdata = xdata[..., ~has_nan]
960
+ ydata = ydata[~has_nan]
961
+
962
+ # Determine type of sigma
963
+ if sigma is not None:
964
+ sigma = np.asarray(sigma)
965
+
966
+ # if 1-D or a scalar, sigma are errors, define transform = 1/sigma
967
+ if sigma.size == 1 or sigma.shape == (ydata.size, ):
968
+ transform = 1.0 / sigma
969
+ # if 2-D, sigma is the covariance matrix,
970
+ # define transform = L such that L L^T = C
971
+ elif sigma.shape == (ydata.size, ydata.size):
972
+ try:
973
+ # scipy.linalg.cholesky requires lower=True to return L L^T = A
974
+ transform = cholesky(sigma, lower=True)
975
+ except LinAlgError as e:
976
+ raise ValueError("`sigma` must be positive definite.") from e
977
+ else:
978
+ raise ValueError("`sigma` has incorrect shape.")
979
+ else:
980
+ transform = None
981
+
982
+ func = _lightweight_memoizer(_wrap_func(f, xdata, ydata, transform))
983
+
984
+ if callable(jac):
985
+ jac = _lightweight_memoizer(_wrap_jac(jac, xdata, transform))
986
+ elif jac is None and method != 'lm':
987
+ jac = '2-point'
988
+
989
+ if 'args' in kwargs:
990
+ # The specification for the model function `f` does not support
991
+ # additional arguments. Refer to the `curve_fit` docstring for
992
+ # acceptable call signatures of `f`.
993
+ raise ValueError("'args' is not a supported keyword argument.")
994
+
995
+ if method == 'lm':
996
+ # if ydata.size == 1, this might be used for broadcast.
997
+ if ydata.size != 1 and n > ydata.size:
998
+ raise TypeError(f"The number of func parameters={n} must not"
999
+ f" exceed the number of data points={ydata.size}")
1000
+ res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
1001
+ popt, pcov, infodict, errmsg, ier = res
1002
+ ysize = len(infodict['fvec'])
1003
+ cost = np.sum(infodict['fvec'] ** 2)
1004
+ if ier not in [1, 2, 3, 4]:
1005
+ raise RuntimeError("Optimal parameters not found: " + errmsg)
1006
+ else:
1007
+ # Rename maxfev (leastsq) to max_nfev (least_squares), if specified.
1008
+ if 'max_nfev' not in kwargs:
1009
+ kwargs['max_nfev'] = kwargs.pop('maxfev', None)
1010
+
1011
+ res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
1012
+ **kwargs)
1013
+
1014
+ if not res.success:
1015
+ raise RuntimeError("Optimal parameters not found: " + res.message)
1016
+
1017
+ infodict = dict(nfev=res.nfev, fvec=res.fun)
1018
+ ier = res.status
1019
+ errmsg = res.message
1020
+
1021
+ ysize = len(res.fun)
1022
+ cost = 2 * res.cost # res.cost is half sum of squares!
1023
+ popt = res.x
1024
+
1025
+ # Do Moore-Penrose inverse discarding zero singular values.
1026
+ _, s, VT = svd(res.jac, full_matrices=False)
1027
+ threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
1028
+ s = s[s > threshold]
1029
+ VT = VT[:s.size]
1030
+ pcov = np.dot(VT.T / s**2, VT)
1031
+
1032
+ warn_cov = False
1033
+ if pcov is None or np.isnan(pcov).any():
1034
+ # indeterminate covariance
1035
+ pcov = zeros((len(popt), len(popt)), dtype=float)
1036
+ pcov.fill(inf)
1037
+ warn_cov = True
1038
+ elif not absolute_sigma:
1039
+ if ysize > p0.size:
1040
+ s_sq = cost / (ysize - p0.size)
1041
+ pcov = pcov * s_sq
1042
+ else:
1043
+ pcov.fill(inf)
1044
+ warn_cov = True
1045
+
1046
+ if warn_cov:
1047
+ warnings.warn('Covariance of the parameters could not be estimated',
1048
+ category=OptimizeWarning, stacklevel=2)
1049
+
1050
+ if full_output:
1051
+ return popt, pcov, infodict, errmsg, ier
1052
+ else:
1053
+ return popt, pcov
1054
+
1055
+
1056
+ def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
1057
+ """Perform a simple check on the gradient for correctness.
1058
+
1059
+ """
1060
+
1061
+ x = atleast_1d(x0)
1062
+ n = len(x)
1063
+ x = x.reshape((n,))
1064
+ fvec = atleast_1d(fcn(x, *args))
1065
+ m = len(fvec)
1066
+ fvec = fvec.reshape((m,))
1067
+ ldfjac = m
1068
+ fjac = atleast_1d(Dfcn(x, *args))
1069
+ fjac = fjac.reshape((m, n))
1070
+ if col_deriv == 0:
1071
+ fjac = transpose(fjac)
1072
+
1073
+ xp = zeros((n,), float)
1074
+ err = zeros((m,), float)
1075
+ fvecp = None
1076
+ _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)
1077
+
1078
+ fvecp = atleast_1d(fcn(xp, *args))
1079
+ fvecp = fvecp.reshape((m,))
1080
+ _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)
1081
+
1082
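+ # err[i] is close to 1 when column i of the supplied Jacobian agrees
+ # with a finite-difference estimate and close to 0 when it does not;
+ # `good` is nonzero only if every component of err exceeds 0.5.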
+ good = (prod(greater(err, 0.5), axis=0))
1083
+
1084
+ return (good, err)
1085
+
1086
+
1087
+ def _del2(p0, p1, d):
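+ # Aitken's Delta^2 extrapolation with d = p2 - 2*p1 + p0 precomputed by
+ # the caller: p_accel = p0 - (p1 - p0)**2 / d. fixed_point uses this for
+ # its default 'del2' (Steffensen) acceleration below.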
1088
+ return p0 - np.square(p1 - p0) / d
1089
+
1090
+
1091
+ def _relerr(actual, desired):
1092
+ return (actual - desired) / desired
1093
+
1094
+
1095
+ def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel):
1096
+ p0 = x0
1097
+ for i in range(maxiter):
1098
+ p1 = func(p0, *args)
1099
+ if use_accel:
1100
+ p2 = func(p1, *args)
1101
+ d = p2 - 2.0 * p1 + p0
1102
+ p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2)
1103
+ else:
1104
+ p = p1
1105
+ relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p)
1106
+ if np.all(np.abs(relerr) < xtol):
1107
+ return p
1108
+ p0 = p
1109
+ msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
1110
+ raise RuntimeError(msg)
1111
+
1112
+
1113
+ def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'):
1114
+ """
1115
+ Find a fixed point of the function.
1116
+
1117
+ Given a function of one or more variables and a starting point, find a
1118
+ fixed point of the function: i.e., where ``func(x0) == x0``.
1119
+
1120
+ Parameters
1121
+ ----------
1122
+ func : function
1123
+ Function to evaluate.
1124
+ x0 : array_like
1125
+ Initial guess for a fixed point of the function.
1126
+ args : tuple, optional
1127
+ Extra arguments to `func`.
1128
+ xtol : float, optional
1129
+ Convergence tolerance, defaults to 1e-08.
1130
+ maxiter : int, optional
1131
+ Maximum number of iterations, defaults to 500.
1132
+ method : {"del2", "iteration"}, optional
1133
+ Method of finding the fixed-point, defaults to "del2",
1134
+ which uses Steffensen's Method with Aitken's ``Del^2``
1135
+ convergence acceleration [1]_. The "iteration" method simply iterates
1136
+ the function until convergence is detected, without attempting to
1137
+ accelerate the convergence.
1138
+
1139
+ References
1140
+ ----------
1141
+ .. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80
1142
+
1143
+ Examples
1144
+ --------
1145
+ >>> import numpy as np
1146
+ >>> from scipy import optimize
1147
+ >>> def func(x, c1, c2):
1148
+ ... return np.sqrt(c1/(x+c2))
1149
+ >>> c1 = np.array([10,12.])
1150
+ >>> c2 = np.array([3, 5.])
1151
+ >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2))
1152
+ array([ 1.4920333 , 1.37228132])
1153
+
1154
+ """
1155
+ use_accel = {'del2': True, 'iteration': False}[method]
1156
+ x0 = _asarray_validated(x0, as_inexact=True)
1157
+ return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_numdiff.py ADDED
@@ -0,0 +1,775 @@
1
+ """Routines for numerical differentiation."""
2
+ import functools
3
+ import numpy as np
4
+ from numpy.linalg import norm
5
+
6
+ from scipy.sparse.linalg import LinearOperator
7
+ from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find
8
+ from ._group_columns import group_dense, group_sparse
9
+ from scipy._lib._array_api import atleast_nd, array_namespace
10
+
11
+
12
+ def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub):
13
+ """Adjust final difference scheme to the presence of bounds.
14
+
15
+ Parameters
16
+ ----------
17
+ x0 : ndarray, shape (n,)
18
+ Point at which we wish to estimate derivative.
19
+ h : ndarray, shape (n,)
20
+ Desired absolute finite difference steps.
21
+ num_steps : int
22
+ Number of `h` steps in one direction required to implement finite
23
+ difference scheme. For example, 2 means that we need to evaluate
24
+ f(x0 + 2 * h) or f(x0 - 2 * h)
25
+ scheme : {'1-sided', '2-sided'}
26
+ Whether steps in one or both directions are required. In other
27
+ words '1-sided' applies to forward and backward schemes, '2-sided'
28
+ applies to center schemes.
29
+ lb : ndarray, shape (n,)
30
+ Lower bounds on independent variables.
31
+ ub : ndarray, shape (n,)
32
+ Upper bounds on independent variables.
33
+
34
+ Returns
35
+ -------
36
+ h_adjusted : ndarray, shape (n,)
37
+ Adjusted absolute step sizes. Step size decreases only if a sign flip
38
+ or switching to a one-sided scheme doesn't allow a full step to be taken.
39
+ use_one_sided : ndarray of bool, shape (n,)
40
+ Whether to switch to one-sided scheme. Informative only for
41
+ ``scheme='2-sided'``.
42
+ """
43
+ if scheme == '1-sided':
44
+ use_one_sided = np.ones_like(h, dtype=bool)
45
+ elif scheme == '2-sided':
46
+ h = np.abs(h)
47
+ use_one_sided = np.zeros_like(h, dtype=bool)
48
+ else:
49
+ raise ValueError("`scheme` must be '1-sided' or '2-sided'.")
50
+
51
+ if np.all((lb == -np.inf) & (ub == np.inf)):
52
+ return h, use_one_sided
53
+
54
+ h_total = h * num_steps
55
+ h_adjusted = h.copy()
56
+
57
+ lower_dist = x0 - lb
58
+ upper_dist = ub - x0
59
+
60
+ if scheme == '1-sided':
61
+ x = x0 + h_total
62
+ violated = (x < lb) | (x > ub)
63
+ fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist)
64
+ h_adjusted[violated & fitting] *= -1
65
+
66
+ forward = (upper_dist >= lower_dist) & ~fitting
67
+ h_adjusted[forward] = upper_dist[forward] / num_steps
68
+ backward = (upper_dist < lower_dist) & ~fitting
69
+ h_adjusted[backward] = -lower_dist[backward] / num_steps
70
+ elif scheme == '2-sided':
71
+ central = (lower_dist >= h_total) & (upper_dist >= h_total)
72
+
73
+ forward = (upper_dist >= lower_dist) & ~central
74
+ h_adjusted[forward] = np.minimum(
75
+ h[forward], 0.5 * upper_dist[forward] / num_steps)
76
+ use_one_sided[forward] = True
77
+
78
+ backward = (upper_dist < lower_dist) & ~central
79
+ h_adjusted[backward] = -np.minimum(
80
+ h[backward], 0.5 * lower_dist[backward] / num_steps)
81
+ use_one_sided[backward] = True
82
+
83
+ min_dist = np.minimum(upper_dist, lower_dist) / num_steps
84
+ adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist))
85
+ h_adjusted[adjusted_central] = min_dist[adjusted_central]
86
+ use_one_sided[adjusted_central] = False
87
+
88
+ return h_adjusted, use_one_sided
89
+
90
+
91
+ @functools.lru_cache
92
+ def _eps_for_method(x0_dtype, f0_dtype, method):
93
+ """
94
+ Calculates relative EPS step to use for a given data type
95
+ and numdiff step method.
96
+
97
+ Progressively smaller steps are used for larger floating point types.
98
+
99
+ Parameters
100
+ ----------
101
+ f0_dtype: np.dtype
102
+ dtype of function evaluation
103
+
104
+ x0_dtype: np.dtype
105
+ dtype of parameter vector
106
+
107
+ method: {'2-point', '3-point', 'cs'}
108
+
109
+ Returns
110
+ -------
111
+ EPS: float
112
+ relative step size. May be np.float16, np.float32, np.float64
113
+
114
+ Notes
115
+ -----
116
+ The default relative step will be np.float64. However, if x0 or f0 are
117
+ smaller floating point types (np.float16, np.float32), then the smallest
118
+ floating point type is chosen.
119
+ """
120
+ # the default EPS value
121
+ EPS = np.finfo(np.float64).eps
122
+
123
+ x0_is_fp = False
124
+ if np.issubdtype(x0_dtype, np.inexact):
125
+ # if you're a floating point type then over-ride the default EPS
126
+ EPS = np.finfo(x0_dtype).eps
127
+ x0_itemsize = np.dtype(x0_dtype).itemsize
128
+ x0_is_fp = True
129
+
130
+ if np.issubdtype(f0_dtype, np.inexact):
131
+ f0_itemsize = np.dtype(f0_dtype).itemsize
132
+ # choose the smallest itemsize between x0 and f0
133
+ if x0_is_fp and f0_itemsize < x0_itemsize:
134
+ EPS = np.finfo(f0_dtype).eps
135
+
136
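+ # For float64 these branches give ~1.49e-8 for '2-point'/'cs' and
+ # ~6.06e-6 for '3-point'; the classic sqrt(eps) and eps**(1/3)
+ # truncation/round-off trade-offs.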
+ if method in ["2-point", "cs"]:
137
+ return EPS**0.5
138
+ elif method in ["3-point"]:
139
+ return EPS**(1/3)
140
+ else:
141
+ raise RuntimeError("Unknown step method, should be one of "
142
+ "{'2-point', '3-point', 'cs'}")
143
+
144
+
145
+ def _compute_absolute_step(rel_step, x0, f0, method):
146
+ """
147
+ Computes an absolute step from a relative step for finite difference
148
+ calculation.
149
+
150
+ Parameters
151
+ ----------
152
+ rel_step: None or array-like
153
+ Relative step for the finite difference calculation
154
+ x0 : np.ndarray
155
+ Parameter vector
156
+ f0 : np.ndarray or scalar
157
+ method : {'2-point', '3-point', 'cs'}
158
+
159
+ Returns
160
+ -------
161
+ h : float
162
+ The absolute step size
163
+
164
+ Notes
165
+ -----
166
+ `h` will always be np.float64. However, if `x0` or `f0` are
167
+ smaller floating point dtypes (e.g. np.float32), then the absolute
168
+ step size will be calculated from the smallest floating point size.
169
+ """
170
+ # this is used instead of np.sign(x0) because we need
171
+ # sign_x0 to be 1 when x0 == 0.
172
+ sign_x0 = (x0 >= 0).astype(float) * 2 - 1
173
+
174
+ rstep = _eps_for_method(x0.dtype, f0.dtype, method)
175
+
176
+ if rel_step is None:
177
+ abs_step = rstep * sign_x0 * np.maximum(1.0, np.abs(x0))
178
+ else:
179
+ # User has requested specific relative steps.
180
+ # Don't multiply by max(1, abs(x0)) because if x0 < 1 then their
181
+ # requested step is not used.
182
+ abs_step = rel_step * sign_x0 * np.abs(x0)
183
+
184
+ # however we don't want an abs_step of 0, which can happen if
185
+ # rel_step is 0, or x0 is 0. Instead, substitute a realistic step
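+ # (e.g., rel_step=0 with x0=2.0 gives abs_step=0, so (x0 + abs_step) - x0
+ # == 0 and the default rstep-based step is substituted for that entry).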
186
+ dx = ((x0 + abs_step) - x0)
187
+ abs_step = np.where(dx == 0,
188
+ rstep * sign_x0 * np.maximum(1.0, np.abs(x0)),
189
+ abs_step)
190
+
191
+ return abs_step
192
+
193
+
194
+ def _prepare_bounds(bounds, x0):
195
+ """
196
+ Prepares new-style bounds from a two-tuple specifying the lower and upper
197
+ limits for values in x0. If a value is not bound then the lower/upper bound
198
+ will be expected to be -np.inf/np.inf.
199
+
200
+ Examples
201
+ --------
202
+ >>> _prepare_bounds([(0, 1, 2), (1, 2, np.inf)], [0.5, 1.5, 2.5])
203
+ (array([0., 1., 2.]), array([ 1., 2., inf]))
204
+ """
205
+ lb, ub = (np.asarray(b, dtype=float) for b in bounds)
206
+ if lb.ndim == 0:
207
+ lb = np.resize(lb, x0.shape)
208
+
209
+ if ub.ndim == 0:
210
+ ub = np.resize(ub, x0.shape)
211
+
212
+ return lb, ub
213
+
214
+
215
+ def group_columns(A, order=0):
216
+ """Group columns of a 2-D matrix for sparse finite differencing [1]_.
217
+
218
+ Two columns are in the same group if in each row at least one of them
219
+ has zero. A greedy sequential algorithm is used to construct groups.
220
+
221
+ Parameters
222
+ ----------
223
+ A : array_like or sparse matrix, shape (m, n)
224
+ Matrix of which to group columns.
225
+ order : int, iterable of int with shape (n,) or None
226
+ Permutation array which defines the order of columns enumeration.
227
+ If int or None, a random permutation is used with `order` used as
228
+ a random seed. Default is 0; that is, use a random permutation but
229
+ guarantee repeatability.
230
+
231
+ Returns
232
+ -------
233
+ groups : ndarray of int, shape (n,)
234
+ Contains values from 0 to n_groups-1, where n_groups is the number
235
+ of found groups. Each value ``groups[i]`` is an index of a group to
236
+ which the ith column is assigned. The procedure is helpful only if
237
+ n_groups is significantly less than n.
238
+
239
+ References
240
+ ----------
241
+ .. [1] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
242
+ sparse Jacobian matrices", Journal of the Institute of Mathematics
243
+ and its Applications, 13 (1974), pp. 117-120.
244
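+
+ Examples
+ --------
+ A minimal illustrative sketch (the toy matrix below is an assumption,
+ not taken from SciPy's tests): columns 0 and 2 are both nonzero in rows
+ 0 and 2, so they cannot share a group, while column 1 fits into either.
+
+ >>> import numpy as np
+ >>> from scipy.optimize._numdiff import group_columns
+ >>> A = np.array([[1, 0, 1],
+ ... [0, 1, 0],
+ ... [1, 0, 1]])
+ >>> groups = group_columns(A)
+ >>> len(np.unique(groups))
+ 2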
+ """
245
+ if issparse(A):
246
+ A = csc_matrix(A)
247
+ else:
248
+ A = np.atleast_2d(A)
249
+ A = (A != 0).astype(np.int32)
250
+
251
+ if A.ndim != 2:
252
+ raise ValueError("`A` must be 2-dimensional.")
253
+
254
+ m, n = A.shape
255
+
256
+ if order is None or np.isscalar(order):
257
+ rng = np.random.RandomState(order)
258
+ order = rng.permutation(n)
259
+ else:
260
+ order = np.asarray(order)
261
+ if order.shape != (n,):
262
+ raise ValueError("`order` has incorrect shape.")
263
+
264
+ A = A[:, order]
265
+
266
+ if issparse(A):
267
+ groups = group_sparse(m, n, A.indices, A.indptr)
268
+ else:
269
+ groups = group_dense(m, n, A)
270
+
271
+ groups[order] = groups.copy()
272
+
273
+ return groups
274
+
275
+
276
+ def approx_derivative(fun, x0, method='3-point', rel_step=None, abs_step=None,
277
+ f0=None, bounds=(-np.inf, np.inf), sparsity=None,
278
+ as_linear_operator=False, args=(), kwargs={}):
279
+ """Compute finite difference approximation of the derivatives of a
280
+ vector-valued function.
281
+
282
+ If a function maps from R^n to R^m, its derivatives form m-by-n matrix
283
+ called the Jacobian, where an element (i, j) is a partial derivative of
284
+ f[i] with respect to x[j].
285
+
286
+ Parameters
287
+ ----------
288
+ fun : callable
289
+ Function of which to estimate the derivatives. The argument x
290
+ passed to this function is ndarray of shape (n,) (never a scalar
291
+ even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
292
+ x0 : array_like of shape (n,) or float
293
+ Point at which to estimate the derivatives. Float will be converted
294
+ to a 1-D array.
295
+ method : {'3-point', '2-point', 'cs'}, optional
296
+ Finite difference method to use:
297
+ - '2-point' - use the first order accuracy forward or backward
298
+ difference.
299
+ - '3-point' - use central difference in interior points and the
300
+ second order accuracy forward or backward difference
301
+ near the boundary.
302
+ - 'cs' - use a complex-step finite difference scheme. This assumes
303
+ that the user function is real-valued and can be
304
+ analytically continued to the complex plane. Otherwise,
305
+ produces bogus results.
306
+ rel_step : None or array_like, optional
307
+ Relative step size to use. If None (default) the absolute step size is
308
+ computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, with
309
+ `rel_step` being selected automatically, see Notes. Otherwise
310
+ ``h = rel_step * sign(x0) * abs(x0)``. For ``method='3-point'`` the
311
+ sign of `h` is ignored. The calculated step size is possibly adjusted
312
+ to fit into the bounds.
313
+ abs_step : array_like, optional
314
+ Absolute step size to use, possibly adjusted to fit into the bounds.
315
+ For ``method='3-point'`` the sign of `abs_step` is ignored. By default
316
+ relative steps are used, only if ``abs_step is not None`` are absolute
317
+ steps used.
318
+ f0 : None or array_like, optional
319
+ If not None it is assumed to be equal to ``fun(x0)``, in this case
320
+ the ``fun(x0)`` is not called. Default is None.
321
+ bounds : tuple of array_like, optional
322
+ Lower and upper bounds on independent variables. Defaults to no bounds.
323
+ Each bound must match the size of `x0` or be a scalar, in the latter
324
+ case the bound will be the same for all variables. Use it to limit the
325
+ range of function evaluation. Bounds checking is not implemented
326
+ when `as_linear_operator` is True.
327
+ sparsity : {None, array_like, sparse matrix, 2-tuple}, optional
328
+ Defines a sparsity structure of the Jacobian matrix. If the Jacobian
329
+ matrix is known to have only few non-zero elements in each row, then
330
+ it's possible to estimate its several columns by a single function
331
+ evaluation [3]_. To perform such economic computations two ingredients
332
+ are required:
333
+
334
+ * structure : array_like or sparse matrix of shape (m, n). A zero
335
+ element means that a corresponding element of the Jacobian
336
+ is identically zero.
337
+ * groups : array_like of shape (n,). A column grouping for a given
338
+ sparsity structure, use `group_columns` to obtain it.
339
+
340
+ A single array or a sparse matrix is interpreted as a sparsity
341
+ structure, and groups are computed inside the function. A tuple is
342
+ interpreted as (structure, groups). If None (default), a standard
343
+ dense differencing will be used.
344
+
345
+ Note that sparse differencing makes sense only for large Jacobian
346
+ matrices where each row contains few non-zero elements.
347
+ as_linear_operator : bool, optional
348
+ When True the function returns a `scipy.sparse.linalg.LinearOperator`.
349
+ Otherwise it returns a dense array or a sparse matrix depending on
350
+ `sparsity`. The linear operator provides an efficient way of computing
351
+ ``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow
352
+ direct access to individual elements of the matrix. By default
353
+ `as_linear_operator` is False.
354
+ args, kwargs : tuple and dict, optional
355
+ Additional arguments passed to `fun`. Both empty by default.
356
+ The calling signature is ``fun(x, *args, **kwargs)``.
357
+
358
+ Returns
359
+ -------
360
+ J : {ndarray, sparse matrix, LinearOperator}
361
+ Finite difference approximation of the Jacobian matrix.
362
+ If `as_linear_operator` is True returns a LinearOperator
363
+ with shape (m, n). Otherwise it returns a dense array or sparse
364
+ matrix depending on how `sparsity` is defined. If `sparsity`
365
+ is None then a ndarray with shape (m, n) is returned. If
366
+ `sparsity` is not None returns a csr_matrix with shape (m, n).
367
+ For sparse matrices and linear operators it is always returned as
368
+ a 2-D structure, for ndarrays, if m=1 it is returned
369
+ as a 1-D gradient array with shape (n,).
370
+
371
+ See Also
372
+ --------
373
+ check_derivative : Check correctness of a function computing derivatives.
374
+
375
+ Notes
376
+ -----
377
+ If `rel_step` is not provided, it assigned as ``EPS**(1/s)``, where EPS is
378
+ determined from the smallest floating point dtype of `x0` or `fun(x0)`,
379
+ ``np.finfo(x0.dtype).eps``, s=2 for the '2-point' method and
380
+ s=3 for the '3-point' method. Such a relative step approximately minimizes a
381
+ sum of truncation and round-off errors; see [1]_. Relative steps are used by
382
+ default. However, absolute steps are used when ``abs_step is not None``.
383
+ If any of the absolute or relative steps produces an indistinguishable
384
+ difference from the original `x0`, ``(x0 + dx) - x0 == 0``, then an
385
+ automatic step size is substituted for that particular entry.
386
+
387
+ A finite difference scheme for the '3-point' method is selected automatically.
388
+ The well-known central difference scheme is used for points sufficiently
389
+ far from the boundary, and a 3-point forward or backward scheme is used for
390
+ points near the boundary. Both schemes have second-order accuracy in
391
+ terms of Taylor expansion. Refer to [3]_ for the formulas of 3-point
392
+ forward and backward difference schemes.
393
+
394
+ For dense differencing, when m=1 the Jacobian is returned with shape (n,);
395
+ on the other hand, when n=1 the Jacobian is returned with shape (m, 1).
396
+ Our motivation is the following: a) it handles the case of gradient
397
+ computation (m=1) in a conventional way; b) it clearly separates these two
398
+ different cases; c) in all cases np.atleast_2d can be called to get a 2-D
399
+ Jacobian with correct dimensions.
400
+
401
+ References
402
+ ----------
403
+ .. [1] W. H. Press et al. "Numerical Recipes. The Art of Scientific
404
+ Computing. 3rd edition", sec. 5.7.
405
+
406
+ .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
407
+ sparse Jacobian matrices", Journal of the Institute of Mathematics
408
+ and its Applications, 13 (1974), pp. 117-120.
409
+
410
+ .. [3] B. Fornberg, "Generation of Finite Difference Formulas on
411
+ Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988.
412
+
413
+ Examples
414
+ --------
415
+ >>> import numpy as np
416
+ >>> from scipy.optimize._numdiff import approx_derivative
417
+ >>>
418
+ >>> def f(x, c1, c2):
419
+ ... return np.array([x[0] * np.sin(c1 * x[1]),
420
+ ... x[0] * np.cos(c2 * x[1])])
421
+ ...
422
+ >>> x0 = np.array([1.0, 0.5 * np.pi])
423
+ >>> approx_derivative(f, x0, args=(1, 2))
424
+ array([[ 1., 0.],
425
+ [-1., 0.]])
426
+
427
+ Bounds can be used to limit the region of function evaluation.
428
+ In the example below we compute left and right derivative at point 1.0.
429
+
430
+ >>> def g(x):
431
+ ... return x**2 if x >= 1 else x
432
+ ...
433
+ >>> x0 = 1.0
434
+ >>> approx_derivative(g, x0, bounds=(-np.inf, 1.0))
435
+ array([ 1.])
436
+ >>> approx_derivative(g, x0, bounds=(1.0, np.inf))
437
+ array([ 2.])
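+
+ Sparse differencing with a known structure reuses function evaluations
+ across compatible columns. A minimal sketch, assuming an elementwise
+ (hence diagonal-Jacobian) test function ``h``:
+
+ >>> from scipy.optimize._numdiff import group_columns
+ >>> def h(x):
+ ...     return x**2  # elementwise, so the Jacobian is diagonal
+ ...
+ >>> structure = np.eye(3)
+ >>> groups = group_columns(structure)
+ >>> J = approx_derivative(h, np.array([1.0, 2.0, 3.0]),
+ ...                       sparsity=(structure, groups))
+ >>> np.allclose(J.toarray(), np.diag([2.0, 4.0, 6.0]))
+ True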
438
+ """
439
+ if method not in ['2-point', '3-point', 'cs']:
440
+ raise ValueError("Unknown method '%s'. " % method)
441
+
442
+ xp = array_namespace(x0)
443
+ _x = atleast_nd(x0, ndim=1, xp=xp)
444
+ _dtype = xp.float64
445
+ if xp.isdtype(_x.dtype, "real floating"):
446
+ _dtype = _x.dtype
447
+
448
+ # promotes to floating
449
+ x0 = xp.astype(_x, _dtype)
450
+
451
+ if x0.ndim > 1:
452
+ raise ValueError("`x0` must have at most 1 dimension.")
453
+
454
+ lb, ub = _prepare_bounds(bounds, x0)
455
+
456
+ if lb.shape != x0.shape or ub.shape != x0.shape:
457
+ raise ValueError("Inconsistent shapes between bounds and `x0`.")
458
+
459
+ if as_linear_operator and not (np.all(np.isinf(lb))
460
+ and np.all(np.isinf(ub))):
461
+ raise ValueError("Bounds not supported when "
462
+ "`as_linear_operator` is True.")
463
+
464
+ def fun_wrapped(x):
465
+ # Send the user function the same floating-point type as x0 (but only
466
+ # if 'cs' is not being used).
467
+ if xp.isdtype(x.dtype, "real floating"):
468
+ x = xp.astype(x, x0.dtype)
469
+
470
+ f = np.atleast_1d(fun(x, *args, **kwargs))
471
+ if f.ndim > 1:
472
+ raise RuntimeError("`fun` return value has "
473
+ "more than 1 dimension.")
474
+ return f
475
+
476
+ if f0 is None:
477
+ f0 = fun_wrapped(x0)
478
+ else:
479
+ f0 = np.atleast_1d(f0)
480
+ if f0.ndim > 1:
481
+ raise ValueError("`f0` passed has more than 1 dimension.")
482
+
483
+ if np.any((x0 < lb) | (x0 > ub)):
484
+ raise ValueError("`x0` violates bound constraints.")
485
+
486
+ if as_linear_operator:
487
+ if rel_step is None:
488
+ rel_step = _eps_for_method(x0.dtype, f0.dtype, method)
489
+
490
+ return _linear_operator_difference(fun_wrapped, x0,
491
+ f0, rel_step, method)
492
+ else:
493
+ # by default we use rel_step
494
+ if abs_step is None:
495
+ h = _compute_absolute_step(rel_step, x0, f0, method)
496
+ else:
497
+ # user specifies an absolute step
498
+ sign_x0 = (x0 >= 0).astype(float) * 2 - 1
499
+ h = abs_step
500
+
501
+ # Cannot have a zero step. This might happen if x0 is very large
502
+ # or small; in that case, fall back to a relative step.
503
+ dx = ((x0 + h) - x0)
504
+ h = np.where(dx == 0,
505
+ _eps_for_method(x0.dtype, f0.dtype, method) *
506
+ sign_x0 * np.maximum(1.0, np.abs(x0)),
507
+ h)
508
+
509
+ if method == '2-point':
510
+ h, use_one_sided = _adjust_scheme_to_bounds(
511
+ x0, h, 1, '1-sided', lb, ub)
512
+ elif method == '3-point':
513
+ h, use_one_sided = _adjust_scheme_to_bounds(
514
+ x0, h, 1, '2-sided', lb, ub)
515
+ elif method == 'cs':
516
+ use_one_sided = False
517
+
518
+ if sparsity is None:
519
+ return _dense_difference(fun_wrapped, x0, f0, h,
520
+ use_one_sided, method)
521
+ else:
522
+ if not issparse(sparsity) and len(sparsity) == 2:
523
+ structure, groups = sparsity
524
+ else:
525
+ structure = sparsity
526
+ groups = group_columns(sparsity)
527
+
528
+ if issparse(structure):
529
+ structure = csc_matrix(structure)
530
+ else:
531
+ structure = np.atleast_2d(structure)
532
+
533
+ groups = np.atleast_1d(groups)
534
+ return _sparse_difference(fun_wrapped, x0, f0, h,
535
+ use_one_sided, structure,
536
+ groups, method)
537
+
538
+
539
+ def _linear_operator_difference(fun, x0, f0, h, method):
540
+ m = f0.size
541
+ n = x0.size
542
+
543
+ if method == '2-point':
544
+ def matvec(p):
545
+ if np.array_equal(p, np.zeros_like(p)):
546
+ return np.zeros(m)
547
+ dx = h / norm(p)
548
+ x = x0 + dx*p
549
+ df = fun(x) - f0
550
+ return df / dx
551
+
552
+ elif method == '3-point':
553
+ def matvec(p):
554
+ if np.array_equal(p, np.zeros_like(p)):
555
+ return np.zeros(m)
556
+ dx = 2*h / norm(p)
557
+ x1 = x0 - (dx/2)*p
558
+ x2 = x0 + (dx/2)*p
559
+ f1 = fun(x1)
560
+ f2 = fun(x2)
561
+ df = f2 - f1
562
+ return df / dx
563
+
564
+ elif method == 'cs':
565
+ def matvec(p):
566
+ if np.array_equal(p, np.zeros_like(p)):
567
+ return np.zeros(m)
568
+ dx = h / norm(p)
569
+ x = x0 + dx*p*1.j
570
+ f1 = fun(x)
571
+ df = f1.imag
572
+ return df / dx
573
+
574
+ else:
575
+ raise RuntimeError("Never be here.")
576
+
577
+ return LinearOperator((m, n), matvec)
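+
+ # Usage sketch (illustrative, not executed here): with
+ # ``as_linear_operator=True``, `approx_derivative` returns the operator
+ # built above, so a Jacobian-vector product costs only one or two extra
+ # function evaluations regardless of n:
+ #
+ #     J = approx_derivative(fun, x0, as_linear_operator=True)
+ #     Jp = J.dot(p)  # directional derivative of `fun` along the vector p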
578
+
579
+
580
+ def _dense_difference(fun, x0, f0, h, use_one_sided, method):
581
+ m = f0.size
582
+ n = x0.size
583
+ J_transposed = np.empty((n, m))
584
+ h_vecs = np.diag(h)
585
+
586
+ for i in range(h.size):
587
+ if method == '2-point':
588
+ x = x0 + h_vecs[i]
589
+ dx = x[i] - x0[i]  # Recompute dx as an exactly representable number.
590
+ df = fun(x) - f0
591
+ elif method == '3-point' and use_one_sided[i]:
592
+ x1 = x0 + h_vecs[i]
593
+ x2 = x0 + 2 * h_vecs[i]
594
+ dx = x2[i] - x0[i]
595
+ f1 = fun(x1)
596
+ f2 = fun(x2)
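+ # Second-order one-sided scheme:
+ # f'(x0) ~ (-3*f(x0) + 4*f(x0 + h) - f(x0 + 2*h)) / (2*h), with dx = 2*h.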
597
+ df = -3.0 * f0 + 4 * f1 - f2
598
+ elif method == '3-point' and not use_one_sided[i]:
599
+ x1 = x0 - h_vecs[i]
600
+ x2 = x0 + h_vecs[i]
601
+ dx = x2[i] - x1[i]
602
+ f1 = fun(x1)
603
+ f2 = fun(x2)
604
+ df = f2 - f1
605
+ elif method == 'cs':
606
+ f1 = fun(x0 + h_vecs[i]*1.j)
607
+ df = f1.imag
608
+ dx = h_vecs[i, i]
609
+ else:
610
+ raise RuntimeError("Never be here.")
611
+
612
+ J_transposed[i] = df / dx
613
+
614
+ if m == 1:
615
+ J_transposed = np.ravel(J_transposed)
616
+
617
+ return J_transposed.T
618
+
619
+
620
+ def _sparse_difference(fun, x0, f0, h, use_one_sided,
621
+ structure, groups, method):
622
+ m = f0.size
623
+ n = x0.size
624
+ row_indices = []
625
+ col_indices = []
626
+ fractions = []
627
+
628
+ n_groups = np.max(groups) + 1
629
+ for group in range(n_groups):
630
+ # Perturb variables which are in the same group simultaneously.
631
+ e = np.equal(group, groups)
632
+ h_vec = h * e
633
+ if method == '2-point':
634
+ x = x0 + h_vec
635
+ dx = x - x0
636
+ df = fun(x) - f0
637
+ # The result is written to columns which correspond to perturbed
638
+ # variables.
639
+ cols, = np.nonzero(e)
640
+ # Find all non-zero elements in selected columns of Jacobian.
641
+ i, j, _ = find(structure[:, cols])
642
+ # Restore column indices in the full array.
643
+ j = cols[j]
644
+ elif method == '3-point':
645
+ # Here we do conceptually the same but separate one-sided
646
+ # and two-sided schemes.
647
+ x1 = x0.copy()
648
+ x2 = x0.copy()
649
+
650
+ mask_1 = use_one_sided & e
651
+ x1[mask_1] += h_vec[mask_1]
652
+ x2[mask_1] += 2 * h_vec[mask_1]
653
+
654
+ mask_2 = ~use_one_sided & e
655
+ x1[mask_2] -= h_vec[mask_2]
656
+ x2[mask_2] += h_vec[mask_2]
657
+
658
+ dx = np.zeros(n)
659
+ dx[mask_1] = x2[mask_1] - x0[mask_1]
660
+ dx[mask_2] = x2[mask_2] - x1[mask_2]
661
+
662
+ f1 = fun(x1)
663
+ f2 = fun(x2)
664
+
665
+ cols, = np.nonzero(e)
666
+ i, j, _ = find(structure[:, cols])
667
+ j = cols[j]
668
+
669
+ mask = use_one_sided[j]
670
+ df = np.empty(m)
671
+
672
+ rows = i[mask]
673
+ df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows]
674
+
675
+ rows = i[~mask]
676
+ df[rows] = f2[rows] - f1[rows]
677
+ elif method == 'cs':
678
+ f1 = fun(x0 + h_vec*1.j)
679
+ df = f1.imag
680
+ dx = h_vec
681
+ cols, = np.nonzero(e)
682
+ i, j, _ = find(structure[:, cols])
683
+ j = cols[j]
684
+ else:
685
+ raise ValueError("Never be here.")
686
+
687
+ # All that's left is to compute the fraction. We store i, j and
688
+ # fractions as separate arrays and later construct coo_matrix.
689
+ row_indices.append(i)
690
+ col_indices.append(j)
691
+ fractions.append(df[i] / dx[j])
692
+
693
+ row_indices = np.hstack(row_indices)
694
+ col_indices = np.hstack(col_indices)
695
+ fractions = np.hstack(fractions)
696
+ J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n))
697
+ return csr_matrix(J)
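+
+ # Sketch of the grouping idea used above (Curtis-Powell-Reid, reference
+ # [2]_ of `approx_derivative`): columns that share no non-zero row can be
+ # perturbed simultaneously, so e.g. a tridiagonal n x n Jacobian needs
+ # only 3 groups (a constant number of extra function evaluations)
+ # instead of n.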
698
+
699
+
700
+ def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(),
701
+ kwargs={}):
702
+ """Check correctness of a function computing derivatives (Jacobian or
703
+ gradient) by comparison with a finite difference approximation.
704
+
705
+ Parameters
706
+ ----------
707
+ fun : callable
708
+ Function of which to estimate the derivatives. The argument x
709
+ passed to this function is ndarray of shape (n,) (never a scalar
710
+ even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
711
+ jac : callable
712
+ Function which computes Jacobian matrix of `fun`. It must work with
713
+ argument x the same way as `fun`. The return value must be array_like
714
+ or sparse matrix with an appropriate shape.
715
+ x0 : array_like of shape (n,) or float
716
+ Point at which to estimate the derivatives. Float will be converted
717
+ to 1-D array.
718
+ bounds : 2-tuple of array_like, optional
719
+ Lower and upper bounds on independent variables. Defaults to no bounds.
720
+ Each bound must match the size of `x0` or be a scalar, in the latter
721
+ case the bound will be the same for all variables. Use it to limit the
722
+ range of function evaluation.
723
+ args, kwargs : tuple and dict, optional
724
+ Additional arguments passed to `fun` and `jac`. Both empty by default.
725
+ The calling signature is ``fun(x, *args, **kwargs)`` and the same
726
+ for `jac`.
727
+
728
+ Returns
729
+ -------
730
+ accuracy : float
731
+ The maximum among all relative errors for elements with absolute values
732
+ higher than 1 and absolute errors for elements with absolute values
733
+ less than or equal to 1. If `accuracy` is on the order of 1e-6 or lower,
734
+ then it is likely that your `jac` implementation is correct.
735
+
736
+ See Also
737
+ --------
738
+ approx_derivative : Compute finite difference approximation of derivative.
739
+
740
+ Examples
741
+ --------
742
+ >>> import numpy as np
743
+ >>> from scipy.optimize._numdiff import check_derivative
744
+ >>>
745
+ >>>
746
+ >>> def f(x, c1, c2):
747
+ ... return np.array([x[0] * np.sin(c1 * x[1]),
748
+ ... x[0] * np.cos(c2 * x[1])])
749
+ ...
750
+ >>> def jac(x, c1, c2):
751
+ ... return np.array([
752
+ ... [np.sin(c1 * x[1]), c1 * x[0] * np.cos(c1 * x[1])],
753
+ ... [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])]
754
+ ... ])
755
+ ...
756
+ >>>
757
+ >>> x0 = np.array([1.0, 0.5 * np.pi])
758
+ >>> check_derivative(f, jac, x0, args=(1, 2))
759
+ 2.4492935982947064e-16
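+
+ A sparse `jac` is also accepted; its non-zero pattern is then reused as
+ the sparsity structure for the finite difference approximation. A
+ minimal sketch wrapping the dense Jacobian above (``jac_sparse`` is a
+ hypothetical helper):
+
+ >>> from scipy.sparse import csr_matrix
+ >>> def jac_sparse(x, c1, c2):
+ ...     return csr_matrix(jac(x, c1, c2))
+ ...
+ >>> check_derivative(f, jac_sparse, x0, args=(1, 2)) < 1e-6
+ True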
760
+ """
761
+ J_to_test = jac(x0, *args, **kwargs)
762
+ if issparse(J_to_test):
763
+ J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test,
764
+ args=args, kwargs=kwargs)
765
+ J_to_test = csr_matrix(J_to_test)
766
+ abs_err = J_to_test - J_diff
767
+ i, j, abs_err_data = find(abs_err)
768
+ J_diff_data = np.asarray(J_diff[i, j]).ravel()
769
+ return np.max(np.abs(abs_err_data) /
770
+ np.maximum(1, np.abs(J_diff_data)))
771
+ else:
772
+ J_diff = approx_derivative(fun, x0, bounds=bounds,
773
+ args=args, kwargs=kwargs)
774
+ abs_err = np.abs(J_to_test - J_diff)
775
+ return np.max(abs_err / np.maximum(1, np.abs(J_diff)))
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_optimize.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (224 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_root.py ADDED
@@ -0,0 +1,711 @@
1
+ """
2
+ Unified interfaces to root finding algorithms.
3
+
4
+ Functions
5
+ ---------
6
+ - root : find a root of a vector function.
7
+ """
8
+ __all__ = ['root']
9
+
10
+ import numpy as np
11
+
12
+ from warnings import warn
13
+
14
+ from ._optimize import MemoizeJac, OptimizeResult, _check_unknown_options
15
+ from ._minpack_py import _root_hybr, leastsq
16
+ from ._spectral import _root_df_sane
17
+ from . import _nonlin as nonlin
18
+
19
+
20
+ ROOT_METHODS = ['hybr', 'lm', 'broyden1', 'broyden2', 'anderson',
21
+ 'linearmixing', 'diagbroyden', 'excitingmixing', 'krylov',
22
+ 'df-sane']
23
+
24
+
25
+ def root(fun, x0, args=(), method='hybr', jac=None, tol=None, callback=None,
26
+ options=None):
27
+ r"""
28
+ Find a root of a vector function.
29
+
30
+ Parameters
31
+ ----------
32
+ fun : callable
33
+ A vector function to find a root of.
34
+ x0 : ndarray
35
+ Initial guess.
36
+ args : tuple, optional
37
+ Extra arguments passed to the objective function and its Jacobian.
38
+ method : str, optional
39
+ Type of solver. Should be one of
40
+
41
+ - 'hybr' :ref:`(see here) <optimize.root-hybr>`
42
+ - 'lm' :ref:`(see here) <optimize.root-lm>`
43
+ - 'broyden1' :ref:`(see here) <optimize.root-broyden1>`
44
+ - 'broyden2' :ref:`(see here) <optimize.root-broyden2>`
45
+ - 'anderson' :ref:`(see here) <optimize.root-anderson>`
46
+ - 'linearmixing' :ref:`(see here) <optimize.root-linearmixing>`
47
+ - 'diagbroyden' :ref:`(see here) <optimize.root-diagbroyden>`
48
+ - 'excitingmixing' :ref:`(see here) <optimize.root-excitingmixing>`
49
+ - 'krylov' :ref:`(see here) <optimize.root-krylov>`
50
+ - 'df-sane' :ref:`(see here) <optimize.root-dfsane>`
51
+
52
+ jac : bool or callable, optional
53
+ If `jac` is a Boolean and is True, `fun` is assumed to return the
54
+ value of the Jacobian along with the objective function. If False, the
55
+ Jacobian will be estimated numerically.
56
+ `jac` can also be a callable returning the Jacobian of `fun`. In
57
+ this case, it must accept the same arguments as `fun`.
58
+ tol : float, optional
59
+ Tolerance for termination. For detailed control, use solver-specific
60
+ options.
61
+ callback : function, optional
62
+ Optional callback function. It is called on every iteration as
63
+ ``callback(x, f)``, where `x` is the current solution and `f`
64
+ the corresponding residual; supported by all methods but 'hybr' and 'lm'.
65
+ options : dict, optional
66
+ A dictionary of solver options. E.g., `xtol` or `maxiter`, see
67
+ :obj:`show_options()` for details.
68
+
69
+ Returns
70
+ -------
71
+ sol : OptimizeResult
72
+ The solution represented as a ``OptimizeResult`` object.
73
+ Important attributes are: ``x`` the solution array, ``success`` a
74
+ Boolean flag indicating if the algorithm exited successfully and
75
+ ``message`` which describes the cause of the termination. See
76
+ `OptimizeResult` for a description of other attributes.
77
+
78
+ See also
79
+ --------
80
+ show_options : Additional options accepted by the solvers
81
+
82
+ Notes
83
+ -----
84
+ This section describes the available solvers that can be selected by the
85
+ 'method' parameter. The default method is *hybr*.
86
+
87
+ Method *hybr* uses a modification of the Powell hybrid method as
88
+ implemented in MINPACK [1]_.
89
+
90
+ Method *lm* solves the system of nonlinear equations in a least squares
91
+ sense using a modification of the Levenberg-Marquardt algorithm as
92
+ implemented in MINPACK [1]_.
93
+
94
+ Method *df-sane* is a derivative-free spectral method [3]_.
95
+
96
+ Methods *broyden1*, *broyden2*, *anderson*, *linearmixing*,
97
+ *diagbroyden*, *excitingmixing*, *krylov* are inexact Newton methods,
98
+ with backtracking or full line searches [2]_. Each method corresponds
99
+ to a particular Jacobian approximation.
100
+
101
+ - Method *broyden1* uses Broyden's first Jacobian approximation, it is
102
+ known as Broyden's good method.
103
+ - Method *broyden2* uses Broyden's second Jacobian approximation, it
104
+ is known as Broyden's bad method.
105
+ - Method *anderson* uses (extended) Anderson mixing.
106
+ - Method *krylov* uses a Krylov approximation of the inverse Jacobian. It
107
+ is suitable for large-scale problems.
108
+ - Method *diagbroyden* uses diagonal Broyden Jacobian approximation.
109
+ - Method *linearmixing* uses a scalar Jacobian approximation.
110
+ - Method *excitingmixing* uses a tuned diagonal Jacobian
111
+ approximation.
112
+
113
+ .. warning::
114
+
115
+ The algorithms implemented for methods *diagbroyden*,
116
+ *linearmixing* and *excitingmixing* may be useful for specific
117
+ problems, but whether they will work may depend strongly on the
118
+ problem.
119
+
120
+ .. versionadded:: 0.11.0
121
+
122
+ References
123
+ ----------
124
+ .. [1] More, Jorge J., Burton S. Garbow, and Kenneth E. Hillstrom.
125
+ 1980. User Guide for MINPACK-1.
126
+ .. [2] C. T. Kelley. 1995. Iterative Methods for Linear and Nonlinear
127
+ Equations. Society for Industrial and Applied Mathematics.
128
+ <https://archive.siam.org/books/kelley/fr16/>
129
+ .. [3] W. La Cruz, J.M. Martinez, M. Raydan. Math. Comp. 75, 1429 (2006).
130
+
131
+ Examples
132
+ --------
133
+ The following functions define a system of nonlinear equations and its
134
+ Jacobian.
135
+
136
+ >>> import numpy as np
137
+ >>> def fun(x):
138
+ ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
139
+ ... 0.5 * (x[1] - x[0])**3 + x[1]]
140
+
141
+ >>> def jac(x):
142
+ ... return np.array([[1 + 1.5 * (x[0] - x[1])**2,
143
+ ... -1.5 * (x[0] - x[1])**2],
144
+ ... [-1.5 * (x[1] - x[0])**2,
145
+ ... 1 + 1.5 * (x[1] - x[0])**2]])
146
+
147
+ A solution can be obtained as follows.
148
+
149
+ >>> from scipy import optimize
150
+ >>> sol = optimize.root(fun, [0, 0], jac=jac, method='hybr')
151
+ >>> sol.x
152
+ array([ 0.8411639, 0.1588361])
153
+
154
+ **Large problem**
155
+
156
+ Suppose that we needed to solve the following integrodifferential
157
+ equation on the square :math:`[0,1]\times[0,1]`:
158
+
159
+ .. math::
160
+
161
+ \nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2
162
+
163
+ with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of
164
+ the square.
165
+
166
+ The solution can be found using the ``method='krylov'`` solver:
167
+
168
+ >>> from scipy import optimize
169
+ >>> # parameters
170
+ >>> nx, ny = 75, 75
171
+ >>> hx, hy = 1./(nx-1), 1./(ny-1)
172
+
173
+ >>> P_left, P_right = 0, 0
174
+ >>> P_top, P_bottom = 1, 0
175
+
176
+ >>> def residual(P):
177
+ ... d2x = np.zeros_like(P)
178
+ ... d2y = np.zeros_like(P)
179
+ ...
180
+ ... d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx
181
+ ... d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
182
+ ... d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
183
+ ...
184
+ ... d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
185
+ ... d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
186
+ ... d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
187
+ ...
188
+ ... return d2x + d2y - 10*np.cosh(P).mean()**2
189
+
190
+ >>> guess = np.zeros((nx, ny), float)
191
+ >>> sol = optimize.root(residual, guess, method='krylov')
192
+ >>> print('Residual: %g' % abs(residual(sol.x)).max())
193
+ Residual: 5.7972e-06 # may vary
194
+
195
+ >>> import matplotlib.pyplot as plt
196
+ >>> x, y = np.mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
197
+ >>> plt.pcolormesh(x, y, sol.x, shading='gouraud')
198
+ >>> plt.colorbar()
199
+ >>> plt.show()
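+
+ **Passing solver-specific options**
+
+ Solver-specific settings go in the ``options`` dict; for example, a
+ tighter step tolerance for the 'lm' solver (a minimal sketch reusing
+ ``fun`` and ``jac`` from the first example):
+
+ >>> sol = optimize.root(fun, [0, 0], jac=jac, method='lm',
+ ...                     options={'xtol': 1e-12})
+ >>> sol.success
+ True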
200
+
201
+ """
202
+ if not isinstance(args, tuple):
203
+ args = (args,)
204
+
205
+ meth = method.lower()
206
+ if options is None:
207
+ options = {}
208
+
209
+ if callback is not None and meth in ('hybr', 'lm'):
210
+ warn('Method %s does not accept callback.' % method,
211
+ RuntimeWarning, stacklevel=2)
212
+
213
+ # fun also returns the Jacobian
214
+ if not callable(jac) and meth in ('hybr', 'lm'):
215
+ if bool(jac):
216
+ fun = MemoizeJac(fun)
217
+ jac = fun.derivative
218
+ else:
219
+ jac = None
220
+
221
+ # set default tolerances
222
+ if tol is not None:
223
+ options = dict(options)
224
+ if meth in ('hybr', 'lm'):
225
+ options.setdefault('xtol', tol)
226
+ elif meth in ('df-sane',):
227
+ options.setdefault('ftol', tol)
228
+ elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing',
229
+ 'diagbroyden', 'excitingmixing', 'krylov'):
230
+ options.setdefault('xtol', tol)
231
+ options.setdefault('xatol', np.inf)
232
+ options.setdefault('ftol', np.inf)
233
+ options.setdefault('fatol', np.inf)
234
+
235
+ if meth == 'hybr':
236
+ sol = _root_hybr(fun, x0, args=args, jac=jac, **options)
237
+ elif meth == 'lm':
238
+ sol = _root_leastsq(fun, x0, args=args, jac=jac, **options)
239
+ elif meth == 'df-sane':
240
+ _warn_jac_unused(jac, method)
241
+ sol = _root_df_sane(fun, x0, args=args, callback=callback,
242
+ **options)
243
+ elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing',
244
+ 'diagbroyden', 'excitingmixing', 'krylov'):
245
+ _warn_jac_unused(jac, method)
246
+ sol = _root_nonlin_solve(fun, x0, args=args, jac=jac,
247
+ _method=meth, _callback=callback,
248
+ **options)
249
+ else:
250
+ raise ValueError('Unknown solver %s' % method)
251
+
252
+ return sol
253
+
254
+
255
+ def _warn_jac_unused(jac, method):
256
+ if jac is not None:
257
+ warn(f'Method {method} does not use the jacobian (jac).',
258
+ RuntimeWarning, stacklevel=2)
259
+
260
+
261
+ def _root_leastsq(fun, x0, args=(), jac=None,
262
+ col_deriv=0, xtol=1.49012e-08, ftol=1.49012e-08,
263
+ gtol=0.0, maxiter=0, eps=0.0, factor=100, diag=None,
264
+ **unknown_options):
265
+ """
266
+ Solve for least squares with Levenberg-Marquardt
267
+
268
+ Options
269
+ -------
270
+ col_deriv : bool
271
+ Non-zero to specify that the Jacobian function computes derivatives
272
+ down the columns (faster, because there is no transpose operation).
273
+ ftol : float
274
+ Relative error desired in the sum of squares.
275
+ xtol : float
276
+ Relative error desired in the approximate solution.
277
+ gtol : float
278
+ Orthogonality desired between the function vector and the columns
279
+ of the Jacobian.
280
+ maxiter : int
281
+ The maximum number of calls to the function. If zero, then
282
+ 100*(N+1) is the maximum, where N is the number of elements in x0.
283
+ eps : float
284
+ A suitable step length for the forward-difference approximation of
285
+ the Jacobian (when `jac` is None). If `eps` is less than the machine
286
+ precision, it is assumed that the relative errors in the functions
287
+ are of the order of the machine precision.
288
+ factor : float
289
+ A parameter determining the initial step bound
290
+ (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
291
+ diag : sequence
292
+ N positive entries that serve as scale factors for the variables.
293
+ """
294
+
295
+ _check_unknown_options(unknown_options)
296
+ x, cov_x, info, msg, ier = leastsq(fun, x0, args=args, Dfun=jac,
297
+ full_output=True,
298
+ col_deriv=col_deriv, xtol=xtol,
299
+ ftol=ftol, gtol=gtol,
300
+ maxfev=maxiter, epsfcn=eps,
301
+ factor=factor, diag=diag)
302
+ sol = OptimizeResult(x=x, message=msg, status=ier,
303
+ success=ier in (1, 2, 3, 4), cov_x=cov_x,
304
+ fun=info.pop('fvec'), method="lm")
305
+ sol.update(info)
306
+ return sol
307
+
308
+
309
+ def _root_nonlin_solve(fun, x0, args=(), jac=None,
310
+ _callback=None, _method=None,
311
+ nit=None, disp=False, maxiter=None,
312
+ ftol=None, fatol=None, xtol=None, xatol=None,
313
+ tol_norm=None, line_search='armijo', jac_options=None,
314
+ **unknown_options):
315
+ _check_unknown_options(unknown_options)
316
+
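+ # Translate the public `root` option names into the keyword names used
+ # by `nonlin_solve` (absolute vs. relative tolerances on f and x).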
317
+ f_tol = fatol
318
+ f_rtol = ftol
319
+ x_tol = xatol
320
+ x_rtol = xtol
321
+ verbose = disp
322
+ if jac_options is None:
323
+ jac_options = dict()
324
+
325
+ jacobian = {'broyden1': nonlin.BroydenFirst,
326
+ 'broyden2': nonlin.BroydenSecond,
327
+ 'anderson': nonlin.Anderson,
328
+ 'linearmixing': nonlin.LinearMixing,
329
+ 'diagbroyden': nonlin.DiagBroyden,
330
+ 'excitingmixing': nonlin.ExcitingMixing,
331
+ 'krylov': nonlin.KrylovJacobian
332
+ }[_method]
333
+
334
+ if args:
335
+ if jac is True:
336
+ def f(x):
337
+ return fun(x, *args)[0]
338
+ else:
339
+ def f(x):
340
+ return fun(x, *args)
341
+ else:
342
+ f = fun
343
+
344
+ x, info = nonlin.nonlin_solve(f, x0, jacobian=jacobian(**jac_options),
345
+ iter=nit, verbose=verbose,
346
+ maxiter=maxiter, f_tol=f_tol,
347
+ f_rtol=f_rtol, x_tol=x_tol,
348
+ x_rtol=x_rtol, tol_norm=tol_norm,
349
+ line_search=line_search,
350
+ callback=_callback, full_output=True,
351
+ raise_exception=False)
352
+ sol = OptimizeResult(x=x, method=_method)
353
+ sol.update(info)
354
+ return sol
355
+
356
+ def _root_broyden1_doc():
357
+ """
358
+ Options
359
+ -------
360
+ nit : int, optional
361
+ Number of iterations to make. If omitted (default), make as many
362
+ as required to meet tolerances.
363
+ disp : bool, optional
364
+ Print status to stdout on every iteration.
365
+ maxiter : int, optional
366
+ Maximum number of iterations to make.
367
+ ftol : float, optional
368
+ Relative tolerance for the residual. If omitted, not used.
369
+ fatol : float, optional
370
+ Absolute tolerance (in max-norm) for the residual.
371
+ If omitted, default is 6e-6.
372
+ xtol : float, optional
373
+ Relative minimum step size. If omitted, not used.
374
+ xatol : float, optional
375
+ Absolute minimum step size, as determined from the Jacobian
376
+ approximation. If the step size is smaller than this, optimization
377
+ is terminated as successful. If omitted, not used.
378
+ tol_norm : function(vector) -> scalar, optional
379
+ Norm to use in convergence check. Default is the maximum norm.
380
+ line_search : {None, 'armijo' (default), 'wolfe'}, optional
381
+ Which type of line search to use to determine the step size in
382
+ the direction given by the Jacobian approximation. Defaults to
383
+ 'armijo'.
384
+ jac_options : dict, optional
385
+ Options for the respective Jacobian approximation.
386
+ alpha : float, optional
387
+ Initial guess for the Jacobian is (-1/alpha).
388
+ reduction_method : str or tuple, optional
389
+ Method used in ensuring that the rank of the Broyden
390
+ matrix stays low. Can either be a string giving the
391
+ name of the method, or a tuple of the form ``(method,
392
+ param1, param2, ...)`` that gives the name of the
393
+ method and values for additional parameters.
394
+
395
+ Methods available:
396
+
397
+ - ``restart``
398
+ Drop all matrix columns. Has no
399
+ extra parameters.
400
+ - ``simple``
401
+ Drop oldest matrix column. Has no
402
+ extra parameters.
403
+ - ``svd``
404
+ Keep only the most significant SVD
405
+ components.
406
+
407
+ Extra parameters:
408
+
409
+ - ``to_retain``
410
+ Number of SVD components to
411
+ retain when rank reduction is done.
412
+ Default is ``max_rank - 2``.
413
+ max_rank : int, optional
414
+ Maximum rank for the Broyden matrix.
415
+ Default is infinity (i.e., no rank reduction).
416
+
417
+ Examples
418
+ --------
419
+ >>> import numpy as np
+ >>> def func(x):
420
+ ... return np.cos(x) + x[::-1] - [1, 2, 3, 4]
421
+ ...
422
+ >>> from scipy import optimize
423
+ >>> res = optimize.root(func, [1, 1, 1, 1], method='broyden1', tol=1e-14)
424
+ >>> x = res.x
425
+ >>> x
426
+ array([4.04674914, 3.91158389, 2.71791677, 1.61756251])
427
+ >>> np.cos(x) + x[::-1]
428
+ array([1., 2., 3., 4.])
429
+
430
+ """
431
+ pass
432
+
433
+ def _root_broyden2_doc():
434
+ """
435
+ Options
436
+ -------
437
+ nit : int, optional
438
+ Number of iterations to make. If omitted (default), make as many
439
+ as required to meet tolerances.
440
+ disp : bool, optional
441
+ Print status to stdout on every iteration.
442
+ maxiter : int, optional
443
+ Maximum number of iterations to make.
444
+ ftol : float, optional
445
+ Relative tolerance for the residual. If omitted, not used.
446
+ fatol : float, optional
447
+ Absolute tolerance (in max-norm) for the residual.
448
+ If omitted, default is 6e-6.
449
+ xtol : float, optional
450
+ Relative minimum step size. If omitted, not used.
451
+ xatol : float, optional
452
+ Absolute minimum step size, as determined from the Jacobian
453
+ approximation. If the step size is smaller than this, optimization
454
+ is terminated as successful. If omitted, not used.
455
+ tol_norm : function(vector) -> scalar, optional
456
+ Norm to use in convergence check. Default is the maximum norm.
457
+ line_search : {None, 'armijo' (default), 'wolfe'}, optional
458
+ Which type of line search to use to determine the step size in
459
+ the direction given by the Jacobian approximation. Defaults to
460
+ 'armijo'.
461
+ jac_options : dict, optional
462
+ Options for the respective Jacobian approximation.
463
+
464
+ alpha : float, optional
465
+ Initial guess for the Jacobian is (-1/alpha).
466
+ reduction_method : str or tuple, optional
467
+ Method used in ensuring that the rank of the Broyden
468
+ matrix stays low. Can either be a string giving the
469
+ name of the method, or a tuple of the form ``(method,
470
+ param1, param2, ...)`` that gives the name of the
471
+ method and values for additional parameters.
472
+
473
+ Methods available:
474
+
475
+ - ``restart``
476
+ Drop all matrix columns. Has no
477
+ extra parameters.
478
+ - ``simple``
479
+ Drop oldest matrix column. Has no
480
+ extra parameters.
481
+ - ``svd``
482
+ Keep only the most significant SVD
483
+ components.
484
+
485
+ Extra parameters:
486
+
487
+ - ``to_retain``
488
+ Number of SVD components to
489
+ retain when rank reduction is done.
490
+ Default is ``max_rank - 2``.
491
+ max_rank : int, optional
492
+ Maximum rank for the Broyden matrix.
493
+ Default is infinity (i.e., no rank reduction).
494
+ """
495
+ pass
496
+
497
+ def _root_anderson_doc():
498
+ """
499
+ Options
500
+ -------
501
+ nit : int, optional
502
+ Number of iterations to make. If omitted (default), make as many
503
+ as required to meet tolerances.
504
+ disp : bool, optional
505
+ Print status to stdout on every iteration.
506
+ maxiter : int, optional
507
+ Maximum number of iterations to make.
508
+ ftol : float, optional
509
+ Relative tolerance for the residual. If omitted, not used.
510
+ fatol : float, optional
511
+ Absolute tolerance (in max-norm) for the residual.
512
+ If omitted, default is 6e-6.
513
+ xtol : float, optional
514
+ Relative minimum step size. If omitted, not used.
515
+ xatol : float, optional
516
+ Absolute minimum step size, as determined from the Jacobian
517
+ approximation. If the step size is smaller than this, optimization
518
+ is terminated as successful. If omitted, not used.
519
+ tol_norm : function(vector) -> scalar, optional
520
+ Norm to use in convergence check. Default is the maximum norm.
521
+ line_search : {None, 'armijo' (default), 'wolfe'}, optional
522
+ Which type of line search to use to determine the step size in
523
+ the direction given by the Jacobian approximation. Defaults to
524
+ 'armijo'.
525
+ jac_options : dict, optional
526
+ Options for the respective Jacobian approximation.
527
+
528
+ alpha : float, optional
529
+ Initial guess for the Jacobian is (-1/alpha).
530
+ M : float, optional
531
+ Number of previous vectors to retain. Defaults to 5.
532
+ w0 : float, optional
533
+ Regularization parameter for numerical stability.
534
+ Good values are of the order of 0.01 when compared to unity.
535
+ """
536
+ pass
537
+
538
+ def _root_linearmixing_doc():
539
+ """
540
+ Options
541
+ -------
542
+ nit : int, optional
543
+ Number of iterations to make. If omitted (default), make as many
544
+ as required to meet tolerances.
545
+ disp : bool, optional
546
+ Print status to stdout on every iteration.
547
+ maxiter : int, optional
548
+ Maximum number of iterations to make.
549
+ ftol : float, optional
550
+ Relative tolerance for the residual. If omitted, not used.
551
+ fatol : float, optional
552
+ Absolute tolerance (in max-norm) for the residual.
553
+ If omitted, default is 6e-6.
554
+ xtol : float, optional
555
+ Relative minimum step size. If omitted, not used.
556
+ xatol : float, optional
557
+ Absolute minimum step size, as determined from the Jacobian
558
+ approximation. If the step size is smaller than this, optimization
559
+ is terminated as successful. If omitted, not used.
560
+ tol_norm : function(vector) -> scalar, optional
561
+ Norm to use in convergence check. Default is the maximum norm.
562
+ line_search : {None, 'armijo' (default), 'wolfe'}, optional
563
+ Which type of line search to use to determine the step size in
564
+ the direction given by the Jacobian approximation. Defaults to
565
+ 'armijo'.
566
+ jac_options : dict, optional
567
+ Options for the respective Jacobian approximation.
568
+
569
+ alpha : float, optional
570
+ Initial guess for the Jacobian is (-1/alpha).
571
+ """
572
+ pass
573
+
574
+ def _root_diagbroyden_doc():
575
+ """
576
+ Options
577
+ -------
578
+ nit : int, optional
579
+ Number of iterations to make. If omitted (default), make as many
580
+ as required to meet tolerances.
581
+ disp : bool, optional
582
+ Print status to stdout on every iteration.
583
+ maxiter : int, optional
584
+ Maximum number of iterations to make.
585
+ ftol : float, optional
586
+ Relative tolerance for the residual. If omitted, not used.
587
+ fatol : float, optional
588
+ Absolute tolerance (in max-norm) for the residual.
589
+ If omitted, default is 6e-6.
590
+ xtol : float, optional
591
+ Relative minimum step size. If omitted, not used.
592
+ xatol : float, optional
593
+ Absolute minimum step size, as determined from the Jacobian
594
+ approximation. If the step size is smaller than this, optimization
595
+ is terminated as successful. If omitted, not used.
596
+ tol_norm : function(vector) -> scalar, optional
597
+ Norm to use in convergence check. Default is the maximum norm.
598
+ line_search : {None, 'armijo' (default), 'wolfe'}, optional
599
+ Which type of line search to use to determine the step size in
600
+ the direction given by the Jacobian approximation. Defaults to
601
+ 'armijo'.
602
+ jac_options : dict, optional
603
+ Options for the respective Jacobian approximation.
604
+
605
+ alpha : float, optional
606
+ Initial guess for the Jacobian is (-1/alpha).
607
+ """
608
+ pass
609
+
610
+ def _root_excitingmixing_doc():
611
+ """
612
+ Options
613
+ -------
614
+ nit : int, optional
615
+ Number of iterations to make. If omitted (default), make as many
616
+ as required to meet tolerances.
617
+ disp : bool, optional
618
+ Print status to stdout on every iteration.
619
+ maxiter : int, optional
620
+ Maximum number of iterations to make.
621
+ ftol : float, optional
622
+ Relative tolerance for the residual. If omitted, not used.
623
+ fatol : float, optional
624
+ Absolute tolerance (in max-norm) for the residual.
625
+ If omitted, default is 6e-6.
626
+ xtol : float, optional
627
+ Relative minimum step size. If omitted, not used.
628
+ xatol : float, optional
629
+ Absolute minimum step size, as determined from the Jacobian
630
+ approximation. If the step size is smaller than this, optimization
631
+ is terminated as successful. If omitted, not used.
632
+ tol_norm : function(vector) -> scalar, optional
633
+ Norm to use in convergence check. Default is the maximum norm.
634
+ line_search : {None, 'armijo' (default), 'wolfe'}, optional
635
+ Which type of line search to use to determine the step size in
636
+ the direction given by the Jacobian approximation. Defaults to
637
+ 'armijo'.
638
+ jac_options : dict, optional
639
+ Options for the respective Jacobian approximation.
640
+
641
+ alpha : float, optional
642
+ Initial Jacobian approximation is (-1/alpha).
643
+ alphamax : float, optional
644
+ The entries of the diagonal Jacobian are kept in the range
645
+ ``[alpha, alphamax]``.
646
+ """
647
+ pass
648
+
649
+ def _root_krylov_doc():
650
+ """
651
+ Options
652
+ -------
653
+ nit : int, optional
654
+ Number of iterations to make. If omitted (default), make as many
655
+ as required to meet tolerances.
656
+ disp : bool, optional
657
+ Print status to stdout on every iteration.
658
+ maxiter : int, optional
659
+ Maximum number of iterations to make.
660
+ ftol : float, optional
661
+ Relative tolerance for the residual. If omitted, not used.
662
+ fatol : float, optional
663
+ Absolute tolerance (in max-norm) for the residual.
664
+ If omitted, default is 6e-6.
665
+ xtol : float, optional
666
+ Relative minimum step size. If omitted, not used.
667
+ xatol : float, optional
668
+ Absolute minimum step size, as determined from the Jacobian
669
+ approximation. If the step size is smaller than this, optimization
670
+ is terminated as successful. If omitted, not used.
671
+ tol_norm : function(vector) -> scalar, optional
672
+ Norm to use in convergence check. Default is the maximum norm.
673
+ line_search : {None, 'armijo' (default), 'wolfe'}, optional
674
+ Which type of line search to use to determine the step size in
675
+ the direction given by the Jacobian approximation. Defaults to
676
+ 'armijo'.
677
+ jac_options : dict, optional
678
+ Options for the respective Jacobian approximation.
679
+
680
+ rdiff : float, optional
681
+ Relative step size to use in numerical differentiation.
682
+ method : str or callable, optional
683
+ Krylov method to use to approximate the Jacobian. Can be a string,
684
+ or a function implementing the same interface as the iterative
685
+ solvers in `scipy.sparse.linalg`. If a string, needs to be one of:
686
+ ``'lgmres'``, ``'gmres'``, ``'bicgstab'``, ``'cgs'``, ``'minres'``,
687
+ ``'tfqmr'``.
688
+
689
+ The default is `scipy.sparse.linalg.lgmres`.
690
+ inner_M : LinearOperator or InverseJacobian
691
+ Preconditioner for the inner Krylov iteration.
692
+ Note that you can use also inverse Jacobians as (adaptive)
693
+ preconditioners. For example,
694
+
695
+ >>> jac = BroydenFirst()
696
+ >>> kjac = KrylovJacobian(inner_M=jac.inverse)
697
+
698
+ If the preconditioner has a method named 'update', it will
699
+ be called as ``update(x, f)`` after each nonlinear step,
700
+ with ``x`` giving the current point, and ``f`` the current
701
+ function value.
702
+ inner_tol, inner_maxiter, ...
703
+ Parameters to pass on to the "inner" Krylov solver.
704
+ See `scipy.sparse.linalg.gmres` for details.
705
+ outer_k : int, optional
706
+ Size of the subspace kept across LGMRES nonlinear
707
+ iterations.
708
+
709
+ See `scipy.sparse.linalg.lgmres` for details.
710
+ """
711
+ pass
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_root_scalar.py ADDED
@@ -0,0 +1,525 @@
1
+ """
2
+ Unified interfaces to root finding algorithms for real or complex
3
+ scalar functions.
4
+
5
+ Functions
6
+ ---------
7
+ - root : find a root of a scalar function.
8
+ """
9
+ import numpy as np
10
+
11
+ from . import _zeros_py as optzeros
12
+ from ._numdiff import approx_derivative
13
+
14
+ __all__ = ['root_scalar']
15
+
16
+ ROOT_SCALAR_METHODS = ['bisect', 'brentq', 'brenth', 'ridder', 'toms748',
17
+ 'newton', 'secant', 'halley']
18
+
19
+
20
+ class MemoizeDer:
21
+ """Decorator that caches the value and derivative(s) of function each
22
+ time it is called.
23
+
24
+ This is a simplistic memoizer that calls and caches a single value
25
+ of `f(x, *args)`.
26
+ It assumes that `args` does not change between invocations.
27
+ It supports the use case of a root-finder where `args` is fixed,
28
+ `x` changes, and only rarely, if at all, does x assume the same value
29
+ more than once."""
30
+ def __init__(self, fun):
31
+ self.fun = fun
32
+ self.vals = None
33
+ self.x = None
34
+ self.n_calls = 0
35
+
36
+ def __call__(self, x, *args):
37
+ r"""Calculate f or use cached value if available"""
38
+ # Derivative may be requested before the function itself; always check.
39
+ if self.vals is None or x != self.x:
40
+ fg = self.fun(x, *args)
41
+ self.x = x
42
+ self.n_calls += 1
43
+ self.vals = fg[:]
44
+ return self.vals[0]
45
+
46
+ def fprime(self, x, *args):
47
+ r"""Calculate f' or use a cached value if available"""
48
+ if self.vals is None or x != self.x:
49
+ self(x, *args)
50
+ return self.vals[1]
51
+
52
+ def fprime2(self, x, *args):
53
+ r"""Calculate f'' or use a cached value if available"""
54
+ if self.vals is None or x != self.x:
55
+ self(x, *args)
56
+ return self.vals[2]
57
+
58
+ def ncalls(self):
59
+ return self.n_calls
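+
+ # Illustration (hypothetical, not part of the public API): wrapping a
+ # function that returns (f, f', f'') lets `root_scalar` query f, fprime
+ # and fprime2 at the same x while paying for a single evaluation:
+ #
+ #     memo = MemoizeDer(lambda x: (x**3 - 1, 3*x**2, 6*x))
+ #     memo(2.0), memo.fprime(2.0), memo.fprime2(2.0)  # one underlying call
+ #     memo.n_calls  # -> 1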
60
+
61
+
62
+ def root_scalar(f, args=(), method=None, bracket=None,
63
+ fprime=None, fprime2=None,
64
+ x0=None, x1=None,
65
+ xtol=None, rtol=None, maxiter=None,
66
+ options=None):
67
+ """
68
+ Find a root of a scalar function.
69
+
70
+ Parameters
71
+ ----------
72
+ f : callable
73
+ A function to find a root of.
74
+ args : tuple, optional
75
+ Extra arguments passed to the objective function and its derivative(s).
76
+ method : str, optional
77
+ Type of solver. Should be one of
78
+
79
+ - 'bisect' :ref:`(see here) <optimize.root_scalar-bisect>`
80
+ - 'brentq' :ref:`(see here) <optimize.root_scalar-brentq>`
81
+ - 'brenth' :ref:`(see here) <optimize.root_scalar-brenth>`
82
+ - 'ridder' :ref:`(see here) <optimize.root_scalar-ridder>`
83
+ - 'toms748' :ref:`(see here) <optimize.root_scalar-toms748>`
84
+ - 'newton' :ref:`(see here) <optimize.root_scalar-newton>`
85
+ - 'secant' :ref:`(see here) <optimize.root_scalar-secant>`
86
+ - 'halley' :ref:`(see here) <optimize.root_scalar-halley>`
87
+
88
+ bracket : sequence of 2 floats, optional
89
+ An interval bracketing a root. `f(x, *args)` must have different
90
+ signs at the two endpoints.
91
+ x0 : float, optional
92
+ Initial guess.
93
+ x1 : float, optional
94
+ A second guess.
95
+ fprime : bool or callable, optional
96
+ If `fprime` is a boolean and is True, `f` is assumed to return the
97
+ value of the objective function and of the derivative.
98
+ `fprime` can also be a callable returning the derivative of `f`. In
99
+ this case, it must accept the same arguments as `f`.
100
+ fprime2 : bool or callable, optional
101
+ If `fprime2` is a boolean and is True, `f` is assumed to return the
102
+ value of the objective function and of the
103
+ first and second derivatives.
104
+ `fprime2` can also be a callable returning the second derivative of `f`.
105
+ In this case, it must accept the same arguments as `f`.
106
+ xtol : float, optional
107
+ Tolerance (absolute) for termination.
108
+ rtol : float, optional
109
+ Tolerance (relative) for termination.
110
+ maxiter : int, optional
111
+ Maximum number of iterations.
112
+ options : dict, optional
113
+ A dictionary of solver options. E.g., ``k``, see
114
+ :obj:`show_options()` for details.
115
+
116
+ Returns
117
+ -------
118
+ sol : RootResults
119
+ The solution represented as a ``RootResults`` object.
120
+ Important attributes are: ``root`` the solution, ``converged`` a
121
+ boolean flag indicating if the algorithm exited successfully and
122
+ ``flag`` which describes the cause of the termination. See
123
+ `RootResults` for a description of other attributes.
124
+
125
+ See also
126
+ --------
127
+ show_options : Additional options accepted by the solvers
128
+ root : Find a root of a vector function.
129
+
130
+ Notes
131
+ -----
132
+ This section describes the available solvers that can be selected by the
133
+ 'method' parameter.
134
+
135
+ The default is to use the best method available for the situation
136
+ presented.
137
+ If a bracket is provided, it may use one of the bracketing methods.
138
+ If a derivative and an initial value are specified, it may
139
+ select one of the derivative-based methods.
140
+ If no method is judged applicable, it will raise an Exception.
141
+
142
+ Arguments for each method are as follows (x=required, o=optional).
143
+
144
+ +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
145
+ | method | f | args | bracket | x0 | x1 | fprime | fprime2 | xtol | rtol | maxiter | options |
146
+ +===============================================+===+======+=========+====+====+========+=========+======+======+=========+=========+
147
+ | :ref:`bisect <optimize.root_scalar-bisect>` | x | o | x | | | | | o | o | o | o |
148
+ +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
149
+ | :ref:`brentq <optimize.root_scalar-brentq>` | x | o | x | | | | | o | o | o | o |
150
+ +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
151
+ | :ref:`brenth <optimize.root_scalar-brenth>` | x | o | x | | | | | o | o | o | o |
152
+ +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
153
+ | :ref:`ridder <optimize.root_scalar-ridder>` | x | o | x | | | | | o | o | o | o |
154
+ +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
155
+ | :ref:`toms748 <optimize.root_scalar-toms748>` | x | o | x | | | | | o | o | o | o |
156
+ +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
157
+ | :ref:`secant <optimize.root_scalar-secant>` | x | o | | x | o | | | o | o | o | o |
158
+ +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
159
+ | :ref:`newton <optimize.root_scalar-newton>` | x | o | | x | | o | | o | o | o | o |
160
+ +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
161
+ | :ref:`halley <optimize.root_scalar-halley>` | x | o | | x | | x | x | o | o | o | o |
162
+ +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
163
+
164
+ Examples
165
+ --------
166
+
167
+ Find the root of a simple cubic
168
+
169
+ >>> from scipy import optimize
170
+ >>> def f(x):
171
+ ... return (x**3 - 1) # only one real root at x = 1
172
+
173
+ >>> def fprime(x):
174
+ ... return 3*x**2
175
+
176
+ The `brentq` method takes as input a bracket
177
+
178
+ >>> sol = optimize.root_scalar(f, bracket=[0, 3], method='brentq')
179
+ >>> sol.root, sol.iterations, sol.function_calls
180
+ (1.0, 10, 11)
181
+
182
+ The `newton` method takes as input a single point and uses the
183
+ derivative(s).
184
+
185
+ >>> sol = optimize.root_scalar(f, x0=0.2, fprime=fprime, method='newton')
186
+ >>> sol.root, sol.iterations, sol.function_calls
187
+ (1.0, 11, 22)
188
+
189
+ The function can provide the value and derivative(s) in a single call.
190
+
191
+ >>> def f_p_pp(x):
192
+ ... return (x**3 - 1), 3*x**2, 6*x
193
+
194
+ >>> sol = optimize.root_scalar(
195
+ ... f_p_pp, x0=0.2, fprime=True, method='newton'
196
+ ... )
197
+ >>> sol.root, sol.iterations, sol.function_calls
198
+ (1.0, 11, 11)
199
+
200
+ >>> sol = optimize.root_scalar(
201
+ ... f_p_pp, x0=0.2, fprime=True, fprime2=True, method='halley'
202
+ ... )
203
+ >>> sol.root, sol.iterations, sol.function_calls
204
+ (1.0, 7, 8)
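+
+ With only two initial guesses and no derivatives, the secant method is
+ selected automatically (a minimal sketch):
+
+ >>> sol = optimize.root_scalar(f, x0=1.5, x1=1.4)
+ >>> sol.converged
+ True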
205
+
206
+
207
+ """ # noqa: E501
208
+ if not isinstance(args, tuple):
209
+ args = (args,)
210
+
211
+ if options is None:
212
+ options = {}
213
+
214
+ # fun also returns the derivative(s)
215
+ is_memoized = False
216
+ if fprime2 is not None and not callable(fprime2):
217
+ if bool(fprime2):
218
+ f = MemoizeDer(f)
219
+ is_memoized = True
220
+ fprime2 = f.fprime2
221
+ fprime = f.fprime
222
+ else:
223
+ fprime2 = None
224
+ if fprime is not None and not callable(fprime):
225
+ if bool(fprime):
226
+ f = MemoizeDer(f)
227
+ is_memoized = True
228
+ fprime = f.fprime
229
+ else:
230
+ fprime = None
231
+
232
+ # Respect solver-specific default tolerances; only pass them in if actually set.
233
+ kwargs = {}
234
+ for k in ['xtol', 'rtol', 'maxiter']:
235
+ v = locals().get(k)
236
+ if v is not None:
237
+ kwargs[k] = v
238
+
239
+ # Set any solver-specific options
240
+ if options:
241
+ kwargs.update(options)
242
+ # Always request full_output from the underlying method, as root_scalar
243
+ # always returns a RootResults object.
244
+ kwargs.update(full_output=True, disp=False)
245
+
246
+ # Pick a method if not specified.
247
+ # Use the "best" method available for the situation.
248
+ if not method:
249
+ if bracket:
250
+ method = 'brentq'
251
+ elif x0 is not None:
252
+ if fprime:
253
+ if fprime2:
254
+ method = 'halley'
255
+ else:
256
+ method = 'newton'
257
+ elif x1 is not None:
258
+ method = 'secant'
259
+ else:
260
+ method = 'newton'
261
+ if not method:
262
+ raise ValueError('Unable to select a solver as neither bracket '
263
+ 'nor starting point provided.')
264
+
265
+ meth = method.lower()
266
+ map2underlying = {'halley': 'newton', 'secant': 'newton'}
267
+
268
+ try:
269
+ methodc = getattr(optzeros, map2underlying.get(meth, meth))
270
+ except AttributeError as e:
271
+ raise ValueError('Unknown solver %s' % meth) from e
272
+
273
+ if meth in ['bisect', 'ridder', 'brentq', 'brenth', 'toms748']:
274
+ if not isinstance(bracket, (list, tuple, np.ndarray)):
275
+ raise ValueError('Bracket needed for %s' % method)
276
+
277
+ a, b = bracket[:2]
278
+ try:
279
+ r, sol = methodc(f, a, b, args=args, **kwargs)
280
+ except ValueError as e:
281
+ # gh-17622 fixed some bugs in low-level solvers by raising an error
282
+ # (rather than returning incorrect results) when the callable
283
+ # returns a NaN. It did so by wrapping the callable rather than
284
+ # modifying compiled code, so the iteration count is not available.
285
+ if hasattr(e, "_x"):
286
+ sol = optzeros.RootResults(root=e._x,
287
+ iterations=np.nan,
288
+ function_calls=e._function_calls,
289
+ flag=str(e), method=method)
290
+ else:
291
+ raise
292
+
293
+ elif meth in ['secant']:
294
+ if x0 is None:
295
+ raise ValueError('x0 must not be None for %s' % method)
296
+ if 'xtol' in kwargs:
297
+ kwargs['tol'] = kwargs.pop('xtol')
298
+ r, sol = methodc(f, x0, args=args, fprime=None, fprime2=None,
299
+ x1=x1, **kwargs)
300
+ elif meth in ['newton']:
301
+ if x0 is None:
302
+ raise ValueError('x0 must not be None for %s' % method)
303
+ if not fprime:
304
+ # approximate fprime with finite differences
305
+
306
+ def fprime(x, *args):
307
+ # `root_scalar` doesn't actually seem to support vectorized
308
+ # use of `newton`. In that case, `approx_derivative` will
309
+ # always get scalar input. Nonetheless, it always returns an
310
+ # array, so we extract the element to produce scalar output.
311
+ return approx_derivative(f, x, method='2-point', args=args)[0]
312
+
313
+ if 'xtol' in kwargs:
314
+ kwargs['tol'] = kwargs.pop('xtol')
315
+ r, sol = methodc(f, x0, args=args, fprime=fprime, fprime2=None,
316
+ **kwargs)
317
+ elif meth in ['halley']:
318
+ if x0 is None:
319
+ raise ValueError('x0 must not be None for %s' % method)
320
+ if not fprime:
321
+ raise ValueError('fprime must be specified for %s' % method)
322
+ if not fprime2:
323
+ raise ValueError('fprime2 must be specified for %s' % method)
324
+ if 'xtol' in kwargs:
325
+ kwargs['tol'] = kwargs.pop('xtol')
326
+ r, sol = methodc(f, x0, args=args, fprime=fprime, fprime2=fprime2, **kwargs)
327
+ else:
328
+ raise ValueError('Unknown solver %s' % method)
329
+
330
+ if is_memoized:
331
+ # Replace the function_calls count with the memoized count.
332
+ # Avoids double and triple-counting.
333
+ n_calls = f.n_calls
334
+ sol.function_calls = n_calls
335
+
336
+ return sol
337
+
338
+
339
+ def _root_scalar_brentq_doc():
340
+ r"""
341
+ Options
342
+ -------
343
+ args : tuple, optional
344
+ Extra arguments passed to the objective function.
345
+ bracket : sequence of 2 floats, optional
346
+ An interval bracketing a root. `f(x, *args)` must have different
347
+ signs at the two endpoints.
348
+ xtol : float, optional
349
+ Tolerance (absolute) for termination.
350
+ rtol : float, optional
351
+ Tolerance (relative) for termination.
352
+ maxiter : int, optional
353
+ Maximum number of iterations.
354
+ options : dict, optional
356
+ Specifies any method-specific options not covered above.
356
+
357
+ """
358
+ pass
359
+
360
+
361
+ def _root_scalar_brenth_doc():
362
+ r"""
363
+ Options
364
+ -------
365
+ args : tuple, optional
366
+ Extra arguments passed to the objective function.
367
+ bracket : sequence of 2 floats, optional
368
+ An interval bracketing a root. `f(x, *args)` must have different
369
+ signs at the two endpoints.
370
+ xtol : float, optional
371
+ Tolerance (absolute) for termination.
372
+ rtol : float, optional
373
+ Tolerance (relative) for termination.
374
+ maxiter : int, optional
375
+ Maximum number of iterations.
376
+ options : dict, optional
377
+ Specifies any method-specific options not covered above.
378
+
379
+ """
380
+ pass
381
+
382
+ def _root_scalar_toms748_doc():
383
+ r"""
384
+ Options
385
+ -------
386
+ args : tuple, optional
387
+ Extra arguments passed to the objective function.
388
+ bracket: A sequence of 2 floats, optional
389
+ An interval bracketing a root. `f(x, *args)` must have different
390
+ signs at the two endpoints.
391
+ xtol : float, optional
392
+ Tolerance (absolute) for termination.
393
+ rtol : float, optional
394
+ Tolerance (relative) for termination.
395
+ maxiter : int, optional
396
+ Maximum number of iterations.
397
+ options: dict, optional
398
+ Specifies any method-specific options not covered above.
399
+
400
+ """
401
+ pass
402
+
403
+
404
+ def _root_scalar_secant_doc():
405
+ r"""
406
+ Options
407
+ -------
408
+ args : tuple, optional
409
+ Extra arguments passed to the objective function.
410
+ xtol : float, optional
411
+ Tolerance (absolute) for termination.
412
+ rtol : float, optional
413
+ Tolerance (relative) for termination.
414
+ maxiter : int, optional
415
+ Maximum number of iterations.
416
+ x0 : float, required
417
+ Initial guess.
418
+ x1 : float, required
419
+ A second guess.
420
+ options: dict, optional
421
+ Specifies any method-specific options not covered above.
422
+
423
+ """
424
+ pass
425
+
426
+
427
+ def _root_scalar_newton_doc():
428
+ r"""
429
+ Options
430
+ -------
431
+ args : tuple, optional
432
+ Extra arguments passed to the objective function and its derivative.
433
+ xtol : float, optional
434
+ Tolerance (absolute) for termination.
435
+ rtol : float, optional
436
+ Tolerance (relative) for termination.
437
+ maxiter : int, optional
438
+ Maximum number of iterations.
439
+ x0 : float, required
440
+ Initial guess.
441
+ fprime : bool or callable, optional
442
+ If `fprime` is a boolean and is True, `f` is assumed to return the
443
+ value of derivative along with the objective function.
444
+ `fprime` can also be a callable returning the derivative of `f`. In
445
+ this case, it must accept the same arguments as `f`.
446
+ options: dict, optional
447
+ Specifies any method-specific options not covered above.
448
+
449
+ """
450
+ pass
451
+
452
+
453
+ def _root_scalar_halley_doc():
454
+ r"""
455
+ Options
456
+ -------
457
+ args : tuple, optional
458
+ Extra arguments passed to the objective function and its derivatives.
459
+ xtol : float, optional
460
+ Tolerance (absolute) for termination.
461
+ rtol : float, optional
462
+ Tolerance (relative) for termination.
463
+ maxiter : int, optional
464
+ Maximum number of iterations.
465
+ x0 : float, required
466
+ Initial guess.
467
+ fprime : bool or callable, required
468
+ If `fprime` is a boolean and is True, `f` is assumed to return the
469
+ value of derivative along with the objective function.
470
+ `fprime` can also be a callable returning the derivative of `f`. In
471
+ this case, it must accept the same arguments as `f`.
472
+ fprime2 : bool or callable, required
473
+ If `fprime2` is a boolean and is True, `f` is assumed to return the
474
+ value of 1st and 2nd derivatives along with the objective function.
475
+ `fprime2` can also be a callable returning the 2nd derivative of `f`.
476
+ In this case, it must accept the same arguments as `f`.
477
+ options: dict, optional
478
+ Specifies any method-specific options not covered above.
479
+
480
+ """
481
+ pass
482
+
483
+
484
+ def _root_scalar_ridder_doc():
485
+ r"""
486
+ Options
487
+ -------
488
+ args : tuple, optional
489
+ Extra arguments passed to the objective function.
490
+ bracket: A sequence of 2 floats, optional
491
+ An interval bracketing a root. `f(x, *args)` must have different
492
+ signs at the two endpoints.
493
+ xtol : float, optional
494
+ Tolerance (absolute) for termination.
495
+ rtol : float, optional
496
+ Tolerance (relative) for termination.
497
+ maxiter : int, optional
498
+ Maximum number of iterations.
499
+ options: dict, optional
500
+ Specifies any method-specific options not covered above.
501
+
502
+ """
503
+ pass
504
+
505
+
506
+ def _root_scalar_bisect_doc():
507
+ r"""
508
+ Options
509
+ -------
510
+ args : tuple, optional
511
+ Extra arguments passed to the objective function.
512
+ bracket: A sequence of 2 floats, optional
513
+ An interval bracketing a root. `f(x, *args)` must have different
514
+ signs at the two endpoints.
515
+ xtol : float, optional
516
+ Tolerance (absolute) for termination.
517
+ rtol : float, optional
518
+ Tolerance (relative) for termination.
519
+ maxiter : int, optional
520
+ Maximum number of iterations.
521
+ options: dict, optional
522
+ Specifies any method-specific options not covered above.
523
+
524
+ """
525
+ pass
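
To make the dispatch above concrete, here is a minimal usage sketch of the public `root_scalar` API exercising both branches: a bracketing solver that consumes `bracket`, and `newton` with `fprime` omitted so the finite-difference fallback defined above is used (the objective and tolerance values are arbitrary illustrations):

    from scipy.optimize import root_scalar

    def f(x):
        return x**3 - 1  # single real root at x = 1

    # Bracketing branch: `bracket` is required; xtol/rtol/maxiter are
    # forwarded to the compiled solver through **kwargs.
    sol = root_scalar(f, bracket=(0, 2), method='brentq', xtol=1e-12)

    # Newton branch without `fprime`: the derivative is approximated with
    # 2-point finite differences via `approx_derivative`, per the code above.
    sol2 = root_scalar(f, x0=0.5, method='newton')

    print(sol.root, sol2.root)  # both approximately 1.0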
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo.py ADDED
@@ -0,0 +1,1595 @@
+ """shgo: The simplicial homology global optimisation algorithm."""
2
+ from collections import namedtuple
3
+ import time
4
+ import logging
5
+ import warnings
6
+ import sys
7
+
8
+ import numpy as np
9
+
10
+ from scipy import spatial
11
+ from scipy.optimize import OptimizeResult, minimize, Bounds
12
+ from scipy.optimize._optimize import MemoizeJac
13
+ from scipy.optimize._constraints import new_bounds_to_old
14
+ from scipy.optimize._minimize import standardize_constraints
15
+ from scipy._lib._util import _FunctionWrapper
16
+
17
+ from scipy.optimize._shgo_lib._complex import Complex
18
+
19
+ __all__ = ['shgo']
20
+
21
+
22
+ def shgo(
23
+ func, bounds, args=(), constraints=None, n=100, iters=1, callback=None,
24
+ minimizer_kwargs=None, options=None, sampling_method='simplicial', *,
25
+ workers=1
26
+ ):
27
+ """
28
+ Finds the global minimum of a function using SHG optimization.
29
+
30
+ SHGO stands for "simplicial homology global optimization".
31
+
32
+ Parameters
33
+ ----------
34
+ func : callable
35
+ The objective function to be minimized. Must be in the form
36
+ ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
37
+ and ``args`` is a tuple of any additional fixed parameters needed to
38
+ completely specify the function.
39
+ bounds : sequence or `Bounds`
40
+ Bounds for variables. There are two ways to specify the bounds:
41
+
42
+ 1. Instance of `Bounds` class.
43
+ 2. Sequence of ``(min, max)`` pairs for each element in `x`.
44
+
45
+ args : tuple, optional
46
+ Any additional fixed parameters needed to completely specify the
47
+ objective function.
48
+ constraints : {Constraint, dict} or List of {Constraint, dict}, optional
49
+ Constraints definition. Only for COBYLA, SLSQP and trust-constr.
50
+ See the tutorial [5]_ for further details on specifying constraints.
51
+
52
+ .. note::
53
+
54
+ Only COBYLA, SLSQP, and trust-constr local minimize methods
55
+ currently support constraint arguments. If the ``constraints``
56
+ sequence used in the local optimization problem is not defined in
57
+ ``minimizer_kwargs`` and a constrained method is used then the
58
+ global ``constraints`` will be used.
59
+ (Defining a ``constraints`` sequence in ``minimizer_kwargs``
60
+ means that ``constraints`` will not be added so if equality
61
+ constraints and so forth need to be added then the inequality
62
+ functions in ``constraints`` need to be added to
63
+ ``minimizer_kwargs`` too).
64
+ COBYLA only supports inequality constraints.
65
+
66
+ .. versionchanged:: 1.11.0
67
+
68
+ ``constraints`` accepts `NonlinearConstraint`, `LinearConstraint`.
69
+
70
+ n : int, optional
71
+ Number of sampling points used in the construction of the simplicial
72
+ complex. For the default ``simplicial`` sampling method 2**dim + 1
73
+ sampling points are generated instead of the default `n=100`. For all
74
+ other specified values `n` sampling points are generated. For
75
+ ``sobol``, ``halton`` and other arbitrary `sampling_methods` `n=100` or
76
+ another specified number of sampling points are generated.
77
+ iters : int, optional
78
+ Number of iterations used in the construction of the simplicial
79
+ complex. Default is 1.
80
+ callback : callable, optional
81
+ Called after each iteration, as ``callback(xk)``, where ``xk`` is the
82
+ current parameter vector.
83
+ minimizer_kwargs : dict, optional
84
+ Extra keyword arguments to be passed to the minimizer
85
+ ``scipy.optimize.minimize`` Some important options could be:
86
+
87
+ * method : str
88
+ The minimization method. If not given, chosen to be one of
89
+ BFGS, L-BFGS-B, SLSQP, depending on whether or not the
90
+ problem has constraints or bounds.
91
+ * args : tuple
92
+ Extra arguments passed to the objective function (``func``) and
93
+ its derivatives (Jacobian, Hessian).
94
+ * options : dict, optional
95
+ Note that by default the tolerance is specified as
96
+ ``{ftol: 1e-12}``
97
+
98
+ options : dict, optional
99
+ A dictionary of solver options. Many of the options specified for the
100
+ global routine are also passed to the scipy.optimize.minimize routine.
101
+ The options that are also passed to the local routine are marked with
102
+ "(L)".
103
+
104
+ Stopping criteria, the algorithm will terminate if any of the specified
105
+ criteria are met. However, the default algorithm does not require any
106
+ to be specified:
107
+
108
+ * maxfev : int (L)
109
+ Maximum number of function evaluations in the feasible domain.
110
+ (Note only methods that support this option will terminate
111
+ the routine at precisely exact specified value. Otherwise the
112
+ criterion will only terminate during a global iteration)
113
+ * f_min
114
+ Specify the minimum objective function value, if it is known.
115
+ * f_tol : float
116
+ Precision goal for the value of f in the stopping
117
+ criterion. Note that the global routine will also
118
+ terminate if a sampling point in the global routine is
119
+ within this tolerance.
120
+ * maxiter : int
121
+ Maximum number of iterations to perform.
122
+ * maxev : int
123
+ Maximum number of sampling evaluations to perform (includes
124
+ searching in infeasible points).
125
+ * maxtime : float
126
+ Maximum processing runtime allowed
127
+ * minhgrd : int
128
+ Minimum homology group rank differential. The homology group of the
129
+ objective function is calculated (approximately) during every
130
+ iteration. The rank of this group has a one-to-one correspondence
131
+ with the number of locally convex subdomains in the objective
132
+ function (after adequate sampling points each of these subdomains
133
+ contain a unique global minimum). If the difference in the hgr is 0
134
+ between iterations for ``maxhgrd`` specified iterations the
135
+ algorithm will terminate.
136
+
137
+ Objective function knowledge:
138
+
139
+ * symmetry : list or bool
140
+ Specify if the objective function contains symmetric variables.
141
+ The search space (and therefore performance) is decreased by up to
142
+ O(n!) times in the fully symmetric case. If `True` is specified
143
+ then all variables will be set symmetric to the first variable.
144
+ Default
145
+ is set to False.
146
+
147
+ E.g. f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2
148
+
149
+ In this equation x_2 and x_3 are symmetric to x_1, while x_5 and
150
+ x_6 are symmetric to x_4, this can be specified to the solver as:
151
+
152
+ symmetry = [0, # Variable 1
153
+ 0, # symmetric to variable 1
154
+ 0, # symmetric to variable 1
155
+ 3, # Variable 4
156
+ 3, # symmetric to variable 4
157
+ 3, # symmetric to variable 4
158
+ ]
159
+
160
+ * jac : bool or callable, optional
161
+ Jacobian (gradient) of objective function. Only for CG, BFGS,
162
+ Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg. If ``jac`` is a
163
+ boolean and is True, ``fun`` is assumed to return the gradient
164
+ along with the objective function. If False, the gradient will be
165
+ estimated numerically. ``jac`` can also be a callable returning the
166
+ gradient of the objective. In this case, it must accept the same
167
+ arguments as ``fun``. (Passed to `scipy.optimize.minimize`
168
+ automatically)
169
+
170
+ * hess, hessp : callable, optional
171
+ Hessian (matrix of second-order derivatives) of objective function
172
+ or Hessian of objective function times an arbitrary vector p.
173
+ Only for Newton-CG, dogleg, trust-ncg. Only one of ``hessp`` or
174
+ ``hess`` needs to be given. If ``hess`` is provided, then
175
+ ``hessp`` will be ignored. If neither ``hess`` nor ``hessp`` is
176
+ provided, then the Hessian product will be approximated using
177
+ finite differences on ``jac``. ``hessp`` must compute the Hessian
178
+ times an arbitrary vector. (Passed to `scipy.optimize.minimize`
179
+ automatically)
180
+
181
+ Algorithm settings:
182
+
183
+ * minimize_every_iter : bool
184
+ If True then promising global sampling points will be passed to a
185
+ local minimization routine every iteration. If True then only the
186
+ final minimizer pool will be run. Defaults to True.
187
+ * local_iter : int
188
+ Only evaluate a few of the best minimizer pool candidates every
189
+ iteration. If False all potential points are passed to the local
190
+ minimization routine.
191
+ * infty_constraints : bool
192
+ If True then any sampling points generated which are outside will
193
+ the feasible domain will be saved and given an objective function
194
+ value of ``inf``. If False then these points will be discarded.
195
+ Using this functionality could lead to higher performance with
196
+ respect to function evaluations before the global minimum is found,
197
+ specifying False will use less memory at the cost of a slight
198
+ decrease in performance. Defaults to True.
199
+
200
+ Feedback:
201
+
202
+ * disp : bool (L)
203
+ Set to True to print convergence messages.
204
+
205
+ sampling_method : str or function, optional
206
+ Current built in sampling method options are ``halton``, ``sobol`` and
207
+ ``simplicial``. The default ``simplicial`` provides
208
+ the theoretical guarantee of convergence to the global minimum in
209
+ finite time. ``halton`` and ``sobol`` method are faster in terms of
210
+ sampling point generation at the cost of the loss of
211
+ guaranteed convergence. It is more appropriate for most "easier"
212
+ problems where the convergence is relatively fast.
213
+ User defined sampling functions must accept two arguments of ``n``
214
+ sampling points of dimension ``dim`` per call and output an array of
215
+ sampling points with shape `n x dim`.
216
+
217
+ workers : int or map-like callable, optional
218
+ Sample and run the local serial minimizations in parallel.
219
+ Supply -1 to use all available CPU cores, or an int to use
220
+ that many Processes (uses `multiprocessing.Pool <multiprocessing>`).
221
+
222
+ Alternatively supply a map-like callable, such as
223
+ `multiprocessing.Pool.map` for parallel evaluation.
224
+ This evaluation is carried out as ``workers(func, iterable)``.
225
+ Requires that `func` be pickleable.
226
+
227
+ .. versionadded:: 1.11.0
228
+
229
+ Returns
230
+ -------
231
+ res : OptimizeResult
232
+ The optimization result represented as a `OptimizeResult` object.
233
+ Important attributes are:
234
+ ``x`` the solution array corresponding to the global minimum,
235
+ ``fun`` the function output at the global solution,
236
+ ``xl`` an ordered list of local minima solutions,
237
+ ``funl`` the function output at the corresponding local solutions,
238
+ ``success`` a Boolean flag indicating if the optimizer exited
239
+ successfully,
240
+ ``message`` which describes the cause of the termination,
241
+ ``nfev`` the total number of objective function evaluations including
242
+ the sampling calls,
243
+ ``nlfev`` the total number of objective function evaluations
244
+ culminating from all local search optimizations,
245
+ ``nit`` number of iterations performed by the global routine.
246
+
247
+ Notes
248
+ -----
249
+ Global optimization using simplicial homology global optimization [1]_.
250
+ Appropriate for solving general purpose NLP and blackbox optimization
251
+ problems to global optimality (low-dimensional problems).
252
+
253
+ In general, the optimization problems are of the form::
254
+
255
+ minimize f(x) subject to
256
+
257
+ g_i(x) >= 0, i = 1,...,m
258
+ h_j(x) = 0, j = 1,...,p
259
+
260
+ where x is a vector of one or more variables. ``f(x)`` is the objective
261
+ function ``R^n -> R``, ``g_i(x)`` are the inequality constraints, and
262
+ ``h_j(x)`` are the equality constraints.
263
+
264
+ Optionally, the lower and upper bounds for each element in x can also be
265
+ specified using the `bounds` argument.
266
+
267
+ While most of the theoretical advantages of SHGO are only proven for when
268
+ ``f(x)`` is a Lipschitz smooth function, the algorithm is also proven to
269
+ converge to the global optimum for the more general case where ``f(x)`` is
270
+ non-continuous, non-convex and non-smooth, if the default sampling method
271
+ is used [1]_.
272
+
273
+ The local search method may be specified using the ``minimizer_kwargs``
274
+ parameter which is passed on to ``scipy.optimize.minimize``. By default,
275
+ the ``SLSQP`` method is used. In general, it is recommended to use the
276
+ ``SLSQP`` or ``COBYLA`` local minimization if inequality constraints
277
+ are defined for the problem since the other methods do not use constraints.
278
+
279
+ The ``halton`` and ``sobol`` method points are generated using
280
+ `scipy.stats.qmc`. Any other QMC method could be used.
281
+
282
+ References
283
+ ----------
284
+ .. [1] Endres, SC, Sandrock, C, Focke, WW (2018) "A simplicial homology
285
+ algorithm for lipschitz optimisation", Journal of Global
286
+ Optimization.
287
+ .. [2] Joe, SW and Kuo, FY (2008) "Constructing Sobol' sequences with
288
+ better two-dimensional projections", SIAM J. Sci. Comput. 30,
289
+ 2635-2654.
290
+ .. [3] Hock, W and Schittkowski, K (1981) "Test examples for nonlinear
291
+ programming codes", Lecture Notes in Economics and Mathematical
292
+ Systems, 187. Springer-Verlag, New York.
293
+ http://www.ai7.uni-bayreuth.de/test_problem_coll.pdf
294
+ .. [4] Wales, DJ (2015) "Perspective: Insight into reaction coordinates and
295
+ dynamics from the potential energy landscape",
296
+ Journal of Chemical Physics, 142(13), 2015.
297
+ .. [5] https://docs.scipy.org/doc/scipy/tutorial/optimize.html#constrained-minimization-of-multivariate-scalar-functions-minimize
298
+
299
+ Examples
300
+ --------
301
+ First consider the problem of minimizing the Rosenbrock function, `rosen`:
302
+
303
+ >>> from scipy.optimize import rosen, shgo
304
+ >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)]
305
+ >>> result = shgo(rosen, bounds)
306
+ >>> result.x, result.fun
307
+ (array([1., 1., 1., 1., 1.]), 2.920392374190081e-18)
308
+
309
+ Note that bounds determine the dimensionality of the objective
310
+ function and is therefore a required input, however you can specify
311
+ empty bounds using ``None`` or objects like ``np.inf`` which will be
312
+ converted to large float numbers.
313
+
314
+ >>> bounds = [(None, None), ]*4
315
+ >>> result = shgo(rosen, bounds)
316
+ >>> result.x
317
+ array([0.99999851, 0.99999704, 0.99999411, 0.9999882 ])
318
+
319
+ Next, we consider the Eggholder function, a problem with several local
320
+ minima and one global minimum. We will demonstrate the use of arguments and
321
+ the capabilities of `shgo`.
322
+ (https://en.wikipedia.org/wiki/Test_functions_for_optimization)
323
+
324
+ >>> import numpy as np
325
+ >>> def eggholder(x):
326
+ ... return (-(x[1] + 47.0)
327
+ ... * np.sin(np.sqrt(abs(x[0]/2.0 + (x[1] + 47.0))))
328
+ ... - x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47.0))))
329
+ ... )
330
+ ...
331
+ >>> bounds = [(-512, 512), (-512, 512)]
332
+
333
+ `shgo` has built-in low discrepancy sampling sequences. First, we will
334
+ input 64 initial sampling points of the *Sobol'* sequence:
335
+
336
+ >>> result = shgo(eggholder, bounds, n=64, sampling_method='sobol')
337
+ >>> result.x, result.fun
338
+ (array([512. , 404.23180824]), -959.6406627208397)
339
+
340
+ `shgo` also has a return for any other local minima that was found, these
341
+ can be called using:
342
+
343
+ >>> result.xl
344
+ array([[ 512. , 404.23180824],
345
+ [ 283.0759062 , -487.12565635],
346
+ [-294.66820039, -462.01964031],
347
+ [-105.87688911, 423.15323845],
348
+ [-242.97926 , 274.38030925],
349
+ [-506.25823477, 6.3131022 ],
350
+ [-408.71980731, -156.10116949],
351
+ [ 150.23207937, 301.31376595],
352
+ [ 91.00920901, -391.283763 ],
353
+ [ 202.89662724, -269.38043241],
354
+ [ 361.66623976, -106.96493868],
355
+ [-219.40612786, -244.06020508]])
356
+
357
+ >>> result.funl
358
+ array([-959.64066272, -718.16745962, -704.80659592, -565.99778097,
359
+ -559.78685655, -557.36868733, -507.87385942, -493.9605115 ,
360
+ -426.48799655, -421.15571437, -419.31194957, -410.98477763])
361
+
362
+ These results are useful in applications where there are many global minima
363
+ and the values of other global minima are desired or where the local minima
364
+ can provide insight into the system (for example morphologies
365
+ in physical chemistry [4]_).
366
+
367
+ If we want to find a larger number of local minima, we can increase the
368
+ number of sampling points or the number of iterations. We'll increase the
369
+ number of sampling points to 64 and the number of iterations from the
370
+ default of 1 to 3. Using ``simplicial`` this would have given us
371
+ 64 x 3 = 192 initial sampling points.
372
+
373
+ >>> result_2 = shgo(eggholder,
374
+ ... bounds, n=64, iters=3, sampling_method='sobol')
375
+ >>> len(result.xl), len(result_2.xl)
376
+ (12, 23)
377
+
378
+ Note the difference between, e.g., ``n=192, iters=1`` and ``n=64,
379
+ iters=3``.
380
+ In the first case the promising points contained in the minimiser pool
381
+ are processed only once. In the latter case it is processed every 64
382
+ sampling points for a total of 3 times.
383
+
384
+ To demonstrate solving problems with non-linear constraints consider the
385
+ following example from Hock and Schittkowski problem 73 (cattle-feed)
386
+ [3]_::
387
+
388
+ minimize: f = 24.55 * x_1 + 26.75 * x_2 + 39 * x_3 + 40.50 * x_4
389
+
390
+ subject to: 2.3 * x_1 + 5.6 * x_2 + 11.1 * x_3 + 1.3 * x_4 - 5 >= 0,
391
+
392
+ 12 * x_1 + 11.9 * x_2 + 41.8 * x_3 + 52.1 * x_4 - 21
393
+ -1.645 * sqrt(0.28 * x_1**2 + 0.19 * x_2**2 +
394
+ 20.5 * x_3**2 + 0.62 * x_4**2) >= 0,
395
+
396
+ x_1 + x_2 + x_3 + x_4 - 1 == 0,
397
+
398
+ 1 >= x_i >= 0 for all i
399
+
400
+ The approximate answer given in [3]_ is::
401
+
402
+ f([0.6355216, -0.12e-11, 0.3127019, 0.05177655]) = 29.894378
403
+
404
+ >>> def f(x): # (cattle-feed)
405
+ ... return 24.55*x[0] + 26.75*x[1] + 39*x[2] + 40.50*x[3]
406
+ ...
407
+ >>> def g1(x):
408
+ ... return 2.3*x[0] + 5.6*x[1] + 11.1*x[2] + 1.3*x[3] - 5 # >=0
409
+ ...
410
+ >>> def g2(x):
411
+ ... return (12*x[0] + 11.9*x[1] +41.8*x[2] + 52.1*x[3] - 21
412
+ ... - 1.645 * np.sqrt(0.28*x[0]**2 + 0.19*x[1]**2
413
+ ... + 20.5*x[2]**2 + 0.62*x[3]**2)
414
+ ... ) # >=0
415
+ ...
416
+ >>> def h1(x):
417
+ ... return x[0] + x[1] + x[2] + x[3] - 1 # == 0
418
+ ...
419
+ >>> cons = ({'type': 'ineq', 'fun': g1},
420
+ ... {'type': 'ineq', 'fun': g2},
421
+ ... {'type': 'eq', 'fun': h1})
422
+ >>> bounds = [(0, 1.0),]*4
423
+ >>> res = shgo(f, bounds, n=150, constraints=cons)
424
+ >>> res
425
+ message: Optimization terminated successfully.
426
+ success: True
427
+ fun: 29.894378159142136
428
+ funl: [ 2.989e+01]
429
+ x: [ 6.355e-01 1.137e-13 3.127e-01 5.178e-02] # may vary
430
+ xl: [[ 6.355e-01 1.137e-13 3.127e-01 5.178e-02]] # may vary
431
+ nit: 1
432
+ nfev: 142 # may vary
433
+ nlfev: 35 # may vary
434
+ nljev: 5
435
+ nlhev: 0
436
+
437
+ >>> g1(res.x), g2(res.x), h1(res.x)
438
+ (-5.062616992290714e-14, -2.9594104944408173e-12, 0.0)
439
+
440
+ """
+    # if necessary, convert bounds class to old bounds
+    if isinstance(bounds, Bounds):
+        bounds = new_bounds_to_old(bounds.lb, bounds.ub, len(bounds.lb))
+
+    # Initiate SHGO class
+    # use in context manager to make sure that any parallelization
+    # resources are freed.
+    with SHGO(func, bounds, args=args, constraints=constraints, n=n,
+              iters=iters, callback=callback,
+              minimizer_kwargs=minimizer_kwargs,
+              options=options, sampling_method=sampling_method,
+              workers=workers) as shc:
+        # Run the algorithm, process results and test success
+        shc.iterate_all()
+
+        if not shc.break_routine:
+            if shc.disp:
+                logging.info("Successfully completed construction of complex.")
+
+        # Test post iterations success
+        if len(shc.LMC.xl_maps) == 0:
+            # If sampling failed to find pool, return lowest sampled point
+            # with a warning
+            shc.find_lowest_vertex()
+            shc.break_routine = True
+            shc.fail_routine(mes="Failed to find a feasible minimizer point. "
+                                 f"Lowest sampling point = {shc.f_lowest}")
+            shc.res.fun = shc.f_lowest
+            shc.res.x = shc.x_lowest
+            shc.res.nfev = shc.fn
+            shc.res.tnev = shc.n_sampled
+        else:
+            # Test that the optimal solutions do not violate any constraints
+            pass  # TODO
+
+        # Confirm the routine ran successfully
+        if not shc.break_routine:
+            shc.res.message = 'Optimization terminated successfully.'
+            shc.res.success = True
+
+        # Return the final results
+        return shc.res
+
+
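
The stopping-criteria options documented above can be combined at call time; a minimal sketch, assuming the true minimum of the toy objective (0.0) is known so that `f_min`/`f_tol` can trigger early termination (the objective and limit values are arbitrary illustrations):

    import numpy as np
    from scipy.optimize import shgo

    def sphere(x):
        return np.sum(x**2)  # known global minimum f* = 0.0 at the origin

    res = shgo(sphere, bounds=[(-5, 5)] * 3,
               options={'f_min': 0.0,   # known minimum enables early stop
                        'f_tol': 1e-6,  # precision goal on f
                        'maxtime': 10,  # wall-clock limit, seconds
                        'disp': False})
    print(res.x, res.fun)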
+class SHGO:
+    def __init__(self, func, bounds, args=(), constraints=None, n=None,
+                 iters=None, callback=None, minimizer_kwargs=None,
+                 options=None, sampling_method='simplicial', workers=1):
+        from scipy.stats import qmc
+        # Input checks
+        methods = ['halton', 'sobol', 'simplicial']
+        if isinstance(sampling_method, str) and sampling_method not in methods:
+            raise ValueError(("Unknown sampling_method specified."
+                              " Valid methods: {}").format(', '.join(methods)))
+
+        # Split obj func if given with Jac
+        try:
+            if ((minimizer_kwargs['jac'] is True) and
+                    (not callable(minimizer_kwargs['jac']))):
+                self.func = MemoizeJac(func)
+                jac = self.func.derivative
+                minimizer_kwargs['jac'] = jac
+                func = self.func  # .fun
+            else:
+                self.func = func  # Normal definition of objective function
+        except (TypeError, KeyError):
+            self.func = func  # Normal definition of objective function
+
+        # Initiate class
+        self.func = _FunctionWrapper(func, args)
+        self.bounds = bounds
+        self.args = args
+        self.callback = callback
+
+        # Bounds
+        abound = np.array(bounds, float)
+        self.dim = np.shape(abound)[0]  # Dimensionality of problem
+
+        # Set non-finite values to large floats
+        infind = ~np.isfinite(abound)
+        abound[infind[:, 0], 0] = -1e50
+        abound[infind[:, 1], 1] = 1e50
+
+        # Check if bounds are correctly specified
+        bnderr = abound[:, 0] > abound[:, 1]
+        if bnderr.any():
+            raise ValueError('Error: lb > ub in bounds {}.'
+                             .format(', '.join(str(b) for b in bnderr)))
+
+        self.bounds = abound
+
+        # Constraints
+        # Process constraint dict sequence:
+        self.constraints = constraints
+        if constraints is not None:
+            self.min_cons = constraints
+            self.g_cons = []
+            self.g_args = []
+
+            # shgo internals deal with old-style constraints;
+            # self.constraints is used to create Complex, so it needs
+            # to be stored internally in old-style.
+            # `minimize` takes care of normalising these constraints
+            # for slsqp/cobyla/trust-constr.
+            self.constraints = standardize_constraints(
+                constraints,
+                np.empty(self.dim, float),
+                'old'
+            )
+            for cons in self.constraints:
+                if cons['type'] == 'ineq':
+                    self.g_cons.append(cons['fun'])
+                    try:
+                        self.g_args.append(cons['args'])
+                    except KeyError:
+                        self.g_args.append(())
+            self.g_cons = tuple(self.g_cons)
+            self.g_args = tuple(self.g_args)
+        else:
+            self.g_cons = None
+            self.g_args = None
+
+        # Define local minimization keyword arguments
+        # Start with defaults
+        self.minimizer_kwargs = {'method': 'SLSQP',
+                                 'bounds': self.bounds,
+                                 'options': {},
+                                 'callback': self.callback
+                                 }
+        if minimizer_kwargs is not None:
+            # Overwrite with supplied values
+            self.minimizer_kwargs.update(minimizer_kwargs)
+
+        else:
+            self.minimizer_kwargs['options'] = {'ftol': 1e-12}
+
+        if (
+            self.minimizer_kwargs['method'].lower() in ('slsqp', 'cobyla',
+                                                        'trust-constr')
+            and (
+                minimizer_kwargs is not None and
+                'constraints' not in minimizer_kwargs and
+                constraints is not None
+            ) or
+            (self.g_cons is not None)
+        ):
+            self.minimizer_kwargs['constraints'] = self.min_cons
+
+        # Process options dict
+        if options is not None:
+            self.init_options(options)
+        else:  # Default settings:
+            self.f_min_true = None
+            self.minimize_every_iter = True
+
+            # Algorithm limits
+            self.maxiter = None
+            self.maxfev = None
+            self.maxev = None
+            self.maxtime = None
+            self.f_min_true = None
+            self.minhgrd = None
+
+            # Objective function knowledge
+            self.symmetry = None
+
+            # Algorithm functionality
+            self.infty_cons_sampl = True
+            self.local_iter = False
+
+            # Feedback
+            self.disp = False
+
+        # Remove unknown arguments in self.minimizer_kwargs
+        # Start with arguments all the solvers have in common
+        self.min_solver_args = ['fun', 'x0', 'args',
+                                'callback', 'options', 'method']
+        # then add the ones unique to specific solvers
+        solver_args = {
+            '_custom': ['jac', 'hess', 'hessp', 'bounds', 'constraints'],
+            'nelder-mead': [],
+            'powell': [],
+            'cg': ['jac'],
+            'bfgs': ['jac'],
+            'newton-cg': ['jac', 'hess', 'hessp'],
+            'l-bfgs-b': ['jac', 'bounds'],
+            'tnc': ['jac', 'bounds'],
+            'cobyla': ['constraints', 'catol'],
+            'slsqp': ['jac', 'bounds', 'constraints'],
+            'dogleg': ['jac', 'hess'],
+            'trust-ncg': ['jac', 'hess', 'hessp'],
+            'trust-krylov': ['jac', 'hess', 'hessp'],
+            'trust-exact': ['jac', 'hess'],
+            'trust-constr': ['jac', 'hess', 'hessp', 'constraints'],
+        }
+        method = self.minimizer_kwargs['method']
+        self.min_solver_args += solver_args[method.lower()]
+
+        # Only retain the known arguments
+        def _restrict_to_keys(dictionary, goodkeys):
+            """Remove keys from dictionary if not in goodkeys - inplace"""
+            existingkeys = set(dictionary)
+            for key in existingkeys - set(goodkeys):
+                dictionary.pop(key, None)
+
+        _restrict_to_keys(self.minimizer_kwargs, self.min_solver_args)
+        _restrict_to_keys(self.minimizer_kwargs['options'],
+                          self.min_solver_args + ['ftol'])
+
+        # Algorithm controls
+        # Global controls
+        self.stop_global = False  # Used in the stopping_criteria method
+        self.break_routine = False  # Break the algorithm globally
+        self.iters = iters  # Iterations to be run
+        self.iters_done = 0  # Iterations completed
+        self.n = n  # Sampling points per iteration
+        self.nc = 0  # n  # Sampling points to sample in current iteration
+        self.n_prc = 0  # Processed points (used to track Delaunay iters)
+        self.n_sampled = 0  # To track no. of sampling points already generated
+        self.fn = 0  # Number of feasible sampling points evaluations performed
+        self.hgr = 0  # Homology group rank
+        # Initially attempt to build the triangulation incrementally:
+        self.qhull_incremental = True
+
+        # Default settings if no sampling criteria.
+        if (self.n is None) and (self.iters is None) \
+                and (sampling_method == 'simplicial'):
+            self.n = 2 ** self.dim + 1
+            self.nc = 0  # self.n
+        if self.iters is None:
+            self.iters = 1
+        if (self.n is None) and not (sampling_method == 'simplicial'):
+            self.n = 100
+            self.nc = 0  # self.n
+        if (self.n == 100) and (sampling_method == 'simplicial'):
+            self.n = 2 ** self.dim + 1
+
+        if not ((self.maxiter is None) and (self.maxfev is None) and (
+                self.maxev is None)
+                and (self.minhgrd is None) and (self.f_min_true is None)):
+            self.iters = None
+
+        # Set complex construction mode based on a provided stopping criteria:
+        # Initialise sampling Complex and function cache
+        # Note that sfield_args=() since args are already wrapped in self.func
+        # using the _FunctionWrapper class.
+        self.HC = Complex(dim=self.dim, domain=self.bounds,
+                          sfield=self.func, sfield_args=(),
+                          symmetry=self.symmetry,
+                          constraints=self.constraints,
+                          workers=workers)
+
+        # Choose complex constructor
+        if sampling_method == 'simplicial':
+            self.iterate_complex = self.iterate_hypercube
+            self.sampling_method = sampling_method
+
+        elif sampling_method in ['halton', 'sobol'] or \
+                not isinstance(sampling_method, str):
+            self.iterate_complex = self.iterate_delaunay
+            # Sampling method used
+            if sampling_method in ['halton', 'sobol']:
+                if sampling_method == 'sobol':
+                    self.n = int(2 ** np.ceil(np.log2(self.n)))
+                    # self.n  # TODO: Should always be self.n, this is
+                    # unacceptable for shgo, check that nfev behaves as
+                    # expected.
+                    self.nc = 0
+                    self.sampling_method = 'sobol'
+                    self.qmc_engine = qmc.Sobol(d=self.dim, scramble=False,
+                                                seed=0)
+                else:
+                    self.sampling_method = 'halton'
+                    self.qmc_engine = qmc.Halton(d=self.dim, scramble=True,
+                                                 seed=0)
+
+                def sampling_method(n, d):
+                    return self.qmc_engine.random(n)
+
+            else:
+                # A user defined sampling method:
+                self.sampling_method = 'custom'
+
+            self.sampling = self.sampling_custom
+            self.sampling_function = sampling_method  # F(n, d)
+
+        # Local controls
+        self.stop_l_iter = False  # Local minimisation iterations
+        self.stop_complex_iter = False  # Sampling iterations
+
+        # Initiate storage objects used in algorithm classes
+        self.minimizer_pool = []
+
+        # Cache of local minimizers mapped
+        self.LMC = LMapCache()
+
+        # Initialize return object
+        self.res = OptimizeResult()  # scipy.optimize.OptimizeResult object
+        self.res.nfev = 0  # Includes each sampling point as func evaluation
+        self.res.nlfev = 0  # Local function evals for all minimisers
+        self.res.nljev = 0  # Local Jacobian evals for all minimisers
+        self.res.nlhev = 0  # Local Hessian evals for all minimisers
+
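
The "Split obj func if given with Jac" branch above allows an objective that returns ``(f, grad)`` in one call; a minimal sketch of that calling pattern (the quadratic objective is an arbitrary illustration):

    import numpy as np
    from scipy.optimize import shgo

    def fun_and_grad(x):
        # Single callable returning value and gradient; with jac=True the
        # MemoizeJac wrapper above caches the gradient for the local solver.
        return np.sum(x**2), 2 * x

    res = shgo(fun_and_grad, bounds=[(-1, 1)] * 2,
               minimizer_kwargs={'method': 'SLSQP', 'jac': True})
    print(res.x, res.fun)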
745
+ def init_options(self, options):
746
+ """
747
+ Initiates the options.
748
+
749
+ Can also be useful to change parameters after class initiation.
750
+
751
+ Parameters
752
+ ----------
753
+ options : dict
754
+
755
+ Returns
756
+ -------
757
+ None
758
+
759
+ """
760
+ # Update 'options' dict passed to optimize.minimize
761
+ # Do this first so we don't mutate `options` below.
762
+ self.minimizer_kwargs['options'].update(options)
763
+
764
+ # Ensure that 'jac', 'hess', and 'hessp' are passed directly to
765
+ # `minimize` as keywords, not as part of its 'options' dictionary.
766
+ for opt in ['jac', 'hess', 'hessp']:
767
+ if opt in self.minimizer_kwargs['options']:
768
+ self.minimizer_kwargs[opt] = (
769
+ self.minimizer_kwargs['options'].pop(opt))
770
+
771
+ # Default settings:
772
+ self.minimize_every_iter = options.get('minimize_every_iter', True)
773
+
774
+ # Algorithm limits
775
+ # Maximum number of iterations to perform.
776
+ self.maxiter = options.get('maxiter', None)
777
+ # Maximum number of function evaluations in the feasible domain
778
+ self.maxfev = options.get('maxfev', None)
779
+ # Maximum number of sampling evaluations (includes searching in
780
+ # infeasible points
781
+ self.maxev = options.get('maxev', None)
782
+ # Maximum processing runtime allowed
783
+ self.init = time.time()
784
+ self.maxtime = options.get('maxtime', None)
785
+ if 'f_min' in options:
786
+ # Specify the minimum objective function value, if it is known.
787
+ self.f_min_true = options['f_min']
788
+ self.f_tol = options.get('f_tol', 1e-4)
789
+ else:
790
+ self.f_min_true = None
791
+
792
+ self.minhgrd = options.get('minhgrd', None)
793
+
794
+ # Objective function knowledge
795
+ self.symmetry = options.get('symmetry', False)
796
+ if self.symmetry:
797
+ self.symmetry = [0, ]*len(self.bounds)
798
+ else:
799
+ self.symmetry = None
800
+ # Algorithm functionality
801
+ # Only evaluate a few of the best candidates
802
+ self.local_iter = options.get('local_iter', False)
803
+ self.infty_cons_sampl = options.get('infty_constraints', True)
804
+
805
+ # Feedback
806
+ self.disp = options.get('disp', False)
807
+
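
Because `init_options` hoists ``'jac'``, ``'hess'``, and ``'hessp'`` out of the options dict, derivatives may be supplied through ``options`` as well as through ``minimizer_kwargs``; a minimal sketch (objective and gradient are arbitrary illustrations):

    import numpy as np
    from scipy.optimize import shgo

    def f(x):
        return np.sum(x**2)

    def grad(x):
        return 2 * x

    # 'jac' given in `options` is moved to the `minimize` keywords by
    # `init_options`; equivalent to minimizer_kwargs={'jac': grad}.
    res = shgo(f, bounds=[(-2, 2)] * 2, options={'jac': grad, 'maxiter': 3})
    print(res.x, res.fun)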
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        return self.HC.V._mapwrapper.__exit__(*args)
+
+    # Iteration properties
+    # Main construction loop:
+    def iterate_all(self):
+        """
+        Construct for `iters` iterations.
+
+        If uniform sampling is used, every iteration adds 'n' sampling points.
+
+        Iterations stop if a stopping criterion (e.g., sampling points or
+        processing time) has been met.
+
+        """
+        if self.disp:
+            logging.info('Splitting first generation')
+
+        while not self.stop_global:
+            if self.break_routine:
+                break
+            # Iterate complex, process minimisers
+            self.iterate()
+            self.stopping_criteria()
+
+        # Build minimiser pool
+        # Final iteration only needed if pools weren't minimised every
+        # iteration
+        if not self.minimize_every_iter:
+            if not self.break_routine:
+                self.find_minima()
+
+        self.res.nit = self.iters_done  # + 1
+        self.fn = self.HC.V.nfev
+
+    def find_minima(self):
+        """
+        Construct the minimizer pool, map the minimizers to local minima
+        and sort the results into a global return object.
+        """
+        if self.disp:
+            logging.info('Searching for minimizer pool...')
+
+        self.minimizers()
+
+        if len(self.X_min) != 0:
+            # Minimize the pool of minimizers with local minimization methods
+            # Note that if Options['local_iter'] is an `int` instead of default
+            # value False then only that number of candidates will be minimized
+            self.minimise_pool(self.local_iter)
+            # Sort results and build the global return object
+            self.sort_result()
+
+            # Lowest values used to report in case of failures
+            self.f_lowest = self.res.fun
+            self.x_lowest = self.res.x
+        else:
+            self.find_lowest_vertex()
+
+        if self.disp:
+            logging.info(f"Minimiser pool = SHGO.X_min = {self.X_min}")
+
+    def find_lowest_vertex(self):
+        # Find the lowest objective function value on one of
+        # the vertices of the simplicial complex
+        self.f_lowest = np.inf
+        for x in self.HC.V.cache:
+            if self.HC.V[x].f < self.f_lowest:
+                if self.disp:
+                    logging.info(f'self.HC.V[x].f = {self.HC.V[x].f}')
+                self.f_lowest = self.HC.V[x].f
+                self.x_lowest = self.HC.V[x].x_a
+        for lmc in self.LMC.cache:
+            if self.LMC[lmc].f_min < self.f_lowest:
+                self.f_lowest = self.LMC[lmc].f_min
+                self.x_lowest = self.LMC[lmc].x_l
+
+        if self.f_lowest == np.inf:  # no feasible point
+            self.f_lowest = None
+            self.x_lowest = None
+
+    # Stopping criteria functions:
+    def finite_iterations(self):
+        mi = min(x for x in [self.iters, self.maxiter] if x is not None)
+        if self.disp:
+            logging.info(f'Iterations done = {self.iters_done} / {mi}')
+        if self.iters is not None:
+            if self.iters_done >= (self.iters):
+                self.stop_global = True
+
+        if self.maxiter is not None:  # Stop for infeasible sampling
+            if self.iters_done >= (self.maxiter):
+                self.stop_global = True
+        return self.stop_global
+
+    def finite_fev(self):
+        # Finite function evals in the feasible domain
+        if self.disp:
+            logging.info(f'Function evaluations done = '
+                         f'{self.fn} / {self.maxfev}')
+        if self.fn >= self.maxfev:
+            self.stop_global = True
+        return self.stop_global
+
+    def finite_ev(self):
+        # Finite evaluations including infeasible sampling points
+        if self.disp:
+            logging.info(f'Sampling evaluations done = {self.n_sampled} '
+                         f'/ {self.maxev}')
+        if self.n_sampled >= self.maxev:
+            self.stop_global = True
+
+    def finite_time(self):
+        if self.disp:
+            logging.info(f'Time elapsed = {time.time() - self.init} '
+                         f'/ {self.maxtime}')
+        if (time.time() - self.init) >= self.maxtime:
+            self.stop_global = True
+
+    def finite_precision(self):
+        """
+        Stop the algorithm if the final function value is known.
+
+        Specify in options (with ``self.f_min_true = options['f_min']``)
+        and the tolerance with ``f_tol = options['f_tol']``
+        """
+        # If no minimizer has been found use the lowest sampling value
+        self.find_lowest_vertex()
+        if self.disp:
+            logging.info(f'Lowest function evaluation = {self.f_lowest}')
+            logging.info(f'Specified minimum = {self.f_min_true}')
+        # If no feasible point was returned from the test
+        if self.f_lowest is None:
+            return self.stop_global
+
+        # Function to stop algorithm at specified percentage error:
+        if self.f_min_true == 0.0:
+            if self.f_lowest <= self.f_tol:
+                self.stop_global = True
+        else:
+            pe = (self.f_lowest - self.f_min_true) / abs(self.f_min_true)
+            if self.f_lowest <= self.f_min_true:
+                self.stop_global = True
+                # if (pe - self.f_tol) <= abs(1.0 / abs(self.f_min_true)):
+                if abs(pe) >= 2 * self.f_tol:
+                    warnings.warn(
+                        f"A much lower value than expected f* = "
+                        f"{self.f_min_true} was found f_lowest = "
+                        f"{self.f_lowest}",
+                        stacklevel=3
+                    )
+            if pe <= self.f_tol:
+                self.stop_global = True
+
+        return self.stop_global
+
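
The percentage-error test in `finite_precision` reduces to a small predicate; a standalone sketch of the same logic for reference (the function name and sample values are illustrative only, not part of the class):

    def precision_met(f_lowest, f_min_true, f_tol):
        # Mirrors finite_precision: absolute test when the known minimum
        # is zero, relative error otherwise.
        if f_min_true == 0.0:
            return f_lowest <= f_tol
        pe = (f_lowest - f_min_true) / abs(f_min_true)
        return f_lowest <= f_min_true or pe <= f_tol

    assert precision_met(1e-5, 0.0, 1e-4)             # zero-minimum branch
    assert precision_met(-959.6406, -959.6407, 1e-4)  # relative-error branch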
+    def finite_homology_growth(self):
+        """
+        Stop the algorithm if homology group rank did not grow in iteration.
+        """
+        if self.LMC.size == 0:
+            return  # no reason to stop yet
+        self.hgrd = self.LMC.size - self.hgr
+
+        self.hgr = self.LMC.size
+        if self.hgrd <= self.minhgrd:
+            self.stop_global = True
+        if self.disp:
+            logging.info(f'Current homology growth = {self.hgrd} '
+                         f' (minimum growth = {self.minhgrd})')
+        return self.stop_global
+
+    def stopping_criteria(self):
+        """
+        Various stopping criteria ran every iteration
+
+        Returns
+        -------
+        stop : bool
+        """
+        if self.maxiter is not None:
+            self.finite_iterations()
+        if self.iters is not None:
+            self.finite_iterations()
+        if self.maxfev is not None:
+            self.finite_fev()
+        if self.maxev is not None:
+            self.finite_ev()
+        if self.maxtime is not None:
+            self.finite_time()
+        if self.f_min_true is not None:
+            self.finite_precision()
+        if self.minhgrd is not None:
+            self.finite_homology_growth()
+        return self.stop_global
+
+    def iterate(self):
+        self.iterate_complex()
+
+        # Build minimizer pool
+        if self.minimize_every_iter:
+            if not self.break_routine:
+                self.find_minima()  # Process minimizer pool
+
+        # Algorithm updates
+        self.iters_done += 1
+
+    def iterate_hypercube(self):
+        """
+        Iterate a subdivision of the complex
+
+        Note: called with ``self.iterate_complex()`` after class initiation
+        """
+        # Iterate the complex
+        if self.disp:
+            logging.info('Constructing and refining simplicial complex graph '
+                         'structure')
+        if self.n is None:
+            self.HC.refine_all()
+            self.n_sampled = self.HC.V.size()  # nevs counted
+        else:
+            self.HC.refine(self.n)
+            self.n_sampled += self.n
+
+        if self.disp:
+            logging.info('Triangulation completed, evaluating all constraints '
+                         'and objective function values.')
+
+        # Re-add minimisers to complex
+        if len(self.LMC.xl_maps) > 0:
+            for xl in self.LMC.cache:
+                v = self.HC.V[xl]
+                v_near = v.star()
+                for v in v.nn:
+                    v_near = v_near.union(v.nn)
+                # Reconnect vertices to complex
+                # if self.HC.connect_vertex_non_symm(tuple(self.LMC[xl].x_l),
+                #                                    near=v_near):
+                #     continue
+                # else:
+                #     If failure to find in v_near, then search all vertices
+                #     (very expensive operation):
+                #     self.HC.connect_vertex_non_symm(tuple(self.LMC[xl].x_l))
+
+        # Evaluate all constraints and functions
+        self.HC.V.process_pools()
+        if self.disp:
+            logging.info('Evaluations completed.')
+
+        # feasible sampling points counted by the triangulation.py routines
+        self.fn = self.HC.V.nfev
+        return
+
+    def iterate_delaunay(self):
+        """
+        Build a complex of Delaunay triangulated points
+
+        Note: called with ``self.iterate_complex()`` after class initiation
+        """
+        self.nc += self.n
+        self.sampled_surface(infty_cons_sampl=self.infty_cons_sampl)
+
+        # Add sampled points to a triangulation, construct self.Tri
+        if self.disp:
+            logging.info(f'self.n = {self.n}')
+            logging.info(f'self.nc = {self.nc}')
+            logging.info('Constructing and refining simplicial complex graph '
+                         'structure from sampling points.')
+
+        if self.dim < 2:
+            self.Ind_sorted = np.argsort(self.C, axis=0)
+            self.Ind_sorted = self.Ind_sorted.flatten()
+            tris = []
+            for ind, ind_s in enumerate(self.Ind_sorted):
+                if ind > 0:
+                    tris.append(self.Ind_sorted[ind - 1:ind + 1])
+
+            tris = np.array(tris)
+            # Store 1D triangulation:
+            self.Tri = namedtuple('Tri', ['points', 'simplices'])(self.C, tris)
+            self.points = {}
+        else:
+            if self.C.shape[0] > self.dim + 1:  # Ensure a simplex can be built
+                self.delaunay_triangulation(n_prc=self.n_prc)
+                self.n_prc = self.C.shape[0]
+
+        if self.disp:
+            logging.info('Triangulation completed, evaluating all '
+                         'constraints and objective function values.')
+
+        if hasattr(self, 'Tri'):
+            self.HC.vf_to_vv(self.Tri.points, self.Tri.simplices)
+
+        # Process all pools
+        # Evaluate all constraints and functions
+        if self.disp:
+            logging.info('Triangulation completed, evaluating all constraints '
+                         'and objective function values.')
+
+        # Evaluate all constraints and functions
+        self.HC.V.process_pools()
+        if self.disp:
+            logging.info('Evaluations completed.')
+
+        # feasible sampling points counted by the triangulation.py routines
+        self.fn = self.HC.V.nfev
+        self.n_sampled = self.nc  # nevs counted in triangulation
+        return
+
+    # Hypercube minimizers
+    def minimizers(self):
+        """
+        Returns the indexes of all minimizers
+        """
+        self.minimizer_pool = []
+        # Note: Can implement parallelization here
+        for x in self.HC.V.cache:
+            in_LMC = False
+            if len(self.LMC.xl_maps) > 0:
+                for xlmi in self.LMC.xl_maps:
+                    if np.all(np.array(x) == np.array(xlmi)):
+                        in_LMC = True
+            if in_LMC:
+                continue
+
+            if self.HC.V[x].minimiser():
+                if self.disp:
+                    logging.info('=' * 60)
+                    logging.info(f'v.x = {self.HC.V[x].x_a} is minimizer')
+                    logging.info(f'v.f = {self.HC.V[x].f} is minimizer')
+                    logging.info('=' * 30)
+
+                if self.HC.V[x] not in self.minimizer_pool:
+                    self.minimizer_pool.append(self.HC.V[x])
+
+                if self.disp:
+                    logging.info('Neighbors:')
+                    logging.info('=' * 30)
+                    for vn in self.HC.V[x].nn:
+                        logging.info(f'x = {vn.x} || f = {vn.f}')
+
+                    logging.info('=' * 60)
+        self.minimizer_pool_F = []
+        self.X_min = []
+        # normalized tuple in the Vertex cache
+        self.X_min_cache = {}  # Cache used in hypercube sampling
+
+        for v in self.minimizer_pool:
+            self.X_min.append(v.x_a)
+            self.minimizer_pool_F.append(v.f)
+            self.X_min_cache[tuple(v.x_a)] = v.x
+
+        self.minimizer_pool_F = np.array(self.minimizer_pool_F)
+        self.X_min = np.array(self.X_min)
+
+        # TODO: Only do this if global mode
+        self.sort_min_pool()
+
+        return self.X_min
+
+    # Local minimisation
+    # Minimiser pool processing
+    def minimise_pool(self, force_iter=False):
+        """
+        This processing method can optionally minimise only the best candidate
+        solutions in the minimiser pool
+
+        Parameters
+        ----------
+        force_iter : int
+            Number of starting minimizers to process (can be specified
+            globally or locally)
+
+        """
+        # Find first local minimum
+        # NOTE: Since we always minimize this value regardless it is a waste to
+        # build the topograph first before minimizing
+        lres_f_min = self.minimize(self.X_min[0], ind=self.minimizer_pool[0])
+
+        # Trim minimized point from current minimizer set
+        self.trim_min_pool(0)
+
+        while not self.stop_l_iter:
+            # Global stopping criteria:
+            self.stopping_criteria()
+
+            # Note first iteration is outside loop:
+            if force_iter:
+                force_iter -= 1
+                if force_iter == 0:
+                    self.stop_l_iter = True
+                    break
+
+            if np.shape(self.X_min)[0] == 0:
+                self.stop_l_iter = True
+                break
+
+            # Construct topograph from current minimizer set
+            # (NOTE: This is a very small topograph using only the minimiser
+            # pool; it might be worth using some graph theory tools instead.)
+            self.g_topograph(lres_f_min.x, self.X_min)
+
+            # Find local minimum at the minimiser with the greatest Euclidean
+            # distance from the current solution
+            ind_xmin_l = self.Z[:, -1]
+            lres_f_min = self.minimize(self.Ss[-1, :], self.minimizer_pool[-1])
+
+            # Trim minimised point from current minimizer set
+            self.trim_min_pool(ind_xmin_l)
+
+        # Reset controls
+        self.stop_l_iter = False
+        return
+
+    def sort_min_pool(self):
+        # Sort to find minimum func value in min_pool
+        self.ind_f_min = np.argsort(self.minimizer_pool_F)
+        self.minimizer_pool = np.array(self.minimizer_pool)[self.ind_f_min]
+        self.minimizer_pool_F = np.array(self.minimizer_pool_F)[
+            self.ind_f_min]
+        return
+
+    def trim_min_pool(self, trim_ind):
+        self.X_min = np.delete(self.X_min, trim_ind, axis=0)
+        self.minimizer_pool_F = np.delete(self.minimizer_pool_F, trim_ind)
+        self.minimizer_pool = np.delete(self.minimizer_pool, trim_ind)
+        return
+
+    def g_topograph(self, x_min, X_min):
+        """
+        Returns the topographical vector stemming from the specified value
+        ``x_min`` for the current feasible set ``X_min`` with True boolean
+        values indicating positive entries and False values indicating
+        negative entries.
+
+        """
+        x_min = np.array([x_min])
+        self.Y = spatial.distance.cdist(x_min, X_min, 'euclidean')
+        # Find sorted indexes of spatial distances:
+        self.Z = np.argsort(self.Y, axis=-1)
+
+        self.Ss = X_min[self.Z][0]
+        self.minimizer_pool = self.minimizer_pool[self.Z]
+        self.minimizer_pool = self.minimizer_pool[0]
+        return self.Ss
+
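
The ordering performed by `g_topograph` is plain `cdist` plus `argsort`; a small self-contained sketch (the points are arbitrary illustrations):

    import numpy as np
    from scipy import spatial

    x_min = np.array([[0.0, 0.0]])  # current solution
    X_min = np.array([[1.0, 0.0], [0.1, 0.1], [3.0, 4.0]])  # candidate pool

    Y = spatial.distance.cdist(x_min, X_min, 'euclidean')
    Z = np.argsort(Y, axis=-1)  # indices of X_min sorted by distance
    Ss = X_min[Z][0]            # candidates ordered nearest -> farthest

    # The farthest candidate (Ss[-1]) is the one minimise_pool starts
    # from next, via self.Ss[-1, :].
    print(Ss[-1])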
1256
+ # Local bound functions
1257
+ def construct_lcb_simplicial(self, v_min):
1258
+ """
1259
+ Construct locally (approximately) convex bounds
1260
+
1261
+ Parameters
1262
+ ----------
1263
+ v_min : Vertex object
1264
+ The minimizer vertex
1265
+
1266
+ Returns
1267
+ -------
1268
+ cbounds : list of lists
1269
+ List of size dimension with length-2 list of bounds for each
1270
+ dimension.
1271
+
1272
+ """
1273
+ cbounds = [[x_b_i[0], x_b_i[1]] for x_b_i in self.bounds]
1274
+ # Loop over all bounds
1275
+ for vn in v_min.nn:
1276
+ for i, x_i in enumerate(vn.x_a):
1277
+ # Lower bound
1278
+ if (x_i < v_min.x_a[i]) and (x_i > cbounds[i][0]):
1279
+ cbounds[i][0] = x_i
1280
+
1281
+ # Upper bound
1282
+ if (x_i > v_min.x_a[i]) and (x_i < cbounds[i][1]):
1283
+ cbounds[i][1] = x_i
1284
+
1285
+ if self.disp:
1286
+ logging.info(f'cbounds found for v_min.x_a = {v_min.x_a}')
1287
+ logging.info(f'cbounds = {cbounds}')
1288
+
1289
+ return cbounds
1290
+
1291
+ def construct_lcb_delaunay(self, v_min, ind=None):
1292
+ """
1293
+ Construct locally (approximately) convex bounds
1294
+
1295
+ Parameters
1296
+ ----------
1297
+ v_min : Vertex object
1298
+ The minimizer vertex
1299
+
1300
+ Returns
1301
+ -------
1302
+ cbounds : list of lists
1303
+ List of size dimension with length-2 list of bounds for each
1304
+ dimension.
1305
+ """
1306
+ cbounds = [[x_b_i[0], x_b_i[1]] for x_b_i in self.bounds]
1307
+
1308
+ return cbounds
1309
+
1310
+ # Minimize a starting point locally
1311
+ def minimize(self, x_min, ind=None):
1312
+ """
1313
+ This function is used to calculate the local minima using the specified
1314
+ sampling point as a starting value.
1315
+
1316
+ Parameters
1317
+ ----------
1318
+ x_min : vector of floats
1319
+ Current starting point to minimize.
1320
+
1321
+ Returns
1322
+ -------
1323
+ lres : OptimizeResult
1324
+ The local optimization result represented as a `OptimizeResult`
1325
+ object.
1326
+ """
1327
+ # Use minima maps if vertex was already run
1328
+ if self.disp:
1329
+ logging.info(f'Vertex minimiser maps = {self.LMC.v_maps}')
1330
+
1331
+ if self.LMC[x_min].lres is not None:
1332
+ logging.info(f'Found self.LMC[x_min].lres = '
1333
+ f'{self.LMC[x_min].lres}')
1334
+ return self.LMC[x_min].lres
1335
+
1336
+ if self.callback is not None:
1337
+ logging.info(f'Callback for minimizer starting at {x_min}:')
1338
+
1339
+ if self.disp:
1340
+ logging.info(f'Starting minimization at {x_min}...')
1341
+
1342
+ if self.sampling_method == 'simplicial':
1343
+ x_min_t = tuple(x_min)
1344
+ # Find the normalized tuple in the Vertex cache:
1345
+ x_min_t_norm = self.X_min_cache[tuple(x_min_t)]
1346
+ x_min_t_norm = tuple(x_min_t_norm)
1347
+ g_bounds = self.construct_lcb_simplicial(self.HC.V[x_min_t_norm])
1348
+ if 'bounds' in self.min_solver_args:
1349
+ self.minimizer_kwargs['bounds'] = g_bounds
1350
+ logging.info(self.minimizer_kwargs['bounds'])
1351
+
1352
+ else:
1353
+ g_bounds = self.construct_lcb_delaunay(x_min, ind=ind)
1354
+ if 'bounds' in self.min_solver_args:
1355
+ self.minimizer_kwargs['bounds'] = g_bounds
1356
+ logging.info(self.minimizer_kwargs['bounds'])
1357
+
1358
+ if self.disp and 'bounds' in self.minimizer_kwargs:
1359
+ logging.info('bounds in kwarg:')
1360
+ logging.info(self.minimizer_kwargs['bounds'])
1361
+
1362
+ # Local minimization using scipy.optimize.minimize:
1363
+ lres = minimize(self.func, x_min, **self.minimizer_kwargs)
1364
+
1365
+ if self.disp:
1366
+ logging.info(f'lres = {lres}')
1367
+
1368
+ # Local function evals for all minimizers
1369
+ self.res.nlfev += lres.nfev
1370
+ if 'njev' in lres:
1371
+ self.res.nljev += lres.njev
1372
+ if 'nhev' in lres:
1373
+ self.res.nlhev += lres.nhev
1374
+
1375
+ try: # Needed because of the brain dead 1x1 NumPy arrays
1376
+ lres.fun = lres.fun[0]
1377
+ except (IndexError, TypeError):
1378
+ pass  # lres.fun is already a scalar
1379
+
1380
+ # Append minima maps
1381
+ self.LMC[x_min]
1382
+ self.LMC.add_res(x_min, lres, bounds=g_bounds)
1383
+
1384
+ return lres
1385
+
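+ # A minimal sketch (hypothetical 1-D objective in place of self.func)
+ # of the bounded local step performed above with
+ # scipy.optimize.minimize:
+ _lres_ex = minimize(lambda x: (x[0] - 0.3) ** 2, [0.9],
+                     bounds=[(0.0, 1.0)], method='L-BFGS-B')
+ # _lres_ex.x is approximately [0.3]; its nfev/njev counts are what
+ # accumulate into self.res above.
+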
1386
+ # Post local minimization processing
1387
+ def sort_result(self):
1388
+ """
1389
+ Sort results and build the global return object
1390
+ """
1391
+ # Sort results in local minima cache
1392
+ results = self.LMC.sort_cache_result()
1393
+ self.res.xl = results['xl']
1394
+ self.res.funl = results['funl']
1395
+ self.res.x = results['x']
1396
+ self.res.fun = results['fun']
1397
+
1398
+ # Add local func evals to sampling func evals
1399
+ # Count the number of feasible vertices and add to local func evals:
1400
+ self.res.nfev = self.fn + self.res.nlfev
1401
+ return self.res
1402
+
1403
+ # Algorithm controls
1404
+ def fail_routine(self, mes=("Failed to converge")):
1405
+ self.break_routine = True
1406
+ self.res.success = False
1407
+ self.X_min = [None]
1408
+ self.res.message = mes
1409
+
1410
+ def sampled_surface(self, infty_cons_sampl=False):
1411
+ """
1412
+ Sample the function surface.
1413
+
1414
+ There are 2 modes: if ``infty_cons_sampl`` is True, then the sampled
1415
+ points that are generated outside the feasible domain will be
1416
+ assigned an ``inf`` value in accordance with SHGO rules.
1417
+ This guarantees convergence and usually requires fewer objective
1418
+ function evaluations at the computational cost of more Delaunay
1419
+ triangulation points.
1420
+
1421
+ If ``infty_cons_sampl`` is False, then the infeasible points are
1422
+ discarded and only a subspace of the sampled points is used. This
1423
+ comes at the cost of the loss of guaranteed convergence and usually
1424
+ requires more objective function evaluations.
1425
+ """
1426
+ # Generate sampling points
1427
+ if self.disp:
1428
+ logging.info('Generating sampling points')
1429
+ self.sampling(self.nc, self.dim)
1430
+ if len(self.LMC.xl_maps) > 0:
1431
+ self.C = np.vstack((self.C, np.array(self.LMC.xl_maps)))
1432
+ if not infty_cons_sampl:
1433
+ # Find subspace of feasible points
1434
+ if self.g_cons is not None:
1435
+ self.sampling_subspace()
1436
+
1437
+ # Sort remaining samples
1438
+ self.sorted_samples()
1439
+
1440
+ # Find objective function references
1441
+ self.n_sampled = self.nc
1442
+
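+ # A standalone sketch (hypothetical objective and constraint) of the
+ # two sampling modes described above:
+ _C = np.array([[0.1, 0.2], [0.9, 0.9], [0.7, 0.1]])
+ _f = lambda x: (x ** 2).sum()
+ _g = lambda x: x[0] + x[1] - 1.0  # feasible iff _g(x) >= 0
+ # infty_cons_sampl=True: keep every point, assign inf when infeasible
+ _F = np.array([_f(x) if _g(x) >= 0 else np.inf for x in _C])
+ # infty_cons_sampl=False: discard the infeasible points instead
+ _C_feas = _C[[_g(x) >= 0 for x in _C]]
+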
1443
+ def sampling_custom(self, n, dim):
1444
+ """
1445
+ Generates uniform sampling points in a hypercube and scales the points
1446
+ to the bound limits.
1447
+ """
1448
+ # Generate sampling points.
1449
+ # Generate uniform sample points in [0, 1]^m \subset R^m
1450
+ # Both the first and subsequent calls draw from the same generator:
+ self.C = self.sampling_function(n, dim)
1454
+ # Distribute over bounds
1455
+ for i in range(len(self.bounds)):
1456
+ self.C[:, i] = (self.C[:, i] *
1457
+ (self.bounds[i][1] - self.bounds[i][0])
1458
+ + self.bounds[i][0])
1459
+ return self.C
1460
+
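+ # A standalone sketch (hypothetical bounds) of the affine rescaling
+ # performed above, mapping [0, 1]^dim samples onto the bound limits:
+ _C01 = np.array([[0.0, 0.5], [1.0, 0.25]])  # samples in [0, 1]^2
+ _bnds = [(-1.0, 1.0), (0.0, 10.0)]
+ for _i, (_lo, _hi) in enumerate(_bnds):
+     _C01[:, _i] = _C01[:, _i] * (_hi - _lo) + _lo
+ # _C01 is now [[-1.0, 5.0], [1.0, 2.5]]
+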
1461
+ def sampling_subspace(self):
1462
+ """Find subspace of feasible points from g_func definition"""
1463
+ # Subspace of feasible points.
1464
+ for ind, g in enumerate(self.g_cons):
1465
+ # C.shape = (Z, dim) where Z is the number of sampling points to
1466
+ # evaluate and dim is the dimensionality of the problem.
1467
+ # the constraint function may not be vectorised so have to step
1468
+ # through each sampling point sequentially.
1469
+ feasible = np.array(
1470
+ [np.all(g(x_C, *self.g_args[ind]) >= 0.0) for x_C in self.C],
1471
+ dtype=bool
1472
+ )
1473
+ self.C = self.C[feasible]
1474
+
1475
+ if self.C.size == 0:
1476
+ self.res.message = ('No sampling point found within the '
1477
+ + 'feasible set. Increasing sampling '
1478
+ + 'size.')
1479
+ # sampling correctly for both 1-D and >1-D cases
1480
+ if self.disp:
1481
+ logging.info(self.res.message)
1482
+
1483
+ def sorted_samples(self): # Validated
1484
+ """Find indexes of the sorted sampling points"""
1485
+ self.Ind_sorted = np.argsort(self.C, axis=0)
1486
+ self.Xs = self.C[self.Ind_sorted]
1487
+ return self.Ind_sorted, self.Xs
1488
+
1489
+ def delaunay_triangulation(self, n_prc=0):
1490
+ if hasattr(self, 'Tri') and self.qhull_incremental:
1491
+ # TODO: Uncertain if n_prc needs to add len(self.LMC.xl_maps)
1492
+ # in self.sampled_surface
1493
+ self.Tri.add_points(self.C[n_prc:, :])
1494
+ else:
1495
+ try:
1496
+ self.Tri = spatial.Delaunay(self.C,
1497
+ incremental=self.qhull_incremental,
1498
+ )
1499
+ except spatial.QhullError:
1500
+ if str(sys.exc_info()[1])[:6] == 'QH6239':
1501
+ logging.warning('QH6239 Qhull precision error detected; '
1502
+ 'this usually occurs when no bounds are '
1503
+ 'specified. Qhull can only handle '
1504
+ 'cocircular/cospherical points with '
1505
+ 'incremental mode switched off, so the '
1506
+ 'performance of shgo will be reduced in '
1507
+ 'this mode.')
1508
+ self.qhull_incremental = False
1509
+ self.Tri = spatial.Delaunay(self.C,
1510
+ incremental=self.qhull_incremental)
1512
+ else:
1513
+ raise
1514
+
1515
+ return self.Tri
1516
+
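+ # A standalone sketch of the incremental Qhull pattern used above:
+ # build once with incremental=True, then extend with add_points
+ # instead of retriangulating from scratch.
+ _rng = np.random.default_rng(0)
+ _tri = spatial.Delaunay(_rng.random((10, 2)), incremental=True)
+ _tri.add_points(_rng.random((5, 2)))  # reuses the existing structure
+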
1517
+
1518
+ class LMap:
1519
+ def __init__(self, v):
1520
+ self.v = v
1521
+ self.x_l = None
1522
+ self.lres = None
1523
+ self.f_min = None
1524
+ self.lbounds = []
1525
+
1526
+
1527
+ class LMapCache:
1528
+ def __init__(self):
1529
+ self.cache = {}
1530
+
1531
+ # Lists for search queries
1532
+ self.v_maps = []
1533
+ self.xl_maps = []
1534
+ self.xl_maps_set = set()
1535
+ self.f_maps = []
1536
+ self.lbound_maps = []
1537
+ self.size = 0
1538
+
1539
+ def __getitem__(self, v):
1540
+ try:
1541
+ v = np.ndarray.tolist(v)
1542
+ except TypeError:
1543
+ pass
1544
+ v = tuple(v)
1545
+ try:
1546
+ return self.cache[v]
1547
+ except KeyError:
1548
+ xval = LMap(v)
1549
+ self.cache[v] = xval
1550
+
1551
+ return self.cache[v]
1552
+
1553
+ def add_res(self, v, lres, bounds=None):
1554
+ v = np.ndarray.tolist(v)
1555
+ v = tuple(v)
1556
+ self.cache[v].x_l = lres.x
1557
+ self.cache[v].lres = lres
1558
+ self.cache[v].f_min = lres.fun
1559
+ self.cache[v].lbounds = bounds
1560
+
1561
+ # Update cache size
1562
+ self.size += 1
1563
+
1564
+ # Cache lists for search queries
1565
+ self.v_maps.append(v)
1566
+ self.xl_maps.append(lres.x)
1567
+ self.xl_maps_set.add(tuple(lres.x))
1568
+ self.f_maps.append(lres.fun)
1569
+ self.lbound_maps.append(bounds)
1570
+
1571
+ def sort_cache_result(self):
1572
+ """
1573
+ Sort results and build the global return object
1574
+ """
1575
+ results = {}
1576
+ # Sort results and save
1577
+ self.xl_maps = np.array(self.xl_maps)
1578
+ self.f_maps = np.array(self.f_maps)
1579
+
1580
+ # Sorted indexes in Func_min
1581
+ ind_sorted = np.argsort(self.f_maps)
1582
+
1583
+ # Save ordered list of minima
1584
+ results['xl'] = self.xl_maps[ind_sorted] # Ordered x vals
1585
+ self.f_maps = np.array(self.f_maps)
1586
+ results['funl'] = self.f_maps[ind_sorted]
1587
+ results['funl'] = results['funl'].T
1588
+
1589
+ # Find global of all minimizers
1590
+ results['x'] = self.xl_maps[ind_sorted[0]] # Save global minima
1591
+ results['fun'] = self.f_maps[ind_sorted[0]] # Save global fun value
1592
+
1593
+ self.xl_maps = np.ndarray.tolist(self.xl_maps)
1594
+ self.f_maps = np.ndarray.tolist(self.f_maps)
1595
+ return results
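+
+ # A minimal sketch (not the scipy API) of the tuple-keyed memoization
+ # pattern implemented by LMapCache.__getitem__ above: coordinates are
+ # normalised to hashable tuples before being used as dict keys.
+ _cache = {}
+ def _lookup(v):
+     key = tuple(np.asarray(v).tolist())
+     return _cache.setdefault(key, LMap(key))
+ # _lookup(np.array([0.1, 0.2])) is _lookup([0.1, 0.2]) -> True
+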
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (197 Bytes).
 
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_complex.cpython-310.pyc ADDED
Binary file (23 kB).
 
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_vertex.cpython-310.pyc ADDED
Binary file (14.5 kB).
 
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_complex.py ADDED
@@ -0,0 +1,1225 @@
1
+ """Base classes for low memory simplicial complex structures."""
2
+ import copy
3
+ import logging
4
+ import itertools
5
+ import decimal
6
+ from functools import cache
7
+
8
+ import numpy
9
+
10
+ from ._vertex import (VertexCacheField, VertexCacheIndex)
11
+
12
+
13
+ class Complex:
14
+ """
15
+ Base class for a simplicial complex described as a cache of vertices
16
+ together with their connections.
17
+
18
+ Important methods:
19
+ Domain triangulation:
20
+ Complex.triangulate, Complex.split_generation
21
+ Triangulating arbitrary points (must be triangulable,
22
+ may exist outside domain):
23
+ Complex.triangulate(sample_set)
24
+ Converting another simplicial complex structure data type to the
25
+ structure used in Complex (ex. OBJ wavefront)
26
+ Complex.convert(datatype, data)
27
+
28
+ Important objects:
29
+ HC.V: The cache of vertices and their connection
30
+ HC.H: Storage structure of all vertex groups
31
+
32
+ Parameters
33
+ ----------
34
+ dim : int
35
+ Spatial dimensionality of the complex R^dim
36
+ domain : list of tuples, optional
37
+ The bounds [x_l, x_u]^dim of the hyperrectangle space
38
+ ex. The default domain is the hyperrectangle [0, 1]^dim
39
+ Note: The domain must be convex; non-convex spaces can be cut
40
+ away from this domain using the non-linear
41
+ g_cons functions to define any arbitrary domain
42
+ (these domains may also be disconnected from each other)
43
+ sfield :
44
+ A scalar function defined in the associated domain f: R^dim --> R
45
+ sfield_args : tuple
46
+ Additional arguments to be passed to `sfield`
47
+ vfield :
48
+ A vector function defined in the associated domain
49
+ f: R^dim --> R^m
50
+ (for example a gradient function of the scalar field)
51
+ vfield_args : tuple
52
+ Additional arguments to be passed to vfield
53
+ symmetry : None or list
54
+ Specify if the objective function contains symmetric variables.
55
+ The search space (and therefore performance) is decreased by up to
56
+ O(n!) times in the fully symmetric case.
57
+
58
+ E.g. f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2
59
+
60
+ In this equation x_2 and x_3 are symmetric to x_1, while x_5 and
61
+ x_6 are symmetric to x_4, this can be specified to the solver as:
62
+
63
+ symmetry = [0, # Variable 1
64
+ 0, # symmetric to variable 1
65
+ 0, # symmetric to variable 1
66
+ 3, # Variable 4
67
+ 3, # symmetric to variable 4
68
+ 3, # symmetric to variable 4
69
+ ]
70
+
71
+ constraints : dict or sequence of dict, optional
72
+ Constraints definition.
73
+ Function(s) ``R**n`` in the form::
74
+
75
+ g(x) <= 0 applied as g : R^n -> R^m
76
+ h(x) == 0 applied as h : R^n -> R^p
77
+
78
+ Each constraint is defined in a dictionary with fields:
79
+
80
+ type : str
81
+ Constraint type: 'eq' for equality, 'ineq' for inequality.
82
+ fun : callable
83
+ The function defining the constraint.
84
+ jac : callable, optional
85
+ The Jacobian of `fun` (only for SLSQP).
86
+ args : sequence, optional
87
+ Extra arguments to be passed to the function and Jacobian.
88
+
89
+ Equality constraint means that the constraint function result is to
90
+ be zero whereas inequality means that it is to be non-negative.
111
+
112
+ workers : int, optional
113
+ Uses `multiprocessing.Pool <multiprocessing>` to compute the field
114
+ functions in parallel.
115
+ """
116
+ def __init__(self, dim, domain=None, sfield=None, sfield_args=(),
117
+ symmetry=None, constraints=None, workers=1):
118
+ self.dim = dim
119
+
120
+ # Domains
121
+ self.domain = domain
122
+ if domain is None:
123
+ self.bounds = [(0.0, 1.0), ] * dim
124
+ else:
125
+ self.bounds = domain
126
+ self.symmetry = symmetry
127
+ # here in init to avoid if checks
128
+
129
+ # Field functions
130
+ self.sfield = sfield
131
+ self.sfield_args = sfield_args
132
+
133
+ # Process constraints
134
+ # Constraints
135
+ # Process constraint dict sequence:
136
+ if constraints is not None:
137
+ self.min_cons = constraints
138
+ self.g_cons = []
139
+ self.g_args = []
140
+ if not isinstance(constraints, (tuple, list)):
141
+ constraints = (constraints,)
142
+
143
+ for cons in constraints:
144
+ if cons['type'] in ('ineq'):
145
+ self.g_cons.append(cons['fun'])
146
+ try:
147
+ self.g_args.append(cons['args'])
148
+ except KeyError:
149
+ self.g_args.append(())
150
+ self.g_cons = tuple(self.g_cons)
151
+ self.g_args = tuple(self.g_args)
152
+ else:
153
+ self.g_cons = None
154
+ self.g_args = None
155
+
156
+ # Homology properties
157
+ self.gen = 0
158
+ self.perm_cycle = 0
159
+
160
+ # Every cell is stored in a list of its generation,
161
+ # ex. the initial cell is stored in self.H[0]
162
+ # 1st get new cells are stored in self.H[1] etc.
163
+ # When a cell is sub-generated it is removed from this list
164
+
165
+ self.H = [] # Storage structure of vertex groups
166
+
167
+ # Cache of all vertices
168
+ if (sfield is not None) or (self.g_cons is not None):
169
+ # Initiate a vertex cache and an associated field cache, note that
170
+ # the field case is always initiated inside the vertex cache if an
171
+ # associated field scalar field is defined:
172
+ if sfield is not None:
173
+ self.V = VertexCacheField(field=sfield, field_args=sfield_args,
174
+ g_cons=self.g_cons,
175
+ g_cons_args=self.g_args,
176
+ workers=workers)
177
+ elif self.g_cons is not None:
178
+ self.V = VertexCacheField(field=sfield, field_args=sfield_args,
179
+ g_cons=self.g_cons,
180
+ g_cons_args=self.g_args,
181
+ workers=workers)
182
+ else:
183
+ self.V = VertexCacheIndex()
184
+
185
+ self.V_non_symm = [] # List of non-symmetric vertices
186
+
187
+ def __call__(self):
188
+ return self.H
189
+
190
+ # %% Triangulation methods
191
+ def cyclic_product(self, bounds, origin, supremum, centroid=True):
192
+ """Generate initial triangulation using cyclic product"""
193
+ # Define current hyperrectangle
194
+ vot = tuple(origin)
195
+ vut = tuple(supremum) # Hyperrectangle supremum
196
+ self.V[vot]
197
+ vo = self.V[vot]
198
+ yield vo.x
199
+ self.V[vut].connect(self.V[vot])
200
+ yield vut
201
+ # Cyclic group approach with second x_l --- x_u operation.
202
+
203
+ # These containers store the "lower" and "upper" vertices
204
+ # corresponding to the origin or supremum of every C2 group.
205
+ # It has the structure of `dim` times embedded lists each containing
206
+ # these vertices as the entire complex grows. Bounds[0] has to be done
207
+ # outside the loops before we have symmetric containers.
208
+ # NOTE: This means that bounds[0][1] must always exist
209
+ C0x = [[self.V[vot]]]
210
+ a_vo = copy.copy(list(origin))
211
+ a_vo[0] = vut[0] # Update aN Origin
212
+ a_vo = self.V[tuple(a_vo)]
213
+ # self.V[vot].connect(self.V[tuple(a_vo)])
214
+ self.V[vot].connect(a_vo)
215
+ yield a_vo.x
216
+ C1x = [[a_vo]]
217
+ # C1x = [[self.V[tuple(a_vo)]]]
218
+ ab_C = [] # Container for a + b operations
219
+
220
+ # Loop over remaining bounds
221
+ for i, x in enumerate(bounds[1:]):
222
+ # Update lower and upper containers
223
+ C0x.append([])
224
+ C1x.append([])
225
+ # try to access a second bound (if not, C1 is symmetric)
226
+ try:
227
+ # Early try so that we don't have to copy the cache before
228
+ # moving on to next C1/C2: Try to add the operation of a new
229
+ # C2 product by accessing the upper bound
230
+ x[1]
231
+ # Copy lists for iteration
232
+ cC0x = [x[:] for x in C0x[:i + 1]]
233
+ cC1x = [x[:] for x in C1x[:i + 1]]
234
+ for j, (VL, VU) in enumerate(zip(cC0x, cC1x)):
235
+ for k, (vl, vu) in enumerate(zip(VL, VU)):
236
+ # Build aN vertices for each lower-upper pair in N:
237
+ a_vl = list(vl.x)
238
+ a_vu = list(vu.x)
239
+ a_vl[i + 1] = vut[i + 1]
240
+ a_vu[i + 1] = vut[i + 1]
241
+ a_vl = self.V[tuple(a_vl)]
242
+
243
+ # Connect vertices in N to corresponding vertices
244
+ # in aN:
245
+ vl.connect(a_vl)
246
+
247
+ yield a_vl.x
248
+
249
+ a_vu = self.V[tuple(a_vu)]
250
+ # Connect vertices in N to corresponding vertices
251
+ # in aN:
252
+ vu.connect(a_vu)
253
+
254
+ # Connect new vertex pair in aN:
255
+ a_vl.connect(a_vu)
256
+
257
+ # Connect lower pair to upper (triangulation
258
+ # operation of a + b (two arbitrary operations):
259
+ vl.connect(a_vu)
260
+ ab_C.append((vl, a_vu))
261
+
262
+ # Update the containers
263
+ C0x[i + 1].append(vl)
264
+ C0x[i + 1].append(vu)
265
+ C1x[i + 1].append(a_vl)
266
+ C1x[i + 1].append(a_vu)
267
+
268
+ # Update old containers
269
+ C0x[j].append(a_vl)
270
+ C1x[j].append(a_vu)
271
+
272
+ # Yield new points
273
+ yield a_vu.x
274
+
275
+ # Try to connect aN lower source of previous a + b
276
+ # operation with a aN vertex
277
+ ab_Cc = copy.copy(ab_C)
278
+
279
+ for vp in ab_Cc:
280
+ b_v = list(vp[0].x)
281
+ ab_v = list(vp[1].x)
282
+ b_v[i + 1] = vut[i + 1]
283
+ ab_v[i + 1] = vut[i + 1]
284
+ b_v = self.V[tuple(b_v)] # b + vl
285
+ ab_v = self.V[tuple(ab_v)] # b + a_vl
286
+ # Note o---o is already connected
287
+ vp[0].connect(ab_v) # o-s
288
+ b_v.connect(ab_v) # s-s
289
+
290
+ # Add new list of cross pairs
291
+ ab_C.append((vp[0], ab_v))
292
+ ab_C.append((b_v, ab_v))
293
+
294
+ except IndexError:
295
+ cC0x = C0x[i]
296
+ cC1x = C1x[i]
297
+ VL, VU = cC0x, cC1x
298
+ for k, (vl, vu) in enumerate(zip(VL, VU)):
299
+ # Build aN vertices for each lower-upper pair in N:
300
+ a_vu = list(vu.x)
301
+ a_vu[i + 1] = vut[i + 1]
302
+ # Connect vertices in N to corresponding vertices
303
+ # in aN:
304
+ a_vu = self.V[tuple(a_vu)]
305
+ # Connect vertices in N to corresponding vertices
306
+ # in aN:
307
+ vu.connect(a_vu)
308
+ # Connect new vertex pair in aN:
309
+ # a_vl.connect(a_vu)
310
+ # Connect lower pair to upper (triangulation
311
+ # operation of a + b (two arbitrary operations):
312
+ vl.connect(a_vu)
313
+ ab_C.append((vl, a_vu))
314
+ C0x[i + 1].append(vu)
315
+ C1x[i + 1].append(a_vu)
316
+ # Yield new points
317
+ a_vu.connect(self.V[vut])
318
+ yield a_vu.x
319
+ ab_Cc = copy.copy(ab_C)
320
+ for vp in ab_Cc:
321
+ if vp[1].x[i] == vut[i]:
322
+ ab_v = list(vp[1].x)
323
+ ab_v[i + 1] = vut[i + 1]
324
+ ab_v = self.V[tuple(ab_v)] # b + a_vl
325
+ # Note o---o is already connected
326
+ vp[0].connect(ab_v) # o-s
327
+
328
+ # Add new list of cross pairs
329
+ ab_C.append((vp[0], ab_v))
330
+
331
+ # Clean class trash
332
+ try:
333
+ del C0x
334
+ del cC0x
335
+ del C1x
336
+ del cC1x
337
+ del ab_C
338
+ del ab_Cc
339
+ except UnboundLocalError:
340
+ pass
341
+
342
+ # Extra yield to ensure that the triangulation is completed
343
+ if centroid:
344
+ vo = self.V[vot]
345
+ vs = self.V[vut]
346
+ # Disconnect the origin and supremum
347
+ vo.disconnect(vs)
348
+ # Build centroid
349
+ vc = self.split_edge(vot, vut)
350
+ for v in vo.nn:
351
+ v.connect(vc)
352
+ yield vc.x
353
+ return vc.x
354
+ else:
355
+ yield vut
356
+ return vut
357
+
358
+ def triangulate(self, n=None, symmetry=None, centroid=True,
359
+ printout=False):
360
+ """
361
+ Triangulate the initial domain, if n is not None then a limited number
362
+ of points will be generated
363
+
364
+ Parameters
365
+ ----------
366
+ n : int, Number of points to be sampled.
367
+ symmetry :
368
+
369
+ Ex. Dictionary/hashtable
370
+ f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2
371
+
372
+ symmetry = symmetry[0]: 0, # Variable 1
373
+ symmetry[1]: 0, # symmetric to variable 1
374
+ symmetry[2]: 0, # symmetric to variable 1
375
+ symmetry[3]: 3, # Variable 4
376
+ symmetry[4]: 3, # symmetric to variable 4
377
+ symmetry[5]: 3, # symmetric to variable 4
378
+ }
379
+ centroid : bool, if True add a central point to the hypercube
380
+ printout : bool, if True print out results
381
+
382
+ NOTES:
383
+ ------
384
+ Rather than using the combinatorial algorithm to connect vertices we
385
+ make the following observation:
386
+
387
+ The bound pairs are similar to a C2 cyclic group and the structure is
388
+ formed using the Cartesian product:
389
+
390
+ H = C2 x C2 x C2 ... x C2 (dim times)
391
+
392
+ So construct any normal subgroup N and consider H/N first: we connect
393
+ all vertices within N (ex. N is C2, the first dimension), then we move
394
+ to a left coset aN (an operation moving around the defined H/N group,
395
+ for example moving from the lower bound in C2 (dimension 2) to the
396
+ upper bound in C2), connecting all the vertices during this operation.
397
+ Now repeat the N connections. Note that these elements can be connected
398
+ in parallel.
399
+ """
400
+ # Inherit class arguments
401
+ if symmetry is None:
402
+ symmetry = self.symmetry
403
+ # Build origin and supremum vectors
404
+ origin = [i[0] for i in self.bounds]
405
+ self.origin = origin
406
+ supremum = [i[1] for i in self.bounds]
407
+
408
+ self.supremum = supremum
409
+
410
+ if symmetry is None:
411
+ cbounds = self.bounds
412
+ else:
413
+ cbounds = copy.copy(self.bounds)
414
+ for i, j in enumerate(symmetry):
415
+ if i is not j:
416
+ # pop second entry on second symmetry vars
417
+ cbounds[i] = [self.bounds[symmetry[i]][0]]
418
+ # Sole (first) entry is the sup value and there is no
419
+ # origin:
420
+ cbounds[i] = [self.bounds[symmetry[i]][1]]
421
+ if (self.bounds[symmetry[i]] is not
422
+ self.bounds[symmetry[j]]):
423
+ logging.warning(f"Variable {i} was specified as "
424
+ f"symmetetric to variable {j}, however"
425
+ f", the bounds {i} ="
426
+ f" {self.bounds[symmetry[i]]} and {j}"
427
+ f" ="
428
+ f" {self.bounds[symmetry[j]]} do not "
429
+ f"match, the mismatch was ignored in "
430
+ f"the initial triangulation.")
431
+ cbounds[i] = self.bounds[symmetry[j]]
432
+
433
+ if n is None:
434
+ # Build generator
435
+ self.cp = self.cyclic_product(cbounds, origin, supremum, centroid)
436
+ for i in self.cp:
437
+ i
438
+
439
+ try:
440
+ self.triangulated_vectors.append((tuple(self.origin),
441
+ tuple(self.supremum)))
442
+ except (AttributeError, KeyError):
443
+ self.triangulated_vectors = [(tuple(self.origin),
444
+ tuple(self.supremum))]
445
+
446
+ else:
447
+ # Check if generator already exists
448
+ try:
449
+ self.cp
450
+ except (AttributeError, KeyError):
451
+ self.cp = self.cyclic_product(cbounds, origin, supremum,
452
+ centroid)
453
+
454
+ try:
455
+ while len(self.V.cache) < n:
456
+ next(self.cp)
457
+ except StopIteration:
458
+ try:
459
+ self.triangulated_vectors.append((tuple(self.origin),
460
+ tuple(self.supremum)))
461
+ except (AttributeError, KeyError):
462
+ self.triangulated_vectors = [(tuple(self.origin),
463
+ tuple(self.supremum))]
464
+
465
+ if printout:
466
+ # for v in self.C0():
467
+ # v.print_out()
468
+ for v in self.V.cache:
469
+ self.V[v].print_out()
470
+
471
+ return
472
+
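+ # A standalone sketch of the initial cell described in the NOTES: the
+ # Cartesian product of the C2 bound pairs enumerates the 2**dim
+ # hyperrectangle corners that the cyclic-product generator visits.
+ _corners = list(itertools.product(*[(0.0, 1.0)] * 3))
+ # len(_corners) == 8 == 2**3; centroid=True adds one more vertex.
+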
473
+ def refine(self, n=1):
474
+ if n is None:
475
+ try:
476
+ self.triangulated_vectors
477
+ self.refine_all()
478
+ return
479
+ except AttributeError as ae:
480
+ if str(ae) == "'Complex' object has no attribute " \
481
+ "'triangulated_vectors'":
482
+ self.triangulate(symmetry=self.symmetry)
483
+ return
484
+ else:
485
+ raise
486
+
487
+ nt = len(self.V.cache) + n # Target number of total vertices
488
+ # In the outer while loop we iterate until we have added an extra `n`
489
+ # vertices to the complex:
490
+ while len(self.V.cache) < nt: # while loop 1
491
+ try: # try 1
492
+ # Try to access triangulated_vectors, this should only be
493
+ # defined if an initial triangulation has already been
494
+ # performed:
495
+ self.triangulated_vectors
496
+ # Try a usual iteration of the current generator, if it
497
+ # does not exist or is exhausted then produce a new generator
498
+ try: # try 2
499
+ next(self.rls)
500
+ except (AttributeError, StopIteration, KeyError):
501
+ vp = self.triangulated_vectors[0]
502
+ self.rls = self.refine_local_space(*vp, bounds=self.bounds)
503
+ next(self.rls)
504
+
505
+ except (AttributeError, KeyError):
506
+ # If an initial triangulation has not been completed, then
507
+ # we start/continue the initial triangulation targeting `nt`
508
+ # vertices, if nt is greater than the initial number of
509
+ # vertices then the `refine` routine will move back to try 1.
510
+ self.triangulate(nt, self.symmetry)
511
+ return
512
+
513
+ def refine_all(self, centroids=True):
514
+ """Refine the entire domain of the current complex."""
515
+ try:
516
+ self.triangulated_vectors
517
+ tvs = copy.copy(self.triangulated_vectors)
518
+ for i, vp in enumerate(tvs):
519
+ self.rls = self.refine_local_space(*vp, bounds=self.bounds)
520
+ for i in self.rls:
521
+ i
522
+ except AttributeError as ae:
523
+ if str(ae) == "'Complex' object has no attribute " \
524
+ "'triangulated_vectors'":
525
+ self.triangulate(symmetry=self.symmetry, centroid=centroids)
526
+ else:
527
+ raise
528
+
529
+ # This adds a centroid to every new sub-domain generated and defined
530
+ # by self.triangulated_vectors, in addition the vertices ! to complete
531
+ # the triangulation
532
+ return
533
+
534
+ def refine_local_space(self, origin, supremum, bounds, centroid=1):
535
+ # Copy for later removal
536
+ origin_c = copy.copy(origin)
537
+ supremum_c = copy.copy(supremum)
538
+
539
+ # Initiate local variables redefined in later inner `for` loop:
540
+ vl, vu, a_vu = None, None, None
541
+
542
+ # Change the vector orientation so that it is only increasing
543
+ s_ov = list(origin)
544
+ s_origin = list(origin)
545
+ s_sv = list(supremum)
546
+ s_supremum = list(supremum)
547
+ for i, vi in enumerate(s_origin):
548
+ if s_ov[i] > s_sv[i]:
549
+ s_origin[i] = s_sv[i]
550
+ s_supremum[i] = s_ov[i]
551
+
552
+ vot = tuple(s_origin)
553
+ vut = tuple(s_supremum) # Hyperrectangle supremum
554
+
555
+ vo = self.V[vot] # initiate if doesn't exist yet
556
+ vs = self.V[vut]
557
+ # Start by finding the old centroid of the new space:
558
+ vco = self.split_edge(vo.x, vs.x) # Split in case not centroid arg
559
+
560
+ # Find set of extreme vertices in current local space
561
+ sup_set = copy.copy(vco.nn)
562
+ # Cyclic group approach with second x_l --- x_u operation.
563
+
564
+ # These containers store the "lower" and "upper" vertices
565
+ # corresponding to the origin or supremum of every C2 group.
566
+ # It has the structure of `dim` times embedded lists each containing
567
+ # these vertices as the entire complex grows. Bounds[0] has to be done
568
+ # outside the loops before we have symmetric containers.
569
+ # NOTE: This means that bounds[0][1] must always exist
570
+
571
+ a_vl = copy.copy(list(vot))
572
+ a_vl[0] = vut[0] # Update aN Origin
573
+ if tuple(a_vl) not in self.V.cache:
574
+ vo = self.V[vot] # initiate if doesn't exist yet
575
+ vs = self.V[vut]
576
+ # Start by finding the old centroid of the new space:
577
+ vco = self.split_edge(vo.x, vs.x) # Split in case not centroid arg
578
+
579
+ # Find set of extreme vertices in current local space
580
+ sup_set = copy.copy(vco.nn)
581
+ a_vl = copy.copy(list(vot))
582
+ a_vl[0] = vut[0] # Update aN Origin
583
+ a_vl = self.V[tuple(a_vl)]
584
+ else:
585
+ a_vl = self.V[tuple(a_vl)]
586
+
587
+ c_v = self.split_edge(vo.x, a_vl.x)
588
+ c_v.connect(vco)
589
+ yield c_v.x
590
+ Cox = [[vo]]
591
+ Ccx = [[c_v]]
592
+ Cux = [[a_vl]]
593
+ ab_C = [] # Container for a + b operations
594
+ s_ab_C = [] # Container for symmetric a + b operations
595
+
596
+ # Loop over remaining bounds
597
+ for i, x in enumerate(bounds[1:]):
598
+ # Update lower and upper containers
599
+ Cox.append([])
600
+ Ccx.append([])
601
+ Cux.append([])
602
+ # try to access a second bound (if not, C1 is symmetric)
603
+ try:
604
+ t_a_vl = list(vot)
605
+ t_a_vl[i + 1] = vut[i + 1]
606
+
607
+ # New: lists are used anyway, so copy all
608
+ # %%
609
+ # Copy lists for iteration
610
+ cCox = [x[:] for x in Cox[:i + 1]]
611
+ cCcx = [x[:] for x in Ccx[:i + 1]]
612
+ cCux = [x[:] for x in Cux[:i + 1]]
613
+ # Try to connect aN lower source of previous a + b
614
+ # operation with a aN vertex
615
+ ab_Cc = copy.copy(ab_C) # NOTE: We append ab_C in the
616
+ # (VL, VC, VU) for-loop, but we use the copy of the list in the
617
+ # ab_Cc for-loop.
618
+ s_ab_Cc = copy.copy(s_ab_C)
619
+
620
+ # Early try so that we don't have to copy the cache before
621
+ # moving on to next C1/C2: Try to add the operation of a new
622
+ # C2 product by accessing the upper bound
623
+ if tuple(t_a_vl) not in self.V.cache:
624
+ # Raise error to continue symmetric refine
625
+ raise IndexError
626
+ t_a_vu = list(vut)
627
+ t_a_vu[i + 1] = vut[i + 1]
628
+ if tuple(t_a_vu) not in self.V.cache:
629
+ # Raise error to continue symmetric refine:
630
+ raise IndexError
631
+
632
+ for vectors in s_ab_Cc:
633
+ # s_ab_C.append([c_vc, vl, vu, a_vu])
634
+ bc_vc = list(vectors[0].x)
635
+ b_vl = list(vectors[1].x)
636
+ b_vu = list(vectors[2].x)
637
+ ba_vu = list(vectors[3].x)
638
+
639
+ bc_vc[i + 1] = vut[i + 1]
640
+ b_vl[i + 1] = vut[i + 1]
641
+ b_vu[i + 1] = vut[i + 1]
642
+ ba_vu[i + 1] = vut[i + 1]
643
+
644
+ bc_vc = self.V[tuple(bc_vc)]
645
+ bc_vc.connect(vco) # NOTE: Unneeded?
646
+ yield bc_vc
647
+
648
+ # Split to centre, call this centre group "d = 0.5*a"
649
+ d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x)
650
+ d_bc_vc.connect(bc_vc)
651
+ d_bc_vc.connect(vectors[1]) # Connect all to centroid
652
+ d_bc_vc.connect(vectors[2]) # Connect all to centroid
653
+ d_bc_vc.connect(vectors[3]) # Connect all to centroid
654
+ yield d_bc_vc.x
655
+ b_vl = self.V[tuple(b_vl)]
656
+ bc_vc.connect(b_vl) # Connect aN cross pairs
657
+ d_bc_vc.connect(b_vl) # Connect all to centroid
658
+
659
+ yield b_vl
660
+ b_vu = self.V[tuple(b_vu)]
661
+ bc_vc.connect(b_vu) # Connect aN cross pairs
662
+ d_bc_vc.connect(b_vu) # Connect all to centroid
663
+
664
+ b_vl_c = self.split_edge(b_vu.x, b_vl.x)
665
+ bc_vc.connect(b_vl_c)
666
+
667
+ yield b_vu
668
+ ba_vu = self.V[tuple(ba_vu)]
669
+ bc_vc.connect(ba_vu) # Connect aN cross pairs
670
+ d_bc_vc.connect(ba_vu) # Connect all to centroid
671
+
672
+ # Split the a + b edge of the initial triangulation:
673
+ os_v = self.split_edge(vectors[1].x, ba_vu.x) # o-s
674
+ ss_v = self.split_edge(b_vl.x, ba_vu.x) # s-s
675
+ b_vu_c = self.split_edge(b_vu.x, ba_vu.x)
676
+ bc_vc.connect(b_vu_c)
677
+ yield os_v.x # often equal to vco, but not always
678
+ yield ss_v.x # often equal to bc_vu, but not always
679
+ yield ba_vu
680
+ # Split remaining to centre, call this centre group
681
+ # "d = 0.5*a"
682
+ d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x)
683
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
684
+ yield d_bc_vc.x
685
+ d_b_vl = self.split_edge(vectors[1].x, b_vl.x)
686
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
687
+ d_bc_vc.connect(d_b_vl) # Connect dN cross pairs
688
+ yield d_b_vl.x
689
+ d_b_vu = self.split_edge(vectors[2].x, b_vu.x)
690
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
691
+ d_bc_vc.connect(d_b_vu) # Connect dN cross pairs
692
+ yield d_b_vu.x
693
+ d_ba_vu = self.split_edge(vectors[3].x, ba_vu.x)
694
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
695
+ d_bc_vc.connect(d_ba_vu) # Connect dN cross pairs
696
+ yield d_ba_vu
697
+
698
+ # comb = [c_vc, vl, vu, a_vl, a_vu,
699
+ # bc_vc, b_vl, b_vu, ba_vl, ba_vu]
700
+ comb = [vl, vu, a_vu,
701
+ b_vl, b_vu, ba_vu]
702
+ comb_iter = itertools.combinations(comb, 2)
703
+ for vecs in comb_iter:
704
+ self.split_edge(vecs[0].x, vecs[1].x)
705
+ # Add new list of cross pairs
706
+ ab_C.append((d_bc_vc, vectors[1], b_vl, a_vu, ba_vu))
707
+ ab_C.append((d_bc_vc, vl, b_vl, a_vu, ba_vu)) # = prev
708
+
709
+ for vectors in ab_Cc:
710
+ bc_vc = list(vectors[0].x)
711
+ b_vl = list(vectors[1].x)
712
+ b_vu = list(vectors[2].x)
713
+ ba_vl = list(vectors[3].x)
714
+ ba_vu = list(vectors[4].x)
715
+ bc_vc[i + 1] = vut[i + 1]
716
+ b_vl[i + 1] = vut[i + 1]
717
+ b_vu[i + 1] = vut[i + 1]
718
+ ba_vl[i + 1] = vut[i + 1]
719
+ ba_vu[i + 1] = vut[i + 1]
720
+ bc_vc = self.V[tuple(bc_vc)]
721
+ bc_vc.connect(vco) # NOTE: Unneeded?
722
+ yield bc_vc
723
+
724
+ # Split to centre, call this centre group "d = 0.5*a"
725
+ d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x)
726
+ d_bc_vc.connect(bc_vc)
727
+ d_bc_vc.connect(vectors[1]) # Connect all to centroid
728
+ d_bc_vc.connect(vectors[2]) # Connect all to centroid
729
+ d_bc_vc.connect(vectors[3]) # Connect all to centroid
730
+ d_bc_vc.connect(vectors[4]) # Connect all to centroid
731
+ yield d_bc_vc.x
732
+ b_vl = self.V[tuple(b_vl)]
733
+ bc_vc.connect(b_vl) # Connect aN cross pairs
734
+ d_bc_vc.connect(b_vl) # Connect all to centroid
735
+ yield b_vl
736
+ b_vu = self.V[tuple(b_vu)]
737
+ bc_vc.connect(b_vu) # Connect aN cross pairs
738
+ d_bc_vc.connect(b_vu) # Connect all to centroid
739
+ yield b_vu
740
+ ba_vl = self.V[tuple(ba_vl)]
741
+ bc_vc.connect(ba_vl) # Connect aN cross pairs
742
+ d_bc_vc.connect(ba_vl) # Connect all to centroid
743
+ self.split_edge(b_vu.x, ba_vl.x)
744
+ yield ba_vl
745
+ ba_vu = self.V[tuple(ba_vu)]
746
+ bc_vc.connect(ba_vu) # Connect aN cross pairs
747
+ d_bc_vc.connect(ba_vu) # Connect all to centroid
748
+ # Split the a + b edge of the initial triangulation:
749
+ os_v = self.split_edge(vectors[1].x, ba_vu.x) # o-s
750
+ ss_v = self.split_edge(b_vl.x, ba_vu.x) # s-s
751
+ yield os_v.x # often equal to vco, but not always
752
+ yield ss_v.x # often equal to bc_vu, but not always
753
+ yield ba_vu
754
+ # Split remaining to centre, call this centre group
755
+ # "d = 0.5*a"
756
+ d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x)
757
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
758
+ yield d_bc_vc.x
759
+ d_b_vl = self.split_edge(vectors[1].x, b_vl.x)
760
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
761
+ d_bc_vc.connect(d_b_vl) # Connect dN cross pairs
762
+ yield d_b_vl.x
763
+ d_b_vu = self.split_edge(vectors[2].x, b_vu.x)
764
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
765
+ d_bc_vc.connect(d_b_vu) # Connect dN cross pairs
766
+ yield d_b_vu.x
767
+ d_ba_vl = self.split_edge(vectors[3].x, ba_vl.x)
768
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
769
+ d_bc_vc.connect(d_ba_vl) # Connect dN cross pairs
770
+ yield d_ba_vl
771
+ d_ba_vu = self.split_edge(vectors[4].x, ba_vu.x)
772
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
773
+ d_bc_vc.connect(d_ba_vu) # Connect dN cross pairs
774
+ yield d_ba_vu
775
+ c_vc, vl, vu, a_vl, a_vu = vectors
776
+
777
+ comb = [vl, vu, a_vl, a_vu,
778
+ b_vl, b_vu, ba_vl, ba_vu]
779
+ comb_iter = itertools.combinations(comb, 2)
780
+ for vecs in comb_iter:
781
+ self.split_edge(vecs[0].x, vecs[1].x)
782
+
783
+ # Add new list of cross pairs
784
+ ab_C.append((bc_vc, b_vl, b_vu, ba_vl, ba_vu))
785
+ ab_C.append((d_bc_vc, d_b_vl, d_b_vu, d_ba_vl, d_ba_vu))
786
+ ab_C.append((d_bc_vc, vectors[1], b_vl, a_vu, ba_vu))
787
+ ab_C.append((d_bc_vc, vu, b_vu, a_vl, ba_vl))
788
+
789
+ for j, (VL, VC, VU) in enumerate(zip(cCox, cCcx, cCux)):
790
+ for k, (vl, vc, vu) in enumerate(zip(VL, VC, VU)):
791
+ # Build aN vertices for each lower-upper C3 group in N:
792
+ a_vl = list(vl.x)
793
+ a_vu = list(vu.x)
794
+ a_vl[i + 1] = vut[i + 1]
795
+ a_vu[i + 1] = vut[i + 1]
796
+ a_vl = self.V[tuple(a_vl)]
797
+ a_vu = self.V[tuple(a_vu)]
798
+ # Note, build (a + vc) later for consistent yields
799
+ # Split the a + b edge of the initial triangulation:
800
+ c_vc = self.split_edge(vl.x, a_vu.x)
801
+ self.split_edge(vl.x, vu.x) # Equal to vc
802
+ # Build cN vertices for each lower-upper C3 group in N:
803
+ c_vc.connect(vco)
804
+ c_vc.connect(vc)
805
+ c_vc.connect(vl) # Connect c + ac operations
806
+ c_vc.connect(vu) # Connect c + ac operations
807
+ c_vc.connect(a_vl) # Connect c + ac operations
808
+ c_vc.connect(a_vu) # Connect c + ac operations
809
+ yield c_vc.x
810
+ c_vl = self.split_edge(vl.x, a_vl.x)
811
+ c_vl.connect(vco)
812
+ c_vc.connect(c_vl) # Connect cN group vertices
813
+ yield c_vl.x
814
+ # yield at end of loop:
815
+ c_vu = self.split_edge(vu.x, a_vu.x)
816
+ c_vu.connect(vco)
817
+ # Connect remaining cN group vertices
818
+ c_vc.connect(c_vu) # Connect cN group vertices
819
+ yield c_vu.x
820
+
821
+ a_vc = self.split_edge(a_vl.x, a_vu.x) # is (a + vc) ?
822
+ a_vc.connect(vco)
823
+ a_vc.connect(c_vc)
824
+
825
+ # Storage for connecting c + ac operations:
826
+ ab_C.append((c_vc, vl, vu, a_vl, a_vu))
827
+
828
+ # Update the containers
829
+ Cox[i + 1].append(vl)
830
+ Cox[i + 1].append(vc)
831
+ Cox[i + 1].append(vu)
832
+ Ccx[i + 1].append(c_vl)
833
+ Ccx[i + 1].append(c_vc)
834
+ Ccx[i + 1].append(c_vu)
835
+ Cux[i + 1].append(a_vl)
836
+ Cux[i + 1].append(a_vc)
837
+ Cux[i + 1].append(a_vu)
838
+
839
+ # Update old containers
840
+ Cox[j].append(c_vl) # !
841
+ Cox[j].append(a_vl)
842
+ Ccx[j].append(c_vc) # !
843
+ Ccx[j].append(a_vc) # !
844
+ Cux[j].append(c_vu) # !
845
+ Cux[j].append(a_vu)
846
+
847
+ # Yield new points
848
+ yield a_vc.x
849
+
850
+ except IndexError:
851
+ for vectors in ab_Cc:
852
+ ba_vl = list(vectors[3].x)
853
+ ba_vu = list(vectors[4].x)
854
+ ba_vl[i + 1] = vut[i + 1]
855
+ ba_vu[i + 1] = vut[i + 1]
856
+ ba_vu = self.V[tuple(ba_vu)]
857
+ yield ba_vu
858
+ d_bc_vc = self.split_edge(vectors[1].x, ba_vu.x) # o-s
859
+ yield ba_vu
860
+ d_bc_vc.connect(vectors[1]) # Connect all to centroid
861
+ d_bc_vc.connect(vectors[2]) # Connect all to centroid
862
+ d_bc_vc.connect(vectors[3]) # Connect all to centroid
863
+ d_bc_vc.connect(vectors[4]) # Connect all to centroid
864
+ yield d_bc_vc.x
865
+ ba_vl = self.V[tuple(ba_vl)]
866
+ yield ba_vl
867
+ d_ba_vl = self.split_edge(vectors[3].x, ba_vl.x)
868
+ d_ba_vu = self.split_edge(vectors[4].x, ba_vu.x)
869
+ d_ba_vc = self.split_edge(d_ba_vl.x, d_ba_vu.x)
870
+ yield d_ba_vl
871
+ yield d_ba_vu
872
+ yield d_ba_vc
873
+ c_vc, vl, vu, a_vl, a_vu = vectors
874
+ comb = [vl, vu, a_vl, a_vu,
875
+ ba_vl,
876
+ ba_vu]
877
+ comb_iter = itertools.combinations(comb, 2)
878
+ for vecs in comb_iter:
879
+ self.split_edge(vecs[0].x, vecs[1].x)
880
+
881
+ # Copy lists for iteration
882
+ cCox = Cox[i]
883
+ cCcx = Ccx[i]
884
+ cCux = Cux[i]
885
+ VL, VC, VU = cCox, cCcx, cCux
886
+ for k, (vl, vc, vu) in enumerate(zip(VL, VC, VU)):
887
+ # Build aN vertices for each lower-upper pair in N:
888
+ a_vu = list(vu.x)
889
+ a_vu[i + 1] = vut[i + 1]
890
+
891
+ # Connect vertices in N to corresponding vertices
892
+ # in aN:
893
+ a_vu = self.V[tuple(a_vu)]
894
+ yield a_vl.x
895
+ # Split the a + b edge of the initial triangulation:
896
+ c_vc = self.split_edge(vl.x, a_vu.x)
897
+ self.split_edge(vl.x, vu.x) # Equal to vc
898
+ c_vc.connect(vco)
899
+ c_vc.connect(vc)
900
+ c_vc.connect(vl) # Connect c + ac operations
901
+ c_vc.connect(vu) # Connect c + ac operations
902
+ c_vc.connect(a_vu) # Connect c + ac operations
903
+ yield (c_vc.x)
904
+ c_vu = self.split_edge(vu.x,
905
+ a_vu.x) # yield at end of loop
906
+ c_vu.connect(vco)
907
+ # Connect remaining cN group vertices
908
+ c_vc.connect(c_vu) # Connect cN group vertices
909
+ yield (c_vu.x)
910
+
911
+ # Update the containers
912
+ Cox[i + 1].append(vu)
913
+ Ccx[i + 1].append(c_vu)
914
+ Cux[i + 1].append(a_vu)
915
+
916
+ # Update old containers
917
+ s_ab_C.append([c_vc, vl, vu, a_vu])
918
+
919
+ yield a_vu.x
920
+
921
+ # Clean class trash
922
+ try:
923
+ del Cox
924
+ del Ccx
925
+ del Cux
926
+ del ab_C
927
+ del ab_Cc
928
+ except UnboundLocalError:
929
+ pass
930
+
931
+ try:
932
+ self.triangulated_vectors.remove((tuple(origin_c),
933
+ tuple(supremum_c)))
934
+ except ValueError:
935
+ # Turn this into a logging warning?
936
+ pass
937
+ # Add newly triangulated vectors:
938
+ for vs in sup_set:
939
+ self.triangulated_vectors.append((tuple(vco.x), tuple(vs.x)))
940
+
941
+ # Extra yield to ensure that the triangulation is completed
942
+ if centroid:
943
+ vcn_set = set()
944
+ c_nn_lists = []
945
+ for vs in sup_set:
946
+ # Build centroid
947
+ c_nn = self.vpool(vco.x, vs.x)
948
+ try:
949
+ c_nn.remove(vcn_set)
950
+ except KeyError:
951
+ pass
952
+ c_nn_lists.append(c_nn)
953
+
954
+ for c_nn in c_nn_lists:
955
+ try:
956
+ c_nn.remove(vcn_set)
957
+ except KeyError:
958
+ pass
959
+
960
+ for vs, c_nn in zip(sup_set, c_nn_lists):
961
+ # Build centroid
962
+ vcn = self.split_edge(vco.x, vs.x)
963
+ vcn_set.add(vcn)
964
+ try: # Shouldn't be needed?
965
+ c_nn.remove(vcn_set)
966
+ except KeyError:
967
+ pass
968
+ for vnn in c_nn:
969
+ vcn.connect(vnn)
970
+ yield vcn.x
971
+ else:
972
+ pass
973
+
974
+ yield vut
975
+ return
976
+
977
+ def refine_star(self, v):
978
+ """Refine the star domain of a vertex `v`."""
979
+ # Copy lists before iteration
980
+ vnn = copy.copy(v.nn)
981
+ v1nn = []
982
+ d_v0v1_set = set()
983
+ for v1 in vnn:
984
+ v1nn.append(copy.copy(v1.nn))
985
+
986
+ for v1, v1nn in zip(vnn, v1nn):
987
+ vnnu = v1nn.intersection(vnn)
988
+
989
+ d_v0v1 = self.split_edge(v.x, v1.x)
990
+ for o_d_v0v1 in d_v0v1_set:
991
+ d_v0v1.connect(o_d_v0v1)
992
+ d_v0v1_set.add(d_v0v1)
993
+ for v2 in vnnu:
994
+ d_v1v2 = self.split_edge(v1.x, v2.x)
995
+ d_v0v1.connect(d_v1v2)
996
+ return
997
+
998
+ @cache
999
+ def split_edge(self, v1, v2):
1000
+ v1 = self.V[v1]
1001
+ v2 = self.V[v2]
1002
+ # Destroy original edge, if it exists:
1003
+ v1.disconnect(v2)
1004
+ # Compute vertex on centre of edge:
1005
+ try:
1006
+ vct = (v2.x_a - v1.x_a) / 2.0 + v1.x_a
1007
+ except TypeError: # Allow for decimal operations
1008
+ vct = (v2.x_a - v1.x_a) / decimal.Decimal(2.0) + v1.x_a
1009
+
1010
+ vc = self.V[tuple(vct)]
1011
+ # Connect the original 2 vertices to the new centre vertex
1012
+ vc.connect(v1)
1013
+ vc.connect(v2)
1014
+ return vc
1015
+
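+ # A standalone check (hypothetical vertices) of the midpoint rule in
+ # split_edge, including the decimal fallback used for Decimal grids:
+ _v1 = numpy.array([0.0, 0.0])
+ _v2 = numpy.array([1.0, 1.0])
+ _vct = (_v2 - _v1) / 2.0 + _v1  # -> [0.5, 0.5]
+ _d1 = numpy.array([decimal.Decimal(0), decimal.Decimal(0)])
+ _d2 = numpy.array([decimal.Decimal(1), decimal.Decimal(1)])
+ _dct = (_d2 - _d1) / decimal.Decimal(2) + _d1  # Decimal-safe midpoint
+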
1016
+ def vpool(self, origin, supremum):
1017
+ vot = tuple(origin)
1018
+ vst = tuple(supremum)
1019
+ # Initiate vertices in case they don't exist
1020
+ vo = self.V[vot]
1021
+ vs = self.V[vst]
1022
+
1023
+ # Remove origin - supremum disconnect
1024
+
1025
+ # Find the lower/upper bounds of the refinement hyperrectangle
1026
+ bl = list(vot)
1027
+ bu = list(vst)
1028
+ for i, (voi, vsi) in enumerate(zip(vot, vst)):
1029
+ if bl[i] > vsi:
1030
+ bl[i] = vsi
1031
+ if bu[i] < voi:
1032
+ bu[i] = voi
1033
+
1034
+ # NOTE: This is mostly done with sets/lists because we aren't sure
1035
+ # how well the numpy arrays will scale to thousands of
1036
+ # dimensions.
1037
+ vn_pool = set()
1038
+ vn_pool.update(vo.nn)
1039
+ vn_pool.update(vs.nn)
1040
+ cvn_pool = copy.copy(vn_pool)
1041
+ for vn in cvn_pool:
1042
+ for i, xi in enumerate(vn.x):
1043
+ if bl[i] <= xi <= bu[i]:
1044
+ pass
1045
+ else:
1046
+ try:
1047
+ vn_pool.remove(vn)
1048
+ except KeyError:
1049
+ pass # NOTE: Not all neighbours are in the initial pool
1050
+ return vn_pool
1051
+
1052
+ def vf_to_vv(self, vertices, simplices):
1053
+ """
1054
+ Convert a vertex-face mesh to a vertex-vertex mesh used by this class
1055
+
1056
+ Parameters
1057
+ ----------
1058
+ vertices : list
1059
+ Vertices
1060
+ simplices : list
1061
+ Simplices
1062
+ """
1063
+ if self.dim > 1:
1064
+ for s in simplices:
1065
+ edges = itertools.combinations(s, self.dim)
1066
+ for e in edges:
1067
+ self.V[tuple(vertices[e[0]])].connect(
1068
+ self.V[tuple(vertices[e[1]])])
1069
+ else:
1070
+ for e in simplices:
1071
+ self.V[tuple(vertices[e[0]])].connect(
1072
+ self.V[tuple(vertices[e[1]])])
1073
+ return
1074
+
1075
+ def connect_vertex_non_symm(self, v_x, near=None):
1076
+ """
1077
+ Adds a vertex at coords v_x to the complex that is not symmetric to the
1078
+ initial triangulation and sub-triangulation.
1079
+
1080
+ If near is specified (for example, a star domain or a collection of
1081
+ cells known to contain v) then only those simplices contained in near
1082
+ will be searched, which greatly speeds up the process.
1083
+
1084
+ If near is not specified this method will search the entire simplicial
1085
+ complex structure.
1086
+
1087
+ Parameters
1088
+ ----------
1089
+ v_x : tuple
1090
+ Coordinates of non-symmetric vertex
1091
+ near : set or list
1092
+ List of vertices, these are points near v to check for
1093
+ """
1094
+ if near is None:
1095
+ star = self.V
1096
+ else:
1097
+ star = near
1098
+ # Create the vertex origin
1099
+ if tuple(v_x) in self.V.cache:
1100
+ if self.V[v_x] in self.V_non_symm:
1101
+ pass
1102
+ else:
1103
+ return
1104
+
1105
+ self.V[v_x]
1106
+ found_nn = False
1107
+ S_rows = []
1108
+ for v in star:
1109
+ S_rows.append(v.x)
1110
+
1111
+ S_rows = numpy.array(S_rows)
1112
+ A = numpy.array(S_rows) - numpy.array(v_x)
1113
+ # Iterate through all the possible simplices of S_rows
1114
+ for s_i in itertools.combinations(range(S_rows.shape[0]),
1115
+ r=self.dim + 1):
1116
+ # Check if connected, else s_i is not a simplex
1117
+ valid_simplex = True
1118
+ for i in itertools.combinations(s_i, r=2):
1119
+ # Every pair of vertices in s_i must be connected; we check
1120
+ # whether the current combination is fully connected and
1121
+ # break the loop if it is not.
1122
+ if ((self.V[tuple(S_rows[i[1]])] not in
1123
+ self.V[tuple(S_rows[i[0]])].nn)
1124
+ and (self.V[tuple(S_rows[i[0]])] not in
1125
+ self.V[tuple(S_rows[i[1]])].nn)):
1126
+ valid_simplex = False
1127
+ break
1128
+
1129
+ S = S_rows[tuple([s_i])]
1130
+ if valid_simplex:
1131
+ if self.deg_simplex(S, proj=None):
1132
+ valid_simplex = False
1133
+
1134
+ # If s_i is a valid simplex we can test if v_x is inside si
1135
+ if valid_simplex:
1136
+ # Find the A_j0 value from the precalculated values
1137
+ A_j0 = A[tuple([s_i])]
1138
+ if self.in_simplex(S, v_x, A_j0):
1139
+ found_nn = True
1140
+ # breaks the main for loop, s_i is the target simplex:
1141
+ break
1142
+
1143
+ # Connect the simplex to point
1144
+ if found_nn:
1145
+ for i in s_i:
1146
+ self.V[v_x].connect(self.V[tuple(S_rows[i])])
1147
+ # Attached the simplex to storage for all non-symmetric vertices
1148
+ self.V_non_symm.append(self.V[v_x])
1149
+ # this bool value indicates a successful connection if True:
1150
+ return found_nn
1151
+
1152
+ def in_simplex(self, S, v_x, A_j0=None):
1153
+ """Check if a vector v_x is in simplex `S`.
1154
+
1155
+ Parameters
1156
+ ----------
1157
+ S : array_like
1158
+ Array containing simplex entries of vertices as rows
1159
+ v_x :
1160
+ A candidate vertex
1161
+ A_j0 : array, optional,
1162
+ Allows for A_j0 to be pre-calculated
1163
+
1164
+ Returns
1165
+ -------
1166
+ res : boolean
1167
+ True if `v_x` is in `S`
1168
+ """
1169
+ A_11 = numpy.delete(S, 0, 0) - S[0]
1170
+
1171
+ sign_det_A_11 = numpy.sign(numpy.linalg.det(A_11))
1172
+ if sign_det_A_11 == 0:
1173
+ # NOTE: We keep the variable A_11, but we loop through A_jj
1174
+ # ind=
1175
+ # while sign_det_A_11 == 0:
1176
+ # A_11 = numpy.delete(S, ind, 0) - S[ind]
1177
+ # sign_det_A_11 = numpy.sign(numpy.linalg.det(A_11))
1178
+
1179
+ sign_det_A_11 = -1 # TODO: Choose another det of j instead?
1180
+ # TODO: Unlikely to work in many cases
1181
+
1182
+ if A_j0 is None:
1183
+ A_j0 = S - v_x
1184
+
1185
+ for d in range(self.dim + 1):
1186
+ det_A_jj = (-1)**d * sign_det_A_11
1187
+ # TODO: Note that scipy might be faster to add as an optional
1188
+ # dependency
1189
+ sign_det_A_j0 = numpy.sign(numpy.linalg.det(numpy.delete(A_j0, d,
1190
+ 0)))
1191
+ # TODO: Note if sign_det_A_j0 == then the point is coplanar to the
1192
+ # current simplex facet, so perhaps return True and attach?
1193
+ if det_A_jj == sign_det_A_j0:
1194
+ continue
1195
+ else:
1196
+ return False
1197
+
1198
+ return True
1199
+
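+ # A worked 2-D example (hypothetical simplex) of the determinant sign
+ # test above: v_x is inside S when every oriented sub-determinant of
+ # A_j0 matches the alternating reference sign.
+ _S = numpy.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
+ _v_x = numpy.array([0.25, 0.25])
+ _sign_11 = numpy.sign(numpy.linalg.det(numpy.delete(_S, 0, 0) - _S[0]))
+ _A_j0 = _S - _v_x
+ _inside = all((-1) ** _d * _sign_11 ==
+               numpy.sign(numpy.linalg.det(numpy.delete(_A_j0, _d, 0)))
+               for _d in range(3))  # -> True
+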
1200
+ def deg_simplex(self, S, proj=None):
1201
+ """Test a simplex S for degeneracy (linear dependence in R^dim).
1202
+
1203
+ Parameters
1204
+ ----------
1205
+ S : np.array
1206
+ Simplex with rows as vertex vectors
1207
+ proj : array, optional,
1208
+ If the projection S[1:] - S[0] is already
1209
+ computed it can be added as an optional argument.
1210
+ """
1211
+ # Strategy: we test all combinations of faces; if any of the
1212
+ # determinants are zero then the vectors lie on the same face and are
1213
+ # therefore linearly dependent in the space of R^dim
1214
+ if proj is None:
1215
+ proj = S[1:] - S[0]
1216
+
1217
+ # TODO: Is checking the projection of one vertex against faces of other
1218
+ # vertices sufficient? Or do we need to check more vertices in
1219
+ # dimensions higher than 2?
1220
+ # TODO: Literature seems to suggest using proj.T, but why is this
1221
+ # needed?
1222
+ if numpy.linalg.det(proj) == 0.0: # TODO: Replace with tolerance?
1223
+ return True # Simplex is degenerate
1224
+ else:
1225
+ return False # Simplex is not degenerate
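+
+ # A standalone check of the degeneracy test above: collinear vertices
+ # (hypothetical) give a zero-determinant projection.
+ _S_deg = numpy.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
+ _proj = _S_deg[1:] - _S_deg[0]
+ # numpy.linalg.det(_proj) == 0.0, so the simplex is degenerate.
+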
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_vertex.py ADDED
@@ -0,0 +1,460 @@
+ import collections
+ from abc import ABC, abstractmethod
+
+ import numpy as np
+
+ from scipy._lib._util import MapWrapper
+
+
+ class VertexBase(ABC):
+     """
+     Base class for a vertex.
+     """
+     def __init__(self, x, nn=None, index=None):
+         """
+         Initiation of a vertex object.
+
+         Parameters
+         ----------
+         x : tuple or vector
+             The geometric location (domain).
+         nn : list, optional
+             Nearest neighbour list.
+         index : int, optional
+             Index of vertex.
+         """
+         self.x = x
+         self.hash = hash(self.x)  # Save precomputed hash
+
+         if nn is not None:
+             self.nn = set(nn)  # can use .indexupdate to add a new list
+         else:
+             self.nn = set()
+
+         self.index = index
+
+     def __hash__(self):
+         return self.hash
+
+     def __getattr__(self, item):
+         if item not in ['x_a']:
+             raise AttributeError(f"{type(self)} object has no attribute "
+                                  f"'{item}'")
+         if item == 'x_a':
+             self.x_a = np.array(self.x)
+             return self.x_a
+
+     @abstractmethod
+     def connect(self, v):
+         raise NotImplementedError("This method is only implemented with an "
+                                   "associated child of the base class.")
+
+     @abstractmethod
+     def disconnect(self, v):
+         raise NotImplementedError("This method is only implemented with an "
+                                   "associated child of the base class.")
+
+     def star(self):
+         """Returns the star domain ``st(v)`` of the vertex.
+
+         Returns
+         -------
+         st : set
+             A set containing all the vertices in ``st(v)``
+         """
+         self.st = self.nn
+         self.st.add(self)
+         return self.st
+
+
+ class VertexScalarField(VertexBase):
+     """
+     Add homology properties of a scalar field f: R^n --> R associated with
+     the geometry built from the VertexBase class.
+     """
+
+     def __init__(self, x, field=None, nn=None, index=None, field_args=(),
+                  g_cons=None, g_cons_args=()):
+         """
+         Parameters
+         ----------
+         x : tuple
+             Vector of vertex coordinates.
+         field : callable, optional
+             A scalar field f: R^n --> R associated with the geometry.
+         nn : list, optional
+             List of nearest neighbours.
+         index : int, optional
+             Index of the vertex.
+         field_args : tuple, optional
+             Additional arguments to be passed to field.
+         g_cons : callable, optional
+             Constraints on the vertex.
+         g_cons_args : tuple, optional
+             Additional arguments to be passed to g_cons.
+         """
+         super().__init__(x, nn=nn, index=index)
+
+         # Note: a Vertex is only initiated once for each x, so the field is
+         # only evaluated once.
+         # self.feasible = None
+
+         # self.f is externally defined by the cache to allow parallel
+         # processing
+         # None type that will break arithmetic operations unless defined
+         # self.f = None
+
+         self.check_min = True
+         self.check_max = True
+
+     def connect(self, v):
+         """Connects self to another vertex object v.
+
+         Parameters
+         ----------
+         v : VertexBase or VertexScalarField object
+         """
+         if v is not self and v not in self.nn:
+             self.nn.add(v)
+             v.nn.add(self)
+
+             # Flags for checking homology properties:
+             self.check_min = True
+             self.check_max = True
+             v.check_min = True
+             v.check_max = True
+
+     def disconnect(self, v):
+         if v in self.nn:
+             self.nn.remove(v)
+             v.nn.remove(self)
+
+             # Flags for checking homology properties:
+             self.check_min = True
+             self.check_max = True
+             v.check_min = True
+             v.check_max = True
+
+     def minimiser(self):
+         """Check whether this vertex is strictly less than all its
+            neighbours."""
+         if self.check_min:
+             self._min = all(self.f < v.f for v in self.nn)
+             self.check_min = False
+
+         return self._min
+
+     def maximiser(self):
+         """
+         Check whether this vertex is strictly greater than all its
+         neighbours.
+         """
+         if self.check_max:
+             self._max = all(self.f > v.f for v in self.nn)
+             self.check_max = False
+
+         return self._max
+
+
+ class VertexVectorField(VertexBase):
+     """
+     Add homology properties of a vector field f: R^n --> R^m associated with
+     the geometry built from the VertexBase class.
+     """
+
+     def __init__(self, x, sfield=None, vfield=None, field_args=(),
+                  vfield_args=(), g_cons=None,
+                  g_cons_args=(), nn=None, index=None):
+         super().__init__(x, nn=nn, index=index)
+
+         raise NotImplementedError("This class is still a work in progress")
+
+
+ class VertexCacheBase:
+     """Base class for a vertex cache for a simplicial complex."""
+     def __init__(self):
+         self.cache = collections.OrderedDict()
+         self.nfev = 0  # Feasible points
+         self.index = -1
+
+     def __iter__(self):
+         for v in self.cache:
+             yield self.cache[v]
+         return
+
+     def size(self):
+         """Returns the size of the vertex cache."""
+         return self.index + 1
+
+     def print_out(self):
+         headlen = len(f"Vertex cache of size: {len(self.cache)}:")
+         print('=' * headlen)
+         print(f"Vertex cache of size: {len(self.cache)}:")
+         print('=' * headlen)
+         for v in self.cache:
+             self.cache[v].print_out()
+
+
+ class VertexCube(VertexBase):
+     """Vertex class to be used for a pure simplicial complex with no associated
+     differential geometry (single level domain that exists in R^n)"""
+     def __init__(self, x, nn=None, index=None):
+         super().__init__(x, nn=nn, index=index)
+
+     def connect(self, v):
+         if v is not self and v not in self.nn:
+             self.nn.add(v)
+             v.nn.add(self)
+
+     def disconnect(self, v):
+         if v in self.nn:
+             self.nn.remove(v)
+             v.nn.remove(self)
+
+
+ class VertexCacheIndex(VertexCacheBase):
+     def __init__(self):
+         """
+         Class for a vertex cache for a simplicial complex without an
+         associated field. Useful only for building and visualising a
+         domain complex.
+         """
+         super().__init__()
+         self.Vertex = VertexCube
+
+     def __getitem__(self, x, nn=None):
+         try:
+             return self.cache[x]
+         except KeyError:
+             self.index += 1
+             xval = self.Vertex(x, index=self.index)
+             # logging.info("New generated vertex at x = {}".format(x))
+             # NOTE: Surprisingly high performance increase if logging
+             # is commented out
+             self.cache[x] = xval
+             return self.cache[x]
+
+
+ class VertexCacheField(VertexCacheBase):
+     def __init__(self, field=None, field_args=(), g_cons=None, g_cons_args=(),
+                  workers=1):
+         """
+         Class for a vertex cache for a simplicial complex with an associated
+         field.
+
+         Parameters
+         ----------
+         field : callable
+             Scalar or vector field callable.
+         field_args : tuple, optional
+             Any additional fixed parameters needed to completely specify the
+             field function.
+         g_cons : dict or sequence of dict, optional
+             Constraints definition.
+             Function(s) ``R**n`` in the form::
+         g_cons_args : tuple, optional
+             Any additional fixed parameters needed to completely specify the
+             constraint functions.
+         workers : int, optional
+             Uses `multiprocessing.Pool <multiprocessing>` to compute the
+             field functions in parallel.
+         """
+         super().__init__()
+         self.index = -1
+         self.Vertex = VertexScalarField
+         self.field = field
+         self.field_args = field_args
+         self.wfield = FieldWrapper(field, field_args)  # if workers is not 1
+
+         self.g_cons = g_cons
+         self.g_cons_args = g_cons_args
+         self.wgcons = ConstraintWrapper(g_cons, g_cons_args)
+         self.gpool = set()  # A set of tuples to process for feasibility
+
+         # Field processing objects
+         self.fpool = set()  # A set of tuples to process for scalar function
+         self.sfc_lock = False  # True if self.fpool is non-Empty
+
+         self.workers = workers
+         self._mapwrapper = MapWrapper(workers)
+
+         if workers == 1:
+             self.process_gpool = self.proc_gpool
+             if g_cons is None:
+                 self.process_fpool = self.proc_fpool_nog
+             else:
+                 self.process_fpool = self.proc_fpool_g
+         else:
+             self.process_gpool = self.pproc_gpool
+             if g_cons is None:
+                 self.process_fpool = self.pproc_fpool_nog
+             else:
+                 self.process_fpool = self.pproc_fpool_g
+
+     def __getitem__(self, x, nn=None):
+         try:
+             return self.cache[x]
+         except KeyError:
+             self.index += 1
+             xval = self.Vertex(x, field=self.field, nn=nn, index=self.index,
+                                field_args=self.field_args,
+                                g_cons=self.g_cons,
+                                g_cons_args=self.g_cons_args)
+
+             self.cache[x] = xval  # Define in cache
+             self.gpool.add(xval)  # Add to pool for processing feasibility
+             self.fpool.add(xval)  # Add to pool for processing field values
+             return self.cache[x]
+
+     def __getstate__(self):
+         self_dict = self.__dict__.copy()
+         del self_dict['pool']
+         return self_dict
+
+     def process_pools(self):
+         if self.g_cons is not None:
+             self.process_gpool()
+         self.process_fpool()
+         self.proc_minimisers()
+
+     def feasibility_check(self, v):
+         v.feasible = True
+         for g, args in zip(self.g_cons, self.g_cons_args):
+             # constraint may return more than 1 value.
+             if np.any(g(v.x_a, *args) < 0.0):
+                 v.f = np.inf
+                 v.feasible = False
+                 break
+
+     def compute_sfield(self, v):
+         """Compute the scalar field values of a vertex object `v`.
+
+         Parameters
+         ----------
+         v : VertexBase or VertexScalarField object
+         """
+         try:
+             v.f = self.field(v.x_a, *self.field_args)
+             self.nfev += 1
+         except AttributeError:
+             v.f = np.inf
+             # logging.warning(f"Field function not found at x = {self.x_a}")
+         if np.isnan(v.f):
+             v.f = np.inf
+
+     def proc_gpool(self):
+         """Process all constraints."""
+         if self.g_cons is not None:
+             for v in self.gpool:
+                 self.feasibility_check(v)
+         # Clean the pool
+         self.gpool = set()
+
+     def pproc_gpool(self):
+         """Process all constraints in parallel."""
+         gpool_l = []
+         for v in self.gpool:
+             gpool_l.append(v.x_a)
+
+         G = self._mapwrapper(self.wgcons.gcons, gpool_l)
+         for v, g in zip(self.gpool, G):
+             v.feasible = g  # set vertex object attribute v.feasible = g (bool)
+
+     def proc_fpool_g(self):
+         """Process all field functions with constraints supplied."""
+         for v in self.fpool:
+             if v.feasible:
+                 self.compute_sfield(v)
+         # Clean the pool
+         self.fpool = set()
+
+     def proc_fpool_nog(self):
+         """Process all field functions with no constraints supplied."""
+         for v in self.fpool:
+             self.compute_sfield(v)
+         # Clean the pool
+         self.fpool = set()
+
+     def pproc_fpool_g(self):
+         """
+         Process all field functions with constraints supplied in parallel.
+         """
+         self.wfield.func
+         fpool_l = []
+         for v in self.fpool:
+             if v.feasible:
+                 fpool_l.append(v.x_a)
+             else:
+                 v.f = np.inf
+         F = self._mapwrapper(self.wfield.func, fpool_l)
+         for va, f in zip(fpool_l, F):
+             vt = tuple(va)
+             self[vt].f = f  # set vertex object attribute v.f = f
+             self.nfev += 1
+         # Clean the pool
+         self.fpool = set()
+
+     def pproc_fpool_nog(self):
+         """
+         Process all field functions with no constraints supplied in parallel.
+         """
+         self.wfield.func
+         fpool_l = []
+         for v in self.fpool:
+             fpool_l.append(v.x_a)
+         F = self._mapwrapper(self.wfield.func, fpool_l)
+         for va, f in zip(fpool_l, F):
+             vt = tuple(va)
+             self[vt].f = f  # set vertex object attribute v.f = f
+             self.nfev += 1
+         # Clean the pool
+         self.fpool = set()
+
+     def proc_minimisers(self):
+         """Check for minimisers."""
+         for v in self:
+             v.minimiser()
+             v.maximiser()
+
+
+ class ConstraintWrapper:
+     """Object to wrap constraints to pass to `multiprocessing.Pool`."""
+     def __init__(self, g_cons, g_cons_args):
+         self.g_cons = g_cons
+         self.g_cons_args = g_cons_args
+
+     def gcons(self, v_x_a):
+         vfeasible = True
+         for g, args in zip(self.g_cons, self.g_cons_args):
+             # constraint may return more than 1 value.
+             if np.any(g(v_x_a, *args) < 0.0):
+                 vfeasible = False
+                 break
+         return vfeasible
+
+
+ class FieldWrapper:
+     """Object to wrap field to pass to `multiprocessing.Pool`."""
+     def __init__(self, field, field_args):
+         self.field = field
+         self.field_args = field_args
+
+     def func(self, v_x_a):
+         try:
+             v_f = self.field(v_x_a, *self.field_args)
+         except Exception:
+             v_f = np.inf
+         if np.isnan(v_f):
+             v_f = np.inf
+
+         return v_f
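A minimal sketch of how this private module is driven by shgo's triangulation code (the `sphere` field and the sample points are illustrative only, not taken from the diff): vertices are requested from the cache by coordinate tuple, connected into a neighbour structure, and the pools are then processed to evaluate field values and minimiser flags.

    import numpy as np
    from scipy.optimize._shgo_lib._vertex import VertexCacheField

    def sphere(x):
        # illustrative scalar field f: R^n --> R
        return np.sum(x**2)

    vc = VertexCacheField(field=sphere)   # serial processing (workers=1)
    v0 = vc[(0.0, 0.0)]                   # vertices are keyed by coordinate tuples
    v1 = vc[(1.0, 0.0)]
    v0.connect(v1)                        # build the nearest-neighbour structure
    vc.process_pools()                    # evaluate feasibility/field over the pools
    print(v0.f, v0.minimiser())           # 0.0 True (strictly below its neighbour)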
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_slsqp.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (86.6 kB).
 
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_slsqp_py.py ADDED
@@ -0,0 +1,513 @@
+ """
+ This module implements the Sequential Least Squares Programming optimization
+ algorithm (SLSQP), originally developed by Dieter Kraft.
+ See http://www.netlib.org/toms/733
+
+ Functions
+ ---------
+ .. autosummary::
+    :toctree: generated/
+
+     approx_jacobian
+     fmin_slsqp
+
+ """
+
+ __all__ = ['approx_jacobian', 'fmin_slsqp']
+
+ import numpy as np
+ from scipy.optimize._slsqp import slsqp
+ from numpy import (zeros, array, linalg, append, concatenate, finfo,
+                    sqrt, vstack, isfinite, atleast_1d)
+ from ._optimize import (OptimizeResult, _check_unknown_options,
+                         _prepare_scalar_function, _clip_x_for_func,
+                         _check_clip_x)
+ from ._numdiff import approx_derivative
+ from ._constraints import old_bound_to_new, _arr_to_scalar
+ from scipy._lib._array_api import atleast_nd, array_namespace
+
+ # deprecated imports to be removed in SciPy 1.13.0
+ from numpy import exp, inf  # noqa: F401
+
+
+ __docformat__ = "restructuredtext en"
+
+ _epsilon = sqrt(finfo(float).eps)
+
+
+ def approx_jacobian(x, func, epsilon, *args):
+     """
+     Approximate the Jacobian matrix of a callable function.
+
+     Parameters
+     ----------
+     x : array_like
+         The state vector at which to compute the Jacobian matrix.
+     func : callable f(x,*args)
+         The vector-valued function.
+     epsilon : float
+         The perturbation used to determine the partial derivatives.
+     args : sequence
+         Additional arguments passed to func.
+
+     Returns
+     -------
+     An array of dimensions ``(lenf, lenx)`` where ``lenf`` is the length
+     of the outputs of `func`, and ``lenx`` is the number of elements in
+     `x`.
+
+     Notes
+     -----
+     The approximation is done using forward differences.
+
+     """
+     # approx_derivative returns (m, n) == (lenf, lenx)
+     jac = approx_derivative(func, x, method='2-point', abs_step=epsilon,
+                             args=args)
+     # if func returns a scalar jac.shape will be (lenx,). Make sure
+     # it's at least a 2D array.
+     return np.atleast_2d(jac)
+
+
+ def fmin_slsqp(func, x0, eqcons=(), f_eqcons=None, ieqcons=(), f_ieqcons=None,
+                bounds=(), fprime=None, fprime_eqcons=None,
+                fprime_ieqcons=None, args=(), iter=100, acc=1.0E-6,
+                iprint=1, disp=None, full_output=0, epsilon=_epsilon,
+                callback=None):
+     """
+     Minimize a function using Sequential Least Squares Programming
+
+     Python interface function for the SLSQP Optimization subroutine
+     originally implemented by Dieter Kraft.
+
+     Parameters
+     ----------
+     func : callable f(x,*args)
+         Objective function.  Must return a scalar.
+     x0 : 1-D ndarray of float
+         Initial guess for the independent variable(s).
+     eqcons : list, optional
+         A list of functions of length n such that
+         eqcons[j](x,*args) == 0.0 in a successfully optimized
+         problem.
+     f_eqcons : callable f(x,*args), optional
+         Returns a 1-D array in which each element must equal 0.0 in a
+         successfully optimized problem. If f_eqcons is specified,
+         eqcons is ignored.
+     ieqcons : list, optional
+         A list of functions of length n such that
+         ieqcons[j](x,*args) >= 0.0 in a successfully optimized
+         problem.
+     f_ieqcons : callable f(x,*args), optional
+         Returns a 1-D ndarray in which each element must be greater or
+         equal to 0.0 in a successfully optimized problem. If
+         f_ieqcons is specified, ieqcons is ignored.
+     bounds : list, optional
+         A list of tuples specifying the lower and upper bound
+         for each independent variable [(xl0, xu0),(xl1, xu1),...]
+         Infinite values will be interpreted as large floating values.
+     fprime : callable `f(x,*args)`, optional
+         A function that evaluates the partial derivatives of func.
+     fprime_eqcons : callable `f(x,*args)`, optional
+         A function of the form `f(x, *args)` that returns the m by n
+         array of equality constraint normals. If not provided,
+         the normals will be approximated. The array returned by
+         fprime_eqcons should be sized as ( len(eqcons), len(x0) ).
+     fprime_ieqcons : callable `f(x,*args)`, optional
+         A function of the form `f(x, *args)` that returns the m by n
+         array of inequality constraint normals. If not provided,
+         the normals will be approximated. The array returned by
+         fprime_ieqcons should be sized as ( len(ieqcons), len(x0) ).
+     args : sequence, optional
+         Additional arguments passed to func and fprime.
+     iter : int, optional
+         The maximum number of iterations.
+     acc : float, optional
+         Requested accuracy.
+     iprint : int, optional
+         The verbosity of fmin_slsqp :
+
+         * iprint <= 0 : Silent operation
+         * iprint == 1 : Print summary upon completion (default)
+         * iprint >= 2 : Print status of each iterate and summary
+     disp : int, optional
+         Overrides the iprint interface (preferred).
+     full_output : bool, optional
+         If False, return only the minimizer of func (default).
+         Otherwise, output final objective function and summary
+         information.
+     epsilon : float, optional
+         The step size for finite-difference derivative estimates.
+     callback : callable, optional
+         Called after each iteration, as ``callback(x)``, where ``x`` is the
+         current parameter vector.
+
+     Returns
+     -------
+     out : ndarray of float
+         The final minimizer of func.
+     fx : ndarray of float, if full_output is true
+         The final value of the objective function.
+     its : int, if full_output is true
+         The number of iterations.
+     imode : int, if full_output is true
+         The exit mode from the optimizer (see below).
+     smode : string, if full_output is true
+         Message describing the exit mode from the optimizer.
+
+     See also
+     --------
+     minimize: Interface to minimization algorithms for multivariate
+         functions. See the 'SLSQP' `method` in particular.
+
+     Notes
+     -----
+     Exit modes are defined as follows ::
+
+         -1 : Gradient evaluation required (g & a)
+          0 : Optimization terminated successfully
+          1 : Function evaluation required (f & c)
+          2 : More equality constraints than independent variables
+          3 : More than 3*n iterations in LSQ subproblem
+          4 : Inequality constraints incompatible
+          5 : Singular matrix E in LSQ subproblem
+          6 : Singular matrix C in LSQ subproblem
+          7 : Rank-deficient equality constraint subproblem HFTI
+          8 : Positive directional derivative for linesearch
+          9 : Iteration limit reached
+
+     Examples
+     --------
+     Examples are given :ref:`in the tutorial <tutorial-sqlsp>`.
+
+     """
+     if disp is not None:
+         iprint = disp
+
+     opts = {'maxiter': iter,
+             'ftol': acc,
+             'iprint': iprint,
+             'disp': iprint != 0,
+             'eps': epsilon,
+             'callback': callback}
+
+     # Build the constraints as a tuple of dictionaries
+     cons = ()
+     # 1. constraints of the 1st kind (eqcons, ieqcons); no Jacobian; take
+     #    the same extra arguments as the objective function.
+     cons += tuple({'type': 'eq', 'fun': c, 'args': args} for c in eqcons)
+     cons += tuple({'type': 'ineq', 'fun': c, 'args': args} for c in ieqcons)
+     # 2. constraints of the 2nd kind (f_eqcons, f_ieqcons) and their Jacobian
+     #    (fprime_eqcons, fprime_ieqcons); also take the same extra arguments
+     #    as the objective function.
+     if f_eqcons:
+         cons += ({'type': 'eq', 'fun': f_eqcons, 'jac': fprime_eqcons,
+                   'args': args}, )
+     if f_ieqcons:
+         cons += ({'type': 'ineq', 'fun': f_ieqcons, 'jac': fprime_ieqcons,
+                   'args': args}, )
+
+     res = _minimize_slsqp(func, x0, args, jac=fprime, bounds=bounds,
+                           constraints=cons, **opts)
+     if full_output:
+         return res['x'], res['fun'], res['nit'], res['status'], res['message']
+     else:
+         return res['x']
+
+
+ def _minimize_slsqp(func, x0, args=(), jac=None, bounds=None,
+                     constraints=(),
+                     maxiter=100, ftol=1.0E-6, iprint=1, disp=False,
+                     eps=_epsilon, callback=None, finite_diff_rel_step=None,
+                     **unknown_options):
+     """
+     Minimize a scalar function of one or more variables using Sequential
+     Least Squares Programming (SLSQP).
+
+     Options
+     -------
+     ftol : float
+         Precision goal for the value of f in the stopping criterion.
+     eps : float
+         Step size used for numerical approximation of the Jacobian.
+     disp : bool
+         Set to True to print convergence messages. If False,
+         `verbosity` is ignored and set to 0.
+     maxiter : int
+         Maximum number of iterations.
+     finite_diff_rel_step : None or array_like, optional
+         If `jac in ['2-point', '3-point', 'cs']` the relative step size to
+         use for numerical approximation of `jac`. The absolute step
+         size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``,
+         possibly adjusted to fit into the bounds. For ``method='3-point'``
+         the sign of `h` is ignored. If None (default) then step is selected
+         automatically.
+     """
+     _check_unknown_options(unknown_options)
+     iter = maxiter - 1
+     acc = ftol
+     epsilon = eps
+
+     if not disp:
+         iprint = 0
+
+     # Transform x0 into an array.
+     xp = array_namespace(x0)
+     x0 = atleast_nd(x0, ndim=1, xp=xp)
+     dtype = xp.float64
+     if xp.isdtype(x0.dtype, "real floating"):
+         dtype = x0.dtype
+     x = xp.reshape(xp.astype(x0, dtype), -1)
+
+     # SLSQP is sent 'old-style' bounds, 'new-style' bounds are required by
+     # ScalarFunction
+     if bounds is None or len(bounds) == 0:
+         new_bounds = (-np.inf, np.inf)
+     else:
+         new_bounds = old_bound_to_new(bounds)
+
+     # clip the initial guess to bounds, otherwise ScalarFunction doesn't work
+     x = np.clip(x, new_bounds[0], new_bounds[1])
+
+     # Constraints are triaged per type into a dictionary of tuples
+     if isinstance(constraints, dict):
+         constraints = (constraints, )
+
+     cons = {'eq': (), 'ineq': ()}
+     for ic, con in enumerate(constraints):
+         # check type
+         try:
+             ctype = con['type'].lower()
+         except KeyError as e:
+             raise KeyError('Constraint %d has no type defined.' % ic) from e
+         except TypeError as e:
+             raise TypeError('Constraints must be defined using a '
+                             'dictionary.') from e
+         except AttributeError as e:
+             raise TypeError("Constraint's type must be a string.") from e
+         else:
+             if ctype not in ['eq', 'ineq']:
+                 raise ValueError("Unknown constraint type '%s'." % con['type'])
+
+         # check function
+         if 'fun' not in con:
+             raise ValueError('Constraint %d has no function defined.' % ic)
+
+         # check Jacobian
+         cjac = con.get('jac')
+         if cjac is None:
+             # approximate Jacobian function. The factory function is needed
+             # to keep a reference to `fun`, see gh-4240.
+             def cjac_factory(fun):
+                 def cjac(x, *args):
+                     x = _check_clip_x(x, new_bounds)
+
+                     if jac in ['2-point', '3-point', 'cs']:
+                         return approx_derivative(fun, x, method=jac, args=args,
+                                                  rel_step=finite_diff_rel_step,
+                                                  bounds=new_bounds)
+                     else:
+                         return approx_derivative(fun, x, method='2-point',
+                                                  abs_step=epsilon, args=args,
+                                                  bounds=new_bounds)
+
+                 return cjac
+             cjac = cjac_factory(con['fun'])
+
+         # update constraints' dictionary
+         cons[ctype] += ({'fun': con['fun'],
+                          'jac': cjac,
+                          'args': con.get('args', ())}, )
+
+     exit_modes = {-1: "Gradient evaluation required (g & a)",
+                    0: "Optimization terminated successfully",
+                    1: "Function evaluation required (f & c)",
+                    2: "More equality constraints than independent variables",
+                    3: "More than 3*n iterations in LSQ subproblem",
+                    4: "Inequality constraints incompatible",
+                    5: "Singular matrix E in LSQ subproblem",
+                    6: "Singular matrix C in LSQ subproblem",
+                    7: "Rank-deficient equality constraint subproblem HFTI",
+                    8: "Positive directional derivative for linesearch",
+                    9: "Iteration limit reached"}
+
+     # Set the parameters that SLSQP will need
+     # meq, mieq: number of equality and inequality constraints
+     meq = sum(map(len, [atleast_1d(c['fun'](x, *c['args']))
+               for c in cons['eq']]))
+     mieq = sum(map(len, [atleast_1d(c['fun'](x, *c['args']))
+                for c in cons['ineq']]))
+     # m = The total number of constraints
+     m = meq + mieq
+     # la = The number of constraints, or 1 if there are no constraints
+     la = array([1, m]).max()
+     # n = The number of independent variables
+     n = len(x)
+
+     # Define the workspaces for SLSQP
+     n1 = n + 1
+     mineq = m - meq + n1 + n1
+     len_w = (3*n1+m)*(n1+1)+(n1-meq+1)*(mineq+2) + 2*mineq+(n1+mineq)*(n1-meq) \
+             + 2*meq + n1 + ((n+1)*n)//2 + 2*m + 3*n + 3*n1 + 1
+     len_jw = mineq
+     w = zeros(len_w)
+     jw = zeros(len_jw)
+
+     # Decompose bounds into xl and xu
+     if bounds is None or len(bounds) == 0:
+         xl = np.empty(n, dtype=float)
+         xu = np.empty(n, dtype=float)
+         xl.fill(np.nan)
+         xu.fill(np.nan)
+     else:
+         bnds = array([(_arr_to_scalar(l), _arr_to_scalar(u))
+                       for (l, u) in bounds], float)
+         if bnds.shape[0] != n:
+             raise IndexError('SLSQP Error: the length of bounds is not '
+                              'compatible with that of x0.')
+
+         with np.errstate(invalid='ignore'):
+             bnderr = bnds[:, 0] > bnds[:, 1]
+
+         if bnderr.any():
+             raise ValueError('SLSQP Error: lb > ub in bounds %s.' %
+                              ', '.join(str(b) for b in bnderr))
+         xl, xu = bnds[:, 0], bnds[:, 1]
+
+         # Mark infinite bounds with nans; the Fortran code understands this
+         infbnd = ~isfinite(bnds)
+         xl[infbnd[:, 0]] = np.nan
+         xu[infbnd[:, 1]] = np.nan
+
+     # ScalarFunction provides function and gradient evaluation
+     sf = _prepare_scalar_function(func, x, jac=jac, args=args, epsilon=eps,
+                                   finite_diff_rel_step=finite_diff_rel_step,
+                                   bounds=new_bounds)
+     # gh11403 SLSQP sometimes exceeds bounds by 1 or 2 ULP, make sure this
+     # doesn't get sent to the func/grad evaluator.
+     wrapped_fun = _clip_x_for_func(sf.fun, new_bounds)
+     wrapped_grad = _clip_x_for_func(sf.grad, new_bounds)
+
+     # Initialize the iteration counter and the mode value
+     mode = array(0, int)
+     acc = array(acc, float)
+     majiter = array(iter, int)
+     majiter_prev = 0
+
+     # Initialize internal SLSQP state variables
+     alpha = array(0, float)
+     f0 = array(0, float)
+     gs = array(0, float)
+     h1 = array(0, float)
+     h2 = array(0, float)
+     h3 = array(0, float)
+     h4 = array(0, float)
+     t = array(0, float)
+     t0 = array(0, float)
+     tol = array(0, float)
+     iexact = array(0, int)
+     incons = array(0, int)
+     ireset = array(0, int)
+     itermx = array(0, int)
+     line = array(0, int)
+     n1 = array(0, int)
+     n2 = array(0, int)
+     n3 = array(0, int)
+
+     # Print the header if iprint >= 2
+     if iprint >= 2:
+         print("%5s %5s %16s %16s" % ("NIT", "FC", "OBJFUN", "GNORM"))
+
+     # mode is zero on entry, so call objective, constraints and gradients
+     # there should be no func evaluations here because it's cached from
+     # ScalarFunction
+     fx = wrapped_fun(x)
+     g = append(wrapped_grad(x), 0.0)
+     c = _eval_constraint(x, cons)
+     a = _eval_con_normals(x, cons, la, n, m, meq, mieq)
+
+     while 1:
+         # Call SLSQP
+         slsqp(m, meq, x, xl, xu, fx, c, g, a, acc, majiter, mode, w, jw,
+               alpha, f0, gs, h1, h2, h3, h4, t, t0, tol,
+               iexact, incons, ireset, itermx, line,
+               n1, n2, n3)
+
+         if mode == 1:  # objective and constraint evaluation required
+             fx = wrapped_fun(x)
+             c = _eval_constraint(x, cons)
+
+         if mode == -1:  # gradient evaluation required
+             g = append(wrapped_grad(x), 0.0)
+             a = _eval_con_normals(x, cons, la, n, m, meq, mieq)
+
+         if majiter > majiter_prev:
+             # call callback if major iteration has incremented
+             if callback is not None:
+                 callback(np.copy(x))
+
+             # Print the status of the current iterate if iprint > 2
+             if iprint >= 2:
+                 print("%5i %5i % 16.6E % 16.6E" % (majiter, sf.nfev,
+                                                    fx, linalg.norm(g)))
+
+         # If exit mode is not -1 or 1, slsqp has completed
+         if abs(mode) != 1:
+             break
+
+         majiter_prev = int(majiter)
+
+     # Optimization loop complete. Print status if requested
+     if iprint >= 1:
+         print(exit_modes[int(mode)] + "    (Exit mode " + str(mode) + ')')
+         print("            Current function value:", fx)
+         print("            Iterations:", majiter)
+         print("            Function evaluations:", sf.nfev)
+         print("            Gradient evaluations:", sf.ngev)
+
+     return OptimizeResult(x=x, fun=fx, jac=g[:-1], nit=int(majiter),
+                           nfev=sf.nfev, njev=sf.ngev, status=int(mode),
+                           message=exit_modes[int(mode)], success=(mode == 0))
+
+
+ def _eval_constraint(x, cons):
+     # Compute constraints
+     if cons['eq']:
+         c_eq = concatenate([atleast_1d(con['fun'](x, *con['args']))
+                             for con in cons['eq']])
+     else:
+         c_eq = zeros(0)
+
+     if cons['ineq']:
+         c_ieq = concatenate([atleast_1d(con['fun'](x, *con['args']))
+                              for con in cons['ineq']])
+     else:
+         c_ieq = zeros(0)
+
+     # Now combine c_eq and c_ieq into a single matrix
+     c = concatenate((c_eq, c_ieq))
+     return c
+
+
+ def _eval_con_normals(x, cons, la, n, m, meq, mieq):
+     # Compute the normals of the constraints
+     if cons['eq']:
+         a_eq = vstack([con['jac'](x, *con['args'])
+                        for con in cons['eq']])
+     else:  # no equality constraint
+         a_eq = zeros((meq, n))
+
+     if cons['ineq']:
+         a_ieq = vstack([con['jac'](x, *con['args'])
+                         for con in cons['ineq']])
+     else:  # no inequality constraint
+         a_ieq = zeros((mieq, n))
+
+     # Now combine a_eq and a_ieq into a single a matrix
+     if m == 0:  # no constraints
+         a = zeros((la, n))
+     else:
+         a = vstack((a_eq, a_ieq))
+     a = concatenate((a, zeros([la, 1])), 1)
+
+     return a
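For orientation, a hedged usage sketch of the public fmin_slsqp wrapper defined above (the quadratic objective and the equality constraint are example values, not from the diff); the same problem can equivalently be passed to scipy.optimize.minimize with method='SLSQP', which routes to _minimize_slsqp.

    import numpy as np
    from scipy.optimize import fmin_slsqp

    def objective(x):
        # squared distance to (1, 2.5); any smooth scalar function works here
        return (x[0] - 1.0)**2 + (x[1] - 2.5)**2

    # minimize subject to x0 + x1 == 1, supplied through f_eqcons
    x = fmin_slsqp(objective, x0=[0.0, 0.0],
                   f_eqcons=lambda x: np.array([x[0] + x[1] - 1.0]),
                   iprint=0)
    print(x)  # approximately [-0.25, 1.25], the projection of (1, 2.5) onto the line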
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_spectral.py ADDED
@@ -0,0 +1,260 @@
+ """
+ Spectral Algorithm for Nonlinear Equations
+ """
+ import collections
+
+ import numpy as np
+ from scipy.optimize import OptimizeResult
+ from scipy.optimize._optimize import _check_unknown_options
+ from ._linesearch import _nonmonotone_line_search_cruz, _nonmonotone_line_search_cheng
+
+ class _NoConvergence(Exception):
+     pass
+
+
+ def _root_df_sane(func, x0, args=(), ftol=1e-8, fatol=1e-300, maxfev=1000,
+                   fnorm=None, callback=None, disp=False, M=10, eta_strategy=None,
+                   sigma_eps=1e-10, sigma_0=1.0, line_search='cruz', **unknown_options):
+     r"""
+     Solve nonlinear equation with the DF-SANE method
+
+     Options
+     -------
+     ftol : float, optional
+         Relative norm tolerance.
+     fatol : float, optional
+         Absolute norm tolerance.
+         Algorithm terminates when ``||func(x)|| < fatol + ftol ||func(x_0)||``.
+     fnorm : callable, optional
+         Norm to use in the convergence check. If None, 2-norm is used.
+     maxfev : int, optional
+         Maximum number of function evaluations.
+     disp : bool, optional
+         Whether to print convergence process to stdout.
+     eta_strategy : callable, optional
+         Choice of the ``eta_k`` parameter, which gives slack for growth
+         of ``||F||**2``. Called as ``eta_k = eta_strategy(k, x, F)`` with
+         `k` the iteration number, `x` the current iterate and `F` the current
+         residual. Should satisfy ``eta_k > 0`` and ``sum(eta, k=0..inf) < inf``.
+         Default: ``||F||**2 / (1 + k)**2``.
+     sigma_eps : float, optional
+         The spectral coefficient is constrained to ``sigma_eps < sigma < 1/sigma_eps``.
+         Default: 1e-10
+     sigma_0 : float, optional
+         Initial spectral coefficient.
+         Default: 1.0
+     M : int, optional
+         Number of iterates to include in the nonmonotonic line search.
+         Default: 10
+     line_search : {'cruz', 'cheng'}
+         Type of line search to employ. 'cruz' is the original one defined in
+         [Martinez & Raydan. Math. Comp. 75, 1429 (2006)], 'cheng' is
+         a modified search defined in [Cheng & Li. IMA J. Numer. Anal. 29, 814 (2009)].
+         Default: 'cruz'
+
+     References
+     ----------
+     .. [1] "Spectral residual method without gradient information for solving
+            large-scale nonlinear systems of equations." W. La Cruz,
+            J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006).
+     .. [2] W. La Cruz, Opt. Meth. Software, 29, 24 (2014).
+     .. [3] W. Cheng, D.-H. Li. IMA J. Numer. Anal. **29**, 814 (2009).
+
+     """
+     _check_unknown_options(unknown_options)
+
+     if line_search not in ('cheng', 'cruz'):
+         raise ValueError(f"Invalid value {line_search!r} for 'line_search'")
+
+     nexp = 2
+
+     if eta_strategy is None:
+         # Different choice from [1], as their eta is not invariant
+         # vs. scaling of F.
+         def eta_strategy(k, x, F):
+             # Obtain squared 2-norm of the initial residual from the outer scope
+             return f_0 / (1 + k)**2
+
+     if fnorm is None:
+         def fnorm(F):
+             # Obtain squared 2-norm of the current residual from the outer scope
+             return f_k**(1.0/nexp)
+
+     def fmerit(F):
+         return np.linalg.norm(F)**nexp
+
+     nfev = [0]
+     f, x_k, x_shape, f_k, F_k, is_complex = _wrap_func(func, x0, fmerit,
+                                                        nfev, maxfev, args)
+
+     k = 0
+     f_0 = f_k
+     sigma_k = sigma_0
+
+     F_0_norm = fnorm(F_k)
+
+     # For the 'cruz' line search
+     prev_fs = collections.deque([f_k], M)
+
+     # For the 'cheng' line search
+     Q = 1.0
+     C = f_0
+
+     converged = False
+     message = "too many function evaluations required"
+
+     while True:
+         F_k_norm = fnorm(F_k)
+
+         if disp:
+             print("iter %d: ||F|| = %g, sigma = %g" % (k, F_k_norm, sigma_k))
+
+         if callback is not None:
+             callback(x_k, F_k)
+
+         if F_k_norm < ftol * F_0_norm + fatol:
+             # Converged!
+             message = "successful convergence"
+             converged = True
+             break
+
+         # Control spectral parameter, from [2]
+         if abs(sigma_k) > 1/sigma_eps:
+             sigma_k = 1/sigma_eps * np.sign(sigma_k)
+         elif abs(sigma_k) < sigma_eps:
+             sigma_k = sigma_eps
+
+         # Line search direction
+         d = -sigma_k * F_k
+
+         # Nonmonotone line search
+         eta = eta_strategy(k, x_k, F_k)
+         try:
+             if line_search == 'cruz':
+                 alpha, xp, fp, Fp = _nonmonotone_line_search_cruz(f, x_k, d, prev_fs,
+                                                                   eta=eta)
+             elif line_search == 'cheng':
+                 alpha, xp, fp, Fp, C, Q = _nonmonotone_line_search_cheng(f, x_k, d, f_k,
+                                                                          C, Q, eta=eta)
+         except _NoConvergence:
+             break
+
+         # Update spectral parameter
+         s_k = xp - x_k
+         y_k = Fp - F_k
+         sigma_k = np.vdot(s_k, s_k) / np.vdot(s_k, y_k)
+
+         # Take step
+         x_k = xp
+         F_k = Fp
+         f_k = fp
+
+         # Store function value
+         if line_search == 'cruz':
+             prev_fs.append(fp)
+
+         k += 1
+
+     x = _wrap_result(x_k, is_complex, shape=x_shape)
+     F = _wrap_result(F_k, is_complex)
+
+     result = OptimizeResult(x=x, success=converged,
+                             message=message,
+                             fun=F, nfev=nfev[0], nit=k, method="df-sane")
+
+     return result
+
+
+ def _wrap_func(func, x0, fmerit, nfev_list, maxfev, args=()):
+     """
+     Wrap a function and an initial value so that (i) complex values
+     are wrapped to reals, and (ii) value for a merit function
+     fmerit(x, f) is computed at the same time, (iii) iteration count
+     is maintained and an exception is raised if it is exceeded.
+
+     Parameters
+     ----------
+     func : callable
+         Function to wrap
+     x0 : ndarray
+         Initial value
+     fmerit : callable
+         Merit function fmerit(f) for computing merit value from residual.
+     nfev_list : list
+         List to store number of evaluations in. Should be [0] in the beginning.
+     maxfev : int
+         Maximum number of evaluations before _NoConvergence is raised.
+     args : tuple
+         Extra arguments to func
+
+     Returns
+     -------
+     wrap_func : callable
+         Wrapped function, to be called as
+         ``F, fp = wrap_func(x0)``
+     x0_wrap : ndarray of float
+         Wrapped initial value; raveled to 1-D and complex
+         values mapped to reals.
+     x0_shape : tuple
+         Shape of the initial value array
+     f : float
+         Merit function at F
+     F : ndarray of float
+         Residual at x0_wrap
+     is_complex : bool
+         Whether complex values were mapped to reals
+
+     """
+     x0 = np.asarray(x0)
+     x0_shape = x0.shape
+     F = np.asarray(func(x0, *args)).ravel()
+     is_complex = np.iscomplexobj(x0) or np.iscomplexobj(F)
+     x0 = x0.ravel()
+
+     nfev_list[0] = 1
+
+     if is_complex:
+         def wrap_func(x):
+             if nfev_list[0] >= maxfev:
+                 raise _NoConvergence()
+             nfev_list[0] += 1
+             z = _real2complex(x).reshape(x0_shape)
+             v = np.asarray(func(z, *args)).ravel()
+             F = _complex2real(v)
+             f = fmerit(F)
+             return f, F
+
+         x0 = _complex2real(x0)
+         F = _complex2real(F)
+     else:
+         def wrap_func(x):
+             if nfev_list[0] >= maxfev:
+                 raise _NoConvergence()
+             nfev_list[0] += 1
+             x = x.reshape(x0_shape)
+             F = np.asarray(func(x, *args)).ravel()
+             f = fmerit(F)
+             return f, F
+
+     return wrap_func, x0, x0_shape, fmerit(F), F, is_complex
+
+
+ def _wrap_result(result, is_complex, shape=None):
+     """
+     Convert from real to complex and reshape result arrays.
+     """
+     if is_complex:
+         z = _real2complex(result)
+     else:
+         z = result
+     if shape is not None:
+         z = z.reshape(shape)
+     return z
+
+
+ def _real2complex(x):
+     return np.ascontiguousarray(x, dtype=float).view(np.complex128)
+
+
+ def _complex2real(z):
+     return np.ascontiguousarray(z, dtype=complex).view(np.float64)
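_root_df_sane is reached through the public scipy.optimize.root interface with method='df-sane'. A small sketch under that assumption (the two-equation system below is an illustrative example, not from the diff):

    import numpy as np
    from scipy.optimize import root

    def F(x):
        # a smooth 2x2 nonlinear system with a root near (0.84, 0.16)
        return np.array([x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
                         0.5 * (x[1] - x[0])**3 + x[1]])

    sol = root(F, x0=[0.0, 0.0], method='df-sane')
    print(sol.success, sol.x)  # True, roughly [0.84, 0.16]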
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_tnc.py ADDED
@@ -0,0 +1,430 @@
+ # TNC Python interface
+ # @(#) $Jeannot: tnc.py,v 1.11 2005/01/28 18:27:31 js Exp $
+
+ # Copyright (c) 2004-2005, Jean-Sebastien Roy ([email protected])
+
+ # Permission is hereby granted, free of charge, to any person obtaining a
+ # copy of this software and associated documentation files (the
+ # "Software"), to deal in the Software without restriction, including
+ # without limitation the rights to use, copy, modify, merge, publish,
+ # distribute, sublicense, and/or sell copies of the Software, and to
+ # permit persons to whom the Software is furnished to do so, subject to
+ # the following conditions:
+
+ # The above copyright notice and this permission notice shall be included
+ # in all copies or substantial portions of the Software.
+
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ """
+ TNC: A Python interface to the TNC non-linear optimizer
+
+ TNC is a non-linear optimizer. To use it, you must provide a function to
+ minimize. The function must take one argument: the list of coordinates where to
+ evaluate the function; and it must return either a tuple, whose first element is the
+ value of the function, and whose second argument is the gradient of the function
+ (as a list of values); or None, to abort the minimization.
+ """
+
+ from scipy.optimize import _moduleTNC as moduleTNC
+ from ._optimize import (MemoizeJac, OptimizeResult, _check_unknown_options,
+                         _prepare_scalar_function)
+ from ._constraints import old_bound_to_new
+ from scipy._lib._array_api import atleast_nd, array_namespace
+
+ from numpy import inf, array, zeros
+
+ __all__ = ['fmin_tnc']
+
+
+ MSG_NONE = 0  # No messages
+ MSG_ITER = 1  # One line per iteration
+ MSG_INFO = 2  # Informational messages
+ MSG_VERS = 4  # Version info
+ MSG_EXIT = 8  # Exit reasons
+ MSG_ALL = MSG_ITER + MSG_INFO + MSG_VERS + MSG_EXIT
+
+ MSGS = {
+         MSG_NONE: "No messages",
+         MSG_ITER: "One line per iteration",
+         MSG_INFO: "Informational messages",
+         MSG_VERS: "Version info",
+         MSG_EXIT: "Exit reasons",
+         MSG_ALL: "All messages"
+ }
+
+ INFEASIBLE = -1  # Infeasible (lower bound > upper bound)
+ LOCALMINIMUM = 0  # Local minimum reached (|pg| ~= 0)
+ FCONVERGED = 1  # Converged (|f_n-f_(n-1)| ~= 0)
+ XCONVERGED = 2  # Converged (|x_n-x_(n-1)| ~= 0)
+ MAXFUN = 3  # Max. number of function evaluations reached
+ LSFAIL = 4  # Linear search failed
+ CONSTANT = 5  # All lower bounds are equal to the upper bounds
+ NOPROGRESS = 6  # Unable to progress
+ USERABORT = 7  # User requested end of minimization
+
+ RCSTRINGS = {
+         INFEASIBLE: "Infeasible (lower bound > upper bound)",
+         LOCALMINIMUM: "Local minimum reached (|pg| ~= 0)",
+         FCONVERGED: "Converged (|f_n-f_(n-1)| ~= 0)",
+         XCONVERGED: "Converged (|x_n-x_(n-1)| ~= 0)",
+         MAXFUN: "Max. number of function evaluations reached",
+         LSFAIL: "Linear search failed",
+         CONSTANT: "All lower bounds are equal to the upper bounds",
+         NOPROGRESS: "Unable to progress",
+         USERABORT: "User requested end of minimization"
+ }
+
+ # Changes to interface made by Travis Oliphant, Apr. 2004 for inclusion in
+ # SciPy
+
+
+ def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0,
+              bounds=None, epsilon=1e-8, scale=None, offset=None,
+              messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1,
+              stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1,
+              rescale=-1, disp=None, callback=None):
+     """
+     Minimize a function with variables subject to bounds, using
+     gradient information in a truncated Newton algorithm. This
+     method wraps a C implementation of the algorithm.
+
+     Parameters
+     ----------
+     func : callable ``func(x, *args)``
+         Function to minimize.  Must do one of:
+
+         1. Return f and g, where f is the value of the function and g its
+            gradient (a list of floats).
+
+         2. Return the function value but supply gradient function
+            separately as `fprime`.
+
+         3. Return the function value and set ``approx_grad=True``.
+
+         If the function returns None, the minimization
+         is aborted.
+     x0 : array_like
+         Initial estimate of minimum.
+     fprime : callable ``fprime(x, *args)``, optional
+         Gradient of `func`. If None, then either `func` must return the
+         function value and the gradient (``f, g = func(x, *args)``)
+         or `approx_grad` must be True.
+     args : tuple, optional
+         Arguments to pass to function.
+     approx_grad : bool, optional
+         If true, approximate the gradient numerically.
+     bounds : list, optional
+         (min, max) pairs for each element in x0, defining the
+         bounds on that parameter. Use None or +/-inf for one of
+         min or max when there is no bound in that direction.
+     epsilon : float, optional
+         Used if approx_grad is True. The stepsize in a finite
+         difference approximation for fprime.
+     scale : array_like, optional
+         Scaling factors to apply to each variable. If None, the
+         factors are up-low for interval bounded variables and
+         1+|x| for the others. Defaults to None.
+     offset : array_like, optional
+         Value to subtract from each variable. If None, the
+         offsets are (up+low)/2 for interval bounded variables
+         and x for the others.
+     messages : int, optional
+         Bit mask used to select messages displayed during
+         minimization; values are defined in the MSGS dict. Defaults to
+         MSG_ALL.
+     disp : int, optional
+         Integer interface to messages. 0 = no message, 5 = all messages.
+     maxCGit : int, optional
+         Maximum number of hessian*vector evaluations per main
+         iteration. If maxCGit == 0, the direction chosen is
+         -gradient. If maxCGit < 0, maxCGit is set to
+         max(1, min(50, n/2)). Defaults to -1.
+     maxfun : int, optional
+         Maximum number of function evaluations. If None, maxfun is
+         set to max(100, 10*len(x0)). Defaults to None. Note that this function
+         may violate the limit because of evaluating gradients by numerical
+         differentiation.
+     eta : float, optional
+         Severity of the line search. If < 0 or > 1, set to 0.25.
+         Defaults to -1.
+     stepmx : float, optional
+         Maximum step for the line search. May be increased during
+         call. If too small, it will be set to 10.0. Defaults to 0.
+     accuracy : float, optional
+         Relative precision for finite difference calculations. If
+         <= machine_precision, set to sqrt(machine_precision).
+         Defaults to 0.
+     fmin : float, optional
+         Minimum function value estimate. Defaults to 0.
+     ftol : float, optional
+         Precision goal for the value of f in the stopping criterion.
+         If ftol < 0.0, ftol is set to 0.0. Defaults to -1.
+     xtol : float, optional
+         Precision goal for the value of x in the stopping
+         criterion (after applying x scaling factors). If xtol <
+         0.0, xtol is set to sqrt(machine_precision). Defaults to
+         -1.
+     pgtol : float, optional
+         Precision goal for the value of the projected gradient in
+         the stopping criterion (after applying x scaling factors).
+         If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy).
+         Setting it to 0.0 is not recommended. Defaults to -1.
+     rescale : float, optional
+         Scaling factor (in log10) used to trigger f value
+         rescaling. If 0, rescale at each iteration. If a large
+         value, never rescale. If < 0, rescale is set to 1.3.
+     callback : callable, optional
+         Called after each iteration, as callback(xk), where xk is the
+         current parameter vector.
+
+     Returns
+     -------
+     x : ndarray
+         The solution.
+     nfeval : int
+         The number of function evaluations.
+     rc : int
+         Return code, see below.
+
+     See also
+     --------
+     minimize: Interface to minimization algorithms for multivariate
+         functions. See the 'TNC' `method` in particular.
+
+     Notes
+     -----
+     The underlying algorithm is truncated Newton, also called
+     Newton Conjugate-Gradient. This method differs from
+     scipy.optimize.fmin_ncg in that
+
+     1. it wraps a C implementation of the algorithm
+     2. it allows each variable to be given an upper and lower bound.
+
+     The algorithm incorporates the bound constraints by determining
+     the descent direction as in an unconstrained truncated Newton,
+     but never taking a step-size large enough to leave the space
+     of feasible x's. The algorithm keeps track of a set of
+     currently active constraints, and ignores them when computing
+     the minimum allowable step size. (The x's associated with the
+     active constraint are kept fixed.) If the maximum allowable
+     step size is zero then a new constraint is added. At the end
+     of each iteration one of the constraints may be deemed no
+     longer active and removed. A constraint is considered
+     no longer active if it is currently active
+     but the gradient for that variable points inward from the
+     constraint. The specific constraint removed is the one
+     associated with the variable of largest index whose
+     constraint is no longer active.
+
+     Return codes are defined as follows::
+
+         -1 : Infeasible (lower bound > upper bound)
+          0 : Local minimum reached (|pg| ~= 0)
+          1 : Converged (|f_n-f_(n-1)| ~= 0)
+          2 : Converged (|x_n-x_(n-1)| ~= 0)
+          3 : Max. number of function evaluations reached
+          4 : Linear search failed
+          5 : All lower bounds are equal to the upper bounds
+          6 : Unable to progress
+          7 : User requested end of minimization
+
+     References
+     ----------
+     Wright S., Nocedal J. (2006), 'Numerical Optimization'
+
+     Nash S.G. (1984), "Newton-Type Minimization Via the Lanczos Method",
+     SIAM Journal of Numerical Analysis 21, pp. 770-778
+
+     """
+     # handle fprime/approx_grad
+     if approx_grad:
+         fun = func
+         jac = None
+     elif fprime is None:
+         fun = MemoizeJac(func)
+         jac = fun.derivative
+     else:
+         fun = func
+         jac = fprime
+
+     if disp is not None:  # disp takes precedence over messages
+         mesg_num = disp
+     else:
+         mesg_num = {0: MSG_NONE, 1: MSG_ITER, 2: MSG_INFO, 3: MSG_VERS,
+                     4: MSG_EXIT, 5: MSG_ALL}.get(messages, MSG_ALL)
+     # build options
+     opts = {'eps': epsilon,
+             'scale': scale,
+             'offset': offset,
+             'mesg_num': mesg_num,
+             'maxCGit': maxCGit,
+             'maxfun': maxfun,
+             'eta': eta,
+             'stepmx': stepmx,
+             'accuracy': accuracy,
+             'minfev': fmin,
+             'ftol': ftol,
+             'xtol': xtol,
+             'gtol': pgtol,
+             'rescale': rescale,
+             'disp': False}
+
+     res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, **opts)
+
+     return res['x'], res['nfev'], res['status']
+
+
+ def _minimize_tnc(fun, x0, args=(), jac=None, bounds=None,
+                   eps=1e-8, scale=None, offset=None, mesg_num=None,
+                   maxCGit=-1, eta=-1, stepmx=0, accuracy=0,
+                   minfev=0, ftol=-1, xtol=-1, gtol=-1, rescale=-1, disp=False,
+                   callback=None, finite_diff_rel_step=None, maxfun=None,
+                   **unknown_options):
+     """
+     Minimize a scalar function of one or more variables using a truncated
+     Newton (TNC) algorithm.
+
+     Options
+     -------
+     eps : float or ndarray
+         If `jac is None` the absolute step size used for numerical
+         approximation of the jacobian via forward differences.
+     scale : list of floats
+         Scaling factors to apply to each variable. If None, the
+         factors are up-low for interval bounded variables and
+         1+|x| for the others. Defaults to None.
+     offset : float
+         Value to subtract from each variable. If None, the
+         offsets are (up+low)/2 for interval bounded variables
+         and x for the others.
+     disp : bool
+         Set to True to print convergence messages.
+     maxCGit : int
+         Maximum number of hessian*vector evaluations per main
+         iteration. If maxCGit == 0, the direction chosen is
+         -gradient. If maxCGit < 0, maxCGit is set to
+         max(1, min(50, n/2)). Defaults to -1.
+     eta : float
+         Severity of the line search. If < 0 or > 1, set to 0.25.
+         Defaults to -1.
+     stepmx : float
+         Maximum step for the line search. May be increased during
+         call. If too small, it will be set to 10.0. Defaults to 0.
+     accuracy : float
+         Relative precision for finite difference calculations. If
+         <= machine_precision, set to sqrt(machine_precision).
+         Defaults to 0.
+     minfev : float
+         Minimum function value estimate. Defaults to 0.
+     ftol : float
+         Precision goal for the value of f in the stopping criterion.
+         If ftol < 0.0, ftol is set to 0.0. Defaults to -1.
+     xtol : float
+         Precision goal for the value of x in the stopping
+         criterion (after applying x scaling factors). If xtol <
+         0.0, xtol is set to sqrt(machine_precision). Defaults to
+         -1.
+     gtol : float
+         Precision goal for the value of the projected gradient in
+         the stopping criterion (after applying x scaling factors).
+         If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy).
+         Setting it to 0.0 is not recommended. Defaults to -1.
+     rescale : float
+         Scaling factor (in log10) used to trigger f value
+         rescaling. If 0, rescale at each iteration. If a large
+         value, never rescale. If < 0, rescale is set to 1.3.
+     finite_diff_rel_step : None or array_like, optional
+         If `jac in ['2-point', '3-point', 'cs']` the relative step size to
+         use for numerical approximation of the jacobian. The absolute step
+         size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``,
+         possibly adjusted to fit into the bounds. For ``method='3-point'``
+         the sign of `h` is ignored. If None (default) then step is selected
+         automatically.
+     maxfun : int
+         Maximum number of function evaluations. If None, `maxfun` is
+         set to max(100, 10*len(x0)). Defaults to None.
+     """
+     _check_unknown_options(unknown_options)
+     fmin = minfev
+     pgtol = gtol
+
+     xp = array_namespace(x0)
+     x0 = atleast_nd(x0, ndim=1, xp=xp)
+     dtype = xp.float64
+     if xp.isdtype(x0.dtype, "real floating"):
+         dtype = x0.dtype
+     x0 = xp.reshape(xp.astype(x0, dtype), -1)
+
+     n = len(x0)
+
+     if bounds is None:
+         bounds = [(None, None)] * n
+     if len(bounds) != n:
+         raise ValueError('length of x0 != length of bounds')
+     new_bounds = old_bound_to_new(bounds)
+
+     if mesg_num is not None:
+         messages = {0: MSG_NONE, 1: MSG_ITER, 2: MSG_INFO, 3: MSG_VERS,
+                     4: MSG_EXIT, 5: MSG_ALL}.get(mesg_num, MSG_ALL)
+     elif disp:
+         messages = MSG_ALL
+     else:
+         messages = MSG_NONE
+
+     sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps,
+                                   finite_diff_rel_step=finite_diff_rel_step,
+                                   bounds=new_bounds)
+     func_and_grad = sf.fun_and_grad
+
+     """
+     low, up   : the bounds (lists of floats)
+                 if low is None, the lower bounds are removed.
+                 if up is None, the upper bounds are removed.
+                 low and up defaults to None
+     """
+     low = zeros(n)
+     up = zeros(n)
+     for i in range(n):
+         if bounds[i] is None:
+             l, u = -inf, inf
+         else:
+             l, u = bounds[i]
+         if l is None:
+             low[i] = -inf
+         else:
+             low[i] = l
+         if u is None:
+             up[i] = inf
+         else:
+             up[i] = u
+
+     if scale is None:
+         scale = array([])
+
+     if offset is None:
+         offset = array([])
+
+     if maxfun is None:
+         maxfun = max(100, 10*len(x0))
+
+     rc, nf, nit, x, funv, jacv = moduleTNC.tnc_minimize(
+         func_and_grad, x0, low, up, scale,
+         offset, messages, maxCGit, maxfun,
+         eta, stepmx, accuracy, fmin, ftol,
+         xtol, pgtol, rescale, callback
+     )
+     # the TNC documentation states: "On output, x, f and g may be very
+     # slightly out of sync because of scaling". Therefore re-evaluate
+     # func_and_grad so they are synced.
+     funv, jacv = func_and_grad(x)
+
+     return OptimizeResult(x=x, fun=funv, jac=jacv, nfev=sf.nfev,
+                           nit=nit, status=rc, message=RCSTRINGS[rc],
+                           success=(-1 < rc < 3))
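A hedged example call of the public fmin_tnc wrapper above (the Rosenbrock helpers are scipy's own test functions; the bounds and start point are example values chosen here):

    import numpy as np
    from scipy.optimize import fmin_tnc, rosen, rosen_der

    x, nfeval, rc = fmin_tnc(rosen, x0=np.array([-1.2, 1.0]),
                             fprime=rosen_der,          # analytic gradient
                             bounds=[(-2.0, 2.0)] * 2,  # box constraints
                             disp=0)                    # silent (MSG_NONE)
    print(x, rc)  # x is approximately [1., 1.]; rc indexes RCSTRINGS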
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trlib/__init__.py ADDED
@@ -0,0 +1,12 @@
+ from ._trlib import TRLIBQuadraticSubproblem
+
+ __all__ = ['TRLIBQuadraticSubproblem', 'get_trlib_quadratic_subproblem']
+
+
+ def get_trlib_quadratic_subproblem(tol_rel_i=-2.0, tol_rel_b=-3.0, disp=False):
+     def subproblem_factory(x, fun, jac, hess, hessp):
+         return TRLIBQuadraticSubproblem(x, fun, jac, hess, hessp,
+                                         tol_rel_i=tol_rel_i,
+                                         tol_rel_b=tol_rel_b,
+                                         disp=disp)
+     return subproblem_factory
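This factory is consumed by scipy.optimize.minimize with method='trust-krylov', which solves each trust-region subproblem via TRLIB. A minimal end-to-end sketch (example start point; rosen helpers are scipy's test functions):

    import numpy as np
    from scipy.optimize import minimize, rosen, rosen_der, rosen_hess

    res = minimize(rosen, np.array([-1.2, 1.0]), method='trust-krylov',
                   jac=rosen_der, hess=rosen_hess)
    print(res.success, res.x)  # True, approximately [1., 1.]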